From 2f2b1f232af234ee5b1cadae938199e8d9a93cfe Mon Sep 17 00:00:00 2001
From: Nyakku Shigure
Date: Fri, 17 Mar 2023 01:46:48 +0800
Subject: [PATCH] [CodeStyle][B009][B010] use normal property access instead of
 getattr/setattr (#51530)

---
 pyproject.toml                                |  4 ++
 python/paddle/dataset/common.py               |  5 +-
 python/paddle/distributed/fleet/fleet.py      |  2 +-
 .../distributed/fleet/layers/mpu/mp_layers.py |  8 +--
 .../parallel_layers/pp_layers.py              |  4 +-
 .../sharding/group_sharded_stage3.py          | 24 +++----
 .../custom_op/test_custom_raw_op_kernel_op.py |  2 +-
 .../test_convert_operators.py                 |  2 +-
 .../fluid/tests/unittests/eager_op_test.py    | 13 ++--
 .../paddle/fluid/tests/unittests/op_test.py   | 13 ++--
 python/paddle/hapi/model_summary.py           |  4 +-
 .../distributed/utils/io/save_for_auto.py     |  4 +-
 .../paddle/jit/dy2static/convert_call_func.py |  2 +-
 .../jit/dy2static/program_translator.py       |  6 +-
 python/paddle/jit/dy2static/utils.py          | 12 ++--
 python/paddle/nn/functional/vision.py         |  2 +-
 python/paddle/nn/quant/qat/conv.py            | 20 +++---
 python/paddle/nn/quant/qat/linear.py          |  6 +-
 python/paddle/nn/quant/quant_layers.py        | 66 +++++++++----------
 19 files changed, 99 insertions(+), 100 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index f244f6c1d4..da0f272d46 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -54,6 +54,10 @@ select = [
 
     # NumPy-specific rules
     "NPY001",
+
+    # Bugbear
+    "B009",
+    "B010",
 ]
 unfixable = [
     "NPY001"
diff --git a/python/paddle/dataset/common.py b/python/paddle/dataset/common.py
index eab4d37676..fb8c4ba969 100644
--- a/python/paddle/dataset/common.py
+++ b/python/paddle/dataset/common.py
@@ -138,10 +138,7 @@ def fetch_all():
         if "fetch" in dir(
             importlib.import_module("paddle.dataset.%s" % module_name)
         ):
-            getattr(
-                importlib.import_module("paddle.dataset.%s" % module_name),
-                "fetch",
-            )()
+            importlib.import_module('paddle.dataset.%s' % module_name).fetch()
 
 
 def split(reader, line_count, suffix="%05d.pickle", dumper=pickle.dump):
diff --git a/python/paddle/distributed/fleet/fleet.py b/python/paddle/distributed/fleet/fleet.py
index 96a964b699..55650a5b5f 100755
--- a/python/paddle/distributed/fleet/fleet.py
+++ b/python/paddle/distributed/fleet/fleet.py
@@ -1282,7 +1282,7 @@ class Fleet:
             self.origin_main_program = loss.block.program
             # add distributed attr
             if not hasattr(self.origin_main_program, "distributed_info_"):
-                setattr(self.origin_main_program, "distributed_info_", dict())
+                self.origin_main_program.distributed_info_ = dict()
             self.origin_main_program.distributed_info_[
                 "dp_degree"
             ] = self._user_defined_strategy.sharding_configs["dp_degree"]
diff --git a/python/paddle/distributed/fleet/layers/mpu/mp_layers.py b/python/paddle/distributed/fleet/layers/mpu/mp_layers.py
index 794acde2ec..c891425602 100644
--- a/python/paddle/distributed/fleet/layers/mpu/mp_layers.py
+++ b/python/paddle/distributed/fleet/layers/mpu/mp_layers.py
@@ -143,7 +143,7 @@ class VocabParallelEmbedding(Layer):
 
         self.weight.is_distributed = True if self.is_mp else False
         if self.weight.is_distributed:
-            setattr(self.weight, "split_axis", 0)
+            self.weight.split_axis = 0
 
     def forward(self, x):
         if self.is_mp:
@@ -277,7 +277,7 @@ class ColumnParallelLinear(Layer):
 
         self.weight.is_distributed = True if self.is_mp else False
         if self.weight.is_distributed:
-            setattr(self.weight, "split_axis", 1)
+            self.weight.split_axis = 1
 
         if has_bias:
             # initialize bias to zero like Megatron
@@ -289,7 +289,7 @@ class ColumnParallelLinear(Layer):
             )
             self.bias.is_distributed = True if self.is_mp else False
             if self.bias.is_distributed:
-                setattr(self.bias, "split_axis", 0)
+                self.bias.split_axis = 0
         else:
             self.bias = None
 
@@ -443,7 +443,7 @@ class RowParallelLinear(Layer):
 
         self.weight.is_distributed = True if self.is_mp else False
         if self.weight.is_distributed:
-            setattr(self.weight, "split_axis", 0)
+            self.weight.split_axis = 0
 
         if has_bias:
             self.bias = self.create_parameter(
diff --git a/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py b/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py
index 3b164d2afa..3c1d615ed6 100755
--- a/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py
+++ b/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py
@@ -493,7 +493,7 @@ class PipelineLayer(nn.Layer):
 
             for param in comm['layer'].parameters():
                 if self.global_rank != min(comm['ranks']):
-                    setattr(param, 'is_firstly_shared', False)
+                    param.is_firstly_shared = False
 
     def allreduce_shared_weight_gradients(self):
         for key, comm in self.shared_comm.items():
@@ -641,7 +641,7 @@ class PipelineLayer(nn.Layer):
                 for param in self.shared_layers[
                     layer.layer_name
                 ].parameters():
-                    setattr(param, "is_firstly_shared", True)
+                    param.is_firstly_shared = True
 
             if layer.forward_func is None:
                 run_function.append(self.shared_layers[layer.layer_name])
diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py
index 5fb2e9a58d..9e440f3fe6 100644
--- a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py
+++ b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py
@@ -1047,18 +1047,18 @@ def _create_params_grad(trainable_params, param2buffer_size, task_flow):
 
 def _PartitionParam(param):
     if not hasattr(param, "fw_storage"):
-        setattr(param, "fw_storage", None)
-        setattr(param, "bw_storage", None)
-        setattr(param, "master_weight", None)
-        setattr(param, "status", "all")
-        setattr(param, "use_count", 0)
+        param.fw_storage = None
+        param.bw_storage = None
+        param.master_weight = None
+        param.status = "all"
+        param.use_count = 0
     return param
 
 
 def _UnsliceParam(param):
     if not hasattr(param, "unslice"):
-        setattr(param, "unslice", True)
-        setattr(param, "master_weight", None)
+        param.unslice = True
+        param.master_weight = None
     return param
 
 
@@ -1078,11 +1078,11 @@ def _VarBaseWrapper(param):
 
 def _OptimizerWrapper(optimizer, offload, group, update_params_slice):
     if not hasattr(optimizer, "_optim"):
-        setattr(optimizer, "_optim", optimizer)
-        setattr(optimizer, "offload", offload)
-        setattr(optimizer, "_group", group)
-        setattr(optimizer, "update_scaler", None)
-        setattr(optimizer, "update_slice", update_params_slice)
+        optimizer._optim = optimizer
+        optimizer.offload = offload
+        optimizer._group = group
+        optimizer.update_scaler = None
+        optimizer.update_slice = update_params_slice
     return optimizer
 
 
diff --git a/python/paddle/fluid/tests/custom_op/test_custom_raw_op_kernel_op.py b/python/paddle/fluid/tests/custom_op/test_custom_raw_op_kernel_op.py
index 3fcf94e2f1..9762d29c48 100644
--- a/python/paddle/fluid/tests/custom_op/test_custom_raw_op_kernel_op.py
+++ b/python/paddle/fluid/tests/custom_op/test_custom_raw_op_kernel_op.py
@@ -67,7 +67,7 @@ class TestCustomRawReluOp(unittest.TestCase):
 
     def custom_raw_relu(self, x):
         module = importlib.import_module(MODULE_NAME)
-        custom_raw_relu_op = getattr(module, "custom_raw_relu")
+        custom_raw_relu_op = module.custom_raw_relu
         self.assertIsNotNone(custom_raw_relu_op)
         return custom_raw_relu_op(x)
 
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_operators.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_operators.py
index 812abb18ff..8952039460 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_operators.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_operators.py
@@ -31,7 +31,7 @@ class ForwardNotExist(paddle.nn.Layer):
 
 
 net = ForwardNotExist()
-setattr(net, "forward", "A string so that convert forward will fail")
+net.forward = "A string so that convert forward will fail"
 
 
 class TestConvertCall(unittest.TestCase):
diff --git a/python/paddle/fluid/tests/unittests/eager_op_test.py b/python/paddle/fluid/tests/unittests/eager_op_test.py
index 9615a9c815..3aeb0bc87c 100644
--- a/python/paddle/fluid/tests/unittests/eager_op_test.py
+++ b/python/paddle/fluid/tests/unittests/eager_op_test.py
@@ -449,7 +449,7 @@ class OpTest(unittest.TestCase):
             )
             or (
                 hasattr(self, 'mkldnn_data_type')
-                and getattr(self, 'mkldnn_data_type') == "bfloat16"
+                and self.mkldnn_data_type == "bfloat16"
             )
             or (
                 hasattr(self, 'attrs')
@@ -469,7 +469,7 @@ class OpTest(unittest.TestCase):
             )
             or (
                 hasattr(self, 'mkldnn_data_type')
-                and getattr(self, 'mkldnn_data_type') == "float16"
+                and self.mkldnn_data_type == "float16"
             )
             or (
                 hasattr(self, 'attrs')
@@ -1713,7 +1713,7 @@ class OpTest(unittest.TestCase):
             prim_checker = PrimForwardChecker(self, place)
             prim_checker.check()
             # Support operators which are not in the NO_FP64_CHECK_GRAD_OP_LIST list can be test prim with fp32
-            setattr(self.__class__, 'check_prim', True)
+            self.__class__.check_prim = True
             self.__class__.op_type = self.op_type
         # set some flags by the combination of arguments.
         self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)
@@ -1728,8 +1728,9 @@ class OpTest(unittest.TestCase):
 
         if self.is_mkldnn_op():
             check_dygraph = False
-            if hasattr(self, 'force_fp32_output') and getattr(
-                self, 'force_fp32_output'
+            if (
+                hasattr(self, 'force_fp32_output')
+                and self.force_fp32_output
             ):
                 atol = 1e-2 if atol < 1e-2 else atol
             else:
@@ -2078,7 +2079,7 @@ class OpTest(unittest.TestCase):
             )
             prim_grad_checker.check()
             # Support operators which are not in the NO_FP64_CHECK_GRAD_OP_LIST list can be test prim with fp32
-            setattr(self.__class__, 'check_prim', True)
+            self.__class__.check_prim = True
             self._check_grad_helper()
             if only_check_prim:
                 return
diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index 74e35abe5d..cdb521fe25 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -451,7 +451,7 @@ class OpTest(unittest.TestCase):
             )
             or (
                 hasattr(self, 'mkldnn_data_type')
-                and getattr(self, 'mkldnn_data_type') == "bfloat16"
+                and self.mkldnn_data_type == "bfloat16"
             )
             or (
                 hasattr(self, 'attrs')
@@ -471,7 +471,7 @@ class OpTest(unittest.TestCase):
             )
             or (
                 hasattr(self, 'mkldnn_data_type')
-                and getattr(self, 'mkldnn_data_type') == "float16"
+                and self.mkldnn_data_type == "float16"
             )
             or (
                 hasattr(self, 'attrs')
@@ -1502,7 +1502,7 @@ class OpTest(unittest.TestCase):
             prim_checker = PrimForwardChecker(self, place)
             prim_checker.check()
             # Support operators which not in the NO_FP64_CHECK_GRAD_OP_LIST list can be test prim with fp32
-            setattr(self.__class__, 'check_prim', True)
+            self.__class__.check_prim = True
             self.__class__.op_type = self.op_type
         # disable legacy dygraph check when check_eager is True
         if check_eager:
@@ -1907,8 +1907,9 @@ class OpTest(unittest.TestCase):
         if self.is_mkldnn_op():
             check_dygraph = False
             check_eager = False
-            if hasattr(self, 'force_fp32_output') and getattr(
-                self, 'force_fp32_output'
+            if (
+                hasattr(self, 'force_fp32_output')
+                and self.force_fp32_output
             ):
                 atol = 1e-2 if atol < 1e-2 else atol
             else:
@@ -2288,7 +2289,7 @@ class OpTest(unittest.TestCase):
             )
             prim_grad_checker.check()
             # Support operators which not in the NO_FP64_CHECK_GRAD_OP_LIST list can be test prim with fp32
-            setattr(self.__class__, 'check_prim', True)
+            self.__class__.check_prim = True
             self._check_grad_helper()
             if only_check_prim:
                 return
diff --git a/python/paddle/hapi/model_summary.py b/python/paddle/hapi/model_summary.py
index d6234eacd1..c36b9552be 100644
--- a/python/paddle/hapi/model_summary.py
+++ b/python/paddle/hapi/model_summary.py
@@ -312,8 +312,8 @@ def summary_string(model, input_size=None, dtypes=None, input=None):
                 params += np.prod(v.shape)
 
                 try:
-                    if (getattr(getattr(layer, k), 'trainable')) and (
-                        not getattr(getattr(layer, k), 'stop_gradient')
+                    if (getattr(layer, k).trainable) and (
+                        not getattr(layer, k).stop_gradient
                     ):
                         summary[m_key]["trainable_params"] += np.prod(v.shape)
                         summary[m_key]["trainable"] = True
diff --git a/python/paddle/incubate/distributed/utils/io/save_for_auto.py b/python/paddle/incubate/distributed/utils/io/save_for_auto.py
index 3008201d2f..701c22e50f 100644
--- a/python/paddle/incubate/distributed/utils/io/save_for_auto.py
+++ b/python/paddle/incubate/distributed/utils/io/save_for_auto.py
@@ -219,7 +219,7 @@ def _get_dims_mapping(dist_parameter, mp_group):
     dist_shape = np.array(dist_parameter.shape)
 
     if hasattr(dist_parameter, "split_axis"):
-        aixs = getattr(dist_parameter, "split_axis")
+        aixs = dist_parameter.split_axis
         mapping = [-1 for _ in dist_shape]
         mapping[aixs] = 1
         logger.debug(
@@ -351,7 +351,7 @@ def _get_wrapped_dist_state_dict(dist_state_dict):
             logger.debug(f"not first used : {v.name}")
             continue
         wrapped_state_dict[name_mapping[v.name]] = v
-        setattr(v, "dims_mapping", _get_dims_mapping(v, mp_group))
+        v.dims_mapping = _get_dims_mapping(v, mp_group)
         logger.debug(
             f"saving param: {v.name} -> {name_mapping[v.name]} shape: {v.shape}"
         )
diff --git a/python/paddle/jit/dy2static/convert_call_func.py b/python/paddle/jit/dy2static/convert_call_func.py
index acbbe0e5d7..b5ce104d4d 100644
--- a/python/paddle/jit/dy2static/convert_call_func.py
+++ b/python/paddle/jit/dy2static/convert_call_func.py
@@ -312,7 +312,7 @@ def convert_call(func):
                 # Bound mothod will be convert into plain function after `convert_to_static`.
                 # So descriptor mechanism is used to bound `self` instance on function to
                 # keep it as bound method.
-                setattr(func, 'forward', forward_func.__get__(func))
+                func.forward = forward_func.__get__(func)
             except (IOError, OSError, TypeError):
                 # NOTE: func.forward may have been decorated.
                 func_self = None if func_self else func_self
diff --git a/python/paddle/jit/dy2static/program_translator.py b/python/paddle/jit/dy2static/program_translator.py
index 5b3eae6f8e..9fbf2b6103 100644
--- a/python/paddle/jit/dy2static/program_translator.py
+++ b/python/paddle/jit/dy2static/program_translator.py
@@ -314,8 +314,8 @@ class StaticFunction:
         # save the instance `self` while decorating a method of class.
 
         if inspect.ismethod(function):
-            self._dygraph_function = getattr(function, '__func__')
-            self._class_instance = getattr(function, '__self__')
+            self._dygraph_function = function.__func__
+            self._class_instance = function.__self__
 
             if not hasattr(self._class_instance, '_original_funcs'):
                 raise TypeError(
@@ -885,7 +885,7 @@ class HookHelper:
         self.need_apply_hook = (
             with_hook
             and isinstance(self.class_instance, layers.Layer)
-            and getattr(func, "__name__") == "forward"
+            and func.__name__ == "forward"
         )
 
     def apply_pre_hooks(self, inputs):
diff --git a/python/paddle/jit/dy2static/utils.py b/python/paddle/jit/dy2static/utils.py
index 0752e32b3f..28c0a0cfd1 100644
--- a/python/paddle/jit/dy2static/utils.py
+++ b/python/paddle/jit/dy2static/utils.py
@@ -576,7 +576,7 @@ def ast_to_func(ast_root, dyfunc, delete_on_exit=True):
     # The 'forward' or 'another_forward' of 'TranslatedLayer' cannot be obtained
     # through 'func_name'. So set the special function name '__i_m_p_l__'.
     if hasattr(module, '__i_m_p_l__'):
-        callable_func = getattr(module, '__i_m_p_l__')
+        callable_func = module.__i_m_p_l__
         callable_func.__name__ = func_name
     elif hasattr(module, func_name):
         callable_func = getattr(module, func_name)
@@ -1120,11 +1120,11 @@ class FunctionNameLivenessAnalysis(gast.NodeVisitor):
 
     def _reset_name_scope(self, node):
         # always reset the node as empty namescope.
-        setattr(node, "pd_scope", NameScope())
+        node.pd_scope = NameScope()
 
     def _get_name_scope(self, node):
         if not hasattr(node, "pd_scope"):
-            setattr(node, "pd_scope", NameScope())
+            node.pd_scope = NameScope()
         return node.pd_scope
 
     def _current_name_scope(self):
@@ -1224,11 +1224,7 @@ class FunctionNameLivenessAnalysis(gast.NodeVisitor):
             )
 
         def pre_func():
-            setattr(
-                node,
-                "before_created",
-                self._nearest_function_scope().existed_vars(),
-            )
+            node.before_created = self._nearest_function_scope().existed_vars()
 
         self._visit_scope_node(node, pre_func, post_func)
 
diff --git a/python/paddle/nn/functional/vision.py b/python/paddle/nn/functional/vision.py
index 1acf1bcbc5..b7cebf4a58 100644
--- a/python/paddle/nn/functional/vision.py
+++ b/python/paddle/nn/functional/vision.py
@@ -320,7 +320,7 @@ def grid_sample(
             'use_cudnn',
             use_cudnn,
         )
-        out = getattr(_legacy_C_ops, 'grid_sampler')(x, grid, *attrs)
+        out = _legacy_C_ops.grid_sampler(x, grid, *attrs)
     else:
         helper = LayerHelper("grid_sample", **locals())
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'grid_sample')
diff --git a/python/paddle/nn/quant/qat/conv.py b/python/paddle/nn/quant/qat/conv.py
index 4c8e6915c1..f2ffc7b103 100644
--- a/python/paddle/nn/quant/qat/conv.py
+++ b/python/paddle/nn/quant/qat/conv.py
@@ -30,18 +30,18 @@ class QuantedConv2D(ConvertibleQuantedLayer):
         super(QuantedConv2D, self).__init__()
 
         # For Conv2D
-        self._groups = getattr(layer, '_groups')
-        self._stride = getattr(layer, '_stride')
-        self._padding = getattr(layer, '_padding')
-        self._padding_mode = getattr(layer, '_padding_mode')
+        self._groups = layer._groups
+        self._stride = layer._stride
+        self._padding = layer._padding
+        self._padding_mode = layer._padding_mode
         if self._padding_mode != 'zeros':
-            self._reversed_padding_repeated_twice = getattr(
-                layer, '_reversed_padding_repeated_twice'
+            self._reversed_padding_repeated_twice = (
+                layer._reversed_padding_repeated_twice
             )
-        self._dilation = getattr(layer, '_dilation')
-        self._data_format = getattr(layer, '_data_format')
-        self.weight = getattr(layer, 'weight')
-        self.bias = getattr(layer, 'bias')
+        self._dilation = layer._dilation
+        self._data_format = layer._data_format
+        self.weight = layer.weight
+        self.bias = layer.bias
 
         self.weight_quanter = None
         self.activation_quanter = None
diff --git a/python/paddle/nn/quant/qat/linear.py b/python/paddle/nn/quant/qat/linear.py
index b089486531..c0e015ce51 100644
--- a/python/paddle/nn/quant/qat/linear.py
+++ b/python/paddle/nn/quant/qat/linear.py
@@ -28,9 +28,9 @@ class QuantedLinear(ConvertibleQuantedLayer):
     def __init__(self, layer: Layer, q_config):
         super(QuantedLinear, self).__init__()
         # For Linear
-        self.weight = getattr(layer, 'weight')
-        self.bias = getattr(layer, 'bias')
-        self.name = getattr(layer, 'name')
+        self.weight = layer.weight
+        self.bias = layer.bias
+        self.name = layer.name
         # For FakeQuant
 
         self.weight_quanter = None
diff --git a/python/paddle/nn/quant/quant_layers.py b/python/paddle/nn/quant/quant_layers.py
index 257009a8ff..9e7b4c55ba 100644
--- a/python/paddle/nn/quant/quant_layers.py
+++ b/python/paddle/nn/quant/quant_layers.py
@@ -533,18 +533,18 @@ class QuantizedConv2D(Layer):
     ):
         super().__init__()
         # For Conv2D
-        self._groups = getattr(layer, '_groups')
-        self._stride = getattr(layer, '_stride')
-        self._padding = getattr(layer, '_padding')
-        self._padding_mode = getattr(layer, '_padding_mode')
+        self._groups = layer._groups
+        self._stride = layer._stride
+        self._padding = layer._padding
+        self._padding_mode = layer._padding_mode
         if self._padding_mode != 'zeros':
-            self._reversed_padding_repeated_twice = getattr(
-                layer, '_reversed_padding_repeated_twice'
+            self._reversed_padding_repeated_twice = (
+                layer._reversed_padding_repeated_twice
            )
-        self._dilation = getattr(layer, '_dilation')
-        self._data_format = getattr(layer, '_data_format')
-        self.weight = getattr(layer, 'weight')
-        self.bias = getattr(layer, 'bias')
+        self._dilation = layer._dilation
+        self._data_format = layer._data_format
+        self.weight = layer.weight
+        self.bias = layer.bias
 
         # For FakeQuant
         self._conv2d_quant_axis = 0
@@ -654,14 +654,14 @@ class QuantizedConv2DTranspose(Layer):
         """
         super().__init__()
         # For Conv2DTranspose
-        self._groups = getattr(layer, '_groups')
-        self._stride = getattr(layer, '_stride')
-        self._padding = getattr(layer, '_padding')
-        self._output_padding = getattr(layer, 'output_padding')
-        self._dilation = getattr(layer, '_dilation')
-        self._data_format = getattr(layer, '_data_format')
-        self.weight = getattr(layer, 'weight')
-        self.bias = getattr(layer, 'bias')
+        self._groups = layer._groups
+        self._stride = layer._stride
+        self._padding = layer._padding
+        self._output_padding = layer.output_padding
+        self._dilation = layer._dilation
+        self._data_format = layer._data_format
+        self.weight = layer.weight
+        self.bias = layer.bias
         # For FakeQuant
         self._conv2d_transpose_quant_axis = 1
         if weight_quant_layer is not None:
@@ -748,9 +748,9 @@ class QuantizedLinear(Layer):
     ):
         super().__init__()
         # For Linear
-        self.weight = getattr(layer, 'weight')
-        self.bias = getattr(layer, 'bias')
-        self.name = getattr(layer, 'name')
+        self.weight = layer.weight
+        self.bias = layer.bias
+        self.name = layer.name
         # For FakeQuant
         self._linear_quant_axis = 1
 
@@ -829,15 +829,15 @@ class QuantizedColumnParallelLinear(Layer):
             act_quant_layer is None
         ), "When quantizing ColumnParallelLinear, act_quant_layer should be None."
 
-        self.weight = getattr(layer, 'weight')
-        self.bias = getattr(layer, 'bias')
-        self.name = getattr(layer, '_name')
+        self.weight = layer.weight
+        self.bias = layer.bias
+        self.name = layer._name
         # For FakeQuant
         self._linear_quant_axis = 1
 
-        self.is_mp = getattr(layer, 'is_mp')
-        self.model_parallel_group = getattr(layer, 'model_parallel_group')
-        self.gather_output = getattr(layer, 'gather_output')
+        self.is_mp = layer.is_mp
+        self.model_parallel_group = layer.model_parallel_group
+        self.gather_output = layer.gather_output
 
         self._fake_quant_weight = _get_fake_quant_type(
             weight_quantize_type,
@@ -923,15 +923,15 @@ class QuantizedRowParallelLinear(Layer):
         ), "When quantizing RowParallelLinear, act_quant_layer cannot defined by yourself."
 
         # For Linear
-        self.weight = getattr(layer, 'weight')
-        self.bias = getattr(layer, 'bias')
-        self.name = getattr(layer, '_name')
+        self.weight = layer.weight
+        self.bias = layer.bias
+        self.name = layer._name
         # For FakeQuant
         self._linear_quant_axis = 1
 
-        self.input_is_parallel = getattr(layer, 'input_is_parallel')
-        self.is_mp = getattr(layer, 'is_mp')
-        self.model_parallel_group = getattr(layer, 'model_parallel_group')
+        self.input_is_parallel = layer.input_is_parallel
+        self.is_mp = layer.is_mp
+        self.model_parallel_group = layer.model_parallel_group
 
         self._fake_quant_weight = _get_fake_quant_type(
             weight_quantize_type,
--
GitLab
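
Note (illustrative, not part of the patch): the Bugbear rules B009 and B010 enabled in pyproject.toml above flag getattr/setattr calls whose attribute name is a constant string literal, which is the pattern rewritten throughout this diff. A minimal Python sketch of the rule, using a hypothetical Param class chosen only for illustration; the dynamic getattr form with a runtime-computed name is not flagged and stays as-is:

    class Param:
        """Hypothetical stand-in for an object whose attributes are set dynamically."""
        pass

    param = Param()

    # B010: setattr with a constant attribute name -> plain assignment.
    setattr(param, "split_axis", 0)   # flagged
    param.split_axis = 0              # preferred

    # B009: getattr with a constant attribute name -> plain attribute access.
    axis = getattr(param, "split_axis")  # flagged
    axis = param.split_axis              # preferred

    # Not flagged: the attribute name is only known at runtime,
    # so getattr remains the right tool here.
    name = "split_axis"
    axis = getattr(param, name)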