wmsofts / Paddle (fork of PaddlePaddle / Paddle)

Commit 2f2b1f23 (unverified)
Authored on Mar 17, 2023 by Nyakku Shigure; committed via GitHub on Mar 17, 2023.
[CodeStyle][B009][B010] use normal property access instead of getattr/setattr (#51530)
Parent: d1e2c61b

19 changed files with 99 additions and 100 deletions (+99 −100):
pyproject.toml (+4 −0)
python/paddle/dataset/common.py (+1 −4)
python/paddle/distributed/fleet/fleet.py (+1 −1)
python/paddle/distributed/fleet/layers/mpu/mp_layers.py (+4 −4)
python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py (+2 −2)
python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py (+12 −12)
python/paddle/fluid/tests/custom_op/test_custom_raw_op_kernel_op.py (+1 −1)
python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_operators.py (+1 −1)
python/paddle/fluid/tests/unittests/eager_op_test.py (+7 −6)
python/paddle/fluid/tests/unittests/op_test.py (+7 −6)
python/paddle/hapi/model_summary.py (+2 −2)
python/paddle/incubate/distributed/utils/io/save_for_auto.py (+2 −2)
python/paddle/jit/dy2static/convert_call_func.py (+1 −1)
python/paddle/jit/dy2static/program_translator.py (+3 −3)
python/paddle/jit/dy2static/utils.py (+4 −8)
python/paddle/nn/functional/vision.py (+1 −1)
python/paddle/nn/quant/qat/conv.py (+10 −10)
python/paddle/nn/quant/qat/linear.py (+3 −3)
python/paddle/nn/quant/quant_layers.py (+33 −33)
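For context: flake8-bugbear's B009 rule flags `getattr` calls whose attribute name is a constant string literal, and B010 flags the corresponding `setattr` calls, since both can be written as ordinary attribute access. A minimal before/after sketch of the rewrite applied throughout this commit (the `Cfg` object here is hypothetical, not from Paddle):

```python
class Cfg:
    pass

cfg = Cfg()

# B010: setattr with a constant attribute name -> direct assignment.
setattr(cfg, "dp_degree", 2)      # flagged by B010
cfg.dp_degree = 2                 # preferred spelling

# B009: getattr with a constant attribute name -> direct access.
print(getattr(cfg, "dp_degree"))  # flagged by B009
print(cfg.dp_degree)              # preferred spelling
```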
pyproject.toml

@@ -54,6 +54,10 @@ select = [
     # NumPy-specific rules
     "NPY001",
+
+    # Bugbear
+    "B009",
+    "B010",
 ]
 unfixable = [
     "NPY001",
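Assuming the `select` list above belongs to ruff's `[tool.ruff]` table (the neighboring `NPY001` and `unfixable` entries match ruff's configuration schema), the two rules can be tried out in isolation. A sketch that shells out to ruff on a throwaway file; it requires ruff to be installed, and the CLI spelling `ruff check --select ...` is what current ruff releases accept and may differ on older versions:

```python
import subprocess
import tempfile

# A throwaway file that violates both rules (hypothetical demo code).
violations = 'x = getattr(obj, "name")\nsetattr(obj, "name", x)\n'

with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
    f.write(violations)
    path = f.name

# Check only B009/B010; adding `--fix` would rewrite them in place, since the
# config above leaves both rules out of the `unfixable` list.
result = subprocess.run(
    ["ruff", "check", "--select", "B009,B010", path],
    capture_output=True,
    text=True,
)
print(result.stdout)
```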
python/paddle/dataset/common.py

@@ -138,10 +138,7 @@ def fetch_all():
         if "fetch" in dir(
             importlib.import_module("paddle.dataset.%s" % module_name)
         ):
-            getattr(
-                importlib.import_module("paddle.dataset.%s" % module_name),
-                "fetch",
-            )()
+            importlib.import_module('paddle.dataset.%s' % module_name).fetch()


 def split(reader, line_count, suffix="%05d.pickle", dumper=pickle.dump):
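The hunk keeps the dynamic import but drops `getattr` for the statically known attribute name: `importlib.import_module` returns an ordinary module object, so a fixed attribute like `fetch` can be read with dot access while `dir()` still covers the existence check. The pattern can be reproduced with the standard library alone; a small sketch using `json` as the dynamically imported module:

```python
import importlib

module_name = "json"
module = importlib.import_module(module_name)  # import chosen at runtime

# The module is a normal object: a statically known attribute needs no getattr.
if "dumps" in dir(module):
    print(module.dumps({"ok": True}))          # same as json.dumps(...)
```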
python/paddle/distributed/fleet/fleet.py

@@ -1282,7 +1282,7 @@ class Fleet:
             self.origin_main_program = loss.block.program
             # add distributed attr
             if not hasattr(self.origin_main_program, "distributed_info_"):
-                setattr(self.origin_main_program, "distributed_info_", dict())
+                self.origin_main_program.distributed_info_ = dict()
             self.origin_main_program.distributed_info_[
                 "dp_degree"
             ] = self._user_defined_strategy.sharding_configs["dp_degree"]
python/paddle/distributed/fleet/layers/mpu/mp_layers.py

@@ -143,7 +143,7 @@ class VocabParallelEmbedding(Layer):
         self.weight.is_distributed = True if self.is_mp else False
         if self.weight.is_distributed:
-            setattr(self.weight, "split_axis", 0)
+            self.weight.split_axis = 0

     def forward(self, x):
         if self.is_mp:
@@ -277,7 +277,7 @@ class ColumnParallelLinear(Layer):
         self.weight.is_distributed = True if self.is_mp else False
         if self.weight.is_distributed:
-            setattr(self.weight, "split_axis", 1)
+            self.weight.split_axis = 1

         if has_bias:
             # initialize bias to zero like Megatron
@@ -289,7 +289,7 @@ class ColumnParallelLinear(Layer):
             )
             self.bias.is_distributed = True if self.is_mp else False
             if self.bias.is_distributed:
-                setattr(self.bias, "split_axis", 0)
+                self.bias.split_axis = 0
         else:
             self.bias = None
@@ -443,7 +443,7 @@ class RowParallelLinear(Layer):
         self.weight.is_distributed = True if self.is_mp else False
         if self.weight.is_distributed:
-            setattr(self.weight, "split_axis", 0)
+            self.weight.split_axis = 0

         if has_bias:
             self.bias = self.create_parameter(
python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py

@@ -493,7 +493,7 @@ class PipelineLayer(nn.Layer):
             for param in comm['layer'].parameters():
                 if self.global_rank != min(comm['ranks']):
-                    setattr(param, 'is_firstly_shared', False)
+                    param.is_firstly_shared = False

     def allreduce_shared_weight_gradients(self):
         for key, comm in self.shared_comm.items():
@@ -641,7 +641,7 @@ class PipelineLayer(nn.Layer):
                 for param in self.shared_layers[
                     layer.layer_name
                 ].parameters():
-                    setattr(param, "is_firstly_shared", True)
+                    param.is_firstly_shared = True

                 if layer.forward_func is None:
                     run_function.append(self.shared_layers[layer.layer_name])
python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py

@@ -1047,18 +1047,18 @@ def _create_params_grad(trainable_params, param2buffer_size, task_flow):
 def _PartitionParam(param):
     if not hasattr(param, "fw_storage"):
-        setattr(param, "fw_storage", None)
-        setattr(param, "bw_storage", None)
-        setattr(param, "master_weight", None)
-        setattr(param, "status", "all")
-        setattr(param, "use_count", 0)
+        param.fw_storage = None
+        param.bw_storage = None
+        param.master_weight = None
+        param.status = "all"
+        param.use_count = 0
     return param


 def _UnsliceParam(param):
     if not hasattr(param, "unslice"):
-        setattr(param, "unslice", True)
-        setattr(param, "master_weight", None)
+        param.unslice = True
+        param.master_weight = None
     return param
@@ -1078,11 +1078,11 @@ def _VarBaseWrapper(param):
 def _OptimizerWrapper(optimizer, offload, group, update_params_slice):
     if not hasattr(optimizer, "_optim"):
-        setattr(optimizer, "_optim", optimizer)
-        setattr(optimizer, "offload", offload)
-        setattr(optimizer, "_group", group)
-        setattr(optimizer, "update_scaler", None)
-        setattr(optimizer, "update_slice", update_params_slice)
+        optimizer._optim = optimizer
+        optimizer.offload = offload
+        optimizer._group = group
+        optimizer.update_scaler = None
+        optimizer.update_slice = update_params_slice
     return optimizer
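Both spellings attach the same ad-hoc attributes to the parameter and optimizer objects; the practical difference is that `setattr` goes through a builtin call at runtime while plain assignment compiles to a single attribute-store instruction. A quick way to see this with the standard `dis` module (nothing Paddle-specific; the snippets are compiled, not executed):

```python
import dis

# setattr with a constant name: a name lookup plus a three-argument call.
dis.dis(compile('setattr(param, "status", "all")', "<demo>", "exec"))

# Direct assignment: the value is stored via a single STORE_ATTR instruction.
dis.dis(compile('param.status = "all"', "<demo>", "exec"))
```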
python/paddle/fluid/tests/custom_op/test_custom_raw_op_kernel_op.py

@@ -67,7 +67,7 @@ class TestCustomRawReluOp(unittest.TestCase):
     def custom_raw_relu(self, x):
         module = importlib.import_module(MODULE_NAME)
-        custom_raw_relu_op = getattr(module, "custom_raw_relu")
+        custom_raw_relu_op = module.custom_raw_relu
         self.assertIsNotNone(custom_raw_relu_op)
         return custom_raw_relu_op(x)
python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_operators.py

@@ -31,7 +31,7 @@ class ForwardNotExist(paddle.nn.Layer):
 net = ForwardNotExist()
-setattr(net, "forward", "A string so that convert forward will fail")
+net.forward = "A string so that convert forward will fail"


 class TestConvertCall(unittest.TestCase):
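The assignment has the intended effect because a plain function is a non-data descriptor: an entry in the instance `__dict__` shadows the method defined on the class. A standalone illustration with a hypothetical `Net` class:

```python
class Net:
    def forward(self, x):
        return x

net = Net()
net.forward = "A string so that convert forward will fail"

print(net.forward)   # the string, not the bound method
# net.forward(1)     # would now raise TypeError: 'str' object is not callable
```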
python/paddle/fluid/tests/unittests/eager_op_test.py

@@ -449,7 +449,7 @@ class OpTest(unittest.TestCase):
             )
             or (
                 hasattr(self, 'mkldnn_data_type')
-                and getattr(self, 'mkldnn_data_type') == "bfloat16"
+                and self.mkldnn_data_type == "bfloat16"
             )
             or (
                 hasattr(self, 'attrs')
@@ -469,7 +469,7 @@ class OpTest(unittest.TestCase):
             )
             or (
                 hasattr(self, 'mkldnn_data_type')
-                and getattr(self, 'mkldnn_data_type') == "float16"
+                and self.mkldnn_data_type == "float16"
             )
             or (
                 hasattr(self, 'attrs')
@@ -1713,7 +1713,7 @@ class OpTest(unittest.TestCase):
             prim_checker = PrimForwardChecker(self, place)
             prim_checker.check()
             # Support operators which are not in the NO_FP64_CHECK_GRAD_OP_LIST list can be test prim with fp32
-            setattr(self.__class__, 'check_prim', True)
+            self.__class__.check_prim = True
             self.__class__.op_type = self.op_type
         # set some flags by the combination of arguments.
         self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)
@@ -1728,8 +1728,9 @@ class OpTest(unittest.TestCase):
         if self.is_mkldnn_op():
             check_dygraph = False
-            if hasattr(self, 'force_fp32_output') and getattr(
-                self, 'force_fp32_output'
+            if (
+                hasattr(self, 'force_fp32_output')
+                and self.force_fp32_output
             ):
                 atol = 1e-2 if atol < 1e-2 else atol
         else:
@@ -2078,7 +2079,7 @@ class OpTest(unittest.TestCase):
             )
             prim_grad_checker.check()
             # Support operators which are not in the NO_FP64_CHECK_GRAD_OP_LIST list can be test prim with fp32
-            setattr(self.__class__, 'check_prim', True)
+            self.__class__.check_prim = True
             self._check_grad_helper()
             if only_check_prim:
                 return
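A nuance visible in the `force_fp32_output` hunk: B009 targets only the two-argument form of `getattr`, so the three-argument form with a default is left alone, and the `hasattr(...) and self.attr` guard could equivalently have been written with a default. A sketch of that alternative (not what the commit chose):

```python
class FakeTest:
    pass

t = FakeTest()  # has no force_fp32_output attribute yet

# Equivalent to `hasattr(t, 'force_fp32_output') and t.force_fp32_output`,
# and not flagged by B009 because a default value is supplied:
force_fp32 = getattr(t, "force_fp32_output", False)
print(force_fp32)                               # False

t.force_fp32_output = True
print(getattr(t, "force_fp32_output", False))   # True
```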
python/paddle/fluid/tests/unittests/op_test.py

@@ -451,7 +451,7 @@ class OpTest(unittest.TestCase):
             )
             or (
                 hasattr(self, 'mkldnn_data_type')
-                and getattr(self, 'mkldnn_data_type') == "bfloat16"
+                and self.mkldnn_data_type == "bfloat16"
             )
             or (
                 hasattr(self, 'attrs')
@@ -471,7 +471,7 @@ class OpTest(unittest.TestCase):
             )
             or (
                 hasattr(self, 'mkldnn_data_type')
-                and getattr(self, 'mkldnn_data_type') == "float16"
+                and self.mkldnn_data_type == "float16"
             )
             or (
                 hasattr(self, 'attrs')
@@ -1502,7 +1502,7 @@ class OpTest(unittest.TestCase):
             prim_checker = PrimForwardChecker(self, place)
             prim_checker.check()
             # Support operators which not in the NO_FP64_CHECK_GRAD_OP_LIST list can be test prim with fp32
-            setattr(self.__class__, 'check_prim', True)
+            self.__class__.check_prim = True
             self.__class__.op_type = self.op_type
         # disable legacy dygraph check when check_eager is True
         if check_eager:
@@ -1907,8 +1907,9 @@ class OpTest(unittest.TestCase):
         if self.is_mkldnn_op():
             check_dygraph = False
             check_eager = False
-            if hasattr(self, 'force_fp32_output') and getattr(
-                self, 'force_fp32_output'
+            if (
+                hasattr(self, 'force_fp32_output')
+                and self.force_fp32_output
             ):
                 atol = 1e-2 if atol < 1e-2 else atol
         else:
@@ -2288,7 +2289,7 @@ class OpTest(unittest.TestCase):
             )
             prim_grad_checker.check()
             # Support operators which not in the NO_FP64_CHECK_GRAD_OP_LIST list can be test prim with fp32
-            setattr(self.__class__, 'check_prim', True)
+            self.__class__.check_prim = True
             self._check_grad_helper()
             if only_check_prim:
                 return
python/paddle/hapi/model_summary.py

@@ -312,8 +312,8 @@ def summary_string(model, input_size=None, dtypes=None, input=None):
                 params += np.prod(v.shape)

                 try:
-                    if (getattr(getattr(layer, k), 'trainable')) and (
-                        not getattr(getattr(layer, k), 'stop_gradient')
+                    if (getattr(layer, k).trainable) and (
+                        not getattr(layer, k).stop_gradient
                     ):
                         summary[m_key]["trainable_params"] += np.prod(v.shape)
                         summary[m_key]["trainable"] = True
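Note what the fix leaves in place: the outer `getattr(layer, k)` looks up an attribute whose name is only known at runtime, which is the legitimate use of `getattr` and is not flagged; only the inner lookups with the literal names `'trainable'` and `'stop_gradient'` become dot access. A minimal contrast with hypothetical classes:

```python
class Param:
    trainable = True
    stop_gradient = False

class Layer:
    w = Param()

layer = Layer()
k = "w"                 # attribute name only known at runtime
p = getattr(layer, k)   # dynamic name: getattr stays (not a B009 case)
print(p.trainable and not p.stop_gradient)  # literal names: plain dot access
```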
python/paddle/incubate/distributed/utils/io/save_for_auto.py

@@ -219,7 +219,7 @@ def _get_dims_mapping(dist_parameter, mp_group):
     dist_shape = np.array(dist_parameter.shape)
     if hasattr(dist_parameter, "split_axis"):
-        aixs = getattr(dist_parameter, "split_axis")
+        aixs = dist_parameter.split_axis
         mapping = [-1 for _ in dist_shape]
         mapping[aixs] = 1
         logger.debug(
@@ -351,7 +351,7 @@ def _get_wrapped_dist_state_dict(dist_state_dict):
             logger.debug(f"not first used : {v.name}")
             continue
         wrapped_state_dict[name_mapping[v.name]] = v
-        setattr(v, "dims_mapping", _get_dims_mapping(v, mp_group))
+        v.dims_mapping = _get_dims_mapping(v, mp_group)
         logger.debug(
             f"saving param: {v.name} -> {name_mapping[v.name]} shape: {v.shape}"
         )
python/paddle/jit/dy2static/convert_call_func.py

@@ -312,7 +312,7 @@ def convert_call(func):
                 # Bound mothod will be convert into plain function after `convert_to_static`.
                 # So descriptor mechanism is used to bound `self` instance on function to
                 # keep it as bound method.
-                setattr(func, 'forward', forward_func.__get__(func))
+                func.forward = forward_func.__get__(func)
             except (IOError, OSError, TypeError):
                 # NOTE: func.forward may have been decorated.
                 func_self = None if func_self else func_self
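The `forward_func.__get__(func)` idiom preserved by this hunk is the descriptor protocol invoked by hand: calling `__get__` on a plain function returns a method bound to the given object, which is what the surrounding comment means by keeping it "as bound method". A standalone sketch:

```python
def forward_func(self, x):
    return (self, x)

class Holder:
    pass

obj = Holder()
obj.forward = forward_func.__get__(obj)  # bind `self` via the descriptor protocol

print(obj.forward(42))  # (<Holder object ...>, 42); `self` inside is `obj`
```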
python/paddle/jit/dy2static/program_translator.py

@@ -314,8 +314,8 @@ class StaticFunction:
         # save the instance `self` while decorating a method of class.
         if inspect.ismethod(function):
-            self._dygraph_function = getattr(function, '__func__')
-            self._class_instance = getattr(function, '__self__')
+            self._dygraph_function = function.__func__
+            self._class_instance = function.__self__

             if not hasattr(self._class_instance, '_original_funcs'):
                 raise TypeError(
@@ -885,7 +885,7 @@ class HookHelper:
         self.need_apply_hook = (
             with_hook
             and isinstance(self.class_instance, layers.Layer)
-            and getattr(func, "__name__") == "forward"
+            and func.__name__ == "forward"
        )

     def apply_pre_hooks(self, inputs):
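`__func__` and `__self__` are the documented attributes of a bound method (the underlying plain function and the instance it is bound to), so dot access is the natural spelling once `inspect.ismethod` has confirmed the type. For illustration:

```python
import inspect

class Model:
    def run(self):
        return "ok"

m = Model()
method = m.run

assert inspect.ismethod(method)
print(method.__func__ is Model.run)  # True: the underlying plain function
print(method.__self__ is m)          # True: the instance it is bound to
```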
python/paddle/jit/dy2static/utils.py

@@ -576,7 +576,7 @@ def ast_to_func(ast_root, dyfunc, delete_on_exit=True):
     # The 'forward' or 'another_forward' of 'TranslatedLayer' cannot be obtained
     # through 'func_name'. So set the special function name '__i_m_p_l__'.
     if hasattr(module, '__i_m_p_l__'):
-        callable_func = getattr(module, '__i_m_p_l__')
+        callable_func = module.__i_m_p_l__
         callable_func.__name__ = func_name
     elif hasattr(module, func_name):
         callable_func = getattr(module, func_name)
@@ -1120,11 +1120,11 @@ class FunctionNameLivenessAnalysis(gast.NodeVisitor):
     def _reset_name_scope(self, node):
         # always reset the node as empty namescope.
-        setattr(node, "pd_scope", NameScope())
+        node.pd_scope = NameScope()

     def _get_name_scope(self, node):
         if not hasattr(node, "pd_scope"):
-            setattr(node, "pd_scope", NameScope())
+            node.pd_scope = NameScope()
         return node.pd_scope

     def _current_name_scope(self):
@@ -1224,11 +1224,7 @@ class FunctionNameLivenessAnalysis(gast.NodeVisitor):
         )

         def pre_func():
-            setattr(
-                node,
-                "before_created",
-                self._nearest_function_scope().existed_vars(),
-            )
+            node.before_created = self._nearest_function_scope().existed_vars()

         self._visit_scope_node(node, pre_func, post_func)
python/paddle/nn/functional/vision.py

@@ -320,7 +320,7 @@ def grid_sample(
             'use_cudnn',
             use_cudnn,
         )
-        out = getattr(_legacy_C_ops, 'grid_sampler')(x, grid, *attrs)
+        out = _legacy_C_ops.grid_sampler(x, grid, *attrs)
     else:
         helper = LayerHelper("grid_sample", **locals())
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'grid_sample')
python/paddle/nn/quant/qat/conv.py

@@ -30,18 +30,18 @@ class QuantedConv2D(ConvertibleQuantedLayer):
         super(QuantedConv2D, self).__init__()
         # For Conv2D
-        self._groups = getattr(layer, '_groups')
-        self._stride = getattr(layer, '_stride')
-        self._padding = getattr(layer, '_padding')
-        self._padding_mode = getattr(layer, '_padding_mode')
+        self._groups = layer._groups
+        self._stride = layer._stride
+        self._padding = layer._padding
+        self._padding_mode = layer._padding_mode
         if self._padding_mode != 'zeros':
-            self._reversed_padding_repeated_twice = getattr(
-                layer, '_reversed_padding_repeated_twice'
+            self._reversed_padding_repeated_twice = (
+                layer._reversed_padding_repeated_twice
             )
-        self._dilation = getattr(layer, '_dilation')
-        self._data_format = getattr(layer, '_data_format')
-        self.weight = getattr(layer, 'weight')
-        self.bias = getattr(layer, 'bias')
+        self._dilation = layer._dilation
+        self._data_format = layer._data_format
+        self.weight = layer.weight
+        self.bias = layer.bias
         self.weight_quanter = None
         self.activation_quanter = None
python/paddle/nn/quant/qat/linear.py

@@ -28,9 +28,9 @@ class QuantedLinear(ConvertibleQuantedLayer):
     def __init__(self, layer: Layer, q_config):
         super(QuantedLinear, self).__init__()
         # For Linear
-        self.weight = getattr(layer, 'weight')
-        self.bias = getattr(layer, 'bias')
-        self.name = getattr(layer, 'name')
+        self.weight = layer.weight
+        self.bias = layer.bias
+        self.name = layer.name
         # For FakeQuant
         self.weight_quanter = None
python/paddle/nn/quant/quant_layers.py

@@ -533,18 +533,18 @@ class QuantizedConv2D(Layer):
     ):
         super().__init__()
         # For Conv2D
-        self._groups = getattr(layer, '_groups')
-        self._stride = getattr(layer, '_stride')
-        self._padding = getattr(layer, '_padding')
-        self._padding_mode = getattr(layer, '_padding_mode')
+        self._groups = layer._groups
+        self._stride = layer._stride
+        self._padding = layer._padding
+        self._padding_mode = layer._padding_mode
         if self._padding_mode != 'zeros':
-            self._reversed_padding_repeated_twice = getattr(
-                layer, '_reversed_padding_repeated_twice'
+            self._reversed_padding_repeated_twice = (
+                layer._reversed_padding_repeated_twice
             )
-        self._dilation = getattr(layer, '_dilation')
-        self._data_format = getattr(layer, '_data_format')
-        self.weight = getattr(layer, 'weight')
-        self.bias = getattr(layer, 'bias')
+        self._dilation = layer._dilation
+        self._data_format = layer._data_format
+        self.weight = layer.weight
+        self.bias = layer.bias
         # For FakeQuant
         self._conv2d_quant_axis = 0
@@ -654,14 +654,14 @@ class QuantizedConv2DTranspose(Layer):
         """
         super().__init__()
         # For Conv2DTranspose
-        self._groups = getattr(layer, '_groups')
-        self._stride = getattr(layer, '_stride')
-        self._padding = getattr(layer, '_padding')
-        self._output_padding = getattr(layer, 'output_padding')
-        self._dilation = getattr(layer, '_dilation')
-        self._data_format = getattr(layer, '_data_format')
-        self.weight = getattr(layer, 'weight')
-        self.bias = getattr(layer, 'bias')
+        self._groups = layer._groups
+        self._stride = layer._stride
+        self._padding = layer._padding
+        self._output_padding = layer.output_padding
+        self._dilation = layer._dilation
+        self._data_format = layer._data_format
+        self.weight = layer.weight
+        self.bias = layer.bias
         # For FakeQuant
         self._conv2d_transpose_quant_axis = 1
         if weight_quant_layer is not None:
@@ -748,9 +748,9 @@ class QuantizedLinear(Layer):
     ):
         super().__init__()
         # For Linear
-        self.weight = getattr(layer, 'weight')
-        self.bias = getattr(layer, 'bias')
-        self.name = getattr(layer, 'name')
+        self.weight = layer.weight
+        self.bias = layer.bias
+        self.name = layer.name
         # For FakeQuant
         self._linear_quant_axis = 1
@@ -829,15 +829,15 @@ class QuantizedColumnParallelLinear(Layer):
             act_quant_layer is None
         ), "When quantizing ColumnParallelLinear, act_quant_layer should be None."
-        self.weight = getattr(layer, 'weight')
-        self.bias = getattr(layer, 'bias')
-        self.name = getattr(layer, '_name')
+        self.weight = layer.weight
+        self.bias = layer.bias
+        self.name = layer._name
         # For FakeQuant
         self._linear_quant_axis = 1

-        self.is_mp = getattr(layer, 'is_mp')
-        self.model_parallel_group = getattr(layer, 'model_parallel_group')
-        self.gather_output = getattr(layer, 'gather_output')
+        self.is_mp = layer.is_mp
+        self.model_parallel_group = layer.model_parallel_group
+        self.gather_output = layer.gather_output

         self._fake_quant_weight = _get_fake_quant_type(
             weight_quantize_type,
@@ -923,15 +923,15 @@ class QuantizedRowParallelLinear(Layer):
         ), "When quantizing RowParallelLinear, act_quant_layer cannot defined by yourself."
         # For Linear
-        self.weight = getattr(layer, 'weight')
-        self.bias = getattr(layer, 'bias')
-        self.name = getattr(layer, '_name')
+        self.weight = layer.weight
+        self.bias = layer.bias
+        self.name = layer._name
         # For FakeQuant
         self._linear_quant_axis = 1

-        self.input_is_parallel = getattr(layer, 'input_is_parallel')
-        self.is_mp = getattr(layer, 'is_mp')
-        self.model_parallel_group = getattr(layer, 'model_parallel_group')
+        self.input_is_parallel = layer.input_is_parallel
+        self.is_mp = layer.is_mp
+        self.model_parallel_group = layer.model_parallel_group

         self._fake_quant_weight = _get_fake_quant_type(
             weight_quantize_type,