diff --git a/ppocr/optimizer/optimizer.py b/ppocr/optimizer/optimizer.py
index 144f011c79ec2303b7fbc73ac078afe3ce92c255..ffe72d7db309ab832a258dcc73916f9fa4485c2b 100644
--- a/ppocr/optimizer/optimizer.py
+++ b/ppocr/optimizer/optimizer.py
@@ -84,8 +84,7 @@ class Adam(object):
         if self.group_lr:
             if self.training_step == 'LF_2':
                 import paddle
-                if isinstance(model, paddle.fluid.dygraph.parallel.
-                              DataParallel):  # multi gpu
+                if isinstance(model, paddle.DataParallel):  # multi gpu
                     mlm = model._layers.head.MLM_VRM.MLM.parameters()
                     pre_mlm_pp = model._layers.head.MLM_VRM.Prediction.pp_share.parameters(
                     )
diff --git a/ppstructure/kie/requirements.txt b/ppstructure/kie/requirements.txt
index 61c230d3ed5bedc093c40af8228d3ea685382f54..c8b9018de03d71f23e4acf2a30862ef06c164252 100644
--- a/ppstructure/kie/requirements.txt
+++ b/ppstructure/kie/requirements.txt
@@ -4,4 +4,4 @@ seqeval
 pypandoc
 attrdict3
 python_docx
-paddlenlp>=2.4.1
+paddlenlp>=2.5.2
\ No newline at end of file
diff --git a/test_tipc/supplementary/data_loader.py b/test_tipc/supplementary/data_loader.py
index 049e7b2d36306d4bb7264d1c45a072ed84bbba60..f0245dd27cc5bb5d7272d6950f27b4ae0ba899f2 100644
--- a/test_tipc/supplementary/data_loader.py
+++ b/test_tipc/supplementary/data_loader.py
@@ -1,7 +1,6 @@
 import numpy as np
 from paddle.vision.datasets import Cifar100
 from paddle.vision.transforms import Normalize
-from paddle.fluid.dataloader.collate import default_collate_fn
 import signal
 import os
 from paddle.io import Dataset, DataLoader, DistributedBatchSampler
diff --git a/test_tipc/supplementary/train.py b/test_tipc/supplementary/train.py
index a15a99ff85e6667700f7e57800e1feeb013da869..f582123407956b335aac8a0845cae50769dae829 100644
--- a/test_tipc/supplementary/train.py
+++ b/test_tipc/supplementary/train.py
@@ -71,7 +71,7 @@ def amp_scaler(config):
             'FLAGS_cudnn_batchnorm_spatial_persistent': 1,
             'FLAGS_max_inplace_grad_add': 8,
         }
-        paddle.fluid.set_flags(AMP_RELATED_FLAGS_SETTING)
+        paddle.set_flags(AMP_RELATED_FLAGS_SETTING)
         scale_loss = config["AMP"].get("scale_loss", 1.0)
         use_dynamic_loss_scaling = config["AMP"].get("use_dynamic_loss_scaling",
                                                      False)
diff --git a/tools/eval.py b/tools/eval.py
index 2084ed170ff5164651236df296aa4baaca22bf74..b4c69b6d37532103f1316eb3b7a14b472d741ed3 100755
--- a/tools/eval.py
+++ b/tools/eval.py
@@ -112,7 +112,7 @@ def main():
             'FLAGS_cudnn_batchnorm_spatial_persistent': 1,
             'FLAGS_max_inplace_grad_add': 8,
         }
-        paddle.fluid.set_flags(AMP_RELATED_FLAGS_SETTING)
+        paddle.set_flags(AMP_RELATED_FLAGS_SETTING)
         scale_loss = config["Global"].get("scale_loss", 1.0)
         use_dynamic_loss_scaling = config["Global"].get(
             "use_dynamic_loss_scaling", False)
diff --git a/tools/infer/utility.py b/tools/infer/utility.py
index 53a7d5ee54897e0503e473ca8ec04453cfb70e56..fd3567730b2da5da49f7c9ff1a9a4cda6a25bdf8 100644
--- a/tools/infer/utility.py
+++ b/tools/infer/utility.py
@@ -313,7 +313,7 @@ def get_infer_gpuid():
     if sysstr == "Windows":
         return 0
 
-    if not paddle.fluid.core.is_compiled_with_rocm():
+    if not paddle.device.is_compiled_with_rocm():
         cmd = "env | grep CUDA_VISIBLE_DEVICES"
     else:
         cmd = "env | grep HIP_VISIBLE_DEVICES"
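
Taken together, these hunks move off the deprecated paddle.fluid namespace onto the Paddle 2.x public API. For reference, a minimal standalone sketch of the replacements the patch converges on (assuming Paddle >= 2.5; the flag values mirror the diff, but the model and the world-size check are illustrative placeholders, not code from the patched files):

import paddle

# Flags API: paddle.fluid.set_flags -> paddle.set_flags (same flag dict as in the diff).
paddle.set_flags({
    'FLAGS_cudnn_batchnorm_spatial_persistent': 1,
    'FLAGS_max_inplace_grad_add': 8,
})

# ROCm probe: paddle.fluid.core.is_compiled_with_rocm() ->
# paddle.device.is_compiled_with_rocm(). Note it is a function and must be
# called; a bare reference is always truthy and would silence the branch.
env_var = ("HIP_VISIBLE_DEVICES" if paddle.device.is_compiled_with_rocm()
           else "CUDA_VISIBLE_DEVICES")

# Multi-GPU wrapper: paddle.fluid.dygraph.parallel.DataParallel ->
# paddle.DataParallel (the Linear model here is a placeholder).
model = paddle.nn.Linear(4, 4)
if paddle.distributed.get_world_size() > 1:
    model = paddle.DataParallel(model)
print(env_var, isinstance(model, paddle.DataParallel))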