diff --git a/fluid/DeepASR/data_utils/util.py b/fluid/DeepASR/data_utils/util.py
index 0a48f4696547377dbe89934355e8eaac38966fab..27f5ba8305c0e72852230658858d77fed9d233a4 100644
--- a/fluid/DeepASR/data_utils/util.py
+++ b/fluid/DeepASR/data_utils/util.py
@@ -28,7 +28,7 @@ def to_lodtensor(data, place):
 def lodtensor_to_ndarray(lod_tensor):
     """conver lodtensor to ndarray
     """
-    dims = lod_tensor.get_dims()
+    dims = lod_tensor._get_dims()
     ret = np.zeros(shape=dims).astype('float32')
     for i in xrange(np.product(dims)):
         ret.ravel()[i] = lod_tensor.get_float_element(i)
diff --git a/fluid/DeepASR/model_utils/model.py b/fluid/DeepASR/model_utils/model.py
index 3f4fdccfc93bb14496ce54ae59e49651eca2a537..8ae7e66fc781226c0316fceee65cfb805a1770ba 100644
--- a/fluid/DeepASR/model_utils/model.py
+++ b/fluid/DeepASR/model_utils/model.py
@@ -88,7 +88,7 @@ def stacked_lstmp_model(frame_dim,
         # When the execution place is specified to CUDAPlace, the program will
         # run on all $CUDA_VISIBLE_DEVICES GPUs. Otherwise the program will
         # run on all CPU devices.
-        places = fluid.layers.get_places()
+        places = fluid.layers.device.get_places()
         pd = fluid.layers.ParallelDo(places)
         with pd.do():
             feat_ = pd.read_input(feature)
diff --git a/fluid/language_model/train_on_cloud.py b/fluid/language_model/train_on_cloud.py
index 50b7adc05d8e19ca4be7e5f585f07ec67a1cc5fb..17318400a951944c4bc6a4290b1cbfd3bc5680fa 100644
--- a/fluid/language_model/train_on_cloud.py
+++ b/fluid/language_model/train_on_cloud.py
@@ -187,7 +187,7 @@ def do_train(train_reader,
                        init_low_bound, init_high_bound)
         avg_cost = fluid.layers.mean(x=cost)
     else:
-        places = fluid.layers.get_places()
+        places = fluid.layers.device.get_places()
         pd = fluid.layers.ParallelDo(places)
         with pd.do():
             cost = network(
diff --git a/fluid/sequence_tagging_for_ner/network_conf.py b/fluid/sequence_tagging_for_ner/network_conf.py
index 5eaa704f67641bd9bb98bbac162a0adb7a72c246..17ee1951bd5955ca7a8534b50380e3f694c047f4 100644
--- a/fluid/sequence_tagging_for_ner/network_conf.py
+++ b/fluid/sequence_tagging_for_ner/network_conf.py
@@ -109,7 +109,7 @@ def ner_net(word_dict_len, label_dict_len, parallel, stack_num=2):
         name="target", shape=[1], dtype='int64', lod_level=1)
 
     if parallel:
-        places = fluid.layers.get_places()
+        places = fluid.layers.device.get_places()
         pd = fluid.layers.ParallelDo(places)
         with pd.do():
             word_ = pd.read_input(word)
diff --git a/fluid/text_classification/train.py b/fluid/text_classification/train.py
index 9078f4788319dbf76677c86eef53445fa1e85c1a..0ef923544f18d62f5a049ac79353ce4884b8d195 100644
--- a/fluid/text_classification/train.py
+++ b/fluid/text_classification/train.py
@@ -34,7 +34,7 @@ def train(train_reader,
     if not parallel:
         cost, acc, prediction = network(data, label, len(word_dict))
     else:
-        places = fluid.layers.get_places(device_count=2)
+        places = fluid.layers.device.get_places(device_count=2)
         pd = fluid.layers.ParallelDo(places)
         with pd.do():
             cost, acc, prediction = network(
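Note: every call site above follows the same ParallelDo pattern, so a minimal usage sketch of the renamed entry point is included here for reference. This is an illustrative sketch only, not code from this change set: it assumes a Fluid build where get_places() has moved under fluid.layers.device and ParallelDo is still available, and the toy bag-of-words classifier (parallel_bow_cost, its embedding size, and class count) is made up for this note.

# Minimal sketch of the new call site (illustrative, see assumptions above).
import paddle.fluid as fluid

def parallel_bow_cost(dict_size=10000, class_num=2):
    data = fluid.layers.data(name="data", shape=[1], dtype="int64", lod_level=1)
    label = fluid.layers.data(name="label", shape=[1], dtype="int64")

    # Renamed entry point: fluid.layers.device.get_places() instead of
    # fluid.layers.get_places().
    places = fluid.layers.device.get_places()
    pd = fluid.layers.ParallelDo(places)
    with pd.do():
        # read_input splits the mini-batch across the returned places.
        data_ = pd.read_input(data)
        label_ = pd.read_input(label)
        emb = fluid.layers.embedding(input=data_, size=[dict_size, 128])
        bow = fluid.layers.sequence_pool(input=emb, pool_type="sum")
        prediction = fluid.layers.fc(input=bow, size=class_num, act="softmax")
        cost = fluid.layers.cross_entropy(input=prediction, label=label_)
        acc = fluid.layers.accuracy(input=prediction, label=label_)
        # write_output gathers per-device results back into whole-batch tensors.
        pd.write_output(cost)
        pd.write_output(acc)
    cost, acc = pd()
    avg_cost = fluid.layers.mean(x=cost)
    avg_acc = fluid.layers.mean(x=acc)
    return avg_cost, avg_acc

The read_input/write_output pair is what scatters the batch over the places returned by get_places() and gathers the per-device outputs, which is why only the get_places() lookup path needs to change in this diff.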