diff --git a/cmake/external/mkldnn.cmake b/cmake/external/mkldnn.cmake
index 260985cc8aa4ad0f231798666c048703b64c6d15..baf253df2755657b01b67c410f63b7d8422d4df3 100644
--- a/cmake/external/mkldnn.cmake
+++ b/cmake/external/mkldnn.cmake
@@ -54,7 +54,7 @@ ExternalProject_Add(
     ${EXTERNAL_PROJECT_LOG_ARGS}
     DEPENDS             ${MKLDNN_DEPENDS}
     GIT_REPOSITORY      "https://github.com/01org/mkl-dnn.git"
-    GIT_TAG             "a29d8487a63afca3d5b8c5bbdbb473cf8ccc6e51"
+    GIT_TAG             "64e03a1939e0d526aa8e9f2e3f7dc0ad8d372944"
     PREFIX              ${MKLDNN_SOURCES_DIR}
     UPDATE_COMMAND      ""
     CMAKE_ARGS          -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
diff --git a/paddle/fluid/framework/tensor.cc b/paddle/fluid/framework/tensor.cc
index 56bb9142dabe0d5546e321e675a5acba7bf4d306..d61dbb98a235ca9af089d35318b7f4c68cb125cc 100644
--- a/paddle/fluid/framework/tensor.cc
+++ b/paddle/fluid/framework/tensor.cc
@@ -31,7 +31,8 @@ size_t Tensor::memory_size() const {
   return holder_ == nullptr ? 0UL : holder_->size() - offset_;
 }
 
-void* Tensor::mutable_data(platform::Place place, std::type_index type) {
+void* Tensor::mutable_data(platform::Place place, std::type_index type,
+                           size_t requested_size) {
   if (holder_ != nullptr) {
     holder_->set_type(type);
   }
@@ -39,7 +40,7 @@ void* Tensor::mutable_data(platform::Place place, std::type_index type) {
                     "When calling this method, the Tensor's numel must be "
                     "equal or larger than zero. "
                     "Please check Tensor::Resize has been called first.");
-  int64_t size = numel() * SizeOfType(type);
+  size_t size = requested_size ? requested_size : numel() * SizeOfType(type);
   /* some versions of boost::variant don't have operator!= */
   if (holder_ == nullptr || !(holder_->place() == place) ||
       holder_->size() < size + offset_) {
@@ -68,10 +69,10 @@ void* Tensor::mutable_data(platform::Place place, std::type_index type) {
                                  offset_);
 }
 
-void* Tensor::mutable_data(platform::Place place) {
+void* Tensor::mutable_data(platform::Place place, size_t requested_size) {
   PADDLE_ENFORCE(this->holder_ != nullptr,
                  "Cannot invoke mutable data if current hold nothing.");
-  return mutable_data(place, holder_->type());
+  return mutable_data(place, holder_->type(), requested_size);
 }
 
 Tensor& Tensor::ShareDataWith(const Tensor& src) {
diff --git a/paddle/fluid/framework/tensor.h b/paddle/fluid/framework/tensor.h
index 0bbfd66148e9bc9080654bf1b0b34477115a0e6b..4cf95fa0ae07823289fbf337062190f05e6c6bcf 100644
--- a/paddle/fluid/framework/tensor.h
+++ b/paddle/fluid/framework/tensor.h
@@ -89,22 +89,24 @@ class Tensor {
    * @note    If not exist, then allocation.
    */
   template <typename T>
-  T* mutable_data(platform::Place place);
+  T* mutable_data(platform::Place place, size_t requested_size = 0);
 
-  void* mutable_data(platform::Place place, std::type_index type);
+  void* mutable_data(platform::Place place, std::type_index type,
+                     size_t requested_size = 0);
 
-  void* mutable_data(platform::Place place);
+  void* mutable_data(platform::Place place, size_t requested_size = 0);
 
   /**
    * @brief     Return a pointer to mutable memory block.
   *
-   * @param[in] dims    The dimensions of the memory block.
-   * @param[in] place   The place of the memory block.
+   * @param[in] dims           The dimensions of the memory block.
+   * @param[in] place          The place of the memory block.
+   * @param[in] requested_size The size of the block in bytes.
   *
    * @note      If not exist, then allocation.
    */
   template <typename T>
-  T* mutable_data(DDim dims, platform::Place place);
+  T* mutable_data(DDim dims, platform::Place place, size_t requested_size = 0);
 
   /*! Return the dimensions of the memory block. */
   const DDim& dims() const;
diff --git a/paddle/fluid/framework/tensor_impl.h b/paddle/fluid/framework/tensor_impl.h
index b7b62eef23ec351686378c913d18fc72308fd7b2..6d3047c95d6cf30c2a5308d4f69ded367066d78c 100644
--- a/paddle/fluid/framework/tensor_impl.h
+++ b/paddle/fluid/framework/tensor_impl.h
@@ -46,16 +46,17 @@ inline T* Tensor::data() {
 }
 
 template <typename T>
-inline T* Tensor::mutable_data(DDim dims, platform::Place place) {
+inline T* Tensor::mutable_data(DDim dims, platform::Place place,
+                               size_t requested_size) {
   static_assert(std::is_pod<T>::value, "T must be POD");
   Resize(dims);
-  return mutable_data<T>(place);
+  return mutable_data<T>(place, requested_size);
 }
 
 template <typename T>
-inline T* Tensor::mutable_data(platform::Place place) {
+inline T* Tensor::mutable_data(platform::Place place, size_t requested_size) {
   static_assert(std::is_pod<T>::value, "T must be POD");
-  return reinterpret_cast<T*>(mutable_data(place, typeid(T)));
+  return reinterpret_cast<T*>(mutable_data(place, typeid(T), requested_size));
 }
 
 inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) {
diff --git a/paddle/fluid/operators/conv_mkldnn_op.cc b/paddle/fluid/operators/conv_mkldnn_op.cc
index 527a87db533ac25c3170fbb3ae6a9b9aff589b3d..c5cbadc892904dc064b49ebc461944c4671a69da 100644
--- a/paddle/fluid/operators/conv_mkldnn_op.cc
+++ b/paddle/fluid/operators/conv_mkldnn_op.cc
@@ -53,6 +53,18 @@ class ConvMKLDNNHandler : public platform::MKLDNNHandler {
     key_ += "-BWD";
   }
 
+  size_t GetDstMemorySize() const {
+    return conv_pd_->dst_primitive_desc().get_size();
+  }
+
+  size_t GetDiffWeightsMemorySize() const {
+    return conv_bwd_weights_pd_->diff_weights_primitive_desc().get_size();
+  }
+
+  size_t GetDiffSourceMemorySize() const {
+    return conv_bwd_data_pd_->diff_src_primitive_desc().get_size();
+  }
+
   std::shared_ptr<mkldnn::memory> AcquireSrcMemoryFromWeightsPrimitive(
       const std::shared_ptr<mkldnn::memory> user_memory_p,
       std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
@@ -294,7 +306,6 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
 
     const T* input_data = input->data<T>();
     const T* filter_data = filter->data<T>();
-    T* output_data = output->mutable_data<T>(ctx.GetPlace());
 
     std::vector<int> src_tz = paddle::framework::vectorize2int(input->dims());
     std::vector<int> weights_tz =
@@ -354,6 +365,8 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     auto user_weights_memory_p = handler.AcquireWeightsMemory(
         user_weights_md, to_void_cast<T>(filter_data));
 
+    T* output_data =
+        output->mutable_data<T>(ctx.GetPlace(), handler.GetDstMemorySize());
     // create reorder primitive if the input format is not the preferred one
     auto src_memory_p =
         handler.AcquireSrcMemoryFromPrimitive(user_src_memory_p, pipeline);
@@ -476,13 +489,6 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
     T* input_grad_data = nullptr;
     T* filter_grad_data = nullptr;
 
-    if (input_grad) {
-      input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
-    }
-    if (filter_grad) {
-      filter_grad_data = filter_grad->mutable_data<T>(ctx.GetPlace());
-    }
-
     std::vector<int> src_tz = paddle::framework::vectorize2int(input->dims());
     std::vector<int> weights_tz =
         paddle::framework::vectorize2int(filter->dims());
@@ -568,6 +574,9 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
           handler.AcquireDiffDstMemoryFromWeightsPrimitive(
               user_diff_dst_memory_p, pipeline);
 
+      const size_t size = handler.GetDiffWeightsMemorySize();
+      filter_grad_data = filter_grad->mutable_data<T>(ctx.GetPlace(), size);
+
       auto diff_weights_memory_p =
           handler.AcquireDiffWeightsMemoryFromWeightsPrimitive(
               reinterpret_cast<void*>(filter_grad_data));
@@ -590,6 +599,9 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
           handler.AcquireDiffDstMemoryFromDataPrimitive(user_diff_dst_memory_p,
                                                         pipeline);
 
+      const size_t size = handler.GetDiffSourceMemorySize();
+      input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace(), size);
+
       auto diff_src_memory_p = handler.AcquireDiffSrcMemoryFromDataPrimitive(
           reinterpret_cast<void*>(input_grad_data));
 
diff --git a/python/paddle/dataset/common.py b/python/paddle/dataset/common.py
index 1d7ff582c86a40c8c2086e0de16e89d69c94da60..ece4046f5b7a7eff5be724d6f890665be7f3344e 100644
--- a/python/paddle/dataset/common.py
+++ b/python/paddle/dataset/common.py
@@ -19,6 +19,7 @@ import hashlib
 import os
 import errno
 import shutil
+import six
 import sys
 import importlib
 import paddle.dataset
@@ -94,6 +95,8 @@ def download(url, module_name, md5sum, save_name=None):
                 dl = 0
                 total_length = int(total_length)
                 for data in r.iter_content(chunk_size=4096):
+                    if six.PY2:
+                        data = six.b(data)
                     dl += len(data)
                     f.write(data)
                     done = int(50 * dl / total_length)
diff --git a/python/paddle/dataset/flowers.py b/python/paddle/dataset/flowers.py
index aa73bbaf7024ec873d9e921205536f12e097ff32..0a1cdaceaf3be48a06b1c0b5b979e90f50e9000c 100644
--- a/python/paddle/dataset/flowers.py
+++ b/python/paddle/dataset/flowers.py
@@ -35,6 +35,7 @@ import itertools
 import functools
 from .common import download
 import tarfile
+import six
 import scipy.io as scio
 from paddle.dataset.image import *
 from paddle.reader import *
@@ -45,10 +46,10 @@ from six.moves import cPickle as pickle
 from six.moves import zip
 __all__ = ['train', 'test', 'valid']
 
-DATA_URL = 'http://www.robots.ox.ac.uk/~vgg/data/flowers/102/102flowers.tgz'
-LABEL_URL = 'http://www.robots.ox.ac.uk/~vgg/data/flowers/102/imagelabels.mat'
-SETID_URL = 'http://www.robots.ox.ac.uk/~vgg/data/flowers/102/setid.mat'
-DATA_MD5 = '33bfc11892f1e405ca193ae9a9f2a118'
+DATA_URL = 'http://paddlemodels.cdn.bcebos.com/flowers/102flowers.tgz'
+LABEL_URL = 'http://paddlemodels.cdn.bcebos.com/flowers/imagelabels.mat'
+SETID_URL = 'http://paddlemodels.cdn.bcebos.com/flowers/setid.mat'
+DATA_MD5 = '52808999861908f626f3c1f4e79d11fa'
 LABEL_MD5 = 'e0620be6f572b9609742df49c70aed4d'
 SETID_MD5 = 'a5357ecc9cb78c4bef273ce3793fc85c'
 # In official 'readme', tstid is the flag of test data
@@ -120,7 +121,10 @@ def reader_creator(data_file,
             file = file.strip()
             batch = None
             with open(file, 'rb') as f:
-                batch = pickle.load(f)
+                if six.PY2:
+                    batch = pickle.load(f)
+                else:
+                    batch = pickle.load(f, encoding='bytes')
             data = batch['data']
             labels = batch['label']
             for sample, label in zip(data, batch['label']):
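Below is a minimal, standalone sketch of the allocation pattern this patch introduces; MiniTensor is a hypothetical stand-in, not Paddle's Tensor. The idea it illustrates is grounded in the diff: a non-zero requested_size overrides numel() * sizeof(T), so an MKL-DNN kernel can allocate exactly the number of bytes the primitive descriptor reports (e.g. handler.GetDstMemorySize()) when the chosen blocked or padded memory format needs more room than the logical element count suggests. The 32-byte value in main() is made up for illustration only.

// Standalone sketch: models how a requested byte size, when provided, wins
// over numel() * sizeof(T), and how the buffer is only reallocated when the
// current holder is too small (mirroring the patched Tensor::mutable_data).
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

class MiniTensor {
 public:
  void Resize(const std::vector<int64_t>& dims) { dims_ = dims; }

  int64_t numel() const {
    int64_t n = 1;
    for (int64_t d : dims_) n *= d;
    return n;
  }

  // If requested_size is non-zero, allocate that many bytes; otherwise fall
  // back to numel() * sizeof(T). Reallocate only when the holder is smaller.
  template <typename T>
  T* mutable_data(size_t requested_size = 0) {
    size_t size = requested_size ? requested_size : numel() * sizeof(T);
    if (holder_ == nullptr || holder_size_ < size) {
      holder_.reset(new unsigned char[size]);
      holder_size_ = size;
    }
    return reinterpret_cast<T*>(holder_.get());
  }

  size_t capacity() const { return holder_size_; }

 private:
  std::vector<int64_t> dims_;
  std::unique_ptr<unsigned char[]> holder_;
  size_t holder_size_ = 0;
};

int main() {
  MiniTensor output;
  output.Resize({2, 3});

  // Default path: 6 floats -> 24 bytes.
  output.mutable_data<float>();
  std::cout << "default capacity: " << output.capacity() << " bytes\n";

  // MKL-DNN-style path: the primitive may need more bytes than numel()
  // suggests, so the caller passes the primitive-reported size, as
  // conv_mkldnn_op.cc now does with handler.GetDstMemorySize().
  size_t primitive_dst_bytes = 32;  // hypothetical value for illustration
  output.mutable_data<float>(primitive_dst_bytes);
  std::cout << "requested capacity: " << output.capacity() << " bytes\n";
  return 0;
}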