diff --git a/paddle/gserver/tests/LayerGradUtil.cpp b/paddle/gserver/tests/LayerGradUtil.cpp
index 7617af10ba719490d1b33dd297b070cd8c7c292c..a0b1cd471dd02fd20bb2247395bdb74651610bbf 100644
--- a/paddle/gserver/tests/LayerGradUtil.cpp
+++ b/paddle/gserver/tests/LayerGradUtil.cpp
@@ -778,8 +778,10 @@ void testProjectionGrad(ProjectionConfig conf,
   config.biasSize = biasSize == 0 ? config.layerConfig.size() : biasSize;
   config.layerConfig.set_bias_size(config.biasSize);
   config.layerConfig.set_shared_biases(sharedBias);
-  config.inputDefs.push_back(
-      {inputType, "layer_0", conf.input_size(), parameterSize});
+  config.inputDefs.push_back({inputType,
+                              "layer_0",
+                              static_cast<size_t>(conf.input_size()),
+                              parameterSize});
   *config.layerConfig.add_inputs()->mutable_proj_conf() = conf;
   config.testState = testState;
   testLayerGrad(config, "mixed", batchSize, false, useGpu);
diff --git a/paddle/math/MathFunctions.cpp b/paddle/math/MathFunctions.cpp
index 91817eab468de29f0bf693433657f20103f87570..6203cd3b9ab9f95853cd3c46750fd55d6dfbba4a 100644
--- a/paddle/math/MathFunctions.cpp
+++ b/paddle/math/MathFunctions.cpp
@@ -111,7 +111,7 @@ int getrf(const CBLAS_ORDER order,
   return LAPACKE_dgetrf(order, M, N, A, lda, ipiv);
 #endif
 #else
-  LOG(FATAL) << "Not implemented".
+  LOG(FATAL) << "Not implemented";
 #endif
   return 0;
 }
@@ -129,7 +129,7 @@ int getri(const CBLAS_ORDER order,
   return LAPACKE_sgetri(order, N, A, lda, ipiv);
 #endif
 #else
-  LOG(FATAL) << "Not implemented".
+  LOG(FATAL) << "Not implemented";
 #endif
   return 0;
 }
@@ -147,7 +147,7 @@ int getri(const CBLAS_ORDER order,
   return LAPACKE_dgetri(order, N, A, lda, ipiv);
 #endif
 #else
-  LOG(FATAL) << "Not implemented".
+  LOG(FATAL) << "Not implemented";
 #endif
   return 0;
 }
diff --git a/paddle/utils/arch/linux/Locks.cpp b/paddle/utils/arch/linux/Locks.cpp
index 2a6f96e04d024ac3977bc154dbeeb69ce9ab3a5d..c189229cfccf7d907118bd87a15889c8fab00724 100644
--- a/paddle/utils/arch/linux/Locks.cpp
+++ b/paddle/utils/arch/linux/Locks.cpp
@@ -38,34 +38,68 @@ void Semaphore::post() { sem_post(&m->sem); }
 
 class SpinLockPrivate {
 public:
-  inline SpinLockPrivate() { pthread_spin_init(&lock_, 0); }
-  inline ~SpinLockPrivate() { pthread_spin_destroy(&lock_); }
+  inline SpinLockPrivate() {
+#ifndef __ANDROID__
+    pthread_spin_init(&lock_, 0);
+#else
+    lock_ = 0;
+#endif
+  }
+  inline ~SpinLockPrivate() {
+#ifndef __ANDROID__
+    pthread_spin_destroy(&lock_);
+#endif
+  }
+#ifndef __ANDROID__
   pthread_spinlock_t lock_;
-  char padding_[64 - sizeof(pthread_spinlock_t)];
+#else
+  unsigned long lock_;
+#endif
+  char padding_[64 - sizeof(lock_)];
 };
 
 SpinLock::SpinLock() : m(new SpinLockPrivate()) {}
 
 SpinLock::~SpinLock() { delete m; }
 
-void SpinLock::lock() { pthread_spin_lock(&m->lock_); }
+void SpinLock::lock() {
+#ifndef __ANDROID__
+  pthread_spin_lock(&m->lock_);
+#endif
+}
 
-void SpinLock::unlock() { pthread_spin_unlock(&m->lock_); }
+void SpinLock::unlock() {
+#ifndef __ANDROID__
+  pthread_spin_unlock(&m->lock_);
+#endif
+}
 
 class ThreadBarrierPrivate {
 public:
+#ifndef __ANDROID__
   pthread_barrier_t barrier_;
+#else
+  unsigned long barrier_;
+#endif
 };
 
 ThreadBarrier::ThreadBarrier(int count) : m(new ThreadBarrierPrivate()) {
+#ifndef __ANDROID__
   pthread_barrier_init(&m->barrier_, nullptr, count);
+#endif
 }
 
 ThreadBarrier::~ThreadBarrier() {
+#ifndef __ANDROID__
   pthread_barrier_destroy(&m->barrier_);
+#endif
   delete m;
 }
 
-void ThreadBarrier::wait() { pthread_barrier_wait(&m->barrier_); }
+void ThreadBarrier::wait() {
+#ifndef __ANDROID__
+  pthread_barrier_wait(&m->barrier_);
+#endif
+}
 
 }  // namespace paddle
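
Note on the Locks.cpp change: with the #ifndef __ANDROID__ guards above, SpinLock::lock()/unlock() and ThreadBarrier::wait() compile to no-ops on Android, and the pthread members are replaced by plain unsigned long placeholders. If an actual lock were ever needed on a platform without pthread_spinlock_t, one possible fallback (a minimal sketch under the assumption that C++11 <atomic> is available; the class name AtomicSpinLock is hypothetical and not part of this patch) is a busy-wait lock built on std::atomic_flag:

#include <atomic>

// Hypothetical alternative to the no-op Android path: a spinlock built on
// std::atomic_flag for platforms that lack pthread_spinlock_t. Illustrative
// only; the patch itself simply compiles the pthread calls out on __ANDROID__.
class AtomicSpinLock {
public:
  void lock() {
    // Spin until the flag is acquired; acquire ordering pairs with the
    // release in unlock().
    while (flag_.test_and_set(std::memory_order_acquire)) {
    }
  }
  void unlock() { flag_.clear(std::memory_order_release); }

private:
  std::atomic_flag flag_ = ATOMIC_FLAG_INIT;
};

A pthread_mutex_t fallback would also work and avoids burning CPU while waiting, at the cost of heavier contention handling than a spinlock.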