提交 4c9c5501 编写于 作者: Z Zhen Wang

use fp32 to initialize parameter values.

上级 8d74782e
......@@ -11,31 +11,16 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/random.h>
#include <thrust/transform.h>
#include <type_traits>
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/fill_constant_op.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
namespace details {
template <typename T>
struct RandomDistributionType {
using Type = T;
};
template <>
struct RandomDistributionType<platform::float16> {
using Type = float;
};
} // namespace details
template <typename T>
struct GaussianGenerator {
T mean_, std_;
......@@ -49,16 +34,12 @@ struct GaussianGenerator {
: mean_(mean), std_(std), seed_(seed), offset_(offset) {}
__host__ __device__ T operator()(const unsigned int n) const {
using DataType = typename details::RandomDistributionType<T>::Type;
thrust::minstd_rand rng;
rng.seed(seed_);
thrust::normal_distribution<DataType> dist(static_cast<DataType>(mean_),
static_cast<DataType>(std_));
thrust::normal_distribution<T> dist(mean_, std_);
unsigned int new_n = n + offset_;
rng.discard(new_n);
T out = static_cast<T>(dist(rng));
return out;
return dist(rng);
}
};
......@@ -141,13 +122,10 @@ class GPUGaussianRandomBatchSizeLikeKernel : public framework::OpKernel<T> {
} // namespace operators
} // namespace paddle
REGISTER_OP_CUDA_KERNEL(
gaussian_random, paddle::operators::GPUGaussianRandomKernel<float>,
paddle::operators::GPUGaussianRandomKernel<double>,
paddle::operators::GPUGaussianRandomKernel<paddle::platform::float16>);
REGISTER_OP_CUDA_KERNEL(gaussian_random,
paddle::operators::GPUGaussianRandomKernel<float>,
paddle::operators::GPUGaussianRandomKernel<double>);
REGISTER_OP_CUDA_KERNEL(
gaussian_random_batch_size_like,
paddle::operators::GPUGaussianRandomBatchSizeLikeKernel<float>,
paddle::operators::GPUGaussianRandomBatchSizeLikeKernel<double>,
paddle::operators::GPUGaussianRandomBatchSizeLikeKernel<
paddle::platform::float16>);
paddle::operators::GPUGaussianRandomBatchSizeLikeKernel<double>);
......@@ -18,7 +18,6 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/uniform_random_op.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
......@@ -164,12 +163,9 @@ class GPUUniformRandomKernel : public framework::OpKernel<T> {
} // namespace operators
} // namespace paddle
REGISTER_OP_CUDA_KERNEL(
uniform_random, paddle::operators::GPUUniformRandomKernel<float>,
paddle::operators::GPUUniformRandomKernel<double>,
paddle::operators::GPUUniformRandomKernel<paddle::platform::float16>);
REGISTER_OP_CUDA_KERNEL(
uniform_random_batch_size_like,
REGISTER_OP_CUDA_KERNEL(uniform_random,
paddle::operators::GPUUniformRandomKernel<float>,
paddle::operators::GPUUniformRandomKernel<double>,
paddle::operators::GPUUniformRandomKernel<paddle::platform::float16>);
paddle::operators::GPUUniformRandomKernel<double>);
REGISTER_OP_CUDA_KERNEL(uniform_random_batch_size_like,
paddle::operators::GPUUniformRandomKernel<float>,
paddle::operators::GPUUniformRandomKernel<double>);
......@@ -131,10 +131,6 @@ struct PADDLE_ALIGN(2) float16 {
#endif
}
HOSTDEVICE inline float16(int32_t val) : float16(static_cast<float>(val)) {}
HOSTDEVICE inline float16(uint32_t val) : float16(static_cast<float>(val)) {}
HOSTDEVICE inline explicit float16(bool b) : x(b ? 0x3c00 : 0) {}
template <class T>
......
......@@ -267,35 +267,18 @@ def cast_net_to_fp16(program):
op._set_attr('dtype', core.VarDesc.VarType.FP16)
def cast_parameters_to_fp16(program):
def cast_parameters_to_fp16(exe, program, scope=None):
exe_scope = scope if scope is not None else global_scope()
global_block = program.global_block()
all_parameters = global_block.all_parameters()
is_bn_params = lambda param: (param.name.find('bn') != -1 and (param.name.endswith('_offset') or param.name.endswith('_mean') or param.name.endswith('_scale') or param.name.endswith('_variance')))
all_param_names = {p.name for p in all_parameters if not is_bn_params(p)}
ops = global_block.ops
for param in all_parameters:
if param.name in all_param_names:
param_var = global_block.var(param.name)
if param_var.dtype == core.VarDesc.VarType.FP32:
param_var.desc.set_dtype(core.VarDesc.VarType.FP16)
for op in ops:
target_op = False
for out_name in op.output_names:
for out_var_name in op.output(out_name):
if out_var_name in all_param_names:
target_op = True
if target_op:
if op.has_attr('in_dtype') and op.attr(
'in_dtype') == core.VarDesc.VarType.FP32:
op._set_attr('in_dtype', core.VarDesc.VarType.FP16)
if op.has_attr('out_dtype') and op.attr(
'out_dtype') == core.VarDesc.VarType.FP32:
op._set_attr('out_dtype', core.VarDesc.VarType.FP16)
if op.has_attr('dtype') and op.attr(
'dtype') == core.VarDesc.VarType.FP32:
op._set_attr('dtype', core.VarDesc.VarType.FP16)
if not (param.name.find('bn') != -1 and
(param.name.endswith('_offset') or param.name.endswith('_mean')
or param.name.endswith('_scale') or
param.name.endswith('_variance'))):
param_t = exe_scope.find_var(param.name).get_tensor()
data = np.array(param_t)
param_t.set(np.float16(data), exe.place)
def rewrite_program(main_prog, amp_lists):
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册