Unverified commit 12fb18dd authored by gouzil, committed by GitHub

add modernize-redundant-void-arg check (#55652)

Parent 6c675ed9
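The clang-tidy check `modernize-redundant-void-arg` rewrites the C-style `f(void)` spelling to `f()`; in C++ the two declare the same type, so every change in this commit is behavior-preserving. A minimal stand-alone sketch of what the fix-it does (hypothetical names, not taken from Paddle):

    #include <cstdint>

    // Before the fix: C-style "no arguments" spelling.
    uint64_t file_size_before(void) { return 0; }

    // After the fix-it: in C++, "()" already declares a function taking
    // no parameters, so the rewrite is purely cosmetic.
    uint64_t file_size_after() { return 0; }

    // The same applies to lambdas and function-pointer types:
    auto always_true = []() { return true; };  // was: [](void) { ... }
    using handler_t = void (*)();              // same type as void (*)(void)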
@@ -174,7 +174,7 @@ Checks: '
modernize-make-unique,
-modernize-pass-by-value,
-modernize-raw-string-literal,
--modernize-redundant-void-arg,
+modernize-redundant-void-arg,
-modernize-replace-auto-ptr,
-modernize-replace-random-shuffle,
-modernize-shrink-to-fit,
@@ -115,17 +115,17 @@ class BufferedLineFileReader {
FILEReader reader(fp);
return read_lines<FILEReader>(&reader, func, skip_lines);
}
-uint64_t file_size(void) { return total_len_; }
+uint64_t file_size() { return total_len_; }
void set_sample_rate(float r) { sample_rate_ = r; }
size_t get_sample_line() { return sample_line_; }
-bool is_error(void) { return (error_line_ > 10); }
+bool is_error() { return (error_line_ > 10); }
private:
SampleFunc get_sample_func() {
if (std::abs(sample_rate_ - 1.0f) < 1e-5f) {
-return [this](void) { return true; };
+return []() { return true; };
}
-return [this](void) {
+return [this]() {
return (uniform_distribution_(random_engine_) < sample_rate_);
};
}
@@ -2155,7 +2155,7 @@ void SlotRecordInMemoryDataFeed::LoadIntoMemory() {
LoadIntoMemoryByCommand();
}
}
-void SlotRecordInMemoryDataFeed::LoadIntoMemoryByLib(void) {
+void SlotRecordInMemoryDataFeed::LoadIntoMemoryByLib() {
if (true) {
// user defined file format analysis
LoadIntoMemoryByFile();
@@ -2164,7 +2164,7 @@ void SlotRecordInMemoryDataFeed::LoadIntoMemoryByLib(void) {
}
}
-void SlotRecordInMemoryDataFeed::LoadIntoMemoryByFile(void) {
+void SlotRecordInMemoryDataFeed::LoadIntoMemoryByFile() {
#if (defined _LINUX) && (defined PADDLE_WITH_HETERPS) && \
(defined PADDLE_WITH_PSLIB)
paddle::framework::CustomParser* parser =
@@ -2237,7 +2237,7 @@ void SlotRecordInMemoryDataFeed::LoadIntoMemoryByFile(void) {
#endif
}
-void SlotRecordInMemoryDataFeed::LoadIntoMemoryByLine(void) {
+void SlotRecordInMemoryDataFeed::LoadIntoMemoryByLine() {
#ifdef _LINUX
paddle::framework::CustomParser* parser =
global_dlmanager_pool().Load(so_parser_name_, all_slots_info_);
@@ -2333,7 +2333,7 @@ void SlotRecordInMemoryDataFeed::LoadIntoMemoryByLine(void) {
#endif
}
-void SlotRecordInMemoryDataFeed::LoadIntoMemoryByCommand(void) {
+void SlotRecordInMemoryDataFeed::LoadIntoMemoryByCommand() {
#ifdef _LINUX
std::string filename;
BufferedLineFileReader line_reader;
@@ -3222,7 +3222,7 @@ void MiniBatchGpuPack::pack_instance(const SlotRecord* ins_vec, int num) {
transfer_to_gpu();
}
-void MiniBatchGpuPack::transfer_to_gpu(void) {
+void MiniBatchGpuPack::transfer_to_gpu() {
copy_host2device(&value_.d_uint64_lens, buf_.h_uint64_lens);
copy_host2device(&value_.d_uint64_keys, buf_.h_uint64_keys);
copy_host2device(&value_.d_uint64_offset, buf_.h_uint64_offset);
@@ -427,7 +427,7 @@ bool IsGpuMallocRecorded(int dev_id) {
return RecordedGpuMallocHelper::Instance(dev_id)->NeedRecord();
}
-void EmptyCache(void) {
+void EmptyCache() {
std::vector<int> devices = GetSelectedDevices();
for (auto device : devices) {
memory::Release(CUDAPlace(device));
@@ -1835,87 +1835,87 @@ static PyObject* tensor__eq__method(TensorObject* self,
PyMethodDef math_op_patch_methods[] = {
{"__add__",
-(PyCFunction)(void (*)(void))tensor__add__method,
+(PyCFunction)(void (*)())tensor__add__method,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"__radd__",
-(PyCFunction)(void (*)(void))tensor__add__method,
+(PyCFunction)(void (*)())tensor__add__method,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"__sub__",
-(PyCFunction)(void (*)(void))tensor__sub__method,
+(PyCFunction)(void (*)())tensor__sub__method,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"__rsub__",
-(PyCFunction)(void (*)(void))tensor__rsub__method,
+(PyCFunction)(void (*)())tensor__rsub__method,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"__mul__",
-(PyCFunction)(void (*)(void))tensor__mul__method,
+(PyCFunction)(void (*)())tensor__mul__method,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"__rmul__",
-(PyCFunction)(void (*)(void))tensor__mul__method,
+(PyCFunction)(void (*)())tensor__mul__method,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"__div__",
-(PyCFunction)(void (*)(void))tensor__div__method,
+(PyCFunction)(void (*)())tensor__div__method,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"__truediv__",
-(PyCFunction)(void (*)(void))tensor__div__method,
+(PyCFunction)(void (*)())tensor__div__method,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"__rdiv__",
-(PyCFunction)(void (*)(void))tensor__rdiv__method,
+(PyCFunction)(void (*)())tensor__rdiv__method,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"__rtruediv__",
-(PyCFunction)(void (*)(void))tensor__rdiv__method,
+(PyCFunction)(void (*)())tensor__rdiv__method,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"__floordiv__",
-(PyCFunction)(void (*)(void))tensor__floordiv__method,
+(PyCFunction)(void (*)())tensor__floordiv__method,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"__pow__",
-(PyCFunction)(void (*)(void))tensor__pow__method,
+(PyCFunction)(void (*)())tensor__pow__method,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"__rpow__",
-(PyCFunction)(void (*)(void))tensor__rpow__method,
+(PyCFunction)(void (*)())tensor__rpow__method,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"__mod__",
-(PyCFunction)(void (*)(void))tensor__mod__method,
+(PyCFunction)(void (*)())tensor__mod__method,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"__matmul__",
-(PyCFunction)(void (*)(void))tensor__matmul__method,
+(PyCFunction)(void (*)())tensor__matmul__method,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"__gt__",
-(PyCFunction)(void (*)(void))tensor__gt__method,
+(PyCFunction)(void (*)())tensor__gt__method,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"__ge__",
-(PyCFunction)(void (*)(void))tensor__ge__method,
+(PyCFunction)(void (*)())tensor__ge__method,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"__lt__",
-(PyCFunction)(void (*)(void))tensor__lt__method,
+(PyCFunction)(void (*)())tensor__lt__method,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"__le__",
-(PyCFunction)(void (*)(void))tensor__le__method,
+(PyCFunction)(void (*)())tensor__le__method,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"__eq__",
-(PyCFunction)(void (*)(void))tensor__eq__method,
+(PyCFunction)(void (*)())tensor__eq__method,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"__ne__",
-(PyCFunction)(void (*)(void))tensor__ne__method,
+(PyCFunction)(void (*)())tensor__ne__method,
METH_VARARGS | METH_KEYWORDS,
NULL},
{NULL, NULL, 0, NULL}};
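The `(void (*)(void))` casts rewritten above follow the usual CPython method-table idiom: a `METH_VARARGS | METH_KEYWORDS` handler takes three arguments, so it cannot be stored as a two-argument `PyCFunction` without a cast, and casting through a generic function-pointer type first avoids `-Wcast-function-type` warnings. Since C++ treats `void (*)()` and `void (*)(void)` as the same type, the check's rewrite here is again purely cosmetic. A minimal sketch with a hypothetical `my_method`:

    #include <Python.h>

    // Keyword-taking methods have this three-argument signature,
    // not PyCFunction's two-argument one (hypothetical method).
    static PyObject* my_method(PyObject* self, PyObject* args, PyObject* kwargs) {
      Py_RETURN_NONE;
    }

    static PyMethodDef methods[] = {
        {"my_method",
         // Cast through a generic function-pointer type, then to PyCFunction.
         (PyCFunction)(void (*)())my_method,
         METH_VARARGS | METH_KEYWORDS,
         NULL},
        {NULL, NULL, 0, NULL}};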
@@ -2034,11 +2034,11 @@ static PyObject* tensor_method__is_string_tensor_hold_allocation(
PyMethodDef variable_methods[] = {
{"numpy",
-(PyCFunction)(void (*)(void))tensor_method_numpy,
+(PyCFunction)(void (*)())tensor_method_numpy,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_is_initialized",
-(PyCFunction)(void (*)(void))tensor_method__is_initialized,
+(PyCFunction)(void (*)())tensor_method__is_initialized,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_is_dense_tensor_hold_allocation",
@@ -2047,227 +2047,227 @@ PyMethodDef variable_methods[] = {
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_copy_to",
-(PyCFunction)(void (*)(void))tensor_method__copy_to,
+(PyCFunction)(void (*)())tensor_method__copy_to,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"copy_",
-(PyCFunction)(void (*)(void))tensor_method_copy_,
+(PyCFunction)(void (*)())tensor_method_copy_,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"clone",
-(PyCFunction)(void (*)(void))tensor_method_clone,
+(PyCFunction)(void (*)())tensor_method_clone,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"reconstruct_from_",
-(PyCFunction)(void (*)(void))tensor_method_reconstruct_from_,
+(PyCFunction)(void (*)())tensor_method_reconstruct_from_,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"retain_grads",
-(PyCFunction)(void (*)(void))tensor_retain_grads,
+(PyCFunction)(void (*)())tensor_retain_grads,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"clear_gradient",
-(PyCFunction)(void (*)(void))tensor_clear_gradient,
+(PyCFunction)(void (*)())tensor_clear_gradient,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"is_dense",
-(PyCFunction)(void (*)(void))tensor_method_is_dense,
+(PyCFunction)(void (*)())tensor_method_is_dense,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"is_dist",
-(PyCFunction)(void (*)(void))tensor_method_is_dist,
+(PyCFunction)(void (*)())tensor_method_is_dist,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_zero_grads",
-(PyCFunction)(void (*)(void))tensor__zero_grads,
+(PyCFunction)(void (*)())tensor__zero_grads,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_share_buffer_to",
-(PyCFunction)(void (*)(void))tensor__share_buffer_to,
+(PyCFunction)(void (*)())tensor__share_buffer_to,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_is_shared_buffer_with",
-(PyCFunction)(void (*)(void))tensor__is_shared_buffer_with,
+(PyCFunction)(void (*)())tensor__is_shared_buffer_with,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_share_underline_tensor_to",
-(PyCFunction)(void (*)(void))tensor__share_underline_tensor_to,
+(PyCFunction)(void (*)())tensor__share_underline_tensor_to,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_is_shared_underline_tensor_with",
-(PyCFunction)(void (*)(void))tensor__is_shared_underline_tensor_with,
+(PyCFunction)(void (*)())tensor__is_shared_underline_tensor_with,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"detach",
-(PyCFunction)(void (*)(void))tensor_method_detach,
+(PyCFunction)(void (*)())tensor_method_detach,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"get_tensor",
-(PyCFunction)(void (*)(void))tensor_method_get_underline_tensor,
+(PyCFunction)(void (*)())tensor_method_get_underline_tensor,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"get_selected_rows",
-(PyCFunction)(void (*)(void))tensor_method_get_underline_selected_rows,
+(PyCFunction)(void (*)())tensor_method_get_underline_selected_rows,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_get_tensor_from_selected_rows",
-(PyCFunction)(void (*)(void))tensor_method__get_tensor_from_selected_rows,
+(PyCFunction)(void (*)())tensor_method__get_tensor_from_selected_rows,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_getitem_index_not_tensor",
-(PyCFunction)(void (*)(void))tensor__getitem_index_not_tensor,
+(PyCFunction)(void (*)())tensor__getitem_index_not_tensor,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_getitem_from_offset",
-(PyCFunction)(void (*)(void))tensor__getitem_from_offset,
+(PyCFunction)(void (*)())tensor__getitem_from_offset,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"__setitem_eager_tensor__",
-(PyCFunction)(void (*)(void))tensor_method__setitem_eager_tensor,
+(PyCFunction)(void (*)())tensor_method__setitem_eager_tensor,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_register_grad_hook",
-(PyCFunction)(void (*)(void))tensor_register_grad_hook,
+(PyCFunction)(void (*)())tensor_register_grad_hook,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_remove_grad_hook",
-(PyCFunction)(void (*)(void))tensor_remove_grad_hook,
+(PyCFunction)(void (*)())tensor_remove_grad_hook,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_register_backward_hook",
-(PyCFunction)(void (*)(void))tensor_register_reduce_hook,
+(PyCFunction)(void (*)())tensor_register_reduce_hook,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_set_grad_type",
-(PyCFunction)(void (*)(void))tensor__set_grad_type,
+(PyCFunction)(void (*)())tensor__set_grad_type,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_clear",
-(PyCFunction)(void (*)(void))tensor__clear,
+(PyCFunction)(void (*)())tensor__clear,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_clear_dataptr",
-(PyCFunction)(void (*)(void))tensor__clear_dataptr,
+(PyCFunction)(void (*)())tensor__clear_dataptr,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_copy_gradient_from",
-(PyCFunction)(void (*)(void))tensor__copy_gradient_from,
+(PyCFunction)(void (*)())tensor__copy_gradient_from,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_tensor_use_gpudnn",
-(PyCFunction)(void (*)(void))tensor__use_gpudnn,
+(PyCFunction)(void (*)())tensor__use_gpudnn,
METH_VARARGS | METH_KEYWORDS,
NULL},
/** the methods to adapt old dygraph, will be removed in the future **/
{"set_string_list",
-(PyCFunction)(void (*)(void))tensor_method_set_string_list,
+(PyCFunction)(void (*)())tensor_method_set_string_list,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"set_vocab",
-(PyCFunction)(void (*)(void))tensor_method_set_vocab,
+(PyCFunction)(void (*)())tensor_method_set_vocab,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"get_map_tensor",
-(PyCFunction)(void (*)(void))tensor_method_get_map_tensor,
+(PyCFunction)(void (*)())tensor_method_get_map_tensor,
METH_VARARGS | METH_KEYWORDS,
NULL},
/***the method of sparse tensor****/
{"nnz",
-(PyCFunction)(void (*)(void))tensor_method_get_non_zero_nums,
+(PyCFunction)(void (*)())tensor_method_get_non_zero_nums,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"indices",
-(PyCFunction)(void (*)(void))tensor_method_get_non_zero_indices,
+(PyCFunction)(void (*)())tensor_method_get_non_zero_indices,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"values",
-(PyCFunction)(void (*)(void))tensor_method_get_non_zero_elements,
+(PyCFunction)(void (*)())tensor_method_get_non_zero_elements,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"crows",
-(PyCFunction)(void (*)(void))tensor_method_get_non_zero_crows,
+(PyCFunction)(void (*)())tensor_method_get_non_zero_crows,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"cols",
-(PyCFunction)(void (*)(void))tensor_method_get_non_zero_cols,
+(PyCFunction)(void (*)())tensor_method_get_non_zero_cols,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"is_sparse",
-(PyCFunction)(void (*)(void))tensor_method_is_sparse,
+(PyCFunction)(void (*)())tensor_method_is_sparse,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"is_sparse_coo",
-(PyCFunction)(void (*)(void))tensor_method_is_sparse_coo,
+(PyCFunction)(void (*)())tensor_method_is_sparse_coo,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"is_sparse_csr",
-(PyCFunction)(void (*)(void))tensor_method_is_sparse_csr,
+(PyCFunction)(void (*)())tensor_method_is_sparse_csr,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"is_same_shape",
-(PyCFunction)(void (*)(void))tensor_method_is_same_shape,
+(PyCFunction)(void (*)())tensor_method_is_same_shape,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"to_sparse_csr",
-(PyCFunction)(void (*)(void))tensor_method_to_sparse_csr,
+(PyCFunction)(void (*)())tensor_method_to_sparse_csr,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"element_size",
-(PyCFunction)(void (*)(void))tensor_method_element_size,
+(PyCFunction)(void (*)())tensor_method_element_size,
METH_VARARGS | METH_KEYWORDS,
NULL},
/***the method of sparse tensor****/
{"_inplace_version",
-(PyCFunction)(void (*)(void))tensor__inplace_version,
+(PyCFunction)(void (*)())tensor__inplace_version,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_bump_inplace_version",
-(PyCFunction)(void (*)(void))tensor__bump_inplace_version,
+(PyCFunction)(void (*)())tensor__bump_inplace_version,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"is_selected_rows",
-(PyCFunction)(void (*)(void))tensor_method_is_selected_rows,
+(PyCFunction)(void (*)())tensor_method_is_selected_rows,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"rows",
-(PyCFunction)(void (*)(void))tensor_method_get_rows,
+(PyCFunction)(void (*)())tensor_method_get_rows,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_reset_grad_inplace_version",
-(PyCFunction)(void (*)(void))tensor__reset_grad_inplace_version,
+(PyCFunction)(void (*)())tensor__reset_grad_inplace_version,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_share_memory",
-(PyCFunction)(void (*)(void))tensor_method__share_memory,
+(PyCFunction)(void (*)())tensor_method__share_memory,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_offset",
-(PyCFunction)(void (*)(void))tensor__offset,
+(PyCFunction)(void (*)())tensor__offset,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_grad_name",
-(PyCFunction)(void (*)(void))tensor__grad_name,
+(PyCFunction)(void (*)())tensor__grad_name,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_grad_value",
-(PyCFunction)(void (*)(void))tensor__grad_value,
+(PyCFunction)(void (*)())tensor__grad_value,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_unset_fake_empty",
-(PyCFunction)(void (*)(void))tensor__unset_fake_empty,
+(PyCFunction)(void (*)())tensor__unset_fake_empty,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"data_ptr",
-(PyCFunction)(void (*)(void))tensor_data_ptr,
+(PyCFunction)(void (*)())tensor_data_ptr,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_grad_ivar",
-(PyCFunction)(void (*)(void))tensor__grad_ivar,
+(PyCFunction)(void (*)())tensor__grad_ivar,
METH_VARARGS | METH_KEYWORDS,
NULL},
#if defined(PADDLE_WITH_CUDA)
{"_tensor_uva",
-(PyCFunction)(void (*)(void))tensor_method__uva,
+(PyCFunction)(void (*)())tensor_method__uva,
METH_VARARGS | METH_KEYWORDS,
NULL},
#endif
@@ -2276,11 +2276,11 @@ PyMethodDef variable_methods[] = {
// variable_methods for core.eager.StringTensor
PyMethodDef string_tensor_variable_methods[] = {
{"numpy",
-(PyCFunction)(void (*)(void))tensor_method_numpy_for_string_tensor,
+(PyCFunction)(void (*)())tensor_method_numpy_for_string_tensor,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_is_initialized",
-(PyCFunction)(void (*)(void))tensor_method__is_initialized,
+(PyCFunction)(void (*)())tensor_method__is_initialized,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"_is_string_tensor_hold_allocation",
@@ -664,12 +664,9 @@ int tensor_properties_set_materialize_grads(PyLayerObject* self,
}
PyMethodDef pylayer_methods[] = {
-{"name",
-(PyCFunction)(void (*)(void))pylayer_method_name,
-METH_NOARGS,
-NULL},
+{"name", (PyCFunction)(void (*)())pylayer_method_name, METH_NOARGS, NULL},
{"apply",
-(PyCFunction)(void (*)(void))pylayer_method_apply,
+(PyCFunction)(void (*)())pylayer_method_apply,
METH_CLASS | METH_VARARGS | METH_KEYWORDS,
NULL},
{NULL, NULL, 0, NULL}};
@@ -54,7 +54,7 @@ typedef PyFrameObject FrameObject;
// Use static variable to save customed eval hook.
static Py_tss_t eval_frame_callback_key = {0, 0};
-inline static PyObject *eval_frame_callback_get(void) {
+inline static PyObject *eval_frame_callback_get() {
void *result = PyThread_tss_get(&eval_frame_callback_key);
if (unlikely(result == NULL)) {
Py_RETURN_NONE;
@@ -271,7 +271,7 @@ static PyObject *set_eval_frame_py(PyObject *callback) {
return set_eval_frame(callback, PyThreadState_GET());
}
-PyMODINIT_FUNC PyInit__eval_frame(void) {
+PyMODINIT_FUNC PyInit__eval_frame() {
int result = PyThread_tss_create(&eval_frame_callback_key);
VLOG(7) << "Set PyThread_tss_create return: " << result;
@@ -52,7 +52,7 @@ OneDNNContextThreadLocals::Body::~Body() {
void OneDNNContextThreadLocals::Body::set_cur_mkldnn_session_id(size_t sid) {
cur_mkldnn_session_id = sid;
}
-size_t OneDNNContextThreadLocals::Body::get_cur_mkldnn_session_id(void) {
+size_t OneDNNContextThreadLocals::Body::get_cur_mkldnn_session_id() {
return cur_mkldnn_session_id;
}
@@ -70,11 +70,11 @@ void OneDNNContextThreadLocals::Body::set_cur_paddle_data_layout(
cur_paddle_data_layout = dl;
}
-DataLayout OneDNNContextThreadLocals::Body::get_cur_paddle_data_layout(void) {
+DataLayout OneDNNContextThreadLocals::Body::get_cur_paddle_data_layout() {
return cur_paddle_data_layout;
}
-void OneDNNContextThreadLocals::Body::log_lib_version(void) {
+void OneDNNContextThreadLocals::Body::log_lib_version() {
if (!said_once) {
said_once = true;
auto dv = dnnl::version();
@@ -239,7 +239,7 @@ struct OneDNNContext::Impl {
return;
}
-unsigned int GetCachedObjectsNumber(void) const {
+unsigned int GetCachedObjectsNumber() const {
unsigned int num_entries = 0;
for (auto const& l3 : *p_blobmap_) {
for (auto const& l2 : *(l3.second)) {
@@ -412,7 +412,7 @@ void OneDNNContext::SetBlob(const std::string& name,
impl_->SetBlob(name, data);
}
-unsigned int OneDNNContext::GetCachedObjectsNumber(void) const {
+unsigned int OneDNNContext::GetCachedObjectsNumber() const {
return impl_->GetCachedObjectsNumber();
}
@@ -17,7 +17,7 @@
#include "paddle/phi/core/enforce.h"
namespace phi {
-const std::string& GetKernelTypeForVarContext::GetVarName(void) const {
+const std::string& GetKernelTypeForVarContext::GetVarName() const {
PADDLE_ENFORCE_NE(
var_name_,
nullptr,
@@ -26,7 +26,7 @@ const std::string& GetKernelTypeForVarContext::GetVarName(void) const {
return *var_name_;
}
-const DenseTensor& GetKernelTypeForVarContext::GetTensor(void) const {
+const DenseTensor& GetKernelTypeForVarContext::GetTensor() const {
PADDLE_ENFORCE_NE(
tensor_,
nullptr,
@@ -35,7 +35,7 @@ const DenseTensor& GetKernelTypeForVarContext::GetTensor(void) const {
return *tensor_;
}
-const KernelKey& GetKernelTypeForVarContext::GetKernelKey(void) const {
+const KernelKey& GetKernelTypeForVarContext::GetKernelKey() const {
PADDLE_ENFORCE_NE(
kernel_key_,
nullptr,
@@ -44,7 +44,7 @@ const KernelKey& GetKernelTypeForVarContext::GetKernelKey(void) const {
return *kernel_key_;
}
-const AttributeMap& GetKernelTypeForVarContext::GetAttrs(void) const {
+const AttributeMap& GetKernelTypeForVarContext::GetAttrs() const {
return *attrs_;
}
@@ -58,11 +58,11 @@ class SumOneDNNHandler : public OneDNNHandlerNoCachingT<T, dnnl::sum> {
using OneDNNHandlerNoCachingT<T, dnnl::sum>::AcquireDstMemory;
-std::shared_ptr<dnnl::memory> AcquireDstMemory(void) {
+std::shared_ptr<dnnl::memory> AcquireDstMemory() {
return this->AcquireMemoryFromPrimitive(this->fwd_pd_->dst_desc());
}
-inline int GetNumInputs(void) { return num_inputs_; }
+inline int GetNumInputs() { return num_inputs_; }
private:
int num_inputs_;
@@ -307,7 +307,7 @@ TEST(AccumulationNode, Tensor) {
->data<paddle::platform::float16>()[0],
paddle::platform::float16(10.0f));
-auto reduce_hook_1 = [&](void) -> void {
+auto reduce_hook_1 = [&]() -> void {
auto* input_et_ptr =
std::dynamic_pointer_cast<phi::DenseTensor>(input_et.impl())
->mutable_data<paddle::platform::float16>(
@@ -334,7 +334,7 @@ TEST(AccumulationNode, Tensor) {
// Reduce Hook case 2: Call RegisterReduceHook and ApplyReduceHooks directly
VLOG(6) << "Test Reduce Hook";
-auto reduce_hook_2 = [&](void) -> void {
+auto reduce_hook_2 = [&]() -> void {
auto* ret_et0_ptr = std::dynamic_pointer_cast<phi::DenseTensor>(et0.impl())
->mutable_data<paddle::platform::float16>(
paddle::platform::CPUPlace());
@@ -76,7 +76,7 @@ void test_sigmoid(bool is_remove_gradient_hook) {
true);
VLOG(6) << "Make ReduceHook function";
-auto reduce_hook = [&](void) -> void {
+auto reduce_hook = [&]() -> void {
auto* t_ptr = std::dynamic_pointer_cast<phi::DenseTensor>(tensor.impl())
->data<float>();
for (int i = 0; i < tensor.numel(); i++) {
@@ -214,7 +214,7 @@ void test_matmul(bool is_remove_gradient_hook) {
2.0,
true);
-auto reduce_hook = [&](void) -> void {
+auto reduce_hook = [&]() -> void {
auto* t_ptr =
std::dynamic_pointer_cast<phi::DenseTensor>(Y.impl())->data<float>();
for (int i = 0; i < Y.numel(); i++) {
@@ -278,7 +278,7 @@ void test_backward_final_hooks() {
true);
VLOG(6) << "Make ReduceHook function";
-auto backward_final_hook = [&](void) -> void {
+auto backward_final_hook = [&]() -> void {
auto* t_ptr =
std::dynamic_pointer_cast<phi::DenseTensor>(X.impl())->data<float>();
VLOG(6) << "Run Target Backward Hook";
@@ -75,7 +75,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs,
}
#ifdef PADDLE_WITH_MKLDNN
-int GetNumCachedObjects(void) {
+int GetNumCachedObjects() {
auto &pool = platform::DeviceContextPool::Instance();
phi::CPUPlace place;
auto onednn_dev_ctx = dynamic_cast<phi::OneDNNContext *>(pool.Get(place));