未验证 提交 b54abbe8 编写于 作者: A Allen Guo 提交者: GitHub

fix format for paddle/phi/api/lib/tensor.cc (#44972)

上级 e84250e8
......@@ -126,7 +126,6 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots,
init_zero_tensor.lod.assign({one_batch.lod3});
lod_tensor_tensor.shape = rnn_link_data_shape;
lod_tensor_tensor.lod.assign({one_batch.lod1});
// clang-format off
week_tensor.shape.assign(
{static_cast<int>(one_batch.rnn_week_datas.size()),
static_cast<int>(one_batch.rnn_week_datas.front().size())});
......@@ -135,7 +134,6 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots,
{static_cast<int>(one_batch.rnn_minute_datas.size()),
static_cast<int>(one_batch.rnn_minute_datas.front().size())});
minute_tensor.lod.assign({one_batch.lod3});
// clang-format on
// assign data
TensorAssignData<float>(&lod_attention_tensor,
std::vector<std::vector<float>>({{0, 0}}));
......
......@@ -173,7 +173,6 @@ void LaunchFusedSoftmaxMaskKernel(const T* src,
dim3 block(warp_size, warps_per_block);
dim3 grid(DIV_UP(seq_len, warps_per_block), batch_size, head_num);
// clang-format off
int elements = ElementsCeil(seq_len);
switch (elements) {
case 1: { // <=32
......@@ -203,7 +202,6 @@ void LaunchFusedSoftmaxMaskKernel(const T* src,
"seq_len must be between (0, 4096], received the seq_len is %d",
seq_len));
}
// clang-format on
}
} // namespace operators
......
......@@ -1167,12 +1167,11 @@ All parameter, weight, gradient are variables in Paddle.
.def("empty", []() { return kEmptyVarName; })
.def("temp", []() { return kTempVarName; });
// clang-format off
py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
.def_static("create",
[](paddle::platform::CPUPlace& place)
-> paddle::platform::DeviceContext* {
auto* context = new phi::CPUContext();
[](paddle::platform::CPUPlace &place)
-> paddle::platform::DeviceContext * {
auto *context = new phi::CPUContext();
context->SetAllocator(
paddle::memory::allocation::AllocatorFacade::Instance()
.GetAllocator(place)
......@@ -1187,12 +1186,12 @@ All parameter, weight, gradient are variables in Paddle.
.get());
return context;
})
.def_static("create",
[](paddle::platform::XPUPlace& place)
-> paddle::platform::DeviceContext* {
.def_static(
"create",
[](paddle::platform::XPUPlace &place)
-> paddle::platform::DeviceContext * {
#ifndef PADDLE_WITH_XPU
PADDLE_THROW(
platform::errors::PermissionDenied(
PADDLE_THROW(platform::errors::PermissionDenied(
"Cannot use XPUPlace in CPU/GPU version, "
"Please recompile or reinstall Paddle with XPU support."));
#else
......@@ -1212,24 +1211,24 @@ All parameter, weight, gradient are variables in Paddle.
return context;
#endif
})
.def_static("create",
[](paddle::platform::MLUPlace& place)
-> paddle::platform::DeviceContext* {
.def_static(
"create",
[](paddle::platform::MLUPlace &place)
-> paddle::platform::DeviceContext * {
#ifndef PADDLE_WITH_MLU
PADDLE_THROW(
platform::errors::PermissionDenied(
PADDLE_THROW(platform::errors::PermissionDenied(
"Cannot use MLUPlace in CPU/GPU version, "
"Please recompile or reinstall Paddle with MLU support."));
#else
return new paddle::platform::MLUDeviceContext(place);
#endif
})
.def_static("create",
[](paddle::platform::NPUPlace& place)
-> paddle::platform::DeviceContext* {
.def_static(
"create",
[](paddle::platform::NPUPlace &place)
-> paddle::platform::DeviceContext * {
#ifndef PADDLE_WITH_ASCEND_CL
PADDLE_THROW(
platform::errors::PermissionDenied(
PADDLE_THROW(platform::errors::PermissionDenied(
"Cannot use NPUPlace in CPU/GPU/XPU version, "
"Please recompile or reinstall Paddle with NPU support."));
#else
......@@ -1237,11 +1236,10 @@ All parameter, weight, gradient are variables in Paddle.
#endif
})
.def_static("create",
[](paddle::platform::CustomPlace& place)
-> paddle::platform::DeviceContext* {
[](paddle::platform::CustomPlace &place)
-> paddle::platform::DeviceContext * {
#ifndef PADDLE_WITH_CUSTOM_DEVICE
PADDLE_THROW(
platform::errors::PermissionDenied(
PADDLE_THROW(platform::errors::PermissionDenied(
"Cannot use CustomPlace in CPU/GPU/XPU version, "
"Please recompile or reinstall Paddle with "
"CustomDevice support."));
......@@ -1249,12 +1247,12 @@ All parameter, weight, gradient are variables in Paddle.
return new paddle::platform::CustomDeviceContext(place);
#endif
})
.def_static("create",
[](paddle::platform::CUDAPlace& place)
-> paddle::platform::DeviceContext* {
.def_static(
"create",
[](paddle::platform::CUDAPlace &place)
-> paddle::platform::DeviceContext * {
#if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
PADDLE_THROW(
platform::errors::PermissionDenied(
PADDLE_THROW(platform::errors::PermissionDenied(
"Cannot use CUDAPlace in CPU only version, "
"Please recompile or reinstall Paddle with CUDA support."));
#else
......@@ -1279,19 +1277,18 @@ All parameter, weight, gradient are variables in Paddle.
return context;
#endif
})
.def_static("create",
[](paddle::platform::CUDAPinnedPlace& place)
-> paddle::platform::DeviceContext* {
.def_static(
"create",
[](paddle::platform::CUDAPinnedPlace &place)
-> paddle::platform::DeviceContext * {
#if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
PADDLE_THROW(
platform::errors::PermissionDenied(
PADDLE_THROW(platform::errors::PermissionDenied(
"Cannot use CUDAPinnedPlace in CPU only version, "
"Please recompile or reinstall Paddle with CUDA support."));
#else
return new paddle::platform::CUDAPinnedDeviceContext(place);
#endif
});;
// clang-format on
});
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
py::class_<platform::Communicator>(m, "Communicator").def(py::init<>());
#endif
......
......@@ -172,20 +172,54 @@ const Type& UI1();
template <typename T>
Type type_of();
// clang-format off
template <> inline Type type_of<float>() { return F32(); }
template <> inline Type type_of<double>() { return F64(); }
template <> inline Type type_of<unsigned char>() { return UI8(); }
template <> inline Type type_of<int16_t>() { return UI16(); }
template <> inline Type type_of<int32_t>() { return I32(); }
template <> inline Type type_of<uint32_t>() { return UI32(); }
template <> inline Type type_of<bool>() { return UI1(); }
template <> inline Type type_of<char>() { return I8(); }
template <> inline Type type_of<int64_t>() { return I64(); }
template <> inline Type type_of<uint64_t>() { return UI64(); }
template <> inline Type type_of<signed char>() { return I8(); }
template <> inline Type type_of<void>() { return Void(); }
// clang-format on
// Compile-time mapping from C++ scalar types to the framework's runtime
// Type descriptors: each explicit specialization of type_of<T>() returns
// the Type value (F32/I32/UI8/...) that describes T. Formatting follows
// clang-format (this hunk removes the previous format-off guards).
template <>
inline Type type_of<float>() {
return F32();
}
template <>
inline Type type_of<double>() {
return F64();
}
template <>
inline Type type_of<unsigned char>() {
return UI8();
}
// NOTE(review): int16_t (a signed type) maps to UI16() — inconsistent with
// the other signed mappings (int32_t->I32, int64_t->I64); confirm upstream
// whether this is intentional or a latent bug.
template <>
inline Type type_of<int16_t>() {
return UI16();
}
template <>
inline Type type_of<int32_t>() {
return I32();
}
template <>
inline Type type_of<uint32_t>() {
return UI32();
}
// bool is modeled as a 1-bit unsigned type in this type system.
template <>
inline Type type_of<bool>() {
return UI1();
}
// Plain `char` and `signed char` are distinct C++ types but both map to I8
// here; `unsigned char` maps to UI8 above.
template <>
inline Type type_of<char>() {
return I8();
}
template <>
inline Type type_of<int64_t>() {
return I64();
}
template <>
inline Type type_of<uint64_t>() {
return UI64();
}
template <>
inline Type type_of<signed char>() {
return I8();
}
// void has its own descriptor (e.g. for untyped/opaque values).
template <>
inline Type type_of<void>() {
return Void();
}
template <>
inline Type type_of<int8_t*>() {
Type x = Int(8);
......
......@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// clang-format off
#include "paddle/phi/api/lib/data_transform.h"
#include "paddle/phi/api/lib/kernel_dispatch.h"
......@@ -24,7 +23,6 @@ limitations under the License. */
#include "paddle/phi/kernels/transfer_layout_kernel.h"
#include "paddle/fluid/framework/tensor_util.h"
// clang-format on
namespace paddle {
namespace experimental {
......
......@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// clang-format off
#include "paddle/phi/api/include/tensor.h"
#include <memory>
......@@ -35,7 +34,6 @@ limitations under the License. */
#include "paddle/phi/core/tensor_base.h"
#include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/core/tensor_utils.h"
// clang-format off
namespace paddle {
namespace experimental {
......@@ -312,8 +310,8 @@ void Tensor::set_impl(std::shared_ptr<phi::TensorBase> &&impl) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
gpuStream_t Tensor::stream() const {
int device_id = phi::backends::gpu::GetCurrentDeviceId();
auto* gpu_context = DeviceContextPool::Instance()
.Get<AllocationType::GPU>(GPUPlace(device_id));
auto *gpu_context = DeviceContextPool::Instance().Get<AllocationType::GPU>(
GPUPlace(device_id));
return gpu_context->stream();
}
#endif
......
......@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// clang-format off
#include "paddle/phi/api/include/tensor.h"
#include "paddle/phi/common/int_array.h"
......@@ -25,7 +24,6 @@ limitations under the License. */
#include "paddle/phi/api/lib/kernel_dispatch.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/infermeta/unary.h"
// clang-format off
namespace paddle {
namespace experimental {
......@@ -115,13 +113,12 @@ void Tensor::copy_(const Tensor &src,
// Deep Copy AutoGrad info from src to self.
*autograd_meta_ = *(src.autograd_meta_);
}
kernel_key_set.backend_set =
kernel_key_set.backend_set |
kernel_key_set.backend_set = kernel_key_set.backend_set |
BackendSet(phi::TransToPhiBackend(target_place));
auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
auto place = phi::TransToPhiPlace(kernel_key.backend());
auto& pool = paddle::experimental::DeviceContextPool::Instance();
auto* dev_ctx = pool.GetMutable(
auto &pool = paddle::experimental::DeviceContextPool::Instance();
auto *dev_ctx = pool.GetMutable(
place.GetType() == target_place.GetType() ? target_place : place);
Backend kernel_backend = Backend::UNDEFINED;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册