未验证 提交 d16360c8 编写于 作者: Y ykkk2333 提交者: GitHub

fix bugs of tipc, test=kunlun (#46540)

* migrate sigmoid with cross entropy, and tile xpu kernels to phi, test=kunlun

* migrate add_n kernel to phi, test=kunlun

* fix bugs of tipc, test=kunlun
上级 678c200b
......@@ -189,6 +189,11 @@ void TensorFromArray(const T* src,
size,
reinterpret_cast<const platform::CustomDeviceContext&>(ctx).stream());
}
#endif
#ifdef PADDLE_WITH_XPU
else if (platform::is_xpu_place(dst_place)) { // NOLINT
memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
}
#endif
else { // NOLINT
PADDLE_THROW(platform::errors::Unimplemented(
......
......@@ -471,6 +471,7 @@ XPUOpMap& get_kl2_ops() {
pOpKernelType(vartype::INT64, XPUPlace())})},
{"scatter",
XPUKernelSet({pOpKernelType(vartype::INT64, XPUPlace()),
pOpKernelType(vartype::INT32, XPUPlace()),
pOpKernelType(vartype::FP32, XPUPlace())})},
{"sampling_id",
XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace()),
......
......@@ -174,4 +174,12 @@ PD_REGISTER_GENERAL_KERNEL(assign_array,
ALL_LAYOUT,
phi::AssignArrayKernel<phi::XPUContext>,
ALL_DTYPE) {}
// Register the assign_value PHI kernel for the XPU backend, layout-agnostic,
// covering the dtypes the op supports on this device (bool, int, float,
// int64_t). Mirrors the CPU/GPU registrations of AssignValueKernel elsewhere.
PD_REGISTER_KERNEL(assign_value,
XPU,
ALL_LAYOUT,
phi::AssignValueKernel,
bool,
int,
float,
int64_t) {}
#endif
......@@ -88,3 +88,7 @@ PD_REGISTER_KERNEL(batch_norm_infer,
float,
phi::dtype::float16) {}
#endif
#ifdef PADDLE_WITH_XPU
// Register batch_norm_infer for the XPU backend. Only FP32 is wired up here;
// note the GPU registration above also covers phi::dtype::float16 — FP16 is
// presumably not supported by this XPU kernel yet (TODO: confirm).
PD_REGISTER_KERNEL(
batch_norm_infer, XPU, ALL_LAYOUT, phi::BatchNormInferKernel, float) {}
#endif
......@@ -126,4 +126,19 @@ PD_REGISTER_KERNEL(empty,
int64_t,
bool,
phi::dtype::float16) {}
// Register the empty_like PHI kernel for the XPU backend with the same dtype
// list as the empty registration above.
PD_REGISTER_KERNEL(empty_like,
XPU,
ALL_LAYOUT,
phi::EmptyLikeKernel,
float,
double,
int8_t,
uint8_t,
int16_t,
int,
int64_t,
bool,
phi::dtype::float16) {
// Accept input 0 from any backend — presumably empty_like only needs the
// input's metadata (shape/dtype), so no device transfer should be forced.
// NOTE(review): matches the ALL_BACKEND override used by other backends'
// empty_like registrations; confirm against those if behavior changes.
kernel->InputAt(0).SetBackend(phi::Backend::ALL_BACKEND);
}
#endif
......@@ -28,7 +28,7 @@ void StackGradKernel(const Context& dev_ctx,
auto outs = x_grad;
auto dy_dims = out.dims();
if (axis < 0) axis += dy_dims.size() + 1;
if (axis < 0) axis += dy_dims.size();
auto dy_shape = phi::vectorize<int>(dy_dims);
std::vector<int> dx_dims_list(x_grad.size(), 1);
......
......@@ -135,11 +135,11 @@ void TopkKernel(const Context& dev_ctx,
// Transpose back to original dims
std::vector<int> trans_back_axes;
for (int i = 0; i < axis; i++) {
trans_axes.emplace_back(i);
trans_back_axes.emplace_back(i);
}
trans_axes.emplace_back(trans_out_dims.size() - 1);
trans_back_axes.emplace_back(trans_out_dims.size() - 1);
for (int i = axis; i < trans_out_dims.size() - 1; i++) {
trans_axes.emplace_back(i);
trans_back_axes.emplace_back(i);
}
std::vector<int> trans_out_shape_host(trans_back_axes.size(), 0);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册