/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <stddef.h>

#ifdef _WIN32
#if defined(__AVX2__)
#include <immintrin.h>  // avx2
#elif defined(__AVX__)
#include <intrin.h>  // avx
#endif               // AVX
#else                // WIN32
#ifdef __AVX__
#include <immintrin.h>
#endif
#endif  // WIN32

#if defined(_WIN32)
#define ALIGN32_BEG __declspec(align(32))
#define ALIGN32_END
#else
#define ALIGN32_BEG
#define ALIGN32_END __attribute__((aligned(32)))
#endif  // _WIN32
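
// Usage sketch (illustrative, not part of this header's API): wrap a
// declaration with both macros to get 32-byte alignment on MSVC as well as
// GCC/Clang, e.g. for a buffer used with AVX loads/stores:
//
//   ALIGN32_BEG float buffer[8] ALIGN32_END;  // 32-byte aligned storage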

#ifndef PADDLE_WITH_XBYAK
#ifdef _WIN32
#define cpuid(reg, x) __cpuidex(reg, x, 0)
#else
#if !defined(WITH_NV_JETSON) && !defined(PADDLE_WITH_ARM) && \
    !defined(PADDLE_WITH_SW) && !defined(PADDLE_WITH_MIPS)
#include <cpuid.h>
inline void cpuid(int reg[4], int x) {
  __cpuid_count(x, 0, reg[0], reg[1], reg[2], reg[3]);
}
#endif
#endif
#endif
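
// Usage sketch (illustrative, assumes an x86 build where cpuid() is defined
// above): query CPUID leaf 7 / sub-leaf 0 and test the AVX2 feature flag,
// which the CPU reports in EBX bit 5.
//
//   int reg[4];                                // EAX, EBX, ECX, EDX
//   cpuid(reg, 7);                             // structured extended features
//   bool has_avx2 = (reg[1] & (1 << 5)) != 0;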

#include "paddle/phi/backends/cpu/cpu_info.h"

namespace paddle {
namespace platform {

//! Get the total physical memory size of the machine.
size_t CpuTotalPhysicalMemory();

//! Get the maximum CPU memory allocation size for a machine.
size_t CpuMaxAllocSize();

//! Get the maximum CUDA pinned memory allocation size for a machine.
size_t CUDAPinnedMaxAllocSize();

//! Get the minimum chunk size for the CPU buddy allocator.
size_t CpuMinChunkSize();

//! Get the maximum chunk size for the CPU buddy allocator.
size_t CpuMaxChunkSize();

//! Get the minimum chunk size for the CUDA pinned memory buddy allocator.
size_t CUDAPinnedMinChunkSize();

//! Get the maximum chunk size for the CUDA pinned memory buddy allocator.
size_t CUDAPinnedMaxChunkSize();

//! Get the maximum NPU pinned memory allocation size for a machine.
size_t NPUPinnedMaxAllocSize();

//! Get the minimum chunk size for the NPU pinned memory buddy allocator.
size_t NPUPinnedMinChunkSize();

//! Get the maximum chunk size for the NPU pinned memory buddy allocator.
size_t NPUPinnedMaxChunkSize();
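
// Usage sketch (hypothetical caller, not part of this header): the size
// getters above are typically consumed when sizing a buddy allocator pool.
//
//   size_t pool_size = CpuMaxAllocSize();  // upper bound of the CPU pool
//   size_t min_chunk = CpuMinChunkSize();  // smallest block the pool splits
//   size_t max_chunk = CpuMaxChunkSize();  // largest single chunk handed out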

using namespace phi::backends::cpu;  // NOLINT

//! Check whether the running CPU supports the given instruction set (ISA).
bool MayIUse(const cpu_isa_t cpu_isa);
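
// Usage sketch (hypothetical call site): gate an ISA-specific kernel on the
// result of MayIUse. The cpu_isa_t enumerators (e.g. avx2, assumed here) are
// pulled in from phi::backends::cpu by the using-directive above.
//
//   if (paddle::platform::MayIUse(paddle::platform::avx2)) {
//     // dispatch to the AVX2-optimized implementation
//   } else {
//     // fall back to the generic implementation
//   }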

}  // namespace platform
}  // namespace paddle