Commit 453f85d4 authored by Mel Gorman, committed by Linus Torvalds

mm: remove __GFP_COLD

As the page free path makes no distinction between cache hot and cold
pages, there is no real useful ordering of pages in the free list that
allocation requests can take advantage of.  Judging from the users of
__GFP_COLD, it is likely that a number of them are the result of copying
other sites instead of actually measuring the impact.  Remove the
__GFP_COLD parameter which simplifies a number of paths in the page
allocator.
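
The call-site conversion is purely mechanical; as a rough sketch (the helper below is hypothetical, not a function from this patch), the flag is simply dropped from the gfp mask and nothing else changes:

	/* Hypothetical RX-buffer helper illustrating the pattern applied
	 * at each call site in the diff: __GFP_COLD is removed from the
	 * gfp mask, the rest of the call is untouched.
	 */
	static struct page *rx_buf_alloc_page(void)
	{
		/* before: return alloc_page(GFP_ATOMIC | __GFP_COLD); */
		return alloc_page(GFP_ATOMIC);
	}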

This is potentially controversial but bear in mind that the size of the
per-cpu pagelists versus modern cache sizes means that the whole per-cpu
list can often fit in the L3 cache.  Hence, there is only a potential
benefit for microbenchmarks that alloc/free pages in a tight loop.  It's
even worse when THP is taken into account, as THP has little or no chance
of getting a cache-hot page: the per-cpu list is bypassed and the
zeroing of multiple pages will thrash the cache anyway.
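
For a rough sense of scale (assumed typical values, not measurements from this series): with 4KiB pages and a per-cpu high watermark on the order of a couple of hundred pages,

	~200 pages * 4 KiB/page = ~800 KiB per per-cpu list

which sits comfortably inside the 8MiB+ L3 caches common on current parts, so hot/cold placement within the list buys little.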

The truncate microbenchmarks are not shown as this patch affects the
allocation path and not the free path.  A page fault microbenchmark was
tested but it showed no significant difference, which is not surprising
given that the __GFP_COLD branches are a minuscule percentage of the
fault path.

Link: http://lkml.kernel.org/r/20171018075952.10627-9-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 2d4894b5
@@ -517,7 +517,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
 		rc = ena_alloc_rx_page(rx_ring, rx_info,
-				       __GFP_COLD | GFP_ATOMIC | __GFP_COMP);
+				       GFP_ATOMIC | __GFP_COMP);
 		if (unlikely(rc < 0)) {
 			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
 				   "failed to alloc buffer for rx queue %d\n",
...
@@ -295,7 +295,7 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
 	order = alloc_order;
 	/* Try to obtain pages, decreasing order if necessary */
-	gfp = GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
+	gfp = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
 	while (order >= 0) {
 		pages = alloc_pages_node(node, gfp, order);
 		if (pages)
...
@@ -304,8 +304,7 @@ int aq_ring_rx_fill(struct aq_ring_s *self)
 		buff->flags = 0U;
 		buff->len = AQ_CFG_RX_FRAME_MAX;
-		buff->page = alloc_pages(GFP_ATOMIC | __GFP_COLD |
-					 __GFP_COMP, pages_order);
+		buff->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, pages_order);
 		if (!buff->page) {
 			err = -ENOMEM;
 			goto err_exit;
...
@@ -195,7 +195,7 @@ static inline void
 	struct sk_buff *skb;
 	struct octeon_skb_page_info *skb_pg_info;
-	page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+	page = alloc_page(GFP_ATOMIC);
 	if (unlikely(!page))
 		return NULL;
...
@@ -193,7 +193,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 			if (mlx4_en_prepare_rx_desc(priv, ring,
 						    ring->actual_size,
-						    GFP_KERNEL | __GFP_COLD)) {
+						    GFP_KERNEL)) {
 				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
 					en_err(priv, "Failed to allocate enough rx buffers\n");
 					return -ENOMEM;
@@ -552,8 +552,7 @@ static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
 	do {
 		if (mlx4_en_prepare_rx_desc(priv, ring,
 					    ring->prod & ring->size_mask,
-					    GFP_ATOMIC | __GFP_COLD |
-					    __GFP_MEMALLOC))
+					    GFP_ATOMIC | __GFP_MEMALLOC))
 			break;
 		ring->prod++;
 	} while (likely(--missing));
...
@@ -1185,7 +1185,7 @@ static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
 	} else {
 		struct page *page;
-		page = alloc_page(GFP_KERNEL | __GFP_COLD);
+		page = alloc_page(GFP_KERNEL);
 		frag = page ? page_address(page) : NULL;
 	}
 	if (!frag) {
@@ -1212,7 +1212,7 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
 	} else {
 		struct page *page;
-		page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+		page = alloc_page(GFP_ATOMIC);
 		frag = page ? page_address(page) : NULL;
 	}
 	if (!frag) {
...
@@ -1092,8 +1092,7 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
 {
 	if (!rx_ring->pg_chunk.page) {
 		u64 map;
-		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
-						     GFP_ATOMIC,
+		rx_ring->pg_chunk.page = alloc_pages(__GFP_COMP | GFP_ATOMIC,
 						     qdev->lbq_buf_order);
 		if (unlikely(!rx_ring->pg_chunk.page)) {
 			netif_err(qdev, drv, qdev->ndev,
...
@@ -163,7 +163,7 @@ static int ef4_init_rx_buffers(struct ef4_rx_queue *rx_queue, bool atomic)
 	do {
 		page = ef4_reuse_page(rx_queue);
 		if (page == NULL) {
-			page = alloc_pages(__GFP_COLD | __GFP_COMP |
+			page = alloc_pages(__GFP_COMP |
 					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
 					   efx->rx_buffer_order);
 			if (unlikely(page == NULL))
...
@@ -163,7 +163,7 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
 	do {
 		page = efx_reuse_page(rx_queue);
 		if (page == NULL) {
-			page = alloc_pages(__GFP_COLD | __GFP_COMP |
+			page = alloc_pages(__GFP_COMP |
 					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
 					   efx->rx_buffer_order);
 			if (unlikely(page == NULL))
...
@@ -335,7 +335,7 @@ static int xlgmac_alloc_pages(struct xlgmac_pdata *pdata,
 	dma_addr_t pages_dma;
 	/* Try to obtain pages, decreasing order if necessary */
-	gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
+	gfp |= __GFP_COMP | __GFP_NOWARN;
 	while (order >= 0) {
 		pages = alloc_pages(gfp, order);
 		if (pages)
...
@@ -906,7 +906,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
 		sw_data[0] = (u32)bufptr;
 	} else {
 		/* Allocate a secondary receive queue entry */
-		page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
+		page = alloc_page(GFP_ATOMIC | GFP_DMA);
 		if (unlikely(!page)) {
 			dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
 			goto fail;
...
@@ -988,7 +988,6 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
 	int err;
 	bool oom;
-	gfp |= __GFP_COLD;
 	do {
 		if (vi->mergeable_rx_bufs)
 			err = add_recvbuf_mergeable(vi, rq, gfp);
...
@@ -1152,7 +1152,7 @@ static int mdc_read_page_remote(void *data, struct page *page0)
 	}
 	for (npages = 1; npages < max_pages; npages++) {
-		page = page_cache_alloc_cold(inode->i_mapping);
+		page = page_cache_alloc(inode->i_mapping);
 		if (!page)
 			break;
 		page_pool[npages] = page;
...
@@ -256,8 +256,7 @@ static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
 			goto backing_page_already_present;
 		if (!newpage) {
-			newpage = __page_cache_alloc(cachefiles_gfp |
-						     __GFP_COLD);
+			newpage = __page_cache_alloc(cachefiles_gfp);
 			if (!newpage)
 				goto nomem_monitor;
 		}
@@ -493,8 +492,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 			goto backing_page_already_present;
 		if (!newpage) {
-			newpage = __page_cache_alloc(cachefiles_gfp |
-						     __GFP_COLD);
+			newpage = __page_cache_alloc(cachefiles_gfp);
 			if (!newpage)
 				goto nomem;
 		}
...
@@ -24,7 +24,6 @@ struct vm_area_struct;
 #define ___GFP_HIGH		0x20u
 #define ___GFP_IO		0x40u
 #define ___GFP_FS		0x80u
-#define ___GFP_COLD		0x100u
 #define ___GFP_NOWARN		0x200u
 #define ___GFP_RETRY_MAYFAIL	0x400u
 #define ___GFP_NOFAIL		0x800u
@@ -192,16 +191,12 @@ struct vm_area_struct;
 /*
  * Action modifiers
  *
- * __GFP_COLD indicates that the caller does not expect to be used in the near
- * future. Where possible, a cache-cold page will be returned.
- *
  * __GFP_NOWARN suppresses allocation failure reports.
  *
  * __GFP_COMP address compound page metadata.
  *
  * __GFP_ZERO returns a zeroed page on success.
  */
-#define __GFP_COLD	((__force gfp_t)___GFP_COLD)
 #define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)
 #define __GFP_COMP	((__force gfp_t)___GFP_COMP)
 #define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)
...
@@ -234,15 +234,9 @@ static inline struct page *page_cache_alloc(struct address_space *x)
 	return __page_cache_alloc(mapping_gfp_mask(x));
 }
-static inline struct page *page_cache_alloc_cold(struct address_space *x)
-{
-	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
-}
 static inline gfp_t readahead_gfp_mask(struct address_space *x)
 {
-	return mapping_gfp_mask(x) |
-				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
+	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
 }
 typedef int filler_t(void *, struct page *);
...
@@ -2672,7 +2672,7 @@ static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
 	 * 4.  __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
 	 *     code in gfp_to_alloc_flags that should be enforcing this.
 	 */
-	gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;
+	gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;
 	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
 }
...
@@ -467,9 +467,6 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
  * Also it is possible to set different flags by OR'ing
  * in one or more of the following additional @flags:
  *
- * %__GFP_COLD - Request cache-cold pages instead of
- *   trying to return cache-warm pages.
- *
  * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
  *
  * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
...
@@ -32,7 +32,6 @@
 	{(unsigned long)__GFP_ATOMIC,		"__GFP_ATOMIC"},	\
 	{(unsigned long)__GFP_IO,		"__GFP_IO"},		\
 	{(unsigned long)__GFP_FS,		"__GFP_FS"},		\
-	{(unsigned long)__GFP_COLD,		"__GFP_COLD"},		\
 	{(unsigned long)__GFP_NOWARN,		"__GFP_NOWARN"},	\
 	{(unsigned long)__GFP_RETRY_MAYFAIL,	"__GFP_RETRY_MAYFAIL"},	\
 	{(unsigned long)__GFP_NOFAIL,		"__GFP_NOFAIL"},	\
...
@@ -1884,7 +1884,7 @@ static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
  */
 static inline int get_highmem_buffer(int safe_needed)
 {
-	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
+	buffer = get_image_page(GFP_ATOMIC, safe_needed);
 	return buffer ? 0 : -ENOMEM;
 }
@@ -1945,7 +1945,7 @@ static int swsusp_alloc(struct memory_bitmap *copy_bm,
 		while (nr_pages-- > 0) {
 			struct page *page;
-			page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
+			page = alloc_image_page(GFP_ATOMIC);
 			if (!page)
 				goto err_out;
 			memory_bm_set_bit(copy_bm, page_to_pfn(page));
...
@@ -2272,7 +2272,7 @@ static ssize_t generic_file_buffered_read(struct kiocb *iocb,
 		 * Ok, it wasn't cached, so we need to create a new
 		 * page..
 		 */
-		page = page_cache_alloc_cold(mapping);
+		page = page_cache_alloc(mapping);
 		if (!page) {
 			error = -ENOMEM;
 			goto out;
@@ -2384,7 +2384,7 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
 	int ret;
 	do {
-		page = __page_cache_alloc(gfp_mask|__GFP_COLD);
+		page = __page_cache_alloc(gfp_mask);
 		if (!page)
 			return -ENOMEM;
@@ -2788,7 +2788,7 @@ static struct page *do_read_cache_page(struct address_space *mapping,
 repeat:
 	page = find_get_page(mapping, index);
 	if (!page) {
-		page = __page_cache_alloc(gfp | __GFP_COLD);
+		page = __page_cache_alloc(gfp);
 		if (!page)
 			return ERR_PTR(-ENOMEM);
 		err = add_to_page_cache_lru(page, mapping, index, gfp);
...
@@ -2336,7 +2336,7 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype)
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
-			int migratetype, bool cold)
+			int migratetype)
 {
 	int i, alloced = 0;
@@ -2358,10 +2358,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		 * merge IO requests if the physical pages are ordered
 		 * properly.
 		 */
-		if (likely(!cold))
-			list_add(&page->lru, list);
-		else
-			list_add_tail(&page->lru, list);
+		list_add(&page->lru, list);
 		list = &page->lru;
 		alloced++;
 		if (is_migrate_cma(get_pcppage_migratetype(page)))
@@ -2795,7 +2792,7 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
 /* Remove page from the per-cpu list, caller must protect the list */
 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
-			bool cold, struct per_cpu_pages *pcp,
+			struct per_cpu_pages *pcp,
 			struct list_head *list)
 {
 	struct page *page;
@@ -2804,16 +2801,12 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
 		if (list_empty(list)) {
 			pcp->count += rmqueue_bulk(zone, 0,
 					pcp->batch, list,
-					migratetype, cold);
+					migratetype);
 			if (unlikely(list_empty(list)))
 				return NULL;
 		}
-		if (cold)
-			page = list_last_entry(list, struct page, lru);
-		else
-			page = list_first_entry(list, struct page, lru);
+		page = list_first_entry(list, struct page, lru);
 		list_del(&page->lru);
 		pcp->count--;
 	} while (check_new_pcp(page));
@@ -2828,14 +2821,13 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 {
 	struct per_cpu_pages *pcp;
 	struct list_head *list;
-	bool cold = ((gfp_flags & __GFP_COLD) != 0);
 	struct page *page;
 	unsigned long flags;
 	local_irq_save(flags);
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
 	list = &pcp->lists[migratetype];
-	page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list);
+	page = __rmqueue_pcplist(zone, migratetype, pcp, list);
 	if (page) {
 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 		zone_statistics(preferred_zone, zone);
...
@@ -81,7 +81,7 @@ static void pcpu_free_pages(struct pcpu_chunk *chunk,
 static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
 			    struct page **pages, int page_start, int page_end)
 {
-	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
+	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM;
 	unsigned int cpu, tcpu;
 	int i;
...
@@ -353,7 +353,7 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
  */
 void *netdev_alloc_frag(unsigned int fragsz)
 {
-	return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
+	return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(netdev_alloc_frag);
@@ -366,7 +366,7 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 void *napi_alloc_frag(unsigned int fragsz)
 {
-	return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
+	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(napi_alloc_frag);
...
@@ -641,7 +641,6 @@ static const struct {
 	{ "__GFP_ATOMIC",		"_A" },
 	{ "__GFP_IO",			"I" },
 	{ "__GFP_FS",			"F" },
-	{ "__GFP_COLD",			"CO" },
 	{ "__GFP_NOWARN",		"NWR" },
 	{ "__GFP_RETRY_MAYFAIL",	"R" },
 	{ "__GFP_NOFAIL",		"NF" },
...