
Commit d0164ad

gormanm authored and torvalds committed
mm, page_alloc: distinguish between being unable to sleep, unwilling to sleep and avoiding waking kswapd
__GFP_WAIT has been used to identify atomic context in callers that hold
spinlocks or are in interrupts. They are expected to be high priority and
have access to one of two watermarks lower than "min" which can be referred
to as the "atomic reserve". __GFP_HIGH users get access to the first lower
watermark and can be called the "high priority reserve".

Over time, callers had a requirement to not block when fallback options
were available. Some have abused __GFP_WAIT, leading to a situation where
an optimistic allocation with a fallback option can access atomic reserves.

This patch uses __GFP_ATOMIC to identify callers that are truly atomic,
cannot sleep and have no alternative. High priority users continue to use
__GFP_HIGH. __GFP_DIRECT_RECLAIM identifies callers that can sleep and are
willing to enter direct reclaim. __GFP_KSWAPD_RECLAIM identifies callers
that want to wake kswapd for background reclaim. __GFP_WAIT is redefined
as a caller that is willing to enter direct reclaim and wake kswapd for
background reclaim.

This patch then converts a number of sites:

o __GFP_ATOMIC is used by callers that are high priority and have memory
  pools for those requests. GFP_ATOMIC uses this flag.

o Callers that have a limited mempool to guarantee forward progress clear
  __GFP_DIRECT_RECLAIM but keep __GFP_KSWAPD_RECLAIM. bio allocations fall
  into this category where kswapd will still be woken but atomic reserves
  are not used as there is a one-entry mempool to guarantee progress.

o Callers that are checking if they are non-blocking should use the helper
  gfpflags_allow_blocking() where possible. This is because checking for
  __GFP_WAIT as was done historically can now trigger false positives.
  Some exceptions like dm-crypt.c exist where the code intent is clearer
  if __GFP_DIRECT_RECLAIM is used instead of the helper due to flag
  manipulations.

o Callers that built their own GFP flags instead of starting with
  GFP_KERNEL and friends now also need to specify __GFP_KSWAPD_RECLAIM.

The first key hazard to watch out for is callers that removed __GFP_WAIT
and were depending on access to atomic reserves for inconspicuous reasons.
In some cases it may be appropriate for them to use __GFP_HIGH.

The second key hazard is callers that assembled their own combination of
GFP flags instead of starting with something like GFP_KERNEL. They may now
wish to specify __GFP_KSWAPD_RECLAIM. It's almost certainly harmless if
it's missed in most cases as other activity will wake kswapd.

Signed-off-by: Mel Gorman <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Vitaly Wool <[email protected]>
Cc: Rik van Riel <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
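For reference, the relationships the changelog describes reduce to a few
lines in include/linux/gfp.h. The following is a condensed sketch of the
definitions this patch introduces, not the verbatim hunk (the raw
___GFP_* bit values are elided):

	/* __GFP_WAIT now means "may enter direct reclaim AND wake kswapd" */
	#define __GFP_WAIT	(__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM)

	/* GFP_ATOMIC: high priority, atomic reserves, kswapd wakeup only */
	#define GFP_ATOMIC	(__GFP_HIGH | __GFP_ATOMIC | __GFP_KSWAPD_RECLAIM)

	/* helper for converted callers that used to test __GFP_WAIT */
	static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
	{
		return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
	}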
1 parent 016c13d commit d0164ad

Note: large commits have some content hidden by default; only a subset of the changed files is shown below.

66 files changed: +210 -172 lines

Documentation/vm/balance (+8 -6)

@@ -1,12 +1,14 @@
 Started Jan 2000 by Kanoj Sarcar <[email protected]>
 
-Memory balancing is needed for non __GFP_WAIT as well as for non
-__GFP_IO allocations.
+Memory balancing is needed for !__GFP_ATOMIC and !__GFP_KSWAPD_RECLAIM as
+well as for non __GFP_IO allocations.
 
-There are two reasons to be requesting non __GFP_WAIT allocations:
-the caller can not sleep (typically intr context), or does not want
-to incur cost overheads of page stealing and possible swap io for
-whatever reasons.
+The first reason why a caller may avoid reclaim is that the caller can not
+sleep due to holding a spinlock or is in interrupt context. The second may
+be that the caller is willing to fail the allocation without incurring the
+overhead of page reclaim. This may happen for opportunistic high-order
+allocation requests that have order-0 fallback options. In such cases,
+the caller may also wish to avoid waking kswapd.
 
 __GFP_IO allocation requests are made to prevent file system deadlocks.
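A minimal sketch of the second case described above, assuming a
hypothetical caller with an order-0 fallback (this is illustration, not
code from this patch): the high-order attempt clears __GFP_DIRECT_RECLAIM
so it can fail fast, while GFP_KERNEL's __GFP_KSWAPD_RECLAIM bit still
wakes kswapd in the background.

	struct page *page;

	/* opportunistic high-order attempt: no direct reclaim, no warning */
	page = alloc_pages((GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) | __GFP_NOWARN,
			   order);
	if (!page)
		/* order-0 fallback with full GFP_KERNEL semantics */
		page = alloc_pages(GFP_KERNEL, 0);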

arch/arm/mm/dma-mapping.c (+3 -3)

@@ -651,12 +651,12 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
 	if (nommu())
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
-	else if (dev_get_cma_area(dev) && (gfp & __GFP_WAIT))
+	else if (dev_get_cma_area(dev) && (gfp & __GFP_DIRECT_RECLAIM))
 		addr = __alloc_from_contiguous(dev, size, prot, &page,
 					       caller, want_vaddr);
 	else if (is_coherent)
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
-	else if (!(gfp & __GFP_WAIT))
+	else if (!gfpflags_allow_blocking(gfp))
 		addr = __alloc_from_pool(size, &page);
 	else
 		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,

@@ -1363,7 +1363,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	*handle = DMA_ERROR_CODE;
 	size = PAGE_ALIGN(size);
 
-	if (!(gfp & __GFP_WAIT))
+	if (!gfpflags_allow_blocking(gfp))
 		return __iommu_alloc_atomic(dev, size, handle);
 
 	/*

arch/arm/xen/mm.c (+1 -1)

@@ -25,7 +25,7 @@
 unsigned long xen_get_swiotlb_free_pages(unsigned int order)
 {
 	struct memblock_region *reg;
-	gfp_t flags = __GFP_NOWARN;
+	gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;
 
 	for_each_memblock(memory, reg) {
 		if (reg->base < (phys_addr_t)0xffffffff) {

arch/arm64/mm/dma-mapping.c (+2 -2)

@@ -100,7 +100,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
 	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
 		flags |= GFP_DMA;
-	if (dev_get_cma_area(dev) && (flags & __GFP_WAIT)) {
+	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
 		struct page *page;
 		void *addr;
 

@@ -148,7 +148,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 
 	size = PAGE_ALIGN(size);
 
-	if (!coherent && !(flags & __GFP_WAIT)) {
+	if (!coherent && !gfpflags_allow_blocking(flags)) {
 		struct page *page = NULL;
 		void *addr = __alloc_from_pool(size, &page, flags);

arch/x86/kernel/pci-dma.c (+1 -1)

@@ -90,7 +90,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 again:
 	page = NULL;
 	/* CMA can be used only in the context which permits sleeping */
-	if (flag & __GFP_WAIT) {
+	if (gfpflags_allow_blocking(flag)) {
 		page = dma_alloc_from_contiguous(dev, count, get_order(size));
 		if (page && page_to_phys(page) + size > dma_mask) {
 			dma_release_from_contiguous(dev, page, count);

block/bio.c (+13 -13)

@@ -211,7 +211,7 @@ struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
 		bvl = mempool_alloc(pool, gfp_mask);
 	} else {
 		struct biovec_slab *bvs = bvec_slabs + *idx;
-		gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
+		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
 
 		/*
 		 * Make this allocation restricted and don't dump info on

@@ -221,11 +221,11 @@ struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
 		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
 
 		/*
-		 * Try a slab allocation. If this fails and __GFP_WAIT
+		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
 		 * is set, retry with the 1-entry mempool
 		 */
 		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
-		if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
+		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
 			*idx = BIOVEC_MAX_IDX;
 			goto fallback;
 		}

@@ -395,12 +395,12 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
  * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
  * backed by the @bs's mempool.
  *
- * When @bs is not NULL, if %__GFP_WAIT is set then bio_alloc will always be
- * able to allocate a bio. This is due to the mempool guarantees. To make this
- * work, callers must never allocate more than 1 bio at a time from this pool.
- * Callers that need to allocate more than 1 bio must always submit the
- * previously allocated bio for IO before attempting to allocate a new one.
- * Failure to do so can cause deadlocks under memory pressure.
+ * When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
+ * always be able to allocate a bio. This is due to the mempool guarantees.
+ * To make this work, callers must never allocate more than 1 bio at a time
+ * from this pool. Callers that need to allocate more than 1 bio must always
+ * submit the previously allocated bio for IO before attempting to allocate
+ * a new one. Failure to do so can cause deadlocks under memory pressure.
  *
  * Note that when running under generic_make_request() (i.e. any block
  * driver), bios are not submitted until after you return - see the code in

@@ -459,13 +459,13 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 	 * We solve this, and guarantee forward progress, with a rescuer
 	 * workqueue per bio_set. If we go to allocate and there are
 	 * bios on current->bio_list, we first try the allocation
-	 * without __GFP_WAIT; if that fails, we punt those bios we
-	 * would be blocking to the rescuer workqueue before we retry
-	 * with the original gfp_flags.
+	 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
+	 * bios we would be blocking to the rescuer workqueue before
+	 * we retry with the original gfp_flags.
 	 */
 
 	if (current->bio_list && !bio_list_empty(current->bio_list))
-		gfp_mask &= ~__GFP_WAIT;
+		gfp_mask &= ~__GFP_DIRECT_RECLAIM;
 
 	p = mempool_alloc(bs->bio_pool, gfp_mask);
 	if (!p && gfp_mask != saved_gfp) {
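The mempool guarantee referenced in the comments above only holds if
callers hand each bio back before allocating the next. A hypothetical
caller pattern, assuming the two-argument submit_bio() of kernels from
this era (illustration only, not code from this patch):

	/* GFP_NOIO includes __GFP_DIRECT_RECLAIM, so the mempool
	 * guarantees this first allocation eventually succeeds. */
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, bs);

	submit_bio(WRITE, bio);	/* must be submitted before the next alloc */

	/* Safe only now: the previous bio is in flight, so the pool
	 * can make forward progress when it completes. */
	bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, bs);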

block/blk-core.c (+8 -8)

@@ -1206,8 +1206,8 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
  * @bio: bio to allocate request for (can be %NULL)
  * @gfp_mask: allocation mask
  *
- * Get a free request from @q. If %__GFP_WAIT is set in @gfp_mask, this
- * function keeps retrying under memory pressure and fails iff @q is dead.
+ * Get a free request from @q. If %__GFP_DIRECT_RECLAIM is set in @gfp_mask,
+ * this function keeps retrying under memory pressure and fails iff @q is dead.
  *
  * Must be called with @q->queue_lock held and,
  * Returns ERR_PTR on failure, with @q->queue_lock held.

@@ -1227,7 +1227,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	if (!IS_ERR(rq))
 		return rq;
 
-	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
+	if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) {
 		blk_put_rl(rl);
 		return rq;
 	}

@@ -1305,11 +1305,11 @@ EXPORT_SYMBOL(blk_get_request);
  * BUG.
  *
  * WARNING: When allocating/cloning a bio-chain, careful consideration should be
- * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
- * anything but the first bio in the chain. Otherwise you risk waiting for IO
- * completion of a bio that hasn't been submitted yet, thus resulting in a
- * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
- * of bio_alloc(), as that avoids the mempool deadlock.
+ * given to how you allocate bios. In particular, you cannot use
+ * __GFP_DIRECT_RECLAIM for anything but the first bio in the chain. Otherwise
+ * you risk waiting for IO completion of a bio that hasn't been submitted yet,
+ * thus resulting in a deadlock. Alternatively bios should be allocated using
+ * bio_kmalloc() instead of bio_alloc(), as that avoids the mempool deadlock.
  * If possible a big IO should be split into smaller parts when allocation
  * fails. Partial allocation should not be an error, or you risk a live-lock.
  */

block/blk-ioc.c (+1 -1)

@@ -289,7 +289,7 @@ struct io_context *get_task_io_context(struct task_struct *task,
 {
 	struct io_context *ioc;
 
-	might_sleep_if(gfp_flags & __GFP_WAIT);
+	might_sleep_if(gfpflags_allow_blocking(gfp_flags));
 
 	do {
 		task_lock(task);

block/blk-mq-tag.c (+1 -1)

@@ -268,7 +268,7 @@ static int bt_get(struct blk_mq_alloc_data *data,
 	if (tag != -1)
 		return tag;
 
-	if (!(data->gfp & __GFP_WAIT))
+	if (!gfpflags_allow_blocking(data->gfp))
 		return -1;
 
 	bs = bt_wait_ptr(bt, hctx);

block/blk-mq.c (+3 -3)

@@ -244,11 +244,11 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
 
 	ctx = blk_mq_get_ctx(q);
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
-	blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
+	blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_DIRECT_RECLAIM,
 			reserved, ctx, hctx);
 
 	rq = __blk_mq_alloc_request(&alloc_data, rw);
-	if (!rq && (gfp & __GFP_WAIT)) {
+	if (!rq && (gfp & __GFP_DIRECT_RECLAIM)) {
 		__blk_mq_run_hw_queue(hctx);
 		blk_mq_put_ctx(ctx);

@@ -1186,7 +1186,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	ctx = blk_mq_get_ctx(q);
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 	blk_mq_set_alloc_data(&alloc_data, q,
-			__GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
+			__GFP_WAIT|__GFP_HIGH, false, ctx, hctx);
 	rq = __blk_mq_alloc_request(&alloc_data, rw);
 	ctx = alloc_data.ctx;
 	hctx = alloc_data.hctx;
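A note on the second hunk: before this patch GFP_ATOMIC was defined as
plain __GFP_HIGH, so __GFP_WAIT|GFP_ATOMIC meant "high priority, may
block". Since GFP_ATOMIC now also carries __GFP_ATOMIC, which must not be
combined with direct reclaim, the caller spells out __GFP_HIGH to keep the
original meaning:

	/* Illustration only, not part of the patch:
	 *   old: __GFP_WAIT | GFP_ATOMIC   (GFP_ATOMIC == __GFP_HIGH pre-patch)
	 *   new: __GFP_WAIT | __GFP_HIGH   (same bits, without picking up the
	 *        new __GFP_ATOMIC flag that GFP_ATOMIC now includes)
	 */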

drivers/block/drbd/drbd_receiver.c (+2 -1)

@@ -357,7 +357,8 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
 	}
 
 	if (has_payload && data_size) {
-		page = drbd_alloc_pages(peer_device, nr_pages, (gfp_mask & __GFP_WAIT));
+		page = drbd_alloc_pages(peer_device, nr_pages,
+					gfpflags_allow_blocking(gfp_mask));
 		if (!page)
 			goto fail;
 	}

drivers/block/osdblk.c (+1 -1)

@@ -271,7 +271,7 @@ static struct bio *bio_chain_clone(struct bio *old_chain, gfp_t gfpmask)
 		goto err_out;
 
 	tmp->bi_bdev = NULL;
-	gfpmask &= ~__GFP_WAIT;
+	gfpmask &= ~__GFP_DIRECT_RECLAIM;
 	tmp->bi_next = NULL;
 
 	if (!new_chain)

drivers/connector/connector.c (+2 -1)

@@ -124,7 +124,8 @@ int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 __group,
 	if (group)
 		return netlink_broadcast(dev->nls, skb, portid, group,
 					 gfp_mask);
-	return netlink_unicast(dev->nls, skb, portid, !(gfp_mask&__GFP_WAIT));
+	return netlink_unicast(dev->nls, skb, portid,
+			       !gfpflags_allow_blocking(gfp_mask));
 }
 EXPORT_SYMBOL_GPL(cn_netlink_send_mult);

drivers/firewire/core-cdev.c (+1 -1)

@@ -486,7 +486,7 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
 static int add_client_resource(struct client *client,
 			       struct client_resource *resource, gfp_t gfp_mask)
 {
-	bool preload = !!(gfp_mask & __GFP_WAIT);
+	bool preload = gfpflags_allow_blocking(gfp_mask);
 	unsigned long flags;
 	int ret;

drivers/gpu/drm/i915/i915_gem.c (+1 -1)

@@ -2215,7 +2215,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	 */
 	mapping = file_inode(obj->base.filp)->i_mapping;
 	gfp = mapping_gfp_mask(mapping);
-	gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
+	gfp |= __GFP_NORETRY | __GFP_NOWARN;
 	gfp &= ~(__GFP_IO | __GFP_WAIT);
 	sg = st->sgl;
 	st->nents = 0;

drivers/infiniband/core/sa_query.c (+1 -1)

@@ -1083,7 +1083,7 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
 
 static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
 {
-	bool preload = !!(gfp_mask & __GFP_WAIT);
+	bool preload = gfpflags_allow_blocking(gfp_mask);
 	unsigned long flags;
 	int ret, id;

drivers/iommu/amd_iommu.c (+1 -1)

@@ -2668,7 +2668,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 
 	page = alloc_pages(flag | __GFP_NOWARN, get_order(size));
 	if (!page) {
-		if (!(flag & __GFP_WAIT))
+		if (!gfpflags_allow_blocking(flag))
 			return NULL;
 
 		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,

drivers/iommu/intel-iommu.c (+1 -1)

@@ -3647,7 +3647,7 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
 		flags |= GFP_DMA32;
 	}
 
-	if (flags & __GFP_WAIT) {
+	if (gfpflags_allow_blocking(flags)) {
 		unsigned int count = size >> PAGE_SHIFT;
 
 		page = dma_alloc_from_contiguous(dev, count, order);

drivers/md/dm-crypt.c (+3 -3)

@@ -994,7 +994,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 	struct bio_vec *bvec;
 
 retry:
-	if (unlikely(gfp_mask & __GFP_WAIT))
+	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
 		mutex_lock(&cc->bio_alloc_lock);
 
 	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);

@@ -1010,7 +1010,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 		if (!page) {
 			crypt_free_buffer_pages(cc, clone);
 			bio_put(clone);
-			gfp_mask |= __GFP_WAIT;
+			gfp_mask |= __GFP_DIRECT_RECLAIM;
 			goto retry;
 		}

@@ -1027,7 +1027,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 	}
 
 return_clone:
-	if (unlikely(gfp_mask & __GFP_WAIT))
+	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
 		mutex_unlock(&cc->bio_alloc_lock);
 
 	return clone;

drivers/md/dm-kcopyd.c (+1 -1)

@@ -244,7 +244,7 @@ static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
 	*pages = NULL;
 
 	do {
-		pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY);
+		pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM);
 		if (unlikely(!pl)) {
 			/* Use reserved pages */
 			pl = kc->pages;

drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c (+1 -1)

@@ -1297,7 +1297,7 @@ static struct solo_enc_dev *solo_enc_alloc(struct solo_dev *solo_dev,
 	solo_enc->vidq.ops = &solo_enc_video_qops;
 	solo_enc->vidq.mem_ops = &vb2_dma_sg_memops;
 	solo_enc->vidq.drv_priv = solo_enc;
-	solo_enc->vidq.gfp_flags = __GFP_DMA32;
+	solo_enc->vidq.gfp_flags = __GFP_DMA32 | __GFP_KSWAPD_RECLAIM;
 	solo_enc->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 	solo_enc->vidq.buf_struct_size = sizeof(struct solo_vb2_buf);
 	solo_enc->vidq.lock = &solo_enc->lock;

drivers/media/pci/solo6x10/solo6x10-v4l2.c (+1 -1)

@@ -678,7 +678,7 @@ int solo_v4l2_init(struct solo_dev *solo_dev, unsigned nr)
 	solo_dev->vidq.mem_ops = &vb2_dma_contig_memops;
 	solo_dev->vidq.drv_priv = solo_dev;
 	solo_dev->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
-	solo_dev->vidq.gfp_flags = __GFP_DMA32;
+	solo_dev->vidq.gfp_flags = __GFP_DMA32 | __GFP_KSWAPD_RECLAIM;
 	solo_dev->vidq.buf_struct_size = sizeof(struct solo_vb2_buf);
 	solo_dev->vidq.lock = &solo_dev->lock;
 	ret = vb2_queue_init(&solo_dev->vidq);

drivers/media/pci/tw68/tw68-video.c (+1 -1)

@@ -979,7 +979,7 @@ int tw68_video_init2(struct tw68_dev *dev, int video_nr)
 	dev->vidq.ops = &tw68_video_qops;
 	dev->vidq.mem_ops = &vb2_dma_sg_memops;
 	dev->vidq.drv_priv = dev;
-	dev->vidq.gfp_flags = __GFP_DMA32;
+	dev->vidq.gfp_flags = __GFP_DMA32 | __GFP_KSWAPD_RECLAIM;
 	dev->vidq.buf_struct_size = sizeof(struct tw68_buf);
 	dev->vidq.lock = &dev->lock;
 	dev->vidq.min_buffers_needed = 2;

drivers/mtd/mtdcore.c (+1 -2)

@@ -1188,8 +1188,7 @@ EXPORT_SYMBOL_GPL(mtd_writev);
  */
 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
 {
-	gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
-		       __GFP_NORETRY | __GFP_NO_KSWAPD;
+	gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
 	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
 	void *kbuf;

drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -691,7 +691,7 @@ static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
691691
{
692692
if (fp->rx_frag_size) {
693693
/* GFP_KERNEL allocations are used only during initialization */
694-
if (unlikely(gfp_mask & __GFP_WAIT))
694+
if (unlikely(gfpflags_allow_blocking(gfp_mask)))
695695
return (void *)__get_free_page(gfp_mask);
696696

697697
return netdev_alloc_frag(fp->rx_frag_size);

drivers/staging/android/ion/ion_system_heap.c (+1 -1)

@@ -27,7 +27,7 @@
 #include "ion_priv.h"
 
 static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
-				     __GFP_NORETRY) & ~__GFP_WAIT;
+				     __GFP_NORETRY) & ~__GFP_DIRECT_RECLAIM;
 static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
 static const unsigned int orders[] = {8, 4, 0};
 static const int num_orders = ARRAY_SIZE(orders);

drivers/staging/lustre/include/linux/libcfs/libcfs_private.h (+1 -1)

@@ -95,7 +95,7 @@ do { \
 do { \
 	LASSERT(!in_interrupt() || \
 		((size) <= LIBCFS_VMALLOC_SIZE && \
-		 ((mask) & __GFP_WAIT) == 0)); \
+		 !gfpflags_allow_blocking(mask))); \
 } while (0)
 
 #define LIBCFS_ALLOC_POST(ptr, size) \
