
Commit f510973 ("foo")
1 parent f3c79b6

28 files changed: 686 additions, 25 deletions
@@ -0,0 +1,65 @@
From: Miaohe Lin <[email protected]>
Subject: mm/z3fold: always clear PAGE_CLAIMED under z3fold page lock

Think about the below race window:

CPU1                                    CPU2
z3fold_reclaim_page                     z3fold_free
 test_and_set_bit PAGE_CLAIMED
 failed to reclaim page
 z3fold_page_lock(zhdr);
 add back to the lru list;
 z3fold_page_unlock(zhdr);
                                         get_z3fold_header
                                         page_claimed = test_and_set_bit PAGE_CLAIMED

 clear_bit(PAGE_CLAIMED, &page->private);

                                         if (!page_claimed) /* false: page_claimed is true */
                                          free_handle is not called

free_handle won't be called in this case, so the z3fold_buddy_slots will leak.
Fix it by always clearing PAGE_CLAIMED under the z3fold page lock.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Miaohe Lin <[email protected]>
Cc: Vitaly Wool <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
---

 mm/z3fold.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

--- a/mm/z3fold.c~mm-z3fold-always-clear-page_claimed-under-z3fold-page-lock
+++ a/mm/z3fold.c
@@ -1221,8 +1221,8 @@ static void z3fold_free(struct z3fold_po
        return;
    }
    if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
-       put_z3fold_header(zhdr);
        clear_bit(PAGE_CLAIMED, &page->private);
+       put_z3fold_header(zhdr);
        return;
    }
    if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
@@ -1424,8 +1424,8 @@ next:
        spin_unlock(&pool->lock);
        if (list_empty(&zhdr->buddy))
            add_to_unbuddied(pool, zhdr);
-       z3fold_page_unlock(zhdr);
        clear_bit(PAGE_CLAIMED, &page->private);
+       z3fold_page_unlock(zhdr);
    }

    /* We started off locked to we need to lock the pool back */
@@ -1577,8 +1577,8 @@ static int z3fold_page_migrate(struct ad
    if (!z3fold_page_trylock(zhdr))
        return -EAGAIN;
    if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
-       z3fold_page_unlock(zhdr);
        clear_bit(PAGE_CLAIMED, &page->private);
+       z3fold_page_unlock(zhdr);
        return -EBUSY;
    }
    if (work_pending(&zhdr->work)) {
_
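
The ordering rule in this patch (clear PAGE_CLAIMED only while the z3fold page lock is still held) can be sketched in plain userspace C. In the sketch below a pthread mutex stands in for the z3fold page lock and an atomic flag for the PAGE_CLAIMED bit in page->private; the struct and function names are invented for illustration and this is not kernel code.

    /* Illustrative userspace model of the PAGE_CLAIMED ordering rule. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct fake_page {
        pthread_mutex_t lock;     /* stand-in for the z3fold page lock */
        atomic_bool     claimed;  /* stand-in for the PAGE_CLAIMED bit */
    };

    /* Buggy ordering: the flag is cleared after the lock is dropped, so a
     * concurrent claimer can take the lock and still see the stale flag,
     * which is the race window described in the changelog above. */
    static void release_claim_buggy(struct fake_page *p)
    {
        pthread_mutex_unlock(&p->lock);
        atomic_store(&p->claimed, false);
    }

    /* Fixed ordering: clear the flag while the lock is still held. */
    static void release_claim_fixed(struct fake_page *p)
    {
        atomic_store(&p->claimed, false);
        pthread_mutex_unlock(&p->lock);
    }

    /* Models the other side (z3fold_free): test-and-set under the lock. */
    static bool try_claim(struct fake_page *p)
    {
        bool already;

        pthread_mutex_lock(&p->lock);
        already = atomic_exchange(&p->claimed, true);  /* test_and_set_bit */
        pthread_mutex_unlock(&p->lock);
        return !already;
    }

With release_claim_fixed(), a try_claim() serialized on the same lock can never observe a stale claimed flag left over from a path that has already given the page back.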
@@ -0,0 +1,41 @@
From: Miaohe Lin <[email protected]>
Subject: mm/z3fold: fix possible null pointer dereferencing

alloc_slots could fail to allocate memory under heavy memory pressure.  So
we should check zhdr->slots against NULL to avoid future null pointer
dereferencing.

Link: https://lkml.kernel.org/r/[email protected]
Fixes: fc5488651c7d ("z3fold: simplify freeing slots")
Signed-off-by: Miaohe Lin <[email protected]>
Cc: Vitaly Wool <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
---

 mm/z3fold.c | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

--- a/mm/z3fold.c~mm-z3fold-fix-possible-null-pointer-dereferencing
+++ a/mm/z3fold.c
@@ -940,9 +940,19 @@ lookup:
        }
    }

-   if (zhdr && !zhdr->slots)
+   if (zhdr && !zhdr->slots) {
        zhdr->slots = alloc_slots(pool, GFP_ATOMIC);
+       if (!zhdr->slots)
+           goto out_fail;
+   }
    return zhdr;
+
+out_fail:
+   if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
+       add_to_unbuddied(pool, zhdr);
+       z3fold_page_unlock(zhdr);
+   }
+   return NULL;
}

/*
_
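
The out_fail path added above follows a common unwind pattern: if a nested allocation fails while an object is locked and referenced, undo the reference and the lock before returning NULL, rather than handing back a header whose ->slots would be dereferenced later. A rough plain-C sketch, with malloc and a pthread mutex standing in for alloc_slots() and the z3fold page lock (the real path also re-adds the header to the unbuddied list; the names here are illustrative only, not kernel code):

    #include <pthread.h>
    #include <stdlib.h>

    struct fake_hdr {
        pthread_mutex_t lock;     /* held on entry; models the page lock */
        int             refcount; /* models zhdr->refcount */
        void           *slots;    /* models zhdr->slots */
    };

    /* Called with h->lock held.  Returns h with slots populated, or NULL
     * after undoing the reference and the lock, mirroring out_fail. */
    static struct fake_hdr *ensure_slots(struct fake_hdr *h)
    {
        if (h && !h->slots) {
            h->slots = calloc(1, 64);  /* models alloc_slots(pool, GFP_ATOMIC) */
            if (!h->slots)
                goto out_fail;
        }
        return h;

    out_fail:
        if (--h->refcount > 0)         /* models !kref_put(...): not the last ref */
            pthread_mutex_unlock(&h->lock);
        return NULL;
    }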
@@ -0,0 +1,39 @@
From: Miaohe Lin <[email protected]>
Subject: mm/z3fold: fix scheduling while atomic

Patch series "A few fixup patches for z3fold".

This series contains a few fixup patches to fix scheduling while atomic,
fix possible null pointer dereferencing, fix various race conditions and
so on.  More details can be found in the respective changelogs.


This patch (of 9):

z3fold's page_lock is always held when calling alloc_slots.  So gfp should
be GFP_ATOMIC to avoid a "scheduling while atomic" bug.

Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Fixes: fc5488651c7d ("z3fold: simplify freeing slots")
Signed-off-by: Miaohe Lin <[email protected]>
Cc: Vitaly Wool <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
---

 mm/z3fold.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

--- a/mm/z3fold.c~mm-z3fold-fix-sheduling-while-atomic
+++ a/mm/z3fold.c
@@ -941,8 +941,7 @@ lookup:
    }

    if (zhdr && !zhdr->slots)
-       zhdr->slots = alloc_slots(pool,
-               can_sleep ? GFP_NOIO : GFP_ATOMIC);
+       zhdr->slots = alloc_slots(pool, GFP_ATOMIC);
    return zhdr;
}

_
@@ -0,0 +1,88 @@
From: Miaohe Lin <[email protected]>
Subject: mm/z3fold: fix z3fold_page_migrate races with z3fold_map

Think about the below scenario:

CPU1                                    CPU2
 z3fold_page_migrate                    z3fold_map
  z3fold_page_trylock
  ...
  z3fold_page_unlock
  /* slots still points to old zhdr */
                                         get_z3fold_header
                                          get slots from handle
                                          get old zhdr from slots
                                          z3fold_page_trylock
                                          return *old* zhdr
  encode_handle(new_zhdr, FIRST|LAST|MIDDLE)
  put_page(page) /* zhdr is freed! */
                                         but zhdr is still used by caller!

z3fold_map can map a freed z3fold page and lead to a use-after-free bug.  To
fix it, we add PAGE_MIGRATED to indicate that a z3fold page is migrated and
soon to be released, so get_z3fold_header won't return such a page.

Link: https://lkml.kernel.org/r/[email protected]
Fixes: 1f862989b04a ("mm/z3fold.c: support page migration")
Signed-off-by: Miaohe Lin <[email protected]>
Cc: Vitaly Wool <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
---

 mm/z3fold.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

--- a/mm/z3fold.c~mm-z3fold-fix-z3fold_page_migrate-races-with-z3fold_map
+++ a/mm/z3fold.c
@@ -181,6 +181,7 @@ enum z3fold_page_flags {
    NEEDS_COMPACTING,
    PAGE_STALE,
    PAGE_CLAIMED, /* by either reclaim or free */
+   PAGE_MIGRATED, /* page is migrated and soon to be released */
};

/*
@@ -270,8 +271,13 @@ static inline struct z3fold_header *get_
            zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
            locked = z3fold_page_trylock(zhdr);
            read_unlock(&slots->lock);
-           if (locked)
-               break;
+           if (locked) {
+               struct page *page = virt_to_page(zhdr);
+
+               if (!test_bit(PAGE_MIGRATED, &page->private))
+                   break;
+               z3fold_page_unlock(zhdr);
+           }
            cpu_relax();
        } while (true);
    } else {
@@ -389,6 +395,7 @@ static struct z3fold_header *init_z3fold
    clear_bit(NEEDS_COMPACTING, &page->private);
    clear_bit(PAGE_STALE, &page->private);
    clear_bit(PAGE_CLAIMED, &page->private);
+   clear_bit(PAGE_MIGRATED, &page->private);
    if (headless)
        return zhdr;

@@ -1576,7 +1583,7 @@ static int z3fold_page_migrate(struct ad
    new_zhdr = page_address(newpage);
    memcpy(new_zhdr, zhdr, PAGE_SIZE);
    newpage->private = page->private;
-   page->private = 0;
+   set_bit(PAGE_MIGRATED, &page->private);
    z3fold_page_unlock(zhdr);
    spin_lock_init(&new_zhdr->page_lock);
    INIT_WORK(&new_zhdr->work, compact_page_work);
@@ -1606,7 +1613,8 @@ static int z3fold_page_migrate(struct ad

    queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

-   clear_bit(PAGE_CLAIMED, &page->private);
+   /* PAGE_CLAIMED and PAGE_MIGRATED are cleared now. */
+   page->private = 0;
    put_page(page);
    return 0;
}
_
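
The PAGE_MIGRATED check added to get_z3fold_header() boils down to: even after the page lock has been acquired, a page that has already been migrated must not be handed back to the caller. A rough userspace sketch of that check, with a pthread mutex and an atomic flag as stand-ins for the z3fold page lock and the PAGE_MIGRATED bit (illustrative names, not kernel code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct fake_page {
        pthread_mutex_t lock;      /* models the z3fold page lock */
        atomic_bool     migrated;  /* models the PAGE_MIGRATED bit */
    };

    /* Returns the page with its lock held only when it has not been
     * migrated; a migrated page is unlocked again and rejected.  The
     * real get_z3fold_header() instead re-reads the handle and retries,
     * since the handle now leads to the new copy of the page. */
    static struct fake_page *lock_unless_migrated(struct fake_page *p)
    {
        if (pthread_mutex_trylock(&p->lock) != 0)
            return NULL;
        if (!atomic_load(&p->migrated))
            return p;                      /* safe to use, still locked */
        pthread_mutex_unlock(&p->lock);
        return NULL;                       /* page is about to be freed */
    }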
@@ -0,0 +1,80 @@
From: Miaohe Lin <[email protected]>
Subject: mm/z3fold: fix z3fold_reclaim_page races with z3fold_free

Think about the below scenario:

CPU1                                    CPU2
 z3fold_reclaim_page                    z3fold_free
  spin_lock(&pool->lock)                 get_z3fold_header -- hold page_lock
  kref_get_unless_zero
                                         kref_put -- zhdr->refcount can be 1 now
  !z3fold_page_trylock
   kref_put -- zhdr->refcount is 0 now
    release_z3fold_page
     WARN_ON(!list_empty(&zhdr->buddy)); -- we're on buddy now!
     spin_lock(&pool->lock); -- deadlock here!

z3fold_reclaim_page might race with z3fold_free and lead to a pool lock
deadlock and a zhdr->buddy non-empty warning.  To fix this, defer getting the
refcount until page_lock is held, just as __z3fold_alloc does.  Note this has
the side effect that we won't break the reclaim if we meet a
soon-to-be-released z3fold page now.

Link: https://lkml.kernel.org/r/[email protected]
Fixes: dcf5aedb24f8 ("z3fold: stricter locking and more careful reclaim")
Signed-off-by: Miaohe Lin <[email protected]>
Cc: Vitaly Wool <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
---

 mm/z3fold.c | 18 +++---------------
 1 file changed, 3 insertions(+), 15 deletions(-)

--- a/mm/z3fold.c~mm-z3fold-fix-z3fold_reclaim_page-races-with-z3fold_free
+++ a/mm/z3fold.c
@@ -519,13 +519,6 @@ static void __release_z3fold_page(struct
    atomic64_dec(&pool->pages_nr);
}

-static void release_z3fold_page(struct kref *ref)
-{
-   struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
-                       refcount);
-   __release_z3fold_page(zhdr, false);
-}
-
static void release_z3fold_page_locked(struct kref *ref)
{
    struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
@@ -1317,12 +1310,7 @@ static int z3fold_reclaim_page(struct z3
                break;
            }

-           if (kref_get_unless_zero(&zhdr->refcount) == 0) {
-               zhdr = NULL;
-               break;
-           }
            if (!z3fold_page_trylock(zhdr)) {
-               kref_put(&zhdr->refcount, release_z3fold_page);
                zhdr = NULL;
                continue; /* can't evict at this point */
            }
@@ -1333,14 +1321,14 @@ static int z3fold_reclaim_page(struct z3
             */
            if (zhdr->foreign_handles ||
                test_and_set_bit(PAGE_CLAIMED, &page->private)) {
-               if (!kref_put(&zhdr->refcount,
-                       release_z3fold_page_locked))
-                   z3fold_page_unlock(zhdr);
+               z3fold_page_unlock(zhdr);
                zhdr = NULL;
                continue; /* can't evict such page */
            }
            list_del_init(&zhdr->buddy);
            zhdr->cpu = -1;
+           /* See comment in __z3fold_alloc. */
+           kref_get(&zhdr->refcount);
            break;
        }

_
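
The refcount ordering the patch switches to can be sketched the same way: take the page lock first and only then bump the reference count, so a failed trylock never forces a reference drop that could turn into the final put while the pool lock is held. A rough userspace model with a pthread mutex and an atomic counter standing in for z3fold_page_trylock() and kref_get() (illustrative names, not kernel code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stddef.h>

    struct fake_hdr {
        pthread_mutex_t lock;      /* models the z3fold page lock */
        atomic_int      refcount;  /* models zhdr->refcount */
    };

    /* Only a header whose lock we already hold ever has its refcount
     * raised, so the failure path never has to drop a reference (the
     * old scheme's put could be the final one and free the header
     * while the caller still held the pool lock). */
    static struct fake_hdr *claim_for_reclaim(struct fake_hdr *h)
    {
        if (pthread_mutex_trylock(&h->lock) != 0)
            return NULL;                    /* can't evict at this point */
        atomic_fetch_add(&h->refcount, 1);  /* models kref_get(&zhdr->refcount) */
        return h;                           /* caller owns lock + reference */
    }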
@@ -0,0 +1,39 @@
From: Miaohe Lin <[email protected]>
Subject: mm/z3fold: put z3fold page back into unbuddied list when reclaim or migration fails

When doing z3fold page reclaim or migration, the page is removed from the
unbuddied list.  If reclaim or migration succeeds, that is fine, as the page
is released.  But when it fails, the page is currently not put back onto the
unbuddied list.  The page is then leaked until the next compaction work,
reclaim or migration is done.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Miaohe Lin <[email protected]>
Cc: Vitaly Wool <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
---

 mm/z3fold.c | 4 ++++
 1 file changed, 4 insertions(+)

--- a/mm/z3fold.c~mm-z3fold-put-z3fold-page-back-into-unbuddied-list-when-reclaim-or-migration-fails
+++ a/mm/z3fold.c
@@ -1422,6 +1422,8 @@ next:
        spin_lock(&pool->lock);
        list_add(&page->lru, &pool->lru);
        spin_unlock(&pool->lock);
+       if (list_empty(&zhdr->buddy))
+           add_to_unbuddied(pool, zhdr);
        z3fold_page_unlock(zhdr);
        clear_bit(PAGE_CLAIMED, &page->private);
    }
@@ -1638,6 +1640,8 @@ static void z3fold_page_putback(struct p
    spin_lock(&pool->lock);
    list_add(&page->lru, &pool->lru);
    spin_unlock(&pool->lock);
+   if (list_empty(&zhdr->buddy))
+       add_to_unbuddied(pool, zhdr);
    clear_bit(PAGE_CLAIMED, &page->private);
    z3fold_page_unlock(zhdr);
}
_
