From: Miaohe Lin <[email protected]>
Subject: mm/z3fold: fix z3fold_page_migrate races with z3fold_map

Consider the following scenario:

CPU1                                    CPU2
 z3fold_page_migrate                     z3fold_map
  z3fold_page_trylock
  ...
  z3fold_page_unlock
  /* slots still points to old zhdr */
                                          get_z3fold_header
                                           get slots from handle
                                           get old zhdr from slots
                                           z3fold_page_trylock
                                           return *old* zhdr
  encode_handle(new_zhdr, FIRST|LAST|MIDDLE)
  put_page(page) /* zhdr is freed! */
                                          but zhdr is still used by caller!

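For reference, the lookup loop in get_z3fold_header() that this race goes
through looked roughly like this before the patch (condensed from the hunk
below; the slots->lock and handle-dereference lines are paraphrased from
the surrounding code in mm/z3fold.c, not part of this diff):

	do {
		read_lock(&slots->lock);
		addr = *(unsigned long *)handle; /* may still point at the old zhdr */
		zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
		locked = z3fold_page_trylock(zhdr);
		read_unlock(&slots->lock);
		if (locked)
			break; /* nothing rechecks whether zhdr was migrated away */
		cpu_relax();
	} while (true);
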
z3fold_map can thus map a freed z3fold page and lead to a use-after-free
bug.  To fix it, we add a PAGE_MIGRATED flag to indicate that a z3fold
page has been migrated and is soon to be released, so that
get_z3fold_header() won't return such a page.

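With PAGE_MIGRATED set by z3fold_page_migrate() before it drops the page
lock, the same loop now retries until the slots point at the new zhdr
(again condensed from the hunk below, with the same paraphrasing caveats):

	do {
		read_lock(&slots->lock);
		addr = *(unsigned long *)handle;
		zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
		locked = z3fold_page_trylock(zhdr);
		read_unlock(&slots->lock);
		if (locked) {
			struct page *page = virt_to_page(zhdr);

			/* old page is about to be freed: retry the lookup */
			if (!test_bit(PAGE_MIGRATED, &page->private))
				break;
			z3fold_page_unlock(zhdr);
		}
		cpu_relax();
	} while (true);
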
Link: https://lkml.kernel.org/r/[email protected]
Fixes: 1f862989b04a ("mm/z3fold.c: support page migration")
Signed-off-by: Miaohe Lin <[email protected]>
Cc: Vitaly Wool <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
---

 mm/z3fold.c |   16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

--- a/mm/z3fold.c~mm-z3fold-fix-z3fold_page_migrate-races-with-z3fold_map
+++ a/mm/z3fold.c
@@ -181,6 +181,7 @@ enum z3fold_page_flags {
 	NEEDS_COMPACTING,
 	PAGE_STALE,
 	PAGE_CLAIMED, /* by either reclaim or free */
+	PAGE_MIGRATED, /* page is migrated and soon to be released */
 };
 
 /*
@@ -270,8 +271,13 @@ static inline struct z3fold_header *get_
 			zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
 			locked = z3fold_page_trylock(zhdr);
 			read_unlock(&slots->lock);
-			if (locked)
-				break;
+			if (locked) {
+				struct page *page = virt_to_page(zhdr);
+
+				if (!test_bit(PAGE_MIGRATED, &page->private))
+					break;
+				z3fold_page_unlock(zhdr);
+			}
 			cpu_relax();
 		} while (true);
 	} else {
@@ -389,6 +395,7 @@ static struct z3fold_header *init_z3fold
 	clear_bit(NEEDS_COMPACTING, &page->private);
 	clear_bit(PAGE_STALE, &page->private);
 	clear_bit(PAGE_CLAIMED, &page->private);
+	clear_bit(PAGE_MIGRATED, &page->private);
 	if (headless)
 		return zhdr;
 
@@ -1576,7 +1583,7 @@ static int z3fold_page_migrate(struct ad
 	new_zhdr = page_address(newpage);
 	memcpy(new_zhdr, zhdr, PAGE_SIZE);
 	newpage->private = page->private;
-	page->private = 0;
+	set_bit(PAGE_MIGRATED, &page->private);
 	z3fold_page_unlock(zhdr);
 	spin_lock_init(&new_zhdr->page_lock);
 	INIT_WORK(&new_zhdr->work, compact_page_work);
@@ -1606,7 +1613,8 @@ static int z3fold_page_migrate(struct ad
 
 	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
 
-	clear_bit(PAGE_CLAIMED, &page->private);
+	/* PAGE_CLAIMED and PAGE_MIGRATED are cleared now. */
+	page->private = 0;
 	put_page(page);
 	return 0;
 }
_