@@ -2498,6 +2498,7 @@ struct vmap_block {
2498
2498
struct list_head free_list ;
2499
2499
struct rcu_head rcu_head ;
2500
2500
struct list_head purge ;
2501
+ unsigned int cpu ;
2501
2502
};
2502
2503
2503
2504
/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
@@ -2625,8 +2626,15 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
2625
2626
free_vmap_area (va );
2626
2627
return ERR_PTR (err );
2627
2628
}
2628
-
2629
- vbq = raw_cpu_ptr (& vmap_block_queue );
2629
+ /*
2630
+ * list_add_tail_rcu could happen on another core
2631
+ * rather than vb->cpu due to task migration, which
2632
+ * is safe as list_add_tail_rcu will ensure the list's
2633
+ * integrity together with list_for_each_rcu from the read
2634
+ * side.
2635
+ */
2636
+ vb -> cpu = raw_smp_processor_id ();
2637
+ vbq = per_cpu_ptr (& vmap_block_queue , vb -> cpu );
2630
2638
spin_lock (& vbq -> lock );
2631
2639
list_add_tail_rcu (& vb -> free_list , & vbq -> free );
2632
2640
spin_unlock (& vbq -> lock );
@@ -2654,9 +2662,10 @@ static void free_vmap_block(struct vmap_block *vb)
2654
2662
}
2655
2663
2656
2664
static bool purge_fragmented_block (struct vmap_block * vb ,
2657
- struct vmap_block_queue * vbq , struct list_head * purge_list ,
2658
- bool force_purge )
2665
+ struct list_head * purge_list , bool force_purge )
2659
2666
{
2667
+ struct vmap_block_queue * vbq = & per_cpu (vmap_block_queue , vb -> cpu );
2668
+
2660
2669
if (vb -> free + vb -> dirty != VMAP_BBMAP_BITS ||
2661
2670
vb -> dirty == VMAP_BBMAP_BITS )
2662
2671
return false;
@@ -2704,7 +2713,7 @@ static void purge_fragmented_blocks(int cpu)
2704
2713
continue ;
2705
2714
2706
2715
spin_lock (& vb -> lock );
2707
- purge_fragmented_block (vb , vbq , & purge , true);
2716
+ purge_fragmented_block (vb , & purge , true);
2708
2717
spin_unlock (& vb -> lock );
2709
2718
}
2710
2719
rcu_read_unlock ();
@@ -2841,7 +2850,7 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
2841
2850
* not purgeable, check whether there is dirty
2842
2851
* space to be flushed.
2843
2852
*/
2844
- if (!purge_fragmented_block (vb , vbq , & purge_list , false) &&
2853
+ if (!purge_fragmented_block (vb , & purge_list , false) &&
2845
2854
vb -> dirty_max && vb -> dirty != VMAP_BBMAP_BITS ) {
2846
2855
unsigned long va_start = vb -> va -> va_start ;
2847
2856
unsigned long s , e ;
0 commit comments