@@ -277,10 +277,6 @@ static bool verbose = 0;
 
 static unsigned int max_consumers = 5;
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
-static enum cpuhp_state hp_state = 0;
-#endif
-
 #define vpr_info(fmt, ...) \
 	do { \
 		if(verbose) \
@@ -446,6 +442,7 @@ static int ppm_open(struct inode *inode, struct file *filp) {
 	consumer->consumer_id = consumer_id;
 	consumer->buffer_bytes_dim = g_buffer_bytes_dim;
 	consumer->tracepoints_attached = 0; /* Start with no tracepoints */
+	consumer->hotplug_cpu = -1;
 
 	/*
 	 * Initialize the ring buffers array
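Note: the hunk above initializes consumer->hotplug_cpu, which implies a matching field added to struct ppm_consumer_t in a header this diff does not show. A minimal sketch of the assumed declaration (field types are guesses, not the full struct):

struct ppm_consumer_t {
	unsigned int consumer_id;
	unsigned long buffer_bytes_dim;
	u32 tracepoints_attached;
	int hotplug_cpu; /* CPU seen online after the rings were built; -1 if none */
	/* ... ring buffers, consumer list node, etc. ... */
};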
@@ -476,14 +473,6 @@ static int ppm_open(struct inode *inode, struct file *filp) {
 		ring->info = NULL;
 	}
 
-	/*
-	 * If a cpu is offline when the consumer is first created, we
-	 * will never get events for that cpu even if it later comes
-	 * online via hotplug. We could allocate these rings on-demand
-	 * later in this function if needed for hotplug, but that
-	 * requires the consumer to know to call open again, and that is
-	 * not supported.
-	 */
 	for_each_online_cpu(cpu) {
 		ring = per_cpu_ptr(consumer->ring_buffers, cpu);
 
@@ -1820,6 +1809,27 @@ static int record_event_consumer(struct ppm_consumer_t *consumer,
 	ASSERT(ring);
 
 	ring_info = ring->info;
+	if(!ring_info) {
+		// If we haven't got the ring info, it means
+		// the event was generated by a CPU that was not
+		// online when the ring buffers were initialized.
+		// Store info about hotplugged CPU here to later
+		// send hotplug events on cpu0.
+		consumer->hotplug_cpu = cpu;
+		put_cpu();
+		return res;
+	}
+
+	// Manage hotplug on cpu 0
+	if(consumer->hotplug_cpu != -1 && cpu == 0) {
+		event_type = PPME_CPU_HOTPLUG_E;
+		drop_flags = UF_NEVER_DROP;
+		tp_type = INTERNAL_EVENTS;
+		event_datap->category = PPMC_CONTEXT_SWITCH;
+		event_datap->event_info.context_data.sched_prev = (void *)(long)consumer->hotplug_cpu;
+		event_datap->event_info.context_data.sched_next = (void *)(long)0;
+	}
+
 	if(event_datap->category == PPMC_CONTEXT_SWITCH &&
 	   event_datap->event_info.context_data.sched_prev != NULL) {
 		if(event_type != PPME_SCAPEVENT_E && event_type != PPME_CPU_HOTPLUG_E) {
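The new logic is easiest to follow in isolation: an event raised on a CPU whose ring buffer was never allocated cannot be stored, so the CPU id is parked in consumer->hotplug_cpu and the event is dropped; the next event that arrives on cpu 0 is then rewritten into a PPME_CPU_HOTPLUG_E carrying that CPU id. A small userspace simulation of the same scheme (toy names only, nothing here is driver API):

#include <stdio.h>

#define NCPUS 4

/* Toy model: cpu 3 was hotplugged after init, so it has no ring. */
static int ring_present[NCPUS] = { 1, 1, 1, 0 };
static int hotplug_cpu = -1; /* -1 means no hotplug pending */

static void record_event(int cpu) {
	if(!ring_present[cpu]) {
		/* No ring buffer: remember the CPU, drop the event. */
		hotplug_cpu = cpu;
		return;
	}
	if(hotplug_cpu != -1 && cpu == 0) {
		/* Rewrite this cpu-0 event into the synthetic notification,
		 * mirroring how the hunk above overrides event_type. */
		printf("PPME_CPU_HOTPLUG_E: cpu %d came online\n", hotplug_cpu);
		hotplug_cpu = -1; /* cleared here for the toy; the driver's
		                   * reset point is outside this hunk */
		return;
	}
	printf("regular event on cpu %d\n", cpu);
}

int main(void) {
	record_event(3); /* dropped, hotplug recorded */
	record_event(1); /* regular event */
	record_event(0); /* becomes the hotplug notification */
	return 0;
}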
@@ -2771,96 +2781,12 @@ static char *ppm_devnode(struct device *dev, mode_t *mode)
 }
 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 20) */
 
-static int do_cpu_callback(unsigned long cpu, long sd_action) {
-	struct ppm_ring_buffer_context *ring;
-	struct ppm_consumer_t *consumer;
-	struct event_data_t event_data;
-
-	if(sd_action != 0) {
-		rcu_read_lock();
-
-		list_for_each_entry_rcu(consumer, &g_consumer_list, node) {
-			ring = per_cpu_ptr(consumer->ring_buffers, cpu);
-			if(sd_action == 1) {
-				/*
-				 * If the cpu was offline when the consumer was created,
-				 * this won't do anything because we never created a ring
-				 * buffer. We can't safely create one here because we're
-				 * in atomic context, and the consumer needs to call open
-				 * on this device anyways, so do it in ppm_open.
-				 */
-				ring->cpu_online = true;
-			} else if(sd_action == 2) {
-				ring->cpu_online = false;
-			}
-		}
-
-		rcu_read_unlock();
-
-		event_data.category = PPMC_CONTEXT_SWITCH;
-		event_data.event_info.context_data.sched_prev = (void *)cpu;
-		event_data.event_info.context_data.sched_next = (void *)sd_action;
-		record_event_all_consumers(PPME_CPU_HOTPLUG_E, UF_NEVER_DROP, &event_data, INTERNAL_EVENTS);
-	}
-	return 0;
-}
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
-static int scap_cpu_online(unsigned int cpu) {
-	vpr_info("scap_cpu_online on cpu %d\n", cpu);
-	return do_cpu_callback(cpu, 1);
-}
-
-static int scap_cpu_offline(unsigned int cpu) {
-	vpr_info("scap_cpu_offline on cpu %d\n", cpu);
-	return do_cpu_callback(cpu, 2);
-}
-#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) */
-/*
- * This gets called every time a CPU is added or removed
- */
-static int cpu_callback(struct notifier_block *self, unsigned long action, void *hcpu) {
-	unsigned long cpu = (unsigned long)hcpu;
-	long sd_action = 0;
-
-	switch(action) {
-	case CPU_UP_PREPARE:
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 20)
-	case CPU_UP_PREPARE_FROZEN:
-#endif
-		sd_action = 1;
-		break;
-	case CPU_DOWN_PREPARE:
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 20)
-	case CPU_DOWN_PREPARE_FROZEN:
-#endif
-		sd_action = 2;
-		break;
-	default:
-		break;
-	}
-
-	if(do_cpu_callback(cpu, sd_action) < 0)
-		return NOTIFY_BAD;
-	else
-		return NOTIFY_OK;
-}
-
-static struct notifier_block cpu_notifier = {
-	.notifier_call = &cpu_callback,
-	.next = NULL,
-};
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) */
-
 static int scap_init(void) {
 	dev_t dev;
 	unsigned int cpu;
 	unsigned int num_cpus;
 	int ret;
 	int acrret = 0;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
-	int hp_ret;
-#endif
 	int j;
 	int n_created_devices = 0;
 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 20)
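For reference, the pattern being deleted above is the standard dynamic cpuhp registration: with CPUHP_AP_ONLINE_DYN, cpuhp_setup_state_nocalls() returns the dynamically allocated state number on success (a negative errno on failure), and that number is exactly what cpuhp_remove_state_nocalls() later needs, which is why the driver kept it in the hp_state global. A minimal standalone sketch of the same pattern, assuming a >= 4.10 kernel (example_* names are illustrative):

#include <linux/cpuhotplug.h>

static enum cpuhp_state example_state;

static int example_online(unsigned int cpu)  { return 0; }
static int example_offline(unsigned int cpu) { return 0; }

static int example_register(void) {
	int ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
	                                    "example/driver:online",
	                                    example_online,
	                                    example_offline);
	if(ret < 0)
		return ret;      /* negative errno on failure */
	example_state = ret;     /* dynamic state id, saved for removal */
	return 0;
}

static void example_unregister(void) {
	cpuhp_remove_state_nocalls(example_state);
}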
@@ -2964,25 +2890,6 @@ static int scap_init(void) {
 		goto init_module_err;
 	}
 
-	/*
-	 * Set up our callback in case we get a hotplug event while we are
-	 * initializing the cpu structures
-	 */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
-	hp_ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-	                                   DRIVER_NAME "/driver:online",
-	                                   scap_cpu_online,
-	                                   scap_cpu_offline);
-	if(hp_ret <= 0) {
-		pr_err("error registering cpu hotplug callback\n");
-		ret = hp_ret;
-		goto init_module_err;
-	}
-	hp_state = hp_ret;
-#else
-	register_cpu_notifier(&cpu_notifier);
-#endif
-
 	// Initialize globals
 	g_tracepoints_attached = 0;
 	for(j = 0; j < KMOD_PROG_ATTACHED_MAX; j++) {
@@ -3041,13 +2948,6 @@ static void scap_exit(void) {
 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 20)
 	tracepoint_synchronize_unregister();
 #endif
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
-	if(hp_state > 0)
-		cpuhp_remove_state_nocalls(hp_state);
-#else
-	unregister_cpu_notifier(&cpu_notifier);
-#endif
 }
 
 module_init(scap_init);
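Net effect of the patch: instead of tracking hotplug with kernel-side notifiers, the driver reports a hotplugged CPU in-band as a PPME_CPU_HOTPLUG_E event and delegates recovery to userspace. Since ring buffers are only allocated in ppm_open, the natural reaction for a consumer is to restart the capture; a hypothetical handler (only the event type constant comes from the driver headers, the handle type and helpers are made up for illustration):

#include <stdint.h>

struct capture_handle;                        /* opaque, illustrative */
void capture_close(struct capture_handle *h); /* illustrative */
void capture_open(struct capture_handle *h);  /* illustrative */

static void handle_event(struct capture_handle *h, uint16_t event_type) {
	if(event_type == PPME_CPU_HOTPLUG_E) { /* from ppm_events_public.h */
		/* Ring buffers are sized in ppm_open, so the only way to
		 * start receiving events from the new CPU is to reopen. */
		capture_close(h);
		capture_open(h);
	}
}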