@@ -36,8 +36,8 @@
 // Forward declarations
 static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool);
 static bool bucket_can_pool(bucket_t *bucket);
-static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket,
-                                               bool *from_pool);
+static slab_list_item_t *
+bucket_get_avail_slab(disjoint_pool_t *pool, bucket_t *bucket, bool *from_pool);
 
 static __TLS umf_result_t TLS_last_allocation_error;
 
@@ -69,7 +69,7 @@ static size_t bucket_slab_alloc_size(bucket_t *bucket) {
     return utils_max(bucket->size, bucket_slab_min_size(bucket));
 }
 
-static slab_t *create_slab(bucket_t *bucket) {
+static slab_t *create_slab(bucket_t *bucket, void *mem_ptr) {
     assert(bucket);
 
     umf_result_t res = UMF_RESULT_SUCCESS;
@@ -110,13 +110,17 @@ static slab_t *create_slab(bucket_t *bucket) {
     // padding at the end of the slab
     slab->slab_size = bucket_slab_alloc_size(bucket);
 
-    // TODO not true
-    // NOTE: originally slabs memory were allocated without alignment
-    // with this registering a slab is simpler and doesn't require multimap
-    res = umfMemoryProviderAlloc(provider, slab->slab_size, 0, &slab->mem_ptr);
-    if (res != UMF_RESULT_SUCCESS) {
-        LOG_ERR("allocation of slab data failed!");
-        goto free_slab;
+    // if the mem_ptr is provided, we use the user-provided memory instead of
+    // allocating a new one
+    if (mem_ptr) {
+        slab->mem_ptr = mem_ptr;
+    } else {
+        res = umfMemoryProviderAlloc(provider, slab->slab_size, 0,
+                                     &slab->mem_ptr);
+        if (res != UMF_RESULT_SUCCESS) {
+            LOG_ERR("allocation of slab data failed!");
+            goto free_slab;
+        }
     }
 
     // raw allocation is not available for user so mark it as inaccessible
@@ -301,6 +305,9 @@ static void bucket_free_chunk(bucket_t *bucket, void *ptr, slab_t *slab,
     // pool or freed.
     *to_pool = bucket_can_pool(bucket);
     if (*to_pool == false) {
+
+        // TODO - reuse strategy?
+
         // remove slab
         slab_list_item_t *slab_it = &slab->iter;
         assert(slab_it->val != NULL);
@@ -317,8 +324,9 @@ static void bucket_free_chunk(bucket_t *bucket, void *ptr, slab_t *slab,
 }
 
 // NOTE: this function must be called under bucket->bucket_lock
-static void *bucket_get_free_chunk(bucket_t *bucket, bool *from_pool) {
-    slab_list_item_t *slab_it = bucket_get_avail_slab(bucket, from_pool);
+static void *bucket_get_free_chunk(disjoint_pool_t *pool, bucket_t *bucket,
+                                   bool *from_pool) {
+    slab_list_item_t *slab_it = bucket_get_avail_slab(pool, bucket, from_pool);
     if (slab_it == NULL) {
         return NULL;
     }
@@ -342,7 +350,7 @@ static size_t bucket_chunk_cut_off(bucket_t *bucket) {
 }
 
 static slab_t *bucket_create_slab(bucket_t *bucket) {
-    slab_t *slab = create_slab(bucket);
+    slab_t *slab = create_slab(bucket, NULL);
     if (slab == NULL) {
         LOG_ERR("create_slab failed!")
         return NULL;
@@ -362,8 +370,87 @@ static slab_t *bucket_create_slab(bucket_t *bucket) {
     return slab;
 }
 
-static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket,
+static slab_list_item_t *bucket_get_avail_slab(disjoint_pool_t *pool,
+                                               bucket_t *bucket,
                                                bool *from_pool) {
+    if (pool == NULL || bucket == NULL) {
+        return NULL;
+    }
+
+    if (bucket->available_slabs == NULL && pool->params.reuse_strategy == 1) {
+        // try to find slabs in larger buckets
+        for (size_t i = 0; i < pool->buckets_num; i++) {
+            bucket_t *larger_bucket = pool->buckets[i];
+            if (larger_bucket->size < bucket->size) {
+                continue;
+            }
+
+            if (larger_bucket->available_slabs == NULL ||
+                larger_bucket->available_slabs->val->num_chunks_allocated > 0) {
+                continue;
+            }
+
+            if (larger_bucket->size % bucket->size != 0) {
+                // TODO what about this case?
+                continue;
+            }
+
+            // move available slab from larger bucket to smaller one
+            slab_list_item_t *slab_it = larger_bucket->available_slabs;
+            assert(slab_it->val != NULL);
+            DL_DELETE(larger_bucket->available_slabs, slab_it);
+            // TODO check global lock + bucket locks
+            pool_unregister_slab(larger_bucket->pool, slab_it->val);
+            larger_bucket->available_slabs_num--;
+            larger_bucket->chunked_slabs_in_pool--;
+            //
+            bucket_update_stats(larger_bucket, 0, -1);
+
+            void *mem_ptr = slab_it->val->mem_ptr;
+            while (mem_ptr < slab_get_end(slab_it->val)) {
+                slab_t *slab = create_slab(bucket, mem_ptr);
+                assert(slab != NULL);
+
+                // register the slab in the pool
+                umf_result_t res = pool_register_slab(bucket->pool, slab);
+                if (res != UMF_RESULT_SUCCESS) {
+                    // TODO handle errors
+                    return NULL;
+                }
+
+                DL_PREPEND(bucket->available_slabs, &slab->iter);
+                bucket->available_slabs_num++;
+                bucket->chunked_slabs_in_pool++;
+                //
+                bucket_update_stats(bucket, 0, 1);
+
+                mem_ptr = (void *)((uintptr_t)mem_ptr + slab->slab_size);
+            }
+            // Ensure that we used the whole slab
+            assert(mem_ptr == slab_get_end(slab_it->val));
+            umf_ba_global_free(slab_it->val);
+
+            // TODO common code
+            slab_t *slab = bucket->available_slabs->val;
+            // Allocation from existing slab is treated as from pool for statistics.
+            *from_pool = true;
+            if (slab->num_chunks_allocated == 0) {
+                assert(bucket->chunked_slabs_in_pool > 0);
+                // If this was an empty slab, it was in the pool.
+                // Now it is no longer in the pool, so update count.
+                --bucket->chunked_slabs_in_pool;
+                uint64_t size_to_sub = bucket_slab_alloc_size(bucket);
+                uint64_t old_size = utils_fetch_and_sub_u64(
+                    &bucket->shared_limits->total_size, size_to_sub);
+                (void)old_size;
+                assert(old_size >= size_to_sub);
+                bucket_update_stats(bucket, 1, -1);
+            }
+
+            return bucket->available_slabs;
+        }
+    }
+
     if (bucket->available_slabs == NULL) {
         bucket_create_slab(bucket);
         *from_pool = false;
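
The hunk above is the core of reuse strategy 1: when a bucket has no available slabs, it borrows a fully free slab from a larger bucket whose slab size is an exact multiple of its own and carves that memory into several smaller slabs. The following standalone sketch (not part of the patch; the 64 KiB / 16 KiB sizes and all names are illustrative only) reduces that carving loop to plain pointer arithmetic:

/*
 * Sketch of the carving loop performed by bucket_get_avail_slab() when
 * reuse_strategy == 1. The precondition mirrored here is that the larger
 * slab size is an exact multiple of the smaller one.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    char big_slab[64 * 1024];                 /* memory of one slab from a larger bucket */
    const size_t small_slab_size = 16 * 1024; /* slab size of the requesting bucket */

    void *mem_ptr = big_slab;                 /* cursor, as in the patch */
    void *end = big_slab + sizeof(big_slab);  /* stand-in for slab_get_end() */
    size_t count = 0;

    while ((uintptr_t)mem_ptr < (uintptr_t)end) {
        /* In the patch, each iteration calls create_slab(bucket, mem_ptr),
         * registers the new slab and prepends it to bucket->available_slabs. */
        printf("small slab %zu at offset %zu\n", count,
               (size_t)((uintptr_t)mem_ptr - (uintptr_t)big_slab));
        mem_ptr = (void *)((uintptr_t)mem_ptr + small_slab_size);
        count++;
    }

    /* The whole range must be consumed, matching the patch's assert. */
    assert((uintptr_t)mem_ptr == (uintptr_t)end);
    return 0;
}
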
@@ -403,10 +490,12 @@ static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool) {
         return;
     }
 
+    assert(in_use >= 0 || bucket->curr_slabs_in_use >= (size_t)(-in_use));
     bucket->curr_slabs_in_use += in_use;
     bucket->max_slabs_in_use =
         utils_max(bucket->curr_slabs_in_use, bucket->max_slabs_in_use);
 
+    assert(in_pool >= 0 || bucket->curr_slabs_in_pool >= (size_t)(-in_pool));
    bucket->curr_slabs_in_pool += in_pool;
     bucket->max_slabs_in_pool =
         utils_max(bucket->curr_slabs_in_pool, bucket->max_slabs_in_pool);
@@ -542,7 +631,7 @@ static void *disjoint_pool_allocate(disjoint_pool_t *pool, size_t size) {
     utils_mutex_lock(&bucket->bucket_lock);
 
     bool from_pool = false;
-    ptr = bucket_get_free_chunk(bucket, &from_pool);
+    ptr = bucket_get_free_chunk(pool, bucket, &from_pool);
 
     if (ptr == NULL) {
         TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
@@ -759,7 +848,7 @@ void *disjoint_pool_aligned_malloc(void *pool, size_t size, size_t alignment) {
 
     utils_mutex_lock(&bucket->bucket_lock);
 
-    ptr = bucket_get_free_chunk(bucket, &from_pool);
+    ptr = bucket_get_free_chunk(pool, bucket, &from_pool);
 
     if (ptr == NULL) {
         TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
@@ -984,6 +1073,7 @@ umfDisjointPoolParamsCreate(umf_disjoint_pool_params_handle_t *hParams) {
         .capacity = 0,
         .min_bucket_size = UMF_DISJOINT_POOL_MIN_BUCKET_DEFAULT_SIZE,
         .cur_pool_size = 0,
+        .reuse_strategy = 0,
         .pool_trace = 0,
         .shared_limits = NULL,
         .name = {*DEFAULT_NAME},
@@ -1056,7 +1146,6 @@ umfDisjointPoolParamsSetMinBucketSize(umf_disjoint_pool_params_handle_t hParams,
     hParams->min_bucket_size = minBucketSize;
     return UMF_RESULT_SUCCESS;
 }
-
 umf_result_t
 umfDisjointPoolParamsSetTrace(umf_disjoint_pool_params_handle_t hParams,
                               int poolTrace) {
@@ -1069,6 +1158,18 @@ umfDisjointPoolParamsSetTrace(umf_disjoint_pool_params_handle_t hParams,
     return UMF_RESULT_SUCCESS;
 }
 
+umf_result_t
+umfDisjointPoolParamsSetReuseStrategy(umf_disjoint_pool_params_handle_t hParams,
+                                      unsigned int reuseStrategy) {
+    if (!hParams) {
+        LOG_ERR("disjoint pool params handle is NULL");
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    hParams->reuse_strategy = reuseStrategy;
+    return UMF_RESULT_SUCCESS;
+}
+
 umf_result_t umfDisjointPoolParamsSetSharedLimits(
     umf_disjoint_pool_params_handle_t hParams,
     umf_disjoint_pool_shared_limits_handle_t hSharedLimits) {
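
For reference, a minimal usage sketch of the new setter (not part of the patch): it assumes the usual public disjoint pool header and the existing umfDisjointPoolParamsCreate/Destroy API, and the resulting params handle would still be passed to pool creation as today.

#include <umf/pools/pool_disjoint.h>

/* Create disjoint pool params with cross-bucket slab reuse enabled. */
static umf_result_t make_reuse_params(umf_disjoint_pool_params_handle_t *out) {
    umf_disjoint_pool_params_handle_t params = NULL;
    umf_result_t res = umfDisjointPoolParamsCreate(&params);
    if (res != UMF_RESULT_SUCCESS) {
        return res;
    }

    /* 0 is the default (no cross-bucket reuse); 1 enables borrowing empty
     * slabs from larger buckets, as implemented in bucket_get_avail_slab(). */
    res = umfDisjointPoolParamsSetReuseStrategy(params, 1);
    if (res != UMF_RESULT_SUCCESS) {
        umfDisjointPoolParamsDestroy(params);
        return res;
    }

    *out = params;
    return UMF_RESULT_SUCCESS;
}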