@@ -196,11 +196,7 @@ static int grow_slab_list (const unsigned int id) {
 
 static int do_slabs_newslab(const unsigned int id) {
     slabclass_t *p = &slabclass[id];
-#ifdef ALLOW_SLABS_REASSIGN
-    int len = settings.item_size_max;
-#else
     int len = p->size * p->perslab;
-#endif
     char *ptr;
 
     if ((mem_limit && mem_malloced + len > mem_limit && p->slabs > 0) ||
@@ -393,87 +389,6 @@ static void do_slabs_stats(ADD_STAT add_stats, void *c) {
     add_stats(NULL, 0, NULL, 0, c);
 }
 
-#ifdef ALLOW_SLABS_REASSIGN
-/* Blows away all the items in a slab class and moves its slabs to another
-   class. This is only used by the "slabs reassign" command, for manual tweaking
-   of memory allocation. It's disabled by default since it requires that all
-   slabs be the same size (which can waste space for chunk size mantissas of
-   other than 2.0).
-   1 = success
-   0 = fail
-   -1 = tried. busy. send again shortly. */
-int do_slabs_reassign(unsigned char srcid, unsigned char dstid) {
-    void *slab, *slab_end;
-    slabclass_t *p, *dp;
-    void *iter;
-    bool was_busy = false;
-
-    if (srcid < POWER_SMALLEST || srcid > power_largest ||
-        dstid < POWER_SMALLEST || dstid > power_largest)
-        return 0;
-
-    p = &slabclass[srcid];
-    dp = &slabclass[dstid];
-
-    /* fail if src still populating, or no slab to give up in src */
-    if (p->end_page_ptr || ! p->slabs)
-        return 0;
-
-    /* fail if dst is still growing or we can't make room to hold its new one */
-    if (dp->end_page_ptr || ! grow_slab_list(dstid))
-        return 0;
-
-    if (p->killing == 0) p->killing = 1;
-
-    slab = p->slab_list[p->killing - 1];
-    slab_end = (char *)slab + settings.item_size_max;
-
-    for (iter = slab; iter < slab_end; (char*)iter += p->size) {
-        item *it = (item *)iter;
-        if (it->slabs_clsid) {
-            if (it->refcount) was_busy = true;
-            item_unlink(it);
-        }
-    }
-
-    /* go through free list and discard items that are no longer part of this slab */
-    {
-        int fi;
-        for (fi = p->sl_curr - 1; fi >= 0; fi--) {
-            if (p->slots[fi] >= slab && p->slots[fi] < slab_end) {
-                p->sl_curr--;
-                if (p->sl_curr > fi) p->slots[fi] = p->slots[p->sl_curr];
-            }
-        }
-    }
-
-    if (was_busy) return -1;
-
-    /* if good, now move it to the dst slab class */
-    p->slab_list[p->killing - 1] = p->slab_list[p->slabs - 1];
-    p->slabs--;
-    p->killing = 0;
-    dp->slab_list[dp->slabs++] = slab;
-    dp->end_page_ptr = slab;
-    dp->end_page_free = dp->perslab;
-    /* this isn't too critical, but other parts of the code do asserts to
-       make sure this field is always 0. */
-    for (iter = slab; iter < slab_end; (char*)iter += dp->size) {
-        ((item *)iter)->slabs_clsid = 0;
-    }
-    return 1;
-}
-
-int slabs_reassign(unsigned char srcid, unsigned char dstid) {
-    int ret;
-
-    pthread_mutex_lock(&slabs_lock);
-    ret = do_slabs_reassign(srcid, dstid);
-    pthread_mutex_unlock(&slabs_lock);
-    return ret;
-}
-#endif
-
 static void *memory_allocate(size_t size) {
     void *ret;
 
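The removed do_slabs_reassign() documents a three-way return contract: 1 = success, 0 = fail, -1 = busy, send again shortly. As a rough illustration only (not part of this commit or of memcached), a caller of the old slabs_reassign() wrapper could drive that contract with a retry loop; the retry count and usleep() back-off below are assumptions:

/* Illustrative sketch of the 1/0/-1 return contract documented in the
 * removed code above. reassign_with_retry() and its back-off are
 * hypothetical caller code, not memcached code. */
#include <unistd.h>

static int reassign_with_retry(unsigned char srcid, unsigned char dstid) {
    int attempts;
    for (attempts = 0; attempts < 10; attempts++) {
        int ret = slabs_reassign(srcid, dstid); /* removed wrapper; takes slabs_lock */
        if (ret == 1)
            return 1;   /* one slab moved from srcid to dstid */
        if (ret == 0)
            return 0;   /* bad class ids or a class still growing: hard failure */
        usleep(1000);   /* ret == -1: some items were still in use; retry shortly */
    }
    return -1;          /* still busy after several attempts */
}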