From b8acb1ed4aa5bcd8dc231ed8bd3ef2d7c856265b Mon Sep 17 00:00:00 2001
From: glebius
Date: Tue, 1 Mar 2016 00:33:32 +0000
Subject: [PATCH] Remove UMA_ZONE_REFCNT feature, now unused.

Blessed by:	jeff
---
 sys/vm/uma.h      | 19 ++---------
 sys/vm/uma_core.c | 87 +++--------------------------------------------
 sys/vm/uma_int.h  | 10 ------
 3 files changed, 6 insertions(+), 110 deletions(-)

diff --git a/sys/vm/uma.h b/sys/vm/uma.h
index 21c61214315d..6ac78ef5cfcf 100644
--- a/sys/vm/uma.h
+++ b/sys/vm/uma.h
@@ -262,7 +262,7 @@ uma_zone_t uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
 				 * information in the vm_page.
 				 */
 #define	UMA_ZONE_SECONDARY	0x0200	/* Zone is a Secondary Zone */
-#define	UMA_ZONE_REFCNT		0x0400	/* Allocate refcnts in slabs */
+/*	0x0400	Unused */
 #define	UMA_ZONE_MAXBUCKET	0x0800	/* Use largest buckets */
 #define	UMA_ZONE_CACHESPREAD	0x1000	/*
 					 * Spread memory start locations across
@@ -287,7 +287,7 @@ uma_zone_t uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
  */
 #define	UMA_ZONE_INHERIT						\
     (UMA_ZONE_OFFPAGE | UMA_ZONE_MALLOC | UMA_ZONE_NOFREE |		\
-    UMA_ZONE_HASH | UMA_ZONE_REFCNT | UMA_ZONE_VTOSLAB | UMA_ZONE_PCPU)
+    UMA_ZONE_HASH | UMA_ZONE_VTOSLAB | UMA_ZONE_PCPU)
 
 /* Definitions for align */
 #define	UMA_ALIGN_PTR		(sizeof(void *) - 1)	/* Alignment fit for ptr */
@@ -622,21 +622,6 @@ void uma_zone_set_freef(uma_zone_t zone, uma_free freef);
  */
 void uma_prealloc(uma_zone_t zone, int itemcnt);
 
-/*
- * Used to lookup the reference counter allocated for an item
- * from a UMA_ZONE_REFCNT zone.  For UMA_ZONE_REFCNT zones,
- * reference counters are allocated for items and stored in
- * the underlying slab header.
- *
- * Arguments:
- *	zone  The UMA_ZONE_REFCNT zone to which the item belongs.
- *	item  The address of the item for which we want a refcnt.
- *
- * Returns:
- *	A pointer to a uint32_t reference counter.
- */
-uint32_t *uma_find_refcnt(uma_zone_t zone, void *item);
-
 /*
  * Used to determine if a fixed-size zone is exhausted.
  *
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 0d45046e7e51..262f8b0877bb 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -112,7 +112,6 @@ static uma_zone_t zones = &masterzone_z;
 
 /* This is the zone from which all of uma_slab_t's are allocated. */
 static uma_zone_t slabzone;
-static uma_zone_t slabrefzone;	/* With refcounters (for UMA_ZONE_REFCNT) */
 
 /*
  * The initial hash tables come out of this zone so they can be allocated
@@ -154,12 +153,6 @@ static int booted = 0;
 #define	UMA_STARTUP	1
 #define	UMA_STARTUP2	2
 
-/*
- * Only mbuf clusters use ref zones.  Just provide enough references
- * to support the one user.  New code should not use the ref facility.
- */
-static const u_int uma_max_ipers_ref = PAGE_SIZE / MCLBYTES;
-
 /*
  * This is the handle used to schedule events that need to happen
  * outside of the allocation fast path.
@@ -951,7 +944,6 @@ zone_drain(uma_zone_t zone)
 static uma_slab_t
 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
 {
-	uma_slabrefcnt_t slabref;
 	uma_alloc allocf;
 	uma_slab_t slab;
 	uint8_t *mem;
@@ -1014,11 +1006,6 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
 #ifdef INVARIANTS
 	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
 #endif
-	if (keg->uk_flags & UMA_ZONE_REFCNT) {
-		slabref = (uma_slabrefcnt_t)slab;
-		for (i = 0; i < keg->uk_ipers; i++)
-			slabref->us_refcnt[i] = 0;
-	}
 
 	if (keg->uk_init != NULL) {
 		for (i = 0; i < keg->uk_ipers; i++)
@@ -1266,9 +1253,6 @@ keg_small_init(uma_keg_t keg)
 	    keg->uk_rsize < sizeof(struct pcpu),
 	    ("%s: size %u too large", __func__, keg->uk_rsize));
 
-	if (keg->uk_flags & UMA_ZONE_REFCNT)
-		rsize += sizeof(uint32_t);
-
 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
 		shsize = 0;
 	else
@@ -1356,8 +1340,6 @@ keg_large_init(uma_keg_t keg)
 	/* Check whether we have enough space to not do OFFPAGE. */
 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
 		shsize = sizeof(struct uma_slab);
-		if (keg->uk_flags & UMA_ZONE_REFCNT)
-			shsize += keg->uk_ipers * sizeof(uint32_t);
 		if (shsize & UMA_ALIGN_PTR)
 			shsize = (shsize & ~UMA_ALIGN_PTR) +
 			    (UMA_ALIGN_PTR + 1);
@@ -1446,7 +1428,7 @@ keg_ctor(void *mem, int size, void *udata, int flags)
 	if (arg->flags & UMA_ZONE_ZINIT)
 		keg->uk_init = zero_init;
 
-	if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
+	if (arg->flags & UMA_ZONE_MALLOC)
 		keg->uk_flags |= UMA_ZONE_VTOSLAB;
 
 	if (arg->flags & UMA_ZONE_PCPU)
@@ -1458,13 +1440,6 @@ keg_ctor(void *mem, int size, void *udata, int flags)
 
 	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
 		keg_cachespread_init(keg);
-	} else if (keg->uk_flags & UMA_ZONE_REFCNT) {
-		if (keg->uk_size >
-		    (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) -
-		    sizeof(uint32_t)))
-			keg_large_init(keg);
-		else
-			keg_small_init(keg);
 	} else {
 		if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
 			keg_large_init(keg);
@@ -1472,15 +1447,8 @@ keg_ctor(void *mem, int size, void *udata, int flags)
 			keg_small_init(keg);
 	}
 
-	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
-		if (keg->uk_flags & UMA_ZONE_REFCNT) {
-			if (keg->uk_ipers > uma_max_ipers_ref)
-				panic("Too many ref items per zone: %d > %d\n",
-				    keg->uk_ipers, uma_max_ipers_ref);
-			keg->uk_slabzone = slabrefzone;
-		} else
-			keg->uk_slabzone = slabzone;
-	}
+	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
+		keg->uk_slabzone = slabzone;
 
 	/*
 	 * If we haven't booted yet we need allocations to go through the
@@ -1517,10 +1485,6 @@ keg_ctor(void *mem, int size, void *udata, int flags)
 		/* Size of the slab struct and free list */
 		totsize = sizeof(struct uma_slab);
 
-		/* Size of the reference counts. */
-		if (keg->uk_flags & UMA_ZONE_REFCNT)
-			totsize += keg->uk_ipers * sizeof(uint32_t);
-
 		if (totsize & UMA_ALIGN_PTR)
 			totsize = (totsize & ~UMA_ALIGN_PTR) +
 			    (UMA_ALIGN_PTR + 1);
@@ -1534,8 +1498,6 @@ keg_ctor(void *mem, int size, void *udata, int flags)
 		 * sure here anyway.
 		 */
 		totsize = keg->uk_pgoff + sizeof(struct uma_slab);
-		if (keg->uk_flags & UMA_ZONE_REFCNT)
-			totsize += keg->uk_ipers * sizeof(uint32_t);
 		if (totsize > PAGE_SIZE * keg->uk_ppera) {
 			printf("zone %s ipers %d rsize %d size %d\n",
 			    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
@@ -1797,7 +1759,6 @@ uma_startup(void *bootmem, int boot_pages)
 {
 	struct uma_zctor_args args;
 	uma_slab_t slab;
-	u_int slabsize;
 	int i;
 
 #ifdef UMA_DEBUG
@@ -1856,18 +1817,6 @@ uma_startup(void *bootmem, int boot_pages)
 				NULL, NULL, NULL, NULL,
 				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
 
-	/*
-	 * We also create a zone for the bigger slabs with reference
-	 * counts in them, to accomodate UMA_ZONE_REFCNT zones.
-	 */
-	slabsize = sizeof(struct uma_slab_refcnt);
-	slabsize += uma_max_ipers_ref * sizeof(uint32_t);
-	slabrefzone = uma_zcreate("UMA RCntSlabs",
-				  slabsize,
-				  NULL, NULL, NULL, NULL,
-				  UMA_ALIGN_PTR,
-				  UMA_ZFLAG_INTERNAL);
-
 	hashzone = uma_zcreate("UMA Hash",
 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
 	    NULL, NULL, NULL, NULL,
@@ -2090,14 +2039,7 @@ uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
 		error = EINVAL;
 		goto out;
 	}
-	/*
-	 * Both must either be refcnt, or not be refcnt.
-	 */
-	if ((zone->uz_flags & UMA_ZONE_REFCNT) !=
-	    (master->uz_flags & UMA_ZONE_REFCNT)) {
-		error = EINVAL;
-		goto out;
-	}
+
 	/*
 	 * The underlying object must be the same size.  rsize
 	 * may be different.
@@ -3219,26 +3161,6 @@ uma_prealloc(uma_zone_t zone, int items)
 	KEG_UNLOCK(keg);
 }
 
-/* See uma.h */
-uint32_t *
-uma_find_refcnt(uma_zone_t zone, void *item)
-{
-	uma_slabrefcnt_t slabref;
-	uma_slab_t slab;
-	uma_keg_t keg;
-	uint32_t *refcnt;
-	int idx;
-
-	slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
-	slabref = (uma_slabrefcnt_t)slab;
-	keg = slab->us_keg;
-	KASSERT(keg->uk_flags & UMA_ZONE_REFCNT,
-	    ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
-	idx = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
-	refcnt = &slabref->us_refcnt[idx];
-	return refcnt;
-}
-
 /* See uma.h */
 static void
 uma_reclaim_locked(bool kmem_danger)
@@ -3260,7 +3182,6 @@ uma_reclaim_locked(bool kmem_danger)
 	 * zones are drained.  We have to do the same for buckets.
 	 */
 	zone_drain(slabzone);
-	zone_drain(slabrefzone);
 	bucket_zone_drain();
 }
 
diff --git a/sys/vm/uma_int.h b/sys/vm/uma_int.h
index 132cc63e7538..c4235ce20a64 100644
--- a/sys/vm/uma_int.h
+++ b/sys/vm/uma_int.h
@@ -250,17 +250,7 @@ struct uma_slab {
 #define	us_link	us_type._us_link
 #define	us_size	us_type._us_size
 
-/*
- * The slab structure for UMA_ZONE_REFCNT zones for whose items we
- * maintain reference counters in the slab for.
- */
-struct uma_slab_refcnt {
-	struct uma_slab		us_head;	/* slab header data */
-	uint32_t		us_refcnt[0];	/* Actually larger. */
-};
-
 typedef struct uma_slab * uma_slab_t;
-typedef struct uma_slab_refcnt * uma_slabrefcnt_t;
 typedef uma_slab_t (*uma_slaballoc)(uma_zone_t, uma_keg_t, int);
 
 struct uma_klink {
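For reference, the retired facility kept one uint32_t per item in the slab header of a
UMA_ZONE_REFCNT zone, and uma_find_refcnt() mapped an item back to its counter. A rough
sketch of the usage pattern this patch removes follows; the zone name, item size, and
variable names are illustrative only, not taken from the tree.

	/* Hypothetical consumer of the removed UMA_ZONE_REFCNT facility. */
	uma_zone_t refzone;
	void *item;
	uint32_t *cnt;

	/* Counters for this zone's items were stored in the slab header. */
	refzone = uma_zcreate("example_ref_zone", 2048, NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	item = uma_zalloc(refzone, M_WAITOK);
	cnt = uma_find_refcnt(refzone, item);	/* resolve the per-item counter */
	*cnt = 1;

As the removed comment in uma_core.c notes, mbuf clusters were the only remaining user.
With the flag gone, a consumer that needs a per-item reference count has to provide its
own storage for it rather than borrowing space in the slab.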