Clean up uma_int.h a bit.

This makes it easier to write libkvm programs that access UMA data
structures.
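
As an illustration of that payoff, here is a minimal, hypothetical libkvm sketch in
the style of libmemstat(3).  The "_uma_kegs" symbol name mirrors libmemstat's usage
and is an assumption here, as is the program name; error handling is minimal.

	/*
	 * Hypothetical sketch only: resolve UMA's keg list head via libkvm.
	 * Build with: cc -o umakegs umakegs.c -lkvm (requires kmem privileges).
	 */
	#include <sys/param.h>

	#include <err.h>
	#include <fcntl.h>
	#include <kvm.h>
	#include <limits.h>
	#include <nlist.h>
	#include <stdio.h>

	int
	main(void)
	{
		char errbuf[_POSIX2_LINE_MAX];
		struct nlist nl[] = {
			{ .n_name = "_uma_kegs" },	/* assumed symbol, as in libmemstat */
			{ .n_name = NULL },
		};
		kvm_t *kd;

		kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
		if (kd == NULL)
			errx(1, "kvm_openfiles: %s", errbuf);
		if (kvm_nlist(kd, nl) != 0 || nl[0].n_value == 0)
			errx(1, "kvm_nlist: %s", kvm_geterr(kd));
		printf("uma_kegs list head at %p\n", (void *)nl[0].n_value);
		/*
		 * With uma_int.h's slab and keg layouts now visible outside
		 * _KERNEL, the program could kvm_read(3) the list head and
		 * walk struct uma_keg entries directly from here.
		 */
		kvm_close(kd);
		return (0);
	}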

- Remove a couple of unused slab functions, slab_space() and slab_ipers(),
  and make others (slab_sizeof(), slab_dbg_bits()) local to uma_core.c.
  Similarly move SLAB_BITSETS, which affects the layout of slab structures,
  to uma_core.c.
- Stop defining the slab structures under _KERNEL.  There's no real
  reason they can't be visible to userspace like the rest of UMA's
  structures are.
- Group KEG_ASSERT_COLD with other keg macros.
- Convert an assertion about MAXMEMDOM to use _Static_assert.
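
  The before/after shape of that conversion appears in the uma_int.h hunk
  below; as a self-contained illustration (the MAXMEMDOM value is a
  stand-in), both forms reject an over-wide domain count at compile time,
  but the _Static_assert reads uniformly with the adjacent us_free
  assertion:

	#define	MAXMEMDOM	8	/* stand-in value for illustration */

	/* Before: a preprocessor-time check with an opaque failure mode. */
	#if MAXMEMDOM >= 255
	#error "Slab domain type insufficient"
	#endif

	/* After: a C11 declaration checked by the compiler proper, with a
	   message naming the uint8_t us_domain field it protects. */
	_Static_assert(MAXMEMDOM < 255, "us_domain field is not wide enough");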

No functional change intended.

Discussed with:	jeff
Reviewed by:	rlibby
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D23980
Mark Johnston 2020-03-07 15:37:23 +00:00
parent d726e6331b
commit 54007ce8ae
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=358732
2 changed files with 19 additions and 48 deletions

--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -116,6 +116,16 @@ __FBSDID("$FreeBSD$");
 static uma_zone_t kegs;
 static uma_zone_t zones;
 
+/*
+ * On INVARIANTS builds, the slab contains a second bitset of the same size,
+ * "dbg_bits", which is laid out immediately after us_free.
+ */
+#ifdef INVARIANTS
+#define	SLAB_BITSETS	2
+#else
+#define	SLAB_BITSETS	1
+#endif
+
 /*
  * These are the two zones from which all offpage uma_slab_ts are allocated.
  *
@@ -1898,7 +1908,7 @@ zero_init(void *mem, int size, int flags)
 }
 
 #ifdef INVARIANTS
-struct noslabbits *
+static struct noslabbits *
 slab_dbg_bits(uma_slab_t slab, uma_keg_t keg)
 {
 
@@ -1909,7 +1919,7 @@ slab_dbg_bits(uma_slab_t slab, uma_keg_t keg)
 /*
  * Actual size of embedded struct slab (!OFFPAGE).
  */
-size_t
+static size_t
 slab_sizeof(int nitems)
 {
 	size_t s;
@@ -1918,15 +1928,6 @@ slab_sizeof(int nitems)
 	return (roundup(s, UMA_ALIGN_PTR + 1));
 }
 
-/*
- * Size of memory for embedded slabs (!OFFPAGE).
- */
-size_t
-slab_space(int nitems)
-{
-	return (UMA_SLAB_SIZE - slab_sizeof(nitems));
-}
-
 #define	UMA_FIXPT_SHIFT	31
 #define	UMA_FRAC_FIXPT(n, d)						\
 	((uint32_t)(((uint64_t)(n) << UMA_FIXPT_SHIFT) / (d)))
@@ -1967,18 +1968,6 @@ slab_ipers_hdr(u_int size, u_int rsize, u_int slabsize, bool hdr)
 	return (ipers);
 }
 
-/*
- * Compute the number of items that will fit in a slab for a startup zone.
- */
-int
-slab_ipers(size_t size, int align)
-{
-	int rsize;
-
-	rsize = roundup(size, align + 1); /* Assume no CACHESPREAD */
-	return (slab_ipers_hdr(size, rsize, UMA_SLAB_SIZE, true));
-}
-
 struct keg_layout_result {
 	u_int format;
 	u_int slabsize;

--- a/sys/vm/uma_int.h
+++ b/sys/vm/uma_int.h
@@ -368,11 +368,6 @@ struct uma_keg {
 };
 typedef struct uma_keg	* uma_keg_t;
 
-#ifdef _KERNEL
-#define	KEG_ASSERT_COLD(k)						\
-	KASSERT(uma_keg_get_allocs((k)) == 0,				\
-	    ("keg %s initialization after use.", (k)->uk_name))
-
 /*
  * Free bits per-slab.
  */
@@ -391,29 +386,13 @@ struct uma_slab {
 	uint8_t		us_domain;		/* Backing NUMA domain. */
 	struct noslabbits us_free;		/* Free bitmask, flexible. */
 };
-_Static_assert(sizeof(struct uma_slab) == offsetof(struct uma_slab, us_free),
+_Static_assert(sizeof(struct uma_slab) == __offsetof(struct uma_slab, us_free),
     "us_free field must be last");
-#if MAXMEMDOM >= 255
-#error "Slab domain type insufficient"
-#endif
+_Static_assert(MAXMEMDOM < 255,
+    "us_domain field is not wide enough");
 
 typedef struct uma_slab * uma_slab_t;
 
-/*
- * On INVARIANTS builds, the slab contains a second bitset of the same size,
- * "dbg_bits", which is laid out immediately after us_free.
- */
-#ifdef INVARIANTS
-#define	SLAB_BITSETS	2
-#else
-#define	SLAB_BITSETS	1
-#endif
-
-/* These three functions are for embedded (!OFFPAGE) use only. */
-size_t slab_sizeof(int nitems);
-size_t slab_space(int nitems);
-int slab_ipers(size_t size, int align);
-
 /*
  * Slab structure with a full sized bitset and hash link for both
  * HASH and OFFPAGE zones.
@@ -460,7 +439,6 @@ slab_item_index(uma_slab_t slab, uma_keg_t keg, void *item)
 	data = (uintptr_t)slab_data(slab, keg);
 	return (((uintptr_t)item - data) / keg->uk_rsize);
 }
-#endif /* _KERNEL */
 
 STAILQ_HEAD(uma_bucketlist, uma_bucket);
 
@@ -579,6 +557,10 @@ static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);
 	    ("%s: Invalid zone %p type", __func__, (zone)));		\
 } while (0)
 
+#define	KEG_ASSERT_COLD(k)						\
+	KASSERT(uma_keg_get_allocs((k)) == 0,				\
+	    ("keg %s initialization after use.", (k)->uk_name))
+
 /* Domains are contiguous after the last CPU */
 #define	ZDOM_GET(z, n)							\
 	(&((uma_zone_domain_t)&(z)->uz_cpu[mp_maxid + 1])[n])