/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/domainset.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_domainset.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

#ifdef NUMA
/*
 * Iterators are written such that the first nowait pass has as short a
 * codepath as possible to eliminate bloat from the allocator. It is
 * assumed that most allocations are successful.
 */

static int vm_domainset_default_stride = 64;

/*
 * Determine which policy is to be used for this allocation.
 */
static void
vm_domainset_iter_init(struct vm_domainset_iter *di, struct domainset *ds,
    int *iter, struct vm_object *obj, vm_pindex_t pindex)
{

        di->di_domain = ds;
        di->di_iter = iter;
        di->di_policy = ds->ds_policy;
        DOMAINSET_COPY(&ds->ds_mask, &di->di_valid_mask);
        if (di->di_policy == DOMAINSET_POLICY_INTERLEAVE) {
#if VM_NRESERVLEVEL > 0
                if (vm_object_reserv(obj)) {
                        /*
                         * Color the pindex so we end up on the correct
                         * reservation boundary.
                         */
                        pindex += obj->pg_color;
#if VM_NRESERVLEVEL > 1
                        pindex >>= VM_LEVEL_1_ORDER;
#endif
                        pindex >>= VM_LEVEL_0_ORDER;
                } else
#endif
                        pindex /= vm_domainset_default_stride;
                /*
                 * Offset pindex so the first page of each object does
                 * not end up in domain 0.
                 */
                if (obj != NULL)
                        pindex += (((uintptr_t)obj) / sizeof(*obj));
                di->di_offset = pindex;
        }
        /* Skip domains below min on the first pass. */
        di->di_minskip = true;
}

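/*
 * Round-robin: advance the shared iterator and return the next domain in
 * the set's search order, wrapping around at ds_cnt.
 */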
static void
vm_domainset_iter_rr(struct vm_domainset_iter *di, int *domain)
{

        *domain = di->di_domain->ds_order[
            ++(*di->di_iter) % di->di_domain->ds_cnt];
}

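/*
 * Prefer policy fallback: the preferred domain was already tried by
 * vm_domainset_iter_first(), so round-robin over the remaining domains,
 * skipping ds_prefer itself.
 */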
static void
vm_domainset_iter_prefer(struct vm_domainset_iter *di, int *domain)
{
        int d;

        do {
                d = di->di_domain->ds_order[
                    ++(*di->di_iter) % di->di_domain->ds_cnt];
        } while (d == di->di_domain->ds_prefer);
        *domain = d;
}

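/*
 * Interleave: derive the starting domain from the offset computed in
 * vm_domainset_iter_init(), so that successive reservation-sized chunks
 * of an object start on successive domains in the search order.
 */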
static void
vm_domainset_iter_interleave(struct vm_domainset_iter *di, int *domain)
{
        int d;

        d = di->di_offset % di->di_domain->ds_cnt;
        *di->di_iter = d;
        *domain = di->di_domain->ds_order[d];
}

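/*
 * Advance to the next candidate domain. Every policy degrades to
 * round-robin here except PREFER, which must keep skipping the preferred
 * domain that was visited first.
 */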
static void
vm_domainset_iter_next(struct vm_domainset_iter *di, int *domain)
{

        KASSERT(di->di_n > 0,
            ("vm_domainset_iter_next: Invalid n %d", di->di_n));
        switch (di->di_policy) {
        case DOMAINSET_POLICY_FIRSTTOUCH:
                /*
                 * To prevent impossible allocations we convert an invalid
                 * first-touch to round-robin.
                 */
                /* FALLTHROUGH */
        case DOMAINSET_POLICY_INTERLEAVE:
                /* FALLTHROUGH */
        case DOMAINSET_POLICY_ROUNDROBIN:
                vm_domainset_iter_rr(di, domain);
                break;
        case DOMAINSET_POLICY_PREFER:
                vm_domainset_iter_prefer(di, domain);
                break;
        default:
                panic("vm_domainset_iter_next: Unknown policy %d",
                    di->di_policy);
        }
        KASSERT(*domain < vm_ndomains,
            ("vm_domainset_iter_next: Invalid domain %d", *domain));
}

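/*
 * Select the first candidate domain for a pass and set di_n, the number
 * of candidates to visit before the pass is considered complete.
 */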
static void
vm_domainset_iter_first(struct vm_domainset_iter *di, int *domain)
{

        switch (di->di_policy) {
        case DOMAINSET_POLICY_FIRSTTOUCH:
                *domain = PCPU_GET(domain);
                if (DOMAINSET_ISSET(*domain, &di->di_valid_mask)) {
                        /*
                         * Add an extra iteration because we will visit the
                         * current domain a second time in the rr iterator.
                         */
                        di->di_n = di->di_domain->ds_cnt + 1;
                        break;
                }
                /*
                 * To prevent impossible allocations we convert an invalid
                 * first-touch to round-robin.
                 */
                /* FALLTHROUGH */
        case DOMAINSET_POLICY_ROUNDROBIN:
                di->di_n = di->di_domain->ds_cnt;
                vm_domainset_iter_rr(di, domain);
                break;
        case DOMAINSET_POLICY_PREFER:
                *domain = di->di_domain->ds_prefer;
                di->di_n = di->di_domain->ds_cnt;
                break;
        case DOMAINSET_POLICY_INTERLEAVE:
                vm_domainset_iter_interleave(di, domain);
                di->di_n = di->di_domain->ds_cnt;
                break;
        default:
                panic("vm_domainset_iter_first: Unknown policy %d",
                    di->di_policy);
        }
        KASSERT(di->di_n > 0,
            ("vm_domainset_iter_first: Invalid n %d", di->di_n));
        KASSERT(*domain < vm_ndomains,
            ("vm_domainset_iter_first: Invalid domain %d", *domain));
}

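/*
 * Prepare an iterator for a page allocation. The first pass is forced to
 * NOWAIT by rewriting *req; vm_domainset_iter_page() only sleeps once
 * every allowed domain has been visited without success.
 */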
void
vm_domainset_iter_page_init(struct vm_domainset_iter *di, struct vm_object *obj,
    vm_pindex_t pindex, int *domain, int *req)
{
        struct domainset_ref *dr;

        /*
         * Object policy takes precedence over thread policy. The policies
         * are immutable and unsynchronized. Updates can race but pointer
         * loads are assumed to be atomic.
         */
        if (obj != NULL && obj->domain.dr_policy != NULL)
                dr = &obj->domain;
        else
                dr = &curthread->td_domain;
        vm_domainset_iter_init(di, dr->dr_policy, &dr->dr_iter, obj, pindex);
        di->di_flags = *req;
        *req = (di->di_flags & ~(VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) |
            VM_ALLOC_NOWAIT;
        vm_domainset_iter_first(di, domain);
        if (vm_page_count_min_domain(*domain))
                vm_domainset_iter_page(di, obj, domain);
}

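/*
 * Advance a page allocation iterator after a failed attempt. Returns 0
 * with *domain set to the next candidate, or ENOMEM once every domain in
 * di_valid_mask has been tried and the request does not allow waiting.
 * The object lock, if any, is dropped around the sleep in vm_wait_doms().
 */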
int
vm_domainset_iter_page(struct vm_domainset_iter *di, struct vm_object *obj,
    int *domain)
{
        if (__predict_false(DOMAINSET_EMPTY(&di->di_valid_mask)))
                return (ENOMEM);

        /* If there are more domains to visit we run the iterator. */
        while (--di->di_n != 0) {
                vm_domainset_iter_next(di, domain);
                if (DOMAINSET_ISSET(*domain, &di->di_valid_mask) &&
                    (!di->di_minskip || !vm_page_count_min_domain(*domain)))
                        return (0);
        }

        /* If we skipped domains below min restart the search. */
        if (di->di_minskip) {
                di->di_minskip = false;
                vm_domainset_iter_first(di, domain);
                return (0);
        }

        /* If we visited all domains and this was a NOWAIT we return error. */
        if ((di->di_flags & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) == 0)
                return (ENOMEM);

        /* Wait for one of the domains to accumulate some free pages. */
        if (obj != NULL)
                VM_OBJECT_WUNLOCK(obj);
        vm_wait_doms(&di->di_valid_mask, 0);
        if (obj != NULL)
                VM_OBJECT_WLOCK(obj);
        if ((di->di_flags & VM_ALLOC_WAITFAIL) != 0)
                return (ENOMEM);

        /* Restart the search. */
        vm_domainset_iter_first(di, domain);

        return (0);
}

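/*
 * Common tail for the policy iterator initializers: force the first pass
 * to M_NOWAIT and pick a starting domain, stepping past it immediately if
 * it is already below the minimum free page threshold.
 */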
static void
_vm_domainset_iter_policy_init(struct vm_domainset_iter *di, int *domain,
    int *flags)
{

        di->di_flags = *flags;
        *flags = (di->di_flags & ~M_WAITOK) | M_NOWAIT;
        vm_domainset_iter_first(di, domain);
        if (vm_page_count_min_domain(*domain))
                vm_domainset_iter_policy(di, domain);
}

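/*
 * Initialize a policy iterator for allocations that use malloc(9)-style
 * M_WAITOK/M_NOWAIT flags, drawing the iterator state from the calling
 * thread.
 */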
void
vm_domainset_iter_policy_init(struct vm_domainset_iter *di,
    struct domainset *ds, int *domain, int *flags)
{

        vm_domainset_iter_init(di, ds, &curthread->td_domain.dr_iter, NULL, 0);
        _vm_domainset_iter_policy_init(di, domain, flags);
}

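/*
 * As above, but take both the policy and the iterator state from an
 * existing domainset_ref.
 */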
void
vm_domainset_iter_policy_ref_init(struct vm_domainset_iter *di,
    struct domainset_ref *dr, int *domain, int *flags)
{

        vm_domainset_iter_init(di, dr->dr_policy, &dr->dr_iter, NULL, 0);
        _vm_domainset_iter_policy_init(di, domain, flags);
}

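/*
 * Advance a policy iterator after a failed attempt; the M_WAITOK
 * counterpart of vm_domainset_iter_page(). No object lock is involved,
 * so the iterator may sleep in vm_wait_doms() directly.
 */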
int
vm_domainset_iter_policy(struct vm_domainset_iter *di, int *domain)
{
        if (DOMAINSET_EMPTY(&di->di_valid_mask))
                return (ENOMEM);

        /* If there are more domains to visit we run the iterator. */
        while (--di->di_n != 0) {
                vm_domainset_iter_next(di, domain);
                if (DOMAINSET_ISSET(*domain, &di->di_valid_mask) &&
                    (!di->di_minskip || !vm_page_count_min_domain(*domain)))
                        return (0);
        }

        /* If we skipped domains below min restart the search. */
        if (di->di_minskip) {
                di->di_minskip = false;
                vm_domainset_iter_first(di, domain);
                return (0);
        }

        /* If we visited all domains and this was a NOWAIT we return error. */
        if ((di->di_flags & M_WAITOK) == 0)
                return (ENOMEM);

        /* Wait for one of the domains to accumulate some free pages. */
        vm_wait_doms(&di->di_valid_mask, 0);

        /* Restart the search. */
        vm_domainset_iter_first(di, domain);

        return (0);
}

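/*
 * Remove a domain from the iterator's set of candidates so that later
 * passes, and any waits, no longer consider it.
 */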
void
vm_domainset_iter_ignore(struct vm_domainset_iter *di, int domain)
{
        KASSERT(DOMAINSET_ISSET(domain, &di->di_valid_mask),
            ("%s: domain %d not present in di_valid_mask for di %p",
            __func__, domain, di));
        DOMAINSET_CLR(domain, &di->di_valid_mask);
}

#else /* !NUMA */

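/*
 * Stubs for kernels built without NUMA support: there is only domain 0,
 * and the iterators return EJUSTRETURN to indicate that the caller's
 * single attempt is all there is.
 */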
int
vm_domainset_iter_page(struct vm_domainset_iter *di, struct vm_object *obj,
    int *domain)
{

        return (EJUSTRETURN);
}

void
vm_domainset_iter_page_init(struct vm_domainset_iter *di, struct vm_object *obj,
    vm_pindex_t pindex, int *domain, int *flags)
{

        *domain = 0;
}

int
vm_domainset_iter_policy(struct vm_domainset_iter *di, int *domain)
{

        return (EJUSTRETURN);
}

void
vm_domainset_iter_policy_init(struct vm_domainset_iter *di,
    struct domainset *ds, int *domain, int *flags)
{

        *domain = 0;
}

void
vm_domainset_iter_policy_ref_init(struct vm_domainset_iter *di,
    struct domainset_ref *dr, int *domain, int *flags)
{

        *domain = 0;
}

void
vm_domainset_iter_ignore(struct vm_domainset_iter *di __unused,
    int domain __unused)
{
}

#endif /* NUMA */