Mirror of https://git.hardenedbsd.org/hardenedbsd/HardenedBSD.git
Synced 2024-11-25 01:55:19 +01:00

Merge branch 'freebsd/current/main' into hardened/current/master

This commit is contained in:
commit b8b30db9db
@@ -2074,7 +2074,7 @@ real-update-packages: stage-packages .PHONY
continue; \
fi ; \
newsum=$$(pkg query -F ${REPODIR}/${PKG_ABI}/${PKG_VERSION}/$${newpkgname} '%X') ; \
if [ "${BRANCH_EXT_FROM}" == "${BRANCH_EXT}" -a $${oldsum}" == "$${newsum}" ]; then \
if [ "${BRANCH_EXT_FROM}" == "${BRANCH_EXT}" -a "$${oldsum}" == "$${newsum}" ]; then \
echo "==> Keeping old ${PKG_NAME_PREFIX}-$${pkgname}-${PKG_VERSION_FROM}.${PKG_EXT}" ; \
rm ${REPODIR}/${PKG_ABI}/${PKG_VERSION}/$${newpkgname} ; \
cp $${pkg} ${REPODIR}/${PKG_ABI}/${PKG_VERSION} ; \
@@ -60,6 +60,9 @@
#define HAVE_LONG_FILE_NAMES 1
#define MIXEDCASE_FILENAMES 1
#define STDC_HEADERS 1
#define USE_GETCAP 1
#define USE_BSD_TPUTS 1
#define HAVE_BSD_CGETENT 1
#define HAVE_SYS_TYPES_H 1
#define HAVE_SYS_STAT_H 1
#define HAVE_STDLIB_H 1
@@ -1324,13 +1324,12 @@ pmap_bootstrap(vm_size_t kernlen)
min_pa = pmap_early_vtophys(KERNBASE);

physmap_idx = physmem_avail(physmap, nitems(physmap));
physmap_idx /= 2;

/*
 * Find the minimum physical address. physmap is sorted,
 * but may contain empty ranges.
 */
for (i = 0; i < physmap_idx * 2; i += 2) {
for (i = 0; i < physmap_idx; i += 2) {
if (physmap[i] == physmap[i + 1])
continue;
if (physmap[i] <= min_pa)

@@ -1440,7 +1439,6 @@ pmap_bootstrap_san1(vm_offset_t va, int scale)
 * allocation since pmap_bootstrap().
 */
physmap_idx = physmem_avail(physmap, nitems(physmap));
physmap_idx /= 2;

eva = va + (virtual_avail - VM_MIN_KERNEL_ADDRESS) / scale;

@@ -1449,7 +1447,7 @@ pmap_bootstrap_san1(vm_offset_t va, int scale)
 * the shadow map as high up as we can to avoid depleting the lower 4GB in case
 * it's needed for, e.g., an xhci controller that can only do 32-bit DMA.
 */
for (i = (physmap_idx * 2) - 2; i >= 0; i -= 2) {
for (i = physmap_idx - 2; i >= 0; i -= 2) {
vm_paddr_t plow, phigh;

/* L2 mappings must be backed by memory that is L2-aligned */
@@ -177,7 +177,8 @@ physmem_print_tables(void)
 *
 * Updates the value at *pavail with the sum of all pages in all hw regions.
 *
 * Returns the number of pages of non-excluded memory added to the avail list.
 * Returns the number of entries in the avail list, which is twice the number
 * of returned regions.
 */
static size_t
regions_to_avail(vm_paddr_t *avail, uint32_t exflags, size_t maxavail,
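
For context (not part of the commit): with the clarified contract above, physmem_avail() now hands back an entry count rather than a region count, so callers walk the (start, end) pairs directly, exactly as the arm64 pmap_bootstrap() loops above were changed to do. A minimal sketch:

vm_paddr_t physmap[PHYS_AVAIL_ENTRIES];
int physmap_idx, i;

physmap_idx = physmem_avail(physmap, nitems(physmap));
for (i = 0; i < physmap_idx; i += 2) {
	if (physmap[i] == physmap[i + 1])
		continue;	/* skip empty ranges */
	/* use the range [physmap[i], physmap[i + 1]) */
}
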
@@ -134,9 +134,10 @@ enum rt_scope_t {

/*
 * Routing table identifiers.
 * FreeBSD route table numbering starts from 0, where 0 is a valid default routing table.
 * Indicating "all tables" via netlink can be done by not including RTA_TABLE attribute
 * and keeping rtm_table=0 (compatibility) or setting RTA_TABLE value to RT_TABLE_UNSPEC.
 * FreeBSD route table numbering starts from 0, where 0 is a valid default
 * routing table. Indicating "all tables" via netlink can be done by not
 * including RTA_TABLE attribute and keeping rtm_table=0 (compatibility) or
 * setting RTA_TABLE value to RT_TABLE_UNSPEC.
 */
#define RT_TABLE_MAIN 0 /* RT_DEFAULT_FIB */
#define RT_TABLE_UNSPEC 0xFFFFFFFF /* RT_ALL_FIBS */
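
As an aside (not part of the commit), the table-selection rule described in the comment above can be condensed into a small helper; the name nl_req_fib() is hypothetical, but the precedence mirrors the rtnl_handle_newroute() change further down: an explicit RTA_TABLE wins, an old-style non-zero rtm_table is honoured for pre-2.6.19 Linux compatibility, and RT_TABLE_UNSPEC means every table.

static uint32_t
nl_req_fib(uint32_t rta_table, uint8_t rtm_table)
{
	if (rta_table == 0 && rtm_table > 0)
		return (rtm_table);	/* pre-2.6.19 Linux API compatibility */
	return (rta_table);		/* may be RT_TABLE_UNSPEC: "all tables" */
}
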
@@ -475,6 +475,7 @@ struct nl_parsed_route {
uint32_t rta_nh_id;
uint32_t rta_weight;
uint32_t rtax_mtu;
uint8_t rtm_table;
uint8_t rtm_family;
uint8_t rtm_dst_len;
uint8_t rtm_protocol;

@@ -507,6 +508,7 @@ static const struct nlfield_parser nlf_p_rtmsg[] = {
{ .off_in = _IN(rtm_dst_len), .off_out = _OUT(rtm_dst_len), .cb = nlf_get_u8 },
{ .off_in = _IN(rtm_protocol), .off_out = _OUT(rtm_protocol), .cb = nlf_get_u8 },
{ .off_in = _IN(rtm_type), .off_out = _OUT(rtm_type), .cb = nlf_get_u8 },
{ .off_in = _IN(rtm_table), .off_out = _OUT(rtm_table), .cb = nlf_get_u8 },
{ .off_in = _IN(rtm_flags), .off_out = _OUT(rtm_flags), .cb = nlf_get_u32 },
};
#undef _IN

@@ -937,7 +939,10 @@ rtnl_handle_newroute(struct nlmsghdr *hdr, struct nlpcb *nlp,
return (EINVAL);
}

if (attrs.rta_table >= V_rt_numfibs) {
if (attrs.rtm_table > 0 && attrs.rta_table == 0) {
/* pre-2.6.19 Linux API compatibility */
attrs.rta_table = attrs.rtm_table;
} else if (attrs.rta_table >= V_rt_numfibs) {
NLMSG_REPORT_ERR_MSG(npt, "invalid fib");
return (EINVAL);
}
@@ -36,10 +36,8 @@
#define _MACHINE_MACHDEP_H_

struct riscv_bootparams {
vm_offset_t kern_l1pt; /* Kernel L1 base */
vm_offset_t kern_phys; /* Kernel base (physical) addr */
vm_offset_t kern_stack;
vm_offset_t dtbp_virt; /* Device tree blob virtual addr */
vm_offset_t dtbp_phys; /* Device tree blob physical addr */
vm_offset_t modulep; /* loader(8) metadata */
};
@@ -133,7 +133,7 @@ struct thread;

void pmap_activate_boot(pmap_t);
void pmap_activate_sw(struct thread *);
void pmap_bootstrap(vm_offset_t, vm_paddr_t, vm_size_t);
void pmap_bootstrap(vm_paddr_t, vm_size_t);
int pmap_change_attr(vm_offset_t va, vm_size_t size, int mode);
void pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode);
void pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t);
@@ -93,5 +93,3 @@ typedef uint64_t pn_t; /* page number */
#define PTE_SIZE 8

#endif /* !_MACHINE_PTE_H_ */

/* End of pte.h */
@@ -209,8 +209,6 @@
#define PS_STRINGS_SV39 (USRSTACK_SV39 - sizeof(struct ps_strings))
#define PS_STRINGS_SV48 (USRSTACK_SV48 - sizeof(struct ps_strings))

#define VM_EARLY_DTB_ADDRESS (VM_MAX_KERNEL_ADDRESS - (2 * L2_SIZE))

/*
 * How many physical pages per kmem arena virtual page.
 */

@@ -240,13 +238,16 @@
extern vm_paddr_t dmap_phys_base;
extern vm_paddr_t dmap_phys_max;
extern vm_offset_t dmap_max_addr;
extern vm_offset_t init_pt_va;
#endif

#define ZERO_REGION_SIZE (64 * 1024) /* 64KB */

/*
 * The top of KVA is reserved for early device mappings.
 */
#define DEVMAP_MAX_VADDR VM_MAX_KERNEL_ADDRESS
#define PMAP_MAPDEV_EARLY_SIZE L2_SIZE
#define DEVMAP_MIN_VADDR (DEVMAP_MAX_VADDR - PMAP_MAPDEV_EARLY_SIZE)
#define PMAP_MAPDEV_EARLY_SIZE (4 * L2_SIZE)

/*
 * No non-transparent large page support in the pmap.
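
A quick sanity sketch (not from the commit, assuming Sv39's 2 MiB L2_SIZE): the early devmap now spans four L2 superpages, i.e. the top 8 MiB of KVA, and both bounds stay L2-aligned, matching the CTASSERT retained in pmap.c below.

_Static_assert((PMAP_MAPDEV_EARLY_SIZE & L2_OFFSET) == 0,
    "early devmap is a whole number of L2 superpages");
_Static_assert(DEVMAP_MIN_VADDR == DEVMAP_MAX_VADDR - 4 * L2_SIZE,
    "devmap occupies the top 4 * 2MB = 8MB of KVA");
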
@@ -58,9 +58,10 @@
ASSYM(KERNBASE, KERNBASE);
ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS);
ASSYM(VM_MAX_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS);
ASSYM(VM_EARLY_DTB_ADDRESS, VM_EARLY_DTB_ADDRESS);
ASSYM(PMAP_MAPDEV_EARLY_SIZE, PMAP_MAPDEV_EARLY_SIZE);

ASSYM(PM_SATP, offsetof(struct pmap, pm_satp));

ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
ASSYM(PCB_SIZE, sizeof(struct pcb));
ASSYM(PCB_RA, offsetof(struct pcb, pcb_ra));

@@ -98,10 +99,8 @@ ASSYM(TF_SCAUSE, offsetof(struct trapframe, tf_scause));
ASSYM(TF_SSTATUS, offsetof(struct trapframe, tf_sstatus));

ASSYM(RISCV_BOOTPARAMS_SIZE, sizeof(struct riscv_bootparams));
ASSYM(RISCV_BOOTPARAMS_KERN_L1PT, offsetof(struct riscv_bootparams, kern_l1pt));
ASSYM(RISCV_BOOTPARAMS_KERN_PHYS, offsetof(struct riscv_bootparams, kern_phys));
ASSYM(RISCV_BOOTPARAMS_KERN_STACK, offsetof(struct riscv_bootparams,
kern_stack));
ASSYM(RISCV_BOOTPARAMS_DTBP_VIRT, offsetof(struct riscv_bootparams, dtbp_virt));
ASSYM(RISCV_BOOTPARAMS_DTBP_PHYS, offsetof(struct riscv_bootparams, dtbp_phys));
ASSYM(RISCV_BOOTPARAMS_MODULEP, offsetof(struct riscv_bootparams, modulep));
@@ -1,6 +1,10 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015-2018 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 * Copyright (c) 2019-2021 Mitchell Horne <mhorne@FreeBSD.org>
 * Copyright (c) 2022-2024 The FreeBSD Foundation
 *
 * Portions of this software were developed by SRI International and the
 * University of Cambridge Computer Laboratory under DARPA/AFRL contract

@@ -10,6 +14,9 @@
 * Computer Laboratory as part of the CTSRD Project, with support from the
 * UK Higher Education Innovation Fund (HEIF).
 *
 * Portions of this software were developed by Mitchell Horne
 * <mhorne@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:

@@ -36,7 +43,6 @@

#include <machine/asm.h>
#include <machine/param.h>
#include <machine/trap.h>
#include <machine/riscvreg.h>
#include <machine/pte.h>

@@ -104,26 +110,51 @@ _start:
mv a1, zero

/*
 * Set up page tables: map a 1GB region starting at KERNBASE using 2MB
 * superpages, starting from the first 2MB physical page into which the
 * kernel was loaded. Also reserve an L2 page for the early device map
 * and map the DTB, if any, using the second-last entry of that L2
 * page. This is hopefully enough to get us to pmap_bootstrap().
 * Set up page tables: Our goal is to enable virtual memory, doing the
 * minimum amount of work in assembly; just what is required to
 * bootstrap. We will construct the real page tables in C code, in
 * pmap_bootstrap().
 *
 * Implementations are required to provide SV39 mode, so we use that
 * initially and will optionally enable SV48 mode during kernel pmap
 * initialization.
 * Here we map a 1GB region starting at KERNBASE using 2MB superpages,
 * starting from the first 2MB physical page into which the kernel was
 * loaded.
 *
 * We also use an L1 entry to create a 1GB identity map (1:1 PA->VA).
 * This is useful for two reasons:
 * - handling the DTB pointer passed from SBI firmware (physical addr)
 * - simpler construction of pagetables in pmap_bootstrap()
 *
 * Implementations are required to provide Sv39 mode, so we use that
 * here and will conditionally enable Sv48 (or higher) later.
 *
 * We arrive here with:
 * a0 - modulep or zero
 * a1 - zero or dtbp
 */
pagetables:
/* Get the kernel's load address */
/* Get the kernel's load address (kernstart) in s9 */
jal get_physmem

/* Construct 1GB Identity Map (1:1 PA->VA) */
lla s1, bootstrap_pt_l1

srli s2, s9, L1_SHIFT /* kernstart >> L1_SHIFT */
andi a5, s2, Ln_ADDR_MASK /* & Ln_ADDR_MASK */
li t4, (PTE_KERN)
slli s2, s2, PTE_PPN2_S /* (s2 << PTE_PPN2_S) */
or t6, t4, s2

/* Store L1 PTE entry to position */
li a6, PTE_SIZE
mulw a5, a5, a6 /* calculate L1 slot */
add t0, s1, a5
sd t6, (t0) /* Store new PTE */

/* Construct the virtual address space at KERNBASE */

/* Add L1 entry for kernel */
lla s1, pagetable_l1
lla s2, pagetable_l2 /* Link to next level PN */
lla s1, bootstrap_pt_l1
lla s2, bootstrap_pt_l2 /* Link to next level PN */
srli s2, s2, PAGE_SHIFT

li a5, KERNBASE

@@ -140,9 +171,9 @@ pagetables:
sd t6, (t0)

/* Level 2 superpages (512 x 2MiB) */
lla s1, pagetable_l2
lla s1, bootstrap_pt_l2
srli t4, s9, L2_SHIFT /* Div physmem base by 2 MiB */
li t2, 512 /* Build 512 entries */
li t2, Ln_ENTRIES /* Build 512 entries */
add t3, t4, t2
li t0, (PTE_KERN | PTE_X)
1:

@@ -154,47 +185,6 @@ pagetables:
addi t4, t4, 1
bltu t4, t3, 1b

/* Create an L1 table entry for early devmap */
lla s1, pagetable_l1
lla s2, pagetable_l2_devmap /* Link to next level PN */
srli s2, s2, PAGE_SHIFT

li a5, (VM_MAX_KERNEL_ADDRESS - PMAP_MAPDEV_EARLY_SIZE)
srli a5, a5, L1_SHIFT /* >> L1_SHIFT */
andi a5, a5, Ln_ADDR_MASK /* & Ln_ADDR_MASK */
li t4, PTE_V
slli t5, s2, PTE_PPN0_S /* (s2 << PTE_PPN0_S) */
or t6, t4, t5

/* Store the L1 table entry */
li a6, PTE_SIZE
mulw a5, a5, a6
add t0, s1, a5
sd t6, (t0)

/* Check if we have a DTB that needs to be mapped */
beqz a1, 2f

/* Create an L2 mapping for the DTB */
lla s1, pagetable_l2_devmap
mv s2, a1
srli s2, s2, PAGE_SHIFT
/* Mask off any bits that aren't aligned */
andi s2, s2, ~((1 << (PTE_PPN1_S - PTE_PPN0_S)) - 1)

li t0, (PTE_KERN)
slli t2, s2, PTE_PPN0_S /* << PTE_PPN0_S */
or t0, t0, t2

/* Store the L2 table entry for the DTB */
li a6, PTE_SIZE
li a5, VM_EARLY_DTB_ADDRESS
srli a5, a5, L2_SHIFT /* >> L2_SHIFT */
andi a5, a5, Ln_ADDR_MASK /* & Ln_ADDR_MASK */
mulw a5, a5, a6
add t1, s1, a5
sd t0, (t1)

/* Page tables END */

/*

@@ -202,7 +192,6 @@ pagetables:
 * may generate a page fault. We simply wish to continue onwards, so
 * have the trap deliver us to 'va'.
 */
2:
lla t0, va
sub t0, t0, s9
li t1, KERNBASE

@@ -210,7 +199,7 @@ pagetables:
csrw stvec, t0

/* Set page tables base register */
lla s2, pagetable_l1
lla s2, bootstrap_pt_l1
srli s2, s2, PAGE_SHIFT
li t0, SATP_MODE_SV39
or s2, s2, t0

@@ -251,21 +240,11 @@ va:
bltu t0, t1, 1b

/* Fill riscv_bootparams */
la t0, pagetable_l1
sd t0, RISCV_BOOTPARAMS_KERN_L1PT(sp)
sd s9, RISCV_BOOTPARAMS_KERN_PHYS(sp)

la t0, initstack
sd t0, RISCV_BOOTPARAMS_KERN_STACK(sp)

li t0, (VM_EARLY_DTB_ADDRESS)
/* Add offset of DTB within superpage */
li t1, (L2_OFFSET)
and t1, a1, t1
add t0, t0, t1
sd t0, RISCV_BOOTPARAMS_DTBP_VIRT(sp)
sd a1, RISCV_BOOTPARAMS_DTBP_PHYS(sp)

sd a0, RISCV_BOOTPARAMS_MODULEP(sp)

mv a0, sp

@@ -293,12 +272,13 @@ initstack:
.space (PAGE_SIZE * KSTACK_PAGES)
initstack_end:

.align 12
pagetable_l1:
/*
 * Static space for the bootstrap page tables. Unused after pmap_bootstrap().
 */
.balign PAGE_SIZE
bootstrap_pt_l1:
.space PAGE_SIZE
pagetable_l2:
.space PAGE_SIZE
pagetable_l2_devmap:
bootstrap_pt_l2:
.space PAGE_SIZE

.align 3

@@ -307,10 +287,6 @@ virt_map:
hart_lottery:
.space 4

.globl init_pt_va
init_pt_va:
.quad pagetable_l2 /* XXX: Keep page tables VA */

#ifndef SMP
ENTRY(mpentry)
1:

@@ -358,10 +334,8 @@ ENTRY(mpentry)
csrw stvec, t0

/* Set page tables base register */
lla s2, pagetable_l1
srli s2, s2, PAGE_SHIFT
li t0, SATP_MODE_SV39
or s2, s2, t0
lla t2, kernel_pmap_store
ld s2, PM_SATP(t2)
sfence.vma
csrw satp, s2
@@ -380,13 +380,16 @@ fake_preload_metadata(struct riscv_bootparams *rvbp)
PRELOAD_PUSH_VALUE(uint32_t, sizeof(size_t));
PRELOAD_PUSH_VALUE(uint64_t, (size_t)((vm_offset_t)&end - KERNBASE));

/* Copy the DTB to KVA space. */
/*
 * Copy the DTB to KVA space. We are able to dereference the physical
 * address due to the identity map created in locore.
 */
lastaddr = roundup(lastaddr, sizeof(int));
PRELOAD_PUSH_VALUE(uint32_t, MODINFO_METADATA | MODINFOMD_DTBP);
PRELOAD_PUSH_VALUE(uint32_t, sizeof(vm_offset_t));
PRELOAD_PUSH_VALUE(vm_offset_t, lastaddr);
dtb_size = fdt_totalsize(rvbp->dtbp_virt);
memmove((void *)lastaddr, (const void *)rvbp->dtbp_virt, dtb_size);
dtb_size = fdt_totalsize(rvbp->dtbp_phys);
memmove((void *)lastaddr, (const void *)rvbp->dtbp_phys, dtb_size);
lastaddr = roundup(lastaddr + dtb_size, sizeof(int));

PRELOAD_PUSH_VALUE(uint32_t, MODINFO_METADATA | MODINFOMD_KERNEND);

@@ -564,7 +567,7 @@ initriscv(struct riscv_bootparams *rvbp)

/* Bootstrap enough of pmap to enter the kernel proper */
kernlen = (lastaddr - KERNBASE);
pmap_bootstrap(rvbp->kern_l1pt, rvbp->kern_phys, kernlen);
pmap_bootstrap(rvbp->kern_phys, kernlen);

physmem_init_kernel_globals();
@@ -243,13 +243,9 @@ CTASSERT((DMAP_MIN_ADDRESS & ~L1_OFFSET) == DMAP_MIN_ADDRESS);
CTASSERT((DMAP_MAX_ADDRESS & ~L1_OFFSET) == DMAP_MAX_ADDRESS);

/*
 * This code assumes that the early DEVMAP is L2_SIZE aligned and is fully
 * contained within a single L2 entry. The early DTB is mapped immediately
 * before the devmap L2 entry.
 * This code assumes that the early DEVMAP is L2_SIZE aligned.
 */
CTASSERT((PMAP_MAPDEV_EARLY_SIZE & L2_OFFSET) == 0);
CTASSERT((VM_EARLY_DTB_ADDRESS & L2_OFFSET) == 0);
CTASSERT(VM_EARLY_DTB_ADDRESS < (VM_MAX_KERNEL_ADDRESS - PMAP_MAPDEV_EARLY_SIZE));

static struct rwlock_padalign pvh_global_lock;
static struct mtx_padalign allpmaps_lock;

@@ -327,6 +323,8 @@ static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);

static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);

static uint64_t pmap_satp_mode(void);

#define pmap_clear(pte) pmap_store(pte, 0)
#define pmap_clear_bits(pte, bits) atomic_clear_64(pte, bits)
#define pmap_load_store(pte, entry) atomic_swap_64(pte, entry)

@@ -364,6 +362,28 @@ pagezero(void *p)
((((l2) & ~PTE_HI_MASK) >> PTE_PPN1_S) << L2_SHIFT)
#define PTE_TO_VM_PAGE(pte) PHYS_TO_VM_PAGE(PTE_TO_PHYS(pte))

/*
 * Construct a page table entry of the specified level pointing to physical
 * address pa, with PTE bits 'bits'.
 *
 * A leaf PTE of any level must point to an address matching its alignment,
 * e.g. L2 pages must be 2MB aligned in memory.
 */
#define L1_PTE(pa, bits) ((((pa) >> L1_SHIFT) << PTE_PPN2_S) | (bits))
#define L2_PTE(pa, bits) ((((pa) >> L2_SHIFT) << PTE_PPN1_S) | (bits))
#define L3_PTE(pa, bits) ((((pa) >> L3_SHIFT) << PTE_PPN0_S) | (bits))

/*
 * Construct a page directory entry (PDE), pointing to next level entry at pa,
 * with PTE bits 'bits'.
 *
 * Unlike PTEs, page directory entries can point to any 4K-aligned physical
 * address.
 */
#define L0_PDE(pa, bits) L3_PTE(pa, bits)
#define L1_PDE(pa, bits) L3_PTE(pa, bits)
#define L2_PDE(pa, bits) L3_PTE(pa, bits)
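
/*
 * Worked example (illustration, not in the commit), assuming the usual Sv39
 * field offset PTE_PPN1_S = 19 and a 2MB-aligned load address:
 *
 *   L2_PTE(0x80200000, PTE_KERN | PTE_X)
 *     = ((0x80200000 >> L2_SHIFT) << PTE_PPN1_S) | PTE_KERN | PTE_X
 *     = (0x401 << 19) | PTE_KERN | PTE_X
 *     = 0x20080000 | PTE_KERN | PTE_X
 *
 * i.e. a leaf superpage entry whose PPN bits encode the 2MB frame.
 */
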
static __inline pd_entry_t *
pmap_l0(pmap_t pmap, vm_offset_t va)
{

@@ -504,135 +524,290 @@ pmap_distribute_l1(struct pmap *pmap, vm_pindex_t l1index,
mtx_unlock(&allpmaps_lock);
}

/*
 * This should only be used during pmap bootstrap e.g. by
 * pmap_create_pagetables().
 */
static pt_entry_t *
pmap_early_page_idx(vm_offset_t l1pt, vm_offset_t va, u_int *l1_slot,
u_int *l2_slot)
pmap_early_alloc_tables(vm_paddr_t *freemempos, int npages)
{
pt_entry_t *l2;
pd_entry_t *l1 __diagused;
pt_entry_t *pt;

l1 = (pd_entry_t *)l1pt;
*l1_slot = (va >> L1_SHIFT) & Ln_ADDR_MASK;
pt = (pt_entry_t *)*freemempos;
*freemempos += npages * PAGE_SIZE;
bzero(pt, npages * PAGE_SIZE);

/* Check locore has used a table L1 map */
KASSERT((l1[*l1_slot] & PTE_RX) == 0,
("Invalid bootstrap L1 table"));

/* Find the address of the L2 table */
l2 = (pt_entry_t *)init_pt_va;
*l2_slot = pmap_l2_index(va);

return (l2);
return (pt);
}

/*
 * Construct the direct map -- a linear mapping of physical memory into
 * the kernel address space.
 *
 * We walk the list of physical memory segments (of arbitrary size and
 * address) mapping each appropriately using L2 and L1 superpages.
 * Consequently, the DMAP address space will have unmapped regions
 * corresponding to any holes between physical memory segments.
 *
 * The lowest usable physical address will always be mapped to
 * DMAP_MIN_ADDRESS.
 */
static vm_paddr_t
pmap_early_vtophys(vm_offset_t l1pt, vm_offset_t va)
{
u_int l1_slot, l2_slot;
pt_entry_t *l2;
vm_paddr_t ret;

l2 = pmap_early_page_idx(l1pt, va, &l1_slot, &l2_slot);

/* Check locore has used L2 superpages */
KASSERT((l2[l2_slot] & PTE_RX) != 0,
("Invalid bootstrap L2 table"));

/* L2 is superpages */
ret = L2PTE_TO_PHYS(l2[l2_slot]);
ret += (va & L2_OFFSET);

return (ret);
}

static void
pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa, vm_paddr_t max_pa)
pmap_bootstrap_dmap(pd_entry_t *l1, vm_paddr_t freemempos)
{
vm_paddr_t physmap[PHYS_AVAIL_ENTRIES];
vm_offset_t va;
vm_paddr_t pa;
pd_entry_t *l1;
u_int l1_slot;
pt_entry_t entry;
pn_t pn;
vm_paddr_t min_pa, max_pa, pa, endpa;
pd_entry_t *l2;
u_int l1slot, l2slot;
int physmap_idx;

pa = dmap_phys_base = min_pa & ~L1_OFFSET;
va = DMAP_MIN_ADDRESS;
l1 = (pd_entry_t *)kern_l1;
l1_slot = pmap_l1_index(DMAP_MIN_ADDRESS);
physmap_idx = physmem_avail(physmap, nitems(physmap));
min_pa = physmap[0];
max_pa = physmap[physmap_idx - 1];

for (; va < DMAP_MAX_ADDRESS && pa < max_pa;
pa += L1_SIZE, va += L1_SIZE, l1_slot++) {
KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index"));
printf("physmap_idx %u\n", physmap_idx);
printf("min_pa %lx\n", min_pa);
printf("max_pa %lx\n", max_pa);

/* superpages */
pn = (pa / PAGE_SIZE);
entry = PTE_KERN;
entry |= (pn << PTE_PPN0_S);
pmap_store(&l1[l1_slot], entry);
/* Set the limits of the DMAP region. */
dmap_phys_base = rounddown(min_pa, L1_SIZE);
dmap_phys_max = max_pa;

/* Walk the physmap table. */
l2 = NULL;
l1slot = Ln_ENTRIES; /* sentinel value */
for (int idx = 0; idx < physmap_idx; idx += 2) {
pa = rounddown(physmap[idx], L2_SIZE);
endpa = physmap[idx + 1];

/* Virtual address for this range. */
va = PHYS_TO_DMAP(pa);

/* Any 1GB possible for this range? */
if (roundup(pa, L1_SIZE) + L1_SIZE > endpa)
goto l2end;

/* Loop until the next 1GB boundary. */
while ((pa & L1_OFFSET) != 0) {
if (l2 == NULL || pmap_l1_index(va) != l1slot) {
/* Need to alloc another page table. */
l2 = pmap_early_alloc_tables(&freemempos, 1);

/* Link it. */
l1slot = pmap_l1_index(va);
pmap_store(&l1[l1slot],
L1_PDE((vm_paddr_t)l2, PTE_V));
}

/* map l2 pages */
l2slot = pmap_l2_index(va);
pmap_store(&l2[l2slot], L2_PTE(pa, PTE_KERN));

pa += L2_SIZE;
va += L2_SIZE;
}

/* Map what we can with 1GB superpages. */
while (pa + L1_SIZE - 1 < endpa) {
/* map l1 pages */
l1slot = pmap_l1_index(va);
pmap_store(&l1[l1slot], L1_PTE(pa, PTE_KERN));

pa += L1_SIZE;
va += L1_SIZE;
}

l2end:
while (pa < endpa) {
if (l2 == NULL || pmap_l1_index(va) != l1slot) {
/* Need to alloc another page table. */
l2 = pmap_early_alloc_tables(&freemempos, 1);

/* Link it. */
l1slot = pmap_l1_index(va);
pmap_store(&l1[l1slot],
L1_PDE((vm_paddr_t)l2, PTE_V));
}

/* map l2 pages */
l2slot = pmap_l2_index(va);
pmap_store(&l2[l2slot], L2_PTE(pa, PTE_KERN));

pa += L2_SIZE;
va += L2_SIZE;
}
}

/* Set the upper limit of the DMAP region */
dmap_phys_max = pa;
/* And finally, the limit on DMAP VA. */
dmap_max_addr = va;

sfence_vma();
return (freemempos);
}
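
/*
 * Illustration only (not part of the commit): the DMAP walk above relies on
 * the usual linear relation between a physical address and its direct-map VA
 * once dmap_phys_base has been set, roughly as sketched below.
 */
static inline vm_offset_t
dmap_va_sketch(vm_paddr_t pa)
{
	/* assumes dmap_phys_base <= pa < dmap_phys_max */
	return (DMAP_MIN_ADDRESS + (pa - dmap_phys_base));
}
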
static vm_offset_t
pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start)
/*
 * Create a new set of pagetables to run the kernel with.
 *
 * An initial, temporary setup was created in locore.S, which serves well
 * enough to get us this far. It mapped kernstart -> KERNBASE, using 2MB
 * superpages, and created a 1GB identity map, which allows this function
 * to dereference physical addresses.
 *
 * The memory backing these page tables is allocated in the space
 * immediately following the kernel's preload area. Depending on the size
 * of this area, some, all, or none of these pages can be implicitly
 * mapped by the kernel's 2MB mappings. This memory will only ever be
 * accessed through the direct map, however.
 */
static vm_paddr_t
pmap_create_pagetables(vm_paddr_t kernstart, vm_size_t kernlen,
vm_paddr_t *root_pt_phys)
{
vm_offset_t l3pt;
pt_entry_t entry;
pd_entry_t *l2;
vm_paddr_t pa;
u_int l2_slot;
pn_t pn;
pt_entry_t *l0, *l1, *kern_l2, *kern_l3, *devmap_l3;
pd_entry_t *devmap_l2;
vm_paddr_t kernend, freemempos, pa;
int nkernl2, nkernl3, ndevmapl3;
int i, slot;
int mode;

KASSERT((va & L2_OFFSET) == 0, ("Invalid virtual address"));
kernend = kernstart + kernlen;

l2 = pmap_l2(kernel_pmap, va);
l2 = (pd_entry_t *)((uintptr_t)l2 & ~(PAGE_SIZE - 1));
l2_slot = pmap_l2_index(va);
l3pt = l3_start;
/* Static allocations begin after the kernel staging area. */
freemempos = roundup2(kernend, PAGE_SIZE);

for (; va < VM_MAX_KERNEL_ADDRESS; l2_slot++, va += L2_SIZE) {
KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index"));
/* Detect Sv48 mode. */
mode = PMAP_MODE_SV39;
TUNABLE_INT_FETCH("vm.pmap.mode", &mode);

pa = pmap_early_vtophys(l1pt, l3pt);
pn = (pa / PAGE_SIZE);
entry = (PTE_V);
entry |= (pn << PTE_PPN0_S);
pmap_store(&l2[l2_slot], entry);
l3pt += PAGE_SIZE;
if (mode == PMAP_MODE_SV48 && (mmu_caps & MMU_SV48) != 0) {
/*
 * Sv48 mode: allocate an L0 page table to be the root. The
 * layout of KVA is otherwise identical to Sv39.
 */
l0 = pmap_early_alloc_tables(&freemempos, 1);
*root_pt_phys = (vm_paddr_t)l0;
pmap_mode = PMAP_MODE_SV48;
} else {
l0 = NULL;
}

/* Clean the L2 page table */
memset((void *)l3_start, 0, l3pt - l3_start);
/*
 * Allocate an L1 page table.
 */
l1 = pmap_early_alloc_tables(&freemempos, 1);
if (pmap_mode == PMAP_MODE_SV39)
*root_pt_phys = (vm_paddr_t)l1;

return (l3pt);
/*
 * Allocate a set of L2 page tables for KVA. Most likely, only 1 is
 * needed.
 */
nkernl2 = howmany(howmany(kernlen, L2_SIZE), Ln_ENTRIES);
kern_l2 = pmap_early_alloc_tables(&freemempos, nkernl2);

/*
 * Allocate an L2 page table for the static devmap, located at the end
 * of KVA. We can expect that the devmap will always be less than 1GB
 * in size.
 */
devmap_l2 = pmap_early_alloc_tables(&freemempos, 1);

/* Allocate L3 page tables for the devmap. */
ndevmapl3 = howmany(howmany(PMAP_MAPDEV_EARLY_SIZE, L3_SIZE),
Ln_ENTRIES);
devmap_l3 = pmap_early_alloc_tables(&freemempos, ndevmapl3);

/*
 * Allocate some L3 bootstrap pages, for early KVA allocations before
 * vm_mem_init() has run. For example, the message buffer.
 *
 * A somewhat arbitrary choice of 32MB. This should be more than enough
 * for any early allocations. There is no need to worry about waste, as
 * whatever is not used will be consumed by later calls to
 * pmap_growkernel().
 */
nkernl3 = 16;
kern_l3 = pmap_early_alloc_tables(&freemempos, nkernl3);

/* Bootstrap the direct map. */
freemempos = pmap_bootstrap_dmap(l1, freemempos);

/* Allocations are done. */
if (freemempos < roundup2(kernend, L2_SIZE))
freemempos = roundup2(kernend, L2_SIZE);

/*
 * Map the kernel (and preloaded modules or data) using L2 superpages.
 *
 * kernstart is 2MB-aligned. This is enforced by loader(8) and required
 * by locore assembly.
 *
 * TODO: eventually, this should be done with proper permissions for
 * each segment, rather than mapping the entire kernel and preloaded
 * modules RWX.
 */
slot = pmap_l2_index(KERNBASE);
for (pa = kernstart; pa < kernend; pa += L2_SIZE, slot++) {
pmap_store(&kern_l2[slot], L2_PTE(pa, PTE_KERN | PTE_X));
}

/*
 * Connect the L3 bootstrap pages to the kernel L2 table. The L3 PTEs
 * themselves are invalid.
 */
slot = pmap_l2_index(freemempos - kernstart + KERNBASE);
for (i = 0; i < nkernl3; i++, slot++) {
pa = (vm_paddr_t)kern_l3 + ptoa(i);
pmap_store(&kern_l2[slot], L2_PDE(pa, PTE_V));
}

/* Connect the L2 tables to the L1 table. */
slot = pmap_l1_index(KERNBASE);
for (i = 0; i < nkernl2; i++, slot++) {
pa = (vm_paddr_t)kern_l2 + ptoa(i);
pmap_store(&l1[slot], L1_PDE(pa, PTE_V));
}

/* Connect the L1 table to L0, if in use. */
if (pmap_mode == PMAP_MODE_SV48) {
slot = pmap_l0_index(KERNBASE);
pmap_store(&l0[slot], L0_PDE((vm_paddr_t)l1, PTE_V));
}

/*
 * Connect the devmap L3 pages to the L2 table. The devmap PTEs
 * themselves are invalid.
 */
slot = pmap_l2_index(DEVMAP_MIN_VADDR);
for (i = 0; i < ndevmapl3; i++, slot++) {
pa = (vm_paddr_t)devmap_l3 + ptoa(i);
pmap_store(&devmap_l2[slot], L2_PDE(pa, PTE_V));
}

/* Connect the devmap L2 pages to the L1 table. */
slot = pmap_l1_index(DEVMAP_MIN_VADDR);
pa = (vm_paddr_t)devmap_l2;
pmap_store(&l1[slot], L1_PDE(pa, PTE_V));

/* Return the next position of free memory */
return (freemempos);
}
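
/*
 * Sizing example (illustration, not in the commit): for a 30MB kernel plus
 * preload area, howmany(30MB, L2_SIZE) = 15 L2 entries are needed and
 * howmany(15, Ln_ENTRIES) = 1, so a single kern_l2 page suffices; the 16
 * bootstrap L3 pages above cover 16 * 512 * 4KB = 32MB of early KVA, which
 * is where the "somewhat arbitrary choice of 32MB" comes from.
 */
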
/*
 * Bootstrap the system enough to run with virtual memory.
 */
void
pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen)
pmap_bootstrap(vm_paddr_t kernstart, vm_size_t kernlen)
{
vm_paddr_t physmap[PHYS_AVAIL_ENTRIES];
uint64_t satp;
vm_offset_t dpcpu, freemempos, l0pv, msgbufpv;
vm_paddr_t l0pa, l1pa, max_pa, min_pa, pa;
pd_entry_t *l0p;
pt_entry_t *l2p;
u_int l1_slot, l2_slot;
u_int physmap_idx;
int i, mode;
vm_paddr_t freemempos, pa;
vm_paddr_t root_pt_phys;
vm_offset_t freeva;
vm_offset_t dpcpu, msgbufpv;
pt_entry_t *pte;
int i;

printf("pmap_bootstrap %lx %lx %lx\n", l1pt, kernstart, kernlen);
printf("pmap_bootstrap %lx %lx\n", kernstart, kernlen);

/* Set this early so we can use the pagetable walking functions */
kernel_pmap_store.pm_top = (pd_entry_t *)l1pt;
kernel_pmap_store.pm_stage = PM_STAGE1;
PMAP_LOCK_INIT(kernel_pmap);
TAILQ_INIT(&kernel_pmap->pm_pvchunk);
vm_radix_init(&kernel_pmap->pm_root);

@@ -646,105 +821,62 @@ pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen)
 */
CPU_SET(PCPU_GET(hart), &kernel_pmap->pm_active);

/* Assume the address we were loaded to is a valid physical address. */
min_pa = max_pa = kernstart;

physmap_idx = physmem_avail(physmap, nitems(physmap));
physmap_idx /= 2;

/*
 * Find the minimum physical address. physmap is sorted,
 * but may contain empty ranges.
 */
for (i = 0; i < physmap_idx * 2; i += 2) {
if (physmap[i] == physmap[i + 1])
continue;
if (physmap[i] <= min_pa)
min_pa = physmap[i];
if (physmap[i + 1] > max_pa)
max_pa = physmap[i + 1];
}
printf("physmap_idx %u\n", physmap_idx);
printf("min_pa %lx\n", min_pa);
printf("max_pa %lx\n", max_pa);

/* Create a direct map region early so we can use it for pa -> va */
pmap_bootstrap_dmap(l1pt, min_pa, max_pa);

/*
 * Read the page table to find out what is already mapped.
 * This assumes we have mapped a block of memory from KERNBASE
 * using a single L1 entry.
 */
(void)pmap_early_page_idx(l1pt, KERNBASE, &l1_slot, &l2_slot);

/* Sanity check the index, KERNBASE should be the first VA */
KASSERT(l2_slot == 0, ("The L2 index is non-zero"));

freemempos = roundup2(KERNBASE + kernlen, PAGE_SIZE);

/* Create the l3 tables for the early devmap */
freemempos = pmap_bootstrap_l3(l1pt,
VM_MAX_KERNEL_ADDRESS - PMAP_MAPDEV_EARLY_SIZE, freemempos);

/*
 * Invalidate the mapping we created for the DTB. At this point a copy
 * has been created, and we no longer need it. We want to avoid the
 * possibility of an aliased mapping in the future.
 */
l2p = pmap_l2(kernel_pmap, VM_EARLY_DTB_ADDRESS);
if ((pmap_load(l2p) & PTE_V) != 0)
pmap_clear(l2p);
/* Create a new set of pagetables to run the kernel in. */
freemempos = pmap_create_pagetables(kernstart, kernlen, &root_pt_phys);

/* Switch to the newly created page tables. */
kernel_pmap->pm_stage = PM_STAGE1;
kernel_pmap->pm_top = (pd_entry_t *)PHYS_TO_DMAP(root_pt_phys);
kernel_pmap->pm_satp = atop(root_pt_phys) | pmap_satp_mode();
csr_write(satp, kernel_pmap->pm_satp);
sfence_vma();

#define alloc_pages(var, np) \
(var) = freemempos; \
freemempos += (np * PAGE_SIZE); \
memset((char *)(var), 0, ((np) * PAGE_SIZE));
/*
 * Now, we need to make a few more static reservations from KVA.
 *
 * Set freeva to freemempos virtual address, and be sure to advance
 * them together.
 */
freeva = freemempos - kernstart + KERNBASE;
#define reserve_space(var, pa, size) \
do { \
var = freeva; \
pa = freemempos; \
freeva += size; \
freemempos += size; \
} while (0)

mode = 0;
TUNABLE_INT_FETCH("vm.pmap.mode", &mode);
if (mode == PMAP_MODE_SV48 && (mmu_caps & MMU_SV48) != 0) {
/*
 * Enable SV48 mode: allocate an L0 page and set SV48 mode in
 * SATP. If the implementation does not provide SV48 mode,
 * the mode read back from the (WARL) SATP register will be
 * unchanged, and we continue in SV39 mode.
 */
alloc_pages(l0pv, 1);
l0p = (void *)l0pv;
l1pa = pmap_early_vtophys(l1pt, l1pt);
l0p[pmap_l0_index(KERNBASE)] = PTE_V |
((l1pa >> PAGE_SHIFT) << PTE_PPN0_S);
/* Allocate the dynamic per-cpu area. */
reserve_space(dpcpu, pa, DPCPU_SIZE);

l0pa = pmap_early_vtophys(l1pt, l0pv);
csr_write(satp, (l0pa >> PAGE_SHIFT) | SATP_MODE_SV48);
satp = csr_read(satp);
if ((satp & SATP_MODE_M) == SATP_MODE_SV48) {
pmap_mode = PMAP_MODE_SV48;
kernel_pmap_store.pm_top = l0p;
} else {
/* Mode didn't change, give the page back. */
freemempos -= PAGE_SIZE;
}
}
/* Map it. */
pte = pmap_l3(kernel_pmap, dpcpu);
KASSERT(pte != NULL, ("Bootstrap pages missing"));
for (i = 0; i < howmany(DPCPU_SIZE, PAGE_SIZE); i++)
pmap_store(&pte[i], L3_PTE(pa + ptoa(i), PTE_KERN));

/* Allocate dynamic per-cpu area. */
alloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
/* Now, it can be initialized. */
dpcpu_init((void *)dpcpu, 0);

/* Allocate memory for the msgbuf, e.g. for /sbin/dmesg */
alloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
reserve_space(msgbufpv, pa, round_page(msgbufsize));
msgbufp = (void *)msgbufpv;

virtual_avail = roundup2(freemempos, L2_SIZE);
virtual_end = VM_MAX_KERNEL_ADDRESS - PMAP_MAPDEV_EARLY_SIZE;
kernel_vm_end = virtual_avail;
/* Map it. */
pte = pmap_l3(kernel_pmap, msgbufpv);
KASSERT(pte != NULL, ("Bootstrap pages missing"));
for (i = 0; i < howmany(msgbufsize, PAGE_SIZE); i++)
pmap_store(&pte[i], L3_PTE(pa + ptoa(i), PTE_KERN));

pa = pmap_early_vtophys(l1pt, freemempos);
#undef reserve_space

physmem_exclude_region(kernstart, pa - kernstart, EXFLAG_NOALLOC);
/* Mark the bounds of our available virtual address space */
virtual_avail = kernel_vm_end = freeva;
virtual_end = DEVMAP_MIN_VADDR;

/* Exclude the reserved physical memory from allocations. */
physmem_exclude_region(kernstart, freemempos - kernstart,
EXFLAG_NOALLOC);
}

/*