arm64: Remove ATTR_DEFAULT from pte.h

ATTR_SH(ATTR_SH_IS) will soon be dynamic, as FEAT_LPA2 moves the
shareability field out of the page table entry. When this happens,
ATTR_DEFAULT would be reduced to just ATTR_AF.

Rather than keep ATTR_DEFAULT around for a single attribute, remove it.

Reviewed by:	alc, kib, markj
Sponsored by:	Arm Ltd
Differential Revision:	https://reviews.freebsd.org/D46466
commit e2990a9ee4
parent 3a3aa2cc07
Author:	Andrew Turner
Date:	2024-09-05 13:11:55 +01:00

7 changed files with 36 additions and 35 deletions
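
For background, ATTR_DEFAULT bundled the two attributes every valid
mapping needs set up front: the Access Flag (AF), so that the first
reference does not raise an access-flag fault, and Inner Shareable
shareability (SH). The following is a minimal, standalone C sketch of
that expansion; it is not part of the commit. The bit positions follow
the arm64 block/page descriptor layout (SH in bits [9:8], AF in bit 10)
as defined in sys/arm64/include/pte.h, and the printout is purely
illustrative.

#include <stdint.h>
#include <stdio.h>

#define	ATTR_SH(x)	((uint64_t)(x) << 8)	/* shareability, bits [9:8] */
#define	ATTR_SH_IS	3UL			/* inner shareable */
#define	ATTR_AF		(1UL << 10)		/* access flag, bit 10 */

/* The shorthand this commit removes, and its open-coded expansion: */
#define	ATTR_DEFAULT	(ATTR_AF | ATTR_SH(ATTR_SH_IS))

int
main(void)
{
	/* Both spellings yield the same descriptor bits today: 0x700. */
	printf("ATTR_DEFAULT = %#lx\n", (unsigned long)ATTR_DEFAULT);
	printf("open-coded   = %#lx\n",
	    (unsigned long)(ATTR_AF | ATTR_SH(ATTR_SH_IS)));
	/*
	 * With FEAT_LPA2 the SH field moves out of the descriptor, so
	 * AF and SH can no longer be bundled into one unconditional
	 * default -- hence the open-coded attributes in the diff below.
	 */
	return (0);
}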


@@ -214,7 +214,7 @@ efi_create_1t1_map(struct efi_md *map, int ndesc, int descsz)
 			    p->md_phys, mode, p->md_pages);
 		}
 
-		l3_attr = ATTR_DEFAULT | ATTR_S1_IDX(mode) |
+		l3_attr = ATTR_AF | ATTR_SH(ATTR_SH_IS) | ATTR_S1_IDX(mode) |
 		    ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_nG | L3_PAGE;
 		if (mode == VM_MEMATTR_DEVICE || p->md_attr & EFI_MD_ATTR_XP)
 			l3_attr |= ATTR_S1_XN;


@@ -747,7 +747,7 @@ LENTRY(build_l2_block_pagetable)
 
 	/* Build the L2 block entry */
 	orr	x12, x7, #L2_BLOCK
-	orr	x12, x12, #(ATTR_DEFAULT)
+	orr	x12, x12, #(ATTR_AF | ATTR_SH(ATTR_SH_IS))
 	orr	x12, x12, #(ATTR_S1_UXN)
 #ifdef __ARM_FEATURE_BTI_DEFAULT
 	orr	x12, x12, #(ATTR_S1_GP)
@@ -823,7 +823,7 @@ LENTRY(build_l3_page_pagetable)
 
 	/* Build the L3 page entry */
 	orr	x12, x7, #L3_PAGE
-	orr	x12, x12, #(ATTR_DEFAULT)
+	orr	x12, x12, #(ATTR_AF | ATTR_SH(ATTR_SH_IS))
 	orr	x12, x12, #(ATTR_S1_UXN)
 #ifdef __ARM_FEATURE_BTI_DEFAULT
 	orr	x12, x12, #(ATTR_S1_GP)


@@ -310,8 +310,8 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
 			for (i = 0; i < Ln_ENTRIES; i++) {
 				for (j = 0; j < Ln_ENTRIES; j++) {
 					tmpbuffer[j] = (pa + i * L2_SIZE +
-					    j * PAGE_SIZE) | ATTR_DEFAULT |
-					    L3_PAGE;
+					    j * PAGE_SIZE) | ATTR_AF |
+					    ATTR_SH(ATTR_SH_IS) | L3_PAGE;
 				}
 				error = blk_write(di, (char *)&tmpbuffer, 0,
 				    PAGE_SIZE);
@@ -330,7 +330,7 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
 			/* Generate fake l3 entries based upon the l1 entry */
 			for (i = 0; i < Ln_ENTRIES; i++) {
 				tmpbuffer[i] = (pa + i * PAGE_SIZE) |
-				    ATTR_DEFAULT | L3_PAGE;
+				    ATTR_AF | ATTR_SH(ATTR_SH_IS) | L3_PAGE;
 			}
 			error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
 			if (error)


@@ -185,8 +185,8 @@
 #else
 #define	ATTR_KERN_GP		0
 #endif
-#define	PMAP_SAN_PTE_BITS	(ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP | \
-    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_AP(ATTR_S1_AP_RW))
+#define	PMAP_SAN_PTE_BITS	(ATTR_AF | ATTR_SH(ATTR_SH_IS) | ATTR_S1_XN | \
+    ATTR_KERN_GP | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_AP(ATTR_S1_AP_RW))
 
 struct pmap_large_md_page {
 	struct rwlock	pv_lock;
@@ -1150,7 +1150,7 @@ pmap_bootstrap_l2_block(struct pmap_bootstrap_state *state, int i)
 		MPASS((state->pa & L2_OFFSET) == 0);
 		MPASS(state->l2[l2_slot] == 0);
 		pmap_store(&state->l2[l2_slot], PHYS_TO_PTE(state->pa) |
-		    ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP |
+		    ATTR_AF | ATTR_SH(ATTR_SH_IS) | ATTR_S1_XN | ATTR_KERN_GP |
 		    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | contig | L2_BLOCK);
 	}
 	MPASS(state->va == (state->pa - dmap_phys_base + DMAP_MIN_ADDRESS));
@@ -1200,7 +1200,7 @@ pmap_bootstrap_l3_page(struct pmap_bootstrap_state *state, int i)
 		MPASS((state->pa & L3_OFFSET) == 0);
 		MPASS(state->l3[l3_slot] == 0);
 		pmap_store(&state->l3[l3_slot], PHYS_TO_PTE(state->pa) |
-		    ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP |
+		    ATTR_AF | ATTR_SH(ATTR_SH_IS) | ATTR_S1_XN | ATTR_KERN_GP |
 		    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | contig | L3_PAGE);
 	}
 	MPASS(state->va == (state->pa - dmap_phys_base + DMAP_MIN_ADDRESS));
@@ -1242,7 +1242,8 @@ pmap_bootstrap_dmap(void)
 			MPASS((bs_state.pa & L1_OFFSET) == 0);
 			pmap_store(
 			    &bs_state.l1[pmap_l1_index(bs_state.va)],
-			    PHYS_TO_PTE(bs_state.pa) | ATTR_DEFAULT |
+			    PHYS_TO_PTE(bs_state.pa) | ATTR_AF |
+			    ATTR_SH(ATTR_SH_IS) |
 			    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
 			    ATTR_S1_XN | ATTR_KERN_GP | L1_BLOCK);
 		}
@@ -2111,8 +2112,8 @@ pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
 	KASSERT((size & PAGE_MASK) == 0,
 	    ("pmap_kenter: Mapping is not page-sized"));
 
-	attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
-	    ATTR_KERN_GP | ATTR_S1_IDX(mode);
+	attr = ATTR_AF | ATTR_SH(ATTR_SH_IS) | ATTR_S1_AP(ATTR_S1_AP_RW) |
+	    ATTR_S1_XN | ATTR_KERN_GP | ATTR_S1_IDX(mode);
 	old_l3e = 0;
 	va = sva;
 	while (size != 0) {
@@ -2326,7 +2327,8 @@ pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
 		    ("pmap_qenter: Invalid level %d", lvl));
 		m = ma[i];
-		attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
+		attr = ATTR_AF | ATTR_SH(ATTR_SH_IS) |
+		    ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
 		    ATTR_KERN_GP | ATTR_S1_IDX(m->md.pv_memattr) | L3_PAGE;
 		pte = pmap_l2_to_l3(pde, va);
 		old_l3e |= pmap_load_store(pte, VM_PAGE_TO_PTE(m) | attr);
@@ -5122,7 +5124,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
 	pa = VM_PAGE_TO_PHYS(m);
-	new_l3 = (pt_entry_t)(PHYS_TO_PTE(pa) | ATTR_DEFAULT | L3_PAGE);
+	new_l3 = (pt_entry_t)(PHYS_TO_PTE(pa) | ATTR_AF | ATTR_SH(ATTR_SH_IS) |
+	    L3_PAGE);
 	new_l3 |= pmap_pte_memattr(pmap, m->md.pv_memattr);
 	new_l3 |= pmap_pte_prot(pmap, prot);
 	if ((flags & PMAP_ENTER_WIRED) != 0)
@@ -5465,13 +5468,13 @@ pmap_enter_l2_rx(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	KASSERT(ADDR_IS_CANONICAL(va),
 	    ("%s: Address not in canonical form: %lx", __func__, va));
 
-	new_l2 = (pd_entry_t)(VM_PAGE_TO_PTE(m) | ATTR_DEFAULT |
+	new_l2 = (pd_entry_t)(VM_PAGE_TO_PTE(m) | ATTR_SH(ATTR_SH_IS) |
 	    ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
 	    L2_BLOCK);
-	if ((m->oflags & VPO_UNMANAGED) == 0) {
+	if ((m->oflags & VPO_UNMANAGED) == 0)
 		new_l2 |= ATTR_SW_MANAGED;
-		new_l2 &= ~ATTR_AF;
-	}
+	else
+		new_l2 |= ATTR_AF;
 	if ((prot & VM_PROT_EXECUTE) == 0 ||
 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
 		new_l2 |= ATTR_S1_XN;
@@ -5694,13 +5697,13 @@ pmap_enter_l3c_rx(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *ml3p,
 	KASSERT(ADDR_IS_CANONICAL(va),
 	    ("%s: Address not in canonical form: %lx", __func__, va));
 
-	l3e = VM_PAGE_TO_PTE(m) | ATTR_DEFAULT |
+	l3e = VM_PAGE_TO_PTE(m) | ATTR_SH(ATTR_SH_IS) |
 	    ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
 	    ATTR_CONTIGUOUS | L3_PAGE;
-	if ((m->oflags & VPO_UNMANAGED) == 0) {
+	if ((m->oflags & VPO_UNMANAGED) == 0)
 		l3e |= ATTR_SW_MANAGED;
-		l3e &= ~ATTR_AF;
-	}
+	else
+		l3e |= ATTR_AF;
 	if ((prot & VM_PROT_EXECUTE) == 0 ||
 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
 		l3e |= ATTR_S1_XN;
@@ -6091,8 +6094,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 		pmap_resident_count_inc(pmap, 1);
 
 	pa = VM_PAGE_TO_PHYS(m);
-	l3_val = PHYS_TO_PTE(pa) | ATTR_DEFAULT | ATTR_S1_IDX(m->md.pv_memattr) |
-	    ATTR_S1_AP(ATTR_S1_AP_RO) | L3_PAGE;
+	l3_val = PHYS_TO_PTE(pa) | ATTR_SH(ATTR_SH_IS) |
+	    ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) | L3_PAGE;
 	l3_val |= pmap_pte_bti(pmap, va);
 	if ((prot & VM_PROT_EXECUTE) == 0 ||
 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
@@ -6107,10 +6110,10 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	/*
 	 * Now validate mapping with RO protection
 	 */
-	if ((m->oflags & VPO_UNMANAGED) == 0) {
+	if ((m->oflags & VPO_UNMANAGED) == 0)
 		l3_val |= ATTR_SW_MANAGED;
-		l3_val &= ~ATTR_AF;
-	}
+	else
+		l3_val |= ATTR_AF;
 
 	/* Sync icache before the mapping is stored to PTE */
 	if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
@@ -7741,9 +7744,9 @@ pmap_mapbios(vm_paddr_t pa, vm_size_t size)
 		/* Insert L2_BLOCK */
 		l2 = pmap_l1_to_l2(pde, va);
 		old_l2e |= pmap_load_store(l2,
-		    PHYS_TO_PTE(pa) | ATTR_DEFAULT | ATTR_S1_XN |
-		    ATTR_KERN_GP | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
-		    L2_BLOCK);
+		    PHYS_TO_PTE(pa) | ATTR_AF | ATTR_SH(ATTR_SH_IS) |
+		    ATTR_S1_XN | ATTR_KERN_GP |
+		    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
 
 		va += L2_SIZE;
 		pa += L2_SIZE;


@@ -111,8 +111,6 @@ typedef uint64_t pt_entry_t;		/* page table entry */
 #define	ATTR_S2_MEMATTR_WT	0xa
 #define	ATTR_S2_MEMATTR_WB	0xf
 
-#define	ATTR_DEFAULT	(ATTR_AF | ATTR_SH(ATTR_SH_IS))
-
 #define	ATTR_DESCR_MASK		3
 #define	ATTR_DESCR_VALID	1
 #define	ATTR_DESCR_TYPE_MASK	2


@@ -708,7 +708,7 @@ smmu_pmap_enter(struct smmu_pmap *pmap, vm_offset_t va, vm_paddr_t pa,
 	KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));
 
 	va = trunc_page(va);
-	new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT |
+	new_l3 = (pt_entry_t)(pa | ATTR_AF | ATTR_SH(ATTR_SH_IS) |
 	    ATTR_S1_IDX(VM_MEMATTR_DEVICE) | IOMMU_L3_PAGE);
 	if ((prot & VM_PROT_WRITE) == 0)
 		new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);


@@ -294,7 +294,7 @@ vmmpmap_enter(vm_offset_t va, vm_size_t size, vm_paddr_t pa, vm_prot_t prot)
 	KASSERT((size & PAGE_MASK) == 0,
 	    ("%s: Mapping is not page-sized", __func__));
 
-	l3e = ATTR_DEFAULT | L3_PAGE;
+	l3e = ATTR_AF | ATTR_SH(ATTR_SH_IS) | L3_PAGE;
 	/* This bit is res1 at EL2 */
 	l3e |= ATTR_S1_AP(ATTR_S1_AP_USER);
 	/* Only normal memory is used at EL2 */