Add routines for ARM System MMU (SMMU) pmap management.

Reviewed by:	markj
Discussed with:	kib
Sponsored by:	DARPA, Innovate UK
Differential Revision:	https://reviews.freebsd.org/D26877
Ruslan Bukin 2020-11-02 19:56:15 +00:00
parent 9b4e77cb97
commit 268f7e2539
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=367282
2 changed files with 184 additions and 0 deletions
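
The three routines added here form a small, self-contained interface for managing the stage-1 page tables of an SMMU translation context: pmap_senter() installs a single device-memory mapping without sleeping, pmap_sremove() clears one mapping, and pmap_sremove_pages() releases the page-table pages once every mapping is gone. The sketch below is purely illustrative and not part of this commit; the smmu_map_one()/smmu_unmap_one() helpers and the way the per-context pmap is obtained are assumptions made for the example.

/*
 * Illustrative sketch only (not part of this commit): hypothetical
 * helpers an SMMU driver might use to map and unmap one page in a
 * per-context SMMU pmap.  The helper names are assumptions.
 */
#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>

static int
smmu_map_one(pmap_t smmu_pmap, vm_offset_t va, vm_paddr_t pa, vm_prot_t prot)
{

	/*
	 * pmap_senter() installs a single non-global, execute-never,
	 * device-memory L3 mapping.  It does not sleep and may return
	 * KERN_RESOURCE_SHORTAGE when a page-table page cannot be
	 * allocated; the flags argument is unused by this version, so 0.
	 */
	return (pmap_senter(smmu_pmap, va, pa, prot, 0));
}

static void
smmu_unmap_one(pmap_t smmu_pmap, vm_offset_t va)
{

	/* Clears the L3 entry and drops the resident count. */
	(void)pmap_sremove(smmu_pmap, va);
}

How the SMMU pmap itself is created and attached to a translation context bank is left to the driver and is not shown here.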

sys/arm64/arm64/pmap.c

@@ -3604,6 +3604,184 @@ restart:
	return (KERN_SUCCESS);
}

/*
 * Add a single SMMU entry. This function does not sleep.
 */
int
pmap_senter(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
    vm_prot_t prot, u_int flags)
{
	pd_entry_t *pde;
	pt_entry_t new_l3, orig_l3;
	pt_entry_t *l3;
	vm_page_t mpte;
	int lvl;
	int rv;

	PMAP_ASSERT_STAGE1(pmap);
	KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));

	va = trunc_page(va);
	new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT |
	    ATTR_S1_IDX(VM_MEMATTR_DEVICE) | L3_PAGE);
	if ((prot & VM_PROT_WRITE) == 0)
		new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
	new_l3 |= ATTR_S1_XN; /* Execute never. */
	new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER);
	new_l3 |= ATTR_S1_nG; /* Non global. */

	CTR2(KTR_PMAP, "pmap_senter: %.16lx -> %.16lx", va, pa);

	PMAP_LOCK(pmap);

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
retry:
	pde = pmap_pde(pmap, va, &lvl);
	if (pde != NULL && lvl == 2) {
		l3 = pmap_l2_to_l3(pde, va);
	} else {
		mpte = _pmap_alloc_l3(pmap, pmap_l2_pindex(va), NULL);
		if (mpte == NULL) {
			CTR0(KTR_PMAP, "pmap_senter: mpte == NULL");
			rv = KERN_RESOURCE_SHORTAGE;
			goto out;
		}
		goto retry;
	}

	orig_l3 = pmap_load(l3);
	KASSERT(!pmap_l3_valid(orig_l3), ("l3 is valid"));

	/* New mapping */
	pmap_store(l3, new_l3);
	pmap_resident_count_inc(pmap, 1);
	dsb(ishst);

	rv = KERN_SUCCESS;
out:
	PMAP_UNLOCK(pmap);

	return (rv);
}

/*
 * Remove a single SMMU entry.
 */
int
pmap_sremove(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *pte;
	int lvl;
	int rc;

	PMAP_LOCK(pmap);

	pte = pmap_pte(pmap, va, &lvl);
	KASSERT(lvl == 3,
	    ("Invalid SMMU pagetable level: %d != 3", lvl));

	if (pte != NULL) {
		pmap_resident_count_dec(pmap, 1);
		pmap_clear(pte);
		rc = KERN_SUCCESS;
	} else
		rc = KERN_FAILURE;

	PMAP_UNLOCK(pmap);

	return (rc);
}

/*
 * Remove all the allocated L1, L2 pages from the SMMU pmap.
 * All the L3 entries must be cleared in advance, otherwise
 * this function panics.
 */
void
pmap_sremove_pages(pmap_t pmap)
{
	pd_entry_t l0e, *l1, l1e, *l2, l2e;
	pt_entry_t *l3, l3e;
	vm_page_t m, m0, m1;
	vm_offset_t sva;
	vm_paddr_t pa;
	vm_paddr_t pa0;
	vm_paddr_t pa1;
	int i, j, k, l;

	PMAP_LOCK(pmap);

	for (sva = VM_MINUSER_ADDRESS, i = pmap_l0_index(sva);
	    (i < Ln_ENTRIES && sva < VM_MAXUSER_ADDRESS); i++) {
		l0e = pmap->pm_l0[i];
		if ((l0e & ATTR_DESCR_VALID) == 0) {
			sva += L0_SIZE;
			continue;
		}
		pa0 = l0e & ~ATTR_MASK;
		m0 = PHYS_TO_VM_PAGE(pa0);
		l1 = (pd_entry_t *)PHYS_TO_DMAP(pa0);

		for (j = pmap_l1_index(sva); j < Ln_ENTRIES; j++) {
			l1e = l1[j];
			if ((l1e & ATTR_DESCR_VALID) == 0) {
				sva += L1_SIZE;
				continue;
			}
			if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK) {
				sva += L1_SIZE;
				continue;
			}
			pa1 = l1e & ~ATTR_MASK;
			m1 = PHYS_TO_VM_PAGE(pa1);
			l2 = (pd_entry_t *)PHYS_TO_DMAP(pa1);

			for (k = pmap_l2_index(sva); k < Ln_ENTRIES; k++) {
				l2e = l2[k];
				if ((l2e & ATTR_DESCR_VALID) == 0) {
					sva += L2_SIZE;
					continue;
				}
				pa = l2e & ~ATTR_MASK;
				m = PHYS_TO_VM_PAGE(pa);
				l3 = (pt_entry_t *)PHYS_TO_DMAP(pa);

				for (l = pmap_l3_index(sva); l < Ln_ENTRIES;
				    l++, sva += L3_SIZE) {
					l3e = l3[l];
					if ((l3e & ATTR_DESCR_VALID) == 0)
						continue;
					panic("%s: l3e found for va %jx\n",
					    __func__, sva);
				}

				vm_page_unwire_noq(m1);
				vm_page_unwire_noq(m);
				pmap_resident_count_dec(pmap, 1);
				vm_page_free(m);
				pmap_clear(&l2[k]);
			}

			vm_page_unwire_noq(m0);
			pmap_resident_count_dec(pmap, 1);
			vm_page_free(m1);
			pmap_clear(&l1[j]);
		}

		pmap_resident_count_dec(pmap, 1);
		vm_page_free(m0);
		pmap_clear(&pmap->pm_l0[i]);
	}

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("Invalid resident count %jd", pmap->pm_stats.resident_count));

	PMAP_UNLOCK(pmap);
}

/*
 * Insert the given physical page (p) at
 * the specified virtual address (v) in the

sys/arm64/include/pmap.h

@@ -187,6 +187,12 @@ bool pmap_get_tables(pmap_t, vm_offset_t, pd_entry_t **, pd_entry_t **,
int pmap_fault(pmap_t, uint64_t, uint64_t);

/* System MMU (SMMU). */
int pmap_senter(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, vm_prot_t prot,
    u_int flags);
int pmap_sremove(pmap_t pmap, vm_offset_t va);
void pmap_sremove_pages(pmap_t pmap);

struct pcb *pmap_switch(struct thread *, struct thread *);

extern void (*pmap_clean_stage2_tlbi)(void);
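
As the comment above pmap_sremove_pages() states, every L3 entry must already be cleared before the page-table pages are released, otherwise the function panics. The hedged sketch below (reusing the headers from the earlier example; the helper name and the assumption that the driver mapped [base, base + size) densely with pmap_senter() are hypothetical) shows a teardown path that respects that ordering.

/*
 * Hypothetical teardown helper (not part of this commit).  Assumes the
 * whole [base, base + size) range was previously mapped page by page
 * with pmap_senter().
 */
static void
smmu_unmap_range_and_release(pmap_t smmu_pmap, vm_offset_t base,
    vm_size_t size)
{
	vm_offset_t va;

	/* Clear every L3 entry first ... */
	for (va = base; va < base + size; va += PAGE_SIZE)
		(void)pmap_sremove(smmu_pmap, va);

	/* ... then free the L1/L2 page-table pages themselves. */
	pmap_sremove_pages(smmu_pmap);
}

pmap_sremove() asserts that the page-table walk reaches level 3 for every VA it is handed, so it must only be called for addresses the driver knows it has mapped.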