Mirror of https://git.hardenedbsd.org/hardenedbsd/HardenedBSD.git (synced 2024-11-24 17:44:17 +01:00)
Remove stray whitespaces from sys/amd64/
Signed-off-by: Joshua Rogers <Joshua@Joshua.Hu>
Reviewed by: imp
Pull Request: https://github.com/freebsd/freebsd-src/pull/1418
parent 1b1e392aed
commit f3754afd59
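The change is mechanical: each hunk below deletes a line that ends in trailing blanks (or a whitespace-only blank line) and re-adds it without them, so the removed and added lines render identically. As an aside, a pattern such as grep -rnE '[ \t]+$' sys/amd64/ is one way to locate these lines; the commit itself does not say which tool was used.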
@@ -50,7 +50,7 @@
  *
  * Search some or all of the BIOS region for a signature string.
  *
- * (start)	Optional offset returned from this function
+ * (start)	Optional offset returned from this function
  *		(for searching for multiple matches), or NULL
  *		to start the search from the base of the BIOS.
  *		Note that this will be a _physical_ address in
@@ -68,7 +68,7 @@ u_int32_t
 bios_sigsearch(u_int32_t start, u_char *sig, int siglen, int paralen, int sigofs)
 {
 	u_char *sp, *end;
-
+
 	/* compute the starting address */
 	if ((start >= BIOS_START) && (start <= (BIOS_START + BIOS_SIZE))) {
 		sp = (char *)BIOS_PADDRTOVADDR(start);
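For context, the comment block above documents bios_sigsearch(), which steps through the BIOS region at a fixed paragraph interval and compares a signature at a fixed offset within each step. A self-contained userland sketch of that loop; parameter semantics are inferred from the comment block, and sigsearch() here is a hypothetical stand-in, not the kernel routine:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /*
     * Return the offset of `sig` in `region`, scanning in `paralen`-byte
     * steps and comparing `siglen` bytes at offset `sigofs` inside each
     * step; (size_t)-1 if not found.  `paralen` must be non-zero.
     */
    static size_t
    sigsearch(const uint8_t *region, size_t len, const uint8_t *sig,
        size_t siglen, size_t paralen, size_t sigofs)
    {
            size_t off;

            for (off = 0; off + sigofs + siglen <= len; off += paralen)
                    if (memcmp(region + off + sigofs, sig, siglen) == 0)
                            return (off);
            return ((size_t)-1);
    }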
@@ -386,7 +386,7 @@ END(savectx)
 /*
  * resumectx(pcb)
  * Resuming processor state from pcb.
- */
+ */
 ENTRY(resumectx)
 	/* Switch to KPML5/4phys. */
 	movq	KPML4phys,%rax
@@ -1391,7 +1391,7 @@ db_disasm(db_addr_t loc, bool altfmt)
 		case 0xc8:
 			i_name = "monitor";
 			i_size = NONE;
-			i_mode = 0;
+			i_mode = 0;
 			break;
 		case 0xc9:
 			i_name = "mwait";
@@ -114,7 +114,7 @@ dtrace_invop_calltrap_addr:
 	movq	$0,TF_ADDR(%rsp)
 	movq	$0,TF_ERR(%rsp)
 	jmp	alltraps_noen_u
-
+
 	.globl	X\l
 	.type	X\l,@function
 X\l:
@@ -538,7 +538,7 @@ fpuformat(void)
  *	(FP_X_INV, FP_X_DZ)
  *	4	Denormal operand	(FP_X_DNML)
  *	5	Numeric over/underflow	(FP_X_OFL, FP_X_UFL)
- *	6	Inexact result		(FP_X_IMP)
+ *	6	Inexact result		(FP_X_IMP)
  */
 static char fpetable[128] = {
 	0,
@@ -2,21 +2,21 @@
  * SPDX-License-Identifier: BSD-2-Clause
  *
  * Copyright (c) KATO Takenori, 1997, 1998.
- *
+ *
  * All rights reserved.  Unpublished rights reserved under the copyright
  * laws of Japan.
- *
+ *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
- *
+ *
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer as
  *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
- *
+ *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
@@ -229,7 +229,7 @@ cpu_startup(void *dummy)
	 * namely: incorrect CPU frequency detection and failure to
	 * start the APs.
	 * We do this by disabling a bit in the SMI_EN (SMI Control and
-	 * Enable register) of the Intel ICH LPC Interface Bridge.
+	 * Enable register) of the Intel ICH LPC Interface Bridge.
	 */
	sysenv = kern_getenv("smbios.system.product");
	if (sysenv != NULL) {
@@ -191,7 +191,7 @@ memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.
 */
-int
+int
memioctl_md(struct cdev *dev __unused, u_long cmd, caddr_t data, int flags,
    struct thread *td)
{
@@ -221,7 +221,7 @@ memioctl_md(struct cdev *dev __unused, u_long cmd, caddr_t data, int flags,
		    M_MEMDESC, M_WAITOK);
		error = mem_range_attr_get(md, &nd);
		if (!error)
-			error = copyout(md, mo->mo_desc,
+			error = copyout(md, mo->mo_desc,
			    nd * sizeof(struct mem_range_desc));
		free(md, M_MEMDESC);
	}
@@ -229,7 +229,7 @@ memioctl_md(struct cdev *dev __unused, u_long cmd, caddr_t data, int flags,
		nd = mem_range_softc.mr_ndesc;
		mo->mo_arg[0] = nd;
		break;
-
+
	case MEMRANGE_SET:
		md = (struct mem_range_desc *)malloc(sizeof(struct mem_range_desc),
		    M_MEMDESC, M_WAITOK);
@@ -62,7 +62,7 @@ mptramp_start:

	/* Enable protected mode */
	movl	$CR0_PE, %eax
-	mov	%eax, %cr0
+	mov	%eax, %cr0

	/*
	 * Now execute a far jump to turn on protected mode.  This
@@ -207,7 +207,7 @@ bootcode:
	 * are interpreted slightly differently.
	 *   %ds:	+A, +W, -E, DPL=0, +P, +D, +G
	 *   %ss:	+A, +W, -E, DPL=0, +P, +B, +G
-	 *		Accessed, Writeable, Expand up, Present, 32 bit, 4GB
+	 *		Accessed, Writeable, Expand up, Present, 32 bit, 4GB
	 *		For %ds, +D means 'default operand size is 32 bit'.
	 *		For %ss, +B means the stack register is %esp rather than %sp.
	 */
@@ -237,7 +237,7 @@ mptramp_nx:
	/*
	 * The pseudo descriptor for lgdt to use.
	 */
-lgdt_desc:
+lgdt_desc:
	.word	gdtend-gdt		/* Length */
	.long	gdt-mptramp_start	/* Offset plus %ds << 4 */

@@ -1824,7 +1824,7 @@ create_pagetables(vm_paddr_t *firstaddr)
	 * then the residual physical memory is mapped with 2MB pages.  Later,
	 * if pmap_mapdev{_attr}() uses the direct map for non-write-back
	 * memory, pmap_change_attr() will demote any 2MB or 1GB page mappings
-	 * that are partially used.
+	 * that are partially used.
	 */
	pd_p = (pd_entry_t *)DMPDphys;
	for (i = NPDEPG * ndm1g, j = 0; i < NPDEPG * ndmpdp; i++, j++) {
@@ -2279,7 +2279,7 @@ pmap_bootstrap_la57(void *arg __unused)
	vm_page_free(m_pd);
	vm_page_free(m_pt);

-	/*
+	/*
	 * Recursively map PML5 to itself in order to get PTmap and
	 * PDmap.
	 */
@@ -2510,7 +2510,7 @@ pmap_init(void)
	/*
	 * Initialize the vm page array entries for the kernel pmap's
	 * page table pages.
-	 */
+	 */
	PMAP_LOCK(kernel_pmap);
	for (i = 0; i < nkpt; i++) {
		mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
@@ -3461,12 +3461,12 @@ pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
	cpuid = PCPU_GET(cpuid);
	other_cpus = all_cpus;
	CPU_CLR(cpuid, &other_cpus);
-	if (pmap == kernel_pmap || pmap_type_guest(pmap))
+	if (pmap == kernel_pmap || pmap_type_guest(pmap))
		active = all_cpus;
	else {
		active = pmap->pm_active;
	}
-	if (CPU_OVERLAP(&active, &other_cpus)) {
+	if (CPU_OVERLAP(&active, &other_cpus)) {
		act.store = cpuid;
		act.invalidate = active;
		act.va = va;
@@ -4226,7 +4226,7 @@ _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)

	pmap_pt_page_count_adj(pmap, -1);

-	/*
+	/*
	 * Put page on a list so that it is released after
	 * *ALL* TLB shootdown is done
	 */
@@ -4377,7 +4377,7 @@ pmap_pinit_pml5(vm_page_t pml5pg)
	pm_pml5[pmap_pml5e_index(UPT_MAX_ADDRESS)] = KPML4phys | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;

-	/*
+	/*
	 * Install self-referential address mapping entry.
	 */
	pm_pml5[PML5PML5I] = VM_PAGE_TO_PHYS(pml5pg) |
@@ -5179,7 +5179,7 @@ pmap_growkernel(vm_offset_t addr)
			end = (end + NBPDR) & ~PDRMASK;
			if (end - 1 >= vm_map_max(kernel_map)) {
				end = vm_map_max(kernel_map);
-				break;
+				break;
			}
			continue;
		}
@@ -5196,7 +5196,7 @@ pmap_growkernel(vm_offset_t addr)
			end = (end + NBPDR) & ~PDRMASK;
			if (end - 1 >= vm_map_max(kernel_map)) {
				end = vm_map_max(kernel_map);
-				break;
+				break;
			}
		}

@@ -6146,7 +6146,7 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
	 * PG_A set.  If the old PDE has PG_RW set, it also has PG_M
	 * set.  Thus, there is no danger of a race with another
	 * processor changing the setting of PG_A and/or PG_M between
-	 * the read above and the store below.
+	 * the read above and the store below.
	 */
	if (workaround_erratum383)
		pmap_update_pde(pmap, va, pde, newpde);
@@ -6276,7 +6276,7 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
 * pmap_remove_pte: do the things to unmap a page in a process
 */
static int
-pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
+pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
    pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp)
{
	struct md_page *pvh;
@@ -6889,7 +6889,7 @@ pmap_pde_ept_executable(pmap_t pmap, pd_entry_t pde)
 * single page table page (PTP) to a single 2MB page mapping.  For promotion
 * to occur, two conditions must be met: (1) the 4KB page mappings must map
 * aligned, contiguous physical memory and (2) the 4KB page mappings must have
- * identical characteristics.
+ * identical characteristics.
 */
static bool
pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, vm_page_t mpte,
@@ -7237,7 +7237,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
		goto out;
	}
	if (psind == 1) {
-		/* Assert the required virtual and physical alignment. */
+		/* Assert the required virtual and physical alignment. */
		KASSERT((va & PDRMASK) == 0, ("pmap_enter: va unaligned"));
		KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
		rv = pmap_enter_pde(pmap, va, newpte | PG_PS, flags, m, &lock);
@@ -7933,7 +7933,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
		 * Map using 2MB pages.  Since "ptepa" is 2M aligned and
		 * "size" is a multiple of 2M, adding the PAT setting to "pa"
		 * will not affect the termination of this loop.
-		 */
+		 */
		PMAP_LOCK(pmap);
		for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, true);
		    pa < ptepa + size; pa += NBPDR) {
@@ -8168,7 +8168,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
		srcptepaddr = *pde;
		if (srcptepaddr == 0)
			continue;
-
+
		if (srcptepaddr & PG_PS) {
			/*
			 * We can only virtual copy whole superpages.
@@ -8246,7 +8246,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
				pmap_abort_ptp(dst_pmap, addr, dstmpte);
				goto out;
			}
-			/* Have we copied all of the valid mappings? */
+			/* Have we copied all of the valid mappings? */
			if (dstmpte->ref_count >= srcmpte->ref_count)
				break;
		}
@@ -68,7 +68,7 @@ ENTRY(__vdso_sigcode)
	 *
	 * LLVM libunwind from stable/13 cannot parse register numbers higher
	 * than 32.  Disable %rflags, %fs.base, and %gs.base annotations.
-	 */
+	 */
	.cfi_offset %fs, SIGF_UC + UC_FS
	.cfi_offset %gs, SIGF_UC + UC_GS
	.cfi_offset %es, SIGF_UC + UC_ES
@@ -311,7 +311,7 @@ sysarch(struct thread *td, struct sysarch_args *uap)
		error = copyout(&pcb->pcb_fsbase, uap->parms,
		    sizeof(pcb->pcb_fsbase));
		break;
-
+
	case AMD64_SET_FSBASE:
		error = copyin(uap->parms, &a64base, sizeof(a64base));
		if (error == 0) {
@@ -723,7 +723,7 @@ trap_pfault(struct trapframe *frame, bool usermode, int *signo, int *ucode)
	 * Due to both processor errata and lazy TLB invalidation when
	 * access restrictions are removed from virtual pages, memory
	 * accesses that are allowed by the physical mapping layer may
-	 * nonetheless cause one spurious page fault per virtual page.
+	 * nonetheless cause one spurious page fault per virtual page.
	 * When the thread is executing a "no faulting" section that
	 * is bracketed by vm_fault_{disable,enable}_pagefaults(),
	 * every page fault is treated as a spurious page fault,
@@ -606,7 +606,7 @@ cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{

-	/*
+	/*
	 * Do any extra cleaning that needs to be done.
	 * The thread may have optional components
	 * that are not present in a fresh thread.
@@ -92,7 +92,7 @@

#define END(x)		.size x, . - x; .cfi_endproc
/*
- * WEAK_REFERENCE(): create a weak reference alias from sym.
+ * WEAK_REFERENCE(): create a weak reference alias from sym.
 * The macro is not a general asm macro that takes arbitrary names,
 * but one that takes only C names.  It does the non-null name
 * translation inside the macro.
@@ -29,6 +29,6 @@
#ifndef _AMD64_BUS_DMA_H_
#define _AMD64_BUS_DMA_H_

-#include <x86/bus_dma.h>
+#include <x86/bus_dma.h>

#endif /* _AMD64_BUS_DMA_H_ */
@@ -78,7 +78,7 @@
 * ALIGNED_POINTER is a boolean macro that checks whether an address
 * is valid to fetch data elements of type t from on this architecture.
 * This does not reflect the optimal alignment, just the possibility
- * (within reasonable limits).
+ * (within reasonable limits).
 */
#define ALIGNED_POINTER(p, t)	1

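The hunk above touches the comment explaining why amd64 defines ALIGNED_POINTER(p, t) to 1: the CPU tolerates misaligned scalar access. On a strict-alignment architecture the macro must actually test the address; a generic illustration of that test (not the verbatim macro from any FreeBSD header):

    #include <stdint.h>

    /* True when p is suitably aligned to hold an object of type t. */
    #define ALIGNED_POINTER_GENERIC(p, t) \
            ((((uintptr_t)(p)) & (_Alignof(t) - 1)) == 0)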
@@ -154,7 +154,7 @@
#define amd64_btop(x)	((unsigned long)(x) >> PAGE_SHIFT)
#define amd64_ptob(x)	((unsigned long)(x) << PAGE_SHIFT)

-#define pgtok(x)	((unsigned long)(x) * (PAGE_SIZE / 1024))
+#define pgtok(x)	((unsigned long)(x) * (PAGE_SIZE / 1024))

#define INKERNEL(va)	(((va) >= DMAP_MIN_ADDRESS && (va) < DMAP_MAX_ADDRESS) \
    || ((va) >= VM_MIN_KERNEL_ADDRESS && (va) < VM_MAX_KERNEL_ADDRESS))
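The conversion macros in this hunk are plain shifts and multiplies; a self-contained worked example, assuming the usual amd64 value PAGE_SHIFT = 12 (4 KB pages):

    #include <assert.h>

    #define PAGE_SHIFT      12
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)
    #define amd64_btop(x)   ((unsigned long)(x) >> PAGE_SHIFT)
    #define amd64_ptob(x)   ((unsigned long)(x) << PAGE_SHIFT)
    #define pgtok(x)        ((unsigned long)(x) * (PAGE_SIZE / 1024))

    int
    main(void)
    {
            assert(amd64_btop(0x5000) == 5);        /* bytes to pages */
            assert(amd64_ptob(5) == 0x5000);        /* pages to bytes */
            assert(pgtok(5) == 20);                 /* pages to kilobytes */
            return (0);
    }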
@@ -140,7 +140,7 @@
#define PGEX_PK		0x20	/* protection key violation */
#define PGEX_SGX	0x8000	/* SGX-related */

-/*
+/*
 * undef the PG_xx macros that define bits in the regular x86 PTEs that
 * have a different position in nested PTEs.  This is done when compiling
 * code that needs to be aware of the differences between regular x86 and
@@ -12,7 +12,7 @@
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
- *
+ *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
@@ -34,7 +34,7 @@
 * On this machine, the only purpose for which sf_buf is used is to implement
 * an opaque pointer required by the machine-independent parts of the kernel.
 * That pointer references the vm_page that is "mapped" by the sf_buf.  The
- * actual mapping is provided by the direct virtual-to-physical mapping.
+ * actual mapping is provided by the direct virtual-to-physical mapping.
 */
static inline vm_offset_t
sf_buf_kva(struct sf_buf *sf)
@@ -124,7 +124,7 @@ enum x2apic_state {
/*
 * The VM name has to fit into the pathname length constraints of devfs,
 * governed primarily by SPECNAMELEN.  The length is the total number of
- * characters in the full path, relative to the mount point and not
+ * characters in the full path, relative to the mount point and not
 * including any leading '/' characters.
 * A prefix and a suffix are added to the name specified by the user.
 * The prefix is usually "vmm/" or "vmm.io/", but can be a few characters
@@ -465,7 +465,7 @@ struct vm_copyinfo {
/*
 * Set up 'copyinfo[]' to copy to/from guest linear address space starting
 * at 'gla' and 'len' bytes long.  The 'prot' should be set to PROT_READ for
- * a copyin or PROT_WRITE for a copyout.
+ * a copyin or PROT_WRITE for a copyout.
 *
 * retval	is_fault	Interpretation
 *   0		0		Success
@@ -301,7 +301,7 @@ enum {
	IOCNUM_UNMAP_PPTDEV_MMIO = 46,

	/* statistics */
-	IOCNUM_VM_STATS = 50,
+	IOCNUM_VM_STATS = 50,
	IOCNUM_VM_STAT_DESC = 51,

	/* kernel device state */
@@ -112,7 +112,7 @@ void vie_init(struct vie *vie, const char *inst_bytes, int inst_length);
 * 'gla' is the guest linear address provided by the hardware assist
 * that caused the nested page table fault.  It is used to verify that
 * the software instruction decoding is in agreement with the hardware.
- *
+ *
 * Some hardware assists do not provide the 'gla' to the hypervisor.
 * To skip the 'gla' verification for this or any other reason pass
 * in VIE_INVALID_GLA instead.
@@ -89,7 +89,7 @@
 * The number of PHYSSEG entries must be one greater than the number
 * of phys_avail entries because the phys_avail entry that spans the
 * largest physical address that is accessible by ISA DMA is split
- * into two PHYSSEG entries.
+ * into two PHYSSEG entries.
 */
#define	VM_PHYSSEG_MAX		63

@@ -118,7 +118,7 @@ pci_docfgregread(int domain, int bus, int slot, int func, int reg, int bytes)
		return (-1);
}

-/*
+/*
 * Read configuration space register
 */
u_int32_t
@@ -144,8 +144,8 @@ pci_cfgregread(int domain, int bus, int slot, int func, int reg, int bytes)
	return (pci_docfgregread(domain, bus, slot, func, reg, bytes));
}

-/*
- * Write configuration space register
+/*
+ * Write configuration space register
 */
void
pci_cfgregwrite(int domain, int bus, int slot, int func, int reg, uint32_t data,
@@ -171,7 +171,7 @@ pci_cfgregwrite(int domain, int bus, int slot, int func, int reg, uint32_t data,
	pcireg_cfgwrite(bus, slot, func, reg, data, bytes);
}

-/*
+/*
 * Configuration space access using direct register operations
 */

@@ -184,7 +184,7 @@ pci_cfgenable(unsigned bus, unsigned slot, unsigned func, int reg, int bytes)
	if (bus <= PCI_BUSMAX && slot <= PCI_SLOTMAX && func <= PCI_FUNCMAX &&
	    (unsigned)reg <= PCI_REGMAX && bytes != 3 &&
	    (unsigned)bytes <= 4 && (reg & (bytes - 1)) == 0) {
-		outl(CONF1_ADDR_PORT, (1U << 31) | (bus << 16) | (slot << 11)
+		outl(CONF1_ADDR_PORT, (1U << 31) | (bus << 16) | (slot << 11)
		    | (func << 8) | (reg & ~0x03));
		dataport = CONF1_DATA_PORT + (reg & 0x03);
	}
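For context, the outl() line in this hunk composes a PCI configuration mechanism #1 address. A standalone sketch of the same dword layout (field positions follow the PCI spec and the line shown above; range validation is assumed to happen first, as pci_cfgenable() does):

    #include <stdint.h>

    static uint32_t
    conf1_addr(unsigned bus, unsigned slot, unsigned func, unsigned reg)
    {
            return ((1U << 31) |            /* enable configuration cycle */
                ((uint32_t)bus << 16) |     /* bus number, 8 bits */
                ((uint32_t)slot << 11) |    /* device, 5 bits */
                ((uint32_t)func << 8) |     /* function, 3 bits */
                (reg & ~0x03U));            /* dword-aligned register */
    }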
@@ -71,7 +71,7 @@
 *  User    .-- EENTER  -- Go to entry point of enclave
 *  space   |   EEXIT   -- Exit back to main application
 *  ENCLU   '-- ERESUME -- Resume enclave execution (e.g. after exception)
- *
+ *
 * Enclave lifecycle from driver point of view:
 *  1) User calls mmap() on /dev/sgx: we allocate a VM object
 *  2) User calls ioctl SGX_IOC_ENCLAVE_CREATE: we look for the VM object
@@ -928,8 +928,8 @@ amdvi_teardown_hw(struct amdvi_softc *softc)

	dev = softc->dev;

-	/*
-	 * Called after disable, h/w is stopped by now, free all the resources.
+	/*
+	 * Called after disable, h/w is stopped by now, free all the resources.
	 */
	amdvi_free_evt_intr_res(dev);

@@ -211,8 +211,8 @@ struct amdvi_ctrl {
		uint64_t limit:40;
		uint16_t :12;
	} excl;
-	/*
-	 * Revision 2 only.
+	/*
+	 * Revision 2 only.
	 */
	uint64_t ex_feature;
	struct {
@@ -253,8 +253,8 @@ CTASSERT(offsetof(struct amdvi_ctrl, pad2)== 0x2028);
CTASSERT(offsetof(struct amdvi_ctrl, pad3)== 0x2040);

#define AMDVI_MMIO_V1_SIZE	(4 * PAGE_SIZE)	/* v1 size */
-/*
- * AMF IOMMU v2 size including event counters
+/*
+ * AMF IOMMU v2 size including event counters
 */
#define AMDVI_MMIO_V2_SIZE	(8 * PAGE_SIZE)

@@ -51,7 +51,7 @@

device_t *ivhd_devs;			/* IVHD or AMD-Vi device list. */
int	ivhd_count;			/* Number of IVHD header. */
-/*
+/*
 * Cached IVHD header list.
 * Single entry for each IVHD, filtered the legacy one.
 */
@@ -225,7 +225,7 @@ ivhd_dev_parse(ACPI_IVRS_HARDWARE1 *ivhd, struct amdvi_softc *softc)
		break;

	default:
-		device_printf(softc->dev,
+		device_printf(softc->dev,
		    "unknown type: 0x%x\n", ivhd->Header.Type);
		return (-1);
	}
@@ -366,7 +366,7 @@ ivhd_identify(driver_t *driver, device_t parent)
	ivrs_ivinfo = ivrs->Info;
	printf("AMD-Vi: IVRS Info VAsize = %d PAsize = %d GVAsize = %d"
	    " flags:%b\n",
-	    REG_BITS(ivrs_ivinfo, 21, 15), REG_BITS(ivrs_ivinfo, 14, 8),
+	    REG_BITS(ivrs_ivinfo, 21, 15), REG_BITS(ivrs_ivinfo, 14, 8),
	    REG_BITS(ivrs_ivinfo, 7, 5), REG_BITS(ivrs_ivinfo, 22, 22),
	    "\020\001EFRSup");

@@ -439,7 +439,7 @@ ivhd_probe(device_t dev)
		return (ENXIO);

	unit = device_get_unit(dev);
-	KASSERT((unit < ivhd_count),
+	KASSERT((unit < ivhd_count),
	    ("ivhd unit %d > count %d", unit, ivhd_count));
	ivhd = ivhd_hdrs[unit];
	KASSERT(ivhd, ("ivhd is NULL"));
@@ -506,7 +506,7 @@ ivhd_print_flag(device_t dev, enum IvrsType ivhd_type, uint8_t flag)
 * Feature in legacy IVHD type(0x10) and attribute in newer type(0x11 and 0x40).
 */
static void
-ivhd_print_feature(device_t dev, enum IvrsType ivhd_type, uint32_t feature)
+ivhd_print_feature(device_t dev, enum IvrsType ivhd_type, uint32_t feature)
{
	switch (ivhd_type) {
	case IVRS_TYPE_HARDWARE_LEGACY:
@@ -639,7 +639,7 @@ ivhd_attach(device_t dev)
	int status, unit;

	unit = device_get_unit(dev);
-	KASSERT((unit < ivhd_count),
+	KASSERT((unit < ivhd_count),
	    ("ivhd unit %d > count %d", unit, ivhd_count));
	/* Make sure its same device for which attach is called. */
	KASSERT((ivhd_devs[unit] == dev),
@@ -658,12 +658,12 @@ ivhd_attach(device_t dev)
	softc->pci_seg = ivhd->PciSegmentGroup;
	softc->pci_rid = ivhd->Header.DeviceId;
	softc->ivhd_flag = ivhd->Header.Flags;
-	/*
+	/*
	 * On lgeacy IVHD type(0x10), it is documented as feature
	 * but in newer type it is attribute.
	 */
	softc->ivhd_feature = ivhd->FeatureReporting;
-	/*
+	/*
	 * PCI capability has more capabilities that are not part of IVRS.
	 */
	softc->cap_off = ivhd->CapabilityOffset;
@@ -694,7 +694,7 @@ ivhd_attach(device_t dev)

	status = amdvi_setup_hw(softc);
	if (status != 0) {
-		device_printf(dev, "couldn't be initialised, error=%d\n",
+		device_printf(dev, "couldn't be initialised, error=%d\n",
		    status);
		goto fail;
	}
@@ -58,7 +58,7 @@ svm_npt_init(int ipinum)
	npt_flags = ipinum & NPT_IPIMASK;
	TUNABLE_INT_FETCH("hw.vmm.npt.enable_superpage", &enable_superpage);
	if (enable_superpage)
-		npt_flags |= PMAP_PDE_SUPERPAGE;
+		npt_flags |= PMAP_PDE_SUPERPAGE;

	return (0);
}
@@ -282,7 +282,7 @@ svm_modresume(void)
{

	svm_enable(NULL);
-}
+}

#ifdef BHYVE_SNAPSHOT
void
@@ -301,14 +301,14 @@ svm_set_tsc_offset(struct svm_vcpu *vcpu, uint64_t offset)
#endif

/* Pentium compatible MSRs */
-#define MSR_PENTIUM_START	0
+#define MSR_PENTIUM_START	0
#define MSR_PENTIUM_END		0x1FFF
/* AMD 6th generation and Intel compatible MSRs */
-#define MSR_AMD6TH_START	0xC0000000UL
-#define MSR_AMD6TH_END		0xC0001FFFUL
+#define MSR_AMD6TH_START	0xC0000000UL
+#define MSR_AMD6TH_END		0xC0001FFFUL
/* AMD 7th and 8th generation compatible MSRs */
-#define MSR_AMD7TH_START	0xC0010000UL
-#define MSR_AMD7TH_END		0xC0011FFFUL
+#define MSR_AMD7TH_START	0xC0010000UL
+#define MSR_AMD7TH_END		0xC0011FFFUL

/*
 * Get the index and bit position for a MSR in permission bitmap.
@@ -328,12 +328,12 @@ svm_msr_index(uint64_t msr, int *index, int *bit)
		return (0);
	}

-	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
+	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
	if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
-		off = (msr - MSR_AMD6TH_START);
+		off = (msr - MSR_AMD6TH_START);
		*index = (off + base) / 4;
		return (0);
-	}
+	}

	base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
	if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
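The function in the two hunks above maps an MSR number to a position in the SVM MSR permission bitmap, where each MSR occupies two bits (read and write intercept) and the three #defined ranges are laid out back to back. A condensed sketch consistent with the lines shown (the /4 and *2 arithmetic reflects the standard MSRPM layout from the AMD manuals; this is an illustration, not the verbatim svm_msr_index()):

    #include <stdint.h>

    #define MSR_PENTIUM_START       0x0UL
    #define MSR_PENTIUM_END         0x1FFFUL
    #define MSR_AMD6TH_START        0xC0000000UL
    #define MSR_AMD6TH_END          0xC0001FFFUL

    /* Byte index and bit offset of an MSR's read-intercept bit. */
    static int
    msrpm_index(uint64_t msr, uint64_t *index, int *bit)
    {
            uint64_t base, off;

            if (msr <= MSR_PENTIUM_END) {
                    base = 0;
                    off = msr - MSR_PENTIUM_START;
            } else if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
                    base = MSR_PENTIUM_END - MSR_PENTIUM_START + 1;
                    off = msr - MSR_AMD6TH_START;
            } else
                    return (-1);            /* AMD7TH range elided for brevity */

            *index = (off + base) / 4;      /* four 2-bit entries per byte */
            *bit = ((off + base) % 4) * 2;  /* write-intercept bit is +1 */
            return (0);
    }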
@@ -852,7 +852,7 @@ svm_npf_emul_fault(uint64_t exitinfo1)
		return (false);
	}

-	return (true);
+	return (true);
}

static void
@@ -893,7 +893,7 @@ svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
	default:
		vmexit->u.inst_emul.cs_base = 0;
		vmexit->u.inst_emul.cs_d = 0;
-		break;
+		break;
	}

	/*
@@ -994,7 +994,7 @@ svm_save_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu)
	uint64_t intinfo;

	ctrl = svm_get_vmcb_ctrl(vcpu);
-	intinfo = ctrl->exitintinfo;
+	intinfo = ctrl->exitintinfo;
	if (!VMCB_EXITINTINFO_VALID(intinfo))
		return;

@@ -1533,7 +1533,7 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
			eax = state->rax;
			ecx = ctx->sctx_rcx;
			edx = ctx->sctx_rdx;
-			retu = false;
+			retu = false;

			if (info1) {
				vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1);
@@ -1667,7 +1667,7 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
	default:
		vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1);
		break;
-	}
+	}

	SVM_CTR4(vcpu, "%s %s vmexit at %#lx/%d",
	    handled ? "handled" : "unhandled", exit_reason_to_str(code),
@@ -2231,7 +2231,7 @@ svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
	/* Restore host LDTR. */
	lldt(ldt_sel);

-	/* #VMEXIT disables interrupts so re-enable them here. */
+	/* #VMEXIT disables interrupts so re-enable them here. */
	enable_gintr();

	/* Update 'nextrip' */
@@ -59,7 +59,7 @@ static uint64_t host_msrs[HOST_MSR_NUM];
void
svm_msr_init(void)
{
-	/*
+	/*
	 * It is safe to cache the values of the following MSRs because they
	 * don't change based on curcpu, curproc or curthread.
	 */
@@ -137,7 +137,7 @@ ept_dump(uint64_t *ptp, int nlevels)

		if (ptpval == 0)
			continue;
-
+
		for (t = 0; t < tabs; t++)
			printf("\t");
		printf("%3d 0x%016lx\n", i, ptpval);
@@ -3665,7 +3665,7 @@ vmx_setcap(void *vcpui, int type, int val)
		vlapic = vm_lapic(vcpu->vcpu);
		vlapic->ipi_exit = val;
		break;
-	case VM_CAP_MASK_HWINTR:
+	case VM_CAP_MASK_HWINTR:
		retval = 0;
		break;
	default:
@@ -257,7 +257,7 @@ ppt_teardown_msi(struct pptdev *ppt)

		if (res != NULL)
			bus_release_resource(ppt->dev, SYS_RES_IRQ, rid, res);
-
+
		ppt->msi.res[i] = NULL;
		ppt->msi.cookie[i] = NULL;
	}
@@ -268,7 +268,7 @@ ppt_teardown_msi(struct pptdev *ppt)
	ppt->msi.num_msgs = 0;
}

-static void
+static void
ppt_teardown_msix_intr(struct pptdev *ppt, int idx)
{
	int rid;
@@ -279,25 +279,25 @@ ppt_teardown_msix_intr(struct pptdev *ppt, int idx)
	res = ppt->msix.res[idx];
	cookie = ppt->msix.cookie[idx];

-	if (cookie != NULL)
+	if (cookie != NULL)
		bus_teardown_intr(ppt->dev, res, cookie);

-	if (res != NULL)
+	if (res != NULL)
		bus_release_resource(ppt->dev, SYS_RES_IRQ, rid, res);

	ppt->msix.res[idx] = NULL;
	ppt->msix.cookie[idx] = NULL;
}

-static void
+static void
ppt_teardown_msix(struct pptdev *ppt)
{
	int i;

-	if (ppt->msix.num_msgs == 0)
+	if (ppt->msix.num_msgs == 0)
		return;

-	for (i = 0; i < ppt->msix.num_msgs; i++)
+	for (i = 0; i < ppt->msix.num_msgs; i++)
		ppt_teardown_msix_intr(ppt, i);

	free(ppt->msix.res, M_PPTMSIX);
@@ -307,14 +307,14 @@ ppt_teardown_msix(struct pptdev *ppt)
	pci_release_msi(ppt->dev);

	if (ppt->msix.msix_table_res) {
-		bus_release_resource(ppt->dev, SYS_RES_MEMORY,
+		bus_release_resource(ppt->dev, SYS_RES_MEMORY,
		    ppt->msix.msix_table_rid,
		    ppt->msix.msix_table_res);
		ppt->msix.msix_table_res = NULL;
		ppt->msix.msix_table_rid = 0;
	}
	if (ppt->msix.msix_pba_res) {
-		bus_release_resource(ppt->dev, SYS_RES_MEMORY,
+		bus_release_resource(ppt->dev, SYS_RES_MEMORY,
		    ppt->msix.msix_pba_rid,
		    ppt->msix.msix_pba_res);
		ppt->msix.msix_pba_res = NULL;
@@ -678,10 +678,10 @@ ppt_setup_msix(struct vm *vm, int bus, int slot, int func,
		return (EBUSY);

	dinfo = device_get_ivars(ppt->dev);
-	if (!dinfo)
+	if (!dinfo)
		return (ENXIO);

-	/*
+	/*
	 * First-time configuration:
	 *  Allocate the MSI-X table
	 *  Allocate the IRQ resources
@@ -231,7 +231,7 @@ vhpet_timer_interrupt(struct vhpet *vhpet, int n)
		lapic_intr_msi(vhpet->vm, vhpet->timer[n].msireg >> 32,
		    vhpet->timer[n].msireg & 0xffffffff);
		return;
-	}
+	}

	pin = vhpet_timer_ioapic_pin(vhpet, n);
	if (pin == 0) {
@@ -493,7 +493,7 @@ vhpet_mmio_write(struct vcpu *vcpu, uint64_t gpa, uint64_t val, int size,
		if ((offset & 0x4) != 0) {
			mask <<= 32;
			data <<= 32;
-		}
+		}
		break;
	default:
		VM_CTR2(vhpet->vm, "hpet invalid mmio write: "
@@ -647,7 +647,7 @@ vhpet_mmio_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size,

	if (offset == HPET_CAPABILITIES || offset == HPET_CAPABILITIES + 4) {
		data = vhpet_capabilities();
-		goto done;
+		goto done;
	}

	if (offset == HPET_CONFIG || offset == HPET_CONFIG + 4) {
@@ -383,7 +383,7 @@ vlapic_lvt_write_handler(struct vlapic *vlapic, uint32_t offset)
	int idx;

	lapic = vlapic->apic_page;
-	lvtptr = vlapic_get_lvtptr(vlapic, offset);
+	lvtptr = vlapic_get_lvtptr(vlapic, offset);
	val = *lvtptr;
	idx = lvt_off_to_idx(offset);

@@ -804,7 +804,7 @@ vlapic_icrtmr_write_handler(struct vlapic *vlapic)
/*
 * This function populates 'dmask' with the set of vcpus that match the
 * addressing specified by the (dest, phys, lowprio) tuple.
- *
+ *
 * 'x2apic_dest' specifies whether 'dest' is interpreted as x2APIC (32-bit)
 * or xAPIC (8-bit) destination field.
 */
@@ -1251,7 +1251,7 @@ vlapic_pending_intr(struct vlapic *vlapic, int *vecptr)
			if (vecptr != NULL)
				*vecptr = vector;
			return (1);
-		} else
+		} else
			break;
	}
}
@@ -1269,7 +1269,7 @@ vlapic_intr_accepted(struct vlapic *vlapic, int vector)
		return ((*vlapic->ops.intr_accepted)(vlapic, vector));

	/*
-	 * clear the ready bit for vector being accepted in irr
+	 * clear the ready bit for vector being accepted in irr
	 * and set the vector as in service in isr.
	 */
	idx = (vector / 32) * 4;
@@ -1409,17 +1409,17 @@ vlapic_read(struct vlapic *vlapic, int mmio_access, uint64_t offset,
	case APIC_OFFSET_ESR:
		*data = lapic->esr;
		break;
-	case APIC_OFFSET_ICR_LOW:
+	case APIC_OFFSET_ICR_LOW:
		*data = lapic->icr_lo;
		if (x2apic(vlapic))
			*data |= (uint64_t)lapic->icr_hi << 32;
		break;
-	case APIC_OFFSET_ICR_HI:
+	case APIC_OFFSET_ICR_HI:
		*data = lapic->icr_hi;
		break;
	case APIC_OFFSET_CMCI_LVT:
	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
-		*data = vlapic_get_lvt(vlapic, offset);
+		*data = vlapic_get_lvt(vlapic, offset);
#ifdef INVARIANTS
		reg = vlapic_get_lvtptr(vlapic, offset);
		KASSERT(*data == *reg, ("inconsistent lvt value at "
@@ -1509,7 +1509,7 @@ vlapic_write(struct vlapic *vlapic, int mmio_access, uint64_t offset,
		lapic->svr = data;
		vlapic_svr_write_handler(vlapic);
		break;
-	case APIC_OFFSET_ICR_LOW:
+	case APIC_OFFSET_ICR_LOW:
		lapic->icr_lo = data;
		if (x2apic(vlapic))
			lapic->icr_hi = data >> 32;
@@ -346,7 +346,7 @@ rtc_to_secs(struct vrtc *vrtc)

	/*
	 * Ignore 'rtc->dow' because some guests like Linux don't bother
-	 * setting it at all while others like OpenBSD/i386 set it incorrectly.
+	 * setting it at all while others like OpenBSD/i386 set it incorrectly.
	 *
	 * clock_ct_to_ts() does not depend on 'ct.dow' anyways so ignore it.
	 */
@@ -355,7 +355,7 @@ vcpu_cleanup(struct vcpu *vcpu, bool destroy)
	vmmops_vcpu_cleanup(vcpu->cookie);
	vcpu->cookie = NULL;
	if (destroy) {
-		vmm_stat_free(vcpu->stats);
+		vmm_stat_free(vcpu->stats);
		fpu_save_area_free(vcpu->guestfpu);
		vcpu_lock_destroy(vcpu);
		free(vcpu, M_VM);
@@ -2458,7 +2458,7 @@ vmm_is_pptdev(int bus, int slot, int func)
			found = true;
			break;
		}
-
+
		if (cp2 != NULL)
			*cp2++ = ' ';

@@ -2887,7 +2887,7 @@ vm_get_rescnt(struct vcpu *vcpu, struct vmm_stat_type *stat)
	if (vcpu->vcpuid == 0) {
		vmm_stat_set(vcpu, VMM_MEM_RESIDENT, PAGE_SIZE *
		    vmspace_resident_count(vcpu->vm->vmspace));
-	}
+	}
}

static void
@@ -2897,7 +2897,7 @@ vm_get_wiredcnt(struct vcpu *vcpu, struct vmm_stat_type *stat)
	if (vcpu->vcpuid == 0) {
		vmm_stat_set(vcpu, VMM_MEM_WIRED, PAGE_SIZE *
		    pmap_wired_count(vmspace_pmap(vcpu->vm->vmspace)));
-	}
+	}
}

VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);
@@ -583,7 +583,7 @@ emulate_mov(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
		/*
		 * MOV from AX/EAX/RAX to seg:moffset
		 * A3:		mov moffs16, AX
-		 * A3:		mov moffs32, EAX
+		 * A3:		mov moffs32, EAX
		 * REX.W + A3:	mov moffs64, RAX
		 */
		error = vie_read_register(vcpu, VM_REG_GUEST_RAX, &val);
@@ -1118,7 +1118,7 @@ emulate_or(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
		error = vie_read_register(vcpu, reg, &val1);
		if (error)
			break;
-
+
		/* get the second operand */
		error = memread(vcpu, gpa, &val2, size, arg);
		if (error)
@@ -1468,7 +1468,7 @@ emulate_sub(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
	case 0x2B:
		/*
		 * SUB r/m from r and store the result in r
-		 *
+		 *
		 * 2B/r		SUB r16, r/m16
		 * 2B/r		SUB r32, r/m32
		 * REX.W + 2B/r	SUB r64, r/m64
@@ -1912,7 +1912,7 @@ vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg,
		if (SEG_DESC_UNUSABLE(desc->access))
			return (-1);

-		/*
+		/*
		 * The processor generates a #NP exception when a segment
		 * register is loaded with a selector that points to a
		 * descriptor that is not present.  If this was the case then
@@ -95,7 +95,7 @@ vmm_mmio_alloc(struct vmspace *vmspace, vm_paddr_t gpa, size_t len,
	 * has incremented the reference count on the sglist.  Dropping the
	 * initial reference count ensures that the sglist will be freed
	 * when the object is deallocated.
-	 *
+	 *
	 * If the object could not be allocated then we end up freeing the
	 * sglist.
	 */