From f3754afd5901857787271e73f9c34d3b9069a03f Mon Sep 17 00:00:00 2001
From: Joshua Rogers
Date: Thu, 12 Sep 2024 18:35:12 +0200
Subject: [PATCH] Remove stray whitespaces from sys/amd64/

Signed-off-by: Joshua Rogers
Reviewed by: imp
Pull Request: https://github.com/freebsd/freebsd-src/pull/1418
---
 sys/amd64/amd64/bios.c                   |  4 +--
 sys/amd64/amd64/cpu_switch.S             |  2 +-
 sys/amd64/amd64/db_disasm.c              |  2 +-
 sys/amd64/amd64/exception.S              |  2 +-
 sys/amd64/amd64/fpu.c                    |  2 +-
 sys/amd64/amd64/initcpu.c                |  8 +++---
 sys/amd64/amd64/machdep.c                |  2 +-
 sys/amd64/amd64/mem.c                    |  6 ++---
 sys/amd64/amd64/mpboot.S                 |  6 ++---
 sys/amd64/amd64/pmap.c                   | 32 ++++++++++++------------
 sys/amd64/amd64/sigtramp.S               |  2 +-
 sys/amd64/amd64/sys_machdep.c            |  2 +-
 sys/amd64/amd64/trap.c                   |  2 +-
 sys/amd64/amd64/vm_machdep.c             |  2 +-
 sys/amd64/include/asm.h                  |  2 +-
 sys/amd64/include/bus_dma.h              |  2 +-
 sys/amd64/include/param.h                |  4 +--
 sys/amd64/include/pmap.h                 |  2 +-
 sys/amd64/include/resource.h             |  2 +-
 sys/amd64/include/sf_buf.h               |  2 +-
 sys/amd64/include/vmm.h                  |  4 +--
 sys/amd64/include/vmm_dev.h              |  2 +-
 sys/amd64/include/vmm_instruction_emul.h |  2 +-
 sys/amd64/include/vmparam.h              |  2 +-
 sys/amd64/pci/pci_cfgreg.c               | 10 ++++----
 sys/amd64/sgx/sgx.c                      |  2 +-
 sys/amd64/vmm/amd/amdvi_hw.c             |  4 +--
 sys/amd64/vmm/amd/amdvi_priv.h           |  8 +++---
 sys/amd64/vmm/amd/ivrs_drv.c             | 18 ++++++-------
 sys/amd64/vmm/amd/npt.c                  |  2 +-
 sys/amd64/vmm/amd/svm.c                  | 30 +++++++++++-----------
 sys/amd64/vmm/amd/svm_msr.c              |  2 +-
 sys/amd64/vmm/intel/ept.c                |  2 +-
 sys/amd64/vmm/intel/vmx.c                |  2 +-
 sys/amd64/vmm/io/ppt.c                   | 22 ++++++++--------
 sys/amd64/vmm/io/vhpet.c                 |  6 ++---
 sys/amd64/vmm/io/vlapic.c                | 16 ++++++------
 sys/amd64/vmm/io/vrtc.c                  |  2 +-
 sys/amd64/vmm/vmm.c                      |  8 +++---
 sys/amd64/vmm/vmm_instruction_emul.c     |  8 +++---
 sys/amd64/vmm/vmm_mem.c                  |  2 +-
 41 files changed, 121 insertions(+), 121 deletions(-)

diff --git a/sys/amd64/amd64/bios.c b/sys/amd64/amd64/bios.c
index 4dbca4424aa4..3f53b80d6920 100644
--- a/sys/amd64/amd64/bios.c
+++ b/sys/amd64/amd64/bios.c
@@ -50,7 +50,7 @@
  *
  * Search some or all of the BIOS region for a signature string.
  *
- * (start)	Optional offset returned from this function 
+ * (start)	Optional offset returned from this function
  *		(for searching for multiple matches), or NULL
  *		to start the search from the base of the BIOS.
  *		Note that this will be a _physical_ address in
@@ -68,7 +68,7 @@ u_int32_t
 bios_sigsearch(u_int32_t start, u_char *sig, int siglen, int paralen, int sigofs)
 {
 	u_char	*sp, *end;
-	
+
 	/* compute the starting address */
 	if ((start >= BIOS_START) && (start <= (BIOS_START + BIOS_SIZE))) {
 		sp = (char *)BIOS_PADDRTOVADDR(start);
diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S
index 2a3eced01ab3..32d1b91d50b4 100644
--- a/sys/amd64/amd64/cpu_switch.S
+++ b/sys/amd64/amd64/cpu_switch.S
@@ -386,7 +386,7 @@ END(savectx)
 /*
  * resumectx(pcb)
  * Resuming processor state from pcb.
- */ 
+ */
 ENTRY(resumectx)
 	/* Switch to KPML5/4phys. */
 	movq	KPML4phys,%rax
diff --git a/sys/amd64/amd64/db_disasm.c b/sys/amd64/amd64/db_disasm.c
index bca12894efdf..5f658fb3b0d0 100644
--- a/sys/amd64/amd64/db_disasm.c
+++ b/sys/amd64/amd64/db_disasm.c
@@ -1391,7 +1391,7 @@ db_disasm(db_addr_t loc, bool altfmt)
 		case 0xc8:
 			i_name = "monitor";
 			i_size = NONE;
-			i_mode = 0; 
+			i_mode = 0;
 			break;
 		case 0xc9:
 			i_name = "mwait";
diff --git a/sys/amd64/amd64/exception.S b/sys/amd64/amd64/exception.S
index 7b6cde824203..c3d5819378d1 100644
--- a/sys/amd64/amd64/exception.S
+++ b/sys/amd64/amd64/exception.S
@@ -114,7 +114,7 @@ dtrace_invop_calltrap_addr:
 	movq	$0,TF_ADDR(%rsp)
 	movq	$0,TF_ERR(%rsp)
 	jmp	alltraps_noen_u
-	
+
 	.globl	X\l
 	.type	X\l,@function
 X\l:
diff --git a/sys/amd64/amd64/fpu.c b/sys/amd64/amd64/fpu.c
index ebc8b869b368..1c38949f7375 100644
--- a/sys/amd64/amd64/fpu.c
+++ b/sys/amd64/amd64/fpu.c
@@ -538,7 +538,7 @@ fpuformat(void)
  *	(FP_X_INV, FP_X_DZ)
  *  4	Denormal operand (FP_X_DNML)
  *  5	Numeric over/underflow (FP_X_OFL, FP_X_UFL)
- *  6	Inexact result (FP_X_IMP) 
+ *  6	Inexact result (FP_X_IMP)
  */
 static char fpetable[128] = {
 	0,
diff --git a/sys/amd64/amd64/initcpu.c b/sys/amd64/amd64/initcpu.c
index c5266ffcc235..05e482f7783b 100644
--- a/sys/amd64/amd64/initcpu.c
+++ b/sys/amd64/amd64/initcpu.c
@@ -2,21 +2,21 @@
  * SPDX-License-Identifier: BSD-2-Clause
  *
  * Copyright (c) KATO Takenori, 1997, 1998.
- * 
+ *
  * All rights reserved.  Unpublished rights reserved under the copyright
  * laws of Japan.
- * 
+ *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
- * 
+ *
 * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer as
  *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
- * 
+ *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index 5dbc41553452..e2ae3843a119 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -229,7 +229,7 @@ cpu_startup(void *dummy)
 	 * namely: incorrect CPU frequency detection and failure to
 	 * start the APs.
 	 * We do this by disabling a bit in the SMI_EN (SMI Control and
-	 * Enable register) of the Intel ICH LPC Interface Bridge. 
+	 * Enable register) of the Intel ICH LPC Interface Bridge.
 	 */
 	sysenv = kern_getenv("smbios.system.product");
 	if (sysenv != NULL) {
diff --git a/sys/amd64/amd64/mem.c b/sys/amd64/amd64/mem.c
index 25d22805ed3e..413b7c74890e 100644
--- a/sys/amd64/amd64/mem.c
+++ b/sys/amd64/amd64/mem.c
@@ -191,7 +191,7 @@ memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.
 */
-int 
+int
 memioctl_md(struct cdev *dev __unused, u_long cmd, caddr_t data, int flags,
     struct thread *td)
 {
@@ -221,7 +221,7 @@ memioctl_md(struct cdev *dev __unused, u_long cmd, caddr_t data, int flags,
 			    M_MEMDESC, M_WAITOK);
 			error = mem_range_attr_get(md, &nd);
 			if (!error)
-				error = copyout(md, mo->mo_desc, 
+				error = copyout(md, mo->mo_desc,
 				    nd * sizeof(struct mem_range_desc));
 			free(md, M_MEMDESC);
 		}
@@ -229,7 +229,7 @@ memioctl_md(struct cdev *dev __unused, u_long cmd, caddr_t data, int flags,
 			nd = mem_range_softc.mr_ndesc;
 		mo->mo_arg[0] = nd;
 		break;
-		
+
 	case MEMRANGE_SET:
 		md = (struct mem_range_desc *)malloc(sizeof(struct mem_range_desc),
 		    M_MEMDESC, M_WAITOK);
diff --git a/sys/amd64/amd64/mpboot.S b/sys/amd64/amd64/mpboot.S
index db1f3de209c7..e3d940088f14 100644
--- a/sys/amd64/amd64/mpboot.S
+++ b/sys/amd64/amd64/mpboot.S
@@ -62,7 +62,7 @@ mptramp_start:
 
 	/* Enable protected mode */
 	movl	$CR0_PE, %eax
-	mov	%eax, %cr0 
+	mov	%eax, %cr0
 
 	/*
 	 * Now execute a far jump to turn on protected mode.  This
@@ -207,7 +207,7 @@
 * are interpreted slightly differently.
 *   %ds: +A, +W, -E, DPL=0, +P, +D, +G
 *   %ss: +A, +W, -E, DPL=0, +P, +B, +G
- * Accessed, Writeable, Expand up, Present, 32 bit, 4GB 
+ * Accessed, Writeable, Expand up, Present, 32 bit, 4GB
 * For %ds, +D means 'default operand size is 32 bit'.
 * For %ss, +B means the stack register is %esp rather than %sp.
 */
@@ -237,7 +237,7 @@ mptramp_nx:
 /*
  * The pseudo descriptor for lgdt to use.
  */
-lgdt_desc: 
+lgdt_desc:
 	.word	gdtend-gdt		/* Length */
 	.long	gdt-mptramp_start	/* Offset plus %ds << 4 */
 
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 4cb833757489..3b7220369b61 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -1824,7 +1824,7 @@ create_pagetables(vm_paddr_t *firstaddr)
 	 * then the residual physical memory is mapped with 2MB pages.  Later,
 	 * if pmap_mapdev{_attr}() uses the direct map for non-write-back
 	 * memory, pmap_change_attr() will demote any 2MB or 1GB page mappings
-	 * that are partially used. 
+	 * that are partially used.
 	 */
 	pd_p = (pd_entry_t *)DMPDphys;
 	for (i = NPDEPG * ndm1g, j = 0; i < NPDEPG * ndmpdp; i++, j++) {
@@ -2279,7 +2279,7 @@ pmap_bootstrap_la57(void *arg __unused)
 	vm_page_free(m_pd);
 	vm_page_free(m_pt);
 
-	/* 
+	/*
 	 * Recursively map PML5 to itself in order to get PTmap and
 	 * PDmap.
 	 */
@@ -2510,7 +2510,7 @@ pmap_init(void)
 	/*
 	 * Initialize the vm page array entries for the kernel pmap's
 	 * page table pages.
-	 */ 
+	 */
 	PMAP_LOCK(kernel_pmap);
 	for (i = 0; i < nkpt; i++) {
 		mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
@@ -3461,12 +3461,12 @@ pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
 	cpuid = PCPU_GET(cpuid);
 	other_cpus = all_cpus;
 	CPU_CLR(cpuid, &other_cpus);
-	if (pmap == kernel_pmap || pmap_type_guest(pmap)) 
+	if (pmap == kernel_pmap || pmap_type_guest(pmap))
 		active = all_cpus;
 	else {
 		active = pmap->pm_active;
 	}
-	if (CPU_OVERLAP(&active, &other_cpus)) { 
+	if (CPU_OVERLAP(&active, &other_cpus)) {
 		act.store = cpuid;
 		act.invalidate = active;
 		act.va = va;
@@ -4226,7 +4226,7 @@ _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
 
 	pmap_pt_page_count_adj(pmap, -1);
 
-	/* 
+	/*
 	 * Put page on a list so that it is released after
 	 * *ALL* TLB shootdown is done
 	 */
@@ -4377,7 +4377,7 @@ pmap_pinit_pml5(vm_page_t pml5pg)
 	pm_pml5[pmap_pml5e_index(UPT_MAX_ADDRESS)] = KPML4phys | X86_PG_V |
 	    X86_PG_RW | X86_PG_A | X86_PG_M;
 
-	/* 
+	/*
 	 * Install self-referential address mapping entry.
 	 */
 	pm_pml5[PML5PML5I] = VM_PAGE_TO_PHYS(pml5pg) |
@@ -5179,7 +5179,7 @@ pmap_growkernel(vm_offset_t addr)
 			end = (end + NBPDR) & ~PDRMASK;
 			if (end - 1 >= vm_map_max(kernel_map)) {
 				end = vm_map_max(kernel_map);
-				break; 
+				break;
 			}
 			continue;
 		}
@@ -5196,7 +5196,7 @@ pmap_growkernel(vm_offset_t addr)
 		end = (end + NBPDR) & ~PDRMASK;
 		if (end - 1 >= vm_map_max(kernel_map)) {
 			end = vm_map_max(kernel_map);
-			break; 
+			break;
 		}
 	}
 
@@ -6146,7 +6146,7 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
 	 * PG_A set.  If the old PDE has PG_RW set, it also has PG_M
 	 * set.  Thus, there is no danger of a race with another
 	 * processor changing the setting of PG_A and/or PG_M between
-	 * the read above and the store below. 
+	 * the read above and the store below.
 	 */
 	if (workaround_erratum383)
 		pmap_update_pde(pmap, va, pde, newpde);
@@ -6276,7 +6276,7 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
 * pmap_remove_pte: do the things to unmap a page in a process
 */
 static int
-pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, 
+pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
     pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp)
 {
 	struct md_page *pvh;
@@ -6889,7 +6889,7 @@ pmap_pde_ept_executable(pmap_t pmap, pd_entry_t pde)
 * single page table page (PTP) to a single 2MB page mapping.  For promotion
 * to occur, two conditions must be met: (1) the 4KB page mappings must map
 * aligned, contiguous physical memory and (2) the 4KB page mappings must have
- * identical characteristics. 
+ * identical characteristics.
 */
 static bool
 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, vm_page_t mpte,
@@ -7237,7 +7237,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 		goto out;
 	}
 	if (psind == 1) {
-		/* Assert the required virtual and physical alignment. */ 
+		/* Assert the required virtual and physical alignment. */
 		KASSERT((va & PDRMASK) == 0, ("pmap_enter: va unaligned"));
 		KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
 		rv = pmap_enter_pde(pmap, va, newpte | PG_PS, flags, m, &lock);
@@ -7933,7 +7933,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
 		 * Map using 2MB pages.  Since "ptepa" is 2M aligned and
 		 * "size" is a multiple of 2M, adding the PAT setting to "pa"
 		 * will not affect the termination of this loop.
-		 */ 
+		 */
 		PMAP_LOCK(pmap);
 		for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, true);
 		    pa < ptepa + size; pa += NBPDR) {
@@ -8168,7 +8168,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 		srcptepaddr = *pde;
 		if (srcptepaddr == 0)
 			continue;
-			
+
 		if (srcptepaddr & PG_PS) {
 			/*
 			 * We can only virtual copy whole superpages.
@@ -8246,7 +8246,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 					pmap_abort_ptp(dst_pmap, addr, dstmpte);
 					goto out;
 				}
-				/* Have we copied all of the valid mappings? */ 
+				/* Have we copied all of the valid mappings? */
 				if (dstmpte->ref_count >= srcmpte->ref_count)
 					break;
 			}
diff --git a/sys/amd64/amd64/sigtramp.S b/sys/amd64/amd64/sigtramp.S
index 4676d5f67650..9aed66a199a1 100644
--- a/sys/amd64/amd64/sigtramp.S
+++ b/sys/amd64/amd64/sigtramp.S
@@ -68,7 +68,7 @@ ENTRY(__vdso_sigcode)
 	 *
 	 * LLVM libunwind from stable/13 cannot parse register numbers higher
 	 * than 32.  Disable %rflags, %fs.base, and %gs.base annotations.
-	 */ 
+	 */
 	.cfi_offset %fs, SIGF_UC + UC_FS
 	.cfi_offset %gs, SIGF_UC + UC_GS
 	.cfi_offset %es, SIGF_UC + UC_ES
diff --git a/sys/amd64/amd64/sys_machdep.c b/sys/amd64/amd64/sys_machdep.c
index c8bddb0acb19..70a369ec64a3 100644
--- a/sys/amd64/amd64/sys_machdep.c
+++ b/sys/amd64/amd64/sys_machdep.c
@@ -311,7 +311,7 @@ sysarch(struct thread *td, struct sysarch_args *uap)
 		error = copyout(&pcb->pcb_fsbase, uap->parms,
 		    sizeof(pcb->pcb_fsbase));
 		break;
-		
+
 	case AMD64_SET_FSBASE:
 		error = copyin(uap->parms, &a64base, sizeof(a64base));
 		if (error == 0) {
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index 67146240ba58..a6d6c792ee92 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -723,7 +723,7 @@ trap_pfault(struct trapframe *frame, bool usermode, int *signo, int *ucode)
 	 * Due to both processor errata and lazy TLB invalidation when
 	 * access restrictions are removed from virtual pages, memory
 	 * accesses that are allowed by the physical mapping layer may
-	 * nonetheless cause one spurious page fault per virtual page. 
+	 * nonetheless cause one spurious page fault per virtual page.
 	 * When the thread is executing a "no faulting" section that
 	 * is bracketed by vm_fault_{disable,enable}_pagefaults(),
 	 * every page fault is treated as a spurious page fault,
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index f6d52fa4c02b..f74091438648 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -606,7 +606,7 @@ cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
     stack_t *stack)
 {
 
-	/* 
+	/*
 	 * Do any extra cleaning that needs to be done.
 	 * The thread may have optional components
 	 * that are not present in a fresh thread.
diff --git a/sys/amd64/include/asm.h b/sys/amd64/include/asm.h
index 6a2e326d42aa..6535e44d5cef 100644
--- a/sys/amd64/include/asm.h
+++ b/sys/amd64/include/asm.h
@@ -92,7 +92,7 @@
 #define	END(x)		.size x, . - x; .cfi_endproc
 
 /*
- * WEAK_REFERENCE(): create a weak reference alias from sym. 
+ * WEAK_REFERENCE(): create a weak reference alias from sym.
 * The macro is not a general asm macro that takes arbitrary names,
 * but one that takes only C names.  It does the non-null name
 * translation inside the macro.
diff --git a/sys/amd64/include/bus_dma.h b/sys/amd64/include/bus_dma.h
index 82765dacc30a..30d819cc3425 100644
--- a/sys/amd64/include/bus_dma.h
+++ b/sys/amd64/include/bus_dma.h
@@ -29,6 +29,6 @@
 #ifndef _AMD64_BUS_DMA_H_
 #define _AMD64_BUS_DMA_H_
 
-#include <x86/bus_dma.h> 
+#include <x86/bus_dma.h>
 
 #endif /* _AMD64_BUS_DMA_H_ */
diff --git a/sys/amd64/include/param.h b/sys/amd64/include/param.h
index 660f69593709..8ad1c0e93c6a 100644
--- a/sys/amd64/include/param.h
+++ b/sys/amd64/include/param.h
@@ -78,7 +78,7 @@
 * ALIGNED_POINTER is a boolean macro that checks whether an address
 * is valid to fetch data elements of type t from on this architecture.
 * This does not reflect the optimal alignment, just the possibility
- * (within reasonable limits). 
+ * (within reasonable limits).
 */
 #define	ALIGNED_POINTER(p, t)	1
 
@@ -154,7 +154,7 @@
 #define	amd64_btop(x)	((unsigned long)(x) >> PAGE_SHIFT)
 #define	amd64_ptob(x)	((unsigned long)(x) << PAGE_SHIFT)
 
-#define	pgtok(x)	((unsigned long)(x) * (PAGE_SIZE / 1024)) 
+#define	pgtok(x)	((unsigned long)(x) * (PAGE_SIZE / 1024))
 
 #define INKERNEL(va) (((va) >= DMAP_MIN_ADDRESS && (va) < DMAP_MAX_ADDRESS) \
    || ((va) >= VM_MIN_KERNEL_ADDRESS && (va) < VM_MAX_KERNEL_ADDRESS))
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index 0819b3bc2945..adeb89d08bb5 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -140,7 +140,7 @@
 #define	PGEX_PK		0x20	/* protection key violation */
 #define	PGEX_SGX	0x8000	/* SGX-related */
 
-/* 
+/*
 * undef the PG_xx macros that define bits in the regular x86 PTEs that
 * have a different position in nested PTEs. This is done when compiling
 * code that needs to be aware of the differences between regular x86 and
diff --git a/sys/amd64/include/resource.h b/sys/amd64/include/resource.h
index 9477572176e0..d44fd653cc84 100644
--- a/sys/amd64/include/resource.h
+++ b/sys/amd64/include/resource.h
@@ -12,7 +12,7 @@
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
- * 
+ *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
diff --git a/sys/amd64/include/sf_buf.h b/sys/amd64/include/sf_buf.h
index 71e6b72acd1c..4a64928ae141 100644
--- a/sys/amd64/include/sf_buf.h
+++ b/sys/amd64/include/sf_buf.h
@@ -34,7 +34,7 @@
 * On this machine, the only purpose for which sf_buf is used is to implement
 * an opaque pointer required by the machine-independent parts of the kernel.
 * That pointer references the vm_page that is "mapped" by the sf_buf.  The
- * actual mapping is provided by the direct virtual-to-physical mapping. 
+ * actual mapping is provided by the direct virtual-to-physical mapping.
 */
 static inline vm_offset_t
 sf_buf_kva(struct sf_buf *sf)
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
index 37972d54bd99..be88fc867e98 100644
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -124,7 +124,7 @@ enum x2apic_state {
 /*
 * The VM name has to fit into the pathname length constraints of devfs,
 * governed primarily by SPECNAMELEN.  The length is the total number of
- * characters in the full path, relative to the mount point and not 
+ * characters in the full path, relative to the mount point and not
 * including any leading '/' characters.
 * A prefix and a suffix are added to the name specified by the user.
 * The prefix is usually "vmm/" or "vmm.io/", but can be a few characters
@@ -465,7 +465,7 @@ struct vm_copyinfo {
 /*
 * Set up 'copyinfo[]' to copy to/from guest linear address space starting
 * at 'gla' and 'len' bytes long.  The 'prot' should be set to PROT_READ for
- * a copyin or PROT_WRITE for a copyout. 
+ * a copyin or PROT_WRITE for a copyout.
 *
 * retval	is_fault	Interpretation
 *   0		0		Success
diff --git a/sys/amd64/include/vmm_dev.h b/sys/amd64/include/vmm_dev.h
index b77b0ef5d996..1f86538ce5f3 100644
--- a/sys/amd64/include/vmm_dev.h
+++ b/sys/amd64/include/vmm_dev.h
@@ -301,7 +301,7 @@ enum {
 	IOCNUM_UNMAP_PPTDEV_MMIO = 46,
 
 	/* statistics */
-	IOCNUM_VM_STATS = 50, 
+	IOCNUM_VM_STATS = 50,
 	IOCNUM_VM_STAT_DESC = 51,
 
 	/* kernel device state */
diff --git a/sys/amd64/include/vmm_instruction_emul.h b/sys/amd64/include/vmm_instruction_emul.h
index a21754ac777f..d5f0363cfb41 100644
--- a/sys/amd64/include/vmm_instruction_emul.h
+++ b/sys/amd64/include/vmm_instruction_emul.h
@@ -112,7 +112,7 @@ void vie_init(struct vie *vie, const char *inst_bytes, int inst_length);
 * 'gla' is the guest linear address provided by the hardware assist
 * that caused the nested page table fault. It is used to verify that
 * the software instruction decoding is in agreement with the hardware.
- * 
+ *
 * Some hardware assists do not provide the 'gla' to the hypervisor.
 * To skip the 'gla' verification for this or any other reason pass
 * in VIE_INVALID_GLA instead.
diff --git a/sys/amd64/include/vmparam.h b/sys/amd64/include/vmparam.h
index ad6bf28f1c3b..026da4b460e2 100644
--- a/sys/amd64/include/vmparam.h
+++ b/sys/amd64/include/vmparam.h
@@ -89,7 +89,7 @@
 * The number of PHYSSEG entries must be one greater than the number
 * of phys_avail entries because the phys_avail entry that spans the
 * largest physical address that is accessible by ISA DMA is split
- * into two PHYSSEG entries. 
+ * into two PHYSSEG entries.
 */
 #define	VM_PHYSSEG_MAX		63
 
diff --git a/sys/amd64/pci/pci_cfgreg.c b/sys/amd64/pci/pci_cfgreg.c
index 90e15b15553d..df3ea5659dd1 100644
--- a/sys/amd64/pci/pci_cfgreg.c
+++ b/sys/amd64/pci/pci_cfgreg.c
@@ -118,7 +118,7 @@ pci_docfgregread(int domain, int bus, int slot, int func, int reg, int bytes)
 		return (-1);
 }
 
-/* 
+/*
 * Read configuration space register
 */
 u_int32_t
@@ -144,8 +144,8 @@ pci_cfgregread(int domain, int bus, int slot, int func, int reg, int bytes)
 	return (pci_docfgregread(domain, bus, slot, func, reg, bytes));
 }
 
-/* 
- * Write configuration space register 
+/*
+ * Write configuration space register
 */
 void
 pci_cfgregwrite(int domain, int bus, int slot, int func, int reg, uint32_t data,
@@ -171,7 +171,7 @@ pci_cfgregwrite(int domain, int bus, int slot, int func, int reg, uint32_t data,
 	pcireg_cfgwrite(bus, slot, func, reg, data, bytes);
 }
 
-/* 
+/*
 * Configuration space access using direct register operations
 */
 
@@ -184,7 +184,7 @@ pci_cfgenable(unsigned bus, unsigned slot, unsigned func, int reg, int bytes)
 	if (bus <= PCI_BUSMAX && slot <= PCI_SLOTMAX && func <= PCI_FUNCMAX &&
 	    (unsigned)reg <= PCI_REGMAX && bytes != 3 &&
 	    (unsigned)bytes <= 4 && (reg & (bytes - 1)) == 0) {
-		outl(CONF1_ADDR_PORT, (1U << 31) | (bus << 16) | (slot << 11) 
+		outl(CONF1_ADDR_PORT, (1U << 31) | (bus << 16) | (slot << 11)
 		    | (func << 8) | (reg & ~0x03));
 		dataport = CONF1_DATA_PORT + (reg & 0x03);
 	}
diff --git a/sys/amd64/sgx/sgx.c b/sys/amd64/sgx/sgx.c
index b25862fd2a59..1ada2a189f3c 100644
--- a/sys/amd64/sgx/sgx.c
+++ b/sys/amd64/sgx/sgx.c
@@ -71,7 +71,7 @@
 *  User	.-- EENTER  -- Go to entry point of enclave
 *  space	| EEXIT   -- Exit back to main application
 *	ENCLU	'-- ERESUME -- Resume enclave execution (e.g. after exception)
- * 
+ *
 * Enclave lifecycle from driver point of view:
 *  1) User calls mmap() on /dev/sgx: we allocate a VM object
 *  2) User calls ioctl SGX_IOC_ENCLAVE_CREATE: we look for the VM object
diff --git a/sys/amd64/vmm/amd/amdvi_hw.c b/sys/amd64/vmm/amd/amdvi_hw.c
index e87d173023f8..881bfa2ef946 100644
--- a/sys/amd64/vmm/amd/amdvi_hw.c
+++ b/sys/amd64/vmm/amd/amdvi_hw.c
@@ -928,8 +928,8 @@ amdvi_teardown_hw(struct amdvi_softc *softc)
 
 	dev = softc->dev;
 
-	/* 
-	 * Called after disable, h/w is stopped by now, free all the resources. 
+	/*
+	 * Called after disable, h/w is stopped by now, free all the resources.
 	 */
 	amdvi_free_evt_intr_res(dev);
 
diff --git a/sys/amd64/vmm/amd/amdvi_priv.h b/sys/amd64/vmm/amd/amdvi_priv.h
index a48390f1c067..2a2646b6907e 100644
--- a/sys/amd64/vmm/amd/amdvi_priv.h
+++ b/sys/amd64/vmm/amd/amdvi_priv.h
@@ -211,8 +211,8 @@ struct amdvi_ctrl {
 		uint64_t limit:40;
 		uint16_t :12;
 	} excl;
-	/* 
-	 * Revision 2 only. 
+	/*
+	 * Revision 2 only.
 	 */
 	uint64_t ex_feature;
 	struct {
@@ -253,8 +253,8 @@ CTASSERT(offsetof(struct amdvi_ctrl, pad2)== 0x2028);
 CTASSERT(offsetof(struct amdvi_ctrl, pad3)== 0x2040);
 
 #define AMDVI_MMIO_V1_SIZE	(4 * PAGE_SIZE)	/* v1 size */
-/* 
- * AMF IOMMU v2 size including event counters 
+/*
+ * AMF IOMMU v2 size including event counters
 */
 #define AMDVI_MMIO_V2_SIZE	(8 * PAGE_SIZE)
 
diff --git a/sys/amd64/vmm/amd/ivrs_drv.c b/sys/amd64/vmm/amd/ivrs_drv.c
index 34aebc62fed4..c75e0fcc2d68 100644
--- a/sys/amd64/vmm/amd/ivrs_drv.c
+++ b/sys/amd64/vmm/amd/ivrs_drv.c
@@ -51,7 +51,7 @@
 
 device_t *ivhd_devs;			/* IVHD or AMD-Vi device list. */
 int	ivhd_count;			/* Number of IVHD header. */
-/* 
+/*
 * Cached IVHD header list.
 * Single entry for each IVHD, filtered the legacy one.
 */
@@ -225,7 +225,7 @@ ivhd_dev_parse(ACPI_IVRS_HARDWARE1 *ivhd, struct amdvi_softc *softc)
 			break;
 
 		default:
-			device_printf(softc->dev, 
+			device_printf(softc->dev,
 			    "unknown type: 0x%x\n", ivhd->Header.Type);
 			return (-1);
 		}
@@ -366,7 +366,7 @@ ivhd_identify(driver_t *driver, device_t parent)
 	ivrs_ivinfo = ivrs->Info;
 	printf("AMD-Vi: IVRS Info VAsize = %d PAsize = %d GVAsize = %d"
 	       " flags:%b\n",
-		REG_BITS(ivrs_ivinfo, 21, 15), REG_BITS(ivrs_ivinfo, 14, 8), 
+		REG_BITS(ivrs_ivinfo, 21, 15), REG_BITS(ivrs_ivinfo, 14, 8),
 		REG_BITS(ivrs_ivinfo, 7, 5), REG_BITS(ivrs_ivinfo, 22, 22),
 		"\020\001EFRSup");
 
@@ -439,7 +439,7 @@ ivhd_probe(device_t dev)
 		return (ENXIO);
 
 	unit = device_get_unit(dev);
-	KASSERT((unit < ivhd_count), 
+	KASSERT((unit < ivhd_count),
 		("ivhd unit %d > count %d", unit, ivhd_count));
 	ivhd = ivhd_hdrs[unit];
 	KASSERT(ivhd, ("ivhd is NULL"));
@@ -506,7 +506,7 @@ ivhd_print_flag(device_t dev, enum IvrsType ivhd_type, uint8_t flag)
 * Feature in legacy IVHD type(0x10) and attribute in newer type(0x11 and 0x40).
 */
 static void
-ivhd_print_feature(device_t dev, enum IvrsType ivhd_type, uint32_t feature) 
+ivhd_print_feature(device_t dev, enum IvrsType ivhd_type, uint32_t feature)
 {
 	switch (ivhd_type) {
 	case IVRS_TYPE_HARDWARE_LEGACY:
@@ -639,7 +639,7 @@ ivhd_attach(device_t dev)
 	int status, unit;
 
 	unit = device_get_unit(dev);
-	KASSERT((unit < ivhd_count), 
+	KASSERT((unit < ivhd_count),
 		("ivhd unit %d > count %d", unit, ivhd_count));
 	/* Make sure its same device for which attach is called. */
 	KASSERT((ivhd_devs[unit] == dev),
@@ -658,12 +658,12 @@ ivhd_attach(device_t dev)
 	softc->pci_seg = ivhd->PciSegmentGroup;
 	softc->pci_rid = ivhd->Header.DeviceId;
 	softc->ivhd_flag = ivhd->Header.Flags;
-	/* 
+	/*
 	 * On lgeacy IVHD type(0x10), it is documented as feature
 	 * but in newer type it is attribute.
 	 */
 	softc->ivhd_feature = ivhd->FeatureReporting;
-	/* 
+	/*
 	 * PCI capability has more capabilities that are not part of IVRS.
 	 */
 	softc->cap_off = ivhd->CapabilityOffset;
@@ -694,7 +694,7 @@ ivhd_attach(device_t dev)
 
 	status = amdvi_setup_hw(softc);
 	if (status != 0) {
-		device_printf(dev, "couldn't be initialised, error=%d\n", 
+		device_printf(dev, "couldn't be initialised, error=%d\n",
 		    status);
 		goto fail;
 	}
diff --git a/sys/amd64/vmm/amd/npt.c b/sys/amd64/vmm/amd/npt.c
index edd61d6b62ea..6fd6628053f2 100644
--- a/sys/amd64/vmm/amd/npt.c
+++ b/sys/amd64/vmm/amd/npt.c
@@ -58,7 +58,7 @@ svm_npt_init(int ipinum)
 	npt_flags = ipinum & NPT_IPIMASK;
 	TUNABLE_INT_FETCH("hw.vmm.npt.enable_superpage", &enable_superpage);
 	if (enable_superpage)
-		npt_flags |= PMAP_PDE_SUPERPAGE; 
+		npt_flags |= PMAP_PDE_SUPERPAGE;
 
 	return (0);
 }
diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c
index 1015b04ee161..cc0b1c0c8725 100644
--- a/sys/amd64/vmm/amd/svm.c
+++ b/sys/amd64/vmm/amd/svm.c
@@ -282,7 +282,7 @@ svm_modresume(void)
 {
 
 	svm_enable(NULL);
-} 
+}
 
 #ifdef BHYVE_SNAPSHOT
 void
@@ -301,14 +301,14 @@ svm_set_tsc_offset(struct svm_vcpu *vcpu, uint64_t offset)
 #endif
 
 /* Pentium compatible MSRs */
-#define MSR_PENTIUM_START	0 
+#define MSR_PENTIUM_START	0
 #define MSR_PENTIUM_END		0x1FFF
 /* AMD 6th generation and Intel compatible MSRs */
-#define MSR_AMD6TH_START	0xC0000000UL 
-#define MSR_AMD6TH_END		0xC0001FFFUL 
+#define MSR_AMD6TH_START	0xC0000000UL
+#define MSR_AMD6TH_END		0xC0001FFFUL
/* AMD 7th and 8th generation compatible MSRs */
-#define MSR_AMD7TH_START	0xC0010000UL 
-#define MSR_AMD7TH_END		0xC0011FFFUL 
+#define MSR_AMD7TH_START	0xC0010000UL
+#define MSR_AMD7TH_END		0xC0011FFFUL
 
 /*
 * Get the index and bit position for a MSR in permission bitmap.
@@ -328,12 +328,12 @@ svm_msr_index(uint64_t msr, int *index, int *bit)
 		return (0);
 	}
 
-	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); 
+	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
 	if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
-		off = (msr - MSR_AMD6TH_START); 
+		off = (msr - MSR_AMD6TH_START);
 		*index = (off + base) / 4;
 		return (0);
-	} 
+	}
 
 	base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
 	if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
@@ -852,7 +852,7 @@ svm_npf_emul_fault(uint64_t exitinfo1)
 		return (false);
 	}
 
-	return (true); 
+	return (true);
 }
 
 static void
@@ -893,7 +893,7 @@ svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
 	default:
 		vmexit->u.inst_emul.cs_base = 0;
 		vmexit->u.inst_emul.cs_d = 0;
-		break; 
+		break;
 	}
 
 	/*
@@ -994,7 +994,7 @@ svm_save_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu)
 	uint64_t intinfo;
 
 	ctrl = svm_get_vmcb_ctrl(vcpu);
-	intinfo = ctrl->exitintinfo; 
+	intinfo = ctrl->exitintinfo;
 	if (!VMCB_EXITINTINFO_VALID(intinfo))
 		return;
 
@@ -1533,7 +1533,7 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
 		eax = state->rax;
 		ecx = ctx->sctx_rcx;
 		edx = ctx->sctx_rdx;
-		retu = false; 
+		retu = false;
 
 		if (info1) {
 			vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1);
@@ -1667,7 +1667,7 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
 	default:
 		vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1);
 		break;
-	} 
+	}
 
 	SVM_CTR4(vcpu, "%s %s vmexit at %#lx/%d",
"handled" : "unhandled", exit_reason_to_str(code), @@ -2231,7 +2231,7 @@ svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo) /* Restore host LDTR. */ lldt(ldt_sel); - /* #VMEXIT disables interrupts so re-enable them here. */ + /* #VMEXIT disables interrupts so re-enable them here. */ enable_gintr(); /* Update 'nextrip' */ diff --git a/sys/amd64/vmm/amd/svm_msr.c b/sys/amd64/vmm/amd/svm_msr.c index 629dd0bd3b46..1f7be6029e64 100644 --- a/sys/amd64/vmm/amd/svm_msr.c +++ b/sys/amd64/vmm/amd/svm_msr.c @@ -59,7 +59,7 @@ static uint64_t host_msrs[HOST_MSR_NUM]; void svm_msr_init(void) { - /* + /* * It is safe to cache the values of the following MSRs because they * don't change based on curcpu, curproc or curthread. */ diff --git a/sys/amd64/vmm/intel/ept.c b/sys/amd64/vmm/intel/ept.c index ed41f99fb4e6..5432c7da5df7 100644 --- a/sys/amd64/vmm/intel/ept.c +++ b/sys/amd64/vmm/intel/ept.c @@ -137,7 +137,7 @@ ept_dump(uint64_t *ptp, int nlevels) if (ptpval == 0) continue; - + for (t = 0; t < tabs; t++) printf("\t"); printf("%3d 0x%016lx\n", i, ptpval); diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c index 63b0cf3d2ff9..34b5fecc149c 100644 --- a/sys/amd64/vmm/intel/vmx.c +++ b/sys/amd64/vmm/intel/vmx.c @@ -3665,7 +3665,7 @@ vmx_setcap(void *vcpui, int type, int val) vlapic = vm_lapic(vcpu->vcpu); vlapic->ipi_exit = val; break; - case VM_CAP_MASK_HWINTR: + case VM_CAP_MASK_HWINTR: retval = 0; break; default: diff --git a/sys/amd64/vmm/io/ppt.c b/sys/amd64/vmm/io/ppt.c index 109707dbcbd5..3b043c64fbde 100644 --- a/sys/amd64/vmm/io/ppt.c +++ b/sys/amd64/vmm/io/ppt.c @@ -257,7 +257,7 @@ ppt_teardown_msi(struct pptdev *ppt) if (res != NULL) bus_release_resource(ppt->dev, SYS_RES_IRQ, rid, res); - + ppt->msi.res[i] = NULL; ppt->msi.cookie[i] = NULL; } @@ -268,7 +268,7 @@ ppt_teardown_msi(struct pptdev *ppt) ppt->msi.num_msgs = 0; } -static void +static void ppt_teardown_msix_intr(struct pptdev *ppt, int idx) { int rid; @@ -279,25 +279,25 @@ ppt_teardown_msix_intr(struct pptdev *ppt, int idx) res = ppt->msix.res[idx]; cookie = ppt->msix.cookie[idx]; - if (cookie != NULL) + if (cookie != NULL) bus_teardown_intr(ppt->dev, res, cookie); - if (res != NULL) + if (res != NULL) bus_release_resource(ppt->dev, SYS_RES_IRQ, rid, res); ppt->msix.res[idx] = NULL; ppt->msix.cookie[idx] = NULL; } -static void +static void ppt_teardown_msix(struct pptdev *ppt) { int i; - if (ppt->msix.num_msgs == 0) + if (ppt->msix.num_msgs == 0) return; - for (i = 0; i < ppt->msix.num_msgs; i++) + for (i = 0; i < ppt->msix.num_msgs; i++) ppt_teardown_msix_intr(ppt, i); free(ppt->msix.res, M_PPTMSIX); @@ -307,14 +307,14 @@ ppt_teardown_msix(struct pptdev *ppt) pci_release_msi(ppt->dev); if (ppt->msix.msix_table_res) { - bus_release_resource(ppt->dev, SYS_RES_MEMORY, + bus_release_resource(ppt->dev, SYS_RES_MEMORY, ppt->msix.msix_table_rid, ppt->msix.msix_table_res); ppt->msix.msix_table_res = NULL; ppt->msix.msix_table_rid = 0; } if (ppt->msix.msix_pba_res) { - bus_release_resource(ppt->dev, SYS_RES_MEMORY, + bus_release_resource(ppt->dev, SYS_RES_MEMORY, ppt->msix.msix_pba_rid, ppt->msix.msix_pba_res); ppt->msix.msix_pba_res = NULL; @@ -678,10 +678,10 @@ ppt_setup_msix(struct vm *vm, int bus, int slot, int func, return (EBUSY); dinfo = device_get_ivars(ppt->dev); - if (!dinfo) + if (!dinfo) return (ENXIO); - /* + /* * First-time configuration: * Allocate the MSI-X table * Allocate the IRQ resources diff --git a/sys/amd64/vmm/io/vhpet.c b/sys/amd64/vmm/io/vhpet.c index 
index a1dfc7ba731f..88063f2952e5 100644
--- a/sys/amd64/vmm/io/vhpet.c
+++ b/sys/amd64/vmm/io/vhpet.c
@@ -231,7 +231,7 @@ vhpet_timer_interrupt(struct vhpet *vhpet, int n)
 		lapic_intr_msi(vhpet->vm, vhpet->timer[n].msireg >> 32,
 		    vhpet->timer[n].msireg & 0xffffffff);
 		return;
-	} 
+	}
 
 	pin = vhpet_timer_ioapic_pin(vhpet, n);
 	if (pin == 0) {
@@ -493,7 +493,7 @@ vhpet_mmio_write(struct vcpu *vcpu, uint64_t gpa, uint64_t val, int size,
 		if ((offset & 0x4) != 0) {
 			mask <<= 32;
 			data <<= 32;
-		} 
+		}
 		break;
 	default:
 		VM_CTR2(vhpet->vm, "hpet invalid mmio write: "
@@ -647,7 +647,7 @@ vhpet_mmio_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size,
 
 	if (offset == HPET_CAPABILITIES || offset == HPET_CAPABILITIES + 4) {
 		data = vhpet_capabilities();
-		goto done; 
+		goto done;
 	}
 
 	if (offset == HPET_CONFIG || offset == HPET_CONFIG + 4) {
diff --git a/sys/amd64/vmm/io/vlapic.c b/sys/amd64/vmm/io/vlapic.c
index dce7528ac773..9879dfa164a4 100644
--- a/sys/amd64/vmm/io/vlapic.c
+++ b/sys/amd64/vmm/io/vlapic.c
@@ -383,7 +383,7 @@ vlapic_lvt_write_handler(struct vlapic *vlapic, uint32_t offset)
 	int idx;
 
 	lapic = vlapic->apic_page;
-	lvtptr = vlapic_get_lvtptr(vlapic, offset); 
+	lvtptr = vlapic_get_lvtptr(vlapic, offset);
 	val = *lvtptr;
 	idx = lvt_off_to_idx(offset);
 
@@ -804,7 +804,7 @@ vlapic_icrtmr_write_handler(struct vlapic *vlapic)
 /*
 * This function populates 'dmask' with the set of vcpus that match the
 * addressing specified by the (dest, phys, lowprio) tuple.
- * 
+ *
 * 'x2apic_dest' specifies whether 'dest' is interpreted as x2APIC (32-bit)
 * or xAPIC (8-bit) destination field.
 */
@@ -1251,7 +1251,7 @@ vlapic_pending_intr(struct vlapic *vlapic, int *vecptr)
 			if (vecptr != NULL)
 				*vecptr = vector;
 			return (1);
-		} else 
+		} else
 			break;
 		}
 	}
@@ -1269,7 +1269,7 @@ vlapic_intr_accepted(struct vlapic *vlapic, int vector)
 		return ((*vlapic->ops.intr_accepted)(vlapic, vector));
 
 	/*
-	 * clear the ready bit for vector being accepted in irr 
+	 * clear the ready bit for vector being accepted in irr
 	 * and set the vector as in service in isr.
 	 */
 	idx = (vector / 32) * 4;
@@ -1409,17 +1409,17 @@ vlapic_read(struct vlapic *vlapic, int mmio_access, uint64_t offset,
 	case APIC_OFFSET_ESR:
 		*data = lapic->esr;
 		break;
-	case APIC_OFFSET_ICR_LOW: 
+	case APIC_OFFSET_ICR_LOW:
 		*data = lapic->icr_lo;
 		if (x2apic(vlapic))
 			*data |= (uint64_t)lapic->icr_hi << 32;
 		break;
-	case APIC_OFFSET_ICR_HI: 
+	case APIC_OFFSET_ICR_HI:
 		*data = lapic->icr_hi;
 		break;
 	case APIC_OFFSET_CMCI_LVT:
 	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
-		*data = vlapic_get_lvt(vlapic, offset); 
+		*data = vlapic_get_lvt(vlapic, offset);
 #ifdef INVARIANTS
 		reg = vlapic_get_lvtptr(vlapic, offset);
 		KASSERT(*data == *reg, ("inconsistent lvt value at "
@@ -1509,7 +1509,7 @@ vlapic_write(struct vlapic *vlapic, int mmio_access, uint64_t offset,
 		lapic->svr = data;
 		vlapic_svr_write_handler(vlapic);
 		break;
-	case APIC_OFFSET_ICR_LOW: 
+	case APIC_OFFSET_ICR_LOW:
 		lapic->icr_lo = data;
 		if (x2apic(vlapic))
 			lapic->icr_hi = data >> 32;
diff --git a/sys/amd64/vmm/io/vrtc.c b/sys/amd64/vmm/io/vrtc.c
index 7dc651a7b883..a56c77b7bf73 100644
--- a/sys/amd64/vmm/io/vrtc.c
+++ b/sys/amd64/vmm/io/vrtc.c
@@ -346,7 +346,7 @@ rtc_to_secs(struct vrtc *vrtc)
 
 	/*
 	 * Ignore 'rtc->dow' because some guests like Linux don't bother
-	 * setting it at all while others like OpenBSD/i386 set it incorrectly. 
+	 * setting it at all while others like OpenBSD/i386 set it incorrectly.
 	 *
 	 * clock_ct_to_ts() does not depend on 'ct.dow' anyways so ignore it.
 	 */
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index 20006e63cfeb..a2c2b342bee4 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -355,7 +355,7 @@ vcpu_cleanup(struct vcpu *vcpu, bool destroy)
 	vmmops_vcpu_cleanup(vcpu->cookie);
 	vcpu->cookie = NULL;
 	if (destroy) {
-		vmm_stat_free(vcpu->stats); 
+		vmm_stat_free(vcpu->stats);
 		fpu_save_area_free(vcpu->guestfpu);
 		vcpu_lock_destroy(vcpu);
 		free(vcpu, M_VM);
@@ -2458,7 +2458,7 @@ vmm_is_pptdev(int bus, int slot, int func)
 			found = true;
 			break;
 		}
-		
+
 		if (cp2 != NULL)
 			*cp2++ = ' ';
 
@@ -2887,7 +2887,7 @@ vm_get_rescnt(struct vcpu *vcpu, struct vmm_stat_type *stat)
 	if (vcpu->vcpuid == 0) {
 		vmm_stat_set(vcpu, VMM_MEM_RESIDENT, PAGE_SIZE *
 		    vmspace_resident_count(vcpu->vm->vmspace));
-	} 
+	}
 }
 
 static void
@@ -2897,7 +2897,7 @@ vm_get_wiredcnt(struct vcpu *vcpu, struct vmm_stat_type *stat)
 	if (vcpu->vcpuid == 0) {
 		vmm_stat_set(vcpu, VMM_MEM_WIRED, PAGE_SIZE *
 		    pmap_wired_count(vmspace_pmap(vcpu->vm->vmspace)));
-	} 
+	}
 }
 
 VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);
diff --git a/sys/amd64/vmm/vmm_instruction_emul.c b/sys/amd64/vmm/vmm_instruction_emul.c
index 1b042621c0eb..6e1501493082 100644
--- a/sys/amd64/vmm/vmm_instruction_emul.c
+++ b/sys/amd64/vmm/vmm_instruction_emul.c
@@ -583,7 +583,7 @@ emulate_mov(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
 		/*
 		 * MOV from AX/EAX/RAX to seg:moffset
 		 * A3:		mov moffs16, AX
-		 * A3:		mov moffs32, EAX 
+		 * A3:		mov moffs32, EAX
 		 * REX.W + A3:	mov moffs64, RAX
 		 */
 		error = vie_read_register(vcpu, VM_REG_GUEST_RAX, &val);
@@ -1118,7 +1118,7 @@ emulate_or(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
 		error = vie_read_register(vcpu, reg, &val1);
 		if (error)
 			break;
-		
+
 		/* get the second operand */
 		error = memread(vcpu, gpa, &val2, size, arg);
 		if (error)
@@ -1468,7 +1468,7 @@ emulate_sub(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
 	case 0x2B:
 		/*
 		 * SUB r/m from r and store the result in r
-		 * 
+		 *
 		 * 2B/r		SUB r16, r/m16
 		 * 2B/r		SUB r32, r/m32
 		 * REX.W + 2B/r	SUB r64, r/m64
@@ -1912,7 +1912,7 @@ vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg,
 		if (SEG_DESC_UNUSABLE(desc->access))
 			return (-1);
 
-		/* 
+		/*
 		 * The processor generates a #NP exception when a segment
 		 * register is loaded with a selector that points to a
 		 * descriptor that is not present. If this was the case then
diff --git a/sys/amd64/vmm/vmm_mem.c b/sys/amd64/vmm/vmm_mem.c
index f3954a40996e..0e953b6af534 100644
--- a/sys/amd64/vmm/vmm_mem.c
+++ b/sys/amd64/vmm/vmm_mem.c
@@ -95,7 +95,7 @@ vmm_mmio_alloc(struct vmspace *vmspace, vm_paddr_t gpa, size_t len,
 	 * has incremented the reference count on the sglist.  Dropping the
 	 * initial reference count ensures that the sglist will be freed
 	 * when the object is deallocated.
-	 * 
+	 *
 	 * If the object could not be allocated then we end up freeing the
 	 * sglist.
 	 */