2015-03-23 12:54:56 +01:00
|
|
|
/*-
|
|
|
|
* Copyright (c) 1990 The Regents of the University of California.
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* This code is derived from software contributed to Berkeley by
|
|
|
|
* William Jolitz.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
2023-07-26 01:59:26 +02:00
|
|
|
#ifdef __arm__
|
|
|
|
#include <arm/param.h>
|
|
|
|
#else /* !__arm__ */
|
|
|
|
|
2015-03-23 12:54:56 +01:00
|
|
|
#ifndef _MACHINE_PARAM_H_
|
|
|
|
#define _MACHINE_PARAM_H_
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Machine dependent constants for arm64.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <machine/_align.h>
|
|
|
|
|
|
|
|
/* Stacks are kept aligned to a 16-byte boundary (mask is 16 - 1). */
#define STACKALIGNBYTES (16 - 1)
|
|
|
|
/* Round a stack address down to the 16-byte alignment boundary. */
#define STACKALIGN(p) ((uint64_t)(p) & ~STACKALIGNBYTES)
|
|
|
|
|
2016-03-02 16:20:42 +01:00
|
|
|
#define __PCI_REROUTE_INTERRUPT
|
|
|
|
|
2015-03-23 12:54:56 +01:00
|
|
|
#ifndef MACHINE
|
|
|
|
#define MACHINE "arm64"
|
|
|
|
#endif
|
|
|
|
#ifndef MACHINE_ARCH
|
|
|
|
#define MACHINE_ARCH "aarch64"
|
|
|
|
#endif
|
2017-11-24 14:50:53 +01:00
|
|
|
#ifndef MACHINE_ARCH32
|
|
|
|
#define MACHINE_ARCH32 "armv7"
|
|
|
|
#endif
|
2015-03-23 12:54:56 +01:00
|
|
|
|
2020-02-05 20:08:21 +01:00
|
|
|
#ifdef SMP
|
2015-03-23 12:54:56 +01:00
|
|
|
#ifndef MAXCPU
|
2023-08-03 23:47:06 +02:00
|
|
|
/* Maximum number of CPUs supported in an SMP kernel (overridable). */
#define MAXCPU 1024
|
2015-03-23 12:54:56 +01:00
|
|
|
#endif
|
|
|
|
#else
|
|
|
|
#define MAXCPU 1
|
2020-02-05 20:08:21 +01:00
|
|
|
#endif
|
2015-03-23 12:54:56 +01:00
|
|
|
|
|
|
|
#ifndef MAXMEMDOM
|
2020-12-01 21:10:55 +01:00
|
|
|
/* Maximum number of memory (NUMA) domains — TODO confirm against vm layer. */
#define MAXMEMDOM 8
|
2015-03-23 12:54:56 +01:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#define ALIGNBYTES _ALIGNBYTES
|
|
|
|
#define ALIGN(p) _ALIGN(p)
|
|
|
|
/*
|
|
|
|
* ALIGNED_POINTER is a boolean macro that checks whether an address
|
|
|
|
* is valid to fetch data elements of type t from on this architecture.
|
|
|
|
* This does not reflect the optimal alignment, just the possibility
|
|
|
|
* (within reasonable limits).
|
|
|
|
*/
|
|
|
|
#define ALIGNED_POINTER(p, t) ((((u_long)(p)) & (sizeof(t) - 1)) == 0)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* CACHE_LINE_SIZE is the compile-time maximum cache line size for an
|
|
|
|
* architecture. It should be used with appropriate caution.
|
|
|
|
*/
|
2016-10-24 15:44:24 +02:00
|
|
|
/* 1 << 7 yields a 128-byte compile-time maximum cache line. */
#define CACHE_LINE_SHIFT 7
|
2015-03-23 12:54:56 +01:00
|
|
|
#define CACHE_LINE_SIZE (1 << CACHE_LINE_SHIFT)
|
|
|
|
|
2020-12-23 10:46:13 +01:00
|
|
|
#define PAGE_SHIFT_4K 12
|
|
|
|
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
|
2015-03-23 12:54:56 +01:00
|
|
|
|
2015-07-06 20:27:41 +02:00
|
|
|
#define PAGE_SHIFT_16K 14
|
|
|
|
#define PAGE_SIZE_16K (1 << PAGE_SHIFT_16K)
|
|
|
|
|
2015-03-23 12:54:56 +01:00
|
|
|
#define PAGE_SHIFT_64K 16
|
|
|
|
#define PAGE_SIZE_64K (1 << PAGE_SHIFT_64K)
|
|
|
|
|
2020-12-23 10:46:13 +01:00
|
|
|
/* The kernel's base page size is 4K (selected from the sizes above). */
#define PAGE_SHIFT PAGE_SHIFT_4K
|
2022-08-09 17:15:56 +02:00
|
|
|
#define PAGE_SIZE (1 << PAGE_SHIFT)
|
|
|
|
#define PAGE_MASK (PAGE_SIZE - 1)
|
2020-12-23 10:46:13 +01:00
|
|
|
|
2024-07-12 09:44:56 +02:00
|
|
|
#define MAXPAGESIZES 4 /* maximum number of supported page sizes */
|
2015-03-23 12:54:56 +01:00
|
|
|
|
|
|
|
#ifndef KSTACK_PAGES
|
arm64: add KASAN support
This entails:
- Marking some obvious candidates for __nosanitizeaddress
- Similar trap frame markings as amd64, for similar reasons
- Shadow map implementation
The shadow map implementation is roughly similar to what was done on
amd64, with some exceptions. Attempting to use available space at
preinit_map_va + PMAP_PREINIT_MAPPING_SIZE (up to the end of that range,
as depicted in the physmap) results in odd failures, so we instead
search the physmap for free regions that we can carve out, fragmenting
the shadow map as necessary to try and fit as much as we need for the
initial kernel map. pmap_bootstrap_san() is thus after
pmap_bootstrap(), which still included some technically reserved areas
of the memory map that needed to be included in the DMAP.
The odd failure noted above may be a bug, but I haven't investigated it
all that much.
Initial work by mhorne with additional fixes from kevans and markj.
Reviewed by: andrew, markj
Sponsored by: Juniper Networks, Inc.
Sponsored by: Klara, Inc.
Differential Revision: https://reviews.freebsd.org/D36701
2023-03-23 22:26:06 +01:00
|
|
|
#if defined(KASAN) || defined(KMSAN)
|
|
|
|
/* Sanitizer builds (KASAN/KMSAN) need larger kernel stacks. */
#define KSTACK_PAGES 6
|
|
|
|
#else
|
2015-03-23 12:54:56 +01:00
|
|
|
#define KSTACK_PAGES 4 /* pages of kernel stack (with pcb) */
|
|
|
|
#endif
|
arm64: add KASAN support
This entails:
- Marking some obvious candidates for __nosanitizeaddress
- Similar trap frame markings as amd64, for similar reasons
- Shadow map implementation
The shadow map implementation is roughly similar to what was done on
amd64, with some exceptions. Attempting to use available space at
preinit_map_va + PMAP_PREINIT_MAPPING_SIZE (up to the end of that range,
as depicted in the physmap) results in odd failures, so we instead
search the physmap for free regions that we can carve out, fragmenting
the shadow map as necessary to try and fit as much as we need for the
initial kernel map. pmap_bootstrap_san() is thus after
pmap_bootstrap(), which still included some technically reserved areas
of the memory map that needed to be included in the DMAP.
The odd failure noted above may be a bug, but I haven't investigated it
all that much.
Initial work by mhorne with additional fixes from kevans and markj.
Reviewed by: andrew, markj
Sponsored by: Juniper Networks, Inc.
Sponsored by: Klara, Inc.
Differential Revision: https://reviews.freebsd.org/D36701
2023-03-23 22:26:06 +01:00
|
|
|
#endif
|
2015-03-23 12:54:56 +01:00
|
|
|
|
|
|
|
#define KSTACK_GUARD_PAGES 1 /* pages of kstack guard; 0 disables */
|
|
|
|
/* Pages reserved per CPU for the per-CPU data area. */
#define PCPU_PAGES 1
|
|
|
|
|
arm64: Disable per-thread stack-smashing protection in data_abort()
With PERTHREAD_SSP configured, the compiler's stack-smashing protection
uses a per-thread canary value instead of a global value. The value is
stored in td->td_md.md_canary; the sp_el0 register always contains a
pointer to that value, and certain functions selected by the compiler
will store the canary value on the stack as a part of the function
prologue (and will verify the copy as part of the epilogue). In
particular, the thread structure may be accessed.
This happens to occur in data_abort(), which leads to the same problem
addressed by commit 2c10be9e06d4 ("arm64: Handle translation faults for
thread structures"). This commit fixes that directly, by disabling SSP
in data_abort() and a couple of related functions by using a function
attribute. It also moves the update of sp_el0 out of C code in case
the compiler decides to start checking the canary in pmap_switch()
someday.
A different solution might be to move the canary value to the PCB, which
currently lives on the kernel stack and isn't subject to the same
problem as thread structures (if only because guard pages inhibit
superpage promotion). However, there isn't any particular reason the
PCB has to live on the stack today; on amd64 it is embedded in struct
thread, reintroducing the same problem. Keeping the reference canary
value at the top of the stack is also rather dubious since it could be
clobbered by a sufficiently large stack overflow.
A third solution could be to go back to the approach of commit
5aa5420ff2e8, and modify UMA to use the direct map for thread structures
even if KASAN is enabled. But, transient promotions and demotions in
the direct map are possible too.
Reviewed by: alc, kib, andrew
MFC after: 1 month
Sponsored by: Juniper Networks, Inc.
Sponsored by: Klara, Inc.
Differential Revision: https://reviews.freebsd.org/D37255
2022-11-07 21:53:41 +01:00
|
|
|
#ifdef PERTHREAD_SSP
|
|
|
|
#define NO_PERTHREAD_SSP __nostackprotector
|
|
|
|
#else
|
|
|
|
#define NO_PERTHREAD_SSP
|
|
|
|
#endif
|
|
|
|
|
2015-03-23 12:54:56 +01:00
|
|
|
/*
|
|
|
|
* Mach derived conversion macros
|
|
|
|
*/
|
|
|
|
/* Round (x) up to the next page boundary. */
#define round_page(x) (((unsigned long)(x) + PAGE_MASK) & ~PAGE_MASK)
|
|
|
|
/* Truncate (x) down to the start of its page. */
#define trunc_page(x) ((unsigned long)(x) & ~PAGE_MASK)
|
|
|
|
|
|
|
|
/* Byte address to page number. */
#define atop(x) ((unsigned long)(x) >> PAGE_SHIFT)
|
|
|
|
/* Page number to byte address. */
#define ptoa(x) ((unsigned long)(x) << PAGE_SHIFT)
|
|
|
|
|
|
|
|
#define arm64_btop(x) ((unsigned long)(x) >> PAGE_SHIFT)
|
|
|
|
#define arm64_ptob(x) ((unsigned long)(x) << PAGE_SHIFT)
|
|
|
|
|
|
|
|
/* Page count to kilobytes. */
#define pgtok(x) ((unsigned long)(x) * (PAGE_SIZE / 1024))
|
|
|
|
|
|
|
|
#endif /* !_MACHINE_PARAM_H_ */
|
2023-07-26 01:59:26 +02:00
|
|
|
|
|
|
|
#endif /* !__arm__ */
|