Mirror of https://git.hardenedbsd.org/hardenedbsd/HardenedBSD.git
synced 2024-12-23 10:06:25 +01:00
2d8acc0f4a
1) Start using TSM. Struct procs continue to point to the upages structure after being freed. Struct vmspace continues to point to the pte object and kva space for the kstack. u_map is now superfluous.
2) vm_maps don't need to be reference counted. They always exist either in the kernel or in a vmspace. The vmspaces are managed by reference counts.
3) Remove the "wired" vm_map nonsense.
4) No need to keep a cache of kernel stack kva's.
5) Get rid of strange looking ++var, and change to var++.
6) Change more data structures to use our "zone" allocator. Added struct proc, struct vmspace and struct vnode. This saves a significant amount of kva space and physical memory. Additionally, this enables TSM for the zone managed memory.
7) Keep ioopt disabled for now.
8) Remove the now bogus "single use" map concept.
9) Use generation counts or id's for data structures residing in TSM, where it allows us to avoid unneeded restart overhead during traversals where blocking might occur (see the sketch after this list).
10) Account better for memory deficits, so the pageout daemon will be able to make enough memory available (experimental).
11) Fix some vnode locking problems. (From Tor, I think.)
12) Add a check in ufs_lookup to avoid lots of unneeded calls to bcmp (experimental).
13) Significantly shrink, clean up, and slightly speed up the vm_fault.c code. Use generation counts, get rid of unneeded collapse operations, and clean up the cluster code.
14) Make vm_zone more suitable for TSM.
This commit is partially the result of discussions and contributions from other people, including DG, Tor Egge, PHK, and probably others that I have forgotten to attribute (so let me know if I forgot). This is not the infamous, final cleanup of the vnode stuff, but a necessary step. Vnode mgmt should be correct, but things might still change, and there is still some missing stuff (like ioopt, physical backing of non-merged cache files, and debugging of layering concepts).
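Item 9 is the change most easily illustrated in code. Below is a minimal sketch, not code from this commit: a traversal snapshots a generation counter that is bumped on every insert or removal, and after a potentially blocking tsleep() it restarts only if the counter shows the list actually changed underneath it. struct obj, objs, objs_gen and obj_work are invented names for the illustration, and the include list is approximate.

/*
 * Illustrative sketch only -- not code from this commit.  Shows the
 * generation-count idea from item 9: snapshot a counter that is bumped
 * on every insert/remove, and after a potentially blocking wait restart
 * the traversal only when the counter says the list really changed.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>

struct obj {
	TAILQ_ENTRY(obj) o_link;
	int		 o_busy;
};
TAILQ_HEAD(objlist, obj);

static struct objlist	objs = TAILQ_HEAD_INITIALIZER(objs);
static u_long		objs_gen;	/* bumped on every insert/remove */

static void
scan_objs(void (*obj_work)(struct obj *))
{
	struct obj *o;
	u_long gen;

restart:
	gen = objs_gen;
	TAILQ_FOREACH(o, &objs, o_link) {
		if (o->o_busy) {
			/* tsleep() may block; the list can change meanwhile. */
			(void) tsleep(o, PVM, "objbsy", 0);
			if (gen != objs_gen)
				goto restart;	/* list changed: restart */
			continue;		/* unchanged: keep walking */
		}
		(*obj_work)(o);
	}
}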
106 lines
4.7 KiB
C
/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_extern.h	8.2 (Berkeley) 1/12/94
 * $Id: vm_extern.h,v 1.36 1997/12/31 02:35:29 alex Exp $
 */

#ifndef _VM_EXTERN_H_
#define _VM_EXTERN_H_

struct buf;
struct proc;
struct vmspace;
struct vmtotal;
struct mount;
struct vnode;

#ifdef KERNEL

#ifdef TYPEDEF_FOR_UAP
int getpagesize __P((struct proc * p, void *, int *));
int madvise __P((struct proc *, void *, int *));
int mincore __P((struct proc *, void *, int *));
int mprotect __P((struct proc *, void *, int *));
int msync __P((struct proc *, void *, int *));
int munmap __P((struct proc *, void *, int *));
int obreak __P((struct proc *, void *, int *));
int sbrk __P((struct proc *, void *, int *));
int smmap __P((struct proc *, void *, int *));
int sstk __P((struct proc *, void *, int *));
int swapon __P((struct proc *, void *, int *));
#endif

void faultin __P((struct proc *p));
int grow __P((struct proc *, u_int));
int kernacc __P((caddr_t, int, int));
vm_offset_t kmem_alloc __P((vm_map_t, vm_size_t));
vm_offset_t kmem_alloc_pageable __P((vm_map_t, vm_size_t));
vm_offset_t kmem_alloc_wait __P((vm_map_t, vm_size_t));
void kmem_free __P((vm_map_t, vm_offset_t, vm_size_t));
void kmem_free_wakeup __P((vm_map_t, vm_offset_t, vm_size_t));
void kmem_init __P((vm_offset_t, vm_offset_t));
vm_offset_t kmem_malloc __P((vm_map_t, vm_size_t, boolean_t));
vm_map_t kmem_suballoc __P((vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t));
void munmapfd __P((struct proc *, int));
int pager_cache __P((vm_object_t, boolean_t));
int swaponvp __P((struct proc *, struct vnode *, dev_t , u_long));
void swapout_procs __P((int));
int useracc __P((caddr_t, int, int));
int vm_fault __P((vm_map_t, vm_offset_t, vm_prot_t, int));
void vm_fault_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t));
void vm_fault_unwire __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_fault_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_fault_user_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
void vm_fork __P((struct proc *, struct proc *, int));
int vm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, void *, vm_ooffset_t));
vm_offset_t vm_page_alloc_contig __P((vm_offset_t, vm_offset_t, vm_offset_t, vm_offset_t));
void vm_set_page_size __P((void));
void vmmeter __P((void));
struct vmspace *vmspace_alloc __P((vm_offset_t, vm_offset_t));
struct vmspace *vmspace_fork __P((struct vmspace *));
void vmspace_exec __P((struct proc *));
void vmspace_unshare __P((struct proc *));
void vmspace_free __P((struct vmspace *));
void vnode_pager_setsize __P((struct vnode *, vm_ooffset_t));
void vnode_pager_umount __P((struct mount *));
void vnode_pager_uncache __P((struct vnode *, struct proc *));
void vslock __P((caddr_t, u_int));
void vsunlock __P((caddr_t, u_int, int));
void vm_object_print __P((/* db_expr_t */ int, boolean_t, /* db_expr_t */ int,
			  char *));
void vm_fault_quick __P((caddr_t v, int prot));

#endif /* KERNEL */

#endif /* !_VM_EXTERN_H_ */
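As a usage note, not part of vm_extern.h: the kmem_* entry points declared above are the kernel-internal KVA allocation interface. The sketch below shows the usual pairing of kmem_alloc() against kernel_map with a matching kmem_free(); scratch_init()/scratch_done() are invented names, and the include list is only approximate for kernels of this vintage.

/*
 * Hedged usage sketch; not part of vm_extern.h and not from this commit.
 * Allocates a page-rounded region of wired kernel VA at init time and
 * releases it on teardown.
 */
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>		/* kernel_map */
#include <vm/vm_extern.h>	/* kmem_alloc(), kmem_free() */

static vm_offset_t	scratch_kva;
static vm_size_t	scratch_size;

static int
scratch_init(vm_size_t bytes)
{
	scratch_size = round_page(bytes);
	/* kmem_alloc() hands back wired kernel VA, or 0 on failure. */
	scratch_kva = kmem_alloc(kernel_map, scratch_size);
	return (scratch_kva == 0 ? ENOMEM : 0);
}

static void
scratch_done(void)
{
	if (scratch_kva != 0) {
		kmem_free(kernel_map, scratch_kva, scratch_size);
		scratch_kva = 0;
	}
}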