Deprecated remaining use of vm_deallocate. Deprecated vm_allocate_with_pager().
Almost completely rewrote vm_mmap(); when John gets done with the bottom half,
it will be a complete rewrite. Deprecated most use of vm_object_setpager().
Removed side effect of setting object persist in vm_object_enter and moved
this into the pager(s). A few other cosmetic changes.
David Greenman 1995-02-21 01:22:48 +00:00
parent d9459480fb
commit 7fb0c17ed2
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=6585
6 changed files with 110 additions and 167 deletions
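
Before the per-file hunks, a minimal sketch of the two idioms that change throughout this commit, pieced together from the diffs below (fragments only, not a compilable unit; all identifiers are from sys/vm):

/*
 * Old idiom: attach a pager with vm_object_setpager(); vm_object_enter()
 * also silently marked the object OBJ_CANPERSIST as a side effect.
 */
object = vm_object_allocate(size);
vm_object_enter(object, pager);
vm_object_setpager(object, pager, 0, FALSE);

/*
 * New idiom: assign object->pager directly.  Only a pager that wants a
 * persistent object (the vnode pager) sets OBJ_CANPERSIST, and it now
 * does so explicitly before vm_object_enter().
 */
object = vm_object_allocate(size);
object->flags |= OBJ_CANPERSIST;	/* vnode_pager_alloc() only */
vm_object_enter(object, pager);
object->pager = pager;

/*
 * Old idiom: tear down a mapping with vm_deallocate(map, start, size).
 * New idiom: vm_map_remove() takes an explicit [start, end) range.
 */
(void) vm_deallocate(map, *addr, size);		/* before */
(void) vm_map_remove(map, *addr, *addr + size);	/* after  */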

sys/vm/device_pager.c

@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)device_pager.c 8.1 (Berkeley) 6/11/93
* $Id: device_pager.c,v 1.4 1994/10/02 17:48:58 phk Exp $
* $Id: device_pager.c,v 1.5 1995/01/09 16:05:29 davidg Exp $
*/
/*
@@ -177,7 +177,7 @@ top:
*/
object = devp->devp_object = vm_object_allocate(0);
vm_object_enter(object, pager);
vm_object_setpager(object, pager, (vm_offset_t) foff, FALSE);
object->pager = pager;
/*
* Finally, put it on the managed list so other can find it.
* First we re-lookup in case someone else beat us to this
@@ -199,8 +199,7 @@ top:
#endif
} else {
/*
* vm_object_lookup() gains a reference and also removes the
* object from the cache.
* Gain a reference to the object.
*/
object = vm_object_lookup(pager);
#ifdef DIAGNOSTIC

sys/vm/swap_pager.c

@@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
* $Id: swap_pager.c,v 1.24 1995/01/24 10:12:12 davidg Exp $
* $Id: swap_pager.c,v 1.25 1995/02/02 09:08:00 davidg Exp $
*/
/*
@@ -283,7 +283,7 @@ swap_pager_alloc(handle, size, prot, offset)
*/
object = vm_object_allocate(size);
vm_object_enter(object, pager);
vm_object_setpager(object, pager, 0, FALSE);
object->pager = pager;
} else {
swp->sw_flags = 0;
TAILQ_INSERT_TAIL(&swap_pager_un_list, pager, pg_list);
@@ -1523,8 +1523,10 @@ swap_pager_output(swp, m, count, flags, rtvals)
* during the pageout process, we activate it.
*/
if ((m[i]->flags & PG_ACTIVE) == 0 &&
((m[i]->flags & PG_WANTED) || pmap_is_referenced(VM_PAGE_TO_PHYS(m[i]))))
((m[i]->flags & (PG_WANTED|PG_REFERENCED)) ||
pmap_is_referenced(VM_PAGE_TO_PHYS(m[i])))) {
vm_page_activate(m[i]);
}
}
}
} else {

sys/vm/vm_kern.c

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_kern.c,v 1.9 1995/01/24 10:12:51 davidg Exp $
* $Id: vm_kern.c,v 1.10 1995/02/02 09:08:33 davidg Exp $
*/
/*
@@ -185,7 +185,7 @@ kmem_alloc(map, size)
}
vm_page_zero_fill(mem);
mem->flags &= ~PG_BUSY;
mem->valid |= VM_PAGE_BITS_ALL;
mem->valid = VM_PAGE_BITS_ALL;
}
vm_object_unlock(kernel_object);
@@ -355,7 +355,7 @@ kmem_malloc(map, size, waitflag)
vm_page_zero_fill(m);
#endif
m->flags &= ~PG_BUSY;
m->valid |= VM_PAGE_BITS_ALL;
m->valid = VM_PAGE_BITS_ALL;
}
vm_object_unlock(kmem_object);

sys/vm/vm_mmap.c

@@ -38,7 +38,7 @@
* from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
*
* @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94
* $Id: vm_mmap.c,v 1.8 1995/01/09 16:05:48 davidg Exp $
* $Id: vm_mmap.c,v 1.9 1995/02/15 09:22:17 davidg Exp $
*/
/*
@@ -613,12 +613,16 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
if (size == 0)
return (0);
size = round_page(size);
if ((flags & MAP_FIXED) == 0) {
fitit = TRUE;
*addr = round_page(*addr);
} else {
if (*addr != trunc_page(*addr))
return (EINVAL);
fitit = FALSE;
(void) vm_deallocate(map, *addr, size);
(void) vm_map_remove(map, *addr, *addr + size);
}
/*
@@ -640,175 +644,113 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
if (pager == NULL)
return (type == PG_DEVICE ? EINVAL : ENOMEM);
/*
* Find object and release extra reference gained by lookup
* Guarantee that the pager has an object.
*/
object = vm_object_lookup(pager);
if (handle && object == NULL) {
panic("vm_mmap: vm_object_lookup failed");
if (object == NULL) {
if (handle != NULL)
panic("vm_mmap: pager didn't allocate an object (and should have)");
/*
* Should only happen for unnamed anonymous regions.
*/
object = vm_object_allocate(size);
object->pager = pager;
} else {
/*
* Lose vm_object_lookup() reference.
*/
vm_object_deallocate(object);
}
vm_object_deallocate(object);
/*
* Anonymous memory.
* Anonymous memory, shared file, or character special file.
*/
if (flags & MAP_ANON) {
rv = vm_allocate_with_pager(map, addr, size, fitit,
pager, foff, TRUE);
if ((flags & (MAP_ANON|MAP_SHARED)) || (type == PG_DEVICE)) {
rv = vm_map_find(map, object, foff, addr, size, fitit);
if (rv != KERN_SUCCESS) {
if (handle == NULL)
vm_pager_deallocate(pager);
else
vm_object_deallocate(object);
/*
* Lose reference gained by vm_pager_allocate(). This
* will also destroy the pager if noone else holds a
* reference.
*/
vm_object_deallocate(object);
goto out;
}
/*
* Don't cache anonymous objects. Loses the reference gained
* by vm_pager_allocate. Note that object will be NULL when
* handle == NULL, this is ok since vm_allocate_with_pager has
* made sure that these objects are uncached.
*/
(void) pager_cache(object, FALSE);
#ifdef DEBUG
if (mmapdebug & MDB_MAPIT)
printf("vm_mmap(%d): ANON *addr %x size %x pager %x\n",
curproc->p_pid, *addr, size, pager);
#endif
}
/*
* Must be a mapped file. Distinguish between character special and
* regular files.
* A COW regular file
*/
else if (vp->v_type == VCHR) {
rv = vm_allocate_with_pager(map, addr, size, fitit,
pager, foff, FALSE);
else {
vm_map_t tmap;
vm_offset_t off;
/* locate and allocate the target address space */
rv = vm_map_find(map, NULL, 0, addr, size, fitit);
if (rv != KERN_SUCCESS) {
vm_object_deallocate(object);
goto out;
}
off = VM_MIN_ADDRESS;
tmap = vm_map_create(NULL, off, off + size, TRUE);
rv = vm_map_find(tmap, object, foff, &off, size, FALSE);
if (rv != KERN_SUCCESS) {
vm_object_deallocate(object);
vm_map_deallocate(tmap);
goto out;
}
/*
* Uncache the object and lose the reference gained by
* vm_pager_allocate(). If the call to
* vm_allocate_with_pager() was sucessful, then we gained an
* additional reference ensuring the object will continue to
* exist. If the call failed then the deallocate call below
* will terminate the object which is fine.
* (XXX) MAP_PRIVATE implies that we see changes made
* by others. To ensure that we need to guarentee
* that no copy object is created (otherwise original
* pages would be pushed to the copy object and we
* would never see changes made by others). We
* totally sleeze it right now by marking the object
* internal temporarily.
*/
(void) pager_cache(object, FALSE);
if ((flags & MAP_COPY) == 0)
object->flags |= OBJ_INTERNAL;
rv = vm_map_copy(map, tmap, *addr, size, off,
FALSE, FALSE);
object->flags &= ~OBJ_INTERNAL;
/*
* (XXX) My oh my, this only gets worse... Force
* creation of a shadow object so that vm_map_fork
* will do the right thing.
*/
if ((flags & MAP_COPY) == 0) {
vm_map_t tmap;
vm_map_entry_t tentry;
vm_object_t tobject;
vm_offset_t toffset;
vm_prot_t tprot;
boolean_t twired, tsu;
tmap = map;
vm_map_lookup(&tmap, *addr, VM_PROT_WRITE,
&tentry, &tobject, &toffset,
&tprot, &twired, &tsu);
vm_map_lookup_done(tmap, tentry);
}
/*
* (XXX) Map copy code cannot detect sharing unless a
* sharing map is involved. So we cheat and write
* protect everything ourselves.
*/
vm_object_pmap_copy(object, foff, foff + size);
vm_map_deallocate(tmap);
if (rv != KERN_SUCCESS)
goto out;
}
/*
* A regular file
* "Pre-fault" resident pages.
*/
else {
#ifdef DEBUG
if (object == NULL)
printf("vm_mmap: no object: vp %x, pager %x\n",
vp, pager);
#endif
/*
* Map it directly. Allows modifications to go out to the
* vnode.
*/
if (flags & MAP_SHARED) {
rv = vm_allocate_with_pager(map, addr, size,
fitit, pager,
foff, FALSE);
if (rv != KERN_SUCCESS) {
vm_object_deallocate(object);
goto out;
}
/*
* Don't cache the object. This is the easiest way of
* ensuring that data gets back to the filesystem
* because vnode_pager_deallocate() will fsync the
* vnode. pager_cache() will lose the extra ref.
*/
if (prot & VM_PROT_WRITE)
pager_cache(object, FALSE);
else
vm_object_deallocate(object);
if (map->pmap)
pmap_object_init_pt(map->pmap, *addr, object, foff, size);
}
/*
* Copy-on-write of file. Two flavors. MAP_COPY is true COW,
* you essentially get a snapshot of the region at the time of
* mapping. MAP_PRIVATE means only that your changes are not
* reflected back to the object. Changes made by others will
* be seen.
*/
else {
vm_map_t tmap;
vm_offset_t off;
/* locate and allocate the target address space */
rv = vm_map_find(map, NULL, (vm_offset_t) 0,
addr, size, fitit);
if (rv != KERN_SUCCESS) {
vm_object_deallocate(object);
goto out;
}
tmap = vm_map_create(NULL, VM_MIN_ADDRESS,
VM_MIN_ADDRESS + size, TRUE);
off = VM_MIN_ADDRESS;
rv = vm_allocate_with_pager(tmap, &off, size,
TRUE, pager,
foff, FALSE);
if (rv != KERN_SUCCESS) {
vm_object_deallocate(object);
vm_map_deallocate(tmap);
goto out;
}
/*
* (XXX) MAP_PRIVATE implies that we see changes made
* by others. To ensure that we need to guarentee
* that no copy object is created (otherwise original
* pages would be pushed to the copy object and we
* would never see changes made by others). We
* totally sleeze it right now by marking the object
* internal temporarily.
*/
if ((flags & MAP_COPY) == 0)
object->flags |= OBJ_INTERNAL;
rv = vm_map_copy(map, tmap, *addr, size, off,
FALSE, FALSE);
object->flags &= ~OBJ_INTERNAL;
/*
* (XXX) My oh my, this only gets worse... Force
* creation of a shadow object so that vm_map_fork
* will do the right thing.
*/
if ((flags & MAP_COPY) == 0) {
vm_map_t tmap;
vm_map_entry_t tentry;
vm_object_t tobject;
vm_offset_t toffset;
vm_prot_t tprot;
boolean_t twired, tsu;
tmap = map;
vm_map_lookup(&tmap, *addr, VM_PROT_WRITE,
&tentry, &tobject, &toffset,
&tprot, &twired, &tsu);
vm_map_lookup_done(tmap, tentry);
}
/*
* (XXX) Map copy code cannot detect sharing unless a
* sharing map is involved. So we cheat and write
* protect everything ourselves.
*/
vm_object_pmap_copy(object, foff, foff + size);
if (map->pmap)
pmap_object_init_pt(map->pmap, *addr, object, foff, size);
vm_object_deallocate(object);
vm_map_deallocate(tmap);
if (rv != KERN_SUCCESS)
goto out;
}
#ifdef DEBUG
if (mmapdebug & MDB_MAPIT)
printf("vm_mmap(%d): FILE *addr %x size %x pager %x\n",
curproc->p_pid, *addr, size, pager);
#endif
if ((type == PG_VNODE) && (map->pmap != NULL)) {
pmap_object_init_pt(map->pmap, *addr, object, foff, size);
}
/*
* Correct protection (default is VM_PROT_ALL). If maxprot is
* different than prot, we must set both explicitly.
@@ -819,7 +761,7 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
if (rv == KERN_SUCCESS && prot != maxprot)
rv = vm_map_protect(map, *addr, *addr + size, prot, FALSE);
if (rv != KERN_SUCCESS) {
(void) vm_deallocate(map, *addr, size);
(void) vm_map_remove(map, *addr, *addr + size);
goto out;
}
/*
@@ -828,7 +770,7 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
if (flags & MAP_SHARED) {
rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
if (rv != KERN_SUCCESS) {
(void) vm_deallocate(map, *addr, size);
(void) vm_map_remove(map, *addr, *addr + size);
goto out;
}
}
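
For orientation, an approximate outline of the rewritten vm_mmap() upper half as it reads in the new code above; error paths, reference-count housekeeping, and the MAP_COPY/OBJ_INTERNAL shadow-object workaround are elided, so treat this as a reading aid rather than the function itself:

	size = round_page(size);
	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;			/* let vm_map_find() pick the address */
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr))
			return (EINVAL);
		fitit = FALSE;
		(void) vm_map_remove(map, *addr, *addr + size);
	}

	/* Guarantee that the pager has an object; only unnamed anonymous
	 * regions need one allocated here. */
	object = vm_object_lookup(pager);
	if (object == NULL) {
		object = vm_object_allocate(size);
		object->pager = pager;
	} else
		vm_object_deallocate(object);	/* lose the lookup reference */

	if ((flags & (MAP_ANON | MAP_SHARED)) || (type == PG_DEVICE)) {
		/* Anonymous memory, shared file, or character special file:
		 * map the object directly. */
		rv = vm_map_find(map, object, foff, addr, size, fitit);
	} else {
		/* Copy-on-write regular file: reserve the target range, map
		 * the object into a throwaway map, then vm_map_copy() the
		 * entries across and write-protect the original pages. */
		rv = vm_map_find(map, NULL, 0, addr, size, fitit);
		off = VM_MIN_ADDRESS;
		tmap = vm_map_create(NULL, off, off + size, TRUE);
		rv = vm_map_find(tmap, object, foff, &off, size, FALSE);
		rv = vm_map_copy(map, tmap, *addr, size, off, FALSE, FALSE);
		vm_object_pmap_copy(object, foff, foff + size);
		vm_map_deallocate(tmap);
	}

	/* "Pre-fault" resident pages for vnode-backed mappings. */
	if ((type == PG_VNODE) && (map->pmap != NULL))
		pmap_object_init_pt(map->pmap, *addr, object, foff, size);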

sys/vm/vm_object.c

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.c,v 1.23 1995/02/18 06:48:33 davidg Exp $
* $Id: vm_object.c,v 1.24 1995/02/20 14:21:58 davidg Exp $
*/
/*
@@ -1119,7 +1119,6 @@ vm_object_enter(object, pager)
entry = (vm_object_hash_entry_t)
malloc((u_long) sizeof *entry, M_VMOBJHASH, M_WAITOK);
entry->object = object;
object->flags |= OBJ_CANPERSIST;
vm_object_cache_lock();
TAILQ_INSERT_TAIL(bucket, entry, hash_links);

sys/vm/vnode_pager.c

@@ -37,7 +37,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
* $Id: vnode_pager.c,v 1.21 1995/01/24 10:14:09 davidg Exp $
* $Id: vnode_pager.c,v 1.22 1995/02/03 06:46:28 davidg Exp $
*/
/*
@@ -175,8 +175,9 @@ vnode_pager_alloc(handle, size, prot, offset)
if ((rtval = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) == 0) {
object = vm_object_allocate(round_page(vattr.va_size));
object->flags &= ~OBJ_INTERNAL;
object->flags |= OBJ_CANPERSIST;
vm_object_enter(object, pager);
vm_object_setpager(object, pager, 0, TRUE);
object->pager = pager;
} else {
printf("Error in getattr: %d\n", rtval);
free((caddr_t) vnp, M_VMPGDATA);