Adjust the threshold for blocking on movement of pages from the cache
queue in vm_fault.

Move the PG_BUSY in vm_fault to the correct place.

Remove redundant/unnecessary code in pmap.c.

Properly block on rundown of page table pages, if they are busy.

I think that the VM system is in pretty good shape now. The following
individuals (among others, in no particular order) have helped with this
recent bunch of bugs; thanks! If I left anyone out, I apologize!

Stephen McKay, Stephen Hocking, Eric J. Chet, Dan O'Brien, James Raynard,
Marc Fournier.
John Dyson 1996-06-08 06:48:35 +00:00
parent 477af064b2
commit 886d3e1150
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=16197
5 changed files with 34 additions and 57 deletions

pmap.c

@@ -39,7 +39,7 @@
  * SUCH DAMAGE.
  *
  * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
- * $Id: pmap.c,v 1.100 1996/06/05 06:36:21 dyson Exp $
+ * $Id: pmap.c,v 1.101 1996/06/07 02:36:08 dyson Exp $
  */
 /*
@@ -730,17 +730,6 @@ pmap_release(pmap)
 	if (object->ref_count != 1)
 		panic("pmap_release: pteobj reference count != 1");
-	/*
-	 * Wait until any (bogus) paging activity on this object is
-	 * complete.
-	 */
-	s = splvm();
-	while (object->paging_in_progress) {
-		object->flags |= OBJ_PIPWNT;
-		tsleep(object,PVM,"pmrlob",0);
-	}
-	splx(s);
 	ptdpg = NULL;
 retry:
 	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = n) {
@@ -749,15 +738,14 @@ retry:
 			ptdpg = p;
 			continue;
 		}
-		if ((p->flags & PG_BUSY) || p->busy)
-			continue;
 		if (!pmap_release_free_page(pmap, p))
 			goto retry;
 	}
 	if (ptdpg == NULL)
 		panic("pmap_release: missing page table directory page");
-	pmap_release_free_page(pmap, ptdpg);
+	if (!pmap_release_free_page(pmap, ptdpg))
+		goto retry;
 	vm_object_deallocate(object);
 	if (pdstackptr < PDSTACKMAX) {
@@ -1173,7 +1161,11 @@ pmap_remove(pmap, sva, eva)
 	}
 }
+/*
+ * Remove pte mapping, don't do everything that we would do
+ * for normal pages because many things aren't necessary (like
+ * pmap_update())...
+ */
 void
 pmap_remove_pte_mapping(pa)
 	vm_offset_t pa;
@@ -1189,7 +1181,6 @@ pmap_remove_pte_mapping(pa)
 			unsigned tpte;
 			struct pmap *pmap;
-			anyvalid = 1;
 			pmap = pv->pv_pmap;
 			pte = get_ptbase(pmap) + i386_btop(pv->pv_va);
 			if (tpte = *pte) {
@@ -1200,13 +1191,11 @@ pmap_remove_pte_mapping(pa)
 		}
 	}
-	if (anyvalid) {
-		for (pv = *ppv; pv; pv = npv) {
-			npv = pv->pv_next;
-			free_pv_entry(pv);
-		}
-		*ppv = NULL;
-	}
+	for (pv = *ppv; pv; pv = npv) {
+		npv = pv->pv_next;
+		free_pv_entry(pv);
+	}
+	*ppv = NULL;
 }
 /*
@@ -1221,7 +1210,7 @@ pmap_remove_pte_mapping(pa)
  * inefficient because they iteratively called
  * pmap_remove (slow...)
  */
-static __inline void
+static void
 pmap_remove_all(pa)
 	vm_offset_t pa;
 {
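The pmap_release() hunks above drop the object-level paging_in_progress wait
("Remove redundant/unnecessary code in pmap.c") and stop silently skipping
busy page table pages; the directory page now also funnels through
pmap_release_free_page(), whose failure return restarts the scan. The body of
pmap_release_free_page() is not part of this diff, so the following is only a
sketch, under the assumption that "properly block on rundown" means sleeping
on the busy page via the conventional PG_WANTED/tsleep() handshake of this
era and reporting failure so the retry loop runs again:

/*
 * Sketch only -- not the committed body of pmap_release_free_page().
 */
static int
pmap_release_free_page(pmap, p)
	struct pmap *pmap;
	vm_page_t p;
{
	if ((p->flags & PG_BUSY) || p->busy) {
		/* Block until the page is no longer busy, then retry. */
		p->flags |= PG_WANTED;
		tsleep(p, PVM, "pmaprl", 0);
		return 0;
	}
	/* ... actually free the page table page ... */
	return 1;
}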

vm_fault.c

@@ -66,7 +66,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_fault.c,v 1.47 1996/05/31 00:37:56 dyson Exp $
+ * $Id: vm_fault.c,v 1.48 1996/06/01 20:50:57 dyson Exp $
  */
 /*
@@ -283,16 +283,15 @@ RetryFault:;
 	/*
 	 * Mark page busy for other processes, and the pagedaemon.
 	 */
-	m->flags |= PG_BUSY;
 	if ((m->queue == PQ_CACHE) &&
-	    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_reserved) {
+	    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
 		UNLOCK_AND_DEALLOCATE;
 		VM_WAIT;
-		PAGE_WAKEUP(m);
 		goto RetryFault;
 	}
-	vm_page_unqueue(m);
+	m->flags |= PG_BUSY;
+	vm_page_unqueue_nowakeup(m);
 	if (m->valid &&
 	    ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
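Condensed, the fault path now decides whether to wait for memory before the
page is busied, which is why the PAGE_WAKEUP(m) on the retry path disappears;
only once the check passes is the page marked PG_BUSY and pulled off the
cache queue, using the nowakeup flavor so that reactivating a cache page does
not also prod the pagedaemon. (In this era's tuning v_free_min sits above
v_free_reserved, so the fault path now blocks sooner.) Schematically, with
the rationale as comments (a restatement of the hunk above, not a compilable
excerpt):

	if ((m->queue == PQ_CACHE) &&
	    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
		/* Page was never busied, so nothing to wake up on retry. */
		UNLOCK_AND_DEALLOCATE;
		VM_WAIT;
		goto RetryFault;
	}
	m->flags |= PG_BUSY;		/* busy it only once committed */
	vm_page_unqueue_nowakeup(m);	/* dequeue, no pagedaemon wakeup */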

vm_page.c

@@ -34,7 +34,7 @@
  * SUCH DAMAGE.
  *
  * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.53 1996/05/31 00:38:03 dyson Exp $
+ * $Id: vm_page.c,v 1.54 1996/06/05 03:31:48 dyson Exp $
  */
 /*
@@ -141,7 +141,6 @@ static inline __pure int
 		     vm_page_hash __P((vm_object_t object, vm_pindex_t pindex))
 		     __pure2;
-static void vm_page_unqueue_nowakeup __P((vm_page_t m));
 static int vm_page_freechk_and_unqueue __P((vm_page_t m));
 static void vm_page_free_wakeup __P((void));
@@ -526,7 +525,7 @@ vm_page_rename(m, new_object, new_pindex)
 /*
  * vm_page_unqueue without any wakeup
  */
-static __inline void
+__inline void
 vm_page_unqueue_nowakeup(m)
 	vm_page_t m;
 {
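These declaration changes make the already-existing vm_page_unqueue_nowakeup()
callable from vm_fault.c (see the prototype added to vm_page.h below). Its
body is not shown in this diff; the following is a rough sketch of the
contract difference between the two flavors, assuming the cache-queue
bookkeeping of this vintage (vm_page_queue_cache, pagedaemon_wakeup()) and
eliding the other queues and the exact wakeup threshold:

/* Sketch only -- illustrates the contract, not the committed bodies. */
void
vm_page_unqueue_nowakeup(m)
	vm_page_t m;
{
	if (m->queue == PQ_CACHE) {
		m->queue = PQ_NONE;
		TAILQ_REMOVE(&vm_page_queue_cache, m, pageq);
		cnt.v_cache_count--;
		/* no pagedaemon wakeup, whatever the counters say */
	}
	/* ... analogous removal for the other page queues ... */
}

void
vm_page_unqueue(m)
	vm_page_t m;
{
	vm_page_unqueue_nowakeup(m);
	/*
	 * The plain flavor may additionally wake the pagedaemon when
	 * taking a page off the cache queue leaves the free+cache pool
	 * short (threshold details omitted here).
	 */
	if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)
		pagedaemon_wakeup();
}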

vm_page.h

@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_page.h,v 1.26 1996/05/18 04:00:18 dyson Exp $
+ * $Id: vm_page.h,v 1.27 1996/06/05 03:31:49 dyson Exp $
  */
 /*
@@ -251,6 +251,7 @@ vm_offset_t vm_page_startup __P((vm_offset_t, vm_offset_t, vm_offset_t));
 void vm_page_unwire __P((vm_page_t));
 void vm_page_wire __P((vm_page_t));
 void vm_page_unqueue __P((vm_page_t));
+void vm_page_unqueue_nowakeup __P((vm_page_t));
 void vm_page_set_validclean __P((vm_page_t, int, int));
 void vm_page_set_invalid __P((vm_page_t, int, int));
 static __inline boolean_t vm_page_zero_fill __P((vm_page_t));