Use locks specific to the lltable code, rather than borrowing the ifnet
list/index locks, to protect link layer address tables.  This avoids
lock order issues during interface teardown, but maintains the bug that
sysctl copy routines may be called while a non-sleepable lock is held.

Reviewed by:	bz, kmacy
MFC after:	3 days
This commit is contained in:
Robert Watson 2009-08-25 09:52:38 +00:00
parent 18159f6a49
commit dc56e98f0d
4 changed files with 24 additions and 16 deletions

View File

@@ -62,6 +62,9 @@ static SLIST_HEAD(, lltable) lltables = SLIST_HEAD_INITIALIZER(lltables);
extern void arprequest(struct ifnet *, struct in_addr *, struct in_addr *,
u_char *);
struct rwlock lltable_rwlock;
RW_SYSINIT(lltable_rwlock, &lltable_rwlock, "lltable_rwlock");
/*
* Dump arp state for a specific address family.
*/
@@ -71,7 +74,7 @@ lltable_sysctl_dumparp(int af, struct sysctl_req *wr)
struct lltable *llt;
int error = 0;
IFNET_RLOCK();
LLTABLE_RLOCK();
SLIST_FOREACH(llt, &lltables, llt_link) {
if (llt->llt_af == af) {
error = llt->llt_dump(llt, wr);
@@ -80,7 +83,7 @@ lltable_sysctl_dumparp(int af, struct sysctl_req *wr)
}
}
done:
IFNET_RUNLOCK();
LLTABLE_RUNLOCK();
return (error);
}
@@ -144,8 +147,6 @@ llentry_update(struct llentry **llep, struct lltable *lt,
/*
* Free all entries from given table and free itself.
* Since lltables collects from all of the intefaces,
* the caller of this function must acquire IFNET_WLOCK().
*/
void
lltable_free(struct lltable *llt)
@@ -155,9 +156,9 @@ lltable_free(struct lltable *llt)
KASSERT(llt != NULL, ("%s: llt is NULL", __func__));
IFNET_WLOCK();
LLTABLE_WLOCK();
SLIST_REMOVE(&lltables, llt, lltable, llt_link);
IFNET_WUNLOCK();
LLTABLE_WUNLOCK();
for (i=0; i < LLTBL_HASHTBL_SIZE; i++) {
LIST_FOREACH_SAFE(lle, &llt->lle_head[i], lle_next, next) {
@@ -178,7 +179,7 @@ lltable_drain(int af)
struct llentry *lle;
register int i;
IFNET_RLOCK();
LLTABLE_RLOCK();
SLIST_FOREACH(llt, &lltables, llt_link) {
if (llt->llt_af != af)
continue;
@@ -192,7 +193,7 @@ lltable_drain(int af)
}
}
}
IFNET_RUNLOCK();
LLTABLE_RUNLOCK();
}
void
@@ -200,14 +201,14 @@ lltable_prefix_free(int af, struct sockaddr *prefix, struct sockaddr *mask)
{
struct lltable *llt;
IFNET_RLOCK_NOSLEEP();
LLTABLE_RLOCK();
SLIST_FOREACH(llt, &lltables, llt_link) {
if (llt->llt_af != af)
continue;
llt->llt_prefix_free(llt, prefix, mask);
}
IFNET_RUNLOCK_NOSLEEP();
LLTABLE_RUNLOCK();
}
@@ -230,9 +231,9 @@ lltable_init(struct ifnet *ifp, int af)
for (i = 0; i < LLTBL_HASHTBL_SIZE; i++)
LIST_INIT(&llt->lle_head[i]);
IFNET_WLOCK();
LLTABLE_WLOCK();
SLIST_INSERT_HEAD(&lltables, llt, llt_link);
IFNET_WUNLOCK();
LLTABLE_WUNLOCK();
return (llt);
}
@@ -300,13 +301,13 @@ lla_rt_output(struct rt_msghdr *rtm, struct rt_addrinfo *info)
}
/* XXX linked list may be too expensive */
IFNET_RLOCK_NOSLEEP();
LLTABLE_RLOCK();
SLIST_FOREACH(llt, &lltables, llt_link) {
if (llt->llt_af == dst->sa_family &&
llt->llt_ifp == ifp)
break;
}
IFNET_RUNLOCK_NOSLEEP();
LLTABLE_RUNLOCK();
KASSERT(llt != NULL, ("Yep, ugly hacks are bad\n"));
if (flags && LLE_CREATE)

View File

@@ -41,6 +41,13 @@ struct rt_addrinfo;
struct llentry;
LIST_HEAD(llentries, llentry);
extern struct rwlock lltable_rwlock;
#define LLTABLE_RLOCK() rw_rlock(&lltable_rwlock)
#define LLTABLE_RUNLOCK() rw_runlock(&lltable_rwlock)
#define LLTABLE_WLOCK() rw_wlock(&lltable_rwlock)
#define LLTABLE_WUNLOCK() rw_wunlock(&lltable_rwlock)
#define LLTABLE_LOCK_ASSERT() rw_assert(&lltable_rwlock, RA_LOCKED)
/*
* Code referencing llentry must at least hold
* a shared lock

View File

@@ -1407,7 +1407,7 @@ in_lltable_dump(struct lltable *llt, struct sysctl_req *wr)
} arpc;
int error, i;
IFNET_RLOCK_ASSERT();
LLTABLE_LOCK_ASSERT();
error = 0;
for (i = 0; i < LLTBL_HASHTBL_SIZE; i++) {

View File

@@ -2495,7 +2495,7 @@ in6_lltable_dump(struct lltable *llt, struct sysctl_req *wr)
} ndpc;
int i, error;
IFNET_RLOCK_ASSERT();
LLTABLE_LOCK_ASSERT();
error = 0;
for (i = 0; i < LLTBL_HASHTBL_SIZE; i++) {