From owner-svn-src-projects@FreeBSD.ORG Fri Mar 2 11:27:08 2012 Return-Path: Delivered-To: svn-src-projects@freebsd.org Received: from mx1.freebsd.org (mx1.freebsd.org [IPv6:2001:4f8:fff6::34]) by hub.freebsd.org (Postfix) with ESMTP id 38BB61065670; Fri, 2 Mar 2012 11:27:08 +0000 (UTC) (envelope-from glebius@FreeBSD.org) Received: from svn.freebsd.org (svn.freebsd.org [IPv6:2001:4f8:fff6::2c]) by mx1.freebsd.org (Postfix) with ESMTP id 21BE18FC1B; Fri, 2 Mar 2012 11:27:08 +0000 (UTC) Received: from svn.freebsd.org (localhost [127.0.0.1]) by svn.freebsd.org (8.14.4/8.14.4) with ESMTP id q22BR8nH027903; Fri, 2 Mar 2012 11:27:08 GMT (envelope-from glebius@svn.freebsd.org) Received: (from glebius@localhost) by svn.freebsd.org (8.14.4/8.14.4/Submit) id q22BR8dA027898; Fri, 2 Mar 2012 11:27:08 GMT (envelope-from glebius@svn.freebsd.org) Message-Id: <201203021127.q22BR8dA027898@svn.freebsd.org> From: Gleb Smirnoff Date: Fri, 2 Mar 2012 11:27:08 +0000 (UTC) To: src-committers@freebsd.org, svn-src-projects@freebsd.org X-SVN-Group: projects MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Cc: Subject: svn commit: r232386 - projects/pf/head/sys/contrib/pf/net X-BeenThere: svn-src-projects@freebsd.org X-Mailman-Version: 2.1.5 Precedence: list List-Id: "SVN commit messages for the src " projects" tree" List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Fri, 02 Mar 2012 11:27:08 -0000 Author: glebius Date: Fri Mar 2 11:27:07 2012 New Revision: 232386 URL: http://svn.freebsd.org/changeset/base/232386 Log: - Add separate mutex to lock state keys rbtree. - Add separate mutex to lock state IDs rbtree. - Add separate rwlock to lock the global states list. Modified: projects/pf/head/sys/contrib/pf/net/if_pfsync.c projects/pf/head/sys/contrib/pf/net/pf.c projects/pf/head/sys/contrib/pf/net/pf_ioctl.c projects/pf/head/sys/contrib/pf/net/pfvar.h Modified: projects/pf/head/sys/contrib/pf/net/if_pfsync.c ============================================================================== --- projects/pf/head/sys/contrib/pf/net/if_pfsync.c Fri Mar 2 10:03:38 2012 (r232385) +++ projects/pf/head/sys/contrib/pf/net/if_pfsync.c Fri Mar 2 11:27:07 2012 (r232386) @@ -698,18 +698,23 @@ pfsync_in_clr(struct pfsync_pkt *pkt, st creatorid = clr[i].creatorid; if (clr[i].ifname[0] == '\0') { + PF_KEYS_LOCK(); + PF_IDS_LOCK(); for (st = RB_MIN(pf_state_tree_id, &V_tree_id); st; st = nexts) { nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, st); if (st->creatorid == creatorid) { SET(st->state_flags, PFSTATE_NOSYNC); - pf_unlink_state(st); + pf_unlink_state(st, 1); } } + PF_IDS_UNLOCK(); + PF_KEYS_UNLOCK(); } else { if (pfi_kif_get(clr[i].ifname) == NULL) continue; + PF_KEYS_LOCK(); /* XXX correct? 
*/ for (sk = RB_MIN(pf_state_tree, &V_pf_statetbl); sk; sk = nextsk) { @@ -719,10 +724,11 @@ pfsync_in_clr(struct pfsync_pkt *pkt, st if (si->creatorid == creatorid) { SET(si->state_flags, PFSTATE_NOSYNC); - pf_unlink_state(si); + pf_unlink_state(si, 0); } } } + PF_KEYS_UNLOCK(); } } PF_UNLOCK(); @@ -1104,7 +1110,7 @@ pfsync_in_del(struct pfsync_pkt *pkt, st continue; } SET(st->state_flags, PFSTATE_NOSYNC); - pf_unlink_state(st); + pf_unlink_state(st, 0); } PF_UNLOCK(); @@ -1142,7 +1148,7 @@ pfsync_in_del_c(struct pfsync_pkt *pkt, } SET(st->state_flags, PFSTATE_NOSYNC); - pf_unlink_state(st); + pf_unlink_state(st, 0); } PF_UNLOCK(); @@ -2212,9 +2218,11 @@ pfsync_bulk_update(void *arg) i++; } + PF_LIST_RLOCK(); st = TAILQ_NEXT(st, entry_list); if (st == NULL) st = TAILQ_FIRST(&V_state_list); + PF_LIST_RUNLOCK(); if (st == sc->sc_bulk_last) { /* we're done */ Modified: projects/pf/head/sys/contrib/pf/net/pf.c ============================================================================== --- projects/pf/head/sys/contrib/pf/net/pf.c Fri Mar 2 10:03:38 2012 (r232385) +++ projects/pf/head/sys/contrib/pf/net/pf.c Fri Mar 2 11:27:07 2012 (r232386) @@ -489,6 +489,8 @@ pf_src_connlimit(struct pf_state **state struct pf_state *st; V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++; + PF_IDS_LOCK(); + /* XXXGL: this cycle should go into a separate taskq */ RB_FOREACH(st, pf_state_tree_id, &V_tree_id) { sk = st->key[PF_SK_WIRE]; /* @@ -513,6 +515,7 @@ pf_src_connlimit(struct pf_state **state killed++; } } + PF_IDS_UNLOCK(); if (V_pf_status.debug >= PF_DEBUG_MISC) printf(", %u states killed", killed); } @@ -680,7 +683,8 @@ pf_state_key_attach(struct pf_state_key struct pf_state_key *cur; struct pf_state *si, *olds = NULL; - KASSERT(s->key[idx] == NULL, ("%s: key is null!", __func__)); + PF_KEYS_ASSERT(); + KASSERT(s->key[idx] == NULL, ("%s: a key already attached", __func__)); if ((cur = RB_INSERT(pf_state_tree, &V_pf_statetbl, sk)) != NULL) { /* key exists. 
check for same kif, if none, add to key */ @@ -730,7 +734,7 @@ pf_state_key_attach(struct pf_state_key TAILQ_INSERT_HEAD(&s->key[idx]->states, s, key_list); if (olds) - pf_unlink_state(olds); + pf_unlink_state(olds, 0); return (0); } @@ -738,6 +742,9 @@ pf_state_key_attach(struct pf_state_key static void pf_detach_state(struct pf_state *s) { + + PF_KEYS_ASSERT(); + if (s->key[PF_SK_WIRE] == s->key[PF_SK_STACK]) s->key[PF_SK_WIRE] = NULL; @@ -753,6 +760,8 @@ pf_state_key_detach(struct pf_state *s, { struct pf_state *si; + PF_KEYS_ASSERT(); + si = TAILQ_FIRST(&s->key[idx]->states); while (si && si != s) si = TAILQ_NEXT(si, key_list); @@ -833,17 +842,22 @@ pf_state_insert(struct pfi_kif *kif, str s->kif = kif; + PF_KEYS_LOCK(); if (skw == sks) { - if (pf_state_key_attach(skw, s, PF_SK_WIRE)) + if (pf_state_key_attach(skw, s, PF_SK_WIRE)) { + PF_KEYS_UNLOCK(); return (-1); + } s->key[PF_SK_STACK] = s->key[PF_SK_WIRE]; } else { if (pf_state_key_attach(skw, s, PF_SK_WIRE)) { + PF_KEYS_UNLOCK(); uma_zfree(V_pf_state_key_pl, sks); return (-1); } if (pf_state_key_attach(sks, s, PF_SK_STACK)) { pf_state_key_detach(s, PF_SK_WIRE); + PF_KEYS_UNLOCK(); return (-1); } } @@ -852,7 +866,9 @@ pf_state_insert(struct pfi_kif *kif, str s->id = htobe64(V_pf_status.stateid++); s->creatorid = V_pf_status.hostid; } + PF_IDS_LOCK(); if (RB_INSERT(pf_state_tree_id, &V_tree_id, s) != NULL) { + PF_IDS_UNLOCK(); if (V_pf_status.debug >= PF_DEBUG_MISC) { printf("pf: state insert failed: " "id: %016llx creatorid: %08x", @@ -862,7 +878,11 @@ pf_state_insert(struct pfi_kif *kif, str pf_detach_state(s); return (-1); } + PF_IDS_UNLOCK(); + PF_KEYS_UNLOCK(); + PF_LIST_WLOCK(); TAILQ_INSERT_TAIL(&V_state_list, s, entry_list); + PF_LIST_WUNLOCK(); V_pf_status.fcounters[FCNT_STATE_INSERT]++; V_pf_status.states++; pfi_kif_ref(kif, PFI_KIF_REF_STATE); @@ -875,9 +895,14 @@ pf_state_insert(struct pfi_kif *kif, str struct pf_state * pf_find_state_byid(struct pf_state_cmp *key) { + struct pf_state *s; + V_pf_status.fcounters[FCNT_STATE_SEARCH]++; + PF_IDS_LOCK(); + s = RB_FIND(pf_state_tree_id, &V_tree_id, (struct pf_state *)key); + PF_IDS_UNLOCK(); - return (RB_FIND(pf_state_tree_id, &V_tree_id, (struct pf_state *)key)); + return (s); } /* XXX debug function, intended to be removed one day */ @@ -920,13 +945,16 @@ pf_find_state(struct pfi_kif *kif, struc V_pf_status.fcounters[FCNT_STATE_SEARCH]++; + PF_KEYS_LOCK(); if (dir == PF_OUT && pftag->statekey && ((struct pf_state_key *)pftag->statekey)->reverse) sk = ((struct pf_state_key *)pftag->statekey)->reverse; else { if ((sk = RB_FIND(pf_state_tree, &V_pf_statetbl, - (struct pf_state_key *)key)) == NULL) + (struct pf_state_key *)key)) == NULL) { + PF_KEYS_UNLOCK(); return (NULL); + } if (dir == PF_OUT && pftag->statekey && pf_compare_state_keys(pftag->statekey, sk, kif, dir) == 0) { @@ -943,8 +971,11 @@ pf_find_state(struct pfi_kif *kif, struc TAILQ_FOREACH(si, &sk->states, key_list) if ((si->kif == V_pfi_all || si->kif == kif) && sk == (dir == PF_IN ? si->key[PF_SK_WIRE] : - si->key[PF_SK_STACK])) + si->key[PF_SK_STACK])) { + PF_KEYS_UNLOCK(); return (si); + } + PF_KEYS_UNLOCK(); return (NULL); } @@ -957,14 +988,17 @@ pf_find_state_all(struct pf_state_key_cm V_pf_status.fcounters[FCNT_STATE_SEARCH]++; + PF_KEYS_LOCK(); sk = RB_FIND(pf_state_tree, &V_pf_statetbl, (struct pf_state_key *)key); if (sk != NULL) { TAILQ_FOREACH(s, &sk->states, key_list) if (dir == PF_INOUT || (sk == (dir == PF_IN ? 
s->key[PF_SK_WIRE] : s->key[PF_SK_STACK]))) { - if (more == NULL) + if (more == NULL) { + PF_KEYS_UNLOCK(); return (s); + } if (ret) (*more)++; @@ -972,6 +1006,7 @@ pf_find_state_all(struct pf_state_key_cm ret = s; } } + PF_KEYS_UNLOCK(); return (ret); } @@ -1145,9 +1180,11 @@ pf_src_tree_remove_state(struct pf_state } void -pf_unlink_state(struct pf_state *cur) +pf_unlink_state(struct pf_state *cur, int idslocked) { + PF_KEYS_ASSERT(); + if (cur->src.state == PF_TCPS_PROXY_DST) { /* XXX wire key the right one? */ pf_send_tcp(NULL, cur->rule.ptr, cur->key[PF_SK_WIRE]->af, @@ -1158,7 +1195,11 @@ pf_unlink_state(struct pf_state *cur) cur->src.seqhi, cur->src.seqlo + 1, TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL); } + if (!idslocked) + PF_IDS_LOCK(); RB_REMOVE(pf_state_tree_id, &V_tree_id, cur); + if (!idslocked) + PF_IDS_UNLOCK(); #if NPFLOW > 0 if (cur->state_flags & PFSTATE_PFLOW) if (export_pflow_ptr != NULL) @@ -1177,6 +1218,8 @@ static void pf_free_state(struct pf_state *cur) { + PF_LIST_WASSERT(); + if (pfsync_state_in_use_ptr != NULL && pfsync_state_in_use_ptr(cur)) return; @@ -1210,6 +1253,7 @@ pf_purge_expired_states(u_int32_t maxche struct pf_state *next; int locked = waslocked; + PF_LIST_WLOCK(); while (maxcheck--) { /* wrap to start of list when we hit the end */ if (cur == NULL) { @@ -1224,23 +1268,30 @@ pf_purge_expired_states(u_int32_t maxche if (cur->timeout == PFTM_UNLINKED) { /* free unlinked state */ if (! locked) { - if (!sx_try_upgrade(&V_pf_consistency_lock)) + if (!sx_try_upgrade(&V_pf_consistency_lock)) { + PF_LIST_WUNLOCK(); return (0); /* XXXGL */ + } locked = 1; } pf_free_state(cur); } else if (pf_state_expires(cur) <= time_second) { + PF_KEYS_LOCK(); /* unlink and free expired state */ - pf_unlink_state(cur); + pf_unlink_state(cur, 0); + PF_KEYS_UNLOCK(); if (! locked) { - if (!sx_try_upgrade(&V_pf_consistency_lock)) + if (!sx_try_upgrade(&V_pf_consistency_lock)) { + PF_LIST_WUNLOCK(); return (0); /* XXXGL */ + } locked = 1; } pf_free_state(cur); } cur = next; } + PF_LIST_WUNLOCK(); if (!waslocked && locked) sx_downgrade(&V_pf_consistency_lock); @@ -3877,7 +3928,7 @@ pf_test_state_tcp(struct pf_state **stat } /* XXX make sure it's the same direction ?? 
*/ (*state)->src.state = (*state)->dst.state = TCPS_CLOSED; - pf_unlink_state(*state); + pf_unlink_state(*state, 0); *state = NULL; return (PF_DROP); } Modified: projects/pf/head/sys/contrib/pf/net/pf_ioctl.c ============================================================================== --- projects/pf/head/sys/contrib/pf/net/pf_ioctl.c Fri Mar 2 10:03:38 2012 (r232385) +++ projects/pf/head/sys/contrib/pf/net/pf_ioctl.c Fri Mar 2 11:27:07 2012 (r232386) @@ -172,9 +172,6 @@ struct cdev *pf_dev; static void pf_clear_states(void); static int pf_clear_tables(void); static void pf_clear_srcnodes(void); -/* - * XXX - These are new and need to be checked when moveing to a new version - */ /* * Wrapper functions for pfil(9) hooks @@ -209,6 +206,9 @@ static volatile VNET_DEFINE(int, pf_pfil VNET_DEFINE(int, pf_end_threads); struct mtx pf_mtx; +struct mtx pf_state_keys_mtx; +struct mtx pf_state_ids_mtx; +struct rwlock pf_state_list_lock; struct rwlock pf_rules_lock; /* pfsync */ @@ -235,6 +235,9 @@ init_pf_mutex(void) { mtx_init(&pf_mtx, "pf Giant", NULL, MTX_DEF); + mtx_init(&pf_state_keys_mtx, "pf state keys", NULL, MTX_DEF); + mtx_init(&pf_state_ids_mtx, "pf state ids", NULL, MTX_DEF); + rw_init(&pf_state_list_lock, "pf state list"); rw_init(&pf_rules_lock, "pf rulesets"); /* XXXGL: name */ sx_init(&V_pf_consistency_lock, "pf_statetbl_lock"); @@ -245,6 +248,9 @@ destroy_pf_mutex(void) { mtx_destroy(&pf_mtx); + mtx_destroy(&pf_state_keys_mtx); + mtx_destroy(&pf_state_ids_mtx); + rw_destroy(&pf_state_list_lock); rw_destroy(&pf_rules_lock); sx_destroy(&V_pf_consistency_lock); } @@ -1682,6 +1688,8 @@ pfioctl(struct cdev *dev, u_long cmd, ca struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; u_int killed = 0; + PF_KEYS_LOCK(); + PF_IDS_LOCK(); for (s = RB_MIN(pf_state_tree_id, &V_tree_id); s; s = nexts) { nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, s); @@ -1689,10 +1697,12 @@ pfioctl(struct cdev *dev, u_long cmd, ca s->kif->pfik_name)) { /* don't send out individual delete messages */ SET(s->state_flags, PFSTATE_NOSYNC); - pf_unlink_state(s); + pf_unlink_state(s, 1); killed++; } } + PF_IDS_UNLOCK(); + PF_KEYS_UNLOCK(); psk->psk_killed = killed; if (pfsync_clear_states_ptr != NULL) pfsync_clear_states_ptr(V_pf_status.hostid, psk->psk_ifname); @@ -1711,12 +1721,14 @@ pfioctl(struct cdev *dev, u_long cmd, ca if (psk->psk_pfcmp.creatorid == 0) psk->psk_pfcmp.creatorid = V_pf_status.hostid; if ((s = pf_find_state_byid(&psk->psk_pfcmp))) { - pf_unlink_state(s); + pf_unlink_state(s, 0); psk->psk_killed = 1; } break; } + PF_KEYS_LOCK(); + PF_IDS_LOCK(); for (s = RB_MIN(pf_state_tree_id, &V_tree_id); s; s = nexts) { nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, s); @@ -1756,10 +1768,12 @@ pfioctl(struct cdev *dev, u_long cmd, ca !strcmp(psk->psk_label, s->rule.ptr->label))) && (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, s->kif->pfik_name))) { - pf_unlink_state(s); + pf_unlink_state(s, 1); killed++; } } + PF_IDS_UNLOCK(); + PF_KEYS_UNLOCK(); psk->psk_killed = killed; break; } @@ -1799,7 +1813,7 @@ pfioctl(struct cdev *dev, u_long cmd, ca case DIOCGETSTATES: { struct pfioc_states *ps = (struct pfioc_states *)addr; struct pf_state *state; - struct pfsync_state *p, *pstore; + struct pfsync_state *p, pstore; u_int32_t nr = 0; if (ps->ps_len == 0) { @@ -1808,32 +1822,31 @@ pfioctl(struct cdev *dev, u_long cmd, ca break; } - PF_UNLOCK(); - pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK); - PF_LOCK(); - p = ps->ps_states; + PF_LIST_RLOCK(); state = TAILQ_FIRST(&V_state_list); while (state) { if 
(state->timeout != PFTM_UNLINKED) { if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len) break; - pfsync_state_export(pstore, state); - PF_COPYOUT(pstore, p, sizeof(*p), error); - if (error) { - free(pstore, M_TEMP); + pfsync_state_export(&pstore, state); + PF_LIST_RUNLOCK(); /* XXXGL: ref state? */ + PF_UNLOCK(); + error = copyout(&pstore, p, sizeof(*p)); + PF_LOCK(); + if (error) goto fail; - } + PF_LIST_RLOCK(); p++; nr++; } state = TAILQ_NEXT(state, entry_list); } + PF_LIST_RUNLOCK(); ps->ps_len = sizeof(struct pfsync_state) * nr; - free(pstore, M_TEMP); break; } @@ -2964,14 +2977,18 @@ pfioctl(struct cdev *dev, u_long cmd, ca struct pf_src_node *n; struct pf_state *state; + PF_IDS_LOCK(); RB_FOREACH(state, pf_state_tree_id, &V_tree_id) { state->src_node = NULL; state->nat_src_node = NULL; } + PF_IDS_UNLOCK(); + PF_KEYS_LOCK(); RB_FOREACH(n, pf_src_tree, &V_tree_src_tracking) { n->expire = 1; n->states = 0; } + PF_KEYS_UNLOCK(); pf_purge_expired_src_nodes(1); V_pf_status.src_nodes = 0; break; @@ -2984,6 +3001,7 @@ pfioctl(struct cdev *dev, u_long cmd, ca (struct pfioc_src_node_kill *)addr; u_int killed = 0; + PF_KEYS_LOCK(); RB_FOREACH(sn, pf_src_tree, &V_tree_src_tracking) { if (PF_MATCHA(psnk->psnk_src.neg, &psnk->psnk_src.addr.v.a.addr, @@ -2995,6 +3013,7 @@ pfioctl(struct cdev *dev, u_long cmd, ca &sn->raddr, sn->af)) { /* Handle state to src_node linkage */ if (sn->states != 0) { + PF_IDS_LOCK(); RB_FOREACH(s, pf_state_tree_id, &V_tree_id) { if (s->src_node == sn) @@ -3002,12 +3021,14 @@ pfioctl(struct cdev *dev, u_long cmd, ca if (s->nat_src_node == sn) s->nat_src_node = NULL; } + PF_IDS_UNLOCK(); sn->states = 0; } sn->expire = 1; killed++; } } + PF_KEYS_UNLOCK(); if (killed > 0) pf_purge_expired_src_nodes(1); @@ -3141,13 +3162,17 @@ static void pf_clear_states(void) { struct pf_state *state; - + + PF_KEYS_LOCK(); + PF_IDS_LOCK(); RB_FOREACH(state, pf_state_tree_id, &V_tree_id) { state->timeout = PFTM_PURGE; /* don't send out individual delete messages */ state->sync_state = PFSTATE_NOSYNC; - pf_unlink_state(state); + pf_unlink_state(state, 1); } + PF_IDS_UNLOCK(); + PF_KEYS_UNLOCK(); #if 0 /* NPFSYNC */ /* @@ -3177,14 +3202,18 @@ pf_clear_srcnodes(void) struct pf_src_node *n; struct pf_state *state; + PF_IDS_LOCK(); RB_FOREACH(state, pf_state_tree_id, &V_tree_id) { state->src_node = NULL; state->nat_src_node = NULL; } + PF_IDS_UNLOCK(); + PF_KEYS_LOCK(); RB_FOREACH(n, pf_src_tree, &V_tree_src_tracking) { n->expire = 1; n->states = 0; } + PF_KEYS_UNLOCK(); } /* * XXX - Check for version missmatch!!! 
Modified: projects/pf/head/sys/contrib/pf/net/pfvar.h
==============================================================================
--- projects/pf/head/sys/contrib/pf/net/pfvar.h	Fri Mar 2 10:03:38 2012	(r232385)
+++ projects/pf/head/sys/contrib/pf/net/pfvar.h	Fri Mar 2 11:27:07 2012	(r232386)
@@ -208,6 +208,24 @@ extern struct mtx pf_mtx;
 #define PF_LOCK()	mtx_lock(&pf_mtx)
 #define PF_UNLOCK()	mtx_unlock(&pf_mtx)
 
+extern struct mtx pf_state_keys_mtx;
+#define PF_KEYS_ASSERT()	mtx_assert(&pf_state_keys_mtx, MA_OWNED)
+#define PF_KEYS_LOCK()		mtx_lock(&pf_state_keys_mtx)
+#define PF_KEYS_UNLOCK()	mtx_unlock(&pf_state_keys_mtx)
+
+extern struct mtx pf_state_ids_mtx;
+#define PF_IDS_ASSERT()		mtx_assert(&pf_state_ids_mtx, MA_OWNED)
+#define PF_IDS_LOCK()		mtx_lock(&pf_state_ids_mtx)
+#define PF_IDS_UNLOCK()		mtx_unlock(&pf_state_ids_mtx)
+
+extern struct rwlock pf_state_list_lock;
+#define PF_LIST_RASSERT()	rw_assert(&pf_state_list_lock, RA_RLOCKED)
+#define PF_LIST_RLOCK()		rw_rlock(&pf_state_list_lock)
+#define PF_LIST_RUNLOCK()	rw_runlock(&pf_state_list_lock)
+#define PF_LIST_WASSERT()	rw_assert(&pf_state_list_lock, RA_WLOCKED)
+#define PF_LIST_WLOCK()		rw_wlock(&pf_state_list_lock)
+#define PF_LIST_WUNLOCK()	rw_wunlock(&pf_state_list_lock)
+
 extern struct rwlock pf_rules_lock;
 #define PF_RULES_RLOCK()	rw_rlock(&pf_rules_lock)
 #define PF_RULES_RUNLOCK()	rw_runlock(&pf_rules_lock)
@@ -1770,7 +1788,7 @@ VNET_DECLARE(uma_zone_t, pfi_addr_pl);
 extern void			 pf_purge_thread(void *);
 extern int			 pf_purge_expired_src_nodes(int);
-extern void			 pf_unlink_state(struct pf_state *);
+extern void			 pf_unlink_state(struct pf_state *, int);
 extern int			 pf_state_insert(struct pfi_kif *,
			    struct pf_state_key *, struct pf_state_key *,
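
For reference, the split introduced here is: pf_state_keys_mtx protects the
state-key RB tree (V_pf_statetbl), pf_state_ids_mtx protects the state-ID RB
tree (V_tree_id), and pf_state_list_lock protects the global V_state_list.
The new second argument of pf_unlink_state() tells it whether the caller
already holds pf_state_ids_mtx, so it does not try to take the lock again.
A minimal sketch of the resulting pattern, modeled on the pfsync_in_clr()
hunk above; the helper name pf_kill_states_by_creator() is invented for
illustration only and is not part of this commit:

/*
 * Illustrative only, not part of r232386.  Unlink every state created by
 * the given host, following the key-tree -> ID-tree lock order used in
 * pfsync_in_clr() and the DIOCCLRSTATES/DIOCKILLSTATES handlers.
 */
static void
pf_kill_states_by_creator(u_int32_t creatorid)
{
	struct pf_state *st, *nexts;

	PF_KEYS_LOCK();		/* pf_unlink_state() asserts this is held */
	PF_IDS_LOCK();		/* serializes the V_tree_id walk */
	for (st = RB_MIN(pf_state_tree_id, &V_tree_id); st != NULL;
	    st = nexts) {
		nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, st);
		if (st->creatorid == creatorid) {
			/* don't send out individual delete messages */
			SET(st->state_flags, PFSTATE_NOSYNC);
			pf_unlink_state(st, 1);	/* 1: ID tree already locked */
		}
	}
	PF_IDS_UNLOCK();
	PF_KEYS_UNLOCK();
}

Plain traversal of the state list, as in the DIOCGETSTATES handler and
pfsync_bulk_update(), only needs PF_LIST_RLOCK()/PF_LIST_RUNLOCK(); insertion
into V_state_list in pf_state_insert() and pf_free_state() run under the
write side of pf_state_list_lock.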