From owner-svn-src-projects@FreeBSD.ORG  Fri Mar  2 12:33:11 2012
Delivered-To: svn-src-projects@freebsd.org
Received: from mx1.freebsd.org (mx1.freebsd.org [69.147.83.52])
	by hub.freebsd.org (Postfix) with ESMTP id 0A54C106567E;
	Fri, 2 Mar 2012 12:33:10 +0000 (UTC)
	(envelope-from glebius@FreeBSD.org)
Received: from svn.freebsd.org (svn.freebsd.org [IPv6:2001:4f8:fff6::2c])
	by mx1.freebsd.org (Postfix) with ESMTP id CA77C8FC28;
	Fri, 2 Mar 2012 12:33:10 +0000 (UTC)
Received: from svn.freebsd.org (localhost [127.0.0.1])
	by svn.freebsd.org (8.14.4/8.14.4) with ESMTP id q22CXAsv030208;
	Fri, 2 Mar 2012 12:33:10 GMT
	(envelope-from glebius@svn.freebsd.org)
Received: (from glebius@localhost)
	by svn.freebsd.org (8.14.4/8.14.4/Submit) id q22CXARQ030198;
	Fri, 2 Mar 2012 12:33:10 GMT
	(envelope-from glebius@svn.freebsd.org)
Message-Id: <201203021233.q22CXARQ030198@svn.freebsd.org>
From: Gleb Smirnoff
Date: Fri, 2 Mar 2012 12:33:10 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-projects@freebsd.org
X-SVN-Group: projects
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Cc:
Subject: svn commit: r232390 - projects/pf/head/sys/contrib/pf/net
X-BeenThere: svn-src-projects@freebsd.org
X-Mailman-Version: 2.1.5
Precedence: list
List-Id: "SVN commit messages for the src projects tree"
X-List-Received-Date: Fri, 02 Mar 2012 12:33:11 -0000

Author: glebius
Date: Fri Mar  2 12:33:10 2012
New Revision: 232390
URL: http://svn.freebsd.org/changeset/base/232390

Log:
  - Due to the "V" prefix, our zone names no longer match the original
    OpenBSD names, so there is no reason to keep calling our zones
    "pools"; this change mechanically renames them.
  - Provide better zone names in uma_zcreate().
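[Editor's note, not part of r232390] For readers skimming the diff below, a
minimal sketch of the convention the change applies throughout pf: the
per-VNET zone variable keeps the "V_" accessor prefix, drops the
OpenBSD-style "_pl" (pool) suffix in favor of "_z", and uma_zcreate() gets a
human-readable zone name.  It mirrors the pf state zone hunk in pfattach();
the include list is abbreviated and the helper name pf_zone_example() is
hypothetical, while struct pf_state and PFSTATE_HIWAT come from pfvar.h.

/*
 * Sketch only, not part of the commit: the "_z" naming pattern with a
 * descriptive uma_zcreate() label, modeled on the pf state zone.
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <net/vnet.h>
#include <vm/uma.h>
#include <net/pfvar.h>		/* struct pf_state, PFSTATE_HIWAT (assumed context) */

VNET_DEFINE(uma_zone_t, pf_state_z);		/* formerly pf_state_pl */
#define	V_pf_state_z	VNET(pf_state_z)	/* accessor, as in pfvar.h */

static void
pf_zone_example(void)			/* hypothetical helper */
{
	struct pf_state *st;

	/* "pf states" replaces the old opaque label "pfstatepl". */
	V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_state),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT);

	/* Call sites only change which zone variable they reference. */
	st = uma_zalloc(V_pf_state_z, M_NOWAIT | M_ZERO);
	if (st != NULL)
		uma_zfree(V_pf_state_z, st);
}

The descriptive strings are what tools such as vmstat -z display, which is
the point of the second log item.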
Modified: projects/pf/head/sys/contrib/pf/net/if_pfsync.c projects/pf/head/sys/contrib/pf/net/pf.c projects/pf/head/sys/contrib/pf/net/pf_if.c projects/pf/head/sys/contrib/pf/net/pf_ioctl.c projects/pf/head/sys/contrib/pf/net/pf_lb.c projects/pf/head/sys/contrib/pf/net/pf_norm.c projects/pf/head/sys/contrib/pf/net/pf_osfp.c projects/pf/head/sys/contrib/pf/net/pf_table.c projects/pf/head/sys/contrib/pf/net/pfvar.h Modified: projects/pf/head/sys/contrib/pf/net/if_pfsync.c ============================================================================== --- projects/pf/head/sys/contrib/pf/net/if_pfsync.c Fri Mar 2 11:50:37 2012 (r232389) +++ projects/pf/head/sys/contrib/pf/net/if_pfsync.c Fri Mar 2 12:33:10 2012 (r232390) @@ -425,7 +425,7 @@ pfsync_alloc_scrub_memory(struct pfsync_ struct pf_state_peer *d) { if (s->scrub.scrub_flag && d->scrub == NULL) { - d->scrub = uma_zalloc(V_pf_state_scrub_pl, M_NOWAIT | M_ZERO); + d->scrub = uma_zalloc(V_pf_state_scrub_z, M_NOWAIT | M_ZERO); if (d->scrub == NULL) return (ENOMEM); } @@ -481,7 +481,7 @@ pfsync_state_import(struct pfsync_state else pool_flags = M_NOWAIT | M_ZERO; - if ((st = uma_zalloc(V_pf_state_pl, pool_flags)) == NULL) + if ((st = uma_zalloc(V_pf_state_z, pool_flags)) == NULL) goto cleanup; if ((skw = pf_alloc_state_key(pool_flags)) == NULL) @@ -576,17 +576,17 @@ cleanup: if (skw == sks) sks = NULL; if (skw != NULL) - uma_zfree(V_pf_state_key_pl, skw); + uma_zfree(V_pf_state_key_z, skw); if (sks != NULL) - uma_zfree(V_pf_state_key_pl, sks); + uma_zfree(V_pf_state_key_z, sks); cleanup_state: /* pf_state_insert frees the state keys */ if (st) { if (st->dst.scrub) - uma_zfree(V_pf_state_scrub_pl, st->dst.scrub); + uma_zfree(V_pf_state_scrub_z, st->dst.scrub); if (st->src.scrub) - uma_zfree(V_pf_state_scrub_pl, st->src.scrub); - uma_zfree(V_pf_state_pl, st); + uma_zfree(V_pf_state_scrub_z, st->src.scrub); + uma_zfree(V_pf_state_z, st); } return (error); } Modified: projects/pf/head/sys/contrib/pf/net/pf.c ============================================================================== --- projects/pf/head/sys/contrib/pf/net/pf.c Fri Mar 2 11:50:37 2012 (r232389) +++ projects/pf/head/sys/contrib/pf/net/pf.c Fri Mar 2 12:33:10 2012 (r232390) @@ -155,12 +155,12 @@ struct pf_anchor_stackframe { VNET_DEFINE(struct pf_anchor_stackframe, pf_anchor_stack[64]); #define V_pf_anchor_stack VNET(pf_anchor_stack) -VNET_DEFINE(uma_zone_t, pf_src_tree_pl); -VNET_DEFINE(uma_zone_t, pf_rule_pl); -VNET_DEFINE(uma_zone_t, pf_pooladdr_pl); -VNET_DEFINE(uma_zone_t, pf_state_pl); -VNET_DEFINE(uma_zone_t, pf_state_key_pl); -VNET_DEFINE(uma_zone_t, pf_altq_pl); +VNET_DEFINE(uma_zone_t, pf_src_tree_z); +VNET_DEFINE(uma_zone_t, pf_rule_z); +VNET_DEFINE(uma_zone_t, pf_pooladdr_z); +VNET_DEFINE(uma_zone_t, pf_state_z); +VNET_DEFINE(uma_zone_t, pf_state_key_z); +VNET_DEFINE(uma_zone_t, pf_altq_z); static void pf_src_tree_remove_state(struct pf_state *); static void pf_init_threshold(struct pf_threshold *, u_int32_t, @@ -549,7 +549,7 @@ pf_insert_src_node(struct pf_src_node ** if (*sn == NULL) { if (!rule->max_src_nodes || rule->src_nodes < rule->max_src_nodes) - (*sn) = uma_zalloc(V_pf_src_tree_pl, M_NOWAIT | M_ZERO); + (*sn) = uma_zalloc(V_pf_src_tree_z, M_NOWAIT | M_ZERO); else V_pf_status.lcounters[LCNT_SRCNODES]++; if ((*sn) == NULL) @@ -573,7 +573,7 @@ pf_insert_src_node(struct pf_src_node ** pf_print_host(&(*sn)->addr, 0, af); printf("\n"); } - uma_zfree(V_pf_src_tree_pl, *sn); + uma_zfree(V_pf_src_tree_z, *sn); return (-1); } (*sn)->creation = time_second; @@ -718,11 
+718,11 @@ pf_state_key_attach(struct pf_state_key sk : NULL); printf("\n"); } - uma_zfree(V_pf_state_key_pl, sk); + uma_zfree(V_pf_state_key_z, sk); return (-1); /* collision! */ } } - uma_zfree(V_pf_state_key_pl, sk); + uma_zfree(V_pf_state_key_z, sk); s->key[idx] = cur; } else s->key[idx] = sk; @@ -773,7 +773,7 @@ pf_state_key_detach(struct pf_state *s, RB_REMOVE(pf_state_tree, &V_pf_statetbl, s->key[idx]); if (s->key[idx]->reverse) s->key[idx]->reverse->reverse = NULL; - uma_zfree(V_pf_state_key_pl, s->key[idx]); + uma_zfree(V_pf_state_key_z, s->key[idx]); } s->key[idx] = NULL; } @@ -783,7 +783,7 @@ pf_alloc_state_key(int pool_flags) { struct pf_state_key *sk; - if ((sk = uma_zalloc(V_pf_state_key_pl, pool_flags)) == NULL) + if ((sk = uma_zalloc(V_pf_state_key_z, pool_flags)) == NULL) return (NULL); TAILQ_INIT(&sk->states); @@ -852,7 +852,7 @@ pf_state_insert(struct pfi_kif *kif, str } else { if (pf_state_key_attach(skw, s, PF_SK_WIRE)) { PF_KEYS_UNLOCK(); - uma_zfree(V_pf_state_key_pl, sks); + uma_zfree(V_pf_state_key_z, sks); return (-1); } if (pf_state_key_attach(sks, s, PF_SK_STACK)) { @@ -1140,7 +1140,7 @@ pf_purge_expired_src_nodes(int waslocked RB_REMOVE(pf_src_tree, &V_tree_src_tracking, cur); V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++; V_pf_status.src_nodes--; - uma_zfree(V_pf_src_tree_pl, cur); + uma_zfree(V_pf_src_tree_z, cur); } } @@ -1241,7 +1241,7 @@ pf_free_state(struct pf_state *cur) TAILQ_REMOVE(&V_state_list, cur, entry_list); if (cur->tag) pf_tag_unref(cur->tag); - uma_zfree(V_pf_state_pl, cur); + uma_zfree(V_pf_state_z, cur); V_pf_status.fcounters[FCNT_STATE_REMOVALS]++; V_pf_status.states--; } @@ -3058,9 +3058,9 @@ pf_test_rule(struct pf_rule **rm, struct return (action); } else { if (sk != NULL) - uma_zfree(V_pf_state_key_pl, sk); + uma_zfree(V_pf_state_key_z, sk); if (nk != NULL) - uma_zfree(V_pf_state_key_pl, nk); + uma_zfree(V_pf_state_key_z, nk); } /* copy back packet headers if we performed NAT operations */ @@ -3084,9 +3084,9 @@ pf_test_rule(struct pf_rule **rm, struct cleanup: if (sk != NULL) - uma_zfree(V_pf_state_key_pl, sk); + uma_zfree(V_pf_state_key_z, sk); if (nk != NULL) - uma_zfree(V_pf_state_key_pl, nk); + uma_zfree(V_pf_state_key_z, nk); return (PF_DROP); } @@ -3123,7 +3123,7 @@ pf_create_state(struct pf_rule *r, struc REASON_SET(&reason, PFRES_SRCLIMIT); goto csfailed; } - s = uma_zalloc(V_pf_state_pl, M_NOWAIT | M_ZERO); + s = uma_zalloc(V_pf_state_z, M_NOWAIT | M_ZERO); if (s == NULL) { REASON_SET(&reason, PFRES_MEMORY); goto csfailed; @@ -3214,7 +3214,7 @@ pf_create_state(struct pf_rule *r, struc REASON_SET(&reason, PFRES_MEMORY); pf_src_tree_remove_state(s); STATE_DEC_COUNTERS(s); - uma_zfree(V_pf_state_pl, s); + uma_zfree(V_pf_state_z, s); return (PF_DROP); } if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub && @@ -3226,7 +3226,7 @@ pf_create_state(struct pf_rule *r, struc pf_normalize_tcp_cleanup(s); pf_src_tree_remove_state(s); STATE_DEC_COUNTERS(s); - uma_zfree(V_pf_state_pl, s); + uma_zfree(V_pf_state_z, s); return (PF_DROP); } } @@ -3242,7 +3242,7 @@ pf_create_state(struct pf_rule *r, struc REASON_SET(&reason, PFRES_STATEINS); pf_src_tree_remove_state(s); STATE_DEC_COUNTERS(s); - uma_zfree(V_pf_state_pl, s); + uma_zfree(V_pf_state_z, s); return (PF_DROP); } else *sm = s; @@ -3290,21 +3290,21 @@ pf_create_state(struct pf_rule *r, struc csfailed: if (sk != NULL) - uma_zfree(V_pf_state_key_pl, sk); + uma_zfree(V_pf_state_key_z, sk); if (nk != NULL) - uma_zfree(V_pf_state_key_pl, nk); + uma_zfree(V_pf_state_key_z, nk); if (sn != NULL 
&& sn->states == 0 && sn->expire == 0) { RB_REMOVE(pf_src_tree, &V_tree_src_tracking, sn); V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++; V_pf_status.src_nodes--; - uma_zfree(V_pf_src_tree_pl, sn); + uma_zfree(V_pf_src_tree_z, sn); } if (nsn != sn && nsn != NULL && nsn->states == 0 && nsn->expire == 0) { RB_REMOVE(pf_src_tree, &V_tree_src_tracking, nsn); V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++; V_pf_status.src_nodes--; - uma_zfree(V_pf_src_tree_pl, nsn); + uma_zfree(V_pf_src_tree_z, nsn); } return (PF_DROP); } Modified: projects/pf/head/sys/contrib/pf/net/pf_if.c ============================================================================== --- projects/pf/head/sys/contrib/pf/net/pf_if.c Fri Mar 2 11:50:37 2012 (r232389) +++ projects/pf/head/sys/contrib/pf/net/pf_if.c Fri Mar 2 12:33:10 2012 (r232390) @@ -65,7 +65,7 @@ __FBSDID("$FreeBSD$"); #endif /* INET6 */ VNET_DEFINE(struct pfi_kif *, pfi_all); -VNET_DEFINE(uma_zone_t, pfi_addr_pl); +VNET_DEFINE(uma_zone_t, pfi_addr_z); VNET_DEFINE(struct pfi_ifhead, pfi_ifs); #define V_pfi_ifs VNET(pfi_ifs) VNET_DEFINE(long, pfi_update); @@ -395,7 +395,7 @@ pfi_dynaddr_setup(struct pf_addr_wrap *a if (aw->type != PF_ADDR_DYNIFTL) return (0); /* XXX: revisit! */ - if ((dyn = uma_zalloc(V_pfi_addr_pl, M_WAITOK | M_ZERO)) + if ((dyn = uma_zalloc(V_pfi_addr_z, M_WAITOK | M_ZERO)) == NULL) return (1); @@ -451,7 +451,7 @@ _bad: pf_remove_if_empty_ruleset(ruleset); if (dyn->pfid_kif != NULL) pfi_kif_unref(dyn->pfid_kif, PFI_KIF_REF_RULE); - uma_zfree(V_pfi_addr_pl, dyn); + uma_zfree(V_pfi_addr_z, dyn); return (rv); } @@ -640,7 +640,7 @@ pfi_dynaddr_remove(struct pf_addr_wrap * aw->p.dyn->pfid_kif = NULL; pfr_detach_table(aw->p.dyn->pfid_kt); aw->p.dyn->pfid_kt = NULL; - uma_zfree(V_pfi_addr_pl, aw->p.dyn); + uma_zfree(V_pfi_addr_z, aw->p.dyn); aw->p.dyn = NULL; } Modified: projects/pf/head/sys/contrib/pf/net/pf_ioctl.c ============================================================================== --- projects/pf/head/sys/contrib/pf/net/pf_ioctl.c Fri Mar 2 11:50:37 2012 (r232389) +++ projects/pf/head/sys/contrib/pf/net/pf_ioctl.c Fri Mar 2 12:33:10 2012 (r232390) @@ -259,15 +259,15 @@ static void cleanup_pf_zone(void) { - uma_zdestroy(V_pf_src_tree_pl); - uma_zdestroy(V_pf_rule_pl); - uma_zdestroy(V_pf_state_pl); - uma_zdestroy(V_pf_state_key_pl); - uma_zdestroy(V_pf_altq_pl); - uma_zdestroy(V_pf_pooladdr_pl); - uma_zdestroy(V_pfr_ktable_pl); - uma_zdestroy(V_pfr_kentry_pl); - uma_zdestroy(V_pfi_addr_pl); + uma_zdestroy(V_pf_src_tree_z); + uma_zdestroy(V_pf_rule_z); + uma_zdestroy(V_pf_state_z); + uma_zdestroy(V_pf_state_key_z); + uma_zdestroy(V_pf_altq_z); + uma_zdestroy(V_pf_pooladdr_z); + uma_zdestroy(V_pfr_ktable_z); + uma_zdestroy(V_pfr_kentry_z); + uma_zdestroy(V_pfi_addr_z); } int @@ -275,38 +275,41 @@ pfattach(void) { u_int32_t *my_timeout = V_pf_default_rule.timeout; - V_pf_src_tree_pl = uma_zcreate("pfsrctrpl", sizeof(struct pf_src_node), - NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); - V_pf_rule_pl = uma_zcreate("pfrulepl", sizeof(struct pf_rule), + V_pf_src_tree_z = uma_zcreate("pf src nodes", + sizeof(struct pf_src_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, + 0); + V_pf_rule_z = uma_zcreate("pf rules", sizeof(struct pf_rule), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); - V_pf_state_pl = uma_zcreate("pfstatepl", sizeof(struct pf_state), + V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_state), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); - V_pf_state_key_pl = uma_zcreate("pfstatekeypl", + V_pf_state_key_z = uma_zcreate("pf state 
keys", sizeof(struct pf_state_key), NULL, NULL, NULL, NULL,UMA_ALIGN_PTR, 0); - V_pf_altq_pl = uma_zcreate("pfaltqpl", sizeof(struct pf_altq), + V_pf_altq_z = uma_zcreate("pf altq", sizeof(struct pf_altq), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); - V_pf_pooladdr_pl = uma_zcreate("pfpooladdrpl", + V_pf_pooladdr_z = uma_zcreate("pf pool addresses", sizeof(struct pf_pooladdr), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); - V_pfr_ktable_pl = uma_zcreate("pfrktable", sizeof(struct pfr_ktable), - NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); - V_pfr_kentry_pl = uma_zcreate("pfrkentry", sizeof(struct pfr_kentry), - NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); - V_pfi_addr_pl = uma_zcreate("pfiaddrpl", sizeof(struct pfi_dynaddr), + V_pfr_ktable_z = uma_zcreate("pf tables", + sizeof(struct pfr_ktable), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, + 0); + V_pfr_kentry_z = uma_zcreate("pf table entries", + sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, + 0); + V_pfi_addr_z = uma_zcreate("pf pfi_dynaddr", sizeof(struct pfi_dynaddr), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); pfr_initialize(); pfi_initialize(); pf_osfp_initialize(); pf_normalize_init(); - V_pf_pool_limits[PF_LIMIT_STATES].pp = V_pf_state_pl; + V_pf_pool_limits[PF_LIMIT_STATES].pp = V_pf_state_z; V_pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT; - V_pf_pool_limits[PF_LIMIT_SRC_NODES].pp = V_pf_src_tree_pl; + V_pf_pool_limits[PF_LIMIT_SRC_NODES].pp = V_pf_src_tree_z; V_pf_pool_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT; - V_pf_pool_limits[PF_LIMIT_TABLES].pp = V_pfr_ktable_pl; + V_pf_pool_limits[PF_LIMIT_TABLES].pp = V_pfr_ktable_z; V_pf_pool_limits[PF_LIMIT_TABLES].limit = PFR_KTABLE_HIWAT; - V_pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].pp = V_pfr_kentry_pl; + V_pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].pp = V_pfr_kentry_z; V_pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT; uma_zone_set_max(V_pf_pool_limits[PF_LIMIT_STATES].pp, V_pf_pool_limits[PF_LIMIT_STATES].limit); @@ -432,7 +435,7 @@ pf_empty_pool(struct pf_palist *poola) pf_tbladdr_remove(&empty_pool_pa->addr); pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE); TAILQ_REMOVE(poola, empty_pool_pa, entries); - uma_zfree(V_pf_pooladdr_pl, empty_pool_pa); + uma_zfree(V_pf_pooladdr_z, empty_pool_pa); } } @@ -479,7 +482,7 @@ pf_rm_rule(struct pf_rulequeue *rulequeu pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE); pf_anchor_remove(rule); pf_empty_pool(&rule->rpool.list); - uma_zfree(V_pf_rule_pl, rule); + uma_zfree(V_pf_rule_z, rule); } static u_int16_t @@ -618,7 +621,7 @@ pf_begin_altq(u_int32_t *ticket) error = altq_remove(altq); } else pf_qid_unref(altq->qid); - uma_zfree(V_pf_altq_pl, altq); + uma_zfree(V_pf_altq_z, altq); } if (error) return (error); @@ -644,7 +647,7 @@ pf_rollback_altq(u_int32_t ticket) error = altq_remove(altq); } else pf_qid_unref(altq->qid); - uma_zfree(V_pf_altq_pl, altq); + uma_zfree(V_pf_altq_z, altq); } V_altqs_inactive_open = 0; return (error); @@ -695,7 +698,7 @@ pf_commit_altq(u_int32_t ticket) error = err; } else pf_qid_unref(altq->qid); - uma_zfree(V_pf_altq_pl, altq); + uma_zfree(V_pf_altq_z, altq); } V_altqs_inactive_open = 0; @@ -775,7 +778,7 @@ pf_altq_ifnet_event(struct ifnet *ifp, i /* Copy the current active set */ TAILQ_FOREACH(a1, V_pf_altqs_active, entries) { - a2 = uma_zalloc(V_pf_altq_pl, M_NOWAIT); + a2 = uma_zalloc(V_pf_altq_z, M_NOWAIT); if (a2 == NULL) { error = ENOMEM; break; @@ -785,7 +788,7 @@ pf_altq_ifnet_event(struct ifnet *ifp, i if (a2->qname[0] != 0) { if ((a2->qid = pf_qname2qid(a2->qname)) == 0) { 
error = EBUSY; - uma_zfree(V_pf_altq_pl, a2); + uma_zfree(V_pf_altq_z, a2); break; } a2->altq_disc = NULL; @@ -811,7 +814,7 @@ pf_altq_ifnet_event(struct ifnet *ifp, i error = EBUSY; if (error) { - uma_zfree(V_pf_altq_pl, a2); + uma_zfree(V_pf_altq_z, a2); break; } } @@ -1261,7 +1264,7 @@ pfioctl(struct cdev *dev, u_long cmd, ca error = EBUSY; break; } - rule = uma_zalloc(V_pf_rule_pl, M_NOWAIT); + rule = uma_zalloc(V_pf_rule_z, M_NOWAIT); if (rule == NULL) { error = ENOMEM; break; @@ -1278,14 +1281,14 @@ pfioctl(struct cdev *dev, u_long cmd, ca rule->entries.tqe_prev = NULL; #ifndef INET if (rule->af == AF_INET) { - uma_zfree(V_pf_rule_pl, rule); + uma_zfree(V_pf_rule_z, rule); error = EAFNOSUPPORT; break; } #endif /* INET */ #ifndef INET6 if (rule->af == AF_INET6) { - uma_zfree(V_pf_rule_pl, rule); + uma_zfree(V_pf_rule_z, rule); error = EAFNOSUPPORT; break; } @@ -1299,7 +1302,7 @@ pfioctl(struct cdev *dev, u_long cmd, ca if (rule->ifname[0]) { rule->kif = pfi_kif_get(rule->ifname); if (rule->kif == NULL) { - uma_zfree(V_pf_rule_pl, rule); + uma_zfree(V_pf_rule_z, rule); error = EINVAL; break; } @@ -1510,7 +1513,7 @@ pfioctl(struct cdev *dev, u_long cmd, ca } if (pcr->action != PF_CHANGE_REMOVE) { - newrule = uma_zalloc(V_pf_rule_pl, M_NOWAIT); + newrule = uma_zalloc(V_pf_rule_z, M_NOWAIT); if (newrule == NULL) { error = ENOMEM; break; @@ -1524,14 +1527,14 @@ pfioctl(struct cdev *dev, u_long cmd, ca newrule->entries.tqe_prev = NULL; #ifndef INET if (newrule->af == AF_INET) { - uma_zfree(V_pf_rule_pl, newrule); + uma_zfree(V_pf_rule_z, newrule); error = EAFNOSUPPORT; break; } #endif /* INET */ #ifndef INET6 if (newrule->af == AF_INET6) { - uma_zfree(V_pf_rule_pl, newrule); + uma_zfree(V_pf_rule_z, newrule); error = EAFNOSUPPORT; break; } @@ -1539,7 +1542,7 @@ pfioctl(struct cdev *dev, u_long cmd, ca if (newrule->ifname[0]) { newrule->kif = pfi_kif_get(newrule->ifname); if (newrule->kif == NULL) { - uma_zfree(V_pf_rule_pl, newrule); + uma_zfree(V_pf_rule_z, newrule); error = EINVAL; break; } @@ -2062,7 +2065,7 @@ pfioctl(struct cdev *dev, u_long cmd, ca error = EBUSY; break; } - altq = uma_zalloc(V_pf_altq_pl, M_NOWAIT); + altq = uma_zalloc(V_pf_altq_z, M_NOWAIT); if (altq == NULL) { error = ENOMEM; break; @@ -2077,7 +2080,7 @@ pfioctl(struct cdev *dev, u_long cmd, ca if (altq->qname[0] != 0) { if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { error = EBUSY; - uma_zfree(V_pf_altq_pl, altq); + uma_zfree(V_pf_altq_z, altq); break; } altq->altq_disc = NULL; @@ -2100,7 +2103,7 @@ pfioctl(struct cdev *dev, u_long cmd, ca PF_LOCK(); } if (error) { - uma_zfree(V_pf_altq_pl, altq); + uma_zfree(V_pf_altq_z, altq); break; } @@ -2218,7 +2221,7 @@ pfioctl(struct cdev *dev, u_long cmd, ca error = EINVAL; break; } - pa = uma_zalloc(V_pf_pooladdr_pl, M_NOWAIT); + pa = uma_zalloc(V_pf_pooladdr_z, M_NOWAIT); if (pa == NULL) { error = ENOMEM; break; @@ -2227,7 +2230,7 @@ pfioctl(struct cdev *dev, u_long cmd, ca if (pa->ifname[0]) { pa->kif = pfi_kif_get(pa->ifname); if (pa->kif == NULL) { - uma_zfree(V_pf_pooladdr_pl, pa); + uma_zfree(V_pf_pooladdr_z, pa); error = EINVAL; break; } @@ -2236,7 +2239,7 @@ pfioctl(struct cdev *dev, u_long cmd, ca if (pfi_dynaddr_setup(&pa->addr, pp->af)) { pfi_dynaddr_remove(&pa->addr); pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE); - uma_zfree(V_pf_pooladdr_pl, pa); + uma_zfree(V_pf_pooladdr_z, pa); error = EINVAL; break; } @@ -2312,7 +2315,7 @@ pfioctl(struct cdev *dev, u_long cmd, ca break; } if (pca->action != PF_CHANGE_REMOVE) { - newpa = uma_zalloc(V_pf_pooladdr_pl, + newpa 
= uma_zalloc(V_pf_pooladdr_z, M_NOWAIT); if (newpa == NULL) { error = ENOMEM; @@ -2321,14 +2324,14 @@ pfioctl(struct cdev *dev, u_long cmd, ca bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); #ifndef INET if (pca->af == AF_INET) { - uma_zfree(V_pf_pooladdr_pl, newpa); + uma_zfree(V_pf_pooladdr_z, newpa); error = EAFNOSUPPORT; break; } #endif /* INET */ #ifndef INET6 if (pca->af == AF_INET6) { - uma_zfree(V_pf_pooladdr_pl, newpa); + uma_zfree(V_pf_pooladdr_z, newpa); error = EAFNOSUPPORT; break; } @@ -2336,7 +2339,7 @@ pfioctl(struct cdev *dev, u_long cmd, ca if (newpa->ifname[0]) { newpa->kif = pfi_kif_get(newpa->ifname); if (newpa->kif == NULL) { - uma_zfree(V_pf_pooladdr_pl, newpa); + uma_zfree(V_pf_pooladdr_z, newpa); error = EINVAL; break; } @@ -2347,7 +2350,7 @@ pfioctl(struct cdev *dev, u_long cmd, ca pf_tbladdr_setup(ruleset, &newpa->addr)) { pfi_dynaddr_remove(&newpa->addr); pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE); - uma_zfree(V_pf_pooladdr_pl, newpa); + uma_zfree(V_pf_pooladdr_z, newpa); error = EINVAL; break; } @@ -2376,7 +2379,7 @@ pfioctl(struct cdev *dev, u_long cmd, ca pfi_dynaddr_remove(&oldpa->addr); pf_tbladdr_remove(&oldpa->addr); pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE); - uma_zfree(V_pf_pooladdr_pl, oldpa); + uma_zfree(V_pf_pooladdr_z, oldpa); } else { if (oldpa == NULL) TAILQ_INSERT_TAIL(&pool->list, newpa, entries); Modified: projects/pf/head/sys/contrib/pf/net/pf_lb.c ============================================================================== --- projects/pf/head/sys/contrib/pf/net/pf_lb.c Fri Mar 2 11:50:37 2012 (r232389) +++ projects/pf/head/sys/contrib/pf/net/pf_lb.c Fri Mar 2 12:33:10 2012 (r232390) @@ -691,8 +691,8 @@ pf_get_translation(struct pf_pdesc *pd, * Pretend there was no match. */ if (!bcmp(*skp, *nkp, sizeof(struct pf_state_key_cmp))) { - uma_zfree(V_pf_state_key_pl, *nkp); - uma_zfree(V_pf_state_key_pl, *skp); + uma_zfree(V_pf_state_key_z, *nkp); + uma_zfree(V_pf_state_key_z, *skp); *skw = *sks = *nkp = *skp = NULL; return (NULL); } Modified: projects/pf/head/sys/contrib/pf/net/pf_norm.c ============================================================================== --- projects/pf/head/sys/contrib/pf/net/pf_norm.c Fri Mar 2 11:50:37 2012 (r232389) +++ projects/pf/head/sys/contrib/pf/net/pf_norm.c Fri Mar 2 12:33:10 2012 (r232390) @@ -107,16 +107,16 @@ static struct mtx pf_frag_mtx; #define PF_FRAG_UNLOCK() mtx_unlock(&pf_frag_mtx) #define PF_FRAG_ASSERT() mtx_assert(&pf_frag_mtx, MA_OWNED) -VNET_DEFINE(uma_zone_t, pf_state_scrub_pl); /* XXX: shared with pfsync */ +VNET_DEFINE(uma_zone_t, pf_state_scrub_z); /* XXX: shared with pfsync */ -static VNET_DEFINE(uma_zone_t, pf_frent_pl); -#define V_pf_frent_pl VNET(pf_frent_pl) -static VNET_DEFINE(uma_zone_t, pf_frag_pl); -#define V_pf_frag_pl VNET(pf_frag_pl) -static VNET_DEFINE(uma_zone_t, pf_cache_pl); -#define V_pf_cache_pl VNET(pf_cache_pl) -static VNET_DEFINE(uma_zone_t, pf_cent_pl); -#define V_pf_cent_pl VNET(pf_cent_pl) +static VNET_DEFINE(uma_zone_t, pf_frent_z); +#define V_pf_frent_z VNET(pf_frent_z) +static VNET_DEFINE(uma_zone_t, pf_frag_z); +#define V_pf_frag_z VNET(pf_frag_z) +static VNET_DEFINE(uma_zone_t, pf_cache_z); +#define V_pf_cache_z VNET(pf_cache_z) +static VNET_DEFINE(uma_zone_t, pf_cent_z); +#define V_pf_cent_z VNET(pf_cent_z) static VNET_DEFINE(int, pf_nfrents); #define V_pf_nfrents VNET(pf_nfrents) static VNET_DEFINE(int, pf_ncache); @@ -166,29 +166,29 @@ void pf_normalize_init(void) { - V_pf_frent_pl = uma_zcreate("pffrent", sizeof(struct pf_frent), + V_pf_frent_z = 
uma_zcreate("pffrent", sizeof(struct pf_frent), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); /* XXXGL: two zones of struct pf_fragment */ - V_pf_frag_pl = uma_zcreate("pffrag", sizeof(struct pf_fragment), + V_pf_frag_z = uma_zcreate("pffrag", sizeof(struct pf_fragment), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); - V_pf_cache_pl = uma_zcreate("pffrcache", sizeof(struct pf_fragment), + V_pf_cache_z = uma_zcreate("pffrcache", sizeof(struct pf_fragment), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); - V_pf_cent_pl = uma_zcreate("pffrcent", sizeof(struct pf_frcache), + V_pf_cent_z = uma_zcreate("pffrcent", sizeof(struct pf_frcache), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); - V_pf_state_scrub_pl = uma_zcreate("pfstatescrub", + V_pf_state_scrub_z = uma_zcreate("pfstatescrub", sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); /* * XXX * No high water mark support(It's hint not hard limit). - * uma_zone_set_max(pf_frag_pl, PFFRAG_FRAG_HIWAT); + * uma_zone_set_max(pf_frag_z, PFFRAG_FRAG_HIWAT); */ - uma_zone_set_max(V_pf_frent_pl, PFFRAG_FRENT_HIWAT); - uma_zone_set_max(V_pf_cache_pl, PFFRAG_FRCACHE_HIWAT); - uma_zone_set_max(V_pf_cent_pl, PFFRAG_FRCENT_HIWAT); + uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT); + uma_zone_set_max(V_pf_cache_z, PFFRAG_FRCACHE_HIWAT); + uma_zone_set_max(V_pf_cent_z, PFFRAG_FRCENT_HIWAT); - V_pf_pool_limits[PF_LIMIT_FRAGS].pp = V_pf_frent_pl; + V_pf_pool_limits[PF_LIMIT_FRAGS].pp = V_pf_frent_z; V_pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT; mtx_init(&pf_frag_mtx, "pf fragments", NULL, MTX_DEF); @@ -201,11 +201,11 @@ void pf_normalize_cleanup(void) { - uma_zdestroy(V_pf_frent_pl); - uma_zdestroy(V_pf_frag_pl); - uma_zdestroy(V_pf_cache_pl); - uma_zdestroy(V_pf_cent_pl); - uma_zdestroy(V_pf_state_scrub_pl); + uma_zdestroy(V_pf_frent_z); + uma_zdestroy(V_pf_frag_z); + uma_zdestroy(V_pf_cache_z); + uma_zdestroy(V_pf_cent_z); + uma_zdestroy(V_pf_state_scrub_z); mtx_destroy(&pf_frag_mtx); } @@ -315,7 +315,7 @@ pf_free_fragment(struct pf_fragment *fra LIST_REMOVE(frent, fr_next); m_freem(frent->fr_m); - uma_zfree(V_pf_frent_pl, frent); + uma_zfree(V_pf_frent_z, frent); V_pf_nfrents--; } } else { @@ -329,7 +329,7 @@ pf_free_fragment(struct pf_fragment *fra ("! 
(LIST_EMPTY() || LIST_FIRST()->fr_off >" " frcache->fr_end): %s", __FUNCTION__)); - uma_zfree(V_pf_cent_pl, frcache); + uma_zfree(V_pf_cent_z, frcache); V_pf_ncache--; } } @@ -383,11 +383,11 @@ pf_remove_fragment(struct pf_fragment *f if (BUFFER_FRAGMENTS(frag)) { RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag); TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next); - uma_zfree(V_pf_frag_pl, frag); + uma_zfree(V_pf_frag_z, frag); } else { RB_REMOVE(pf_frag_tree, &V_pf_cache_tree, frag); TAILQ_REMOVE(&V_pf_cachequeue, frag, frag_next); - uma_zfree(V_pf_cache_pl, frag); + uma_zfree(V_pf_cache_z, frag); } } @@ -415,10 +415,10 @@ pf_reassemble(struct mbuf **m0, struct p /* Create a new reassembly queue for this packet */ if (*frag == NULL) { - *frag = uma_zalloc(V_pf_frag_pl, M_NOWAIT); + *frag = uma_zalloc(V_pf_frag_z, M_NOWAIT); if (*frag == NULL) { pf_flush_fragments(); - *frag = uma_zalloc(V_pf_frag_pl, M_NOWAIT); + *frag = uma_zalloc(V_pf_frag_z, M_NOWAIT); if (*frag == NULL) goto drop_fragment; } @@ -494,7 +494,7 @@ pf_reassemble(struct mbuf **m0, struct p next = LIST_NEXT(frea, fr_next); m_freem(frea->fr_m); LIST_REMOVE(frea, fr_next); - uma_zfree(V_pf_frent_pl, frea); + uma_zfree(V_pf_frent_z, frea); V_pf_nfrents--; } @@ -551,13 +551,13 @@ pf_reassemble(struct mbuf **m0, struct p m2 = m->m_next; m->m_next = NULL; m_cat(m, m2); - uma_zfree(V_pf_frent_pl, frent); + uma_zfree(V_pf_frent_z, frent); V_pf_nfrents--; for (frent = next; frent != NULL; frent = next) { next = LIST_NEXT(frent, fr_next); m2 = frent->fr_m; - uma_zfree(V_pf_frent_pl, frent); + uma_zfree(V_pf_frent_z, frent); V_pf_nfrents--; m->m_pkthdr.csum_flags &= m2->m_pkthdr.csum_flags; m->m_pkthdr.csum_data += m2->m_pkthdr.csum_data; @@ -593,7 +593,7 @@ pf_reassemble(struct mbuf **m0, struct p drop_fragment: /* Oops - fail safe - drop packet */ - uma_zfree(V_pf_frent_pl, frent); + uma_zfree(V_pf_frent_z, frent); V_pf_nfrents--; m_freem(m); return (NULL); @@ -616,18 +616,18 @@ pf_fragcache(struct mbuf **m0, struct ip /* Create a new range queue for this packet */ if (*frag == NULL) { - *frag = uma_zalloc(V_pf_cache_pl, M_NOWAIT); + *frag = uma_zalloc(V_pf_cache_z, M_NOWAIT); if (*frag == NULL) { pf_flush_fragments(); - *frag = uma_zalloc(V_pf_cache_pl, M_NOWAIT); + *frag = uma_zalloc(V_pf_cache_z, M_NOWAIT); if (*frag == NULL) goto no_mem; } /* Get an entry for the queue */ - cur = uma_zalloc(V_pf_cent_pl, M_NOWAIT); + cur = uma_zalloc(V_pf_cent_z, M_NOWAIT); if (cur == NULL) { - uma_zfree(V_pf_cache_pl, *frag); + uma_zfree(V_pf_cache_z, *frag); *frag = NULL; goto no_mem; } @@ -749,7 +749,7 @@ pf_fragcache(struct mbuf **m0, struct ip h->ip_id, -precut, frp->fr_off, frp->fr_end, off, max)); - cur = uma_zalloc(V_pf_cent_pl, M_NOWAIT); + cur = uma_zalloc(V_pf_cent_z, M_NOWAIT); if (cur == NULL) goto no_mem; V_pf_ncache++; @@ -804,7 +804,7 @@ pf_fragcache(struct mbuf **m0, struct ip h->ip_id, -aftercut, off, max, fra->fr_off, fra->fr_end)); - cur = uma_zalloc(V_pf_cent_pl, M_NOWAIT); + cur = uma_zalloc(V_pf_cent_z, M_NOWAIT); if (cur == NULL) goto no_mem; V_pf_ncache++; @@ -825,7 +825,7 @@ pf_fragcache(struct mbuf **m0, struct ip max, fra->fr_off, fra->fr_end)); fra->fr_off = cur->fr_off; LIST_REMOVE(cur, fr_next); - uma_zfree(V_pf_cent_pl, cur); + uma_zfree(V_pf_cent_z, cur); V_pf_ncache--; cur = NULL; @@ -839,7 +839,7 @@ pf_fragcache(struct mbuf **m0, struct ip max, fra->fr_off, fra->fr_end)); fra->fr_off = frp->fr_off; LIST_REMOVE(frp, fr_next); - uma_zfree(V_pf_cent_pl, frp); + uma_zfree(V_pf_cent_z, frp); V_pf_ncache--; frp = NULL; @@ 
-1015,7 +1015,7 @@ pf_normalize_ip(struct mbuf **m0, int di goto bad; /* Get an entry for the fragment queue */ - frent = uma_zalloc(V_pf_frent_pl, M_NOWAIT); + frent = uma_zalloc(V_pf_frent_z, M_NOWAIT); if (frent == NULL) { PF_FRAG_UNLOCK(); REASON_SET(reason, PFRES_MEMORY); @@ -1459,7 +1459,7 @@ pf_normalize_tcp_init(struct mbuf *m, in KASSERT((src->scrub == NULL), ("pf_normalize_tcp_init: src->scrub != NULL")); - src->scrub = uma_zalloc(V_pf_state_scrub_pl, M_NOWAIT); + src->scrub = uma_zalloc(V_pf_state_scrub_z, M_NOWAIT); if (src->scrub == NULL) return (1); bzero(src->scrub, sizeof(*src->scrub)); @@ -1536,9 +1536,9 @@ void pf_normalize_tcp_cleanup(struct pf_state *state) { if (state->src.scrub) - uma_zfree(V_pf_state_scrub_pl, state->src.scrub); + uma_zfree(V_pf_state_scrub_z, state->src.scrub); if (state->dst.scrub) - uma_zfree(V_pf_state_scrub_pl, state->dst.scrub); + uma_zfree(V_pf_state_scrub_z, state->dst.scrub); /* Someday... flush the TCP segment reassembly descriptors. */ } Modified: projects/pf/head/sys/contrib/pf/net/pf_osfp.c ============================================================================== --- projects/pf/head/sys/contrib/pf/net/pf_osfp.c Fri Mar 2 11:50:37 2012 (r232389) +++ projects/pf/head/sys/contrib/pf/net/pf_osfp.c Fri Mar 2 12:33:10 2012 (r232390) @@ -75,10 +75,10 @@ typedef uma_zone_t pool_t; SLIST_HEAD(pf_osfp_list, pf_os_fingerprint); VNET_DEFINE(struct pf_osfp_list, pf_osfp_list); #define V_pf_osfp_list VNET(pf_osfp_list) -VNET_DEFINE(pool_t, pf_osfp_entry_pl); -#define pf_osfp_entry_pl VNET(pf_osfp_entry_pl) -VNET_DEFINE(pool_t, pf_osfp_pl); -#define pf_osfp_pl VNET(pf_osfp_pl) +VNET_DEFINE(pool_t, pf_osfp_entry_z); +#define pf_osfp_entry_pl VNET(pf_osfp_entry_z) +VNET_DEFINE(pool_t, pf_osfp_z); +#define pf_osfp_pl VNET(pf_osfp_z) static struct pf_osfp_enlist *pf_osfp_fingerprint_hdr(const struct ip *, const struct ip6_hdr *, @@ -307,9 +307,9 @@ pf_osfp_match(struct pf_osfp_enlist *lis void pf_osfp_initialize(void) { - pf_osfp_entry_pl = uma_zcreate("pfospfen", sizeof(struct pf_osfp_entry), + pf_osfp_entry_z = uma_zcreate("pfospfen", sizeof(struct pf_osfp_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); - pf_osfp_pl = uma_zcreate("pfosfp", sizeof(struct pf_os_fingerprint), + pf_osfp_z = uma_zcreate("pfosfp", sizeof(struct pf_os_fingerprint), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); SLIST_INIT(&V_pf_osfp_list); @@ -319,8 +319,8 @@ void pf_osfp_cleanup(void) { - uma_zdestroy(pf_osfp_entry_pl); - uma_zdestroy(pf_osfp_pl); + uma_zdestroy(pf_osfp_entry_z); + uma_zdestroy(pf_osfp_z); } #endif @@ -335,9 +335,9 @@ pf_osfp_flush(void) SLIST_REMOVE_HEAD(&V_pf_osfp_list, fp_next); while ((entry = SLIST_FIRST(&fp->fp_oses))) { SLIST_REMOVE_HEAD(&fp->fp_oses, fp_entry); - uma_zfree(pf_osfp_entry_pl, entry); + uma_zfree(pf_osfp_entry_z, entry); } - uma_zfree(pf_osfp_pl, fp); + uma_zfree(pf_osfp_z, fp); } } @@ -390,11 +390,11 @@ pf_osfp_add(struct pf_osfp_ioctl *fpioc) if (PF_OSFP_ENTRY_EQ(entry, &fpioc->fp_os)) return (EEXIST); } - if ((entry = uma_zalloc(pf_osfp_entry_pl, + if ((entry = uma_zalloc(pf_osfp_entry_z, M_NOWAIT)) == NULL) return (ENOMEM); } else { - if ((fp = uma_zalloc(pf_osfp_pl, + if ((fp = uma_zalloc(pf_osfp_z, M_NOWAIT)) == NULL) return (ENOMEM); memset(fp, 0, sizeof(*fp)); @@ -407,9 +407,9 @@ pf_osfp_add(struct pf_osfp_ioctl *fpioc) fp->fp_wscale = fpioc->fp_wscale; fp->fp_ttl = fpioc->fp_ttl; SLIST_INIT(&fp->fp_oses); - if ((entry = uma_zalloc(pf_osfp_entry_pl, + if ((entry = uma_zalloc(pf_osfp_entry_z, M_NOWAIT)) == NULL) { - 
uma_zfree(pf_osfp_pl, fp); + uma_zfree(pf_osfp_z, fp); return (ENOMEM); } pf_osfp_insert(&V_pf_osfp_list, fp); Modified: projects/pf/head/sys/contrib/pf/net/pf_table.c ============================================================================== --- projects/pf/head/sys/contrib/pf/net/pf_table.c Fri Mar 2 11:50:37 2012 (r232389) +++ projects/pf/head/sys/contrib/pf/net/pf_table.c Fri Mar 2 12:33:10 2012 (r232390) @@ -154,10 +154,10 @@ struct pfr_walktree { #define senderr(e) do { rv = (e); goto _bad; } while (0) -VNET_DEFINE(uma_zone_t, pfr_ktable_pl); -VNET_DEFINE(uma_zone_t, pfr_kentry_pl); -VNET_DEFINE(uma_zone_t, pfr_kcounters_pl); -#define V_pfr_kcounters_pl VNET(pfr_kcounters_pl) +VNET_DEFINE(uma_zone_t, pfr_ktable_z); +VNET_DEFINE(uma_zone_t, pfr_kentry_z); +VNET_DEFINE(uma_zone_t, pfr_kcounters_z); +#define V_pfr_kcounters_z VNET(pfr_kcounters_z) VNET_DEFINE(struct sockaddr_in, pfr_sin); #define V_pfr_sin VNET(pfr_sin) VNET_DEFINE(struct sockaddr_in6, pfr_sin6); @@ -843,7 +843,7 @@ pfr_create_kentry(struct pfr_addr *ad, i { struct pfr_kentry *ke; - ke = uma_zalloc(V_pfr_kentry_pl, M_NOWAIT | M_ZERO); + ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO); if (ke == NULL) return (NULL); @@ -872,8 +872,8 @@ static void pfr_destroy_kentry(struct pfr_kentry *ke) { if (ke->pfrke_counters) - uma_zfree(V_pfr_kcounters_pl, ke->pfrke_counters); - uma_zfree(V_pfr_kentry_pl, ke); + uma_zfree(V_pfr_kcounters_z, ke->pfrke_counters); + uma_zfree(V_pfr_kentry_z, ke); } static void @@ -953,7 +953,7 @@ pfr_clstats_kentries(struct pfr_kentrywo if (negchange) p->pfrke_not = !p->pfrke_not; if (p->pfrke_counters) { - uma_zfree(V_pfr_kcounters_pl, p->pfrke_counters); + uma_zfree(V_pfr_kcounters_z, p->pfrke_counters); p->pfrke_counters = NULL; } p->pfrke_tzero = tzero; @@ -1919,7 +1919,7 @@ pfr_create_ktable(struct pfr_table *tbl, struct pfr_ktable *kt; struct pf_ruleset *rs; - kt = uma_zalloc(V_pfr_ktable_pl, M_NOWAIT|M_ZERO); + kt = uma_zalloc(V_pfr_ktable_z, M_NOWAIT|M_ZERO); if (kt == NULL) return (NULL); kt->pfrkt_t = *tbl; @@ -1981,7 +1981,7 @@ pfr_destroy_ktable(struct pfr_ktable *kt kt->pfrkt_rs->tables--; *** DIFF OUTPUT TRUNCATED AT 1000 LINES ***