Date:      Fri, 8 Jan 2010 17:46:27 +0000 (UTC)
From:      Luigi Rizzo <luigi@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-user@freebsd.org
Subject:   svn commit: r201809 - user/luigi/ipfw3-head/sys/netinet/ipfw
Message-ID:  <201001081746.o08HkRAZ046062@svn.freebsd.org>

Author: luigi
Date: Fri Jan  8 17:46:27 2010
New Revision: 201809
URL: http://svn.freebsd.org/changeset/base/201809

Log:
  More dummynet cleanup: embed the system heap in dn_cfg (dropping the
  old file-scope pointer), take DN_HEAP_LOCK around heap accesses,
  allocate the delay line in create_scheduler_instance(), avoid a
  malloc per lookup in find_scheduler(), and restructure
  dummynet_task() and dummynet_flush().

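For reference, the central change in this revision is that the former
file-scope 'static struct dn_heap *system_heap' becomes a field embedded
in the global dn_cfg, so callers pass &dn_cfg.system_heap and no separate
allocation is needed. A minimal sketch of the resulting layout (only
system_heap, prev_t and io_fast appear in this diff; everything else,
including field order, is illustrative, not the committed code):

	struct dn_heap {
		int elements;			/* entries currently in use */
		/* ... storage for (key, object) pairs ... */
	};

	struct dn_config {
		struct dn_heap system_heap;	/* embedded, not a pointer */
		struct timeval prev_t;		/* used by dummynet_task() */
		int io_fast;			/* used by dummynet_io() */
		/* ... */
	};

	extern struct dn_config dn_cfg;
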
Modified:
  user/luigi/ipfw3-head/sys/netinet/ipfw/ip_dn_io.c
  user/luigi/ipfw3-head/sys/netinet/ipfw/ip_dummynet.c

Modified: user/luigi/ipfw3-head/sys/netinet/ipfw/ip_dn_io.c
==============================================================================
--- user/luigi/ipfw3-head/sys/netinet/ipfw/ip_dn_io.c	Fri Jan  8 17:21:56 2010	(r201808)
+++ user/luigi/ipfw3-head/sys/netinet/ipfw/ip_dn_io.c	Fri Jan  8 17:46:27 2010	(r201809)
@@ -105,7 +105,6 @@ static unsigned long	io_pkt_drop;
  * The heap is checked at every tick and all entities with expired events
  * are extracted.
  */
-static struct dn_heap *system_heap;
 
 /*
  * The key for the heap is used for two different values:
@@ -255,22 +254,19 @@ dn_tag_get(struct mbuf *m)
 }
 
 /*
- * It is called when we have some packet from delay line to send.
- * If there are leftover packets, this delay line is reinserted into extract
- * heap
- * XXX OK
+ * Fetch packets from the delay line which are due now. If there are
+ * leftover packets, reinsert the delay line in the heap.
  */
 static struct mbuf *
-transmit_event(struct delay_line *dline, dn_key l_curr_time)
+transmit_event(struct delay_line *dline, dn_key now)
 {
 	struct mbuf *m;
 	struct dn_pkt_tag *pkt;
-
 	struct mbuf *head = NULL, *tail = NULL;
-	/* XXX scheduler lock */
+
+	/* XXX we are under scheduler lock */
 	while ((m = dline->head) != NULL) {
 		pkt = dn_tag_get(m);
-		if (!DN_KEY_LEQ(pkt->output_time, l_curr_time))
+		if (!DN_KEY_LEQ(pkt->output_time, now))
 			break;
 		dline->head = m->m_nextpkt;
 		if (tail != NULL)
@@ -283,12 +279,11 @@ transmit_event(struct delay_line *dline,
 	if (tail != NULL)
 		tail->m_nextpkt = NULL;
 
-	/* If there are leftover packets, put into the heap for next event. */
 	if ((m = dline->head) != NULL) {
-		pkt = dn_tag_get(m);
-		//DN_HEAP_LOCK();
-		heap_insert(system_heap, pkt->output_time, dline);
-		//DN_HEAP_UNLOCK();
+		pkt = dn_tag_get(m);
+		DN_HEAP_LOCK();
+		heap_insert(&dn_cfg.system_heap, pkt->output_time, dline);
+		DN_HEAP_UNLOCK();
 	}
 	/* XXX scheduler unlock */
 	return head;
@@ -296,6 +291,7 @@ transmit_event(struct delay_line *dline,
 
 #define div64(a, b)	((int64_t)(a) / (int64_t)(b))
 
+#if 0
 /*
  * Compute how many ticks we have to wait before being able to send
  * a packet. This is computed as the "wire time" for the packet
@@ -314,23 +310,24 @@ set_ticks(struct mbuf *m, struct dn_flow
 		ret = 0;
 	return ret;
 }
+#endif
 
 /*
  * Convert the additional MAC overheads/delays into an equivalent
  * number of bits for the given data rate. The samples are in milliseconds
  * so we need to divide by 1000.
  */
-static dn_key
+static uint64_t
 compute_extra_bits(struct mbuf *pkt, struct new_pipe *p)
 {
 	int index;
-	dn_key extra_bits;
+	uint64_t extra_bits;
 	struct new_profile *pf = p->profile;
 
 	if (!pf || pf->samples_no == 0)
 		return 0;
 	index  = random() % pf->samples_no;
-	extra_bits = div64((dn_key)pf->samples[index] * p->bandwidth, 1000);
+	extra_bits = div64((uint64_t)pf->samples[index] * p->bandwidth, 1000);
 	if (index >= pf->loss_level) {
 		struct dn_pkt_tag *dt = dn_tag_get(pkt);
 		if (dt)
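
The conversion above is just sample * bandwidth / 1000: assuming the
samples are in milliseconds and the bandwidth in bit/s, a 2 ms MAC
overhead at 10 Mbit/s costs the equivalent of 20000 bits on the wire.
A standalone check of the arithmetic (userland, not the committed code):

	#include <stdint.h>
	#include <stdio.h>

	static int64_t
	extra_bits(int64_t sample_ms, int64_t bw_bps)
	{
		return sample_ms * bw_bps / 1000;	/* ms * bit/s -> bits */
	}

	int
	main(void)
	{
		printf("%lld\n", (long long)extra_bits(2, 10000000));	/* 20000 */
		return 0;
	}
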
@@ -474,6 +471,9 @@ create_scheduler_instance(struct new_sch
 
 	if (si == NULL)
 		goto error;
+	si->dline = malloc(sizeof(*si->dline), M_DUMMYNET, M_NOWAIT | M_ZERO);
+	if (si->dline == NULL)
+		goto error;
 
 	set_oid(&si->oid, DN_SCH_I, 0, l);
 
@@ -505,37 +505,31 @@ find_scheduler(struct new_sch *sch_t, st
                 struct ipfw_flow_id *id)
 {
     struct new_sch_inst *prev, *s; /* returning scheduler instance */
-    struct ipfw_flow_id *id_t;
-    int i = 0;
-
-    id_t = malloc(sizeof(struct ipfw_flow_id), M_DUMMYNET, M_NOWAIT);
-    if (id_t == NULL) {
-        printf("dummynet: no memory for flowid\n");
-        return NULL;
-    }
-    /* XXX check return value */
-    *id_t = *id; /* The original id isn't modified */
-    do_mask(&sch_t->sched_mask, id_t);
+    int i;
+    struct ipfw_flow_id id_t;
+
     if ( !(sch_t->flags & DN_SCH_HAVE_MASK) ) {
+	i = 0;
         s = sch_t->sch_i[0];
     } else {
-        /* first, do the masking, then hash */
-        i = do_hash(id_t);
+	id_t = *id;
+	do_mask(&sch_t->sched_mask, &id_t);
+        i = do_hash(&id_t);
         i = i % sch_t->sch_i_size;
         /* finally, scan the current hash bucket for a match */
-        searches++ ;
-        for (prev=NULL, s = sch_t->sch_i[i] ; s ; ) {
+        searches++; /* XXX stats */
+        for (prev=NULL, s=sch_t->sch_i[i] ; s ; prev=s, s=s->next) {
             search_steps++;
-            if (!mask_are_equals(id_t, &s->id))
+            if (!mask_are_equals(&id_t, &s->id))
                 break; /* found */
-            prev = s ;
-            s = s->next ;
         }
+#if 0 /* XXX we used to move to front, but what for? */
         if (s && prev != NULL) { /* found and not in front */
             prev->next = s->next ;
             s->next = sch_t->sch_i[i] ;
             sch_t->sch_i[i] = s ;
         }
+#endif
     }
    
     if (s == NULL) { /* no match, need to allocate a new entry */
@@ -546,16 +540,17 @@ find_scheduler(struct new_sch *sch_t, st
         s->next = sch_t->sch_i[i];
         sch_t->sch_i[i] = s;
         sch_t->sch_i_elements++;
-        if (s != NULL) {
-            s->id = *id_t;
-            s->hash_slot = i;
-        }
+	s->hash_slot = i;
+        if (sch_t->flags & DN_SCH_HAVE_MASK)
+	    s->id = id_t;
     }
     return s;
 }
 
 /*
- * Send traffic from a scheduler instance.
+ * Send traffic from a scheduler instance, due by 'now'.
+ * XXX we grab the heap lock on each insert; perhaps we should
+ * batch entries to improve performance.
  */
 static struct mbuf *
 serve_sched(struct new_sch_inst *s, dn_key now)
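
The lookup rewritten above replaces the malloc'ed copy of the flow id
with a copy on the stack: mask the id, hash it into a bucket, then scan
the chain for an exact match. A self-contained userland sketch of the
same pattern (the two-field flow id and the xor hash are simplified
placeholders, not dummynet's actual do_mask()/do_hash()):

	#include <stdint.h>

	struct flow_id { uint32_t src, dst; };
	struct inst { struct flow_id id; struct inst *next; };

	#define NBUCKETS 64
	static struct inst *bucket[NBUCKETS];

	static struct inst *
	lookup(const struct flow_id *id, const struct flow_id *mask)
	{
		struct flow_id key = *id;	/* stack copy; caller's id unchanged */
		struct inst *s;
		uint32_t i;

		key.src &= mask->src;		/* apply the scheduler mask */
		key.dst &= mask->dst;
		i = (key.src ^ key.dst) % NBUCKETS;
		for (s = bucket[i]; s != NULL; s = s->next)
			if (s->id.src == key.src && s->id.dst == key.dst)
				return s;	/* match in this bucket */
		return NULL;			/* caller allocates a new one */
	}

	int
	main(void)
	{
		struct flow_id id = { 0x0a000001, 0x0a000002 };
		struct flow_id mask = { 0xffffff00, 0xffffff00 };
		return lookup(&id, &mask) != NULL;	/* empty table: 0 */
	}
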
@@ -563,52 +558,55 @@ serve_sched(struct new_sch_inst *s, dn_k
 	struct mbuf *head;
 	struct new_sch *sch_t = s->sched;
 	struct mbuf *tosend = NULL;
-	dn_key len_scaled;
 	struct new_pipe *pipe = sch_t->pipe;
 	int delay_line_idle = (s->dline->head == NULL);
-	int done;
+	int done, bw;
+
+	bw = pipe->bandwidth;
 	s->flags &= ~DN_SCH_ACTIVE;
 
-	if (pipe->bandwidth > 0)
-		s->numbytes += (now - s->sched_time) * pipe->bandwidth;
+	if (bw > 0)
+		s->numbytes += (now - s->sched_time) * bw;
 	else
 		s->numbytes = 0;
 	s->sched_time = now;
 	done = 0;
-	while (s->numbytes >= 0 && (tosend = sch_t->fp->dequeue(s + 1)) != NULL) {
+	while (s->numbytes >= 0 &&
+	    (tosend = sch_t->fp->dequeue(s + 1)) != NULL) {
+		uint64_t len_scaled;
 		done++;
-		len_scaled = pipe->bandwidth ? tosend->m_pkthdr.len * 8 * hz
+		len_scaled = bw ? tosend->m_pkthdr.len * 8 * hz
 			+ compute_extra_bits(tosend, pipe) * hz : 0;
 		s->numbytes -= len_scaled;
 		/* Move packet in the delay line */
 		move_pkt(tosend, pipe, s->dline);
 	}
-	if (done > 0 && s->numbytes < 0) {
-		/* credit has become negative, so reinsert the
-		* instance in the heap for when credit will be
-		* positive again. Also update the output time
-		* of the last packet, which is 'tosend'
-		*/
-		dn_key t = 0;
-		if (pipe->bandwidth > 0)
-			t = (pipe->bandwidth - 1 - s->numbytes) / pipe->bandwidth;
-		/* Delay the output time because under credit */
-		dn_tag_get(tosend)->output_time += t;
-
+	if (s->numbytes >= 0) {
+		/* Instance is idle, because it did not return
+		 * packets while credit was available.
+		 */
+		s->idle_time = curr_time;
+	} else {
+		/* Credit has become negative, so reinsert the
+		 * instance in the heap for when credit will be
+		 * positive again. Also update the output time
+		 * of the last packet, which is 'tosend'
+		 */
+		dn_key t;
+		KASSERT(bw > 0, ("bw=0 and credit<0 ?"));
+		t = (bw - 1 - s->numbytes) / bw;
+		/* Delay output time because under credit */
+		if (tosend)
+			dn_tag_get(tosend)->output_time += t;
 		s->sched->inst_counter++;
 		s->flags |= DN_SCH_ACTIVE;
 		DN_HEAP_LOCK();
-		heap_insert(system_heap, curr_time + t, s);
+		heap_insert(&dn_cfg.system_heap, curr_time + t, s);
 		DN_HEAP_UNLOCK();
-	} else {
-		/* scheduler instance should be idle, because it
-		 * did not return packets while credit was available.
-		 */
-		s->idle_time = curr_time;
 	}
 
 	head = (delay_line_idle && done) ?
-		transmit_event(s->dline, curr_time) : NULL;
+		transmit_event(s->dline, now) : NULL;
 	return head;
 }
 
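The re-scheduling delay computed above is a ceiling division: with
credit < 0 and bw units of credit gained per tick, the instance may run
again after ceil(-credit / bw) ticks, obtained in integer arithmetic as
(bw - 1 - credit) / bw. A standalone check (userland, not the committed
code):

	#include <assert.h>
	#include <stdint.h>

	static int64_t
	ticks_until_positive(int64_t credit, int64_t bw)
	{
		return (bw - 1 - credit) / bw;	/* == ceil(-credit / bw) */
	}

	int
	main(void)
	{
		assert(ticks_until_positive(-1, 1000) == 1);
		assert(ticks_until_positive(-1000, 1000) == 1);
		assert(ticks_until_positive(-1001, 1000) == 2);
		return 0;
	}
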
@@ -620,124 +618,131 @@ serve_sched(struct new_sch_inst *s, dn_k
 void
 dummynet_task(void *context, int pending)
 {
-	struct new_sch_inst *s;
-	struct new_sch *sch_t;
-	struct mbuf *head = NULL;
-	struct timeval t;
+    struct new_sch *sch_t;
+    struct timeval t;
 
-	DUMMYNET_LOCK();
+    DUMMYNET_LOCK();
 
- 	/* Update number of lost(coalesced) ticks. */
- 	tick_lost += pending - 1;
+    /* Update number of lost (coalesced) ticks. */
+    tick_lost += pending - 1;
  
- 	getmicrouptime(&t);
- 	/* Last tick duration (usec). */
- 	tick_last = (t.tv_sec - dn_cfg.prev_t.tv_sec) * 1000000 +
- 	    (t.tv_usec - dn_cfg.prev_t.tv_usec);
- 	/* Last tick vs standard tick difference (usec). */
- 	tick_delta = (tick_last * hz - 1000000) / hz;
- 	/* Accumulated tick difference (usec). */
- 	tick_delta_sum += tick_delta;
+    getmicrouptime(&t);
+    /* Last tick duration (usec). */
+    tick_last = (t.tv_sec - dn_cfg.prev_t.tv_sec) * 1000000 +
+	(t.tv_usec - dn_cfg.prev_t.tv_usec);
+    /* Last tick vs standard tick difference (usec). */
+    tick_delta = (tick_last * hz - 1000000) / hz;
+    /* Accumulated tick difference (usec). */
+    tick_delta_sum += tick_delta;
  
- 	dn_cfg.prev_t = t;
+    dn_cfg.prev_t = t;
  
- 	/*
- 	 * Adjust curr_time if accumulated tick difference greater than
- 	 * 'standard' tick. Since curr_time should be monotonically increasing,
- 	 * we do positive adjustment as required and throttle curr_time in
- 	 * case of negative adjustment.
- 	 */
-  	curr_time++;
- 	if (tick_delta_sum - tick >= 0) {
- 		int diff = tick_delta_sum / tick;
+    /*
+     * Adjust curr_time if the accumulated tick difference is
+     * greater than the 'standard' tick. Since curr_time should
+     * be monotonically increasing, we do positive adjustments
+     * as required, and throttle curr_time in case of negative
+     * adjustment.
+     */
+    curr_time++;
+    if (tick_delta_sum - tick >= 0) {
+	int diff = tick_delta_sum / tick;
  
- 		curr_time += diff;
- 		tick_diff += diff;
- 		tick_delta_sum %= tick;
- 		tick_adjustment++;
- 	} else if (tick_delta_sum + tick <= 0) {
- 		curr_time--;
- 		tick_diff--;
- 		tick_delta_sum += tick;
- 		tick_adjustment++;
- 	}
-	DUMMYNET_UNLOCK();
-	for (;;) {
-	    struct dn_id *p;    /* generic parameter to handler */
-	    DN_HEAP_LOCK();
-	    if (system_heap->elements > 0 &&
-		DN_KEY_LEQ(HEAP_TOP(system_heap)->key, curr_time)) {
-		    p = HEAP_TOP(system_heap)->object;
-		    heap_extract(system_heap, NULL);
-	    } else {
-		p = NULL;
-	    }
-	    DN_HEAP_UNLOCK();
-	    if (p == NULL)
-		break;
+	curr_time += diff;
+	tick_diff += diff;
+	tick_delta_sum %= tick;
+	tick_adjustment++;
+    } else if (tick_delta_sum + tick <= 0) {
+	curr_time--;
+	tick_diff--;
+	tick_delta_sum += tick;
+	tick_adjustment++;
+    }
+    DUMMYNET_UNLOCK();
 
-	    if (p->type == DN_SCH_I) {
-		/*
-		 * Working with scheduler instances:
-		 * - Remove a scheduler instance from the heap and decrement
-		 *   the scheduler counter.
-		 * - If the scheduler is deleting and no other scheduler
-		 *   instances (of this scheduler) are into the heap,
-		 *   it's now possible to delete scheduler and call the
-		 *   function to do this;
-		 * - If the scheduer is deleting and this isn't the last
-		 *   instance in the heap, don't call the dequeue() function
-		 *   so the instance isn't inserted in the heap
-		 * - Else, call the dequeue() function.
-		 */
-		s = (struct new_sch_inst *)p;
-		sch_t = s->sched;
-		DN_S_LOCK(sch_t);
-
-		sch_t->inst_counter--;
-		if (sch_t->flags & DN_SCH_DELETE) {
-		    /* Wait for scheduler->busy == 0 */
-		    while(sch_t->busy) { /* XXX check */
-			DN_S_UNLOCK(sch_t);
-			DN_S_LOCK(sch_t);
-		    }
-		    /* Scheduler is deleting, don't dequeue packets from
-		     * this instance
-		     */
-		    if (sch_t->inst_counter == 0) {
-			/* No other scheduler instance in the heap.
-			 * We can safely delete scheduler
-			 */
-			really_deletescheduler(sch_t);
-			DN_S_UNLOCK(sch_t); /* XXX */
-		    }
-		} else {
-		    head = serve_sched(s, curr_time);
+    /*
+     * XXX perhaps work in two passes? First extract all
+     * eligible entries from the heap, then process them
+     * individually?
+     */
+    for (;;) {
+	struct dn_id *p;    /* generic parameter to handler */
+	struct mbuf *head = NULL;
+
+	DN_HEAP_LOCK();
+	if (dn_cfg.system_heap.elements > 0 &&
+		DN_KEY_LEQ(HEAP_TOP(&dn_cfg.system_heap)->key, curr_time)) {
+	    p = HEAP_TOP(&dn_cfg.system_heap)->object;
+	    heap_extract(&dn_cfg.system_heap, NULL);
+	} else {
+	    p = NULL;
+	}
+	DN_HEAP_UNLOCK();
+	if (p == NULL)
+	    break;
+
+	if (p->type == DN_SCH_I) {
+	    /*
+	     * Working with scheduler instances:
+	     * - decrement the scheduler's instance counter;
+	     * - if the scheduler is being deleted and no other
+	     *   instances of it are in the heap, we can now
+	     *   delete the scheduler itself; until then, do not
+	     *   call dequeue() so the instance is not reinserted
+	     *   in the heap;
+	     * - otherwise, call the dequeue() function.
+	     */
+	    struct new_sch_inst *s = (struct new_sch_inst *)p;
+
+	    sch_t = s->sched;
+	    DN_S_LOCK(sch_t);
+	    sch_t->inst_counter--;
+	    if (sch_t->flags & DN_SCH_DELETE) {
+		/* Wait for scheduler->busy == 0 */
+		while (sch_t->busy) { /* XXX check */
 		    DN_S_UNLOCK(sch_t);
-		    if (head != NULL)
-			dummynet_send(head);
+		    DN_S_LOCK(sch_t);
 		}
-	    } else { /* extracted a delay line */
-		struct delay_line *dline = (struct delay_line *)p;
-		/*
-		 * Managing delay lines.
-		 * If the pointer to the scheduler instance is NULL, the delay
-		 * line should be deleted because pipe or scheduler was deleted,
-		 * else the transmit event is called to send out packets and
-		 * eventually reinsert the delay line into the heap.
+		/* Scheduler is deleting, do not dequeue
+		 * packets from this instance
 		 */
-		if (dline->si == NULL)
-		    delete_delay_line(dline);
-		else {
-		    DN_S_LOCK(dline->si->ptr_sched);
-		    head = transmit_event(dline, curr_time);
-		    DN_S_UNLOCK(dline->si->ptr_sched);
-		    if (head != NULL)
-			dummynet_send(head);
+		if (sch_t->inst_counter == 0) {
+		    /* No other instances in the heap.
+		     * We can safely delete the scheduler
+		     */
+		    really_deletescheduler(sch_t);
 		}
+	    } else {
+		head = serve_sched(s, curr_time);
 	    }
-        }
-	dn_reschedule();
+	    DN_S_UNLOCK(sch_t);
+	} else { /* extracted a delay line */
+	    /*
+	     * Managing delay lines.
+	     * If the pointer to the scheduler instance is NULL,
+	     * the delay line should be deleted because pipe or
+	     * scheduler was deleted.
+	     * Otherwise call transmit event to send out packets
+	     * due by now, and possibly reinsert the delay line
+	     * into the heap.
+	     */
+	    struct delay_line *dline = (struct delay_line *)p;
+	    if (dline->si == NULL)
+		delete_delay_line(dline);
+	    else {
+		DN_S_LOCK(dline->si->ptr_sched);
+		head = transmit_event(dline, curr_time);
+		DN_S_UNLOCK(dline->si->ptr_sched);
+	    }
+	}
+	if (head != NULL)
+	    dummynet_send(head);
+    }
+    dn_reschedule();
 }
 
 static void
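
The tick bookkeeping in dummynet_task() above accumulates the difference
between the measured and the nominal tick length, and folds whole ticks
of drift into curr_time so it stays monotonic. A userland illustration
with hz = 1000 (nominal tick 1000 us) and ticks that actually take
1200 us, so every fifth tick contributes one extra unit:

	#include <stdio.h>

	int
	main(void)
	{
		const int hz = 1000, tick = 1000000 / hz;	/* usec */
		long curr_time = 0, delta_sum = 0;
		int i;

		for (i = 0; i < 10; i++) {
			int tick_last = 1200;	/* measured duration, usec */
			delta_sum += (tick_last * hz - 1000000) / hz;
			curr_time++;
			if (delta_sum - tick >= 0) {	/* a whole tick of drift */
				curr_time += delta_sum / tick;
				delta_sum %= tick;
			}
		}
		printf("curr_time=%ld\n", curr_time);	/* 12 after 10 ticks */
		return 0;
	}
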
@@ -1145,19 +1150,15 @@ dummynet_io(struct mbuf **m0, int dir, s
 	struct new_queue *q = NULL;
 	struct new_sch *sch;
 	struct new_sch_inst *sch_inst;
-	struct delay_line *dline;
 	int ret;
 	dn_key now; /* save a copy of curr_time */
-	int delay_line_idle;
 	int fs_id = (fwa->rule.info & IPFW_INFO_MASK) +
 		((fwa->rule.info & IPFW_IS_PIPE) ? DN_PIPEOFFSET : 0);
 
 	KASSERT(m->m_nextpkt == NULL,
 	    ("dummynet_io: mbuf queue passed to dummynet"));
 
-	/*
-	 * find flowset and schedset, protected by the UH lock
-	 */
+	/* find flowset and schedset, protected by the UH lock */
 	DUMMYNET_LOCK();
 	io_pkt++;
 	fs = ipdn_locate_flowset(fs_id);
@@ -1187,10 +1188,8 @@ dummynet_io(struct mbuf **m0, int dir, s
 	sch_inst = find_scheduler(sch, fs, &(fwa->f_id));
 	if (sch_inst == NULL)
 		goto dropit;
-	dline = sch_inst->dline;
-	delay_line_idle = (dline->head == NULL);
 
-	/* Now do the masking */
+	/* Apply the flow_mask */
 	do_mask(&fs->flow_mask, &(fwa->f_id));
 
 	/* tag the mbuf */
@@ -1229,15 +1228,12 @@ dummynet_io(struct mbuf **m0, int dir, s
 	}
 
 	/*
-	 * Now check if the dequeue should be called now.
-	 * If the instance is in the heap, the dequeue() will be called later,
-	 * and we are done.
+	 * If the instance is in the heap, dequeue() will be
+	 * called later, and we are done. Otherwise it is idle,
+	 * compute the initial allowance from io_fast and burst.
 	 */
 	if (sch_inst->flags & DN_SCH_ACTIVE)
 		goto done;
-	// XXX see if we can merge with dummynet task.
-        /* If the instance is not in the heap, credit must be >= 0 */
-
         sch_inst->numbytes = dn_cfg.io_fast ? pipe->bandwidth : 0;
         if (pipe->burst) {
 		uint64_t burst = (now - sch_inst->idle_time) *

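A note on the structure of the new dummynet_task() loop above: entries
are popped from the heap one at a time under DN_HEAP_LOCK and dispatched
with the lock released, so the handlers (which take scheduler locks and
may transmit packets) never run under the heap lock. A compact userland
analogue of the pattern, with the heap reduced to a sorted array to keep
the sketch short:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t heap_lock = PTHREAD_MUTEX_INITIALIZER;
	static long keys[] = { 1, 2, 5, 9 };	/* due times, sorted */
	static int elements = 4, head = 0;

	static void
	run_due_events(long now)
	{
		for (;;) {
			long k = -1;

			pthread_mutex_lock(&heap_lock);
			if (head < elements && keys[head] <= now)
				k = keys[head++];	/* extract the top */
			pthread_mutex_unlock(&heap_lock);
			if (k < 0)
				break;			/* nothing due by 'now' */
			printf("handling event due at %ld\n", k);  /* unlocked */
		}
	}

	int
	main(void)
	{
		run_due_events(5);	/* handles the events due at 1, 2, 5 */
		return 0;
	}
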
Modified: user/luigi/ipfw3-head/sys/netinet/ipfw/ip_dummynet.c
==============================================================================
--- user/luigi/ipfw3-head/sys/netinet/ipfw/ip_dummynet.c	Fri Jan  8 17:21:56 2010	(r201808)
+++ user/luigi/ipfw3-head/sys/netinet/ipfw/ip_dummynet.c	Fri Jan  8 17:46:27 2010	(r201809)
@@ -130,10 +130,7 @@ static __inline void dn_free_pkts(struct
 int
 delete_delay_line(struct delay_line *dline)
 {
-    struct mbuf *m;
-                     
-    m = dline->head;
-    dn_free_pkts(m);
+    dn_free_pkts(dline->head);
     free(dline, M_DUMMYNET);
     return 0;
 }
@@ -144,7 +141,7 @@ delete_scheduler_instance(struct new_sch
     struct new_sch *sch_t = si->sched;
  
     sch_t->fp->free_sched(si + 1);
-    /* XXX packet from delay line must be freed */
+    /* XXX packets in delay line must be freed */
     if (si->dline->head == NULL || sch_t->flags & DN_SCH_DELETE_DELAY_LINE) {
         /* Delay line empty, or forced delete, so delete delay line now */
         delete_delay_line(si->dline);
@@ -253,8 +250,7 @@ purge_pipe(struct new_pipe *pipe)
 }
 
 /*
- * Delete all pipes and heaps returning memory. Must also
- * remove references from all ipfw rules to all pipes.
+ * Delete all objects.
  */
 static void
 dummynet_flush(void)
@@ -265,22 +261,50 @@ dummynet_flush(void)
 
 	DUMMYNET_LOCK();
 
-	/*
-	 * Now purge all queued pkts and delete all pipes.
-	 *
-	 * XXXGL: can we merge the for(;;) cycles into one or not?
-	 */
-	for (i = 0; i < DN_HASHSIZE; i++)
-		SLIST_FOREACH_SAFE(fs, &flowsethash[i], next, fs1) {
-			SLIST_REMOVE(&flowsethash[i], fs, new_fs, next);
-			purge_flow_set(fs, 1);
-		}
-	for (i = 0; i < DN_HASHSIZE; i++)
-		SLIST_FOREACH_SAFE(pipe, &pipehash[i], next, pipe1) {
-			SLIST_REMOVE(&pipehash[i], pipe, new_pipe, next);
-			purge_pipe(pipe);
-			free_pipe(pipe);
-		}
+    /* Clear heap so we don't have unwanted events. */
+    heap_free(&dn_cfg.system_heap);
+
+    /* Free all pipes */
+    for (i = 0; i < HASHSIZE; i++) {
+        SLIST_FOREACH_SAFE(pipe, &pipehash[i], next, pipe1) {
+            SLIST_REMOVE(&pipehash[i], pipe, new_pipe, next);
+            free_pipe(pipe);
+        }
+    }
+
+    /* Free flowsets sitting on the unlinked list */
+    SLIST_FOREACH_SAFE(fs, &flowsetunlinked, next, fs1) {
+            SLIST_REMOVE(&flowsetunlinked, fs, new_fs, next);
+            free(fs, M_DUMMYNET);
+    }
+
+    /* Free all flowsets in the system */
+    for (i = 0; i < HASHSIZE; i++) {
+        SLIST_FOREACH_SAFE(fs, &flowsethash[i], next, fs1) {
+            SLIST_REMOVE(&flowsethash[i], fs, new_fs, next);
+            fs->fp->delete_alg_fs(fs->alg_fs);
+            fs->fp->ref_count--;
+            free(fs->alg_fs, M_DUMMYNET);
+            free(fs, M_DUMMYNET);
+        }
+    }
+
+    /* Free all schedulers */
+    for (i = 0; i < HASHSIZE; i++) {
+        SLIST_FOREACH_SAFE(sch_t, &schedulerhash[i], next, sch_t1) {
+            SLIST_REMOVE(&schedulerhash[i], sch_t, new_sch, next);
+            sch_t->flags |= DN_SCH_DELETE_DELAY_LINE;
+            really_deletescheduler(sch_t);
+        }
+    }
+
+    /* XXX flowsets that were removed from the list but not yet
+     *     deleted? delay lines not linked to a scheduler instance?
+     */
+
+    /* Reinitialize system heap... */
+    heap_init(&dn_cfg.system_heap, 16);
+
 	DUMMYNET_UNLOCK();
 }
 
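The flush code above leans on FreeBSD's SLIST_FOREACH_SAFE(), whose
extra 'tvar' argument caches the next pointer so the current element can
be unlinked and freed inside the loop body. A minimal userland
demonstration (uses FreeBSD's <sys/queue.h>; the _SAFE variants are not
present in every libc's copy of that header):

	#include <sys/queue.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		int v;
		SLIST_ENTRY(node) next;
	};
	SLIST_HEAD(nodelist, node);

	int
	main(void)
	{
		struct nodelist head = SLIST_HEAD_INITIALIZER(head);
		struct node *n, *tmp;
		int i;

		for (i = 0; i < 3; i++) {
			n = malloc(sizeof(*n));
			n->v = i;
			SLIST_INSERT_HEAD(&head, n, next);
		}
		SLIST_FOREACH_SAFE(n, &head, next, tmp) {
			SLIST_REMOVE(&head, n, node, next);	/* unlink... */
			printf("freeing %d\n", n->v);
			free(n);				/* ...then free */
		}
		return 0;
	}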


