Date:      Mon, 16 Jun 2008 15:27:17 GMT
From:      Marko Zec <zec@FreeBSD.org>
To:        Perforce Change Reviews <perforce@FreeBSD.org>
Subject:   PERFORCE change 143592 for review
Message-ID:  <200806161527.m5GFRHTM041713@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=143592

Change 143592 by zec@zec_tca51 on 2008/06/16 15:26:38

	Back out the experimental changes for virtualizing CPU load
	accounting and scheduling globals; allow a GENERIC (nooptions
	VIMAGE) kernel and modules to compile cleanly; and clean up
	some of the whitespace noise.

Affected files ...

.. //depot/projects/vimage/src/sys/compat/linprocfs/linprocfs.c#17 edit
.. //depot/projects/vimage/src/sys/compat/linux/linux_misc.c#16 edit
.. //depot/projects/vimage/src/sys/i386/conf/VIMAGE#13 edit
.. //depot/projects/vimage/src/sys/i386/i386/dump_machdep.c#8 edit
.. //depot/projects/vimage/src/sys/i386/i386/minidump_machdep.c#8 edit
.. //depot/projects/vimage/src/sys/kern/kern_clock.c#16 edit
.. //depot/projects/vimage/src/sys/kern/kern_synch.c#17 edit
.. //depot/projects/vimage/src/sys/kern/kern_vimage.c#60 edit
.. //depot/projects/vimage/src/sys/kern/kern_xxx.c#5 edit
.. //depot/projects/vimage/src/sys/kern/sched_4bsd.c#21 edit
.. //depot/projects/vimage/src/sys/kern/tty.c#15 edit
.. //depot/projects/vimage/src/sys/net/bpf.c#20 edit
.. //depot/projects/vimage/src/sys/net/bpf.h#6 edit
.. //depot/projects/vimage/src/sys/net/if_ethersubr.c#20 edit
.. //depot/projects/vimage/src/sys/net/route.c#18 edit
.. //depot/projects/vimage/src/sys/net/vnet.h#10 edit
.. //depot/projects/vimage/src/sys/net80211/ieee80211.c#15 edit
.. //depot/projects/vimage/src/sys/netgraph/ng_base.c#28 edit
.. //depot/projects/vimage/src/sys/netgraph/ng_bridge.c#4 edit
.. //depot/projects/vimage/src/sys/netgraph/ng_ether.c#11 edit
.. //depot/projects/vimage/src/sys/netgraph/ng_hub.c#3 edit
.. //depot/projects/vimage/src/sys/netgraph/ng_wormhole.c#3 edit
.. //depot/projects/vimage/src/sys/netinet/in_pcb.c#21 edit
.. //depot/projects/vimage/src/sys/sys/resource.h#6 edit
.. //depot/projects/vimage/src/sys/sys/sched.h#9 edit
.. //depot/projects/vimage/src/sys/vm/vm_meter.c#9 edit

Differences ...

==== //depot/projects/vimage/src/sys/compat/linprocfs/linprocfs.c#17 (text+ko) ====

@@ -512,16 +512,15 @@
 static int
 linprocfs_doloadavg(PFS_FILL_ARGS)
 {
-	INIT_VPROCG(TD_TO_VPROCG(curthread));
 
 	sbuf_printf(sb,
 	    "%d.%02d %d.%02d %d.%02d %d/%d %d\n",
-	    (int)(V_averunnable.ldavg[0] / V_averunnable.fscale),
-	    (int)(V_averunnable.ldavg[0] * 100 / V_averunnable.fscale % 100),
-	    (int)(V_averunnable.ldavg[1] / V_averunnable.fscale),
-	    (int)(V_averunnable.ldavg[1] * 100 / V_averunnable.fscale % 100),
-	    (int)(V_averunnable.ldavg[2] / V_averunnable.fscale),
-	    (int)(V_averunnable.ldavg[2] * 100 / V_averunnable.fscale % 100),
+	    (int)(averunnable.ldavg[0] / averunnable.fscale),
+	    (int)(averunnable.ldavg[0] * 100 / averunnable.fscale % 100),
+	    (int)(averunnable.ldavg[1] / averunnable.fscale),
+	    (int)(averunnable.ldavg[1] * 100 / averunnable.fscale % 100),
+	    (int)(averunnable.ldavg[2] / averunnable.fscale),
+	    (int)(averunnable.ldavg[2] * 100 / averunnable.fscale % 100),
 	    1,				/* number of running tasks */
 	    nprocs,			/* number of tasks */
 	    lastpid			/* the last pid */

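The linprocfs change above keeps the usual fixed-point load-average
formatting, only against the global averunnable instead of the
per-vprocg copy.  A minimal userland sketch of that arithmetic (the
FSCALE value and the sample load below are illustrative assumptions,
not taken from this change):

#include <stdio.h>

int
main(void)
{
	long fscale = 2048;	/* FSCALE = 1 << FSHIFT on FreeBSD */
	long ldavg = 2867;	/* fixed-point load, about 1.40 */

	/* Same split as linprocfs_doloadavg(): integer part, two decimals. */
	printf("%d.%02d\n",
	    (int)(ldavg / fscale),
	    (int)(ldavg * 100 / fscale % 100));
	return (0);
}
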
==== //depot/projects/vimage/src/sys/compat/linux/linux_misc.c#16 (text+ko) ====

@@ -126,7 +126,6 @@
 int
 linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
 {
-	INIT_VPROCG(TD_TO_VPROCG(td));
 	struct l_sysinfo sysinfo;
 	vm_object_t object;
 	int i, j;
@@ -139,8 +138,8 @@
 
 	/* Use the information from the mib to get our load averages */
 	for (i = 0; i < 3; i++)
-		sysinfo.loads[i] = V_averunnable.ldavg[i] *
-		    LINUX_SYSINFO_LOADS_SCALE / V_averunnable.fscale;
+		sysinfo.loads[i] = averunnable.ldavg[i] *
+		    LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;
 
 	sysinfo.totalram = physmem * PAGE_SIZE;
 	sysinfo.freeram = sysinfo.totalram - cnt.v_wire_count * PAGE_SIZE;

==== //depot/projects/vimage/src/sys/i386/conf/VIMAGE#13 (text+ko) ====

@@ -8,13 +8,6 @@
 ident		VIMAGE
 
 options 	VIMAGE
-options         IPFIREWALL
-options         IPFIREWALL_VERBOSE
-options         IPFIREWALL_VERBOSE_LIMIT=100
-options         IPFIREWALL_DEFAULT_TO_ACCEPT
-options         IPFIREWALL_FORWARD
-options         IPFIREWALL_NAT
-options         LIBALIAS
 
 #
 # Some kernel subsystems and functions don't yet compile with VIMAGE.  Remove

==== //depot/projects/vimage/src/sys/i386/i386/dump_machdep.c#8 (text+ko) ====

@@ -112,7 +112,6 @@
 mkdumpheader(struct kerneldumpheader *kdh, uint32_t archver, uint64_t dumplen,
     uint32_t blksz)
 {
-	INIT_VPROCG(TD_TO_VPROCG(&thread0)); /* XXX */
 
 	bzero(kdh, sizeof(*kdh));
 	strncpy(kdh->magic, KERNELDUMPMAGIC, sizeof(kdh->magic));

==== //depot/projects/vimage/src/sys/i386/i386/minidump_machdep.c#8 (text) ====

@@ -86,7 +86,6 @@
 mkdumpheader(struct kerneldumpheader *kdh, uint32_t archver, uint64_t dumplen,
     uint32_t blksz)
 {
-	INIT_VPROCG(TD_TO_VPROCG(curthread));
 
 	bzero(kdh, sizeof(*kdh));
 	strncpy(kdh->magic, KERNELDUMPMAGIC, sizeof(kdh->magic));

==== //depot/projects/vimage/src/sys/kern/kern_clock.c#16 (text+ko) ====

@@ -89,8 +89,6 @@
 static int
 sysctl_kern_cp_time(SYSCTL_HANDLER_ARGS)
 {
-	INIT_VPROCG(TD_TO_VPROCG(curthread));
-
 	int error;
 	long cp_time[CPUSTATES];
 #ifdef SCTL_MASK32
@@ -104,14 +102,14 @@
 		if (!req->oldptr)
 			return SYSCTL_OUT(req, 0, sizeof(cp_time32));
 		for (i = 0; i < CPUSTATES; i++)
-			cp_time32[i] = (unsigned int)V_cp_time[i];
+			cp_time32[i] = (unsigned int)cp_time[i];
 		error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
 	} else
 #endif
 	{
 		if (!req->oldptr)
-			return SYSCTL_OUT(req, 0, sizeof(V_cp_time));
-		error = SYSCTL_OUT(req, V_cp_time, sizeof(V_cp_time));
+			return SYSCTL_OUT(req, 0, sizeof(cp_time));
+		error = SYSCTL_OUT(req, cp_time, sizeof(cp_time));
 	}
 	return error;
 }
@@ -460,11 +458,7 @@
 
 	td = curthread;
 	p = td->td_proc;
-#ifdef VIMAGE
-	INIT_VPROCG(TD_TO_VPROCG(td));
 	INIT_VCPU(TD_TO_VCPU(td));
-	struct vprocg *vprocg_iter;
-#endif
 
 	cp_time = (long *)PCPU_PTR(cp_time);
 	if (usermode) {
@@ -502,14 +496,10 @@
 				sel = CP_IDLE;
 		}
 	}
-	atomic_add_long(&V_cp_time[sel], 1); /* XXX remove atomic! */
+	atomic_add_long(&cp_time[sel], 1); /* XXX remove atomic! */
 #ifdef VIMAGE
 	if (sel != CP_INTR)
 		sel = CP_IDLE;
-	/* XXX list locking? */
-	LIST_FOREACH(vprocg_iter, &vprocg_head, vprocg_le)
-		if (vprocg != vprocg_iter)
-			atomic_add_long(&vprocg_iter->_cp_time[sel], 1);
 
 	/* Per-vcpu average accounting */
 	mtx_lock_spin(&vcpu_list_mtx);
@@ -531,6 +521,7 @@
 		 * avg2 loses half of its value in roughly 1350 ms.
 		 */
 		weight_fixp = 0x80000000 / tot_acc_statcalls;
+		/* XXX list locking? */
 		LIST_FOREACH(vcpu, &vcpu_head, vcpu_le) {
 			avg0 = (weight_fixp * V_acc_statcalls) >> 15;
 			V_avg1_fixp = (3 * V_avg1_fixp + avg0) >> 2;

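The per-vcpu accounting kept above computes each vcpu's instantaneous
share of all statclock ticks in fixed point and folds it into a
running average weighted 3/4 old, 1/4 new (the comment notes avg2
loses half its value in roughly 1350 ms).  A rough userland sketch of
one smoothing step; variable names mirror the diff, but the tick
counts are made-up assumptions:

#include <stdio.h>

int
main(void)
{
	unsigned long tot_acc_statcalls = 1000;	/* ticks across all vcpus */
	unsigned long acc_statcalls = 250;	/* ticks on this vcpu */
	unsigned long avg1_fixp = 0;		/* smoothed share, 0x10000 == 100% */
	unsigned long weight_fixp, avg0;

	weight_fixp = 0x80000000UL / tot_acc_statcalls;
	avg0 = (weight_fixp * acc_statcalls) >> 15;	/* instantaneous share */
	avg1_fixp = (3 * avg1_fixp + avg0) >> 2;	/* 3/4 old, 1/4 new */
	printf("share now %lu.%02lu%%, smoothed %lu.%02lu%%\n",
	    avg0 * 100 >> 16, (avg0 * 100 * 100 >> 16) % 100,
	    avg1_fixp * 100 >> 16, (avg1_fixp * 100 * 100 >> 16) % 100);
	return (0);
}
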
==== //depot/projects/vimage/src/sys/kern/kern_synch.c#17 (text+ko) ====

@@ -80,10 +80,8 @@
 static struct callout loadav_callout;
 static struct callout lbolt_callout;
 
-#ifndef VIMAGE
 struct loadavg averunnable =
 	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
-#endif
 
 /*
  * Constants for averages over 1, 5, and 15 minutes
@@ -501,13 +499,8 @@
 	struct loadavg *avg;
 
 	VPROCG_ITERLOOP_BEGIN();
-	INIT_VPROCG(vprocg_iter);
-#ifdef VIMAGE
-	nrun = sched_load(vprocg_iter);
-#else
 	nrun = sched_load();
-#endif
-	avg = &V_averunnable;
+	avg = &averunnable;
 
 	for (i = 0; i < 3; i++)
 		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +

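loadav() above goes back to updating the single global averunnable;
the update expression is cut off in the context, but it is the usual
exponential decay where cexp[] holds exp(-5sec/period) scaled by
FSCALE.  A small userland sketch of that recurrence, assuming the
classic 4.4BSD form of the full expression and only approximate cexp
constants:

#include <stdio.h>

#define	FSHIFT	11
#define	FSCALE	(1 << FSHIFT)

int
main(void)
{
	/* roughly exp(-5/60), exp(-5/300), exp(-5/900), scaled by FSCALE */
	long cexp[3] = { 1884, 2014, 2037 };
	long ldavg[3] = { 0, 0, 0 };
	int nrun = 2;			/* runnable threads each sample */
	int i, t;

	for (t = 0; t < 60; t++)	/* 60 samples at 5 s = 5 minutes */
		for (i = 0; i < 3; i++)
			ldavg[i] = (cexp[i] * ldavg[i] +
			    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
	for (i = 0; i < 3; i++)
		printf("%ld.%02ld ", ldavg[i] / FSCALE,
		    ldavg[i] * 100 / FSCALE % 100);
	printf("\n");
	return (0);
}
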
==== //depot/projects/vimage/src/sys/kern/kern_vimage.c#60 (text+ko) ====

@@ -538,8 +538,10 @@
 	case SIOCGPVIMAGE:
 		vimage_relative_name(vip, vip_r, vi_req->vi_name,
 		    sizeof (vi_req->vi_name));
+#ifdef NOTYET
 		bcopy(&vip_r->v_procg->_averunnable, &vi_req->averunnable,
 		    sizeof (vi_req->averunnable));
+#endif
 		vi_req->vi_proc_count = vip_r->v_procg->nprocs;
 		vi_req->vi_if_count = vip_r->v_net->ifccnt;
 		vi_req->vi_sock_count = vip_r->v_net->sockcnt;
@@ -690,9 +692,6 @@
 	vip->v_cpu = vcpu;
 	vcpu->vcpu_id = last_vcpu_id++;
 
-	/* Struct vprocg initialization - perhaps move to anther place? */
-	V_averunnable.fscale = FSCALE;
-
 	/* Initialize / attach vnet module instances. */
 	CURVNET_SET_QUIET(vnet);
 	TAILQ_FOREACH(vml, &vnet_modlink_head, vml_mod_le)

==== //depot/projects/vimage/src/sys/kern/kern_xxx.c#5 (text+ko) ====

@@ -273,21 +273,20 @@
         struct setdomainname_args *uap;
 {
 	INIT_VPROCG(TD_TO_VPROCG(td));
-        int error, domainnamelen;
+	int error, domainnamelen;
 
 	error = priv_check(td, PRIV_SETDOMAINNAME);
-printf("setdomainname error=%d\n", error);
 	if (error)
 		return (error);
 	mtx_lock(&Giant);
-        if ((u_int)uap->len > sizeof (V_domainname) - 1) {
+	if ((u_int)uap->len > sizeof (V_domainname) - 1) {
 		error = EINVAL;
 		goto done2;
 	}
-        domainnamelen = uap->len;
-        error = copyin(uap->domainname, V_domainname, uap->len);
-        V_domainname[domainnamelen] = 0;
+	domainnamelen = uap->len;
+	error = copyin(uap->domainname, V_domainname, uap->len);
+	V_domainname[domainnamelen] = 0;
 done2:
 	mtx_unlock(&Giant);
-        return (error);
+	return (error);
 }

==== //depot/projects/vimage/src/sys/kern/sched_4bsd.c#21 (text+ko) ====

@@ -38,7 +38,6 @@
 #include "opt_hwpmc_hooks.h"
 #include "opt_sched.h"
 #include "opt_kdtrace.h"
-#include "opt_vimage.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -56,7 +55,6 @@
 #include <sys/sx.h>
 #include <sys/turnstile.h>
 #include <sys/umtx.h>
-#include <sys/vimage.h>
 #include <machine/pcb.h>
 #include <machine/smp.h>
 
@@ -237,19 +235,17 @@
 static __inline void
 sched_load_add(struct thread *td)
 {
-	INIT_VPROCG(TD_TO_VPROCG(td));
 
-	V_sched_tdcnt++;
-	CTR1(KTR_SCHED, "global load: %d", V_sched_tdcnt);
+	sched_tdcnt++;
+	CTR1(KTR_SCHED, "global load: %d", sched_tdcnt);
 }
 
 static __inline void
 sched_load_rem(struct thread *td)
 {
-	INIT_VPROCG(TD_TO_VPROCG(td));
 
-	V_sched_tdcnt--;
-	CTR1(KTR_SCHED, "global load: %d", V_sched_tdcnt);
+	sched_tdcnt--;
+	CTR1(KTR_SCHED, "global load: %d", sched_tdcnt);
 }
 
 #ifdef VIMAGE
@@ -467,8 +463,7 @@
 		PROC_LOCK(p);
 #ifdef VIMAGE
 		if (p->p_ucred != NULL) {
-			INIT_VPROCG(P_TO_VPROCG(p));
-			loadfac = loadfactor(V_averunnable.ldavg[0]);
+			loadfac = loadfactor(averunnable.ldavg[0]);
 		} else
 			loadfac = 0;
 #endif
@@ -576,13 +571,12 @@
 static void
 updatepri(struct thread *td)
 {
-	INIT_VPROCG(TD_TO_VPROCG(td));
 	struct td_sched *ts;
 	fixpt_t loadfac;
 	unsigned int newcpu;
 
 	ts = td->td_sched;
-	loadfac = loadfactor(V_averunnable.ldavg[0]);
+	loadfac = loadfactor(averunnable.ldavg[0]);
 	if (ts->ts_slptime > 5 * loadfac)
 		td->td_estcpu = 0;
 	else {
@@ -1449,13 +1443,9 @@
 }
 
 int
-#ifdef VIMAGE
-sched_load(struct vprocg *vprocg)
-#else
 sched_load(void)
-#endif
 {
-	return (V_sched_tdcnt);
+	return (sched_tdcnt);
 }
 
 int

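With the back-out, sched_4bsd again reads the global
averunnable.ldavg[0] when recomputing priorities (schedcpu() /
updatepri() above).  A userland sketch of how that load feeds the
estcpu decay, assuming the classic loadfactor()/decay_cpu()
definitions, which are not shown in this diff:

#include <stdio.h>

#define	FSHIFT	11
#define	FSCALE	(1 << FSHIFT)
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

int
main(void)
{
	long loadfac = loadfactor(3 * FSCALE);	/* load average of 3.00 */
	unsigned int estcpu = 120;
	int sec;

	/* estcpu shrinks by about (2*load)/(2*load + 1) per second. */
	for (sec = 1; sec <= 5; sec++) {
		estcpu = decay_cpu(loadfac, estcpu);
		printf("after %d s: estcpu %u\n", sec, estcpu);
	}
	return (0);
}
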
==== //depot/projects/vimage/src/sys/kern/tty.c#15 (text+ko) ====

@@ -2541,7 +2541,6 @@
 void
 ttyinfo(struct tty *tp)
 {
-	INIT_VPROCG(TD_TO_VPROCG(curthread));
 	struct timeval utime, stime;
 	struct proc *p, *pick;
 	struct thread *td, *picktd;
@@ -2556,7 +2555,7 @@
 		return;
 
 	/* Print load average. */
-	load = (V_averunnable.ldavg[0] * 100 + FSCALE / 2) >> FSHIFT;
+	load = (averunnable.ldavg[0] * 100 + FSCALE / 2) >> FSHIFT;
 	ttyprintf(tp, "load: %d.%02d ", load / 100, load % 100);
 
 	/*

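ttyinfo() above prints the same 1-minute figure, but rounds to the
nearest hundredth by adding FSCALE / 2 before the shift.  A tiny
userland illustration of that rounding (sample value assumed):

#include <stdio.h>

#define	FSHIFT	11
#define	FSCALE	(1 << FSHIFT)

int
main(void)
{
	long ldavg0 = 2867;	/* fixed-point load, about 1.40 */
	int load = (ldavg0 * 100 + FSCALE / 2) >> FSHIFT;

	printf("load: %d.%02d\n", load / 100, load % 100);
	return (0);
}
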
==== //depot/projects/vimage/src/sys/net/bpf.c#20 (text+ko) ====

@@ -1417,7 +1417,7 @@
 		if (target_vimage == NULL)
 			return ENXIO;
 		target_vnet = target_vimage->v_net;
-        }
+	}
 	CURVNET_SET_QUIET(target_vnet);
 #endif
 

==== //depot/projects/vimage/src/sys/net/bpf.h#6 (text+ko) ====


==== //depot/projects/vimage/src/sys/net/if_ethersubr.c#20 (text+ko) ====

@@ -892,16 +892,16 @@
 static void
 ether_reassign(struct ifnet *ifp, struct vnet *vnet, char *dname)
 {
-        u_char eaddr[6];
+	u_char eaddr[6];
 
 	bcopy(IF_LLADDR(ifp), eaddr, 6);
 	ether_ifdetach(ifp);
 	ifp->if_bpf = NULL;
-        if_reassign_common(ifp, vnet, "eth");
+	if_reassign_common(ifp, vnet, "eth");
 	if (dname)
 		snprintf(ifp->if_xname, IFNAMSIZ, "%s", dname);
 
-        CURVNET_SET_QUIET(vnet);
+	CURVNET_SET_QUIET(vnet);
 	ether_ifattach(ifp, eaddr);
 	CURVNET_RESTORE();
 }
@@ -977,7 +977,7 @@
 SYSCTL_NODE(_net_link, IFT_ETHER, ether, CTLFLAG_RW, 0, "Ethernet");
 #if defined(INET) || defined(INET6)
 SYSCTL_V_INT(V_NET, vnet_net, _net_link_ether, OID_AUTO, ipfw, CTLFLAG_RW,
-	     ether_ipfw, 0, "Pass ether pkts through firewall");
+    ether_ipfw, 0, "Pass ether pkts through firewall");
 #endif
 
 #if 0

==== //depot/projects/vimage/src/sys/net/route.c#18 (text+ko) ====

@@ -153,15 +153,15 @@
 {
 	int table;
 	int fam;
-        struct domain *dom;
-        INIT_VNET_NET(curvnet);
+	struct domain *dom;
+	INIT_VNET_NET(curvnet);
 
 	for (dom = domains; dom; dom = dom->dom_next) {
 		if (dom->dom_rtattach)  {
 			for  (table = 0; table < rt_numfibs; table++) {
 				if ( (fam = dom->dom_family) == AF_INET ||
 				    table == 0) {
- 			        	/* for now only AF_INET has > 1 table */
+					/* for now only AF_INET has > 1 table */
 					/* XXX MRT 
 					 * rtattach will be also called
 					 * from vfs_export.c but the
@@ -178,7 +178,7 @@
 			}
 		}
 	}
-        return 0;
+	return (0);
 }
 
 #ifdef VIMAGE
@@ -187,15 +187,15 @@
 {
 	int table;
 	int fam;
-        struct domain *dom;
-        INIT_VNET_NET(curvnet);
+	struct domain *dom;
+	INIT_VNET_NET(curvnet);
 
 	for (dom = domains; dom; dom = dom->dom_next) {
 		if (dom->dom_rtdetach)  {
 			for  (table = 0; table < rt_numfibs; table++) {
 				if ( (fam = dom->dom_family) == AF_INET ||
 				    table == 0) {
- 			        	/* for now only AF_INET has > 1 table */
+ 					/* for now only AF_INET has > 1 table */
 					dom->dom_rtdetach(
 				    	    (void **)&V_rt_tables[table][fam],
 				    	    dom->dom_rtoffset);
@@ -205,7 +205,7 @@
 			}
 		}
 	}
-        return 0;
+	return (0);
 }
 
 VNET_MOD_DECLARE_STATELESS(RTABLE, rtable, rtable_init, rtable_idetach, NET);
@@ -233,7 +233,7 @@
 
 #ifndef _SYS_SYSPROTO_H_
 struct setfib_args {
-	int     fibnum;
+	int	fibnum;
 };
 #endif
 int

==== //depot/projects/vimage/src/sys/net/vnet.h#10 (text+ko) ====

@@ -72,7 +72,7 @@
 
 	LIST_HEAD(, rawcb) _rawcb_list;
 
-        int     _ether_ipfw;
+	int	_ether_ipfw;
 };
 
 #endif

==== //depot/projects/vimage/src/sys/net80211/ieee80211.c#15 (text+ko) ====

@@ -672,18 +672,18 @@
 ieee80211_reassign( struct ieee80211vap *vap, struct vnet *vnet, char *dname)
 {
 	struct ifnet *ifp = vap->iv_ifp;
-        u_char eaddr[6];
+	u_char eaddr[6];
 
 	bcopy(IF_LLADDR(ifp), eaddr, 6);
 	bpfdetach(ifp);
 	ether_ifdetach(ifp);
 	ifp->if_bpf = NULL;
 	vap->iv_rawbpf = NULL;
-        if_reassign_common(ifp, vnet, ifp->if_dname);
+	if_reassign_common(ifp, vnet, ifp->if_dname);
 	if (dname)
 		snprintf(ifp->if_xname, IFNAMSIZ, "%s", dname);
 
-        CURVNET_SET_QUIET(vnet);
+	CURVNET_SET_QUIET(vnet);
 	ether_ifattach(ifp, eaddr);
 	bpfattach2(ifp, DLT_IEEE802_11,
 	    sizeof(struct ieee80211_frame_addr4), &vap->iv_rawbpf);

==== //depot/projects/vimage/src/sys/netgraph/ng_base.c#28 (text+ko) ====

@@ -3112,7 +3112,9 @@
 {
 	INIT_VNET_NETGRAPH(curvnet);
 
+#ifdef VIMAGE
 	LIST_INIT(&V_ng_nodelist); /* XXX should go away */
+#endif
 	V_nextID = 1;
 
 	return 0;

==== //depot/projects/vimage/src/sys/netgraph/ng_bridge.c#4 (text+ko) ====

@@ -375,7 +375,7 @@
 		NG_HOOK_SET_PRIVATE(hook, (void *)linkNum);
 		priv->numLinks++;
 		return (0);
-        }
+	}
 
 	/* Unknown hook name */
 	return (EINVAL);

==== //depot/projects/vimage/src/sys/netgraph/ng_ether.c#11 (text+ko) ====

@@ -782,7 +782,7 @@
 
 static int ng_ether_iattach(const void *unused)
 {
-        INIT_VNET_NET(curvnet);
+	INIT_VNET_NET(curvnet);
 	struct ifnet *ifp;
 
 #ifdef VIMAGE

==== //depot/projects/vimage/src/sys/netgraph/ng_hub.c#3 (text+ko) ====

@@ -62,9 +62,9 @@
 static  int
 ng_hub_newhook(node_p node, hook_p hook, const char *name)
 {
-        if (strcmp(name, "anchor") == 0)
-                node->nd_private = (void *) 1;
-        return 0;
+	if (strcmp(name, "anchor") == 0)
+		node->nd_private = (void *) 1;
+	return (0);
 }
 
 static int

==== //depot/projects/vimage/src/sys/netgraph/ng_wormhole.c#3 (text+ko) ====

@@ -126,19 +126,19 @@
 };
 
 static const struct ng_cmdlist ng_wormhole_cmds[] = {
-        {
+	{
 		.cookie =	NGM_WORMHOLE_COOKIE,
 		.cmd =		NGM_WORMHOLE_PEER,
 		.name =		"peer",
 		.mesgType =	&ng_wormhole_peer_type,
 		.respType =	&ng_wormhole_peer_type,
-        },
-        {
+	},
+	{
 		.cookie =	NGM_WORMHOLE_COOKIE,
 		.cmd =		NGM_WORMHOLE_STATUS,
 		.name =		"status",
 		.respType =	&ng_wormhole_status_type,
-        },
+	},
 	{ 0 }
 };
 
@@ -184,9 +184,9 @@
 		return (ENOMEM);
 
 	NG_NODE_SET_PRIVATE(node, priv);
-        priv->unit = alloc_unr(V_ng_wormhole_unit);
+	priv->unit = alloc_unr(V_ng_wormhole_unit);
 	snprintf(buf, NG_NODESIZ, "%s%d", typestruct.name, priv->unit);
-        if (ng_name_node(node, buf) != 0)
+	if (ng_name_node(node, buf) != 0)
 	    log(LOG_WARNING, "%s: can't acquire netgraph name\n", buf);
 	priv->vnet = curvnet;
 	priv->node = node;
@@ -250,7 +250,7 @@
 					    sizeof(priv->remote_priv));
 			}
 
-                	break;
+			break;
 		case NGM_WORMHOLE_STATUS:
 			NG_MKRESPONSE(resp, msg,
 			    sizeof(priv->status), M_NOWAIT);
@@ -259,15 +259,15 @@
 			else
 				bcopy(&priv->status, resp->data,
 				    sizeof(priv->status));
-                	break;
-        	default:
-                	error = EINVAL;
-                	break;
+			break;
+		default:
+			error = EINVAL;
+			break;
 		}
 		break;
-        default:
-                error = EINVAL;
-                break;
+	default:
+		error = EINVAL;
+		break;
 	}
 	NG_RESPOND_MSG(error, node, item, resp);
 	NG_FREE_MSG(msg);
@@ -276,8 +276,8 @@
 
 static int
 ng_wormhole_peer_parse(const struct ng_parse_type *type,
-        const char *s, int *off, const u_char *const start,
-        u_char *const buf, int *buflen)
+    const char *s, int *off, const u_char *const start,
+    u_char *const buf, int *buflen)
 {
 	char node_name_buf[NG_NODESIZ];
 	char *t;
@@ -298,9 +298,9 @@
 		return (EINVAL);
 	if ((len = t - (s + *off)) > sizeof(node_name_buf) - 1)
 		return (EINVAL);
-        strncpy(node_name_buf, s + *off, len);
-        node_name_buf[len] = '\0';
-        *off += len + 1;	/* vnet name should be in &s[*off] now */
+	strncpy(node_name_buf, s + *off, len);
+	node_name_buf[len] = '\0';
+	*off += len + 1;	/* vnet name should be in &s[*off] now */
 
 	/* XXX should lock all wormhole list here */
 	LIST_FOREACH(*remote_priv, &all_wormholes_head, all_wormholes_le)
@@ -318,7 +318,7 @@
 
 static int
 ng_wormhole_peer_unparse(const struct ng_parse_type *type,
-        const u_char *data, int *off, char *cbuf, int cbuflen)
+    const u_char *data, int *off, char *cbuf, int cbuflen)
 {
 	const priv_p *remote_priv = (const priv_p *)(data + *off);
 
@@ -334,7 +334,7 @@
 
 static int
 ng_wormhole_status_unparse(const struct ng_parse_type *type,
-        const u_char *data, int *off, char *cbuf, int cbuflen)
+    const u_char *data, int *off, char *cbuf, int cbuflen)
 {
 	const int *status = (const int *)(data + *off);
 
@@ -375,7 +375,7 @@
 static int
 ng_wormhole_rcvdata(hook_p hook, item_p item)
 {
-        priv_p priv = NG_NODE_PRIVATE(NG_HOOK_NODE(hook));
+	priv_p priv = NG_NODE_PRIVATE(NG_HOOK_NODE(hook));
 	int error = 0;
 	priv_p remote_priv = priv->remote_priv;
 	struct mbuf *m;
@@ -387,7 +387,7 @@
 		m = NGI_M(item);
 		m->m_flags |= M_REMOTE_VNET;
 		CURVNET_SET_QUIET(remote_priv->vnet);
-                NG_FWD_ITEM_HOOK(error, item, remote_priv->hook);
+		NG_FWD_ITEM_HOOK(error, item, remote_priv->hook);
 		CURVNET_RESTORE();
 	}
 	return (error);

==== //depot/projects/vimage/src/sys/netinet/in_pcb.c#21 (text+ko) ====

@@ -128,8 +128,8 @@
 #endif
 {
 #ifdef VIMAGE
-        INIT_VNET_INET(curvnet);
-        SYSCTL_RESOLVE_V_ARG1();
+	INIT_VNET_INET(curvnet);
+	SYSCTL_RESOLVE_V_ARG1();
 #endif
 	int error;
 

==== //depot/projects/vimage/src/sys/sys/resource.h#6 (text+ko) ====

@@ -153,9 +153,7 @@
 
 #ifdef _KERNEL
 
-#ifndef VIMAGE
 extern struct loadavg averunnable;
-#endif
 void	read_cpu_time(long *cp_time);	/* Writes array of CPUSTATES */
 
 #else

==== //depot/projects/vimage/src/sys/sys/sched.h#9 (text+ko) ====

@@ -64,8 +64,6 @@
 
 #ifdef _KERNEL
 
-struct vprocg;
-
 /*
  * General scheduling info.
  *
@@ -75,12 +73,7 @@
  * sched_runnable:
  *	Runnable threads for this processor.
  */
-#ifdef VIMAGE
-int	sched_load(struct vprocg *);
-void	sched_load_reassign(struct vprocg *, struct vprocg *);
-#else
 int	sched_load(void);
-#endif
 int	sched_rr_interval(void);
 int	sched_runnable(void);
 

==== //depot/projects/vimage/src/sys/vm/vm_meter.c#9 (text+ko) ====

@@ -32,8 +32,6 @@
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD: src/sys/vm/vm_meter.c,v 1.98 2008/03/19 06:19:01 jeff Exp $");
 
-#include "opt_vimage.h"
-
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
@@ -53,7 +51,6 @@
 #include <vm/vm_map.h>
 #include <vm/vm_object.h>
 #include <sys/sysctl.h>
-#include <sys/vimage.h>
 
 struct vmmeter cnt;
 
@@ -79,20 +76,19 @@
 static int
 sysctl_vm_loadavg(SYSCTL_HANDLER_ARGS)
 {
-	INIT_VPROCG(TD_TO_VPROCG(curthread));
 	
 #ifdef SCTL_MASK32
 	u_int32_t la[4];
 
 	if (req->flags & SCTL_MASK32) {
-		la[0] = V_averunnable.ldavg[0];
-		la[1] = V_averunnable.ldavg[1];
-		la[2] = V_averunnable.ldavg[2];
-		la[3] = V_averunnable.fscale;
+		la[0] = averunnable.ldavg[0];
+		la[1] = averunnable.ldavg[1];
+		la[2] = averunnable.ldavg[2];
+		la[3] = averunnable.fscale;
 		return SYSCTL_OUT(req, la, sizeof(la));
 	} else
 #endif
-		return SYSCTL_OUT(req, &V_averunnable, sizeof(V_averunnable));
+		return SYSCTL_OUT(req, &averunnable, sizeof(averunnable));
 }
 SYSCTL_PROC(_vm, VM_LOADAVG, loadavg, CTLTYPE_STRUCT|CTLFLAG_RD, 
     NULL, 0, sysctl_vm_loadavg, "S,loadavg", "Machine loadaverage history");


