Date:      Thu, 14 Mar 2013 21:39:39 +0000 (UTC)
From:      Jack F Vogel <jfv@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-9@freebsd.org
Subject:   svn commit: r248287 - in stable/9/sys: dev/ixgbe net sys
Message-ID:  <201303142139.r2ELddH3029272@svn.freebsd.org>

Author: jfv
Date: Thu Mar 14 21:39:39 2013
New Revision: 248287
URL: http://svnweb.freebsd.org/changeset/base/248287

Log:
  MFC of the ixgbe driver including the follow revisions:
  
  235547,235964,241616,241646,241856,241885,242403,
  242421,243714,243716,243718,243721,243724,243725,
  243728,243729,243733,243735,243736,243741,243833,
  244514,246128,246482,247035,247056,247822,247823
  
  Note: 246482 includes only the changes in the ixgbe driver
        and the buf_ring supporting code in if_var.h and buf_ring.h.
        igb and em will be separate commits, and non-Intel drivers
        can commit the changes at will.
  
  Reviewed by:	rss, jhb

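For context on the buf_ring support mentioned in the log: r246482 adds the drbr_peek()/drbr_advance()/drbr_putback() wrappers to if_var.h and buf_ring.h, and the multiqueue transmit path in the diff below switches from the old dequeue/re-enqueue loop to that API. A minimal consumer-side sketch of the pattern follows; it is not the driver's exact code, and xmit_one() is a hypothetical stand-in for ixgbe_xmit().

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/buf_ring.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_var.h>		/* drbr_*() wrappers over buf_ring */

static int xmit_one(struct mbuf **);	/* hypothetical HW enqueue, like ixgbe_xmit() */

/*
 * Peek/advance/putback consumer loop: a packet is only removed from
 * the ring (drbr_advance) once the hardware has accepted it; on a
 * transient failure it is pushed back to the head (drbr_putback), so
 * nothing is dropped or reordered by the software queue.
 */
static int
drbr_drain_sketch(struct ifnet *ifp, struct buf_ring *br)
{
	struct mbuf *next;
	int err = 0;

	while ((next = drbr_peek(ifp, br)) != NULL) {
		if ((err = xmit_one(&next)) != 0) {
			if (next == NULL)
				drbr_advance(ifp, br);		/* mbuf was consumed/freed */
			else
				drbr_putback(ifp, br, next);	/* leave it at the head, retry later */
			break;
		}
		drbr_advance(ifp, br);	/* committed to the hardware ring */
	}
	return (err);
}
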
Modified:
  stable/9/sys/dev/ixgbe/LICENSE
  stable/9/sys/dev/ixgbe/ixgbe.c   (contents, props changed)
  stable/9/sys/dev/ixgbe/ixgbe.h
  stable/9/sys/dev/ixgbe/ixgbe_82598.c
  stable/9/sys/dev/ixgbe/ixgbe_82599.c
  stable/9/sys/dev/ixgbe/ixgbe_82599.h
  stable/9/sys/dev/ixgbe/ixgbe_api.c
  stable/9/sys/dev/ixgbe/ixgbe_api.h
  stable/9/sys/dev/ixgbe/ixgbe_common.c
  stable/9/sys/dev/ixgbe/ixgbe_common.h
  stable/9/sys/dev/ixgbe/ixgbe_mbx.h
  stable/9/sys/dev/ixgbe/ixgbe_osdep.h
  stable/9/sys/dev/ixgbe/ixgbe_phy.c
  stable/9/sys/dev/ixgbe/ixgbe_phy.h
  stable/9/sys/dev/ixgbe/ixgbe_type.h
  stable/9/sys/dev/ixgbe/ixgbe_vf.c
  stable/9/sys/dev/ixgbe/ixgbe_vf.h
  stable/9/sys/dev/ixgbe/ixgbe_x540.c
  stable/9/sys/dev/ixgbe/ixgbe_x540.h
  stable/9/sys/dev/ixgbe/ixv.c
  stable/9/sys/net/if_var.h
  stable/9/sys/sys/buf_ring.h
Directory Properties:
  stable/9/sys/dev/ixgbe/   (props changed)
  stable/9/sys/net/   (props changed)
  stable/9/sys/sys/   (props changed)

Modified: stable/9/sys/dev/ixgbe/LICENSE
==============================================================================
--- stable/9/sys/dev/ixgbe/LICENSE	Thu Mar 14 21:21:14 2013	(r248286)
+++ stable/9/sys/dev/ixgbe/LICENSE	Thu Mar 14 21:39:39 2013	(r248287)
@@ -1,6 +1,6 @@
 /******************************************************************************
 
-  Copyright (c) 2001-2011, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 

Modified: stable/9/sys/dev/ixgbe/ixgbe.c
==============================================================================
--- stable/9/sys/dev/ixgbe/ixgbe.c	Thu Mar 14 21:21:14 2013	(r248286)
+++ stable/9/sys/dev/ixgbe/ixgbe.c	Thu Mar 14 21:39:39 2013	(r248287)
@@ -1,6 +1,6 @@
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -47,7 +47,7 @@ int             ixgbe_display_debug_stat
 /*********************************************************************
  *  Driver version
  *********************************************************************/
-char ixgbe_driver_version[] = "2.4.8";
+char ixgbe_driver_version[] = "2.5.7 - STABLE/9";
 
 /*********************************************************************
  *  PCI Device ID Table
@@ -83,7 +83,7 @@ static ixgbe_vendor_info_t ixgbe_vendor_
 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
-	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
+	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
 	/* required last entry */
 	{0, 0, 0, 0, 0}
@@ -104,16 +104,16 @@ static int      ixgbe_probe(device_t);
 static int      ixgbe_attach(device_t);
 static int      ixgbe_detach(device_t);
 static int      ixgbe_shutdown(device_t);
-#if __FreeBSD_version >= 800000
+#ifdef IXGBE_LEGACY_TX
+static void     ixgbe_start(struct ifnet *);
+static void     ixgbe_start_locked(struct tx_ring *, struct ifnet *);
+#else /* ! IXGBE_LEGACY_TX */
 static int	ixgbe_mq_start(struct ifnet *, struct mbuf *);
 static int	ixgbe_mq_start_locked(struct ifnet *,
                     struct tx_ring *, struct mbuf *);
 static void	ixgbe_qflush(struct ifnet *);
 static void	ixgbe_deferred_mq_start(void *, int);
-#else
-static void     ixgbe_start(struct ifnet *);
-static void     ixgbe_start_locked(struct tx_ring *, struct ifnet *);
-#endif
+#endif /* IXGBE_LEGACY_TX */
 static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
 static void	ixgbe_init(void *);
 static void	ixgbe_init_locked(struct adapter *);
@@ -150,7 +150,7 @@ static void     ixgbe_enable_intr(struct
 static void     ixgbe_disable_intr(struct adapter *);
 static void     ixgbe_update_stats_counters(struct adapter *);
 static bool	ixgbe_txeof(struct tx_ring *);
-static bool	ixgbe_rxeof(struct ix_queue *, int);
+static bool	ixgbe_rxeof(struct ix_queue *);
 static void	ixgbe_rx_checksum(u32, struct mbuf *, u32);
 static void     ixgbe_set_promisc(struct adapter *);
 static void     ixgbe_set_multi(struct adapter *);
@@ -163,10 +163,10 @@ static int	ixgbe_set_thermal_test(SYSCTL
 static int	ixgbe_dma_malloc(struct adapter *, bus_size_t,
 		    struct ixgbe_dma_alloc *, int);
 static void     ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
-static void	ixgbe_add_rx_process_limit(struct adapter *, const char *,
-		    const char *, int *, int);
-static bool	ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
-static bool	ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
+static int	ixgbe_tx_ctx_setup(struct tx_ring *,
+		    struct mbuf *, u32 *, u32 *);
+static int	ixgbe_tso_setup(struct tx_ring *,
+		    struct mbuf *, u32 *, u32 *);
 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
 static void	ixgbe_configure_ivars(struct adapter *);
 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
@@ -181,6 +181,9 @@ static __inline void ixgbe_rx_discard(st
 static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
 		    struct mbuf *, u32);
 
+static void	ixgbe_enable_rx_drop(struct adapter *);
+static void	ixgbe_disable_rx_drop(struct adapter *);
+
 /* Support for pluggable optic modules */
 static bool	ixgbe_sfp_probe(struct adapter *);
 static void	ixgbe_setup_optics(struct adapter *);
@@ -213,7 +216,7 @@ static device_method_t ixgbe_methods[] =
 	DEVMETHOD(device_attach, ixgbe_attach),
 	DEVMETHOD(device_detach, ixgbe_detach),
 	DEVMETHOD(device_shutdown, ixgbe_shutdown),
-	{0, 0}
+	DEVMETHOD_END
 };
 
 static driver_t ixgbe_driver = {
@@ -243,9 +246,13 @@ static int ixgbe_max_interrupt_rate = (4
 TUNABLE_INT("hw.ixgbe.max_interrupt_rate", &ixgbe_max_interrupt_rate);
 
 /* How many packets rxeof tries to clean at a time */
-static int ixgbe_rx_process_limit = 128;
+static int ixgbe_rx_process_limit = 256;
 TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
 
+/* How many packets txeof tries to clean at a time */
+static int ixgbe_tx_process_limit = 256;
+TUNABLE_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit);
+
 /*
 ** Smart speed setting, default to on
 ** this only works as a compile option
@@ -263,15 +270,6 @@ static int ixgbe_enable_msix = 1;
 TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
 
 /*
- * Header split: this causes the hardware to DMA
- * the header into a separate mbuf from the payload,
- * it can be a performance win in some workloads, but
- * in others it actually hurts, its off by default. 
- */
-static int ixgbe_header_split = FALSE;
-TUNABLE_INT("hw.ixgbe.hdr_split", &ixgbe_header_split);
-
-/*
  * Number of Queues, can be set to 0,
  * it then autoconfigures based on the
  * number of cpus with a max of 8. This
@@ -292,6 +290,20 @@ TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);
 static int ixgbe_rxd = PERFORM_RXD;
 TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
 
+/*
+** HW RSC control: 
+**  this feature only works with
+**  IPv4, and only on 82599 and later.
+**  Also this will cause IP forwarding to
+**  fail and that can't be controlled by
+**  the stack as LRO can. For all these
+**  reasons I've deemed it best to leave
+**  this off and not bother with a tuneable
+**  interface, this would need to be compiled
+**  to enable.
+*/
+static bool ixgbe_rsc_enable = FALSE;
+
 /* Keep running tab on them for sanity check */
 static int ixgbe_total_ports;
 
@@ -533,7 +545,6 @@ ixgbe_attach(device_t dev)
 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
 		device_printf(dev,"Unsupported SFP+ Module\n");
 		error = EIO;
-		device_printf(dev,"Hardware Initialization Failure\n");
 		goto err_late;
 	case IXGBE_ERR_SFP_NOT_PRESENT:
 		device_printf(dev,"No SFP+ Module found\n");
@@ -556,11 +567,6 @@ ixgbe_attach(device_t dev)
 	if (ixgbe_setup_interface(dev, adapter) != 0)
 		goto err_late;
 
-	/* Sysctl for limiting the amount of work done in the taskqueue */
-	ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
-	    "max number of rx packets to process", &adapter->rx_process_limit,
-	    ixgbe_rx_process_limit);
-
 	/* Initialize statistics */
 	ixgbe_update_stats_counters(adapter);
 
@@ -589,6 +595,9 @@ ixgbe_attach(device_t dev)
 		    "PCIE, or x4 PCIE 2 slot is required.\n");
         }
 
+	/* Set an initial default flow control value */
+	adapter->fc =  ixgbe_fc_full;
+
 	/* let hardware know driver is loaded */
 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
@@ -645,7 +654,7 @@ ixgbe_detach(device_t dev)
 
 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
 		if (que->tq) {
-#if __FreeBSD_version >= 800000
+#ifndef IXGBE_LEGACY_TX
 			taskqueue_drain(que->tq, &txr->txq_task);
 #endif
 			taskqueue_drain(que->tq, &que->que_task);
@@ -709,7 +718,7 @@ ixgbe_shutdown(device_t dev)
 }
 
 
-#if __FreeBSD_version < 800000
+#ifdef IXGBE_LEGACY_TX
 /*********************************************************************
  *  Transmit entry point
  *
@@ -728,17 +737,14 @@ ixgbe_start_locked(struct tx_ring *txr, 
 
 	IXGBE_TX_LOCK_ASSERT(txr);
 
-	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
-	    IFF_DRV_RUNNING)
+	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 		return;
 	if (!adapter->link_active)
 		return;
 
 	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
-		if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE) {
-			txr->queue_status |= IXGBE_QUEUE_DEPLETED;
+		if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
 			break;
-                }
 
 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 		if (m_head == NULL)
@@ -747,8 +753,6 @@ ixgbe_start_locked(struct tx_ring *txr, 
 		if (ixgbe_xmit(txr, &m_head)) {
 			if (m_head != NULL)
 				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
-			if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
-				txr->queue_status |= IXGBE_QUEUE_DEPLETED;
 			break;
 		}
 		/* Send a copy of the frame to the BPF listener */
@@ -781,7 +785,8 @@ ixgbe_start(struct ifnet *ifp)
 	return;
 }
 
-#else
+#else /* ! IXGBE_LEGACY_TX */
+
 /*
 ** Multiqueue Transmit driver
 **
@@ -803,8 +808,7 @@ ixgbe_mq_start(struct ifnet *ifp, struct
 	txr = &adapter->tx_rings[i];
 	que = &adapter->queues[i];
 
-	if (((txr->queue_status & IXGBE_QUEUE_DEPLETED) == 0) &&
-	    IXGBE_TX_TRYLOCK(txr)) {
+	if (IXGBE_TX_TRYLOCK(txr)) {
 		err = ixgbe_mq_start_locked(ifp, txr, m);
 		IXGBE_TX_UNLOCK(txr);
 	} else {
@@ -823,7 +827,6 @@ ixgbe_mq_start_locked(struct ifnet *ifp,
         int             enqueued, err = 0;
 
 	if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
-	    (txr->queue_status == IXGBE_QUEUE_DEPLETED) ||
 	    adapter->link_active == 0) {
 		if (m != NULL)
 			err = drbr_enqueue(ifp, txr->br, m);
@@ -831,22 +834,24 @@ ixgbe_mq_start_locked(struct ifnet *ifp,
 	}
 
 	enqueued = 0;
-	if (m == NULL) {
-		next = drbr_dequeue(ifp, txr->br);
-	} else if (drbr_needs_enqueue(ifp, txr->br)) {
-		if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
+	if (m != NULL) {
+		err = drbr_enqueue(ifp, txr->br, m);
+		if (err) {
 			return (err);
-		next = drbr_dequeue(ifp, txr->br);
-	} else
-		next = m;
+		}
+	}
 
 	/* Process the queue */
-	while (next != NULL) {
+	while ((next = drbr_peek(ifp, txr->br)) != NULL) {
 		if ((err = ixgbe_xmit(txr, &next)) != 0) {
-			if (next != NULL)
-				err = drbr_enqueue(ifp, txr->br, next);
+			if (next == NULL) {
+				drbr_advance(ifp, txr->br);
+			} else {
+				drbr_putback(ifp, txr->br, next);
+			}
 			break;
 		}
+		drbr_advance(ifp, txr->br);
 		enqueued++;
 		/* Send a copy of the frame to the BPF listener */
 		ETHER_BPF_MTAP(ifp, next);
@@ -854,16 +859,11 @@ ixgbe_mq_start_locked(struct ifnet *ifp,
 			break;
 		if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD)
 			ixgbe_txeof(txr);
-		if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD) {
-			txr->queue_status |= IXGBE_QUEUE_DEPLETED;
-			break;
-		}
-		next = drbr_dequeue(ifp, txr->br);
 	}
 
 	if (enqueued > 0) {
 		/* Set watchdog on */
-		txr->queue_status |= IXGBE_QUEUE_WORKING;
+		txr->queue_status = IXGBE_QUEUE_WORKING;
 		txr->watchdog_time = ticks;
 	}
 
@@ -907,7 +907,7 @@ ixgbe_qflush(struct ifnet *ifp)
 	}
 	if_qflush(ifp);
 }
-#endif /* __FreeBSD_version >= 800000 */
+#endif /* IXGBE_LEGACY_TX */
 
 /*********************************************************************
  *  Ioctl entry point
@@ -922,6 +922,7 @@ static int
 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
 {
 	struct adapter	*adapter = ifp->if_softc;
+	struct ixgbe_hw *hw = &adapter->hw;
 	struct ifreq	*ifr = (struct ifreq *) data;
 #if defined(INET) || defined(INET6)
 	struct ifaddr *ifa = (struct ifaddr *)data;
@@ -1009,6 +1010,8 @@ ixgbe_ioctl(struct ifnet * ifp, u_long c
 			ifp->if_capenable ^= IFCAP_HWCSUM;
 		if (mask & IFCAP_TSO4)
 			ifp->if_capenable ^= IFCAP_TSO4;
+		if (mask & IFCAP_TSO6)
+			ifp->if_capenable ^= IFCAP_TSO6;
 		if (mask & IFCAP_LRO)
 			ifp->if_capenable ^= IFCAP_LRO;
 		if (mask & IFCAP_VLAN_HWTAGGING)
@@ -1025,7 +1028,22 @@ ixgbe_ioctl(struct ifnet * ifp, u_long c
 		VLAN_CAPABILITIES(ifp);
 		break;
 	}
-
+	case SIOCGI2C:
+	{
+		struct ixgbe_i2c_req	i2c;
+		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
+		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
+		if (error)
+			break;
+		if ((i2c.dev_addr != 0xA0) || (i2c.dev_addr != 0xA2)){
+			error = EINVAL;
+			break;
+		}
+		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
+		    i2c.dev_addr, i2c.data);
+		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
+		break;
+	}
 	default:
 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
 		error = ether_ioctl(ifp, command, data);
@@ -1073,7 +1091,7 @@ ixgbe_init_locked(struct adapter *adapte
 
 	/* Set the various hardware offload abilities */
 	ifp->if_hwassist = 0;
-	if (ifp->if_capenable & IFCAP_TSO4)
+	if (ifp->if_capenable & IFCAP_TSO)
 		ifp->if_hwassist |= CSUM_TSO;
 	if (ifp->if_capenable & IFCAP_TXCSUM) {
 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
@@ -1098,7 +1116,7 @@ ixgbe_init_locked(struct adapter *adapte
 
 	/*
 	** Determine the correct mbuf pool
-	** for doing jumbo/headersplit
+	** for doing jumbo frames
 	*/
 	if (adapter->max_frame_size <= 2048)
 		adapter->rx_mbuf_sz = MCLBYTES;
@@ -1294,7 +1312,7 @@ ixgbe_init_locked(struct adapter *adapte
 			tmp = IXGBE_LOW_DV(frame);
 		hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
 		
-		adapter->fc = hw->fc.requested_mode = ixgbe_fc_full;
+		hw->fc.requested_mode = adapter->fc;
 		hw->fc.pause_time = IXGBE_FC_PAUSE;
 		hw->fc.send_xon = TRUE;
 	}
@@ -1306,7 +1324,6 @@ ixgbe_init_locked(struct adapter *adapte
 
 	/* Now inform the stack we're ready */
 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
-	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 
 	return;
 }
@@ -1396,10 +1413,10 @@ ixgbe_handle_que(void *context, int pend
 	bool		more;
 
 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
-		more = ixgbe_rxeof(que, adapter->rx_process_limit);
+		more = ixgbe_rxeof(que);
 		IXGBE_TX_LOCK(txr);
 		ixgbe_txeof(txr);
-#if __FreeBSD_version >= 800000
+#ifndef IXGBE_LEGACY_TX
 		if (!drbr_empty(ifp, txr->br))
 			ixgbe_mq_start_locked(ifp, txr, NULL);
 #else
@@ -1444,7 +1461,7 @@ ixgbe_legacy_irq(void *arg)
 		return;
 	}
 
-	more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
+	more_rx = ixgbe_rxeof(que);
 
 	IXGBE_TX_LOCK(txr);
 	do {
@@ -1490,7 +1507,7 @@ ixgbe_msix_que(void *arg)
 	ixgbe_disable_queue(adapter, que->msix);
 	++que->irqs;
 
-	more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
+	more_rx = ixgbe_rxeof(que);
 
 	IXGBE_TX_LOCK(txr);
 	more_tx = ixgbe_txeof(txr);
@@ -1499,7 +1516,7 @@ ixgbe_msix_que(void *arg)
 	** has anything queued the task gets
 	** scheduled to handle it.
 	*/
-#if __FreeBSD_version < 800000
+#ifdef IXGBE_LEGACY_TX
 	if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
 #else
 	if (!drbr_empty(adapter->ifp, txr->br))
@@ -1665,7 +1682,7 @@ ixgbe_media_status(struct ifnet * ifp, s
 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
 			break;
 		case IXGBE_LINK_SPEED_1GB_FULL:
-			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
+			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
 			break;
 		case IXGBE_LINK_SPEED_10GB_FULL:
 			ifmr->ifm_active |= adapter->optics | IFM_FDX;
@@ -1724,9 +1741,9 @@ ixgbe_xmit(struct tx_ring *txr, struct m
 {
 	struct adapter  *adapter = txr->adapter;
 	u32		olinfo_status = 0, cmd_type_len;
-	u32		paylen = 0;
 	int             i, j, error, nsegs;
-	int		first, last = 0;
+	int		first;
+	bool		remap = TRUE;
 	struct mbuf	*m_head;
 	bus_dma_segment_t segs[adapter->num_segs];
 	bus_dmamap_t	map;
@@ -1754,74 +1771,58 @@ ixgbe_xmit(struct tx_ring *txr, struct m
 	/*
 	 * Map the packet for DMA.
 	 */
+retry:
 	error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
 	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
 
-	if (error == EFBIG) {
+	if (__predict_false(error)) {
 		struct mbuf *m;
 
-		m = m_defrag(*m_headp, M_DONTWAIT);
-		if (m == NULL) {
-			adapter->mbuf_defrag_failed++;
-			m_freem(*m_headp);
-			*m_headp = NULL;
-			return (ENOBUFS);
-		}
-		*m_headp = m;
-
-		/* Try it again */
-		error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
-		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
-
-		if (error == ENOMEM) {
-			adapter->no_tx_dma_setup++;
+		switch (error) {
+		case EFBIG:
+			/* Try it again? - one try */
+			if (remap == TRUE) {
+				remap = FALSE;
+				m = m_defrag(*m_headp, M_NOWAIT);
+				if (m == NULL) {
+					adapter->mbuf_defrag_failed++;
+					m_freem(*m_headp);
+					*m_headp = NULL;
+					return (ENOBUFS);
+				}
+				*m_headp = m;
+				goto retry;
+			} else
+				return (error);
+		case ENOMEM:
+			txr->no_tx_dma_setup++;
 			return (error);
-		} else if (error != 0) {
-			adapter->no_tx_dma_setup++;
+		default:
+			txr->no_tx_dma_setup++;
 			m_freem(*m_headp);
 			*m_headp = NULL;
 			return (error);
 		}
-	} else if (error == ENOMEM) {
-		adapter->no_tx_dma_setup++;
-		return (error);
-	} else if (error != 0) {
-		adapter->no_tx_dma_setup++;
-		m_freem(*m_headp);
-		*m_headp = NULL;
-		return (error);
 	}
 
 	/* Make certain there are enough descriptors */
 	if (nsegs > txr->tx_avail - 2) {
 		txr->no_desc_avail++;
-		error = ENOBUFS;
-		goto xmit_fail;
+		bus_dmamap_unload(txr->txtag, map);
+		return (ENOBUFS);
 	}
 	m_head = *m_headp;
 
 	/*
 	** Set up the appropriate offload context
-	** this becomes the first descriptor of 
-	** a packet.
+	** this will consume the first descriptor
 	*/
-	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
-		if (ixgbe_tso_setup(txr, m_head, &paylen)) {
-			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
-			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
-			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
-			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
-			++adapter->tso_tx;
-		} else
-			return (ENXIO);
-	} else if (ixgbe_tx_ctx_setup(txr, m_head))
-		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
-
-#ifdef IXGBE_IEEE1588
-        /* This is changing soon to an mtag detection */
-        if (we detect this mbuf has a TSTAMP mtag)
-                cmd_type_len |= IXGBE_ADVTXD_MAC_TSTAMP;
-#endif
+	error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
+	if (__predict_false(error)) {
+		if (error == ENOBUFS)
+			*m_headp = NULL;
+		return (error);
+	}
 
 #ifdef IXGBE_FDIR
 	/* Do the flow director magic */
@@ -1833,10 +1834,6 @@ ixgbe_xmit(struct tx_ring *txr, struct m
 		}
 	}
 #endif
-        /* Record payload length */
-	if (paylen == 0)
-        	olinfo_status |= m_head->m_pkthdr.len <<
-		    IXGBE_ADVTXD_PAYLEN_SHIFT;
 
 	i = txr->next_avail_desc;
 	for (j = 0; j < nsegs; j++) {
@@ -1852,13 +1849,9 @@ ixgbe_xmit(struct tx_ring *txr, struct m
 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
 		    cmd_type_len |seglen);
 		txd->read.olinfo_status = htole32(olinfo_status);
-		last = i; /* descriptor that will get completion IRQ */
 
-		if (++i == adapter->num_tx_desc)
+		if (++i == txr->num_desc)
 			i = 0;
-
-		txbuf->m_head = NULL;
-		txbuf->eop_index = -1;
 	}
 
 	txd->read.cmd_type_len |=
@@ -1867,14 +1860,19 @@ ixgbe_xmit(struct tx_ring *txr, struct m
 	txr->next_avail_desc = i;
 
 	txbuf->m_head = m_head;
-	/* Swap the dma map between the first and last descriptor */
+	/*
+	** Here we swap the map so the last descriptor,
+	** which gets the completion interrupt has the
+	** real map, and the first descriptor gets the
+	** unused map from this descriptor.
+	*/
 	txr->tx_buffers[first].map = txbuf->map;
 	txbuf->map = map;
 	bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
 
-        /* Set the index of the descriptor that will be marked done */
+        /* Set the EOP descriptor that will be marked done */
         txbuf = &txr->tx_buffers[first];
-	txbuf->eop_index = last;
+	txbuf->eop = txd;
 
         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
@@ -1887,10 +1885,6 @@ ixgbe_xmit(struct tx_ring *txr, struct m
 
 	return (0);
 
-xmit_fail:
-	bus_dmamap_unload(txr->txtag, txbuf->map);
-	return (error);
-
 }
 
 static void
@@ -1940,18 +1934,6 @@ ixgbe_set_multi(struct adapter *adapter)
 	bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
 	    MAX_NUM_MULTICAST_ADDRESSES);
 
-	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
-	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-	if (ifp->if_flags & IFF_PROMISC)
-		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-	else if (ifp->if_flags & IFF_ALLMULTI) {
-		fctrl |= IXGBE_FCTRL_MPE;
-		fctrl &= ~IXGBE_FCTRL_UPE;
-	} else
-		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-	
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
-
 #if __FreeBSD_version < 800000
 	IF_ADDR_LOCK(ifp);
 #else
@@ -1960,6 +1942,8 @@ ixgbe_set_multi(struct adapter *adapter)
 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 		if (ifma->ifma_addr->sa_family != AF_LINK)
 			continue;
+		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
+			break;
 		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
@@ -1971,9 +1955,24 @@ ixgbe_set_multi(struct adapter *adapter)
 	if_maddr_runlock(ifp);
 #endif
 
-	update_ptr = mta;
-	ixgbe_update_mc_addr_list(&adapter->hw,
-	    update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
+	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+	if (ifp->if_flags & IFF_PROMISC)
+		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
+	    ifp->if_flags & IFF_ALLMULTI) {
+		fctrl |= IXGBE_FCTRL_MPE;
+		fctrl &= ~IXGBE_FCTRL_UPE;
+	} else
+		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+	
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
+
+	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
+		update_ptr = mta;
+		ixgbe_update_mc_addr_list(&adapter->hw,
+		    update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
+	}
 
 	return;
 }
@@ -2009,13 +2008,11 @@ ixgbe_local_timer(void *arg)
 {
 	struct adapter	*adapter = arg;
 	device_t	dev = adapter->dev;
-	struct ifnet	*ifp = adapter->ifp;
 	struct ix_queue *que = adapter->queues;
 	struct tx_ring	*txr = adapter->tx_rings;
-	int		hung, busy, paused;
+	int		hung = 0, paused = 0;
 
 	mtx_assert(&adapter->core_mtx, MA_OWNED);
-	hung = busy = paused = 0;
 
 	/* Check for pluggable optics */
 	if (adapter->sfp_probe)
@@ -2034,27 +2031,18 @@ ixgbe_local_timer(void *arg)
 
 	/*
 	** Check the TX queues status
-	**      - central locked handling of OACTIVE
 	**      - watchdog only if all queues show hung
 	*/          
 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
-		if ((txr->queue_status & IXGBE_QUEUE_HUNG) &&
+		if ((txr->queue_status == IXGBE_QUEUE_HUNG) &&
 		    (paused == 0))
 			++hung;
-		if (txr->queue_status & IXGBE_QUEUE_DEPLETED)
-			++busy;
-		if ((txr->queue_status & IXGBE_QUEUE_IDLE) == 0)
+		else if (txr->queue_status == IXGBE_QUEUE_WORKING)
 			taskqueue_enqueue(que->tq, &que->que_task);
         }
 	/* Only truely watchdog if all queues show hung */
         if (hung == adapter->num_queues)
                 goto watchdog;
-	/* Only turn off the stack flow when ALL are depleted */
-        if (busy == adapter->num_queues)
-                ifp->if_drv_flags |= IFF_DRV_OACTIVE;
-        else if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) &&
-            (busy < adapter->num_queues))
-                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 
 out:
 	ixgbe_rearm_queues(adapter, adapter->que_mask);
@@ -2083,7 +2071,6 @@ static void
 ixgbe_update_link_status(struct adapter *adapter)
 {
 	struct ifnet	*ifp = adapter->ifp;
-	struct tx_ring *txr = adapter->tx_rings;
 	device_t dev = adapter->dev;
 
 
@@ -2104,9 +2091,6 @@ ixgbe_update_link_status(struct adapter 
 				device_printf(dev,"Link is Down\n");
 			if_link_state_change(ifp, LINK_STATE_DOWN);
 			adapter->link_active = FALSE;
-			for (int i = 0; i < adapter->num_queues;
-			    i++, txr++)
-				txr->queue_status = IXGBE_QUEUE_IDLE;
 		}
 	}
 
@@ -2137,7 +2121,6 @@ ixgbe_stop(void *arg)
 
 	/* Let the stack know...*/
 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
-	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 
 	ixgbe_reset_hw(hw);
 	hw->adapter_stopped = FALSE;
@@ -2196,7 +2179,7 @@ ixgbe_setup_optics(struct adapter *adapt
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	int		layer;
-	
+
 	layer = ixgbe_get_supported_physical_layer(hw);
 
 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
@@ -2209,6 +2192,11 @@ ixgbe_setup_optics(struct adapter *adapt
 		return;
 	}
 
+	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
+		adapter->optics = IFM_1000_SX;
+		return;
+	}
+
 	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
 	    IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
 		adapter->optics = IFM_10G_LR;
@@ -2244,12 +2232,12 @@ ixgbe_setup_optics(struct adapter *adapt
 static int
 ixgbe_allocate_legacy(struct adapter *adapter)
 {
-	device_t dev = adapter->dev;
+	device_t	dev = adapter->dev;
 	struct		ix_queue *que = adapter->queues;
-#if __FreeBSD_version >= 800000
+#ifndef IXGBE_LEGACY_TX
 	struct tx_ring		*txr = adapter->tx_rings;
 #endif
-	int error, rid = 0;
+	int		error, rid = 0;
 
 	/* MSI RID at 1 */
 	if (adapter->msix == 1)
@@ -2268,7 +2256,7 @@ ixgbe_allocate_legacy(struct adapter *ad
 	 * Try allocating a fast interrupt and the associated deferred
 	 * processing contexts.
 	 */
-#if __FreeBSD_version >= 800000
+#ifndef IXGBE_LEGACY_TX
 	TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
 #endif
 	TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
@@ -2350,7 +2338,7 @@ ixgbe_allocate_msix(struct adapter *adap
 		if (adapter->num_queues > 1)
 			bus_bind_intr(dev, que->res, i);
 
-#if __FreeBSD_version >= 800000
+#ifndef IXGBE_LEGACY_TX
 		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
 #endif
 		TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
@@ -2596,7 +2584,7 @@ ixgbe_setup_interface(device_t dev, stru
 	ifp->if_softc = adapter;
 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 	ifp->if_ioctl = ixgbe_ioctl;
-#if __FreeBSD_version >= 800000
+#ifndef IXGBE_LEGACY_TX
 	ifp->if_transmit = ixgbe_mq_start;
 	ifp->if_qflush = ixgbe_qflush;
 #else
@@ -2614,16 +2602,14 @@ ixgbe_setup_interface(device_t dev, stru
 	 */
 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
 
-	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
+	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
 	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
+	ifp->if_capabilities |= IFCAP_LRO;
 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
 			     |  IFCAP_VLAN_HWTSO
 			     |  IFCAP_VLAN_MTU;
 	ifp->if_capenable = ifp->if_capabilities;
 
-	/* Don't enable LRO by default */
-	ifp->if_capabilities |= IFCAP_LRO;
-
 	/*
 	** Don't turn this on by default, if vlans are
 	** created on another pseudo device (eg. lagg)
@@ -2672,7 +2658,7 @@ ixgbe_config_link(struct adapter *adapte
 			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
 	} else {
 		if (hw->mac.ops.check_link)
-			err = ixgbe_check_link(hw, &autoneg,
+			err = ixgbe_check_link(hw, &adapter->link_speed,
 			    &adapter->link_up, FALSE);
 		if (err)
 			goto out;
@@ -2683,8 +2669,8 @@ ixgbe_config_link(struct adapter *adapte
 		if (err)
 			goto out;
 		if (hw->mac.ops.setup_link)
-                	err = hw->mac.ops.setup_link(hw, autoneg,
-			    negotiate, adapter->link_up);
+                	err = hw->mac.ops.setup_link(hw,
+			    autoneg, adapter->link_up);
 	}
 out:
 	return;
@@ -2823,6 +2809,7 @@ ixgbe_allocate_queues(struct adapter *ad
 		txr = &adapter->tx_rings[i];
 		txr->adapter = adapter;
 		txr->me = i;
+		txr->num_desc = adapter->num_tx_desc;
 
 		/* Initialize the TX side lock */
 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
@@ -2846,7 +2833,7 @@ ixgbe_allocate_queues(struct adapter *ad
 			error = ENOMEM;
 			goto err_tx_desc;
         	}
-#if __FreeBSD_version >= 800000
+#ifndef IXGBE_LEGACY_TX
 		/* Allocate a buf ring */
 		txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
 		    M_WAITOK, &txr->tx_mtx);
@@ -2869,6 +2856,7 @@ ixgbe_allocate_queues(struct adapter *ad
 		/* Set up some basics */
 		rxr->adapter = adapter;
 		rxr->me = i;
+		rxr->num_desc = adapter->num_rx_desc;
 
 		/* Initialize the RX side lock */
 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
@@ -3014,7 +3002,7 @@ ixgbe_setup_transmit_ring(struct tx_ring
 
 	/* Free any existing tx buffers. */
         txbuf = txr->tx_buffers;
-	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
+	for (i = 0; i < txr->num_desc; i++, txbuf++) {
 		if (txbuf->m_head != NULL) {
 			bus_dmamap_sync(txr->txtag, txbuf->map,
 			    BUS_DMASYNC_POSTWRITE);
@@ -3038,8 +3026,8 @@ ixgbe_setup_transmit_ring(struct tx_ring
 			netmap_load_map(txr->txtag, txbuf->map, NMB(slot + si));
 		}
 #endif /* DEV_NETMAP */
-		/* Clear the EOP index */
-		txbuf->eop_index = -1;
+		/* Clear the EOP descriptor pointer */
+		txbuf->eop = NULL;
         }
 
 #ifdef IXGBE_FDIR
@@ -3093,7 +3081,7 @@ ixgbe_initialize_transmit_units(struct a
 		       (tdba & 0x00000000ffffffffULL));
 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
-		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
+		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
 
 		/* Setup the HW Tx Head and Tail descriptor pointers */
 		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
@@ -3103,6 +3091,9 @@ ixgbe_initialize_transmit_units(struct a
 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
 		txr->queue_status = IXGBE_QUEUE_IDLE;
 
+		/* Set the processing limit */
+		txr->process_limit = ixgbe_tx_process_limit;
+
 		/* Disable Head Writeback */
 		switch (hw->mac.type) {
 		case ixgbe_mac_82598EB:
@@ -3204,7 +3195,7 @@ ixgbe_free_transmit_buffers(struct tx_ri
 			tx_buffer->map = NULL;
 		}
 	}
-#if __FreeBSD_version >= 800000
+#ifdef IXGBE_LEGACY_TX
 	if (txr->br != NULL)
 		buf_ring_free(txr->br, M_DEVBUF);
 #endif
@@ -3221,43 +3212,49 @@ ixgbe_free_transmit_buffers(struct tx_ri
 
 /*********************************************************************
  *
- *  Advanced Context Descriptor setup for VLAN or CSUM
+ *  Advanced Context Descriptor setup for VLAN, CSUM or TSO
  *
  **********************************************************************/
 
-static bool
-ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
+static int
+ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
+    u32 *cmd_type_len, u32 *olinfo_status)
 {
-	struct adapter *adapter = txr->adapter;
 	struct ixgbe_adv_tx_context_desc *TXD;
-	struct ixgbe_tx_buf        *tx_buffer;
-	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
 	struct ether_vlan_header *eh;
 	struct ip *ip;
 	struct ip6_hdr *ip6;
-	int  ehdrlen, ip_hlen = 0;
+	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+	int	ehdrlen, ip_hlen = 0;
 	u16	etype;
 	u8	ipproto = 0;
-	bool	offload = TRUE;
-	int ctxd = txr->next_avail_desc;
-	u16 vtag = 0;
-
+	int	offload = TRUE;
+	int	ctxd = txr->next_avail_desc;
+	u16	vtag = 0;
+
+	/* First check if TSO is to be used */
+	if (mp->m_pkthdr.csum_flags & CSUM_TSO)
+		return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status));
 
 	if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
 		offload = FALSE;
 
-	tx_buffer = &txr->tx_buffers[ctxd];
+	/* Indicate the whole packet as payload when not doing TSO */
+       	*olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
+
+	/* Now ready a context descriptor */
 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
 
 	/*
 	** In advanced descriptors the vlan tag must 
-	** be placed into the descriptor itself.
+	** be placed into the context descriptor. Hence
+	** we need to make one even if not doing offloads.
 	*/
 	if (mp->m_flags & M_VLANTAG) {
 		vtag = htole16(mp->m_pkthdr.ether_vtag);
 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
-	} else if (offload == FALSE)
-		return FALSE;
+	} else if (offload == FALSE) /* ... no offload to do */
+		return (0);
 
 	/*
 	 * Determine where frame payload starts.
@@ -3286,6 +3283,7 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, 
 		case ETHERTYPE_IPV6:
 			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
 			ip_hlen = sizeof(struct ip6_hdr);
+			/* XXX-BZ this will go badly in case of ext hdrs. */
 			ipproto = ip6->ip6_nxt;
 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
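One other pattern worth isolating from the (truncated) diff above: the reworked ixgbe_xmit() now handles bus_dmamap_load_mbuf_sg() failure with a single m_defrag() retry on EFBIG instead of duplicating the error handling for every case. A standalone sketch of that single-retry idea is below; map_mbuf_once() is an illustrative name, not a driver function, and the caller is assumed to own the mbuf on non-ENOBUFS errors.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/bus.h>
#include <sys/mbuf.h>
#include <machine/bus.h>

/*
 * Load an mbuf chain for DMA.  If it has too many segments (EFBIG),
 * coalesce it once with m_defrag() and retry; any other error, or a
 * second EFBIG, is simply returned to the caller.
 */
static int
map_mbuf_once(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **m_headp,
    bus_dma_segment_t *segs, int *nsegs)
{
	struct mbuf *m;
	int retried = 0;
	int error;

retry:
	error = bus_dmamap_load_mbuf_sg(tag, map, *m_headp, segs, nsegs,
	    BUS_DMA_NOWAIT);
	if (error == EFBIG && !retried) {
		m = m_defrag(*m_headp, M_NOWAIT);
		if (m == NULL) {
			/* Could not coalesce: drop the chain. */
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;
		retried = 1;
		goto retry;
	}
	return (error);
}
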


