From: David C Somayajulu
Date: Fri, 24 Mar 2017 03:30:54 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-8@freebsd.org
Subject: svn commit: r315884 - stable/8/sys/dev/bxe
Message-Id: <201703240330.v2O3UsDf068959@repo.freebsd.org>

Author: davidcs
Date: Fri Mar 24 03:30:54 2017
New Revision: 315884
URL: https://svnweb.freebsd.org/changeset/base/315884

Log:
  MFC r314365

  1. Add state checks in bxe_tx_mq_start_locked() and bxe_tx_mq_start() to
     synchronize threads during interface down or detach.
  2. Add a sysctl to set pause frame parameters.
  3. Increase the maximum number of segments for TSO packets to
     BXE_TSO_MAX_SEGMENTS (32).
  4. Add debug messages for the PHY.
  5. Restrict HW LRO support to FreeBSD versions 8.x and above.
  Submitted by:	Vaishali.Kulkarni@cavium.com

Modified:
  stable/8/sys/dev/bxe/bxe.c
  stable/8/sys/dev/bxe/bxe.h
  stable/8/sys/dev/bxe/bxe_elink.c
  stable/8/sys/dev/bxe/bxe_stats.c
  stable/8/sys/dev/bxe/bxe_stats.h
Directory Properties:
  stable/8/   (props changed)
  stable/8/sys/   (props changed)
  stable/8/sys/dev/   (props changed)

Modified: stable/8/sys/dev/bxe/bxe.c
==============================================================================
--- stable/8/sys/dev/bxe/bxe.c	Fri Mar 24 03:17:59 2017	(r315883)
+++ stable/8/sys/dev/bxe/bxe.c	Fri Mar 24 03:30:54 2017	(r315884)
@@ -27,7 +27,7 @@
 #include 
 __FBSDID("$FreeBSD$");
 
-#define BXE_DRIVER_VERSION "1.78.81"
+#define BXE_DRIVER_VERSION "1.78.90"
 
 #include "bxe.h"
 #include "ecore_sp.h"
@@ -500,7 +500,21 @@ static const struct {
     { STATS_OFFSET32(mbuf_alloc_tpa),
                 4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
     { STATS_OFFSET32(tx_queue_full_return),
-                4, STATS_FLAGS_FUNC, "tx_queue_full_return"}
+                4, STATS_FLAGS_FUNC, "tx_queue_full_return"},
+    { STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
+                4, STATS_FLAGS_FUNC, "bxe_tx_mq_sc_state_failures"},
+    { STATS_OFFSET32(tx_request_link_down_failures),
+                4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"},
+    { STATS_OFFSET32(bd_avail_too_less_failures),
+                4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"},
+    { STATS_OFFSET32(tx_mq_not_empty),
+                4, STATS_FLAGS_FUNC, "tx_mq_not_empty"},
+    { STATS_OFFSET32(nsegs_path1_errors),
+                4, STATS_FLAGS_FUNC, "nsegs_path1_errors"},
+    { STATS_OFFSET32(nsegs_path2_errors),
+                4, STATS_FLAGS_FUNC, "nsegs_path2_errors"}
+
+
 };
 
 static const struct {
@@ -613,7 +627,19 @@ static const struct {
     { Q_STATS_OFFSET32(mbuf_alloc_tpa),
         4, "mbuf_alloc_tpa"},
     { Q_STATS_OFFSET32(tx_queue_full_return),
-        4, "tx_queue_full_return"}
+        4, "tx_queue_full_return"},
+    { Q_STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
+        4, "bxe_tx_mq_sc_state_failures"},
+    { Q_STATS_OFFSET32(tx_request_link_down_failures),
+        4, "tx_request_link_down_failures"},
+    { Q_STATS_OFFSET32(bd_avail_too_less_failures),
+        4, "bd_avail_too_less_failures"},
+    { Q_STATS_OFFSET32(tx_mq_not_empty),
+        4, "tx_mq_not_empty"},
+    { Q_STATS_OFFSET32(nsegs_path1_errors),
+        4, "nsegs_path1_errors"},
+    { Q_STATS_OFFSET32(nsegs_path2_errors),
+        4, "nsegs_path2_errors"}
 };
 
 #define BXE_NUM_ETH_STATS ARRAY_SIZE(bxe_eth_stats_arr)
@@ -683,6 +709,7 @@ static void bxe_handle_fp_tq(void *conte
 
 static int bxe_add_cdev(struct bxe_softc *sc);
 static void bxe_del_cdev(struct bxe_softc *sc);
+int bxe_grc_dump(struct bxe_softc *sc);
 static int bxe_alloc_buf_rings(struct bxe_softc *sc);
 static void bxe_free_buf_rings(struct bxe_softc *sc);
 
@@ -5224,12 +5251,24 @@ bxe_tx_encap(struct bxe_fastpath *fp, st
             fp->eth_q_stats.tx_dma_mapping_failure++;
             /* No sense in trying to defrag/copy chain, drop it. :( */
             rc = error;
-        }
-        else {
-            /* if the chain is still too long then drop it */
-            if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
-                bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
-                rc = ENODEV;
+        } else {
+            /* if the chain is still too long then drop it */
+            if(m0->m_pkthdr.csum_flags & CSUM_TSO) {
+                /*
+                 * in case TSO is enabled nsegs should be checked against
+                 * BXE_TSO_MAX_SEGMENTS
+                 */
+                if (__predict_false(nsegs > BXE_TSO_MAX_SEGMENTS)) {
+                    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
+                    fp->eth_q_stats.nsegs_path1_errors++;
+                    rc = ENODEV;
+                }
+            } else {
+                if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
+                    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
+                    fp->eth_q_stats.nsegs_path2_errors++;
+                    rc = ENODEV;
+                }
             }
         }
     }
@@ -5629,6 +5668,11 @@ bxe_tx_mq_start_locked(struct bxe_softc
 
     BXE_FP_TX_LOCK_ASSERT(fp);
 
+    if (sc->state != BXE_STATE_OPEN)  {
+        fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
+        return ENETDOWN;
+    }
+
     if (!tx_br) {
         BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
         return (EINVAL);
@@ -5739,6 +5783,11 @@ bxe_tx_mq_start(struct ifnet *ifp,
 
     fp = &sc->fp[fp_index];
 
+    if (sc->state != BXE_STATE_OPEN)  {
+        fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
+        return ENETDOWN;
+    }
+
     if (BXE_FP_TX_TRYLOCK(fp)) {
         rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
         BXE_FP_TX_UNLOCK(fp);
@@ -5759,7 +5808,7 @@ bxe_mq_flush(struct ifnet *ifp)
     for (i = 0; i < sc->num_queues; i++) {
         fp = &sc->fp[i];
 
-        if (fp->state != BXE_FP_STATE_OPEN) {
+        if (fp->state != BXE_FP_STATE_IRQ) {
             BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
                   fp->index, fp->state);
             continue;
@@ -5976,6 +6025,7 @@ bxe_free_mem(struct bxe_softc *sc)
 static int
 bxe_alloc_mem(struct bxe_softc *sc)
 {
+    int context_size;
     int allocated;
     int i;
 
@@ -6968,7 +7018,7 @@ bxe_link_attn(struct bxe_softc *sc)
     /* Make sure that we are synced with the current statistics */
     bxe_stats_handle(sc, STATS_EVENT_STOP);
-
+    BLOGI(sc, "link_vars phy_flags : %x\n", sc->link_vars.phy_flags);
     elink_link_update(&sc->link_params, &sc->link_vars);
 
     if (sc->link_vars.link_up) {
@@ -11160,7 +11210,9 @@ bxe_get_q_flags(struct bxe_softc *sc,
     if (sc->ifnet->if_capenable & IFCAP_LRO) {
         bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
+#if __FreeBSD_version >= 800000
         bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
+#endif
     }
 
     if (leading) {
@@ -11597,13 +11649,13 @@ static void
 bxe_calc_fc_adv(struct bxe_softc *sc)
 {
     uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
+
+
+    sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
+                                       ADVERTISED_Pause);
+
     switch (sc->link_vars.ieee_fc &
             MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
-    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
-    default:
-        sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
-                                           ADVERTISED_Pause);
-        break;
 
     case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
         sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
@@ -11613,6 +11665,10 @@ bxe_calc_fc_adv(struct bxe_softc *sc)
     case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
         sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
         break;
+
+    default:
+        break;
+
     }
 }
 
@@ -11694,15 +11750,17 @@ bxe_link_report_locked(struct bxe_softc
         return;
     }
 
+    ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x\n",
+                   cur_data.link_report_flags, sc->last_reported_link.link_report_flags);
     sc->link_cnt++;
+    ELINK_DEBUG_P1(sc, "link status change count = %x\n", sc->link_cnt);
 
     /* report new link params and remember the state for the next time */
     memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
 
     if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
                      &cur_data.link_report_flags)) {
         if_link_state_change(sc->ifnet, LINK_STATE_DOWN);
-        BLOGI(sc, "NIC Link is Down\n");
     } else {
         const char *duplex;
         const char *flow;
@@ -11710,8 +11768,10 @@ bxe_link_report_locked(struct bxe_softc
         if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
                                    &cur_data.link_report_flags)) {
             duplex = "full";
+            ELINK_DEBUG_P0(sc, "link set to full duplex\n");
         } else {
             duplex = "half";
+            ELINK_DEBUG_P0(sc, "link set to half duplex\n");
         }
 
         /*
@@ -12678,6 +12738,7 @@ bxe_init_ifnet(struct bxe_softc *sc)
     ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
     sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
 
+    BLOGI(sc, "IFMEDIA flags : %x\n", sc->ifmedia.ifm_media);
     /* allocate the ifnet structure */
     if ((ifp = if_alloc(IFT_ETHER)) == NULL) {
@@ -14028,6 +14089,8 @@ bxe_link_settings_supported(struct bxe_s
 
     BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
           sc->port.supported[0], sc->port.supported[1]);
+    ELINK_DEBUG_P2(sc, "PHY supported 0=0x%08x 1=0x%08x\n",
+                   sc->port.supported[0], sc->port.supported[1]);
 }
 
 static void
@@ -14092,6 +14155,8 @@ bxe_link_settings_requested(struct bxe_s
                 sc->link_params.req_duplex[idx] = DUPLEX_HALF;
                 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
                                               ADVERTISED_TP);
+                ELINK_DEBUG_P1(sc, "driver requesting DUPLEX_HALF req_duplex = %x!\n",
+                               sc->link_params.req_duplex[idx]);
             } else {
                 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
                           "speed_cap_mask=0x%08x\n",
@@ -14196,6 +14261,11 @@ bxe_link_settings_requested(struct bxe_s
               sc->link_params.req_duplex[idx],
               sc->link_params.req_flow_ctrl[idx],
               sc->port.advertising[idx]);
+        ELINK_DEBUG_P3(sc, "req_line_speed=%d req_duplex=%d "
+                       "advertising=0x%x\n",
+                       sc->link_params.req_line_speed[idx],
+                       sc->link_params.req_duplex[idx],
+                       sc->port.advertising[idx]);
     }
 }
 
@@ -14208,11 +14278,12 @@ bxe_get_phy_info(struct bxe_softc *sc)
 
     /* shmem data already read in bxe_get_shmem_info() */
 
-    BLOGD(sc, DBG_LOAD, "lane_config=0x%08x speed_cap_mask0=0x%08x "
+    ELINK_DEBUG_P3(sc, "lane_config=0x%08x speed_cap_mask0=0x%08x "
                 "link_config0=0x%08x\n",
                sc->link_params.lane_config,
                sc->link_params.speed_cap_mask[0],
                sc->port.link_config[0]);
+
 
     bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
     bxe_link_settings_requested(sc);
@@ -14243,6 +14314,7 @@ bxe_get_phy_info(struct bxe_softc *sc)
 
     /* get the media type */
     bxe_media_detect(sc);
+    ELINK_DEBUG_P1(sc, "detected media type\n", sc->media);
 }
 
 static void
@@ -15598,6 +15670,86 @@ bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARG
     return (sysctl_handle_64(oidp, &value, 0, req));
 }
 
+static void bxe_force_link_reset(struct bxe_softc *sc)
+{
+
+        bxe_acquire_phy_lock(sc);
+        elink_link_reset(&sc->link_params, &sc->link_vars, 1);
+        bxe_release_phy_lock(sc);
+}
+
+static int
+bxe_sysctl_pauseparam(SYSCTL_HANDLER_ARGS)
+{
+        struct bxe_softc *sc = (struct bxe_softc *)arg1;;
+        uint32_t cfg_idx = bxe_get_link_cfg_idx(sc);
+        int rc = 0;
+        int error;
+        int result;
+
+
+        error = sysctl_handle_int(oidp, &sc->bxe_pause_param, 0, req);
+
+        if (error || !req->newptr) {
+                return (error);
+        }
+        if ((sc->bxe_pause_param < 0) ||  (sc->bxe_pause_param > 8)) {
+                BLOGW(sc, "invalid pause param (%d) - use intergers between 1 & 8\n",sc->bxe_pause_param);
+                sc->bxe_pause_param = 8;
+        }
+
+        result = (sc->bxe_pause_param << PORT_FEATURE_FLOW_CONTROL_SHIFT);
+
+
+        if((result & 0x400) && !(sc->port.supported[cfg_idx] & ELINK_SUPPORTED_Autoneg)) {
+                BLOGW(sc, "Does not support Autoneg pause_param %d\n", sc->bxe_pause_param);
+                return -EINVAL;
+        }
+
+        if(IS_MF(sc))
+                return 0;
+
+        sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO;
+        if(result & ELINK_FLOW_CTRL_RX)
+                sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_RX;
+
+        if(result & ELINK_FLOW_CTRL_TX)
+                sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_TX;
+        if(sc->link_params.req_flow_ctrl[cfg_idx] == ELINK_FLOW_CTRL_AUTO)
+                sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_NONE;
+
+        if(result & 0x400) {
+                if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) {
+                        sc->link_params.req_flow_ctrl[cfg_idx] =
+                                ELINK_FLOW_CTRL_AUTO;
+                }
+                sc->link_params.req_fc_auto_adv = 0;
+                if (result & ELINK_FLOW_CTRL_RX)
+                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_RX;
+
+                if (result & ELINK_FLOW_CTRL_TX)
+                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_TX;
+                if (!sc->link_params.req_fc_auto_adv)
+                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_NONE;
+        }
+        if (IS_PF(sc)) {
+                if (sc->link_vars.link_up) {
+                        bxe_stats_handle(sc, STATS_EVENT_STOP);
+                }
+                if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) {
+                        bxe_force_link_reset(sc);
+                        bxe_acquire_phy_lock(sc);
+
+                        rc = elink_phy_init(&sc->link_params, &sc->link_vars);
+
+                        bxe_release_phy_lock(sc);
+
+                        bxe_calc_fc_adv(sc);
+                }
+        }
+        return rc;
+}
+
+
 static void
 bxe_add_sysctls(struct bxe_softc *sc)
 {
@@ -15698,6 +15850,12 @@ bxe_add_sysctls(struct bxe_softc *sc)
                     CTLFLAG_RW, &sc->rx_budget, 0,
                     "rx processing budget");
 
+    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_param",
+                    CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
+                    bxe_sysctl_pauseparam, "IU",
+                    "need pause frames- DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8");
+
+
     SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
                     CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
                     bxe_sysctl_state, "IU", "dump driver state");
@@ -18032,6 +18190,7 @@ bxe_reset_port(struct bxe_softc *sc)
     int port = SC_PORT(sc);
     uint32_t val;
 
+    ELINK_DEBUG_P0(sc, "bxe_reset_port called\n");
     /* reset physical Link */
     bxe_link_reset(sc);
 
@@ -18534,8 +18693,6 @@ bxe_grc_dump(struct bxe_softc *sc)
     uint32_t reg_val;
     uint32_t reg_addr;
     uint32_t cmd_offset;
-    int context_size;
-    int allocated;
     struct ecore_ilt *ilt = SC_ILT(sc);
     struct bxe_fastpath *fp;
     struct ilt_client_info *ilt_cli;
@@ -18630,67 +18787,80 @@ bxe_grc_dump(struct bxe_softc *sc)
 
     bxe_pretend_func(sc, SC_ABS_FUNC(sc));
 
-    context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
-    for (i = 0, allocated = 0; allocated < context_size; i++) {
-
-        BLOGI(sc, "cdu_context i %d paddr %#jx vaddr %p size 0x%zx\n", i,
-            (uintmax_t)sc->context[i].vcxt_dma.paddr,
-            sc->context[i].vcxt_dma.vaddr,
-            sc->context[i].size);
-        allocated += sc->context[i].size;
-    }
-    BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n",
-        (uintmax_t)sc->fw_stats_req_mapping,
-        (uintmax_t)sc->fw_stats_data_mapping,
-        sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size));
-    BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%zx\n",
-        (void *)sc->def_sb_dma.paddr, sc->def_sb,
-        sizeof(struct host_sp_status_block));
-    BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n",
-        (uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE);
-    BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n",
-        (uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr,
-        sizeof(struct bxe_slowpath));
-    BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n",
-        (uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE);
-    BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n",
-        (uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr,
-        FW_BUF_SIZE);
-    for (i = 0; i < sc->num_queues; i++) {
-        fp = &sc->fp[i];
-        BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
-            (uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr,
-            sizeof(union bxe_host_hc_status_block));
-        BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
-            (uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr,
-            (BCM_PAGE_SIZE * TX_BD_NUM_PAGES));
-        BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
-            (uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr,
-            (BCM_PAGE_SIZE * RX_BD_NUM_PAGES));
-        BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
-            (uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr,
-            (BCM_PAGE_SIZE * RCQ_NUM_PAGES));
-        BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
-            (uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr,
-            (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES));
-    }
-
-    ilt_cli = &ilt->clients[1];
-    for (i = ilt_cli->start; i <= ilt_cli->end; i++) {
-        BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n",
-            (uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr),
-            ((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE);
-    }
-
-
-    cmd_offset = DMAE_REG_CMD_MEM;
-    for (i = 0; i < 224; i++) {
-        reg_addr = (cmd_offset +(i * 4));
-        reg_val = REG_RD(sc, reg_addr);
-        BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n",i,
-            reg_addr, reg_val);
-    }
+    if(sc->state == BXE_STATE_OPEN) {
+        if(sc->fw_stats_req != NULL) {
+            BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n",
+                (uintmax_t)sc->fw_stats_req_mapping,
+                (uintmax_t)sc->fw_stats_data_mapping,
+                sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size));
+        }
+        if(sc->def_sb != NULL) {
+            BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%zx\n",
+                (void *)sc->def_sb_dma.paddr, sc->def_sb,
+                sizeof(struct host_sp_status_block));
+        }
+        if(sc->eq_dma.vaddr != NULL) {
+            BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n",
+                (uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE);
+        }
+        if(sc->sp_dma.vaddr != NULL) {
+            BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n",
+                (uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr,
+                sizeof(struct bxe_slowpath));
+        }
+        if(sc->spq_dma.vaddr != NULL) {
+            BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n",
+                (uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE);
+        }
+        if(sc->gz_buf_dma.vaddr != NULL) {
+            BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n",
+                (uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr,
+                FW_BUF_SIZE);
+        }
+        for (i = 0; i < sc->num_queues; i++) {
+            fp = &sc->fp[i];
+            if(fp->sb_dma.vaddr != NULL && fp->tx_dma.vaddr != NULL &&
+                        fp->rx_dma.vaddr != NULL && fp->rcq_dma.vaddr != NULL &&
+                        fp->rx_sge_dma.vaddr != NULL) {
+
+                BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
+                    (uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr,
+                    sizeof(union bxe_host_hc_status_block));
+                BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
+                    (uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr,
+                    (BCM_PAGE_SIZE * TX_BD_NUM_PAGES));
+                BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
+                    (uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr,
+                    (BCM_PAGE_SIZE * RX_BD_NUM_PAGES));
+                BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
+                    (uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr,
+                    (BCM_PAGE_SIZE * RCQ_NUM_PAGES));
+                BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
+                    (uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr,
+                    (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES));
+            }
+        }
+        if(ilt != NULL ) {
+            ilt_cli = &ilt->clients[1];
+            if(ilt->lines != NULL) {
+                for (i = ilt_cli->start; i <= ilt_cli->end; i++) {
+                    BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n",
+                        (uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr),
+                        ((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE);
+                }
+            }
+        }
+
+
+        cmd_offset = DMAE_REG_CMD_MEM;
+        for (i = 0; i < 224; i++) {
+            reg_addr = (cmd_offset +(i * 4));
+            reg_val = REG_RD(sc, reg_addr);
+            BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n",i,
+                reg_addr, reg_val);
+        }
+    }
 
     BLOGI(sc, "Collection of grcdump done\n");
     sc->grcdump_done = 1;

Modified: stable/8/sys/dev/bxe/bxe.h
==============================================================================
--- stable/8/sys/dev/bxe/bxe.h	Fri Mar 24 03:17:59 2017	(r315883)
+++ stable/8/sys/dev/bxe/bxe.h	Fri Mar 24 03:30:54 2017	(r315884)
@@ -1332,7 +1332,7 @@ struct bxe_softc {
     struct ifmedia  ifmedia; /* network interface media structure */
     int             media;
 
-    int state; /* device state */
+    volatile int state; /* device state */
 #define BXE_STATE_CLOSED                 0x0000
 #define BXE_STATE_OPENING_WAITING_LOAD   0x1000
 #define BXE_STATE_OPENING_WAITING_PORT   0x2000
@@ -1791,7 +1791,7 @@ struct bxe_softc {
     unsigned int trigger_grcdump;
     unsigned int  grcdump_done;
     unsigned int grcdump_started;
-
+    int bxe_pause_param;
     void *eeprom;
 }; /* struct bxe_softc */
 
@@ -2297,7 +2297,6 @@ void bxe_dump_mem(struct bxe_softc *sc,
                   uint8_t *mem, uint32_t len);
 void bxe_dump_mbuf_data(struct bxe_softc *sc, char *pTag,
                         struct mbuf *m, uint8_t contents);
-extern int bxe_grc_dump(struct bxe_softc *sc);
 
 #if __FreeBSD_version >= 800000
 #if __FreeBSD_version >= 1000000

Modified: stable/8/sys/dev/bxe/bxe_elink.c
==============================================================================
--- stable/8/sys/dev/bxe/bxe_elink.c	Fri Mar 24 03:17:59 2017	(r315883)
+++ stable/8/sys/dev/bxe/bxe_elink.c	Fri Mar 24 03:30:54 2017	(r315884)
@@ -4333,6 +4333,7 @@ static void elink_pause_resolve(struct e
 	 * although we advertised both, need to enable
 	 * RX only.
 	 */
+
 	if (params->req_fc_auto_adv == ELINK_FLOW_CTRL_BOTH) {
 		ELINK_DEBUG_P0(sc, "Flow Control: RX & TX\n");
 		vars->flow_ctrl = ELINK_FLOW_CTRL_BOTH;
@@ -5538,6 +5539,7 @@ static void elink_sync_link(struct elink
 	vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
 	if (vars->link_up) {
 		ELINK_DEBUG_P0(sc, "phy link up\n");
+		ELINK_DEBUG_P1(sc, "link status = %x\n", vars->link_status);
 
 		vars->phy_link_up = 1;
 		vars->duplex = DUPLEX_FULL;
@@ -6443,6 +6445,8 @@ static elink_status_t elink_get_link_spe
 		vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
 		vars->mac_type = ELINK_MAC_TYPE_NONE;
 	}
+	ELINK_DEBUG_P2(sc, " in elink_get_link_speed_duplex vars->link_status = %x, vars->duplex = %x\n",
+			vars->link_status, vars->duplex);
 	ELINK_DEBUG_P2(sc, " phy_link_up %x line_speed %d\n",
 		    vars->phy_link_up, vars->line_speed);
 	return ELINK_STATUS_OK;
@@ -6462,8 +6466,16 @@ static elink_status_t elink_link_setting
 			  MDIO_REG_BANK_GP_STATUS,
 			  MDIO_GP_STATUS_TOP_AN_STATUS1,
 			  &gp_status);
-	if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
+	if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS) {
 		duplex = DUPLEX_FULL;
+		ELINK_DEBUG_P1(sc, "duplex status read from phy is = %x\n",
+				duplex);
+	} else {
+		ELINK_DEBUG_P1(sc, "phy status does not allow interface to be FULL_DUPLEX : %x\n",
+				gp_status);
+	}
+
+
 	if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)
 		link_up = 1;
 	speed_mask = gp_status & ELINK_GP_STATUS_SPEED_MASK;
@@ -6539,6 +6551,8 @@ static elink_status_t elink_warpcore_rea
 		elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
 				MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up);
 		link_up &= 0x1;
+		ELINK_DEBUG_P1(sc, "params->loopback_mode link_up read = %x\n",
+				link_up);
 	} else if ((phy->req_line_speed > ELINK_SPEED_10000) &&
 		   (phy->supported & ELINK_SUPPORTED_20000baseMLD2_Full)) {
 		uint16_t temp_link_up;
@@ -6568,6 +6582,8 @@ static elink_status_t elink_warpcore_rea
 		elink_cl45_read(sc, phy, MDIO_AN_DEVAD,
 				MDIO_AN_REG_STATUS, &an_link);
 		link_up |= (an_link & (1<<2));
+		ELINK_DEBUG_P2(sc,"an_link = %x, link_up = %x\n", an_link,
+				link_up);
 	}
 	if (link_up && ELINK_SINGLE_MEDIA_DIRECT(params)) {
 		uint16_t pd, gp_status4;
@@ -6587,12 +6603,17 @@ static elink_status_t elink_warpcore_rea
 			if (pd & (1<<15))
 				vars->link_status |=
 					LINK_STATUS_PARALLEL_DETECTION_USED;
+			ELINK_DEBUG_P2(sc, "pd = %x, link_status = %x\n",
+					pd, vars->link_status);
 		}
 		elink_ext_phy_resolve_fc(phy, params, vars);
 		vars->duplex = duplex;
+		ELINK_DEBUG_P3(sc, " ELINK_SINGLE_MEDIA_DIRECT duplex %x flow_ctrl 0x%x link_status 0x%x\n",
+				vars->duplex, vars->flow_ctrl, vars->link_status);
 		}
 	}
-
+	ELINK_DEBUG_P3(sc, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
+			vars->duplex, vars->flow_ctrl, vars->link_status);
 	if ((vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) &&
 	    ELINK_SINGLE_MEDIA_DIRECT(params)) {
 		uint16_t val;
@@ -6607,7 +6628,8 @@ static elink_status_t elink_warpcore_rea
 			  MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR))
 			vars->link_status |=
 				LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
-
+		ELINK_DEBUG_P2(sc, "val = %x, link_status = %x\n",
+				val, vars->link_status);
 		elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
 				MDIO_WC_REG_DIGITAL3_LP_UP1, &val);
 
@@ -6617,6 +6639,8 @@ static elink_status_t elink_warpcore_rea
 		if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH))
 			vars->link_status |=
 				LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+		ELINK_DEBUG_P2(sc, "val = %x, link_status = %x\n",
+				val, vars->link_status);
 	}
 
 
@@ -7808,6 +7832,19 @@ elink_status_t elink_link_update(struct
 		ELINK_DEBUG_P1(sc, "Active external phy selected: %x\n",
 			   active_external_phy);
 	}
+
ELINK_DEBUG_P3(sc, "vars : phy_flags = %x, mac_type = %x, phy_link_up = %x\n", + vars->phy_flags, vars->mac_type, vars->phy_link_up); + ELINK_DEBUG_P3(sc, "vars : link_up = %x, line_speed = %x, duplex = %x\n", + vars->link_up, vars->line_speed, vars->duplex); + ELINK_DEBUG_P3(sc, "vars : flow_ctrl = %x, ieee_fc = %x, link_status = %x\n", + vars->flow_ctrl, vars->ieee_fc, vars->link_status); + ELINK_DEBUG_P3(sc, "vars : eee_status = %x, fault_detected = %x, check_kr2_recovery_cnt = %x\n", + vars->eee_status, vars->fault_detected, vars->check_kr2_recovery_cnt); + ELINK_DEBUG_P3(sc, "vars : periodic_flags = %x, aeu_int_mask = %x, rx_tx_asic_rst = %x\n", + vars->periodic_flags, vars->aeu_int_mask, vars->rx_tx_asic_rst); + ELINK_DEBUG_P2(sc, "vars : turn_to_run_wc_rt = %x, rsrv2 = %x\n", + vars->turn_to_run_wc_rt, vars->rsrv2); for (phy_index = ELINK_EXT_PHY1; phy_index < params->num_phys; phy_index++) { @@ -7835,6 +7872,7 @@ elink_status_t elink_link_update(struct " link speed %d\n", vars->line_speed, ext_phy_line_speed); vars->phy_link_up = 0; + ELINK_DEBUG_P0(sc, "phy_link_up set to 0\n"); } else if (prev_line_speed != vars->line_speed) { REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); @@ -7883,6 +7921,12 @@ elink_status_t elink_link_update(struct ELINK_SINGLE_MEDIA_DIRECT(params)) && (phy_vars[active_external_phy].fault_detected == 0)); + if(vars->link_up) { + ELINK_DEBUG_P0(sc, "local phy and external phy are up\n"); + } else { + ELINK_DEBUG_P0(sc, "either local phy or external phy or both are down\n"); + } + /* Update the PFC configuration in case it was changed */ if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED) vars->link_status |= LINK_STATUS_PFC_ENABLED; @@ -12943,6 +12987,8 @@ static void elink_populate_preemphasis(s phy->tx_preemphasis[i << 1] = ((tx>>16) & 0xffff); phy->tx_preemphasis[(i << 1) + 1] = (tx & 0xffff); + ELINK_DEBUG_P2(sc,"phy->rx_preemphasis = %x, phy->tx_preemphasis = %x\n", + phy->rx_preemphasis[i << 1], phy->tx_preemphasis[i << 1]); } } @@ -13070,6 +13116,8 @@ static elink_status_t elink_populate_int phy->flags |= ELINK_FLAGS_MDC_MDIO_WA; else phy->flags |= ELINK_FLAGS_MDC_MDIO_WA_B0; + ELINK_DEBUG_P3(sc, "media_type = %x, flags = %x, supported = %x\n", + phy->media_type, phy->flags, phy->supported); } else { switch (switch_cfg) { @@ -13300,6 +13348,9 @@ static void elink_phy_def_cfg(struct eli break; } + ELINK_DEBUG_P2(sc, "Default config phy idx %x, req_duplex config %x\n", + phy_index, phy->req_duplex); + switch (link_config & PORT_FEATURE_FLOW_CONTROL_MASK) { case PORT_FEATURE_FLOW_CONTROL_AUTO: phy->req_flow_ctrl = ELINK_FLOW_CTRL_AUTO; @@ -13317,6 +13368,8 @@ static void elink_phy_def_cfg(struct eli phy->req_flow_ctrl = ELINK_FLOW_CTRL_NONE; break; } + ELINK_DEBUG_P3(sc, "Requested Duplex = %x, line_speed = %x, flow_ctrl = %x\n", + phy->req_duplex, phy->req_line_speed, phy->req_flow_ctrl); } uint32_t elink_phy_selection(struct elink_params *params) @@ -13924,6 +13977,18 @@ elink_status_t elink_phy_init(struct eli /* Check if link flap can be avoided */ lfa_status = elink_check_lfa(params); + ELINK_DEBUG_P3(sc, " params : port = %x, loopback_mode = %x req_duplex = %x\n", + params->port, params->loopback_mode, params->req_duplex[0]); + ELINK_DEBUG_P3(sc, " params : switch_cfg = %x, lane_config = %x req_duplex[1] = %x\n", + params->switch_cfg, params->lane_config, params->req_duplex[1]); + ELINK_DEBUG_P3(sc, " params : chip_id = %x, feature_config_flags = %x, num_phys = %x\n", + params->chip_id, params->feature_config_flags, 
+			params->num_phys);
+	ELINK_DEBUG_P3(sc, " params : rsrv = %x, eee_mode = %x, hw_led_mode = x\n",
+			params->rsrv, params->eee_mode, params->hw_led_mode);
+	ELINK_DEBUG_P3(sc, " params : multi_phy = %x, req_fc_auto_adv = %x, link_flags = %x\n",
+			params->multi_phy_config, params->req_fc_auto_adv, params->link_flags);
+	ELINK_DEBUG_P2(sc, " params : lfa_base = %x, link_attr = %x\n",
+			params->lfa_base, params->link_attr_sync);
 	if (lfa_status == 0) {
 		ELINK_DEBUG_P0(sc, "Link Flap Avoidance in progress\n");
 		return elink_avoid_link_flap(params, vars);

Modified: stable/8/sys/dev/bxe/bxe_stats.c
==============================================================================
--- stable/8/sys/dev/bxe/bxe_stats.c	Fri Mar 24 03:17:59 2017	(r315883)
+++ stable/8/sys/dev/bxe/bxe_stats.c	Fri Mar 24 03:30:54 2017	(r315884)
@@ -36,6 +36,8 @@ __FBSDID("$FreeBSD$");
 #define BITS_PER_LONG 64
 #endif
 
+extern int bxe_grc_dump(struct bxe_softc *sc);
+
 static inline long
 bxe_hilo(uint32_t *hiref)
 {

Modified: stable/8/sys/dev/bxe/bxe_stats.h
==============================================================================
--- stable/8/sys/dev/bxe/bxe_stats.h	Fri Mar 24 03:17:59 2017	(r315883)
+++ stable/8/sys/dev/bxe/bxe_stats.h	Fri Mar 24 03:30:54 2017	(r315884)
@@ -266,6 +266,14 @@ struct bxe_eth_stats {
 
     /* num. of times tx queue full occured */
     uint32_t tx_queue_full_return;
+
+    /* debug stats */
+    uint32_t bxe_tx_mq_sc_state_failures;
+    uint32_t tx_request_link_down_failures;
+    uint32_t bd_avail_too_less_failures;
+    uint32_t tx_mq_not_empty;
+    uint32_t nsegs_path1_errors;
+    uint32_t nsegs_path2_errors;
+
 };
 
 
@@ -372,6 +380,15 @@ struct bxe_eth_q_stats {
 
     /* num. of times tx queue full occured */
     uint32_t tx_queue_full_return;
+
+    /* debug stats */
+    uint32_t bxe_tx_mq_sc_state_failures;
+    uint32_t tx_request_link_down_failures;
+    uint32_t bd_avail_too_less_failures;
+    uint32_t tx_mq_not_empty;
+    uint32_t nsegs_path1_errors;
+    uint32_t nsegs_path2_errors;
+
 };
 
 struct bxe_eth_stats_old {
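
A usage note on item 2 of the log: the new pause_param knob is registered in the
per-device sysctl tree, so on a system running this change it should normally be
reachable as dev.bxe.<unit>.pause_param (unit 0 is assumed in the sketch below
purely for illustration; it is not part of the commit). A minimal userland sketch
that sets and reads the knob via sysctlbyname(3):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	/*
	 * 3 requests both RX and TX pause frames per the sysctl description
	 * string added above; dev.bxe.0.pause_param assumes unit 0.
	 */
	int want = 3;
	int cur;
	size_t len = sizeof(cur);

	/* write the new pause parameter */
	if (sysctlbyname("dev.bxe.0.pause_param", NULL, NULL,
	    &want, sizeof(want)) != 0) {
		perror("sysctlbyname(set)");
		return (1);
	}
	/* read it back to confirm the driver accepted it */
	if (sysctlbyname("dev.bxe.0.pause_param", &cur, &len, NULL, 0) != 0) {
		perror("sysctlbyname(get)");
		return (1);
	}
	printf("pause_param is now %d\n", cur);
	return (0);
}

The same thing can be done from the shell with sysctl(8). Writing the value runs
bxe_sysctl_pauseparam(), which validates the parameter and, when the interface is
up, resets the link and re-initializes the PHY so the new flow-control setting
takes effect.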