Date:      Mon, 26 Sep 2016 15:12:46 +0300
From:      Slawa Olhovchenkov <slw@zxy.spb.ru>
To:        Julien Charbon <julien.charbon@gmail.com>
Cc:        Konstantin Belousov <kostikbel@gmail.com>, freebsd-stable@FreeBSD.org, hiren panchasara <hiren@strugglingcoder.info>
Subject:   Re: 11.0 stuck on high network load
Message-ID:  <20160926121246.GL2840@zxy.spb.ru>
In-Reply-To: <dc2798ff-2ace-81f7-a563-18ffa1ace990@gmail.com>
References:  <78cbcdc9-f565-1046-c157-2ddd8fcccc62@freebsd.org> <20160919204328.GN2840@zxy.spb.ru> <8ba75d6e-4f01-895e-0aed-53c6c6692cb9@freebsd.org> <20160920202633.GQ2840@zxy.spb.ru> <f644cd52-4377-aa90-123a-3a2887972bbc@freebsd.org> <20160921195155.GW2840@zxy.spb.ru> <e4e0188c-b22b-29af-ed15-b650c3ec4553@gmail.com> <20160923200143.GG2840@zxy.spb.ru> <20160925124626.GI2840@zxy.spb.ru> <dc2798ff-2ace-81f7-a563-18ffa1ace990@gmail.com>

On Mon, Sep 26, 2016 at 11:33:12AM +0200, Julien Charbon wrote:

> >>>  - tcp_input()/tcp_twstart()/tcp_tw_2msl_scan(reuse=1)
> >>
> >> My current hypothesis:
> >>
> >> nginx does a write() (or maybe a close()?) on a socket, the kernel
> >> takes the lock on the first inp in V_twq_2msl, the callout for
> >> pfslowtimo() fires on the same CPU core, and tcp_tw_2msl_scan() ends
> >> up blocked forever on that same inp.
> >>
> >> In this case your modification can't help; before the next try we
> >> need something like yield().
> > 
> > Or maybe a lock leak.
> > Or both.
> 
>  You are totally right, pfslowtimo()/tcp_tw_2msl_scan(reuse=0) is
> infinitely blocked on INP_WLOCK() by "something" (that could be related
> to write()).
> 
>  As I have reached my limit of debugging without WITNESS, could you
> share your /etc/sysctl.conf and /boot/loader.conf files, plus any other
> specific configuration you use (Nginx worker affinity, special Nginx
> options, etc.)?  That way I can try to reproduce it on releng/11.0.

I use a dual-socket server with E5-2620 CPUs.
Dual Intel 10G NICs, with interrupt affinity set to CPUs 6..11.
Nginx workers are pinned to CPUs 0..5.
I.e. CPUs 0..5 run only nginx workers, and NIC IRQ handler activity is
only on CPUs 6..11.
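
For reference, this kind of pinning can be expressed roughly as below
(an illustrative sketch, not necessarily my exact setup; <irq> is a
placeholder for the ix queue interrupt numbers shown by vmstat -i):

nginx.conf (one worker per CPU 0..5):

worker_processes 6;
worker_cpu_affinity 000001 000010 000100 001000 010000 100000;

NIC interrupts (one cpuset call per ix queue IRQ):

cpuset -l 6-11 -x <irq>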

/boot/loader.conf:

kern.geom.label.gptid.enable="0"
zfs_load="YES"
#### generated by conf.pl #########
hw.memtest.tests=0
machdep.hyperthreading_allowed=0
kern.geom.label.disk_ident.enable=0
if_igb_load=yes
if_ix_load=yes
hw.ix.num_queues=3
hw.ix.rxd=4096
hw.ix.txd=4096
hw.ix.rx_process_limit=-1
hw.ix.tx_process_limit=-1
if_lagg_load=YES
net.link.lagg.default_use_flowid=0
accf_http_load=yes
aio_load=yes
cc_htcp_load=yes
kern.ipc.nmbclusters=1048576
net.inet.tcp.reass.maxsegments=32768
net.inet.tcp.hostcache.cachelimit=0
net.inet.tcp.hostcache.hashsize=32768
net.inet.tcp.syncache.hashsize=32768
#net.inet.tcp.tcbhashsize=262144
net.inet.tcp.tcbhashsize=65536
net.inet.tcp.maxtcptw=16384
kern.pin_default_swi=1
kern.pin_pcpu_swi=1
kern.hwpmc.nbuffers=131072
hw.cxgbe.qsize_rxq=16384
hw.cxgbe.qsize_txq=16384
hw.cxgbe.nrxq10g=3
kernel="kernel.VSTREAM"
kernels="kernel"
hw.mps.max_chains=3072
###
hw.vga.textmode=1
uhci_load=yes
ohci_load=yes
ehci_load=yes
xhci_load=yes
ukbd_load=yes
umass_load=yes
###
boot_multicons="YES"
boot_serial="YES"
comconsole_speed="115200"
comconsole_port=760
#console="comconsole,vidconsole"
console="vidconsole,comconsole"
hint.uart.0.flags="0x00"
hint.uart.1.flags="0x10"

/etc/sysctl.conf:

kern.random.sys.harvest.ethernet=0
kern.threads.max_threads_per_proc=20000
net.inet.ip.maxfragpackets=32768
net.inet.ip.fastforwarding=1
kern.ipc.somaxconn=4096
kern.ipc.nmbjumbop=2097152
kern.ipc.maxsockbuf=16777216
net.inet.tcp.sendbuf_max=16777216
net.inet.tcp.recvbuf_max=16777216
net.inet.tcp.sendbuf_inc=16384
net.inet.tcp.sendspace=2097152
#net.inet.tcp.maxtcptw=444800
net.inet.tcp.fast_finwait2_recycle=1
net.inet.tcp.msl=1000
net.inet.tcp.cc.algorithm=htcp
net.inet.tcp.per_cpu_timers=1
#net.inet.tcp.syncookies=0
net.inet6.ip6.auto_linklocal=0
kern.maxfiles=300000
kern.maxfilesperproc=80000
#hw.intr_storm_threshold=9000
vfs.zfs.prefetch_disable=1
vfs.zfs.vdev.max_pending=1000
vfs.zfs.l2arc_noprefetch=0
vfs.zfs.l2arc_norw=0
vfs.zfs.l2arc_write_boost=134217728
vfs.zfs.l2arc_write_max=33554432
vfs.aio.max_aio_procs=512
vfs.aio.max_aio_queue_per_proc=8192
vfs.aio.max_aio_per_proc=8192
vfs.aio.max_aio_queue=65536
net.inet.tcp.finwait2_timeout=5000

kern.corefile=/tmp/%N.%P.core
kern.sugid_coredump=1

Now (after this nginx lockup) I am using your patch with a modification:
it returns NULL at the write lock instead of blocking, and now I see
only mbuf-related work.
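
Roughly, the modified loop looks like the sketch below (pared down; the
function name is mine, and the reference-count cleanup on the bail-out
paths is omitted, so treat it as a diagnostic hack against
sys/netinet/tcp_timewait.c rather than the actual patch):

/*
 * Diagnostic sketch only: the intended change vs. stock 11.0 is that
 * the callout path never blocks on the inpcb write lock, i.e.
 * INP_WLOCK(inp) becomes INP_TRY_WLOCK(inp) with a bail-out on failure.
 */
static void
tcp_tw_2msl_scan_sketch(int reuse)
{
	struct tcptw *tw;
	struct inpcb *inp;

	for (;;) {
		TW_RLOCK(V_tw_lock);
		tw = TAILQ_FIRST(&V_twq_2msl);
		if (tw == NULL || (!reuse && (tw->tw_time - ticks) > 0)) {
			TW_RUNLOCK(V_tw_lock);
			break;
		}
		inp = tw->tw_inpcb;
		in_pcbref(inp);
		TW_RUNLOCK(V_tw_lock);

		if (!INP_INFO_TRY_RLOCK(&V_tcbinfo))
			break;		/* pcbinfo busy, retry on next tick */

		if (!INP_TRY_WLOCK(inp)) {
			/*
			 * Someone else (e.g. the write()/close() path) holds
			 * this inpcb: give up instead of blocking the
			 * softclock thread.
			 */
			INP_INFO_RUNLOCK(&V_tcbinfo);
			break;
		}
		tw = intotw(inp);
		if (in_pcbrele_wlocked(inp)) {
			/* We held the last reference; the inpcb is gone. */
			INP_INFO_RUNLOCK(&V_tcbinfo);
			continue;
		}
		if (tw == NULL) {
			/* tcp_twclose() has already run for this inpcb. */
			INP_WUNLOCK(inp);
			INP_INFO_RUNLOCK(&V_tcbinfo);
			continue;
		}
		tcp_twclose(tw, reuse);	/* drops the inpcb lock */
		INP_INFO_RUNLOCK(&V_tcbinfo);
		if (reuse)
			return;
	}
}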



