Date:      Sat, 17 Dec 2011 10:29:42 +0200
From:      Коньков Евгений <kes-kes@yandex.ru>
To:        freebsd-questions@freebsd.org
Subject:   high-load system does not use all CPU time
Message-ID:  <1374625746.20111217102942@yandex.ru>


How can I debug why the system does not use its free CPU resources?

In these pictures you can see that total CPU usage cannot exceed 400 ticks:
http://piccy.info/view3/2368839/c9022754d5fcd64aff04482dd360b5b2/
http://piccy.info/view3/2368837/a12aeed98681ed10f1a22f5b5edc5abc/
http://piccy.info/view3/2368836/da6a67703af80eb0ab8088ab8421385c/


In these pictures you can see that the problems begin with traffic on re0,
when the CPU load rises to its "maximum":
http://piccy.info/view3/2368834/512139edc56eea736881affcda490eca/
http://piccy.info/view3/2368827/d27aead22eff69fd1ec2b6aa15e2cea3/

But there is still about 25% CPU idle at that moment.
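
For reference, one way to see whether the netisr queues themselves are
saturating (and dropping packets) is to look at their statistics; if I
remember correctly this is available since FreeBSD 8.x:

# netstat -Q         # netisr dispatch policy, per-CPU workstreams and
                     # queued/dispatched/dropped counters per protocol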

# sysctl -a net.isr
net.isr.numthreads: 4
net.isr.maxprot: 16
net.isr.defaultqlimit: 256
net.isr.maxqlimit: 10240
net.isr.bindthreads: 0
net.isr.maxthreads: 4
net.isr.direct: 0
net.isr.direct_force: 0
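
With net.isr.direct=0 every input packet is deferred to one of the netisr
software-interrupt threads instead of being processed in the NIC interrupt
context. As an experiment (only a sketch of what could be tried, not a
recommendation), direct dispatch can be toggled at runtime:

# sysctl net.isr.direct=1        # process input packets in the interrupt thread
# sysctl net.isr.direct_force=0  # leave forced direct dispatch off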

# sysctl -a kern.smp
kern.smp.forward_signal_enabled: 1
kern.smp.topology: 0
kern.smp.cpus: 4
kern.smp.disabled: 0
kern.smp.active: 1
kern.smp.maxcpus: 32
kern.smp.maxid: 3


# uname -a
FreeBSD flux 9.0-CURRENT FreeBSD 9.0-CURRENT #4: Fri Jun 10 01:30:12 UTC 2011     :/usr/obj/usr/src/sys/PAE_KES  i386

sysctl.conf
#ngctl: can't create node: No buffer space available
#kern.ipc.maxsockbuf=8388608
#ngctl: send msg: No buffer space available
net.graph.recvspace=524288
#Does this need tuning? net.graph.maxdgram=524288

#So that after a pipe the packet continues through the firewall ruleset
net.inet.ip.fw.one_pass=0
#Alternatively, one can use the command: ipfw disable one_pass

#man ipfw: fast mode allows certain packets to bypass the dummynet scheduler if the packet flow does not exceed the pipe's bandwidth
net.inet.ip.dummynet.io_fast=1
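
For context, a minimal dummynet setup where one_pass and io_fast matter (the
pipe number, bandwidth and rule number here are hypothetical):

# ipfw pipe 1 config bw 10Mbit/s               # hypothetical shaping pipe
# ipfw add 100 pipe 1 ip from any to any via re0
#
# With one_pass=0 a packet re-enters the ruleset after the pipe at the rule
# following 100; with io_fast=1, flows below the pipe's bandwidth bypass the
# dummynet scheduler and are passed on immediately.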


##
## https://calomel.org/network_performance.html
##
kern.ipc.maxsockbuf=16777216      # kernel socket buffer space
kern.ipc.nmbclusters=262144       # kernel mbuf space, raised to ~275MB of kernel dedicated ram
kern.ipc.somaxconn=32768          # size of the listen queue for accepting new TCP connections
kern.ipc.maxsockets=204800        # increase the limit of the open sockets
#kern.randompid=348                # randomized processes id's
net.inet.icmp.icmplim=50          # reply to no more than 50 ICMP packets per sec
net.inet.ip.process_options=0     # do not process any IP options in the IP headers
net.inet.ip.redirect=0            # do not allow ip header redirects
net.inet.ip.rtexpire=2            # route cache expire in two seconds
net.inet.ip.rtminexpire=2         # "
net.inet.ip.rtmaxcache=256        # route cache entries increased
#net.inet.icmp.drop_redirect=1     # drop icmp redirects
#net.inet.tcp.blackhole=2          # drop any TCP packets to closed ports
net.inet.tcp.delayed_ack=0        # no need to delay ACK's
net.inet.tcp.drop_synfin=1        # drop TCP packets which have SYN and FIN set
net.inet.tcp.msl=15000            # maximum segment lifetime 15 seconds (default 30)
net.inet.tcp.nolocaltimewait=1    # do not create TIME_WAIT state for localhost
#net.inet.tcp.path_mtu_discovery=0 # disable MTU path discovery
net.inet.tcp.recvbuf_max=16777216 # TCP receive buffer space
net.inet.tcp.recvspace=8192       # decrease buffers for incoming data
net.inet.tcp.sendbuf_max=16777216 # TCP send buffer space
net.inet.tcp.sendspace=16384      # decrease buffers for outgoing data
#net.inet.udp.blackhole=1          # drop any UDP packets to closed ports
security.bsd.see_other_uids=0     # keeps users segregated to their own processes list
security.bsd.see_other_gids=0     # "

net.inet.ip.fastforwarding=1       # forward packets in the interrupt context, bypassing the netisr queue
net.inet.ip.intr_queue_maxlen=1024 # IP input (netisr) queue length (default 256)

#FireBird
kern.ipc.shmall=32768
kern.ipc.shmmax=134217728
kern.ipc.semmap=256
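
The resulting SysV shared memory and semaphore limits (Firebird relies on
them) can be checked after boot with something like:

# ipcs -T            # print system limits for shared memory, semaphores
                     # and message queues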

As you can see,
 net.isr.maxthreads=4                  # max number of netisr threads (4 cores in the box)
and there really are 4 netisr threads.
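
The same threads can be listed directly from the kernel intr process; PID 12
below is taken from the top output that follows:

# procstat -t 12     # threads of the intr process, including the four
                     # {swi1: netisr N} software interrupt threads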


# top -SIHP
last pid: 93050;  load averages:  1.45,  1.41,  1.29                                                                                        up 9+16:32:06  10:28:43
237 processes: 5 running, 210 sleeping, 2 stopped, 20 waiting
CPU 0:  0.8% user,  0.0% nice,  8.7% system, 17.7% interrupt, 72.8% idle
CPU 1:  0.0% user,  0.0% nice,  9.1% system, 20.1% interrupt, 70.9% idle
CPU 2:  0.4% user,  0.0% nice,  9.4% system, 19.7% interrupt, 70.5% idle
CPU 3:  1.2% user,  0.0% nice,  6.3% system, 22.4% interrupt, 70.1% idle
Mem: 843M Active, 2476M Inact, 347M Wired, 150M Cache, 112M Buf, 80M Free
Swap: 4096M Total, 15M Used, 4080M Free

  PID USERNAME   PRI NICE   SIZE    RES STATE   C   TIME   WCPU COMMAND
   11 root       155 ki31     0K    32K RUN     2 155.8H 77.59% {idle: cpu2}
   11 root       155 ki31     0K    32K CPU3    3 158.1H 75.98% {idle: cpu3}
   11 root       155 ki31     0K    32K CPU0    0 150.5H 71.14% {idle: cpu0}
   11 root       155 ki31     0K    32K CPU1    1 157.2H 63.87% {idle: cpu1}
   12 root       -72    -     0K   160K WAIT    1  65.0H 28.56% {swi1: netisr 3}
   12 root       -92    -     0K   160K WAIT    0  26.2H 16.41% {irq256: re0}
   12 root       -72    -     0K   160K WAIT    2  42.5H 13.67% {swi1: netisr 1}
   12 root       -72    -     0K   160K WAIT    3  22.2H 12.06% {swi1: netisr 2}
   12 root       -72    -     0K   160K WAIT    3  18.0H 11.62% {swi1: netisr 0}
   13 root       -16    -     0K    32K sleep   3 535:58  3.81% {ng_queue2}
   13 root       -16    -     0K    32K sleep   0 535:52  3.71% {ng_queue1}
13474 root        22    0 15392K  5932K select  0  12:36  3.66% snmpd
   13 root       -16    -     0K    32K sleep   0 536:48  3.12% {ng_queue3}
   13 root       -16    -     0K    32K sleep   1 536:19  2.98% {ng_queue0}
 5587 root        20  -20   276M   200M select  2 171:55  0.10% {mpd5}
12637 freeradius  23    0 31844K 23436K select  2  15:29  0.05% {initial thread}

But why, at times of load, does total CPU usage not exceed 400 ticks (about 75%)?
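
While the problem is happening, the interrupt load from the NIC can also be
sampled; for example:

# vmstat -i          # cumulative and per-second interrupt counts,
                     # including irq256: re0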


loader.conf
##
## https://calomel.org/network_performance.html
##
autoboot_delay="3"                    # reduce boot menu delay from 10 to 3 seconds
loader_logo="beastie"                 # old FreeBSD logo menu
net.inet.tcp.tcbhashsize=4096         # tcb hash size
net.inet.tcp.syncache.hashsize=1024   # syncache hash size
net.inet.tcp.syncache.bucketlimit=100 # syncache bucket limit
net.isr.bindthreads=0                 # do not bind netisr threads to CPUs
net.isr.direct=0                      # queue packets to netisr threads instead of dispatching in the interrupt context
net.isr.direct_force=0                # "
net.isr.maxthreads=4                  # max number of netisr threads (4 cores in the box)
#vm.kmem_size=512M       #1G                       # physical memory available for kernel (320Mb by default)

#FireBird
kern.ipc.semmni=256
kern.ipc.semmns=512
kern.ipc.semmnu=256

# sysctl -a | grep cpu
cpu     I686_CPU
device  cpufreq
kern.ccpu: 0
kern.sched.cpusetsize: 4
  <cpu count="4" mask="0xf">0, 1, 2, 3</cpu>
    <cpu count="4" mask="0xf">0, 1, 2, 3</cpu>
      <cpu count="2" mask="0x3">0, 1</cpu>
      <cpu count="2" mask="0xc">2, 3</cpu>
kern.smp.cpus: 4
kern.smp.maxcpus: 32
net.inet.tcp.per_cpu_timers: 0
debug.cpufreq.verbose: 0
debug.cpufreq.lowest: 0
debug.kdb.stop_cpus: 1
debug.PMAP1changedcpu: 2785232
hw.ncpu: 4
hw.acpi.cpu.cx_lowest: C1
machdep.hlt_cpus: 0
machdep.hlt_logical_cpus: 0
machdep.logical_cpus_mask: 10
security.jail.param.cpuset.id: 0
dev.cpu.0.%desc: ACPI CPU
dev.cpu.0.%driver: cpu
dev.cpu.0.%location: handle=\_PR_.P000
dev.cpu.0.%pnpinfo: _HID=none _UID=0
dev.cpu.0.%parent: acpi0
dev.cpu.0.freq: 3100
dev.cpu.0.freq_levels: 3100/65000 3000/62000 2900/59000 2800/56000 2700/54000 2600/51000 2500/49000 2400/46000 2300/44000 2200/41000 2100/39000 2000/37000 1900/35000 1800/32000 1700/30000 1600/28000 1400/24500 1200/21000 1000/17500 800/14000 600/10500 400/7000 200/3500
dev.cpu.0.cx_supported: C1/1 C2/104
dev.cpu.0.cx_lowest: C1
dev.cpu.0.cx_usage: 100.00% 0.00% last 28us
dev.cpu.1.%desc: ACPI CPU
dev.cpu.1.%driver: cpu
dev.cpu.1.%location: handle=\_PR_.P001
dev.cpu.1.%pnpinfo: _HID=none _UID=0
dev.cpu.1.%parent: acpi0
dev.cpu.1.cx_supported: C1/1 C2/104
dev.cpu.1.cx_lowest: C1
dev.cpu.1.cx_usage: 100.00% 0.00% last 44us
dev.cpu.2.%desc: ACPI CPU
dev.cpu.2.%driver: cpu
dev.cpu.2.%location: handle=\_PR_.P002
dev.cpu.2.%pnpinfo: _HID=none _UID=0
dev.cpu.2.%parent: acpi0
dev.cpu.2.cx_supported: C1/1 C2/104
dev.cpu.2.cx_lowest: C1
dev.cpu.2.cx_usage: 100.00% 0.00% last 79us
dev.cpu.3.%desc: ACPI CPU
dev.cpu.3.%driver: cpu
dev.cpu.3.%location: handle=\_PR_.P003
dev.cpu.3.%pnpinfo: _HID=none _UID=0
dev.cpu.3.%parent: acpi0
dev.cpu.3.cx_supported: C1/1 C2/104
dev.cpu.3.cx_lowest: C1
dev.cpu.3.cx_usage: 100.00% 0.00% last 68us
dev.acpi_perf.0.%parent: cpu0
dev.acpi_perf.1.%parent: cpu1
dev.acpi_perf.2.%parent: cpu2
dev.acpi_perf.3.%parent: cpu3
dev.est.0.%parent: cpu0
dev.est.1.%parent: cpu1
dev.est.2.%parent: cpu2
dev.est.3.%parent: cpu3
dev.cpufreq.0.%driver: cpufreq
dev.cpufreq.0.%parent: cpu0
dev.cpufreq.1.%driver: cpufreq
dev.cpufreq.1.%parent: cpu1
dev.cpufreq.2.%driver: cpufreq
dev.cpufreq.2.%parent: cpu2
dev.cpufreq.3.%driver: cpufreq
dev.cpufreq.3.%parent: cpu3
dev.p4tcc.0.%parent: cpu0
dev.p4tcc.1.%parent: cpu1
dev.p4tcc.2.%parent: cpu2
dev.p4tcc.3.%parent: cpu3
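
Since EST and p4tcc frequency control are attached, it may also be worth
confirming that the cores actually stay at full speed under load; the current
frequency of core 0 can be polled with:

# sysctl dev.cpu.0.freq          # current frequency in MHz (3100 = full speed here)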

