Date:      Fri, 7 Aug 2009 15:37:11 +0400
From:      pluknet <pluknet@gmail.com>
To:        freebsd-stable <freebsd-stable@freebsd.org>
Subject:   panic in vgonel()
Message-ID:  <a31046fc0908070437p2b1c96denf24268ce52107a34@mail.gmail.com>

This is on 7.2-R amd64.

I'm curious whether it might be related to the glusterfs setup running on this box.

Fatal trap 12: page fault while in kernel mode
cpuid = 3; apic id = 03
fault virtual address   = 0x0
fault code              = supervisor write data, page not present
instruction pointer     = 0x8:0xffffffff805a52ba
stack pointer           = 0x10:0xfffffffefc3474a0
frame pointer           = 0x10:0xfffffffefc347510
code segment            = base 0x0, limit 0xfffff, type
                        = DPL 0, pres 1, long 1, def32 0, gran 1
processor eflags        = interrupt enabled, resume, IOPL = 0
current process         = 35425 (find)

db> bt
Tracing pid 35425 tid 100194 td 0xffffff003c165370
vgonel() at vgonel+0x1aa
vnlru_free() at vnlru_free+0x36c
getnewvnode() at getnewvnode+0x281
ffs_vgetf() at ffs_vgetf+0xdf
ufs_lookup() at ufs_lookup+0x2dd
vfs_cache_lookup() at vfs_cache_lookup+0xf3
VOP_LOOKUP_APV() at VOP_LOOKUP_APV+0x40
lookup() at lookup+0x598
namei() at namei+0x33e
kern_lstat() at kern_lstat+0x5e
lstat() at lstat+0x2a
syscall() at syscall+0x256
Xfast_syscall() at Xfast_syscall+0xab
--- syscall (190, FreeBSD ELF64, lstat), rip = 0x80071063c, rsp = 0x7fffffffea48, rbp = 0x800a06910 ---
db> show pcpu
cpuid        = 3
curthread    = 0xffffff003c165370: pid 35425 "find"
curpcb       = 0xfffffffefc347d40
fpcurthread  = none
idlethread   = 0xffffff000143c370: pid 15 "idle: cpu3"
db> show proc 35425
Process 35425 (find) at 0xffffff003c1868f0:
 state: NORMAL
 uid: 0  gids: 0, 0, 5
 parent: pid 35421 at 0xffffff0004855000
 ABI: FreeBSD ELF64
 arguments: find
 threads: 1
100194                   Run     CPU 3                       find
db> show allpcpu
Current CPU: 3

cpuid        = 0
curthread    = 0xffffff00014306e0: pid 18 "idle: cpu0"
curpcb       = 0xfffffffe80064d40
fpcurthread  = none
idlethread   = 0xffffff00014306e0: pid 18 "idle: cpu0"

cpuid        = 1
curthread    = 0xffffff0001430a50: pid 17 "idle: cpu1"
curpcb       = 0xfffffffe8005fd40
fpcurthread  = none
idlethread   = 0xffffff0001430a50: pid 17 "idle: cpu1"

cpuid        = 2
curthread    = 0xffffff000143c000: pid 16 "idle: cpu2"
curpcb       = 0xfffffffe8005ad40
fpcurthread  = none
idlethread   = 0xffffff000143c000: pid 16 "idle: cpu2"

cpuid        = 3
curthread    = 0xffffff003c165370: pid 35425 "find"
curpcb       = 0xfffffffefc347d40
fpcurthread  = none
idlethread   = 0xffffff000143c370: pid 15 "idle: cpu3"

cpuid        = 4
curthread    = 0xffffff000143c6e0: pid 14 "idle: cpu4"
curpcb       = 0xfffffffe80050d40
fpcurthread  = none
idlethread   = 0xffffff000143c6e0: pid 14 "idle: cpu4"

cpuid        = 5
curthread    = 0xffffff000142e000: pid 13 "idle: cpu5"
curpcb       = 0xfffffffe8004bd40
fpcurthread  = none
idlethread   = 0xffffff000142e000: pid 13 "idle: cpu5"

cpuid        = 6
curthread    = 0xffffff000142e370: pid 12 "idle: cpu6"
fpcurthread  = none
idlethread   = 0xffffff000142e370: pid 12 "idle: cpu6"

cpuid        = 7
curthread    = 0xffffff000142e6e0: pid 11 "idle: cpu7"
curpcb       = 0xfffffffe80041d40
fpcurthread  = none
idlethread   = 0xffffff000142e6e0: pid 11 "idle: cpu7"
db> show lockedvnods
Locked vnodes

0xffffff0033b6a1f8: tag ufs, type VDIR
    usecount 3, writecount 0, refcount 6 mountedhere 0
    flags ()
    v_object 0xffffff001a78ebc8 ref 0 pages 1
     lock type ufs: EXCL (count 1) by thread 0xffffff003c165370 (pid 35425)
        ino 143271749, on dev aacd0s1g

db> ps
  pid  ppid  pgrp   uid   state   wmesg         wchan        cmd
35428 35426 35316     0  S       piperd   0xffffff00180802e8 cat
35426 35421 35316     0  S       wait     0xffffff0004dee8f0 sh
35425 35421 35316     0  R       CPU 3                       find
35421 35418 35316     0  S       wait     0xffffff0004855000 sh
35420 35419 35316     0  S       piperd   0xffffff003c16d000 mail
35419 35411 35316     0  S       wait     0xffffff003c68a478 sh
35418 35411 35316     0  S       wait     0xffffff003cabe000 sh
35411 35410 35316     0  S       wait     0xffffff001856c478 sh
35410 35325 35316     0  S       wait     0xffffff00048568f0 sh
35327 35326 35316     0  S       piperd   0xffffff003c16e000 mail
35326 35318 35316     0  S       wait     0xffffff0004698478 sh
35325 35318 35316     0  S       wait     0xffffff003c331478 sh
35318 35316 35316     0  S       wait     0xffffff00181628f0 sh
35316 35314 35316     0  Ss      wait     0xffffff0004975478 sh
35314   685   685     0  S       piperd   0xffffff00184d92e8 cron
34062     1 34062     0  Ss      (threaded)                  glusterfsd
100361                   S       fu_msg   0xffffff00183f2c00 glusterfsd
100216                   S       nanslp   0xffffffff80b681a8 glusterfsd
100073                   S       select   0xffffffff80b808d0 glusterfsd
34050     1 34050     0  Rs      (threaded)                  glusterfsd
100086                   S       nanslp   0xffffffff80b681a8 glusterfsd
100322                   RunQ                                glusterfsd
33886     1 33886     0  Ss      select   0xffffffff80b808d0 mountd
31645 31611 31611    80  S       accept   0xffffff001828332e httpd
31644 31611 31611    80  S       accept   0xffffff001828332e httpd
31643 31611 31611    80  S       accept   0xffffff001828332e httpd
31616 31611 31611    80  S       accept   0xffffff001828332e httpd
31615 31611 31611    80  S       accept   0xffffff001828332e httpd
31614 31611 31611    80  S       accept   0xffffff001828332e httpd
31613 31611 31611    80  S       accept   0xffffff001828332e httpd
31612 31611 31611    80  S       accept   0xffffff001828332e httpd
31611     1 31611     0  Ss      select   0xffffffff80b808d0 httpd
  733     1     1     0  S       nanslp   0xffffffff80b681a8 getty
  732     1   732     0  Ss+     ttyin    0xffffff0004705410 getty
  731     1   731     0  Ss+     ttyin    0xffffff0004728810 getty
  730     1   730     0  Ss+     ttyin    0xffffff0004728c10 getty
  729     1   729     0  Ss+     ttyin    0xffffff0004734010 getty
  728     1   728     0  Ss+     ttyin    0xffffff0004734410 getty
  727     1   727     0  Ss+     ttyin    0xffffff0004734810 getty
  726     1   726     0  Ss+     ttyin    0xffffff0004734c10 getty
  725     1   725     0  Ss+     ttyin    0xffffff0004735010 getty
  724     1   724     0  Ss+     ttyin    0xffffff0004735410 getty
  707     1   707     0  Ss      select   0xffffffff80b808d0 inetd
  685     1   685     0  Ss      nanslp   0xffffffff80b681a8 cron
  678     1   678     0  Ss      select   0xffffffff80b808d0 sshd
  665     1   665    26  Ss      select   0xffffffff80b808d0 exim-4.69-4
  655     1   655   136  Ss      select   0xffffffff80b808d0 dhcpd
  576     1   576     0  Ss      select   0xffffffff80b808d0 xinetd
  562   558   558     0  S       -        0xffffff0004884400 nfsd
  561   558   558     0  S       -        0xffffff0004db2c00 nfsd
  560   558   558     0  S       -        0xffffff000481d800 nfsd
  559   558   558     0  S       -        0xffffff000481d600 nfsd
  558     1   558     0  Ss      select   0xffffffff80b808d0 nfsd
  514     1   514     0  Ss      select   0xffffffff80b808d0 rpcbind
  497     1   497     0  Ss      select   0xffffffff80b808d0 syslogd
  436     1   436     0  Ss      select   0xffffffff80b808d0 devd
  144     1   144     0  Ss      pause    0xffffff0004883538 adjkerntz
   53     0     0     0  SL      sdflush  0xffffffff80b91c58 [softdepflush]
   52     0     0     0  SL      syncer   0xffffffff80b67e60 [syncer]
   51     0     0     0  SL      vlruwt   0xffffff00047af478 [vnlru]
   50     0     0     0  SL      psleep   0xffffffff80b8115c [bufdaemon]
   49     0     0     0  SL      pgzero   0xffffffff80b9380c [pagezero]
   48     0     0     0  SL      psleep   0xffffffff80b92b90 [vmdaemon]
   47     0     0     0  SL      psleep   0xffffffff80b92b4c [pagedaemon]
   46     0     0     0  SL      waiting_ 0xffffffff80b84f48 [sctp_iterator]
   45     0     0     0  WL                                  [irq1: atkbd0]
   44     0     0     0  WL                                  [swi0: sio]
   43     0     0     0  WL                                  [irq15: ata1]
   42     0     0     0  WL                                  [irq14: ata0]
   41     0     0     0  SL      usbevt   0xffffff000467f420 [usb4]
   40     0     0     0  SL      usbevt   0xfffffffe80255420 [usb3]
   39     0     0     0  SL      usbevt   0xfffffffe80253420 [usb2]
   38     0     0     0  SL      usbevt   0xfffffffe80251420 [usb1]
   37     0     0     0  WL                                  [irq22: uhci1 uhci3]
   36     0     0     0  SL      usbtsk   0xffffffff80b63708 [usbtask-dr]
   35     0     0     0  SL      usbtsk   0xffffffff80b636e0 [usbtask-hc]
   34     0     0     0  SL      usbevt   0xfffffffe8024f420 [usb0]
   33     0     0     0  WL                                  [irq23: uhci0 uhci+]
   32     0     0     0  WL                                  [irq257: bce1]
   31     0     0     0  WL                                  [irq256: bce0]
   30     0     0     0  SL      aifthd   0xffffff0001454478 [aac0aif]
   29     0     0     0  WL                                  [irq17: aac0]
   28     0     0     0  WL                                  [irq9: acpi0]
   27     0     0     0  WL                                  [swi2: cambio]
   26     0     0     0  SL      ccb_scan 0xffffffff80b300e0 [xpt_thrd]
    9     0     0     0  SL      -        0xffffff000155cd00 [kqueue taskq]
    8     0     0     0  SL      -        0xffffff000155cd80 [acpi_task_2]
    7     0     0     0  SL      -        0xffffff000155cd80 [acpi_task_1]
    6     0     0     0  SL      -        0xffffff000155cd80 [acpi_task_0]
   25     0     0     0  WL                                  [swi6: task queue]
   24     0     0     0  WL                                  [swi6: Giant taskq]
    5     0     0     0  SL      -        0xffffff000157e580 [thread taskq]
   23     0     0     0  WL                                  [swi5: +]
   22     0     0     0  SL      -        0xffffffff80b67e68 [yarrow]
    4     0     0     0  SL      -        0xffffffff80b63e38 [g_down]
    3     0     0     0  SL      -        0xffffffff80b63e30 [g_up]
    2     0     0     0  SL      -        0xffffffff80b63e20 [g_event]
   21     0     0     0  WL                                  [swi1: net]
   20     0     0     0  WL                                  [swi3: vm]
   19     0     0     0  WL                                  [swi4: clock sio]
   18     0     0     0  RL      CPU 0                       [idle: cpu0]
   17     0     0     0  RL      CPU 1                       [idle: cpu1]
   16     0     0     0  RL      CPU 2                       [idle: cpu2]
   15     0     0     0  RL                                  [idle: cpu3]
   14     0     0     0  RL      CPU 4                       [idle: cpu4]
   13     0     0     0  RL      CPU 5                       [idle: cpu5]
   12     0     0     0  RL      CPU 6                       [idle: cpu6]
   11     0     0     0  RL      CPU 7                       [idle: cpu7]
    1     0     1     0  SLs     wait     0xffffff000142b8f0 [init]
   10     0     0     0  SL      audit_wo 0xffffffff80b910e0 [audit]
    0     0     0     0  SLs     sched    0xffffffff80b63f40 [swapper]

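For reference, the vgonel+0x1aa frame in the bt above should resolve to
an exact source line against a kernel with debug symbols; something
along these lines (the kernel path is just an example, adjust to the
local setup):

# kgdb /boot/kernel/kernel
(kgdb) list *(vgonel+0x1aa)

The fault virtual address of 0x0 together with "supervisor write data"
looks like a NULL pointer dereference somewhere inside vgonel().
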
The devbuf malloc stats also look a bit weird:
            devbuf        16996        35483K        17121
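
FWIW, a crude way to tell whether devbuf keeps growing (i.e. looks like
a leak) rather than being a one-off would be to sample it periodically
with plain vmstat(8); the interval below is arbitrary:

# while true; do date; vmstat -m | grep devbuf; sleep 60; done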

-- 
wbr,
pluknet


