From owner-p4-projects Fri Sep 20 22:40:21 2002
Date: Fri, 20 Sep 2002 22:40:02 -0700 (PDT)
Message-Id: <200209210540.g8L5e21s042553@freefall.freebsd.org>
From: Peter Wemm
To: Perforce Change Reviews
Subject: PERFORCE change 17832 for review

http://people.freebsd.org/~peter/p4db/chv.cgi?CH=17832

Change 17832 by peter@peter_daintree on 2002/09/20 22:39:53

	Do not use the lazy critical mask stuff.  It's just more things
	to go wrong at this stage.  I plan to use plain C for the
	interrupt handlers anyway while getting started, just like
	alpha and ia64.  Less assembler to get wrong is better.

Affected files ...

.. //depot/projects/hammer/sys/x86_64/include/critical.h#2 edit
.. //depot/projects/hammer/sys/x86_64/include/proc.h#2 edit
.. //depot/projects/hammer/sys/x86_64/isa/icu_vector.s#2 edit
.. //depot/projects/hammer/sys/x86_64/x86_64/critical.c#3 edit

Differences ...

==== //depot/projects/hammer/sys/x86_64/include/critical.h#2 (text+ko) ====

@@ -23,7 +23,6 @@
 /*
  * Prototypes - see <arch>/<arch>/critical.c
  */
-void	cpu_unpend(void);
 void	cpu_critical_fork_exit(void);
 void	cpu_thread_link(struct thread *td);
 
@@ -34,12 +33,15 @@
  *
  *	This routine is called from critical_enter() on the 0->1 transition
  *	of td_critnest, prior to it being incremented to 1.
- *
- *	If new-style critical section handling we do not have to do anything.
- *	However, as a side effect any interrupts occuring while td_critnest
- *	is non-zero will be deferred.
  */
-#define cpu_critical_enter()
+static __inline void
+cpu_critical_enter(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	td->td_md.md_savecrit = intr_disable();
+}
 
 /*
  * cpu_critical_exit:
  *
@@ -47,27 +49,14 @@
  *	This routine is called from critical_exit() on a 1->0 transition
  *	of td_critnest, after it has been decremented to 0.  We are
  *	exiting the last critical section.
- *
- *	Note that the td->critnest (1->0) transition interrupt race against
- *	our int_pending/unpend() check below is handled by the interrupt
- *	code for us, so we do not have to do anything fancy.
  */
 static __inline void
 cpu_critical_exit(void)
 {
-	/*
-	 * We may have to schedule pending interrupts.  Create
-	 * conditions similar to an interrupt context and call
-	 * unpend().
-	 *
-	 * note: we do this even if we are in an interrupt
-	 * nesting level.  Deep nesting is protected by
-	 * critical_*() and if we conditionalized it then we
-	 * would have to check int_pending again whenever
-	 * we decrement td_intr_nesting_level to 0.
-	 */
-	if (PCPU_GET(int_pending))
-		cpu_unpend();
+	struct thread *td;
+
+	td = curthread;
+	intr_restore(td->td_md.md_savecrit);
 }
 
 #else	/* !__GNUC__ */
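
For reference, these MD hooks are driven from the MI critical section
code, whose contract the comments above describe.  A minimal sketch of
the kern_switch.c side (paraphrased here for illustration; not part of
this change):

	void
	critical_enter(void)
	{
		struct thread *td;

		td = curthread;
		if (td->td_critnest == 0)
			cpu_critical_enter();
		td->td_critnest++;
	}

	void
	critical_exit(void)
	{
		struct thread *td;

		td = curthread;
		if (td->td_critnest == 1) {
			td->td_critnest = 0;
			cpu_critical_exit();
		} else
			td->td_critnest--;
	}

So with this change the outermost critical_enter() hard-disables
interrupts and stashes the previous state in md_savecrit, and the
matching critical_exit() restores it, rather than deferring interrupts
and replaying them later through cpu_unpend().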

==== //depot/projects/hammer/sys/x86_64/include/proc.h#2 (text+ko) ====

@@ -51,6 +51,7 @@
  * Machine-dependent part of the proc structure for i386.
  */
 struct mdthread {
+	register_t	md_savecrit;
 };
 
 struct mdproc {
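
The new md_savecrit field only holds saved state for the outermost
critical section; nested sections just adjust td_critnest.  An
illustrative sequence, assuming the critical.h inlines above:

	critical_enter();  /* td_critnest 0->1: md_savecrit = intr_disable() */
	critical_enter();  /* td_critnest 1->2: count only, no MD action */
	/* ... interrupts stay hard-disabled throughout ... */
	critical_exit();   /* td_critnest 2->1: count only, no MD action */
	critical_exit();   /* td_critnest 1->0: intr_restore(md_savecrit) */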

==== //depot/projects/hammer/sys/x86_64/isa/icu_vector.s#2 (text+ko) ====

@@ -4,7 +4,6 @@
  */
 
 #define	IRQ_BIT(irq_num)	(1 << ((irq_num) % 8))
-#define	IRQ_LBIT(irq_num)	(1 << (irq_num))
 #define	IRQ_BYTE(irq_num)	((irq_num) >> 3)
 
 #ifdef AUTO_EOI_1
@@ -38,43 +37,6 @@
 
 #endif
 
-#define PUSH_FRAME \
-	pushl	$0 ;		/* dummy error code */ \
-	pushl	$0 ;		/* dummy trap type */ \
-	pushal ;		/* 8 ints */ \
-	pushl	%ds ;		/* save data and extra segments ... */ \
-	pushl	%es ; \
-	pushl	%fs
-
-#define PUSH_DUMMY \
-	pushfl ;		/* eflags */ \
-	pushl	%cs ;		/* cs */ \
-	pushl	12(%esp) ;	/* original caller eip */ \
-	pushl	$0 ;		/* dummy error code */ \
-	pushl	$0 ;		/* dummy trap type */ \
-	subl	$11*4,%esp
-
-#define POP_FRAME \
-	popl	%fs ; \
-	popl	%es ; \
-	popl	%ds ; \
-	popal ; \
-	addl	$4+4,%esp
-
-#define POP_DUMMY \
-	addl	$16*4,%esp
-
-#define MASK_IRQ(icu, irq_num) \
-	movb	imen + IRQ_BYTE(irq_num),%al ; \
-	orb	$IRQ_BIT(irq_num),%al ; \
-	movb	%al,imen + IRQ_BYTE(irq_num) ; \
-	outb	%al,$icu+ICU_IMR_OFFSET
-
-#define UNMASK_IRQ(icu, irq_num) \
-	movb	imen + IRQ_BYTE(irq_num),%al ; \
-	andb	$~IRQ_BIT(irq_num),%al ; \
-	movb	%al,imen + IRQ_BYTE(irq_num) ; \
-	outb	%al,$icu+ICU_IMR_OFFSET
-
 /*
  * Macros for interrupt entry, call to handler, and exit.
  */
@@ -83,72 +45,33 @@
 	.text ; \
 	SUPERALIGN_TEXT ; \
 IDTVEC(vec_name) ; \
-	PUSH_FRAME ; \
+	pushl	$0 ;		/* dummy error code */ \
+	pushl	$0 ;		/* dummy trap type */ \
+	pushal ; \
+	pushl	%ds ;		/* save our data and extra segments */ \
+	pushl	%es ; \
+	pushl	%fs ; \
 	mov	$KDSEL,%ax ; \
 	mov	%ax,%ds ; \
 	mov	%ax,%es ; \
 	mov	$KPSEL,%ax ; \
 	mov	%ax,%fs ; \
 	FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
+	call	critical_enter ; \
 	movl	PCPU(CURTHREAD),%ebx ; \
-	cmpl	$0,TD_CRITNEST(%ebx) ; \
-	je	1f ; \
-; \
-	movl	$1,PCPU(INT_PENDING) ; \
-	orl	$IRQ_LBIT(irq_num),PCPU(FPENDING) ; \
-	MASK_IRQ(icu, irq_num) ; \
-	enable_icus ; \
-	jmp	10f ; \
-1: ; \
-	incl	TD_CRITNEST(%ebx) ; \
 	incl	TD_INTR_NESTING_LEVEL(%ebx) ; \
 	pushl	intr_unit + (irq_num) * 4 ; \
-	call	*intr_handler + (irq_num) * 4 ; \
+	call	*intr_handler + (irq_num) * 4 ;	/* do the work ASAP */ \
+	enable_icus ;		/* (re)enable ASAP (helps edge trigger?) */ \
 	addl	$4,%esp ; \
-	enable_icus ; \
 	incl	cnt+V_INTR ;	/* book-keeping can wait */ \
 	movl	intr_countp + (irq_num) * 4,%eax ; \
 	incl	(%eax) ; \
-	decl	TD_CRITNEST(%ebx) ; \
-	cmpl	$0,PCPU(INT_PENDING) ; \
-	je	2f ; \
-; \
-	call	i386_unpend ; \
-2: ; \
 	decl	TD_INTR_NESTING_LEVEL(%ebx) ; \
-10: ; \
+	call	critical_exit ; \
 	MEXITCOUNT ; \
 	jmp	doreti
 
-/*
- * Restart a fast interrupt that was held up by a critical section.
- * This routine is called from unpend().  unpend() ensures we are
- * in a critical section and deals with the interrupt nesting level
- * for us.  If we previously masked the irq, we have to unmask it.
- *
- * We have a choice.  We can regenerate the irq using the 'int'
- * instruction or we can create a dummy frame and call the interrupt
- * handler directly.  I've chosen to use the dummy-frame method.
- */
-#define FAST_UNPEND(irq_num, vec_name, icu) \
-	.text ; \
-	SUPERALIGN_TEXT ; \
-IDTVEC(vec_name) ; \
-; \
-	pushl	%ebp ; \
-	movl	%esp, %ebp ; \
-	PUSH_DUMMY ; \
-	pushl	intr_unit + (irq_num) * 4 ; \
-	call	*intr_handler + (irq_num) * 4 ;	/* do the work ASAP */ \
-	addl	$4, %esp ; \
-	incl	cnt+V_INTR ;	/* book-keeping can wait */ \
-	movl	intr_countp + (irq_num) * 4,%eax ; \
-	incl	(%eax) ; \
-	UNMASK_IRQ(icu, irq_num) ; \
-	POP_DUMMY ; \
-	popl	%ebp ; \
-	ret
-
 /*
  * Slow, threaded interrupts.
  *
@@ -162,92 +85,71 @@
 	.text ; \
 	SUPERALIGN_TEXT ; \
 IDTVEC(vec_name) ; \
-	PUSH_FRAME ; \
+	pushl	$0 ;		/* dummy error code */ \
+	pushl	$0 ;		/* dummy trap type */ \
+	pushal ; \
+	pushl	%ds ;		/* save our data and extra segments */ \
+	pushl	%es ; \
+	pushl	%fs ; \
 	mov	$KDSEL,%ax ;	/* load kernel ds, es and fs */ \
 	mov	%ax,%ds ; \
 	mov	%ax,%es ; \
 	mov	$KPSEL,%ax ; \
 	mov	%ax,%fs ; \
-; \
 	maybe_extra_ipending ; \
-	MASK_IRQ(icu, irq_num) ; \
+	movb	imen + IRQ_BYTE(irq_num),%al ; \
+	orb	$IRQ_BIT(irq_num),%al ; \
+	movb	%al,imen + IRQ_BYTE(irq_num) ; \
+	outb	%al,$icu+ICU_IMR_OFFSET ; \
 	enable_icus ; \
-; \
 	movl	PCPU(CURTHREAD),%ebx ; \
-	cmpl	$0,TD_CRITNEST(%ebx) ; \
-	je	1f ; \
-	movl	$1,PCPU(INT_PENDING); \
-	orl	$IRQ_LBIT(irq_num),PCPU(IPENDING) ; \
-	jmp	10f ; \
-1: ; \
 	incl	TD_INTR_NESTING_LEVEL(%ebx) ; \
-; \
 	FAKE_MCOUNT(13*4(%esp)) ;	/* XXX late to avoid double count */ \
-	cmpl	$0,PCPU(INT_PENDING) ; \
-	je	9f ; \
-	call	i386_unpend ; \
-9: ; \
	pushl	$irq_num;	/* pass the IRQ */ \
 	call	sched_ithd ; \
 	addl	$4, %esp ;	/* discard the parameter */ \
-; \
 	decl	TD_INTR_NESTING_LEVEL(%ebx) ; \
-10: ; \
 	MEXITCOUNT ; \
+	/* We could usually avoid the following jmp by inlining some of */ \
+	/* doreti, but it's probably better to use less cache. */ \
 	jmp	doreti
 
 MCOUNT_LABEL(bintr)
-	FAST_INTR(0,fastintr0, IO_ICU1, ENABLE_ICU1)
-	FAST_INTR(1,fastintr1, IO_ICU1, ENABLE_ICU1)
-	FAST_INTR(2,fastintr2, IO_ICU1, ENABLE_ICU1)
-	FAST_INTR(3,fastintr3, IO_ICU1, ENABLE_ICU1)
-	FAST_INTR(4,fastintr4, IO_ICU1, ENABLE_ICU1)
-	FAST_INTR(5,fastintr5, IO_ICU1, ENABLE_ICU1)
-	FAST_INTR(6,fastintr6, IO_ICU1, ENABLE_ICU1)
-	FAST_INTR(7,fastintr7, IO_ICU1, ENABLE_ICU1)
-	FAST_INTR(8,fastintr8, IO_ICU2, ENABLE_ICU1_AND_2)
-	FAST_INTR(9,fastintr9, IO_ICU2, ENABLE_ICU1_AND_2)
-	FAST_INTR(10,fastintr10, IO_ICU2, ENABLE_ICU1_AND_2)
-	FAST_INTR(11,fastintr11, IO_ICU2, ENABLE_ICU1_AND_2)
-	FAST_INTR(12,fastintr12, IO_ICU2, ENABLE_ICU1_AND_2)
-	FAST_INTR(13,fastintr13, IO_ICU2, ENABLE_ICU1_AND_2)
-	FAST_INTR(14,fastintr14, IO_ICU2, ENABLE_ICU1_AND_2)
-	FAST_INTR(15,fastintr15, IO_ICU2, ENABLE_ICU1_AND_2)
+	FAST_INTR(0,fastintr0, ENABLE_ICU1)
+	FAST_INTR(1,fastintr1, ENABLE_ICU1)
+	FAST_INTR(2,fastintr2, ENABLE_ICU1)
+	FAST_INTR(3,fastintr3, ENABLE_ICU1)
+	FAST_INTR(4,fastintr4, ENABLE_ICU1)
+	FAST_INTR(5,fastintr5, ENABLE_ICU1)
+	FAST_INTR(6,fastintr6, ENABLE_ICU1)
+	FAST_INTR(7,fastintr7, ENABLE_ICU1)
+	FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2)
+	FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2)
+	FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2)
+	FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2)
+	FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2)
+	FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2)
+	FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2)
+	FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2)
 
 #define	CLKINTR_PENDING	movl $1,CNAME(clkintr_pending)
 /* Threaded interrupts */
-	INTR(0,intr0, IO_ICU1, ENABLE_ICU1, CLKINTR_PENDING)
-	INTR(1,intr1, IO_ICU1, ENABLE_ICU1,)
-	INTR(2,intr2, IO_ICU1, ENABLE_ICU1,)
-	INTR(3,intr3, IO_ICU1, ENABLE_ICU1,)
-	INTR(4,intr4, IO_ICU1, ENABLE_ICU1,)
-	INTR(5,intr5, IO_ICU1, ENABLE_ICU1,)
-	INTR(6,intr6, IO_ICU1, ENABLE_ICU1,)
-	INTR(7,intr7, IO_ICU1, ENABLE_ICU1,)
-	INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2,)
-	INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2,)
-	INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2,)
-	INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2,)
-	INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2,)
-	INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2,)
-	INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2,)
-	INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2,)
+	INTR(0,intr0, ENABLE_ICU1, CLKINTR_PENDING)
+	INTR(1,intr1, ENABLE_ICU1,)
+	INTR(2,intr2, ENABLE_ICU1,)
+	INTR(3,intr3, ENABLE_ICU1,)
+	INTR(4,intr4, ENABLE_ICU1,)
+	INTR(5,intr5, ENABLE_ICU1,)
+	INTR(6,intr6, ENABLE_ICU1,)
+	INTR(7,intr7, ENABLE_ICU1,)
+	INTR(8,intr8, ENABLE_ICU1_AND_2,)
+	INTR(9,intr9, ENABLE_ICU1_AND_2,)
+	INTR(10,intr10, ENABLE_ICU1_AND_2,)
+	INTR(11,intr11, ENABLE_ICU1_AND_2,)
+	INTR(12,intr12, ENABLE_ICU1_AND_2,)
+	INTR(13,intr13, ENABLE_ICU1_AND_2,)
+	INTR(14,intr14, ENABLE_ICU1_AND_2,)
+	INTR(15,intr15, ENABLE_ICU1_AND_2,)
 
-	FAST_UNPEND(0,fastunpend0, IO_ICU1)
-	FAST_UNPEND(1,fastunpend1, IO_ICU1)
-	FAST_UNPEND(2,fastunpend2, IO_ICU1)
-	FAST_UNPEND(3,fastunpend3, IO_ICU1)
-	FAST_UNPEND(4,fastunpend4, IO_ICU1)
-	FAST_UNPEND(5,fastunpend5, IO_ICU1)
-	FAST_UNPEND(6,fastunpend6, IO_ICU1)
-	FAST_UNPEND(7,fastunpend7, IO_ICU1)
-	FAST_UNPEND(8,fastunpend8, IO_ICU2)
-	FAST_UNPEND(9,fastunpend9, IO_ICU2)
-	FAST_UNPEND(10,fastunpend10, IO_ICU2)
-	FAST_UNPEND(11,fastunpend11, IO_ICU2)
-	FAST_UNPEND(12,fastunpend12, IO_ICU2)
-	FAST_UNPEND(13,fastunpend13, IO_ICU2)
-	FAST_UNPEND(14,fastunpend14, IO_ICU2)
-	FAST_UNPEND(15,fastunpend15, IO_ICU2)
 MCOUNT_LABEL(eintr)
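
With the pending-mask bookkeeping gone, the FAST_INTR path is simple
enough to express in C, which matches the stated plan of writing the
interrupt handlers in C as on alpha and ia64.  A rough C rendering of
what the macro now does (illustrative sketch only; the entry/exit
sequence and the ICU EOI stay in assembly, and fast_intr_sketch is a
hypothetical name):

	void
	fast_intr_sketch(int irq)
	{
		struct thread *td = curthread;

		critical_enter();	/* hard-disables interrupts, saves state */
		td->td_intr_nesting_level++;
		(*intr_handler[irq])(intr_unit[irq]);	/* do the work ASAP */
		/* enable_icus: EOI the 8259s as soon as the handler returns */
		cnt.v_intr++;		/* book-keeping can wait */
		(*intr_countp[irq])++;
		td->td_intr_nesting_level--;
		critical_exit();	/* restores the saved eflags */
	}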

==== //depot/projects/hammer/sys/x86_64/x86_64/critical.c#3 (text+ko) ====

@@ -18,135 +18,24 @@
 #include
 
 /*
- * XXX this mess to get sched_ithd() and call_fast_unpend()
- */
-#include
-#include
-#include
-#include
-
-void i386_unpend(void);	/* NOTE: not static, called from assembly */
-
-/*
- * cpu_unpend() - called from critical_exit() inline after quick
- * interrupt-pending check.
+ * cpu_critical_fork_exit() - cleanup after fork
  */
 void
-cpu_unpend(void)
+cpu_critical_fork_exit(void)
 {
-	register_t eflags;
 	struct thread *td;
 
-	td = curthread;
-	eflags = intr_disable();
-	if (PCPU_GET(int_pending)) {
-		++td->td_intr_nesting_level;
-		i386_unpend();
-		--td->td_intr_nesting_level;
-	}
-	intr_restore(eflags);
-}
-
-/*
- * cpu_critical_fork_exit() - cleanup after fork
- *
- * For i386 we do not have to do anything, td_critnest is
- * handled by the fork trampoline code.
- */
-void
-cpu_critical_fork_exit(void)
-{
+	td = curthread;
+	td->td_critnest = 1;
+	td->td_md.md_savecrit = read_eflags() | PSL_I;
 }
 
 /*
  * cpu_thread_link() - thread linkup, initialize machine-dependent fields
- *
- * There are currently no machine-dependant fields that require
- * initialization.
  */
 void
 cpu_thread_link(struct thread *td)
 {
-}
-
-/*
- * Called from cpu_unpend or called from the assembly vector code
- * to process any interrupts which may have occured while we were in
- * a critical section.
- *
- *	- interrupts must be disabled
- *	- td_critnest must be 0
- *	- td_intr_nesting_level must be incremented by the caller
- *
- * NOT STATIC (called from assembly)
- */
-void
-i386_unpend(void)
-{
-	KASSERT(curthread->td_critnest == 0, ("unpend critnest != 0"));
-	KASSERT((read_eflags() & PSL_I) == 0, ("unpend interrupts enabled1"));
-	curthread->td_critnest = 1;
-	for (;;) {
-		u_int32_t mask;
-		int irq;
-
-		/*
-		 * Fast interrupts have priority
-		 */
-		if ((mask = PCPU_GET(fpending)) != 0) {
-			irq = bsfl(mask);
-			PCPU_SET(fpending, mask & ~(1 << irq));
-			call_fast_unpend(irq);
-			KASSERT((read_eflags() & PSL_I) == 0, ("unpend interrupts enabled2 %d", irq));
-			continue;
-		}
-
-		/*
-		 * Threaded interrupts come next
-		 */
-		if ((mask = PCPU_GET(ipending)) != 0) {
-			irq = bsfl(mask);
-			PCPU_SET(ipending, mask & ~(1 << irq));
-			sched_ithd((void *)irq);
-			KASSERT((read_eflags() & PSL_I) == 0, ("unpend interrupts enabled3 %d", irq));
-			continue;
-		}
-
-		/*
-		 * Software interrupts and delayed IPIs are last
-		 *
-		 * XXX give the bits #defined names.  see also
-		 * isa/xxx_vector.s
-		 */
-		if ((mask = PCPU_GET(spending)) != 0) {
-			irq = bsfl(mask);
-			PCPU_SET(spending, mask & ~(1 << irq));
-			switch(irq) {
-			case 0:		/* bit 0 - hardclock */
-				mtx_lock_spin(&sched_lock);
-				hardclock_process(curthread, 0);
-				mtx_unlock_spin(&sched_lock);
-				break;
-			case 1:		/* bit 1 - statclock */
-				mtx_lock_spin(&sched_lock);
-				statclock_process(curthread->td_kse, (register_t)i386_unpend, 0);
-				mtx_unlock_spin(&sched_lock);
-				break;
-			}
-			KASSERT((read_eflags() & PSL_I) == 0, ("unpend interrupts enabled4 %d", irq));
-			continue;
-		}
-		break;
-	}
-	/*
-	 * Interrupts are still disabled, we can safely clear int_pending
-	 * and td_critnest.
-	 */
-	KASSERT((read_eflags() & PSL_I) == 0, ("unpend interrupts enabled5"));
-	PCPU_SET(int_pending, 0);
-	curthread->td_critnest = 0;
+	td->td_md.md_savecrit = 0;
 }
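
One subtlety: a forked child never runs the parent's critical_exit(),
so cpu_critical_fork_exit() has to manufacture the state that
cpu_critical_enter() would have saved.  An illustrative child-side
sequence (sketch; the MI fork_exit() path is assumed):

	cpu_critical_fork_exit();  /* td_critnest = 1; md_savecrit gets
				      eflags with PSL_I set */
	/* ... fork_exit() completes while still in the critical section ... */
	critical_exit();	   /* 1->0: intr_restore() enables interrupts */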