Merge master.kernel.org:/home/rmk/linux-2.6-arm
author     Linus Torvalds <torvalds@g5.osdl.org>
           Wed, 19 Apr 2006 01:01:19 +0000 (18:01 -0700)
committer  Linus Torvalds <torvalds@g5.osdl.org>
           Wed, 19 Apr 2006 01:01:19 +0000 (18:01 -0700)
* master.kernel.org:/home/rmk/linux-2.6-arm:
  [ARM] 3479/1: Corgi SSP: Fix potential concurrent access problem
  [ARM] 3478/1: SharpSL SCOOP: Fix potential build failure

40 files changed:
arch/i386/Kconfig.debug
arch/i386/kernel/acpi/boot.c
arch/powerpc/Kconfig
arch/powerpc/Makefile
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/head_64.S
arch/powerpc/kernel/idle.c
arch/powerpc/kernel/idle_6xx.S
arch/powerpc/kernel/idle_power4.S
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/rtas-proc.c
arch/powerpc/platforms/cell/spufs/switch.c
arch/powerpc/platforms/chrp/chrp.h
arch/powerpc/platforms/chrp/pci.c
arch/powerpc/platforms/chrp/setup.c
arch/powerpc/platforms/iseries/setup.c
arch/powerpc/platforms/pseries/eeh.c
arch/powerpc/platforms/pseries/rtasd.c
arch/ppc/syslib/ppc_sys.c
arch/x86_64/defconfig
arch/x86_64/ia32/ia32entry.S
arch/x86_64/kernel/traps.c
block/as-iosched.c
block/cfq-iosched.c
drivers/char/drm/drmP.h
drivers/char/drm/drm_drv.c
drivers/char/drm/drm_memory.c
drivers/char/drm/drm_memory.h
drivers/char/drm/drm_memory_debug.h
drivers/char/drm/drm_pci.c
drivers/char/drm/via_irq.c
fs/open.c
include/asm-powerpc/irq.h
include/asm-powerpc/thread_info.h
include/asm-x86_64/mmzone.h
include/asm-x86_64/unistd.h
init/Kconfig

index 6e97df6..c92191b 100644 (file)
@@ -81,4 +81,13 @@ config X86_MPPARSE
        depends on X86_LOCAL_APIC && !X86_VISWS
        default y
 
+config DOUBLEFAULT
+       default y
+       bool "Enable doublefault exception handler" if EMBEDDED
+       help
+          This option allows trapping of rare doublefault exceptions that
+          would otherwise cause a system to silently reboot. Disabling this
+          option saves about 4k and might cause you much additional grey
+          hair.
+
 endmenu
index 030a000..049a255 100644 (file)
@@ -168,7 +168,7 @@ int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
        unsigned long i;
        int config_size;
 
-       if (!phys_addr || !size || !cpu_has_apic)
+       if (!phys_addr || !size)
                return -EINVAL;
 
        mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size);
@@ -1102,6 +1102,9 @@ int __init acpi_boot_table_init(void)
        dmi_check_system(acpi_dmi_table);
 #endif
 
+       if (!cpu_has_apic)
+               return -ENODEV;
+
        /*
         * If acpi_disabled, bail out
         * One exception: acpi=ht continues far enough to enumerate LAPICs
index 167e70e..6729c98 100644 (file)
@@ -366,6 +366,7 @@ config PPC_PMAC64
        select U3_DART
        select MPIC_BROKEN_U3
        select GENERIC_TBSYNC
+       select PPC_970_NAP
        default y
 
 config PPC_PREP
@@ -383,6 +384,7 @@ config PPC_MAPLE
        select MPIC_BROKEN_U3
        select GENERIC_TBSYNC
        select PPC_UDBG_16550
+       select PPC_970_NAP
        default n
        help
           This option enables support for the Maple 970FX Evaluation Board.
@@ -457,6 +459,10 @@ config PPC_MPC106
        bool
        default n
 
+config PPC_970_NAP
+       bool
+       default n
+
 source "drivers/cpufreq/Kconfig"
 
 config CPU_FREQ_PMAC
index 6ec84d3..ed5b26a 100644 (file)
@@ -104,6 +104,10 @@ ifndef CONFIG_FSL_BOOKE
 CFLAGS         += -mstring
 endif
 
+ifeq ($(CONFIG_6xx),y)
+CFLAGS         += -mcpu=powerpc
+endif
+
 cpu-as-$(CONFIG_PPC64BRIDGE)   += -Wa,-mppc64bridge
 cpu-as-$(CONFIG_4xx)           += -Wa,-m405
 cpu-as-$(CONFIG_6xx)           += -Wa,-maltivec
index 0cc0995..803858e 100644 (file)
@@ -20,7 +20,7 @@ obj-$(CONFIG_PPC64)           += setup_64.o binfmt_elf32.o sys_ppc32.o \
                                   firmware.o sysfs.o
 obj-$(CONFIG_PPC64)            += vdso64/
 obj-$(CONFIG_ALTIVEC)          += vecemu.o vector.o
-obj-$(CONFIG_POWER4)           += idle_power4.o
+obj-$(CONFIG_PPC_970_NAP)      += idle_power4.o
 obj-$(CONFIG_PPC_OF)           += of_device.o prom_parse.o
 procfs-$(CONFIG_PPC64)         := proc_ppc64.o
 obj-$(CONFIG_PROC_FS)          += $(procfs-y)
index 54b48f3..8f85c5e 100644 (file)
@@ -91,6 +91,7 @@ int main(void)
 #endif /* CONFIG_PPC64 */
 
        DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+       DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
        DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
        DEFINE(TI_TASK, offsetof(struct thread_info, task));
 #ifdef CONFIG_PPC32
index b3a9794..8866fd2 100644 (file)
@@ -128,37 +128,36 @@ transfer_to_handler:
        stw     r12,4(r11)
 #endif
        b       3f
+
 2:     /* if from kernel, check interrupted DOZE/NAP mode and
          * check for stack overflow
          */
+       lwz     r9,THREAD_INFO-THREAD(r12)
+       cmplw   r1,r9                   /* if r1 <= current->thread_info */
+       ble-    stack_ovf               /* then the kernel stack overflowed */
+5:
 #ifdef CONFIG_6xx
-       mfspr   r11,SPRN_HID0
-       mtcr    r11
-BEGIN_FTR_SECTION
-       bt-     8,4f                    /* Check DOZE */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
-BEGIN_FTR_SECTION
-       bt-     9,4f                    /* Check NAP */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
+       tophys(r9,r9)                   /* check local flags */
+       lwz     r12,TI_LOCAL_FLAGS(r9)
+       mtcrf   0x01,r12
+       bt-     31-TLF_NAPPING,4f
 #endif /* CONFIG_6xx */
        .globl transfer_to_handler_cont
 transfer_to_handler_cont:
-       lwz     r11,THREAD_INFO-THREAD(r12)
-       cmplw   r1,r11                  /* if r1 <= current->thread_info */
-       ble-    stack_ovf               /* then the kernel stack overflowed */
 3:
        mflr    r9
        lwz     r11,0(r9)               /* virtual address of handler */
        lwz     r9,4(r9)                /* where to go when done */
-       FIX_SRR1(r10,r12)
        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r10
        mtlr    r9
        SYNC
        RFI                             /* jump to handler, enable MMU */
 
-#ifdef CONFIG_6xx      
-4:     b       power_save_6xx_restore
+#ifdef CONFIG_6xx
+4:     rlwinm  r12,r12,0,~_TLF_NAPPING
+       stw     r12,TI_LOCAL_FLAGS(r9)
+       b       power_save_6xx_restore
 #endif
 
 /*
@@ -167,10 +166,10 @@ transfer_to_handler_cont:
  */
 stack_ovf:
        /* sometimes we use a statically-allocated stack, which is OK. */
-       lis     r11,_end@h
-       ori     r11,r11,_end@l
-       cmplw   r1,r11
-       ble     3b                      /* r1 <= &_end is OK */
+       lis     r12,_end@h
+       ori     r12,r12,_end@l
+       cmplw   r1,r12
+       ble     5b                      /* r1 <= &_end is OK */
        SAVE_NVGPRS(r11)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lis     r1,init_thread_union@ha
index a5ae04a..b7d1404 100644 (file)
@@ -376,11 +376,28 @@ label##_common:                                           \
        bl      hdlr;                                   \
        b       .ret_from_except
 
+/*
+ * Like STD_EXCEPTION_COMMON, but for exceptions that can occur
+ * in the idle task and therefore need the special idle handling.
+ */
+#define STD_EXCEPTION_COMMON_IDLE(trap, label, hdlr)   \
+       .align  7;                                      \
+       .globl label##_common;                          \
+label##_common:                                                \
+       EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);      \
+       FINISH_NAP;                                     \
+       DISABLE_INTS;                                   \
+       bl      .save_nvgprs;                           \
+       addi    r3,r1,STACK_FRAME_OVERHEAD;             \
+       bl      hdlr;                                   \
+       b       .ret_from_except
+
 #define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr)   \
        .align  7;                                      \
        .globl label##_common;                          \
 label##_common:                                                \
        EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);      \
+       FINISH_NAP;                                     \
        DISABLE_INTS;                                   \
        bl      .ppc64_runlatch_on;                     \
        addi    r3,r1,STACK_FRAME_OVERHEAD;             \
@@ -388,6 +405,25 @@ label##_common:                                            \
        b       .ret_from_except_lite
 
 /*
+ * When the idle code in power4_idle puts the CPU into NAP mode,
+ * it has to do so in a loop, and relies on the external interrupt
+ * and decrementer interrupt entry code to get it out of the loop.
+ * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
+ * to signal that it is in the loop and needs help to get out.
+ */
+#ifdef CONFIG_PPC_970_NAP
+#define FINISH_NAP                             \
+BEGIN_FTR_SECTION                              \
+       clrrdi  r11,r1,THREAD_SHIFT;            \
+       ld      r9,TI_LOCAL_FLAGS(r11);         \
+       andi.   r10,r9,_TLF_NAPPING;            \
+       bnel    power4_fixup_nap;               \
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
+#else
+#define FINISH_NAP
+#endif
+
+/*
  * Start of pSeries system interrupt routines
  */
        . = 0x100
@@ -772,6 +808,7 @@ hardware_interrupt_iSeries_masked:
        .globl machine_check_common
 machine_check_common:
        EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
+       FINISH_NAP
        DISABLE_INTS
        bl      .save_nvgprs
        addi    r3,r1,STACK_FRAME_OVERHEAD
@@ -783,7 +820,7 @@ machine_check_common:
        STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
        STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
        STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
-       STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
+       STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
        STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
 #ifdef CONFIG_ALTIVEC
        STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
@@ -1034,6 +1071,7 @@ unrecov_slb:
        .globl hardware_interrupt_entry
 hardware_interrupt_common:
        EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
+       FINISH_NAP
 hardware_interrupt_entry:
        DISABLE_INTS
        bl      .ppc64_runlatch_on
@@ -1041,6 +1079,15 @@ hardware_interrupt_entry:
        bl      .do_IRQ
        b       .ret_from_except_lite
 
+#ifdef CONFIG_PPC_970_NAP
+power4_fixup_nap:
+       andc    r9,r9,r10
+       std     r9,TI_LOCAL_FLAGS(r11)
+       ld      r10,_LINK(r1)           /* make idle task do the */
+       std     r10,_NIP(r1)            /* equivalent of a blr */
+       blr
+#endif
+
        .align  7
        .globl alignment_common
 alignment_common:
index e9f321d..d491052 100644 (file)
@@ -50,9 +50,9 @@ void cpu_idle(void)
 
        set_thread_flag(TIF_POLLING_NRFLAG);
        while (1) {
-               ppc64_runlatch_off();
-
                while (!need_resched() && !cpu_should_die()) {
+                       ppc64_runlatch_off();
+
                        if (ppc_md.power_save) {
                                clear_thread_flag(TIF_POLLING_NRFLAG);
                                /*
index 12a4efb..b45fa0e 100644 (file)
@@ -22,8 +22,6 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 
-#undef DEBUG
-
        .text
 
 /*
@@ -109,12 +107,6 @@ BEGIN_FTR_SECTION
        dcbf    0,r4
        dcbf    0,r4
 END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
-#ifdef DEBUG
-       lis     r6,nap_enter_count@ha
-       lwz     r4,nap_enter_count@l(r6)
-       addi    r4,r4,1
-       stw     r4,nap_enter_count@l(r6)
-#endif 
 2:
 BEGIN_FTR_SECTION
        /* Go to low speed mode on some 750FX */
@@ -144,48 +136,42 @@ BEGIN_FTR_SECTION
        DSSALL
        sync
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+       rlwinm  r9,r1,0,0,31-THREAD_SHIFT       /* current thread_info */
+       lwz     r8,TI_LOCAL_FLAGS(r9)   /* set napping bit */
+       ori     r8,r8,_TLF_NAPPING      /* so when we take an exception */
+       stw     r8,TI_LOCAL_FLAGS(r9)   /* it will return to our caller */
        mfmsr   r7
        ori     r7,r7,MSR_EE
        oris    r7,r7,MSR_POW@h
-       sync
-       isync
+1:     sync
        mtmsr   r7
        isync
-       sync
-       blr
-       
+       b       1b
+
 /*
  * Return from NAP/DOZE mode, restore some CPU specific registers,
  * we are called with DR/IR still off and r2 containing physical
- * address of current.
+ * address of current.  R11 points to the exception frame (physical
+ * address).  We have to preserve r10.
  */
 _GLOBAL(power_save_6xx_restore)
-       mfspr   r11,SPRN_HID0
-       rlwinm. r11,r11,0,10,8  /* Clear NAP & copy NAP bit !state to cr1 EQ */
-       cror    4*cr1+eq,4*cr0+eq,4*cr0+eq
-BEGIN_FTR_SECTION
-       rlwinm  r11,r11,0,9,7   /* Clear DOZE */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
-       mtspr   SPRN_HID0, r11
+       lwz     r9,_LINK(r11)           /* interrupted in ppc6xx_idle: */
+       stw     r9,_NIP(r11)            /* make it do a blr */
 
-#ifdef DEBUG
-       beq     cr1,1f
-       lis     r11,(nap_return_count-KERNELBASE)@ha
-       lwz     r9,nap_return_count@l(r11)
-       addi    r9,r9,1
-       stw     r9,nap_return_count@l(r11)
-1:
-#endif
-       
-       rlwinm  r9,r1,0,0,18
-       tophys(r9,r9)
-       lwz     r11,TI_CPU(r9)
+#ifdef CONFIG_SMP
+       mfspr   r12,SPRN_SPRG3
+       lwz     r11,TI_CPU(r12)         /* get cpu number * 4 */
        slwi    r11,r11,2
+#else
+       li      r11,0
+#endif
        /* Todo make sure all these are in the same page
-        * and load r22 (@ha part + CPU offset) only once
+        * and load r11 (@ha part + CPU offset) only once
         */
 BEGIN_FTR_SECTION
-       beq     cr1,1f
+       mfspr   r9,SPRN_HID0
+       andis.  r9,r9,HID0_NAP@h
+       beq     1f
        addis   r9,r11,(nap_save_msscr0-KERNELBASE)@ha
        lwz     r9,nap_save_msscr0@l(r9)
        mtspr   SPRN_MSSCR0, r9
@@ -210,10 +196,3 @@ _GLOBAL(nap_save_hid1)
 
 _GLOBAL(powersave_lowspeed)
        .long   0
-
-#ifdef DEBUG
-_GLOBAL(nap_enter_count)
-       .space  4
-_GLOBAL(nap_return_count)
-       .space  4
-#endif
index 6dad1c0..d85c7c9 100644 (file)
@@ -35,12 +35,16 @@ BEGIN_FTR_SECTION
        DSSALL
        sync
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+       clrrdi  r9,r1,THREAD_SHIFT      /* current thread_info */
+       ld      r8,TI_LOCAL_FLAGS(r9)   /* set napping bit */
+       ori     r8,r8,_TLF_NAPPING      /* so when we take an exception */
+       std     r8,TI_LOCAL_FLAGS(r9)   /* it will return to our caller */
        mfmsr   r7
        ori     r7,r7,MSR_EE
        oris    r7,r7,MSR_POW@h
-       sync
+1:     sync
        isync
        mtmsrd  r7
        isync
-       sync
-       blr
+       b       1b
+
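
The idle_6xx.S and idle_power4.S hunks above make the idle code set _TLF_NAPPING in current_thread_info()->local_flags and then sit in a nap loop; the exception entry paths (the CONFIG_6xx block in entry_32.S and FINISH_NAP/power4_fixup_nap in head_64.S) clear the bit and rewrite the saved return address so the idle routine effectively returns to its caller. A minimal user-space sketch of that flag handshake, with simplified types and a plain function call standing in for the real exception -- not the kernel code itself:

    #include <stdio.h>

    #define TLF_NAPPING     0
    #define _TLF_NAPPING    (1UL << TLF_NAPPING)

    struct thread_info {
            unsigned long local_flags;      /* private flags, only touched by this task */
    };

    static struct thread_info idle_ti;

    /* Stand-in for FINISH_NAP / power4_fixup_nap: if the interrupted task was
     * napping, clear the bit so the nap loop below terminates. */
    static void fixup_nap(struct thread_info *ti)
    {
            if (ti->local_flags & _TLF_NAPPING)
                    ti->local_flags &= ~_TLF_NAPPING;
    }

    /* Stand-in for power_save_6xx / power4_idle: advertise that we are napping,
     * then loop until an "interrupt" clears the flag for us. */
    static void power_save(struct thread_info *ti)
    {
            ti->local_flags |= _TLF_NAPPING;
            while (ti->local_flags & _TLF_NAPPING)
                    fixup_nap(ti);          /* the real code waits for an exception here */
            printf("nap loop exited by the interrupt path\n");
    }

    int main(void)
    {
            power_save(&idle_ti);
            return 0;
    }
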
index bb5c950..57d560c 100644 (file)
@@ -272,18 +272,26 @@ unsigned int virt_irq_to_real_map[NR_IRQS];
  * Don't use virtual irqs 0, 1, 2 for devices.
  * The pcnet32 driver considers interrupt numbers < 2 to be invalid,
  * and 2 is the XICS IPI interrupt.
- * We limit virtual irqs to 17 less than NR_IRQS so that when we
- * offset them by 16 (to reserve the first 16 for ISA interrupts)
- * we don't end up with an interrupt number >= NR_IRQS.
+ * We limit virtual irqs to __irq_offset_value less than virt_irq_max so
+ * that when we offset them we don't end up with an interrupt
+ * number >= virt_irq_max.
  */
 #define MIN_VIRT_IRQ   3
-#define MAX_VIRT_IRQ   (NR_IRQS - NUM_ISA_INTERRUPTS - 1)
-#define NR_VIRT_IRQS   (MAX_VIRT_IRQ - MIN_VIRT_IRQ + 1)
+
+unsigned int virt_irq_max;
+static unsigned int max_virt_irq;
+static unsigned int nr_virt_irqs;
 
 void
 virt_irq_init(void)
 {
        int i;
+
+       if ((virt_irq_max == 0) || (virt_irq_max > (NR_IRQS - 1)))
+               virt_irq_max = NR_IRQS - 1;
+       max_virt_irq = virt_irq_max - __irq_offset_value;
+       nr_virt_irqs = max_virt_irq - MIN_VIRT_IRQ + 1;
+
        for (i = 0; i < NR_IRQS; i++)
                virt_irq_to_real_map[i] = UNDEFINED_IRQ;
 }
@@ -308,17 +316,17 @@ int virt_irq_create_mapping(unsigned int real_irq)
                return real_irq;
        }
 
-       /* map to a number between MIN_VIRT_IRQ and MAX_VIRT_IRQ */
+       /* map to a number between MIN_VIRT_IRQ and max_virt_irq */
        virq = real_irq;
-       if (virq > MAX_VIRT_IRQ)
-               virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;
+       if (virq > max_virt_irq)
+               virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ;
 
        /* search for this number or a free slot */
        first_virq = virq;
        while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) {
                if (virt_irq_to_real_map[virq] == real_irq)
                        return virq;
-               if (++virq > MAX_VIRT_IRQ)
+               if (++virq > max_virt_irq)
                        virq = MIN_VIRT_IRQ;
                if (virq == first_virq)
                        goto nospace;   /* oops, no free slots */
@@ -330,8 +338,8 @@ int virt_irq_create_mapping(unsigned int real_irq)
  nospace:
        if (!warned) {
                printk(KERN_CRIT "Interrupt table is full\n");
-               printk(KERN_CRIT "Increase NR_IRQS (currently %d) "
-                      "in your kernel sources and rebuild.\n", NR_IRQS);
+               printk(KERN_CRIT "Increase virt_irq_max (currently %d) "
+                      "in your kernel sources and rebuild.\n", virt_irq_max);
                warned = 1;
        }
        return NO_IRQ;
@@ -349,8 +357,8 @@ unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
 
        virq = real_irq;
 
-       if (virq > MAX_VIRT_IRQ)
-               virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;
+       if (virq > max_virt_irq)
+               virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ;
 
        first_virq = virq;
 
@@ -360,7 +368,7 @@ unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
 
                virq++;
 
-               if (virq >= MAX_VIRT_IRQ)
+               if (virq >= max_virt_irq)
                        virq = 0;
 
        } while (first_virq != virq);
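
The irq.c hunks above replace the compile-time MAX_VIRT_IRQ/NR_VIRT_IRQS limits with a runtime virt_irq_max (which the iSeries probe later caps at 255, since the hypervisor passes interrupt numbers in a u8). A self-contained sketch of the mapping search they implement, using an illustrative table size and placeholder constants rather than the kernel's real values:

    #include <stdio.h>

    #define NR_IRQS         32              /* illustrative, not the kernel's value */
    #define MIN_VIRT_IRQ    3
    #define UNDEFINED_IRQ   0xffffffffu
    #define NO_IRQ          (-1)

    static unsigned int virt_irq_to_real_map[NR_IRQS];
    static unsigned int max_virt_irq = NR_IRQS - 1;  /* virt_irq_max minus the ISA offset in the real code */
    static unsigned int nr_virt_irqs = (NR_IRQS - 1) - MIN_VIRT_IRQ + 1;

    static int virt_irq_create_mapping(unsigned int real_irq)
    {
            unsigned int virq, first_virq;

            /* fold large real irq numbers into [MIN_VIRT_IRQ, max_virt_irq] */
            virq = real_irq;
            if (virq > max_virt_irq)
                    virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ;

            /* probe for an existing mapping or a free slot, wrapping around */
            first_virq = virq;
            while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) {
                    if (virt_irq_to_real_map[virq] == real_irq)
                            return virq;
                    if (++virq > max_virt_irq)
                            virq = MIN_VIRT_IRQ;
                    if (virq == first_virq)
                            return NO_IRQ;  /* table full */
            }
            virt_irq_to_real_map[virq] = real_irq;
            return virq;
    }

    int main(void)
    {
            unsigned int i;

            for (i = 0; i < NR_IRQS; i++)
                    virt_irq_to_real_map[i] = UNDEFINED_IRQ;
            printf("real 0x500 -> virt %d\n", virt_irq_create_mapping(0x500));
            printf("real 0x500 -> virt %d\n", virt_irq_create_mapping(0x500));
            return 0;
    }
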
index d66c5e7..7e4d548 100644 (file)
@@ -1528,12 +1528,11 @@ static int __init prom_find_machine_type(void)
         *    non-IBM designs !
         *  - it has /rtas
         */
-       len = prom_getprop(_prom->root, "model",
+       len = prom_getprop(_prom->root, "device_type",
                           compat, sizeof(compat)-1);
        if (len <= 0)
                return PLATFORM_GENERIC;
-       compat[len] = 0;
-       if (strcmp(compat, "chrp"))
+       if (strncmp(compat, RELOC("chrp"), 4))
                return PLATFORM_GENERIC;
 
        /* Default to pSeries. We need to know if we are running LPAR */
index 456286c..9c9ad1f 100644 (file)
@@ -258,11 +258,11 @@ static int __init proc_rtas_init(void)
        struct proc_dir_entry *entry;
 
        if (!machine_is(pseries))
-               return 1;
+               return -ENODEV;
 
        rtas_node = of_find_node_by_name(NULL, "rtas");
        if (rtas_node == NULL)
-               return 1;
+               return -ENODEV;
 
        entry = create_proc_entry("ppc64/rtas/progress", S_IRUGO|S_IWUSR, NULL);
        if (entry)
index 97898d5..1726bfe 100644 (file)
@@ -1297,7 +1297,7 @@ static inline void setup_decr(struct spu_state *csa, struct spu *spu)
                cycles_t resume_time = get_cycles();
                cycles_t delta_time = resume_time - csa->suspend_time;
 
-               csa->lscsa->decr.slot[0] = delta_time;
+               csa->lscsa->decr.slot[0] -= delta_time;
        }
 }
 
index 63f0aee..996c287 100644 (file)
@@ -9,3 +9,4 @@ extern long chrp_time_init(void);
 
 extern void chrp_find_bridges(void);
 extern void chrp_event_scan(unsigned long);
+extern void chrp_pcibios_fixup(void);
index 8ef279a..ac22487 100644 (file)
@@ -23,6 +23,8 @@
 #include <asm/grackle.h>
 #include <asm/rtas.h>
 
+#include "chrp.h"
+
 /* LongTrail */
 void __iomem *gg2_pci_config_base;
 
@@ -314,6 +316,6 @@ chrp_find_bridges(void)
        }
 
        /* Do not fixup interrupts from OF tree on pegasos */
-       if (is_pegasos == 0)
-               ppc_md.pcibios_fixup = chrp_pcibios_fixup;
+       if (is_pegasos)
+               ppc_md.pcibios_fixup = NULL;
 }
index 23a2017..18d89f3 100644 (file)
@@ -440,8 +440,6 @@ void __init chrp_init_IRQ(void)
 
        if (_chrp_type == _CHRP_Pegasos)
                ppc_md.get_irq        = i8259_irq;
-       else
-               ppc_md.get_irq        = mpic_get_irq;
 
 #if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON)
        /* see if there is a keyboard in the device tree
@@ -528,26 +526,24 @@ static int __init chrp_probe(void)
        /* Assume we have an 8259... */
        __irq_offset_value = NUM_ISA_INTERRUPTS;
 
-       ppc_md.setup_arch     = chrp_setup_arch;
-       ppc_md.show_cpuinfo   = chrp_show_cpuinfo;
-
-       ppc_md.init_IRQ       = chrp_init_IRQ;
-       ppc_md.init           = chrp_init2;
-
-       ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
-
-       ppc_md.restart        = rtas_restart;
-       ppc_md.power_off      = rtas_power_off;
-       ppc_md.halt           = rtas_halt;
-
-       ppc_md.time_init      = chrp_time_init;
-       ppc_md.calibrate_decr = generic_calibrate_decr;
-
-       /* this may get overridden with rtas routines later... */
-       ppc_md.set_rtc_time   = chrp_set_rtc_time;
-       ppc_md.get_rtc_time   = chrp_get_rtc_time;
-
-#ifdef CONFIG_SMP
-       smp_ops = &chrp_smp_ops;
-#endif /* CONFIG_SMP */
+       return 1;
 }
+
+define_machine(chrp) {
+       .name                   = "CHRP",
+       .probe                  = chrp_probe,
+       .setup_arch             = chrp_setup_arch,
+       .init                   = chrp_init2,
+       .show_cpuinfo           = chrp_show_cpuinfo,
+       .init_IRQ               = chrp_init_IRQ,
+       .get_irq                = mpic_get_irq,
+       .pcibios_fixup          = chrp_pcibios_fixup,
+       .restart                = rtas_restart,
+       .power_off              = rtas_power_off,
+       .halt                   = rtas_halt,
+       .time_init              = chrp_time_init,
+       .set_rtc_time           = chrp_set_rtc_time,
+       .get_rtc_time           = chrp_get_rtc_time,
+       .calibrate_decr         = generic_calibrate_decr,
+       .phys_mem_access_prot   = pci_phys_mem_access_prot,
+};
index 6ce8a40..a6fd9be 100644 (file)
@@ -54,6 +54,7 @@
 #include <asm/iseries/hv_lp_event.h>
 #include <asm/iseries/lpar_map.h>
 #include <asm/udbg.h>
+#include <asm/irq.h>
 
 #include "naca.h"
 #include "setup.h"
@@ -684,6 +685,12 @@ static int __init iseries_probe(void)
        powerpc_firmware_features |= FW_FEATURE_ISERIES;
        powerpc_firmware_features |= FW_FEATURE_LPAR;
 
+       /*
+        * The Hypervisor only allows us up to 256 interrupt
+        * sources (the irq number is passed in a u8).
+        */
+       virt_irq_max = 255;
+
        return 1;
 }
 
index 780fb27..32eaddf 100644 (file)
@@ -957,8 +957,10 @@ static void eeh_remove_device(struct pci_dev *dev)
        pci_addr_cache_remove_device(dev);
 
        dn = pci_device_to_OF_node(dev);
-       PCI_DN(dn)->pcidev = NULL;
-       pci_dev_put (dev);
+       if (PCI_DN(dn)->pcidev) {
+               PCI_DN(dn)->pcidev = NULL;
+               pci_dev_put (dev);
+       }
 }
 
 void eeh_remove_bus_device(struct pci_dev *dev)
index fcc4d56..e0000ce 100644 (file)
@@ -488,7 +488,7 @@ static int __init rtas_init(void)
        /* No RTAS */
        if (rtas_token("event-scan") == RTAS_UNKNOWN_SERVICE) {
                printk(KERN_INFO "rtasd: no event-scan on system\n");
-               return 1;
+               return -ENODEV;
        }
 
        entry = create_proc_entry("ppc64/rtas/error_log", S_IRUSR, NULL);
index 60c724e..7662c4e 100644 (file)
@@ -156,12 +156,13 @@ void platform_notify_map(const struct platform_notify_dev_map *map,
        while (map->bus_id != NULL) {
                idx = -1;
                s = strrchr(dev->bus_id, '.');
-               if (s != NULL)
+               if (s != NULL) {
                        idx = (int)simple_strtol(s + 1, NULL, 10);
-               else
+                       len = s - dev->bus_id;
+               } else {
                        s = dev->bus_id;
-
-               len = s - dev->bus_id;
+                       len = strlen(dev->bus_id);
+               }
 
                if (!strncmp(dev->bus_id, map->bus_id, len)) {
                        pdev = container_of(dev, struct platform_device, dev);
index 3c45ec2..69db0c0 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.17-rc1
-# Mon Apr  3 16:11:14 2006
+# Linux kernel version: 2.6.17-rc1-git11
+# Sun Apr 16 07:22:36 2006
 #
 CONFIG_X86_64=y
 CONFIG_64BIT=y
@@ -57,6 +57,7 @@ CONFIG_FUTEX=y
 CONFIG_EPOLL=y
 CONFIG_SHMEM=y
 CONFIG_SLAB=y
+CONFIG_DOUBLEFAULT=y
 # CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 # CONFIG_SLOB is not set
@@ -121,6 +122,7 @@ CONFIG_PREEMPT_VOLUNTARY=y
 CONFIG_PREEMPT_BKL=y
 CONFIG_NUMA=y
 CONFIG_K8_NUMA=y
+CONFIG_NODES_SHIFT=6
 CONFIG_X86_64_ACPI_NUMA=y
 CONFIG_NUMA_EMU=y
 CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
@@ -544,7 +546,6 @@ CONFIG_SCSI_SATA_INTEL_COMBINED=y
 # CONFIG_SCSI_INIA100 is not set
 # CONFIG_SCSI_SYM53C8XX_2 is not set
 # CONFIG_SCSI_IPR is not set
-# CONFIG_SCSI_QLOGIC_FC is not set
 # CONFIG_SCSI_QLOGIC_1280 is not set
 # CONFIG_SCSI_QLA_FC is not set
 # CONFIG_SCSI_LPFC is not set
@@ -1045,9 +1046,7 @@ CONFIG_USB_HIDINPUT=y
 # CONFIG_USB_ACECAD is not set
 # CONFIG_USB_KBTAB is not set
 # CONFIG_USB_POWERMATE is not set
-# CONFIG_USB_MTOUCH is not set
-# CONFIG_USB_ITMTOUCH is not set
-# CONFIG_USB_EGALAX is not set
+# CONFIG_USB_TOUCHSCREEN is not set
 # CONFIG_USB_YEALINK is not set
 # CONFIG_USB_XPAD is not set
 # CONFIG_USB_ATI_REMOTE is not set
@@ -1118,6 +1117,14 @@ CONFIG_USB_MON=y
 # CONFIG_NEW_LEDS is not set
 
 #
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
 # InfiniBand support
 #
 # CONFIG_INFINIBAND is not set
index 5a98026..57fc37e 100644 (file)
@@ -694,4 +694,5 @@ ia32_sys_call_table:
        .quad compat_sys_get_robust_list
        .quad sys_splice
        .quad sys_sync_file_range
+       .quad sys_tee
 ia32_syscall_end:              
index 6bda322..2700b13 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/moduleparam.h>
 #include <linux/nmi.h>
 #include <linux/kprobes.h>
+#include <linux/kexec.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -433,6 +434,8 @@ void __kprobes __die(const char * str, struct pt_regs * regs, long err)
        printk(KERN_ALERT "RIP ");
        printk_address(regs->rip); 
        printk(" RSP <%016lx>\n", regs->rsp); 
+       if (kexec_should_crash(current))
+               crash_kexec(regs);
 }
 
 void die(const char * str, struct pt_regs * regs, long err)
@@ -455,6 +458,8 @@ void __kprobes die_nmi(char *str, struct pt_regs *regs)
         */
        printk(str, safe_smp_processor_id());
        show_registers(regs);
+       if (kexec_should_crash(current))
+               crash_kexec(regs);
        if (panic_on_timeout || panic_on_oops)
                panic("nmi watchdog");
        printk("console shuts up ...\n");
index 296708c..e25a5d7 100644 (file)
@@ -1844,9 +1844,10 @@ static void __exit as_exit(void)
        DECLARE_COMPLETION(all_gone);
        elv_unregister(&iosched_as);
        ioc_gone = &all_gone;
-       barrier();
+       /* ioc_gone's update must be visible before reading ioc_count */
+       smp_wmb();
        if (atomic_read(&ioc_count))
-               complete(ioc_gone);
+               wait_for_completion(ioc_gone);
        synchronize_rcu();
        kmem_cache_destroy(arq_pool);
 }
index 67d446d..2540dfa 100644 (file)
@@ -1472,19 +1472,37 @@ out:
        return cfqq;
 }
 
+static void
+cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
+{
+       read_lock(&cfq_exit_lock);
+       rb_erase(&cic->rb_node, &ioc->cic_root);
+       read_unlock(&cfq_exit_lock);
+       kmem_cache_free(cfq_ioc_pool, cic);
+       atomic_dec(&ioc_count);
+}
+
 static struct cfq_io_context *
 cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 {
-       struct rb_node *n = ioc->cic_root.rb_node;
+       struct rb_node *n;
        struct cfq_io_context *cic;
-       void *key = cfqd;
+       void *k, *key = cfqd;
 
+restart:
+       n = ioc->cic_root.rb_node;
        while (n) {
                cic = rb_entry(n, struct cfq_io_context, rb_node);
+               /* ->key must be copied to avoid race with cfq_exit_queue() */
+               k = cic->key;
+               if (unlikely(!k)) {
+                       cfq_drop_dead_cic(ioc, cic);
+                       goto restart;
+               }
 
-               if (key < cic->key)
+               if (key < k)
                        n = n->rb_left;
-               else if (key > cic->key)
+               else if (key > k)
                        n = n->rb_right;
                else
                        return cic;
@@ -1497,29 +1515,37 @@ static inline void
 cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
             struct cfq_io_context *cic)
 {
-       struct rb_node **p = &ioc->cic_root.rb_node;
-       struct rb_node *parent = NULL;
+       struct rb_node **p;
+       struct rb_node *parent;
        struct cfq_io_context *__cic;
-
-       read_lock(&cfq_exit_lock);
+       void *k;
 
        cic->ioc = ioc;
        cic->key = cfqd;
 
        ioc->set_ioprio = cfq_ioc_set_ioprio;
-
+restart:
+       parent = NULL;
+       p = &ioc->cic_root.rb_node;
        while (*p) {
                parent = *p;
                __cic = rb_entry(parent, struct cfq_io_context, rb_node);
+               /* ->key must be copied to avoid race with cfq_exit_queue() */
+               k = __cic->key;
+               if (unlikely(!k)) {
+                       cfq_drop_dead_cic(ioc, cic);
+                       goto restart;
+               }
 
-               if (cic->key < __cic->key)
+               if (cic->key < k)
                        p = &(*p)->rb_left;
-               else if (cic->key > __cic->key)
+               else if (cic->key > k)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }
 
+       read_lock(&cfq_exit_lock);
        rb_link_node(&cic->rb_node, parent, p);
        rb_insert_color(&cic->rb_node, &ioc->cic_root);
        list_add(&cic->queue_list, &cfqd->cic_list);
@@ -2439,9 +2465,10 @@ static void __exit cfq_exit(void)
        DECLARE_COMPLETION(all_gone);
        elv_unregister(&iosched_cfq);
        ioc_gone = &all_gone;
-       barrier();
+       /* ioc_gone's update must be visible before reading ioc_count */
+       smp_wmb();
        if (atomic_read(&ioc_count))
-               complete(ioc_gone);
+               wait_for_completion(ioc_gone);
        synchronize_rcu();
        cfq_slab_kill();
 }
index edc72a6..e1aadae 100644 (file)
@@ -815,8 +815,6 @@ extern int drm_mem_info(char *buf, char **start, off_t offset,
 extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
 extern void *drm_ioremap(unsigned long offset, unsigned long size,
                         drm_device_t * dev);
-extern void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
-                                drm_device_t * dev);
 extern void drm_ioremapfree(void *pt, unsigned long size, drm_device_t * dev);
 
 extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type);
@@ -1022,11 +1020,13 @@ static __inline__ void drm_core_ioremap(struct drm_map *map,
        map->handle = drm_ioremap(map->offset, map->size, dev);
 }
 
+#if 0
 static __inline__ void drm_core_ioremap_nocache(struct drm_map *map,
                                                struct drm_device *dev)
 {
        map->handle = drm_ioremap_nocache(map->offset, map->size, dev);
 }
+#endif  /*  0  */
 
 static __inline__ void drm_core_ioremapfree(struct drm_map *map,
                                            struct drm_device *dev)
index dc6bbe8..3c0b882 100644 (file)
@@ -75,8 +75,8 @@ static drm_ioctl_desc_t drm_ioctls[] = {
        [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, DRM_AUTH},
 
-       [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
-       [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
index dddf8de..7e3318e 100644 (file)
@@ -80,6 +80,71 @@ void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
 }
 
 #if __OS_HAS_AGP
+/*
+ * Find the drm_map that covers the range [offset, offset+size).
+ */
+static drm_map_t *drm_lookup_map(unsigned long offset,
+                                unsigned long size, drm_device_t * dev)
+{
+       struct list_head *list;
+       drm_map_list_t *r_list;
+       drm_map_t *map;
+
+       list_for_each(list, &dev->maplist->head) {
+               r_list = (drm_map_list_t *) list;
+               map = r_list->map;
+               if (!map)
+                       continue;
+               if (map->offset <= offset
+                   && (offset + size) <= (map->offset + map->size))
+                       return map;
+       }
+       return NULL;
+}
+
+static void *agp_remap(unsigned long offset, unsigned long size,
+                      drm_device_t * dev)
+{
+       unsigned long *phys_addr_map, i, num_pages =
+           PAGE_ALIGN(size) / PAGE_SIZE;
+       struct drm_agp_mem *agpmem;
+       struct page **page_map;
+       void *addr;
+
+       size = PAGE_ALIGN(size);
+
+#ifdef __alpha__
+       offset -= dev->hose->mem_space->start;
+#endif
+
+       for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
+               if (agpmem->bound <= offset
+                   && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
+                   (offset + size))
+                       break;
+       if (!agpmem)
+               return NULL;
+
+       /*
+        * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
+        * the CPU do not get remapped by the GART.  We fix this by using the kernel's
+        * page-table instead (that's probably faster anyhow...).
+        */
+       /* note: use vmalloc() because num_pages could be large... */
+       page_map = vmalloc(num_pages * sizeof(struct page *));
+       if (!page_map)
+               return NULL;
+
+       phys_addr_map =
+           agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
+       for (i = 0; i < num_pages; ++i)
+               page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
+       addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
+       vfree(page_map);
+
+       return addr;
+}
+
 /** Wrapper around agp_allocate_memory() */
 DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type)
 {
@@ -103,5 +168,74 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
 {
        return drm_agp_unbind_memory(handle);
 }
+
+#else  /*  __OS_HAS_AGP  */
+
+static inline drm_map_t *drm_lookup_map(unsigned long offset,
+                                       unsigned long size, drm_device_t * dev)
+{
+       return NULL;
+}
+
+static inline void *agp_remap(unsigned long offset, unsigned long size,
+                             drm_device_t * dev)
+{
+       return NULL;
+}
+
 #endif                         /* agp */
+
+void *drm_ioremap(unsigned long offset, unsigned long size,
+                               drm_device_t * dev)
+{
+       if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
+               drm_map_t *map = drm_lookup_map(offset, size, dev);
+
+               if (map && map->type == _DRM_AGP)
+                       return agp_remap(offset, size, dev);
+       }
+       return ioremap(offset, size);
+}
+EXPORT_SYMBOL(drm_ioremap);
+
+#if 0
+void *drm_ioremap_nocache(unsigned long offset,
+                                       unsigned long size, drm_device_t * dev)
+{
+       if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
+               drm_map_t *map = drm_lookup_map(offset, size, dev);
+
+               if (map && map->type == _DRM_AGP)
+                       return agp_remap(offset, size, dev);
+       }
+       return ioremap_nocache(offset, size);
+}
+#endif  /*  0  */
+
+void drm_ioremapfree(void *pt, unsigned long size,
+                                  drm_device_t * dev)
+{
+       /*
+        * This is a bit ugly.  It would be much cleaner if the DRM API would use separate
+        * routines for handling mappings in the AGP space.  Hopefully this can be done in
+        * a future revision of the interface...
+        */
+       if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture
+           && ((unsigned long)pt >= VMALLOC_START
+               && (unsigned long)pt < VMALLOC_END)) {
+               unsigned long offset;
+               drm_map_t *map;
+
+               offset = drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK);
+               map = drm_lookup_map(offset, size, dev);
+               if (map && map->type == _DRM_AGP) {
+                       vunmap(pt);
+                       return;
+               }
+       }
+
+       iounmap(pt);
+}
+EXPORT_SYMBOL(drm_ioremapfree);
+
 #endif                         /* debug_memory */
index 3732a61..714d9ae 100644 (file)
 # endif
 #endif
 
-/*
- * Find the drm_map that covers the range [offset, offset+size).
- */
-static inline drm_map_t *drm_lookup_map(unsigned long offset,
-                                       unsigned long size, drm_device_t * dev)
-{
-       struct list_head *list;
-       drm_map_list_t *r_list;
-       drm_map_t *map;
-
-       list_for_each(list, &dev->maplist->head) {
-               r_list = (drm_map_list_t *) list;
-               map = r_list->map;
-               if (!map)
-                       continue;
-               if (map->offset <= offset
-                   && (offset + size) <= (map->offset + map->size))
-                       return map;
-       }
-       return NULL;
-}
-
-static inline void *agp_remap(unsigned long offset, unsigned long size,
-                             drm_device_t * dev)
-{
-       unsigned long *phys_addr_map, i, num_pages =
-           PAGE_ALIGN(size) / PAGE_SIZE;
-       struct drm_agp_mem *agpmem;
-       struct page **page_map;
-       void *addr;
-
-       size = PAGE_ALIGN(size);
-
-#ifdef __alpha__
-       offset -= dev->hose->mem_space->start;
-#endif
-
-       for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
-               if (agpmem->bound <= offset
-                   && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
-                   (offset + size))
-                       break;
-       if (!agpmem)
-               return NULL;
-
-       /*
-        * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
-        * the CPU do not get remapped by the GART.  We fix this by using the kernel's
-        * page-table instead (that's probably faster anyhow...).
-        */
-       /* note: use vmalloc() because num_pages could be large... */
-       page_map = vmalloc(num_pages * sizeof(struct page *));
-       if (!page_map)
-               return NULL;
-
-       phys_addr_map =
-           agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
-       for (i = 0; i < num_pages; ++i)
-               page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
-       addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
-       vfree(page_map);
-
-       return addr;
-}
-
 static inline unsigned long drm_follow_page(void *vaddr)
 {
        pgd_t *pgd = pgd_offset_k((unsigned long)vaddr);
@@ -133,18 +68,6 @@ static inline unsigned long drm_follow_page(void *vaddr)
 
 #else                          /* __OS_HAS_AGP */
 
-static inline drm_map_t *drm_lookup_map(unsigned long offset,
-                                       unsigned long size, drm_device_t * dev)
-{
-       return NULL;
-}
-
-static inline void *agp_remap(unsigned long offset, unsigned long size,
-                             drm_device_t * dev)
-{
-       return NULL;
-}
-
 static inline unsigned long drm_follow_page(void *vaddr)
 {
        return 0;
@@ -152,51 +75,8 @@ static inline unsigned long drm_follow_page(void *vaddr)
 
 #endif
 
-static inline void *drm_ioremap(unsigned long offset, unsigned long size,
-                               drm_device_t * dev)
-{
-       if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
-               drm_map_t *map = drm_lookup_map(offset, size, dev);
-
-               if (map && map->type == _DRM_AGP)
-                       return agp_remap(offset, size, dev);
-       }
-       return ioremap(offset, size);
-}
-
-static inline void *drm_ioremap_nocache(unsigned long offset,
-                                       unsigned long size, drm_device_t * dev)
-{
-       if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
-               drm_map_t *map = drm_lookup_map(offset, size, dev);
-
-               if (map && map->type == _DRM_AGP)
-                       return agp_remap(offset, size, dev);
-       }
-       return ioremap_nocache(offset, size);
-}
-
-static inline void drm_ioremapfree(void *pt, unsigned long size,
-                                  drm_device_t * dev)
-{
-       /*
-        * This is a bit ugly.  It would be much cleaner if the DRM API would use separate
-        * routines for handling mappings in the AGP space.  Hopefully this can be done in
-        * a future revision of the interface...
-        */
-       if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture
-           && ((unsigned long)pt >= VMALLOC_START
-               && (unsigned long)pt < VMALLOC_END)) {
-               unsigned long offset;
-               drm_map_t *map;
-
-               offset = drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK);
-               map = drm_lookup_map(offset, size, dev);
-               if (map && map->type == _DRM_AGP) {
-                       vunmap(pt);
-                       return;
-               }
-       }
+void *drm_ioremap(unsigned long offset, unsigned long size,
+                               drm_device_t * dev);
 
-       iounmap(pt);
-}
+void drm_ioremapfree(void *pt, unsigned long size,
+                                  drm_device_t * dev);
index 7868341..6543b9a 100644 (file)
@@ -229,6 +229,7 @@ void *drm_ioremap (unsigned long offset, unsigned long size,
        return pt;
 }
 
+#if 0
 void *drm_ioremap_nocache (unsigned long offset, unsigned long size,
                            drm_device_t * dev) {
        void *pt;
@@ -251,6 +252,7 @@ void *drm_ioremap_nocache (unsigned long offset, unsigned long size,
        spin_unlock(&drm_mem_lock);
        return pt;
 }
+#endif  /*  0  */
 
 void drm_ioremapfree (void *pt, unsigned long size, drm_device_t * dev) {
        int alloc_count;
index b28ca9c..86a0f1c 100644 (file)
@@ -37,6 +37,7 @@
  */
 
 #include <linux/pci.h>
+#include <linux/dma-mapping.h>
 #include "drmP.h"
 
 /**********************************************************************/
index 6152415..c33d068 100644 (file)
@@ -196,9 +196,9 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
 {
        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
        unsigned int cur_irq_sequence;
-       drm_via_irq_t *cur_irq = dev_priv->via_irqs;
+       drm_via_irq_t *cur_irq;
        int ret = 0;
-       maskarray_t *masks = dev_priv->irq_masks;
+       maskarray_t *masks;
        int real_irq;
 
        DRM_DEBUG("%s\n", __FUNCTION__);
@@ -221,8 +221,9 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
                          __FUNCTION__, irq);
                return DRM_ERR(EINVAL);
        }
-       
-       cur_irq += real_irq;
+
+       masks = dev_priv->irq_masks;
+       cur_irq = dev_priv->via_irqs + real_irq;
 
        if (masks[real_irq][2] && !force_sequence) {
                DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
@@ -247,11 +248,12 @@ void via_driver_irq_preinstall(drm_device_t * dev)
 {
        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
        u32 status;
-       drm_via_irq_t *cur_irq = dev_priv->via_irqs;
+       drm_via_irq_t *cur_irq;
        int i;
 
        DRM_DEBUG("driver_irq_preinstall: dev_priv: %p\n", dev_priv);
        if (dev_priv) {
+               cur_irq = dev_priv->via_irqs;
 
                dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
                dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;
index c32c89d..53ec28c 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -331,7 +331,10 @@ out:
 
 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length)
 {
-       return do_sys_ftruncate(fd, length, 1);
+       long ret = do_sys_ftruncate(fd, length, 1);
+       /* avoid REGPARM breakage on x86: */
+       prevent_tail_call(ret);
+       return ret;
 }
 
 /* LFS versions of truncate are only needed on 32 bit machines */
@@ -343,7 +346,10 @@ asmlinkage long sys_truncate64(const char __user * path, loff_t length)
 
 asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length)
 {
-       return do_sys_ftruncate(fd, length, 0);
+       long ret = do_sys_ftruncate(fd, length, 0);
+       /* avoid REGPARM breakage on x86: */
+       prevent_tail_call(ret);
+       return ret;
 }
 #endif
 
@@ -1093,20 +1099,30 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
 
 asmlinkage long sys_open(const char __user *filename, int flags, int mode)
 {
+       long ret;
+
        if (force_o_largefile())
                flags |= O_LARGEFILE;
 
-       return do_sys_open(AT_FDCWD, filename, flags, mode);
+       ret = do_sys_open(AT_FDCWD, filename, flags, mode);
+       /* avoid REGPARM breakage on x86: */
+       prevent_tail_call(ret);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(sys_open);
 
 asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
                           int mode)
 {
+       long ret;
+
        if (force_o_largefile())
                flags |= O_LARGEFILE;
 
-       return do_sys_open(dfd, filename, flags, mode);
+       ret = do_sys_open(dfd, filename, flags, mode);
+       /* avoid REGPARM breakage on x86: */
+       prevent_tail_call(ret);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(sys_openat);
 
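
The fs/open.c hunks above all follow the same pattern: each syscall keeps its result in a local and runs it through prevent_tail_call() before returning, so the compiler cannot turn the do_sys_*() call into a tail call, which with REGPARM on x86 could let the compiler reuse the caller's on-stack argument area that the syscall entry assembly still expects to be intact. A rough user-space illustration of the idea; the empty asm here is only a stand-in for the arch-provided prevent_tail_call() macro, not its actual definition:

    #include <stdio.h>

    static long do_work(long x)
    {
            return x * 2;
    }

    static long wrapper(long x)
    {
            long ret = do_work(x);

            /* stand-in for prevent_tail_call(ret): keeps the result live past
             * the call, so it cannot become a sibling/tail call */
            __asm__ ("" : "+r" (ret));
            return ret;
    }

    int main(void)
    {
            printf("%ld\n", wrapper(21));
            return 0;
    }
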
index 51f87d9..7bc6d73 100644 (file)
  */
 extern unsigned int virt_irq_to_real_map[NR_IRQS];
 
+/* The maximum virtual IRQ number that we support.  This
+ * can be set by the platform and will be reduced by the
+ * value of __irq_offset_value.  It defaults to and is
+ * capped by (NR_IRQS - 1).
+ */
+extern unsigned int virt_irq_max;
+
 /* Create a mapping for a real_irq if it doesn't already exist.
  * Return the virtual irq as a convenience.
  */
index ffc7462..88b553c 100644 (file)
@@ -37,6 +37,8 @@ struct thread_info {
        int             preempt_count;          /* 0 => preemptable,
                                                   <0 => BUG */
        struct restart_block restart_block;
+       unsigned long   local_flags;            /* private flags for thread */
+
        /* low level flags - has atomic operations done on it */
        unsigned long   flags ____cacheline_aligned_in_smp;
 };
@@ -143,6 +145,12 @@ static inline struct thread_info *current_thread_info(void)
                                 _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK)
 #define _TIF_PERSYSCALL_MASK   (_TIF_RESTOREALL|_TIF_NOERROR)
 
+/* Bits in local_flags */
+/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
+#define TLF_NAPPING            0       /* idle thread enabled NAP mode */
+
+#define _TLF_NAPPING           (1 << TLF_NAPPING)
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_POWERPC_THREAD_INFO_H */
index 6b18cd8..6944e71 100644 (file)
@@ -12,7 +12,8 @@
 
 #include <asm/smp.h>
 
-#define NODEMAPSIZE 0xfff
+/* Should really switch to dynamic allocation at some point */
+#define NODEMAPSIZE 0x4fff
 
 /* Simple perfect hash to map physical addresses to node numbers */
 struct memnode {
index d86494e..98c36ea 100644 (file)
@@ -613,8 +613,10 @@ __SYSCALL(__NR_get_robust_list, sys_get_robust_list)
 __SYSCALL(__NR_splice, sys_splice)
 #define __NR_tee               276
 __SYSCALL(__NR_tee, sys_tee)
+#define __NR_sync_file_range   277
+__SYSCALL(__NR_sync_file_range, sys_sync_file_range)
 
-#define __NR_syscall_max __NR_tee
+#define __NR_syscall_max __NR_sync_file_range
 
 #ifndef __NO_STUBS
 
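
The unistd.h hunk above wires sys_sync_file_range up as syscall number 277 on x86_64 (the earlier ia32entry.S hunk adds sys_tee to the 32-bit compat table). A minimal caller, assuming a glibc new enough to expose the wrapper; on older userlands the same call can be made through syscall(__NR_sync_file_range, ...):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/tmp/sync_file_range_demo", O_CREAT | O_WRONLY, 0644);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, "hello\n", 6) != 6)
                    perror("write");

            /* start write-out of the first 4 KiB and wait for it to finish */
            if (sync_file_range(fd, 0, 4096,
                                SYNC_FILE_RANGE_WRITE | SYNC_FILE_RANGE_WAIT_AFTER) != 0)
                    perror("sync_file_range");

            close(fd);
            return 0;
    }
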
index f1bc2f0..3b36a1d 100644 (file)
@@ -374,15 +374,6 @@ config SLAB
          SLOB is more space efficient but does not scale well and is
          more susceptible to fragmentation.
 
-config DOUBLEFAULT
-       default y
-       bool "Enable doublefault exception handler" if EMBEDDED && X86_32
-       help
-          This option allows trapping of rare doublefault exceptions that
-          would otherwise cause a system to silently reboot. Disabling this
-          option saves about 4k and might cause you much additional grey
-          hair.
-
 endmenu                # General setup
 
 config TINY_SHMEM