powerpc/spufs: synchronize interaction between spu exception handling and time slicing
author Luke Browning <lukebrowning@us.ibm.com>
Fri, 13 Jun 2008 04:17:35 +0000 (14:17 +1000)
committer Jeremy Kerr <jk@ozlabs.org>
Mon, 16 Jun 2008 04:35:01 +0000 (14:35 +1000)
Time slicing can occur at the same time as spu exception handling,
resulting in the wakeup of the wrong thread.

This change uses the spu's register_lock to enforce synchronization
between bind/unbind and spu exception handling so that they are
mutually exclusive.

Signed-off-by: Luke Browning <lukebrowning@us.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
arch/powerpc/platforms/cell/spu_base.c
arch/powerpc/platforms/cell/spufs/sched.c

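The race, concretely: the class 0/1/2 interrupt handlers used to drop
register_lock before invoking the context callbacks, so a time slice
running bind/unbind in parallel could clear or repoint the callbacks
between the status read and the call, and the wakeup went to the wrong
thread. The fix makes both paths serialize on the spu's register_lock.
A minimal sketch of the resulting model (abbreviated, not the literal
patch; spu_irq_classN stands in for the real class 0/1/2 handlers):

static irqreturn_t spu_irq_classN(int irq, void *data)
{
        struct spu *spu = data;

        spin_lock(&spu->register_lock);
        /* read and acknowledge the class-N interrupt status ... */

        /*
         * The callback now runs with the lock held, so a concurrent
         * unbind cannot clear or switch it between the status read
         * and the call.
         */
        spu->stop_callback(spu, 0);

        spin_unlock(&spu->register_lock);
        return IRQ_HANDLED;
}

The hunks below apply exactly this shape to the three handlers, and
give bind/unbind the matching locked regions.
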
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 96b5f0f..78f905b 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -219,15 +219,25 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
 static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 {
+       int ret;
+
        pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);
 
-       /* Handle kernel space hash faults immediately.
-          User hash faults need to be deferred to process context. */
-       if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
-           && REGION_ID(ea) != USER_REGION_ID
-           && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
-               spu_restart_dma(spu);
-               return 0;
+       /*
+        * Handle kernel space hash faults immediately. User hash
+        * faults need to be deferred to process context.
+        */
+       if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
+           (REGION_ID(ea) != USER_REGION_ID)) {
+
+               spin_unlock(&spu->register_lock);
+               ret = hash_page(ea, _PAGE_PRESENT, 0x300);
+               spin_lock(&spu->register_lock);
+
+               if (!ret) {
+                       spu_restart_dma(spu);
+                       return 0;
+               }
        }
 
        spu->class_1_dar = ea;
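
One subtlety in the hunk above: hash_page() used to run after the
handler had already dropped register_lock; now that the class 1 handler
holds the lock for its whole body, the lock must be dropped explicitly
around the call (hash_page() does real hash-table work and is
presumably not safe to run under this spinlock) and retaken before any
spu state is touched. The shape, annotated:

        /* Sketch of the drop/retake pattern used above. Anything read
         * before the unlock may be stale once the lock is reacquired;
         * here only the call's own return value is consumed. 0x300 is
         * the powerpc data storage interrupt vector. */
        spin_unlock(&spu->register_lock);
        ret = hash_page(ea, _PAGE_PRESENT, 0x300);
        spin_lock(&spu->register_lock);

        if (!ret) {
                spu_restart_dma(spu);   /* back under the lock */
                return 0;
        }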
@@ -325,14 +335,12 @@ spu_irq_class_0(int irq, void *data)
 
        spu->class_0_pending |= stat;
        spu->class_0_dar = spu_mfc_dar_get(spu);
-       spin_unlock(&spu->register_lock);
-
        spu->stop_callback(spu, 0);
-
        spu->class_0_pending = 0;
        spu->class_0_dar = 0;
 
        spu_int_stat_clear(spu, 0, stat);
+       spin_unlock(&spu->register_lock);
 
        return IRQ_HANDLED;
 }
@@ -355,13 +363,12 @@ spu_irq_class_1(int irq, void *data)
                spu_mfc_dsisr_set(spu, 0ul);
        spu_int_stat_clear(spu, 1, stat);
 
-       if (stat & CLASS1_SEGMENT_FAULT_INTR)
-               __spu_trap_data_seg(spu, dar);
-
-       spin_unlock(&spu->register_lock);
        pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
                        dar, dsisr);
 
+       if (stat & CLASS1_SEGMENT_FAULT_INTR)
+               __spu_trap_data_seg(spu, dar);
+
        if (stat & CLASS1_STORAGE_FAULT_INTR)
                __spu_trap_data_map(spu, dar, dsisr);
 
@@ -374,6 +381,8 @@ spu_irq_class_1(int irq, void *data)
        spu->class_1_dsisr = 0;
        spu->class_1_dar = 0;
 
+       spin_unlock(&spu->register_lock);
+
        return stat ? IRQ_HANDLED : IRQ_NONE;
 }
 
@@ -392,14 +401,12 @@ spu_irq_class_2(int irq, void *data)
        mask = spu_int_mask_get(spu, 2);
        /* ignore interrupts we're not waiting for */
        stat &= mask;
-
        /* mailbox interrupts are level triggered. mask them now before
         * acknowledging */
        if (stat & mailbox_intrs)
                spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
        /* acknowledge all interrupts before the callbacks */
        spu_int_stat_clear(spu, 2, stat);
-       spin_unlock(&spu->register_lock);
 
        pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
 
@@ -419,6 +426,9 @@ spu_irq_class_2(int irq, void *data)
                spu->wbox_callback(spu);
 
        spu->stats.class2_intr++;
+
+       spin_unlock(&spu->register_lock);
+
        return stat ? IRQ_HANDLED : IRQ_NONE;
 }
 
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 745dd51..cd72567 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -230,19 +230,23 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
        ctx->stats.slb_flt_base = spu->stats.slb_flt;
        ctx->stats.class2_intr_base = spu->stats.class2_intr;
 
+       spu_associate_mm(spu, ctx->owner);
+
+       spin_lock_irq(&spu->register_lock);
        spu->ctx = ctx;
        spu->flags = 0;
        ctx->spu = spu;
        ctx->ops = &spu_hw_ops;
        spu->pid = current->pid;
        spu->tgid = current->tgid;
-       spu_associate_mm(spu, ctx->owner);
        spu->ibox_callback = spufs_ibox_callback;
        spu->wbox_callback = spufs_wbox_callback;
        spu->stop_callback = spufs_stop_callback;
        spu->mfc_callback = spufs_mfc_callback;
-       mb();
+       spin_unlock_irq(&spu->register_lock);
+
        spu_unmap_mappings(ctx);
+
        spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
        spu_restore(&ctx->csa, spu);
        spu->timestamp = jiffies;
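
Note the disappearing mb() in this hunk: the explicit barrier is
subsumed by the new locking. spin_unlock_irq() has release semantics,
ordering all of the stores above it before the lock can be observed
free, and the handlers' spin_lock() pairs with it as the acquire, so a
handler that takes the lock sees either none or all of the bind-time
stores. Schematically (a sketch, not the patch itself):

/* writer: bind, thread context */
spin_lock_irq(&spu->register_lock);
spu->ctx = ctx;
spu->stop_callback = spufs_stop_callback;
spin_unlock_irq(&spu->register_lock);   /* release: publishes the
                                           stores above */

/* reader: interrupt handler */
spin_lock(&spu->register_lock);         /* acquire: pairs with the
                                           release */
spu->stop_callback(spu, 0);             /* sees a consistent context */
spin_unlock(&spu->register_lock);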
@@ -423,18 +427,22 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
        spu_unmap_mappings(ctx);
        spu_save(&ctx->csa, spu);
        spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);
+
+       spin_lock_irq(&spu->register_lock);
        spu->timestamp = jiffies;
        ctx->state = SPU_STATE_SAVED;
        spu->ibox_callback = NULL;
        spu->wbox_callback = NULL;
        spu->stop_callback = NULL;
        spu->mfc_callback = NULL;
-       spu_associate_mm(spu, NULL);
        spu->pid = 0;
        spu->tgid = 0;
        ctx->ops = &spu_backing_ops;
        spu->flags = 0;
        spu->ctx = NULL;
+       spin_unlock_irq(&spu->register_lock);
+
+       spu_associate_mm(spu, NULL);
 
        ctx->stats.slb_flt +=
                (spu->stats.slb_flt - ctx->stats.slb_flt_base);
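
The unbind side mirrors this: everything the interrupt handlers read
(ctx, pid, the four callbacks) is cleared inside one locked region, so
a handler observes either the fully bound or the fully unbound spu,
never a half-torn-down context. Thread context must use the
irq-disabling lock variant, since the same lock is taken from hard
interrupt context; with the plain variant, an SPU interrupt landing on
this CPU while the lock is held would self-deadlock. Abbreviated
sketch:

static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
        spu_save(&ctx->csa, spu);               /* outside the lock */

        spin_lock_irq(&spu->register_lock);     /* excludes spu_irq_class_* */
        spu->ibox_callback = NULL;
        spu->wbox_callback = NULL;
        spu->stop_callback = NULL;
        spu->mfc_callback = NULL;
        spu->ctx = NULL;
        spin_unlock_irq(&spu->register_lock);

        spu_associate_mm(spu, NULL);            /* kept outside the
                                                   locked region */
}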