drm/nouveau/mc/tu102: Fix MMU fault interrupts on Turing
author Alistair Popple <apopple@nvidia.com>
Fri, 30 Oct 2020 02:36:41 +0000 (13:36 +1100)
committer Ben Skeggs <bskeggs@redhat.com>
Fri, 29 Jan 2021 06:49:12 +0000 (16:49 +1000)
Turing reports MMU fault interrupts via new top-level interrupt
registers, and the old PMC MMU interrupt vector is not used by the HW.
This means we can remap the new top-level MMU interrupt to the existing
PMC MMU bit, which simplifies the implementation until all interrupts
are moved over to using the new top-level registers.
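The gist of the remap, condensed from tu102_mc_intr_stat() in the diff
below (the _sketch suffix marks this as an illustration, and the
register comments are descriptive rather than official names):

  static u32
  tu102_mc_intr_stat_sketch(struct nvkm_device *device)
  {
          u32 intr0 = nvkm_rd32(device, 0x000100);    /* legacy PMC status */
          u32 intr_top = nvkm_rd32(device, 0xb81600); /* new top-level status */

          /* Report the top-level MMU fault bits via the old PMC MMU vector. */
          if (intr_top & 0x00000006)
                  intr0 |= 0x00000200;

          return intr0 | nvkm_rd32(device, 0x000104); /* merge second PMC leaf */
  }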

Signed-off-by: Alistair Popple <apopple@nvidia.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c

diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
index 45a6a68..f080051 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
@@ -22,6 +22,7 @@
 #include "priv.h"
 
 #include <core/memory.h>
+#include <subdev/mc.h>
 #include <subdev/mmu.h>
 #include <engine/fifo.h>
 
@@ -34,6 +35,10 @@ tu102_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
         *     which don't appear to actually work anymore, but newer
         *     versions of RM don't appear to touch anything at all..
         */
+       struct nvkm_device *device = buffer->fault->subdev.device;
+
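+       /* Mask/unmask at the PMC; this also drives the new top-level enables. */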
+       nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, enable);
 }
 
 static void
@@ -41,6 +46,11 @@ tu102_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
 {
        struct nvkm_device *device = buffer->fault->subdev.device;
        const u32 foff = buffer->id * 0x20;
+
+       /* Disable the fault interrupts */
+       nvkm_wr32(device, 0xb81408, 0x1);
+       nvkm_wr32(device, 0xb81410, 0x10);
+
        nvkm_mask(device, 0xb83010 + foff, 0x80000000, 0x00000000);
 }
 
@@ -50,6 +60,10 @@ tu102_fault_buffer_init(struct nvkm_fault_buffer *buffer)
        struct nvkm_device *device = buffer->fault->subdev.device;
        const u32 foff = buffer->id * 0x20;
 
+       /* Enable the fault interrupts */
+       nvkm_wr32(device, 0xb81208, 0x1);
+       nvkm_wr32(device, 0xb81210, 0x10);
+
        nvkm_mask(device, 0xb83010 + foff, 0xc0000000, 0x40000000);
        nvkm_wr32(device, 0xb83004 + foff, upper_32_bits(buffer->addr));
        nvkm_wr32(device, 0xb83000 + foff, lower_32_bits(buffer->addr));
@@ -109,14 +123,21 @@ tu102_fault_intr(struct nvkm_fault *fault)
        }
 
+       /* Non-replayable MMU fault */
        if (stat & 0x00000200) {
+               /* Clear the associated interrupt flag */
+               nvkm_wr32(device, 0xb81010, 0x10);
+
                if (fault->buffer[0]) {
                        nvkm_event_send(&fault->event, 1, 0, NULL, 0);
                        stat &= ~0x00000200;
                }
        }
 
-       /*XXX: guess, can't confirm until we get fw... */
+       /* Replayable MMU fault */
        if (stat & 0x00000100) {
+               /* Clear the associated interrupt flag */
+               nvkm_wr32(device, 0xb81008, 0x1);
+
                if (fault->buffer[1]) {
                        nvkm_event_send(&fault->event, 1, 1, NULL, 0);
                        stat &= ~0x00000100;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
index d098c44..cda924d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  */
+#define tu102_mc(p) container_of((p), struct tu102_mc, base)
 #include "priv.h"
 
+struct tu102_mc {
+       struct nvkm_mc base;
+       spinlock_t lock; /* protects intr, mask and the enable registers */
+       bool intr;       /* true while interrupts are armed */
+       u32 mask;        /* currently enabled interrupt lines */
+};
+
+static void
+tu102_mc_intr_update(struct tu102_mc *mc)
+{
+       struct nvkm_device *device = mc->base.subdev.device;
+       u32 mask = mc->intr ? mc->mask : 0, i;
+
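+       /* ~mask -> enable-clear registers (0x180), mask -> enable-set (0x160) */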
+       for (i = 0; i < 2; i++) {
+               nvkm_wr32(device, 0x000180 + (i * 0x04), ~mask);
+               nvkm_wr32(device, 0x000160 + (i * 0x04),  mask);
+       }
+
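+       /* Mirror the PMC MMU fault bit into the new top-level enables. */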
+       if (mask & 0x00000200)
+               nvkm_wr32(device, 0xb81608, 0x6);
+       else
+               nvkm_wr32(device, 0xb81610, 0x6);
+}
+
+static void
+tu102_mc_intr_unarm(struct nvkm_mc *base)
+{
+       struct tu102_mc *mc = tu102_mc(base);
+       unsigned long flags;
+
+       spin_lock_irqsave(&mc->lock, flags);
+       mc->intr = false;
+       tu102_mc_intr_update(mc);
+       spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+static void
+tu102_mc_intr_rearm(struct nvkm_mc *base)
+{
+       struct tu102_mc *mc = tu102_mc(base);
+       unsigned long flags;
+
+       spin_lock_irqsave(&mc->lock, flags);
+       mc->intr = true;
+       tu102_mc_intr_update(mc);
+       spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+static void
+tu102_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
+{
+       struct tu102_mc *mc = tu102_mc(base);
+       unsigned long flags;
+
+       spin_lock_irqsave(&mc->lock, flags);
+       mc->mask = (mc->mask & ~mask) | intr;
+       tu102_mc_intr_update(mc);
+       spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+static u32
+tu102_mc_intr_stat(struct nvkm_mc *mc)
+{
+       struct nvkm_device *device = mc->subdev.device;
+       u32 intr0 = nvkm_rd32(device, 0x000100);
+       u32 intr1 = nvkm_rd32(device, 0x000104);
+       u32 intr_top = nvkm_rd32(device, 0xb81600);
+
+       /* Turing and above route the MMU fault interrupts via a different
+        * interrupt tree with different control registers. For the moment remap
+        * them back to the old PMC vector.
+        */
+       if (intr_top & 0x00000006)
+               intr0 |= 0x00000200;
+
+       return intr0 | intr1;
+}
+
 static void
 tu102_mc_intr_hack(struct nvkm_mc *mc, bool *handled)
 {
        struct nvkm_device *device = mc->subdev.device;
        u32 stat = nvkm_rd32(device, 0xb81010);
+
        if (stat & 0x00000050) {
                struct nvkm_subdev *subdev =
                        nvkm_device_subdev(device, NVKM_SUBDEV_FAULT);
@@ -40,16 +122,33 @@ static const struct nvkm_mc_func
 tu102_mc = {
        .init = nv50_mc_init,
        .intr = gp100_mc_intr,
-       .intr_unarm = gp100_mc_intr_unarm,
-       .intr_rearm = gp100_mc_intr_rearm,
-       .intr_mask = gp100_mc_intr_mask,
-       .intr_stat = gf100_mc_intr_stat,
+       .intr_unarm = tu102_mc_intr_unarm,
+       .intr_rearm = tu102_mc_intr_rearm,
+       .intr_mask = tu102_mc_intr_mask,
+       .intr_stat = tu102_mc_intr_stat,
        .intr_hack = tu102_mc_intr_hack,
        .reset = gk104_mc_reset,
 };
 
 int
+tu102_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
+             int index, struct nvkm_mc **pmc)
+{
+       struct tu102_mc *mc;
+
+       if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL)))
+               return -ENOMEM;
+       nvkm_mc_ctor(func, device, index, &mc->base);
+       *pmc = &mc->base;
+
+       spin_lock_init(&mc->lock);
+       mc->intr = false;
+       mc->mask = 0x7fffffff;
+       return 0;
+}
+
+int
 tu102_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
 {
-       return gp100_mc_new_(&tu102_mc, device, index, pmc);
+       return tu102_mc_new_(&tu102_mc, device, index, pmc);
 }