soc/fsl: Introduce DPAA 1.x BMan device driver
author: Claudiu Manoil <claudiu.manoil@nxp.com>
Thu, 22 Sep 2016 15:04:08 +0000 (18:04 +0300)
committer: Scott Wood <oss@buserror.net>
Sun, 25 Sep 2016 07:38:58 +0000 (02:38 -0500)
This driver enables the Freescale DPAA 1.x Buffer Manager block.
BMan is a hardware accelerator that manages buffer pools.  It allows
CPUs and other accelerators connected to the SoC datapath to acquire
and release buffers during data processing.

Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
Signed-off-by: Claudiu Manoil <claudiu.manoil@nxp.com>
Signed-off-by: Scott Wood <oss@buserror.net>
drivers/soc/fsl/qbman/Kconfig [new file with mode: 0644]
drivers/soc/fsl/qbman/Makefile [new file with mode: 0644]
drivers/soc/fsl/qbman/bman.c [new file with mode: 0644]
drivers/soc/fsl/qbman/bman_ccsr.c [new file with mode: 0644]
drivers/soc/fsl/qbman/bman_portal.c [new file with mode: 0644]
drivers/soc/fsl/qbman/bman_priv.h [new file with mode: 0644]
drivers/soc/fsl/qbman/dpaa_sys.h [new file with mode: 0644]
include/soc/fsl/bman.h [new file with mode: 0644]

diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig
new file mode 100644 (file)
index 0000000..8f2df64
--- /dev/null
@@ -0,0 +1,28 @@
+menuconfig FSL_DPAA
+       bool "Freescale DPAA 1.x support"
+       depends on FSL_SOC_BOOKE
+       select GENERIC_ALLOCATOR
+       help
+         The Freescale Data Path Acceleration Architecture (DPAA) is a set of
+         hardware components on specific QorIQ multicore processors.
+         This architecture provides the infrastructure to support simplified
+         sharing of networking interfaces and accelerators by multiple CPUs.
+         The major h/w blocks composing DPAA are BMan and QMan.
+
+         The Buffer Manager (BMan) is a hardware buffer pool management block
+         that allows software and accelerators on the datapath to acquire and
+         release buffers in order to build frames.
+
+         The Queue Manager (QMan) is a hardware queue management block
+         that allows software and accelerators on the datapath to enqueue and
+         dequeue frames in order to communicate.
+
+if FSL_DPAA
+
+config FSL_DPAA_CHECKING
+       bool "Additional driver checking"
+       help
+         Compiles in additional checks, to sanity-check the drivers and
+         any use of the exported API. Not recommended for performance.
+
+endif # FSL_DPAA
diff --git a/drivers/soc/fsl/qbman/Makefile b/drivers/soc/fsl/qbman/Makefile
new file mode 100644 (file)
index 0000000..855c3ac
--- /dev/null
@@ -0,0 +1,2 @@
+obj-$(CONFIG_FSL_DPAA)                          += bman_ccsr.o bman_portal.o \
+                                                  bman.o
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
new file mode 100644 (file)
index 0000000..ffa48fd
--- /dev/null
@@ -0,0 +1,797 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+#define IRQNAME                "BMan portal %d"
+#define MAX_IRQNAME    16      /* big enough for "BMan portal %d" */
+
+/* Portal register assists */
+
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH     0x0000
+#define BM_REG_RCR_CI_CINH     0x0004
+#define BM_REG_RCR_ITR         0x0008
+#define BM_REG_CFG             0x0100
+#define BM_REG_SCN(n)          (0x0200 + ((n) << 2))
+#define BM_REG_ISR             0x0e00
+#define BM_REG_IER             0x0e04
+#define BM_REG_ISDR            0x0e08
+#define BM_REG_IIR             0x0e0c
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR               0x0000
+#define BM_CL_RR0              0x0100
+#define BM_CL_RR1              0x0140
+#define BM_CL_RCR              0x1000
+#define BM_CL_RCR_PI_CENA      0x3000
+#define BM_CL_RCR_CI_CENA      0x3100
+
+/*
+ * Portal modes.
+ *   Enum types;
+ *     pmode == production mode
+ *     cmode == consumption mode,
+ *   Enum values use 3 letter codes. First letter matches the portal mode,
+ *   remaining two letters indicate;
+ *     ci == cache-inhibited portal register
+ *     ce == cache-enabled portal register
+ *     vb == in-band valid-bit (cache-enabled)
+ */
+enum bm_rcr_pmode {            /* matches BCSP_CFG::RPM */
+       bm_rcr_pci = 0,         /* PI index, cache-inhibited */
+       bm_rcr_pce = 1,         /* PI index, cache-enabled */
+       bm_rcr_pvb = 2          /* valid-bit */
+};
+enum bm_rcr_cmode {            /* s/w-only */
+       bm_rcr_cci,             /* CI index, cache-inhibited */
+       bm_rcr_cce              /* CI index, cache-enabled */
+};
+
+
+/* --- Portal structures --- */
+
+#define BM_RCR_SIZE            8
+
+/* Release Command */
+struct bm_rcr_entry {
+       union {
+               struct {
+                       u8 _ncw_verb; /* writes to this are non-coherent */
+                       u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
+                       u8 __reserved1[62];
+               };
+               struct bm_buffer bufs[8];
+       };
+};
+#define BM_RCR_VERB_VBIT               0x80
+#define BM_RCR_VERB_CMD_MASK           0x70    /* one of two values; */
+#define BM_RCR_VERB_CMD_BPID_SINGLE    0x20
+#define BM_RCR_VERB_CMD_BPID_MULTI     0x30
+#define BM_RCR_VERB_BUFCOUNT_MASK      0x0f    /* values 1..8 */
+
+struct bm_rcr {
+       struct bm_rcr_entry *ring, *cursor;
+       u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       u32 busy;
+       enum bm_rcr_pmode pmode;
+       enum bm_rcr_cmode cmode;
+#endif
+};
+
+/* MC (Management Command) command */
+struct bm_mc_command {
+       u8 _ncw_verb; /* writes to this are non-coherent */
+       u8 bpid; /* used by acquire command */
+       u8 __reserved[62];
+};
+#define BM_MCC_VERB_VBIT               0x80
+#define BM_MCC_VERB_CMD_MASK           0x70    /* where the verb contains; */
+#define BM_MCC_VERB_CMD_ACQUIRE                0x10
+#define BM_MCC_VERB_CMD_QUERY          0x40
+#define BM_MCC_VERB_ACQUIRE_BUFCOUNT   0x0f    /* values 1..8 go here */
+
+/* MC result, Acquire and Query Response */
+union bm_mc_result {
+       struct {
+               u8 verb;
+               u8 bpid;
+               u8 __reserved[62];
+       };
+       struct bm_buffer bufs[8];
+};
+#define BM_MCR_VERB_VBIT               0x80
+#define BM_MCR_VERB_CMD_MASK           BM_MCC_VERB_CMD_MASK
+#define BM_MCR_VERB_CMD_ACQUIRE                BM_MCC_VERB_CMD_ACQUIRE
+#define BM_MCR_VERB_CMD_QUERY          BM_MCC_VERB_CMD_QUERY
+#define BM_MCR_VERB_CMD_ERR_INVALID    0x60
+#define BM_MCR_VERB_CMD_ERR_ECC                0x70
+#define BM_MCR_VERB_ACQUIRE_BUFCOUNT   BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
+#define BM_MCR_TIMEOUT                 10000 /* us */
+
+struct bm_mc {
+       struct bm_mc_command *cr;
+       union bm_mc_result *rr;
+       u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       enum {
+               /* Can only be _mc_start()ed */
+               mc_idle,
+               /* Can only be _mc_commit()ed or _mc_abort()ed */
+               mc_user,
+               /* Can only be _mc_retry()ed */
+               mc_hw
+       } state;
+#endif
+};
+
+struct bm_addr {
+       void __iomem *ce;       /* cache-enabled */
+       void __iomem *ci;       /* cache-inhibited */
+};
+
+struct bm_portal {
+       struct bm_addr addr;
+       struct bm_rcr rcr;
+       struct bm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 bm_in(struct bm_portal *p, u32 offset)
+{
+       return __raw_readl(p->addr.ci + offset);
+}
+
+static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
+{
+       __raw_writel(val, p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
+{
+       dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
+{
+       dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
+{
+       return __raw_readl(p->addr.ce + offset);
+}
+
+struct bman_portal {
+       struct bm_portal p;
+       /* interrupt sources processed by portal_isr(), configurable */
+       unsigned long irq_sources;
+       /* probing time config params for cpu-affine portals */
+       const struct bm_portal_config *config;
+       char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
+
+static inline struct bman_portal *get_affine_portal(void)
+{
+       return &get_cpu_var(bman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+       put_cpu_var(bman_affine_portal);
+}
+
+/*
+ * This object type refers to a pool, it isn't *the* pool. There may be
+ * more than one such object per BMan buffer pool, eg. if different users of the
+ * pool are operating via different portals.
+ */
+struct bman_pool {
+       /* index of the buffer pool to encapsulate (0-63) */
+       u32 bpid;
+       /* Used for hash-table admin when using depletion notifications. */
+       struct bman_portal *portal;
+       struct bman_pool *next;
+};
+
+static u32 poll_portal_slow(struct bman_portal *p, u32 is);
+
+static irqreturn_t portal_isr(int irq, void *ptr)
+{
+       struct bman_portal *p = ptr;
+       struct bm_portal *portal = &p->p;
+       u32 clear = p->irq_sources;
+       u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;
+
+       if (unlikely(!is))
+               return IRQ_NONE;
+
+       clear |= poll_portal_slow(p, is);
+       bm_out(portal, BM_REG_ISR, clear);
+       return IRQ_HANDLED;
+}
+
+/* --- RCR API --- */
+
+#define RCR_SHIFT      ilog2(sizeof(struct bm_rcr_entry))
+#define RCR_CARRY      (uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
+{
+       uintptr_t addr = (uintptr_t)p;
+
+       addr &= ~RCR_CARRY;
+
+       return (struct bm_rcr_entry *)addr;
+}
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int rcr_ptr2idx(struct bm_rcr_entry *e)
+{
+       return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
+}
+#endif
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void rcr_inc(struct bm_rcr *rcr)
+{
+       /* increment to the next RCR pointer and handle overflow and 'vbit' */
+       struct bm_rcr_entry *partial = rcr->cursor + 1;
+
+       rcr->cursor = rcr_carryclear(partial);
+       if (partial != rcr->cursor)
+               rcr->vbit ^= BM_RCR_VERB_VBIT;
+}
+
+static int bm_rcr_get_avail(struct bm_portal *portal)
+{
+       struct bm_rcr *rcr = &portal->rcr;
+
+       return rcr->available;
+}
+
+static int bm_rcr_get_fill(struct bm_portal *portal)
+{
+       struct bm_rcr *rcr = &portal->rcr;
+
+       return BM_RCR_SIZE - 1 - rcr->available;
+}
+
+static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
+{
+       struct bm_rcr *rcr = &portal->rcr;
+
+       rcr->ithresh = ithresh;
+       bm_out(portal, BM_REG_RCR_ITR, ithresh);
+}
+
+static void bm_rcr_cce_prefetch(struct bm_portal *portal)
+{
+       __maybe_unused struct bm_rcr *rcr = &portal->rcr;
+
+       DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+       bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
+}
+
+static u8 bm_rcr_cce_update(struct bm_portal *portal)
+{
+       struct bm_rcr *rcr = &portal->rcr;
+       u8 diff, old_ci = rcr->ci;
+
+       DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+       rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
+       bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
+       diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+       rcr->available += diff;
+       return diff;
+}
+
+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
+{
+       struct bm_rcr *rcr = &portal->rcr;
+
+       DPAA_ASSERT(!rcr->busy);
+       if (!rcr->available)
+               return NULL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       rcr->busy = 1;
+#endif
+       dpaa_zero(rcr->cursor);
+       return rcr->cursor;
+}
+
+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
+{
+       struct bm_rcr *rcr = &portal->rcr;
+       struct bm_rcr_entry *rcursor;
+
+       DPAA_ASSERT(rcr->busy);
+       DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
+       DPAA_ASSERT(rcr->available >= 1);
+       dma_wmb();
+       rcursor = rcr->cursor;
+       rcursor->_ncw_verb = myverb | rcr->vbit;
+       dpaa_flush(rcursor);
+       rcr_inc(rcr);
+       rcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       rcr->busy = 0;
+#endif
+}
+
+static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
+                      enum bm_rcr_cmode cmode)
+{
+       struct bm_rcr *rcr = &portal->rcr;
+       u32 cfg;
+       u8 pi;
+
+       rcr->ring = portal->addr.ce + BM_CL_RCR;
+       rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+       pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+       rcr->cursor = rcr->ring + pi;
+       rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
+               BM_RCR_VERB_VBIT : 0;
+       rcr->available = BM_RCR_SIZE - 1
+               - dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
+       rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       rcr->busy = 0;
+       rcr->pmode = pmode;
+       rcr->cmode = cmode;
+#endif
+       cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
+               | (pmode & 0x3); /* BCSP_CFG::RPM */
+       bm_out(portal, BM_REG_CFG, cfg);
+       return 0;
+}
+
+static void bm_rcr_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       struct bm_rcr *rcr = &portal->rcr;
+       int i;
+
+       DPAA_ASSERT(!rcr->busy);
+
+       i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+       if (i != rcr_ptr2idx(rcr->cursor))
+               pr_crit("losing uncommited RCR entries\n");
+
+       i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+       if (i != rcr->ci)
+               pr_crit("missing existing RCR completions\n");
+       if (rcr->ci != rcr_ptr2idx(rcr->cursor))
+               pr_crit("RCR destroyed unquiesced\n");
+#endif
+}
+
+/* --- Management command API --- */
+static int bm_mc_init(struct bm_portal *portal)
+{
+       struct bm_mc *mc = &portal->mc;
+
+       mc->cr = portal->addr.ce + BM_CL_CR;
+       mc->rr = portal->addr.ce + BM_CL_RR0;
+       mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ?
+                   0 : 1;
+       mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       mc->state = mc_idle;
+#endif
+       return 0;
+}
+
+static void bm_mc_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       struct bm_mc *mc = &portal->mc;
+
+       DPAA_ASSERT(mc->state == mc_idle);
+       if (mc->state != mc_idle)
+               pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
+{
+       struct bm_mc *mc = &portal->mc;
+
+       DPAA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       mc->state = mc_user;
+#endif
+       dpaa_zero(mc->cr);
+       return mc->cr;
+}
+
+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
+{
+       struct bm_mc *mc = &portal->mc;
+       union bm_mc_result *rr = mc->rr + mc->rridx;
+
+       DPAA_ASSERT(mc->state == mc_user);
+       dma_wmb();
+       mc->cr->_ncw_verb = myverb | mc->vbit;
+       dpaa_flush(mc->cr);
+       dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       mc->state = mc_hw;
+#endif
+}
+
+static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
+{
+       struct bm_mc *mc = &portal->mc;
+       union bm_mc_result *rr = mc->rr + mc->rridx;
+
+       DPAA_ASSERT(mc->state == mc_hw);
+       /*
+        * The inactive response register's verb byte always returns zero until
+        * its command is submitted and completed. This includes the valid-bit,
+        * in case you were wondering...
+        */
+       if (!__raw_readb(&rr->verb)) {
+               dpaa_invalidate_touch_ro(rr);
+               return NULL;
+       }
+       mc->rridx ^= 1;
+       mc->vbit ^= BM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+       mc->state = mc_idle;
+#endif
+       return rr;
+}
+
+static inline int bm_mc_result_timeout(struct bm_portal *portal,
+                                      union bm_mc_result **mcr)
+{
+       int timeout = BM_MCR_TIMEOUT;
+
+       do {
+               *mcr = bm_mc_result(portal);
+               if (*mcr)
+                       break;
+               udelay(1);
+       } while (--timeout);
+
+       return timeout;
+}
+
+/* Disable all BSCN interrupts for the portal */
+static void bm_isr_bscn_disable(struct bm_portal *portal)
+{
+       bm_out(portal, BM_REG_SCN(0), 0);
+       bm_out(portal, BM_REG_SCN(1), 0);
+}
+
+static int bman_create_portal(struct bman_portal *portal,
+                             const struct bm_portal_config *c)
+{
+       struct bm_portal *p;
+       int ret;
+
+       p = &portal->p;
+       /*
+        * prep the low-level portal struct with the mapped addresses from the
+        * config, everything that follows depends on it and "config" is more
+        * for (de)reference...
+        */
+       p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+       p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+       if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
+               dev_err(c->dev, "RCR initialisation failed\n");
+               goto fail_rcr;
+       }
+       if (bm_mc_init(p)) {
+               dev_err(c->dev, "MC initialisation failed\n");
+               goto fail_mc;
+       }
+       /*
+        * Default to all BPIDs disabled, we enable as required at
+        * run-time.
+        */
+       bm_isr_bscn_disable(p);
+
+       /* Write-to-clear any stale interrupt status bits */
+       bm_out(p, BM_REG_ISDR, 0xffffffff);
+       portal->irq_sources = 0;
+       bm_out(p, BM_REG_IER, 0);
+       bm_out(p, BM_REG_ISR, 0xffffffff);
+       snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+       if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
+               dev_err(c->dev, "request_irq() failed\n");
+               goto fail_irq;
+       }
+       if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
+           irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
+               dev_err(c->dev, "irq_set_affinity() failed\n");
+               goto fail_affinity;
+       }
+
+       /* Need RCR to be empty before continuing */
+       ret = bm_rcr_get_fill(p);
+       if (ret) {
+               dev_err(c->dev, "RCR unclean\n");
+               goto fail_rcr_empty;
+       }
+       /* Success */
+       portal->config = c;
+
+       bm_out(p, BM_REG_ISDR, 0);
+       bm_out(p, BM_REG_IIR, 0);
+
+       return 0;
+
+fail_rcr_empty:
+fail_affinity:
+       free_irq(c->irq, portal);
+fail_irq:
+       bm_mc_finish(p);
+fail_mc:
+       bm_rcr_finish(p);
+fail_rcr:
+       return -EIO;
+}
+
+struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c)
+{
+       struct bman_portal *portal;
+       int err;
+
+       portal = &per_cpu(bman_affine_portal, c->cpu);
+       err = bman_create_portal(portal, c);
+       if (err)
+               return NULL;
+
+       spin_lock(&affine_mask_lock);
+       cpumask_set_cpu(c->cpu, &affine_mask);
+       spin_unlock(&affine_mask_lock);
+
+       return portal;
+}
+
+static u32 poll_portal_slow(struct bman_portal *p, u32 is)
+{
+       u32 ret = is;
+
+       if (is & BM_PIRQ_RCRI) {
+               bm_rcr_cce_update(&p->p);
+               bm_rcr_set_ithresh(&p->p, 0);
+               bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
+               is &= ~BM_PIRQ_RCRI;
+       }
+
+       /* There should be no status register bits left undefined */
+       DPAA_ASSERT(!is);
+       return ret;
+}
+
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
+{
+       unsigned long irqflags;
+
+       local_irq_save(irqflags);
+       set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
+       bm_out(&p->p, BM_REG_IER, p->irq_sources);
+       local_irq_restore(irqflags);
+       return 0;
+}
+
+static int bm_shutdown_pool(u32 bpid)
+{
+       struct bm_mc_command *bm_cmd;
+       union bm_mc_result *bm_res;
+
+       while (1) {
+               struct bman_portal *p = get_affine_portal();
+               /* Acquire buffers until empty */
+               bm_cmd = bm_mc_start(&p->p);
+               bm_cmd->bpid = bpid;
+               bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
+               if (!bm_mc_result_timeout(&p->p, &bm_res)) {
+                       put_affine_portal();
+                       pr_crit("BMan Acquire Command timedout\n");
+                       return -ETIMEDOUT;
+               }
+               if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
+                       put_affine_portal();
+                       /* Pool is empty */
+                       return 0;
+               }
+               put_affine_portal();
+       }
+
+       return 0;
+}
+
+struct gen_pool *bm_bpalloc;
+
+static int bm_alloc_bpid_range(u32 *result, u32 count)
+{
+       unsigned long addr;
+
+       addr = gen_pool_alloc(bm_bpalloc, count);
+       if (!addr)
+               return -ENOMEM;
+
+       *result = addr & ~DPAA_GENALLOC_OFF;
+
+       return 0;
+}
+
+static int bm_release_bpid(u32 bpid)
+{
+       int ret;
+
+       ret = bm_shutdown_pool(bpid);
+       if (ret) {
+               pr_debug("BPID %d leaked\n", bpid);
+               return ret;
+       }
+
+       gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
+       return 0;
+}
+
+struct bman_pool *bman_new_pool(void)
+{
+       struct bman_pool *pool = NULL;
+       u32 bpid;
+
+       if (bm_alloc_bpid_range(&bpid, 1))
+               return NULL;
+
+       pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+       if (!pool)
+               goto err;
+
+       pool->bpid = bpid;
+
+       return pool;
+err:
+       bm_release_bpid(bpid);
+       kfree(pool);
+       return NULL;
+}
+EXPORT_SYMBOL(bman_new_pool);
+
+void bman_free_pool(struct bman_pool *pool)
+{
+       bm_release_bpid(pool->bpid);
+
+       kfree(pool);
+}
+EXPORT_SYMBOL(bman_free_pool);
+
+int bman_get_bpid(const struct bman_pool *pool)
+{
+       return pool->bpid;
+}
+EXPORT_SYMBOL(bman_get_bpid);
+
+static void update_rcr_ci(struct bman_portal *p, int avail)
+{
+       if (avail)
+               bm_rcr_cce_prefetch(&p->p);
+       else
+               bm_rcr_cce_update(&p->p);
+}
+
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
+{
+       struct bman_portal *p;
+       struct bm_rcr_entry *r;
+       unsigned long irqflags;
+       int avail, timeout = 1000; /* 1ms */
+       int i = num - 1;
+
+       DPAA_ASSERT(num > 0 && num <= 8);
+
+       do {
+               p = get_affine_portal();
+               local_irq_save(irqflags);
+               avail = bm_rcr_get_avail(&p->p);
+               if (avail < 2)
+                       update_rcr_ci(p, avail);
+               r = bm_rcr_start(&p->p);
+               local_irq_restore(irqflags);
+               put_affine_portal();
+               if (likely(r))
+                       break;
+
+               udelay(1);
+       } while (--timeout);
+
+       if (unlikely(!timeout))
+               return -ETIMEDOUT;
+
+       p = get_affine_portal();
+       local_irq_save(irqflags);
+       /*
+        * we can copy all but the first entry, as this can trigger badness
+        * with the valid-bit
+        */
+       bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
+       bm_buffer_set_bpid(r->bufs, pool->bpid);
+       if (i)
+               memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));
+
+       bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
+                         (num & BM_RCR_VERB_BUFCOUNT_MASK));
+
+       local_irq_restore(irqflags);
+       put_affine_portal();
+       return 0;
+}
+EXPORT_SYMBOL(bman_release);
+
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
+{
+       struct bman_portal *p = get_affine_portal();
+       struct bm_mc_command *mcc;
+       union bm_mc_result *mcr;
+       int ret;
+
+       DPAA_ASSERT(num > 0 && num <= 8);
+
+       mcc = bm_mc_start(&p->p);
+       mcc->bpid = pool->bpid;
+       bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
+                    (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
+       if (!bm_mc_result_timeout(&p->p, &mcr)) {
+               put_affine_portal();
+               pr_crit("BMan Acquire Timeout\n");
+               return -ETIMEDOUT;
+       }
+       ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
+       if (bufs)
+               memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));
+
+       put_affine_portal();
+       if (ret != num)
+               ret = -ENOMEM;
+       return ret;
+}
+EXPORT_SYMBOL(bman_acquire);
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal)
+{
+       return portal->config;
+}
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
new file mode 100644 (file)
index 0000000..9deb052
--- /dev/null
@@ -0,0 +1,263 @@
+/* Copyright (c) 2009 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+u16 bman_ip_rev;
+EXPORT_SYMBOL(bman_ip_rev);
+
+/* Register offsets */
+#define REG_FBPR_FPC           0x0800
+#define REG_ECSR               0x0a00
+#define REG_ECIR               0x0a04
+#define REG_EADR               0x0a08
+#define REG_EDATA(n)           (0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n)            (0x0a80 + ((n) * 0x04))
+#define REG_IP_REV_1           0x0bf8
+#define REG_IP_REV_2           0x0bfc
+#define REG_FBPR_BARE          0x0c00
+#define REG_FBPR_BAR           0x0c04
+#define REG_FBPR_AR            0x0c10
+#define REG_SRCIDR             0x0d04
+#define REG_LIODNR             0x0d08
+#define REG_ERR_ISR            0x0e00
+#define REG_ERR_IER            0x0e04
+#define REG_ERR_ISDR           0x0e08
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define BM_EIRQ_IVCI   0x00000010      /* Invalid Command Verb */
+#define BM_EIRQ_FLWI   0x00000008      /* FBPR Low Watermark */
+#define BM_EIRQ_MBEI   0x00000004      /* Multi-bit ECC Error */
+#define BM_EIRQ_SBEI   0x00000002      /* Single-bit ECC Error */
+#define BM_EIRQ_BSCN   0x00000001      /* pool State Change Notification */
+
+/* Maps one error-interrupt bit to a human-readable description */
+struct bman_hwerr_txt {
+       u32 mask;
+       const char *txt;
+};
+
+/* Lookup table used by the error ISR when logging asserted sources */
+static const struct bman_hwerr_txt bman_hwerr_txts[] = {
+       { BM_EIRQ_IVCI, "Invalid Command Verb" },
+       { BM_EIRQ_FLWI, "FBPR Low Watermark" },
+       { BM_EIRQ_MBEI, "Multi-bit ECC Error" },
+       { BM_EIRQ_SBEI, "Single-bit ECC Error" },
+       { BM_EIRQ_BSCN, "Pool State Change Notification" },
+};
+
+/* Only trigger the low watermark interrupt once */
+#define BMAN_ERRS_TO_DISABLE BM_EIRQ_FLWI
+
+/* Pointer to the start of the BMan's CCSR space */
+static u32 __iomem *bm_ccsr_start;
+
+/* Read the big-endian CCSR register at byte offset @offset */
+static inline u32 bm_ccsr_in(u32 offset)
+{
+       u32 __iomem *reg = bm_ccsr_start + offset / 4;
+
+       return ioread32be(reg);
+}
+/* Write @val to the big-endian CCSR register at byte offset @offset */
+static inline void bm_ccsr_out(u32 offset, u32 val)
+{
+       u32 __iomem *reg = bm_ccsr_start + offset / 4;
+
+       iowrite32be(val, reg);
+}
+
+/* Decode the BMan block id and major/minor revision from REG_IP_REV_1 */
+static void bm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+       u32 rev = bm_ccsr_in(REG_IP_REV_1);
+
+       *minor = rev & 0xff;
+       *major = (rev >> 8) & 0xff;
+       *id = rev >> 16;
+}
+
+/* signal transactions for FBPRs with higher priority */
+#define FBPR_AR_RPRIO_HI BIT(30)
+
+/*
+ * Program the FBPR private-memory window: 64-bit base address split across
+ * BARE/BAR, and the size written to AR as a power-of-2 exponent minus one.
+ * @ba must be naturally aligned to @size; @size must be a power of two in
+ * [4KB, 1GB].
+ */
+static void bm_set_memory(u64 ba, u32 size)
+{
+       u32 exp = ilog2(size);
+       /* choke if size isn't within range */
+       DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
+                  is_power_of_2(size));
+       /* choke if '[e]ba' has lower-alignment than 'size' */
+       DPAA_ASSERT(!(ba & (size - 1)));
+       bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
+       bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
+       /* size is encoded as log2(size) - 1 */
+       bm_ccsr_out(REG_FBPR_AR, exp - 1);
+}
+
+/*
+ * Location and size of BMan private memory
+ *
+ * Ideally we would use the DMA API to turn rmem->base into a DMA address
+ * (especially if iommu translations ever get involved).  Unfortunately, the
+ * DMA API currently does not allow mapping anything that is not backed with
+ * a struct page.
+ */
+static dma_addr_t fbpr_a;
+static size_t fbpr_sz;
+
+/* Early boot callback: record the "fsl,bman-fbpr" reserved-memory region */
+static int bman_fbpr(struct reserved_mem *rmem)
+{
+       fbpr_a = rmem->base;
+       fbpr_sz = rmem->size;
+
+       /* complain loudly if the region is absent or empty */
+       WARN_ON(!fbpr_a || !fbpr_sz);
+
+       return 0;
+}
+RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
+
+/*
+ * BMan error interrupt handler (shared line).  Logs every asserted and
+ * enabled error source, re-arms the error capture register if it captured
+ * the event, one-shots sources listed in BMAN_ERRS_TO_DISABLE, and finally
+ * write-to-clears the status bits that were asserted on entry.
+ */
+static irqreturn_t bman_isr(int irq, void *ptr)
+{
+       u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+       struct device *dev = ptr;
+
+       ier_val = bm_ccsr_in(REG_ERR_IER);
+       isr_val = bm_ccsr_in(REG_ERR_ISR);
+       ecsr_val = bm_ccsr_in(REG_ECSR);
+       /* only act on sources that are both asserted and enabled */
+       isr_mask = isr_val & ier_val;
+
+       if (!isr_mask)
+               return IRQ_NONE;
+
+       for (i = 0; i < ARRAY_SIZE(bman_hwerr_txts); i++) {
+               if (bman_hwerr_txts[i].mask & isr_mask) {
+                       dev_err_ratelimited(dev, "ErrInt: %s\n",
+                                           bman_hwerr_txts[i].txt);
+                       if (bman_hwerr_txts[i].mask & ecsr_val) {
+                               /* Re-arm error capture registers */
+                               bm_ccsr_out(REG_ECSR, ecsr_val);
+                       }
+                       if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_DISABLE) {
+                               /* one-shot source: mask it after first report */
+                               dev_dbg(dev, "Disabling error 0x%x\n",
+                                       bman_hwerr_txts[i].mask);
+                               ier_val &= ~bman_hwerr_txts[i].mask;
+                               bm_ccsr_out(REG_ERR_IER, ier_val);
+                       }
+               }
+       }
+       /* W1C: clear exactly what we observed on entry */
+       bm_ccsr_out(REG_ERR_ISR, isr_val);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Probe the BMan global (CCSR) device: map the register block, identify
+ * the IP revision, program the FBPR private memory, install the error ISR
+ * and seed the buffer-pool-id (BPID) allocator.
+ *
+ * Fixes vs. previous revision: use resource_size() instead of open-coding
+ * it, refuse to program FBPR registers when no reserved-memory region was
+ * recorded (bman_fbpr() only WARNs), and drop the stray ';' after the
+ * function body.
+ */
+static int fsl_bman_probe(struct platform_device *pdev)
+{
+       int ret, err_irq;
+       struct device *dev = &pdev->dev;
+       struct device_node *node = dev->of_node;
+       struct resource *res;
+       u16 id, bm_pool_cnt;
+       u8 major, minor;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n",
+                       node->full_name);
+               return -ENXIO;
+       }
+       bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
+       if (!bm_ccsr_start)
+               return -ENXIO;
+
+       bm_get_version(&id, &major, &minor);
+       if (major == 1 && minor == 0) {
+               bman_ip_rev = BMAN_REV10;
+               bm_pool_cnt = BM_POOL_MAX;
+       } else if (major == 2 && minor == 0) {
+               bman_ip_rev = BMAN_REV20;
+               bm_pool_cnt = 8;
+       } else if (major == 2 && minor == 1) {
+               bman_ip_rev = BMAN_REV21;
+               bm_pool_cnt = BM_POOL_MAX;
+       } else {
+               dev_err(dev, "Unknown Bman version:%04x,%02x,%02x\n",
+                       id, major, minor);
+               return -ENODEV;
+       }
+
+       /*
+        * Don't feed garbage to bm_set_memory() (ilog2(0) is undefined) if
+        * the device tree had no usable "fsl,bman-fbpr" reservation.
+        */
+       if (!fbpr_a || !fbpr_sz) {
+               dev_err(dev, "FBPR memory not reserved\n");
+               return -ENODEV;
+       }
+       bm_set_memory(fbpr_a, fbpr_sz);
+
+       err_irq = platform_get_irq(pdev, 0);
+       if (err_irq <= 0) {
+               dev_info(dev, "Can't get %s IRQ\n", node->full_name);
+               return -ENODEV;
+       }
+       ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err",
+                              dev);
+       if (ret)  {
+               dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
+                       ret, node->full_name);
+               return ret;
+       }
+       /* Disable Buffer Pool State Change */
+       bm_ccsr_out(REG_ERR_ISDR, BM_EIRQ_BSCN);
+       /*
+        * Write-to-clear any stale bits, (eg. starvation being asserted prior
+        * to resource allocation during driver init).
+        */
+       bm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+       /* Enable Error Interrupts */
+       bm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+       bm_bpalloc = devm_gen_pool_create(dev, 0, -1, "bman-bpalloc");
+       if (IS_ERR(bm_bpalloc)) {
+               ret = PTR_ERR(bm_bpalloc);
+               dev_err(dev, "bman-bpalloc pool init failed (%d)\n", ret);
+               return ret;
+       }
+
+       /* seed BMan resource pool */
+       ret = gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, bm_pool_cnt, -1);
+       if (ret) {
+               dev_err(dev, "Failed to seed BPID range [%d..%d] (%d)\n",
+                       0, bm_pool_cnt - 1, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+/* Device-tree match table for the BMan global (CCSR) node */
+static const struct of_device_id fsl_bman_ids[] = {
+       {
+               .compatible = "fsl,bman",
+       },
+       {}
+};
+
+/* Built-in driver (no module unload); unbind via sysfs is suppressed */
+static struct platform_driver fsl_bman_driver = {
+       .driver = {
+               .name = KBUILD_MODNAME,
+               .of_match_table = fsl_bman_ids,
+               .suppress_bind_attrs = true,
+       },
+       .probe = fsl_bman_probe,
+};
+
+builtin_platform_driver(fsl_bman_driver);
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
new file mode 100644 (file)
index 0000000..6579cc1
--- /dev/null
@@ -0,0 +1,219 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+static struct bman_portal *affine_bportals[NR_CPUS];
+static struct cpumask portal_cpus;
+/* protect bman global registers and global data shared among portals */
+static DEFINE_SPINLOCK(bman_lock);
+
+/*
+ * Create the affine portal for @pcfg's cpu, enable its RCR interrupt
+ * source and publish it in affine_bportals[].  Returns the portal, or
+ * NULL on failure.
+ */
+static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg)
+{
+       struct bman_portal *portal;
+
+       portal = bman_create_affine_portal(pcfg);
+       if (!portal) {
+               dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
+                        __func__, pcfg->cpu);
+               return NULL;
+       }
+
+       bman_p_irqsource_add(portal, BM_PIRQ_RCRI);
+       affine_bportals[pcfg->cpu] = portal;
+
+       dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+
+       return portal;
+}
+
+/* Steer an offlining cpu's portal IRQ to cpu 0 */
+static void bman_offline_cpu(unsigned int cpu)
+{
+       const struct bm_portal_config *pcfg;
+       struct bman_portal *portal = affine_bportals[cpu];
+
+       if (!portal)
+               return;
+
+       pcfg = bman_get_bm_portal_config(portal);
+       if (pcfg)
+               irq_set_affinity(pcfg->irq, cpumask_of(0));
+}
+
+/* Restore a (re)onlined cpu's portal IRQ affinity back to that cpu */
+static void bman_online_cpu(unsigned int cpu)
+{
+       const struct bm_portal_config *pcfg;
+       struct bman_portal *portal = affine_bportals[cpu];
+
+       if (!portal)
+               return;
+
+       pcfg = bman_get_bm_portal_config(portal);
+       if (pcfg)
+               irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+}
+
+/* cpu hotplug notifier: keep portal IRQ affinity in sync with cpu state */
+static int bman_hotplug_cpu_callback(struct notifier_block *nfb,
+                                    unsigned long action, void *hcpu)
+{
+       unsigned int cpu = (unsigned long)hcpu;
+
+       if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
+               bman_online_cpu(cpu);
+       else if (action == CPU_DOWN_PREPARE ||
+                action == CPU_DOWN_PREPARE_FROZEN)
+               bman_offline_cpu(cpu);
+
+       return NOTIFY_OK;
+}
+
+/* Registered by bman_portal_driver_register() after driver registration */
+static struct notifier_block bman_hotplug_cpu_notifier = {
+       .notifier_call = bman_hotplug_cpu_callback,
+};
+
+/*
+ * Probe one BMan software portal: map its cache-enabled (CE) and
+ * cache-inhibited (CI) regions, claim the next free cpu and initialise
+ * the affine portal on it.  A portal beyond nr_cpu_ids is left mapped
+ * but uninitialised ("unassigned").
+ *
+ * Fix vs. previous revision: on init_pcfg() failure both mappings are
+ * now unmapped (the CI mapping used to leak), and the error message no
+ * longer claims "ioremap failed" for a portal-init failure.
+ */
+static int bman_portal_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *node = dev->of_node;
+       struct bm_portal_config *pcfg;
+       struct resource *addr_phys[2];
+       void __iomem *va;
+       int irq, cpu;
+
+       pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+       if (!pcfg)
+               return -ENOMEM;
+
+       pcfg->dev = dev;
+
+       addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+                                            DPAA_PORTAL_CE);
+       if (!addr_phys[0]) {
+               dev_err(dev, "Can't get %s property 'reg::CE'\n",
+                       node->full_name);
+               return -ENXIO;
+       }
+
+       addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+                                            DPAA_PORTAL_CI);
+       if (!addr_phys[1]) {
+               dev_err(dev, "Can't get %s property 'reg::CI'\n",
+                       node->full_name);
+               return -ENXIO;
+       }
+
+       pcfg->cpu = -1;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq <= 0) {
+               dev_err(dev, "Can't get %s IRQ'\n", node->full_name);
+               return -ENXIO;
+       }
+       pcfg->irq = irq;
+
+       /* cache-enabled portal region */
+       va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
+       if (!va) {
+               dev_err(dev, "ioremap::CE failed\n");
+               return -ENXIO;
+       }
+       pcfg->addr_virt[DPAA_PORTAL_CE] = va;
+
+       /* cache-inhibited portal region */
+       va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
+                         _PAGE_GUARDED | _PAGE_NO_CACHE);
+       if (!va) {
+               dev_err(dev, "ioremap::CI failed\n");
+               goto err_ioremap_ci;
+       }
+       pcfg->addr_virt[DPAA_PORTAL_CI] = va;
+
+       spin_lock(&bman_lock);
+       cpu = cpumask_next_zero(-1, &portal_cpus);
+       if (cpu >= nr_cpu_ids) {
+               /* unassigned portal, skip init */
+               spin_unlock(&bman_lock);
+               return 0;
+       }
+
+       cpumask_set_cpu(cpu, &portal_cpus);
+       spin_unlock(&bman_lock);
+       pcfg->cpu = cpu;
+
+       if (!init_pcfg(pcfg)) {
+               dev_err(dev, "portal init failed\n");
+               goto err_portal_init;
+       }
+
+       /* clear irq affinity if assigned cpu is offline */
+       if (!cpu_online(cpu))
+               bman_offline_cpu(cpu);
+
+       return 0;
+
+err_portal_init:
+       iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
+err_ioremap_ci:
+       iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+       return -ENXIO;
+}
+
+/* Device-tree match table for BMan software portal nodes */
+static const struct of_device_id bman_portal_ids[] = {
+       {
+               .compatible = "fsl,bman-portal",
+       },
+       {}
+};
+MODULE_DEVICE_TABLE(of, bman_portal_ids);
+
+/* Registered through bman_portal_driver_register() below */
+static struct platform_driver bman_portal_driver = {
+       .driver = {
+               .name = KBUILD_MODNAME,
+               .of_match_table = bman_portal_ids,
+       },
+       .probe = bman_portal_probe,
+};
+
+/*
+ * Register the portal driver and, on success, the cpu hotplug notifier
+ * that re-targets portal IRQ affinity as cpus come and go.
+ */
+static int __init bman_portal_driver_register(struct platform_driver *drv)
+{
+       int err = platform_driver_register(drv);
+
+       if (err < 0)
+               return err;
+
+       register_hotcpu_notifier(&bman_hotplug_cpu_notifier);
+
+       return 0;
+}
+
+module_driver(bman_portal_driver,
+             bman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/bman_priv.h b/drivers/soc/fsl/qbman/bman_priv.h
new file mode 100644 (file)
index 0000000..f6896a2
--- /dev/null
@@ -0,0 +1,80 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/bman.h>
+
+/* Portal processing (interrupt) sources */
+#define BM_PIRQ_RCRI   0x00000002      /* RCR Ring (below threshold) */
+
+/* Revision info (for errata and feature handling) */
+#define BMAN_REV10 0x0100
+#define BMAN_REV20 0x0200
+#define BMAN_REV21 0x0201
+extern u16 bman_ip_rev;        /* 0 if uninitialised, otherwise BMAN_REVx */
+
+extern struct gen_pool *bm_bpalloc;
+
+/* Per-portal static configuration, filled in by the portal probe */
+struct bm_portal_config {
+       /*
+        * Corenet portal addresses;
+        * [0]==cache-enabled, [1]==cache-inhibited.
+        */
+       void __iomem *addr_virt[2];
+       /* Allow these to be joined in lists */
+       struct list_head list;
+       /* the platform device this portal was probed from */
+       struct device *dev;
+       /* User-visible portal configuration settings */
+       /* portal is affined to this cpu (-1 while unassigned) */
+       int cpu;
+       /* portal interrupt line */
+       int irq;
+};
+
+struct bman_portal *bman_create_affine_portal(
+                       const struct bm_portal_config *config);
+/*
+ * The bman_p_***() variant below may be called even when the cpu to which
+ * the portal is affine is not yet online.
+ * @bman_portal specifies which portal the API will use.
+ */
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits);
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * This mask contains all the "irqsource" bits visible to API users
+ */
+#define BM_PIRQ_VISIBLE        BM_PIRQ_RCRI
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
new file mode 100644 (file)
index 0000000..b63fd72
--- /dev/null
@@ -0,0 +1,103 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_SYS_H
+#define __DPAA_SYS_H
+
+#include <linux/cpu.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/prefetch.h>
+#include <linux/genalloc.h>
+#include <asm/cacheflush.h>
+
+/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
+#define DPAA_PORTAL_CE 0
+#define DPAA_PORTAL_CI 1
+
+#if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64)
+#error "Unsupported Cacheline Size"
+#endif
+
+/*
+ * Flush one 64-byte region starting at @p to memory (no-op on other
+ * architectures).  Fix: the 32-bit ARM Kconfig symbol is CONFIG_ARM;
+ * CONFIG_ARM32 does not exist, so that branch was dead code.
+ */
+static inline void dpaa_flush(void *p)
+{
+#ifdef CONFIG_PPC
+       flush_dcache_range((unsigned long)p, (unsigned long)p+64);
+#elif defined(CONFIG_ARM)
+       __cpuc_flush_dcache_area(p, 64);
+#elif defined(CONFIG_ARM64)
+       __flush_dcache_area(p, 64);
+#endif
+}
+
+#define dpaa_invalidate(p) dpaa_flush(p)
+
+#define dpaa_zero(p) memset(p, 0, 64)
+
+/*
+ * Prefetch @p for reading; on parts with 32-byte cachelines also prefetch
+ * the second half of the 64-byte region.
+ */
+static inline void dpaa_touch_ro(void *p)
+{
+#if (L1_CACHE_BYTES == 32)
+       prefetch(p+32);
+#endif
+       prefetch(p);
+}
+
+/* Commonly used combo: invalidate the line at @p, then prefetch it back */
+static inline void dpaa_invalidate_touch_ro(void *p)
+{
+       dpaa_invalidate(p);
+       dpaa_touch_ro(p);
+}
+
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+#define DPAA_ASSERT(x) WARN_ON(!(x))
+#else
+#define DPAA_ASSERT(x)
+#endif
+
+/* cyclic helper for rings */
+static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+       /* number of entries from 'first' (included) to 'last' (excluded) */
+       return (first <= last) ? last - first : ringsize + last - first;
+}
+
+/* Offset applied to genalloc pools due to zero being an error return */
+#define DPAA_GENALLOC_OFF      0x80000000
+
+#endif /* __DPAA_SYS_H */
diff --git a/include/soc/fsl/bman.h b/include/soc/fsl/bman.h
new file mode 100644 (file)
index 0000000..eaaf56d
--- /dev/null
@@ -0,0 +1,129 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FSL_BMAN_H
+#define __FSL_BMAN_H
+
+/* wrapper for 48-bit buffers */
+struct bm_buffer {
+       union {
+               struct {
+                       __be16 bpid; /* hi 8-bits reserved */
+                       __be16 hi; /* High 16-bits of 48-bit address */
+                       __be32 lo; /* Low 32-bits of 48-bit address */
+               };
+               __be64 data;
+       };
+} __aligned(8);
+/*
+ * Restore the 48 bit address previously stored in BMan
+ * hardware pools as a dma_addr_t
+ */
+static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
+{
+       /* the low 48 bits of the big-endian 'data' word hold the address */
+       return be64_to_cpu(buf->data) & ((1ULL << 48) - 1);
+}
+
+/* Get the 48-bit buffer address as a plain u64 */
+static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
+{
+       u64 v = be64_to_cpu(buf->data);
+
+       return v & ((1ULL << 48) - 1);
+}
+
+/* Store a 48-bit address; the bpid halfword is left untouched */
+static inline void bm_buffer_set64(struct bm_buffer *buf, u64 addr)
+{
+       buf->lo = cpu_to_be32(lower_32_bits(addr));
+       buf->hi = cpu_to_be16(upper_32_bits(addr));
+}
+
+/* Extract the 8-bit buffer pool id from the descriptor */
+static inline u8 bm_buffer_get_bpid(const struct bm_buffer *buf)
+{
+       return (u8)be16_to_cpu(buf->bpid);
+}
+
+/* Set the 8-bit buffer pool id (upper bits of the halfword stay zero) */
+static inline void bm_buffer_set_bpid(struct bm_buffer *buf, int bpid)
+{
+       buf->bpid = cpu_to_be16((u16)(bpid & 0xff));
+}
+
+/* Managed portal, high-level i/face */
+
+/* Portal and Buffer Pools */
+struct bman_portal;
+struct bman_pool;
+
+#define BM_POOL_MAX            64 /* max # of buffer pools */
+
+/**
+ * bman_new_pool - Allocates a Buffer Pool object
+ *
+ * Creates a pool object, and returns a reference to it or NULL on error.
+ */
+struct bman_pool *bman_new_pool(void);
+
+/**
+ * bman_free_pool - Deallocates a Buffer Pool object
+ * @pool: the pool object to release
+ */
+void bman_free_pool(struct bman_pool *pool);
+
+/**
+ * bman_get_bpid - Returns a pool object's BPID.
+ * @pool: the pool object
+ *
+ * The returned value is the index of the encapsulated buffer pool,
+ * in the range of [0, @BM_POOL_MAX-1].
+ */
+int bman_get_bpid(const struct bman_pool *pool);
+
+/**
+ * bman_release - Release buffer(s) to the buffer pool
+ * @pool: the buffer pool object to release to
+ * @bufs: an array of buffers to release
+ * @num: the number of buffers in @bufs (1-8)
+ *
+ * Adds the given buffers to RCR entries. If the RCR ring is unresponsive,
+ * the function will return -ETIMEDOUT. Otherwise, it returns zero.
+ */
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num);
+
+/**
+ * bman_acquire - Acquire buffer(s) from a buffer pool
+ * @pool: the buffer pool object to acquire from
+ * @bufs: array for storing the acquired buffers
+ * @num: the number of buffers desired (@bufs is at least this big)
+ *
+ * Issues an "Acquire" command via the portal's management command interface.
+ * The return value will be the number of buffers obtained from the pool, or a
+ * negative error code if a h/w error or pool starvation was encountered. In
+ * the latter case, the content of @bufs is undefined.
+ */
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num);
+
+#endif /* __FSL_BMAN_H */