Merge tag 'arm-fixes-5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
author    Linus Torvalds <torvalds@linux-foundation.org>
          Wed, 10 Nov 2021 19:25:37 +0000 (11:25 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Wed, 10 Nov 2021 19:25:37 +0000 (11:25 -0800)
Pull ARM SoC fixes from Arnd Bergmann:
 "This is one set of fixes for the NXP/FSL DPAA2 drivers, addressing a
  few minor issues. I received these just after sending out the last
  v5.15 fixes, and nothing in here seemed urgent enough for a quick
  follow-up"

* tag 'arm-fixes-5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc:
  soc: fsl: dpaa2-console: free buffer before returning from dpaa2_console_read
  soc: fsl: dpio: use the combined functions to protect critical zone
  soc: fsl: dpio: replace smp_processor_id with raw_smp_processor_id
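
For illustration, a hedged sketch of the bug class behind the
dpaa2-console fix above (the pattern only, with made-up names, not the
driver's exact code): a read handler that allocates a temporary kernel
buffer must free it on every return path, including the early error
return.

/* Illustrative sketch, not dpaa2_console_read() itself. */
static ssize_t example_read(struct file *fp, char __user *buf,
                            size_t count, loff_t *f_pos)
{
        char *kbuf = kmalloc(count, GFP_KERNEL);

        if (!kbuf)
                return -ENOMEM;

        /* ... fill kbuf from the device log buffer ... */

        if (copy_to_user(buf, kbuf, count)) {
                kfree(kbuf);    /* free before returning, as in the fix */
                return -EFAULT;
        }

        kfree(kbuf);
        return count;
}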

drivers/soc/fsl/dpio/dpio-service.c
drivers/soc/fsl/dpio/qbman-portal.c

diff --combined drivers/soc/fsl/dpio/dpio-service.c
@@@ -12,7 -12,6 +12,7 @@@
  #include <linux/platform_device.h>
  #include <linux/interrupt.h>
  #include <linux/dma-mapping.h>
 +#include <linux/dim.h>
  #include <linux/slab.h>
  
  #include "dpio.h"
@@@ -29,14 -28,6 +29,14 @@@ struct dpaa2_io 
        spinlock_t lock_notifications;
        struct list_head notifications;
        struct device *dev;
 +
 +      /* Net DIM */
 +      struct dim rx_dim;
 +      /* protect against concurrent Net DIM updates */
 +      spinlock_t dim_lock;
 +      u16 event_ctr;
 +      u64 bytes;
 +      u64 frames;
  };
  
  struct dpaa2_io_store {
@@@ -68,7 -59,7 +68,7 @@@ static inline struct dpaa2_io *service_
         * potentially being migrated away.
         */
        if (cpu < 0)
-               cpu = smp_processor_id();
+               cpu = raw_smp_processor_id();
  
        /* If a specific cpu was requested, pick it up immediately */
        return dpio_by_cpu[cpu];
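
Why the raw variant: with CONFIG_DEBUG_PREEMPT, smp_processor_id()
splats ("BUG: using smp_processor_id() in preemptible code") whenever
the caller can migrate; raw_smp_processor_id() skips that check. That
is fine here because, per the comment above, the result is only a
best-effort hint for picking a portal:

/* Illustrative contrast, called from preemptible context: */
cpu = smp_processor_id();       /* triggers a DEBUG_PREEMPT splat */
cpu = raw_smp_processor_id();   /* OK: value is a hint, may go stale */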
@@@ -109,17 -100,6 +109,17 @@@ struct dpaa2_io *dpaa2_io_service_selec
  }
  EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
  
 +static void dpaa2_io_dim_work(struct work_struct *w)
 +{
 +      struct dim *dim = container_of(w, struct dim, work);
 +      struct dim_cq_moder moder =
 +              net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
 +      struct dpaa2_io *d = container_of(dim, struct dpaa2_io, rx_dim);
 +
 +      dpaa2_io_set_irq_coalescing(d, moder.usec);
 +      dim->state = DIM_START_MEASURE;
 +}
 +
  /**
   * dpaa2_io_create() - create a dpaa2_io object.
   * @desc: the dpaa2_io descriptor
@@@ -134,7 -114,6 +134,7 @@@ struct dpaa2_io *dpaa2_io_create(const 
                                 struct device *dev)
  {
        struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 +      u32 qman_256_cycles_per_ns;
  
        if (!obj)
                return NULL;
        obj->dpio_desc = *desc;
        obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
        obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
 +      obj->swp_desc.qman_clk = obj->dpio_desc.qman_clk;
        obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
 +
 +      /* Compute the period, in ns, of 256 QBMAN clock cycles. This is
 +       * needed because the interrupt timeout period register must be
 +       * specified in QBMAN clock cycles, in increments of 256.
 +       */
 +      qman_256_cycles_per_ns = 256000 / (obj->swp_desc.qman_clk / 1000000);
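 +      /* Example (hypothetical 500 MHz QBMAN clock): 256000 / 500 = 512,
 +       * i.e. 256 QBMAN cycles take 512 ns.
 +       */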
 +      obj->swp_desc.qman_256_cycles_per_ns = qman_256_cycles_per_ns;
        obj->swp = qbman_swp_init(&obj->swp_desc);
  
        if (!obj->swp) {
        INIT_LIST_HEAD(&obj->node);
        spin_lock_init(&obj->lock_mgmt_cmd);
        spin_lock_init(&obj->lock_notifications);
 +      spin_lock_init(&obj->dim_lock);
        INIT_LIST_HEAD(&obj->notifications);
  
        /* For now only enable DQRR interrupts */
  
        obj->dev = dev;
  
 +      memset(&obj->rx_dim, 0, sizeof(obj->rx_dim));
 +      INIT_WORK(&obj->rx_dim.work, dpaa2_io_dim_work);
 +      obj->event_ctr = 0;
 +      obj->bytes = 0;
 +      obj->frames = 0;
 +
        return obj;
  }
  
@@@ -230,8 -194,6 +230,8 @@@ irqreturn_t dpaa2_io_irq(struct dpaa2_i
        struct qbman_swp *swp;
        u32 status;
  
 +      obj->event_ctr++;
 +
        swp = obj->swp;
        status = qbman_swp_interrupt_read_status(swp);
        if (!status)
@@@ -500,7 -462,7 +500,7 @@@ int dpaa2_io_service_enqueue_multiple_f
        qbman_eq_desc_set_no_orp(&ed, 0);
        qbman_eq_desc_set_fq(&ed, fqid);
  
 -      return qbman_swp_enqueue_multiple(d->swp, &ed, fd, 0, nb);
 +      return qbman_swp_enqueue_multiple(d->swp, &ed, fd, NULL, nb);
  }
  EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);
  
@@@ -817,82 -779,3 +817,82 @@@ int dpaa2_io_query_bp_count(struct dpaa
        return 0;
  }
  EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);
 +
 +/**
 + * dpaa2_io_set_irq_coalescing() - Set new IRQ coalescing values
 + * @d: the given DPIO object
 + * @irq_holdoff: interrupt holdoff (timeout) period in us
 + *
 + * Return 0 for success, or negative error code on error.
 + */
 +int dpaa2_io_set_irq_coalescing(struct dpaa2_io *d, u32 irq_holdoff)
 +{
 +      struct qbman_swp *swp = d->swp;
 +
 +      return qbman_swp_set_irq_coalescing(swp, swp->dqrr.dqrr_size - 1,
 +                                          irq_holdoff);
 +}
 +EXPORT_SYMBOL(dpaa2_io_set_irq_coalescing);
 +
 +/**
 + * dpaa2_io_get_irq_coalescing() - Get the current IRQ coalescing parameters
 + * @d: the given DPIO object
 + * @irq_holdoff: interrupt holdoff (timeout) period in us
 + */
 +void dpaa2_io_get_irq_coalescing(struct dpaa2_io *d, u32 *irq_holdoff)
 +{
 +      struct qbman_swp *swp = d->swp;
 +
 +      qbman_swp_get_irq_coalescing(swp, NULL, irq_holdoff);
 +}
 +EXPORT_SYMBOL(dpaa2_io_get_irq_coalescing);
 +
 +/**
 + * dpaa2_io_set_adaptive_coalescing() - Enable/disable adaptive coalescing
 + * @d: the given DPIO object
 + * @use_adaptive_rx_coalesce: adaptive coalescing state
 + */
 +void dpaa2_io_set_adaptive_coalescing(struct dpaa2_io *d,
 +                                    int use_adaptive_rx_coalesce)
 +{
 +      d->swp->use_adaptive_rx_coalesce = use_adaptive_rx_coalesce;
 +}
 +EXPORT_SYMBOL(dpaa2_io_set_adaptive_coalescing);
 +
 +/**
 + * dpaa2_io_get_adaptive_coalescing() - Query adaptive coalescing state
 + * @d: the given DPIO object
 + *
 + * Return 1 when adaptive coalescing is enabled on the DPIO object and 0
 + * otherwise.
 + */
 +int dpaa2_io_get_adaptive_coalescing(struct dpaa2_io *d)
 +{
 +      return d->swp->use_adaptive_rx_coalesce;
 +}
 +EXPORT_SYMBOL(dpaa2_io_get_adaptive_coalescing);
 +
 +/**
 + * dpaa2_io_update_net_dim() - Update Net DIM
 + * @d: the given DPIO object
 + * @frames: how many frames have been dequeued by the user since the last call
 + * @bytes: how many bytes have been dequeued by the user since the last call
 + */
 +void dpaa2_io_update_net_dim(struct dpaa2_io *d, __u64 frames, __u64 bytes)
 +{
 +      struct dim_sample dim_sample = {};
 +
 +      if (!d->swp->use_adaptive_rx_coalesce)
 +              return;
 +
 +      spin_lock(&d->dim_lock);
 +
 +      d->bytes += bytes;
 +      d->frames += frames;
 +
 +      dim_update_sample(d->event_ctr, d->frames, d->bytes, &dim_sample);
 +      net_dim(&d->rx_dim, dim_sample);
 +
 +      spin_unlock(&d->dim_lock);
 +}
 +EXPORT_SYMBOL(dpaa2_io_update_net_dim);
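
For context, a sketch of how a consumer might feed these hooks from a
NAPI poll loop (illustrative only: struct my_channel and
my_consume_frames() are hypothetical; the dpaa2_io_* calls are the
ones added above).

struct my_channel {
        struct napi_struct napi;
        struct dpaa2_io *dpio;
        u64 frames;
        u64 bytes;
};

static int my_poll(struct napi_struct *napi, int budget)
{
        struct my_channel *ch = container_of(napi, struct my_channel,
                                             napi);
        int cleaned = my_consume_frames(ch, budget);    /* hypothetical */

        if (cleaned < budget && napi_complete_done(napi, cleaned)) {
                /* Report traffic seen since the last call; if DIM
                 * decides to change the profile, it schedules
                 * dpaa2_io_dim_work(), which in turn calls
                 * dpaa2_io_set_irq_coalescing().
                 */
                dpaa2_io_update_net_dim(ch->dpio, ch->frames, ch->bytes);
                ch->frames = 0;
                ch->bytes = 0;
        }
        return cleaned;
}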
diff --combined drivers/soc/fsl/dpio/qbman-portal.c
@@@ -29,7 -29,6 +29,7 @@@
  #define QBMAN_CINH_SWP_EQCR_AM_RT   0x980
  #define QBMAN_CINH_SWP_RCR_AM_RT    0x9c0
  #define QBMAN_CINH_SWP_DQPI    0xa00
 +#define QBMAN_CINH_SWP_DQRR_ITR     0xa80
  #define QBMAN_CINH_SWP_DCAP    0xac0
  #define QBMAN_CINH_SWP_SDQCR   0xb00
  #define QBMAN_CINH_SWP_EQCR_AM_RT2  0xb40
@@@ -39,7 -38,6 +39,7 @@@
  #define QBMAN_CINH_SWP_IER     0xe40
  #define QBMAN_CINH_SWP_ISDR    0xe80
  #define QBMAN_CINH_SWP_IIR     0xec0
 +#define QBMAN_CINH_SWP_ITPR    0xf40
  
  /* CENA register offsets */
  #define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
@@@ -357,9 -355,6 +357,9 @@@ struct qbman_swp *qbman_swp_init(const 
                        & p->eqcr.pi_ci_mask;
        p->eqcr.available = p->eqcr.pi_ring_size;
  
 +      /* Initialize the software portal with an IRQ timeout period of 0 us */
 +      qbman_swp_set_irq_coalescing(p, p->dqrr.dqrr_size - 1, 0);
 +
        return p;
  }
  
@@@ -693,9 -688,9 +693,9 @@@ int qbman_swp_enqueue_multiple_direct(s
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
 -                      struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
 +                      struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p;
  
 -                      d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
 +                      eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
@@@ -737,8 -732,7 +737,7 @@@ int qbman_swp_enqueue_multiple_mem_back
        int i, num_enqueued = 0;
        unsigned long irq_flags;
  
-       spin_lock(&s->access_spinlock);
-       local_irq_save(irq_flags);
+       spin_lock_irqsave(&s->access_spinlock, irq_flags);
  
        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                        eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available) {
-                       local_irq_restore(irq_flags);
-                       spin_unlock(&s->access_spinlock);
+                       spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
                        return 0;
                }
        }
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
 -                      struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
 +                      struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p;
  
 -                      d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
 +                      eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
        dma_wmb();
        qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
                                (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
-       local_irq_restore(irq_flags);
-       spin_unlock(&s->access_spinlock);
+       spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
  
        return num_enqueued;
  }
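
The locking fix above folds the open-coded spin_lock() +
local_irq_save() pair into the combined helpers, which take the lock
and disable local interrupts (saving the previous state) in one call.
A minimal sketch of the generic pattern:

static DEFINE_SPINLOCK(example_lock);

static void example_critical_zone(void)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        /* ... critical zone shared with interrupt context ... */
        spin_unlock_irqrestore(&example_lock, flags);
}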
@@@ -1801,56 -1793,3 +1798,56 @@@ u32 qbman_bp_info_num_free_bufs(struct 
  {
        return le32_to_cpu(a->fill);
  }
 +
 +/**
 + * qbman_swp_set_irq_coalescing() - Set new IRQ coalescing values
 + * @p: the software portal object
 + * @irq_threshold: interrupt threshold
 + * @irq_holdoff: interrupt holdoff (timeout) period in us
 + *
 + * Return 0 for success, or negative error code on error.
 + */
 +int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold,
 +                               u32 irq_holdoff)
 +{
 +      u32 itp, max_holdoff;
 +
 +      /* Convert the irq_holdoff value from usecs to increments of 256
 +       * QBMAN clock cycles. This depends on the QBMAN internal frequency.
 +       */
 +      itp = (irq_holdoff * 1000) / p->desc->qman_256_cycles_per_ns;
 +      if (itp > 4096) {
 +              max_holdoff = (p->desc->qman_256_cycles_per_ns * 4096) / 1000;
 +              pr_err("irq_holdoff must be <= %uus\n", max_holdoff);
 +              return -EINVAL;
 +      }
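 +      /* Worked example (hypothetical 500 MHz clock, i.e. 512 ns per 256
 +       * cycles): irq_holdoff = 100 us -> itp = 100000 / 512 = 195. The
 +       * itp cap of 4096 then corresponds to a max holdoff of ~2097 us.
 +       */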
 +
 +      if (irq_threshold >= p->dqrr.dqrr_size) {
 +              pr_err("irq_threshold must be <= %u\n", p->dqrr.dqrr_size - 1);
 +              return -EINVAL;
 +      }
 +
 +      p->irq_threshold = irq_threshold;
 +      p->irq_holdoff = irq_holdoff;
 +
 +      qbman_write_register(p, QBMAN_CINH_SWP_DQRR_ITR, irq_threshold);
 +      qbman_write_register(p, QBMAN_CINH_SWP_ITPR, itp);
 +
 +      return 0;
 +}
 +
 +/**
 + * qbman_swp_get_irq_coalescing() - Get the current IRQ coalescing parameters
 + * @p: the software portal object
 + * @irq_threshold: interrupt threshold (an IRQ is generated when there are more
 + * DQRR entries in the portal than the threshold)
 + * @irq_holdoff: interrupt holdoff (timeout) period in us
 + */
 +void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold,
 +                                u32 *irq_holdoff)
 +{
 +      if (irq_threshold)
 +              *irq_threshold = p->irq_threshold;
 +      if (irq_holdoff)
 +              *irq_holdoff = p->irq_holdoff;
 +}
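
Finally, a hedged sketch of how ethtool-style glue might surface these
knobs (illustrative only: struct my_priv is hypothetical, this is not
the dpaa2-eth code, and recent ethtool_ops take extra arguments that
are omitted here).

static int my_get_coalesce(struct net_device *net_dev,
                           struct ethtool_coalesce *ic)
{
        struct my_priv *priv = netdev_priv(net_dev);

        /* The holdoff comes back in us, as programmed. */
        dpaa2_io_get_irq_coalescing(priv->dpio, &ic->rx_coalesce_usecs);
        ic->use_adaptive_rx_coalesce =
                dpaa2_io_get_adaptive_coalescing(priv->dpio);

        return 0;
}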