/* --- sysfs implementation --- */
+/**
+ * dev_to_dma_chan - convert a device pointer to its sysfs container object
+ * @dev: device node
+ *
+ * Must be called under dma_list_mutex
+ */
+static struct dma_chan *dev_to_dma_chan(struct device *dev)
+{
+ struct dma_chan_dev *chan_dev;
+
+ chan_dev = container_of(dev, typeof(*chan_dev), device);
+ return chan_dev->chan;
+}
+
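For reference, the container type implied by the container_of() call above would be declared roughly as follows. This is a sketch inferred from the hunks in this patch, not a copy of the actual declaration:

/* Sketch: embeds the class device so container_of() can recover the
 * wrapper; the chan pointer is what teardown NULLs under dma_list_mutex. */
struct dma_chan_dev {
	struct dma_chan *chan;	/* NULL once the channel is torn down */
	struct device device;	/* embedded sysfs device */
};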
static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dma_chan *chan = to_dma_chan(dev);
+ struct dma_chan *chan;
unsigned long count = 0;
int i;
+ int err;
- for_each_possible_cpu(i)
- count += per_cpu_ptr(chan->local, i)->memcpy_count;
+ mutex_lock(&dma_list_mutex);
+ chan = dev_to_dma_chan(dev);
+ if (chan) {
+ for_each_possible_cpu(i)
+ count += per_cpu_ptr(chan->local, i)->memcpy_count;
+ err = sprintf(buf, "%lu\n", count);
+ } else
+ err = -ENODEV;
+ mutex_unlock(&dma_list_mutex);
- return sprintf(buf, "%lu\n", count);
+ return err;
}
static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dma_chan *chan = to_dma_chan(dev);
+ struct dma_chan *chan;
unsigned long count = 0;
int i;
+ int err;
- for_each_possible_cpu(i)
- count += per_cpu_ptr(chan->local, i)->bytes_transferred;
+ mutex_lock(&dma_list_mutex);
+ chan = dev_to_dma_chan(dev);
+ if (chan) {
+ for_each_possible_cpu(i)
+ count += per_cpu_ptr(chan->local, i)->bytes_transferred;
+ err = sprintf(buf, "%lu\n", count);
+ } else
+ err = -ENODEV;
+ mutex_unlock(&dma_list_mutex);
- return sprintf(buf, "%lu\n", count);
+ return err;
}
static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dma_chan *chan = to_dma_chan(dev);
+ struct dma_chan *chan;
+ int err;
- return sprintf(buf, "%d\n", chan->client_count);
+ mutex_lock(&dma_list_mutex);
+ chan = dev_to_dma_chan(dev);
+ if (chan)
+ err = sprintf(buf, "%d\n", chan->client_count);
+ else
+ err = -ENODEV;
+ mutex_unlock(&dma_list_mutex);
+
+ return err;
}
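All three show handlers now share the same lock/resolve/format/unlock shape. A hypothetical consolidation (the helper name is illustrative, not part of the patch) makes the pattern explicit:

/* Hypothetical helper: resolve the channel under dma_list_mutex and
 * format it, or report -ENODEV if the channel is already gone. */
static ssize_t dma_chan_show(struct device *dev, char *buf,
			     ssize_t (*fmt)(struct dma_chan *chan, char *buf))
{
	struct dma_chan *chan;
	ssize_t err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	err = chan ? fmt(chan, buf) : -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}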
static struct device_attribute dma_attrs[] = {
__ATTR_NULL
};
+static void chan_dev_release(struct device *dev)
+{
+ struct dma_chan_dev *chan_dev;
+
+ chan_dev = container_of(dev, typeof(*chan_dev), device);
+ kfree(chan_dev);
+}
+
static struct class dma_devclass = {
.name = "dma",
.dev_attrs = dma_attrs,
+ .dev_release = chan_dev_release,
};
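The release callback is what makes the deferred kfree() safe: device_unregister() only drops the initial reference, and any task still holding a sysfs file open keeps the wrapper alive until its final put, at which point the driver core invokes chan_dev_release(). A hedged sketch of the refcount behaviour this enables (the helper name is illustrative):

/* Illustrative only: what keeps chan_dev alive across unregister. */
static void chan_dev_lifetime_sketch(struct dma_chan_dev *chan_dev)
{
	struct device *d = &chan_dev->device;

	get_device(d);		/* e.g. sysfs holding a file open */
	device_unregister(d);	/* gone from sysfs; initial ref dropped */
	put_device(d);		/* final ref: chan_dev_release() -> kfree() */
}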
/* --- client and device registration --- */
list_for_each_entry(chan, &dev->channels, device_node) {
if (chan->client_count) {
pr_debug("%s: %s busy\n",
- __func__, dev_name(&chan->dev));
+ __func__, dma_chan_name(chan));
continue;
}
ret = chan;
if (err == -ENODEV) {
pr_debug("%s: %s module removed\n", __func__,
- dev_name(&chan->dev));
+ dma_chan_name(chan));
list_del_rcu(&device->global_node);
} else if (err)
pr_err("dmaengine: failed to get %s: (%d)\n",
- dev_name(&chan->dev), err);
+ dma_chan_name(chan), err);
else
break;
} else
pr_debug("%s: %s filter said false\n",
- __func__, dev_name(&chan->dev));
+ __func__, dma_chan_name(chan));
chan = NULL;
}
mutex_unlock(&dma_list_mutex);
pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
- chan ? dev_name(&chan->dev) : NULL);
+ chan ? dma_chan_name(chan) : NULL);
return chan;
}
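The dev_name(&chan->dev) call sites are converted to dma_chan_name() because the device is no longer embedded directly in struct dma_chan. Given the new indirection, the accessor is presumably just a thin wrapper, consistent with the conversions in this patch:

/* Presumed shape of the accessor used throughout these hunks. */
static inline const char *dma_chan_name(struct dma_chan *chan)
{
	return dev_name(&chan->dev->device);
}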
break;
} else if (err)
pr_err("dmaengine: failed to get %s: (%d)\n",
- dev_name(&chan->dev), err);
+ dma_chan_name(chan), err);
}
}
chan->local = alloc_percpu(typeof(*chan->local));
if (chan->local == NULL)
continue;
+ chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
+ if (chan->dev == NULL) {
+ free_percpu(chan->local);
+ continue;
+ }
chan->chan_id = chancnt++;
- chan->dev.class = &dma_devclass;
- chan->dev.parent = device->dev;
- dev_set_name(&chan->dev, "dma%dchan%d",
+ chan->dev->device.class = &dma_devclass;
+ chan->dev->device.parent = device->dev;
+ chan->dev->chan = chan;
+ dev_set_name(&chan->dev->device, "dma%dchan%d",
device->dev_id, chan->chan_id);
- rc = device_register(&chan->dev);
+ rc = device_register(&chan->dev->device);
if (rc) {
free_percpu(chan->local);
chan->local = NULL;
list_for_each_entry(chan, &device->channels, device_node) {
if (chan->local == NULL)
continue;
- device_unregister(&chan->dev);
+ mutex_lock(&dma_list_mutex);
+ chan->dev->chan = NULL;
+ mutex_unlock(&dma_list_mutex);
+ device_unregister(&chan->dev->device);
free_percpu(chan->local);
}
return rc;
WARN_ONCE(chan->client_count,
"%s called while %d clients hold a reference\n",
__func__, chan->client_count);
- device_unregister(&chan->dev);
+ mutex_lock(&dma_list_mutex);
+ chan->dev->chan = NULL;
+ mutex_unlock(&dma_list_mutex);
+ device_unregister(&chan->dev->device);
}
}
EXPORT_SYMBOL(dma_async_device_unregister);
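Both unregister paths above follow the same ordering, which is the heart of the fix: the channel pointer is cleared under dma_list_mutex before the device is unregistered, so a concurrent sysfs reader either sees a valid channel or gets -ENODEV, never a stale pointer. A sketch of the sequence (the helper name is illustrative; the body matches the hunks above):

/* Illustrative teardown ordering. */
static void dma_chan_teardown_sketch(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	chan->dev->chan = NULL;		/* show handlers now return -ENODEV */
	mutex_unlock(&dma_list_mutex);

	/* Drops the initial reference; chan_dev_release() runs, and
	 * frees the wrapper, only after the last open sysfs file puts
	 * its reference, never while a show handler is running. */
	device_unregister(&chan->dev->device);
}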
return DMA_SUCCESS;
WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
- " %s\n", __func__, dev_name(&tx->chan->dev));
+ " %s\n", __func__, dma_chan_name(tx->chan));
/* poll through the dependency chain, return when tx is complete */
do {
* the controller, though.
*/
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+static struct device *chan2parent(struct dma_chan *chan)
+{
+ return chan->dev->device.parent;
+}
+
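These two helpers split the driver's uses of the old chan->dev field: logging goes to the per-channel class device via chan2dev(), while streaming DMA mappings must target the physical controller device via chan2parent(), since the class device is virtual and carries no DMA mapping operations. A brief sketch of the split (illustrative, using names from the hunks below):

/* Logging targets the channel's class device; DMA mappings must use
 * the physical controller, i.e. the class device's parent. */
static void dwc_map_lli_sketch(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dev_vdbg(chan2dev(&dwc->chan), "mapping lli for desc %p\n", desc);
	desc->txd.phys = dma_map_single(chan2parent(&dwc->chan), &desc->lli,
					sizeof(desc->lli), DMA_TO_DEVICE);
}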
static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
ret = desc;
break;
}
- dev_dbg(&dwc->chan.dev, "desc %p not ACKed\n", desc);
+ dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
i++;
}
spin_unlock_bh(&dwc->lock);
- dev_vdbg(&dwc->chan.dev, "scanned %u descriptors on freelist\n", i);
+ dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
return ret;
}
struct dw_desc *child;
list_for_each_entry(child, &desc->txd.tx_list, desc_node)
- dma_sync_single_for_cpu(dwc->chan.dev.parent,
+ dma_sync_single_for_cpu(chan2parent(&dwc->chan),
child->txd.phys, sizeof(child->lli),
DMA_TO_DEVICE);
- dma_sync_single_for_cpu(dwc->chan.dev.parent,
+ dma_sync_single_for_cpu(chan2parent(&dwc->chan),
desc->txd.phys, sizeof(desc->lli),
DMA_TO_DEVICE);
}
spin_lock_bh(&dwc->lock);
list_for_each_entry(child, &desc->txd.tx_list, desc_node)
- dev_vdbg(&dwc->chan.dev,
+ dev_vdbg(chan2dev(&dwc->chan),
"moving child desc %p to freelist\n",
child);
list_splice_init(&desc->txd.tx_list, &dwc->free_list);
- dev_vdbg(&dwc->chan.dev, "moving desc %p to freelist\n", desc);
+ dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
list_add(&desc->desc_node, &dwc->free_list);
spin_unlock_bh(&dwc->lock);
}
/* ASSERT: channel is idle */
if (dma_readl(dw, CH_EN) & dwc->mask) {
- dev_err(&dwc->chan.dev,
+ dev_err(chan2dev(&dwc->chan),
"BUG: Attempted to start non-idle channel\n");
- dev_err(&dwc->chan.dev,
+ dev_err(chan2dev(&dwc->chan),
" SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
channel_readl(dwc, SAR),
channel_readl(dwc, DAR),
void *param;
struct dma_async_tx_descriptor *txd = &desc->txd;
- dev_vdbg(&dwc->chan.dev, "descriptor %u complete\n", txd->cookie);
+ dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
dwc->completed = txd->cookie;
callback = txd->callback;
* mapped before they were submitted...
*/
if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP))
- dma_unmap_page(dwc->chan.dev.parent, desc->lli.dar, desc->len,
- DMA_FROM_DEVICE);
+ dma_unmap_page(chan2parent(&dwc->chan), desc->lli.dar,
+ desc->len, DMA_FROM_DEVICE);
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
- dma_unmap_page(dwc->chan.dev.parent, desc->lli.sar, desc->len,
- DMA_TO_DEVICE);
+ dma_unmap_page(chan2parent(&dwc->chan), desc->lli.sar,
+ desc->len, DMA_TO_DEVICE);
/*
* The API requires that no submissions are done from a
LIST_HEAD(list);
if (dma_readl(dw, CH_EN) & dwc->mask) {
- dev_err(&dwc->chan.dev,
+ dev_err(chan2dev(&dwc->chan),
"BUG: XFER bit set, but channel not idle!\n");
/* Try to continue after resetting the channel... */
return;
}
- dev_vdbg(&dwc->chan.dev, "scan_descriptors: llp=0x%x\n", llp);
+ dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
if (desc->lli.llp == llp)
dwc_descriptor_complete(dwc, desc);
}
- dev_err(&dwc->chan.dev,
+ dev_err(chan2dev(&dwc->chan),
"BUG: All descriptors done, but channel not idle!\n");
/* Try to continue after resetting the channel... */
static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
- dev_printk(KERN_CRIT, &dwc->chan.dev,
+ dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
" desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
lli->sar, lli->dar, lli->llp,
lli->ctlhi, lli->ctllo);
* controller flagged an error instead of scribbling over
* random memory locations.
*/
- dev_printk(KERN_CRIT, &dwc->chan.dev,
+ dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
"Bad descriptor submitted for DMA!\n");
- dev_printk(KERN_CRIT, &dwc->chan.dev,
+ dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
" cookie: %d\n", bad_desc->txd.cookie);
dwc_dump_lli(dwc, &bad_desc->lli);
list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
* for DMA. But this is hard to do in a race-free manner.
*/
if (list_empty(&dwc->active_list)) {
- dev_vdbg(&tx->chan->dev, "tx_submit: started %u\n",
+ dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
desc->txd.cookie);
dwc_dostart(dwc, desc);
list_add_tail(&desc->desc_node, &dwc->active_list);
} else {
- dev_vdbg(&tx->chan->dev, "tx_submit: queued %u\n",
+ dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
desc->txd.cookie);
list_add_tail(&desc->desc_node, &dwc->queue);
unsigned int dst_width;
u32 ctllo;
- dev_vdbg(&chan->dev, "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
+ dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
dest, src, len, flags);
if (unlikely(!len)) {
- dev_dbg(&chan->dev, "prep_dma_memcpy: length is zero!\n");
+ dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
return NULL;
}
first = desc;
} else {
prev->lli.llp = desc->txd.phys;
- dma_sync_single_for_device(chan->dev.parent,
+ dma_sync_single_for_device(chan2parent(chan),
prev->txd.phys, sizeof(prev->lli),
DMA_TO_DEVICE);
list_add_tail(&desc->desc_node,
prev->lli.ctllo |= DWC_CTLL_INT_EN;
prev->lli.llp = 0;
- dma_sync_single_for_device(chan->dev.parent,
+ dma_sync_single_for_device(chan2parent(chan),
prev->txd.phys, sizeof(prev->lli),
DMA_TO_DEVICE);
struct scatterlist *sg;
size_t total_len = 0;
- dev_vdbg(&chan->dev, "prep_dma_slave\n");
+ dev_vdbg(chan2dev(chan), "prep_dma_slave\n");
if (unlikely(!dws || !sg_len))
return NULL;
reg_width = dws->reg_width;
prev = first = NULL;
- sg_len = dma_map_sg(chan->dev.parent, sgl, sg_len, direction);
+ sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction);
switch (direction) {
case DMA_TO_DEVICE:
desc = dwc_desc_get(dwc);
if (!desc) {
- dev_err(&chan->dev,
+ dev_err(chan2dev(chan),
"not enough descriptors available\n");
goto err_desc_get;
}
first = desc;
} else {
prev->lli.llp = desc->txd.phys;
- dma_sync_single_for_device(chan->dev.parent,
+ dma_sync_single_for_device(chan2parent(chan),
prev->txd.phys,
sizeof(prev->lli),
DMA_TO_DEVICE);
desc = dwc_desc_get(dwc);
if (!desc) {
- dev_err(&chan->dev,
+ dev_err(chan2dev(chan),
"not enough descriptors available\n");
goto err_desc_get;
}
first = desc;
} else {
prev->lli.llp = desc->txd.phys;
- dma_sync_single_for_device(chan->dev.parent,
+ dma_sync_single_for_device(chan2parent(chan),
prev->txd.phys,
sizeof(prev->lli),
DMA_TO_DEVICE);
prev->lli.ctllo |= DWC_CTLL_INT_EN;
prev->lli.llp = 0;
- dma_sync_single_for_device(chan->dev.parent,
+ dma_sync_single_for_device(chan2parent(chan),
prev->txd.phys, sizeof(prev->lli),
DMA_TO_DEVICE);
u32 cfghi;
u32 cfglo;
- dev_vdbg(&chan->dev, "alloc_chan_resources\n");
+ dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
/* ASSERT: channel is idle */
if (dma_readl(dw, CH_EN) & dwc->mask) {
- dev_dbg(&chan->dev, "DMA channel not idle?\n");
+ dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
return -EIO;
}
desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
if (!desc) {
- dev_info(&chan->dev,
+ dev_info(chan2dev(chan),
"only allocated %d descriptors\n", i);
spin_lock_bh(&dwc->lock);
break;
desc->txd.tx_submit = dwc_tx_submit;
desc->txd.flags = DMA_CTRL_ACK;
INIT_LIST_HEAD(&desc->txd.tx_list);
- desc->txd.phys = dma_map_single(chan->dev.parent, &desc->lli,
+ desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
sizeof(desc->lli), DMA_TO_DEVICE);
dwc_desc_put(dwc, desc);
spin_unlock_bh(&dwc->lock);
- dev_dbg(&chan->dev,
+ dev_dbg(chan2dev(chan),
"alloc_chan_resources allocated %d descriptors\n", i);
return i;
struct dw_desc *desc, *_desc;
LIST_HEAD(list);
- dev_dbg(&chan->dev, "free_chan_resources (descs allocated=%u)\n",
+ dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
dwc->descs_allocated);
/* ASSERT: channel is idle */
spin_unlock_bh(&dwc->lock);
list_for_each_entry_safe(desc, _desc, &list, desc_node) {
- dev_vdbg(&chan->dev, " freeing descriptor %p\n", desc);
- dma_unmap_single(chan->dev.parent, desc->txd.phys,
+ dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
+ dma_unmap_single(chan2parent(chan), desc->txd.phys,
sizeof(desc->lli), DMA_TO_DEVICE);
kfree(desc);
}
- dev_vdbg(&chan->dev, "free_chan_resources done\n");
+ dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}
/*----------------------------------------------------------------------*/