* @name: name of channel
* @cd: channel platform data
* @runtime_addr: address for RX/TX according to the runtime config
- * @pend_list: queued transactions pending on this channel
- * @issued_list: issued transactions for this channel
* @done_list: list of completed transactions
* @at: active transaction on this channel
* @lock: a lock for this channel data
const char *name;
const struct pl08x_channel_data *cd;
struct dma_slave_config cfg;
- struct list_head pend_list;
- struct list_head issued_list;
struct list_head done_list;
struct pl08x_txd *at;
struct pl08x_driver_data *host;
{
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_phy_chan *phychan = plchan->phychan;
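+ /*
+  * vchan_next_desc() peeks at the head of vc.desc_issued; the entry
+  * is unlinked below once we commit to starting it.
+  */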
+ struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
+ struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
struct pl08x_lli *lli;
- struct pl08x_txd *txd;
u32 val;
- txd = list_first_entry(&plchan->issued_list, struct pl08x_txd, node);
- list_del(&txd->node);
+ list_del(&txd->vd.node);
plchan->at = txd;
}
/* Sum up all queued transactions */
- if (!list_empty(&plchan->issued_list)) {
+ if (!list_empty(&plchan->vc.desc_issued)) {
struct pl08x_txd *txdi;
- list_for_each_entry(txdi, &plchan->issued_list, node) {
+ list_for_each_entry(txdi, &plchan->vc.desc_issued, vd.node) {
struct pl08x_sg *dsg;
list_for_each_entry(dsg, &txdi->dsg_list, node)
bytes += dsg->len;
}
}
- if (!list_empty(&plchan->pend_list)) {
+ if (!list_empty(&plchan->vc.desc_submitted)) {
struct pl08x_txd *txdi;
- list_for_each_entry(txdi, &plchan->pend_list, node) {
+ list_for_each_entry(txdi, &plchan->vc.desc_submitted, vd.node) {
struct pl08x_sg *dsg;
list_for_each_entry(dsg, &txdi->dsg_list, node)
bytes += dsg->len;
LIST_HEAD(head);
struct pl08x_txd *txd;
- list_splice_tail_init(&plchan->issued_list, &head);
- list_splice_tail_init(&plchan->pend_list, &head);
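+ /* vchan_get_all_descriptors() splices the submitted, issued and
+  * completed virt-dma lists onto our local list in one go */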
+ vchan_get_all_descriptors(&plchan->vc, &head);
while (!list_empty(&head)) {
- txd = list_first_entry(&head, struct pl08x_txd, node);
+ txd = list_first_entry(&head, struct pl08x_txd, vd.node);
pl08x_release_mux(plchan);
- list_del(&txd->node);
+ list_del(&txd->vd.node);
pl08x_free_txd(pl08x, txd);
}
}
{
}
-static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
-{
- struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
- struct pl08x_txd *txd = to_pl08x_txd(tx);
- unsigned long flags;
- dma_cookie_t cookie;
-
- spin_lock_irqsave(&plchan->vc.lock, flags);
- cookie = dma_cookie_assign(tx);
-
- /* Put this onto the pending list */
- list_add_tail(&txd->node, &plchan->pend_list);
- spin_unlock_irqrestore(&plchan->vc.lock, flags);
-
- return cookie;
-}
-
static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
struct dma_chan *chan, unsigned long flags)
{
unsigned long flags;
spin_lock_irqsave(&plchan->vc.lock, flags);
- list_splice_tail_init(&plchan->pend_list, &plchan->issued_list);
- if (!list_empty(&plchan->issued_list)) {
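+ /* vchan_issue_pending() moves vc.desc_submitted onto vc.desc_issued
+  * and returns true if there is now work to start */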
+ if (vchan_issue_pending(&plchan->vc)) {
if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
pl08x_phy_alloc_and_start(plchan);
}
return 0;
}
-static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
- unsigned long flags)
+static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
{
struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
if (txd) {
- dma_async_tx_descriptor_init(&txd->vd.tx, &plchan->vc.chan);
- txd->vd.tx.flags = flags;
- txd->vd.tx.tx_submit = pl08x_tx_submit;
- INIT_LIST_HEAD(&txd->node);
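+ /* The embedded tx descriptor is now initialised by vchan_tx_prep() */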
INIT_LIST_HEAD(&txd->dsg_list);
/* Always enable error and terminal interrupts */
struct pl08x_sg *dsg;
int ret;
- txd = pl08x_get_txd(plchan, flags);
+ txd = pl08x_get_txd(plchan);
if (!txd) {
dev_err(&pl08x->adev->dev,
"%s no memory for descriptor\n", __func__);
if (ret)
return NULL;
- return &txd->vd.tx;
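+ /* vchan_tx_prep() initialises the embedded tx descriptor and hooks
+  * tx_submit to the virt-dma core, which queues the descriptor on
+  * vc.desc_submitted - replacing the removed pl08x_tx_submit() */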
+ return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
__func__, sg_dma_len(sgl), plchan->name);
- txd = pl08x_get_txd(plchan, flags);
+ txd = pl08x_get_txd(plchan);
if (!txd) {
dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
return NULL;
if (ret)
return NULL;
- return &txd->vd.tx;
+ return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
* And start the next descriptor (if any),
* otherwise free this channel.
*/
- if (!list_empty(&plchan->issued_list))
+ if (vchan_next_desc(&plchan->vc))
pl08x_start_next_txd(plchan);
else
pl08x_phy_free(plchan);
"initialize virtual channel \"%s\"\n",
chan->name);
- INIT_LIST_HEAD(&chan->pend_list);
- INIT_LIST_HEAD(&chan->issued_list);
INIT_LIST_HEAD(&chan->done_list);
tasklet_init(&chan->tasklet, pl08x_tasklet,
(unsigned long) chan);