{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
- if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
+ if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS)
return DMA_SUCCESS;
- ioat3_cleanup_poll(ioat);
+ ioat3_cleanup(ioat);
- return ioat_is_complete(c, cookie, done, used);
+ return ioat_tx_status(c, cookie, txstate);
}
static struct dma_async_tx_descriptor *
struct dma_async_tx_descriptor *next;
struct dma_async_tx_descriptor *parent;
spinlock_t lock;
+ #endif
};
+ #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH /* no cross-channel dependency chains: lock/next/parent fields unused */
+ static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+ { /* no-op: nothing to protect without dependency chaining */
+ }
+ static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+ { /* no-op counterpart of txd_lock */
+ }
+ static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+ {
+ BUG(); /* chaining descriptors is never valid in this configuration */
+ }
+ static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+ { /* no-op: no parent pointer to clear */
+ }
+ static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+ { /* no-op: no next pointer to clear */
+ }
+ static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+ {
+ return NULL; /* no dependency chain exists */
+ }
+ static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+ {
+ return NULL; /* no dependency chain exists */
+ }
+
+ #else /* channel switching enabled: descriptors form a locked parent/next chain */
+ static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+ {
+ spin_lock_bh(&txd->lock); /* _bh variant: excludes bottom-half context while held */
+ }
+ static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+ {
+ spin_unlock_bh(&txd->lock);
+ }
+ static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+ {
+ /* link txd -> next and record the back-pointer in one step */
+ txd->next = next;
+ next->parent = txd;
+ }
+ static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+ {
+ txd->parent = NULL;
+ }
+ static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+ {
+ txd->next = NULL;
+ }
+ static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+ {
+ return txd->parent;
+ }
+ static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+ {
+ return txd->next;
+ }
+ #endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */
+
/**
+ * struct dma_tx_state - filled in to report the status of
+ * a transfer.
+ * @last: last completed DMA cookie
+ * @used: last issued DMA cookie (i.e. the one in progress)
+ * @residue: the remaining number of bytes left to transmit
+ * on the selected transfer for states DMA_IN_PROGRESS and
+ * DMA_PAUSED if this is implemented in the driver, else 0
+ */
+struct dma_tx_state {
+ dma_cookie_t last; /* last completed cookie */
+ dma_cookie_t used; /* last issued cookie (the one in progress) */
+ u32 residue; /* bytes remaining on the selected transfer; 0 if the driver does not report it */
+};
+
+/**
* struct dma_device - info on the entity supplying DMA services
* @chancnt: how many DMA channels are supported
* @privatecnt: how many DMA channels are requested by dma_request_channel