Rename struct ce_state to struct ath10k_ce_pipe. The new name makes more sense: it carries the driver's ath10k_ prefix and reflects that the structure describes the per-pipe state of a Copy Engine.
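
For reference, the renamed structure then looks roughly like this (a minimal sketch drawn from the hunks below; ring state and the remaining fields are omitted):

/* Per-pipe Copy Engine state, formerly struct ce_state */
struct ath10k_ce_pipe {
	struct ath10k *ar;
	unsigned int id;
	u32 ctrl_addr;

	/* completion callbacks now take the renamed type as well */
	void (*send_cb)(struct ath10k_ce_pipe *ce_state,
			void *per_transfer_send_context,
			u32 buffer,
			unsigned int nbytes,
			unsigned int transfer_id);
	void (*recv_cb)(struct ath10k_ce_pipe *ce_state,
			void *per_transfer_recv_context,
			u32 buffer,
			unsigned int nbytes,
			unsigned int transfer_id,
			unsigned int flags);

	/* ... source/destination ring state unchanged ... */
};

Users in pci.c follow suit, e.g. declaring struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl instead of struct ce_state *.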
Signed-off-by: Michal Kazior <michal.kazior@tieto.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
* ath10k_ce_sendlist_send.
* The caller takes responsibility for any needed locking.
*/
-static int ath10k_ce_send_nolock(struct ce_state *ce_state,
+static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
unsigned int nbytes,
return ret;
}
-int ath10k_ce_send(struct ce_state *ce_state,
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
unsigned int nbytes,
sendlist->num_items++;
}
-int ath10k_ce_sendlist_send(struct ce_state *ce_state,
+int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
struct ce_sendlist *sendlist,
unsigned int transfer_id)
return ret;
}
-int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
+int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
void *per_recv_context,
u32 buffer)
{
* Guts of ath10k_ce_completed_recv_next.
* The caller takes responsibility for any necessary locking.
*/
-static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
+static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
return 0;
}
-int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
return ret;
}
-int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp)
{
* Guts of ath10k_ce_completed_send_next.
* The caller takes responsibility for any necessary locking.
*/
-static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
+static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
}
/* NB: Modeled after ath10k_ce_completed_send_next */
-int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
return ret;
}
-int ath10k_ce_completed_send_next(struct ce_state *ce_state,
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_state = &ar_pci->ce_states[ce_id];
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ce_state->ctrl_addr;
void *transfer_context;
u32 buf;
*
* Called with ce_lock held.
*/
-static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
+static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
int disable_copy_compl_intr)
{
u32 ctrl_addr = ce_state->ctrl_addr;
ath10k_pci_wake(ar);
for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
- struct ce_state *ce_state = &ar_pci->ce_states[ce_id];
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ce_state->ctrl_addr;
ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
ath10k_pci_sleep(ar);
}
-void ath10k_ce_send_cb_register(struct ce_state *ce_state,
- void (*send_cb) (struct ce_state *ce_state,
- void *transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id),
+void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
+ void (*send_cb)(struct ath10k_ce_pipe *ce_state,
+ void *transfer_context,
+ u32 buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id),
int disable_interrupts)
{
struct ath10k *ar = ce_state->ar;
spin_unlock_bh(&ar_pci->ce_lock);
}
-void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
- void (*recv_cb) (struct ce_state *ce_state,
- void *transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags))
+void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
+ void (*recv_cb)(struct ath10k_ce_pipe *ce_state,
+ void *transfer_context,
+ u32 buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags))
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
static int ath10k_ce_init_src_ring(struct ath10k *ar,
unsigned int ce_id,
- struct ce_state *ce_state,
+ struct ath10k_ce_pipe *ce_state,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
static int ath10k_ce_init_dest_ring(struct ath10k *ar,
unsigned int ce_id,
- struct ce_state *ce_state,
+ struct ath10k_ce_pipe *ce_state,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
return 0;
}
-static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
+static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_state = &ar_pci->ce_states[ce_id];
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
spin_lock_bh(&ar_pci->ce_lock);
* initialization. It may be that only one side or the other is
* initialized by software/firmware.
*/
-struct ce_state *ath10k_ce_init(struct ath10k *ar,
+struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr)
{
- struct ce_state *ce_state;
+ struct ath10k_ce_pipe *ce_state;
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
int ret;
return ce_state;
}
-void ath10k_ce_deinit(struct ce_state *ce_state)
+void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
* how to use copy engines.
*/
-struct ce_state;
+struct ath10k_ce_pipe;
#define CE_DESC_FLAGS_GATHER (1 << 0)
void **per_transfer_context;
};
-/* Copy Engine internal state */
-struct ce_state {
+struct ath10k_ce_pipe {
struct ath10k *ar;
unsigned int id;
u32 ctrl_addr;
- void (*send_cb) (struct ce_state *ce_state,
+ void (*send_cb) (struct ath10k_ce_pipe *ce_state,
void *per_transfer_send_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id);
- void (*recv_cb) (struct ce_state *ce_state,
+ void (*recv_cb) (struct ath10k_ce_pipe *ce_state,
void *per_transfer_recv_context,
u32 buffer,
unsigned int nbytes,
*
* Implementation note: pushes 1 buffer to Source ring
*/
-int ath10k_ce_send(struct ce_state *ce_state,
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_send_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags);
-void ath10k_ce_send_cb_register(struct ce_state *ce_state,
- void (*send_cb) (struct ce_state *ce_state,
- void *transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id),
+void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
+ void (*send_cb)(struct ath10k_ce_pipe *ce_state,
+ void *transfer_context,
+ u32 buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id),
int disable_interrupts);
/* Append a simple buffer (address/length) to a sendlist. */
*
* Implementation note: Pushes multiple buffers with Gather to Source ring.
*/
-int ath10k_ce_sendlist_send(struct ce_state *ce_state,
+int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_send_context,
struct ce_sendlist *sendlist,
/* 14 bits */
*
* Implementation note: Pushes a buffer to Dest ring.
*/
-int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
+int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
void *per_transfer_recv_context,
u32 buffer);
-void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
- void (*recv_cb) (struct ce_state *ce_state,
- void *transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags));
+void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
+ void (*recv_cb)(struct ath10k_ce_pipe *ce_state,
+ void *transfer_context,
+ u32 buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags));
/* recv flags */
/* Data is byte-swapped */
* Supply data for the next completed unprocessed receive descriptor.
* Pops buffer from Dest ring.
*/
-int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
* Supply data for the next completed unprocessed send descriptor.
* Pops 1 completed send buffer from Source ring.
*/
-int ath10k_ce_completed_send_next(struct ce_state *ce_state,
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
/*==================CE Engine Initialization=======================*/
/* Initialize an instance of a CE */
-struct ce_state *ath10k_ce_init(struct ath10k *ar,
+struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr);
* receive buffers. Target DMA must be stopped before using
* this API.
*/
-int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp);
* pending sends. Target DMA must be stopped before using
* this API.
*/
-int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp);
-void ath10k_ce_deinit(struct ce_state *ce_state);
+void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
unsigned int id;
unsigned int flags;
- struct ce_state *ce_diag;
+ struct ath10k_ce_pipe *ce_diag;
/* Host buffer address in CE space */
u32 ce_data;
dma_addr_t ce_data_base = 0;
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
unsigned int id;
unsigned int flags;
- struct ce_state *ce_diag;
+ struct ath10k_ce_pipe *ce_diag;
void *data_buf = NULL;
u32 ce_data; /* Host buffer address in CE space */
dma_addr_t ce_data_base = 0;
}
/* Called by lower (CE) layer when a send to Target completes. */
-static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
+static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state,
void *transfer_context,
u32 ce_data,
unsigned int nbytes,
}
/* Called by lower (CE) layer when data is received from the Target. */
-static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
+static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state,
void *transfer_context, u32 ce_data,
unsigned int nbytes,
unsigned int transfer_id,
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
- struct ce_state *ce_hdl = pipe_info->ce_hdl;
+ struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
struct ce_sendlist sendlist;
unsigned int len;
u32 flags = 0;
static int ath10k_pci_start_ce(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_diag = ar_pci->ce_diag;
+ struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
const struct ce_attr *attr;
struct ath10k_pci_pipe *pipe_info;
struct ath10k_pci_compl *compl;
{
struct ath10k *ar = pipe_info->hif_ce_state;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_state = pipe_info->ce_hdl;
+ struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
struct sk_buff *skb;
dma_addr_t ce_data;
int i, ret = 0;
{
struct ath10k *ar;
struct ath10k_pci *ar_pci;
- struct ce_state *ce_hdl;
+ struct ath10k_ce_pipe *ce_hdl;
u32 buf_sz;
struct sk_buff *netbuf;
u32 ce_data;
{
struct ath10k *ar;
struct ath10k_pci *ar_pci;
- struct ce_state *ce_hdl;
+ struct ath10k_ce_pipe *ce_hdl;
struct sk_buff *netbuf;
u32 ce_data;
unsigned int nbytes;
void *resp, u32 *resp_len)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
- struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
+ struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
+ struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
+ struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
+ struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
dma_addr_t req_paddr = 0;
dma_addr_t resp_paddr = 0;
struct bmi_xfer xfer = {};
return ret;
}
-static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
+static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state,
void *transfer_context,
u32 data,
unsigned int nbytes,
complete(&xfer->done);
}
-static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
+static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state,
void *transfer_context,
u32 data,
unsigned int nbytes,
struct ath10k_pci_compl {
struct list_head list;
enum ath10k_pci_compl_state state;
- struct ce_state *ce_state;
+ struct ath10k_ce_pipe *ce_state;
struct ath10k_pci_pipe *pipe_info;
void *transfer_context;
unsigned int nbytes;
/* Per-pipe state. */
struct ath10k_pci_pipe {
/* Handle of underlying Copy Engine */
- struct ce_state *ce_hdl;
+ struct ath10k_ce_pipe *ce_hdl;
/* Our pipe number; facilitates use of pipe_info ptrs. */
u8 pipe_num;
u32 fw_indicator_address;
/* Copy Engine used for Diagnostic Accesses */
- struct ce_state *ce_diag;
+ struct ath10k_ce_pipe *ce_diag;
/* FIXME: document what this really protects */
spinlock_t ce_lock;
/* Map CE id to ce_state */
- struct ce_state ce_states[CE_COUNT_MAX];
+ struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
};
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)