struct nv_falcon_msg *hdr)
{
const struct nvkm_subdev *subdev = priv->falcon->owner;
- struct nvkm_msgqueue_seq *seq;
+ struct nvkm_falcon_qmgr_seq *seq;
- seq = &msgq->qmgr->seq[hdr->seq_id];
+ seq = &msgq->qmgr->seq.id[hdr->seq_id];
if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
nvkm_error(subdev, "msg for unknown sequence %d", seq->id);
return -EINVAL;
#include "qmgr.h"
-struct nvkm_msgqueue_seq *
-nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *priv)
+struct nvkm_falcon_qmgr_seq *
+nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *qmgr)
{
- const struct nvkm_subdev *subdev = priv->falcon->owner;
- struct nvkm_msgqueue_seq *seq;
+ const struct nvkm_subdev *subdev = qmgr->falcon->owner;
+ struct nvkm_falcon_qmgr_seq *seq;
u32 index;
- mutex_lock(&priv->seq_lock);
- index = find_first_zero_bit(priv->seq_tbl, NVKM_MSGQUEUE_NUM_SEQUENCES);
- if (index >= NVKM_MSGQUEUE_NUM_SEQUENCES) {
+ mutex_lock(&qmgr->seq.mutex);
+ index = find_first_zero_bit(qmgr->seq.tbl, NVKM_FALCON_QMGR_SEQ_NUM);
+ if (index >= NVKM_FALCON_QMGR_SEQ_NUM) {
nvkm_error(subdev, "no free sequence available\n");
- mutex_unlock(&priv->seq_lock);
+ mutex_unlock(&qmgr->seq.mutex);
return ERR_PTR(-EAGAIN);
}
- set_bit(index, priv->seq_tbl);
- mutex_unlock(&priv->seq_lock);
+ set_bit(index, qmgr->seq.tbl);
+ mutex_unlock(&qmgr->seq.mutex);
- seq = &priv->seq[index];
+ seq = &qmgr->seq.id[index];
seq->state = SEQ_STATE_PENDING;
return seq;
}
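The acquire path above is, at its core, a mutex-guarded bitmap allocator: scan for the first clear bit, claim it, and hand back the slot it indexes. The standalone sketch below restates that pattern with hypothetical names (example_pool, example_slot_acquire); it is an illustration of the technique, not code from this patch.

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

#define EXAMPLE_SLOT_NUM 16

struct example_pool {
	struct mutex mutex;                                  /* guards the scan+claim step */
	unsigned long tbl[BITS_TO_LONGS(EXAMPLE_SLOT_NUM)];  /* one bit per slot */
};

/* Returns a free slot index, or -EAGAIN if every slot is in use. */
static int
example_slot_acquire(struct example_pool *pool)
{
	u32 index;

	mutex_lock(&pool->mutex);
	index = find_first_zero_bit(pool->tbl, EXAMPLE_SLOT_NUM);
	if (index >= EXAMPLE_SLOT_NUM) {
		mutex_unlock(&pool->mutex);
		return -EAGAIN;
	}
	set_bit(index, pool->tbl);
	mutex_unlock(&pool->mutex);
	return index;
}

/* Release mirrors the driver's seq_release() below: clear_bit() is atomic, so no lock is taken. */
static void
example_slot_release(struct example_pool *pool, u32 index)
{
	clear_bit(index, pool->tbl);
}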
void
-nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *priv,
- struct nvkm_msgqueue_seq *seq)
+nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *qmgr,
+ struct nvkm_falcon_qmgr_seq *seq)
{
- /* no need to acquire seq_lock since clear_bit is atomic */
+ /* no need to acquire seq.mutex since clear_bit is atomic */
seq->state = SEQ_STATE_FREE;
seq->callback = NULL;
reinit_completion(&seq->done);
- clear_bit(seq->id, priv->seq_tbl);
+ clear_bit(seq->id, qmgr->seq.tbl);
}
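From a caller's point of view, the intended flow is: acquire a sequence, tag the outgoing command with seq->id, wait on seq->done until the message handler matches the reply, then release the slot. The sketch below strings together only the APIs visible in this diff; the function name, the 1-second timeout, and the elided submit step are assumptions, not part of this patch.

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#include "qmgr.h"

/* Hypothetical caller, for illustration only. */
static int
example_send_and_wait(struct nvkm_falcon_qmgr *qmgr)
{
	struct nvkm_falcon_qmgr_seq *seq;

	seq = nvkm_falcon_qmgr_seq_acquire(qmgr);
	if (IS_ERR(seq))
		return PTR_ERR(seq);

	/* The message handler only accepts USED (or CANCELLED) sequences. */
	seq->state = SEQ_STATE_USED;

	/* ...tag the command with seq->id and submit it to the falcon... */

	if (!wait_for_completion_timeout(&seq->done, msecs_to_jiffies(1000))) {
		/*
		 * On timeout, mark the sequence cancelled rather than freeing
		 * it, so a late reply still finds a valid slot (the handler
		 * fragment above explicitly checks for SEQ_STATE_CANCELLED).
		 */
		seq->state = SEQ_STATE_CANCELLED;
		return -ETIMEDOUT;
	}

	nvkm_falcon_qmgr_seq_release(qmgr, seq);
	return 0;
}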
void
return -ENOMEM;
qmgr->falcon = falcon;
- mutex_init(&qmgr->seq_lock);
- for (i = 0; i < NVKM_MSGQUEUE_NUM_SEQUENCES; i++) {
- qmgr->seq[i].id = i;
- init_completion(&qmgr->seq[i].done);
+ mutex_init(&qmgr->seq.mutex);
+ for (i = 0; i < NVKM_FALCON_QMGR_SEQ_NUM; i++) {
+ qmgr->seq.id[i].id = i;
+ init_completion(&qmgr->seq.id[i].done);
}
return 0;
#define MSG_BUF_SIZE 128
/**
- * struct nvkm_msgqueue_seq - keep track of ongoing commands
+ * struct nvkm_falcon_qmgr_seq - keep track of ongoing commands
*
* Every time a command is sent, a sequence is assigned to it so the
 * corresponding message can be matched. Upon receiving the message, a callback
 * can be called and/or a completion signaled.
 *
* @callback: callback to call upon receiving matching message
* @completion: completion to signal after callback is called
*/
-struct nvkm_msgqueue_seq {
+struct nvkm_falcon_qmgr_seq {
u16 id;
enum {
SEQ_STATE_FREE = 0,
* We can have an arbitrary number of sequences, but realistically we will
* probably not use that much simultaneously.
*/
-#define NVKM_MSGQUEUE_NUM_SEQUENCES 16
+#define NVKM_FALCON_QMGR_SEQ_NUM 16
struct nvkm_falcon_qmgr {
struct nvkm_falcon *falcon;
- struct mutex seq_lock;
- struct nvkm_msgqueue_seq seq[NVKM_MSGQUEUE_NUM_SEQUENCES];
- unsigned long seq_tbl[BITS_TO_LONGS(NVKM_MSGQUEUE_NUM_SEQUENCES)];
+ struct {
+ struct mutex mutex;
+ struct nvkm_falcon_qmgr_seq id[NVKM_FALCON_QMGR_SEQ_NUM];
+ unsigned long tbl[BITS_TO_LONGS(NVKM_FALCON_QMGR_SEQ_NUM)];
+ } seq;
};
-struct nvkm_msgqueue_seq *
+struct nvkm_falcon_qmgr_seq *
nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *);
void nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *,
- struct nvkm_msgqueue_seq *);
+ struct nvkm_falcon_qmgr_seq *);
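One piece the excerpt does not show is where seq->done gets signalled. Per the kerneldoc above ("completion to signal after callback is called"), the message handler is expected to run the callback and then complete the sequence once hdr->seq_id has been matched. A plausible tail for such a handler is sketched below, assuming qmgr.h pulls in the msgq and message-header types as the #include in the .c file suggests; the function name is made up, and the callback invocation is elided because its signature is not part of this excerpt.

#include <linux/completion.h>
#include <linux/errno.h>

#include "qmgr.h"

/* Hypothetical handler tail, not taken from this patch. */
static int
example_msg_complete(struct nvkm_falcon_msgq *msgq, struct nv_falcon_msg *hdr)
{
	struct nvkm_falcon_qmgr_seq *seq = &msgq->qmgr->seq.id[hdr->seq_id];

	if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED)
		return -EINVAL;

	/* seq->callback would run here; its signature is elided from this excerpt. */

	complete_all(&seq->done);	/* wakes the waiter sketched earlier */
	return 0;
}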
#define FLCNQ_PRINTK(t,q,f,a...) \
FLCN_PRINTK(t, (q)->qmgr->falcon, "%s: "f, (q)->name, ##a)