static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
-static void smi_recv_tasklet(unsigned long);
+static void smi_recv_tasklet(struct tasklet_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
intf->curr_seq = 0;
spin_lock_init(&intf->waiting_rcv_msgs_lock);
INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
- tasklet_init(&intf->recv_tasklet,
- smi_recv_tasklet,
- (unsigned long) intf);
+ tasklet_setup(&intf->recv_tasklet,
+ smi_recv_tasklet);
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
spin_lock_init(&intf->xmit_msgs_lock);
INIT_LIST_HEAD(&intf->xmit_msgs);
}
}
-static void smi_recv_tasklet(unsigned long val)
+static void smi_recv_tasklet(struct tasklet_struct *t)
{
unsigned long flags = 0; /* keep us warning-free. */
- struct ipmi_smi *intf = (struct ipmi_smi *) val;
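+	/* Recover the ipmi_smi that embeds recv_tasklet (a container_of() lookup). */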
+ struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
int run_to_completion = intf->run_to_completion;
struct ipmi_smi_msg *newmsg = NULL;
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
if (run_to_completion)
- smi_recv_tasklet((unsigned long) intf);
+ smi_recv_tasklet(&intf->recv_tasklet);
else
tasklet_schedule(&intf->recv_tasklet);
}
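
For reference, here is a minimal, self-contained sketch of the pattern this patch adopts (the demo_ctx module below is hypothetical, not code from ipmi_msghandler.c). tasklet_setup() drops the old unsigned long data argument, and the callback instead receives the tasklet_struct pointer and recovers its enclosing object with from_tasklet(), a container_of() wrapper. This removes the cast through unsigned long and lets the compiler type-check the callback signature.

#include <linux/interrupt.h>
#include <linux/module.h>

/* Hypothetical object embedding a tasklet, for illustration only. */
struct demo_ctx {
	unsigned int count;
	struct tasklet_struct recv_tasklet;
};

static struct demo_ctx demo;

/* New-style callback: receives the tasklet_struct, not an unsigned long. */
static void demo_tasklet_fn(struct tasklet_struct *t)
{
	/* from_tasklet() expands to container_of(t, struct demo_ctx, recv_tasklet). */
	struct demo_ctx *ctx = from_tasklet(ctx, t, recv_tasklet);

	ctx->count++;
}

static int __init demo_init(void)
{
	/* No data argument; the callback derives its context from 't'. */
	tasklet_setup(&demo.recv_tasklet, demo_tasklet_fn);
	tasklet_schedule(&demo.recv_tasklet);
	return 0;
}

static void __exit demo_exit(void)
{
	tasklet_kill(&demo.recv_tasklet);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");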