/*
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
18 #include <linux/pci.h>
19 #include <linux/kthread.h>
20 #include <linux/interrupt.h>
22 #include <linux/jiffies.h>
24 #include <linux/mei.h>
/**
 * mei_irq_complete_handler - processes a completed client operation and
 *	wakes up any thread sleeping on the matching wait queue.
 *
 * @cl: private data of the file object.
 * @cb_pos: callback block of the completed operation.
 */
void mei_irq_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb_pos)
	/* NOTE(review): the opening brace of the function body is missing
	 * from this excerpt */
	if (cb_pos->fop_type == MEI_FOP_WRITE) {
		/* write finished: release the callback and signal the writer */
		mei_io_cb_free(cb_pos);
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait))
			wake_up_interruptible(&cl->tx_wait);

	} else if (cb_pos->fop_type == MEI_FOP_READ &&
			MEI_READING == cl->reading_state) {
		/* read finished: mark the data ready and signal the reader */
		cl->reading_state = MEI_READ_COMPLETE;
		if (waitqueue_active(&cl->rx_wait))
			wake_up_interruptible(&cl->rx_wait);
		/* NOTE(review): closing braces appear missing from this excerpt */
/**
 * _mei_irq_thread_state_ok - checks if mei header matches file private data
 *
 * @cl: private data of the file object
 * @mei_hdr: header of mei client message
 *
 * returns !=0 if matches, 0 if no match.
 */
static int _mei_irq_thread_state_ok(struct mei_cl *cl,
				struct mei_msg_hdr *mei_hdr)
	/* NOTE(review): the opening brace is missing from this excerpt */
	/* the client must be connected, addressed by this header, and not
	 * already holding a completed read */
	return (cl->host_client_id == mei_hdr->host_addr &&
		cl->me_client_id == mei_hdr->me_addr &&
		cl->state == MEI_FILE_CONNECTED &&
		MEI_READ_COMPLETE != cl->reading_state);
/**
 * mei_irq_thread_read_client_message - bottom half read routine after ISR to
 *	handle the read mei client message data processing.
 *
 * @complete_list: An instance of our list structure
 * @dev: the device structure
 * @mei_hdr: header of mei client message
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_irq_thread_read_client_message(struct mei_cl_cb *complete_list,
		struct mei_device *dev,
		struct mei_msg_hdr *mei_hdr)
	/* NOTE(review): the opening brace and the declaration of 'cl'
	 * (used below) are missing from this excerpt */
	struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
	unsigned char *buffer = NULL;

	dev_dbg(&dev->pdev->dev, "start client msg\n");
	if (list_empty(&dev->read_list.list))
		/* NOTE(review): the body of this 'if' (presumably a jump to
		 * the discard path) is missing from this excerpt */

	list_for_each_entry_safe(cb_pos, cb_next, &dev->read_list.list, list) {
		if (cl && _mei_irq_thread_state_ok(cl, mei_hdr)) {
			cl->reading_state = MEI_READING;
			/* append the new chunk after data already read */
			buffer = cb_pos->response_buffer.data + cb_pos->buf_idx;

			if (cb_pos->response_buffer.size <
					mei_hdr->length + cb_pos->buf_idx) {
				/* incoming chunk would overrun the buffer */
				dev_dbg(&dev->pdev->dev, "message overflow.\n");
				list_del(&cb_pos->list);

			/* pull the payload out of the hardware circular buffer */
			mei_read_slots(dev, buffer, mei_hdr->length);

			cb_pos->buf_idx += mei_hdr->length;
			if (mei_hdr->msg_complete) {
				list_del(&cb_pos->list);
				dev_dbg(&dev->pdev->dev,
					"completed read H cl = %d, ME cl = %d, length = %lu\n",
				/* NOTE(review): the format arguments of this
				 * dev_dbg appear missing from this excerpt */
				list_add_tail(&cb_pos->list,
						&complete_list->list);

	dev_dbg(&dev->pdev->dev, "message read\n");
	/* no matching client: drain the slots and discard the message */
	mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
	dev_dbg(&dev->pdev->dev, "discarding message " MEI_HDR_FMT "\n",
			MEI_HDR_PRM(mei_hdr));
/**
 * _mei_irq_thread_close - processes close related operation by sending
 *	a client disconnect request to the ME.
 *
 * @dev: the device structure.
 * @slots: free slots.
 * @cb_pos: callback block.
 * @cl: private data of the file object.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
			struct mei_cl_cb *cb_pos,
			struct mei_cl_cb *cmpl_list)
	/* NOTE(review): the 'struct mei_cl *cl' parameter line and the opening
	 * brace are missing from this excerpt */
	/* bail out when the host buffer cannot hold a disconnect request */
	if ((*slots * sizeof(u32)) < (sizeof(struct mei_msg_hdr) +
					sizeof(struct hbm_client_connect_request)))

	*slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));

	if (mei_hbm_cl_disconnect_req(dev, cl)) {
		/* sending failed: complete the request with an error */
		list_move_tail(&cb_pos->list, &cmpl_list->list);

	cl->state = MEI_FILE_DISCONNECTING;

	/* request sent: wait for the response on the control read list */
	list_move_tail(&cb_pos->list, &dev->ctrl_rd_list.list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
/**
 * _mei_irq_thread_read - processes read related operation by sending
 *	a flow control request to the ME.
 *
 * @dev: the device structure.
 * @slots: free slots.
 * @cb_pos: callback block.
 * @cl: private data of the file object.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots,
			struct mei_cl_cb *cb_pos,
			struct mei_cl_cb *cmpl_list)
	/* NOTE(review): the 'struct mei_cl *cl' parameter line and the opening
	 * brace are missing from this excerpt */
	/* not enough room for a flow control message in the host buffer */
	if ((*slots * sizeof(u32)) < (sizeof(struct mei_msg_hdr) +
			sizeof(struct hbm_flow_control))) {
		/* return the cancel routine */
		list_del(&cb_pos->list);

	*slots -= mei_data2slots(sizeof(struct hbm_flow_control));

	if (mei_hbm_cl_flow_control_req(dev, cl)) {
		cl->status = -ENODEV;
		list_move_tail(&cb_pos->list, &cmpl_list->list);

	/* flow control sent: wait for incoming data on the read list */
	list_move_tail(&cb_pos->list, &dev->read_list.list);
/**
 * _mei_irq_thread_ioctl - processes ioctl related operation by sending
 *	a client connect request to the ME.
 *
 * @dev: the device structure.
 * @slots: free slots.
 * @cb_pos: callback block.
 * @cl: private data of the file object.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
			struct mei_cl_cb *cb_pos,
			struct mei_cl_cb *cmpl_list)
	/* NOTE(review): the 'struct mei_cl *cl' parameter line and the opening
	 * brace are missing from this excerpt */
	/* not enough room for a connect request in the host buffer */
	if ((*slots * sizeof(u32)) < (sizeof(struct mei_msg_hdr) +
			sizeof(struct hbm_client_connect_request))) {
		/* return the cancel routine */
		list_del(&cb_pos->list);

	cl->state = MEI_FILE_CONNECTING;
	*slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
	if (mei_hbm_cl_connect_req(dev, cl)) {
		cl->status = -ENODEV;
		list_del(&cb_pos->list);

	/* connect request sent: wait for the response */
	list_move_tail(&cb_pos->list, &dev->ctrl_rd_list.list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
/**
 * mei_irq_thread_write_complete - write (part of) a pending client message
 *	to the device, splitting it when it does not fit the host buffer.
 *
 * @dev: the device structure.
 * @slots: free slots.
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
		struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
	/* NOTE(review): the opening brace is missing from this excerpt */
	struct mei_msg_hdr mei_hdr;
	struct mei_cl *cl = cb->cl;
	/* number of bytes of the request not yet sent */
	size_t len = cb->request_buffer.size - cb->buf_idx;
	size_t msg_slots = mei_data2slots(len);

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;

	if (*slots >= msg_slots) {
		/* the whole remainder fits into the host buffer */
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	/* Split the message only if we can write the whole host buffer */
	} else if (*slots == dev->hbuf_depth) {
		len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
		mei_hdr.length = len;
		mei_hdr.msg_complete = 0;
		/* wait for next time the host buffer is empty */
		/* NOTE(review): the final 'else' branch body appears missing
		 * from this excerpt */

	dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n",
			cb->request_buffer.size, cb->buf_idx);
	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));

	if (mei_write_message(dev, &mei_hdr,
			cb->request_buffer.data + cb->buf_idx)) {
		cl->status = -ENODEV;
		list_move_tail(&cb->list, &cmpl_list->list);

	/* consume one flow control credit for this message */
	if (mei_cl_flow_ctrl_reduce(cl))

	cb->buf_idx += mei_hdr.length;
	if (mei_hdr.msg_complete)
		/* fully sent: wait for the write-complete notification */
		list_move_tail(&cb->list, &dev->write_waiting_list.list);
/**
 * mei_irq_read_handler - bottom half read routine after ISR to
 *	handle the read processing: validate the message header and
 *	dispatch the payload to the HBM, AMTHIF or client read path.
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 * @slots: slots to read.
 *
 * returns 0 on success, <0 on failure.
 */
int mei_irq_read_handler(struct mei_device *dev,
		struct mei_cl_cb *cmpl_list, s32 *slots)
	/* NOTE(review): the opening brace and the 'ret' declaration
	 * (used below) are missing from this excerpt */
	struct mei_msg_hdr *mei_hdr;
	struct mei_cl *cl_pos = NULL;
	struct mei_cl *cl_next = NULL;

	if (!dev->rd_msg_hdr) {
		/* fetch a fresh message header from the hardware */
		dev->rd_msg_hdr = mei_read_hdr(dev);
		dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
		/* NOTE(review): slot accounting statements appear missing here */
	dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);

	mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

	/* reserved bits must be zero on a valid header */
	if (mei_hdr->reserved || !dev->rd_msg_hdr) {
		dev_dbg(&dev->pdev->dev, "corrupted message header.\n");

	/* a non-zero address means a client message: find the client */
	if (mei_hdr->host_addr || mei_hdr->me_addr) {
		list_for_each_entry_safe(cl_pos, cl_next,
				&dev->file_list, link) {
			dev_dbg(&dev->pdev->dev,
					"list_for_each_entry_safe read host"
					" client = %d, ME client = %d\n",
					cl_pos->host_client_id,
					cl_pos->me_client_id);
			if (cl_pos->host_client_id == mei_hdr->host_addr &&
					cl_pos->me_client_id == mei_hdr->me_addr)

		/* the loop ran off the end: no client matched the header */
		if (&cl_pos->link == &dev->file_list) {
			dev_dbg(&dev->pdev->dev, "corrupted message header\n");

	if (((*slots) * sizeof(u32)) < mei_hdr->length) {
		dev_dbg(&dev->pdev->dev,
				"we can't read the message slots =%08x.\n",
		/* we can't read the message */

	/* decide where to read the message to */
	if (!mei_hdr->host_addr) {
		/* host address 0 is the HBM (bus) channel */
		dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_bus_message.\n");
		mei_hbm_dispatch(dev, mei_hdr);
		dev_dbg(&dev->pdev->dev, "end mei_irq_thread_read_bus_message.\n");
	} else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
			(MEI_FILE_CONNECTED == dev->iamthif_cl.state) &&
			(dev->iamthif_state == MEI_IAMTHIF_READING)) {
		dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_iamthif_message.\n");

		dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

		ret = mei_amthif_irq_read_message(cmpl_list, dev, mei_hdr);

		dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_client_message.\n");
		ret = mei_irq_thread_read_client_message(cmpl_list,
		/* NOTE(review): the remaining call arguments appear missing
		 * from this excerpt */

	/* reset the number of slots and header */
	*slots = mei_count_full_read_slots(dev);

	if (*slots == -EOVERFLOW) {
		/* overflow - reset */
		dev_dbg(&dev->pdev->dev, "resetting due to slots overflow.\n");
		/* set the event since message has been read */
/**
 * mei_irq_write_handler - dispatch write requests after the host buffer
 *	becomes empty: complete waiting writes, service watchdog and
 *	control requests, then push pending client writes.
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 *
 * returns 0 on success, <0 on failure.
 */
int mei_irq_write_handler(struct mei_device *dev,
		struct mei_cl_cb *cmpl_list)
	/* NOTE(review): the opening brace and the declarations of 'ret',
	 * 'slots' and 'cl' (used below) are missing from this excerpt */
	struct mei_cl_cb *pos = NULL, *next = NULL;
	struct mei_cl_cb *list;

	if (!mei_hbuf_is_ready(dev)) {
		dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n");

	slots = mei_hbuf_empty_slots(dev);

	/* complete all waiting for write CB */
	dev_dbg(&dev->pdev->dev, "complete all waiting for write cb.\n");

	list = &dev->write_waiting_list;
	list_for_each_entry_safe(pos, next, &list->list, list) {
		/* NOTE(review): the assignment of 'cl' from 'pos' appears
		 * missing from this excerpt */
		list_del(&pos->list);
		if (MEI_WRITING == cl->writing_state &&
			pos->fop_type == MEI_FOP_WRITE &&
			cl != &dev->iamthif_cl) {
			dev_dbg(&dev->pdev->dev, "MEI WRITE COMPLETE\n");
			cl->writing_state = MEI_WRITE_COMPLETE;
			list_add_tail(&pos->list, &cmpl_list->list);

		if (cl == &dev->iamthif_cl) {
			dev_dbg(&dev->pdev->dev, "check iamthif flow control.\n");
			if (dev->iamthif_flow_control_pending) {
				ret = mei_amthif_irq_read(dev, &slots);

	/* the watchdog stop has been acknowledged: wake the stopper */
	if (dev->wd_state == MEI_WD_STOPPING) {
		dev->wd_state = MEI_WD_IDLE;
		wake_up_interruptible(&dev->wait_stop_wd);

	/* flush a pending extra (control) message first */
	if (dev->wr_ext_msg.hdr.length) {
		mei_write_message(dev, &dev->wr_ext_msg.hdr,
				dev->wr_ext_msg.data);
		slots -= mei_data2slots(dev->wr_ext_msg.hdr.length);
		dev->wr_ext_msg.hdr.length = 0;

	if (dev->dev_state == MEI_DEV_ENABLED) {
		if (dev->wd_pending &&
			mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
			if (mei_wd_send(dev))
				dev_dbg(&dev->pdev->dev, "wd send failed.\n");
			else if (mei_cl_flow_ctrl_reduce(&dev->wd_cl))

			dev->wd_pending = false;

			/* account for the watchdog message just sent */
			if (dev->wd_state == MEI_WD_RUNNING)
				slots -= mei_data2slots(MEI_WD_START_MSG_SIZE);
				/* NOTE(review): an 'else' appears missing
				 * before the stop-size branch in this excerpt */
				slots -= mei_data2slots(MEI_WD_STOP_MSG_SIZE);

	/* complete control write list CB */
	dev_dbg(&dev->pdev->dev, "complete control write list cb.\n");
	list_for_each_entry_safe(pos, next, &dev->ctrl_wr_list.list, list) {
		list_del(&pos->list);

		/* NOTE(review): the case labels of this switch appear missing
		 * from this excerpt */
		switch (pos->fop_type) {
			/* send disconnect message */
			ret = _mei_irq_thread_close(dev, &slots, pos,

			/* send flow control message */
			ret = _mei_irq_thread_read(dev, &slots, pos,

			/* connect message */
			if (mei_cl_is_other_connecting(cl))
			ret = _mei_irq_thread_ioctl(dev, &slots, pos,

	/* complete write list CB */
	dev_dbg(&dev->pdev->dev, "complete write list cb.\n");
	list_for_each_entry_safe(pos, next, &dev->write_list.list, list) {
		if (mei_cl_flow_ctrl_creds(cl) <= 0) {
			dev_dbg(&dev->pdev->dev,
				"No flow control credentials for client %d, not sending.\n",

		if (cl == &dev->iamthif_cl)
			ret = mei_amthif_irq_write_complete(dev, &slots,

			ret = mei_irq_thread_write_complete(dev, &slots, pos,
/**
 * mei_timer - periodic timer work: handles init-client, connect/disconnect
 *	and AMTHIF stall/read timeouts, then re-arms itself.
 *
 * @work: pointer to the work_struct structure
 *
 * NOTE: This function is called by timer interrupt work
 */
void mei_timer(struct work_struct *work)
	/* NOTE(review): the opening brace is missing from this excerpt */
	unsigned long timeout;
	struct mei_cl *cl_pos = NULL;
	struct mei_cl *cl_next = NULL;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb_next = NULL;

	struct mei_device *dev = container_of(work,
					struct mei_device, timer_work.work);

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		if (dev->dev_state == MEI_DEV_INIT_CLIENTS) {
			if (dev->init_clients_timer) {
				/* client enumeration did not finish in time */
				if (--dev->init_clients_timer == 0) {
					dev_dbg(&dev->pdev->dev, "IMEI reset due to init clients timeout ,init clients state = %d.\n",
						dev->init_clients_state);

	/*** connect/disconnect timeouts ***/
	list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
		if (cl_pos->timer_count) {
			if (--cl_pos->timer_count == 0) {
				dev_dbg(&dev->pdev->dev, "HECI reset due to connect/disconnect timeout.\n");

	/* the AMTHIF client did not answer in time: drop its state */
	if (dev->iamthif_stall_timer) {
		if (--dev->iamthif_stall_timer == 0) {
			dev_dbg(&dev->pdev->dev, "resetting because of hang to amthi.\n");

			dev->iamthif_msg_buf_size = 0;
			dev->iamthif_msg_buf_index = 0;
			dev->iamthif_canceled = false;
			dev->iamthif_ioctl = true;
			dev->iamthif_state = MEI_IAMTHIF_IDLE;
			dev->iamthif_timer = 0;

			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			dev->iamthif_file_object = NULL;
			mei_amthif_run_next_cmd(dev);

	if (dev->iamthif_timer) {

		timeout = dev->iamthif_timer +
			mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

		dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
		/* NOTE(review): the argument of this dev_dbg appears missing
		 * from this excerpt */
		dev_dbg(&dev->pdev->dev, "timeout = %ld\n", timeout);
		dev_dbg(&dev->pdev->dev, "jiffies = %ld\n", jiffies);
		if (time_after(jiffies, timeout)) {
			/*
			 * User didn't read the AMTHI data on time (15sec)
			 * freeing AMTHI for other requests
			 */
			dev_dbg(&dev->pdev->dev, "freeing AMTHI for other requests\n");

			list_for_each_entry_safe(cb_pos, cb_next,
					&dev->amthif_rd_complete_list.list, list) {

				cl_pos = cb_pos->file_object->private_data;

				/* Finding the AMTHI entry. */
				if (cl_pos == &dev->iamthif_cl)
					list_del(&cb_pos->list);

			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			dev->iamthif_file_object->private_data = NULL;
			dev->iamthif_file_object = NULL;
			dev->iamthif_timer = 0;
			mei_amthif_run_next_cmd(dev);

	/* re-arm the periodic timer */
	schedule_delayed_work(&dev->timer_work, 2 * HZ);
	mutex_unlock(&dev->device_lock);