/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}
static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}
/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDIO unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"	siga	0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
		: : "cc", "memory");
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}
static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* not all buffers processed */
	if (ccq == 96 || ccq == 97)
		return 1;
	/* notify devices immediately */
	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
	return -EIO;
}
/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state differs from the last buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);

	/* At least one buffer was processed, return and extract the remaining
	 * buffers later.
	 */
	if ((ccq == 96) && (count != tmp_count)) {
		qperf_inc(q, eqbs_partial);
		return (count - tmp_count);
	}

	if (rc == 1) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc < 0) {
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}
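
/*
 * Worked example: for a request of count = 32 that ends with ccq = 96
 * and tmp_count = 20, the adapter extracted 12 equal buffer states;
 * qdio_do_eqbs() returns 12 and the remaining 20 buffers are examined
 * by a subsequent call.
 */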

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Retries until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	if (!count)
		return 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (rc == 1) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	}
	if (rc < 0) {
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	WARN_ON(tmp_count);
	return count - tmp_count;
}

/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack)
{
	unsigned char __state = 0;
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
		if (!__state)
			__state = q->slsb.val[bufnr];
		else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}
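
/*
 * Worked example of the wrap-around handling: with a queue size of
 * QDIO_MAX_BUFFERS_PER_Q (128), set_buf_states(q, 126, state, 4)
 * changes buffers 126, 127, 0 and 1, since next_buf() advances the
 * buffer number modulo the queue size.
 */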

/* set slsb states to initial state */
void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	if (!need_siga_sync(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (cc)
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return cc;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

static inline int qdio_siga_sync_out(struct qdio_q *q)
{
	return qdio_siga_sync(q, ~0U, 0);
}

static inline int qdio_siga_sync_all(struct qdio_q *q)
{
	return qdio_siga_sync(q, ~0U, ~0U);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int cc;

	if (q->u.out.use_enh_siga)
		fc = 3;

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc);

	/* hipersocket busy condition */
	if (*busy_bit) {
		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);

		if (!start_time) {
			start_time = get_clock();
			goto again;
		}
		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	return cc;
}

static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (cc)
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return cc;
}

static inline void qdio_sync_after_thinint(struct qdio_q *q)
{
	if (pci_out_supported(q)) {
		if (need_siga_sync_thinint(q))
			qdio_siga_sync_all(q);
		else if (need_siga_sync_out_thinint(q))
			qdio_siga_sync_out(q);
	} else
		qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	qdio_siga_sync_q(q);
	return get_buf_states(q, bufnr, state, 1, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}

static inline void account_sbals(struct qdio_q *q, int count)
{
	int pos = 0;

	q->q_stats.nr_sbal_total += count;
	if (count == QDIO_MAX_BUFFERS_MASK) {
		q->q_stats.nr_sbals[7]++;
		return;
	}
	while (count >>= 1)
		pos++;
	q->q_stats.nr_sbals[pos]++;
}
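
/*
 * Bucketing example: the shift loop computes the integer log2 of count,
 * so a batch of 24 SBALs is counted in nr_sbals[4] (16 <= 24 < 32) and
 * a full batch of QDIO_MAX_BUFFERS_MASK (127) in nr_sbals[7].
 */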

static void announce_buffer_error(struct qdio_q *q, int count)
{
	q->qdio_error |= QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
			      q->first_to_check);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].flags & 0xff,
		  q->sbal[q->first_to_check]->element[15].flags & 0xff);
}

static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = q->first_to_check;
			return;
		}

		/* delete the previous ACKs */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;
	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}
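
/*
 * Example for the non-QEBSM path: if buffers 5..9 were primed (count = 5),
 * buffer 9 becomes the new ACK (new = add_buf(5, 4)), a previous ACK is
 * reset, and buffers 5..8 go back to SLSB_P_INPUT_NOT_INIT so that newly
 * primed buffers keep raising interrupts.
 */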

static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state;

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		goto out;

	/*
	 * No siga sync here, as a PCI or thin interrupt
	 * already synced the queues.
	 */
	count = get_buf_states(q, q->first_to_check, &state, count, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (atomic_sub(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_INPUT_ERROR:
		announce_buffer_error(q, count);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}

static int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_inbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
			q->u.in.timestamp = get_clock();
		return 1;
	} else
		return 0;
}

static inline int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (!MACHINE_IS_LPAR)
		return 1;

	/*
	 * At this point we know that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
			      q->first_to_check);
		return 1;
	} else
		return 0;
}

static void qdio_kick_handler(struct qdio_q *q)
{
	int start = q->first_to_kick;
	int end = q->first_to_check;
	int count;

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	count = sub_buf(end, start);

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
	}

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
}

static void __qdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}

static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state;

	if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
	    (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
		qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		return q->first_to_check;

	count = get_buf_states(q, q->first_to_check, &state, count, 0);
	if (!count)
		return q->first_to_check;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_OUTPUT_ERROR:
		announce_buffer_error(q, count);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}
	return q->first_to_check;
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		return 1;
	} else
		return 0;
}

static int qdio_kick_outbound_q(struct qdio_q *q)
{
	unsigned int busy_bit;
	int cc;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, &busy_bit);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
			cc |= QDIO_ERROR_SIGA_BUSY;
		} else
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		break;
	}
	return cc;
}

static void __qdio_outbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_outbound);
	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT)
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;

	/* bail out for HiperSockets unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
		return;

	if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
	    (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
		goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that the queue type is either qeth without pci enabled
	 * or HiperSockets multicast. Make sure the buffer switch from PRIMED
	 * to EMPTY is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}

void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}

static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);
	qdio_sync_after_thinint(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__tiqdio_inbound_processing(q);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_schedule(&q->tasklet);

	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;

		if (!siga_syncs_out_pci(q))
			qdio_siga_sync_q(q);

		tasklet_schedule(&q->tasklet);
	}
}

static void qdio_handle_activate_check(struct ccw_device *cdev,
				       unsigned long intparm, int cstat,
				       int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   0, -1, -1, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}

static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
			wake_up(&cdev->private->wait_q);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}
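
/*
 * Summary of the state transitions driven by the handler above:
 * INACTIVE advances to ESTABLISHED (or ERR) during qdio_establish,
 * ESTABLISHED/ACTIVE either dispatches PCI interrupts to the queue
 * tasklets or drops to STOPPED on an activate check condition, and
 * CLEANUP returns to INACTIVE once the halt/clear has completed.
 */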

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the CHSC call are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
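
/*
 * Illustrative sketch (the caller below is hypothetical): an upper-layer
 * driver queries the SSQD before deciding how to drive its queues, e.g.
 * to read the multi write count that handle_outbound() exploits:
 *
 *	struct qdio_ssqd_desc ssqd;
 *
 *	rc = qdio_get_ssqd_desc(cdev, &ssqd);
 *	if (!rc && ssqd.mmwc > 1)
 *		... enhanced SIGA may be used for multi-buffer requests ...
 */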

static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	BUG_ON(irqs_disabled());
	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
	mutex_lock(&irq_ptr->setup_mutex);

	if (irq_ptr->debug_area != NULL) {
		debug_unregister(irq_ptr->debug_area);
		irq_ptr->debug_area = NULL;
	}
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);
	DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc);
	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
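
/*
 * Typical bring-up and tear-down sequence as seen from an upper-layer
 * driver (sketch; the qdio_initialize instance and the error labels are
 * hypothetical):
 *
 *	rc = qdio_allocate(&init_data);
 *	if (rc)
 *		goto out;
 *	rc = qdio_establish(&init_data);
 *	if (rc)
 *		goto out_free;
 *	rc = qdio_activate(cdev);
 *	if (rc)
 *		goto out_shutdown;
 *	...
 *	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 *	qdio_free(cdev);
 */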

static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}
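
/*
 * Wrap-around example: for start = 120 and count = 16, end becomes
 * add_buf(120, 16) = 8, so the range covers buffers 120..127 and 0..7;
 * buf_in_between(3, 120, 16) yields 1 while buf_in_between(8, 120, 16)
 * yields 0.
 */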

/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int used, diff;

	qperf_inc(q, inbound_call);

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		} else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	/* no need to signal as long as the adapter had free buffers */
	if (used)
		return 0;

	if (need_siga_in(q))
		return qdio_siga_input(q);
	return 0;
}
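
/*
 * Partial-overwrite example for the QEBSM branch above: with
 * ack_start = 10 and ack_count = 5 (buffers 10..14 ACKed), handing back
 * bufnr = 8 with count = 4 overwrites buffers 8..11;
 * diff = sub_buf(add_buf(8, 4), 10) = 2, so ack_count drops to 3 and
 * ack_start moves to buffer 12.
 */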

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		if (multicast_outbound(q))
			rc = qdio_kick_outbound_q(q);
		else
			if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
			    (count > 1) &&
			    (count <= q->irq_ptr->ssqd_desc.mmwc)) {
				/* exploit enhanced SIGA */
				q->u.out.use_enh_siga = 1;
				rc = qdio_kick_outbound_q(q);
			} else {
				/*
				 * One siga-w per buffer required for unicast
				 * HiperSockets.
				 */
				q->u.out.use_enh_siga = 0;
				while (count--) {
					rc = qdio_kick_outbound_q(q);
					if (rc)
						goto out;
				}
			}
		goto out;
	}

	if (need_siga_sync(q)) {
		qdio_siga_sync_q(q);
		goto out;
	}

	/* try to fast requeue buffers */
	get_buf_state(q, prev_buf(bufnr), &state, 0);
	if (state != SLSB_CU_OUTPUT_PRIMED)
		rc = qdio_kick_outbound_q(q);
	else
		qperf_inc(q, fast_requeue);

out:
	tasklet_schedule(&q->tasklet);
	return rc;
}

/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;

	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);
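
/*
 * Usage sketch (hypothetical caller): the input handler returns emptied
 * buffers to the adapter, and filled outbound SBALs are submitted with an
 * optional PCI completion request:
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, count);
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_PCI_OUT,
 *		     0, bufnr, count);
 */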

static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_setup_init();
	if (rc)
		return rc;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = qdio_debug_init();
	if (rc)
		goto out_ti;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_debug;
	return 0;

out_debug:
	qdio_debug_exit();
out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_debug_exit();
	qdio_setup_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);