// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2002
 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Status accumulation and basic sense functions.
 */
10 #include <linux/module.h>
11 #include <linux/init.h>
13 #include <asm/ccwdev.h>
17 #include "cio_debug.h"
24 * Check for any kind of channel or interface control check but don't
25 * issue the message for the console device
28 ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
30 struct subchannel *sch = to_subchannel(cdev->dev.parent);
33 if (!scsw_is_valid_cstat(&irb->scsw) ||
34 !(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK |
35 SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK)))
37 CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
39 " ... device %04x on subchannel 0.%x.%04x, dev_stat "
40 ": %02X sch_stat : %02X\n",
41 cdev->private->dev_id.devno, sch->schid.ssid,
43 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
44 sprintf(dbf_text, "chk%x", sch->schid.sch_no);
45 CIO_TRACE_EVENT(0, dbf_text);
46 CIO_HEX_EVENT(0, irb, sizeof(struct irb));
50 * Some paths became not operational (pno bit in scsw is set).
53 ccw_device_path_notoper(struct ccw_device *cdev)
55 struct subchannel *sch;
57 sch = to_subchannel(cdev->dev.parent);
58 if (cio_update_schib(sch))
61 CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
62 "not operational \n", __func__,
63 sch->schid.ssid, sch->schid.sch_no,
64 sch->schib.pmcw.pnom);
66 sch->lpm &= ~sch->schib.pmcw.pnom;
68 cdev->private->flags.doverify = 1;
72 * Copy valid bits from the extended control word to device irb.
75 ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
78 * Copy extended control bit if it is valid... yes there
79 * are condition that have to be met for the extended control
80 * bit to have meaning. Sick.
82 cdev->private->dma_area->irb.scsw.cmd.ectl = 0;
83 if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
84 !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
85 cdev->private->dma_area->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
86 /* Check if extended control word is valid. */
87 if (!cdev->private->dma_area->irb.scsw.cmd.ectl)
89 /* Copy concurrent sense / model dependent information. */
90 memcpy(&cdev->private->dma_area->irb.ecw, irb->ecw, sizeof(irb->ecw));
94 * Check if extended status word is valid.
97 ccw_device_accumulate_esw_valid(struct irb *irb)
99 if (!irb->scsw.cmd.eswf &&
100 (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND))
102 if (irb->scsw.cmd.stctl ==
103 (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) &&
104 !(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
110 * Copy valid bits from the extended status word to device irb.
113 ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
115 struct irb *cdev_irb;
116 struct sublog *cdev_sublog, *sublog;
118 if (!ccw_device_accumulate_esw_valid(irb))
121 cdev_irb = &cdev->private->dma_area->irb;
123 /* Copy last path used mask. */
124 cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
126 /* Copy subchannel logout information if esw is of format 0. */
127 if (irb->scsw.cmd.eswf) {
128 cdev_sublog = &cdev_irb->esw.esw0.sublog;
129 sublog = &irb->esw.esw0.sublog;
130 /* Copy extended status flags. */
131 cdev_sublog->esf = sublog->esf;
133 * Copy fields that have a meaning for channel data check
134 * channel control check and interface control check.
136 if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK |
137 SCHN_STAT_CHN_CTRL_CHK |
138 SCHN_STAT_INTF_CTRL_CHK)) {
139 /* Copy ancillary report bit. */
140 cdev_sublog->arep = sublog->arep;
141 /* Copy field-validity-flags. */
142 cdev_sublog->fvf = sublog->fvf;
143 /* Copy storage access code. */
144 cdev_sublog->sacc = sublog->sacc;
145 /* Copy termination code. */
146 cdev_sublog->termc = sublog->termc;
147 /* Copy sequence code. */
148 cdev_sublog->seqc = sublog->seqc;
150 /* Copy device status check. */
151 cdev_sublog->devsc = sublog->devsc;
152 /* Copy secondary error. */
153 cdev_sublog->serr = sublog->serr;
154 /* Copy i/o-error alert. */
155 cdev_sublog->ioerr = sublog->ioerr;
156 /* Copy channel path timeout bit. */
157 if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK)
158 cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
159 /* Copy failing storage address validity flag. */
160 cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
161 if (cdev_irb->esw.esw0.erw.fsavf) {
162 /* ... and copy the failing storage address. */
163 memcpy(cdev_irb->esw.esw0.faddr, irb->esw.esw0.faddr,
164 sizeof (irb->esw.esw0.faddr));
165 /* ... and copy the failing storage address format. */
166 cdev_irb->esw.esw0.erw.fsaf = irb->esw.esw0.erw.fsaf;
168 /* Copy secondary ccw address validity bit. */
169 cdev_irb->esw.esw0.erw.scavf = irb->esw.esw0.erw.scavf;
170 if (irb->esw.esw0.erw.scavf)
171 /* ... and copy the secondary ccw address. */
172 cdev_irb->esw.esw0.saddr = irb->esw.esw0.saddr;
175 /* FIXME: DCTI for format 2? */
177 /* Copy authorization bit. */
178 cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth;
179 /* Copy path verification required flag. */
180 cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf;
181 if (irb->esw.esw0.erw.pvrf)
182 cdev->private->flags.doverify = 1;
183 /* Copy concurrent sense bit. */
184 cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons;
185 if (irb->esw.esw0.erw.cons)
186 cdev_irb->esw.esw0.erw.scnt = irb->esw.esw0.erw.scnt;
190 * Accumulate status from irb to devstat.
193 ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
195 struct irb *cdev_irb;
198 * Check if the status pending bit is set in stctl.
199 * If not, the remaining bit have no meaning and we must ignore them.
200 * The esw is not meaningful as well...
202 if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
205 /* Check for channel checks and interface control checks. */
206 ccw_device_msg_control_check(cdev, irb);
208 /* Check for path not operational. */
209 if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
210 ccw_device_path_notoper(cdev);
211 /* No irb accumulation for transport mode irbs. */
212 if (scsw_is_tm(&irb->scsw)) {
213 memcpy(&cdev->private->dma_area->irb, irb, sizeof(struct irb));
217 * Don't accumulate unsolicited interrupts.
219 if (!scsw_is_solicited(&irb->scsw))
222 cdev_irb = &cdev->private->dma_area->irb;
225 * If the clear function had been performed, all formerly pending
226 * status at the subchannel has been cleared and we must not pass
227 * intermediate accumulated status to the device driver.
229 if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
230 memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
232 /* Copy bits which are valid only for the start function. */
233 if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
235 cdev_irb->scsw.cmd.key = irb->scsw.cmd.key;
236 /* Copy suspend control bit. */
237 cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl;
238 /* Accumulate deferred condition code. */
239 cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc;
240 /* Copy ccw format bit. */
241 cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt;
242 /* Copy prefetch bit. */
243 cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch;
244 /* Copy initial-status-interruption-control. */
245 cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic;
246 /* Copy address limit checking control. */
247 cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc;
248 /* Copy suppress suspend bit. */
249 cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi;
252 /* Take care of the extended control bit and extended control word. */
253 ccw_device_accumulate_ecw(cdev, irb);
255 /* Accumulate function control. */
256 cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl;
257 /* Copy activity control. */
258 cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl;
259 /* Accumulate status control. */
260 cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl;
262 * Copy ccw address if it is valid. This is a bit simplified
263 * but should be close enough for all practical purposes.
265 if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) ||
266 ((irb->scsw.cmd.stctl ==
267 (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) &&
268 (irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) &&
269 (irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) ||
270 (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
271 cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa;
272 /* Accumulate device status, but not the device busy flag. */
273 cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY;
274 /* dstat is not always valid. */
275 if (irb->scsw.cmd.stctl &
276 (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS
277 | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS))
278 cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat;
279 /* Accumulate subchannel status. */
280 cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat;
281 /* Copy residual count if it is valid. */
282 if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
283 (irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN))
285 cdev_irb->scsw.cmd.count = irb->scsw.cmd.count;
287 /* Take care of bits in the extended status word. */
288 ccw_device_accumulate_esw(cdev, irb);
291 * Check whether we must issue a SENSE CCW ourselves if there is no
292 * concurrent sense facility installed for the subchannel.
293 * No sense is required if no delayed sense is pending
294 * and we did not get a unit check without sense information.
296 * Note: We should check for ioinfo[irq]->flags.consns but VM
297 * violates the ESA/390 architecture and doesn't present an
298 * operand exception for virtual devices without concurrent
299 * sense facility available/supported when enabling the
300 * concurrent sense facility.
302 if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
303 !(cdev_irb->esw.esw0.erw.cons))
304 cdev->private->flags.dosense = 1;
311 ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
313 struct subchannel *sch;
314 struct ccw1 *sense_ccw;
317 sch = to_subchannel(cdev->dev.parent);
319 /* A sense is required, can we do it now ? */
320 if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT))
322 * we received an Unit Check but we have no final
323 * status yet, therefore we must delay the SENSE
324 * processing. We must not report this intermediate
325 * status to the device interrupt handler.
330 * We have ending status but no sense information. Do a basic sense.
332 sense_ccw = &to_io_private(sch)->dma_area->sense_ccw;
333 sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
334 sense_ccw->cda = (__u32) __pa(cdev->private->dma_area->irb.ecw);
335 sense_ccw->count = SENSE_MAX_COUNT;
336 sense_ccw->flags = CCW_FLAG_SLI;
338 rc = cio_start(sch, sense_ccw, 0xff);
339 if (rc == -ENODEV || rc == -EACCES)
340 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
345 * Add information from basic sense to devstat.
348 ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
351 * Check if the status pending bit is set in stctl.
352 * If not, the remaining bit have no meaning and we must ignore them.
353 * The esw is not meaningful as well...
355 if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
358 /* Check for channel checks and interface control checks. */
359 ccw_device_msg_control_check(cdev, irb);
361 /* Check for path not operational. */
362 if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
363 ccw_device_path_notoper(cdev);
365 if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
366 (irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
367 cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
368 cdev->private->flags.dosense = 0;
370 /* Check if path verification is required. */
371 if (ccw_device_accumulate_esw_valid(irb) &&
372 irb->esw.esw0.erw.pvrf)
373 cdev->private->flags.doverify = 1;
377 * This function accumulates the status into the private devstat and
378 * starts a basic sense if one is needed.
381 ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
383 ccw_device_accumulate_irb(cdev, irb);
384 if ((irb->scsw.cmd.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
386 /* Check for basic sense. */
387 if (cdev->private->flags.dosense &&
388 !(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
389 cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
390 cdev->private->flags.dosense = 0;
393 if (cdev->private->flags.dosense) {
394 ccw_device_do_sense(cdev, irb);