static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static Sg_fd *sg_add_sfp(Sg_device * sdp);
static void sg_remove_sfp(struct kref *);
-static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
+static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static Sg_device *sg_get_dev(int dev);
@@ ... @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
Sg_fd *sfp;
Sg_request *srp;
int req_pack_id = -1;
+ bool busy;
sg_io_hdr_t *hp;
struct sg_header *old_hdr;
int retval;
@@ ... @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
if (retval)
return retval;
- srp = sg_get_rq_mark(sfp, req_pack_id);
+ srp = sg_get_rq_mark(sfp, req_pack_id, &busy);
if (!srp) { /* now wait on packet to arrive */
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
retval = wait_event_interruptible(sfp->read_wait,
- (atomic_read(&sdp->detaching) ||
- (srp = sg_get_rq_mark(sfp, req_pack_id))));
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
- if (retval)
- /* -ERESTARTSYS as signal hit process */
- return retval;
+ ((srp = sg_get_rq_mark(sfp, req_pack_id, &busy)) ||
+ (!busy && atomic_read(&sdp->detaching))));
+ if (!srp)
+ /* signal or detaching */
+ return retval ? retval : -ENODEV;
}
if (srp->header.interface_id != '\0')
return sg_new_read(sfp, buf, count, srp);
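The reworked wait in sg_read() above is a drain-before-failing pattern: the reader first tries to claim a finished request, and only reports -ENODEV once the device is detaching and nothing it could still return is in flight. Below is a minimal userspace analogy of that predicate using pthreads; the sg_like_* types and the get_done_req()/read_response() helpers are illustrative assumptions, not the driver's internals, and the normal completion path (which would set done = 1 and signal read_wait) is omitted.

/* Minimal analogy of the sg_read() wait predicate; not driver code. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct sg_like_req {
	struct sg_like_req *next;
	int done;			/* 0 = active, 1 = ready, 2 = being reaped */
};

struct sg_like_queue {
	pthread_mutex_t lock;
	pthread_cond_t read_wait;
	struct sg_like_req *head;
	bool detaching;			/* set when the device goes away */
};

/* Claim a finished request; note whether any request is still active. */
static struct sg_like_req *get_done_req(struct sg_like_queue *q, bool *busy)
{
	struct sg_like_req *r;

	*busy = false;
	for (r = q->head; r; r = r->next) {
		switch (r->done) {
		case 0:			/* still in flight: don't give up yet */
			*busy = true;
			break;
		case 1:			/* ready: claim it so other readers skip it */
			r->done = 2;
			return r;
		case 2:			/* another reader already owns this one */
			break;
		}
	}
	return NULL;
}

/* Block until a response is ready, or the device is gone and idle. */
static int read_response(struct sg_like_queue *q, struct sg_like_req **out)
{
	struct sg_like_req *r;
	bool busy;

	pthread_mutex_lock(&q->lock);
	/*
	 * Same ordering as the patch: look for a response first, and only
	 * treat "detaching" as fatal when nothing is left in flight.
	 */
	while (!(r = get_done_req(q, &busy)) && !(!busy && q->detaching))
		pthread_cond_wait(&q->read_wait, &q->lock);
	pthread_mutex_unlock(&q->lock);

	if (!r)
		return -ENODEV;		/* detached with nothing left to return */
	*out = r;
	return 0;
}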
@@ ... @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
if (result < 0)
return result;
result = wait_event_interruptible(sfp->read_wait,
- (srp_done(sfp, srp) || atomic_read(&sdp->detaching)));
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
+ srp_done(sfp, srp));
write_lock_irq(&sfp->rq_list_lock);
if (srp->done) {
srp->done = 2;
@@ ... @@ sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp)
}

static Sg_request *
-sg_get_rq_mark(Sg_fd * sfp, int pack_id)
+sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy)
{
Sg_request *resp;
unsigned long iflags;
+ *busy = false;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(resp, &sfp->rq_list, entry) {
- /* look for requests that are ready + not SG_IO owned */
- if ((1 == resp->done) && (!resp->sg_io_owned) &&
+ /* look for requests that are not SG_IO owned */
+ if ((!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
- resp->done = 2; /* guard against other readers */
- write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return resp;
+ switch (resp->done) {
+ case 0: /* request active */
+ *busy = true;
+ break;
+ case 1: /* request done; response ready to return */
+ resp->done = 2; /* guard against other readers */
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return resp;
+ case 2: /* response already being returned */
+ break;
+ }
}
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
@@ ... @@ sg_remove_request(Sg_fd * sfp, Sg_request * srp)
res = 1;
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+
+ /*
+ * If the device is detaching, wakeup any readers in case we just
+ * removed the last response, which would leave nothing for them to
+ * return other than -ENODEV.
+ */
+ if (unlikely(atomic_read(&sfp->parentdp->detaching)))
+ wake_up_interruptible_all(&sfp->read_wait);
+
return res;
}
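The wakeup added to sg_remove_request() above is the other half of the same handshake: whoever retires a request on a detaching device must wake every sleeping reader so the wait predicate is re-evaluated and -ENODEV can finally be returned once the list is drained. Continuing the userspace analogy (same hypothetical sg_like_* types and includes as the earlier sketch; again not the driver's real API):

/* Analogy of the retirement side: unlink a request and, if the device is
 * going away, broadcast so every waiting reader re-checks the queue. */
static void retire_request(struct sg_like_queue *q, struct sg_like_req *victim)
{
	struct sg_like_req **pp;

	pthread_mutex_lock(&q->lock);
	for (pp = &q->head; *pp; pp = &(*pp)->next) {
		if (*pp == victim) {
			*pp = victim->next;	/* drop it from the list */
			break;
		}
	}
	pthread_mutex_unlock(&q->lock);

	/*
	 * Broadcast rather than signal: the entry just removed may have been
	 * the last one, and every sleeping reader needs a chance to notice
	 * that the device is gone and nothing remains to reap.
	 */
	if (q->detaching)
		pthread_cond_broadcast(&q->read_wait);
}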