return __ksmbd_iov_pin_rsp(work, ib, len, aux_buf, aux_size);
}
-void ksmbd_iov_reset(struct ksmbd_work *work)
+int allocate_interim_rsp_buf(struct ksmbd_work *work)
{
- work->iov_idx = 0;
- work->iov_cnt = 0;
- *(__be32 *)work->iov[0].iov_base = 0;
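+ /* interim and break notification responses fit in a small, zeroed buffer */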
+ work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE, GFP_KERNEL);
+ if (!work->response_buf)
+ return -ENOMEM;
+ work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
+ return 0;
}
int ksmbd_iov_pin_rsp_read(struct ksmbd_work *work, void *ib, int len,
void *aux_buf, unsigned int aux_size);
int ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len);
-void ksmbd_iov_reset(struct ksmbd_work *work);
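+/* allocate work->response_buf for interim (e.g. STATUS_PENDING) and break notification responses */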
+int allocate_interim_rsp_buf(struct ksmbd_work *work);
#endif /* __KSMBD_WORK_H__ */
return 0;
}
-static inline int allocate_oplock_break_buf(struct ksmbd_work *work)
-{
- work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE, GFP_KERNEL);
- if (!work->response_buf)
- return -ENOMEM;
- work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
- return 0;
-}
-
/**
* __smb2_oplock_break_noti() - send smb2 oplock break cmd from conn
* to client
if (!fp)
goto out;
- if (allocate_oplock_break_buf(work)) {
+ if (allocate_interim_rsp_buf(work)) {
pr_err("smb2_allocate_rsp_buf failed! ");
ksmbd_fd_put(work, fp);
goto out;
struct lease_break_info *br_info = work->request_buf;
struct smb2_hdr *rsp_hdr;
- if (allocate_oplock_break_buf(work)) {
+ if (allocate_interim_rsp_buf(work)) {
ksmbd_debug(OPLOCK, "smb2_allocate_rsp_buf failed! ");
goto out;
}
setup_async_work(in_work, NULL, NULL);
smb2_send_interim_resp(in_work, STATUS_PENDING);
list_del(&in_work->interim_entry);
- ksmbd_iov_reset(in_work);
}
INIT_WORK(&work->work, __smb2_lease_break_noti);
ksmbd_queue_work(work);
err_rsp->ByteCount = 0;
err_rsp->ErrorData[0] = 0;
err = ksmbd_iov_pin_rsp(work, (void *)err_rsp,
- work->conn->vals->header_size +
- SMB2_ERROR_STRUCTURE_SIZE2);
+ __SMB2_HEADER_STRUCTURE_SIZE +
+ SMB2_ERROR_STRUCTURE_SIZE2);
if (err)
work->send_no_response = 1;
}
void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status)
{
struct smb2_hdr *rsp_hdr;
+ struct ksmbd_work *in_work = ksmbd_alloc_work_struct();
+
+ if (!in_work)
+ return;
+
- rsp_hdr = ksmbd_resp_buf_next(work);
- smb2_set_err_rsp(work);
+ if (allocate_interim_rsp_buf(in_work)) {
+ pr_err("smb_allocate_rsp_buf failed!\n");
+ ksmbd_free_work_struct(in_work);
+ return;
+ }
+
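+ /*
+  * Build the interim response in a dedicated work struct so that the
+  * original request's response buffer and iov are left untouched.
+  */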
+ in_work->conn = work->conn;
+ memcpy(smb2_get_msg(in_work->response_buf), ksmbd_resp_buf_next(work),
+ __SMB2_HEADER_STRUCTURE_SIZE);
+
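+ /*
+  * Turn the copied header into an error-format response carrying the
+  * interim status (e.g. STATUS_PENDING), write it out, then release
+  * the temporary work.
+  */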
+ rsp_hdr = smb2_get_msg(in_work->response_buf);
+ smb2_set_err_rsp(in_work);
rsp_hdr->Status = status;
- ksmbd_conn_write(work);
- rsp_hdr->Status = 0;
+ ksmbd_conn_write(in_work);
+ ksmbd_free_work_struct(in_work);
}
static __le32 smb2_get_reparse_tag_special_file(umode_t mode)
list_del(&work->fp_entry);
spin_unlock(&fp->f_lock);
- ksmbd_iov_reset(work);
-
if (work->state != KSMBD_WORK_ACTIVE) {
list_del(&smb_lock->llist);
spin_lock(&work->conn->llist_lock);
goto out;
}
- init_smb2_rsp_hdr(work);
rsp->hdr.Status =
STATUS_RANGE_NOT_LOCKED;
kfree(smb_lock);