4 * vfs operations that deal with files
6 * Copyright (C) International Business Machines Corp., 2002,2010
7 * Author(s): Steve French (sfrench@us.ibm.com)
8 * Jeremy Allison (jra@samba.org)
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 #include <linux/backing-dev.h>
26 #include <linux/stat.h>
27 #include <linux/fcntl.h>
28 #include <linux/pagemap.h>
29 #include <linux/pagevec.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/delay.h>
33 #include <linux/mount.h>
34 #include <linux/slab.h>
35 #include <linux/swap.h>
36 #include <asm/div64.h>
40 #include "cifsproto.h"
41 #include "cifs_unicode.h"
42 #include "cifs_debug.h"
43 #include "cifs_fs_sb.h"
46 static inline int cifs_convert_flags(unsigned int flags)
48 if ((flags & O_ACCMODE) == O_RDONLY)
50 else if ((flags & O_ACCMODE) == O_WRONLY)
52 else if ((flags & O_ACCMODE) == O_RDWR) {
53 /* GENERIC_ALL is too much permission to request
54 can cause unnecessary access denied on create */
55 /* return GENERIC_ALL; */
56 return (GENERIC_READ | GENERIC_WRITE);
59 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
60 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
64 static u32 cifs_posix_convert_flags(unsigned int flags)
68 if ((flags & O_ACCMODE) == O_RDONLY)
69 posix_flags = SMB_O_RDONLY;
70 else if ((flags & O_ACCMODE) == O_WRONLY)
71 posix_flags = SMB_O_WRONLY;
72 else if ((flags & O_ACCMODE) == O_RDWR)
73 posix_flags = SMB_O_RDWR;
76 posix_flags |= SMB_O_CREAT;
78 posix_flags |= SMB_O_EXCL;
80 posix_flags |= SMB_O_TRUNC;
81 /* be safe and imply O_SYNC for O_DSYNC */
83 posix_flags |= SMB_O_SYNC;
84 if (flags & O_DIRECTORY)
85 posix_flags |= SMB_O_DIRECTORY;
86 if (flags & O_NOFOLLOW)
87 posix_flags |= SMB_O_NOFOLLOW;
89 posix_flags |= SMB_O_DIRECT;
94 static inline int cifs_get_disposition(unsigned int flags)
96 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
98 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
99 return FILE_OVERWRITE_IF;
100 else if ((flags & O_CREAT) == O_CREAT)
102 else if ((flags & O_TRUNC) == O_TRUNC)
103 return FILE_OVERWRITE;
108 int cifs_posix_open(char *full_path, struct inode **pinode,
109 struct super_block *sb, int mode, unsigned int f_flags,
110 __u32 *poplock, __u16 *pnetfid, unsigned int xid)
113 FILE_UNIX_BASIC_INFO *presp_data;
114 __u32 posix_flags = 0;
115 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
116 struct cifs_fattr fattr;
117 struct tcon_link *tlink;
118 struct cifs_tcon *tcon;
120 cFYI(1, "posix open %s", full_path);
122 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
123 if (presp_data == NULL)
126 tlink = cifs_sb_tlink(cifs_sb);
132 tcon = tlink_tcon(tlink);
133 mode &= ~current_umask();
135 posix_flags = cifs_posix_convert_flags(f_flags);
136 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
137 poplock, full_path, cifs_sb->local_nls,
138 cifs_sb->mnt_cifs_flags &
139 CIFS_MOUNT_MAP_SPECIAL_CHR);
140 cifs_put_tlink(tlink);
145 if (presp_data->Type == cpu_to_le32(-1))
146 goto posix_open_ret; /* open ok, caller does qpathinfo */
149 goto posix_open_ret; /* caller does not need info */
151 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
153 /* get new inode and set it up */
154 if (*pinode == NULL) {
155 cifs_fill_uniqueid(sb, &fattr);
156 *pinode = cifs_iget(sb, &fattr);
162 cifs_fattr_to_inode(*pinode, &fattr);
171 cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
172 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
173 struct cifs_fid *fid, unsigned int xid)
178 int create_options = CREATE_NOT_DIR;
181 if (!tcon->ses->server->ops->open)
184 desired_access = cifs_convert_flags(f_flags);
186 /*********************************************************************
187 * open flag mapping table:
189 * POSIX Flag CIFS Disposition
190 * ---------- ----------------
191 * O_CREAT FILE_OPEN_IF
192 * O_CREAT | O_EXCL FILE_CREATE
193 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
194 * O_TRUNC FILE_OVERWRITE
195 * none of the above FILE_OPEN
197 * Note that there is not a direct match between disposition
198 * FILE_SUPERSEDE (ie create whether or not file exists although
199 * O_CREAT | O_TRUNC is similar but truncates the existing
200 * file rather than creating a new file as FILE_SUPERSEDE does
201 * (which uses the attributes / metadata passed in on open call)
203 *? O_SYNC is a reasonable match to CIFS writethrough flag
204 *? and the read write flags match reasonably. O_LARGEFILE
205 *? is irrelevant because largefile support is always used
206 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
207 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
208 *********************************************************************/
210 disposition = cifs_get_disposition(f_flags);
212 /* BB pass O_SYNC flag through on file attributes .. BB */
214 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
218 if (backup_cred(cifs_sb))
219 create_options |= CREATE_OPEN_BACKUP_INTENT;
221 rc = tcon->ses->server->ops->open(xid, tcon, full_path, disposition,
222 desired_access, create_options, fid,
223 oplock, buf, cifs_sb);
229 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
232 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
240 struct cifsFileInfo *
241 cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
242 struct tcon_link *tlink, __u32 oplock)
244 struct dentry *dentry = file->f_path.dentry;
245 struct inode *inode = dentry->d_inode;
246 struct cifsInodeInfo *cinode = CIFS_I(inode);
247 struct cifsFileInfo *cfile;
248 struct cifs_fid_locks *fdlocks;
250 cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
254 fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
260 INIT_LIST_HEAD(&fdlocks->locks);
261 fdlocks->cfile = cfile;
262 cfile->llist = fdlocks;
263 down_write(&cinode->lock_sem);
264 list_add(&fdlocks->llist, &cinode->llist);
265 up_write(&cinode->lock_sem);
268 cfile->pid = current->tgid;
269 cfile->uid = current_fsuid();
270 cfile->dentry = dget(dentry);
271 cfile->f_flags = file->f_flags;
272 cfile->invalidHandle = false;
273 cfile->tlink = cifs_get_tlink(tlink);
274 INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
275 mutex_init(&cfile->fh_mutex);
276 tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);
278 spin_lock(&cifs_file_list_lock);
279 list_add(&cfile->tlist, &(tlink_tcon(tlink)->openFileList));
280 /* if readable file instance put first in list*/
281 if (file->f_mode & FMODE_READ)
282 list_add(&cfile->flist, &cinode->openFileList);
284 list_add_tail(&cfile->flist, &cinode->openFileList);
285 spin_unlock(&cifs_file_list_lock);
287 file->private_data = cfile;
291 struct cifsFileInfo *
292 cifsFileInfo_get(struct cifsFileInfo *cifs_file)
294 spin_lock(&cifs_file_list_lock);
295 cifsFileInfo_get_locked(cifs_file);
296 spin_unlock(&cifs_file_list_lock);
301 * Release a reference on the file private data. This may involve closing
302 * the filehandle out on the server. Must be called without holding
303 * cifs_file_list_lock.
305 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
307 struct inode *inode = cifs_file->dentry->d_inode;
308 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
309 struct cifsInodeInfo *cifsi = CIFS_I(inode);
310 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
311 struct cifsLockInfo *li, *tmp;
313 spin_lock(&cifs_file_list_lock);
314 if (--cifs_file->count > 0) {
315 spin_unlock(&cifs_file_list_lock);
319 /* remove it from the lists */
320 list_del(&cifs_file->flist);
321 list_del(&cifs_file->tlist);
323 if (list_empty(&cifsi->openFileList)) {
324 cFYI(1, "closing last open instance for inode %p",
325 cifs_file->dentry->d_inode);
327 * In strict cache mode we need invalidate mapping on the last
328 * close because it may cause a error when we open this file
329 * again and get at least level II oplock.
331 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
332 CIFS_I(inode)->invalid_mapping = true;
333 cifs_set_oplock_level(cifsi, 0);
335 spin_unlock(&cifs_file_list_lock);
337 cancel_work_sync(&cifs_file->oplock_break);
339 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
340 struct TCP_Server_Info *server = tcon->ses->server;
345 if (server->ops->close)
346 rc = server->ops->close(xid, tcon, &cifs_file->fid);
351 * Delete any outstanding lock records. We'll lose them when the file
354 down_write(&cifsi->lock_sem);
355 list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
356 list_del(&li->llist);
357 cifs_del_lock_waiters(li);
360 list_del(&cifs_file->llist->llist);
361 kfree(cifs_file->llist);
362 up_write(&cifsi->lock_sem);
364 cifs_put_tlink(cifs_file->tlink);
365 dput(cifs_file->dentry);
369 int cifs_open(struct inode *inode, struct file *file)
374 struct cifs_sb_info *cifs_sb;
375 struct cifs_tcon *tcon;
376 struct tcon_link *tlink;
377 struct cifsFileInfo *cfile = NULL;
378 char *full_path = NULL;
379 bool posix_open_ok = false;
384 cifs_sb = CIFS_SB(inode->i_sb);
385 tlink = cifs_sb_tlink(cifs_sb);
388 return PTR_ERR(tlink);
390 tcon = tlink_tcon(tlink);
392 full_path = build_path_from_dentry(file->f_path.dentry);
393 if (full_path == NULL) {
398 cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
399 inode, file->f_flags, full_path);
401 if (tcon->ses->server->oplocks)
406 if (!tcon->broken_posix_open && tcon->unix_ext &&
407 cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
408 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
409 /* can not refresh inode info since size could be stale */
410 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
411 cifs_sb->mnt_file_mode /* ignored */,
412 file->f_flags, &oplock, &fid.netfid, xid);
414 cFYI(1, "posix open succeeded");
415 posix_open_ok = true;
416 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
417 if (tcon->ses->serverNOS)
418 cERROR(1, "server %s of type %s returned"
419 " unexpected error on SMB posix open"
420 ", disabling posix open support."
421 " Check if server update available.",
422 tcon->ses->serverName,
423 tcon->ses->serverNOS);
424 tcon->broken_posix_open = true;
425 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
426 (rc != -EOPNOTSUPP)) /* path not found or net err */
429 * Else fallthrough to retry open the old way on network i/o
434 if (!posix_open_ok) {
435 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
436 file->f_flags, &oplock, &fid, xid);
441 cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
443 if (tcon->ses->server->ops->close)
444 tcon->ses->server->ops->close(xid, tcon, &fid);
449 cifs_fscache_set_inode_cookie(inode, file);
451 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
453 * Time to set mode which we can not set earlier due to
454 * problems creating new read-only files.
456 struct cifs_unix_set_info_args args = {
457 .mode = inode->i_mode,
460 .ctime = NO_CHANGE_64,
461 .atime = NO_CHANGE_64,
462 .mtime = NO_CHANGE_64,
465 CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
472 cifs_put_tlink(tlink);
/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

	/* BB list all locks open on this file and relock */

	return rc;
}
490 cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
495 struct cifs_sb_info *cifs_sb;
496 struct cifs_tcon *tcon;
497 struct TCP_Server_Info *server;
498 struct cifsInodeInfo *cinode;
500 char *full_path = NULL;
502 int disposition = FILE_OPEN;
503 int create_options = CREATE_NOT_DIR;
507 mutex_lock(&cfile->fh_mutex);
508 if (!cfile->invalidHandle) {
509 mutex_unlock(&cfile->fh_mutex);
515 inode = cfile->dentry->d_inode;
516 cifs_sb = CIFS_SB(inode->i_sb);
517 tcon = tlink_tcon(cfile->tlink);
518 server = tcon->ses->server;
521 * Can not grab rename sem here because various ops, including those
522 * that already have the rename sem can end up causing writepage to get
523 * called and if the server was down that means we end up here, and we
524 * can never tell if the caller already has the rename_sem.
526 full_path = build_path_from_dentry(cfile->dentry);
527 if (full_path == NULL) {
529 mutex_unlock(&cfile->fh_mutex);
534 cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
537 if (tcon->ses->server->oplocks)
542 if (tcon->unix_ext && cap_unix(tcon->ses) &&
543 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
544 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
546 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
547 * original open. Must mask them off for a reopen.
549 unsigned int oflags = cfile->f_flags &
550 ~(O_CREAT | O_EXCL | O_TRUNC);
552 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
553 cifs_sb->mnt_file_mode /* ignored */,
554 oflags, &oplock, &fid.netfid, xid);
556 cFYI(1, "posix reopen succeeded");
560 * fallthrough to retry open the old way on errors, especially
561 * in the reconnect path it is important to retry hard
565 desired_access = cifs_convert_flags(cfile->f_flags);
567 if (backup_cred(cifs_sb))
568 create_options |= CREATE_OPEN_BACKUP_INTENT;
571 * Can not refresh inode by passing in file_info buf to be returned by
572 * CIFSSMBOpen and then calling get_inode_info with returned buf since
573 * file might have write behind data that needs to be flushed and server
574 * version of file size can be stale. If we knew for sure that inode was
575 * not dirty locally we could do this.
577 rc = server->ops->open(xid, tcon, full_path, disposition,
578 desired_access, create_options, &fid, &oplock,
581 mutex_unlock(&cfile->fh_mutex);
582 cFYI(1, "cifs_reopen returned 0x%x", rc);
583 cFYI(1, "oplock: %d", oplock);
584 goto reopen_error_exit;
588 cfile->invalidHandle = false;
589 mutex_unlock(&cfile->fh_mutex);
590 cinode = CIFS_I(inode);
593 rc = filemap_write_and_wait(inode->i_mapping);
594 mapping_set_error(inode->i_mapping, rc);
597 rc = cifs_get_inode_info_unix(&inode, full_path,
600 rc = cifs_get_inode_info(&inode, full_path, NULL,
601 inode->i_sb, xid, NULL);
604 * Else we are writing out data to server already and could deadlock if
605 * we tried to flush data, and since we do not know if we have data that
606 * would invalidate the current end of file on the server we can not go
607 * to the server to get the new inode info.
610 server->ops->set_fid(cfile, &fid, oplock);
611 cifs_relock_file(cfile);
619 int cifs_close(struct inode *inode, struct file *file)
621 if (file->private_data != NULL) {
622 cifsFileInfo_put(file->private_data);
623 file->private_data = NULL;
626 /* return code from the ->release op is always ignored */
630 int cifs_closedir(struct inode *inode, struct file *file)
634 struct cifsFileInfo *cfile = file->private_data;
635 struct cifs_tcon *tcon;
636 struct TCP_Server_Info *server;
639 cFYI(1, "Closedir inode = 0x%p", inode);
645 tcon = tlink_tcon(cfile->tlink);
646 server = tcon->ses->server;
648 cFYI(1, "Freeing private data in close dir");
649 spin_lock(&cifs_file_list_lock);
650 if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
651 cfile->invalidHandle = true;
652 spin_unlock(&cifs_file_list_lock);
653 if (server->ops->close_dir)
654 rc = server->ops->close_dir(xid, tcon, &cfile->fid);
657 cFYI(1, "Closing uncompleted readdir with rc %d", rc);
658 /* not much we can do if it fails anyway, ignore rc */
661 spin_unlock(&cifs_file_list_lock);
663 buf = cfile->srch_inf.ntwrk_buf_start;
665 cFYI(1, "closedir free smb buf in srch struct");
666 cfile->srch_inf.ntwrk_buf_start = NULL;
667 if (cfile->srch_inf.smallBuf)
668 cifs_small_buf_release(buf);
670 cifs_buf_release(buf);
673 cifs_put_tlink(cfile->tlink);
674 kfree(file->private_data);
675 file->private_data = NULL;
676 /* BB can we lock the filestruct while this is going on? */
681 static struct cifsLockInfo *
682 cifs_lock_init(__u64 offset, __u64 length, __u8 type)
684 struct cifsLockInfo *lock =
685 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
688 lock->offset = offset;
689 lock->length = length;
691 lock->pid = current->tgid;
692 INIT_LIST_HEAD(&lock->blist);
693 init_waitqueue_head(&lock->block_q);
698 cifs_del_lock_waiters(struct cifsLockInfo *lock)
700 struct cifsLockInfo *li, *tmp;
701 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
702 list_del_init(&li->blist);
703 wake_up(&li->block_q);
708 cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
709 __u64 length, __u8 type, struct cifsFileInfo *cfile,
710 struct cifsLockInfo **conf_lock, bool rw_check)
712 struct cifsLockInfo *li;
713 struct cifsFileInfo *cur_cfile = fdlocks->cfile;
714 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
716 list_for_each_entry(li, &fdlocks->locks, llist) {
717 if (offset + length <= li->offset ||
718 offset >= li->offset + li->length)
720 if (rw_check && server->ops->compare_fids(cfile, cur_cfile) &&
721 current->tgid == li->pid)
723 if ((type & server->vals->shared_lock_type) &&
724 ((server->ops->compare_fids(cfile, cur_cfile) &&
725 current->tgid == li->pid) || type == li->type))
735 cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
736 __u8 type, struct cifsLockInfo **conf_lock,
740 struct cifs_fid_locks *cur;
741 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
743 list_for_each_entry(cur, &cinode->llist, llist) {
744 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
745 cfile, conf_lock, rw_check);
754 * Check if there is another lock that prevents us to set the lock (mandatory
755 * style). If such a lock exists, update the flock structure with its
756 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
757 * or leave it the same if we can't. Returns 0 if we don't need to request to
758 * the server or 1 otherwise.
761 cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
762 __u8 type, struct file_lock *flock)
765 struct cifsLockInfo *conf_lock;
766 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
767 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
770 down_read(&cinode->lock_sem);
772 exist = cifs_find_lock_conflict(cfile, offset, length, type,
775 flock->fl_start = conf_lock->offset;
776 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
777 flock->fl_pid = conf_lock->pid;
778 if (conf_lock->type & server->vals->shared_lock_type)
779 flock->fl_type = F_RDLCK;
781 flock->fl_type = F_WRLCK;
782 } else if (!cinode->can_cache_brlcks)
785 flock->fl_type = F_UNLCK;
787 up_read(&cinode->lock_sem);
792 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
794 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
795 down_write(&cinode->lock_sem);
796 list_add_tail(&lock->llist, &cfile->llist->locks);
797 up_write(&cinode->lock_sem);
801 * Set the byte-range lock (mandatory style). Returns:
802 * 1) 0, if we set the lock and don't need to request to the server;
803 * 2) 1, if no locks prevent us but we need to request to the server;
804 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
807 cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
810 struct cifsLockInfo *conf_lock;
811 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
817 down_write(&cinode->lock_sem);
819 exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
820 lock->type, &conf_lock, false);
821 if (!exist && cinode->can_cache_brlcks) {
822 list_add_tail(&lock->llist, &cfile->llist->locks);
823 up_write(&cinode->lock_sem);
832 list_add_tail(&lock->blist, &conf_lock->blist);
833 up_write(&cinode->lock_sem);
834 rc = wait_event_interruptible(lock->block_q,
835 (lock->blist.prev == &lock->blist) &&
836 (lock->blist.next == &lock->blist));
839 down_write(&cinode->lock_sem);
840 list_del_init(&lock->blist);
843 up_write(&cinode->lock_sem);
848 * Check if there is another lock that prevents us to set the lock (posix
849 * style). If such a lock exists, update the flock structure with its
850 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
851 * or leave it the same if we can't. Returns 0 if we don't need to request to
852 * the server or 1 otherwise.
855 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
858 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
859 unsigned char saved_type = flock->fl_type;
861 if ((flock->fl_flags & FL_POSIX) == 0)
864 down_read(&cinode->lock_sem);
865 posix_test_lock(file, flock);
867 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
868 flock->fl_type = saved_type;
872 up_read(&cinode->lock_sem);
877 * Set the byte-range lock (posix style). Returns:
878 * 1) 0, if we set the lock and don't need to request to the server;
879 * 2) 1, if we need to request to the server;
880 * 3) <0, if the error occurs while setting the lock.
883 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
885 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
888 if ((flock->fl_flags & FL_POSIX) == 0)
892 down_write(&cinode->lock_sem);
893 if (!cinode->can_cache_brlcks) {
894 up_write(&cinode->lock_sem);
898 rc = posix_lock_file(file, flock, NULL);
899 up_write(&cinode->lock_sem);
900 if (rc == FILE_LOCK_DEFERRED) {
901 rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
904 locks_delete_block(flock);
910 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
913 int rc = 0, stored_rc;
914 struct cifsLockInfo *li, *tmp;
915 struct cifs_tcon *tcon;
916 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
917 unsigned int num, max_num, max_buf;
918 LOCKING_ANDX_RANGE *buf, *cur;
919 int types[] = {LOCKING_ANDX_LARGE_FILES,
920 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
924 tcon = tlink_tcon(cfile->tlink);
926 /* we are going to update can_cache_brlcks here - need a write access */
927 down_write(&cinode->lock_sem);
928 if (!cinode->can_cache_brlcks) {
929 up_write(&cinode->lock_sem);
935 * Accessing maxBuf is racy with cifs_reconnect - need to store value
936 * and check it for zero before using.
938 max_buf = tcon->ses->server->maxBuf;
940 up_write(&cinode->lock_sem);
945 max_num = (max_buf - sizeof(struct smb_hdr)) /
946 sizeof(LOCKING_ANDX_RANGE);
947 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
949 up_write(&cinode->lock_sem);
954 for (i = 0; i < 2; i++) {
957 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
958 if (li->type != types[i])
960 cur->Pid = cpu_to_le16(li->pid);
961 cur->LengthLow = cpu_to_le32((u32)li->length);
962 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
963 cur->OffsetLow = cpu_to_le32((u32)li->offset);
964 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
965 if (++num == max_num) {
966 stored_rc = cifs_lockv(xid, tcon,
968 (__u8)li->type, 0, num,
979 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
980 (__u8)types[i], 0, num, buf);
986 cinode->can_cache_brlcks = false;
987 up_write(&cinode->lock_sem);
994 /* copied from fs/locks.c with a name change */
995 #define cifs_for_each_lock(inode, lockp) \
996 for (lockp = &inode->i_flock; *lockp != NULL; \
997 lockp = &(*lockp)->fl_next)
999 struct lock_to_push {
1000 struct list_head llist;
1009 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1011 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1012 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1013 struct file_lock *flock, **before;
1014 unsigned int count = 0, i = 0;
1015 int rc = 0, xid, type;
1016 struct list_head locks_to_send, *el;
1017 struct lock_to_push *lck, *tmp;
1022 /* we are going to update can_cache_brlcks here - need a write access */
1023 down_write(&cinode->lock_sem);
1024 if (!cinode->can_cache_brlcks) {
1025 up_write(&cinode->lock_sem);
1031 cifs_for_each_lock(cfile->dentry->d_inode, before) {
1032 if ((*before)->fl_flags & FL_POSIX)
1037 INIT_LIST_HEAD(&locks_to_send);
1040 * Allocating count locks is enough because no FL_POSIX locks can be
1041 * added to the list while we are holding cinode->lock_sem that
1042 * protects locking operations of this inode.
1044 for (; i < count; i++) {
1045 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1050 list_add_tail(&lck->llist, &locks_to_send);
1053 el = locks_to_send.next;
1055 cifs_for_each_lock(cfile->dentry->d_inode, before) {
1057 if ((flock->fl_flags & FL_POSIX) == 0)
1059 if (el == &locks_to_send) {
1061 * The list ended. We don't have enough allocated
1062 * structures - something is really wrong.
1064 cERROR(1, "Can't push all brlocks!");
1067 length = 1 + flock->fl_end - flock->fl_start;
1068 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1072 lck = list_entry(el, struct lock_to_push, llist);
1073 lck->pid = flock->fl_pid;
1074 lck->netfid = cfile->fid.netfid;
1075 lck->length = length;
1077 lck->offset = flock->fl_start;
1082 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1085 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1086 lck->offset, lck->length, NULL,
1090 list_del(&lck->llist);
1095 cinode->can_cache_brlcks = false;
1096 up_write(&cinode->lock_sem);
1101 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1102 list_del(&lck->llist);
1109 cifs_push_locks(struct cifsFileInfo *cfile)
1111 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1112 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1114 if (cap_unix(tcon->ses) &&
1115 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1116 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1117 return cifs_push_posix_locks(cfile);
1119 return tcon->ses->server->ops->push_mand_locks(cfile);
1123 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1124 bool *wait_flag, struct TCP_Server_Info *server)
1126 if (flock->fl_flags & FL_POSIX)
1128 if (flock->fl_flags & FL_FLOCK)
1130 if (flock->fl_flags & FL_SLEEP) {
1131 cFYI(1, "Blocking lock");
1134 if (flock->fl_flags & FL_ACCESS)
1135 cFYI(1, "Process suspended by mandatory locking - "
1136 "not implemented yet");
1137 if (flock->fl_flags & FL_LEASE)
1138 cFYI(1, "Lease on file - not implemented yet");
1139 if (flock->fl_flags &
1140 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
1141 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
1143 *type = server->vals->large_lock_type;
1144 if (flock->fl_type == F_WRLCK) {
1145 cFYI(1, "F_WRLCK ");
1146 *type |= server->vals->exclusive_lock_type;
1148 } else if (flock->fl_type == F_UNLCK) {
1150 *type |= server->vals->unlock_lock_type;
1152 /* Check if unlock includes more than one lock range */
1153 } else if (flock->fl_type == F_RDLCK) {
1155 *type |= server->vals->shared_lock_type;
1157 } else if (flock->fl_type == F_EXLCK) {
1159 *type |= server->vals->exclusive_lock_type;
1161 } else if (flock->fl_type == F_SHLCK) {
1163 *type |= server->vals->shared_lock_type;
1166 cFYI(1, "Unknown type of lock");
1170 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
1171 bool wait_flag, bool posix_lck, unsigned int xid)
1174 __u64 length = 1 + flock->fl_end - flock->fl_start;
1175 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1176 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1177 struct TCP_Server_Info *server = tcon->ses->server;
1178 __u16 netfid = cfile->fid.netfid;
1181 int posix_lock_type;
1183 rc = cifs_posix_lock_test(file, flock);
1187 if (type & server->vals->shared_lock_type)
1188 posix_lock_type = CIFS_RDLCK;
1190 posix_lock_type = CIFS_WRLCK;
1191 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
1192 flock->fl_start, length, flock,
1193 posix_lock_type, wait_flag);
1197 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
1201 /* BB we could chain these into one lock request BB */
1202 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1205 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1207 flock->fl_type = F_UNLCK;
1209 cERROR(1, "Error unlocking previously locked "
1210 "range %d during test of lock", rc);
1214 if (type & server->vals->shared_lock_type) {
1215 flock->fl_type = F_WRLCK;
1219 type &= ~server->vals->exclusive_lock_type;
1221 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1222 type | server->vals->shared_lock_type,
1225 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1226 type | server->vals->shared_lock_type, 0, 1, false);
1227 flock->fl_type = F_RDLCK;
1229 cERROR(1, "Error unlocking previously locked "
1230 "range %d during test of lock", rc);
1232 flock->fl_type = F_WRLCK;
1238 cifs_move_llist(struct list_head *source, struct list_head *dest)
1240 struct list_head *li, *tmp;
1241 list_for_each_safe(li, tmp, source)
1242 list_move(li, dest);
1246 cifs_free_llist(struct list_head *llist)
1248 struct cifsLockInfo *li, *tmp;
1249 list_for_each_entry_safe(li, tmp, llist, llist) {
1250 cifs_del_lock_waiters(li);
1251 list_del(&li->llist);
1257 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1260 int rc = 0, stored_rc;
1261 int types[] = {LOCKING_ANDX_LARGE_FILES,
1262 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1264 unsigned int max_num, num, max_buf;
1265 LOCKING_ANDX_RANGE *buf, *cur;
1266 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1267 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1268 struct cifsLockInfo *li, *tmp;
1269 __u64 length = 1 + flock->fl_end - flock->fl_start;
1270 struct list_head tmp_llist;
1272 INIT_LIST_HEAD(&tmp_llist);
1275 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1276 * and check it for zero before using.
1278 max_buf = tcon->ses->server->maxBuf;
1282 max_num = (max_buf - sizeof(struct smb_hdr)) /
1283 sizeof(LOCKING_ANDX_RANGE);
1284 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1288 down_write(&cinode->lock_sem);
1289 for (i = 0; i < 2; i++) {
1292 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1293 if (flock->fl_start > li->offset ||
1294 (flock->fl_start + length) <
1295 (li->offset + li->length))
1297 if (current->tgid != li->pid)
1299 if (types[i] != li->type)
1301 if (cinode->can_cache_brlcks) {
1303 * We can cache brlock requests - simply remove
1304 * a lock from the file's list.
1306 list_del(&li->llist);
1307 cifs_del_lock_waiters(li);
1311 cur->Pid = cpu_to_le16(li->pid);
1312 cur->LengthLow = cpu_to_le32((u32)li->length);
1313 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1314 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1315 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1317 * We need to save a lock here to let us add it again to
1318 * the file's list if the unlock range request fails on
1321 list_move(&li->llist, &tmp_llist);
1322 if (++num == max_num) {
1323 stored_rc = cifs_lockv(xid, tcon,
1325 li->type, num, 0, buf);
1328 * We failed on the unlock range
1329 * request - add all locks from the tmp
1330 * list to the head of the file's list.
1332 cifs_move_llist(&tmp_llist,
1333 &cfile->llist->locks);
1337 * The unlock range request succeed -
1338 * free the tmp list.
1340 cifs_free_llist(&tmp_llist);
1347 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1348 types[i], num, 0, buf);
1350 cifs_move_llist(&tmp_llist,
1351 &cfile->llist->locks);
1354 cifs_free_llist(&tmp_llist);
1358 up_write(&cinode->lock_sem);
/*
 * Set or clear a byte-range lock on an open cifs file.  When posix_lck is
 * set (CIFS unix extensions in effect) the POSIX lock path is used:
 * cifs_posix_lock_set() locally, then CIFSSMBPosixLock() on the wire.
 * Otherwise the server's mandatory byte-range lock ops are used.
 * NOTE(review): several lines of this function are not visible in this
 * chunk; comments describe only the visible logic.
 */
1364 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
1365 bool wait_flag, bool posix_lck, int lock, int unlock,
1369 __u64 length = 1 + flock->fl_end - flock->fl_start;
1370 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1371 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1372 struct TCP_Server_Info *server = tcon->ses->server;
1375 int posix_lock_type;
1377 rc = cifs_posix_lock_set(file, flock);
/* map the generic lock type onto a CIFS POSIX lock type */
1381 if (type & server->vals->shared_lock_type)
1382 posix_lock_type = CIFS_RDLCK;
1384 posix_lock_type = CIFS_WRLCK;
1387 posix_lock_type = CIFS_UNLCK;
1389 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1390 current->tgid, flock->fl_start, length,
1391 NULL, posix_lock_type, wait_flag);
/* mandatory-lock path: check local conflicts, then ask the server */
1396 struct cifsLockInfo *lock;
1398 lock = cifs_lock_init(flock->fl_start, length, type);
1402 rc = cifs_lock_add_if(cfile, lock, wait_flag);
1408 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1409 type, 1, 0, wait_flag);
/* server granted the lock - record it in the file's local lock list */
1415 cifs_lock_add(cfile, lock);
1417 rc = server->ops->mand_unlock_range(cfile, flock, xid);
/* keep the VFS posix-lock state in sync with what the server accepted */
1420 if (flock->fl_flags & FL_POSIX)
1421 posix_lock_file_wait(file, flock);
/*
 * VFS ->lock() entry point for cifs.  Decodes the file_lock request,
 * decides between POSIX (unix extensions) and mandatory locking, then
 * dispatches to cifs_getlk() or cifs_setlk().
 */
1425 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1428 int lock = 0, unlock = 0;
1429 bool wait_flag = false;
1430 bool posix_lck = false;
1431 struct cifs_sb_info *cifs_sb;
1432 struct cifs_tcon *tcon;
1433 struct cifsInodeInfo *cinode;
1434 struct cifsFileInfo *cfile;
1441 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1442 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1443 flock->fl_start, flock->fl_end);
1445 cfile = (struct cifsFileInfo *)file->private_data;
1446 tcon = tlink_tcon(cfile->tlink);
/* decode flock flags/type into lock/unlock/wait_flag/type */
1448 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1451 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1452 netfid = cfile->fid.netfid;
1453 cinode = CIFS_I(file->f_path.dentry->d_inode);
/* POSIX brlocks require unix extensions and must not be mount-disabled */
1455 if (cap_unix(tcon->ses) &&
1456 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1457 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1460 * BB add code here to normalize offset and length to account for
1461 * negative length which we can not accept over the wire.
1463 if (IS_GETLK(cmd)) {
1464 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
1469 if (!lock && !unlock) {
1471 * if no lock or unlock then nothing to do since we do not
1478 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1485 * update the file size (if needed) after a write. Should be called with
1486 * the inode->i_lock held
1489 cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1490 unsigned int bytes_written)
1492 loff_t end_of_write = offset + bytes_written;
/* only ever grow the cached server EOF here; it is never shrunk by writes */
1494 if (end_of_write > cifsi->server_eof)
1495 cifsi->server_eof = end_of_write;
/*
 * Synchronously write write_size bytes from write_data at *offset using
 * server->ops->sync_write, retrying on -EAGAIN (reopening an invalidated
 * handle first).  Advances *offset and updates the cached EOF / i_size
 * as data goes out.  Returns the number of bytes actually written.
 */
1499 cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1500 size_t write_size, loff_t *offset)
1503 unsigned int bytes_written = 0;
1504 unsigned int total_written;
1505 struct cifs_sb_info *cifs_sb;
1506 struct cifs_tcon *tcon;
1507 struct TCP_Server_Info *server;
1509 struct dentry *dentry = open_file->dentry;
1510 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
1511 struct cifs_io_parms io_parms;
1513 cifs_sb = CIFS_SB(dentry->d_sb);
1515 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
1516 *offset, dentry->d_name.name);
1518 tcon = tlink_tcon(open_file->tlink);
1519 server = tcon->ses->server;
/* no synchronous write op for this dialect - nothing we can do */
1521 if (!server->ops->sync_write)
1526 for (total_written = 0; write_size > total_written;
1527 total_written += bytes_written) {
1529 while (rc == -EAGAIN) {
1533 if (open_file->invalidHandle) {
1534 /* we could deadlock if we called
1535 filemap_fdatawait from here so tell
1536 reopen_file not to flush data to
1538 rc = cifs_reopen_file(open_file, false);
/* send at most wsize bytes per request */
1543 len = min((size_t)cifs_sb->wsize,
1544 write_size - total_written);
1545 /* iov[0] is reserved for smb header */
1546 iov[1].iov_base = (char *)write_data + total_written;
1547 iov[1].iov_len = len;
1549 io_parms.tcon = tcon;
1550 io_parms.offset = *offset;
1551 io_parms.length = len;
1552 rc = server->ops->sync_write(xid, open_file, &io_parms,
1553 &bytes_written, iov, 1);
1555 if (rc || (bytes_written == 0)) {
/* track server EOF and file position under i_lock */
1563 spin_lock(&dentry->d_inode->i_lock);
1564 cifs_update_eof(cifsi, *offset, bytes_written);
1565 spin_unlock(&dentry->d_inode->i_lock);
1566 *offset += bytes_written;
1570 cifs_stats_bytes_written(tcon, total_written);
/* extend the cached i_size if we wrote past it */
1572 if (total_written > 0) {
1573 spin_lock(&dentry->d_inode->i_lock);
1574 if (*offset > dentry->d_inode->i_size)
1575 i_size_write(dentry->d_inode, *offset);
1576 spin_unlock(&dentry->d_inode->i_lock);
1578 mark_inode_dirty_sync(dentry->d_inode);
1580 return total_written;
/*
 * Find an open file handle on this inode that is usable for reading.
 * Takes a reference on the returned cifsFileInfo (caller must put it).
 * Returns with cifs_file_list_lock dropped; NULL-return path is not
 * visible in this chunk.
 */
1583 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1586 struct cifsFileInfo *open_file = NULL;
1587 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1589 /* only filter by fsuid on multiuser mounts */
1590 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1593 spin_lock(&cifs_file_list_lock);
1594 /* we could simply get the first_list_entry since write-only entries
1595 are always at the end of the list but since the first entry might
1596 have a close pending, we go through the whole list */
1597 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1598 if (fsuid_only && open_file->uid != current_fsuid())
1600 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
1601 if (!open_file->invalidHandle) {
1602 /* found a good file */
1603 /* lock it so it will not be closed on us */
1604 cifsFileInfo_get_locked(open_file);
1605 spin_unlock(&cifs_file_list_lock);
1607 } /* else might as well continue, and look for
1608 another, or simply have the caller reopen it
1609 again rather than trying to fix this handle */
1610 } else /* write only file */
1611 break; /* write only files are last so must be done */
1613 spin_unlock(&cifs_file_list_lock);
/*
 * Find an open file handle on this inode usable for writing, preferring
 * one opened by the current tgid, then any available.  If only an
 * invalidated handle exists, attempt to reopen it (up to MAX_REOPEN_ATT
 * retries via the refind counter).  Returns a referenced cifsFileInfo.
 */
1617 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1620 struct cifsFileInfo *open_file, *inv_file = NULL;
1621 struct cifs_sb_info *cifs_sb;
1622 bool any_available = false;
1624 unsigned int refind = 0;
1626 /* Having a null inode here (because mapping->host was set to zero by
1627 the VFS or MM) should not happen but we had reports of on oops (due to
1628 it being zero) during stress testcases so we need to check for it */
1630 if (cifs_inode == NULL) {
1631 cERROR(1, "Null inode passed to cifs_writeable_file");
1636 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1638 /* only filter by fsuid on multiuser mounts */
1639 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1642 spin_lock(&cifs_file_list_lock);
/* bail out after too many reopen attempts */
1644 if (refind > MAX_REOPEN_ATT) {
1645 spin_unlock(&cifs_file_list_lock);
1648 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1649 if (!any_available && open_file->pid != current->tgid)
1651 if (fsuid_only && open_file->uid != current_fsuid())
1653 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
1654 if (!open_file->invalidHandle) {
1655 /* found a good writable file */
1656 cifsFileInfo_get_locked(open_file);
1657 spin_unlock(&cifs_file_list_lock);
/* remember an invalidated handle as a last resort */
1661 inv_file = open_file;
1665 /* couldn't find useable FH with same pid, try any available */
1666 if (!any_available) {
1667 any_available = true;
1668 goto refind_writable;
1672 any_available = false;
1673 cifsFileInfo_get_locked(inv_file);
1676 spin_unlock(&cifs_file_list_lock);
/* try to revive the invalidated handle outside the spinlock */
1679 rc = cifs_reopen_file(inv_file, false);
1683 spin_lock(&cifs_file_list_lock);
/* reopen failed: park the handle at the tail and retry the scan */
1684 list_move_tail(&inv_file->flist,
1685 &cifs_inode->openFileList);
1686 spin_unlock(&cifs_file_list_lock);
1687 cifsFileInfo_put(inv_file);
1688 spin_lock(&cifs_file_list_lock);
1690 goto refind_writable;
/*
 * Write the byte range [from, to) of a page cache page back to the
 * server using any writable handle on the inode.  Clamps the range so
 * the write never extends the file; returns 0 if the page lies wholly
 * past EOF (racing truncate).
 */
1697 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1699 struct address_space *mapping = page->mapping;
1700 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1703 int bytes_written = 0;
1704 struct inode *inode;
1705 struct cifsFileInfo *open_file;
1707 if (!mapping || !mapping->host)
1710 inode = page->mapping->host;
1712 offset += (loff_t)from;
1713 write_data = kmap(page);
/* sanity check the requested range against the page size */
1716 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1721 /* racing with truncate? */
1722 if (offset > mapping->host->i_size) {
1724 return 0; /* don't care */
1727 /* check to make sure that we are not extending the file */
1728 if (mapping->host->i_size - offset < (loff_t)to)
1729 to = (unsigned)(mapping->host->i_size - offset);
1731 open_file = find_writable_file(CIFS_I(mapping->host), false);
1733 bytes_written = cifs_write(open_file, open_file->pid,
1734 write_data, to - from, &offset);
1735 cifsFileInfo_put(open_file);
1736 /* Does mm or vfs already set times? */
1737 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1738 if ((bytes_written > 0) && (offset))
1740 else if (bytes_written < 0)
/* no writable handle found - cannot write the page back */
1743 cFYI(1, "No writeable filehandles for inode");
/*
 * ->writepages for cifs: gather runs of consecutive dirty pages (up to
 * wsize worth) into a cifs_writedata and send them with one async write
 * per batch via server->ops->async_writev.  Falls back to
 * generic_writepages() (one page at a time) when wsize < page size.
 */
1751 static int cifs_writepages(struct address_space *mapping,
1752 struct writeback_control *wbc)
1754 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1755 bool done = false, scanned = false, range_whole = false;
1757 struct cifs_writedata *wdata;
1758 struct TCP_Server_Info *server;
1761 loff_t isize = i_size_read(mapping->host);
1764 * If wsize is smaller than the page cache size, default to writing
1765 * one page at a time via cifs_writepage
1767 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1768 return generic_writepages(mapping, wbc);
/* work out the page index range to scan from the wbc */
1770 if (wbc->range_cyclic) {
1771 index = mapping->writeback_index; /* Start from prev offset */
1774 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1775 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1776 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1781 while (!done && index <= end) {
1782 unsigned int i, nr_pages, found_pages;
1783 pgoff_t next = 0, tofind;
1784 struct page **pages;
/* at most one wsize worth of pages per request */
1786 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1789 wdata = cifs_writedata_alloc((unsigned int)tofind,
1790 cifs_writev_complete);
1797 * find_get_pages_tag seems to return a max of 256 on each
1798 * iteration, so we must call it several times in order to
1799 * fill the array or the wsize is effectively limited to
1800 * 256 * PAGE_CACHE_SIZE.
1803 pages = wdata->pages;
1805 nr_pages = find_get_pages_tag(mapping, &index,
1806 PAGECACHE_TAG_DIRTY,
1808 found_pages += nr_pages;
1811 } while (nr_pages && tofind && index <= end);
1813 if (found_pages == 0) {
1814 kref_put(&wdata->refcount, cifs_writedata_release);
/* filter the found pages down to a locked, consecutive dirty run */
1819 for (i = 0; i < found_pages; i++) {
1820 page = wdata->pages[i];
1822 * At this point we hold neither mapping->tree_lock nor
1823 * lock on the page itself: the page may be truncated or
1824 * invalidated (changing page->mapping to NULL), or even
1825 * swizzled back from swapper_space to tmpfs file
1831 else if (!trylock_page(page))
1834 if (unlikely(page->mapping != mapping)) {
1839 if (!wbc->range_cyclic && page->index > end) {
1845 if (next && (page->index != next)) {
1846 /* Not next consecutive page */
1851 if (wbc->sync_mode != WB_SYNC_NONE)
1852 wait_on_page_writeback(page);
1854 if (PageWriteback(page) ||
1855 !clear_page_dirty_for_io(page)) {
1861 * This actually clears the dirty bit in the radix tree.
1862 * See cifs_writepage() for more commentary.
1864 set_page_writeback(page);
/* page entirely past EOF - nothing to send for it */
1866 if (page_offset(page) >= isize) {
1869 end_page_writeback(page);
1873 wdata->pages[i] = page;
1874 next = page->index + 1;
1878 /* reset index to refind any pages skipped */
1880 index = wdata->pages[0]->index + 1;
1882 /* put any pages we aren't going to use */
1883 for (i = nr_pages; i < found_pages; i++) {
1884 page_cache_release(wdata->pages[i]);
1885 wdata->pages[i] = NULL;
1888 /* nothing to write? */
1889 if (nr_pages == 0) {
1890 kref_put(&wdata->refcount, cifs_writedata_release);
/* fill in the writedata; the last page may be partial (tailsz) */
1894 wdata->sync_mode = wbc->sync_mode;
1895 wdata->nr_pages = nr_pages;
1896 wdata->offset = page_offset(wdata->pages[0]);
1897 wdata->pagesz = PAGE_CACHE_SIZE;
1899 min(isize - page_offset(wdata->pages[nr_pages - 1]),
1900 (loff_t)PAGE_CACHE_SIZE);
1901 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
/* (re)acquire a writable handle, retrying the send on -EAGAIN */
1905 if (wdata->cfile != NULL)
1906 cifsFileInfo_put(wdata->cfile);
1907 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1909 if (!wdata->cfile) {
1910 cERROR(1, "No writable handles for inode");
1914 wdata->pid = wdata->cfile->pid;
1915 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
1916 rc = server->ops->async_writev(wdata);
1917 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
1919 for (i = 0; i < nr_pages; ++i)
1920 unlock_page(wdata->pages[i]);
1922 /* send failure -- clean up the mess */
1924 for (i = 0; i < nr_pages; ++i) {
1926 redirty_page_for_writepage(wbc,
1929 SetPageError(wdata->pages[i]);
1930 end_page_writeback(wdata->pages[i]);
1931 page_cache_release(wdata->pages[i]);
1934 mapping_set_error(mapping, rc);
1936 kref_put(&wdata->refcount, cifs_writedata_release);
1938 wbc->nr_to_write -= nr_pages;
1939 if (wbc->nr_to_write <= 0)
1945 if (!scanned && !done) {
1947 * We hit the last page and there is more work to be done: wrap
1948 * back to the start of the file
1955 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1956 mapping->writeback_index = index;
/*
 * Write back a single locked page via cifs_partialpagewrite().  On
 * -EAGAIN the page is either retried (WB_SYNC_ALL, not visible here)
 * or redirtied for a later pass.
 */
1962 cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
1968 /* BB add check for wbc flags */
1969 page_cache_get(page);
1970 if (!PageUptodate(page))
1971 cFYI(1, "ppw - page not up to date");
1974 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1976 * A writepage() implementation always needs to do either this,
1977 * or re-dirty the page with "redirty_page_for_writepage()" in
1978 * the case of a failure.
1980 * Just unlocking the page will cause the radix tree tag-bits
1981 * to fail to update with the state of the page correctly.
1983 set_page_writeback(page);
1985 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1986 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
1988 else if (rc == -EAGAIN)
1989 redirty_page_for_writepage(wbc, page);
1993 SetPageUptodate(page);
1994 end_page_writeback(page);
1995 page_cache_release(page);
/*
 * ->writepage: delegate to cifs_writepage_locked(); the page unlock and
 * return are not visible in this chunk.
 */
2000 static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2002 int rc = cifs_writepage_locked(page, wbc);
/*
 * ->write_end: finish a buffered write into a page.  If the page is not
 * uptodate the partial range is written through synchronously with
 * cifs_write(); otherwise the page is simply marked dirty.  Updates
 * i_size when the write extended the file.
 */
2007 static int cifs_write_end(struct file *file, struct address_space *mapping,
2008 loff_t pos, unsigned len, unsigned copied,
2009 struct page *page, void *fsdata)
2012 struct inode *inode = mapping->host;
2013 struct cifsFileInfo *cfile = file->private_data;
2014 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
/* pid to stamp on the write: forwarded opener pid or the caller's tgid */
2017 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2020 pid = current->tgid;
2022 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
2025 if (PageChecked(page)) {
2027 SetPageUptodate(page);
2028 ClearPageChecked(page);
2029 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
2030 SetPageUptodate(page);
2032 if (!PageUptodate(page)) {
2034 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
2038 /* this is probably better than directly calling
2039 partialpage_write since in this function the file handle is
2040 known which we might as well leverage */
2041 /* BB check if anything else missing out of ppw
2042 such as updating last write time */
2043 page_data = kmap(page);
2044 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
2045 /* if (rc < 0) should we set writebehind rc? */
/* uptodate page: defer to writeback by just dirtying it */
2052 set_page_dirty(page);
2056 spin_lock(&inode->i_lock);
2057 if (pos > inode->i_size)
2058 i_size_write(inode, pos);
2059 spin_unlock(&inode->i_lock);
2063 page_cache_release(page);
/*
 * fsync for strict cache mode: flush dirty pages, invalidate the page
 * cache when we lack a read oplock (clientCanCacheRead), then ask the
 * server to flush via server->ops->flush unless NOSSYNC is set.
 */
2068 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2073 struct cifs_tcon *tcon;
2074 struct TCP_Server_Info *server;
2075 struct cifsFileInfo *smbfile = file->private_data;
2076 struct inode *inode = file->f_path.dentry->d_inode;
2077 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2079 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2082 mutex_lock(&inode->i_mutex);
2086 cFYI(1, "Sync file - name: %s datasync: 0x%x",
2087 file->f_path.dentry->d_name.name, datasync);
2089 if (!CIFS_I(inode)->clientCanCacheRead) {
2090 rc = cifs_invalidate_mapping(inode);
2092 cFYI(1, "rc: %d during invalidate phase", rc);
2093 rc = 0; /* don't care about it in fsync */
2097 tcon = tlink_tcon(smbfile->tlink);
2098 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2099 server = tcon->ses->server;
2100 if (server->ops->flush)
2101 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2107 mutex_unlock(&inode->i_mutex);
/*
 * Regular (non-strict) fsync: flush dirty pages to the server, then
 * issue a protocol-level flush unless the NOSSYNC mount flag is set.
 */
2111 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2115 struct cifs_tcon *tcon;
2116 struct TCP_Server_Info *server;
2117 struct cifsFileInfo *smbfile = file->private_data;
2118 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2119 struct inode *inode = file->f_mapping->host;
2121 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2124 mutex_lock(&inode->i_mutex);
2128 cFYI(1, "Sync file - name: %s datasync: 0x%x",
2129 file->f_path.dentry->d_name.name, datasync);
2131 tcon = tlink_tcon(smbfile->tlink);
2132 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2133 server = tcon->ses->server;
2134 if (server->ops->flush)
2135 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2141 mutex_unlock(&inode->i_mutex);
2146 * As file closes, flush all cached write data for this inode checking
2147 * for write behind errors.
2149 int cifs_flush(struct file *file, fl_owner_t id)
2151 struct inode *inode = file->f_path.dentry->d_inode;
/* only writable opens can have dirty data worth flushing */
2154 if (file->f_mode & FMODE_WRITE)
2155 rc = filemap_write_and_wait(inode->i_mapping);
2157 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
/*
 * Allocate num_pages highmem-capable pages into the pages[] array for an
 * uncached write; on allocation failure the count of pages actually
 * obtained is recorded and ENOMEM is returned (cleanup path partly not
 * visible in this chunk).
 */
2163 cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2168 for (i = 0; i < num_pages; i++) {
2169 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2172 * save number of pages we have already allocated and
2173 * return with ENOMEM error
2182 for (i = 0; i < num_pages; i++)
/*
 * Return the number of pages needed for the next uncached-write chunk:
 * clamp len to wsize (reported through *cur_len) and round up to pages.
 */
2189 size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2194 clen = min_t(const size_t, len, wsize);
2195 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
/*
 * Work-queue completion for an uncached async write: update the cached
 * server EOF / i_size, signal waiters via wdata->done, release the data
 * pages (unless -EAGAIN, in which case they are kept for a resend) and
 * drop the completion's reference on the writedata.
 */
2204 cifs_uncached_writev_complete(struct work_struct *work)
2207 struct cifs_writedata *wdata = container_of(work,
2208 struct cifs_writedata, work);
2209 struct inode *inode = wdata->cfile->dentry->d_inode;
2210 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2212 spin_lock(&inode->i_lock);
2213 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2214 if (cifsi->server_eof > inode->i_size)
2215 i_size_write(inode, cifsi->server_eof);
2216 spin_unlock(&inode->i_lock);
2218 complete(&wdata->done);
/* keep the pages on -EAGAIN so the request can be resent as-is */
2220 if (wdata->result != -EAGAIN) {
2221 for (i = 0; i < wdata->nr_pages; i++)
2222 put_page(wdata->pages[i]);
2225 kref_put(&wdata->refcount, cifs_writedata_release);
2228 /* attempt to send write to server, retry on any -EAGAIN errors */
2230 cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2233 struct TCP_Server_Info *server;
2235 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
/* reopen the handle first if it was invalidated by a reconnect */
2238 if (wdata->cfile->invalidHandle) {
2239 rc = cifs_reopen_file(wdata->cfile, false);
2243 rc = server->ops->async_writev(wdata);
2244 } while (rc == -EAGAIN);
/*
 * Uncached (direct-style) vectored write: copy user iovec data into
 * freshly allocated page batches of at most wsize bytes, send each batch
 * with an async write, then wait for all completions in offset order,
 * resending any batch that failed with -EAGAIN.  Returns bytes written
 * (and advances *poffset) or a negative error.
 */
2250 cifs_iovec_write(struct file *file, const struct iovec *iov,
2251 unsigned long nr_segs, loff_t *poffset)
2253 unsigned long nr_pages, i;
2254 size_t copied, len, cur_len;
2255 ssize_t total_written = 0;
2258 struct cifsFileInfo *open_file;
2259 struct cifs_tcon *tcon;
2260 struct cifs_sb_info *cifs_sb;
2261 struct cifs_writedata *wdata, *tmp;
2262 struct list_head wdata_list;
2266 len = iov_length(iov, nr_segs);
2270 rc = generic_write_checks(file, poffset, &len, 0);
2274 INIT_LIST_HEAD(&wdata_list);
2275 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2276 open_file = file->private_data;
2277 tcon = tlink_tcon(open_file->tlink);
/* this path requires the async write op for the dialect in use */
2279 if (!tcon->ses->server->ops->async_writev)
2284 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2285 pid = open_file->pid;
2287 pid = current->tgid;
2289 iov_iter_init(&it, iov, nr_segs, len, 0);
/* carve the iovec into wsize-bounded batches of pages */
2293 nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2294 wdata = cifs_writedata_alloc(nr_pages,
2295 cifs_uncached_writev_complete);
2301 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
/* copy user data for this batch into the allocated pages */
2308 for (i = 0; i < nr_pages; i++) {
2309 copied = min_t(const size_t, cur_len, PAGE_SIZE);
2310 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2313 iov_iter_advance(&it, copied);
2315 cur_len = save_len - cur_len;
2317 wdata->sync_mode = WB_SYNC_ALL;
2318 wdata->nr_pages = nr_pages;
2319 wdata->offset = (__u64)offset;
2320 wdata->cfile = cifsFileInfo_get(open_file);
2322 wdata->bytes = cur_len;
2323 wdata->pagesz = PAGE_SIZE;
2324 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
2325 rc = cifs_uncached_retry_writev(wdata);
2327 kref_put(&wdata->refcount, cifs_writedata_release);
2331 list_add_tail(&wdata->list, &wdata_list);
2337 * If at least one write was successfully sent, then discard any rc
2338 * value from the later writes. If the other write succeeds, then
2339 * we'll end up returning whatever was written. If it fails, then
2340 * we'll get a new rc value from that.
2342 if (!list_empty(&wdata_list))
2346 * Wait for and collect replies for any successful sends in order of
2347 * increasing offset. Once an error is hit or we get a fatal signal
2348 * while waiting, then return without waiting for any more replies.
2351 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2353 /* FIXME: freezable too? */
2354 rc = wait_for_completion_killable(&wdata->done);
2357 else if (wdata->result)
2360 total_written += wdata->bytes;
2362 /* resend call if it's a retryable error */
2363 if (rc == -EAGAIN) {
2364 rc = cifs_uncached_retry_writev(wdata);
2368 list_del_init(&wdata->list);
2369 kref_put(&wdata->refcount, cifs_writedata_release);
2372 if (total_written > 0)
2373 *poffset += total_written;
2375 cifs_stats_bytes_written(tcon, total_written);
2376 return total_written ? total_written : (ssize_t)rc;
/*
 * aio write for uncached mounts: go through cifs_iovec_write() and, on
 * success, mark the page cache stale so later cached reads refetch.
 */
2379 ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
2380 unsigned long nr_segs, loff_t pos)
2383 struct inode *inode;
2385 inode = iocb->ki_filp->f_path.dentry->d_inode;
2388 * BB - optimize the way when signing is disabled. We can drop this
2389 * extra memory-to-memory copying and use iovec buffers for constructing
2393 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
/* the write bypassed the page cache - invalidate it for readers */
2395 CIFS_I(inode)->invalid_mapping = true;
/*
 * Cached write with mandatory-lock checking: holds lock_sem shared while
 * verifying no brlock conflicts with the write range, then performs a
 * generic buffered write under i_mutex and syncs if required.
 */
2403 cifs_writev(struct kiocb *iocb, const struct iovec *iov,
2404 unsigned long nr_segs, loff_t pos)
2406 struct file *file = iocb->ki_filp;
2407 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2408 struct inode *inode = file->f_mapping->host;
2409 struct cifsInodeInfo *cinode = CIFS_I(inode);
2410 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2411 ssize_t rc = -EACCES;
2413 BUG_ON(iocb->ki_pos != pos);
2415 sb_start_write(inode->i_sb);
2418 * We need to hold the sem to be sure nobody modifies lock list
2419 * with a brlock that prevents writing.
2421 down_read(&cinode->lock_sem);
2422 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2423 server->vals->exclusive_lock_type, NULL,
2425 mutex_lock(&inode->i_mutex);
2426 rc = __generic_file_aio_write(iocb, iov, nr_segs,
2428 mutex_unlock(&inode->i_mutex);
/* O_SYNC-style semantics: push the written range out if needed */
2431 if (rc > 0 || rc == -EIOCBQUEUED) {
2434 err = generic_write_sync(file, pos, rc);
2435 if (err < 0 && rc > 0)
2439 up_read(&cinode->lock_sem);
2440 sb_end_write(inode->i_sb);
/*
 * Strict-cache-mode write dispatcher: uncached write unless we hold an
 * exclusive oplock (clientCanCacheAll); with POSIX brlocks available the
 * plain generic path is safe, otherwise cifs_writev() checks mandatory
 * lock conflicts first.
 */
2445 cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2446 unsigned long nr_segs, loff_t pos)
2448 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
2449 struct cifsInodeInfo *cinode = CIFS_I(inode);
2450 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2451 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2452 iocb->ki_filp->private_data;
2453 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2456 * In strict cache mode we need to write the data to the server exactly
2457 * from the pos to pos+len-1 rather than flush all affected pages
2458 * because it may cause a error with mandatory locks on these pages but
2459 * not on the region from pos to ppos+len-1.
2462 if (!cinode->clientCanCacheAll)
2463 return cifs_user_writev(iocb, iov, nr_segs, pos);
2465 if (cap_unix(tcon->ses) &&
2466 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2467 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2468 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2470 return cifs_writev(iocb, iov, nr_segs, pos);
/*
 * Allocate a cifs_readdata with room for nr_pages page pointers (trailing
 * array) and initialize its refcount, list, completion and work item.
 * Returns NULL on allocation failure.
 */
2473 static struct cifs_readdata *
2474 cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
2476 struct cifs_readdata *rdata;
2478 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2480 if (rdata != NULL) {
2481 kref_init(&rdata->refcount);
2482 INIT_LIST_HEAD(&rdata->list);
2483 init_completion(&rdata->done);
2484 INIT_WORK(&rdata->work, complete);
/*
 * kref release for a cifs_readdata: drop the file handle reference; the
 * structure free itself is not visible in this chunk.
 */
2491 cifs_readdata_release(struct kref *refcount)
2493 struct cifs_readdata *rdata = container_of(refcount,
2494 struct cifs_readdata, refcount);
2497 cifsFileInfo_put(rdata->cfile);
/*
 * Allocate nr_pages pages into rdata->pages for an uncached read; on
 * failure mid-way, release the pages acquired so far.
 */
2503 cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
2509 for (i = 0; i < nr_pages; i++) {
2510 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2515 rdata->pages[i] = page;
/* error path: undo the partial allocation */
2519 for (i = 0; i < nr_pages; i++) {
2520 put_page(rdata->pages[i]);
2521 rdata->pages[i] = NULL;
/*
 * kref release for uncached reads: put every data page, then fall
 * through to the common cifs_readdata_release().
 */
2528 cifs_uncached_readdata_release(struct kref *refcount)
2530 struct cifs_readdata *rdata = container_of(refcount,
2531 struct cifs_readdata, refcount);
2534 for (i = 0; i < rdata->nr_pages; i++) {
2535 put_page(rdata->pages[i]);
2536 rdata->pages[i] = NULL;
2538 cifs_readdata_release(refcount);
/*
 * Issue an async read, reopening an invalidated handle first, and keep
 * retrying while the send returns -EAGAIN.
 */
2542 cifs_retry_async_readv(struct cifs_readdata *rdata)
2545 struct TCP_Server_Info *server;
2547 server = tlink_tcon(rdata->cfile->tlink)->ses->server;
2550 if (rdata->cfile->invalidHandle) {
2551 rc = cifs_reopen_file(rdata->cfile, true);
2555 rc = server->ops->async_readv(rdata);
2556 } while (rc == -EAGAIN);
2562 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2563 * @rdata: the readdata response with list of pages holding data
2564 * @iov: vector in which we should copy the data
2565 * @nr_segs: number of segments in vector
2566 * @offset: offset into file of the first iovec
2567 * @copied: used to return the amount of data copied to the iov
2569 * This function copies data from a list of pages in a readdata response into
2570 * an array of iovecs. It will first calculate where the data should go
2571 * based on the info in the readdata and then copy the data into that spot.
2574 cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
2575 unsigned long nr_segs, loff_t offset, ssize_t *copied)
/* pos: where this response's data starts relative to the request start */
2579 size_t pos = rdata->offset - offset;
2580 ssize_t remaining = rdata->bytes;
2581 unsigned char *pdata;
2584 /* set up iov_iter and advance to the correct offset */
2585 iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
2586 iov_iter_advance(&ii, pos);
2589 for (i = 0; i < rdata->nr_pages; i++) {
2591 struct page *page = rdata->pages[i];
2593 /* copy a whole page or whatever's left */
2594 copy = min_t(ssize_t, remaining, PAGE_SIZE);
2596 /* ...but limit it to whatever space is left in the iov */
2597 copy = min_t(ssize_t, copy, iov_iter_count(&ii));
2599 /* go while there's data to be copied and no errors */
2602 rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
2608 iov_iter_advance(&ii, copy);
/*
 * Work-queue completion for an uncached async read: wake the waiter and
 * drop the completion's reference on the readdata.
 */
2617 cifs_uncached_readv_complete(struct work_struct *work)
2619 struct cifs_readdata *rdata = container_of(work,
2620 struct cifs_readdata, work);
2622 complete(&rdata->done);
2623 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
/*
 * Receive len bytes of read response data from the socket directly into
 * rdata's pages, one page per recv.  A final partial page has its tail
 * zeroed and its length recorded in rdata->tailsz; surplus pages are
 * released.  Returns total bytes read, or the socket error.
 */
2627 cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2628 struct cifs_readdata *rdata, unsigned int len)
2630 int total_read = 0, result = 0;
2632 unsigned int nr_pages = rdata->nr_pages;
2635 rdata->tailsz = PAGE_SIZE;
2636 for (i = 0; i < nr_pages; i++) {
2637 struct page *page = rdata->pages[i];
2639 if (len >= PAGE_SIZE) {
2640 /* enough data to fill the page */
2641 iov.iov_base = kmap(page);
2642 iov.iov_len = PAGE_SIZE;
2643 cFYI(1, "%u: iov_base=%p iov_len=%zu",
2644 i, iov.iov_base, iov.iov_len);
2646 } else if (len > 0) {
2647 /* enough for partial page, fill and zero the rest */
2648 iov.iov_base = kmap(page);
2650 cFYI(1, "%u: iov_base=%p iov_len=%zu",
2651 i, iov.iov_base, iov.iov_len);
2652 memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
2653 rdata->tailsz = len;
2656 /* no need to hold page hostage */
2657 rdata->pages[i] = NULL;
2663 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
2668 total_read += result;
2671 return total_read > 0 ? total_read : result;
/*
 * Uncached vectored read: split the request into rsize-bounded batches,
 * issue each as an async read, then wait for completions in offset order,
 * copying page data into the caller's iovec and resending batches that
 * returned -EAGAIN.  Returns bytes read (advancing *poffset) or an error.
 */
2675 cifs_iovec_read(struct file *file, const struct iovec *iov,
2676 unsigned long nr_segs, loff_t *poffset)
2679 size_t len, cur_len;
2680 ssize_t total_read = 0;
2681 loff_t offset = *poffset;
2682 unsigned int npages;
2683 struct cifs_sb_info *cifs_sb;
2684 struct cifs_tcon *tcon;
2685 struct cifsFileInfo *open_file;
2686 struct cifs_readdata *rdata, *tmp;
2687 struct list_head rdata_list;
2693 len = iov_length(iov, nr_segs);
2697 INIT_LIST_HEAD(&rdata_list);
2698 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2699 open_file = file->private_data;
2700 tcon = tlink_tcon(open_file->tlink);
/* this path requires the async read op for the dialect in use */
2702 if (!tcon->ses->server->ops->async_readv)
2705 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2706 pid = open_file->pid;
2708 pid = current->tgid;
2710 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
2711 cFYI(1, "attempting read on write only file instance");
/* carve the request into rsize-bounded, page-backed batches */
2714 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2715 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
2717 /* allocate a readdata struct */
2718 rdata = cifs_readdata_alloc(npages,
2719 cifs_uncached_readv_complete);
2725 rc = cifs_read_allocate_pages(rdata, npages);
2729 rdata->cfile = cifsFileInfo_get(open_file);
2730 rdata->nr_pages = npages;
2731 rdata->offset = offset;
2732 rdata->bytes = cur_len;
2734 rdata->pagesz = PAGE_SIZE;
2735 rdata->read_into_pages = cifs_uncached_read_into_pages;
2737 rc = cifs_retry_async_readv(rdata);
2740 kref_put(&rdata->refcount,
2741 cifs_uncached_readdata_release);
2745 list_add_tail(&rdata->list, &rdata_list);
2750 /* if at least one read request send succeeded, then reset rc */
2751 if (!list_empty(&rdata_list))
2754 /* the loop below should proceed in the order of increasing offsets */
2756 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2760 /* FIXME: freezable sleep too? */
2761 rc = wait_for_completion_killable(&rdata->done);
2764 else if (rdata->result)
2767 rc = cifs_readdata_to_iov(rdata, iov,
2770 total_read += copied;
2773 /* resend call if it's a retryable error */
2774 if (rc == -EAGAIN) {
2775 rc = cifs_retry_async_readv(rdata);
2779 list_del_init(&rdata->list);
2780 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2783 cifs_stats_bytes_read(tcon, total_read);
2784 *poffset += total_read;
2786 /* mask nodata case */
2790 return total_read ? total_read : rc;
/*
 * aio read for uncached mounts: thin wrapper around cifs_iovec_read();
 * post-processing of the result is not visible in this chunk.
 */
2793 ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
2794 unsigned long nr_segs, loff_t pos)
2798 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
/*
 * Strict-cache-mode read dispatcher: read uncached unless we hold at
 * least a level II oplock (clientCanCacheRead); with POSIX brlocks the
 * generic cached path is safe, otherwise check for mandatory-lock
 * conflicts under lock_sem before reading through the page cache.
 */
2806 cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2807 unsigned long nr_segs, loff_t pos)
2809 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
2810 struct cifsInodeInfo *cinode = CIFS_I(inode);
2811 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2812 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2813 iocb->ki_filp->private_data;
2814 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2818 * In strict cache mode we need to read from the server all the time
2819 * if we don't have level II oplock because the server can delay mtime
2820 * change - so we can't make a decision about inode invalidating.
2821 * And we can also fail with pagereading if there are mandatory locks
2822 * on pages affected by this read but not on the region from pos to
2825 if (!cinode->clientCanCacheRead)
2826 return cifs_user_readv(iocb, iov, nr_segs, pos);
2828 if (cap_unix(tcon->ses) &&
2829 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2830 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2831 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2834 * We need to hold the sem to be sure nobody modifies lock list
2835 * with a brlock that prevents reading.
2837 down_read(&cinode->lock_sem);
2838 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2839 tcon->ses->server->vals->shared_lock_type,
2841 rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
2842 up_read(&cinode->lock_sem);
/*
 * Synchronous read: loop issuing server->ops->sync_read() calls of at most
 * rsize bytes each into read_data, advancing *offset by the bytes read.
 * Retries transparently on -EAGAIN, reopening an invalidated handle first.
 * NOTE(review): some declarations (rc, xid, pid, rsize, cur_offset) and the
 * error/exit paths are elided from this view — confirm against full file.
 */
2847 cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
2850 unsigned int bytes_read = 0;
2851 unsigned int total_read;
2852 unsigned int current_read_size;
2854 struct cifs_sb_info *cifs_sb;
2855 struct cifs_tcon *tcon;
2856 struct TCP_Server_Info *server;
2859 struct cifsFileInfo *open_file;
2860 struct cifs_io_parms io_parms;
2861 int buf_type = CIFS_NO_BUFFER;
2865 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2867 /* FIXME: set up handlers for larger reads and/or convert to async */
/* cap each wire read at the negotiated rsize and the max SMB buffer */
2868 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2870 if (file->private_data == NULL) {
2875 open_file = file->private_data;
2876 tcon = tlink_tcon(open_file->tlink);
2877 server = tcon->ses->server;
/* protocol dialect without a synchronous read op cannot service this */
2879 if (!server->ops->sync_read) {
/* forwardable pid for servers that validate locks against the opener */
2884 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2885 pid = open_file->pid;
2887 pid = current->tgid;
2889 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
2890 cFYI(1, "attempting read on write only file instance");
2892 for (total_read = 0, cur_offset = read_data; read_size > total_read;
2893 total_read += bytes_read, cur_offset += bytes_read) {
2894 current_read_size = min_t(uint, read_size - total_read, rsize);
2896 * For windows me and 9x we do not want to request more than it
2897 * negotiated since it will refuse the read then.
2899 if ((tcon->ses) && !(tcon->ses->capabilities &
2900 tcon->ses->server->vals->cap_large_files)) {
2901 current_read_size = min_t(uint, current_read_size,
/* resend the request until it stops failing with -EAGAIN */
2905 while (rc == -EAGAIN) {
2906 if (open_file->invalidHandle) {
2907 rc = cifs_reopen_file(open_file, true);
2912 io_parms.tcon = tcon;
2913 io_parms.offset = *offset;
2914 io_parms.length = current_read_size;
2915 rc = server->ops->sync_read(xid, open_file, &io_parms,
2916 &bytes_read, &cur_offset,
/* error or EOF: stop the outer copy loop */
2919 if (rc || (bytes_read == 0)) {
2927 cifs_stats_bytes_read(tcon, total_read);
2928 *offset += bytes_read;
2936 * If the page is mmap'ed into a process' page tables, then we need to make
2937 * sure that it doesn't change while being written back.
/*
 * mmap write-fault callback: keep the faulting page stable while it is
 * being written back. Returns VM_FAULT_LOCKED, i.e. the page is handed
 * back to the fault path in the locked state (locking presumably happens
 * in the lines elided from this view — confirm against the full file).
 */
2940 cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2942 struct page *page = vmf->page;
2945 return VM_FAULT_LOCKED;
/* vm operations for mmap'ed CIFS files: generic read faults, custom
 * page_mkwrite so dirtied pages stay stable during writeback. */
2948 static struct vm_operations_struct cifs_file_vm_ops = {
2949 .fault = filemap_fault,
2950 .page_mkwrite = cifs_page_mkwrite,
/*
 * mmap for strict cache mode: if we hold no read oplock the pagecache may
 * be stale, so invalidate the mapping before letting generic_file_mmap()
 * wire up the VMA with our vm_ops.
 */
2953 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2956 struct inode *inode = file->f_path.dentry->d_inode;
2960 if (!CIFS_I(inode)->clientCanCacheRead) {
2961 rc = cifs_invalidate_mapping(inode);
2966 rc = generic_file_mmap(file, vma);
2968 vma->vm_ops = &cifs_file_vm_ops;
/*
 * Default (non-strict) mmap: revalidate the file's cached metadata first
 * (failure is only logged in the visible code), then fall through to
 * generic_file_mmap() and install the CIFS vm_ops.
 */
2973 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2978 rc = cifs_revalidate_file(file);
2980 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
2984 rc = generic_file_mmap(file, vma);
2986 vma->vm_ops = &cifs_file_vm_ops;
/*
 * Work-queue completion for an async readpages request: for each page in
 * the rdata, add it to the file LRU, mark it uptodate (and push it to
 * fscache) only when the read succeeded, then drop the page-cache
 * reference. Finally drop this work item's ref on the rdata.
 */
2992 cifs_readv_complete(struct work_struct *work)
2995 struct cifs_readdata *rdata = container_of(work,
2996 struct cifs_readdata, work);
2998 for (i = 0; i < rdata->nr_pages; i++) {
2999 struct page *page = rdata->pages[i];
3001 lru_cache_add_file(page);
3003 if (rdata->result == 0) {
3004 flush_dcache_page(page);
3005 SetPageUptodate(page);
/* only cache pages that actually contain valid data */
3010 if (rdata->result == 0)
3011 cifs_readpage_to_fscache(rdata->mapping->host, page);
3013 page_cache_release(page);
3014 rdata->pages[i] = NULL;
3016 kref_put(&rdata->refcount, cifs_readdata_release);
/*
 * Receive callback for readpages: copy up to @len bytes from the server
 * socket into rdata's page array, one page per iteration. A final partial
 * page is zero-filled past the received data; pages past the server's EOF
 * are zeroed and marked uptodate; any remaining pages are released.
 * Returns total bytes read, or the last socket error if nothing was read.
 */
3020 cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3021 struct cifs_readdata *rdata, unsigned int len)
3023 int total_read = 0, result = 0;
3027 unsigned int nr_pages = rdata->nr_pages;
3030 /* determine the eof that the server (probably) has */
3031 eof = CIFS_I(rdata->mapping->host)->server_eof;
3032 eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
3033 cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
3035 rdata->tailsz = PAGE_CACHE_SIZE;
3036 for (i = 0; i < nr_pages; i++) {
3037 struct page *page = rdata->pages[i];
3039 if (len >= PAGE_CACHE_SIZE) {
3040 /* enough data to fill the page */
3041 iov.iov_base = kmap(page);
3042 iov.iov_len = PAGE_CACHE_SIZE;
3043 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
3044 i, page->index, iov.iov_base, iov.iov_len);
3045 len -= PAGE_CACHE_SIZE;
3046 } else if (len > 0) {
3047 /* enough for partial page, fill and zero the rest */
3048 iov.iov_base = kmap(page);
3050 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
3051 i, page->index, iov.iov_base, iov.iov_len);
3052 memset(iov.iov_base + len,
3053 '\0', PAGE_CACHE_SIZE - len);
/* remember the short tail length for the transport layer */
3054 rdata->tailsz = len;
3056 } else if (page->index > eof_index) {
3058 * The VFS will not try to do readahead past the
3059 * i_size, but it's possible that we have outstanding
3060 * writes with gaps in the middle and the i_size hasn't
3061 * caught up yet. Populate those with zeroed out pages
3062 * to prevent the VFS from repeatedly attempting to
3063 * fill them until the writes are flushed.
3065 zero_user(page, 0, PAGE_CACHE_SIZE);
3066 lru_cache_add_file(page);
3067 flush_dcache_page(page);
3068 SetPageUptodate(page);
3070 page_cache_release(page);
3071 rdata->pages[i] = NULL;
3075 /* no need to hold page hostage */
3076 lru_cache_add_file(page);
3078 page_cache_release(page);
3079 rdata->pages[i] = NULL;
/* pull this page's worth of data off the socket */
3084 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
3089 total_read += result;
3092 return total_read > 0 ? total_read : result;
/*
 * ->readpages: batch contiguous pages from the VFS-supplied page_list into
 * async read requests of at most rsize bytes each. Pages served by fscache
 * are handled first; the rest are locked, inserted into the page cache,
 * and handed to cifs_retry_async_readv() with cifs_readv_complete as the
 * completion. On per-request failure the pages are released back.
 */
3095 static int cifs_readpages(struct file *file, struct address_space *mapping,
3096 struct list_head *page_list, unsigned num_pages)
3099 struct list_head tmplist;
3100 struct cifsFileInfo *open_file = file->private_data;
3101 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
3102 unsigned int rsize = cifs_sb->rsize;
3106 * Give up immediately if rsize is too small to read an entire page.
3107 * The VFS will fall back to readpage. We should never reach this
3108 * point however since we set ra_pages to 0 when the rsize is smaller
3109 * than a cache page.
3111 if (unlikely(rsize < PAGE_CACHE_SIZE))
3115 * Reads as many pages as possible from fscache. Returns -ENOBUFS
3116 * immediately if the cookie is negative
3118 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3123 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3124 pid = open_file->pid;
3126 pid = current->tgid;
3129 INIT_LIST_HEAD(&tmplist);
3131 cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
3132 mapping, num_pages);
3135 * Start with the page at end of list and move it to private
3136 * list. Do the same with any following pages until we hit
3137 * the rsize limit, hit an index discontinuity, or run out of
3138 * pages. Issue the async read and then start the loop again
3139 * until the list is empty.
3141 * Note that list order is important. The page_list is in
3142 * the order of declining indexes. When we put the pages in
3143 * the rdata->pages, then we want them in increasing order.
3145 while (!list_empty(page_list)) {
3147 unsigned int bytes = PAGE_CACHE_SIZE;
3148 unsigned int expected_index;
3149 unsigned int nr_pages = 1;
3151 struct page *page, *tpage;
3152 struct cifs_readdata *rdata;
/* lowest remaining index is at the tail of page_list */
3154 page = list_entry(page_list->prev, struct page, lru);
3157 * Lock the page and put it in the cache. Since no one else
3158 * should have access to this page, we're safe to simply set
3159 * PG_locked without checking it first.
3161 __set_page_locked(page);
3162 rc = add_to_page_cache_locked(page, mapping,
3163 page->index, GFP_KERNEL);
3165 /* give up if we can't stick it in the cache */
3167 __clear_page_locked(page);
3171 /* move first page to the tmplist */
3172 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3173 list_move_tail(&page->lru, &tmplist);
3175 /* now try and add more pages onto the request */
3176 expected_index = page->index + 1;
3177 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3178 /* discontinuity ? */
3179 if (page->index != expected_index)
3182 /* would this page push the read over the rsize? */
3183 if (bytes + PAGE_CACHE_SIZE > rsize)
3186 __set_page_locked(page);
3187 if (add_to_page_cache_locked(page, mapping,
3188 page->index, GFP_KERNEL)) {
3189 __clear_page_locked(page);
3192 list_move_tail(&page->lru, &tmplist);
3193 bytes += PAGE_CACHE_SIZE;
3198 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
3200 /* best to give up if we're out of mem */
3201 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3202 list_del(&page->lru);
3203 lru_cache_add_file(page);
3205 page_cache_release(page);
3211 rdata->cfile = cifsFileInfo_get(open_file);
3212 rdata->mapping = mapping;
3213 rdata->offset = offset;
3214 rdata->bytes = bytes;
3216 rdata->pagesz = PAGE_CACHE_SIZE;
3217 rdata->read_into_pages = cifs_readpages_read_into_pages;
/* transfer the batched pages into the rdata in ascending order */
3219 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3220 list_del(&page->lru);
3221 rdata->pages[rdata->nr_pages++] = page;
3224 rc = cifs_retry_async_readv(rdata);
/* send failed: release every page we claimed for this request */
3226 for (i = 0; i < rdata->nr_pages; i++) {
3227 page = rdata->pages[i];
3228 lru_cache_add_file(page);
3230 page_cache_release(page);
3232 kref_put(&rdata->refcount, cifs_readdata_release);
/* drop the submission reference; completion holds its own */
3236 kref_put(&rdata->refcount, cifs_readdata_release);
/*
 * Fill one page: try fscache first, otherwise do a synchronous
 * cifs_read() into the kmapped page, zero the tail beyond the bytes
 * received, mark the page uptodate, and push it into fscache.
 */
3242 static int cifs_readpage_worker(struct file *file, struct page *page,
3248 /* Is the page cached? */
3249 rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
3253 page_cache_get(page);
3254 read_data = kmap(page);
3255 /* for reads over a certain size could initiate async read ahead */
3257 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
3262 cFYI(1, "Bytes read %d", rc);
3264 file->f_path.dentry->d_inode->i_atime =
3265 current_fs_time(file->f_path.dentry->d_inode->i_sb);
/* short read: zero the remainder so stale data never leaks to userspace */
3267 if (PAGE_CACHE_SIZE > rc)
3268 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
3270 flush_dcache_page(page);
3271 SetPageUptodate(page);
3273 /* send this page to the cache */
3274 cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
3280 page_cache_release(page);
/*
 * ->readpage: compute the file offset from the page index and delegate to
 * cifs_readpage_worker(). Bails out early if there is no open file handle.
 */
3286 static int cifs_readpage(struct file *file, struct page *page)
3288 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3294 if (file->private_data == NULL) {
3300 cFYI(1, "readpage %p at offset %d 0x%x",
3301 page, (int)offset, (int)offset);
3303 rc = cifs_readpage_worker(file, page, &offset);
/*
 * Return whether any currently-open file handle on this inode was opened
 * with write access. Walks openFileList under cifs_file_list_lock.
 */
3311 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3313 struct cifsFileInfo *open_file;
3315 spin_lock(&cifs_file_list_lock);
3316 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3317 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3318 spin_unlock(&cifs_file_list_lock);
3322 spin_unlock(&cifs_file_list_lock);
3326 /* We do not want to update the file size from server for inodes
3327 open for write - to avoid races with writepage extending
3328 the file - in the future we could consider allowing
3329 refreshing the inode only on increases in the file size
3330 but this is tricky to do without racing with writebehind
3331 page caching in the current Linux kernel design */
/*
 * Decide whether the inode size may be updated from a server-reported
 * end_of_file. For write-open inodes this is only safe with direct I/O
 * (no page cache to corrupt) or when the server EOF does not shrink the
 * locally cached size; otherwise a racing writepage could be clobbered.
 */
3332 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
3337 if (is_inode_writable(cifsInode)) {
3338 /* This inode is open for write at least once */
3339 struct cifs_sb_info *cifs_sb;
3341 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
3342 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3343 /* since no page cache to corrupt on directio
3344 we can change size safely */
3348 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
/*
 * ->write_begin: pin the target page and decide whether it must be read
 * from the server before the copy-in. The read is skipped when the page
 * is already uptodate, when the write covers the whole page, or (with a
 * read oplock) when the page lies at/past EOF so its old contents are
 * irrelevant — in that case the untouched parts are zeroed instead.
 */
3356 static int cifs_write_begin(struct file *file, struct address_space *mapping,
3357 loff_t pos, unsigned len, unsigned flags,
3358 struct page **pagep, void **fsdata)
3360 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
3361 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
3362 loff_t page_start = pos & PAGE_MASK;
3367 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
3369 page = grab_cache_page_write_begin(mapping, index, flags);
3375 if (PageUptodate(page))
3379 * If we write a full page it will be up to date, no need to read from
3380 * the server. If the write is short, we'll end up doing a sync write
3383 if (len == PAGE_CACHE_SIZE)
3387 * optimize away the read when we have an oplock, and we're not
3388 * expecting to use any of the data we'd be reading in. That
3389 * is, when the page lies beyond the EOF, or straddles the EOF
3390 * and the write will cover all of the existing data.
3392 if (CIFS_I(mapping->host)->clientCanCacheRead) {
3393 i_size = i_size_read(mapping->host);
3394 if (page_start >= i_size ||
3395 (offset == 0 && (pos + len) >= i_size)) {
3396 zero_user_segments(page, 0, offset,
3400 * PageChecked means that the parts of the page
3401 * to which we're not writing are considered up
3402 * to date. Once the data is copied to the
3403 * page, it can be set uptodate.
3405 SetPageChecked(page);
3410 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
3412 * might as well read a page, it is fast enough. If we get
3413 * an error, we don't need to return it. cifs_write_end will
3414 * do a sync write instead since PG_uptodate isn't set.
3416 cifs_readpage_worker(file, page, &page_start);
3418 /* we could try using another file handle if there is one -
3419 but how would we lock it to prevent close of that handle
3420 racing with this read? In any case
3421 this will be written out by write_end so is fine */
/*
 * ->releasepage: refuse to release pages with private data attached;
 * otherwise let fscache decide whether the page can be dropped.
 */
3428 static int cifs_release_page(struct page *page, gfp_t gfp)
3430 if (PagePrivate(page))
3433 return cifs_fscache_release_page(page, gfp);
/*
 * ->invalidatepage: forward the invalidation to fscache so its copy of
 * this page does not outlive the pagecache copy.
 */
3436 static void cifs_invalidate_page(struct page *page, unsigned long offset)
3438 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3441 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
/*
 * ->launder_page: synchronously write back a single dirty page (using a
 * one-page WB_SYNC_ALL writeback_control) before it is invalidated, then
 * invalidate any fscache copy as well.
 */
3444 static int cifs_launder_page(struct page *page)
3447 loff_t range_start = page_offset(page);
3448 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3449 struct writeback_control wbc = {
3450 .sync_mode = WB_SYNC_ALL,
3452 .range_start = range_start,
3453 .range_end = range_end,
3456 cFYI(1, "Launder page: %p", page);
3458 if (clear_page_dirty_for_io(page))
3459 rc = cifs_writepage_locked(page, &wbc);
3461 cifs_fscache_invalidate_page(page, page->mapping->host);
/*
 * Work item run when the server breaks our oplock: propagate the break to
 * local leases, flush dirty pages, and — if read caching was lost — wait
 * for writeback and invalidate the cached inode data. Then push cached
 * byte-range locks to the server and acknowledge the break, unless the
 * break was cancelled (e.g. the session reconnected and the old handle is
 * stale).
 */
3465 void cifs_oplock_break(struct work_struct *work)
3467 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3469 struct inode *inode = cfile->dentry->d_inode;
3470 struct cifsInodeInfo *cinode = CIFS_I(inode);
3471 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3474 if (inode && S_ISREG(inode->i_mode)) {
/* break local leases to match the caching rights we still hold */
3475 if (cinode->clientCanCacheRead)
3476 break_lease(inode, O_RDONLY);
3478 break_lease(inode, O_WRONLY);
3479 rc = filemap_fdatawrite(inode->i_mapping);
3480 if (cinode->clientCanCacheRead == 0) {
3481 rc = filemap_fdatawait(inode->i_mapping);
3482 mapping_set_error(inode->i_mapping, rc);
3483 invalidate_remote_inode(inode);
3485 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
3488 rc = cifs_push_locks(cfile);
3490 cERROR(1, "Push locks rc = %d", rc);
3493 * releasing stale oplock after recent reconnect of smb session using
3494 * a now incorrect file handle is not a data integrity issue but do
3495 * not bother sending an oplock release if session to server still is
3496 * disconnected since oplock already released by the server
3498 if (!cfile->oplock_break_cancelled) {
3499 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
3501 cFYI(1, "Oplock release rc = %d", rc);
/* Address-space operations used when the server buffer is large enough
 * for full-page reads (includes ->readpages for readahead batching). */
3505 const struct address_space_operations cifs_addr_ops = {
3506 .readpage = cifs_readpage,
3507 .readpages = cifs_readpages,
3508 .writepage = cifs_writepage,
3509 .writepages = cifs_writepages,
3510 .write_begin = cifs_write_begin,
3511 .write_end = cifs_write_end,
3512 .set_page_dirty = __set_page_dirty_nobuffers,
3513 .releasepage = cifs_release_page,
3514 .invalidatepage = cifs_invalidate_page,
3515 .launder_page = cifs_launder_page,
3519 * cifs_readpages requires the server to support a buffer large enough to
3520 * contain the header plus one complete page of data. Otherwise, we need
3521 * to leave cifs_readpages out of the address space operations.
/* Same as cifs_addr_ops but without ->readpages — used when the server's
 * negotiated buffer cannot hold a header plus one full page of data. */
3523 const struct address_space_operations cifs_addr_ops_smallbuf = {
3524 .readpage = cifs_readpage,
3525 .writepage = cifs_writepage,
3526 .writepages = cifs_writepages,
3527 .write_begin = cifs_write_begin,
3528 .write_end = cifs_write_end,
3529 .set_page_dirty = __set_page_dirty_nobuffers,
3530 .releasepage = cifs_release_page,
3531 .invalidatepage = cifs_invalidate_page,
3532 .launder_page = cifs_launder_page,