/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

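/*
 * Map the POSIX open flags to the CIFS generic access bits requested on
 * an NT-style open.
 */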
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

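/*
 * Map the POSIX open flags to their SMB_O_* equivalents for a POSIX open
 * on servers that support the Unix extensions.
 */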
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT)
		posix_flags |= SMB_O_CREAT;
	if (flags & O_EXCL)
		posix_flags |= SMB_O_EXCL;
	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

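/*
 * Derive the CIFS create disposition (FILE_CREATE, FILE_OVERWRITE_IF,
 * FILE_OPEN_IF, FILE_OVERWRITE or FILE_OPEN) from the POSIX open flags.
 */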
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

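/*
 * Open a file via the Unix extensions POSIX create call and, when the
 * caller asks for it, fill in or instantiate the inode from the returned
 * FILE_UNIX_BASIC_INFO.
 */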
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

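/*
 * Open a file the traditional (NT) way via the server's ->open method and
 * then refresh the inode from the metadata returned by the server.
 */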
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;

	if (!tcon->ses->server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = tcon->ses->server->ops->open(xid, tcon, full_path, disposition,
					  desired_access, create_options, fid,
					  oplock, buf, cifs_sb);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, &fid->netfid);

out:
	kfree(buf);
	return rc;
}

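/*
 * Allocate and initialize the per-open cifsFileInfo structure, take the
 * required references, and link it into the tcon and inode open-file lists.
 */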
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	mutex_init(&cfile->fh_mutex);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_LIST_HEAD(&cfile->llist);
	tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);

	spin_lock(&cifs_file_list_lock);
	list_add(&cfile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	file->private_data = cfile;
	return cfile;
}

static void cifs_del_lock_waiters(struct cifsLockInfo *lock);

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file_list_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file_list_lock);
	return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
		     cifs_file->dentry->d_inode);
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;
		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = -ENOSYS;

		xid = get_xid();
		if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		free_xid(xid);
	}

	/* Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifsi->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	mutex_unlock(&cifsi->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}

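/*
 * ->open() for the cifs vfs: try a POSIX open when the server supports the
 * Unix extensions and fall back to the NT-style open otherwise.
 */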
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
	     inode, file->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc)
			goto out;
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (tcon->ses->server->ops->close)
			tcon->ses->server->ops->close(xid, tcon, &fid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

	/* BB list all locks open on this file and relock */

	return rc;
}

static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_fid fid;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = cfile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
	     full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * CIFSSMBOpen and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, &fid, &oplock,
			       NULL, cifs_sb);
	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cFYI(1, "cifs_reopen returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	server->ops->set_fid(cfile, &fid, oplock);
	cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	char *tmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = get_xid();

	if (cfile) {
		struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

		cFYI(1, "Freeing private data in close dir");
		spin_lock(&cifs_file_list_lock);
		if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
			cfile->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, tcon, cfile->fid.netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d", rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);

		tmp = cfile->srch_inf.ntwrk_buf_start;
		if (tmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			cfile->srch_inf.ntwrk_buf_start = NULL;
			if (cfile->srch_inf.smallBuf)
				cifs_small_buf_release(tmp);
			else
				cifs_buf_release(tmp);
		}

		cifs_put_tlink(cfile->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

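/*
 * Wake up any tasks blocked on a conflicting lock that is about to be
 * removed from the list.
 */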
static void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

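/*
 * Scan one open file's lock list for a range that conflicts with the
 * requested lock; compatible shared locks from the same process are not
 * treated as conflicts.
 */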
static bool
cifs_find_fid_lock_conflict(struct cifsFileInfo *cfile, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cur,
			    struct cifsLockInfo **conf_lock)
{
	struct cifsLockInfo *li;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &cfile->llist, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		else if ((type & server->vals->shared_lock_type) &&
			 ((server->ops->compare_fids(cur, cfile) &&
			   current->tgid == li->pid) || type == li->type))
			continue;
		else {
			*conf_lock = li;
			return true;
		}
	}
	return false;
}

static bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock)
{
	bool rc = false;
	struct cifsFileInfo *fid, *tmp;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry_safe(fid, tmp, &cinode->openFileList, flist) {
		rc = cifs_find_fid_lock_conflict(fid, offset, length, type,
						 cfile, conf_lock);
		if (rc)
			break;
	}
	spin_unlock(&cifs_file_list_lock);

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	mutex_lock(&cinode->lock_mutex);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	mutex_lock(&cinode->lock_mutex);
	list_add_tail(&lock->llist, &cfile->llist);
	mutex_unlock(&cinode->lock_mutex);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	mutex_lock(&cinode->lock_mutex);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist);
		mutex_unlock(&cinode->lock_mutex);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		mutex_unlock(&cinode->lock_mutex);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		mutex_lock(&cinode->lock_mutex);
		list_del_init(&lock->blist);
	}

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	mutex_lock(&cinode->lock_mutex);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	mutex_unlock(&cinode->lock_mutex);
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}

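/*
 * Send all cached byte-range locks for this open file to the server,
 * batching as many LOCKING_ANDX_RANGE entries per request as the
 * negotiated buffer size allows.
 */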
static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		free_xid(xid);
		return rc;
	}

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		mutex_unlock(&cinode->lock_mutex);
		free_xid(xid);
		return -EINVAL;
	}

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		mutex_unlock(&cinode->lock_mutex);
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	kfree(buf);
	free_xid(xid);
	return rc;
}

/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		free_xid(xid);
		return rc;
	}

	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	unlock_flocks();

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_mutex that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cERROR(1, "Can't push all brlocks!");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	unlock_flocks();

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}

static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return cifs_push_posix_locks(cfile);

	return cifs_push_mandatory_locks(cfile);
}

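/*
 * Decode the flags and type of a file_lock request into the lock type bits
 * used on the wire and into lock/unlock/wait indications.
 */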
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (flock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (flock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			"not implemented yet");
	if (flock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);

	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cFYI(1, "Unknown type of lock");
}

static int
cifs_mandatory_lock(unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
		    __u64 length, __u32 type, int lock, int unlock, bool wait)
{
	return CIFSSMBLock(xid, tlink_tcon(cfile->tlink), cfile->fid.netfid,
			   current->tgid, length, offset, unlock, lock,
			   (__u8)type, wait, 0);
}

static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length, type,
				 1, 0, false);
	if (rc == 0) {
		rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
					 type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
				 type | server->vals->shared_lock_type, 1, 0,
				 false);
	if (rc == 0) {
		rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
					 type | server->vals->shared_lock_type,
					 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
	} else
		flock->fl_type = F_WRLCK;

	rc = 0;
	return rc;
}

static void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}

static void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}

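/*
 * Unlock every cached lock that falls within the range of the unlock
 * request, sending batched unlock requests to the server when the locks
 * can no longer be cached locally.
 */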
static int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf)
		return -EINVAL;

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	mutex_lock(&cinode->lock_mutex);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeeded -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist, &cfile->llist);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	mutex_unlock(&cinode->lock_mutex);
	kfree(buf);
	return rc;
}

static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, NULL,
				      posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0)
			kfree(lock);
		if (rc <= 0)
			goto out;

		rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
					 type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			goto out;
		}

		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = cifs_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, flock);
	return rc;
}

int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
		"end: %lld", cmd, flock->fl_flags, flock->fl_type,
		flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	netfid = cfile->fid.netfid;
	cinode = CIFS_I(file->f_path.dentry->d_inode);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}

/*
 * update the file size (if needed) after a write. Should be called with
 * the inode->i_lock held
 */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		      unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}

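/*
 * Write data to the server from a kernel buffer, retrying on -EAGAIN and
 * reopening an invalidated handle as needed; updates the cached end of
 * file and the file position on success.
 */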
static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
			  const char *write_data, size_t write_size,
			  loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	     *poffset, dentry->d_name.name);

	pTcon = tlink_tcon(open_file->tlink);

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.netfid = open_file->fid.netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = len;
			rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
					   1, 0);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&dentry->d_inode->i_lock);
			cifs_update_eof(cifsi, *poffset, bytes_written);
			spin_unlock(&dentry->d_inode->i_lock);
			*poffset += bytes_written;
		}
	}

	cifs_stats_bytes_written(pTcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*poffset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *poffset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	free_xid(xid);
	return total_written;
}

struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}

struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/* Having a null inode here (because mapping->host was set to zero by
	the VFS or MM) should not happen but we had reports of an oops (due to
	it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_file_list_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} else {
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find usable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get_locked(inv_file);
	}

	spin_unlock(&cifs_file_list_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			spin_lock(&cifs_file_list_lock);
			list_move_tail(&inv_file->flist,
					&cifs_inode->openFileList);
			spin_unlock(&cifs_file_list_lock);
			cifsFileInfo_put(inv_file);
			spin_lock(&cifs_file_list_lock);
			++refind;
			goto refind_writable;
		}
	}

	return NULL;
}

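/*
 * Write out the dirty range [from, to) of a page using any available
 * writable handle for the inode.
 */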
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}

/*
 * Marshal up the iov array, reserving the first one for the header. Also,
 * set wdata->bytes.
 */
static void
cifs_writepages_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
{
	int i;
	struct inode *inode = wdata->cfile->dentry->d_inode;
	loff_t size = i_size_read(inode);

	/* marshal up the pages into iov array */
	wdata->bytes = 0;
	for (i = 0; i < wdata->nr_pages; i++) {
		iov[i + 1].iov_len = min(size - page_offset(wdata->pages[i]),
					(loff_t)PAGE_CACHE_SIZE);
		iov[i + 1].iov_base = kmap(wdata->pages[i]);
		wdata->bytes += iov[i + 1].iov_len;
	}
}

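/*
 * ->writepages() for the cifs address space: gather runs of contiguous
 * dirty pages up to wsize and send them to the server with asynchronous
 * writes.
 */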
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct TCP_Server_Info *server;
	struct page *page;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
				end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind,
					     cifs_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
							PAGECACHE_TAG_DIRTY,
							tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= mapping->host->i_size) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);
		wdata->marshal_iov = cifs_writepages_marshal_iov;

		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
				break;
			}
			wdata->pid = wdata->cfile->pid;
			server = tlink_tcon(wdata->cfile->tlink)->ses->server;
			rc = server->ops->async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}

static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
	/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	free_xid(xid);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}

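/*
 * ->write_end() for the cifs address space: mark the page up to date (or
 * write the copied range synchronously when it is not) and extend the file
 * size if the write went past it.
 */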
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
	     page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}

int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}

int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}

/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
	return rc;
}

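/*
 * Allocate an array of pages for an uncached write, cleaning up the pages
 * already allocated when one of the allocations fails.
 */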
static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			break;
		}
	}

	if (rc) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
	}
	return rc;
}

static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}

static void
cifs_uncached_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
{
	int i;
	size_t bytes = wdata->bytes;

	/* marshal up the pages into iov array */
	for (i = 0; i < wdata->nr_pages; i++) {
		iov[i + 1].iov_len = min_t(size_t, bytes, PAGE_SIZE);
		iov[i + 1].iov_base = kmap(wdata->pages[i]);
		bytes -= iov[i + 1].iov_len;
	}
}

static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	int i;
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = wdata->cfile->dentry->d_inode;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	if (wdata->result != -EAGAIN) {
		for (i = 0; i < wdata->nr_pages; i++)
			put_page(wdata->pages[i]);
	}

	kref_put(&wdata->refcount, cifs_writedata_release);
}

/* attempt to send write to server, retry on any -EAGAIN errors */
static int
cifs_uncached_retry_writev(struct cifs_writedata *wdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(wdata->cfile->tlink)->ses->server;

	do {
		if (wdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(wdata->cfile, false);
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_writev(wdata);
	} while (rc == -EAGAIN);

	return rc;
}

static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned long nr_pages, i;
	size_t copied, len, cur_len;
	ssize_t total_written = 0;
	loff_t offset;
	struct iov_iter it;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	int rc;
	pid_t pid;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	offset = *poffset;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	do {
		size_t save_len;

		nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			break;
		}

		save_len = cur_len;
		for (i = 0; i < nr_pages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_SIZE);
			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
							 0, copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
		}
		cur_len = save_len - cur_len;

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->marshal_iov = cifs_uncached_marshal_iov;
		rc = cifs_uncached_retry_writev(wdata);
		if (rc) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		list_add_tail(&wdata->list, &wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_uncached_retry_writev(wdata);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_writedata_release);
	}

	if (total_written > 0)
		*poffset += total_written;

	cifs_stats_bytes_written(tcon, total_written);
	return total_written ? total_written : (ssize_t)rc;
}

ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
			 unsigned long nr_segs, loff_t pos)
{
	ssize_t written;
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	/*
	 * BB - optimize the way when signing is disabled. We can drop this
	 * extra memory-to-memory copying and use iovec buffers for constructing
	 * write request.
	 */

	written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
	if (written > 0) {
		CIFS_I(inode)->invalid_mapping = true;
		iocb->ki_pos = pos;
	}

	return written;
}

ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
			   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	if (CIFS_I(inode)->clientCanCacheAll)
		return generic_file_aio_write(iocb, iov, nr_segs, pos);

	/*
	 * In strict cache mode we need to write the data to the server exactly
	 * from the pos to pos+len-1 rather than flush all affected pages
	 * because it may cause an error with mandatory locks on these pages
	 * but not on the region from pos to pos+len-1.
	 */

	return cifs_user_writev(iocb, iov, nr_segs, pos);
}

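/*
 * Allocate a cifs_readdata structure with room for nr_vecs kvecs and
 * initialize its refcount, lists and completion work.
 */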
static struct cifs_readdata *
cifs_readdata_alloc(unsigned int nr_vecs, work_func_t complete)
{
	struct cifs_readdata *rdata;

	rdata = kzalloc(sizeof(*rdata) +
			sizeof(struct kvec) * nr_vecs, GFP_KERNEL);
	if (rdata != NULL) {
		kref_init(&rdata->refcount);
		INIT_LIST_HEAD(&rdata->list);
		init_completion(&rdata->done);
		INIT_WORK(&rdata->work, complete);
		INIT_LIST_HEAD(&rdata->pages);
	}
	return rdata;
}

void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);

	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kfree(rdata);
}

static int
cifs_read_allocate_pages(struct list_head *list, unsigned int npages)
{
	int rc = 0;
	struct page *page, *tpage;
	unsigned int i;

	for (i = 0; i < npages; i++) {
		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!page) {
			rc = -ENOMEM;
			break;
		}
		list_add(&page->lru, list);
	}

	if (rc) {
		list_for_each_entry_safe(page, tpage, list, lru) {
			list_del(&page->lru);
			put_page(page);
		}
	}
	return rc;
}

static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct page *page, *tpage;
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);

	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
		list_del(&page->lru);
		put_page(page);
	}
	cifs_readdata_release(refcount);
}

static int
cifs_retry_async_readv(struct cifs_readdata *rdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(rdata->cfile->tlink)->ses->server;

	do {
		if (rdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(rdata->cfile, true);
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_readv(rdata);
	} while (rc == -EAGAIN);

	return rc;
}

/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iov:	vector in which we should copy the data
 * @nr_segs:	number of segments in vector
 * @offset:	offset into file of the first iovec
 * @copied:	used to return the amount of data copied to the iov
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
static ssize_t
cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
                        unsigned long nr_segs, loff_t offset, ssize_t *copied)
{
        int rc = 0;
        struct iov_iter ii;
        size_t pos = rdata->offset - offset;
        struct page *page, *tpage;
        ssize_t remaining = rdata->bytes;
        unsigned char *pdata;

        /* set up iov_iter and advance to the correct offset */
        iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
        iov_iter_advance(&ii, pos);

        *copied = 0;
        list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
                ssize_t copy;

                /* copy a whole page or whatever's left */
                copy = min_t(ssize_t, remaining, PAGE_SIZE);

                /* ...but limit it to whatever space is left in the iov */
                copy = min_t(ssize_t, copy, iov_iter_count(&ii));

                /* go while there's data to be copied and no errors */
                if (copy && !rc) {
                        pdata = kmap(page);
                        rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
                                                copy);
                        kunmap(page);
                        if (!rc) {
                                *copied += copy;
                                remaining -= copy;
                                iov_iter_advance(&ii, copy);
                        }
                }

                list_del(&page->lru);
                put_page(page);
        }

        return rc;
}
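
/*
 * Work item run when an uncached async read finishes: unmap the pages that
 * the marshalling step kmapped (only if the read succeeded), then signal the
 * waiter in cifs_iovec_read() and drop our reference.
 */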
static void
cifs_uncached_readv_complete(struct work_struct *work)
{
        struct cifs_readdata *rdata = container_of(work,
                                        struct cifs_readdata, work);

        /* if the result is non-zero then the pages weren't kmapped */
        if (rdata->result == 0) {
                struct page *page;

                list_for_each_entry(page, &rdata->pages, lru)
                        kunmap(page);
        }

        complete(&rdata->done);
        kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}
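
/*
 * Map the response pages into rdata->iov (iov[0] is reserved for the
 * header, so data vectors start at index 1). Trailing space in a partial
 * page is zeroed, pages beyond the returned length are released, and
 * rdata->bytes is trimmed to what the server actually sent.
 */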
static int
cifs_uncached_read_marshal_iov(struct cifs_readdata *rdata,
                                unsigned int remaining)
{
        int len = 0;
        struct page *page, *tpage;

        rdata->nr_iov = 1;
        list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
                if (remaining >= PAGE_SIZE) {
                        /* enough data to fill the page */
                        rdata->iov[rdata->nr_iov].iov_base = kmap(page);
                        rdata->iov[rdata->nr_iov].iov_len = PAGE_SIZE;
                        cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
                                rdata->nr_iov, page->index,
                                rdata->iov[rdata->nr_iov].iov_base,
                                rdata->iov[rdata->nr_iov].iov_len);
                        ++rdata->nr_iov;
                        len += PAGE_SIZE;
                        remaining -= PAGE_SIZE;
                } else if (remaining > 0) {
                        /* enough for partial page, fill and zero the rest */
                        rdata->iov[rdata->nr_iov].iov_base = kmap(page);
                        rdata->iov[rdata->nr_iov].iov_len = remaining;
                        cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
                                rdata->nr_iov, page->index,
                                rdata->iov[rdata->nr_iov].iov_base,
                                rdata->iov[rdata->nr_iov].iov_len);
                        memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
                                '\0', PAGE_SIZE - remaining);
                        ++rdata->nr_iov;
                        len += remaining;
                        remaining = 0;
                } else {
                        /* no need to hold page hostage */
                        list_del(&page->lru);
                        put_page(page);
                }
        }

        rdata->bytes = len;
        return 0;
}
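
/*
 * Core of the uncached read path: split the request into rsize-sized chunks,
 * send an async read for each, then wait for the replies in order of
 * increasing offset and copy the returned data into the caller's iovec.
 */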
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
                unsigned long nr_segs, loff_t *poffset)
{
        ssize_t rc;
        size_t len, cur_len;
        ssize_t total_read = 0;
        loff_t offset = *poffset;
        unsigned int npages;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct cifsFileInfo *open_file;
        struct cifs_readdata *rdata, *tmp;
        struct list_head rdata_list;
        pid_t pid;

        if (!nr_segs)
                return 0;

        len = iov_length(iov, nr_segs);
        if (!len)
                return 0;

        INIT_LIST_HEAD(&rdata_list);
        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        open_file = file->private_data;
        tcon = tlink_tcon(open_file->tlink);

        if (!tcon->ses->server->ops->async_readv)
                return -ENOSYS;

        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
                pid = open_file->pid;
        else
                pid = current->tgid;

        if ((file->f_flags & O_ACCMODE) == O_WRONLY)
                cFYI(1, "attempting read on write only file instance");

        do {
                cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
                npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

                /* allocate a readdata struct */
                rdata = cifs_readdata_alloc(npages,
                                            cifs_uncached_readv_complete);
                if (rdata == NULL) {
                        rc = -ENOMEM;
                        break;
                }

                rc = cifs_read_allocate_pages(&rdata->pages, npages);
                if (rc)
                        goto error;

                rdata->cfile = cifsFileInfo_get(open_file);
                rdata->offset = offset;
                rdata->bytes = cur_len;
                rdata->pid = pid;
                rdata->marshal_iov = cifs_uncached_read_marshal_iov;

                rc = cifs_retry_async_readv(rdata);
error:
                if (rc) {
                        kref_put(&rdata->refcount,
                                 cifs_uncached_readdata_release);
                        break;
                }

                list_add_tail(&rdata->list, &rdata_list);
                offset += cur_len;
                len -= cur_len;
        } while (len > 0);

        /* if at least one read request send succeeded, then reset rc */
        if (!list_empty(&rdata_list))
                rc = 0;

        /* the loop below should proceed in the order of increasing offsets */
restart_loop:
        list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
                if (!rc) {
                        ssize_t copied;

                        /* FIXME: freezable sleep too? */
                        rc = wait_for_completion_killable(&rdata->done);
                        if (rc == -ERESTARTSYS)
                                rc = -EINTR;
                        else if (rdata->result)
                                rc = rdata->result;
                        else {
                                rc = cifs_readdata_to_iov(rdata, iov,
                                                        nr_segs, *poffset,
                                                        &copied);
                                total_read += copied;
                        }

                        /* resend call if it's a retryable error */
                        if (rc == -EAGAIN) {
                                rc = cifs_retry_async_readv(rdata);
                                goto restart_loop;
                        }
                }
                list_del_init(&rdata->list);
                kref_put(&rdata->refcount, cifs_uncached_readdata_release);
        }

        cifs_stats_bytes_read(tcon, total_read);
        *poffset += total_read;

        /* mask nodata case */
        if (rc == -ENODATA)
                rc = 0;

        return total_read ? total_read : rc;
}
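
/*
 * Uncached read entry point: read straight from the server via
 * cifs_iovec_read() and advance the file position on success.
 */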
ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
                        unsigned long nr_segs, loff_t pos)
{
        ssize_t read;

        read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
        if (read > 0)
                iocb->ki_pos = pos;

        return read;
}
ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
                          unsigned long nr_segs, loff_t pos)
{
        struct inode *inode;

        inode = iocb->ki_filp->f_path.dentry->d_inode;

        if (CIFS_I(inode)->clientCanCacheRead)
                return generic_file_aio_read(iocb, iov, nr_segs, pos);

        /*
         * In strict cache mode we need to read from the server all the time
         * if we don't have level II oplock because the server can delay mtime
         * change - so we can't make a decision about inode invalidating.
         * And we can also fail with page reading if there are mandatory locks
         * on pages affected by this read but not on the region from pos to
         * pos+len-1.
         */

        return cifs_user_readv(iocb, iov, nr_segs, pos);
}
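
/*
 * Synchronous read used by the cached readpage path: issue CIFSSMBRead calls
 * of at most rsize bytes until the requested range has been read or the
 * server returns an error / no more data.
 */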
static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
                         loff_t *poffset)
{
        int rc = -EACCES;
        unsigned int bytes_read = 0;
        unsigned int total_read;
        unsigned int current_read_size;
        unsigned int rsize;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        unsigned int xid;
        char *current_offset;
        struct cifsFileInfo *open_file;
        struct cifs_io_parms io_parms;
        int buf_type = CIFS_NO_BUFFER;
        __u32 pid;

        xid = get_xid();
        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

        /* FIXME: set up handlers for larger reads and/or convert to async */
        rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

        if (file->private_data == NULL) {
                rc = -EBADF;
                free_xid(xid);
                return rc;
        }
        open_file = file->private_data;
        tcon = tlink_tcon(open_file->tlink);

        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
                pid = open_file->pid;
        else
                pid = current->tgid;

        if ((file->f_flags & O_ACCMODE) == O_WRONLY)
                cFYI(1, "attempting read on write only file instance");

        for (total_read = 0, current_offset = read_data;
             read_size > total_read;
             total_read += bytes_read, current_offset += bytes_read) {
                current_read_size = min_t(uint, read_size - total_read, rsize);
                /*
                 * For Windows ME and 9x we do not want to request more than
                 * it negotiated, since it will refuse the read then.
                 */
                if ((tcon->ses) && !(tcon->ses->capabilities &
                                tcon->ses->server->vals->cap_large_files)) {
                        current_read_size = min_t(uint, current_read_size,
                                                  CIFSMaxBufSize);
                }
                rc = -EAGAIN;
                while (rc == -EAGAIN) {
                        if (open_file->invalidHandle) {
                                rc = cifs_reopen_file(open_file, true);
                                if (rc != 0)
                                        break;
                        }
                        io_parms.netfid = open_file->fid.netfid;
                        io_parms.pid = pid;
                        io_parms.tcon = tcon;
                        io_parms.offset = *poffset;
                        io_parms.length = current_read_size;
                        rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
                                         &current_offset, &buf_type);
                }
                if (rc || (bytes_read == 0)) {
                        if (total_read) {
                                break;
                        } else {
                                free_xid(xid);
                                return rc;
                        }
                } else {
                        cifs_stats_bytes_read(tcon, total_read);
                        *poffset += bytes_read;
                }
        }
        free_xid(xid);
        return total_read;
}
/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page = vmf->page;

        lock_page(page);
        return VM_FAULT_LOCKED;
}

static struct vm_operations_struct cifs_file_vm_ops = {
        .fault = filemap_fault,
        .page_mkwrite = cifs_page_mkwrite,
};
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
        int rc, xid;
        struct inode *inode = file->f_path.dentry->d_inode;

        xid = get_xid();

        if (!CIFS_I(inode)->clientCanCacheRead) {
                rc = cifs_invalidate_mapping(inode);
                if (rc)
                        return rc;
        }

        rc = generic_file_mmap(file, vma);
        if (rc == 0)
                vma->vm_ops = &cifs_file_vm_ops;
        free_xid(xid);
        return rc;
}

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        int rc, xid;

        xid = get_xid();
        rc = cifs_revalidate_file(file);
        if (rc) {
                cFYI(1, "Validation prior to mmap failed, error=%d", rc);
                free_xid(xid);
                return rc;
        }
        rc = generic_file_mmap(file, vma);
        if (rc == 0)
                vma->vm_ops = &cifs_file_vm_ops;
        free_xid(xid);
        return rc;
}
static void
cifs_readv_complete(struct work_struct *work)
{
        struct cifs_readdata *rdata = container_of(work,
                                        struct cifs_readdata, work);
        struct page *page, *tpage;

        list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
                list_del(&page->lru);
                lru_cache_add_file(page);

                if (rdata->result == 0) {
                        kunmap(page);
                        flush_dcache_page(page);
                        SetPageUptodate(page);
                }

                unlock_page(page);

                if (rdata->result == 0)
                        cifs_readpage_to_fscache(rdata->mapping->host, page);

                page_cache_release(page);
        }
        kref_put(&rdata->refcount, cifs_readdata_release);
}
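
/*
 * Map the pagecache pages of a readpages request into rdata->iov (iov[0] is
 * reserved for the header). A short server response leaves trailing pages
 * unfilled: pages past the server's EOF are zero-filled and marked uptodate,
 * while the rest are simply released to be retried later.
 */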
static int
cifs_readpages_marshal_iov(struct cifs_readdata *rdata, unsigned int remaining)
{
        int len = 0;
        u64 eof;
        pgoff_t eof_index;
        struct page *page, *tpage;

        /* determine the eof that the server (probably) has */
        eof = CIFS_I(rdata->mapping->host)->server_eof;
        eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
        cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);

        rdata->nr_iov = 1;
        list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
                if (remaining >= PAGE_CACHE_SIZE) {
                        /* enough data to fill the page */
                        rdata->iov[rdata->nr_iov].iov_base = kmap(page);
                        rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
                        cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
                                rdata->nr_iov, page->index,
                                rdata->iov[rdata->nr_iov].iov_base,
                                rdata->iov[rdata->nr_iov].iov_len);
                        ++rdata->nr_iov;
                        len += PAGE_CACHE_SIZE;
                        remaining -= PAGE_CACHE_SIZE;
                } else if (remaining > 0) {
                        /* enough for partial page, fill and zero the rest */
                        rdata->iov[rdata->nr_iov].iov_base = kmap(page);
                        rdata->iov[rdata->nr_iov].iov_len = remaining;
                        cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
                                rdata->nr_iov, page->index,
                                rdata->iov[rdata->nr_iov].iov_base,
                                rdata->iov[rdata->nr_iov].iov_len);
                        memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
                                '\0', PAGE_CACHE_SIZE - remaining);
                        ++rdata->nr_iov;
                        len += remaining;
                        remaining = 0;
                } else if (page->index > eof_index) {
                        /*
                         * The VFS will not try to do readahead past the
                         * i_size, but it's possible that we have outstanding
                         * writes with gaps in the middle and the i_size hasn't
                         * caught up yet. Populate those with zeroed out pages
                         * to prevent the VFS from repeatedly attempting to
                         * fill them until the writes are flushed.
                         */
                        zero_user(page, 0, PAGE_CACHE_SIZE);
                        list_del(&page->lru);
                        lru_cache_add_file(page);
                        flush_dcache_page(page);
                        SetPageUptodate(page);
                        unlock_page(page);
                        page_cache_release(page);
                } else {
                        /* no need to hold page hostage */
                        list_del(&page->lru);
                        lru_cache_add_file(page);
                        unlock_page(page);
                        page_cache_release(page);
                }
        }

        rdata->bytes = len;
        return 0;
}
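
/*
 * ->readpages() implementation: batch contiguous pages from the VFS-supplied
 * list into rsize-sized async read requests; cifs_readv_complete() finishes
 * the pages when each reply arrives.
 */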
static int cifs_readpages(struct file *file, struct address_space *mapping,
        struct list_head *page_list, unsigned num_pages)
{
        int rc;
        struct list_head tmplist;
        struct cifsFileInfo *open_file = file->private_data;
        struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        unsigned int rsize = cifs_sb->rsize;
        pid_t pid;

        /*
         * Give up immediately if rsize is too small to read an entire page.
         * The VFS will fall back to readpage. We should never reach this
         * point however since we set ra_pages to 0 when the rsize is smaller
         * than a cache page.
         */
        if (unlikely(rsize < PAGE_CACHE_SIZE))
                return 0;

        /*
         * Reads as many pages as possible from fscache. Returns -ENOBUFS
         * immediately if the cookie is negative
         */
        rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
                                         &num_pages);
        if (rc == 0)
                return rc;

        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
                pid = open_file->pid;
        else
                pid = current->tgid;

        rc = 0;
        INIT_LIST_HEAD(&tmplist);

        cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
                mapping, num_pages);

        /*
         * Start with the page at end of list and move it to private
         * list. Do the same with any following pages until we hit
         * the rsize limit, hit an index discontinuity, or run out of
         * pages. Issue the async read and then start the loop again
         * until the list is empty.
         *
         * Note that list order is important. The page_list is in
         * the order of declining indexes. When we put the pages in
         * the rdata->pages, then we want them in increasing order.
         */
        while (!list_empty(page_list)) {
                unsigned int bytes = PAGE_CACHE_SIZE;
                unsigned int expected_index;
                unsigned int nr_pages = 1;
                loff_t offset;
                struct page *page, *tpage;
                struct cifs_readdata *rdata;

                page = list_entry(page_list->prev, struct page, lru);

                /*
                 * Lock the page and put it in the cache. Since no one else
                 * should have access to this page, we're safe to simply set
                 * PG_locked without checking it first.
                 */
                __set_page_locked(page);
                rc = add_to_page_cache_locked(page, mapping,
                                              page->index, GFP_KERNEL);

                /* give up if we can't stick it in the cache */
                if (rc) {
                        __clear_page_locked(page);
                        break;
                }

                /* move first page to the tmplist */
                offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
                list_move_tail(&page->lru, &tmplist);

                /* now try and add more pages onto the request */
                expected_index = page->index + 1;
                list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
                        /* discontinuity ? */
                        if (page->index != expected_index)
                                break;

                        /* would this page push the read over the rsize? */
                        if (bytes + PAGE_CACHE_SIZE > rsize)
                                break;

                        __set_page_locked(page);
                        if (add_to_page_cache_locked(page, mapping,
                                                page->index, GFP_KERNEL)) {
                                __clear_page_locked(page);
                                break;
                        }
                        list_move_tail(&page->lru, &tmplist);
                        bytes += PAGE_CACHE_SIZE;
                        expected_index++;
                        nr_pages++;
                }

                rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
                if (!rdata) {
                        /* best to give up if we're out of mem */
                        list_for_each_entry_safe(page, tpage, &tmplist, lru) {
                                list_del(&page->lru);
                                lru_cache_add_file(page);
                                unlock_page(page);
                                page_cache_release(page);
                        }
                        rc = -ENOMEM;
                        break;
                }

                rdata->cfile = cifsFileInfo_get(open_file);
                rdata->mapping = mapping;
                rdata->offset = offset;
                rdata->bytes = bytes;
                rdata->pid = pid;
                rdata->marshal_iov = cifs_readpages_marshal_iov;
                list_splice_init(&tmplist, &rdata->pages);

                rc = cifs_retry_async_readv(rdata);
                if (rc != 0) {
                        list_for_each_entry_safe(page, tpage, &rdata->pages,
                                                 lru) {
                                list_del(&page->lru);
                                lru_cache_add_file(page);
                                unlock_page(page);
                                page_cache_release(page);
                        }
                        kref_put(&rdata->refcount, cifs_readdata_release);
                        break;
                }

                kref_put(&rdata->refcount, cifs_readdata_release);
        }

        return rc;
}
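
/*
 * Fill a single pagecache page, preferring a cached copy from fscache and
 * otherwise doing a synchronous cifs_read(); a short read has the remainder
 * of the page zeroed before the page is marked uptodate.
 */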
static int cifs_readpage_worker(struct file *file, struct page *page,
        loff_t *poffset)
{
        char *read_data;
        int rc;

        /* Is the page cached? */
        rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
        if (rc == 0)
                goto read_complete;

        page_cache_get(page);
        read_data = kmap(page);
        /* for reads over a certain size could initiate async read ahead */

        rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

        if (rc < 0)
                goto io_error;
        else
                cFYI(1, "Bytes read %d", rc);

        file->f_path.dentry->d_inode->i_atime =
                current_fs_time(file->f_path.dentry->d_inode->i_sb);

        if (PAGE_CACHE_SIZE > rc)
                memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

        flush_dcache_page(page);
        SetPageUptodate(page);

        /* send this page to the cache */
        cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

        rc = 0;

io_error:
        kunmap(page);
        page_cache_release(page);

read_complete:
        return rc;
}
static int cifs_readpage(struct file *file, struct page *page)
{
        loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
        int rc = -EACCES;
        unsigned int xid;

        xid = get_xid();

        if (file->private_data == NULL) {
                rc = -EBADF;
                free_xid(xid);
                return rc;
        }

        cFYI(1, "readpage %p at offset %d 0x%x",
                 page, (int)offset, (int)offset);

        rc = cifs_readpage_worker(file, page, &offset);

        unlock_page(page);

        free_xid(xid);
        return rc;
}
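
/*
 * Return 1 if any handle on the inode is open for writing, 0 otherwise,
 * scanning the open file list under cifs_file_list_lock.
 */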
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
        struct cifsFileInfo *open_file;

        spin_lock(&cifs_file_list_lock);
        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
                if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
                        spin_unlock(&cifs_file_list_lock);
                        return 1;
                }
        }
        spin_unlock(&cifs_file_list_lock);
        return 0;
}
/* We do not want to update the file size from server for inodes
   open for write - to avoid races with writepage extending
   the file - in the future we could consider allowing
   refreshing the inode only on increases in the file size
   but this is tricky to do without racing with writebehind
   page caching in the current Linux kernel design */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
        if (!cifsInode)
                return true;

        if (is_inode_writable(cifsInode)) {
                /* This inode is open for write at least once */
                struct cifs_sb_info *cifs_sb;

                cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
                        /* since no page cache to corrupt on directio
                           we can change size safely */
                        return true;
                }

                if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
                        return true;

                return false;
        } else
                return true;
}
static int cifs_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
        loff_t page_start = pos & PAGE_MASK;
        loff_t i_size;
        struct page *page;
        int rc = 0;

        cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page) {
                rc = -ENOMEM;
                goto out;
        }

        if (PageUptodate(page))
                goto out;

        /*
         * If we write a full page it will be up to date, no need to read from
         * the server. If the write is short, we'll end up doing a sync write
         * instead.
         */
        if (len == PAGE_CACHE_SIZE)
                goto out;

        /*
         * optimize away the read when we have an oplock, and we're not
         * expecting to use any of the data we'd be reading in. That
         * is, when the page lies beyond the EOF, or straddles the EOF
         * and the write will cover all of the existing data.
         */
        if (CIFS_I(mapping->host)->clientCanCacheRead) {
                i_size = i_size_read(mapping->host);
                if (page_start >= i_size ||
                    (offset == 0 && (pos + len) >= i_size)) {
                        zero_user_segments(page, 0, offset,
                                           offset + len,
                                           PAGE_CACHE_SIZE);
                        /*
                         * PageChecked means that the parts of the page
                         * to which we're not writing are considered up
                         * to date. Once the data is copied to the
                         * page, it can be set uptodate.
                         */
                        SetPageChecked(page);
                        goto out;
                }
        }

        if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
                /*
                 * might as well read a page, it is fast enough. If we get
                 * an error, we don't need to return it. cifs_write_end will
                 * do a sync write instead since PG_uptodate isn't set.
                 */
                cifs_readpage_worker(file, page, &page_start);
        } else {
                /* we could try using another file handle if there is one -
                   but how would we lock it to prevent close of that handle
                   racing with this read? In any case
                   this will be written out by write_end so is fine */
        }
out:
        *pagep = page;
        return rc;
}
static int cifs_release_page(struct page *page, gfp_t gfp)
{
        if (PagePrivate(page))
                return 0;

        return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned long offset)
{
        struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

        if (offset == 0)
                cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}
static int cifs_launder_page(struct page *page)
{
        int rc = 0;
        loff_t range_start = page_offset(page);
        loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 0,
                .range_start = range_start,
                .range_end = range_end,
        };

        cFYI(1, "Launder page: %p", page);

        if (clear_page_dirty_for_io(page))
                rc = cifs_writepage_locked(page, &wbc);

        cifs_fscache_invalidate_page(page, page->mapping->host);
        return rc;
}
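
/*
 * Work item that handles an oplock break from the server: flush (and, when
 * all caching rights are lost, invalidate) cached data, push cached
 * byte-range locks to the server, and finally acknowledge the break unless
 * it was cancelled.
 */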
void cifs_oplock_break(struct work_struct *work)
{
        struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
                                                  oplock_break);
        struct inode *inode = cfile->dentry->d_inode;
        struct cifsInodeInfo *cinode = CIFS_I(inode);
        int rc = 0;

        if (inode && S_ISREG(inode->i_mode)) {
                if (cinode->clientCanCacheRead)
                        break_lease(inode, O_RDONLY);
                else
                        break_lease(inode, O_WRONLY);
                rc = filemap_fdatawrite(inode->i_mapping);
                if (cinode->clientCanCacheRead == 0) {
                        rc = filemap_fdatawait(inode->i_mapping);
                        mapping_set_error(inode->i_mapping, rc);
                        invalidate_remote_inode(inode);
                }
                cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
        }

        rc = cifs_push_locks(cfile);
        if (rc)
                cERROR(1, "Push locks rc = %d", rc);

        /*
         * Releasing a stale oplock after a recent reconnect of the smb
         * session using a now incorrect file handle is not a data integrity
         * issue, but do not bother sending an oplock release if the session
         * to the server is still disconnected, since the oplock has already
         * been released by the server in that case.
         */
        if (!cfile->oplock_break_cancelled) {
                rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->fid.netfid,
                                 current->tgid, 0, 0, 0, 0,
                                 LOCKING_ANDX_OPLOCK_RELEASE, false,
                                 cinode->clientCanCacheRead ? 1 : 0);
                cFYI(1, "Oplock release rc = %d", rc);
        }
}
const struct address_space_operations cifs_addr_ops = {
        .readpage = cifs_readpage,
        .readpages = cifs_readpages,
        .writepage = cifs_writepage,
        .writepages = cifs_writepages,
        .write_begin = cifs_write_begin,
        .write_end = cifs_write_end,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .releasepage = cifs_release_page,
        .invalidatepage = cifs_invalidate_page,
        .launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
        .readpage = cifs_readpage,
        .writepage = cifs_writepage,
        .writepages = cifs_writepages,
        .write_begin = cifs_write_begin,
        .write_end = cifs_write_end,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .releasepage = cifs_release_page,
        .invalidatepage = cifs_invalidate_page,
        .launder_page = cifs_launder_page,
};