* checked when writes are issued, so warn if we see a page writeback
* operation.
*/
- if (WARN_ON_ONCE(zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
- !(flags & IOMAP_DIRECT)))
+ if (WARN_ON_ONCE(zonefs_zone_is_seq(zi) && !(flags & IOMAP_DIRECT)))
return -EIO;
/*
{
struct zonefs_inode_info *zi = ZONEFS_I(inode);
- if (WARN_ON_ONCE(zi->i_ztype != ZONEFS_ZTYPE_CNV))
+ if (WARN_ON_ONCE(zonefs_zone_is_seq(zi)))
return -EIO;
if (WARN_ON_ONCE(offset >= i_size_read(inode)))
return -EIO;
struct file *swap_file, sector_t *span)
{
struct inode *inode = file_inode(swap_file);
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
- if (zi->i_ztype != ZONEFS_ZTYPE_CNV) {
+ if (zonefs_inode_is_seq(inode)) {
zonefs_err(inode->i_sb,
"swap file: not a conventional zone file\n");
return -EINVAL;
* only down to a 0 size, which is equivalent to a zone reset, and to
* the maximum file size, which is equivalent to a zone finish.
*/
- if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
+ if (!zonefs_zone_is_seq(zi))
return -EPERM;
if (!isize)
* Since only direct writes are allowed in sequential files, page cache
* flush is needed only for conventional zone files.
*/
- if (ZONEFS_I(inode)->i_ztype == ZONEFS_ZTYPE_CNV)
+ if (zonefs_inode_is_cnv(inode))
ret = file_write_and_wait_range(file, start, end);
if (!ret)
ret = blkdev_issue_flush(inode->i_sb->s_bdev);
static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
{
struct inode *inode = file_inode(vmf->vma->vm_file);
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
vm_fault_t ret;
if (unlikely(IS_IMMUTABLE(inode)))
* Sanity check: only conventional zone files can have shared
* writeable mappings.
*/
- if (WARN_ON_ONCE(zi->i_ztype != ZONEFS_ZTYPE_CNV))
+ if (WARN_ON_ONCE(zonefs_inode_is_seq(inode)))
return VM_FAULT_NOPAGE;
sb_start_pagefault(inode->i_sb);
* mappings are possible since there are no guarantees for write
* ordering between msync() and page cache writeback.
*/
- if (ZONEFS_I(file_inode(file))->i_ztype == ZONEFS_ZTYPE_SEQ &&
+ if (zonefs_inode_is_seq(file_inode(file)) &&
(vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
return -EINVAL;
return error;
}
- if (size && zi->i_ztype != ZONEFS_ZTYPE_CNV) {
+ if (size && zonefs_zone_is_seq(zi)) {
/*
* Note that we may be seeing completions out of order,
* but that is not a problem since a write completed
return -EINVAL;
if (iocb->ki_flags & IOCB_APPEND) {
- if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
+ if (zonefs_zone_is_cnv(zi))
return -EINVAL;
mutex_lock(&zi->i_truncate_mutex);
iocb->ki_pos = zi->i_wpoffset;
* as this can cause write reordering (e.g. the first aio gets EAGAIN
* on the inode lock but the second goes through but is now unaligned).
*/
- if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !sync &&
- (iocb->ki_flags & IOCB_NOWAIT))
+ if (zonefs_zone_is_seq(zi) && !sync && (iocb->ki_flags & IOCB_NOWAIT))
return -EOPNOTSUPP;
if (iocb->ki_flags & IOCB_NOWAIT) {
}
/* Enforce sequential writes (append only) in sequential zones */
- if (zi->i_ztype == ZONEFS_ZTYPE_SEQ) {
+ if (zonefs_zone_is_seq(zi)) {
mutex_lock(&zi->i_truncate_mutex);
if (iocb->ki_pos != zi->i_wpoffset) {
mutex_unlock(&zi->i_truncate_mutex);
else
ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops,
&zonefs_write_dio_ops, 0, NULL, 0);
- if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
+ if (zonefs_zone_is_seq(zi) &&
(ret > 0 || ret == -EIOCBQUEUED)) {
if (ret > 0)
count = ret;
struct iov_iter *from)
{
struct inode *inode = file_inode(iocb->ki_filp);
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
ssize_t ret;
/*
* Direct IO writes are mandatory for sequential zone files so that the
* write IO issuing order is preserved.
*/
- if (zi->i_ztype != ZONEFS_ZTYPE_CNV)
+ if (zonefs_inode_is_seq(inode))
return -EIO;
if (iocb->ki_flags & IOCB_NOWAIT) {
static inline bool zonefs_seq_file_need_wro(struct inode *inode,
struct file *file)
{
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
-
- if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
+ if (zonefs_inode_is_cnv(inode))
return false;
if (!(file->f_mode & FMODE_WRITE))
lockdep_assert_held(&zi->i_truncate_mutex);
- if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
+ if (zonefs_zone_is_cnv(zi))
return;
/*
zonefs_warn(inode->i_sb, "inode %lu: read-only zone\n",
inode->i_ino);
zi->i_flags |= ZONEFS_ZONE_READONLY;
- if (zi->i_ztype == ZONEFS_ZTYPE_CNV)
+ if (zonefs_zone_is_cnv(zi))
return zi->i_max_size;
return zi->i_wpoffset;
case BLK_ZONE_COND_FULL:
/* The write pointer of full zones is invalid. */
return zi->i_max_size;
default:
- if (zi->i_ztype == ZONEFS_ZTYPE_CNV)
+ if (zonefs_zone_is_cnv(zi))
return zi->i_max_size;
return (zone->wp - zone->start) << SECTOR_SHIFT;
}
* In all cases, warn about inode size inconsistency and handle the
* IO error according to the zone condition and to the mount options.
*/
- if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && isize != data_size)
+ if (zonefs_zone_is_seq(zi) && isize != data_size)
zonefs_warn(sb, "inode %lu: invalid size %lld (should be %lld)\n",
inode->i_ino, isize, data_size);
inode->i_ino = zone->start >> sbi->s_zone_sectors_shift;
inode->i_mode = S_IFREG | sbi->s_perm;
- zi->i_ztype = type;
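+ /* Sequential is the default zone type: only flag conventional zones */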
+ if (type == ZONEFS_ZTYPE_CNV)
+ zi->i_flags |= ZONEFS_ZONE_CNV;
+
zi->i_zsector = zone->start;
zi->i_zone_size = zone->len << SECTOR_SHIFT;
if (zi->i_zone_size > bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT &&
#define ZONEFS_ZONE_ACTIVE (1U << 2)
#define ZONEFS_ZONE_OFFLINE (1U << 3)
#define ZONEFS_ZONE_READONLY (1U << 4)
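+/* Zone type: set for conventional zone files, clear for sequential ones */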
+#define ZONEFS_ZONE_CNV (1U << 31)
/*
* In-memory inode data.
*/
struct zonefs_inode_info {
struct inode i_vnode;
- /* File zone type */
- enum zonefs_ztype i_ztype;
-
/* File zone start sector (512B unit) */
sector_t i_zsector;
return container_of(inode, struct zonefs_inode_info, i_vnode);
}
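+/*
+ * Zone type helpers: a file's zone is conventional if ZONEFS_ZONE_CNV is
+ * set and sequential otherwise, so the "seq" helpers are simply the
+ * negation of the "cnv" ones.
+ */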
+static inline bool zonefs_zone_is_cnv(struct zonefs_inode_info *zi)
+{
+ return zi->i_flags & ZONEFS_ZONE_CNV;
+}
+
+static inline bool zonefs_zone_is_seq(struct zonefs_inode_info *zi)
+{
+ return !zonefs_zone_is_cnv(zi);
+}
+
+static inline bool zonefs_inode_is_cnv(struct inode *inode)
+{
+ return zonefs_zone_is_cnv(ZONEFS_I(inode));
+}
+
+static inline bool zonefs_inode_is_seq(struct inode *inode)
+{
+ return zonefs_zone_is_seq(ZONEFS_I(inode));
+}
+
/*
* On-disk super block (block 0).
*/