[platform/kernel/linux-starfive.git] fs/buffer.c
index 0a7ba84..d9c6d1f 100644
@@ -152,7 +152,7 @@ static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
 
 /*
  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
- * unlock the buffer. This is what ll_rw_block uses too.
+ * unlock the buffer.
  */
 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
 {
@@ -491,8 +491,8 @@ int inode_has_buffers(struct inode *inode)
  * all already-submitted IO to complete, but does not queue any new
  * writes to the disk.
  *
- * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
- * you dirty the buffers, and then use osync_inode_buffers to wait for
+ * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
+ * as you dirty the buffers, and then use osync_inode_buffers to wait for
  * completion.  Any other dirty buffers which are not yet queued for
  * write will not be flushed to disk by the osync.
  */
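
The comment above describes the queue-then-wait pattern but does not show it. As a rough illustration (hypothetical helper, not code from this file; only write_dirty_buffer(), wait_on_buffer() and buffer_uptodate() are real kernel APIs):

#include <linux/buffer_head.h>

/* Sketch: queue each dirty buffer for write-out, then wait for all of them. */
static int example_osync_buffers(struct buffer_head **bhs, int nr)
{
	int i, err = 0;

	for (i = 0; i < nr; i++)
		write_dirty_buffer(bhs[i], REQ_SYNC);	/* queue the write */

	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);			/* wait for that write */
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;			/* write-back failed */
	}
	return err;
}
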
@@ -562,7 +562,7 @@ void write_boundary_block(struct block_device *bdev,
        struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
        if (bh) {
                if (buffer_dirty(bh))
-                       ll_rw_block(REQ_OP_WRITE, 1, &bh);
+                       write_dirty_buffer(bh, 0);
                put_bh(bh);
        }
 }
@@ -1342,23 +1342,12 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
 {
        struct buffer_head *bh = __getblk(bdev, block, size);
        if (likely(bh)) {
-               ll_rw_block(REQ_OP_READ | REQ_RAHEAD, 1, &bh);
+               bh_readahead(bh, REQ_RAHEAD);
                brelse(bh);
        }
 }
 EXPORT_SYMBOL(__breadahead);
 
-void __breadahead_gfp(struct block_device *bdev, sector_t block, unsigned size,
-                     gfp_t gfp)
-{
-       struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
-       if (likely(bh)) {
-               ll_rw_block(REQ_OP_READ | REQ_RAHEAD, 1, &bh);
-               brelse(bh);
-       }
-}
-EXPORT_SYMBOL(__breadahead_gfp);
-
 /**
  *  __bread_gfp() - reads a specified block and returns the bh
  *  @bdev: the block_device to read from
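
For context, bh_readahead() used in the hunk above is not defined in this file; the same series adds it to include/linux/buffer_head.h as a small inline wrapper around __bh_read(). Roughly (a paraphrase under that assumption, not the verbatim header code), it behaves like:

static inline void bh_readahead_sketch(struct buffer_head *bh, blk_opf_t op_flags)
{
	/* Only worth doing if the data is missing and nobody holds the lock. */
	if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
		if (!buffer_uptodate(bh))
			__bh_read(bh, op_flags, false);	/* fire and forget */
		else
			unlock_buffer(bh);		/* lost the race: already read */
	}
}
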
@@ -1464,19 +1453,15 @@ EXPORT_SYMBOL(set_bh_page);
 
 static void discard_buffer(struct buffer_head * bh)
 {
-       unsigned long b_state, b_state_old;
+       unsigned long b_state;
 
        lock_buffer(bh);
        clear_buffer_dirty(bh);
        bh->b_bdev = NULL;
-       b_state = bh->b_state;
-       for (;;) {
-               b_state_old = cmpxchg(&bh->b_state, b_state,
-                                     (b_state & ~BUFFER_FLAGS_DISCARD));
-               if (b_state_old == b_state)
-                       break;
-               b_state = b_state_old;
-       }
+       b_state = READ_ONCE(bh->b_state);
+       do {
+       } while (!try_cmpxchg(&bh->b_state, &b_state,
+                             b_state & ~BUFFER_FLAGS_DISCARD));
        unlock_buffer(bh);
 }
 
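
The discard_buffer() change relies on try_cmpxchg() writing the value it actually found back into its 'old' argument on failure, so the explicit re-read of the previous open-coded loop is no longer needed. A minimal generic sketch of the same idiom (hypothetical helper, not from this file):

/* Atomically clear 'mask' in *word, retrying if another CPU races with us. */
static void example_clear_bits(unsigned long *word, unsigned long mask)
{
	unsigned long old = READ_ONCE(*word);

	/* On failure, try_cmpxchg() refreshes 'old' with the current value. */
	while (!try_cmpxchg(word, &old, old & ~mask))
		;
}
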
@@ -1817,7 +1802,7 @@ done:
                /*
                 * The page was marked dirty, but the buffers were
                 * clean.  Someone wrote them back by hand with
-                * ll_rw_block/submit_bh.  A rare case.
+                * write_dirty_buffer/submit_bh.  A rare case.
                 */
                end_page_writeback(page);
 
@@ -2033,7 +2018,7 @@ int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
                if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
                    !buffer_unwritten(bh) &&
                     (block_start < from || block_end > to)) {
-                       ll_rw_block(REQ_OP_READ, 1, &bh);
+                       bh_read_nowait(bh, 0);
                        *wait_bh++=bh;
                }
        }
@@ -2352,7 +2337,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
        struct address_space *mapping = inode->i_mapping;
        const struct address_space_operations *aops = mapping->a_ops;
        struct page *page;
-       void *fsdata;
+       void *fsdata = NULL;
        int err;
 
        err = inode_newsize_ok(inode, size);
@@ -2378,7 +2363,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
        const struct address_space_operations *aops = mapping->a_ops;
        unsigned int blocksize = i_blocksize(inode);
        struct page *page;
-       void *fsdata;
+       void *fsdata = NULL;
        pgoff_t index, curidx;
        loff_t curpos;
        unsigned zerofrom, offset, len;
@@ -2593,11 +2578,9 @@ int block_truncate_page(struct address_space *mapping,
                set_buffer_uptodate(bh);
 
        if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
-               err = -EIO;
-               ll_rw_block(REQ_OP_READ, 1, &bh);
-               wait_on_buffer(bh);
+               err = bh_read(bh, 0);
                /* Uhhuh. Read error. Complain and punt. */
-               if (!buffer_uptodate(bh))
+               if (err < 0)
                        goto unlock;
        }
 
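
bh_read() used in this hunk also lives outside this file, alongside the other bh_read*() wrappers; as far as the diff shows, it returns a negative errno only when the synchronous read fails, which is why a single 'err < 0' test replaces the old ll_rw_block()/wait_on_buffer()/buffer_uptodate() dance. A hypothetical caller:

/* Sketch: make sure bh's data is valid before rewriting part of the block. */
static int example_prepare_block(struct buffer_head *bh)
{
	if (!buffer_uptodate(bh)) {
		int err = bh_read(bh, 0);	/* lock, submit and wait as needed */

		if (err < 0)
			return err;		/* real I/O error, give up */
	}
	/* bh->b_data is now safe to read and partially rewrite. */
	return 0;
}
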
@@ -2725,61 +2708,6 @@ void submit_bh(blk_opf_t opf, struct buffer_head *bh)
 }
 EXPORT_SYMBOL(submit_bh);
 
-/**
- * ll_rw_block: low-level access to block devices (DEPRECATED)
- * @opf: block layer request operation and flags.
- * @nr: number of &struct buffer_heads in the array
- * @bhs: array of pointers to &struct buffer_head
- *
- * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
- * requests an I/O operation on them, either a %REQ_OP_READ or a %REQ_OP_WRITE.
- * @opf contains flags modifying the detailed I/O behavior, most notably
- * %REQ_RAHEAD.
- *
- * This function drops any buffer that it cannot get a lock on (with the
- * BH_Lock state bit), any buffer that appears to be clean when doing a write
- * request, and any buffer that appears to be up-to-date when doing read
- * request.  Further it marks as clean buffers that are processed for
- * writing (the buffer cache won't assume that they are actually clean
- * until the buffer gets unlocked).
- *
- * ll_rw_block sets b_end_io to simple completion handler that marks
- * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
- * any waiters. 
- *
- * All of the buffers must be for the same device, and must also be a
- * multiple of the current approved size for the device.
- */
-void ll_rw_block(const blk_opf_t opf, int nr, struct buffer_head *bhs[])
-{
-       const enum req_op op = opf & REQ_OP_MASK;
-       int i;
-
-       for (i = 0; i < nr; i++) {
-               struct buffer_head *bh = bhs[i];
-
-               if (!trylock_buffer(bh))
-                       continue;
-               if (op == REQ_OP_WRITE) {
-                       if (test_clear_buffer_dirty(bh)) {
-                               bh->b_end_io = end_buffer_write_sync;
-                               get_bh(bh);
-                               submit_bh(opf, bh);
-                               continue;
-                       }
-               } else {
-                       if (!buffer_uptodate(bh)) {
-                               bh->b_end_io = end_buffer_read_sync;
-                               get_bh(bh);
-                               submit_bh(opf, bh);
-                               continue;
-                       }
-               }
-               unlock_buffer(bh);
-       }
-}
-EXPORT_SYMBOL(ll_rw_block);
-
 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
 {
        lock_buffer(bh);
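
Taken together, the hunks above make the removal of ll_rw_block() a mechanical conversion of its call sites; summarised as a comment (derived only from the replacements visible in this diff):

/*
 * ll_rw_block(REQ_OP_READ, 1, &bh)                     ->  bh_read_nowait(bh, 0)
 * ll_rw_block(REQ_OP_READ | REQ_RAHEAD, 1, &bh)        ->  bh_readahead(bh, REQ_RAHEAD)
 * ll_rw_block(REQ_OP_READ, 1, &bh); wait_on_buffer(bh) ->  err = bh_read(bh, 0)
 * ll_rw_block(REQ_OP_WRITE, 1, &bh)                    ->  write_dirty_buffer(bh, 0)
 */
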
@@ -3026,29 +2954,69 @@ int bh_uptodate_or_lock(struct buffer_head *bh)
 EXPORT_SYMBOL(bh_uptodate_or_lock);
 
 /**
- * bh_submit_read - Submit a locked buffer for reading
+ * __bh_read - Submit read for a locked buffer
  * @bh: struct buffer_head
+ * @op_flags: extra REQ_* flags to OR with REQ_OP_READ
+ * @wait: wait until the read completes
  *
- * Returns zero on success and -EIO on error.
+ * Returns zero on success (or when not waiting), and -EIO on read error.
  */
-int bh_submit_read(struct buffer_head *bh)
+int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
 {
-       BUG_ON(!buffer_locked(bh));
+       int ret = 0;
 
-       if (buffer_uptodate(bh)) {
-               unlock_buffer(bh);
-               return 0;
-       }
+       BUG_ON(!buffer_locked(bh));
 
        get_bh(bh);
        bh->b_end_io = end_buffer_read_sync;
-       submit_bh(REQ_OP_READ, bh);
-       wait_on_buffer(bh);
-       if (buffer_uptodate(bh))
-               return 0;
-       return -EIO;
+       submit_bh(REQ_OP_READ | op_flags, bh);
+       if (wait) {
+               wait_on_buffer(bh);
+               if (!buffer_uptodate(bh))
+                       ret = -EIO;
+       }
+       return ret;
+}
+EXPORT_SYMBOL(__bh_read);
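
A hypothetical direct caller of __bh_read() (most users are expected to go through the bh_read*() wrappers): the buffer must already be locked, and end_buffer_read_sync() releases the lock on completion, so the caller must not unlock after a successful submission.

static int example_read_locked_bh(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);	/* nothing to read */
		return 0;
	}
	/* Synchronous read; the completion handler drops the buffer lock. */
	return __bh_read(bh, 0, true);
}
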
+
+/**
+ * __bh_read_batch - Submit read for a batch of unlocked buffers
+ * @nr: number of buffers in the batch
+ * @bhs: a batch of struct buffer_head
+ * @op_flags: extra REQ_* flags to OR with REQ_OP_READ
+ * @force_lock: wait for the buffer lock if set, otherwise skip any buffer
+ *              that cannot be locked immediately.
+ *
+ * Submits the reads without waiting for completion; buffers that are
+ * already up to date are skipped.
+ */
+void __bh_read_batch(int nr, struct buffer_head *bhs[],
+                    blk_opf_t op_flags, bool force_lock)
+{
+       int i;
+
+       for (i = 0; i < nr; i++) {
+               struct buffer_head *bh = bhs[i];
+
+               if (buffer_uptodate(bh))
+                       continue;
+
+               if (force_lock)
+                       lock_buffer(bh);
+               else
+                       if (!trylock_buffer(bh))
+                               continue;
+
+               if (buffer_uptodate(bh)) {
+                       unlock_buffer(bh);
+                       continue;
+               }
+
+               bh->b_end_io = end_buffer_read_sync;
+               get_bh(bh);
+               submit_bh(REQ_OP_READ | op_flags, bh);
+       }
 }
-EXPORT_SYMBOL(bh_submit_read);
+EXPORT_SYMBOL(__bh_read_batch);
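
And a hypothetical caller sketch for __bh_read_batch(), e.g. opportunistic readahead over a group of metadata buffers; with force_lock == false, buffers that are already locked (typically under I/O) are simply skipped:

static void example_readahead_batch(struct buffer_head **bhs, int nr)
{
	/* Fire-and-forget reads; completion unlocks each submitted buffer. */
	__bh_read_batch(nr, bhs, REQ_RAHEAD, false);
}
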
 
 void __init buffer_init(void)
 {