/*
 * internal.h
 */
-struct netfs_io_request *netfs_alloc_request(const struct netfs_request_ops *ops,
+struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
+ struct file *file,
+ const struct netfs_request_ops *ops,
void *netfs_priv,
- struct file *file);
+ loff_t start, size_t len,
+ enum netfs_io_origin origin);
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async);
void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
		       enum netfs_rreq_ref_trace what);
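
/*
 * objects.c
 */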
/*
* Allocate an I/O request and initialise it.
*/
-struct netfs_io_request *netfs_alloc_request(
- const struct netfs_request_ops *ops, void *netfs_priv,
- struct file *file)
+struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
+ struct file *file,
+ const struct netfs_request_ops *ops,
+ void *netfs_priv,
+ loff_t start, size_t len,
+ enum netfs_io_origin origin)
{
static atomic_t debug_ids;
struct netfs_io_request *rreq;

rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
if (rreq) {
+ rreq->start = start;
+ rreq->len = len;
+ rreq->origin = origin;
rreq->netfs_ops = ops;
rreq->netfs_priv = netfs_priv;
+ rreq->mapping = mapping;
rreq->inode = file_inode(file);
rreq->i_size = i_size_read(rreq->inode);
rreq->debug_id = atomic_inc_return(&debug_ids);
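
/* In netfs_readahead(): */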
if (readahead_count(ractl) == 0)
goto cleanup;
- rreq = netfs_alloc_request(ops, netfs_priv, ractl->file);
+ rreq = netfs_alloc_request(ractl->mapping, ractl->file,
+ ops, netfs_priv,
+ readahead_pos(ractl),
+ readahead_length(ractl),
+ NETFS_READAHEAD);
if (!rreq)
goto cleanup;
- rreq->mapping = ractl->mapping;
- rreq->start = readahead_pos(ractl);
- rreq->len = readahead_length(ractl);
if (ops->begin_cache_operation) {
ret = ops->begin_cache_operation(rreq);
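
/* In netfs_readpage(): */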
_enter("%lx", folio_index(folio));
- rreq = netfs_alloc_request(ops, netfs_priv, file);
+ rreq = netfs_alloc_request(folio->mapping, file, ops, netfs_priv,
+ folio_file_pos(folio), folio_size(folio),
+ NETFS_READPAGE);
if (!rreq) {
if (netfs_priv)
ops->cleanup(folio_file_mapping(folio), netfs_priv);
folio_unlock(folio);
return -ENOMEM;
}
- rreq->mapping = folio_file_mapping(folio);
- rreq->start = folio_file_pos(folio);
- rreq->len = folio_size(folio);
if (ops->begin_cache_operation) {
ret = ops->begin_cache_operation(rreq);
}
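
/* In netfs_write_begin(): */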
ret = -ENOMEM;
- rreq = netfs_alloc_request(ops, netfs_priv, file);
+ rreq = netfs_alloc_request(mapping, file, ops, netfs_priv,
+ folio_file_pos(folio), folio_size(folio),
+ NETFS_READ_FOR_WRITE);
if (!rreq)
goto error;
- rreq->mapping = folio_file_mapping(folio);
- rreq->start = folio_file_pos(folio);
- rreq->len = folio_size(folio);
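/* The target folio must stay locked over the read: netfs_write_begin()
 * hands it back to the caller still locked.
 */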
rreq->no_unlock_folio = folio_index(folio);
__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
netfs_priv = NULL;
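
/*
 * linux/netfs.h
 */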
#define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */
};
+enum netfs_io_origin {
+ NETFS_READAHEAD, /* This read was triggered by readahead */
+ NETFS_READPAGE, /* This read is a synchronous read */
+ NETFS_READ_FOR_WRITE, /* This read is to prepare a write */
+} __mode(byte);
+
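/*
 * Illustrative sketch, not part of the patch: with the origin recorded on
 * the request, later processing can branch on it directly instead of
 * inferring the caller from context.  The helper name is hypothetical and
 * would sit below the full struct netfs_io_request definition.
 */
static inline bool netfs_rreq_is_write_prep(const struct netfs_io_request *rreq)
{
	return rreq->origin == NETFS_READ_FOR_WRITE;
}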
/*
* Descriptor for an I/O helper request. This is used to make multiple I/O
* operations to a variety of data stores and then stitch the result together.
size_t submitted; /* Amount submitted for I/O so far */
size_t len; /* Length of the request */
short error; /* 0 or error that occurred */
+ enum netfs_io_origin origin; /* Origin of the request */
loff_t i_size; /* Size of the file */
loff_t start; /* Start position */
pgoff_t no_unlock_folio; /* Don't unlock this folio after read */
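
/*
 * trace/events/netfs.h
 */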
EM(netfs_read_trace_readpage, "READPAGE ") \
E_(netfs_read_trace_write_begin, "WRITEBEGN")
+#define netfs_rreq_origins \
+ EM(NETFS_READAHEAD, "RA") \
+ EM(NETFS_READPAGE, "RP") \
+ E_(NETFS_READ_FOR_WRITE, "RW")
+
#define netfs_rreq_traces \
EM(netfs_rreq_trace_assess, "ASSESS ") \
EM(netfs_rreq_trace_copy, "COPY ") \
#define E_(a, b) TRACE_DEFINE_ENUM(a);
netfs_read_traces;
+netfs_rreq_origins;
netfs_rreq_traces;
netfs_sreq_sources;
netfs_sreq_traces;
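
/*
 * Both expansions of EM()/E_() pick up the new table: the block above
 * emits a TRACE_DEFINE_ENUM() for each origin value, and a later
 * redefinition of the macros turns the same table into the symbol map
 * consumed by __print_symbolic().
 */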
TP_STRUCT__entry(
__field(unsigned int, rreq )
__field(unsigned int, flags )
+ __field(enum netfs_io_origin, origin )
__field(enum netfs_rreq_trace, what )
),
TP_fast_assign(
__entry->rreq = rreq->debug_id;
__entry->flags = rreq->flags;
+ __entry->origin = rreq->origin;
__entry->what = what;
),
- TP_printk("R=%08x %s f=%02x",
+ TP_printk("R=%08x %s %s f=%02x",
__entry->rreq,
+ __print_symbolic(__entry->origin, netfs_rreq_origins),
__print_symbolic(__entry->what, netfs_rreq_traces),
__entry->flags)
);
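
/*
 * Illustrative only, not output from a real run: with the origin column
 * added, a netfs_rreq trace line now reads along the lines of
 *
 *	R=0000000c RA ASSESS f=22
 *
 * where "RA" decodes the request origin and "ASSESS" the trace point.
 */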