	return ti->len - target_offset;
}
-static sector_t max_io_len(struct dm_target *ti, sector_t sector)
+static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
+			     unsigned int max_granularity)
{
	sector_t target_offset = dm_target_offset(ti, sector);
	sector_t len = max_io_len_target_boundary(ti, target_offset);
	 * explains why stacked chunk_sectors based splitting via
	 * bio_split_to_limits() isn't possible here.
	 */
-	if (!ti->max_io_len)
+	if (!max_granularity)
		return len;
	return min_t(sector_t, len,
		     min(queue_max_sectors(ti->table->md->queue),
-			 blk_chunk_sectors_left(target_offset, ti->max_io_len)));
+			 blk_chunk_sectors_left(target_offset, max_granularity)));
+}
+
+static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)
+{
+	return __max_io_len(ti, sector, ti->max_io_len);
}
int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
}
static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
-					unsigned int num_bios)
+					unsigned int num_bios,
+					unsigned int max_granularity)
{
	unsigned int len, bios;
	len = min_t(sector_t, ci->sector_count,
-		    max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
+		    __max_io_len(ti, ci->sector, max_granularity));
	atomic_add(num_bios, &ci->io->io_count);
	bios = __send_duplicate_bios(ci, ti, num_bios, &len);
					  struct dm_target *ti)
{
	unsigned int num_bios = 0;
+	unsigned int max_granularity = 0;
	switch (bio_op(ci->bio)) {
	case REQ_OP_DISCARD:
		num_bios = ti->num_discard_bios;
+		if (ti->max_discard_granularity) {
+			struct queue_limits *limits =
+				dm_get_queue_limits(ti->table->md);
+			max_granularity = limits->max_discard_sectors;
+		}
		break;
	case REQ_OP_SECURE_ERASE:
		num_bios = ti->num_secure_erase_bios;
	if (unlikely(!num_bios))
		return BLK_STS_NOTSUPP;
-	__send_changing_extent_only(ci, ti, num_bios);
+	__send_changing_extent_only(ci, ti, num_bios, max_granularity);
	return BLK_STS_OK;
}
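
For context, a minimal sketch of how a target constructor might opt in to this finer-grained discard splitting, using only the interfaces visible in the hunks above (dm_set_target_max_io_len() and the ti->max_discard_granularity flag tested in __process_abnormal_io()). The example_ctr() name, the chunk size value, and the num_discard_bios setup are illustrative placeholders, not part of this patch:

#include <linux/device-mapper.h>

/*
 * Hypothetical target ctr excerpt (names are made up): limit all I/O,
 * including discards, to the target's chunk size.
 */
static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	sector_t chunk_size = 128;	/* placeholder chunk size, in sectors */
	int r;

	/* cap per-bio I/O at chunk boundaries */
	r = dm_set_target_max_io_len(ti, chunk_size);
	if (r)
		return r;

	ti->num_discard_bios = 1;
	/*
	 * Ask DM core to split discards via __max_io_len() (i.e. on
	 * max_io_len boundaries) rather than only at target boundaries.
	 */
	ti->max_discard_granularity = true;

	return 0;
}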