 	return 1;
 }
 
+static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
+					 sector_t start, sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	/* Non-zero means this device cannot handle WRITE SAME. */
+	return q && !q->limits.max_write_same_sectors;
+}
+
+static bool dm_table_supports_write_same(struct dm_table *t)
+{
+	struct dm_target *ti;
+	unsigned i = 0;
+
+	while (i < dm_table_get_num_targets(t)) {
+		ti = dm_table_get_target(t, i++);
+
+		if (!ti->num_write_same_requests)
+			return false;
+
+		/*
+		 * iterate_devices() propagates the callback's return value,
+		 * so a non-zero result means some device lacks support.
+		 */
+		if (!ti->type->iterate_devices ||
+		    ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
+			return false;
+	}
+
+	return true;
+}
+
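Why the predicate is phrased negatively: the iterate_devices() callout forwards the callback's return value, so a device_not_* check makes the callout go non-zero as soon as one device fails. Here is a sketch of that convention, modeled on dm-linear's linear_iterate_devices(); the struct linear_c shown is a minimal stand-in for that target's private context and is not part of this patch:

#include <linux/device-mapper.h>

/* Minimal single-device target context (dm-linear keeps dev + start). */
struct linear_c {
	struct dm_dev *dev;
	sector_t start;
};

static int linear_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct linear_c *lc = ti->private;

	/*
	 * The callback's return value is forwarded as-is, so a
	 * "device_not_*" predicate makes the callout non-zero as soon
	 * as the device fails the check.
	 */
	return fn(ti, lc->dev, lc->start, ti->len, data);
}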
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 			       struct queue_limits *limits)
 {
 	else
 		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
 
-	q->limits.max_write_same_sectors = 0;
+	if (!dm_table_supports_write_same(t))
+		q->limits.max_write_same_sectors = 0;
 
 	dm_table_set_integrity(t);
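The net effect is that a stacked dm device advertises a non-zero max_write_same_sectors only when every target and every underlying device supports WRITE SAME. A minimal caller-side sketch, assuming the block-layer helpers bdev_write_same() and blkdev_issue_write_same() introduced by the WRITE SAME series; zero_region() is a hypothetical wrapper, not from this patch:

#include <linux/blkdev.h>

/*
 * Hypothetical helper: bail out if the (possibly stacked) device
 * reports a zero WRITE SAME limit, which is what the hunk above
 * arranges when dm_table_supports_write_same() fails.
 */
static int zero_region(struct block_device *bdev, sector_t sector,
		       sector_t nr_sects, struct page *page)
{
	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	return blkdev_issue_write_same(bdev, sector, nr_sects,
				       GFP_KERNEL, page);
}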
 	 */
 	unsigned num_discard_requests;
 
+	/*
+	 * The number of WRITE SAME requests that will be submitted to the target.
+	 */
+	unsigned num_write_same_requests;
+
 	/* target specific data */
 	void *private;
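A target advertises support by setting the new field from its constructor, the same way in-tree targets such as dm-linear opt in. A sketch with a hypothetical my_target_ctr(); the other fields shown already exist in struct dm_target:

static int my_target_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	/* ... parse argv and set up ti->private ... */

	ti->num_flush_requests = 1;
	ti->num_discard_requests = 1;
	/* Non-zero opts in: dm core will submit WRITE SAME requests. */
	ti->num_write_same_requests = 1;

	return 0;
}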