2 * utils - miscellaneous device utilities for cryptsetup
4 * Copyright (C) 2004, Christophe Saout <christophe@saout.de>
5 * Copyright (C) 2004-2007, Clemens Fruhwirth <clemens@endorphin.org>
6 * Copyright (C) 2009-2011, Red Hat, Inc. All rights reserved.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
29 #include <sys/types.h>
31 #include <sys/types.h>
33 #include <sys/ioctl.h>
36 #include <sys/resource.h>
38 #include "libcryptsetup.h"
/* Last error message recorded by set_error()/set_error_va();
 * heap-allocated by vasprintf(), NULL when no error is pending. */
41 static char *error=NULL;
43 __attribute__((format(printf, 1, 0)))
/* Format fmt/va into the module-level 'error' buffer (vasprintf
 * allocates it) and strip a trailing newline so callers get a clean
 * single-line message.  NOTE(review): this excerpt omits several lines
 * (e.g. freeing any previous 'error' value and handling vasprintf
 * failure) -- confirm those paths in the full source. */
44 void set_error_va(const char *fmt, va_list va)
55 	r = vasprintf(&error, fmt, va);
	/* On success r is the formatted length; drop a trailing '\n'. */
62 	if (r && error[r - 1] == '\n')
66 __attribute__((format(printf, 1, 2)))
/* Printf-style convenience wrapper over set_error_va(); records the
 * message for later retrieval via get_error(). */
67 void set_error(const char *fmt, ...)
72 	set_error_va(fmt, va);
/* Return the last error message recorded by set_error(), or NULL. */
76 const char *get_error(void)
/* Memory-buffer alignment required for I/O on fd.  Queries
 * _PC_REC_XFER_ALIGN via fpathconf() where available, otherwise (or on
 * failure) falls back to DEFAULT_MEM_ALIGNMENT. */
81 static int get_alignment(int fd)
83 	int alignment = DEFAULT_MEM_ALIGNMENT;
85 #ifdef _PC_REC_XFER_ALIGN
86 	alignment = fpathconf(fd, _PC_REC_XFER_ALIGN);
	/* fpathconf() reports -1 for error/no-limit; restore the default
	 * (the guarding 'if' is not visible in this excerpt). */
88 	alignment = DEFAULT_MEM_ALIGNMENT;
/* Allocate 'size' bytes aligned to 'alignment'.  Returns the aligned
 * pointer, and stores in *base the pointer that must later be passed to
 * free().  Uses posix_memalign() when available; otherwise
 * over-allocates by 'alignment' bytes and rounds the address up. */
93 static void *aligned_malloc(void **base, int size, int alignment)
95 #ifdef HAVE_POSIX_MEMALIGN
96 	return posix_memalign(base, alignment, size) ? NULL : *base;
98 /* Credits go to Michal's padlock patches for this alignment code */
101 	ptr  = malloc(size + alignment);
102 	if(ptr == NULL) return NULL;
	/* Advance ptr to the next 'alignment' boundary if misaligned. */
105 	if(alignment > 1 && ((long)ptr & (alignment - 1))) {
106 		ptr += alignment - ((long)(ptr) & (alignment - 1));
/* Query the kernel read-ahead setting for block device 'dev' via the
 * BLKRAGET ioctl.  On success stores the value in *read_ahead and
 * returns 1; returns 0 on failure. */
112 int device_read_ahead(const char *dev, uint32_t *read_ahead)
115 	long read_ahead_long;
117 	if ((fd = open(dev, O_RDONLY)) < 0)
	/* ioctl returns 0 on success, so invert into a boolean result. */
120 	r = ioctl(fd, BLKRAGET, &read_ahead_long) ? 0 : 1;
124 	*read_ahead = (uint32_t) read_ahead_long;
/* Logical sector size of the open block device fd (BLKSSZGET ioctl). */
129 static int sector_size(int fd)
132 	if (ioctl(fd,BLKSSZGET, &bsize) < 0)
/* Convenience wrapper: open 'device' read-only and query its sector
 * size via sector_size(). */
138 int sector_size_for_device(const char *device)
140 	int fd = open(device, O_RDONLY);
/* Write 'count' bytes from orig_buf to fd using sector-sized transfers
 * (suitable for O_DIRECT file descriptors).  If orig_buf is not aligned
 * for direct I/O, the data is first copied into an aligned bounce
 * buffer.  A trailing partial sector ("hangover") is handled
 * read-modify-write: the existing sector is read back, its head is
 * overwritten with the caller's tail bytes, and the whole sector is
 * rewritten.  NOTE(review): error paths and cleanup lines are omitted
 * from this excerpt; comments describe only what is visible. */
149 ssize_t write_blockwise(int fd, void *orig_buf, size_t count)
151 	void *hangover_buf, *hangover_buf_base = NULL;
152 	void *buf, *buf_base = NULL;
153 	int r, hangover, solid, bsize, alignment;
156 	if ((bsize = sector_size(fd)) < 0)
	/* Split count into whole sectors ('solid') + leftover bytes. */
159 	hangover = count % bsize;
160 	solid = count - hangover;
161 	alignment = get_alignment(fd);
	/* Bounce through an aligned buffer when the caller's is misaligned. */
163 	if ((long)orig_buf & (alignment - 1)) {
164 		buf = aligned_malloc(&buf_base, count, alignment);
167 		memcpy(buf, orig_buf, count);
171 	r = write(fd, buf, solid);
172 	if (r < 0 || r != solid)
176 		hangover_buf = aligned_malloc(&hangover_buf_base, bsize, alignment);
		/* Read-modify-write of the final, partially covered sector. */
180 		r = read(fd, hangover_buf, bsize);
181 		if (r < 0 || r != bsize)
		/* Step back so the rewritten sector lands where it was read. */
184 		r = lseek(fd, -bsize, SEEK_CUR);
187 		memcpy(hangover_buf, (char*)buf + solid, hangover);
189 		r = write(fd, hangover_buf, bsize);
190 		if (r < 0 || r != bsize)
195 		free(hangover_buf_base);
/* Read 'count' bytes from fd into orig_buf using sector-sized
 * transfers (mirror of write_blockwise, suitable for O_DIRECT).  An
 * aligned bounce buffer is used when orig_buf is misaligned; a final
 * partial sector is read into its own aligned buffer and only the
 * needed 'hangover' bytes are copied out.  NOTE(review): error paths
 * are omitted from this excerpt. */
201 ssize_t read_blockwise(int fd, void *orig_buf, size_t count) {
202 	void *hangover_buf, *hangover_buf_base = NULL;
203 	void *buf, *buf_base = NULL;
204 	int r, hangover, solid, bsize, alignment;
207 	if ((bsize = sector_size(fd)) < 0)
	/* Split count into whole sectors ('solid') + leftover bytes. */
210 	hangover = count % bsize;
211 	solid = count - hangover;
212 	alignment = get_alignment(fd);
213 	if ((long)orig_buf & (alignment - 1)) {
215 	buf = aligned_malloc(&buf_base, count, alignment);
221 	r = read(fd, buf, solid);
222 	if(r < 0 || r != solid)
226 		hangover_buf = aligned_malloc(&hangover_buf_base, bsize, alignment);
		/* Read the whole final sector, keep only 'hangover' bytes. */
229 		r = read(fd, hangover_buf, bsize);
230 		if (r < 0 || r != bsize)
233 		memcpy((char *)buf + solid, hangover_buf, hangover);
237 		free(hangover_buf_base);
	/* If a bounce buffer was used, copy the result back to the caller. */
238 	if (buf != orig_buf) {
239 		memcpy(orig_buf, buf, count);
246  * Combines llseek with blockwise write. write_blockwise can already deal with short writes,
247  * but we also need a function to deal with short writes at the start. This information
248  * is implicitly included in the read/write offset, which cannot be set to non-aligned
249  * boundaries. Hence, we combine llseek with write.
/* Seek to 'offset' (which may fall mid-sector) and write 'count'
 * bytes.  A leading partial sector ("frontHang") is handled
 * read-modify-write through an aligned pad buffer; the remaining,
 * now sector-aligned data is delegated to write_blockwise().
 * NOTE(review): several lines (early exits, count/buf advancement
 * after the front pad) are omitted from this excerpt. */
251 ssize_t write_lseek_blockwise(int fd, char *buf, size_t count, off_t offset) {
253 	void *frontPadBuf_base = NULL;
254 	int r, bsize, frontHang;
255 	size_t innerCount = 0;
258 	if ((bsize = sector_size(fd)) < 0)
	/* Bytes by which 'offset' overshoots the previous sector boundary. */
261 	frontHang = offset % bsize;
	/* Position on the sector boundary containing 'offset'. */
263 	if (lseek(fd, offset - frontHang, SEEK_SET) < 0)
267 		frontPadBuf = aligned_malloc(&frontPadBuf_base,
268 					     bsize, get_alignment(fd));
		/* Read the existing sector so untouched bytes are preserved. */
272 		r = read(fd, frontPadBuf, bsize);
273 		if (r < 0 || r != bsize)
		/* How much of the caller's data fits in this first sector. */
276 		innerCount = bsize - frontHang;
277 		if (innerCount > count)
280 		memcpy(frontPadBuf + frontHang, buf, innerCount);
		/* Rewind and rewrite the merged sector in place. */
282 		if (lseek(fd, offset - frontHang, SEEK_SET) < 0)
285 		r = write(fd, frontPadBuf, bsize);
286 		if (r < 0 || r != bsize)
	/* Remaining data starts sector-aligned; hand off to write_blockwise. */
293 	ret = count ? write_blockwise(fd, buf, count) : 0;
297 	free(frontPadBuf_base);
/* Verify that 'device' exists, is a block device, can be opened with
 * the requested 'mode' (plus O_DIRECT|O_SYNC) and that its first
 * sector is readable.  Failures are reported via log_err/log_verbose.
 * NOTE(review): return-value lines are omitted from this excerpt. */
302 int device_ready(struct crypt_device *cd, const char *device, int mode)
309 	if(stat(device, &st) < 0) {
310 		log_err(cd, _("Device %s doesn't exist or access denied.\n"), device);
314 	if (!S_ISBLK(st.st_mode))
317 	log_dbg("Trying to open and read device %s.", device);
	/* O_DIRECT|O_SYNC forces a real device access, not a cache hit. */
318 	devfd = open(device, mode | O_DIRECT | O_SYNC);
320 		log_err(cd, _("Cannot open device %s for %s%s access.\n"), device,
321 			(mode & O_EXCL) ? _("exclusive ") : "",
322 			(mode & O_RDWR) ? _("writable") : _("read-only"));
326 	/* Try to read first sector */
327 	s = read_blockwise(devfd, buf, sizeof(buf));
328 	if (s < 0 || s != sizeof(buf)) {
329 		log_verbose(cd, _("Cannot read device %s.\n"), device);
	/* Do not keep device data lying around on the stack. */
333 	memset(buf, 0, sizeof(buf));
/* Store the size of block device 'device' in bytes into *size
 * (BLKGETSIZE64 ioctl). */
339 int device_size(const char *device, uint64_t *size)
343 	devfd = open(device, O_RDONLY);
347 	if (ioctl(devfd, BLKGETSIZE64, size) < 0)
/* Probe 'device': report via *readonly whether it is read-only and via
 * *size its size in 512-byte sectors.  DEV_EXCL requests an exclusive
 * open so that a device in use (e.g. mounted) is refused.
 * NOTE(review): the lines setting 'flags' (presumably O_EXCL for
 * DEV_EXCL) and *readonly = 1 on EROFS are omitted from this excerpt. */
354 static int get_device_infos(const char *device, enum devcheck device_check,
355 			    int *readonly, uint64_t *size)
358 	unsigned long size_small;
365 	if (stat(device, &st) < 0)
368 	/* never wipe header on mounted device */
369 	if (device_check == DEV_EXCL && S_ISBLK(st.st_mode))
372 	/* Try to open read-write to check whether it is a read-only device */
373 	fd = open(device, O_RDWR | flags);
374 	if (fd == -1 && errno == EROFS) {
376 		fd = open(device, O_RDONLY | flags);
	/* EBUSY on an exclusive open means the device is already in use. */
379 	if (fd == -1 && device_check == DEV_EXCL && errno == EBUSY)
385 	/* If the device can be opened read-write, i.e. readonly is still 0, then
386 	 * check whether BLKROGET says that it is read-only. E.g. read-only loop
387 	 * devices may be opened read-write but are read-only according to BLKROGET
389 	if (*readonly == 0 && (r = ioctl(fd, BLKROGET, readonly)) < 0)
	/* Prefer the 64-bit size ioctl; convert bytes to 512B sectors. */
392 	if (ioctl(fd, BLKGETSIZE64, size) >= 0) {
393 		*size >>= SECTOR_SHIFT;
	/* Fall back to the legacy ioctl, which reports sectors directly. */
398 	if (ioctl(fd, BLKGETSIZE, &size_small) >= 0) {
399 		*size = (uint64_t)size_small;
/* Validate the requested *offset/*size against the real geometry of
 * 'device' (via get_device_infos), adjust *size when it was not given,
 * and set CRYPT_ACTIVATE_READONLY in *flags for read-only devices.
 * DEV_SHARED additionally checks that the requested segment does not
 * overlap crypt segments already in use.  NOTE(review): parameter list
 * and several branches are only partially visible in this excerpt. */
410 int device_check_and_adjust(struct crypt_device *cd,
412 			    enum devcheck device_check,
417 	int r, real_readonly;
423 	r = get_device_infos(device, device_check, &real_readonly, &real_size);
426 			log_err(cd, _("Cannot use device %s which is in use "
427 				      "(already mapped or mounted).\n"),
430 			log_err(cd, _("Cannot get info about device %s.\n"),
435 	if (*offset >= real_size) {
436 		log_err(cd, _("Requested offset is beyond real size of device %s.\n"),
444 			log_err(cd, _("Device %s has zero size.\n"), device);
450 	/* in case of size is set by parameter */
451 	if ((real_size - *offset) < *size) {
452 		log_dbg("Device %s: offset = %" PRIu64 " requested size = %" PRIu64
453 			", backing device size = %" PRIu64,
454 			device, *offset, *size, real_size);
455 		log_err(cd, _("Device %s is too small.\n"), device);
459 	if (device_check == DEV_SHARED) {
460 		log_dbg("Checking crypt segments for device %s.", device);
461 		r = crypt_sysfs_check_crypt_segment(device, *offset, *size);
463 			log_err(cd, _("Cannot use device %s (crypt segments "
464 				      "overlaps or in use by another device).\n"),
471 		*flags |= CRYPT_ACTIVATE_READONLY;
473 	log_dbg("Calculated device size is %" PRIu64 " sectors (%s), offset %" PRIu64 ".",
474 		*size, real_readonly ? "RO" : "RW", *offset);
/* Overwrite the first 'sectors' sectors of 'device' with zeros.
 * NOTE(review): the excerpt omits the line that presumably adds O_EXCL
 * for block devices (per the "never wipe header on mounted device"
 * comment) and the cleanup path -- confirm in the full source. */
478 int wipe_device_header(const char *device, int sectors)
482 	int size = sectors * SECTOR_SIZE;
485 	int flags = O_RDWR | O_DIRECT | O_SYNC;
487 	if (stat(device, &st) < 0)
490 	/* never wipe header on mounted device */
491 	if (S_ISBLK(st.st_mode))
494 	devfd = open(device, flags);
	/* EBUSY from an exclusive open means the device is in use. */
496 		return errno == EBUSY ? -EBUSY : -EINVAL;
498 	buffer = malloc(size);
503 	memset(buffer, 0, size);
	/* Short write of the zero buffer is treated as an I/O error. */
505 	r = write_blockwise(devfd, buffer, size) < size ? -EIO : 0;
/* Nice value applied while key material is locked in memory. */
514 #define DEFAULT_PROCESS_PRIORITY -18
/* Process priority saved on first lock, restored on final unlock. */
516 static int _priority;
/* Reference count of crypt_memlock_inc()/crypt_memlock_dec() calls. */
517 static int _memlock_count = 0;
519 // return 1 if memory is locked
/* First call locks all current and future process memory (mlockall,
 * so key material cannot be swapped out) and raises the scheduling
 * priority; later calls only bump the reference count.  Returns 1
 * while memory is locked, 0 otherwise. */
520 int crypt_memlock_inc(struct crypt_device *ctx)
522 	if (!_memlock_count++) {
523 		log_dbg("Locking memory.");
524 		if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1) {
525 			log_err(ctx, _("WARNING!!! Possibly insecure memory. Are you root?\n"));
		/* getpriority() may legitimately return -1; check errno too. */
530 		if (((_priority = getpriority(PRIO_PROCESS, 0)) == -1) && errno)
531 			log_err(ctx, _("Cannot get process priority.\n"));
533 		if (setpriority(PRIO_PROCESS, 0, DEFAULT_PROCESS_PRIORITY))
534 			log_err(ctx, _("setpriority %d failed: %s\n"),
535 				DEFAULT_PROCESS_PRIORITY, strerror(errno));
537 	return _memlock_count ? 1 : 0;
/* Undo one crypt_memlock_inc(): when the reference count drops to
 * zero, unlock all memory and restore the saved priority.  Returns 1
 * while memory is still locked, 0 otherwise. */
540 int crypt_memlock_dec(struct crypt_device *ctx)
542 	if (_memlock_count && (!--_memlock_count)) {
543 		log_dbg("Unlocking memory.");
544 		if (munlockall() == -1)
545 			log_err(ctx, _("Cannot unlock memory.\n"));
546 		if (setpriority(PRIO_PROCESS, 0, _priority))
547 			log_err(ctx, _("setpriority %d failed: %s\n"), _priority, strerror(errno));
549 	return _memlock_count ? 1 : 0;
552 /* DEVICE TOPOLOGY */
554 /* block device topology ioctls, introduced in 2.6.32 */
/* Defined locally so the code still builds against older kernel
 * headers that lack these ioctl numbers. */
556 #define BLKIOMIN    _IO(0x12,120)
557 #define BLKIOOPT    _IO(0x12,121)
558 #define BLKALIGNOFF _IO(0x12,122)
/* Query block-device topology (minimum/optimal I/O sizes and the
 * device alignment offset) and derive the required data alignment in
 * bytes.  Falls back to 'default_alignment' when the topology ioctls
 * are unsupported (pre-2.6.32 kernels).  Results are returned through
 * *required_alignment and *alignment_offset. */
561 void get_topology_alignment(const char *device,
562 			    unsigned long *required_alignment, /* bytes */
563 			    unsigned long *alignment_offset,   /* bytes */
564 			    unsigned long default_alignment)
566 	int dev_alignment_offset = 0;
567 	unsigned int min_io_size = 0, opt_io_size = 0;
568 	unsigned long temp_alignment = 0;
	/* Start from safe defaults; overwritten only if ioctls succeed. */
571 	*required_alignment = default_alignment;
572 	*alignment_offset = 0;
574 	fd = open(device, O_RDONLY);
578 	/* minimum io size */
579 	if (ioctl(fd, BLKIOMIN, &min_io_size) == -1) {
580 		log_dbg("Topology info for %s not supported, using default offset %lu bytes.",
581 			device, default_alignment);
585 	/* optimal io size */
586 	if (ioctl(fd, BLKIOOPT, &opt_io_size) == -1)
587 		opt_io_size = min_io_size;
589 	/* alignment offset, bogus -1 means misaligned/unknown */
590 	if (ioctl(fd, BLKALIGNOFF, &dev_alignment_offset) == -1 || dev_alignment_offset < 0)
591 		dev_alignment_offset = 0;
592 	*alignment_offset = (unsigned long)dev_alignment_offset;
	/* Required alignment is the larger of minimum and optimal I/O. */
594 	temp_alignment = (unsigned long)min_io_size;
596 	if (temp_alignment < (unsigned long)opt_io_size)
597 		temp_alignment = (unsigned long)opt_io_size;
599 	/* If calculated alignment is multiple of default, keep default */
600 	if (temp_alignment && (default_alignment % temp_alignment))
601 		*required_alignment = temp_alignment;
603 	log_dbg("Topology: IO (%u/%u), offset = %lu; Required alignment is %lu bytes.",
604 		min_io_size, opt_io_size, *alignment_offset, *required_alignment);