10 #include <sys/types.h>
12 #include <sys/ioctl.h>
15 #include <sys/resource.h>
17 #include "libcryptsetup.h"
/* Library-wide last-error message. Owned by this module; replaced on each
 * set_error*() call and read back via get_error(). */
20 static char *error=NULL;

/* Format and store the error string from a va_list.
 * NOTE(review): interior lines of this function are elided in this view
 * (e.g. freeing of the previous message, declaration of r) — do not assume
 * behavior beyond the visible lines. */
22 void set_error_va(const char *fmt, va_list va)
/* vasprintf allocates the buffer; r is the formatted length (or -1 on error). */
33 	r = vasprintf(&error, fmt, va);
/* Strip a single trailing newline so callers may append their own. */
40 	if (r && error[r - 1] == '\n')

/* Varargs convenience wrapper over set_error_va(). */
44 void set_error(const char *fmt, ...)
49 	set_error_va(fmt, va);

/* Return the stored error message; may be NULL if none was set. */
53 const char *get_error(void)
/* Query the preferred I/O transfer alignment for fd, falling back to
 * DEFAULT_MEM_ALIGNMENT when fpathconf() is unavailable or fails. */
58 static int get_alignment(int fd)
60 	int alignment = DEFAULT_MEM_ALIGNMENT;
/* _PC_REC_XFER_ALIGN is optional in POSIX; only use it if the platform defines it. */
62 #ifdef _PC_REC_XFER_ALIGN
63 	alignment = fpathconf(fd, _PC_REC_XFER_ALIGN);
/* fpathconf() failure path (elided condition above): restore the default. */
65 	alignment = DEFAULT_MEM_ALIGNMENT;
/* Allocate 'size' bytes aligned to 'alignment'. *base receives the pointer
 * that must later be passed to free(); the returned pointer is the aligned
 * address within that allocation (they differ on the manual fallback path). */
70 static void *aligned_malloc(void **base, int size, int alignment)
/* Preferred path: posix_memalign returns 0 on success, nonzero on failure. */
72 #ifdef HAVE_POSIX_MEMALIGN
73 	return posix_memalign(base, alignment, size) ? NULL : *base;
/* Fallback: over-allocate by 'alignment' and round the pointer up manually. */
75 /* Credits go to Michal's padlock patches for this alignment code */
78 	ptr = malloc(size + alignment);
79 	if(ptr == NULL) return NULL;
/* Only adjust when alignment > 1 and ptr is actually misaligned;
 * assumes alignment is a power of two so (alignment - 1) is a valid mask. */
82 	if(alignment > 1 && ((long)ptr & (alignment - 1))) {
83 		ptr += alignment - ((long)(ptr) & (alignment - 1));
/* Return the logical sector size of the block device behind fd via
 * BLKSSZGET; negative on ioctl failure (error handling lines elided). */
88 static int sector_size(int fd)
91 	if (ioctl(fd,BLKSSZGET, &bsize) < 0)
/* Convenience wrapper: open 'device' read-only and report its sector size.
 * NOTE(review): close() of fd happens on elided lines — confirm no fd leak. */
97 int sector_size_for_device(const char *device)
99 	int fd = open(device, O_RDONLY);
/* Write 'count' bytes to a block device opened (possibly) with O_DIRECT.
 * Splits the write into a sector-aligned "solid" part plus a trailing
 * "hangover" partial sector, which is handled read-modify-write so the
 * rest of that final sector is preserved. Returns bytes written or
 * negative on error (some error/cleanup lines are elided in this view). */
108 ssize_t write_blockwise(int fd, const void *orig_buf, size_t count)
110 	void *hangover_buf, *hangover_buf_base = NULL;
111 	void *buf, *buf_base = NULL;
112 	int r, hangover, solid, bsize, alignment;
115 	if ((bsize = sector_size(fd)) < 0)
/* hangover = bytes past the last full sector; solid = full-sector prefix. */
118 	hangover = count % bsize;
119 	solid = count - hangover;
120 	alignment = get_alignment(fd);
/* O_DIRECT requires an aligned buffer: copy into an aligned bounce buffer
 * only when the caller's buffer is misaligned. */
122 	if ((long)orig_buf & (alignment - 1)) {
123 		buf = aligned_malloc(&buf_base, count, alignment);
126 		memcpy(buf, orig_buf, count);
128 		buf = (void *)orig_buf;
130 	r = write(fd, buf, solid);
131 	if (r < 0 || r != solid)
/* Read-modify-write of the final partial sector. */
135 		hangover_buf = aligned_malloc(&hangover_buf_base, bsize, alignment);
139 		r = read(fd, hangover_buf, bsize);
140 		if(r < 0 || r != bsize) goto out;
/* Seek back over the sector we just read so the write lands on it. */
142 		r = lseek(fd, -bsize, SEEK_CUR);
/* buf + solid: arithmetic on void* is a GCC extension (byte-sized). */
145 		memcpy(hangover_buf, buf + solid, hangover);
147 		r = write(fd, hangover_buf, bsize);
148 		if(r < 0 || r != bsize) goto out;
149 		free(hangover_buf_base);
/* Counterpart to write_blockwise(): read 'count' bytes from a block device,
 * handling O_DIRECT alignment via a bounce buffer and reading the trailing
 * partial sector into a separate sector-sized buffer. Returns bytes read or
 * negative on error (some error/cleanup lines are elided in this view). */
158 ssize_t read_blockwise(int fd, void *orig_buf, size_t count) {
159 	void *hangover_buf, *hangover_buf_base;
160 	void *buf, *buf_base = NULL;
161 	int r, hangover, solid, bsize, alignment;
164 	if ((bsize = sector_size(fd)) < 0)
/* hangover = bytes past the last full sector; solid = full-sector prefix. */
167 	hangover = count % bsize;
168 	solid = count - hangover;
169 	alignment = get_alignment(fd);
/* Misaligned caller buffer: read into an aligned bounce buffer instead. */
171 	if ((long)orig_buf & (alignment - 1)) {
172 		buf = aligned_malloc(&buf_base, count, alignment);
178 	r = read(fd, buf, solid);
179 	if(r < 0 || r != solid)
/* Read a whole final sector, then copy just the needed 'hangover' bytes. */
183 		hangover_buf = aligned_malloc(&hangover_buf_base, bsize, alignment);
186 		r = read(fd, hangover_buf, bsize);
187 		if (r < 0 || r != bsize)
190 		memcpy(buf + solid, hangover_buf, hangover);
191 		free(hangover_buf_base);
/* If a bounce buffer was used, copy the result back to the caller. */
195 	if (buf != orig_buf) {
196 		memcpy(orig_buf, buf, count);
203 * Combines llseek with blockwise write. write_blockwise can already deal with short writes,
204 * but we also need a function to deal with short writes at the start. This information
205 * is implicitly included in the read/write offset, which cannot be set to non-aligned
206 * boundaries. Hence, we combine llseek with write.
/* Seek to 'offset' (which need not be sector-aligned) and write 'count'
 * bytes. A misaligned start ("frontHang") is handled read-modify-write on
 * its sector; the aligned remainder is delegated to write_blockwise().
 * NOTE(review): the lseek() return values below are not checked on the
 * visible lines — confirm against the elided lines / upstream. */
209 ssize_t write_lseek_blockwise(int fd, const char *buf, size_t count, off_t offset) {
/* NOTE(review): bsize may be negative on sector_size() failure; the VLA
 * below would then be invalid — presumably guarded on an elided line. */
210 	int bsize = sector_size(fd);
211 	const char *orig_buf = buf;
212 	char frontPadBuf[bsize];
/* Bytes by which 'offset' protrudes into its sector. */
213 	int frontHang = offset % bsize;
214 	int innerCount = count < bsize ? count : bsize;
/* Back up to the sector boundary containing 'offset'. */
220 		lseek(fd, offset - frontHang, SEEK_SET);
/* Read the existing sector so bytes before 'offset' are preserved. */
222 		r = read(fd,frontPadBuf,bsize);
225 		memcpy(frontPadBuf+frontHang, buf, innerCount);
227 		lseek(fd, offset - frontHang, SEEK_SET);
228 		r = write(fd,frontPadBuf,bsize);
/* Everything already consumed: report bytes written from the caller's view. */
234 	if(count <= 0) return buf - orig_buf;
/* Remainder starts sector-aligned; account for the front-pad bytes too. */
236 	return write_blockwise(fd, buf, count) + innerCount;
/* Check that 'device' exists, is a block device, can be opened with the
 * requested mode (plus O_DIRECT|O_SYNC), and that its first sector is
 * readable. Logs diagnostics via cd; return-value lines are elided here. */
239 int device_ready(struct crypt_device *cd, const char *device, int mode)
246 	if(stat(device, &st) < 0) {
247 		log_err(cd, _("Device %s doesn't exist or access denied.\n"), device);
/* Only regular block devices are acceptable. */
251 	if (!S_ISBLK(st.st_mode))
254 	log_dbg("Trying to open and read device %s.", device);
/* O_DIRECT|O_SYNC forces a real device access rather than cached data. */
255 	devfd = open(device, mode | O_DIRECT | O_SYNC);
257 		log_err(cd, _("Cannot open device %s for %s%s access.\n"), device,
258 			(mode & O_EXCL) ? _("exclusive ") : "",
259 			(mode & O_RDWR) ? _("writable") : _("read-only"));
263 	/* Try to read first sector */
264 	s = read_blockwise(devfd, buf, sizeof(buf));
265 	if (s < 0 || s != sizeof(buf)) {
266 		log_verbose(cd, _("Cannot read device %s.\n"), device);
/* Scrub the sector contents from memory before returning. */
270 	memset(buf, 0, sizeof(buf));
/* Gather read-only status and size (in 512-byte sectors) for 'device'.
 * With open_exclusive set, an EBUSY open is reported so callers never
 * touch a mounted/in-use device. Several declaration/return lines are
 * elided in this view. */
276 int get_device_infos(const char *device,
282 	unsigned long size_small;
289 	if (stat(device, &st) < 0)
292 	/* never wipe header on mounted device */
293 	if (open_exclusive && S_ISBLK(st.st_mode))
296 	/* Try to open read-write to check whether it is a read-only device */
297 	fd = open(device, O_RDWR | flags);
/* EROFS means the device itself is read-only: retry read-only. */
298 	if (fd == -1 && errno == EROFS) {
300 		fd = open(device, O_RDONLY | flags);
/* EBUSY under O_EXCL: device is in use (mapped or mounted). */
303 	if (fd == -1 && open_exclusive && errno == EBUSY)
310 	/* If the device can be opened read-write, i.e. readonly is still 0, then
311 	 * check whether BLKROGET says that it is read-only. E.g. read-only loop
312 	 * devices may be opened read-write but are read-only according to BLKROGET
314 	if (*readonly == 0 && (r = ioctl(fd, BLKROGET, readonly)) < 0)
/* Hard build failure: this code requires BLKROGET support. */
317 #error BLKROGET not available
/* Preferred: 64-bit byte size, converted to 512-byte sectors. */
321 	if (ioctl(fd, BLKGETSIZE64, size) >= 0) {
322 		*size >>= SECTOR_SHIFT;
/* Fallback: BLKGETSIZE already reports sectors, but in an unsigned long. */
329 	if (ioctl(fd, BLKGETSIZE, &size_small) >= 0) {
330 		*size = (uint64_t)size_small;
336 #	error Need at least the BLKGETSIZE ioctl!
/* Validate 'device' for mapping: query its real size/read-only state via
 * get_device_infos(), reject zero-size or too-small devices, and log the
 * resulting geometry. Parameter list and several branches are elided here. */
344 int device_check_and_adjust(struct crypt_device *cd,
351 	int r, real_readonly;
357 	r = get_device_infos(device, open_exclusive, &real_readonly, &real_size);
/* EBUSY from exclusive open: the device is already mapped or mounted. */
360 		log_err(cd, _("Cannot use device %s which is in use "
361 			      "(already mapped or mounted).\n"),
364 		log_err(cd, _("Cannot get info about device %s.\n"),
372 		log_err(cd, _("Device %s has zero size.\n"), device);
/* The payload offset must fit inside the device. */
375 	if (*size < *offset) {
376 		log_err(cd, _("Device %s is too small.\n"), device);
385 	log_dbg("Calculated device size is %" PRIu64 " sectors (%s), offset %" PRIu64 ".",
386 		*size, *read_only ? "RO" : "RW", *offset);
/* Overwrite the first 'sectors' sectors of 'device' with zeros (header
 * wipe). Refuses mounted/busy devices via O_EXCL-style EBUSY handling on
 * elided lines; uses O_DIRECT|O_SYNC so the wipe really hits the media. */
390 int wipe_device_header(const char *device, int sectors)
394 	int size = sectors * SECTOR_SIZE;
397 	int flags = O_RDWR | O_DIRECT | O_SYNC;
399 	if (stat(device, &st) < 0)
402 	/* never wipe header on mounted device */
403 	if (S_ISBLK(st.st_mode))
406 	devfd = open(device, flags);
408 		return errno == EBUSY ? -EBUSY : -EINVAL;
/* NOTE(review): with O_DIRECT the buffer presumably must be aligned —
 * write_blockwise() handles that via its bounce buffer. */
410 	buffer = malloc(size);
415 	memset(buffer, 0, size);
/* Treat any short write as an I/O error. */
417 	r = write_blockwise(devfd, buffer, size) < size ? -EIO : 0;
/* Process priority applied while memory is locked (favours timely key wiping). */
426 #define DEFAULT_PROCESS_PRIORITY -18

/* Priority saved at first lock so it can be restored at last unlock. */
428 static int _priority;
/* Reference count of outstanding crypt_memlock_inc() calls. */
429 static int _memlock_count = 0;
431 // return 1 if memory is locked
/* Reference-counted mlockall(): the first caller locks all current and
 * future pages and raises process priority; nested calls only bump the
 * counter. Returns 1 while memory is locked, 0 otherwise. */
432 int crypt_memlock_inc(struct crypt_device *ctx)
434 	if (!_memlock_count++) {
435 		log_dbg("Locking memory.");
/* mlockall failure is non-fatal: warn (typically needs root/CAP_IPC_LOCK). */
436 		if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1) {
437 			log_err(ctx, _("WARNING!!! Possibly insecure memory. Are you root?\n"));
/* getpriority() can legitimately return -1; errno disambiguates real failure. */
442 		if (((_priority = getpriority(PRIO_PROCESS, 0)) == -1) && errno)
443 			log_err(ctx, _("Cannot get process priority.\n"));
445 		if (setpriority(PRIO_PROCESS, 0, DEFAULT_PROCESS_PRIORITY))
446 			log_err(ctx, _("setpriority %d failed: %s\n"),
447 				DEFAULT_PROCESS_PRIORITY, strerror(errno));
449 	return _memlock_count ? 1 : 0;
/* Counterpart to crypt_memlock_inc(): when the count drops to zero, unlock
 * memory and restore the saved process priority. Returns 1 while memory is
 * still locked, 0 otherwise. Extra calls at zero count are harmless no-ops. */
452 int crypt_memlock_dec(struct crypt_device *ctx)
454 	if (_memlock_count && (!--_memlock_count)) {
455 		log_dbg("Unlocking memory.");
456 		if (munlockall() == -1)
457 			log_err(ctx, _("Cannot unlock memory.\n"));
/* Restore the priority captured in crypt_memlock_inc(). */
458 		if (setpriority(PRIO_PROCESS, 0, _priority))
459 			log_err(ctx, _("setpriority %d failed: %s\n"), _priority, strerror(errno));
461 	return _memlock_count ? 1 : 0;
464 /* DEVICE TOPOLOGY */

466 /* block device topology ioctls, introduced in 2.6.32 */
/* Defined locally so the code builds against pre-2.6.32 kernel headers. */
468 #define BLKIOMIN    _IO(0x12,120)
469 #define BLKIOOPT    _IO(0x12,121)
470 #define BLKALIGNOFF _IO(0x12,122)
/* Determine the data-alignment requirement for 'device' from kernel block
 * topology: required_alignment = max(minimum I/O, optimal I/O) in bytes,
 * unless the default already satisfies it; alignment_offset is the device's
 * reported misalignment (0 when unknown). Falls back to default_alignment
 * when topology ioctls are unsupported. Best-effort: no error return. */
473 void get_topology_alignment(const char *device,
474 			    unsigned long *required_alignment, /* bytes */
475 			    unsigned long *alignment_offset,   /* bytes */
476 			    unsigned long default_alignment)
478 	int dev_alignment_offset = 0;
479 	unsigned int min_io_size = 0, opt_io_size = 0;
480 	unsigned long temp_alignment = 0;
/* Defaults apply whenever topology cannot be queried. */
483 	*required_alignment = default_alignment;
484 	*alignment_offset = 0;
486 	fd = open(device, O_RDONLY);
490 	/* minimum io size */
491 	if (ioctl(fd, BLKIOMIN, &min_io_size) == -1) {
492 		log_dbg("Topology info for %s not supported, using default offset %lu bytes.",
493 			device, default_alignment);
497 	/* optimal io size */
498 	if (ioctl(fd, BLKIOOPT, &opt_io_size) == -1)
499 		opt_io_size = min_io_size;
501 	/* alignment offset, bogus -1 means misaligned/unknown */
502 	if (ioctl(fd, BLKALIGNOFF, &dev_alignment_offset) == -1 || dev_alignment_offset < 0)
503 		dev_alignment_offset = 0;
504 	*alignment_offset = (unsigned long)dev_alignment_offset;
/* Required alignment is the larger of minimum and optimal I/O sizes. */
506 	temp_alignment = (unsigned long)min_io_size;
508 	if (temp_alignment < (unsigned long)opt_io_size)
509 		temp_alignment = (unsigned long)opt_io_size;
511 	/* If calculated alignment is multiple of default, keep default */
512 	if (temp_alignment && (default_alignment % temp_alignment))
513 		*required_alignment = temp_alignment;
515 	log_dbg("Topology: IO (%u/%u), offset = %lu; Required alignment is %lu bytes.",
516 		min_io_size, opt_io_size, *alignment_offset, *required_alignment);