10 #include <sys/types.h>
12 #include <sys/ioctl.h>
15 #include <sys/resource.h>
17 #include "libcryptsetup.h"
/* Last error message recorded by set_error(); NULL when none is pending.
 * Owned by this file: allocated by vasprintf() in set_error_va(). */
static char *error=NULL;
22 void set_error_va(const char *fmt, va_list va)
33 r = vasprintf(&error, fmt, va);
40 if (r && error[r - 1] == '\n')
/* printf-style convenience wrapper around set_error_va(). */
void set_error(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	set_error_va(fmt, args);
	va_end(args);
}
53 const char *get_error(void)
58 static int get_alignment(int fd)
60 int alignment = DEFAULT_MEM_ALIGNMENT;
62 #ifdef _PC_REC_XFER_ALIGN
63 alignment = fpathconf(fd, _PC_REC_XFER_ALIGN);
65 alignment = DEFAULT_MEM_ALIGNMENT;
/*
 * Allocate "size" bytes whose address is a multiple of "alignment".
 * *base receives the pointer that must later be passed to free();
 * the return value is the aligned address (it may differ from *base
 * in the fallback path) or NULL on allocation failure.
 */
static void *aligned_malloc(void **base, int size, int alignment)
{
#ifdef HAVE_POSIX_MEMALIGN
	return posix_memalign(base, alignment, size) ? NULL : *base;
#else
/* Credits go to Michal's padlock patches for this alignment code */
	char *ptr = malloc(size + alignment);

	if (ptr == NULL)
		return NULL;

	*base = ptr;
	/* Bump the pointer forward to the next aligned boundary if needed
	 * (alignment is assumed to be a power of two). */
	if (alignment > 1 && ((long)ptr & (alignment - 1)))
		ptr += alignment - ((long)ptr & (alignment - 1));

	return ptr;
#endif
}
/* Return the logical sector size of the block device behind fd,
 * or -EINVAL when the BLKSSZGET ioctl is not supported (non-block fd). */
static int sector_size(int fd)
{
	int bsize;

	if (ioctl(fd, BLKSSZGET, &bsize) < 0)
		return -EINVAL;

	return bsize;
}
/* Open "device" read-only and report its logical sector size.
 * Returns -EINVAL when the device cannot be opened or queried. */
int sector_size_for_device(const char *device)
{
	int fd = open(device, O_RDONLY);
	int r;

	if (fd < 0)
		return -EINVAL;

	r = sector_size(fd);
	close(fd);

	return r;
}
/*
 * Write "count" bytes at the current file position of "fd", honouring
 * the device sector size and direct-I/O alignment constraints.
 *
 * Whole sectors are written in one go (bounced through an aligned copy
 * when orig_buf is misaligned); a trailing partial sector is handled by
 * a read-modify-write of the final device sector.
 *
 * Returns "count" on success, a negative value on failure.
 */
ssize_t write_blockwise(int fd, const void *orig_buf, size_t count)
{
	void *hangover_buf, *hangover_buf_base = NULL;
	void *buf, *buf_base = NULL;
	int r, hangover, solid, bsize, alignment;
	ssize_t ret = -1;

	if ((bsize = sector_size(fd)) < 0)
		return bsize;

	hangover = count % bsize;
	solid = count - hangover;
	alignment = get_alignment(fd);

	if ((long)orig_buf & (alignment - 1)) {
		/* Misaligned caller buffer: bounce through an aligned copy. */
		buf = aligned_malloc(&buf_base, count, alignment);
		if (!buf)
			goto out;
		memcpy(buf, orig_buf, count);
	} else
		buf = (void *)orig_buf;

	r = write(fd, buf, solid);
	if (r < 0 || r != solid)
		goto out;

	if (hangover) {
		hangover_buf = aligned_malloc(&hangover_buf_base, bsize, alignment);
		if (!hangover_buf)
			goto out;

		/* Read-modify-write of the final, partially covered sector. */
		r = read(fd, hangover_buf, bsize);
		if (r < 0 || r != bsize)
			goto out;

		/* BUGFIX: check the seek back to the sector start. */
		if (lseek(fd, -bsize, SEEK_CUR) < 0)
			goto out;

		memcpy(hangover_buf, buf + solid, hangover);

		r = write(fd, hangover_buf, bsize);
		if (r < 0 || r != bsize)
			goto out;
	}
	ret = count;
out:
	/* BUGFIX: hangover_buf_base previously leaked on every error path
	 * taken after its allocation; free(NULL) is a no-op. */
	free(hangover_buf_base);
	if (buf != orig_buf)
		free(buf_base);
	return ret;
}
/*
 * Read "count" bytes at the current file position of "fd", honouring
 * the device sector size and direct-I/O alignment constraints.
 *
 * Whole sectors are read directly (into an aligned bounce buffer when
 * orig_buf is misaligned); a trailing partial sector is read into a
 * sector-sized scratch buffer and the needed prefix copied out.
 *
 * Returns "count" on success, a negative value on failure.
 */
ssize_t read_blockwise(int fd, void *orig_buf, size_t count) {
	/* BUGFIX: hangover_buf_base was uninitialized, making the cleanup
	 * free below undefined when the allocation was never reached. */
	void *hangover_buf, *hangover_buf_base = NULL;
	void *buf, *buf_base = NULL;
	int r, hangover, solid, bsize, alignment;
	ssize_t ret = -1;

	if ((bsize = sector_size(fd)) < 0)
		return bsize;

	hangover = count % bsize;
	solid = count - hangover;
	alignment = get_alignment(fd);

	if ((long)orig_buf & (alignment - 1)) {
		buf = aligned_malloc(&buf_base, count, alignment);
		if (!buf)
			return -1;
	} else
		buf = orig_buf;

	r = read(fd, buf, solid);
	if (r < 0 || r != solid)
		goto out;

	if (hangover) {
		hangover_buf = aligned_malloc(&hangover_buf_base, bsize, alignment);
		if (!hangover_buf)
			goto out;

		r = read(fd, hangover_buf, bsize);
		if (r < 0 || r != bsize)
			goto out;

		memcpy(buf + solid, hangover_buf, hangover);
	}
	ret = count;
out:
	/* BUGFIX: hangover_buf_base previously leaked when the partial-sector
	 * read failed. */
	free(hangover_buf_base);
	if (buf != orig_buf) {
		/* BUGFIX: copy back to the caller only on success; on failure
		 * the bounce buffer may be partly uninitialized. */
		if (ret >= 0)
			memcpy(orig_buf, buf, count);
		free(buf_base);
	}
	return ret;
}
 * Combines llseek with blockwise write. write_blockwise can already deal with
 * short writes, but we also need a function to deal with short writes at the
 * start. That information is implicitly included in the read/write offset,
 * which cannot be set to non-aligned boundaries. Hence, we combine llseek
 * with write.
209 ssize_t write_lseek_blockwise(int fd, const char *buf, size_t count, off_t offset) {
210 int bsize = sector_size(fd);
211 const char *orig_buf = buf;
212 char frontPadBuf[bsize];
213 int frontHang = offset % bsize;
215 int innerCount = count < bsize ? count : bsize;
220 lseek(fd, offset - frontHang, SEEK_SET);
222 r = read(fd,frontPadBuf,bsize);
225 memcpy(frontPadBuf+frontHang, buf, innerCount);
227 lseek(fd, offset - frontHang, SEEK_SET);
228 r = write(fd,frontPadBuf,bsize);
234 if(count <= 0) return buf - orig_buf;
236 return write_blockwise(fd, buf, count) + innerCount;
239 int device_ready(struct crypt_device *cd, const char *device, int mode)
246 if(stat(device, &st) < 0) {
247 log_err(cd, _("Device %s doesn't exist or access denied.\n"), device);
251 log_dbg("Trying to open and read device %s.", device);
252 devfd = open(device, mode | O_DIRECT | O_SYNC);
254 log_err(cd, _("Cannot open device %s for %s%s access.\n"), device,
255 (mode & O_EXCL) ? _("exclusive ") : "",
256 (mode & O_RDWR) ? _("writable") : _("read-only"));
260 /* Try to read first sector */
261 s = read_blockwise(devfd, buf, sizeof(buf));
262 if (s < 0 || s != sizeof(buf)) {
263 log_err(cd, _("Cannot read device %s.\n"), device);
267 memset(buf, 0, sizeof(buf));
273 int get_device_infos(const char *device,
279 unsigned long size_small;
286 if (stat(device, &st) < 0)
289 /* never wipe header on mounted device */
290 if (open_exclusive && S_ISBLK(st.st_mode))
293 /* Try to open read-write to check whether it is a read-only device */
294 fd = open(device, O_RDWR | flags);
295 if (fd == -1 && errno == EROFS) {
297 fd = open(device, O_RDONLY | flags);
300 if (fd == -1 && open_exclusive && errno == EBUSY)
307 /* If the device can be opened read-write, i.e. readonly is still 0, then
308 * check whether BKROGET says that it is read-only. E.g. read-only loop
309 * devices may be openend read-write but are read-only according to BLKROGET
311 if (*readonly == 0 && (r = ioctl(fd, BLKROGET, readonly)) < 0)
314 #error BLKROGET not available
318 if (ioctl(fd, BLKGETSIZE64, size) >= 0) {
319 *size >>= SECTOR_SHIFT;
326 if (ioctl(fd, BLKGETSIZE, &size_small) >= 0) {
327 *size = (uint64_t)size_small;
333 # error Need at least the BLKGETSIZE ioctl!
/*
 * Validate "device" for mapping and fill in missing geometry.
 *
 * When *size is 0 it is taken from the device (minus *offset, both in
 * 512-byte sectors); *read_only is forced to 1 when the device itself
 * is read-only. Returns 0 on success, a negative error code otherwise.
 *
 * NOTE(review): parameter list reconstructed from the visible body
 * (open_exclusive, *size, *offset, *read_only) — confirm against callers.
 */
int device_check_and_adjust(struct crypt_device *cd,
			    const char *device,
			    int open_exclusive,
			    uint64_t *size,
			    uint64_t *offset,
			    int *read_only)
{
	int r, real_readonly;
	uint64_t real_size;

	if (!device)
		return -ENOTBLK;

	r = get_device_infos(device, open_exclusive, &real_readonly, &real_size);
	if (r < 0) {
		if (r == -EBUSY)
			log_err(cd, _("Cannot use device %s which is in use "
				      "(already mapped or mounted).\n"),
				device);
		else
			log_err(cd, _("Cannot get info about device %s.\n"),
				device);
		return r;
	}

	if (!*size) {
		*size = real_size;
		if (!*size) {
			log_err(cd, _("Device %s has zero size.\n"), device);
			return -ENOTBLK;
		}
		if (*size < *offset) {
			log_err(cd, _("Device %s is too small.\n"), device);
			return -EINVAL;
		}
		/* Usable area is what remains past the payload offset. */
		*size -= *offset;
	}

	if (real_readonly)
		*read_only = 1;

	log_dbg("Calculated device size is %" PRIu64 " sectors (%s), offset %" PRIu64 ".",
		*size, *read_only ? "RO" : "RW", *offset);
	return 0;
}
387 int wipe_device_header(const char *device, int sectors)
391 int size = sectors * SECTOR_SIZE;
394 int flags = O_RDWR | O_DIRECT | O_SYNC;
396 if (stat(device, &st) < 0)
399 /* never wipe header on mounted device */
400 if (S_ISBLK(st.st_mode))
403 devfd = open(device, flags);
405 return errno == EBUSY ? -EBUSY : -EINVAL;
407 buffer = malloc(size);
412 memset(buffer, 0, size);
414 r = write_blockwise(devfd, buffer, size) < size ? -EIO : 0;
/* Niceness applied while memory is locked (plaintext key handling). */
#define DEFAULT_PROCESS_PRIORITY -18

/* Priority saved by crypt_memlock_inc(), restored by crypt_memlock_dec(). */
static int _priority;
/* Reference count of outstanding memory-lock requests. */
static int _memlock_count = 0;
428 // return 1 if memory is locked
429 int crypt_memlock_inc(struct crypt_device *ctx)
431 if (!_memlock_count++) {
432 log_dbg("Locking memory.");
433 if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1) {
434 log_err(ctx, _("WARNING!!! Possibly insecure memory. Are you root?\n"));
439 if (((_priority = getpriority(PRIO_PROCESS, 0)) == -1) && errno)
440 log_err(ctx, _("Cannot get process priority.\n"));
442 if (setpriority(PRIO_PROCESS, 0, DEFAULT_PROCESS_PRIORITY))
443 log_err(ctx, _("setpriority %d failed: %s\n"),
444 DEFAULT_PROCESS_PRIORITY, strerror(errno));
446 return _memlock_count ? 1 : 0;
449 int crypt_memlock_dec(struct crypt_device *ctx)
451 if (_memlock_count && (!--_memlock_count)) {
452 log_dbg("Unlocking memory.");
453 if (munlockall() == -1)
454 log_err(ctx, _("Cannot unlock memory.\n"));
455 if (setpriority(PRIO_PROCESS, 0, _priority))
456 log_err(ctx, _("setpriority %d failed: %s\n"), _priority, strerror(errno));
458 return _memlock_count ? 1 : 0;
/* DEVICE TOPOLOGY */

/* block device topology ioctls, introduced in 2.6.32 */
/* NOTE(review): defined locally so the code builds against pre-2.6.32
 * kernel headers — presumably wrapped in #ifndef guards in the full file;
 * confirm to avoid redefinition warnings on newer headers. */
#define BLKIOMIN _IO(0x12,120)
#define BLKIOOPT _IO(0x12,121)
#define BLKALIGNOFF _IO(0x12,122)
470 void get_topology_alignment(const char *device,
471 unsigned long *required_alignment, /* bytes */
472 unsigned long *alignment_offset, /* bytes */
473 unsigned long default_alignment)
475 int dev_alignment_offset = 0;
476 unsigned int min_io_size = 0, opt_io_size = 0;
477 unsigned long temp_alignment = 0;
480 *required_alignment = default_alignment;
481 *alignment_offset = 0;
483 fd = open(device, O_RDONLY);
487 /* minimum io size */
488 if (ioctl(fd, BLKIOMIN, &min_io_size) == -1) {
489 log_dbg("Topology info for %s not supported, using default offset %lu bytes.",
490 device, default_alignment);
494 /* optimal io size */
495 if (ioctl(fd, BLKIOOPT, &opt_io_size) == -1)
496 opt_io_size = min_io_size;
498 /* alignment offset, bogus -1 means misaligned/unknown */
499 if (ioctl(fd, BLKALIGNOFF, &dev_alignment_offset) == -1 || dev_alignment_offset < 0)
500 dev_alignment_offset = 0;
501 *alignment_offset = (unsigned long)dev_alignment_offset;
503 temp_alignment = (unsigned long)min_io_size;
505 if (temp_alignment < (unsigned long)opt_io_size)
506 temp_alignment = (unsigned long)opt_io_size;
508 /* If calculated alignment is multiple of default, keep default */
509 if (temp_alignment && (default_alignment % temp_alignment))
510 *required_alignment = temp_alignment;
512 log_dbg("Topology: IO (%u/%u), offset = %lu; Required alignment is %lu bytes.",
513 min_io_size, opt_io_size, *alignment_offset, *required_alignment);