In Python, we don't need the line-continuation "\" inside (), [] or {}.
Suggested by Simon McVittie <simon.mcvittie@collabora.co.uk>.
Change-Id: I2c258cd1099e5c07bd710cd66854f6291c04f623
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@intel.com>
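For context, a small standalone sketch (not taken from the patched code; "log",
"path" and "err" are placeholder names) showing why the trailing "\" is
redundant: an unclosed (, [ or { already makes Python join the following
lines implicitly.

    import logging

    logging.basicConfig()
    log = logging.getLogger(__name__)
    path, err = "/dev/sdX", "hypothetical error"   # placeholder values

    # Old style removed by this patch: the "\" works but adds nothing.
    log.error("cannot open block device '%s' in exclusive mode: %s" \
              % (path, err))

    # New style: the unclosed "(" of the call already continues the line.
    log.error("cannot open block device '%s' in exclusive mode: %s"
              % (path, err))

    # The same implicit joining applies inside [] and {}.
    numbers = [1, 2,
               3]
    mapping = {"a": 1,
               "b": 2}

Outside brackets a line break still needs either a trailing "\" or an extra
pair of parentheses, which is why this change only touches lines that already
sit inside an open bracket.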
try:
descriptor = os.open(path, os.O_WRONLY | os.O_EXCL)
except OSError as err:
- log.error("cannot open block device '%s' in exclusive mode: %s" \
+ log.error("cannot open block device '%s' in exclusive mode: %s"
% (path, err))
raise SystemExit(1)
try:
dest_obj = open(args.dest, 'wb+')
except IOError as err:
- log.error("cannot open destination file '%s': %s" \
+ log.error("cannot open destination file '%s': %s"
% (args.dest, err))
raise SystemExit(1)
raise SystemExit(1)
else:
log.info("block map format version %s" % writer.bmap_version)
- log.info("%d blocks of size %d (%s), mapped %d blocks (%s or %.1f%%)" \
+ log.info("%d blocks of size %d (%s), mapped %d blocks (%s or %.1f%%)"
% (writer.blocks_cnt, writer.block_size,
writer.image_size_human, writer.mapped_cnt,
writer.mapped_size_human, writer.mapped_percent))
- log.info("copying image '%s' to %s using bmap file '%s'" \
+ log.info("copying image '%s' to %s using bmap file '%s'"
% (os.path.basename(args.image), dest_str,
os.path.basename(bmap_path)))
copying_time = time.time() - start_time
copying_speed = writer.mapped_size / copying_time
- log.info("copying time: %s, copying speed %s/sec" \
- % (BmapHelpers.human_time(copying_time), \
+ log.info("copying time: %s, copying speed %s/sec"
+ % (BmapHelpers.human_time(copying_time),
BmapHelpers.human_size(copying_speed)))
dest_obj.close()
try:
output = open(args.output, "w+")
except IOError as err:
- log.error("cannot open the output file '%s': %s" \
+ log.error("cannot open the output file '%s': %s"
% (args.output, err))
raise SystemExit(1)
else:
sys.stdout.write(output.read())
if creator.mapped_cnt == creator.blocks_cnt:
- log.warning("all %s are mapped, no holes in '%s'" \
+ log.warning("all %s are mapped, no holes in '%s'"
% (creator.image_size_human, args.image))
- log.warning("was the image handled incorrectly and holes " \
+ log.warning("was the image handled incorrectly and holes "
"were expanded?")
def parse_arguments():
parser = argparse.ArgumentParser(description = text, prog = 'bmaptool')
# The --version option
- parser.add_argument("--version", action = "version", \
+ parser.add_argument("--version", action = "version",
version = "%(prog)s " + "%s" % VERSION)
# The --quiet option
"""
if self.image_size is not None and self.image_size != image_size:
- raise Error("cannot set image size to %d bytes, it is known to " \
+ raise Error("cannot set image size to %d bytes, it is known to "
"be %d bytes (%s)" % (image_size, self.image_size,
self.image_size_human))
mapped_bmap.close()
if calculated_sha1 != correct_sha1:
- raise Error("checksum mismatch for bmap file '%s': calculated " \
- "'%s', should be '%s'" % \
- (self._bmap_path, calculated_sha1, correct_sha1))
+ raise Error("checksum mismatch for bmap file '%s': calculated "
+ "'%s', should be '%s'"
+ % (self._bmap_path, calculated_sha1, correct_sha1))
def _parse_bmap(self):
"""
try:
self._xml = ElementTree.parse(self._f_bmap)
except ElementTree.ParseError as err:
- raise Error("cannot parse the bmap file '%s' which should be a " \
+ raise Error("cannot parse the bmap file '%s' which should be a "
"proper XML file: %s" % (self._bmap_path, err))
xml = self._xml
self.bmap_version_major = int(self.bmap_version.split('.', 1)[0])
self.bmap_version_minor = int(self.bmap_version.split('.', 1)[1])
if self.bmap_version_major > SUPPORTED_BMAP_VERSION:
- raise Error("only bmap format version up to %d is supported, " \
- "version %d is not supported" \
+ raise Error("only bmap format version up to %d is supported, "
+ "version %d is not supported"
% (SUPPORTED_BMAP_VERSION, self.bmap_version_major))
# Fetch interesting data from the bmap XML file
blocks_cnt = (self.image_size + self.block_size - 1) / self.block_size
if self.blocks_cnt != blocks_cnt:
- raise Error("Inconsistent bmap - image size does not match " \
- "blocks count (%d bytes != %d blocks * %d bytes)" \
+ raise Error("Inconsistent bmap - image size does not match "
+ "blocks count (%d bytes != %d blocks * %d bytes)"
% (self.image_size, self.blocks_cnt, self.block_size))
if self.bmap_version_major >= 1 and self.bmap_version_minor >= 3:
try:
buf = self._f_image.read(length * self.block_size)
except IOError as err:
- raise Error("error while reading blocks %d-%d of the " \
- "image file '%s': %s" \
+ raise Error("error while reading blocks %d-%d of the "
+ "image file '%s': %s"
% (start, end, self._image_path, err))
if not buf:
buf))
if verify and sha1 and hash_obj.hexdigest() != sha1:
- raise Error("checksum mismatch for blocks range %d-%d: " \
- "calculated %s, should be %s (image file %s)" \
- % (first, last, hash_obj.hexdigest(), \
+ raise Error("checksum mismatch for blocks range %d-%d: "
+ "calculated %s, should be %s (image file %s)"
+ % (first, last, hash_obj.hexdigest(),
sha1, self._image_path))
# Silence pylint warning about catching too general exception
# pylint: disable=W0703
try:
self._f_dest.write(buf)
except IOError as err:
- raise Error("error while writing blocks %d-%d of '%s': %s" \
+ raise Error("error while writing blocks %d-%d of '%s': %s"
% (start, end, self._dest_path, err))
self._batch_queue.task_done()
# This is just a sanity check - we should have written exactly
# 'mapped_cnt' blocks.
if blocks_written != self.mapped_cnt:
- raise Error("wrote %u blocks from image '%s' to '%s', but should " \
- "have %u - bmap file '%s' does not belong to this" \
- "image" \
- % (blocks_written, self._image_path, self._dest_path, \
+ raise Error("wrote %u blocks from image '%s' to '%s', but should "
+ "have %u - bmap file '%s' does not belong to this"
+ "image"
+ % (blocks_written, self._image_path, self._dest_path,
self.mapped_cnt, self._bmap_path))
if self._dest_is_regfile:
try:
os.ftruncate(self._f_dest.fileno(), self.image_size)
except OSError as err:
- raise Error("cannot truncate file '%s': %s" \
+ raise Error("cannot truncate file '%s': %s"
% (self._dest_path, err))
try:
try:
os.fsync(self._f_dest.fileno()),
except OSError as err:
- raise Error("cannot synchronize '%s': %s " \
+ raise Error("cannot synchronize '%s': %s "
% (self._dest_path, err.strerror))
f_scheduler.seek(0)
f_scheduler.write("noop")
except IOError as err:
- self._logger.warning("failed to enable I/O optimization, expect " \
- "suboptimal speed (reason: cannot switch " \
+ self._logger.warning("failed to enable I/O optimization, expect "
+ "suboptimal speed (reason: cannot switch "
"to the 'noop' I/O scheduler: %s)" % err)
else:
# The file contains a list of schedulers with the current
f_ratio.seek(0)
f_ratio.write("1")
except IOError as err:
- self._logger.warning("failed to disable excessive buffering, " \
- "expect worse system responsiveness " \
- "(reason: cannot set max. I/O ratio to " \
+ self._logger.warning("failed to disable excessive buffering, "
+ "expect worse system responsiveness "
+ "(reason: cannot set max. I/O ratio to "
"1: %s)" % err)
def _restore_bdev_settings(self):
with open(self._sysfs_scheduler_path, "w") as f_scheduler:
f_scheduler.write(self._old_scheduler_value)
except IOError as err:
- raise Error("cannot restore the '%s' I/O scheduler: %s" \
+ raise Error("cannot restore the '%s' I/O scheduler: %s"
% (self._old_scheduler_value, err))
if self._old_max_ratio_value is not None:
with open(self._sysfs_max_ratio_path, "w") as f_ratio:
f_ratio.write(self._old_max_ratio_value)
except IOError as err:
- raise Error("cannot set the max. I/O ratio back to '%s': %s" \
+ raise Error("cannot set the max. I/O ratio back to '%s': %s"
% (self._old_max_ratio_value, err))
def copy(self, sync=True, verify=True):
bdev_size = os.lseek(self._f_dest.fileno(), 0, os.SEEK_END)
os.lseek(self._f_dest.fileno(), 0, os.SEEK_SET)
except OSError as err:
- raise Error("cannot seed block device '%s': %s " \
+ raise Error("cannot seed block device '%s': %s "
% (self._dest_path, err.strerror))
if bdev_size < self.image_size:
- raise Error("the image file '%s' has size %s and it will not " \
- "fit the block device '%s' which has %s capacity" \
+ raise Error("the image file '%s' has size %s and it will not "
+ "fit the block device '%s' which has %s capacity"
% (self._image_path, self.image_size_human,
self._dest_path, human_size(bdev_size)))
try:
self._f_image = open(self._image_path, 'rb')
except IOError as err:
- raise Error("cannot open image file '%s': %s" \
+ raise Error("cannot open image file '%s': %s"
% (self._image_path, err))
self._f_image_needs_close = True
try:
self._f_bmap = open(self._bmap_path, 'w+')
except IOError as err:
- raise Error("cannot open bmap file '%s': %s" \
+ raise Error("cannot open bmap file '%s': %s"
% (self._bmap_path, err))
self._f_bmap_needs_close = True
self.image_size = self.fiemap.image_size
self.image_size_human = human_size(self.image_size)
if self.image_size == 0:
- raise Error("cannot generate bmap for zero-sized image file '%s'" \
+ raise Error("cannot generate bmap for zero-sized image file '%s'"
% self._image_path)
self.block_size = self.fiemap.block_size
self._f_bmap.write(xml)
self._f_bmap.seek(self._mapped_count_pos1)
- self._f_bmap.write("%s or %.1f%%" % \
- (self.mapped_size_human, self.mapped_percent))
+ self._f_bmap.write("%s or %.1f%%"
+ % (self.mapped_size_human, self.mapped_percent))
self._f_bmap.seek(self._mapped_count_pos2)
self._f_bmap.write("%u" % self.mapped_cnt)
sha1 = ""
if first != last:
- self._f_bmap.write(" <Range%s> %s-%s </Range>\n" \
+ self._f_bmap.write(" <Range%s> %s-%s </Range>\n"
% (sha1, first, last))
else:
- self._f_bmap.write(" <Range%s> %s </Range>\n" \
+ self._f_bmap.write(" <Range%s> %s </Range>\n"
% (sha1, first))
self.mapped_size = self.mapped_cnt * self.block_size
try:
self._f_bmap.flush()
except IOError as err:
- raise Error("cannot flush the bmap file '%s': %s" \
+ raise Error("cannot flush the bmap file '%s': %s"
% (self._bmap_path, err))
self._f_image.seek(image_pos)
try:
self._f_image = open(self._image_path, 'rb')
except IOError as err:
- raise Error("cannot open image file '%s': %s" \
+ raise Error("cannot open image file '%s': %s"
% (self._image_path, err))
self._f_image_needs_close = True
# Validate 'buf_size'
if buf_size < MIN_BUFFER_SIZE:
- raise Error("too small buffer (%d bytes), minimum is %d bytes" \
+ raise Error("too small buffer (%d bytes), minimum is %d bytes"
% (buf_size, MIN_BUFFER_SIZE))
# How many 'struct fiemap_extent' elements fit the buffer
try:
self.block_size = BmapHelpers.get_block_size(self._f_image)
except IOError as err:
- raise Error("cannot get block size for '%s': %s" \
+ raise Error("cannot get block size for '%s': %s"
% (self._image_path, err))
self.blocks_cnt = self.image_size + self.block_size - 1
try:
self._f_image.flush()
except IOError as err:
- raise Error("cannot flush image file '%s': %s" \
+ raise Error("cannot flush image file '%s': %s"
% (self._image_path, err))
try:
os.fsync(self._f_image.fileno()),
except OSError as err:
- raise Error("cannot synchronize image file '%s': %s " \
+ raise Error("cannot synchronize image file '%s': %s "
% (self._image_path, err.strerror))
# Check if the FIEMAP ioctl is supported
"""
if block < 0 or block >= self.blocks_cnt:
- raise Error("bad block number %d, should be within [0, %d]" \
+ raise Error("bad block number %d, should be within [0, %d]"
% (block, self.blocks_cnt))
# Initialize the 'struct fiemap' part of the buffer
elif whence == os.SEEK_CUR:
new_pos = cur_pos + offset
else:
- raise Error("'seek()' method requires the 'whence' argument " \
- "to be %d or %d, but %d was passed" \
+ raise Error("'seek()' method requires the 'whence' argument "
+ "to be %d or %d, but %d was passed"
% (os.SEEK_SET, os.SEEK_CUR, whence))
if new_pos < cur_pos:
- raise Error("''seek()' method supports only seeking forward, " \
- "seeking from %d to %d is not allowed" \
+ raise Error("''seek()' method supports only seeking forward, "
+ "seeking from %d to %d is not allowed"
% (cur_pos, new_pos))
length = new_pos - cur_pos
to_read -= len(buf)
if to_read < 0:
- raise Error("seeked too far: %d instead of %d" \
+ raise Error("seeked too far: %d instead of %d"
% (new_pos - to_read, new_pos))
return new_pos - to_read
stdout=subprocess.PIPE).wait()
except OSError as err:
if err.errno == os.errno.ENOENT:
- raise Error("\"sshpass\" program not found, but it is " \
+ raise Error("\"sshpass\" program not found, but it is "
"required for downloading over SSH")
# Prepare the commands that we are going to run
stdout=subprocess.PIPE).wait()
except OSError as err:
if err.errno == os.errno.ENOENT:
- raise Error("\"sshpass\" program not found, but it is " \
+ raise Error("\"sshpass\" program not found, but it is "
"required for password SSH authentication")
else:
popen_args = ["ssh",
retcode = child_process.returncode
if retcode != 0:
decoded = _decode_sshpass_exit_code(retcode)
- raise Error("cannot connect to \"%s\": %s (error code %d)" % \
- (hostname, decoded, retcode))
+ raise Error("cannot connect to \"%s\": %s (error code %d)"
+ % (hostname, decoded, retcode))
# Test if file exists by running "test -f path && test -r path" on the
# host
stdout=subprocess.PIPE)
child_process.wait()
if child_process.returncode != 0:
- raise Error("\"%s\" on \"%s\" cannot be read: make sure it " \
- "exists, is a regular file, and you have read " \
+ raise Error("\"%s\" on \"%s\" cannot be read: make sure it "
+ "exists, is a regular file, and you have read "
"permissions" % (path, hostname))
# Read the entire file using 'cat'
except (IOError, ValueError, httplib.InvalidURL) as err:
raise Error("cannot open URL '%s': %s" % (url, err))
except httplib.BadStatusLine:
- raise Error("cannot open URL '%s': server responds with an HTTP " \
+ raise Error("cannot open URL '%s': server responds with an HTTP "
"status code that we don't understand" % url)
def _create_local_copy(self):
try:
self._transfile_obj = open(tmp_file_obj.name, "rb")
except IOError as err:
- raise Error("cannot open own temporary file '%s': %s" \
+ raise Error("cannot open own temporary file '%s': %s"
% (tmp_file_obj.name, err))
def __init__(self, filepath, local=False):
def seek(self, offset, whence=os.SEEK_SET):
"""The 'seek()' method, similar to the one file objects have."""
if self._force_fake_seek or not hasattr(self._transfile_obj, "seek"):
- self._pos = _fake_seek_forward(self._transfile_obj, self._pos, \
+ self._pos = _fake_seek_forward(self._transfile_obj, self._pos,
offset, whence)
else:
self._transfile_obj.seek(offset, whence)
iterator = itertools.izip_longest(iterator1, iterator2)
for range1, range2 in iterator:
if range1 != range2:
- raise Error("mismatch for hole %d-%d, it is %d-%d in file2" \
+ raise Error("mismatch for hole %d-%d, it is %d-%d in file2"
% (range1[0], range1[1], range2[0], range2[1]))
def _generate_compressed_files(file_obj, delete=True):
# starting from block 'first_block'. Create an iterator which filters
# those block ranges from the 'ranges' list, that are out of the
# 'first_block'/'blocks_cnt' file region.
- ranges_iterator = ( x for x in ranges if x[1] >= first_block and \
+ ranges_iterator = ( x for x in ranges if x[1] >= first_block and
x[0] <= last_block )
iterator = itertools.izip_longest(ranges_iterator, fiemap_iterator)
correct = (correct[0], last_block)
if check[0] > check[1] or check != correct:
- raise Error("bad or unmatching %s range for file '%s': correct " \
- "is %d-%d, get_%s_ranges(%d, %d) returned %d-%d" \
+ raise Error("bad or unmatching %s range for file '%s': correct "
+ "is %d-%d, get_%s_ranges(%d, %d) returned %d-%d"
% (ranges_type, f_image.name, correct[0], correct[1],
ranges_type, first_block, blocks_cnt,
check[0], check[1]))