From 4cadb0f26072be1ea5f618cd9bedc90569cae390 Mon Sep 17 00:00:00 2001 From: Karol Lewandowski Date: Tue, 23 Aug 2022 14:19:07 +0200 Subject: [PATCH 2/3] Import tota-ua This commit imports tota-ua from commit a0c2c06cf ("Add return 0 to log functions to prevent unexpected failure"). The following changes were made with respect to the original tota-ua repository: - naming is adjusted (tota-ua -> upgrade/upgrade-engine) - delta.ua is dropped along with the libtota dependency (currently it brings only the unsupported and faulty delta_fs upgrade type) - actual programs are moved to the src/ subdir, scripts moved to scripts/ - CMakeLists.txt files are considerably simplified, dropping most of the duplication Change-Id: If7da44c748e3e25b4519022fb292088ed8cbdcc0 --- CMakeLists.txt | 47 ++ LICENSE | 204 ++++ README | 20 + data/40-upgrade.list.in | 23 + data/CMakeLists.txt | 2 + packaging/upgrade-engine.spec | 93 ++++ scripts/clone_partitions/clone_partitions.service | 15 + scripts/clone_partitions/clone_partitions.sh | 112 +++++ .../clone_partitions_recovery.service | 15 + scripts/upgrade-support/upgrade-common.inc | 342 ++++++++++++++ scripts/upgrade-support/upgrade-fota.sh | 88 ++++ scripts/upgrade-support/upgrade-legacy.sh | 116 +++++ scripts/upgrade-support/upgrade-partial.sh | 66 +++ .../upgrade-support/upgrade-prepare-partitions.sh | 43 ++ scripts/upgrade-support/upgrade-trigger.sh | 148 ++++++ src/blkid-print/CMakeLists.txt | 13 + src/blkid-print/blkid-api.c | 62 +++ src/blkid-print/blkid-api.h | 26 ++ src/blkid-print/blkid-print.c | 128 +++++ src/dmverity/CMakeLists.txt | 19 + src/dmverity/verity_handler.c | 224 +++++++++ src/dmverity/verity_handler.h | 34 ++ src/img-verifier/CMakeLists.txt | 20 + src/img-verifier/img-verifier.c | 339 ++++++++++++++ src/img-verifier/img-verifier.h | 64 +++ src/upgrade-apply/CMakeLists.txt | 27 ++ src/upgrade-apply/main.c | 514 +++++++++++++++++++++ src/upgrade-apply/patch/NOTES.md | 3 + src/upgrade-apply/patch/brotli.c | 49 ++ src/upgrade-apply/patch/brotli.h | 22 + src/upgrade-apply/patch/lzma.c | 143 ++++++ src/upgrade-apply/patch/lzma.h | 23 + src/upgrade-apply/patch/patch.c | 447 ++++++++++++++++++ src/upgrade-apply/patch/patch.h | 67 +++ src/upgrade-apply/sha1/sha1.c | 295 ++++++++++++ src/upgrade-apply/sha1/sha1.h | 44 ++ upgrade.manifest | 9 + 37 files changed, 3906 insertions(+) create mode 100644 CMakeLists.txt create mode 100644 LICENSE create mode 100644 README create mode 100644 data/40-upgrade.list.in create mode 100644 data/CMakeLists.txt create mode 100644 packaging/upgrade-engine.spec create mode 100644 scripts/clone_partitions/clone_partitions.service create mode 100644 scripts/clone_partitions/clone_partitions.sh create mode 100644 scripts/clone_partitions/clone_partitions_recovery.service create mode 100644 scripts/upgrade-support/upgrade-common.inc create mode 100755 scripts/upgrade-support/upgrade-fota.sh create mode 100644 scripts/upgrade-support/upgrade-legacy.sh create mode 100755 scripts/upgrade-support/upgrade-partial.sh create mode 100755 scripts/upgrade-support/upgrade-prepare-partitions.sh create mode 100644 scripts/upgrade-support/upgrade-trigger.sh create mode 100644 src/blkid-print/CMakeLists.txt create mode 100644 src/blkid-print/blkid-api.c create mode 100644 src/blkid-print/blkid-api.h create mode 100644 src/blkid-print/blkid-print.c create mode 100755 src/dmverity/CMakeLists.txt create mode 100755 src/dmverity/verity_handler.c create mode 100755 src/dmverity/verity_handler.h create mode 100755 src/img-verifier/CMakeLists.txt create mode 100644
src/img-verifier/img-verifier.c create mode 100644 src/img-verifier/img-verifier.h create mode 100644 src/upgrade-apply/CMakeLists.txt create mode 100644 src/upgrade-apply/main.c create mode 100644 src/upgrade-apply/patch/NOTES.md create mode 100644 src/upgrade-apply/patch/brotli.c create mode 100644 src/upgrade-apply/patch/brotli.h create mode 100644 src/upgrade-apply/patch/lzma.c create mode 100644 src/upgrade-apply/patch/lzma.h create mode 100644 src/upgrade-apply/patch/patch.c create mode 100644 src/upgrade-apply/patch/patch.h create mode 100644 src/upgrade-apply/sha1/sha1.c create mode 100644 src/upgrade-apply/sha1/sha1.h create mode 100644 upgrade.manifest diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..ed144fc --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,47 @@ +# +# upgrade-engine +# +# Copyright (c) 2021 Samsung Electronics Co., Ltd. +# +# Licensed under the Apache License, Version 2.0 (the License); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +CMAKE_MINIMUM_REQUIRED(VERSION 2.82) +PROJECT(upgrade C) + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/src/) + +IF("${CMAKE_BUILD_TYPE}" STREQUAL "") + SET(CMAKE_BUILD_TYPE "Release") +ENDIF("${CMAKE_BUILD_TYPE}" STREQUAL "") +MESSAGE("Build type: ${CMAKE_BUILD_TYPE}") + +SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${EXTRA_CFLAGS} -fPIC -fPIE") +SET(CMAKE_C_FLAGS_DEBUG "-O0 -g") +SET(CMAKE_C_FLAGS_RELEASE "-O2") + +FIND_PROGRAM(UNAME NAMES uname) +IF("${ARCH}" STREQUAL "arm") + ADD_DEFINITIONS("-DTARGET") + MESSAGE("add -DTARGET") +ENDIF("${ARCH}" STREQUAL "arm") + +ADD_DEFINITIONS("-DPREFIX=\"${PREFIX}\"") + +SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--as-needed -pie") + +ADD_SUBDIRECTORY(src/dmverity) +ADD_SUBDIRECTORY(src/img-verifier) +ADD_SUBDIRECTORY(src/upgrade-apply) +ADD_SUBDIRECTORY(src/blkid-print) +ADD_SUBDIRECTORY(data) diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..9c13a9b --- /dev/null +++ b/LICENSE @@ -0,0 +1,204 @@ +Copyright (c) 2000 - 2011 Samsung Electronics Co., Ltd. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/README b/README new file mode 100644 index 0000000..ca2b6d9 --- /dev/null +++ b/README @@ -0,0 +1,20 @@ +# Tizen System Update + +This repository contains the following projects, kept separately until Tizen 7.0: + + - platform/core/system/tota-ua, the "Tizen OTA Upgrade Agent", a utility to + apply deltas + + - platform/core/system/system-rw-update, systemd units to handle the Read-Write + stage of the upgrade + + - platform/core/system/libtota, a library handling legacy upgrade types (delta_fs) + + +Tizen System Update is one of the Tizen features. It implements +the firmware update functionality based on the OTA mechanism. + +* Notice of Limitations + This functionality does NOT support an official signing & validation method. +This means it requires a proper mechanism for binary validation offered by each +manufacturer. diff --git a/data/40-upgrade.list.in b/data/40-upgrade.list.in new file mode 100644 index 0000000..ad15233 --- /dev/null +++ b/data/40-upgrade.list.in @@ -0,0 +1,23 @@ +# ---- Target contents ----------------------------------------------------- # +WITHLIBS=" +${CMAKE_INSTALL_LIBDIR}/libcrypto.so.1.1 +${CMAKE_INSTALL_LIBDIR}/libtota.so.1.0.0 +/usr/bin/verity_handler +/usr/sbin/img-verifier +/bin/sed +/bin/findmnt +/bin/dd +/bin/grep +/bin/blkid-print +" + +DIRECTORIES=" +@IMG_VERIFIER_ROOT_CA_DIR@ +" + +VERBATIMS=" +@IMG_VERIFIER_ROOT_CA_DIR@/*.pem +/usr/bin/clone_partitions.sh +/usr/libexec/upgrade-support/upgrade-common.inc +/usr/libexec/upgrade-support/upgrade-prepare-partitions.sh +" diff --git a/data/CMakeLists.txt b/data/CMakeLists.txt new file mode 100644 index 0000000..13c8be0 --- /dev/null +++ b/data/CMakeLists.txt @@ -0,0 +1,2 @@ +configure_file(40-upgrade.list.in 40-upgrade.list) +install(FILES 40-upgrade.list DESTINATION ${UPGRADE_INITRD_LIST_DIR}) diff --git a/packaging/upgrade-engine.spec b/packaging/upgrade-engine.spec new file mode 100644 index 0000000..6a4696a --- /dev/null +++ b/packaging/upgrade-engine.spec @@ -0,0 +1,93 @@ +%define upgrade_support_dir %{_libexecdir}/upgrade-support + +Name: upgrade-engine +Summary: Upgrade engine for Tizen +Version: 7.5.0 +Release: 0 +Group: System +License: Apache-2.0 +Source0: %{name}-%{version}.tar.gz + +BuildRequires: cmake +BuildRequires: ca-certificates-devel +BuildRequires: pkgconfig(liblzma-tool) +BuildRequires: libbrotli-devel +BuildRequires: pkgconfig(openssl1.1) +BuildRequires: pkgconfig(libtzplatform-config) +BuildRequires: pkgconfig(dlog) +BuildRequires: pkgconfig(glib-2.0) +BuildRequires: pkgconfig(hal-api-device) +BuildRequires: libtar-devel + +Requires: tar +Requires: gzip + +%description +Update engine for updating Tizen platform images using delta files +generated by upgrade-tools. + +%prep +%setup -q + +%build +export LDFLAGS+="-Wl,--rpath=%{_libdir} -Wl,--as-needed" +LDFLAGS="$LDFLAGS" + +%define system_bus_services_dir /usr/share/dbus-1/system-services +%define systemd_dbus_conf_dir /etc/dbus-1/system.d + +# You must set the CA directory in the RO section to prevent manipulation +%define img_verifier_root_ca_dir %{TZ_SYS_RO_CA_DIR}/img-verifier +%define upgrade_initrd_list_dir %{_datadir}/initrd-recovery/initrd.list.d + +%cmake \ + -DCMAKE_INSTALL_PREFIX=%{_prefix} \ + -DIMG_VERIFIER_ROOT_CA_DIR=%{img_verifier_root_ca_dir} \ + -DUPGRADE_INITRD_LIST_DIR=%{upgrade_initrd_list_dir} \ + .
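+# Note: IMG_VERIFIER_ROOT_CA_DIR above is compiled into img-verifier as the directory it scans for root CA certificates, and UPGRADE_INITRD_LIST_DIR is where data/40-upgrade.list gets installed.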
+ +make %{?jobs:-j%jobs} + +%install +%make_install + +mkdir -p %{buildroot}%{upgrade_support_dir} +install -m 755 scripts/upgrade-support/* %{buildroot}%{upgrade_support_dir} + +%define fota_dir /opt/usr/data/fota +mkdir -p %{buildroot}%{fota_dir} +mkdir -p %{buildroot}%{img_verifier_root_ca_dir} +mkdir -p %{buildroot}%{_unitdir}/multi-user.target.wants + +install -m 644 scripts/clone_partitions/clone_partitions.service %{buildroot}%{_unitdir} +ln -s ../clone_partitions.service %{buildroot}%{_unitdir}/multi-user.target.wants/ +install -m 775 scripts/clone_partitions/clone_partitions.sh %{buildroot}%{_bindir} + +mkdir -p %{buildroot}%{_unitdir}/recovery.service.wants +install -m 644 scripts/clone_partitions/clone_partitions_recovery.service %{buildroot}%{_unitdir} +ln -s ../clone_partitions_recovery.service %{buildroot}%{_unitdir}/recovery.service.wants/ + +%files +%license LICENSE +%manifest upgrade.manifest +%doc README +%attr(775, root, system_fw) %{fota_dir} +%attr(700,-,-) %{upgrade_initrd_list_dir}/40-upgrade.list +%defattr(-,root,root,-) +%{_bindir}/clone_partitions.sh +%attr(644, root, root) %{_unitdir}/clone_partitions.service +%{_unitdir}/multi-user.target.wants/clone_partitions.service +%attr(644, root, root) %{_unitdir}/clone_partitions_recovery.service +%{_unitdir}/recovery.service.wants/clone_partitions_recovery.service +%{upgrade_support_dir}/upgrade-trigger.sh +%{upgrade_support_dir}/upgrade-partial.sh +%{upgrade_support_dir}/upgrade-prepare-partitions.sh +%{upgrade_support_dir}/upgrade-fota.sh +%{upgrade_support_dir}/upgrade-legacy.sh +%{upgrade_support_dir}/upgrade-common.inc +%{_bindir}/upgrade-apply +%{_bindir}/verity_handler +%{_bindir}/blkid-print +# Image verifier +%{_sbindir}/img-verifier +%attr(755,root,root) %{img_verifier_root_ca_dir} diff --git a/scripts/clone_partitions/clone_partitions.service b/scripts/clone_partitions/clone_partitions.service new file mode 100644 index 0000000..e296d5b --- /dev/null +++ b/scripts/clone_partitions/clone_partitions.service @@ -0,0 +1,15 @@ +[Unit] +Description=Clone partitions +After=system-delayed-target-done.service + +[Service] +Type=simple +SmackProcessLabel=System +ExecStart=/usr/bin/clone_partitions.sh background +RemainAfterExit=true +Nice=19 +IOSchedulingClass=idle +IOSchedulingPriority=7 + +[Install] +WantedBy=multi-user.target diff --git a/scripts/clone_partitions/clone_partitions.sh b/scripts/clone_partitions/clone_partitions.sh new file mode 100644 index 0000000..167394d --- /dev/null +++ b/scripts/clone_partitions/clone_partitions.sh @@ -0,0 +1,112 @@ +#!/bin/bash +PATH=/bin:/usr/bin:/sbin:/usr/sbin +FLOCK_PATH="/var/lock/clone_partitions.lock" + +SCRIPT_NAME="clone_partitions.sh" + +#------------------------------------------------ +# critical_log msg [file] +#------------------------------------------------ +critical_log() { + # log format: [script_name][tag]actual_log + LOG="[${SCRIPT_NAME}]$1" + dlogsend -k "$LOG" + if [ "$2" != "" ]; then + echo "$LOG" >> "$2" + fi + echo "$LOG" + + return 0; +} + +#------------------------------------------------ +# log msg [file] +#------------------------------------------------ +log() { + # log format: [script_name][tag]actual_log + LOG="[${SCRIPT_NAME}]$1" + if [ "$2" != "" ]; then + echo "$LOG" >> "$2" + fi + echo "$LOG" + + return 0; +} + +do_clone() { + . 
/usr/libexec/upgrade-support/upgrade-common.inc + + check_ab_partition_scheme + check_used_block_device + load_background_copy_list + if background_copy; then + device_board_set_partition_ab_cloned + log "[Info] Partitions have been cloned" + return 0 + else + critical_log "[Error] Partitions have not been cloned" + return 1 + fi +} + +set_partition_status_ok(){ + CURRENT_AB=$(device_board_get_current_partition) + + if [ "$CURRENT_AB" == "a" ]; then + OPPOSITE_AB="b" + elif [ "$CURRENT_AB" == "b" ]; then + OPPOSITE_AB="a" + else + exit 1 + fi + + device_board_set_partition_status $OPPOSITE_AB "ok" +} + +set -o errexit +trap 'echo "Aborting due to errexit on ${0##*/}:$LINENO. Exit code: $?" >&2' ERR +set -o errtrace -e -o pipefail + +if [ ! -z "${UPGRADE_DEBUG}" ]; then + set -x +fi + +( +flock 9 +case $1 in + "recovery") + /bin/device_board_clear_partition_ab_cloned + do_clone + set_partition_status_ok + ;; + "background") + # wait for 60 seconds to wait for bootup + sleep 60 + + BOOTMODE=$(device_board_get_boot_mode) + RET_BOOT_MODE=$? + if [ ${RET_BOOT_MODE} -ne 0 ]; then + critical_log "[Error] Cannot get boot mode: ${RET_BOOT_MODE}" + exit 1 + fi + + if [ "${BOOTMODE}" != "normal" ]; then + log "[Info] bootmode is not \"normal\": ${BOOTMODE}" + exit 0 + fi + + if [ "$(device_board_get_partition_ab_cloned)" -eq "1" ]; then + log "[Info] Partitions already cloned" + else + do_clone + fi + ;; + *) + if [ "$(device_board_get_partition_ab_cloned)" -eq "1" ]; then + log "[Info] Partitions already cloned" + else + do_clone + fi + ;; +esac +) 9> "$FLOCK_PATH" diff --git a/scripts/clone_partitions/clone_partitions_recovery.service b/scripts/clone_partitions/clone_partitions_recovery.service new file mode 100644 index 0000000..7f9d982 --- /dev/null +++ b/scripts/clone_partitions/clone_partitions_recovery.service @@ -0,0 +1,15 @@ +[Unit] +Description=Clone partitions in the recovery mode +DefaultDependencies=no +After=sysinit.target +Before=recovery.service +ConditionKernelCommandLine=bootmode=recovery + +[Service] +Type=oneshot +SmackProcessLabel=System +ExecStart=/usr/bin/clone_partitions.sh recovery +RemainAfterExit=true + +[Install] +WantedBy=recovery.service diff --git a/scripts/upgrade-support/upgrade-common.inc b/scripts/upgrade-support/upgrade-common.inc new file mode 100644 index 0000000..57ea61f --- /dev/null +++ b/scripts/upgrade-support/upgrade-common.inc @@ -0,0 +1,342 @@ +IMG_VERIFIER="/usr/sbin/img-verifier" +STATUS_DIR="/opt/data/update" +HAL_UPGRADE_CFG_DIR="/hal/etc/upgrade/" +HAL_PART_MAP_FILE="label_map.list" +HAL_PART_LIST_FILE="background_copy.list" +CONFIG_FILE="update.cfg" +SET_UPGRADE_STATUS="/usr/bin/device_board_set_upgrade_status" +DO_RW_UPDATE_FILE="$FOTA_UPDATE_PREFIX/opt/.do_rw_update" +TRUE=0 +FALSE=1 + +#------------------------------------------------ +# critical_log msg [file] +#------------------------------------------------ +critical_log() { + # log format: [script_name][tag]actual_log + LOG="[${SCRIPT_NAME}]$1" + dlogsend -k "$LOG" + if [ "$2" != "" ]; then + echo "$LOG" >> "$2" + fi + echo "$LOG" + + return 0 +} + +critical_flog() { + critical_log "$1" "$LOG_FILE" +} + +#------------------------------------------------ +# log msg [file] +#------------------------------------------------ +log() { + # log format: [script_name][tag]actual_log + LOG="[${SCRIPT_NAME}]$1" + if [ "$2" != "" ]; then + echo "$LOG" >> "$2" + fi + echo "$LOG" + + return 0 +} + +flog() { + log "$1" "$LOG_FILE" +} + +untrap() { + trap '' ERR +} + +retrap() { + trap 'echo "Aborting due 
to errexit on ${0##*/}:$LINENO. Exit code: $?" >&2' ERR +} + +verify_file() { + FILE_NAME="$1" + FILE_PATH="$FOTA_DIR/$FILE_NAME" + VALID_CHECKSUM=$(awk "/$FILE_NAME/ {print \$1}" "$FOTA_DIR/checksum.SHA1") + if [ "$VALID_CHECKSUM" == "" ]; then + echo "[Error] No $FILE_NAME in checksum.SHA1" + return $FALSE + fi + + if ! echo "$VALID_CHECKSUM $FILE_PATH" | sha1sum --check --status; then + echo "[Error] Checksum for file $FILE_NAME is invalid" + return $FALSE + fi + + echo "[Info] Checksum of $FILE_NAME is OK" + return $TRUE +} + +unpack_file() { + ARCHIVE_NAME="$1" + FILE_NAME="$2" + tar xpf "${ARCHIVE_NAME}" -C "${FOTA_DIR}" "${FILE_NAME}" 2> /dev/null + if [ ! -e "${FOTA_DIR}/${FILE_NAME}" ]; then + flog "[Error] There is no ${FILE_NAME}" + fi + + if ! verify_file "$FILE_NAME"; then + exit_error + fi +} + +verify_img() { + DELTA_FILE="$1" + if [ -e "$IMG_VERIFIER" ]; then + log "[Info] Package verifier is found. Verify $DELTA_FILE" "$LOG_FILE" + if ! "$IMG_VERIFIER" -i "$DELTA_FILE" -l "/opt/var/log/last_iv.log"; then + log "[Error] Update package verification FAILED..." "$LOG_FILE" + echo 5 > "$STATUS_DIR"/result + exit_error + else + log "[Info] Update package verification PASSED!" "$LOG_FILE" + fi + fi +} + +check_ab_partition_scheme() { + CURRENT_AB="$(/bin/sed -E 's|.*(partition_ab=)([a-b]).*|\2|' /proc/cmdline)" + if [ "$CURRENT_AB" != "a" ] && [ "$CURRENT_AB" != "b" ]; then + flog "[Info] There is no A/B partition scheme" + exit_error + fi + NEXT_AB="b" + if [ "$CURRENT_AB" == "b" ]; then + NEXT_AB="a" + fi +} + +check_used_block_device() { + local MAPPER_MATCH_REGEX='.*/dev/mapper.*' + ROOTFS_DEVICE="$(findmnt / -no SOURCE)" + MAPPER_DEVICE="" + if [[ "${ROOTFS_DEVICE}" =~ ${MAPPER_MATCH_REGEX} ]]; then + MAPPER_DEVICE="${ROOTFS_DEVICE}" + DM_NAME=$(basename "${MAPPER_DEVICE}") + DM_N=$(dmsetup ls -o blkdevname | grep "${DM_NAME}" | sed -e 's/^.*(\([^)]\+\))/\1/') + ROOTFS_DEVICE="/dev/$(ls /sys/class/block/${DM_N}/slaves/)" + fi + + if ! EMMC_DEVICE="/dev/$(/bin/lsblk -ndo pkname "${ROOTFS_DEVICE}")" ||\ + [ "$EMMC_DEVICE" == "/dev/" ]; then + flog "[Error] Unable to find used block device: $EMMC_DEVICE, for /" + exit_error + fi +} + +background_copy() { + flog "[Info] Background copying A|B partitions for update..." + for partition_name in ${PARTITION_LIST}; do + # echo is there to suspend abort when partition will not be found e.g. hal + if ! CURRENT_PARTITION="$(/usr/bin/blkid-print "$EMMC_DEVICE" "$partition_name" "$CURRENT_AB" |\ + sed -E 's|(part_nr: [0-9]+ )\((.*)\): (.*)|\3|' || echo "__FALSE__")" || \ + [ "$CURRENT_PARTITION" == "__FALSE__" ]; then + flog "[Error] Unable to find: $partition_name current partition on $EMMC_DEVICE device on $CURRENT_AB slot" + check_optional_partition "$partition_name" 1 + continue + fi + if ! 
NEXT_PARTITION="$(/usr/bin/blkid-print "$EMMC_DEVICE" "$partition_name" "$NEXT_AB" |\ + sed -E 's|(part_nr: [0-9]+ )\((.*)\): (.*)|\3|')"; then + flog "[Error] Unable to find: $partition_name next partition on $EMMC_DEVICE device on $CURRENT_AB slot" + check_optional_partition "$partition_name" 1 + continue + fi + if [ "$CURRENT_PARTITION" == "" ] || [ "$NEXT_PARTITION" = "" ]; then + flog "[Error] current: $CURRENT_PARTITION or next: $NEXT_PARTITION partition is empty on $EMMC_DEVICE device" + return $FALSE + fi + if [ "$CURRENT_PARTITION" == "$NEXT_PARTITION" ]; then + flog "[Info] $partition_name partition current and next are the same: $CURRENT_PARTITION on $EMMC_DEVICE device" + continue + fi + flog "[Info] Background copy $partition_name, from: $CURRENT_PARTITION to $NEXT_PARTITION" + /bin/dd if="$CURRENT_PARTITION" of="$NEXT_PARTITION" bs=4096 + flog "[Info] Finished background copy $partition_name from $CURRENT_PARTITION to $NEXT_PARTITION" + + if [ "$partition_name" == "rootfs" ]; then + flog "[Info] Checksum verification for $partition_name" + CUR_SHA1=$(sha1sum "$CURRENT_PARTITION" | awk '{print $1}') + NEXT_SHA1=$(sha1sum "$NEXT_PARTITION" | awk '{print $1}') + if [ "$CUR_SHA1" == "$NEXT_SHA1" ]; then + flog "[Info] Partition $partition_name was cloned correctly" + else + flog "[Error] Checksums are different: $CUR_SHA1 != $NEXT_SHA1" + exit_error + fi + fi + done + return $TRUE +} + +load_background_copy_list() { + PARTITION_LIST=$(sed -e '/^#/d' -e '/^$/d' "${HAL_UPGRADE_CFG_DIR}/${HAL_PART_LIST_FILE}") +} + +upgrade_images() { + DELTA_TAR="$1" + flog "[Info] Flash images for update..." + + LABEL_MAP_PATH=${HAL_UPGRADE_CFG_DIR}/${HAL_} + while read LABEL PARTLABEL; do + declare "LABEL_MAP_${LABEL}=${PARTLABEL}" + done < <(sed -e '/^#/d' -e '/^$/d' "${LABEL_MAP_PATH}/${HAL_PART_MAP_FILE}") + + # _OFFSET _SIZE _HASH1 _HASH2 + while read -r LABEL_NAME DELTA_NAME TYPE DEV OFFSET OLD_SIZE NEW_SIZE OLD_SHA NEW_SHA + do + local LABEL_NAME="$(echo "$LABEL_NAME" | /bin/awk '{print tolower($0)}')" + + # Translate LABEL to PARTLABEL using label_map.list + local TMP="LABEL_MAP_${LABEL_NAME}" + local PART_NAME=${!TMP} + if [ -z "${PART_NAME}" ]; then + PART_NAME=${LABEL_NAME} + fi + + TYPE_S=(${TYPE//:/ }) + # We support only FULL_IMAGE and DELTA_IMAGE + if [[ ! "${TYPE_S[0]}" =~ ^(FULL_IMAGE|DELTA_IMAGE)$ ]]; then + PARTS_NAME_TO_UPDATE+=( "$PART_NAME:$TYPE" ) + continue + fi + + if ! /bin/tar tf "$DELTA_TAR" "$DELTA_NAME"; then + flog "[Info] There is no delta $DELTA_NAME for label $LABEL_NAME from part $PART_NAME" + continue + fi + + local NEXT_PARTITION="$(/usr/bin/blkid-print "$EMMC_DEVICE" "$PART_NAME" "$NEXT_AB" |\ + /bin/sed -E 's|(part_nr: [0-9]+ )\((.*)\): (.*)|\3|')" + local CURR_PARTITION="$(/usr/bin/blkid-print "$EMMC_DEVICE" "$PART_NAME" "$CURRENT_AB" |\ + /bin/sed -E 's|(part_nr: [0-9]+ )\((.*)\): (.*)|\3|')" + + flog "[Info] Flashing $DELTA_NAME... to $NEXT_PARTITION" + + local UP_RES + untrap + case "${TYPE_S[0]}" in + FULL_IMAGE) + "$FOTA_DIR/upgrade-apply" --archive "$DELTA_TAR" \ + --dest "$NEXT_PARTITION" \ + --dest-size "$NEW_SIZE" \ + --archive-file "$DELTA_NAME" \ + --kind raw \ + --dest-sha1 "$NEW_SHA" && UP_RES=$? || UP_RES=$? + ;; + DELTA_IMAGE) + "$FOTA_DIR/upgrade-apply" --archive "$DELTA_TAR" \ + --dest "$NEXT_PARTITION" \ + --dest-size "$NEW_SIZE" \ + --archive-file "$DELTA_NAME" \ + --kind ss_brotli_patch \ + --patch-orig "$CURR_PARTITION" \ + --dest-sha1 "$NEW_SHA" && UP_RES=$? || UP_RES=$? 
+ ;; + esac + retrap + case $UP_RES in + 0) + flog "[Info] Finished flashing $DELTA_NAME... to $NEXT_PARTITION" + ;; + *) + flog "[Info] Flashing error $DELTA_NAME... to $NEXT_PARTITION" + return $FALSE + ;; + esac + done < "$FOTA_DIR/$CONFIG_FILE" +} + +run_setup_script() { + DELTA_TAR="$1" + SETUP_SCRIPT_NAME=setup.sh + SETUP_SCRIPT_PATH=$FOTA_DIR/$SETUP_SCRIPT_NAME + + /bin/tar xvfp "$DELTA_TAR" -C "$FOTA_DIR" "$SETUP_SCRIPT_NAME" 2>/dev/null ||\ + (log "[Info] setup.sh does not exist, skipping." "$LOG_FILE" && return) + + if [ -e "$SETUP_SCRIPT_PATH" ]; then + /bin/sh "$SETUP_SCRIPT_PATH" + rm "$SETUP_SCRIPT_PATH" + fi +} + +write_version_info() { + VERSION_FILE="$1" + if [ -f "$VERSION_FILE" ]; then + return + fi + OLD_VER=$(/bin/grep platform.version\" /etc/config/model-config.xml \ + | sed -e 's/.*>\(.*\)<.*/\1/' | head -1) + i=0 + VER=(0 0 0 0) + for ENT in $(echo "$OLD_VER" | tr "." "\n"); do + VER[$i]=$ENT + ((i++)) + done + CVT_VER=${VER[0]}.${VER[1]}.${VER[2]}.${VER[3]} + + echo "OLD_VER=$CVT_VER" > "$VERSION_FILE" +} + +set_upgrade_status() { + VALUE="$1" + ${SET_UPGRADE_STATUS} ${VALUE} + if [ $? -eq 0 ]; then + critical_log "set_upgrade_status success: ${VALUE}" "$LOG_FILE" + else + critical_log "set_upgrade_status failed: ${VALUE}" "$LOG_FILE" + fi +} + +exit_error() { + set_upgrade_status -1 + exit 1 +} + +mount_partition() { + local GPT_LABEL="$1" + local DST="$2" + local MOUNT_OPTIONS="$3" + local SRC + if ! SRC="$(/usr/bin/blkid-print "$EMMC_DEVICE" "$GPT_LABEL" "$NEXT_AB" |\ + /bin/sed -E 's|(part_nr: [0-9]+ )\((.*)\): (.*)|\3|')"; then + flog "[Error] Unable to find $GPT_LABEL partition on $EMMC_DEVICE device for $NEXT_AB slot" + return 1 + fi + local MOUNTED="$(/bin/findmnt -n -o TARGET "$SRC" | /bin/head -n 1 || echo "")" + if [ "$MOUNTED" != "" ]; then + MOUNT_OPTIONS="bind,${MOUNT_OPTIONS}" + SRC="$MOUNTED" + fi + if ! /bin/mkdir -p "$DST"; then + flog "[Error] Unable to mkdir for mount destination: $DST" + return 1 + fi + if ! /bin/mount -o "$MOUNT_OPTIONS" "$SRC" "$DST"; then + flog "[Error] Unable to mount $SRC to $DST, options: $MOUNT_OPTIONS" + return 1 + fi + CLEANUP_PARTITION+=("$DST") + return 0 +} + +cleanup_partitions() { + # umount in reverse order + for (( idx=${#CLEANUP_PARTITION[@]}-1 ; idx>=0 ; idx-- )) ; do + local DST="${CLEANUP_PARTITION[idx]}" + if [ "$DST" == "" ]; then + flog "[Error] Partition for cleanup is empty, idx: $idx, partition count: ${#CLEANUP_PARTITION[@]}" + continue + fi + if ! /bin/umount "$DST"; then + flog "[Error] Unable to umount $DST" + fi + + done + CLEANUP_PARTITION=() +} diff --git a/scripts/upgrade-support/upgrade-fota.sh b/scripts/upgrade-support/upgrade-fota.sh new file mode 100755 index 0000000..b0917e8 --- /dev/null +++ b/scripts/upgrade-support/upgrade-fota.sh @@ -0,0 +1,88 @@ +#!/bin/bash +set -o errexit +trap 'echo "Aborting due to errexit on ${0##*/}:$LINENO. Exit code: $?" >&2' ERR +set -o errtrace -e -o pipefail + + +if [ ! -z "${UPGRADE_DEBUG}" ]; then + set -x +fi + +PATH=/bin:/usr/bin:/sbin:/usr/sbin +FOTA_DIR="/opt/usr/data/fota" + +. 
"$FOTA_DIR"/upgrade-common.inc + +FOTA_UPDATE_PREFIX="/run/upgrade-sysroot" +VERSION_FILE="$FOTA_UPDATE_PREFIX/opt/etc/version" + +SCRIPT_NAME="upgrade-fota.sh" +LOG_FILE="/tmp/update-fota.log" + +cleanup() { + cleanup_partitions + cleanup_files +} + +cleanup_files() { + rm -f -- "$FOTA_DIR"/upgrade-common.inc + rm -f -- "$FOTA_DIR/$SCRIPT_NAME" + rm -f -- "$FOTA_DIR/$CONFIG_FILE" + rm -f -- "$FOTA_DIR/upgrade-trigger.sh" + rm -f -- "$FOTA_DIR/checksum.SHA1" +} + +mount() { + # We need to mount the system-data partition to update /opt/etc/version and + # create a .do_rw_update file + mkdir -p "$FOTA_UPDATE_PREFIX/opt" + + if ! mount_partition system-data "$FOTA_UPDATE_PREFIX/opt" "rw"; then + critical_flog "[Error] Unable to mount opt" + return 1 + fi + return 0 +} + +reboot_to_fota() { + flog "[Info] Write paths..." + touch "$DO_RW_UPDATE_FILE" + flog "[Info] calling sync" + + flog "[Info] Switching board partition from $CURRENT_AB, to $NEXT_AB" + if ! device_board_switch_partition $NEXT_AB; then + critical_flog "[Error] Failed to switch board slot to $NEXT_AB" + exit 1 + fi + cleanup_files + /bin/sync + flog "[Info] Rebooting to fota" + if ! /sbin/reboot fota; then + critical_flog "[Error] Failed to reboot fota" + exit 1 + fi +} + +prepare_fota_update_dir() { + mkdir -p ${FOTA_UPDATE_PREFIX} +} + +prepare_fota_update_dir +check_ab_partition_scheme +check_used_block_device +if ! mount; then + cleanup + exit 1 +fi +if ! write_version_info "$VERSION_FILE"; then + cleanup + exit 1 +fi + +if ! reboot_to_fota; then + cleanup + exit 1 +fi +cleanup + +critical_log "RO update: $SCRIPT_NAME success" "$LOG_FILE" diff --git a/scripts/upgrade-support/upgrade-legacy.sh b/scripts/upgrade-support/upgrade-legacy.sh new file mode 100644 index 0000000..be16f1f --- /dev/null +++ b/scripts/upgrade-support/upgrade-legacy.sh @@ -0,0 +1,116 @@ +#!/bin/bash +# This script handles non-A/B upgrade, with flashing RO & RW after reboot in 'bootmode=fota' +set -o errexit +trap 'echo "Aborting due to errexit on ${0##*/}:$LINENO. Exit code: $?" >&2' ERR +set -o errtrace -e -o pipefail + +PATH=/bin:/usr/bin:/sbin:/usr/sbin +FOTA_DIR="/opt/usr/data/fota" +STATUS_DIR="/opt/data/update" +DOWNLOAD_DELTA=$1 + +. "$FOTA_DIR"/upgrade-common.inc + +SCRIPT_NAME="upgrade-trigger.sh" +LOG_FILE="/tmp/upgrade-trigger.log" + + +if [ "$#" != "1" ] || [ ! -f "$1" ]; then + log "[Error] Usage: $0 path_to_delta.tar[.gz]" "$LOG_FILE" + exit 1 +fi + +if tar ztf "$DOWNLOAD_DELTA" 1>/dev/null 2>&1; then + DELTA_TAR="$FOTA_DIR/delta.tar.gz" + DELTA_TAR_GZ="TRUE" +elif tar tf "$DOWNLOAD_DELTA" 1>/dev/null 2>&1; then + DELTA_TAR="$FOTA_DIR/delta.tar" +else + log "[Error] Usage $0 path_to_delta.tar[.gz]" "$LOG_FILE" + exit 1 +fi + +log "[Info] Using <$DELTA_TAR> delta file." "$LOG_FILE" + +flash_pre_image() { + log "[Info] Flash images for update..." "$LOG_FILE" + CONFIG_FILE="update.cfg" + + /bin/tar xvfp "$DELTA_TAR" -C "$FOTA_DIR" "$CONFIG_FILE" + if [ ! -e "$FOTA_DIR/$CONFIG_FILE" ]; then + critical_log "[Error] There is no $CONFIG_FILE" "$LOG_FILE" + return + fi + + while read -r PART_NAME DELTA_NAME TYPE DEV _OFFSET _SIZE _HASH1 _HASH2 + do + if [ "$TYPE" = "FULL_IMAGE:BEFORE_BOOT_FOTA" ]; then + if ! 
/bin/tar tf "$DELTA_TAR" "$DELTA_NAME"; then + log "[Info] There is no delta for $PART_NAME" "$LOG_FILE" + continue + fi + + EMMC_DEVICE="/dev/mmcblk0" + DEV_NUM=$(/sbin/partx -s "$EMMC_DEVICE" | sed "\|$PART_NAME|!d") + if [ "$DEV_NUM" != "" ]; then + DEV_NUM="$(echo "$DEV_NUM"| \ + { read -r NUM REST; echo "$NUM"; })" + fi + if [ "$DEV_NUM" == "" ]; then + DEV_NUM=$(/sbin/blkid -L "$PART_NAME" -o device -l | sed "\|$EMMC_DEVICE|!d" | \ + sed 's/\/dev\/mmcblk0p//') + fi + if [ "$DEV_NUM" != "" ]; then + DEV="${EMMC_DEVICE}p${DEV_NUM}" + fi + + log "[Info] Flashing $DELTA_NAME... to $DEV" "$LOG_FILE" + /bin/tar xOf "$DELTA_TAR" "$DELTA_NAME" > "$DEV" + fi + done < "$FOTA_DIR/$CONFIG_FILE" +} + +if [ "$DOWNLOAD_DELTA" -ef "$DELTA_TAR" ]; then + log "[Info] delta.tar[.gz] is already placed in correct directory." "$LOG_FILE" +else + log "[Info] Copy delta.tar[.gz]..." "$LOG_FILE" + /bin/cp "$DOWNLOAD_DELTA" "$DELTA_TAR" +fi + +# delta.tar.gz has to be uncompressed to .tar beacause ua and libtota does not support .tar.gz +if [ "$DELTA_TAR_GZ" == "TRUE" ]; then + gunzip -c "$DOWNLOAD_DELTA" > "$FOTA_DIR/delta.tar" + rm "$DELTA_TAR" + DELTA_TAR="$FOTA_DIR/delta.tar" +fi + +sync + +# Run setup script if exist +run_setup_script "$DELTA_TAR" + +# Flash images +# - in case of some new image was included +# - in case of some image should be flashed before update +flash_pre_image + +# Extract delta.ua +log "[Info] Extract delta.ua..." "$LOG_FILE" +unpack_file "$DELTA_TAR" delta.ua + +# FOTA: /usr/bin/rw-update-prepare.sh + +# Write delta saved path +log "[Info] Write paths..." "$LOG_FILE" +echo "$FOTA_DIR" > "$STATUS_DIR/DELTA.PATH" +dirname "$DOWNLOAD_DELTA" > "$STATUS_DIR/DOWNLOAD.PATH" +sync + +# go to fota mode +log "[Info] Go TOTA update..." "$LOG_FILE" +if ! /sbin/reboot fota; then + critical_log "[Error] Failed to reboot fota" "$LOG_FILE" + exit 100 +fi + +critical_log "RO update: $SCRIPT_NAME success" "$LOG_FILE" diff --git a/scripts/upgrade-support/upgrade-partial.sh b/scripts/upgrade-support/upgrade-partial.sh new file mode 100755 index 0000000..ea6aa7d --- /dev/null +++ b/scripts/upgrade-support/upgrade-partial.sh @@ -0,0 +1,66 @@ +#!/bin/bash +set -o errexit +trap 'echo "Aborting due to errexit on ${0##*/}:$LINENO. Exit code: $?" >&2' ERR +set -o errtrace -e -o pipefail + +if [ ! -z "${UPGRADE_DEBUG}" ]; then + set -x +fi + +PATH=/bin:/usr/bin:/sbin:/usr/sbin +FOTA_DIR="/opt/usr/data/fota" + +. "$FOTA_DIR"/upgrade-common.inc + +SCRIPT_NAME="upgrade-partial.sh" +LOG_FILE="/tmp/upgrade-partial.log" + +check_args() { + DELTA_TAR="$1" + if [ ! -f "$DELTA_TAR" ]; then + flog "[Error] Usage: $0 path_to_delta.tar[.gz]" + exit 1 + fi + + flog "[Info] Using <$DELTA_TAR> delta file." +} + +FOTA_UPDATE_PREFIX="/run/upgrade-sysroot" + +cleanup() { + cleanup_partitions + cleanup_files +} + +cleanup_files() { + rm -f -- "$DELTA_TAR" + rm -f -- "$FOTA_DIR/upgrade-apply" + rm -f -- "$FOTA_DIR/$SCRIPT_NAME" +} + +prepare_fota_update_dir() { + mkdir -p ${FOTA_UPDATE_PREFIX} +} + +prepare_fota_update_dir +check_ab_partition_scheme +check_used_block_device +check_args "$@" +unpack_file "$DELTA_TAR" "$CONFIG_FILE" +unpack_file "$DELTA_TAR" "upgrade-apply" +/bin/chmod +x "$FOTA_DIR/upgrade-apply" +if ! upgrade_images "$DELTA_TAR"; then + critical_flog "[Error] Unable to upgrade_images" + cleanup + exit_error +fi + +if ! 
run_setup_script "$DELTA_TAR"; then + critical_flog "[Error] Unable to run_setup_script" + cleanup + exit_error +fi + +cleanup + +critical_log "RO update: $SCRIPT_NAME success" "$LOG_FILE" diff --git a/scripts/upgrade-support/upgrade-prepare-partitions.sh b/scripts/upgrade-support/upgrade-prepare-partitions.sh new file mode 100755 index 0000000..0b0761a --- /dev/null +++ b/scripts/upgrade-support/upgrade-prepare-partitions.sh @@ -0,0 +1,43 @@ +#!/bin/bash +set -o errexit +trap 'echo "Aborting due to errexit on ${0##*/}:$LINENO. Exit code: $?" >&2' ERR +set -o errtrace -e -o pipefail + +if [ ! -z "${UPGRADE_DEBUG}" ]; then + set -x +fi + +PATH=/bin:/usr/bin:/sbin:/usr/sbin + +FOTA_DIR="/opt/usr/data/fota" + +. "$FOTA_DIR"/upgrade-common.inc + +SCRIPT_NAME="upgrade-prepare-partitions.sh" +LOG_FILE="/tmp/upgrade-prepare-partitions.log" + +check_optional_partition() { + partition_name="$1" + exit_code="$2" + optional_partitions=(hal) + for part in "${optional_partitions[@]}"; do + if [ "$partition_name" == "$part" ]; then + return + fi + done + exit "$exit_code" +} + +cleanup_files() { + rm -f -- "$FOTA_DIR/$SCRIPT_NAME" +} + +check_ab_partition_scheme +check_used_block_device +load_background_copy_list +if ! background_copy; then + exit_error +fi +cleanup_files + +critical_log "RO update preparation: $SCRIPT_NAME success" "$LOG_FILE" diff --git a/scripts/upgrade-support/upgrade-trigger.sh b/scripts/upgrade-support/upgrade-trigger.sh new file mode 100644 index 0000000..f62f81b --- /dev/null +++ b/scripts/upgrade-support/upgrade-trigger.sh @@ -0,0 +1,148 @@ +#!/bin/bash +# Master script for upgrade. Called by update-manager/manually to start upgrade procedure. +set -o errexit +trap 'echo "Aborting due to errexit on ${0##*/}:$LINENO. Exit code: $?" >&2' ERR +set -o errtrace -e -o pipefail + +if [ ! -z "${UPGRADE_DEBUG}" ]; then + set -x +fi + +PATH=/bin:/usr/bin:/sbin:/usr/sbin +FOTA_DIR="/opt/usr/data/fota" +STATUS_DIR="/opt/data/update" +DOWNLOAD_DELTA="$1" +SCRIPT_NAME="upgrade-trigger.sh" +LOG_FILE="/tmp/upgrade-trigger.log" +SCRIPT_UPGRADE_LEGACY="upgrade-legacy.sh" +SCRIPT_UPGRADE_PREPARE_PARTITIONS="upgrade-prepare-partitions.sh" +SCRIPT_UPGRADE_PARTIAL="upgrade-partial.sh" +SCRIPT_UPGRADE_FOTA="upgrade-fota.sh" +FLOCK_PATH="/var/lock/clone_partitions.lock" + +prepare_fota_dir() { + if [ -d "$FOTA_DIR" ]; then + # Cleanup FOTA_DIR + if [ "$(dirname $DOWNLOAD_DELTA)" = "$FOTA_DIR" ]; then + # If provided delta is from inside the FOTA_DIR, delete everything else + DELTA_FILE_NAME=$(basename $DOWNLOAD_DELTA) + rm -rf -- "$FOTA_DIR/!($DELTA_FILE_NAME)" + else + # If provided delta is from outside the FOTA_DIR delete everything + rm -rf -- "$FOTA_DIR/*" + fi + else + log "[Info] Create fota dir..." "$LOG_FILE" + mkdir -p "$FOTA_DIR" + fi + + if [ ! -d "$STATUS_DIR" ]; then + log "[Info] Create status dir..." "$LOG_FILE" + mkdir -p "$STATUS_DIR" + fi +} + +log() { + # log format: [script_name][tag]actual_log + LOG="[${SCRIPT_NAME}]$1" + if [ "$2" != "" ]; then + echo "$LOG" >> "$2" + fi + echo "$LOG" + + return 0 +} + +is_ab_upgrade() { + if grep -q partition_ab= /proc/cmdline; then + return $TRUE + fi + return $FALSE +} + +do_update() { + if is_ab_upgrade; then + set_upgrade_status 5 + + if [ "$(device_board_get_partition_ab_cloned)" -eq 0 ]; then + unpack_file "${DOWNLOAD_DELTA}" "${SCRIPT_UPGRADE_PREPARE_PARTITIONS}" + if ! 
"${FOTA_DIR}/${SCRIPT_UPGRADE_PREPARE_PARTITIONS}"; then + exit_error + fi + else + echo "[Info] Partitions already cloned" + fi + + set_upgrade_status 20 + + # In the next steps the content of the partitions of the second slot will + # be modified, so they will not be a clone of the current partitions + device_board_clear_partition_ab_cloned + + unpack_file "${DOWNLOAD_DELTA}" "${SCRIPT_UPGRADE_PARTIAL}" + unpack_file "${DOWNLOAD_DELTA}" "${SCRIPT_UPGRADE_FOTA}" + + if ! "${FOTA_DIR}/${SCRIPT_UPGRADE_PARTIAL}" "${DOWNLOAD_DELTA}"; then + exit_error + fi + + set_upgrade_status 40 + + if ! "${FOTA_DIR}/${SCRIPT_UPGRADE_FOTA}"; then + exit_error + fi + + set_upgrade_status 80 + else + # Legacy upgrade (non-A/B) + unpack_file "${DOWNLOAD_DELTA}" "${SCRIPT_UPGRADE_LEGACY}" + "${FOTA_DIR}/${SCRIPT_UPGRADE_LEGACY}" "${DOWNLOAD_DELTA}" + fi +} + +verify_file() { + FILE_PATH="$1" + FILE_NAME=$(basename "$FILE_PATH") + + VALID_CHECKSUM=$(awk "/$FILE_NAME/ {print \$1}" "$FOTA_DIR/checksum.SHA1") + if [ "$VALID_CHECKSUM" == "" ]; then + echo "[Error] No $FILE_NAME in checksum.SHA1" + device_board_set_upgrade_status -1 + exit 1 + fi + + if ! echo "$VALID_CHECKSUM $FILE_PATH" | sha1sum --check --status; then + echo "[Error] Checksum for file $FILE_NAME is invalid" + device_board_set_upgrade_status -1 + exit 1 + fi + + echo "[Info] Checksum of $FILE_NAME is OK" +} + +( +flock 9 + +rm -f "$LOG_FILE" && touch "$LOG_FILE" + +if [ "$#" != "1" ] || [ ! -f "$1" ]; then + log "[Error] Usage: $0 path_to_delta.tar[.gz]" "$LOG_FILE" + exit 1 +fi + +log "[Info] Using <$DOWNLOAD_DELTA> delta file." "$LOG_FILE" + +prepare_fota_dir +tar xfp "$DOWNLOAD_DELTA" -C "$FOTA_DIR" checksum.SHA1 +verify_file "$0" +tar xfp "$DOWNLOAD_DELTA" -C "$FOTA_DIR" upgrade-common.inc +verify_file "$FOTA_DIR/upgrade-common.inc" + +. "$FOTA_DIR"/upgrade-common.inc + +set_upgrade_status 1 +verify_img "${DOWNLOAD_DELTA}" +do_update + +critical_log "RO update: $SCRIPT_NAME success" "$LOG_FILE" +) 9> "$FLOCK_PATH" diff --git a/src/blkid-print/CMakeLists.txt b/src/blkid-print/CMakeLists.txt new file mode 100644 index 0000000..7916889 --- /dev/null +++ b/src/blkid-print/CMakeLists.txt @@ -0,0 +1,13 @@ +INCLUDE(FindPkgConfig) + +pkg_check_modules(blkid_pkgs REQUIRED blkid) +SET(BLKIDSRCS blkid-print.c) +SET(BLKIDEXENAME "blkid-print") + +FOREACH(flag ${pkgs_CFLAGS}) + SET(EXTRA_CFLAGS "${EXTRA_CFLAGS} ${flag}") +ENDFOREACH(flag) + +ADD_EXECUTABLE(${BLKIDEXENAME} ${BLKIDSRCS}) +TARGET_LINK_LIBRARIES(${BLKIDEXENAME} ${blkid_pkgs_LDFLAGS}) +INSTALL(TARGETS ${BLKIDEXENAME} DESTINATION ${BINDIR}) diff --git a/src/blkid-print/blkid-api.c b/src/blkid-print/blkid-api.c new file mode 100644 index 0000000..9c2ddae --- /dev/null +++ b/src/blkid-print/blkid-api.c @@ -0,0 +1,62 @@ +/* + * tota-ua + * + * Copyright (c) 2021 Samsung Electronics Co., Ltd. + * + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "blkid-api.h" +#include <assert.h> +#include <stdio.h> +#include <string.h> + +blkid_partlist get_part_list(char *device_name, blkid_probe *pr) { + assert(pr); + + *pr = blkid_new_probe_from_filename(device_name); + if (*pr == NULL) + return NULL; + blkid_partlist ls = blkid_probe_get_partitions(*pr); + return ls; +} + +int get_part_number_by_name(blkid_partlist ls, const char *part_name, const char *new_slot) { + int nparts = blkid_partlist_numof_partitions(ls); + char part_name_with_slot[MAX_PARTNAME_LEN]; + int found_part_n = -1; + + if (snprintf(part_name_with_slot, MAX_PARTNAME_LEN, "%s_%s", part_name, new_slot) < 0) { + return -1; + } + + for (int it = 0; it < nparts; ++it) { + blkid_partition part = blkid_partlist_get_partition(ls, it); + const char *p; + + p = blkid_partition_get_name(part); + if (!p) + continue; + + if (found_part_n < 0 && strncmp(p, part_name, strlen(part_name)+1) == 0) + found_part_n = it + 1; + else if (strncmp(p, part_name_with_slot, strlen(part_name_with_slot)+1) == 0) + found_part_n = it + 1; + } + if (found_part_n >= 0) + return found_part_n; + + // nothing found + return -1; +} + diff --git a/src/blkid-print/blkid-api.h b/src/blkid-print/blkid-api.h new file mode 100644 index 0000000..0baa2f2 --- /dev/null +++ b/src/blkid-print/blkid-api.h @@ -0,0 +1,26 @@ +/* + * tota-ua + * + * Copyright (c) 2021 Samsung Electronics Co., Ltd. + * + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once +#include <blkid/blkid.h> + +#ifndef MAX_PARTNAME_LEN +#define MAX_PARTNAME_LEN 10000 +#endif +blkid_partlist get_part_list(char *device_name, blkid_probe *pr); +int get_part_number_by_name(blkid_partlist ls, const char *part_name, const char *new_slot); diff --git a/src/blkid-print/blkid-print.c b/src/blkid-print/blkid-print.c new file mode 100644 index 0000000..bbb3f98 --- /dev/null +++ b/src/blkid-print/blkid-print.c @@ -0,0 +1,128 @@ +/* + * tota-ua + * + * Copyright (c) 2021 Samsung Electronics Co., Ltd. + * + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <limits.h> +#include "blkid-api.h" + +void usage(const char* msg) { + if (msg) { + printf("%s\n", msg); + } + printf("USAGE: blkid-print device_name partition_label suffix\n" + "device_name: block device e.g. /dev/mmcblk0\n" + "partition_label: partition label to search for e.g.
rootfs,ramdisk etc.\n" + "suffix: a|b partiton slot\n"); +} + +blkid_partition partition_for(char *device_name, char *part_name, char *new_slot) { + char part_name_with_slot[MAX_PARTNAME_LEN]; + int found_part_n = -1; + if (!device_name || !part_name || !new_slot){ + printf("partition_for: one or more argument is NULL\n"); + return NULL; + } + if (snprintf(part_name_with_slot, MAX_PARTNAME_LEN, "%s_%s", part_name, new_slot) < 0) { + return NULL; + } + blkid_probe pr = blkid_new_probe_from_filename(device_name); + if (pr == NULL) { + printf("ERROR: blkid error for %s\n", device_name); + usage(NULL); + return NULL; + } + blkid_partlist ls = blkid_probe_get_partitions(pr); + if (!ls) { + printf("ERROR: unable to probe partitions.\n"); + blkid_free_probe(pr); + return NULL; + } + int nparts = blkid_partlist_numof_partitions(ls); + if (nparts == -1) { + printf("ERROR: unable to get partition count\n"); + blkid_free_probe(pr); + return NULL; + } + + + for (int i = 0; i < nparts; i++) { + blkid_partition part = blkid_partlist_get_partition(ls, i); + const char *p; + + p = blkid_partition_get_name(part); + if (!p) + continue; + + if (found_part_n < 0 && strncmp(p, part_name, strlen(part_name)+1) == 0) + found_part_n = i; + else if (strncmp(p, part_name_with_slot, strlen(part_name_with_slot)+1) == 0) + found_part_n = i; + } + + blkid_free_probe(pr); + if (found_part_n >= 0) { + blkid_partition par = blkid_partlist_get_partition(ls, found_part_n); + return par; + } + return NULL; +} + +const char *get_part_label(blkid_partlist ls, int part_nr) { + blkid_partition part = blkid_partlist_get_partition(ls, part_nr); + return blkid_partition_get_name(part); +} + +int main(int argc, char *argv[]) { + char device_name_path[PATH_MAX]; + if (argc != 4) { + usage("Please specify correct argument number\n"); + return 1; + } + char *device_name = realpath(argv[1], device_name_path); + if (!device_name) { + printf("ERROR: Unable to determine realpath for: %s\n", argv[1]); + usage(NULL); + return 1; + } + char *partition_label = argv[2]; + char *suffix = argv[3]; + blkid_partition part = partition_for(device_name, partition_label, suffix); + if (part == NULL) { + printf("ERROR: Partition '%s' not found.\n", partition_label); + usage(NULL); + return 2; + } + int part_nr = blkid_partition_get_partno(part); + if (part_nr < 0) { + printf("ERROR: Partition '%s' not found", partition_label); + return 3; + } + const char *part_label = blkid_partition_get_name(part); + char part_path[PATH_MAX]; + // /dev/sda1 vs /dev/mmcblk0p1 /dev/nvme0n1p1 + // no_separator vs "p" separator + if (strncmp("/dev/sd", device_name, strlen(device_name) + 1) == 0) + snprintf(part_path, PATH_MAX, "%s%d", device_name, part_nr); + else + snprintf(part_path, PATH_MAX, "%sp%d", device_name, part_nr); + printf("part_nr: %d (%s): %s\n", part_nr, part_label, part_path); + return 0; +} + diff --git a/src/dmverity/CMakeLists.txt b/src/dmverity/CMakeLists.txt new file mode 100755 index 0000000..1482299 --- /dev/null +++ b/src/dmverity/CMakeLists.txt @@ -0,0 +1,19 @@ +SET(PREFIX ${CMAKE_INSTALL_PREFIX}) +SET(BINDIR "${PREFIX}/bin") +SET(VERITY_HANDLER "verity_handler") +SET(DMVERITY_DIR ${CMAKE_CURRENT_SOURCE_DIR}) + +SET(DMVERITY_SRCS ${DMVERITY_DIR}/verity_handler.c) +INCLUDE_DIRECTORIES(${DMVERITY_DIR}) + +INCLUDE(FindPkgConfig) +pkg_check_modules(dmverity_pkgs REQUIRED) + +FOREACH(flag ${dmverity_pkgs_CFLAGS}) + SET(EXTRA_CFLAGS "${EXTRA_CFLAGS} ${flag}") +ENDFOREACH(flag) + +ADD_EXECUTABLE(${VERITY_HANDLER} ${DMVERITY_SRCS}) 
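+# verity_handler runs the hash formatting and the progress animation in separate POSIX threads, hence the explicit -lpthread on the link line below.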
+TARGET_LINK_LIBRARIES(${VERITY_HANDLER} ${dmverity_pkgs_LDFLAGS} ${LIBS} -lpthread) + +INSTALL(TARGETS ${VERITY_HANDLER} DESTINATION ${BINDIR}) diff --git a/src/dmverity/verity_handler.c b/src/dmverity/verity_handler.c new file mode 100755 index 0000000..2113616 --- /dev/null +++ b/src/dmverity/verity_handler.c @@ -0,0 +1,224 @@ +/* + * verity-handler + * + * Copyright (c) 2020 Samsung Electronics Co., Ltd. + * + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <errno.h> +#include <fcntl.h> +#include <unistd.h> +#include <pthread.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <sys/wait.h> + +#include "verity_handler.h" + +#define TMP_DIR "/tmp/upgrade" +#define PROGRESS_FILE TMP_DIR "/ro_progress" + +/*----------------------------------------------------------------------------- + _system_cmd_wait + ----------------------------------------------------------------------------*/ +static int _system_cmd_wait(const char *command) +{ + int pid = 0; + int status = 0; + char* const environ[2] = { "DISPLAY=:0", NULL }; + + if (command == NULL) + return -1; + + pid = fork(); + + if (pid == -1) + return -1; + + if (pid == 0) { + char *argv[4]; + argv[0] = "sh"; + argv[1] = "-c"; + argv[2] = (char*)command; + argv[3] = 0; + execve("/bin/sh", argv, environ); + exit(127); + } + + do { + if (waitpid(pid, &status, 0) == -1) { + if (errno != EINTR) + return -1; + } else { + return status; + } + } while(1); } + +/*----------------------------------------------------------------------------- + fota_gui_update_progress + ----------------------------------------------------------------------------*/ +void fota_gui_update_progress(int percent) +{ + int ret; + int fd; + struct stat s; + + // Check directory + ret = stat(TMP_DIR, &s); + if (ret == 0) { + // TMP_DIR exists but is not a directory + if (!S_ISDIR(s.st_mode)) + goto remove_file; + else + goto update_progress; + } else if (errno == ENOENT) // TMP_DIR does not exist + goto make_directory; + else { + LOG("stat failed : %m\n"); + return; + } + +remove_file: + ret = remove(TMP_DIR); + if (ret != 0) { + LOG("remove failed : %m\n"); + return; + } + +make_directory: + ret = mkdir(TMP_DIR, 0755); + if (ret != 0) { + LOG("mkdir failed : %m\n"); + return; + } + +update_progress: + fd = creat(PROGRESS_FILE, 0644); + if (fd < 0) { + LOG("creat failed : %m\n"); + return; + } + + ret = dprintf(fd, "%d\n", percent); + if (close(fd) != 0) { + LOG("close failed : %m\n"); + return; + } + if (ret < 2) { + LOG("write failed (%d) : %m\n", ret); + return; + } + + LOG("Progress : %d%%\n", percent); +} + +/*----------------------------------------------------------------------------- + __thread_make_hash + ----------------------------------------------------------------------------*/ +static void *__thread_make_hash(void *arg) +{ + int ret; + char cmd[1024]; + + snprintf(cmd, sizeof(cmd)-1, "/usr/bin/verityctl format %s", (char*)arg); + LOG("cmd = %s\n", cmd); + ret = _system_cmd_wait(cmd); + LOG("ret = %d, exit status = %d\n", ret, WEXITSTATUS(ret)); + + return NULL; +} +
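+/* __thread_make_hash formats the dm-verity hash via verityctl while __thread_draw_progress (below) animates the progress file from 82% to 98% over roughly 45 seconds; main() joins the hash thread, cancels the progress thread and finally reports 100%. */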
+/*----------------------------------------------------------------------------- + __thread_draw_progress + ----------------------------------------------------------------------------*/ +static void *__thread_draw_progress(void *arg) +{ + int expected_duration = 45; /* sec */ + int count = 20; + useconds_t period = expected_duration * 1000000 / count; + int from = 82; + int to = 98; + float progress = from; + float progress_increment = (float)(to - from) / (float)count; + + /** + * Convert tick to progress + * + * 0 ~ count -> from ~ to + */ + for (int tick = 0; tick < count; tick++) { + fota_gui_update_progress((int)progress); + progress += progress_increment; + pthread_testcancel(); + usleep(period); + } + + return NULL; +} + + +/*----------------------------------------------------------------------------- + main + ----------------------------------------------------------------------------*/ +int main(int argc, char **argv) +{ + int error = 0; + pthread_t th_id_hash; + pthread_t th_id_progress; + + /* check argument */ + if ((argc != 2) || (argv[1] == NULL) || (strlen(argv[1]) == 0)) { + return -1; + } + + /* argv[1] is a path of block device whose hash will be made */ + error = pthread_create(&th_id_hash, NULL, __thread_make_hash, argv[1]); + if (error != 0) { + LOG("pthread_create(th_id_hash) failed (err = %d)\n", error); + return -1; + } + + error = pthread_create(&th_id_progress, NULL, __thread_draw_progress, NULL); + if (error != 0) { + LOG("pthread_create(th_id_progress) failed (err = %d)\n", error); + return -1; + } + + error = pthread_join(th_id_hash, NULL); + if (error != 0) { + LOG("pthread_join(th_id_hash) failed (err = %d)\n", error); + return -1; + } else + LOG("pthread_join(th_id_hash) succeeded \n"); + + error = pthread_cancel(th_id_progress); + if (error != 0) + LOG("progress thread is already terminated \n"); + + error = pthread_join(th_id_progress, NULL); + if (error != 0) { + LOG("pthread_join(th_id_progress) failed (err = %d)\n", error); + return -1; + } else + LOG("pthread_join(th_id_hash) succeeded \n"); + + fota_gui_update_progress(100); + + return 0; +} diff --git a/src/dmverity/verity_handler.h b/src/dmverity/verity_handler.h new file mode 100755 index 0000000..9f6b78d --- /dev/null +++ b/src/dmverity/verity_handler.h @@ -0,0 +1,34 @@ +/* + * verity-handler + * + * Copyright (c) 2020 Samsung Electronics Co., Ltd. + * + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __VERITY_HANDLER_H__ +#define __VERITY_HANDLER_H__ + +#define DEBUG_STDOUT +//#define DEBUG_FILE + +#define LOG_PRFIX "VERITY_HANDLER" + +#ifdef DEBUG_STDOUT +#define LOGE(s, args...) printf(LOG_PRFIX "/ERROR(%s) " s, __func__, ##args) // Error log +#define LOGL(s, args...) do{ printf(LOG_PRFIX "/(%s): " s,__func__, ##args);}while(0) +#define LOG(s, args...) 
LOGL(s, ##args) +#endif + + +#endif /* __VERITY_HANDLER_H__ */ diff --git a/src/img-verifier/CMakeLists.txt b/src/img-verifier/CMakeLists.txt new file mode 100755 index 0000000..fde174f --- /dev/null +++ b/src/img-verifier/CMakeLists.txt @@ -0,0 +1,20 @@ +ADD_DEFINITIONS("-DIMG_VERIFIER_ROOT_CA_DIR=\"${IMG_VERIFIER_ROOT_CA_DIR}\"") + +SET(PREFIX ${CMAKE_INSTALL_PREFIX}) +SET(SBINDIR "${PREFIX}/sbin") +SET(VERIFIER "img-verifier") +SET(VERIFIER_DIR ${CMAKE_CURRENT_SOURCE_DIR}) + +SET(VERIFIER_SRCS ${VERIFIER_DIR}/${VERIFIER}.c) +INCLUDE_DIRECTORIES(${VERIFIER_DIR}) + +INCLUDE(FindPkgConfig) +pkg_check_modules(verify_pkgs REQUIRED openssl1.1) + +FOREACH(flag ${verify_pkgs_CFLAGS}) + SET(EXTRA_CFLAGS "${EXTRA_CFLAGS} ${flag}") +ENDFOREACH(flag) + +ADD_EXECUTABLE(${VERIFIER} ${VERIFIER_SRCS}) +TARGET_LINK_LIBRARIES(${VERIFIER} ${verify_pkgs_LDFLAGS}) +INSTALL(TARGETS ${VERIFIER} DESTINATION ${SBINDIR}) diff --git a/src/img-verifier/img-verifier.c b/src/img-verifier/img-verifier.c new file mode 100644 index 0000000..95ae7a7 --- /dev/null +++ b/src/img-verifier/img-verifier.c @@ -0,0 +1,339 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "img-verifier.h" + +static struct signed_file signed_file; +static X509 *signed_file_cert; +static X509_STORE *ca_store; +static X509_STORE_CTX *ca_store_ctx; +static EVP_MD_CTX *evp_md_ctx; + + +static bool check_signed_file(const char *path) +{ + const int metadata_size = sizeof(signed_file.delta_size) + sizeof(signed_file.signature_size) + sizeof(signed_file.certificate_size) + MAGIC_NUMBER_SIZE; + int ret = -1; + _CLEANUP_CLOSE_ int signed_file_fd = -1; + long long int offset = (off_t)-1; + unsigned int data_size = 0; + + _I("Check signed file..."); + + ASSERT_RETV(path, EINVAL, "Invalid parameter"); + _D("Signed file path : %s", path); + + signed_file_fd = open(path, O_RDONLY); + ASSERT_RETV(signed_file_fd != -1, false, "Failed to open %s : %m", path); + + // Read metadata + offset = lseek(signed_file_fd, -metadata_size, SEEK_END); + ASSERT_RETV(offset != (off_t)-1, false, "Failed to move file offset : %m"); + + ret = read(signed_file_fd, &signed_file.delta_size, metadata_size); + ASSERT_RETV(ret == metadata_size, false, "Failed to read metadata : %m"); + + // Check magic number + _D("Magic number : %s", signed_file.magic_number); + + ret = memcmp(signed_file.magic_number, MAGIC_NUMBER, MAGIC_NUMBER_SIZE); + ASSERT_RETV(ret == 0, false, "Invalid magic number"); + + // Check file size + _D("Delta size : %d", signed_file.delta_size); + _D("Signature size : %d", signed_file.signature_size); + _D("Certificate size : %d", signed_file.certificate_size); + + data_size = signed_file.delta_size + signed_file.signature_size + signed_file.certificate_size; + + offset = lseek(signed_file_fd, -metadata_size, SEEK_END); + ASSERT_RETV(offset == data_size, false, "Invalid file size : Expected(%u), Real(%lld)", data_size, offset); + + return true; +} + +static int print_openssl_error(void) +{ + unsigned long err = ERR_get_error(); + + _E("OpenSSL error : %s", ERR_error_string(err, NULL)); + + return -EIO; +} + +static int read_root_ca(void) +{ + int ret; + int num_root_ca = 0; + _CLEANUP_DIR_ DIR *dirp = NULL; + struct dirent *dirent = NULL; + const char *ext = NULL; + char root_ca_path[PATH_MAX]; + + _I("Read root CA..."); + + dirp = opendir(IMG_VERIFIER_ROOT_CA_DIR); + ASSERT_RETV(dirp, -errno, "Failed to open %s : %m", IMG_VERIFIER_ROOT_CA_DIR); + + ca_store = X509_STORE_new(); + ASSERT_RETV(ca_store, 
-ENOMEM, "X509_STORE_new failed"); + + _D("Root CA directory : %s", IMG_VERIFIER_ROOT_CA_DIR); + while ((dirent = readdir(dirp))) { + if ((ext = strrchr(dirent->d_name, '.')) && !strncmp(ext + 1, "pem", 4)) { + snprintf(root_ca_path, sizeof(root_ca_path), "%s/%s", + IMG_VERIFIER_ROOT_CA_DIR, dirent->d_name); + _D("Find root CA : %s", dirent->d_name); + ret = X509_STORE_load_locations(ca_store, root_ca_path, NULL); + ASSERT_RETV(ret == 1, print_openssl_error(), + "X509_STORE_load_locations for %s failed", root_ca_path); + ++num_root_ca; + } + } + + return num_root_ca; +} + +static int read_signed_file(const char *path) +{ + int ret = -1; + _CLEANUP_CLOSE_ int signed_file_fd = -1; + long long int offset = (off_t)-1; + + _I("Read signed file..."); + + ASSERT_RETV(path, EINVAL, "Invalid parameter"); + _D("Signed file path : %s", path); + + signed_file_fd = open(path, O_RDONLY); + ASSERT_RETV(signed_file_fd != -1, errno, "Failed to open %s : %m", path); + + // Read data + offset = lseek(signed_file_fd, signed_file.delta_size, SEEK_SET); + ASSERT_RETV(offset != (off_t)-1, false, "Failed to move file offset : %m"); + + signed_file.signature = malloc(signed_file.signature_size); + ASSERT_RETV(signed_file.signature, ENOMEM, "Not enough memory"); + + ret = read(signed_file_fd, signed_file.signature, signed_file.signature_size); + ASSERT_RETV(ret == signed_file.signature_size, errno, "Failed to read signature : %m"); + + signed_file.certificate = malloc(signed_file.certificate_size); + ASSERT_RETV(signed_file.certificate, ENOMEM, "Not enough memory"); + + ret = read(signed_file_fd, signed_file.certificate, signed_file.certificate_size); + ASSERT_RETV(ret == signed_file.certificate_size, errno, "Failed to read certificate : %m"); + + return SUCCEED; +} + +static int verify_certificate(void) +{ + int ret = -1; + unsigned char *pcert = NULL; + STACK_OF(X509_OBJECT *) ca_object_list = NULL; + X509_OBJECT *ca_object = NULL; + X509 *ca = NULL; + + _I("Verify certificate..."); + + pcert = signed_file.certificate; + ASSERT_RETV(pcert, EINVAL, "Invalid certificate"); + + signed_file_cert = d2i_X509(NULL, (const unsigned char **)&pcert, signed_file.certificate_size); + ASSERT_RETV(signed_file_cert, print_openssl_error(), "d2i_X509 failed"); + + // Reject the certificate same with CA + ca_object_list = X509_STORE_get0_objects(ca_store); + _D("The number of CA : %d", sk_X509_OBJECT_num(ca_object_list)); + for (int idx = 0; idx < sk_X509_OBJECT_num(ca_object_list); idx++) { + ca_object = sk_X509_OBJECT_value(ca_object_list, idx); + ca = X509_OBJECT_get0_X509(ca_object); + + ASSERT_RETV(X509_cmp(signed_file_cert, ca) != 0, EPERM, "Signing with CA is prohibited"); + } + + // Check whether image has CA's child certificate + ca_store_ctx = X509_STORE_CTX_new(); + ASSERT_RETV(ca_store_ctx, ENOMEM, "X509_STORE_CTX_new failed"); + + ret = X509_STORE_CTX_init(ca_store_ctx, ca_store, signed_file_cert, NULL); + ASSERT_RETV(ret == 1, print_openssl_error(), "X509_STORE_CTX_init failed"); + + X509_VERIFY_PARAM *x509_verify_param = X509_STORE_CTX_get0_param(ca_store_ctx); + ASSERT_RETV(x509_verify_param, print_openssl_error(), "X509_STORE_CTX_get0_param failed"); + + /** + * We don't check the validity period of certificates + * because RPi doesn't have RTC(Real Time Clock). 
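+	 * X509_V_FLAG_NO_CHECK_TIME (set just below) tells OpenSSL to skip the
+	 * notBefore/notAfter validity checks while still verifying the chain itself.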
+ */ + ret = X509_VERIFY_PARAM_set_flags(x509_verify_param, X509_V_FLAG_NO_CHECK_TIME); + ASSERT_RETV(ret == 1, print_openssl_error(), "X509_VERIFY_PARAM_set_flags failed"); + + ret = X509_verify_cert(ca_store_ctx); + if (ret != 1) { + _E("X509_verify_cert failed : %s", X509_verify_cert_error_string((long)X509_STORE_CTX_get_error(ca_store_ctx))); + return EPERM; + } + + _I("Verify result : VALID"); + return SUCCEED; +} + +static int verify_delta(const char *path) +{ + int ret = -1; + static EVP_PKEY *signed_file_public_key = NULL; + _CLEANUP_CLOSE_ int signed_file_fd = -1; + unsigned char delta_block[512]; + unsigned int read_delta_size = 0; + + _I("Verify delta..."); + + ASSERT_RETV(signed_file_cert, EINVAL, "Invalid X509 certificate"); + + signed_file_public_key = X509_get0_pubkey(signed_file_cert); + ASSERT_RETV(signed_file_public_key, print_openssl_error(), "X509_get_pubkey_failed"); + + evp_md_ctx = EVP_MD_CTX_new(); + ASSERT_RETV(evp_md_ctx, ENOMEM, "EVP_MD_CTX_new failed"); + + ret = EVP_DigestVerifyInit(evp_md_ctx, NULL, EVP_sha256(), NULL, signed_file_public_key); + ASSERT_RETV(ret == 1, print_openssl_error(), "EVP_DigestVerifyInit failed"); + + // Hash delta + signed_file_fd = open(path, O_RDONLY); + ASSERT_RETV(signed_file_fd != -1, false, "Failed to open %s : %m", path); + + while (read_delta_size < signed_file.delta_size) { + memset(delta_block, 0, sizeof(delta_block)); + ret = read(signed_file_fd, delta_block, sizeof(delta_block)); + ASSERT_RETV(ret != -1, errno, "Failed to read delta : %m"); + read_delta_size += ret; + + ret = EVP_DigestVerifyUpdate(evp_md_ctx, delta_block, sizeof(delta_block)); + ASSERT_RETV(ret == 1, print_openssl_error(), "EVP_DigestVerifyUpdate failed"); + } + + // Decrypt signature and compare with hashed delta + ret = EVP_DigestVerifyFinal(evp_md_ctx, signed_file.signature, signed_file.signature_size); + ASSERT_RETV(ret == 1, print_openssl_error(), "EVP_DigestVerifyFinal failed"); + + _I("Verify result : VALID"); + return SUCCEED; +} + +static int free_data(int ret) +{ + _I("Free data..."); + + if (signed_file.signature) + free(signed_file.signature); + + if (signed_file.certificate) + free(signed_file.certificate); + + if (signed_file_cert) + X509_free(signed_file_cert); + + if (ca_store) + X509_STORE_free(ca_store); + + if (ca_store_ctx) + X509_STORE_CTX_free(ca_store_ctx); + + if (evp_md_ctx) + EVP_MD_CTX_free(evp_md_ctx); + + return ret; +} + +int main(int argc, char *argv[]) +{ + int ret; + int opt; + const char *image_path = NULL; + const char *log_path = NULL; + bool is_signed_file; + int num_root_ca; + + // Check argument + while ((opt = getopt(argc, argv, "i:l:")) != -1) { + switch (opt) { + case 'i': + image_path = optarg; + break; + case 'l': + log_path = optarg; + break; + default: + _E("Invalid option : %c", (char)opt); + return EINVAL; + } + } + + if (log_path) { + _CLEANUP_CLOSE_ int log_fd = -1; + + log_fd = open(log_path, O_WRONLY | O_CREAT, 0644); + ASSERT_RETV(log_fd != -1, errno, "Failed to open log file fd : %m"); + + ret = dup2(log_fd, STDOUT_FILENO); + ASSERT_RETV(ret != -1, errno, "Failed to duplicate fd : %m"); + } + + ASSERT_RETV(image_path, EINVAL, "You must input image path"); + + _I("Start to verify image"); + + is_signed_file = check_signed_file(image_path); + + num_root_ca = read_root_ca(); + ASSERT_RETV(num_root_ca >= 0, free_data(num_root_ca), "Failed to read root CA (%d)", num_root_ca); + + /** + * Root CA / Signed File | Action + * ------------------------------------------- + * X X | Skip verification + * X O | Deny 
+ * O X | Deny + * O O | Try to verify + */ + _D("Signed(%d), Root CA(%d)", is_signed_file, num_root_ca); + if (is_signed_file) { + if (num_root_ca <= 0) { + _E("Root CA not exist"); + return EPERM; + } + } else { + if (num_root_ca > 0) { + _E("%s isn't signed file", image_path); + return EPERM; + } else { + _W("Skip verification"); + return SUCCEED; + } + } + + ret = read_signed_file(image_path); + ASSERT_RETV(ret == SUCCEED, free_data(ret), "Failed to read data (%d)", ret); + + ret = verify_certificate(); + ASSERT_RETV(ret == SUCCEED, free_data(ret), "Failed to verify certificate (%d)", ret); + + ret = verify_delta(image_path); + ASSERT_RETV(ret == SUCCEED, free_data(ret), "Failed to verify delta (%d)", ret); + + free_data(SUCCEED); + + _I("Succeed to verify image!"); + return SUCCEED; +} diff --git a/src/img-verifier/img-verifier.h b/src/img-verifier/img-verifier.h new file mode 100644 index 0000000..ebd9ebd --- /dev/null +++ b/src/img-verifier/img-verifier.h @@ -0,0 +1,64 @@ +#ifndef __IMG_VERIFIER_H__ +#define __IMG_VERIFIER_H__ + +#include + +#define LOG_VERBOSE + +#define _L(lvl, fmt, arg...) printf("IV/" lvl fmt "\n", ##arg) + +#ifdef LOG_VERBOSE +#define _D(fmt, arg...) _L("DEBUG ", fmt, ##arg) +#else +#define _D(fmt, arg...) +#endif +#define _I(fmt, arg...) _L("INFO ", fmt, ##arg) +#define _W(fmt, arg...) _L("WARN ", fmt, ##arg) +#define _E(fmt, arg...) _L("ERROR ", fmt, ##arg) + + +#define SUCCEED 0 + +#define ASSERT_RETV(cond, retv, msg, arg...) \ +{ \ + if (!(cond)) { \ + _E(msg, ##arg); \ + return retv; \ + } \ +} + +static void close_fd(int *fd) +{ + if (fd && *fd >= 0) + close(*fd); +} + +static void close_dir(DIR **dirp) +{ + if (dirp && *dirp) + closedir(*dirp); +} + +#define _CLEANUP_CLOSE_ __attribute__((__cleanup__(close_fd))) +#define _CLEANUP_DIR_ __attribute__((__cleanup__(close_dir))) + +#define MAGIC_NUMBER "IMG_SIGNED_V1" +#define MAGIC_NUMBER_SIZE (sizeof(MAGIC_NUMBER) - 1) + +/** + * This structure is same with signed image. + * So, please sync it with signer. + */ +struct signed_file { + // Data +// unsigned char *delta; (not used, just to understand structure) + unsigned char *signature; + unsigned char *certificate; + // Metadata + unsigned int delta_size; + unsigned int signature_size; + unsigned int certificate_size; + unsigned char magic_number[MAGIC_NUMBER_SIZE]; +}; + +#endif /* __IMG_VERIFIER_H__ */ diff --git a/src/upgrade-apply/CMakeLists.txt b/src/upgrade-apply/CMakeLists.txt new file mode 100644 index 0000000..4fd1111 --- /dev/null +++ b/src/upgrade-apply/CMakeLists.txt @@ -0,0 +1,27 @@ +find_package(PkgConfig) +pkg_check_modules(DEPS REQUIRED IMPORTED_TARGET zlib liblzma-tool) + +ADD_DEFINITIONS("-D_FILE_OFFSET_BITS=64") + +set( + upgrade-apply_SRCS + main.c + patch/patch.c + patch/brotli.c + patch/lzma.c + sha1/sha1.c +) +add_executable(upgrade-apply ${upgrade-apply_SRCS}) +target_link_libraries(upgrade-apply PRIVATE PkgConfig::DEPS) +# Unfortunately, libtar is neither CMake- nor pkgconfig-aware. +# Even more unfortunately, pkgconfig detection of static libraries is broken for all version of CMake before 3.24, +# and as of writing 3.24 is not even released yet (see https://gitlab.kitware.com/cmake/cmake/-/merge_requests/7070). +# This would require a bit of hacking (but also libc.a) if we wanted full static linking, +# but here we need something even different: static linking for all but libc. +# This requires especially brutal hacks. Maybe we should use Meson next time? ;) +# In the meantime, TODO: make this better. 
+# Also, note that zlib has been used in the normal way (i.e. with dynamic linking). +# That's because the current setup does have libz.so in the execution environment. +# Though in the future, it might be good to make it consistent (TODO). +target_link_libraries(upgrade-apply PRIVATE libbrotlidec-static.a libbrotlicommon-static.a libtar.a) +install(TARGETS upgrade-apply) diff --git a/src/upgrade-apply/main.c b/src/upgrade-apply/main.c new file mode 100644 index 0000000..080a1cb --- /dev/null +++ b/src/upgrade-apply/main.c @@ -0,0 +1,514 @@ +/* + * tota-ua + * + * Copyright (c) 2022 Samsung Electronics Co., Ltd. + * + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "patch/patch.h" +#include "patch/brotli.h" +#include "patch/lzma.h" +#include "sha1/sha1.h" + +void fd_cleanup(int *fd) +{ + if (!fd || *fd < 0) + return; + close(*fd); +} + +void tar_cleanup(TAR **tar) +{ + if (!tar || !*tar) + return; + tar_close(*tar); +} + +enum archive_kind { + KIND_RAW, + KIND_BROTLI_PATCH, + KIND_LZMA_PATCH +}; + +struct parse_result { + enum { + PARSE_OK, + PARSE_HELP, + PARSE_REPEATED_ARGUMENT, + PARSE_MISSING_ARGUMENT, + PARSE_PATCH_MISTAKE, + PARSE_BAD_KIND, + PARSE_BAD_SIZE, + PARSE_NO_PARSE, + } result; + + const char *archive; + const char *dest; + const char *archive_file; + const char *patch_orig; + const char *dest_sha1; + size_t dest_size; + enum archive_kind kind; +}; + +struct parse_result parse_args(int argc, char **argv) +{ + const char *archive = NULL; + const char *dest = NULL; + const char *archive_file = NULL; + const char *patch_orig = NULL; + const char *dest_sha1 = NULL; + long long int dest_size = 0; + enum archive_kind kind = -1; + bool help = false; + + for (;;) { + const struct option long_options[] = { + {"archive", required_argument, NULL, 0 }, + {"dest", required_argument, NULL, 1 }, + {"archive-file", required_argument, NULL, 2 }, + {"kind", required_argument, NULL, 3 }, + {"patch-orig", required_argument, NULL, 4 }, + {"dest-sha1", required_argument, NULL, 5 }, + {"dest-size", required_argument, NULL, 6 }, + {"help", no_argument, NULL, 'h'}, + {0} + }; + int option = getopt_long(argc, argv, "h", long_options, NULL); + if (option < 0) + break; + + switch (option) { + case 0: // archive + if (archive != NULL) + return (struct parse_result) { .result = PARSE_REPEATED_ARGUMENT }; + archive = optarg; + break; + + case 1: // dest + if (dest != NULL) + return (struct parse_result) { .result = PARSE_REPEATED_ARGUMENT }; + dest = optarg; + break; + + case 2: // archive-file + if (archive_file != NULL) + return (struct parse_result) { .result = PARSE_REPEATED_ARGUMENT }; + archive_file = optarg; + break; + + case 3: // kind + if (kind != -1) + return (struct parse_result) { .result = PARSE_REPEATED_ARGUMENT }; + if (strcmp(optarg, "raw") == 0) + kind = KIND_RAW; + else if (strcmp(optarg, "ss_brotli_patch") == 0) + kind = KIND_BROTLI_PATCH; + else if (strcmp(optarg, 
"ss_lzma_patch") == 0) + kind = KIND_LZMA_PATCH; + else + return (struct parse_result) { .result = PARSE_BAD_KIND }; + break; + + case 4: // patch-orig + if (patch_orig != NULL) + return (struct parse_result) { .result = PARSE_REPEATED_ARGUMENT }; + patch_orig = optarg; + break; + + case 5: // dest-sha1 + if (dest_sha1 != NULL) + return (struct parse_result) { .result = PARSE_REPEATED_ARGUMENT }; + dest_sha1 = optarg; + break; + + case 6: // dest-size + if (dest_size != 0) + return (struct parse_result) { .result = PARSE_REPEATED_ARGUMENT }; + dest_size = atoi(optarg); + if (dest_size <= 0 || dest_size > SIZE_MAX) + return (struct parse_result) { .result = PARSE_BAD_SIZE }; + break; + + case 'h': // help + if (help) + return (struct parse_result) { .result = PARSE_REPEATED_ARGUMENT }; + help = true; + break; + + default: + return (struct parse_result) { .result = PARSE_NO_PARSE }; + } + } + + if (help) { + if (archive != NULL || dest != NULL || archive_file != NULL || patch_orig != NULL || kind != -1) + return (struct parse_result) { .result = PARSE_REPEATED_ARGUMENT }; + return (struct parse_result) { .result = PARSE_HELP }; + } + + if (archive == NULL) + return (struct parse_result) { .result = PARSE_MISSING_ARGUMENT }; + if (dest == NULL) + return (struct parse_result) { .result = PARSE_MISSING_ARGUMENT }; + if (archive_file == NULL) + return (struct parse_result) { .result = PARSE_MISSING_ARGUMENT }; + if (kind == -1) + return (struct parse_result) { .result = PARSE_MISSING_ARGUMENT }; + + if (patch_orig == NULL && (kind == KIND_BROTLI_PATCH || kind == KIND_LZMA_PATCH)) + return (struct parse_result) { .result = PARSE_PATCH_MISTAKE }; + if (patch_orig != NULL && kind == KIND_RAW) + return (struct parse_result) { .result = PARSE_PATCH_MISTAKE }; + + return (struct parse_result) { + .result = PARSE_OK, + .archive = archive, + .dest = dest, + .archive_file = archive_file, + .patch_orig = patch_orig, + .dest_sha1 = dest_sha1, + .dest_size = dest_size, + .kind = kind, + }; +} + +// The TAR structure can only hold an int as a FD. +// libtar example uses some brutal hack which we don't want to do. +// We don't want to open multiple archive at the time, so let's just have 1 as FD, +// and instead hold the gzip structure globally. +static gzFile gzip_file; + +int gzip_open(const char *pathname, int oflags, ...) +{ + if (gzip_file != NULL) { + errno = EALREADY; + return -1; + } + + // We assume oflags == O_RDONLY, since that's what we actually use. + // Also we don't care about the mode since we are lazy. + if (oflags != O_RDONLY) { + errno = ENOTSUP; + return -1; + } + + __attribute__((cleanup(fd_cleanup))) int inner_fd = open(pathname, O_RDONLY); + if (inner_fd == -1) + return -1; + + gzip_file = gzopen(pathname, "r"); + if (gzip_file == NULL) + return -1; + inner_fd = -1; + + return 1; +} + +int gzip_close(__attribute__((unused)) int useless_fd) +{ + if (gzip_file == NULL) { + errno = EINVAL; + return -1; + } + + return gzclose(gzip_file); +} + +ssize_t gzip_read(__attribute__((unused)) int useless_fd, void *buf, size_t len) +{ + if (gzip_file == NULL) { + errno = EINVAL; + return -1; + } + + return gzread(gzip_file, buf, len); +} + +ssize_t gzip_write(__attribute__((unused)) int useless_fd, __attribute__((unused)) const void *buf, __attribute__((unused)) size_t len) +{ + // Again, we do not use this. 
+ errno = ENOTSUP; + return -1; +} + +void help(char *name) +{ + fprintf(stderr, + "Usage: %s --archive ARCHIVE --archive-file FILE --dest DESTINATION --kind KIND [--patch-orig ORIGIN] [--dest-sha1 SHA1] [--dest-size SIZE]\n" + "\n" + "Patches a partition using an archive.\n" + "It will look for a file named FILE in an archive called ARCHIVE; the archive should have an extension\n" + "of either .tar, .tar.gz or .tgz; it will be read in the way corresponding to the extension.\n" + "The KIND parameter specifies how the file will be treated:\n" + " - raw means that the file should be treated as an image to be written,\n" + " - ss_brotli_patch means that the file should be treated as a delta in the (Tizen-specific) ss_brotli_patch\n" + " - ss_lzma_patch means that the file should be treated as a delta in the (Tizen-specific) ss_lzma_patch\n" + " format. In this case, ORIGIN is needed and specifies the partition that a delta is based on.\n" + "The results will be written to the DESTINATION partition.\n" + "If SHA1 is provided (lowercase hexadecimal), it is compared to the one calculated on DESTINATION after writing.\n" + "Additionally, if SIZE is provided, it is used as the destination size; this matters for SHA1 comparision,\n" + "and also if ss_brotli_patch or ss_lzma_patch is used.\n", + name); +} + +int apply_raw(const char *dest, TAR *tar) +{ + int size = th_get_size(tar); + + __attribute__((cleanup(fd_cleanup))) int out = open(dest, O_WRONLY); + if (out == -1) { + fprintf(stderr, "Couldn't open the target (errno: %m)\n"); + return -1; + } + + while (size > 0) { + char buf[T_BLOCKSIZE]; + int r_read = tar_block_read(tar, buf); + switch (r_read) { + case -1: + fprintf(stderr, "Couldn't read from the archive (errno: %m)\n"); + return -1; + case 0: + fprintf(stderr, "We have reached EOF unexpectedly\n"); + return -1; + } + + if (r_read > size) { + r_read = size; + } + size -= r_read; + + int written = 0; + while (written - r_read) { + int r_write = write(out, buf + written, r_read - written); + if (r_write == -1) { + fprintf(stderr, "Couldn't write to the partition (errno: %m)\n"); + return -1; + } + written += r_write; + } + } + + return 0; +} + +int check_sha1(const char *dest, const char *sha1, size_t read_bytes) +{ + const size_t SHA1_LEN = 20; + const char HEX_DIGITS[] = "0123456789abcdef"; + + if (strlen(sha1) != 2 * SHA1_LEN) { + fprintf(stderr, "Invalid SHA1 length\n"); + return -1; + } + + __attribute__((cleanup(fd_cleanup))) int dest_fd = open(dest, O_RDONLY); + if (dest_fd == -1) { + fprintf(stderr, "Couldn't open the target (errno: %m)\n"); + return -1; + } + + SHA1_CTX context; + SHA1Init(&context); + + if (read_bytes == 0) + read_bytes = SIZE_MAX; + + while (read_bytes > 0) { + // This size doesn't really matter, so let's just pick a value that is big enough + // to not use too many syscalls. + unsigned char buf[1 << 16]; + + int r_read = read(dest_fd, buf, read_bytes < sizeof(buf) ? read_bytes : sizeof(buf)); + read_bytes -= r_read; + switch (r_read) { + case -1: + fprintf(stderr, "Couldn't read from the destination (errno: %m)\n"); + return -1; + case 0: + // We read all the data. Let's check the results! + // ... wait, there is no two-layer (switch and while) break in C. + // Ugh, we have to goto instead. 
+ goto while_done; + } + + SHA1Update(&context, buf, r_read); + } +while_done:; + + unsigned char correct_sha1[SHA1_LEN + 1]; + SHA1Final(correct_sha1, &context); + + char correct_sha1_hex[2 * SHA1_LEN + 1]; + for (int i = 0; i < SHA1_LEN; ++i) { + correct_sha1_hex[2 * i] = HEX_DIGITS[correct_sha1[i] / 16]; + correct_sha1_hex[2 * i + 1] = HEX_DIGITS[correct_sha1[i] % 16]; + } + correct_sha1_hex[sizeof(correct_sha1_hex) - 1] = '\0'; + + if (strcmp(correct_sha1_hex, sha1) != 0) { + fprintf(stderr, "Malformed or invalid SHA1: correct one is %s\n", correct_sha1_hex); + return -1; + } + + return 0; +} + +int main(int argc, char **argv) +{ + struct parse_result parsed = parse_args(argc, argv); + switch (parsed.result) { + case PARSE_REPEATED_ARGUMENT: + fprintf(stderr, "Argument incorrectly repeated\n"); + help(argv[0]); + return EXIT_FAILURE; + case PARSE_MISSING_ARGUMENT: + fprintf(stderr, "Argument incorrectly missing\n"); + help(argv[0]); + return EXIT_FAILURE; + case PARSE_PATCH_MISTAKE: + fprintf(stderr, "`patch-orig` parameter doesn't fit the kind\n"); + help(argv[0]); + return EXIT_FAILURE; + case PARSE_BAD_KIND: + fprintf(stderr, "Invalid `kind` parameter (possible: `raw`, `ss_brotli_patch`)\n"); + help(argv[0]); + return EXIT_FAILURE; + case PARSE_BAD_SIZE: + fprintf(stderr, "Invalid `dest-size` parameter (it should be a positive number that fits in size_t)\n"); + help(argv[0]); + return EXIT_FAILURE; + case PARSE_NO_PARSE: + fprintf(stderr, "Invalid parameters passed\n"); + help(argv[0]); + return EXIT_FAILURE; + case PARSE_HELP: + help(argv[0]); + return EXIT_SUCCESS; + case PARSE_OK: + break; + } + + tartype_t *type; + static tartype_t gzip_type = { gzip_open, gzip_close, gzip_read, gzip_write }; + if (strcmp(parsed.archive + strlen(parsed.archive) - strlen(".tar"), ".tar") == 0) + type = NULL; + else if (strcmp(parsed.archive + strlen(parsed.archive) - strlen(".tar.gz"), ".tar.gz") == 0) + type = &gzip_type; + else if (strcmp(parsed.archive + strlen(parsed.archive) - strlen(".tgz"), ".tgz") == 0) + type = &gzip_type; + else { + fprintf(stderr, "Unknown archive extension\n"); + help(argv[0]); + return EXIT_FAILURE; + } + + __attribute__ ((cleanup(tar_cleanup))) TAR *tar; + if (tar_open(&tar, parsed.archive, type, O_RDONLY, 0, 0) == -1) { + /* Usually we would instead initialize tar to NULL, and tar_open would not touch it in case of failure. + * Alas, that's not what happens; tar_open sets tar to a newly allocated address, does stuff there, + * and in case of failure frees the address but does not reset it. This means that in case of failure + * tar contains an invalid address. So we have to NULL it out (so tar_cleanup doesn't crash our program), + * and then we don't have to initialize tar to NULL in first place. */ + tar = NULL; + + fprintf(stderr, "Couldn't open the archive (errno: %m)\n"); + return EXIT_FAILURE; + } + + for (;;) { + switch (th_read(tar)) { + case -1: + fprintf(stderr, "Couldn't read the archive header (errno: %m)\n"); + return EXIT_FAILURE; + case 1: + // All files processed, which means... + fprintf(stderr, "File not found in archive\n"); + return EXIT_FAILURE; + case 0: + break; + } + + const char *this_name = th_get_pathname(tar); + if (this_name == NULL) { + // TODO: Can this even happen? I don't think docs say anything about this. 
+ fprintf(stderr, "Couldn't extract the filename from the archive (malformed archive?)\n"); + return EXIT_FAILURE; + } + + if (strcmp(this_name, parsed.archive_file) != 0) { + if (TH_ISREG(tar)) { + if (tar_skip_regfile(tar) == -1) { + fprintf(stderr, "Couldn't skip an intermediate file from the archive (errno: %m; malformed archive?)\n"); + return EXIT_FAILURE; + } + } + continue; + } + + fprintf(stderr, "File found. Starting to write\n"); + + int r = EXIT_FAILURE; + + + switch (parsed.kind) { + case KIND_RAW: + r = apply_raw(parsed.dest, tar); + break; + + + case KIND_BROTLI_PATCH: + { + struct dec_funcs funcs = { init_brotli, decompress_brotli }; + r = apply_patch(parsed.patch_orig, parsed.dest, tar, parsed.dest_size, &funcs); + break; + } + case KIND_LZMA_PATCH: + { + struct dec_funcs funcs = { init_lzma, decompress_lzma, clean_lzma }; + r = apply_patch(parsed.patch_orig, parsed.dest, tar, parsed.dest_size, &funcs); + break; + } + } + + if (r != 0) { + fprintf(stderr, "Couldn't apply; you might need to restore from the backup!\n"); + return EXIT_FAILURE; + } + + if (parsed.dest_sha1 != NULL) { + fprintf(stderr, "Write successful. Now checking SHA1\n"); + if (check_sha1(parsed.dest, parsed.dest_sha1, parsed.dest_size) != 0) { + fprintf(stderr, "Couldn't check SHA1; you might need to restore from the backup!\n"); + return EXIT_FAILURE; + } + } + + fprintf(stderr, "Everything written successfully\n"); + return EXIT_SUCCESS; + } +} diff --git a/src/upgrade-apply/patch/NOTES.md b/src/upgrade-apply/patch/NOTES.md new file mode 100644 index 0000000..27abb13 --- /dev/null +++ b/src/upgrade-apply/patch/NOTES.md @@ -0,0 +1,3 @@ +This is copied from platform/core/system/libtota @ b25d02d618797b5fb2fbe61a188d6a28dc720bf8. + +The code has been adapted to read patch from a tarball instead of a file descriptor, as well as some other minor improvements. It's mostly untouched otherwise. diff --git a/src/upgrade-apply/patch/brotli.c b/src/upgrade-apply/patch/brotli.c new file mode 100644 index 0000000..b5259bb --- /dev/null +++ b/src/upgrade-apply/patch/brotli.c @@ -0,0 +1,49 @@ +/* + * tota-ua (previously libtota) + * + * Copyright (c) 2022 Samsung Electronics Co., Ltd. + * + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include +#include +#include "patch.h" + +int init_brotli(struct bs_data *data) +{ + assert(data); + data->state = BrotliDecoderCreateInstance(NULL, NULL, NULL); + return 0; +} + +int decompress_brotli(struct bs_data *data) +{ + assert(data); + BrotliDecoderState *bstate = (BrotliDecoderState*)data->state; + int result = BrotliDecoderDecompressStream(bstate, + &data->available_in, + &data->compressed_pos, + &data->available_out, + &data->decompressed_pos, + &data->total_size); + + switch (result) { + case BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT: + return PF_NEED_MORE_DATA; + case BROTLI_DECODER_RESULT_ERROR: + return PF_ERROR_DECOMPRESSION; + } + return PF_OK; +} diff --git a/src/upgrade-apply/patch/brotli.h b/src/upgrade-apply/patch/brotli.h new file mode 100644 index 0000000..919caac --- /dev/null +++ b/src/upgrade-apply/patch/brotli.h @@ -0,0 +1,22 @@ +/* + * tota-ua (previously libtota) + * + * Copyright (c) 2022 Samsung Electronics Co., Ltd. + * + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#pragma once +#include "patch.h" + +int init_brotli(struct bs_data *data); +int decompress_brotli(struct bs_data *data); diff --git a/src/upgrade-apply/patch/lzma.c b/src/upgrade-apply/patch/lzma.c new file mode 100644 index 0000000..60ea3bd --- /dev/null +++ b/src/upgrade-apply/patch/lzma.c @@ -0,0 +1,143 @@ +/* + * tota-ua (previously libtota) + * + * Copyright (c) 2022 Samsung Electronics Co., Ltd. + * + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include +#include +#include "patch.h" +#include +#include + +#define HEADER_PLUS_SIZE_LEN LZMA_PROPS_SIZE+8 + +struct lzma_data { + bool initialized; + CLzmaDec *state; +}; + +static void *SzAlloc(void *p, size_t size) +{ + return MyAlloc(size); +} + + +static void SzFree(void *p, void *address) +{ + MyFree(address); +} + +ISzAlloc g_alloc = { SzAlloc, SzFree }; + +int init_lzma(struct bs_data *data) +{ + assert(data); + struct lzma_data *internal_data = (struct lzma_data*)malloc(sizeof(struct lzma_data)); + if (!internal_data) + return PF_ERROR_DECOMPRESSION; + + CLzmaDec *state = (CLzmaDec*)malloc(sizeof(CLzmaDec)); + if (!state) { + free(internal_data); + return PF_ERROR_DECOMPRESSION; + } + + data->state = internal_data; + internal_data->state = state; + internal_data->initialized = false; + + LzmaDec_Construct(state); + + return PF_OK; +} + +void clean_lzma(struct bs_data *data) +{ + assert(data); + if (!data->state) + return; + + struct lzma_data *internal_data = data->state; + LzmaDec_Free(internal_data->state, &g_alloc); + free(internal_data->state); + free(internal_data); + data->state = NULL; +} + +static int init_lzma_internal(struct bs_data *data) +{ + assert(data); + if (data->available_in < HEADER_PLUS_SIZE_LEN) + return PF_NEED_MORE_DATA; + struct lzma_data *internal_data = data->state; + + int res = LzmaDec_Allocate(internal_data->state, + data->compressed_pos, + LZMA_PROPS_SIZE, + &g_alloc); + if (res != SZ_OK) + return PF_ERROR_DECOMPRESSION; + + data->available_in -= HEADER_PLUS_SIZE_LEN; + data->compressed_pos += HEADER_PLUS_SIZE_LEN; + + LzmaDec_Init(internal_data->state); + internal_data->initialized = true; + return PF_OK; +} + +int decompress_lzma(struct bs_data *data) +{ + assert(data); + int res; + struct lzma_data *internal_data = data->state; + + + if (!internal_data->initialized) { + res = init_lzma_internal(data); + if (res != PF_OK) + return res; + } + + ELzmaStatus status; + size_t out_processed = data->available_out; + size_t in_processed = data->available_in; + + res = LzmaDec_DecodeToBuf(internal_data->state, + data->decompressed_pos, + &out_processed, + data->compressed_pos, + &in_processed, + LZMA_FINISH_ANY, + &status); + + data->available_out -= out_processed; + data->available_in -= in_processed; + data->decompressed_pos += out_processed; + data->compressed_pos += in_processed; + + if (res != SZ_OK) + return PF_ERROR_DECOMPRESSION; + + if (status == LZMA_STATUS_NEEDS_MORE_INPUT && data->available_in == 0) { + if (data->patch_remaining == 0) + return PF_OK; // Since we don't have anything to read, we're done. + return PF_NEED_MORE_DATA; + } + + return PF_OK; +} diff --git a/src/upgrade-apply/patch/lzma.h b/src/upgrade-apply/patch/lzma.h new file mode 100644 index 0000000..d527354 --- /dev/null +++ b/src/upgrade-apply/patch/lzma.h @@ -0,0 +1,23 @@ +/* + * tota-ua + * + * Copyright (c) 2022 Samsung Electronics Co., Ltd. + * + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#pragma once +#include "patch.h" + +int init_lzma(struct bs_data *data); +int decompress_lzma(struct bs_data *data); +void clean_lzma(struct bs_data *data); diff --git a/src/upgrade-apply/patch/patch.c b/src/upgrade-apply/patch/patch.c new file mode 100644 index 0000000..1c7724a --- /dev/null +++ b/src/upgrade-apply/patch/patch.c @@ -0,0 +1,447 @@ +/* + * tota-ua (previously libtota) + * + * Copyright (c) 2022 Samsung Electronics Co., Ltd. + * + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "patch.h" + +#if INTPTR_MAX == INT32_MAX +// We need to map two partitions, so +// on a 32-bit architecture one mmapping +// cannot be more than (3GB - 256M)/2 +#define MAX_MMAP_LEN (3*1024*1024*1024ULL/2 - 256*1024*1024) +#else +#define MAX_MMAP_LEN UINT64_MAX +#endif + +#define SSINT_LEN 8 +#define BLOCK_COUNT_REPORT 5000 + +const char SSDIFF_MAGIC[] = "SSDIFF40"; + +static void free_data(struct bs_data *data, struct dec_funcs *funcs) +{ + if (data == NULL) + return; + + if (funcs->clean) + funcs->clean(data); + + if (data->src.ptr) munmap(data->src.ptr, data->src.mmapped_len); + if (data->dest.ptr) munmap(data->dest.ptr, data->dest.mmapped_len); + + if (data->src.fd) close(data->src.fd); + if (data->dest.fd) close(data->dest.fd); +} + +static int open_file(const char *file_name, int mode) +{ + assert(file_name); + int fd = open(file_name, mode, S_IWUSR | S_IRUSR); + if (fd < 0) + fprintf(stderr, "Open file %s error: %m (%d)\n", file_name, errno); + return fd; +} + +static size_t get_file_len(int fd) +{ + assert(fd >= 0); + size_t result = lseek(fd, 0, SEEK_END); + lseek(fd, 0, SEEK_SET); + return result; +} + +static int remmap(struct file_info *finfo, off_t offset) +{ + if (finfo->ptr) munmap(finfo->ptr, finfo->mmapped_len); + + off_t new_offset = offset & (~(getpagesize()-1)); + + if (finfo->len - new_offset > MAX_MMAP_LEN) { + // If the rest of the file is greather than MAX_MMAP_LEN + // then we only mmap MAX_MMAP_LEN + finfo->mmapped_len = MAX_MMAP_LEN; + } else { + // If the rest of the file is smaller than MAX_MMAP_LEN + if (finfo->len > MAX_MMAP_LEN) { + // If the whole file is greather than MAX_MMAP_LEN + // we mmap as much as possible to reduce the number of + // redundant remmappings + finfo->mmapped_len = MAX_MMAP_LEN; + new_offset = finfo->len - MAX_MMAP_LEN; + } else { + // File is smaller than MAX_MMAP_LEN so we can mmap the entire file + finfo->mmapped_len = finfo->len; + new_offset = 0; + } + } + + finfo->ptr = mmap(NULL, finfo->mmapped_len, finfo->mmap_prot, finfo->mmap_flags, finfo->fd, new_offset); + if (finfo->ptr == MAP_FAILED) { + fprintf(stderr, "mmap file error: %m (%d)\n", errno); + return PF_ERROR_MMAP; + } + + finfo->offset = new_offset; + + return PF_OK; +} + +static int set_new_pos(off_t new_pos, struct file_info *finfo) +{ + int res = PF_OK; + off_t local_pos = new_pos - finfo->offset; + if (local_pos < 0 || local_pos > 
finfo->mmapped_len) { + res = remmap(finfo, new_pos); + } + if (res == PF_OK) { + finfo->pos = finfo->ptr + new_pos - finfo->offset; + } + return res; +} + +static int remmap_and_save_position(struct file_info *finfo) +{ + off_t global_pos = finfo->pos - (uint8_t*)finfo->ptr + finfo->offset; + int res = remmap(finfo, global_pos); + if (res == PF_OK) { + finfo->pos = finfo->ptr + global_pos - finfo->offset; + } + return res; +} + +static int remmap_if_needed(struct file_info *finfo) +{ + int res = PF_OK; + if (finfo->pos - (uint8_t*)finfo->ptr >= finfo->mmapped_len) { + res = remmap_and_save_position(finfo); + } + return res; +} + +static int remmap_if_no_space(struct file_info *finfo, int64_t size) +{ + int res = PF_OK; + if (finfo->pos + size > (uint8_t*)finfo->ptr + finfo->mmapped_len) { + res = remmap_and_save_position(finfo); + } + return res; +} + +static size_t decompress_bytes(struct dec_funcs *funcs, struct bs_data *data, size_t keep_offset) +{ + assert(data); + if (keep_offset > 0) { + memcpy(data->buff_out, data->buff_out + sizeof(data->buff_out) - keep_offset, keep_offset); + } + data->decompressed_pos = data->buff_out + keep_offset; + data->available_out = sizeof(data->buff_out) - keep_offset; + + int result; + + for (;;) { + result = funcs->decompress(data); + + if (result == PF_ERROR_DECOMPRESSION) { + fprintf(stderr, "Brotli decompression errored\n"); + return PF_ERROR_DECOMPRESSION; + } else if (result == PF_NEED_MORE_DATA) { + if (data->patch_remaining <= 0) { + fprintf(stderr, "We have ran out of data and decompression is still in progress\n"); + return PF_ERROR_DECOMPRESSION; + } + + int r_read = tar_block_read(data->patch_tar, data->buff_in); + switch (r_read) { + case -1: + fprintf(stderr, "Couldn't read from the archive (errno: %m)\n"); + return PF_ERROR_DECOMPRESSION; + case 0: + fprintf(stderr, "We have reached EOF unexpectedly\n"); + return PF_ERROR_DECOMPRESSION; + } + + if (r_read > data->patch_remaining) { + r_read = data->patch_remaining; + } + data->patch_remaining -= r_read; + + data->available_in = r_read; + data->compressed_pos = data->buff_in; + } else { + break; + } + } + + return PF_OK; +} + +static int open_files(struct bs_data *data, const char *source_file, const char *dest_file, TAR *patch_tar, size_t dest_size) +{ + assert(data); + assert(source_file); + assert(dest_file); + assert(patch_tar); + + data->src.fd = open_file(source_file, O_RDONLY); + data->dest.fd = open_file(dest_file, O_RDWR); + if (data->src.fd < 0 || + data->dest.fd < 0) + return PF_ERROR_OPEN_FILE; + + data->patch_tar = patch_tar; + + data->src.len = get_file_len(data->src.fd); + data->patch_len = th_get_size(data->patch_tar); + data->dest.len = dest_size == 0 ? 
get_file_len(data->dest.fd) : dest_size; + + int res; + if ((res = remmap(&data->src, 0)) != PF_OK) + return res; + + if ((res = remmap(&data->dest, 0)) != PF_OK) + return res; + + data->patch_remaining = data->patch_len; + + return PF_OK; +} + +static void init_file_info(struct file_info *finfo) +{ + finfo->fd = -1; + finfo->ptr = NULL; + finfo->offset = 0; + finfo->mmapped_len = 0; + finfo->pos = 0; + finfo->len = 0; +} + +static void init_data(struct bs_data *data) +{ + assert(data); + + init_file_info(&data->src); + data->src.mmap_prot = PROT_READ; + data->src.mmap_flags = MAP_PRIVATE; + init_file_info(&data->dest); + data->dest.mmap_prot = PROT_WRITE; + data->dest.mmap_flags = MAP_SHARED; + data->patch_tar = NULL; + data->patch_len = 0; + data->patch_remaining = 0; + data->available_in = 0; + data->compressed_pos = 0; + data->available_out = 0; + data->decompressed_pos = 0; + data->state = NULL; +} + +static int64_t parse_ssint(unsigned char *buff) +{ + assert(buff); + /* + * From bsdiff 4.0 documentation: + * + * INTEGER type: + * + * offset size data type value + * 0 1 byte x0 + * 1 1 byte x1 + * 2 1 byte x2 + * 3 1 byte x3 + * 4 1 byte x4 + * 5 1 byte x5 + * 6 1 byte x6 + * 7 1 byte x7 + 128 * s + * + * The values x0, x2, x2, x3, x4, x5, x6 are between 0 and 255 (inclusive). + * The value x7 is between 0 and 127 (inclusive). The value s is 0 or 1. + * + * The INTEGER is parsed as: + * (x0 + x1 * 256 + x2 * 256^2 + x3 * 256^3 + x4 * 256^4 + + * x5 * 256^5 + x6 * 256^6 + x7 * 256^7) * (-1)^s + * + * (In other words, an INTEGER is a 64-byte signed integer in sign-magnitude + * format, stored in little-endian byte order.) + */ + int64_t result = *(int64_t*)buff & 0x7fffffff; + if ((buff[7] & 0x80) != 0) + result = -result; + + return result; +} + +int read_header(struct dec_funcs *funcs, struct bs_data *data, uint8_t **buff_out_pos) +{ + assert(data); + assert(buff_out_pos); + + *buff_out_pos = data->buff_out; + + if (*buff_out_pos + sizeof(SSDIFF_MAGIC) > data->decompressed_pos || + memcmp(data->buff_out, SSDIFF_MAGIC, sizeof(SSDIFF_MAGIC) - 1) != 0) { + fprintf(stderr, "Invalid patch file\n"); + return PF_ERROR_INVALID_PATCH_FILE; + } else { + fprintf(stderr, "Looks like SSDIFF\n"); + } + + *buff_out_pos += sizeof(SSDIFF_MAGIC) - 1; + + if (*buff_out_pos + SSINT_LEN > data->decompressed_pos) { + decompress_bytes(funcs, data, data->decompressed_pos - *buff_out_pos); + *buff_out_pos = data->buff_out; + } + + size_t target_size = parse_ssint(*buff_out_pos); + fprintf(stderr, "target_size: 0x%zx (%zu)\n", target_size, target_size); + + if (target_size != data->dest.len) { + fprintf(stderr, "Declared target size differs from that read from the patch\n"); + return PF_ERROR_INVALID_PATCH_FILE; + } + + *buff_out_pos += SSINT_LEN; + + return PF_OK; +} + +int apply_patch(const char *source_file, const char *dest_file, TAR *patch_tar, size_t dest_size, struct dec_funcs *funcs) +{ + assert(source_file); + assert(dest_file); + assert(patch_tar); + + int result; + uint64_t blocks = 0; + struct bs_data data; + + init_data(&data); + if ((result = funcs->init(&data)) != PF_OK) + goto exit; + + if ((result = open_files(&data, source_file, dest_file, patch_tar, dest_size)) != PF_OK) + goto exit; + + if ((result = decompress_bytes(funcs, &data, 0)) != PF_OK) + goto exit; + + uint8_t *buff_out_pos; + + if ((result = read_header(funcs, &data, &buff_out_pos)) != PF_OK) + goto exit; + + uint64_t total_write = 0; + + while (total_write < data.dest.len) { + /* + * Make sure we can read the block header + */ + 
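+		/*
+		 * Each SSDIFF40 block starts with four 8-byte sign-magnitude
+		 * little-endian integers (see parse_ssint()):
+		 *   diff_len  - bytes produced by adding patch bytes to the
+		 *               corresponding source bytes
+		 *   extra_len - bytes copied verbatim from the patch stream
+		 *   old_pos   - read offset in the source partition
+		 *   new_pos   - write offset in the destination partition
+		 */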
if (buff_out_pos + 4*8 > data.decompressed_pos) { + if ((result = decompress_bytes(funcs, &data, data.decompressed_pos - buff_out_pos)) != PF_OK) + goto exit; + buff_out_pos = data.buff_out; + } + + /* + * Read the block header + */ + int64_t diff_len = parse_ssint(buff_out_pos+0*8); + int64_t extra_len = parse_ssint(buff_out_pos+1*8); + int64_t old_pos = parse_ssint(buff_out_pos+2*8); + int64_t new_pos = parse_ssint(buff_out_pos+3*8); + buff_out_pos += 4*8; + + /* + * Prepare pointers + */ + if ((result = set_new_pos(old_pos, &data.src)) != PF_OK) + goto exit; + if ((result = set_new_pos(new_pos, &data.dest)) != PF_OK) + goto exit; + /* + * Read diff data + */ + int64_t write = 0; + while (write < diff_len) { + if (buff_out_pos >= data.decompressed_pos) { + if ((result = decompress_bytes(funcs, &data, 0)) != PF_OK) + goto exit; + buff_out_pos = data.buff_out; + } + + while (write < diff_len && buff_out_pos < data.decompressed_pos) { + if ((result = remmap_if_needed(&data.src)) != PF_OK) + goto exit; + if ((result = remmap_if_needed(&data.dest)) != PF_OK) + goto exit; + *data.dest.pos = *(uint8_t*)data.src.pos + *(uint8_t*)buff_out_pos; + data.dest.pos++; + data.src.pos++; + buff_out_pos++; + write++; + } + } + total_write += write; + /* + * Read extra data + */ + write = 0; + while (write < extra_len) { + if (buff_out_pos >= data.decompressed_pos) { + if ((result = decompress_bytes(funcs, &data, 0)) != PF_OK) + goto exit; + buff_out_pos = data.buff_out; + } + int64_t chunk_size = extra_len - write; + if (buff_out_pos + chunk_size > data.decompressed_pos) { + chunk_size = data.decompressed_pos - buff_out_pos; + } + if ((result = remmap_if_no_space(&data.dest, chunk_size)) != PF_OK) + goto exit; + memcpy(data.dest.pos, buff_out_pos, chunk_size); + data.dest.pos += chunk_size; + buff_out_pos += chunk_size; + write += chunk_size; + } + total_write += write; + + blocks++; + if (blocks % BLOCK_COUNT_REPORT == 0) { + fprintf(stderr, "Number of processed patch blocks: %"PRIu64"\n", blocks); + } + } + + result = PF_OK; + +exit: + fprintf(stderr, "Total processed blocks: %"PRIu64"\n", blocks); + free_data(&data, funcs); + return result; +} diff --git a/src/upgrade-apply/patch/patch.h b/src/upgrade-apply/patch/patch.h new file mode 100644 index 0000000..919802c --- /dev/null +++ b/src/upgrade-apply/patch/patch.h @@ -0,0 +1,67 @@ +/* + * tota-ua (previously libtota) + * + * Copyright (c) 2022 Samsung Electronics Co., Ltd. + * + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#pragma once + +#include + +#include +#include +#include + +#define PF_OK 0 +#define PF_ERROR_OPEN_FILE 1 +#define PF_ERROR_MMAP 2 +#define PF_ERROR_INVALID_PATCH_FILE 3 +#define PF_ERROR_DECOMPRESSION 4 +#define PF_NEED_MORE_DATA 5 + +#define BUFF_OUT_LEN 4096 + +struct file_info { + int fd; + void *ptr; + off_t offset; + size_t mmapped_len; + uint8_t* pos; + size_t len; + int mmap_prot; + int mmap_flags; +}; + +struct bs_data { + struct file_info src; + struct file_info dest; + TAR *patch_tar; + size_t patch_len; + unsigned char buff_in[T_BLOCKSIZE]; + unsigned char buff_out[BUFF_OUT_LEN]; + size_t patch_remaining; + size_t available_in, available_out; + const uint8_t *compressed_pos; + uint8_t *decompressed_pos; + size_t total_size; + void *state; +}; + +struct dec_funcs { + int (*init)(struct bs_data *data); + int (*decompress)(struct bs_data *); + void (*clean)(struct bs_data *data); +}; + +extern int apply_patch(const char *source_file, const char *dest_file, TAR *patch_tar, size_t dest_size, struct dec_funcs *funcs); diff --git a/src/upgrade-apply/sha1/sha1.c b/src/upgrade-apply/sha1/sha1.c new file mode 100644 index 0000000..fe8da83 --- /dev/null +++ b/src/upgrade-apply/sha1/sha1.c @@ -0,0 +1,295 @@ +/* +SHA-1 in C +By Steve Reid +100% Public Domain + +Test Vectors (from FIPS PUB 180-1) +"abc" + A9993E36 4706816A BA3E2571 7850C26C 9CD0D89D +"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" + 84983E44 1C3BD26E BAAE4AA1 F95129E5 E54670F1 +A million repetitions of "a" + 34AA973C D4C4DAA4 F61EEB2B DBAD2731 6534016F +*/ + +/* #define LITTLE_ENDIAN * This should be #define'd already, if true. */ +/* #define SHA1HANDSOFF * Copies data before messing with it. */ + +#define SHA1HANDSOFF + +#include +#include + +/* for uint32_t */ +#include + +#include "sha1.h" + + +#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits)))) + +/* blk0() and blk() perform the initial expand. */ +/* I got the idea of expanding during the round function from SSLeay */ +#if BYTE_ORDER == LITTLE_ENDIAN +#define blk0(i) (block->l[i] = (rol(block->l[i],24)&0xFF00FF00) \ + |(rol(block->l[i],8)&0x00FF00FF)) +#elif BYTE_ORDER == BIG_ENDIAN +#define blk0(i) block->l[i] +#else +#error "Endianness not defined!" +#endif +#define blk(i) (block->l[i&15] = rol(block->l[(i+13)&15]^block->l[(i+8)&15] \ + ^block->l[(i+2)&15]^block->l[i&15],1)) + +/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */ +#define R0(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk0(i)+0x5A827999+rol(v,5);w=rol(w,30); +#define R1(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk(i)+0x5A827999+rol(v,5);w=rol(w,30); +#define R2(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0x6ED9EBA1+rol(v,5);w=rol(w,30); +#define R3(v,w,x,y,z,i) z+=(((w|x)&y)|(w&x))+blk(i)+0x8F1BBCDC+rol(v,5);w=rol(w,30); +#define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1D6+rol(v,5);w=rol(w,30); + + +/* Hash a single 512-bit block. This is the core of the algorithm. */ + +void SHA1Transform( + uint32_t state[5], + const unsigned char buffer[64] +) +{ + uint32_t a, b, c, d, e; + + typedef union + { + unsigned char c[64]; + uint32_t l[16]; + } CHAR64LONG16; + +#ifdef SHA1HANDSOFF + CHAR64LONG16 block[1]; /* use array to appear as a pointer */ + + memcpy(block, buffer, 64); +#else + /* The following had better never be used because it causes the + * pointer-to-const buffer to be cast into a pointer to non-const. + * And the result is written through. I threw a "const" in, hoping + * this will cause a diagnostic. 
+ */ + CHAR64LONG16 *block = (const CHAR64LONG16 *) buffer; +#endif + /* Copy context->state[] to working vars */ + a = state[0]; + b = state[1]; + c = state[2]; + d = state[3]; + e = state[4]; + /* 4 rounds of 20 operations each. Loop unrolled. */ + R0(a, b, c, d, e, 0); + R0(e, a, b, c, d, 1); + R0(d, e, a, b, c, 2); + R0(c, d, e, a, b, 3); + R0(b, c, d, e, a, 4); + R0(a, b, c, d, e, 5); + R0(e, a, b, c, d, 6); + R0(d, e, a, b, c, 7); + R0(c, d, e, a, b, 8); + R0(b, c, d, e, a, 9); + R0(a, b, c, d, e, 10); + R0(e, a, b, c, d, 11); + R0(d, e, a, b, c, 12); + R0(c, d, e, a, b, 13); + R0(b, c, d, e, a, 14); + R0(a, b, c, d, e, 15); + R1(e, a, b, c, d, 16); + R1(d, e, a, b, c, 17); + R1(c, d, e, a, b, 18); + R1(b, c, d, e, a, 19); + R2(a, b, c, d, e, 20); + R2(e, a, b, c, d, 21); + R2(d, e, a, b, c, 22); + R2(c, d, e, a, b, 23); + R2(b, c, d, e, a, 24); + R2(a, b, c, d, e, 25); + R2(e, a, b, c, d, 26); + R2(d, e, a, b, c, 27); + R2(c, d, e, a, b, 28); + R2(b, c, d, e, a, 29); + R2(a, b, c, d, e, 30); + R2(e, a, b, c, d, 31); + R2(d, e, a, b, c, 32); + R2(c, d, e, a, b, 33); + R2(b, c, d, e, a, 34); + R2(a, b, c, d, e, 35); + R2(e, a, b, c, d, 36); + R2(d, e, a, b, c, 37); + R2(c, d, e, a, b, 38); + R2(b, c, d, e, a, 39); + R3(a, b, c, d, e, 40); + R3(e, a, b, c, d, 41); + R3(d, e, a, b, c, 42); + R3(c, d, e, a, b, 43); + R3(b, c, d, e, a, 44); + R3(a, b, c, d, e, 45); + R3(e, a, b, c, d, 46); + R3(d, e, a, b, c, 47); + R3(c, d, e, a, b, 48); + R3(b, c, d, e, a, 49); + R3(a, b, c, d, e, 50); + R3(e, a, b, c, d, 51); + R3(d, e, a, b, c, 52); + R3(c, d, e, a, b, 53); + R3(b, c, d, e, a, 54); + R3(a, b, c, d, e, 55); + R3(e, a, b, c, d, 56); + R3(d, e, a, b, c, 57); + R3(c, d, e, a, b, 58); + R3(b, c, d, e, a, 59); + R4(a, b, c, d, e, 60); + R4(e, a, b, c, d, 61); + R4(d, e, a, b, c, 62); + R4(c, d, e, a, b, 63); + R4(b, c, d, e, a, 64); + R4(a, b, c, d, e, 65); + R4(e, a, b, c, d, 66); + R4(d, e, a, b, c, 67); + R4(c, d, e, a, b, 68); + R4(b, c, d, e, a, 69); + R4(a, b, c, d, e, 70); + R4(e, a, b, c, d, 71); + R4(d, e, a, b, c, 72); + R4(c, d, e, a, b, 73); + R4(b, c, d, e, a, 74); + R4(a, b, c, d, e, 75); + R4(e, a, b, c, d, 76); + R4(d, e, a, b, c, 77); + R4(c, d, e, a, b, 78); + R4(b, c, d, e, a, 79); + /* Add the working vars back into context.state[] */ + state[0] += a; + state[1] += b; + state[2] += c; + state[3] += d; + state[4] += e; + /* Wipe variables */ + a = b = c = d = e = 0; +#ifdef SHA1HANDSOFF + memset(block, '\0', sizeof(block)); +#endif +} + + +/* SHA1Init - Initialize new context */ + +void SHA1Init( + SHA1_CTX * context +) +{ + /* SHA1 initialization constants */ + context->state[0] = 0x67452301; + context->state[1] = 0xEFCDAB89; + context->state[2] = 0x98BADCFE; + context->state[3] = 0x10325476; + context->state[4] = 0xC3D2E1F0; + context->count[0] = context->count[1] = 0; +} + + +/* Run your data through this. */ + +void SHA1Update( + SHA1_CTX * context, + const unsigned char *data, + uint32_t len +) +{ + uint32_t i; + + uint32_t j; + + j = context->count[0]; + if ((context->count[0] += len << 3) < j) + context->count[1]++; + context->count[1] += (len >> 29); + j = (j >> 3) & 63; + if ((j + len) > 63) + { + memcpy(&context->buffer[j], data, (i = 64 - j)); + SHA1Transform(context->state, context->buffer); + for (; i + 63 < len; i += 64) + { + SHA1Transform(context->state, &data[i]); + } + j = 0; + } + else + i = 0; + memcpy(&context->buffer[j], &data[i], len - i); +} + + +/* Add padding and return the message digest. 
*/ + +void SHA1Final( + unsigned char digest[20], + SHA1_CTX * context +) +{ + unsigned i; + + unsigned char finalcount[8]; + + unsigned char c; + +#if 0 /* untested "improvement" by DHR */ + /* Convert context->count to a sequence of bytes + * in finalcount. Second element first, but + * big-endian order within element. + * But we do it all backwards. + */ + unsigned char *fcp = &finalcount[8]; + + for (i = 0; i < 2; i++) + { + uint32_t t = context->count[i]; + + int j; + + for (j = 0; j < 4; t >>= 8, j++) + *--fcp = (unsigned char) t} +#else + for (i = 0; i < 8; i++) + { + finalcount[i] = (unsigned char) ((context->count[(i >= 4 ? 0 : 1)] >> ((3 - (i & 3)) * 8)) & 255); /* Endian independent */ + } +#endif + c = 0200; + SHA1Update(context, &c, 1); + while ((context->count[0] & 504) != 448) + { + c = 0000; + SHA1Update(context, &c, 1); + } + SHA1Update(context, finalcount, 8); /* Should cause a SHA1Transform() */ + for (i = 0; i < 20; i++) + { + digest[i] = (unsigned char) + ((context->state[i >> 2] >> ((3 - (i & 3)) * 8)) & 255); + } + /* Wipe variables */ + memset(context, '\0', sizeof(*context)); + memset(&finalcount, '\0', sizeof(finalcount)); +} + +void SHA1( + char *hash_out, + const char *str, + int len) +{ + SHA1_CTX ctx; + unsigned int ii; + + SHA1Init(&ctx); + for (ii=0; ii + 100% Public Domain + */ + +#include "stdint.h" + +typedef struct +{ + uint32_t state[5]; + uint32_t count[2]; + unsigned char buffer[64]; +} SHA1_CTX; + +void SHA1Transform( + uint32_t state[5], + const unsigned char buffer[64] + ); + +void SHA1Init( + SHA1_CTX * context + ); + +void SHA1Update( + SHA1_CTX * context, + const unsigned char *data, + uint32_t len + ); + +void SHA1Final( + unsigned char digest[20], + SHA1_CTX * context + ); + +void SHA1( + char *hash_out, + const char *str, + int len); + +#endif /* SHA1_H */ diff --git a/upgrade.manifest b/upgrade.manifest new file mode 100644 index 0000000..1b29af6 --- /dev/null +++ b/upgrade.manifest @@ -0,0 +1,9 @@ + + + + + + + + + -- 2.7.4 From ff04385aedd1fcaecadc6ca6ba0c12bb524010f1 Mon Sep 17 00:00:00 2001 From: Karol Lewandowski Date: Wed, 24 Aug 2022 11:29:52 +0200 Subject: [PATCH 3/3] Import system-rw-upgrade This commit imports system-rw-upgrade from commit fbb4ba4cf ("Add critical log where upgrade status is changing"). 
The following changes were made with respect to the original repository:
- move scripts and unit files under scripts/rw-upgrade
- move logic behind installing scripts and unit files to scripts/rw-upgrade/CMakeLists.txt
- depend only on UPGRADE_ variables
- integrate build into new spec

Change-Id: I305fe67d2ac32df7d2b9d05b51b67a1fb808a68b
---
 CMakeLists.txt                                 |   1 +
 packaging/upgrade-engine.spec                  |  63 ++++++-
 scripts/rw-upgrade/99-sdb-switch.rules         |   2 +
 scripts/rw-upgrade/CMakeLists.txt              |  34 ++++
 scripts/rw-upgrade/data-checkpoint.service.in  |  17 ++
 scripts/rw-upgrade/data-checkpoint.target      |   7 +
 scripts/rw-upgrade/install-sdb-rule.sh.in      |  11 ++
 scripts/rw-upgrade/offline-update.service.in   |  13 ++
 scripts/rw-upgrade/record-version.sh.in        |   9 +
 scripts/rw-upgrade/rw-update-macro.inc         | 239 +++++++++++++++++++++++++
 scripts/rw-upgrade/udev-sdb-init.service.in    |   9 +
 scripts/rw-upgrade/udev-trigger-dmbow@.service |  21 +++
 scripts/rw-upgrade/update-checkpoint-create.sh | 121 +++++++++++++
 scripts/rw-upgrade/update-finalize.service     |  12 ++
 scripts/rw-upgrade/update-finalize.sh.in       |  50 ++++++
 scripts/rw-upgrade/update-init.sh.in           |  33 ++++
 scripts/rw-upgrade/update-post.service         |  12 ++
 scripts/rw-upgrade/update-post.sh              |  20 +++
 scripts/rw-upgrade/update.sh.in                | 178 ++++++++++++++++++
 19 files changed, 851 insertions(+), 1 deletion(-)
 create mode 100644 scripts/rw-upgrade/99-sdb-switch.rules
 create mode 100644 scripts/rw-upgrade/CMakeLists.txt
 create mode 100644 scripts/rw-upgrade/data-checkpoint.service.in
 create mode 100644 scripts/rw-upgrade/data-checkpoint.target
 create mode 100644 scripts/rw-upgrade/install-sdb-rule.sh.in
 create mode 100644 scripts/rw-upgrade/offline-update.service.in
 create mode 100644 scripts/rw-upgrade/record-version.sh.in
 create mode 100644 scripts/rw-upgrade/rw-update-macro.inc
 create mode 100644 scripts/rw-upgrade/udev-sdb-init.service.in
 create mode 100644 scripts/rw-upgrade/udev-trigger-dmbow@.service
 create mode 100644 scripts/rw-upgrade/update-checkpoint-create.sh
 create mode 100644 scripts/rw-upgrade/update-finalize.service
 create mode 100644 scripts/rw-upgrade/update-finalize.sh.in
 create mode 100644 scripts/rw-upgrade/update-init.sh.in
 create mode 100644 scripts/rw-upgrade/update-post.service
 create mode 100644 scripts/rw-upgrade/update-post.sh
 create mode 100644 scripts/rw-upgrade/update.sh.in

diff --git a/CMakeLists.txt b/CMakeLists.txt
index ed144fc..1861131 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -45,3 +45,4 @@ ADD_SUBDIRECTORY(src/img-verifier)
 ADD_SUBDIRECTORY(src/upgrade-apply)
 ADD_SUBDIRECTORY(src/blkid-print)
 ADD_SUBDIRECTORY(data)
+ADD_SUBDIRECTORY(scripts/rw-upgrade)
diff --git a/packaging/upgrade-engine.spec b/packaging/upgrade-engine.spec
index 6a4696a..2970792 100644
--- a/packaging/upgrade-engine.spec
+++ b/packaging/upgrade-engine.spec
@@ -1,8 +1,9 @@
 %define upgrade_support_dir %{_libexecdir}/upgrade-support
+%define upgrade_scripts_dir %TZ_SYS_UPGRADE
 
 Name: upgrade-engine
 Summary: Upgrade engine for Tizen
-Version: 7.5.0
+Version: 7.5.1
 Release: 0
 Group: System
 License: Apache-2.0
@@ -22,6 +23,9 @@ BuildRequires: libtar-devel
 Requires: tar
 Requires: gzip
+# from factory-reset:
+Requires: %{_bindir}/rstsmack
+
 %description
 Update engine for updating Tizen platform images using delta files
 generated by upgrade-tools.
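The new rstsmack requirement is consumed by the restore helpers imported below: restore_from_zip in rw-update-macro.inc writes a "path label" pair into a temporary file and hands that file to rstsmack, so the Smack label is re-applied after a file has been unpacked from the backup archive. A minimal sketch of that call pattern; the path and label here are made up for illustration, while the real values are read from smack_label.txt:

#!/bin/sh
# Sketch of the Smack-restore step used by restore_from_zip; values are hypothetical.
TMP=$(mktemp /tmp/smackinfo.XXXXXX)
# rstsmack takes a file of "path label" entries and re-applies the listed labels.
echo "/opt/usr/apps/example System::Shared" > "$TMP"
rstsmack "$TMP"
rm -f "$TMP"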
@@ -44,6 +48,10 @@ LDFLAGS="$LDFLAGS" -DCMAKE_INSTALL_PREFIX=%{_prefix} \ -DIMG_VERIFIER_ROOT_CA_DIR=%{img_verifier_root_ca_dir} \ -DUPGRADE_INITRD_LIST_DIR=%{upgrade_initrd_list_dir} \ + -DUPGRADE_SCRIPTS_DIR=%{upgrade_scripts_dir} \ + -DUPGRADE_VAR_DIR=%TZ_SYS_VAR \ + -DUPGRADE_PKGSCRIPTS_DIR=%TZ_SYS_UPGRADE_SCRIPTS \ + -DUNIT_DIR=%{_unitdir} \ . make %{?jobs:-j%jobs} @@ -67,6 +75,32 @@ mkdir -p %{buildroot}%{_unitdir}/recovery.service.wants install -m 644 scripts/clone_partitions/clone_partitions_recovery.service %{buildroot}%{_unitdir} ln -s ../clone_partitions_recovery.service %{buildroot}%{_unitdir}/recovery.service.wants/ +# rw-update +mkdir -p %{buildroot}%{_unitdir}/system-update.target.wants +mkdir -p %{buildroot}%{_unitdir}/data-checkpoint.target.wants +mkdir -p %{buildroot}%{_unitdir}/delayed.target.wants +ln -s ../getty.target %{buildroot}%{_unitdir}/system-update.target.wants +ln -s ../cynara.socket %{buildroot}%{_unitdir}/system-update.target.wants +ln -s ../dbus.socket %{buildroot}%{_unitdir}/system-update.target.wants +ln -s ../udev-sdb-init.service %{buildroot}%{_unitdir}/system-update.target.wants +ln -s ../offline-update.service %{buildroot}%{_unitdir}/system-update.target.wants +ln -s ../udev-trigger-dmbow@.service %{buildroot}%{_unitdir}/system-update.target.wants/udev-trigger-dmbow@user.service +ln -s ../data-checkpoint.target %{buildroot}%{_unitdir}/system-update.target.wants +ln -s ../data-checkpoint.service %{buildroot}%{_unitdir}/data-checkpoint.target.wants/data-checkpoint.service +ln -s ../update-post.service %{buildroot}%{_unitdir}/system-update.target.wants +ln -s ../update-finalize.service %{buildroot}%{_unitdir}/delayed.target.wants + +%posttrans +newrulesfile=99-sdb-switch.rules +newrulespath=%{upgrade_scripts_dir}/$newrulesfile +rulesdir=/usr/lib/udev/rules.d +if [ -e "$rulesdir/$newrulesfile" ]; then + echo "$newrulesfile exists. 
Skip creating symlink" +else + mkdir -p "$rulesdir" + ln -sf "$newrulespath" "$rulesdir" +fi + %files %license LICENSE %manifest upgrade.manifest @@ -91,3 +125,30 @@ ln -s ../clone_partitions_recovery.service %{buildroot}%{_unitdir}/recovery.serv # Image verifier %{_sbindir}/img-verifier %attr(755,root,root) %{img_verifier_root_ca_dir} +# rw-upgrade +%{_unitdir}/data-checkpoint.service +%{_unitdir}/data-checkpoint.target +%{_unitdir}/data-checkpoint.target.wants/data-checkpoint.service +%{_unitdir}/delayed.target.wants/update-finalize.service +%{_unitdir}/offline-update.service +%{_unitdir}/system-update.target.wants/cynara.socket +%{_unitdir}/system-update.target.wants/data-checkpoint.target +%{_unitdir}/system-update.target.wants/dbus.socket +%{_unitdir}/system-update.target.wants/getty.target +%{_unitdir}/system-update.target.wants/offline-update.service +%{_unitdir}/system-update.target.wants/udev-sdb-init.service +%{_unitdir}/system-update.target.wants/udev-trigger-dmbow@user.service +%{_unitdir}/system-update.target.wants/update-post.service +%{_unitdir}/udev-sdb-init.service +%{_unitdir}/udev-trigger-dmbow@.service +%{_unitdir}/update-finalize.service +%{_unitdir}/update-post.service +%{upgrade_scripts_dir}/99-sdb-switch.rules +%{upgrade_scripts_dir}/install-sdb-rule.sh +%{upgrade_scripts_dir}/record-version.sh +%{upgrade_scripts_dir}/rw-update-macro.inc +%{upgrade_scripts_dir}/update-checkpoint-create.sh +%{upgrade_scripts_dir}/update-finalize.sh +%{upgrade_scripts_dir}/update-init.sh +%{upgrade_scripts_dir}/update-post.sh +%{upgrade_scripts_dir}/update.sh diff --git a/scripts/rw-upgrade/99-sdb-switch.rules b/scripts/rw-upgrade/99-sdb-switch.rules new file mode 100644 index 0000000..75238f5 --- /dev/null +++ b/scripts/rw-upgrade/99-sdb-switch.rules @@ -0,0 +1,2 @@ +SUBSYSTEM=="switch", ATTR{name}=="usb_cable", ATTR{state}=="1", RUN+="/usr/bin/direct_set_debug.sh --sdb-set" +SUBSYSTEM=="switch", ATTR{name}=="usb_cable", ATTR{state}=="0", RUN+="/usr/bin/direct_set_debug.sh --sdb-unset" diff --git a/scripts/rw-upgrade/CMakeLists.txt b/scripts/rw-upgrade/CMakeLists.txt new file mode 100644 index 0000000..48dddc8 --- /dev/null +++ b/scripts/rw-upgrade/CMakeLists.txt @@ -0,0 +1,34 @@ +CONFIGURE_FILE(install-sdb-rule.sh.in install-sdb-rule.sh @ONLY) +CONFIGURE_FILE(update-init.sh.in update-init.sh @ONLY) +CONFIGURE_FILE(update.sh.in update.sh @ONLY) +CONFIGURE_FILE(update-finalize.sh.in update-finalize.sh @ONLY) +CONFIGURE_FILE(record-version.sh.in record-version.sh @ONLY) +CONFIGURE_FILE(udev-sdb-init.service.in udev-sdb-init.service @ONLY) +CONFIGURE_FILE(offline-update.service.in offline-update.service @ONLY) +CONFIGURE_FILE(data-checkpoint.service.in data-checkpoint.service @ONLY) + +INSTALL(FILES + udev-sdb-init.service + offline-update.service + data-checkpoint.service + data-checkpoint.target + update-post.service + update-finalize.service + udev-trigger-dmbow@.service + DESTINATION ${UNIT_DIR}) + +INSTALL(FILES + 99-sdb-switch.rules + rw-update-macro.inc + DESTINATION ${UPGRADE_SCRIPTS_DIR}) + +INSTALL(FILES + install-sdb-rule.sh + record-version.sh + update-init.sh + update.sh + update-checkpoint-create.sh + update-post.sh + update-finalize.sh + DESTINATION ${UPGRADE_SCRIPTS_DIR} + PERMISSIONS OWNER_READ OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE) diff --git a/scripts/rw-upgrade/data-checkpoint.service.in b/scripts/rw-upgrade/data-checkpoint.service.in new file mode 100644 index 0000000..ed5d32f --- /dev/null +++ 
b/scripts/rw-upgrade/data-checkpoint.service.in @@ -0,0 +1,17 @@ +[Unit] +Description=Data checkpoint +DefaultDependencies=no +Before=systemd-journald.service opt.mount opt-usr.mount mnt-inform.mount + + +[Service] +Type=oneshot +ExecStart=@UPGRADE_SCRIPTS_DIR@/update-checkpoint-create.sh +RemainAfterExit=yes +StandardOutput=journal+console +StandardError=journal+console +SmackProcessLabel=System + +[Install] +WantedBy=data-checkpoint.target + diff --git a/scripts/rw-upgrade/data-checkpoint.target b/scripts/rw-upgrade/data-checkpoint.target new file mode 100644 index 0000000..b4694c5 --- /dev/null +++ b/scripts/rw-upgrade/data-checkpoint.target @@ -0,0 +1,7 @@ +[Unit] +Description=Data checkpoint +DefaultDependencies=no +Conflicts=shutdown.target +Before=local-fs-pre.target +OnFailure=emergency.target +OnFailureJobMode=replace-irreversibly diff --git a/scripts/rw-upgrade/install-sdb-rule.sh.in b/scripts/rw-upgrade/install-sdb-rule.sh.in new file mode 100644 index 0000000..bb90ceb --- /dev/null +++ b/scripts/rw-upgrade/install-sdb-rule.sh.in @@ -0,0 +1,11 @@ +#!/bin/bash + +PATH="/usr/bin:/bin:/usr/sbin:/sbin" + +SDB_RULE="99-sdb-switch.rules" +DEST=/opt/data/update + +if [ ! -e ${DEST}/${SDB_RULE} ]; then + /bin/mkdir -p ${DEST} + /bin/cp @UPGRADE_SCRIPTS_DIR@/${SDB_RULE} ${DEST} +fi diff --git a/scripts/rw-upgrade/offline-update.service.in b/scripts/rw-upgrade/offline-update.service.in new file mode 100644 index 0000000..b1d153d --- /dev/null +++ b/scripts/rw-upgrade/offline-update.service.in @@ -0,0 +1,13 @@ +[Unit] +Description=System update script service +DefaultDependencies=no +Requires=sysinit.target +After=sysinit.target + +[Service] +Type=oneshot +SmackProcessLabel=System::Privileged +ExecStartPre=/bin/rm -f @UPGRADE_VAR_DIR@/log/system-rw-update.log +ExecStart=@UPGRADE_SCRIPTS_DIR@/update-init.sh +StandardOutput=file:@UPGRADE_VAR_DIR@/log/system-rw-update.log +StandardError=file:@UPGRADE_VAR_DIR@/log/system-rw-update.log diff --git a/scripts/rw-upgrade/record-version.sh.in b/scripts/rw-upgrade/record-version.sh.in new file mode 100644 index 0000000..2a59877 --- /dev/null +++ b/scripts/rw-upgrade/record-version.sh.in @@ -0,0 +1,9 @@ +#!/bin/sh +PATH="/usr/bin:/bin:/usr/sbin:/sbin" + +RW_MACRO=@UPGRADE_SCRIPTS_DIR@/rw-update-macro.inc + +if [ -e ${RW_MACRO} ]; then + source ${RW_MACRO} + write_version_info +fi diff --git a/scripts/rw-upgrade/rw-update-macro.inc b/scripts/rw-upgrade/rw-update-macro.inc new file mode 100644 index 0000000..5ed8aea --- /dev/null +++ b/scripts/rw-upgrade/rw-update-macro.inc @@ -0,0 +1,239 @@ +#!/bin/sh + +STAT="/usr/bin/stat" +OLD_VER= +NEW_VER= +OLD_VER_INFO="/opt/etc/version" +COLOR_ERROR='\033[01;31m' +COLOR_DEBUG='\033[01;34m' +COLOR_NOTIFY='\033[01;33m' +COLOR_RESET='\033[00;00m' + +DEBUG() +{ + LOG_TEXT=$1 + echo -e "${COLOR_DEBUG}${LOG_TEXT}${COLOR_RESET}" +} + +ERROR() +{ + LOG_TEXT=$1 + echo -e "${COLOR_ERROR}${LOG_TEXT}${COLOR_RESET}" +} + +NOTIFY() +{ + LOG_TEXT=$1 + echo -e "${COLOR_NOTIFY}${LOG_TEXT}${COLOR_RESET}" +} + +CRITICAL_LOG() +{ + LOG="[$SCRIPT_NAME]$1" + dlogsend -k "$LOG" + if [ "$2" != "" ]; then + echo "$LOG" >> "$2" + fi + echo "$LOG" +} + +# Convert version to 4 digits +convert_version() { + i=0 + VER=(0 0 0 0) + for ENT in $(echo "$1" | tr "." 
"\n"); do + VER[$i]=$ENT + ((i++)) + done + CVT_VER=${VER[0]}.${VER[1]}.${VER[2]}.${VER[3]} +} + +get_version_info() { + if [ -f $OLD_VER_INFO ]; then + source $OLD_VER_INFO + fi + NEW_VER=$(cat /etc/config/model-config.xml | grep platform.version\" \ + | sed -e 's/.*>\(.*\)<.*/\1/' | head -1) + convert_version $NEW_VER + NEW_VER=$CVT_VER +} + +write_version_info() { + get_version_info + echo "OLD_VER=$NEW_VER" > $OLD_VER_INFO + /bin/chmod 775 $OLD_VER_INFO + /bin/chown .system_fw $OLD_VER_INFO +} + +BACKUP_ZIP="/usr/system/RestoreDir/opt.zip" + +restore_from_zip() { + + TARGET_FILE=$1 + TARGET_DIR=$(dirname /$TARGET_FILE) + + echo "restore_from_zip: $TARGET_FILE" + if [ ! -d $TARGET_DIR ]; then + mkdir -p $TARGET_DIR + fi + + # The attributes of directory can not be overwritten by unzip. + # Unzip the directory into temp path and copy it to original path. + if [ "z${TARGET_FILE: -1}" = "z/" ]; then + RESTORE_DIR_PATH="/tmp/restored_dir" + mkdir -p $RESTORE_DIR_PATH + unzip -oX $BACKUP_ZIP $TARGET_FILE -d $RESTORE_DIR_PATH > /dev/null + cp -af $RESTORE_DIR_PATH/$TARGET_FILE $TARGET_DIR + rm -rf $RESTORE_DIR_PATH + else + unzip -oX $BACKUP_ZIP $TARGET_FILE -d / > /dev/null + fi + + # Restore smack label + TMP=$(mktemp /tmp/smackinfo.XXXXXX) + PATH_FOR_SMACK=$(echo $TARGET_FILE | sed -e "s/\/$//") + SMACK_VAL=$(grep "$PATH_FOR_SMACK " /usr/system/RestoreDir/smack_label.txt | \ + { read FILE SMACK; echo $SMACK; }) + if [ "z$SMACK_VAL" = "z" ]; then + echo "No smack label for $PATH_FOR_SMACK" + else + echo "/$PATH_FOR_SMACK $SMACK_VAL" > $TMP + rstsmack $TMP + fi + rm $TMP +} + +restore_backup_file() { + + OVERWRITE= + RESTORE_PATH= + + while [ "z$1" != "z" ]; do + case $1 in + -f ) + OVERWRITE=$1 + ;; + -r ) + ;; + * ) + RESTORE_PATH=$1 + ;; + esac + shift + done + + if [ "z$RESTORE_PATH" = "z" ]; then + echo "There is no file to restore" + return + fi + + if [ ! "z${RESTORE_PATH:0:1}" = "z/" ]; then + echo "Full path of file is required" + return + fi + + if [ -e "$RESTORE_PATH" ]; then + if [ ! "z$OVERWRITE" = "z" ]; then + echo "Warning: $RESTORE_PATH already exists. It will be overwritten" + else + echo "Error: $RESTORE_PATH already exists" + return + fi + fi + + # Check if the target file is backed up + PATH_FOR_ZIP=$(echo $RESTORE_PATH | sed -e "s/^\///") + FOUND_FILES=$(unzip -l $BACKUP_ZIP | awk '{print $4}' | \ + grep "^$PATH_FOR_ZIP") + FOUND_FILE=$(echo "$FOUND_FILES" | \ + grep -E "$PATH_FOR_ZIP$|$PATH_FOR_ZIP/$") + if [ "z$FOUND_FILE" = "z" ]; then + echo "Error: $RESTORE_PATH was not backed up" + return + fi + + echo "restore_backup_file: $RESTORE_PATH" + if [ ! 
"z${FOUND_FILE: -1}" = "z/" ]; then + restore_from_zip $FOUND_FILE + else + for FILE in $FOUND_FILES; do + restore_from_zip $FILE + done + fi +} + +COMMIT_BOW_PARTITION() +{ + LABEL=${1} + + BOWDEV_PATH=/dev/mapper/bowdev_${LABEL} + DM_NUMBER=$(($("${STAT}" -c "0x%T" $(readlink -f ${BOWDEV_PATH})))) + echo 2 > /sys/block/dm-${DM_NUMBER}/bow/state + NOTIFY "Changes on partition ${LABEL} commited (dm-bow)" +} + +COMMIT_F2FS_PARTITION() +{ + LABEL=${1} + PART_DEVICE=${2} + + mount -o remount,checkpoint=enable "${PART_DEVICE}" + NOTIFY "Changes on partition ${LABEL} commited (f2fs)" +} + +DELETE_BTRFS_PARTITION() { + PART="$1" + MNT_POINT="$("$FINDMNT" "$PART" -o TARGET)" + if [ "$MNT_POINT" = "" ]; then + ERROR "Unable to find btrfs mountpoint for: $PART" + return + fi + NOTIFY "Deleting btrfs snapshot" + umount "${MNT_POINT}" + mount -o subvolid=5,rw "${PART}" "${MNT_POINT}" + btrfs subvolume delete "$MNT_POINT"/fota/RO_update + rm -rf "$MNT_POINT/fota/RO_update" + umount "${MOUNT_POINT}" + mount -o rw "${PART}" "${MNT_POINT}" +} + + +COMMIT_BTRFS_PARTITION() +{ + LABEL=${1} + PART_SYSTEM_DATA=$(blkid --match-token PARTLABEL="${LABEL}" -o device -l || blkid --match-token LABEL="${LABEL}" -o device -l) + DELETE_BTRFS_PARTITION "${PART_SYSTEM_DATA}" + NOTIFY "Changes on partition ${LABEL} commited (btrfs)" +} + +COMMIT_PARTITION() +{ + LABEL=${1} + + PART_DEVICE=$(blkid --match-token PARTLABEL="${LABEL}" -o device -l || blkid --match-token LABEL="${LABEL}" -o device -l) + if [ -z "${PART_DEVICE}" ]; then + NOTIFY "WARNING: Partition ${LABEL} not found" + return + fi + + TYPE=$(blkid ${PART_DEVICE} -o value -s TYPE | tail -n 1) + if [ "${TYPE}" = "ext4" ]; then + COMMIT_BOW_PARTITION "${LABEL}" + elif [ "${TYPE}" = "f2fs" ]; then + COMMIT_F2FS_PARTITION "${LABEL}" ${PART_DEVICE} + elif [ "${TYPE}" = "btrfs" ]; then + COMMIT_BTRFS_PARTITION "${LABEL}" + else + ERROR "ERROR: Cannot commit ${LABEL}: Unsupported filesystem ${TYPE}" + fi +} + +COMMIT_CHANGES() +{ + COMMIT_PARTITION system-data + COMMIT_PARTITION user + if [[ ! $( /sys/block/dm-${DM_NUMBER}/bow/state + if [ "$(> "$2" + fi + echo "$LOG" +} + +SET_UPGRADE_STATUS() +{ + ${HAL_SET_UPGRADE_STATUS} "$1" + if [ $? 
-eq 0 ]; then + CRITICAL_LOG "set_upgrade_status success: ${1}" + else + CRITICAL_LOG "set_upgrade_status failed: ${1}" + fi +} + +if [ -f $RW_MACRO ]; +then + source $RW_MACRO +else + ERROR "FAIL: Upgrade macro does not exist" + UPDATE_PREPARE_ERR=1 +fi + +# TOOD: check if system is stable +UPDATE_SUCCESS=1 + +if [ -z ${UPDATE_PREPARE_ERR+x} ] && [ "${UPDATE_SUCCESS}" == "1" ]; then + /usr/bin/device_board_clear_boot_mode + COMMIT_CHANGES + /usr/bin/device_board_clear_partition_ab_cloned + /usr/bin/device_board_set_boot_success + SET_UPGRADE_STATUS 100 + reboot -f +else + reboot -f fota +fi diff --git a/scripts/rw-upgrade/update-init.sh.in b/scripts/rw-upgrade/update-init.sh.in new file mode 100644 index 0000000..f7b89c0 --- /dev/null +++ b/scripts/rw-upgrade/update-init.sh.in @@ -0,0 +1,33 @@ +#!/bin/sh +# +# RW update initialize script +# +PATH=/bin:/usr/bin:/sbin:/usr/sbin +RW_MACRO=@UPGRADE_SCRIPTS_DIR@/rw-update-macro.inc +RW_UPDATE=@UPGRADE_SCRIPTS_DIR@/update.sh +DEBUG_MODE_FILE=/opt/usr/.upgdebug + +if [ -f $RW_MACRO ]; then + source $RW_MACRO +fi + +# Restore rpm db +rm -rf /var/lib/rpm/* +restore_backup_file -f /opt/var/lib/rpm + +# Permission Update for shared directories +/etc/gumd/useradd.d/91_user-dbspace-permissions.post owner + +sleep 10 +if [ -f ${DEBUG_MODE_FILE} ]; then + DEBUG_MODE_FILE_OWNER=$(/bin/ls -l ${DEBUG_MODE_FILE} | /bin/cut -d " " -f 3) + if [ "${DEBUG_MODE_FILE_OWNER}" = "root" ]; then + echo "Enter RW debug mode" + echo "If you want to continue FOTA, please run ${RW_UPDATE}" + exit + fi + + echo "Warning: somebody make non-root debug mode file... ignore it" +fi + +exec /bin/sh $RW_UPDATE diff --git a/scripts/rw-upgrade/update-post.service b/scripts/rw-upgrade/update-post.service new file mode 100644 index 0000000..67f958e --- /dev/null +++ b/scripts/rw-upgrade/update-post.service @@ -0,0 +1,12 @@ +[Unit] +Description=RW Update finalization +DefaultDependencies=no +Wants=offline-update.service +After=offline-update.service +IgnoreOnIsolate=true + +[Service] +Type=oneshot +SmackProcessLabel=System +ExecStart=/usr/share/upgrade/update-post.sh + diff --git a/scripts/rw-upgrade/update-post.sh b/scripts/rw-upgrade/update-post.sh new file mode 100644 index 0000000..dd52044 --- /dev/null +++ b/scripts/rw-upgrade/update-post.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +PATH=/bin:/usr/bin:/sbin:/usr/sbin + +if [[ $(> "$2" + fi + echo "$LOG" +} + +PROGRESS_DIR=/tmp/upgrade +PROGRESS_INIT() +{ + mkdir -p ${PROGRESS_DIR} + echo "$1" > ${PROGRESS_DIR}/total + echo "0" > ${PROGRESS_DIR}/progress + chsmack -a _ ${PROGRESS_DIR} + chsmack -a _ ${PROGRESS_DIR}/total + chsmack -a _ ${PROGRESS_DIR}/progress + + # if GUI is available, run the GUI + if [ -e "/usr/bin/rw-update-ani" ]; then + export XDG_RUNTIME_DIR=/run + export TBM_DISPLAY_SERVER=1 + /usr/bin/rw-update-ani --wait & + fi +} + +PROGRESS() +{ + echo "$1" > ${PROGRESS_DIR}/progress +} + +# This result file will be used for Platform Update Control API to get update result. +UPDATE_RESULT_FILE=${UPDATE_DATA_DIR}/result +UPI_RW_UPDATE_ERROR_NONE=00 +UPI_RW_UPDATE_ERROR_PREFIX=FA +UPI_RW_UPDATE_ERROR_FAIL=${UPI_RW_UPDATE_ERROR_PREFIX}1A +SET_UPDATE_RESULT() +{ + echo "$1" > ${UPDATE_RESULT_FILE} +} + +SET_UPGRADE_STATUS() +{ + ${HAL_SET_UPGRADE_STATUS} "$1" + if [ $? -eq 0 ]; then + CRITICAL_LOG "set_upgrade_status success: ${1}" + else + CRITICAL_LOG "set_upgrade_status failed: ${1}" + fi +} + +if [[ $(
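To summarize the commit path in rw-update-macro.inc: COMMIT_PARTITION looks the partition up by PARTLABEL (with a LABEL fallback) via blkid, checks the filesystem type, and then commits pending changes with a per-filesystem mechanism. ext4 partitions sit on a dm-bow device and are committed by writing 2 to the device's bow/state sysfs node, f2fs partitions are committed by remounting with checkpoint=enable, and btrfs partitions are handled by deleting the fota/RO_update snapshot (note that DELETE_BTRFS_PARTITION unmounts "${MOUNT_POINT}", where "${MNT_POINT}" appears to be intended). A condensed, illustrative sketch of that dispatch, using the system-data label as COMMIT_CHANGES does and omitting the error handling and LABEL fallback of the original:

#!/bin/sh
# Condensed, illustrative view of COMMIT_PARTITION and its helpers (not the installed script).
LABEL=system-data   # COMMIT_CHANGES commits the system-data and user partitions
DEV=$(blkid --match-token PARTLABEL="${LABEL}" -o device -l)
TYPE=$(blkid "${DEV}" -o value -s TYPE | tail -n 1)
case "${TYPE}" in
ext4)
	# dm-bow backed partition: writing 2 to bow/state commits all changes since the checkpoint
	DM=$(($(stat -c "0x%T" "$(readlink -f "/dev/mapper/bowdev_${LABEL}")")))
	echo 2 > "/sys/block/dm-${DM}/bow/state"
	;;
f2fs)
	# f2fs: re-enabling checkpointing persists the changes written so far
	mount -o remount,checkpoint=enable "${DEV}"
	;;
btrfs)
	# btrfs: remount the top-level subvolume and drop the RO_update snapshot
	MNT=$(findmnt "${DEV}" -o TARGET -n)
	umount "${MNT}" && mount -o subvolid=5,rw "${DEV}" "${MNT}"
	btrfs subvolume delete "${MNT}/fota/RO_update"
	umount "${MNT}" && mount -o rw "${DEV}" "${MNT}"
	;;
*)
	echo "Cannot commit ${LABEL}: unsupported filesystem ${TYPE}" >&2
	;;
esac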