Initial codes of libtota 70/137870/3
author Sunmin Lee <sunm.lee@samsung.com>
Mon, 10 Jul 2017 06:55:45 +0000 (15:55 +0900)
committer Sunmin Lee <sunm.lee@samsung.com>
Fri, 14 Jul 2017 03:05:08 +0000 (12:05 +0900)
Change-Id: I38d3ff5839645d71cc81c77336a5d5d015d222b6

36 files changed:
CMakeLists.txt [new file with mode: 0755]
LICENSE.Apache-2.0 [new file with mode: 0755]
LICENSE.BSD-2-Clause [new file with mode: 0755]
LICENSE.BSD-3-Clause [new file with mode: 0755]
UPG/CreatePatch.py [new file with mode: 0755]
UPG/dzImagescript.sh [new file with mode: 0755]
UPG/ss_bsdiff [new file with mode: 0755]
UPG/ss_bspatch [new file with mode: 0755]
UPG/unpack.sh [new file with mode: 0755]
bsdiff/CMakeLists.txt [new file with mode: 0755]
bsdiff/ss_bsdiff.c [new file with mode: 0755]
bsdiff/ss_bspatch.c [new file with mode: 0755]
packaging/libtota.spec [new file with mode: 0755]
ss_engine/SS_Common.c [new file with mode: 0755]
ss_engine/SS_Common.h [new file with mode: 0755]
ss_engine/SS_Engine_Errors.h [new file with mode: 0755]
ss_engine/SS_Engine_Update.h [new file with mode: 0755]
ss_engine/SS_FSUpdate.h [new file with mode: 0755]
ss_engine/SS_ImageUpdate.c [new file with mode: 0755]
ss_engine/SS_ImageUpdate.h [new file with mode: 0755]
ss_engine/SS_MultiProcessUpdate.h [new file with mode: 0755]
ss_engine/SS_Nand.c [new file with mode: 0755]
ss_engine/SS_Nand.h [new file with mode: 0755]
ss_engine/SS_UPI.c [new file with mode: 0755]
ss_engine/SS_UPI.h [new file with mode: 0755]
ss_engine/fota_common.h [new file with mode: 0755]
ss_engine/fota_log.h [new file with mode: 0755]
ss_engine/fota_tar.c [new file with mode: 0755]
ss_engine/fota_tar.h [new file with mode: 0755]
ss_engine/ua.h [new file with mode: 0755]
ss_patch/sha1.c [new file with mode: 0755]
ss_patch/sha1.h [new file with mode: 0755]
ss_patch/ss_bspatch.c [new file with mode: 0755]
ss_patch/ss_patchdelta.c [new file with mode: 0755]
ss_patch/ss_patchdelta.h [new file with mode: 0755]
tota.pc.in [new file with mode: 0755]

diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100755 (executable)
index 0000000..814ee22
--- /dev/null
@@ -0,0 +1,74 @@
+CMAKE_MINIMUM_REQUIRED(VERSION 2.6)
+PROJECT(tota C)
+
+SET(SRCS
+       ss_engine/SS_Common.c
+       ss_patch/sha1.c
+       ss_engine/SS_UPI.c
+       ss_engine/fota_tar.c
+       ss_patch/ss_bspatch.c
+       ss_patch/ss_patchdelta.c
+)
+SET(HEADERS
+       ss_engine/fota_common.h
+       ss_engine/fota_log.h
+       ss_engine/fota_tar.h
+       ss_engine/SS_Common.h
+       ss_engine/SS_Engine_Errors.h
+       ss_engine/SS_Engine_Update.h
+       ss_engine/SS_FSUpdate.h
+       ss_engine/SS_ImageUpdate.h
+       ss_engine/SS_MultiProcessUpdate.h
+       ss_engine/SS_Nand.h
+       ss_engine/SS_UPI.h
+)
+#SET(EXEC_PREFIX "\${prefix}")
+SET(PREFIX ${CMAKE_INSTALL_PREFIX})
+SET(LIBDIR "${PREFIX}")
+SET(VERSION_MAJOR 1)
+SET(VERSION ${VERSION_MAJOR}.0.0)
+SET(LIBNAME "tota")
+
+#INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include)
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/ss_engine)
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/ss_patch)
+
+INCLUDE(FindPkgConfig)
+pkg_check_modules(packages REQUIRED
+       lib7zip
+)
+
+FOREACH(flag ${packages_CFLAGS})
+       SET(EXTRA_CFLAGS "${EXTRA_CFLAGS} ${flag}")
+ENDFOREACH(flag)
+
+SET(EXTRA_CFLAGS "${EXTRA_CFLAGS} -fvisibility=hidden -Wall")
+SET(EXTRA_CFLAGS "${EXTRA_CFLAGS} -g ")
+
+SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${EXTRA_CFLAGS}")
+
+FIND_PROGRAM(UNAME NAMES uname)
+EXEC_PROGRAM("${UNAME}" ARGS "-m" OUTPUT_VARIABLE "ARCH")
+IF("${ARCH}" STREQUAL "arm")
+       ADD_DEFINITIONS("-DTARGET")
+       MESSAGE("add -DTARGET")
+ENDIF("${ARCH}" STREQUAL "arm")
+
+#FIND_PROGRAM(MARSHALTOOL NAMES glib-genmarshal)
+#FIND_PROGRAM(DBUS_BINDING_TOOL NAMES dbus-binding-tool)
+SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--as-needed")
+
+CONFIGURE_FILE(tota.pc.in tota.pc @ONLY)
+
+ADD_LIBRARY(${LIBNAME} STATIC ${SRCS})
+TARGET_LINK_LIBRARIES(${LIBNAME} ${packages_LDFLAGS})
+#CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/modulename-api.pc.in ${CMAKE_CURRENT_BINARY_DIR}/modulename-api.pc @ONLY)
+#INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/modulename-api.pc DESTINATION lib/pkgconfig)
+INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/tota.pc DESTINATION lib/pkgconfig)
+FOREACH(hfile ${HEADERS})
+        INSTALL(FILES ${CMAKE_CURRENT_SOURCE_DIR}/${hfile} DESTINATION include)
+ENDFOREACH(hfile)
+INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/libtota.a DESTINATION /usr/lib)
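+# Consumers may then locate the installed static library through pkg-config (assuming the
+# tota.pc.in template fills in the usual Name/Cflags/Libs fields), e.g. `pkg-config --cflags --libs tota`.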
+
+ADD_SUBDIRECTORY(bsdiff)
diff --git a/LICENSE.Apache-2.0 b/LICENSE.Apache-2.0
new file mode 100755 (executable)
index 0000000..fef8c29
--- /dev/null
@@ -0,0 +1,202 @@
+                                 Apache License\r
+                           Version 2.0, January 2004\r
+                        http://www.apache.org/licenses/\r
+\r
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\r
+\r
+   1. Definitions.\r
+\r
+      "License" shall mean the terms and conditions for use, reproduction,\r
+      and distribution as defined by Sections 1 through 9 of this document.\r
+\r
+      "Licensor" shall mean the copyright owner or entity authorized by\r
+      the copyright owner that is granting the License.\r
+\r
+      "Legal Entity" shall mean the union of the acting entity and all\r
+      other entities that control, are controlled by, or are under common\r
+      control with that entity. For the purposes of this definition,\r
+      "control" means (i) the power, direct or indirect, to cause the\r
+      direction or management of such entity, whether by contract or\r
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the\r
+      outstanding shares, or (iii) beneficial ownership of such entity.\r
+\r
+      "You" (or "Your") shall mean an individual or Legal Entity\r
+      exercising permissions granted by this License.\r
+\r
+      "Source" form shall mean the preferred form for making modifications,\r
+      including but not limited to software source code, documentation\r
+      source, and configuration files.\r
+\r
+      "Object" form shall mean any form resulting from mechanical\r
+      transformation or translation of a Source form, including but\r
+      not limited to compiled object code, generated documentation,\r
+      and conversions to other media types.\r
+\r
+      "Work" shall mean the work of authorship, whether in Source or\r
+      Object form, made available under the License, as indicated by a\r
+      copyright notice that is included in or attached to the work\r
+      (an example is provided in the Appendix below).\r
+\r
+      "Derivative Works" shall mean any work, whether in Source or Object\r
+      form, that is based on (or derived from) the Work and for which the\r
+      editorial revisions, annotations, elaborations, or other modifications\r
+      represent, as a whole, an original work of authorship. For the purposes\r
+      of this License, Derivative Works shall not include works that remain\r
+      separable from, or merely link (or bind by name) to the interfaces of,\r
+      the Work and Derivative Works thereof.\r
+\r
+      "Contribution" shall mean any work of authorship, including\r
+      the original version of the Work and any modifications or additions\r
+      to that Work or Derivative Works thereof, that is intentionally\r
+      submitted to Licensor for inclusion in the Work by the copyright owner\r
+      or by an individual or Legal Entity authorized to submit on behalf of\r
+      the copyright owner. For the purposes of this definition, "submitted"\r
+      means any form of electronic, verbal, or written communication sent\r
+      to the Licensor or its representatives, including but not limited to\r
+      communication on electronic mailing lists, source code control systems,\r
+      and issue tracking systems that are managed by, or on behalf of, the\r
+      Licensor for the purpose of discussing and improving the Work, but\r
+      excluding communication that is conspicuously marked or otherwise\r
+      designated in writing by the copyright owner as "Not a Contribution."\r
+\r
+      "Contributor" shall mean Licensor and any individual or Legal Entity\r
+      on behalf of whom a Contribution has been received by Licensor and\r
+      subsequently incorporated within the Work.\r
+\r
+   2. Grant of Copyright License. Subject to the terms and conditions of\r
+      this License, each Contributor hereby grants to You a perpetual,\r
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\r
+      copyright license to reproduce, prepare Derivative Works of,\r
+      publicly display, publicly perform, sublicense, and distribute the\r
+      Work and such Derivative Works in Source or Object form.\r
+\r
+   3. Grant of Patent License. Subject to the terms and conditions of\r
+      this License, each Contributor hereby grants to You a perpetual,\r
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\r
+      (except as stated in this section) patent license to make, have made,\r
+      use, offer to sell, sell, import, and otherwise transfer the Work,\r
+      where such license applies only to those patent claims licensable\r
+      by such Contributor that are necessarily infringed by their\r
+      Contribution(s) alone or by combination of their Contribution(s)\r
+      with the Work to which such Contribution(s) was submitted. If You\r
+      institute patent litigation against any entity (including a\r
+      cross-claim or counterclaim in a lawsuit) alleging that the Work\r
+      or a Contribution incorporated within the Work constitutes direct\r
+      or contributory patent infringement, then any patent licenses\r
+      granted to You under this License for that Work shall terminate\r
+      as of the date such litigation is filed.\r
+\r
+   4. Redistribution. You may reproduce and distribute copies of the\r
+      Work or Derivative Works thereof in any medium, with or without\r
+      modifications, and in Source or Object form, provided that You\r
+      meet the following conditions:\r
+\r
+      (a) You must give any other recipients of the Work or\r
+          Derivative Works a copy of this License; and\r
+\r
+      (b) You must cause any modified files to carry prominent notices\r
+          stating that You changed the files; and\r
+\r
+      (c) You must retain, in the Source form of any Derivative Works\r
+          that You distribute, all copyright, patent, trademark, and\r
+          attribution notices from the Source form of the Work,\r
+          excluding those notices that do not pertain to any part of\r
+          the Derivative Works; and\r
+\r
+      (d) If the Work includes a "NOTICE" text file as part of its\r
+          distribution, then any Derivative Works that You distribute must\r
+          include a readable copy of the attribution notices contained\r
+          within such NOTICE file, excluding those notices that do not\r
+          pertain to any part of the Derivative Works, in at least one\r
+          of the following places: within a NOTICE text file distributed\r
+          as part of the Derivative Works; within the Source form or\r
+          documentation, if provided along with the Derivative Works; or,\r
+          within a display generated by the Derivative Works, if and\r
+          wherever such third-party notices normally appear. The contents\r
+          of the NOTICE file are for informational purposes only and\r
+          do not modify the License. You may add Your own attribution\r
+          notices within Derivative Works that You distribute, alongside\r
+          or as an addendum to the NOTICE text from the Work, provided\r
+          that such additional attribution notices cannot be construed\r
+          as modifying the License.\r
+\r
+      You may add Your own copyright statement to Your modifications and\r
+      may provide additional or different license terms and conditions\r
+      for use, reproduction, or distribution of Your modifications, or\r
+      for any such Derivative Works as a whole, provided Your use,\r
+      reproduction, and distribution of the Work otherwise complies with\r
+      the conditions stated in this License.\r
+\r
+   5. Submission of Contributions. Unless You explicitly state otherwise,\r
+      any Contribution intentionally submitted for inclusion in the Work\r
+      by You to the Licensor shall be under the terms and conditions of\r
+      this License, without any additional terms or conditions.\r
+      Notwithstanding the above, nothing herein shall supersede or modify\r
+      the terms of any separate license agreement you may have executed\r
+      with Licensor regarding such Contributions.\r
+\r
+   6. Trademarks. This License does not grant permission to use the trade\r
+      names, trademarks, service marks, or product names of the Licensor,\r
+      except as required for reasonable and customary use in describing the\r
+      origin of the Work and reproducing the content of the NOTICE file.\r
+\r
+   7. Disclaimer of Warranty. Unless required by applicable law or\r
+      agreed to in writing, Licensor provides the Work (and each\r
+      Contributor provides its Contributions) on an "AS IS" BASIS,\r
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\r
+      implied, including, without limitation, any warranties or conditions\r
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\r
+      PARTICULAR PURPOSE. You are solely responsible for determining the\r
+      appropriateness of using or redistributing the Work and assume any\r
+      risks associated with Your exercise of permissions under this License.\r
+\r
+   8. Limitation of Liability. In no event and under no legal theory,\r
+      whether in tort (including negligence), contract, or otherwise,\r
+      unless required by applicable law (such as deliberate and grossly\r
+      negligent acts) or agreed to in writing, shall any Contributor be\r
+      liable to You for damages, including any direct, indirect, special,\r
+      incidental, or consequential damages of any character arising as a\r
+      result of this License or out of the use or inability to use the\r
+      Work (including but not limited to damages for loss of goodwill,\r
+      work stoppage, computer failure or malfunction, or any and all\r
+      other commercial damages or losses), even if such Contributor\r
+      has been advised of the possibility of such damages.\r
+\r
+   9. Accepting Warranty or Additional Liability. While redistributing\r
+      the Work or Derivative Works thereof, You may choose to offer,\r
+      and charge a fee for, acceptance of support, warranty, indemnity,\r
+      or other liability obligations and/or rights consistent with this\r
+      License. However, in accepting such obligations, You may act only\r
+      on Your own behalf and on Your sole responsibility, not on behalf\r
+      of any other Contributor, and only if You agree to indemnify,\r
+      defend, and hold each Contributor harmless for any liability\r
+      incurred by, or claims asserted against, such Contributor by reason\r
+      of your accepting any such warranty or additional liability.\r
+\r
+   END OF TERMS AND CONDITIONS\r
+\r
+   APPENDIX: How to apply the Apache License to your work.\r
+\r
+      To apply the Apache License to your work, attach the following\r
+      boilerplate notice, with the fields enclosed by brackets "[]"\r
+      replaced with your own identifying information. (Don't include\r
+      the brackets!)  The text should be enclosed in the appropriate\r
+      comment syntax for the file format. We also recommend that a\r
+      file or class name and description of purpose be included on the\r
+      same "printed page" as the copyright notice for easier\r
+      identification within third-party archives.\r
+\r
+   Copyright [yyyy] [name of copyright owner]\r
+\r
+   Licensed under the Apache License, Version 2.0 (the "License");\r
+   you may not use this file except in compliance with the License.\r
+   You may obtain a copy of the License at\r
+\r
+       http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+   Unless required by applicable law or agreed to in writing, software\r
+   distributed under the License is distributed on an "AS IS" BASIS,\r
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+   See the License for the specific language governing permissions and\r
+   limitations under the License.\r
+\r
diff --git a/LICENSE.BSD-2-Clause b/LICENSE.BSD-2-Clause
new file mode 100755 (executable)
index 0000000..5a81ff9
--- /dev/null
@@ -0,0 +1,19 @@
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+   1. Redistributions of source code must retain the above copyright notice, this
+      list of conditions and the following disclaimer.
+   2. Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/LICENSE.BSD-3-Clause b/LICENSE.BSD-3-Clause
new file mode 100755 (executable)
index 0000000..e436781
--- /dev/null
@@ -0,0 +1,28 @@
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+  Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+
+  Redistributions in binary form must reproduce the above
+  copyright notice, this list of conditions and the following
+  disclaimer in the documentation and/or other materials provided
+  with the distribution.
+
+  Neither the name of the Cisco Systems, Inc. nor the names of its
+  contributors may be used to endorse or promote products derived
+  from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/UPG/CreatePatch.py b/UPG/CreatePatch.py
new file mode 100755 (executable)
index 0000000..f45f3e3
--- /dev/null
@@ -0,0 +1,1137 @@
+#!/usr/bin/python
+
+import sys
+
+
+if sys.hexversion < 0x02040000:
+  print >> sys.stderr, "Python 2.4 or newer is required."
+  sys.exit(1)
+
+import sys
+import os
+import filecmp
+import shutil
+import subprocess
+import re
+import ntpath
+import zipfile
+import datetime
+import hashlib
+import operator
+import locale
+import errno
+import logging
+import glob
+import apt
+
+'''
+Diff two folders and create a delta using SS_BSDIFF.
+Maintains the same script format that would be generated if we used diffutil.
+
+1. Create a list of files in each base folder.
+2. These files will fall into one of the categories below:
+       1) Only in OLD - should be deleted
+       2) Only in NEW - should be added or renamed accordingly
+       3) File exists in both directories but the contents differ - create a diff
+       4) File name is the same but the TYPE has changed (File to Folder, Folder to Link, etc.)
+       5) Duplicates in the lists of deletes and news
+       6) Close-matching diffs even when the name changes across directories (for matching extensions)
+       7) Clearing of empty directories after moves or diffs under rename
+
+Current case
+1. Given two folders, find version changes from the lists of REMOVED and NEW files
+and create diffs between them. An example invocation is given below.
+
+TODO
+Extend the same script to cover the entire DIFF generation and replace the TOTAlib.sh file.
+Catch errors at all stages; the script SHOULD exit and return an error in case of failure.
+'''
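+#Example invocation for a filesystem delta (hypothetical paths; argument order as parsed in main() below):
+#      python CreatePatch.py DELTA_FS rootfs OLD_BASE NEW_BASE OUT_DIR OLD_ATTR.txt NEW_ATTR.txt update.cfg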
+
+def global_paths():
+       global DIFF_UTIL
+       global ZIPUTIL
+       global NEW_FILES_PATH
+       global NEW_FILES_FOLDER
+       global NEW_FILES_ZIP_NAME
+       global SYMLINK_TYPE
+       global ATTR_DOC_EXT
+       global SYMLINK_DOC_NAME
+       global DIFF_PREFIX
+       global DIFF_SUFFIX
+       global SUPPORT_RENAME
+       global NEW_PREFIX
+       global DIFFPATCH_UTIL
+       global SUPPORT_CONTAINERS
+       global FULL_IMG
+       global DELTA_IMG
+       global DELTA_FS
+       global EXTRA
+       global COMMON_BIN_PATH
+       global MEM_REQ
+       global EMPTY
+
+COMMON_BIN_PATH = "../../common/bin/"
+DIFF_UTIL = "./ss_bsdiff"
+DIFFPATCH_UTIL = "./ss_bspatch"
+ZIPUTIL = "p7zip "
+#ZIPUTIL = "7z a system.7z "
+NEW_FILES_PATH = "system"
+NEW_FILES_FOLDER  = "system"
+NEW_FILES_ZIP_NAME = "system.7z"
+SYMLINK_TYPE = "SYM"
+ATTR_DOC_EXT = "_attr.txt"
+SYMLINK_DOC_NAME = "_sym.txt"
+PART_DOC_EXT = ".txt"
+DIFF_PREFIX = "diff"
+DIFF_SUFFIX = ".delta"
+NEW_PREFIX = 'new'
+FULL_IMG = "FULL_IMG"
+DELTA_IMG = "DELTA_IMG"
+DELTA_FS = "DELTA_FS"
+EXTRA = "EXTRA"
+LOGFILE = "Delta.log"
+EMPTY = ""
+MEM_REQ = 0
+
+SUPPORT_RENAME = "TRUE" #Use appropriate name
+SUPPORT_CONTAINERS = "FALSE"
+SUPPORT_DZIMAGE = "TRUE"
+
+TEST_MODE = "FALSE"
+
+def main():
+       logging.basicConfig(filename=LOGFILE, level=logging.DEBUG)
+       global AttributeFile
+       global GenerateDiffAttr
+       try:
+
+               if len(sys.argv) < 6:
+                       sys.exit('Usage: CreatePatch.py UPDATE_TYPE PARTNAME OLDBASE NEWBASE OUTFOLDER')
+               UPDATE_TYPE = sys.argv[1]
+               PART_NAME = sys.argv[2]  # let's make this optional as well
+
+               BASE_OLD = sys.argv[3]
+               BASE_NEW = sys.argv[4]
+               OUT_DIR = sys.argv[5]
+               ATTR_OLD = EMPTY
+               ATTR_NEW = EMPTY
+               UPDATE_CFG_PATH = EMPTY
+               GenerateDiffAttr = "FALSE"
+               if UPDATE_TYPE == DELTA_FS:
+                       #instead of arguments, should we check it in the out directory?
+                       if len(sys.argv) == 9:
+                               ATTR_OLD = sys.argv[6]
+                               ATTR_NEW = sys.argv[7]
+                               UPDATE_CFG_PATH = '../'+sys.argv[8]
+                               GenerateDiffAttr = "TRUE"
+
+               elif UPDATE_TYPE == DELTA_IMG or UPDATE_TYPE == FULL_IMG:
+                       if len(sys.argv) == 7:
+                               #Use path in better way
+                               UPDATE_CFG_PATH = '../'+sys.argv[6]
+
+               global DIFF_UTIL
+               global DIFFPATCH_UTIL
+               if not (os.path.isfile(DIFF_UTIL) and os.access(DIFF_UTIL, os.X_OK)):
+                       DIFF_UTIL = COMMON_BIN_PATH+DIFF_UTIL
+                       DIFFPATCH_UTIL = COMMON_BIN_PATH+DIFFPATCH_UTIL
+                       if not (os.path.isfile(DIFF_UTIL) and os.access(DIFF_UTIL, os.X_OK)):
+                               print >> sys.stderr, "Diff Util Does NOT exist -- ABORT"
+                               logging.info ('Diff Util Does NOT exist -- ABORT')
+                               sys.exit(1)
+
+               start = datetime.datetime.now().time()
+               logging.info('*************** ENTERED PYTHON SCRIPT *****************')
+               logging.info('Arguments Passed: [UpdateType - %s][Part Name - %s] [BaseOld - %s]  [BaseNew - %s] \n [OUTPUTDir - %s] [BASE ATTR - %s] [TARGET ATTR - %s]'% (UPDATE_TYPE, PART_NAME, BASE_OLD, BASE_NEW, OUT_DIR, ATTR_OLD, ATTR_NEW))
+
+               ensure_dir_exists(OUT_DIR)
+               if GenerateDiffAttr == "TRUE":
+                       if not (os.path.isfile(ATTR_OLD) and os.path.isfile(ATTR_NEW)):
+                               print >> sys.stderr, "Attributes missing -- ABORT"
+                               sys.exit(1)
+
+
+               # Should check if APT is supported on other linux flavours
+               cache = apt.Cache()
+               if cache['p7zip'].is_installed and cache['attr'].is_installed and cache['tar'].is_installed:
+                       logging.info ('Basic utils installed')
+
+               if UPDATE_TYPE == FULL_IMG:
+                       SS_mk_full_img(BASE_OLD, BASE_NEW, OUT_DIR, PART_NAME, UPDATE_CFG_PATH)
+               elif UPDATE_TYPE == DELTA_IMG:
+                       SS_mk_delta_img(BASE_OLD, BASE_NEW, OUT_DIR, PART_NAME, UPDATE_CFG_PATH)
+               elif UPDATE_TYPE == DELTA_FS:
+                       AttributeFile = ATTR_NEW
+                       ATTR_FILE = OUT_DIR+'/'+PART_NAME+ATTR_DOC_EXT
+                       Diff_AttrFiles(ATTR_OLD, ATTR_NEW, ATTR_FILE)
+                       Old_files, Old_dirs = Get_Files(BASE_OLD)
+                       New_files, New_dirs = Get_Files(BASE_NEW)
+                       SS_Generate_Delta(PART_NAME, BASE_OLD, Old_files, Old_dirs, BASE_NEW, New_files, New_dirs, OUT_DIR, ATTR_FILE)
+
+                       if not UPDATE_CFG_PATH == EMPTY:
+                               SS_update_cfg(PART_NAME, UPDATE_CFG_PATH)
+
+
+               elif UPDATE_TYPE == EXTRA:
+                       print('UPDATE_TYPE ---- EXTRA')
+               else:
+                       print('UPDATE_TYPE ---- UNKNOWN FORMAT')
+
+               if GenerateDiffAttr == "TRUE":
+                       os.remove(ATTR_OLD)
+                       os.remove(ATTR_NEW)
+               end = datetime.datetime.now().time()
+
+               logging.info('Max Memory required to upgrade [%s] is [%d]' % (PART_NAME, MEM_REQ))
+               logging.info('*************** DONE WITH PYTHON SCRIPT ***************')
+               logging.info('Time start [%s] - Time end [%s]' % (start, end))
+               print('Done with [%s][%d] ---- Time start [%s] - Time end [%s]' % (PART_NAME, MEM_REQ, start, end))
+       except:
+               logging.error('Usage: {} <Update_Type> <Part_Name> <OLD_Base> <NEW_Base> <OUT_DIR>'.format(os.path.basename(sys.argv[0])))
+               raise
+
+
+def SS_update_cfg(DELTA_BIN, UPDATE_CFG_PATH):
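+       #Each line in the update cfg is expected to start with the partition name followed by its delta
+       #file name (hypothetical example: "rootfs rootfs.delta"); for the matching partition, the maximum
+       #memory required for the upgrade (MEM_REQ) is appended to that line below.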
+       f = open(UPDATE_CFG_PATH, 'r')
+       lines = f.readlines()
+       f.close()
+       f = open(UPDATE_CFG_PATH, 'w')
+       for line in lines:
+               ConfigItems = line.split()
+               if ConfigItems[0] == DELTA_BIN:
+                       DELTA = ConfigItems[1]
+                       logging.info ('Updating %s config' % DELTA_BIN)
+                       line = line.rstrip('\n')
+                       Value = MEM_REQ
+                       line = line.replace(line, line+'\t'+str(Value)+'\n')
+                       f.write(line)
+               else:
+                       f.write(line)
+       f.close()
+
+def SS_mk_delta_img(BASE_OLD, BASE_NEW, OUT_DIR, DELTA_BIN, UPDATE_CFG_PATH):
+       #for sizes
+
+       ZIMAGE_SCRIPT = COMMON_BIN_PATH+'./dzImagescript.sh'
+       ZIMAGE_OLD = BASE_OLD+'_unpacked'
+       ZIMAGE_NEW = BASE_NEW+'_unpacked'
+       DZIMAGE_HEADER = 'UnpackdzImage'
+       DZIMAGE_SEP = ':'
+
+       oldsize_d= os.path.getsize(BASE_OLD)
+       newsize_d= os.path.getsize(BASE_NEW)
+       SHA_BIN_DEST= hash_file(BASE_NEW)
+       SHA_BIN_BASE=hash_file(BASE_OLD)
+
+       #in case UPDATE_CFG is empty
+       DELTA = DELTA_BIN
+       SS_UpdateSize(BASE_OLD, BASE_NEW)
+       #Should we throw an error if PART_NAME is NOT found?
+       if not UPDATE_CFG_PATH == EMPTY:
+               f = open(UPDATE_CFG_PATH, 'r')
+               lines = f.readlines()
+               f.close()
+               f = open(UPDATE_CFG_PATH, 'w')
+               for line in lines:
+                       ConfigItems = line.split()
+                       if ConfigItems[0] == DELTA_BIN:
+                               logging.info ('Updating %s config' % DELTA_BIN)
+                               DELTA = ConfigItems[1]
+                               line = line.rstrip('\n')
+                               line = line.replace(line, line+'\t'+str(oldsize_d)+'\t\t'+str(newsize_d)+'\t\t'+str(SHA_BIN_BASE)+'\t\t'+str(SHA_BIN_DEST)+'\n')
+                               f.write(line)
+                       else:
+                               f.write(line)
+               f.close()
+
+       #Any validation checks required?
+       if (DELTA_BIN == "zImage" or DELTA_BIN == "dzImage" or DELTA_BIN == "KERNEL" or DELTA_BIN == "BOOT") and SUPPORT_DZIMAGE == "TRUE":
+
+               #Unpack Old and New Images for creating delta
+               subprocess.call([ZIMAGE_SCRIPT, '-u', BASE_OLD])
+               subprocess.call([ZIMAGE_SCRIPT, '-u', BASE_NEW])
+
+               DeltaFiles = []
+               Old_files, Old_dirs = Get_Files(ZIMAGE_OLD)
+               New_files, New_dirs = Get_Files(ZIMAGE_NEW)
+
+               patchLoc = '%s/%s_temp' % (OUT_DIR, DELTA_BIN)
+               ensure_dir_exists(patchLoc)
+
+               for elt in New_files:
+                       if elt in Old_files:
+                               src_file = ZIMAGE_OLD+'/'+elt
+                               dst_file = ZIMAGE_NEW+'/'+elt
+                               if not filecmp.cmp(src_file, dst_file):
+                                       patch = '%s/%s' % (patchLoc,elt)
+                                       DeltaFiles.append(patch)
+                                       subprocess.call([DIFF_UTIL,src_file,dst_file,patch])
+                                       logging.info('Make dz Image %s <--> %s ==> %s %s' % (src_file, dst_file , DELTA_BIN, patch))
+
+               #Append all delta files to make image.delta
+
+               #HEADER FORMAT MAGICNAME:FILECOUNT:[FILENAME:FILESIZE:][FILECONTENT/S]
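+               #For example (hypothetical file names), a delta holding two files could carry the header
+               #"UnpackdzImage:2:kernel:2048:dtb:512:" left-justified and padded with '0' up to 128 bytes,
+               #followed by the raw contents of the two delta files in the same order.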
+               HeaderStr = DZIMAGE_HEADER+DZIMAGE_SEP+'%d' % len(DeltaFiles)
+               HeaderStr = HeaderStr+DZIMAGE_SEP
+
+               with open(OUT_DIR+'/'+DELTA, 'w') as DeltaFile:
+                       for fname in DeltaFiles:
+                               DeltaSize = os.path.getsize(fname)
+                               HeaderStr = HeaderStr+path_leaf(fname)+DZIMAGE_SEP+'%d' % DeltaSize
+                               HeaderStr = HeaderStr+DZIMAGE_SEP
+                       #Using 128 bytes as max Header.
+                       logging.info('zImage Header - %s' % HeaderStr.ljust(128,'0'))
+                       DeltaFile.write(HeaderStr.ljust(128,'0'))
+                       for fname in DeltaFiles:
+                               with open(fname) as infile:
+                                       DeltaFile.write(infile.read())
+                                       infile.close()
+
+               DeltaFile.close()
+               shutil.rmtree(patchLoc)
+               shutil.rmtree(ZIMAGE_OLD)
+               shutil.rmtree(ZIMAGE_NEW)
+               #Do we need to incorporate the max memory required for backup?
+
+       else:
+               patchLoc = '%s/%s' % (OUT_DIR, DELTA)
+               subprocess.call([DIFF_UTIL,BASE_OLD,BASE_NEW,patchLoc])
+               logging.info('Make Delta Image %s <--> %s ==> %s %s' % (BASE_OLD, BASE_NEW , DELTA_BIN, patchLoc))
+
+
+
+def SS_mk_full_img(BASE_OLD, BASE_NEW, OUT_DIR, DELTA_BIN, UPDATE_CFG_PATH):
+       logging.info('Make Full Image %s <--> %s ==> %s' % (BASE_OLD, BASE_NEW ,DELTA_BIN))
+       oldsize_d= os.path.getsize(BASE_OLD)
+       newsize_d= os.path.getsize(BASE_NEW)
+       SHA_BIN_DEST= hash_file(BASE_NEW)
+       SHA_BIN_BASE=hash_file(BASE_OLD)
+       #echo -e "\t${oldsize_d}\t\t${newsize_d}\t\t${SHA_BIN_BASE}\t\t${SHA_BIN_DEST}" >> ${DATA_DIR}/update_new.cfg
+       SS_UpdateSize(BASE_OLD, BASE_NEW)
+
+       if not UPDATE_CFG_PATH == EMPTY:
+               f = open(UPDATE_CFG_PATH, 'r')
+               lines = f.readlines()
+               f.close()
+               f = open(UPDATE_CFG_PATH, 'w')
+               for line in lines:
+                       ConfigItems = line.split()
+                       if ConfigItems[0] == DELTA_BIN:
+                               logging.info ('Updating %s config' % DELTA_BIN)
+                               DELTA = ConfigItems[1]
+                               line = line.rstrip('\n')
+                               line = line.replace(line, line+'\t'+str(oldsize_d)+'\t\t'+str(newsize_d)+'\t\t'+str(SHA_BIN_BASE)+'\t\t'+str(SHA_BIN_DEST)+'\n')
+                               f.write(line)
+                       else:
+                               f.write(line)
+               f.close()
+
+def zipdir(path, zip):
+    for root, dirs, files in os.walk(path):
+        for file in files:
+            zip.write(os.path.join(root, file))
+
+def ensure_dir_exists(path):
+       if not os.path.exists(path):
+               os.makedirs(path)
+               #shutil.rmtree(path)
+       #os.makedirs(path)
+
+
+def path_leaf(path):
+    head, tail = ntpath.split(path) #This is for windows?? Recheck
+    return tail
+
+def path_head(path):
+    head, tail = ntpath.split(path)
+    return head
+
+def difflines(list1, list2):
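+    # Symmetric difference of the two lists, e.g. difflines([1, 2, 3], [2, 3, 4]) -> [1, 4] (order not guaranteed, since sets are used)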
+    c = set(list1).union(set(list2))
+    d = set(list1).intersection(set(list2))
+    return list(c-d)
+
+#Create the diff between the OLD and NEW attribute files (v12)
+def Diff_AttrFiles(ATTR_OLD, ATTR_NEW, ATTR_FILE):
+       if GenerateDiffAttr == "FALSE":
+               return
+       with open(ATTR_OLD, 'r') as f_old:
+               lines1 = set(f_old.read().splitlines())
+
+       with open(ATTR_NEW, 'r') as f_new:
+               lines2 = set(f_new.read().splitlines())
+
+       lines = difflines(lines2, lines1)
+       with open(ATTR_FILE, 'w+') as file_out:
+               for line in lines:
+                       if line not in lines1:
+                               logging.info('Diff_AttrFiles - %s' % line)
+                               file_out.write(line+'\n')
+
+       f_new.close()
+       f_old.close()
+       file_out.close()
+
+
+
+def Update_Attr(RequestedPath, Type, File_Attibutes, Sym_Attibutes):
+       #Full File Path should MATCH
+       if GenerateDiffAttr == "FALSE":
+               return
+       FilePath = '"/'+RequestedPath+'"'
+       #print ('FilePath - %s'% (FilePath))
+       with open(AttributeFile) as f:
+               for line in f:
+                       if FilePath in line:
+                               if Type == SYMLINK_TYPE:
+                                       Sym_Attibutes.append(line)
+                               else:
+                                       File_Attibutes.append(line)
+
+
+'''This function returns the SHA-1 hash of the file passed into it'''
+def hash_file(filename):
+
+   # make a hash object
+   h = hashlib.sha1()
+
+   # open file for reading in binary mode
+   with open(filename,'rb') as file:
+       # loop till the end of the file
+       chunk = 0
+       while chunk != b'':
+           # read 1 MB (1024*1024 bytes) at a time
+           chunk = file.read(1024*1024)
+           h.update(chunk)
+
+   # return the hex representation of digest
+   return h.hexdigest()
+
+def find_dupes_dir(BASE_OLD, BASE_NEW):
+       dups = {}
+       fdupes = {}
+       print('Finding Duplicates in - %s %s' % (BASE_OLD, BASE_NEW))
+       logging.info('Finding Duplicates in - %s %s' % (BASE_OLD, BASE_NEW))
+       for rootbase, subdirsB, fileListB in os.walk(BASE_OLD):
+               #print('Scanning %s...' % rootbase)
+               for filename in fileListB:
+                       path = os.path.join(rootbase, filename)
+                       if os.path.islink(path):
+                               continue
+                       # Calculate hash
+                       file_hash = hash_file(path)
+                       dups[file_hash] = path
+
+       for roottarget, subdirsT, fileListT in os.walk(BASE_NEW):
+               #print('Scanning %s...' % roottarget)
+               for filename in fileListT:
+                       # Get the path to the file
+                       path = os.path.join(roottarget, filename)
+                       if os.path.islink(path):
+                               continue
+                       # Calculate hash
+                       file_hash = hash_file(path)
+                       # Add or append the file path
+                       if file_hash in dups:
+                               BaseStr = dups.get(file_hash)
+                               Baseloc = path.find('/')
+                               TarLoc = BaseStr.find('/')
+                               if not path[Baseloc:] == BaseStr[TarLoc:]:
+                                       logging.info('Dupes - %s ==> %s' % (path[Baseloc:], BaseStr[TarLoc:]))
+                                       fdupes[path] = BaseStr
+       logging.info('Total Duplicate files %d' % (len(fdupes)))
+       return fdupes
+
+
+def find_dupes_list(BASE_OLD, BASE_NEW, fileListB, fileListT):
+       dups = {}
+       fdupes = {}
+       print('Finding Duplicates in - %s %s' % (BASE_OLD, BASE_NEW))
+
+       for filename in fileListB:
+               Src_File = BASE_OLD+'/'+filename
+               if os.path.islink(Src_File) or os.path.isdir(Src_File):
+                       continue
+               # Calculate hash
+               file_hash = hash_file(Src_File)
+               dups[file_hash] = Src_File
+
+
+       for filename in fileListT:
+               Dest_File = BASE_NEW+'/'+filename
+               if os.path.islink(Dest_File) or os.path.isdir(Dest_File):
+                       continue
+               # Calculate hash
+               file_hash = hash_file(Dest_File)
+               if file_hash in dups:
+                       BaseStr = dups.get(file_hash)
+                       Baseloc = BaseStr.find('/')
+                       if not BaseStr[Baseloc:] == filename:
+                               #print('Dupes - %s ==> %s' % (BaseStr[Baseloc:], filename))
+                               fdupes[BaseStr] = filename
+
+       logging.info('Total Duplicate files %d' % (len(fdupes)))
+       return fdupes
+
+def SS_UpdateSize(src_file, dst_file):
+       global MEM_REQ
+       oldsize_d= os.path.getsize(src_file)
+       newsize_d= os.path.getsize(dst_file)
+       if oldsize_d >= newsize_d:
+               Max = newsize_d
+       else:
+               Max = oldsize_d
+       if MEM_REQ < Max:
+               MEM_REQ = Max
+
+
+
+def SS_Generate_Delta(PART_NAME, BASE_OLD, Old_files, Old_dirs, BASE_NEW, New_files, New_dirs, OUT_DIR, ATTR_FILE):
+       print('Going from %d files to %d files' % (len(Old_files), len(New_files)))
+       logging.info('Going from %d files to %d files' % (len(Old_files), len(New_files)))
+
+       # First let's fill up these categories
+       files_new = []
+       files_removed = []
+       Dir_removed = []
+       Dir_Added = []
+       files_changed = []
+       files_unchanged = []
+       files_renamed = []
+       File_Attibutes = []
+       Sym_Attibutes = []
+
+       files_Del_List = {}
+       files_New_List = {}
+       MyDict_Patches = {}
+
+
+
+       PWD = os.getcwd()
+
+       # Generate NEW List
+       for elt in New_files:
+               if elt not in Old_files:
+                       files_new.append(elt)
+                       logging.info('New files %s' % elt)
+
+       # Generate Delete List
+       for elt in Old_files:
+               if elt not in New_files:
+                       # Can't we just append it here only if this is NOT a directory, so that we have a list of removed files ONLY? (Currently it includes directories.)
+                       files_removed.append(elt)
+                       logging.info('Old files %s' % elt)
+
+
+       for elt in Old_dirs:
+               #print('List of Old Dirs %s' % elt)
+               if elt not in New_dirs:
+                       Dir_removed.append(elt)
+                       #print('Old Dirs %s' % elt+'/')
+
+       for elt in New_dirs:
+               if elt not in Old_dirs:
+                       Dir_Added.append(elt)
+               #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+       # What files have changed contents but not name/path?
+       for elt in New_files:
+               if elt in Old_files:
+                       #Both are symbolic links and they differ
+                       src_file = BASE_OLD+'/'+elt
+                       dst_file = BASE_NEW+'/'+elt
+                       #print('Files Changed - %s -%s' % (src_file,dst_file))
+                       if os.path.islink(src_file) and os.path.islink(dst_file):
+                               if not os.readlink(src_file) == os.readlink(dst_file):
+                                       files_changed.append(elt)
+                                       #print('%d Sym link files changed' % len(files_changed))
+                                       logging.info('Sym links Changed - %s' % elt)
+                               else:
+                                       files_unchanged.append(elt)
+                       #Both are normal files and they differ. (isfile returns True for symlinks too, so check additionally that neither file is a symlink)
+                       elif not (os.path.islink(src_file) or os.path.islink(dst_file)) and os.path.isfile(src_file) and os.path.isfile(dst_file):
+                               if not filecmp.cmp(src_file, dst_file):
+                                       files_changed.append(elt)
+                                       #print('%d Normal files changed' % len(files_changed))
+                                       #print('Files Changed - %s' % elt)
+                               else:
+                                       files_unchanged.append(elt)
+                       #File types differ between BASE and TARGET
+                       else:
+                               logging.info('Files are of diff types but same names  Src- %s Des- %s' % (src_file, dst_file))
+                               #Both file types have changed and they differ
+                               #Case 1: First Delete the OLD entry file type (Be it anything)
+                               #Processing and updating the partition txt file will be done under the REMOVED and NEW files cases accordingly; we just make an entry here
+                               files_removed.append(elt)
+                               files_new.append(elt)
+
+
+
+       #Currently, if a version number is the first character of the file name, then we do NOT make any diffs.
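+       #For example (hypothetical names), "libfoo1.2.so" in the removed list and "libfoo2.0.so" in the new
+       #list both split to the prefix "libfoo" below, so they become a rename/diff candidate pair.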
+       if SUPPORT_RENAME == "TRUE":
+               for elt in files_removed:
+                       if os.path.isfile(BASE_OLD+'/'+elt):
+                               FileName = path_leaf(elt)
+                               entries = re.split('[0-9]' , FileName)
+                               #Gives the STRING part of the NAME. If the name starts with a version, the later part will be the string
+                               #print('Entries under removed list after split - %s %s - %s' % (FileName, entries[0], elt))
+                               #If the version is at the beginning of the string, should we have an additional check for such cases?
+                               if len(entries[0]) > 0:
+                                       files_Del_List.update({entries[0]: elt})
+
+               for elt in files_new:
+                       if os.path.isfile(BASE_NEW+'/'+elt):
+                               FileName = path_leaf(elt)
+                               entries = re.split('[0-9]' , FileName)
+                               #print('Entries under NEW files list after split - %s %s - %s' % (FileName, entries[0], elt))
+                               if len(entries[0]) > 0:
+                                       files_New_List.update({entries[0]: elt})
+
+               for key, value in files_Del_List.iteritems():
+                       #print('Key value pair -%s -%s' % (key, value))
+                       if key in files_New_List:
+                               # this file is the same name in both!
+                               src_file = BASE_OLD+'/'+value
+                               dst_file = BASE_NEW+'/'+files_New_List[key]
+                               olddirpath = path_head(files_New_List[key])
+                               newdirpath = path_head(value)
+                               if os.path.islink(src_file) or os.path.islink(dst_file):
+                                       logging.debug('Cannot diff as one of them is Symlink')
+                               else:
+                                       #Pick the best diff of same type and diff names
+                                       files_renamed.append([files_New_List[key], value])
+                                       files_removed.remove(value)
+                                       files_new.remove(files_New_List[key])
+
+       '''
+       Patch Section
+               Partition.txt contains Protocol for UPI
+               Types Supported: DIFFS, MOVES, NEWS, DELETES, SYMDIFFS, SYMNEWS.
+       '''
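+       #Example Partition.txt entries emitted below (hypothetical values):
+       #  DIFF:REG:<old path>:<new path>:<old sha1>:<new sha1>:<patch file>
+       #  MOVE:REG:<old path>:<new path>:<sha1>
+       #  DEL:REG:<path>:<sha1>   (DEL:SYM / DEL:DIR / DEL:END for symlinks and directories)
+       #Symlink entries (SYM:DIFF:<old>:<new>:<target>, SYM:NEW:<path>:<target>) go into the *_sym.txt file.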
+       Sym_Diff_Cnt = 0
+       Sym_New_Cnt = 0
+       Del_Cnt = 0
+       New_Cnt = 0
+       Diff_Cnt = 0
+       Move_Cnt = 0
+
+       SymLinkDoc = OUT_DIR+'/'+PART_NAME+SYMLINK_DOC_NAME
+       Partition_Doc = open(OUT_DIR+'/'+PART_NAME+'.txt','w')
+       Partition_Doc_SymLinks = open(SymLinkDoc,'w')
+
+       print("writing diff'ed changed files...")
+       for elt in files_changed:
+               dst_file = BASE_NEW+'/'+elt
+               src_file = BASE_OLD+'/'+elt
+               #Both files are symbolic links and they differ
+               if os.path.islink(dst_file) and os.path.islink(src_file):
+               #Both are symlinks and they differ
+                       logging.debug(' File Changed is Link %s ' % dst_file)
+                       patch = os.readlink(dst_file)
+                       Sym_Diff_Cnt = Sym_Diff_Cnt + 1
+                       Partition_Doc_SymLinks.write('SYM:DIFF:%s:%s:%s\n' % (elt, elt, patch))
+                       Update_Attr(elt, "SYM", File_Attibutes, Sym_Attibutes)
+               #Both are NORMAL files and they differ
+               elif not (os.path.islink(src_file) or os.path.islink(dst_file)) and os.path.isfile(dst_file) and os.path.isfile(src_file):
+                       #Both are files and they differ
+                       Diff_Cnt = Diff_Cnt + 1
+                       patchName = (DIFF_PREFIX+'%d_%s'+DIFF_SUFFIX) % (Diff_Cnt, path_leaf(elt))
+                       patchLoc = '%s/%s' % (OUT_DIR, patchName)
+                       logging.debug(' File Differ %s %s' % (src_file, dst_file))
+                       SS_UpdateSize(src_file, dst_file)
+                       if SUPPORT_CONTAINERS == "TRUE":
+                               if src_file.endswith('.zip') and dst_file.endswith('.zip'):
+                                       FORMAT = "ZIP"
+                                       Partition_Doc.write('DIFF:ZIP:%s:%s:%s:%s:%s/\n' % (elt, elt, hash_file(src_file), hash_file(dst_file), patchName))
+                                       compute_containerdelta(src_file, dst_file, FORMAT, OUT_DIR+'/'+patchName, Partition_Doc)
+                               elif src_file.endswith('.tpk') and dst_file.endswith('.tpk'):
+                                       FORMAT = "TPK"
+                                       Partition_Doc.write('DIFF:TPK:%s:%s:%s:%s:%s/\n' % (elt, elt, hash_file(src_file), hash_file(dst_file), patchName))
+                                       compute_containerdelta(src_file, dst_file, FORMAT, OUT_DIR+'/'+patchName, Partition_Doc)
+                               else:
+                                       FORMAT = "REG"
+                                       Partition_Doc.write('DIFF:REG:%s:%s:%s:%s:%s\n' % (elt, elt, hash_file(src_file), hash_file(dst_file), patchName))
+                                       subprocess.call([DIFF_UTIL,src_file,dst_file,patchLoc])
+                       else:
+                               FORMAT = "REG"
+                               Partition_Doc.write('DIFF:REG:%s:%s:%s:%s:%s\n' % (elt, elt, hash_file(src_file), hash_file(dst_file), patchName))
+                               subprocess.call([DIFF_UTIL,src_file,dst_file,patchLoc])
+                       Update_Attr(elt, "FILE", File_Attibutes, Sym_Attibutes)
+               #Both differ but they are of diff types
+               else:
+                       #Processing and updating the partition txt file will be done under the REMOVED and NEW files cases accordingly; we just make an entry here
+                       files_removed.append(elt)
+                       files_new.append(elt)
+
+       fdupes = find_dupes_list(BASE_OLD, BASE_NEW, files_removed, files_new)
+       for oldpath, newpath in fdupes.iteritems():
+               logging.info('Dupes %s -> %s' % (oldpath, newpath))
+
+       for elt in files_removed[:]:  #iterate over a copy, since entries are removed inside the loop
+               src_file = BASE_OLD+'/'+elt
+               if src_file in fdupes.keys():
+                       dst_file = BASE_NEW+'/'+ fdupes[src_file]
+                       logging.debug(' File Moved %s ==> %s' % (src_file, dst_file))
+                       Move_Cnt = Move_Cnt + 1
+                       Partition_Doc.write('MOVE:REG:%s:%s:%s\n' % (elt, fdupes[src_file], hash_file(src_file)))
+                       #Directories should be taken care of: create the PARENT DIRECTORY if not present, etc.
+                       files_removed.remove(elt)
+                       files_new.remove(fdupes[src_file])
+
+       #Should be placed after removing duplicates, else they will be filtered here.
+       #Should the loop be over all NEW files, rather than over all deleted files?
+       DelList = files_removed[:]
+       NewList = files_new[:]
+       for new_file in NewList:
+               if os.path.islink(BASE_NEW+'/'+new_file):
+                       continue
+               elif os.path.isdir(BASE_NEW+'/'+new_file):
+                       continue
+               else:# os.path.isfile(BASE_NEW+'/'+new_file):
+                       DirPathNew = path_head(new_file)
+                       FileNameNew = path_leaf(new_file)
+                       DiffSize = 0
+                       winning_patch_sz = os.path.getsize(BASE_NEW+'/'+new_file)
+                       winning_file = ''
+               for del_file in DelList:
+                       #print '+++++'
+                       if os.path.islink(BASE_OLD+'/'+del_file):
+                               continue
+                       elif os.path.isdir(BASE_OLD+'/'+del_file):
+                               continue
+                       else: #if os.path.isfile(BASE_OLD+'/'+del_file):
+                               FileNameOld = path_leaf(del_file)
+                               if (FileNameOld.startswith(FileNameNew[:len(FileNameNew)/2]) and (os.path.splitext(FileNameNew)[1] == os.path.splitext(del_file)[1])):
+                                       #winning_patch_sz = 0.9 * os.path.getsize(BASE_NEW+'/'+new_file)
+                                       #logging.debug('I can compute diff between %s %s' % (del_file, new_file))
+                                       DiffSize = measure_two_filediffs(BASE_OLD+'/'+del_file, BASE_NEW+'/'+new_file)
+                                       if (DiffSize < 0.8 * winning_patch_sz):
+                                               winning_patch_sz = DiffSize
+                                               winning_file = del_file
+               if len(winning_file) > 0:
+                       logging.debug('Best Pick -%s ==> %s [%d]' % (winning_file, new_file, DiffSize))
+                       files_renamed.append([new_file, winning_file])
+                       DelList.remove(winning_file)
+                       files_removed.remove(winning_file)
+                       files_new.remove(new_file)
+
+       #********************** SHOULD CHECK THIS LOGIC ***********************
+
+       if SUPPORT_RENAME == "TRUE":
+               for elt in files_renamed:
+                       src_file = BASE_OLD+'/'+elt[1]
+                       dst_file = BASE_NEW+'/'+elt[0]
+                       Diff_Cnt = Diff_Cnt + 1
+                       patchName = (DIFF_PREFIX+'%d_%s'+DIFF_SUFFIX) % (Diff_Cnt, path_leaf(elt[1]))
+                       #patchName = (DIFF_PREFIX+'_%s'+DIFF_SUFFIX) % (path_leaf(elt[0]))
+                       patchLoc = '%s/%s' % (OUT_DIR, patchName)
+                       logging.debug(' File Renamed %s ==> %s' % (src_file, dst_file))
+                       # Should we be careful about renaming files?
+                       # Should we consider measure_two_filediffs, so that the patch size is NOT greater than the actual file?
+                       # What if the folder path has numerics?
+
+                       if  os.path.isdir(src_file) or os.path.isdir(dst_file):
+                               #This case never occurs??
+                               Partition_Doc.write('"%s" and "%s" renamed 0 0\n' % (elt[0], elt[1]))
+                               Update_Attr(elt[0], "FILE", File_Attibutes, Sym_Attibutes)
+                       else: #Make sure these files are PROPER and they should NOT be symlinks
+                               if filecmp.cmp(src_file, dst_file):
+                                       Move_Cnt = Move_Cnt + 1
+                                       Diff_Cnt = Diff_Cnt - 1
+                                       Partition_Doc.write('MOVE:REG:%s:%s:%s\n' % (elt[1], elt[0], hash_file(src_file)))
+                               elif SUPPORT_CONTAINERS == "TRUE":
+                                       if src_file.endswith('.zip') and dst_file.endswith('.zip'):
+                                               FORMAT = "ZIP"
+                                               Partition_Doc.write('DIFF:ZIP:%s:%s:%s:%s:%s/\n' % (elt[1], elt[0], hash_file(src_file), hash_file(dst_file), patchName))
+                                               compute_containerdelta(src_file, dst_file, FORMAT, OUT_DIR+'/'+patchName, Partition_Doc)
+                                       elif src_file.endswith('.tpk') and dst_file.endswith('.tpk'):
+                                               FORMAT = "TPK"
+                                               Partition_Doc.write('DIFF:TPK:%s:%s:%s:%s:%s/\n' % (elt[1], elt[0], hash_file(src_file), hash_file(dst_file), patchName))
+                                               compute_containerdelta(src_file, dst_file, FORMAT, OUT_DIR+'/'+patchName, Partition_Doc)
+                                       else:
+                                               FORMAT = "REG"
+                                               Partition_Doc.write('DIFF:REG:%s:%s:%s:%s:%s\n' % (elt[1], elt[0], hash_file(src_file), hash_file(dst_file), patchName))
+                                               subprocess.call([DIFF_UTIL,src_file,dst_file,patchLoc])
+                               else:
+                                       FORMAT = "REG"
+                                       Partition_Doc.write('DIFF:REG:%s:%s:%s:%s:%s\n' % (elt[1], elt[0], hash_file(src_file), hash_file(dst_file), patchName))
+                                       subprocess.call([DIFF_UTIL,src_file,dst_file,patchLoc])
+
+                               SS_UpdateSize(src_file, dst_file)
+                               Update_Attr(elt[0], "FILE", File_Attibutes, Sym_Attibutes)
+
+
+       for elt in files_removed:
+               #If files are part of patches after renaming, we should remove them from the removed list.
+               src_file = BASE_OLD+'/'+elt
+               if os.path.islink(src_file):
+                       Partition_Doc.write('DEL:SYM:%s\n' % (elt))
+               elif os.path.isdir(src_file):
+                       Partition_Doc.write('DEL:DIR:%s\n' % (elt))
+               else:
+                       Partition_Doc.write('DEL:REG:%s:%s\n' % (elt, hash_file(src_file)))
+               logging.debug(' File Deleted %s' % src_file)
+               Del_Cnt = Del_Cnt + 1
+
+       for elt in Dir_removed:
+               #if Dir is empty, add it to the removed list.
+               src_file = BASE_OLD+'/'+elt
+               #Irrespective of whether files are MOVED or DIFF'ed, we can delete the folders. This action can be performed at the end.
+               #It covers symlinks also, as NEW symlinks cannot point to non-existent folders of the TARGET (NEW binary)
+               if os.path.isdir(src_file):
+                       Partition_Doc.write('DEL:END:%s\n' % (elt))
+                       Del_Cnt = Del_Cnt + 1
+                       logging.debug(' Dir Deleted- %s' % src_file)
+
+
+       for elt in files_new:
+               dst_file = BASE_NEW+'/'+elt
+               newfiles_dest_path = 'system/'
+               ensure_dir_exists(newfiles_dest_path)
+               if os.path.islink(dst_file):
+                       patch = os.readlink(dst_file)
+                       logging.debug(' File New Links %s' % elt)
+                       Partition_Doc_SymLinks.write('SYM:NEW:%s:%s\n' % (elt, patch))
+                       #What if this is only a new symlink and the folder already exists? Should recheck.
+                       destpath = newfiles_dest_path + elt
+                       if not os.path.exists(path_head(destpath)):
+                               os.makedirs(path_head(destpath))
+                               logging.info('New SymLink - Adding missing Dir')
+                       #Update_Attr(elt, "SYM", File_Attibutes, Sym_Attibutes)
+                       Sym_New_Cnt = Sym_New_Cnt + 1
+               elif os.path.isdir(dst_file): # We create just empty directory here
+                       destpath = newfiles_dest_path + elt
+                       if not os.path.exists(destpath):
+                               os.makedirs(destpath)
+                               logging.debug(' File New Dir %s' % destpath)
+                               New_Cnt = New_Cnt + 1
+               else:
+                       New_Cnt = New_Cnt + 1
+                       #newfiles_dest_path = OUT_DIR + '/system/'
+                       destpath = newfiles_dest_path + elt
+                       destdir = os.path.dirname(destpath)
+                       logging.debug('New files - %s ==> %s' % (dst_file, destdir))
+
+                       if not os.path.isdir(destdir):
+                               try:
+                                       os.makedirs(destdir)
+                               except:
+                                       logging.critical('Error in NEW files DIR entry -%s' % destdir)
+                                       raise
+
+                       try:
+                               shutil.copy2(dst_file, destpath)
+                               logging.debug('New files copied from- %s to- %s' % (dst_file, destpath))
+                       except:
+                               logging.critical('Error in NEW files entry -%s -%s' % (dst_file, destpath))
+                               raise
+
+       for elt in Dir_Added:
+               newfiles_dest_path = 'system/'
+               ensure_dir_exists(newfiles_dest_path)
+               destpath = newfiles_dest_path + elt
+               if not os.path.exists(destpath):
+                       os.makedirs(destpath)
+                       logging.debug(' DirList New Dir %s' % destpath)
+                       New_Cnt = New_Cnt + 1
+
+       #Base directory should be system
+       print 'Compressing New files'
+       if (New_Cnt > 0):
+               WorkingDir = os.getcwd()
+               os.chdir(os.getcwd()+"/"+NEW_FILES_PATH)
+               logging.info('Curr Working Dir - %s' % os.getcwd())
+               os.system(ZIPUTIL+NEW_FILES_PATH+" >> " + LOGFILE)
+               shutil.move(NEW_FILES_ZIP_NAME, WorkingDir+"/"+OUT_DIR)
+               #New file size? Because we extract system.7z from delta.tar and then proceed with decompression.
+               SS_UpdateSize(WorkingDir+"/"+OUT_DIR+"/"+NEW_FILES_ZIP_NAME, WorkingDir+"/"+OUT_DIR+"/"+NEW_FILES_ZIP_NAME)
+               os.chdir(WorkingDir)
+               shutil.rmtree(NEW_FILES_PATH)
+               # use 7z a system.7z ./*
+
+       #logging.info('%d Dir to be removed' % len(Dir_removed))
+       logging.info('%d files unchanged' % len(files_unchanged))
+       logging.info('%d files renamed' % len(files_renamed))
+       logging.info('%d files NEW' % len(files_new))
+       logging.info('%d File attr' % len(File_Attibutes))
+       logging.info('%d Sym attr' % len(Sym_Attibutes))
+       logging.info('PaTcHCoUnT:Diffs-%d Moves-%d News-%d Delets-%d SymDiffs-%d SymNews-%d\n' % (Diff_Cnt, Move_Cnt, New_Cnt, Del_Cnt, Sym_Diff_Cnt, Sym_New_Cnt))
+       print('PaTcHCoUnT:Diffs-%d Moves-%d News-%d Delets-%d SymDiffs-%d SymNews-%d\n' % (Diff_Cnt, Move_Cnt, New_Cnt, Del_Cnt, Sym_Diff_Cnt, Sym_New_Cnt))
+
+       #There could be duplicates; TODO: check before adding.
+       ATTR_FILE = open(ATTR_FILE,'a+')
+       for elt in File_Attibutes:
+               ATTR_FILE.write(elt)
+       for elt in Sym_Attibutes:
+               ATTR_FILE.write(elt)
+
+       ATTR_FILE.close()
+
+       Partition_Doc_SymLinks.close()
+       Partition_Read_SymLinks = open(SymLinkDoc,'r+')
+       Partition_Doc.write(Partition_Read_SymLinks.read())
+       Partition_Doc.write('PaTcHCoUnT:%d %d %d %d %d %d\n' % (Diff_Cnt, Move_Cnt, New_Cnt, Del_Cnt, Sym_Diff_Cnt, Sym_New_Cnt))
+       Partition_Read_SymLinks.close()
+       Partition_Doc.close()
+       os.remove(SymLinkDoc)
+
+
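+#PATCH.txt (written by compute_containerdelta below) holds one record per member:
+#'c<id>|<path>|<patch name>' for changed files and 's<id>|<path>' for unchanged
+#ones; Apply_Container_Delta keys off the first character of each line.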
+def Apply_Container_Delta(a_apk, b_apk, new_apk, a_folder, g_output_dir):
+
+       #TODO: control names, prints and error cases... should not proceed on error.
+       print 'ApplyContainerDelta - ', b_apk, a_folder, g_output_dir
+       shutil.copy2(g_output_dir+'/'+b_apk, g_output_dir+'/temp')
+       temp_apk = '../'+g_output_dir+'/'+b_apk
+       Patch = 'Patch_'+b_apk
+       ensure_dir_exists(Patch)
+       shutil.copy2(g_output_dir+'/'+b_apk, Patch+'/'+b_apk)
+
+       #Size issue on the device side? Should check this.
+       subprocess.call(['unzip','-q', Patch+'/'+b_apk, '-d', Patch])
+       with open(g_output_dir+'/PATCH.txt', 'r') as f_new:
+               lines = set(f_new.read().splitlines())
+               for line in lines:
+                       #print('Action ==> %s' % line)
+                       #Action, Path, Patch = line.split('|')
+                       Items = line.split('|')
+                       Action = Items[0]
+                       Path = Items[1]
+                       ActualPath = a_folder+'/'+Path
+                       PatchPath = Patch+'/'+Path
+                       SrcPath = g_output_dir+'/'+path_leaf(Path)
+                       #print('Action ==> %s Path ==> %s ' % (Action, Path))
+                       if line[0] == 'c':
+                               patchName = g_output_dir+'/'+Items[2]
+                               #print('Apply Patch: ActualPath %s SrcPath %s PatchLoc %s ' % (PatchPath, ActualPath, patchName))
+                               subprocess.call([DIFFPATCH_UTIL,ActualPath,ActualPath,patchName])
+                               WorkingDir = os.getcwd()
+                               os.chdir(WorkingDir+"/"+"temp_a")
+                               subprocess.call(['cp', '--parents', Path, '../'+Patch])
+                               os.chdir(WorkingDir)
+                       elif line[0] == 's':
+                               WorkingDir = os.getcwd()
+                               os.chdir(WorkingDir+"/"+"temp_a")
+                               subprocess.call(['cp', '--parents', Path, '../'+Patch])
+                               os.chdir(WorkingDir)
+                       else:
+                               print('Unknown Error')
+       #print('Touch all files and set common attributes for DIFF generation')
+       WorkingDir = os.getcwd()
+       os.chdir(WorkingDir+"/"+Patch)
+
+       CONTAINER_DATE = '200011111111.11'
+       CONTAINER_MODE = '0755'
+       subprocess.call(['find', '.', '-type', 'l', '-exec', 'rm', '-rf', '{}', ';'])
+       subprocess.call(['find', '.', '-exec', 'touch', '-t', CONTAINER_DATE, '{}', ';'])
+       subprocess.call(['chmod', '-R', CONTAINER_MODE, '../'+Patch])
+
+       print 'Update Intermediate Archive'
+       #subprocess.call(['zip','-ryX', b_apk, '*'])
+       subprocess.call(['zip','-ryX', b_apk] + glob.glob('*'))
+       os.chdir(WorkingDir)
+       #print('Apply Path completed - Now create diff for this and place in patch folder')
+       #print os.getcwd()
+       print('Patch Applied, Create Final Diff - %s %s' % (g_output_dir+'/'+b_apk,new_apk))
+       patchName = ('New'+'_%s'+DIFF_SUFFIX) % (b_apk)
+       patchLoc = '%s/%s' % (g_output_dir, patchName)
+
+       subprocess.call([DIFF_UTIL, Patch+'/'+b_apk ,new_apk,patchLoc])
+
+       #Only on HOST... for testing
+       if TEST_MODE == 'TRUE':
+               UpgradedName = '%s_Upgraded' % (b_apk)
+               subprocess.call([DIFFPATCH_UTIL,Patch+'/'+b_apk,UpgradedName,patchLoc])
+
+       #This is file only with NEWS and empty diffs and same files.
+       if TEST_MODE == 'FALSE':
+               os.remove(g_output_dir+'/'+b_apk)
+               os.rename(g_output_dir+'/temp', g_output_dir+'/'+b_apk)
+               shutil.rmtree(Patch)
+
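+#The upper 16 bits of a ZipInfo external_attr hold the Unix mode bits;
+#0120777 is S_IFLNK | 0777, i.e. the archive entry is a symbolic link.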
+def IsSymlink(info):
+  return (info.external_attr >> 16) == 0120777
+
+
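+#Container (zip/tpk) delta flow: extract both archives, bsdiff every changed
+#member into a per-file patch recorded in PATCH.txt, then repack the new
+#archive with its changed/unchanged members emptied. Apply_Container_Delta
+#replays PATCH.txt over the old content, repacks it with normalised attributes
+#and produces a single final diff against the real new archive.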
+def compute_containerdelta(src_file, dst_file, FORMAT, patchName, Partition_Doc):
+
+       a_apk = src_file
+       b_apk = dst_file
+       a_folder = 'temp_a'
+       b_folder = 'temp_b'
+
+       g_output_dir = patchName
+
+       logging.info('Uncompressing Containers... [%s][%s]' % (src_file, dst_file))
+       logging.info('Out Dir -%s' %(g_output_dir))
+       ensure_dir_exists(a_folder)
+       zipf = zipfile.ZipFile(a_apk, 'r');
+       zipf.extractall(a_folder)
+       zipf.close()
+
+       ensure_dir_exists(b_folder)
+       zipf = zipfile.ZipFile(b_apk, 'r');
+       zipf.extractall(b_folder)
+
+
+       symlinks = []
+       for info in zipf.infolist():
+               basefilename = info.filename[7:]
+               if IsSymlink(info):
+                       symlinks.append(info.filename)
+                       os.remove(b_folder+'/'+info.filename)
+       zipf.close()
+
+       a_files, a_dirs = Get_Files(a_folder)
+       b_files, b_dirs = Get_Files(b_folder)
+
+       logging.info('Going from %d files to %d files' % (len(a_files), len(b_files)))
+
+       # First let's fill up these categories
+       C_files_new = []
+       C_files_removed = []
+       C_files_changed = []
+       C_files_unchanged = []
+
+       # What files appear in B but not in A?
+       for elt in b_files:
+               if elt not in a_files:
+                       #if not elt.endswith('.so'):
+                       C_files_new.append(elt)
+
+       # What files appear in A but not in B?
+       for elt in a_files:
+               if elt not in b_files:
+                       C_files_removed.append(elt)
+
+       # What files have changed contents but not name/path?
+       for elt in b_files:
+               if elt in a_files:
+                       if os.path.islink(a_folder+'/'+elt) or os.path.islink(b_folder+'/'+elt):
+                               print 'links - skip'
+                       elif not filecmp.cmp(a_folder+'/'+elt, b_folder+'/'+elt):
+                               C_files_changed.append(elt)
+                       else:
+                               C_files_unchanged.append(elt)
+
+
+       print('%d new files' % len(C_files_new))
+       print('%d removed files' % len(C_files_removed))
+       print('%d files changed' % len(C_files_changed))
+       print('%d files unchanged' % len(C_files_unchanged))
+
+       # temp dir where we're assembling the patch
+       ensure_dir_exists(g_output_dir)
+
+       unique_fileid = 0
+       toc = open(g_output_dir+'/PATCH.txt','w')
+       print("writing diff'ed changed files...")
+
+       for elt in C_files_changed:
+               dst_file = b_folder+'/'+elt
+               src_file = a_folder+'/'+elt
+               patchName = (DIFF_PREFIX+'%d_%s'+DIFF_SUFFIX) % (unique_fileid, path_leaf(elt))
+               patchLoc = '%s/%s' % (g_output_dir, patchName)
+               #print('src - %s dest -%s patch -%s' % (src_file ,dst_file,patchLoc))
+               subprocess.call([DIFF_UTIL,src_file ,dst_file,patchLoc])
+               toc.write('c%d|%s|%s\n' % (unique_fileid, elt, patchName))
+               unique_fileid = unique_fileid + 1
+
+       for elt in C_files_unchanged:
+               dst_file = b_folder+'/'+elt
+               src_file = a_folder+'/'+elt
+               #print('Same Files src - %s dest -%s' % (src_file ,dst_file))
+               toc.write('s%d|%s\n' % (unique_fileid, elt))
+               unique_fileid = unique_fileid + 1
+
+       #Create a NEW TPK with empty data for the files below and for the NEW files
+       shutil.copy2(b_apk, g_output_dir)
+
+       #Maybe for host?
+       #temp_apk = '../'+g_output_dir+'/'+b_apk
+       temp_apk = '../'+g_output_dir+'/'+path_leaf(b_apk)
+
+       for elt in C_files_changed:
+               dst_file = b_folder+'/'+elt
+               #print dst_file
+               open(dst_file, 'w').close()
+
+       for elt in C_files_unchanged:
+               dst_file = b_folder+'/'+elt
+               open(dst_file, 'w').close()
+
+       WorkingDir = os.getcwd()
+       os.chdir(WorkingDir+"/"+b_folder)
+
+       #for elt in files_changed:
+       #       subprocess.call(['zip', temp_apk, elt]) # confirm ZIP options, extra fields etc.. jus zip it, shd do all at once.. else time taking
+
+       #for elt in files_unchanged:
+       #       subprocess.call(['zip', temp_apk, elt])
+
+       subprocess.call(['zip','-ryq', temp_apk, '*'])
+       os.chdir(WorkingDir)
+       toc.close()
+
+       Apply_Container_Delta(path_leaf(a_apk), path_leaf(b_apk), b_apk, a_folder, g_output_dir)
+       shutil.rmtree(a_folder)
+       shutil.rmtree(b_folder)
+
+def NewFiles(src, dest):
+       print src,dest
+       subprocess.call(['cp','-rp', src,dest])
+    #try:
+               #shutil.copytree(src, dest)
+    #except OSError as e:
+        # If the error was caused because the source wasn't a directory
+        #if e.errno == errno.ENOTDIR:
+            #shutil.copy2(src, dest)
+        #else:
+            #print('Directory not copied. Error: %s' % e)
+
+def measure_two_filediffs(src, dst):
+       patchLoc = 'temp.patch'
+       subprocess.call([DIFF_UTIL,src,dst,patchLoc])
+       result_size = os.path.getsize(patchLoc)
+       os.remove(patchLoc)
+       return result_size
+
+def Get_Files(path):
+       all_files = []
+       all_dirs = []
+
+       for root, directories, filenames in os.walk(path, topdown=False, followlinks=False):
+               for directory in directories:
+                       #DirName = os.path.join(root+'/',directory)
+                       DirName = os.path.join(root,directory)
+                       if os.path.islink(DirName):
+                               logging.debug('This is symlink pointing to dir -%s' % DirName)
+                               all_files.append(os.path.relpath(DirName, path))
+                       elif not os.listdir(DirName):
+                               #print('*****Empty Directory******* -%s', DirName)
+                               #Should this NOT be appended? Empty dirs should be considered.
+                               all_dirs.append(os.path.relpath(DirName, path))
+                       else:
+                               all_dirs.append(os.path.relpath(DirName, path))
+               for filename in filenames:
+                       FileName = os.path.join(root,filename)
+                       all_files.append(os.path.relpath(FileName, path))
+
+       all_files.sort()
+       all_dirs.sort()
+       return all_files, all_dirs
+
+
+USAGE_DOCSTRING = """
+      Generate Delta using BASE_OLD and BASE_NEW
+         The Attributes argument is optional
+"""
+
+def Usage(docstring):
+  print docstring.rstrip("\n")
+  print COMMON_DOCSTRING
+
+
+
+if __name__ == '__main__':
+       main()
+
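The partition document assembled above is a plain-text manifest of colon-separated
records (DIFF/MOVE/DEL plus the appended SYM records), terminated by a
PaTcHCoUnT: line carrying the six counters. A minimal reader sketch for that
layout, assuming none of the recorded paths contain ':' (the function name is
illustrative):

    def read_partition_doc(path):
        records, counts = [], None
        with open(path) as doc:
            for line in doc:
                line = line.rstrip('\n')
                if not line:
                    continue
                if line.startswith('PaTcHCoUnT:'):
                    # Diffs Moves News Deletes SymDiffs SymNews
                    counts = [int(n) for n in line.split(':', 1)[1].split()]
                else:
                    # e.g. DIFF:REG:<new path>:<old path>:<old hash>:<new hash>:<patch name>
                    records.append(line.split(':'))
        return records, counts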
diff --git a/UPG/dzImagescript.sh b/UPG/dzImagescript.sh
new file mode 100755 (executable)
index 0000000..f96bc4f
--- /dev/null
@@ -0,0 +1,255 @@
+#!/bin/bash
+
+pname="${0##*/}"
+args=("$@")
+cur_dir="$(pwd)"
+
+# file names:
+decompression_code="decompression_code"
+piggy_gz_piggy_trailer="piggy.gz+piggy_trailer"
+piggy="piggy"
+piggy_gz="piggy.gz"
+padding_piggy="padding_piggy"
+piggy_trailer="piggy_trailer"
+ramfs_gz_part3="initramfs.cpio+part3"
+ramfs_cpio_gz="initramfs.cpio.gz"
+padding3="padding3"
+part3="part3"
+kernel_img="kernel.img"
+ramfs_cpio="initramfs.cpio"
+ramfs_dir="initramfs"
+sizes="sizes"
+ramfs_part3="ramfs+part3"
+ramfs_list="initramfs_list"
+cpio_t="cpio-t"
+
+
+cpio="cpio_set0"
+
+# We dup2 stderr to 3 so an error path is always available (even
+# during commands where stderr is redirected to /dev/null).  If option
+# -v is set, we also dup2 stderr to 9 so commands (and some of their
+# results if redirected to &9) are printed as well.
+exec 9>/dev/null                # kill diagnostic output (will be >&2 if -v)
+exec 3>&2                       # an always open error channel
+
+#
+########### Start of functions
+#
+
+# Emit an error message and abort
+fatal(){
+    # Syntax: fatal <string ...>
+    # Output error message, then abort
+    echo >&3
+    echo >&3 "$pname: $*"
+    kill $$
+    exit 1
+}
+
+# Execute a command, displaying the command if -v:
+cmd(){
+    # Syntax: cmd <command> <args...>
+    # Execute <command>, echo command line if -v
+    echo >&9 "$*"
+    "$@"
+}
+
+# Execute a required command, displaying the command if -v, abort on
+# error:
+rqd(){
+    # Syntax: cmd <command> <args...>
+    # Execute <command>, echo commandline if -v, abort on error
+    cmd "$@" || fatal "$* failed."
+}
+
+findByteSequence(){
+    # Syntax: findByteSequence <fname> [<string, default: gzip header>]
+    # Returns: position (offset) on stdout, empty string if nothing found
+    file="$1"
+    local opt
+    if [ "$2" = "lzma" ]; then
+        srch=$'\x5d....\xff\xff\xff\xff\xff'
+        opt=
+    else
+        srch="${2:-$'\x1f\x8b\x08'}" # Default: search for gzip header
+        opt="-F"
+    fi
+    pos=$(LC_ALL=C grep $opt -a --byte-offset -m 1 --only-matching -e "$srch" -- "$file")
+    echo ${pos%%:*}
+}
+
+getFileSize(){
+    # Syntax: getFileSize <file>
+    # Returns size of the file on stdout.
+    # Aborts if file doesn't exist.
+    rqd stat -c %s "$1"
+}
+checkNUL(){
+    # Syntax: checkNUL file offset
+    # Returns true (0) if byte there is 0x0.
+    [ "$(rqd 2>/dev/null dd if="$1" skip=$2 bs=1 count=1)" = $'\0' ]
+}
+
+gunzipWithTrailer(){
+    # Syntax gunzipWithTrailer <file> <gzip name, sans .gz> <padding> <trailer>
+    #
+    # <file>: the input file
+    # <gzip name, sans .gz>, <padding>, <trailer>:
+    #   The output files.  For the gzipped part, both the
+    #   compressed and the uncompressed output is generated, so we have
+    #   4 output files.
+    local file="$1"
+    local gz_result="$2.gz"
+    local result="$2"
+    local padding="$3"
+    local trailer="$4"
+    local tmpfile="/tmp/gunzipWithTrailer.$$.gz"
+    local original_size=$(getFileSize "$file")
+    local d=$(( (original_size+1) / 2))
+    local direction fini at_min=0
+    local results_at_min=()
+    local size=$d
+    local at_min=
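+    # Binary-search the length of the gzip member: gunzip exit status 1 means
+    # the prefix is truncated (grow the size), 2 means trailing garbage
+    # (shrink it), 0 means the size fits.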
+    echo "Separating gzipped part from trailer in  "$file""
+    echo -n "Trying size: $size"
+    while :; do
+        rqd dd if="$file" of="$tmpfile" bs=$size count=1 2>/dev/null
+        cmd gunzip >/dev/null 2>&1 -c "$tmpfile"
+        res=$?
+        if [ "$d" -eq 1 ]; then
+            : $((at_min++))
+            results_at_min[$size]=1
+            [ "$at_min" -gt 3 ] && break
+        fi
+        d=$(((d+1)/2))
+        case $res in
+                # 1: too small
+            1) size=$((size+d)); direction="↑";;
+                # 2: trailing garbage
+            2) size=$((size-d)); direction="↓";;
+                # OK
+            0) break;;
+            *) fatal "gunzip returned $res while checking "$file"";;
+        esac
+        echo -n "  $size"
+    done
+    if [ "$at_min" -gt 3 ]; then
+        echo -e "\ngunzip result is oscillating between 'too small' and 'too large' at size: ${!results_at_min[*]}"
+        echo -n "Trying lower nearby values:  "
+        fini=
+        for ((d=1; d < 30; d++)); do
+            : $((size--))
+            echo -n "  $size"
+            rqd dd if="$file" of="$tmpfile" bs=$size count=1 2>/dev/null
+            if cmd gunzip >/dev/null 2>&1 -c "$tmpfile"; then
+                echo -n " - OK"
+                fini=1
+                break
+            fi
+        done
+        [ -z "$fini" ] && fatal 'oscillating gunzip result, giving up.'
+    fi
+    # We've found the end of the gzipped part.  This is not the real
+    # end since gzip allows for some trailing padding to be appended
+    # before it barfs.  First, go back until we find a non-null
+    # character:
+    echo -ne "\npadding check (may take some time): "
+    real_end=$((size-1))
+    while checkNUL "$file" $real_end; do
+        : $((real_end--))
+    done
+    # Second, try if gunzip still succeeds.  If not, add trailing
+    # null(s) until it succeeds:
+    while :; do
+        rqd dd if="$file" of="$tmpfile" bs=$real_end count=1 2>/dev/null
+        gunzip >/dev/null 2>&1 -c "$tmpfile"
+        case $? in
+            # 1: too small
+            1) : $((real_end++));;
+            *) break;;
+        esac
+    done
+    real_next_start=$size
+    # Now, skip NULs forward until we reach a non-null byte.  This is
+    # considered as being the start of the next part.
+    while checkNUL "$file" $real_next_start; do
+        : $((real_next_start++))
+    done
+    echo $((real_next_start - real_end))
+    echo
+    rm "$tmpfile"
+    # Using the numbers we got so far, create the output files which
+    # reflect the parts we've found so far:
+    rqd dd 2>&9 if="$file" of="$gz_result" bs=$real_end count=1
+    rqd dd 2>&9 if="$file" of="$padding" skip=$real_end bs=1 count=$((real_next_start - real_end))
+    rqd dd 2>&9 if="$file" of="$trailer" bs=$real_next_start skip=1
+    rqd gunzip -c "$gz_result" > "$result"
+}
+
+
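+# unpack: split zImage at the first gzip header into the decompression stub and
+# the compressed payload, then let gunzipWithTrailer separate piggy.gz, its
+# padding and the trailing data.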
+unpack()(
+    [ -d "$unpacked" ] && echo "\
+Warning: there is already an unpacking directory.  If you have added files of
+your own there, the repacking result may not reflect the result of the
+current unpacking process."
+    rqd mkdir -p "$unpacked"
+    rqd cd "$unpacked"
+    sizes="$unpacked/sizes"
+    echo "# Unpacking sizes" > "$sizes"
+
+    piggy_start=$(findByteSequence "$cur_dir/$zImage")
+    if [ -z "$piggy_start" ]; then
+        fatal "Can't find a gzip header in file '$zImage'"
+    fi
+
+    rqd dd 2>&9 if="$cur_dir/$zImage" bs="$piggy_start" count=1 of="$decompression_code"
+    rqd dd 2>&9 if="$cur_dir/$zImage" bs="$piggy_start" skip=1 of="$piggy_gz_piggy_trailer"
+
+    gunzipWithTrailer  "$piggy_gz_piggy_trailer" \
+        "$piggy" "$padding_piggy" "$piggy_trailer"
+
+    echo
+       sudo rm -rf "piggy.gz" "piggy.gz+piggy_trailer" "sizes"
+    echo "Success."
+    echo "The unpacked files and the initramfs directory are in "$unpacked""
+)
+
+#### start of main program
+while getopts xv12345sgrpuhtz-: argv; do
+    case $argv in
+        p|u|z|1|2|3|4|5|t|r|g) eval opt_$argv=1;;
+        v) exec 9>&2; opt_v=1;;
+        s) cpio="cpio";;
+        x) set -x;;
+        -) if [ "$OPTARG" = "version" ]; then
+              echo "$pname $version"
+              exit 0
+           else
+              echo "Wrong Usage, use -u to unpack"
+           fi;;
+        h|-) echo "Wrong Usage, use -u to unpack";;
+        *) fatal "Illegal option";;
+    esac
+done
+shift $((OPTIND-1))
+zImage="${1:-zImage}"
+unpacked="$cur_dir/${zImage}_unpacked"
+packing="$cur_dir/${zImage}_packing"
+shift
+if [ -n "$*" ]; then
+    fatal "Excess arguments: '$*'"
+fi
+
+if [ -n "$opt_u" ]; then
+    [ -f "$zImage" ] || fatal "file '$zImage': not found"
+    unpack
+fi
+if [ -z "$opt_u" ]; then
+    echo >&2 "$pname: Need at least -u option."
+    echo >&2 "$pname: Type '$pname --help' for usage info."
+    exit 1
+fi
+
+exit
+
diff --git a/UPG/ss_bsdiff b/UPG/ss_bsdiff
new file mode 100755 (executable)
index 0000000..d1293cd
Binary files /dev/null and b/UPG/ss_bsdiff differ
diff --git a/UPG/ss_bspatch b/UPG/ss_bspatch
new file mode 100755 (executable)
index 0000000..0e539ab
Binary files /dev/null and b/UPG/ss_bspatch differ
diff --git a/UPG/unpack.sh b/UPG/unpack.sh
new file mode 100755 (executable)
index 0000000..869881d
--- /dev/null
@@ -0,0 +1,236 @@
+#!/bin/bash
+
+pname="${0##*/}"
+args=("$@")
+cur_dir="$(pwd)"
+
+# file names:
+decompression_code="decompression_code"
+piggy_gz_piggy_trailer="piggy.gz+piggy_trailer"
+piggy="piggy"
+piggy_gz="piggy.gz"
+padding_piggy="padding_piggy"
+piggy_trailer="piggy_trailer"
+padding3="padding3"
+sizes="sizes"
+
+# We dup2 stderr to 3 so an error path is always available (even
+# during commands where stderr is redirected to /dev/null).  If option
+# -v is set, we also dup2 stderr to 9 so commands (and some of their
+# results if redirected to &9) are printed as well.
+exec 9>/dev/null                # kill diagnostic output (will be >&2 if -v)
+exec 3>&2                       # an always open error channel
+
+#
+########### Start of functions
+#
+
+# Emit an error message and abort
+fatal(){
+    # Syntax: fatal <string ...>
+    # Output error message, then abort
+    echo >&3
+    echo >&3 "$pname: $*"
+    kill $$
+    exit 1
+}
+
+# Execute a command, displaying the command if -v:
+cmd(){
+    # Syntax: cmd <command> <args...>
+    # Execute <command>, echo command line if -v
+    echo >>"$workspace/log_file" "$*"
+    "$@"
+}
+
+# Execute a required command, displaying the command if -v, abort on
+# error:
+rqd(){
+    # Syntax: cmd <command> <args...>
+    # Execute <command>, echo commandline if -v, abort on error
+    cmd "$@" || fatal "$* failed."
+}
+
+checkNUL(){
+    # Syntax: checkNUL file offset
+    # Returns true (0) if byte there is 0x0.
+    [ "$(rqd 2>>"$workspace/log_file" "$workspace/dd" if="$1" skip=$2 bs=1 count=1)" = $'\0' ]
+}
+
+gunzipWithTrailer(){
+    # Syntax gunzipWithTrailer <file> <gzip name, sans .gz> <padding> <trailer>
+    #
+    # <file>: the input file
+    # <gzip name, sans .gz>, <padding>, <trailer>:
+    #   The output files.  For the gzipped part, both the
+    #   compressed and the uncompressed output is generated, so we have
+    #   4 output files.
+    local file="$1"
+    local gz_result="$2.gz"
+    local result="$2"
+    local padding="$3"
+    local trailer="$4"
+    local tmpfile="/tmp/gunzipWithTrailer.$$.gz"
+       local original_size=$("$workspace/stat" -c %s "$unpacked/$file") 2>>"$workspace/log_file"
+       echo "Original size is $original_size" >> "$workspace/log_file"
+    local d=$(( (original_size+1) / 2))
+    local direction fini at_min=0
+    local results_at_min=()
+    local size=$d
+    local at_min=
+       rm -rf /tmp/test_file
+    echo "Separating gzipped part from trailer in "$unpacked/$file"" >> "$workspace/log_file"
+    echo -n "Trying size: $size"       >> "$workspace/log_file"
+    while :; do
+        rqd "$workspace/dd" if="$unpacked/$file" of="$tmpfile" bs=$size count=1 2>>"$workspace/log_file"
+        cmd "$workspace/gzip" >/tmp/test_file 2>>"$workspace/log_file" -d -c "$tmpfile"
+        res=$?
+               echo "result for gunzip is $res" >>"$workspace/log_file"
+        if [ "$d" -eq 1 ]; then
+            : $((at_min++))
+            results_at_min[$size]=1
+            [ "$at_min" -gt 3 ] && break
+        fi
+        d=$(((d+1)/2))
+        case $res in
+                # 1: too small
+            1)  echo "In case 1" >> "$workspace/log_file"
+                               size=$((size+d)); direction="↑";;
+                # 2: trailing garbage
+            2)         echo "In case 2" >> "$workspace/log_file"
+                               size=$((size-d)); direction="↓";;
+                # OK
+            0)         echo "Breaking" >> "$workspace/log_file"
+                               break;;
+            *)         echo "In case *" >> "$workspace/log_file"
+                               fatal "gunzip returned $res while checking "$unpacked/$file"";;
+        esac
+        echo -n "  $size" >> "$workspace/log_file"
+    done
+    if [ "$at_min" -gt 3 ]; then
+        echo -e "\ngunzip result is oscillating between 'too small' and 'too large' at size: ${!results_at_min[*]}"    >> "$workspace/log_file"
+        echo -n "Trying lower nearby values:  "        >> "$workspace/log_file"
+        fini=
+        for ((d=1; d < 30; d++)); do
+            : $((size--))
+            echo -n "  $size" >> "$workspace/log_file"
+            rqd "$workspace/dd" if="$unpacked/$file" of="$tmpfile" bs=$size count=1 2>/dev/null
+            if cmd "$workspace/gzip" >/dev/null 2>&1 -d -c  "$tmpfile"; then
+                echo -n " - OK"        >> "$workspace/log_file"
+                fini=1
+                break
+            fi
+        done
+        [ -z "$fini" ] && fatal 'oscillating gunzip result, giving up.'
+    fi
+    # We've found the end of the gzipped part.  This is not the real
+    # end since gzip allows for some trailing padding to be appended
+    # before it barfs.  First, go back until we find a non-null
+    # character:
+    echo -ne "\npadding check (may take some time): " >> "$workspace/log_file"
+    real_end=$((size-1))
+    while checkNUL "$unpacked/$file" $real_end; do
+        : $((real_end--))
+    done
+       echo "Found real end at $real_end" >> "$workspace/log_file"
+    # Second, try if gunzip still succeeds.  If not, add trailing
+    # null(s) until it succeeds:
+    while :; do
+        rqd "$workspace/dd" if="$unpacked/$file" of="$tmpfile" bs=$real_end count=1 2>>"$workspace/log_file"
+        "$workspace/gzip" >/tmp/test_file2 2>>"$workspace/log_file" -d -c "$tmpfile"
+        case $? in
+            # 1: too small
+            1)         echo "In case 1" >> "$workspace/log_file"
+                               : $((real_end++));;
+            *)         echo "Case other $?" >> "$workspace/log_file"
+                               break;;
+        esac
+    done
+       echo "Done with add trailing null(s) until it succeeds" >> "$workspace/log_file"
+    real_next_start=$size
+    # Now, skip NULs forward until we reach a non-null byte.  This is
+    # considered as being the start of the next part.
+    while checkNUL "$unpacked/$file" $real_next_start; do
+        : $((real_next_start++))
+    done
+    echo $((real_next_start - real_end))       >> "$workspace/log_file"
+    echo >> "$workspace/log_file"
+    rm "$tmpfile"
+    # Using the numbers we got so far, create the output files which
+    # reflect the parts we've found so far:
+    rqd "$workspace/dd" if="$unpacked/$file" of="$unpacked/$gz_result" bs=$real_end count=1
+    rqd "$workspace/dd" if="$unpacked/$file" of="$unpacked/$padding" skip=$real_end bs=1 count=$((real_next_start - real_end))
+    rqd "$workspace/dd" if="$unpacked/$file" of="$unpacked/$trailer" bs=$real_next_start skip=1
+    rqd "$workspace/gzip" -c -d "$unpacked/$gz_result" > "$unpacked/$result"
+}
+
+unpack()(
+    [ -d "$unpacked" ] && echo "\
+Warning: there is already an unpacking directory.  If you have added files of
+your own there, the repacking result may not reflect the result of the
+current unpacking process."
+    rqd mkdir -p "$unpacked"
+    rqd cd "$unpacked"
+    sizes="$unpacked/sizes"
+    echo "# Unpacking sizes" > "$sizes"
+    log_file="$unpacked/log_file"
+    #piggy_start=$1
+    if [ -z "$piggy_start" ]; then
+               echo "Can't find a gzip header in file '$zImage'" >> "$workspace/log_file"
+        fatal "Can't find a gzip header in file '$zImage'"
+       else
+               echo "start is $piggy_start" >> "$sizes"
+    fi
+
+    rqd "$workspace/dd" if="$zImage" bs="$piggy_start" count=1 of="$unpacked/$decompression_code"
+    rqd "$workspace/dd" if="$zImage" bs="$piggy_start" skip=1 of="$piggy_gz_piggy_trailer"
+
+    gunzipWithTrailer  "$piggy_gz_piggy_trailer" \
+        "$piggy" "$padding_piggy" "$piggy_trailer"
+
+    echo
+    echo "Success."
+)
+
+#### start of main program
+while getopts xv12345sgrpuhtz-: argv; do
+    case $argv in
+        p|z|1|2|3|4|5|t|r|g) eval opt_$argv=1;;
+        u)  opt_u=1
+                       workspace=$2
+                   zImage="$2/$3"
+                       piggy_start=$4
+                       unpacked="${zImage}_unpacked"
+                       packing="${zImage}_packing";;
+        -) if [ "$OPTARG" = "version" ]; then
+              echo "$pname $version"
+              exit 0
+           else
+              echo "Wrong Usage, use -u to unpack"
+           fi;;
+        h|-) echo "Wrong Usage, use -u to unpack";;
+        *) fatal "Illegal option";;
+    esac
+done
+if [ -n "$opt_u" ]; then
+    [ -f "$zImage" ] || fatal "file '$zImage': not found"
+    unpack
+fi
+if [ -n "$opt_p" ]; then
+       work_dir=$2
+       tgt_file=$3
+       cmd_dir=$4
+       rqd cd "$work_dir"
+       #remove all links before proceeding with zip processing
+       "$cmd_dir/find" . -type l  -exec rm {} \;
+       "$cmd_dir/find" . -exec touch -t 200011111111.11 {} \;
+       "$cmd_dir/find" . -exec chmod 0755 {} \;
+       "$cmd_dir/zip" -ryX "$tgt_file" *
+fi
+if [ -z "$opt_u$opt_p" ]; then
+    echo >&2 "$pname: Need -u or -p option to work"
+    echo >&2 "$pname: Type '$pname --help' for usage info."
+    exit 1
+fi
+
+exit
diff --git a/bsdiff/CMakeLists.txt b/bsdiff/CMakeLists.txt
new file mode 100755 (executable)
index 0000000..cbdd658
--- /dev/null
@@ -0,0 +1,23 @@
+CMAKE_MINIMUM_REQUIRED(VERSION 2.6)
+PROJECT(ss_bsdiff C)
+
+SET(ss_bsdiff_SRCS ss_bsdiff.c)
+SET(ss_bspatch_SRCS ss_bspatch.c)
+
+INCLUDE(FindPkgConfig)
+pkg_check_modules(${PROJECT_NAME}_pkgs REQUIRED lib7zip libdivsufsort)
+
+FOREACH(flag ${${PROJECT_NAME}_pkgs_CFLAGS})
+       SET(EXTRA_CFLAGS "${EXTRA_CFLAGS} ${flag}")
+ENDFOREACH(flag)
+
+SET(EXTRA_CFLAGS "${EXTRA_CFLAGS} -I./include")
+SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${EXTRA_CFLAGS}")
+
+ADD_EXECUTABLE(ss_bsdiff ${ss_bsdiff_SRCS})
+TARGET_LINK_LIBRARIES(ss_bsdiff ${${PROJECT_NAME}_pkgs_LDFLAGS} "-g" "-pthread")
+INSTALL(TARGETS ss_bsdiff DESTINATION bin)
+
+ADD_EXECUTABLE(ss_bspatch ${ss_bspatch_SRCS})
+TARGET_LINK_LIBRARIES(ss_bspatch ${${PROJECT_NAME}_pkgs_LDFLAGS} "-g" "-pthread")
+INSTALL(TARGETS ss_bspatch DESTINATION bin)
diff --git a/bsdiff/ss_bsdiff.c b/bsdiff/ss_bsdiff.c
new file mode 100755 (executable)
index 0000000..cea3385
--- /dev/null
@@ -0,0 +1,597 @@
+/*-
+ * Copyright 2003-2005 Colin Percival
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted providing that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+// This file is nearly a copy of bsdiff.c from the
+// bsdiff-4.3 distribution; the primary differences are how the
+// input and output data are read, the data post-processing,
+// the search function modification and the error handling.
+
+#define _CRT_SECURE_NO_WARNINGS
+#include <sys/types.h>
+#include <err.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <Alloc.h>
+#include <7zFile.h>
+#include <7zVersion.h>
+#include <LzmaDec.h>
+#include <LzmaEnc.h>
+
+#define SUFSORT_MOD                                    // Change suffix sorting algorithm from Qsufsort to Divsufsort
+//#define ZLIB_MOD                                     // Change compression algorithm
+#define SEARCH_RECURSIVE                       // Change recursion of Search function to Iteration
+//#define MAX_MATCH_SIZE                       // define ( MAX_MATCH_SIZE or CONST_MEMORY_USAGE ) or ( none of them ); use max_match memory for old file at patch side
+#define CONST_MEMORY_USAGE 16384               // patch in m+O(1); m=size of new file; use only const memory for old file at patch side;
+#define PATCH_FILE_FORMAT_MOD       // no accumulation of diff and extra in db and eb; immediate write; also write all 3 parts of control stmt at same time
+#define MULTI_THREADING 1            // only with #define CONST_MEMORY_USAGE or #define MAX_MATCH_SIZE
+#define TIME_LIMIT_CHECK 300
+
+/* Take care:
+1) Use either MAX_MATCH_SIZE or CONST_MEMORY_USAGE, or neither of them.
+2) PATCH_FILE_FORMAT_MOD may or may not be used, independently of everything else.
+3) MULTI_THREADING can be used only with CONST_MEMORY_USAGE or MAX_MATCH_SIZE.
+*/
+#ifdef TIME_LIMIT_CHECK
+long long outer_count = 0;
+char ts1[256];
+void get_time_stamp(void)
+{
+       struct timeval tv;
+       int sec, msec;
+
+       gettimeofday(&tv, NULL);
+       sec = (int)tv.tv_sec;
+       msec = (int)(tv.tv_usec / 1000);
+       snprintf(ts1, 256, "%06d.%03d", sec % 100000, msec);
+}
+#endif
+#ifdef SUFSORT_MOD
+//supporting only 32 bit divsufsort for now.
+#include "divsufsort.h"
+#endif
+
+#define MIN(x,y) (((x)<(y)) ? (x) : (y))
+
+#ifdef MULTI_THREADING
+
+struct data_thread
+{
+       unsigned num_threads;
+       off_t size_thread;
+
+       int fd;
+       int rc;
+       u_char *old;
+       u_char **new;
+       off_t oldsize;
+       off_t newsize;
+
+       saidx_t *I;
+       u_char buf[8];
+       u_char header[16];
+       u_char buf2[32];
+
+       off_t lenn;
+       off_t lenn2;
+
+       FILE * pf;
+       int bz2err;
+       FILE * pfbz2;
+};
+
+struct data_thread data;
+
+int Function(int);
+
+
+#endif
+
+static off_t matchlen(u_char *old, off_t oldsize,u_char *new, off_t newsize)
+{
+       off_t i;
+       for (i = 0; (i < oldsize) && (i < newsize); i++)
+               if (old[i] != new[i])
+                       break;
+       return i;
+}
+
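+/* Binary-search the suffix array I of the old file for the suffix sharing the
+ * longest common prefix with new[0..newsize); the best match length is
+ * returned and its position in old is stored through *pos. */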
+static off_t search(saidx_t *I, u_char *old, off_t oldsize,
+               u_char *new, off_t newsize, off_t st, off_t en, off_t *pos)
+{
+       off_t x, y;
+       while (en - st >= 2) {
+               x = st + (en - st) / 2;
+               if (memcmp(old + I[x], new, MIN(oldsize - I[x], newsize)) < 0) {
+                       st = x;
+               } else {
+                       en = x;
+               }
+       }
+
+       if (en - st < 2) {
+               x = matchlen(old + I[st], oldsize - I[st], new, newsize);
+               y = matchlen(old + I[en], oldsize - I[en], new, newsize);
+
+               if (x > y) {
+                       *pos = I[st];
+                       return x;
+               } else {
+                       *pos = I[en];
+                       return y;
+               }
+       };
+}
+
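+/* Store an off_t as 8 little-endian magnitude bytes with the sign kept in
+ * bit 7 of the last byte; offtin() in ss_bspatch.c performs the inverse. */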
+static void offtout(off_t x, u_char *buf)
+{
+       off_t y;
+
+       if (x < 0)
+               y = -x;
+       else
+               y = x;
+
+       buf[0] = y % 256;
+       y -= buf[0];
+       y = y / 256;
+       buf[1] = y % 256;
+       y -= buf[1];
+       y = y / 256;
+       buf[2] = y % 256;
+       y -= buf[2];
+       y = y / 256;
+       buf[3] = y % 256;
+       y -= buf[3];
+       y = y / 256;
+       buf[4] = y % 256;
+       y -= buf[4];
+       y = y / 256;
+       buf[5] = y % 256;
+       y -= buf[5];
+       y = y / 256;
+       buf[6] = y % 256;
+       y -= buf[6];
+       y = y / 256;
+       buf[7] = y % 256;
+
+       if (x < 0)
+               buf[7] |= 0x80;
+}
+
+int create_patch(int argc, char *argv[], int offset_oldscore)
+{
+       data.num_threads = MULTI_THREADING;
+       data.new = (u_char **)malloc(sizeof(u_char *)*data.num_threads);
+       if (argc != 4)
+               errx(1, "usage: %s oldfile newfile patchfile\n", argv[0]);
+
+       /* Allocate oldsize+1 bytes instead of oldsize bytes to ensure
+               that we never try to malloc(0) and get a NULL pointer */
+       if (((data.fd = open(argv[1], O_RDONLY, 0)) < 0) ||
+                       ((data.oldsize = lseek(data.fd, 0, SEEK_END)) == -1) ||
+                       ((data.old = malloc(data.oldsize + 1)) == NULL) ||
+                       (lseek(data.fd, 0, SEEK_SET) != 0) ||
+                       (read(data.fd, data.old, data.oldsize) != data.oldsize) ||
+                       (close(data.fd) == -1))
+               err(1, "%s", argv[1]);
+
+       data.I = malloc((data.oldsize + 1) * sizeof(saidx_t));
+       divsufsort(data.old, data.I, data.oldsize);
+
+       /* Allocate newsize+1 bytes instead of newsize bytes to ensure
+               that we never try to malloc(0) and get a NULL pointer */
+       if (((data.fd = open(argv[2], O_RDONLY, 0)) < 0) ||
+                       ((data.newsize = lseek(data.fd, 0, SEEK_END)) == -1) ||
+                       (lseek(data.fd, 0, SEEK_SET) != 0))
+               err(1, "%s", argv[2]);
+       data.size_thread = (data.newsize / data.num_threads);
+
+       unsigned int j;
+       for (j = 0; j < data.num_threads; ++j) {
+               if (j != data.num_threads - 1) {
+                       if (((data.new[j] = (u_char *)malloc(sizeof(u_char) * (data.size_thread + 1))) == NULL) ||
+                                       (lseek(data.fd, 0, SEEK_CUR) != j * data.size_thread) ||
+                                       (read(data.fd, data.new[j], data.size_thread) != data.size_thread))
+                               err(1, "%s", argv[2]);
+               } else {
+                       if (((data.new[j] = (u_char *)malloc(sizeof(u_char) * (data.newsize - (j * data.size_thread) + 1))) == NULL) ||
+                                       (lseek(data.fd, 0, SEEK_CUR) != j * data.size_thread) ||
+                                       (read(data.fd, data.new[j], data.newsize - (j * data.size_thread)) != data.newsize - (j * data.size_thread)))
+                               err(1, "here %s", argv[2]);
+               }
+       }
+
+       if ((close(data.fd) == -1))
+               err(1, "%s", argv[2]);
+
+       /* Create the patch file */
+       if ((data.pf = fopen("temp_patch", "w")) == NULL)
+               err(1, "%s", "temp_patch");
+
+       /* Header is
+               0       8       "SSDIFF40"
+               8       8       length of new file */
+       /* File (temp_patch) is
+               0       16      Header
+               then, per control step:
+               32      bytes   ctrl block (diff len, extra len, old pos, new pos)
+               ??      ??      diff bytes
+               ??      ??      extra bytes
+          main2() finally LZMA-compresses temp_patch into the output patchfile */
+       memcpy(data.header, "SSDIFF40", 8);
+
+       offtout(data.newsize, data.header + 8);
+       if (fwrite(data.header, 16, 1, data.pf) != 1)
+               err(1, "fwrite(%s)", "temp_patch");
+
+       /* Compute the differences, writing ctrl as we go */
+       data.pfbz2 = data.pf;
+       //if ((data.pfbz2 = BZ2_bzWriteOpen(&data.bz2err, data.pf, 9, 0, 0)) == NULL)
+               //errx(1, "BZ2_bzWriteOpen, bz2err = %d", data.bz2err);
+
+
+       //BZ2_bzWriteClose(&data.bz2err, data.pfbz2, 0, NULL, NULL);
+       //if (data.bz2err != BZ_OK)
+               //errx(1, "BZ2_bzWriteClose, bz2err = %d", data.bz2err);
+
+       int ret = Function(offset_oldscore);
+#ifdef TIME_LIMIT_CHECK
+       if (ret != 0) {
+               printf("bsdiff fails to create delta with offset score %d\n", offset_oldscore);
+               printf("Old: [%s] -> New: [%s]\n", argv[1], argv[2]);
+       }
+#endif
+       /* Seek to the beginning, write the header, and close the file */
+       if (fseeko(data.pf, 0, SEEK_SET))
+               err(1, "fseeko");
+
+       if (fwrite(data.header, 16, 1, data.pf) != 1)
+               err(1, "fwrite(%s)", "temp_patch");
+       if (fclose(data.pf))
+               err(1, "fclose");
+       /* Free the memory we used */
+       free(data.I);
+       free(data.old);
+       free(data.new);
+
+       return ret;
+}
+
+int Function(int offset_oldscore)
+{
+       unsigned int thread_num = 0;
+       off_t end;
+       off_t scan=0;
+       end = data.newsize;
+       int t1 = 0, t2 = 0;
+       get_time_stamp();  //total time capturing
+       t1 = atoi(ts1);
+
+#ifdef PATCH_FILE_FORMAT_MOD
+       u_char* db;
+       u_char* eb;
+       off_t dblen;
+       off_t eblen;
+#endif
+
+       off_t pos;
+       off_t len;
+       off_t lastscan;
+       off_t lastpos;
+       off_t lastoffset;
+       off_t oldscore;
+       off_t scsc;
+       off_t s;
+       off_t Sf;
+       off_t lenf;
+       off_t Sb;
+       off_t lenb;
+       off_t overlap;
+       off_t Ss;
+       off_t lens;
+       off_t i;
+
+       len = 0;
+       lastscan = 0;
+       lastpos = 0;
+       lastoffset = 0;
+       while (scan < end) {
+               oldscore = 0;
+               for (scsc = scan += len; scan < end; scan++) {
+                       len = search(data.I, data.old, data.oldsize, data.new[thread_num] + scan, end - scan,
+                                       len, data.oldsize, &pos); // Pass len instead of 0 so that files like ramdisk.img do not take too long
+
+                       for (; scsc < scan + len; scsc++)
+                               if ((scsc + lastoffset < data.oldsize) &&
+                                               (data.old[scsc + lastoffset] == data.new[thread_num][scsc]))
+                                       oldscore++;
+#ifdef TIME_LIMIT_CHECK
+                       outer_count++;
+                       if (outer_count > 1000) {
+                               outer_count = 0;
+                               get_time_stamp();  //total time capturing
+                               t2 = atoi(ts1);
+                               //printf("\ntime diff = %d\n", (t2 - t1));
+                               if ((t2 - t1) > TIME_LIMIT_CHECK)
+                                       return 1;
+                       }
+#endif
+                       if (((len == oldscore) && (len != 0)) ||
+                                       (len > oldscore + offset_oldscore))
+                               break;
+
+                       if ((scan + lastoffset < data.oldsize) &&
+                                       (data.old[scan + lastoffset] == data.new[thread_num][scan]))
+                               oldscore--;
+               };
+               if ((len != oldscore) || (scan == end)) {
+                       s = 0;
+                       Sf = 0;
+                       lenf = 0;
+                       for (i = 0; (lastscan + i < scan) && (lastpos + i < data.oldsize); ) {
+                               if (data.old[lastpos + i] == data.new[thread_num][lastscan + i])
+                                       s++;
+                               i++;
+                               if (s * 2 - i > Sf * 2 - lenf) {
+                                       Sf = s;
+                                       lenf = i;
+                               };
+                       };
+
+                       lenb = 0;
+                       if (scan < end) {
+                               s = 0;
+                               Sb = 0;
+                               for (i = 1; (scan >= lastscan + i) && (pos >= i); i++) {
+                                       if (data.old[pos - i] == data.new[thread_num][scan - i])
+                                               s++;
+                                       if (s * 2 - i > Sb * 2 - lenb) {
+                                               Sb = s;
+                                               lenb = i;
+                                       };
+                               };
+                       };
+
+                       if (lastscan + lenf > scan - lenb) {
+                               overlap = (lastscan + lenf) - (scan - lenb);
+                               s = 0;
+                               Ss = 0;
+                               lens = 0;
+                               for (i = 0; i < overlap; i++) {
+                                       if (data.new[thread_num][lastscan + lenf - overlap + i] ==
+                                                       data.old[lastpos + lenf - overlap + i])
+                                               s++;
+                                       if (data.new[thread_num][scan - lenb + i] ==
+                                                       data.old[pos - lenb + i])
+                                               s--;
+                                       if (s > Ss) {
+                                               Ss = s;
+                                               lens = i + 1;
+                                       };
+                               };
+
+                               lenf += lens - overlap;
+                               lenb -= lens;
+                       };
+
+                       if (((db = malloc(lenf + 1)) == NULL) ||
+                                       ((eb = malloc((scan - lenb) - (lastscan + lenf) + 1)) == NULL))
+                               err(1, NULL);
+
+                       for (i = 0; i < lenf; i++)
+                               db[i] = data.new[thread_num][lastscan + i] - data.old[lastpos + i];
+                       for (i = 0; i < (scan - lenb) - (lastscan + lenf); i++)
+                               eb[i] = data.new[thread_num][lastscan + lenf + i];
+                       dblen = lenf;
+                       eblen = (scan - lenb) - (lastscan + lenf);
+                       offtout(lenf, data.buf2);
+                       offtout((scan - lenb) - (lastscan + lenf), data.buf2 + 8);
+                       offtout(lastpos, data.buf2 + 16);
+                       offtout((data.size_thread * thread_num) + lastscan, data.buf2 + 24);
+                       fwrite(data.buf2, 1, 32, data.pf);
+                       //if (data.bz2err != BZ_OK)
+                               //errx(1, "fwrite, bz2err = %d", data.bz2err);
+
+                       fwrite(db, 1, dblen, data.pf);
+                       //if (data.bz2err != BZ_OK)
+                               //errx(1, "fwrite, bz2err = %d", data.bz2err);
+
+                       fwrite(eb, 1, eblen, data.pf);
+                       //if (data.bz2err != BZ_OK)
+                               //errx(1, "fwrite, bz2err = %d", data.bz2err);
+
+                       free(db);
+                       free(eb);
+
+                       lastscan = scan - lenb;
+                       lastpos = pos - lenb;
+                       lastoffset = pos - scan;
+               };
+       };
+       return 0;
+}
+
+const char *kCantReadMessage = "Can not read input file";
+const char *kCantWriteMessage = "Can not write output file";
+const char *kCantAllocateMessage = "Can not allocate memory";
+const char *kDataErrorMessage = "Data error";
+
+static void *SzAlloc(void *p, size_t size)
+{
+       p = p;
+       return MyAlloc(size);
+}
+static void SzFree(void *p, void *address)
+{
+       p = p;
+       MyFree(address);
+}
+static ISzAlloc g_Alloc = { SzAlloc, SzFree };
+
+void PrintHelp(char *buffer)
+{
+       strcat(buffer, "\nLZMA Utility " MY_VERSION_COPYRIGHT_DATE "\n"
+                       "\nUsage:  lzma <e|d> inputFile outputFile\n"
+                       "  e: encode file\n"
+                       "  d: decode file\n");
+}
+
+int PrintError(char *buffer, const char *message)
+{
+       strcat(buffer, "\nError: ");
+       strcat(buffer, message);
+       strcat(buffer, "\n");
+       return 1;
+}
+
+int PrintErrorNumber(char *buffer, SRes val)
+{
+       sprintf(buffer + strlen(buffer), "\nError code: %x\n", (unsigned)val);
+       return 1;
+}
+
+int PrintUserError(char *buffer)
+{
+       return PrintError(buffer, "Incorrect command");
+}
+
+#define IN_BUF_SIZE (1 << 16)
+#define OUT_BUF_SIZE (1 << 16)
+
+
+static SRes Encode(ISeqOutStream *outStream, ISeqInStream *inStream, UInt64 fileSize, char *rs)
+{
+       CLzmaEncHandle enc;
+       SRes res;
+       CLzmaEncProps props;
+
+       rs = rs;
+
+       enc = LzmaEnc_Create(&g_Alloc);
+       if (enc == 0)
+               return SZ_ERROR_MEM;
+
+       LzmaEncProps_Init(&props);
+       res = LzmaEnc_SetProps(enc, &props);
+
+       if (res == SZ_OK) {
+               Byte header[LZMA_PROPS_SIZE + 8];
+               size_t headerSize = LZMA_PROPS_SIZE;
+               int i;
+
+               res = LzmaEnc_WriteProperties(enc, header, &headerSize);
+               for (i = 0; i < 8; i++)
+                       header[headerSize++] = (Byte)(fileSize >> (8 * i));
+               if (outStream->Write(outStream, header, headerSize) != headerSize)
+                       res = SZ_ERROR_WRITE;
+               else {
+                       if (res == SZ_OK)
+                               res = LzmaEnc_Encode(enc, outStream, inStream, NULL, &g_Alloc, &g_Alloc);
+               }
+       }
+       LzmaEnc_Destroy(enc, &g_Alloc, &g_Alloc);
+       return res;
+}
+
+int main2(int numArgs, const char *args[], char *rs)
+{
+       CFileSeqInStream inStream;
+       CFileOutStream outStream;
+       char c;
+       int res;
+       int encodeMode;
+       Bool useOutFile = False;
+
+       FileSeqInStream_CreateVTable(&inStream);
+       File_Construct(&inStream.file);
+
+       FileOutStream_CreateVTable(&outStream);
+       File_Construct(&outStream.file);
+
+       encodeMode = 1;
+
+       size_t t4 = sizeof(UInt32);
+       size_t t8 = sizeof(UInt64);
+       if (t4 != 4 || t8 != 8)
+               return PrintError(rs, "Incorrect UInt32 or UInt64");
+
+       if (InFile_Open(&inStream.file, "temp_patch") != 0)
+               return PrintError(rs, "Can not open input file");
+
+
+       if (OutFile_Open(&outStream.file, args[3]) != 0)
+               return PrintError(rs, "Can not open output file");
+
+
+       UInt64 fileSize;
+       File_GetLength(&inStream.file, &fileSize);
+       res = Encode(&outStream.s, &inStream.s, fileSize, rs);
+
+       File_Close(&outStream.file);
+       File_Close(&inStream.file);
+
+       if (res != SZ_OK) {
+               if (res == SZ_ERROR_MEM)
+                       return PrintError(rs, kCantAllocateMessage);
+               else if (res == SZ_ERROR_DATA)
+                       return PrintError(rs, kDataErrorMessage);
+               else if (res == SZ_ERROR_WRITE)
+                       return PrintError(rs, kCantWriteMessage);
+               else if (res == SZ_ERROR_READ)
+                       return PrintError(rs, kCantReadMessage);
+               return PrintErrorNumber(rs, res);
+       }
+       return 0;
+}
+
+int MY_CDECL main(int numArgs, const char *args[])
+{
+       char rs[800] = { 0 };
+       if (numArgs != 4)
+               errx(1,"ss_bsdiff Version 5.0\nUsage: ss_bsdiff oldfile newfile patchfile\n");
+
+       int ret = create_patch(numArgs, args, 8);
+#ifdef TIME_LIMIT_CHECK
+       if (ret != 0) {
+               printf("Trying with offset score 2\n");
+               ret = create_patch(numArgs, args, 2);
+       }
+       if (ret != 0) {
+               printf("Trying with offset score 0\n");
+               ret = create_patch(numArgs, args, 0);
+       }
+       if (ret != 0)
+               err(1, "bsdiff fails to create delta within timelimit");
+#endif
+       int res = main2(numArgs, args, rs);
+       remove("temp_patch");
+       fputs(rs, stdout);
+       return res;
+}
diff --git a/bsdiff/ss_bspatch.c b/bsdiff/ss_bspatch.c
new file mode 100755 (executable)
index 0000000..104e24a
--- /dev/null
@@ -0,0 +1,411 @@
+/*-
+ * Copyright 2003-2005 Colin Percival
+ * Copyright 2012 Matthew Endsley
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted providing that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Modifications are made in reimplementing suffix sort array generation
+ * and in how the data is read and written. The iterative implementation
+ * replaces the recursive one to avoid buffer overflow problems.
+ */
+#define _CRT_SECURE_NO_WARNINGS
+#define CONST_MEMORY_USAGE 16384
+#define PATCH_FILE_FORMAT_MOD
+#define BSDIFF_HEADER "BSDIFF40"
+#define SSDIFF_HEADER "SSDIFF40"
+#define MULTI_THREADING
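+/* CONST_MEMORY_USAGE bounds how many bytes of the old file apply_patch()
+ * holds in memory at a time while applying a single diff record. */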
+#include <stdbool.h>
+#include <err.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <Alloc.h>
+#include <7zFile.h>
+#include <7zVersion.h>
+#include <LzmaDec.h>
+#include <LzmaEnc.h>
+
+const char *kCantReadMessage = "Can not read input file";
+const char *kCantWriteMessage = "Can not write output file";
+const char *kCantAllocateMessage = "Can not allocate memory";
+const char *kDataErrorMessage = "Data error";
+
+static void *SzAlloc(void *p, size_t size)
+{
+       p = p;
+       return MyAlloc(size);
+}
+
+static void SzFree(void *p, void *address)
+{
+       p = p;
+       MyFree(address);
+}
+static ISzAlloc g_Alloc = { SzAlloc, SzFree };
+
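+/* offtin(): decode an 8-byte little-endian, sign-and-magnitude integer
+ * (bit 7 of the last byte holds the sign). */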
+static off_t offtin(u_char *buf)
+{
+       off_t y;
+
+       y = buf[7] & 0x7F;
+       y = y * 256;
+       y += buf[6];
+       y = y * 256;
+       y += buf[5];
+       y = y * 256;
+       y += buf[4];
+       y = y * 256;
+       y += buf[3];
+       y = y * 256;
+       y += buf[2];
+       y = y * 256;
+       y += buf[1];
+       y = y * 256;
+       y += buf[0];
+
+       if (buf[7] & 0x80)
+               y = -y;
+
+       return y;
+}
+
+void PrintHelp(char *buffer)
+{
+       strcat(buffer, "\nLZMA Utility " MY_VERSION_COPYRIGHT_DATE "\n"
+                       "\nUsage:  lzma <e|d> inputFile outputFile\n"
+                       "  e: encode file\n"
+                       "  d: decode file\n");
+}
+
+int PrintError(char *buffer, const char *message)
+{
+       strcat(buffer, "\nError: ");
+       strcat(buffer, message);
+       strcat(buffer, "\n");
+       return 1;
+}
+
+int PrintErrorNumber(char *buffer, SRes val)
+{
+       sprintf(buffer + strlen(buffer), "\nError code: %x\n", (unsigned)val);
+       return 1;
+}
+
+int PrintUserError(char *buffer)
+{
+       return PrintError(buffer, "Incorrect command");
+}
+
+#define IN_BUF_SIZE (1 << 16)
+#define OUT_BUF_SIZE (1 << 16)
+
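+/*
+ * Decode2(): stream-decode LZMA input from inStream, appending every decoded
+ * chunk to dec_data until *unpackSize bytes have been produced or the input
+ * ends.
+ */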
+static SRes Decode2(CLzmaDec *state, ISeqOutStream *outStream, ISeqInStream *inStream,
+               UInt64 *unpackSize,unsigned char *dec_data)
+{
+       int thereIsSize = (*unpackSize != (UInt64)(Int64) - 1);
+       UInt64 offset = 0;
+       Byte inBuf[IN_BUF_SIZE];
+       Byte outBuf[OUT_BUF_SIZE];
+       size_t inPos = 0, inSize = 0, outPos = 0;
+
+       LzmaDec_Init(state);
+
+       offset = 0;
+
+       for (;;) {
+               if (inPos == inSize) {
+                       inSize = IN_BUF_SIZE;
+                       RINOK(inStream->Read(inStream, inBuf, &inSize));
+                       inPos = 0;
+               }
+
+               SRes res;
+               SizeT inProcessed = inSize - inPos;
+               SizeT outProcessed = OUT_BUF_SIZE - outPos;
+               ELzmaFinishMode finishMode = LZMA_FINISH_ANY;
+               ELzmaStatus status;
+
+               if (thereIsSize && outProcessed > *unpackSize) {
+                       outProcessed = (SizeT) * unpackSize;
+                       finishMode = LZMA_FINISH_END;
+               }
+
+               res = LzmaDec_DecodeToBuf(state, outBuf + outPos, &outProcessed,
+                               inBuf + inPos, &inProcessed, finishMode, &status);
+               inPos += inProcessed;
+               outPos += outProcessed;
+               *unpackSize -= outProcessed;
+               memcpy(dec_data + offset, outBuf, outProcessed);
+               offset += outProcessed;
+
+               outPos = 0;
+
+               if ((res != SZ_OK) || (thereIsSize && *unpackSize == 0))
+                       return res;
+
+               if (inProcessed == 0 && outProcessed == 0) {
+                       if (thereIsSize || status != LZMA_STATUS_FINISHED_WITH_MARK)
+                               return SZ_ERROR_DATA;
+                       return res;
+               }
+       }
+}
+
+int apply_patch(char *oldfile, char *patch_buffer, unsigned char **dest_buf, ssize_t *dest_size)
+{
+       int fd = -1, result = 0;
+       off_t oldsize, newsize;
+       u_char header[16], buf[8];
+       u_char *old = NULL;
+       off_t oldpos, newpos;
+       off_t ctrl[4];          /* control record: diff length, extra length, old-file offset, new-file offset */
+       off_t total_write;      /* bytes of the new file produced so far */
+       off_t j;
+       off_t memory_usage = CONST_MEMORY_USAGE;
+       off_t match_size;
+       off_t patch_buffer_offset = 0;
+       bool flag;
+
+       /*
+          Decoded patch buffer layout, as parsed below (PATCH_FILE_FORMAT_MOD):
+          0    8       "BSDIFF40" or "SSDIFF40" magic
+          8    8       sizeof(newfile)
+          16   ...     a sequence of records, each consisting of:
+                       4x8  control values (x, y, oldpos, newpos)
+                       x    diff bytes, added byte-wise to oldfile data read at oldpos
+                       y    extra bytes, copied verbatim into the new file
+          The buffer has already been LZMA-decoded by the caller, so unlike
+          classic bsdiff there are no separate bzip2-compressed blocks.
+        */
+       // Read header
+       if (patch_buffer)
+               memcpy(header, patch_buffer, 16);
+       else {
+               printf("%s().%d Corrupt decoded patch buffer\n", __FUNCTION__, __LINE__);
+               return 1;
+       }
+
+       /* Check for appropriate magic */
+       if (memcmp(header, BSDIFF_HEADER, 8) != 0 && memcmp(header, SSDIFF_HEADER, 8) != 0) {
+               printf("%s().%d Patch buffer header corrupt\n", __FUNCTION__, __LINE__ );
+               return 1;
+       }
+
+       /* Read lengths from header */
+       newsize = offtin(header + 8);
+
+       if ((newsize < 0)) {
+               printf("%s().%d Patch buffer corrupt\n", __FUNCTION__, __LINE__ );
+               return 1;
+       }
+
+       /* Set patch_buffer_offset just past the 16-byte header */
+       patch_buffer_offset += 16;
+
+       if (((fd = open(oldfile, O_RDONLY, 0)) < 0) ||
+                       ((oldsize = lseek(fd, 0, SEEK_END)) == -1) ||
+                       ((old = malloc(memory_usage + 1)) == NULL) ||
+                       (lseek(fd, 0, SEEK_SET) != 0)) {
+               printf("Corruption in old file %s\n", oldfile);
+               result = 1;
+               goto Cleanup;
+       }
+
+       if ((*dest_buf = malloc(newsize + 1)) == NULL) {
+               printf("Failed to allocate memory for the new file buffer\n");
+               result = 1;
+               goto Cleanup;
+       }
+       oldpos = 0;
+       newpos = 0;
+
+       total_write = 0;
+
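+       /*
+        * Each iteration consumes one control record and advances total_write by
+        * ctrl[0] + ctrl[1] until the whole new file (newsize bytes) is produced.
+        */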
+       while (total_write != newsize) {
+               /* Read control data */
+               for (j = 0; j <= 3; j++) {
+                       memcpy(buf, patch_buffer + patch_buffer_offset, 8);
+                       patch_buffer_offset += 8;
+                       ctrl[j] = offtin(buf);
+               };
+
+               total_write += (ctrl[0] + ctrl[1]);
+               newpos = ctrl[3];
+               oldpos = ctrl[2];
+
+               /*
+                * Apply the ctrl[0] diff bytes in CONST_MEMORY_USAGE-sized chunks so
+                * that only a small window of the old file is resident in memory.
+                */
+               flag = true;
+               match_size = ctrl[0];
+               while (flag == true) {
+                       if (match_size <= memory_usage) {
+                               if (pread(fd, old, match_size, oldpos) != match_size) {
+                                       printf("Corruption in old file %s\n", oldfile);
+                                       result = 1;
+                                       goto Cleanup;
+                               }
+                               if (newpos + match_size > newsize) {
+                                       printf("%s().%d Corrupt patch\n", __FUNCTION__, __LINE__ );
+                                       result = 1;
+                                       goto Cleanup;
+                               }
+                               memcpy((*dest_buf) + newpos, patch_buffer + patch_buffer_offset, match_size);
+                               patch_buffer_offset += match_size;
+                               for (j = 0; j < match_size; j++) {
+                                       (*dest_buf)[newpos + j] += old[j];
+                               }
+                               newpos += match_size;
+                               flag = false;
+                       } else {
+                               if (pread(fd, old, memory_usage, oldpos) != memory_usage) {
+                                       printf("%s().%d Corruption in old file %s\n", __FUNCTION__, __LINE__ , oldfile);
+                                       result = 1;
+                                       goto Cleanup;
+                               }
+                               if (newpos + memory_usage > newsize) {
+                                       printf("%s().%d Corrupt patch\n", __FUNCTION__, __LINE__ );
+                                       result = 1;
+                                       goto Cleanup;
+                               }
+                               memcpy((*dest_buf) + newpos, patch_buffer + patch_buffer_offset, memory_usage);
+                               patch_buffer_offset += memory_usage;
+                               for (j = 0; j < memory_usage; j++)
+                                       (*dest_buf)[newpos + j] += old[j];
+                               match_size -= memory_usage;
+                               oldpos += memory_usage;
+                               newpos += memory_usage;
+                       }
+               }
+
+               /* Sanity-check */
+               if (newpos + ctrl[1] > newsize) {
+                       printf("%s().%d Corrupt patch\n", __FUNCTION__, __LINE__ );
+                       result = 1;
+                       goto Cleanup;
+               }
+               /* Read extra string */
+               memcpy((*dest_buf) + newpos, patch_buffer + patch_buffer_offset, ctrl[1]);
+               patch_buffer_offset += ctrl[1];
+       };
+       *dest_size = newsize;
+Cleanup:
+       //close old file
+       if (fd >= 0)
+               close(fd);
+       if (old)
+               free(old);
+       return result;
+}
+
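+/*
+ * main2(): read the LZMA header from the patch file (args[3]), decode the
+ * whole patch into memory, apply it to the old file (args[1]) with
+ * apply_patch(), and write the reconstructed data to the new file (args[2]).
+ */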
+int main2(int numArgs, const char *args[], char *rs)
+{
+       CFileSeqInStream inStream;
+       ISeqOutStream  outStream;
+       int res, fd;
+       int encodeMode;
+       char * buf_res = NULL;
+       unsigned char *new_data = NULL;
+       ssize_t new_size;
+
+       FileSeqInStream_CreateVTable(&inStream);
+       File_Construct(&inStream.file);
+
+       FileOutStream_CreateVTable(&outStream);
+       //File_Construct(&outStream.file);
+
+       encodeMode = 0;
+
+       size_t t4 = sizeof(UInt32);
+       size_t t8 = sizeof(UInt64);
+       if (t4 != 4 || t8 != 8)
+               return PrintError(rs, "Incorrect UInt32 or UInt64");
+
+       if (InFile_Open(&inStream.file, args[3]) != 0)
+               return PrintError(rs, "Can not open input file");
+       else if (encodeMode)
+               PrintUserError(rs);
+
+       if (encodeMode) {
+               UInt64 fileSize;
+               File_GetLength(&inStream.file, &fileSize);
+               //res = Encode(&outStream.s, &inStream.s, fileSize, rs);
+       } else {
+               UInt64 unpackSize, i, dest_len;
+               CLzmaDec state;
+               unsigned char header[LZMA_PROPS_SIZE + 8];
+               RINOK(SeqInStream_Read(&inStream.s, header, sizeof(header)));
+               unpackSize = 0;
+               for (i = 0; i < 8; i++)
+                       unpackSize += (UInt64)header[LZMA_PROPS_SIZE + i] << (i * 8);
+               buf_res = (unsigned char *)malloc(unpackSize);
+               if (buf_res == NULL)
+                       return PrintError(rs, kCantAllocateMessage);
+               memset(buf_res, 0x0, unpackSize);
+               dest_len = unpackSize;
+               LzmaDec_Construct(&state);
+               RINOK(LzmaDec_Allocate(&state, header, LZMA_PROPS_SIZE, &g_Alloc));
+               res = Decode2(&state, &outStream, &inStream.s, &unpackSize,buf_res);
+               LzmaDec_Free(&state, &g_Alloc);
+               File_Close(&inStream.file);
+               if (apply_patch(args[1], buf_res, &new_data, &new_size) != 0) {
+                       if (new_data)
+                               free(new_data);
+                       free(buf_res);
+                       return 1;
+               }
+               if (((fd = open(args[2], O_CREAT | O_TRUNC | O_WRONLY, 0666)) < 0) ||
+                               (write(fd, new_data, new_size) != new_size) || (close(fd) == -1))
+                       err(1,"%s",args[2]);
+               if (res != SZ_OK) {
+                       free(new_data);
+                       free(buf_res);
+                       if (res == SZ_ERROR_MEM)
+                               return PrintError(rs, kCantAllocateMessage);
+                       else if (res == SZ_ERROR_DATA)
+                               return PrintError(rs, kDataErrorMessage);
+                       else if (res == SZ_ERROR_WRITE)
+                               return PrintError(rs, kCantWriteMessage);
+                       else if (res == SZ_ERROR_READ)
+                               return PrintError(rs, kCantReadMessage);
+                       return PrintErrorNumber(rs, res);
+               }
+               free(new_data);
+               free(buf_res);
+               return 0;
+       }
+}
+
+int main(int numArgs, const char *args[])
+{
+       char rs[800] = { 0 };
+       if (numArgs != 4)
+               errx(1,"ss_bspatch Version 1.0\nUsage: ss_bspatch oldfile newfile patchfile\n");
+       int res = main2(numArgs, args, rs);
+       fputs(rs, stdout);
+       return res;
+}
diff --git a/packaging/libtota.spec b/packaging/libtota.spec
new file mode 100755 (executable)
index 0000000..a798251
--- /dev/null
@@ -0,0 +1,79 @@
+Name:          libtota
+Summary:       FOTA update library
+ExclusiveArch:         %{arm}
+Version:       0.1.0
+Release:       1
+Group:         System
+License:       Apache-2.0 and BSD-2-Clause and BSD-3-Clause and PD
+Source0:       %{name}-%{version}.tar.gz
+
+BuildRequires: cmake
+BuildRequires: pkgconfig(lib7zip)
+BuildRequires: pkgconfig(libdivsufsort)
+
+%description
+FOTA update agent which updates firmware using delta files.
+
+%package devel
+License:        Apache-2.0 and BSD-2-Clause and BSD-3-Clause and PD
+Summary:        libtota library (development)
+Requires:       libtota = %{version}
+Group:          Development/Libraries
+
+%description devel
+libtota library (development)
+
+%package -n tota-bsdiff
+Summary:       bsdiff / bspatch tools for TOTA
+
+%description -n tota-bsdiff
+bsdiff / bspatch are tools for building and applying patches to binary files.
+This package offers these tools for TOTA.
+
+%prep
+%setup -q
+
+
+%build
+export LDFLAGS+="-Wl,--rpath=%{_prefix}/lib -Wl,--as-needed"
+mkdir cmake_tmp
+cd cmake_tmp
+LDFLAGS="$LDFLAGS" cmake .. -DCMAKE_INSTALL_PREFIX=%{_prefix}
+
+make %{?jobs:-j%jobs}
+
+%install
+cd cmake_tmp
+%make_install
+#mkdir -p %{buildroot}/usr/lib/
+cp libtota.a %{buildroot}/usr/lib/libtota.a
+%post
+
+
+%files
+%defattr(-,root,root,-)
+%license LICENSE.Apache-2.0
+%license LICENSE.BSD-2-Clause
+%license LICENSE.BSD-3-Clause
+#%{_libdir}/libtota.a
+#%manifest fota.manifest
+
+%files devel
+%defattr(-,root,root,-)
+%{_libdir}/libtota.a
+%{_libdir}/pkgconfig/tota.pc
+%{_includedir}/fota_common.h
+%{_includedir}/fota_log.h
+%{_includedir}/fota_tar.h
+%{_includedir}/SS_Common.h
+%{_includedir}/SS_Engine_Errors.h
+%{_includedir}/SS_Engine_Update.h
+%{_includedir}/SS_FSUpdate.h
+%{_includedir}/SS_ImageUpdate.h
+%{_includedir}/SS_MultiProcessUpdate.h
+%{_includedir}/SS_Nand.h
+%{_includedir}/SS_UPI.h
+
+%files -n tota-bsdiff
+%{_bindir}/ss_bsdiff
+%{_bindir}/ss_bspatch
diff --git a/ss_engine/SS_Common.c b/ss_engine/SS_Common.c
new file mode 100755 (executable)
index 0000000..876fb21
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * libtota
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <stdarg.h>
+#include <fcntl.h>
+
+#include "SS_ImageUpdate.h"
+#include "SS_Engine_Errors.h"
+#include "SS_Common.h"
+
+#include "ua.h"
+#include "fota_tar.h"
+#include "fota_common.h"
+
+void SS_Progress(void *pbUserData, SS_UINT32 uPercent)
+{
+    LOG("Progress before\n");
+    LOGL(LOG_SSENGINE, "Progress.. (%u %%)\n", uPercent);
+    LOG("Progress after:\n");
+    ((ua_data_t *) pbUserData)->ui_progress(pbUserData, uPercent);
+}
+
+/* Prints a string like the C printf() function */
+SS_UINT32 SS_Trace(void *pUser, const char *aFormat, ...)
+{
+#if 0
+    LOGL(LOG_REDBEND, aFormat);
+#else
+    char temp[4096];
+    va_list list;
+
+    va_start(list, aFormat);
+    vsnprintf(temp, sizeof(temp), aFormat, list);
+    va_end(list);
+
+    LOGL(LOG_SSENGINE, "%s", temp);
+#endif
+    return S_SS_SUCCESS;
+}
+
+long SS_FSTrace(void *pbUserData, const unsigned short *aFormat, ...)
+{
+    va_list list;
+
+    va_start(list, aFormat);
+    vprintf((const char *)aFormat, list);
+    va_end(list);
+
+    return S_SS_SUCCESS;
+}
+
+long SS_ResetTimerA(void)
+{
+    //LOG("%s \n", __func__);
+
+    return S_SS_SUCCESS;
+}
+
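+/*
+ * SS_GetDelta(): locate the delta item for the current partition inside the
+ * delta tar archive and read dwSize bytes from dwStartAddressOffset into
+ * pbBuffer.
+ */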
+long SS_GetDelta(void *pbUserData, unsigned char *pbBuffer, SS_UINT32 dwStartAddressOffset, SS_UINT32 dwSize)
+{
+    int ret = S_SS_SUCCESS;
+    int readCount = 0;
+    FILE *fp;
+    long itemOffset = 0;
+
+    ua_data_t *ua_data = (ua_data_t *) pbUserData;
+    ua_part_info_t *ua_partition = ua_data->parti_info;
+    ua_update_data_t *ua_update_data = ua_data->update_data;
+
+    LOGL(LOG_SSENGINE, "SS_GetDelta offset 0x%x(%u), size 0x%x(%u)\n",
+         dwStartAddressOffset, dwStartAddressOffset, dwSize, dwSize);
+
+    itemOffset = tar_get_item_offset(ua_update_data->ua_delta_path, ua_partition->ua_subject_name);
+    if (itemOffset < 0) {
+        return E_SS_OPENFILE_ONLYR;
+    }
+
+    fp = fopen(ua_update_data->ua_delta_path, "r");
+    if (!fp) {
+        LOGL(LOG_SSENGINE, "open file %s failed.\n", ua_update_data->ua_delta_path);
+        return E_SS_OPENFILE_ONLYR;
+    }
+
+    if (fseek(fp, itemOffset + dwStartAddressOffset, 0) == -1)
+        ret = E_SS_READ_ERROR;
+    else {
+        readCount = fread(pbBuffer, 1, dwSize, fp);
+        if (readCount != dwSize) {
+            LOGL(LOG_SSENGINE, "error in read size\n");
+            ret = E_SS_READ_ERROR;
+        }
+    }
+    fclose(fp);
+
+    return ret;
+}
diff --git a/ss_engine/SS_Common.h b/ss_engine/SS_Common.h
new file mode 100755 (executable)
index 0000000..35a8eff
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * libtota
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "SS_MultiProcessUpdate.h"
+#include "SS_FSUpdate.h"
+#include "SS_ImageUpdate.h"
+#include "SS_Engine_Errors.h"
+#include "SS_Engine_Update.h"
+
+#include "ua.h"
+
+#define MAX_PATH 256
+
+#define NAND_BLOCK_BITS 18      // 0x40000
+#define NAND_PAGE_BITS 12       // 0x1000
+
+#if defined(PART_W) || defined(PART_WB1) || defined(PART_W3G) || defined(PART_TINY) \
+       || defined(PART_HIGGS) || defined(PART_KIRAN) || defined(PART_KIRANLTE) || defined(PART_ORBIS)
+#define UA_RAM_SIZE            64*1024*1024    // must be the same as <RamSize>0x4000000</RamSize> in the XML
+#else
+#define UA_RAM_SIZE            512*1024*1024   // must be the same as <RamSize>0x20000000</RamSize> in the XML
+#endif
+
+#define FS_ID_MAX_LEN 4
+#define DP_HEADER_SIZE 36
+#define DP_START_OFFSET        0
+#define  BACKUPBUFFER_NUM                           4
+
+/*******[ Multiprocess API sample implementation ]******/
+#define _NOEXEC_
+#define SAMPLE_PROCS_NUM       (0)
+#define EXTRA_ARGS                     (3)
+#define HANDLE_RUN_PROC        "handle_run_process"
+
+struct status_header_page {
+    unsigned char preamble[5];  // can be used as a state: FOTA, SBLB, ZIMG, PLRM, MDEM, DONE, FAIL, etc.
+    unsigned long format;       // final return value: the engine's per-image return values below, mapped to the DM error table
+    unsigned long reserved;     // Not used
+    unsigned long flexStatus;   // Not used
+    unsigned long apfwStatus;   // engine return value for the zImage update
+    unsigned long apffsStatus;  // engine return value for the platform update
+    unsigned long apuaStatus;   // engine return value for the SBL update
+    unsigned long bpfwStatus;   // engine return value for the modem update
+    unsigned long bpuaStatus;
+};
+
+void SS_unicode_to_char(const char *src, char *dest);
diff --git a/ss_engine/SS_Engine_Errors.h b/ss_engine/SS_Engine_Errors.h
new file mode 100755 (executable)
index 0000000..f116c14
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * libtota
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+       SS_Engine Errors
+
+       0xAxx Series define INPUT/UA Errors
+
+       0xBxx Series define Delta Errors
+
+       0xCxx Series define System Errors
+
+       0xDxx Series define Engine Errors
+
+       0xExx Series define any Other Errors
+
+*/
+
+#ifndef __SS_Engine_ERRORS__
+#define __SS_Engine_ERRORS__
+
+#define  S_SS_SUCCESS                                                                  (0)     /*! Success Code */
+#define  E_SS_FAILURE                                                                  (1)     /*! Failure Code */
+
+/* INPUT Processing errors */
+
+/* invocation errors */
+#define E_SS_BAD_PARAMS                                                                (0xA00)          /**< error in a run parameter */
+#define E_SS_FSINVALIDNODEPARAMS                                       (0xA01) /* Failed to Parse the Params to verify NODE */
+#define E_SS_FSBADNODES                                                                (0xA02) /* Failed to verify NODE for FS */
+#define E_SS_FSBADDELTA                                                                (0xA03) /* Delta File does NOT contain required information on FS */
+#define E_SS_FSBADATTRIBUTES                                           (0xA04) /* Failed to parse attribute data */
+#define E_SS_FSFAILEDTOOPENPATCHINFO                                   (0xA05) /*Failed to open patch list file having details of PATCH info */
+#define E_SS_FSFAILEDTOPARSEDELTACNT                           (0xA06) /* Failed to parse the PATCH count information */
+#define E_SS_FSFAILEDTOPARSEDELTAINFO                          (0xA07) /* Failed to parse the Delta patch information */
+
+/* update package errors */
+#define  E_SS_PKG_TOO_LONG                                                     (0xA08) /**< expected length error    */
+#define  E_SS_PKG_CORRUPTED                                                    (0xA09) /**< structural error         */
+#define  E_SS_SOURCE_CORRUPTED                                         (0xA10) /**< signature error          */
+
+/*Delta Errors*/
+
+#define E_SS_FSBADPATCH                                                                (0xB00) /*File Patch Does NOT match the signature */
+#define E_SS_FSSHA_MISMATCH                                            (0xB01) /*Could NOT produce expected Target SHA for file */
+#define E_SS_IMGBADDELTA                                                       (0xB02) /* Failed to parse attribute data */
+#define E_SS_IMGBADPATCH                                                       (0xB03) /*Image Patch Does NOT match the signature */
+#define E_SS_IMGSHA_MISMATCH                                           (0xB04) /*Could NOT produce expected Target SHA for Image */
+#define E_SS_SHAPRASE_FAILED                                           (0xB05) /*Could NOT Parse SHA */
+
+/* SYSTEM errors */
+
+/* Resources errors */
+#define  E_SS_NOT_ENOUGH_RAM                                           (0xC00) /**< given RAM is not enough  */
+#define  E_SS_BAD_RAM                                                          (0xC01) /**< does not behave as RAM   */
+#define  E_SS_MALLOC_ERROR                                                     (0xC02) /**< memory allocation failure */
+
+/* Image update Error codes */
+#define  E_SS_WRITE_ERROR                                                      (0xC03) /**< flash writing failure    */
+#define  E_SS_ERASE_ERROR                                                      (0xC04) /**< flash erasing failure    */
+#define  E_SS_READ_ERROR                                                       (0xC05) /**< flash reading failure    */
+
+/*File System Error codes */
+#define  E_SS_OPENFILE_ONLYR                                           (0xC06)         /**< file does not exist      */
+#define  E_SS_OPENFILE_WRITE                                                   (0xC07)         /**< RO or no access rights   */
+#define  E_SS_DELETEFILE_NOFILE                                                (0xC08) /**< file does not exist      */
+#define  E_SS_DELETEFILE                                                               (0xC09) /**< no access rights         */
+#define  E_SS_RESIZEFILE                                                               (0xC10) /**< cannot resize file       */
+#define  E_SS_READFILE_SIZE                                                    (0xC11) /**< cannot read specified size*/
+#define  E_SS_CLOSEFILE_ERROR                                          (0xC12) /**< cannot close file handle */
+#define  E_SS_FAILED_CREATING_SYMBOLIC_LINK            (0xC13)         /**< Failed creating symbolic link */
+#define  E_SS_CANNOT_CREATE_DIRECTORY                          (0xC14)         /**< Failed creating directory */
+#define  E_SS_FSMEMORYERROR                                            (0xC15) /* Failed to allocate Memory */
+#define  E_SS_FILENAMELENERROR                                         (0xC16) /* Failed to serve filename length */
+
+/*Engine errors */
+
+#define E_SS_NOT_ENOUGH_RAM_FOR_OPERATION2             (0xD00) /**< There is not enough RAM to run with operation=2 (Dry update) */
+#define E_SS_DELTA_FILE_TOO_LONG                                       (0xD01) /**< Delta file too long - corrupted */
+#define E_SS_ERROR_IN_DELETES_SIG                                      (0xD02) /**< Mismatch between deletes sig and delta deletes buffers signature */
+#define E_SS_DELTA_IS_CORRUPT                                          (0xD03) /**< Delta file is corrupt: signature mismatch between delta header signature and calculated signature */
+#define E_SS_SOURCE_FILE_SIG_MISMATCH                          (0xD04) /**< File signature does not match signature */
+#define E_SS_TARGET_SIG_MISMATCH                                       (0xD05) /**< Signature for the target buffer does not match the one stored in the delta file */
+#define E_SS_INVALID_BACKUP                                                    (0xD06) /**< Too many dirty buffers */
+#define E_SS_UPI_VERSION_MISMATCH                                      (0xD07) /**< UPI version mismatch between UPI and delta */
+#define E_SS_PARTITION_NAME_NOT_FOUND                          (0xD08) /**< Partition name is different in delta and in UPI data */
+#define E_SS_NO_SPACE_LEFT                                                     (0xD09) /**< There is not enough flash to update or install the files */
+#define E_SS_INVALID_DP_HEADER                                         (0xD10)  /**< Deployment Package header is invalid */
+#define E_SS_INVALID_DP_WRONG_SIGNATURE                        (0xD11)  /**< Deployment Package signature is invalid */
+#define E_SS_FSFAILEDTOBACKUPPATCHINFO                         (0xD12) /* Failed to create backup file to write Delta patch info data */
+#define E_SS_FSUPDATEFAILED                                                    (0xD13) /*FS Failed during UPGRADE */
+#define E_SS_FSSRCBACKUPFAILED                                         (0xD14) /*Failed to backup FS */
+#define E_SS_FSSRCCURRUPTED                                                    (0xD15) /*Could NOT update FS as SRC seems to be corrupted */
+#define E_SS_IMGUPDATEFAILED                                           (0xD16) /*IMG Failed during UPGRADE */
+#define E_SS_IMGSRCBACKUPFAILED                                        (0xD17) /*Failed to backup IMG */
+#define E_SS_IMGRECOVERYWRITEFAILED                            (0xD18) /*Failed to write patched Recovery IMG */
+#define E_SS_IMGSRCCURRUPTED                                           (0xD19) /*Could NOT update IMG as SRC seems to be corrupted */
+#define E_SS_IMGFLASHWRITEFAIL                                         (0xD20) /*Failed to write Patched IMG data to flash */
+#define E_SS_PATCHFILE_DEL_ERROR                                               (0xD21) /*Failed to Clear/Del Patched SRC file */
+
+#endif
diff --git a/ss_engine/SS_Engine_Update.h b/ss_engine/SS_Engine_Update.h
new file mode 100755 (executable)
index 0000000..72d6f8e
--- /dev/null
@@ -0,0 +1,551 @@
+/*
+ * libtota
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*!
+ *******************************************************************************
+ * \file       SS_Engine_Update.h
+ *
+ * \brief      UPI Update API
+ *******************************************************************************
+ */
+#ifndef _SS_Engine_UPDATE_H
+#define _SS_Engine_UPDATE_H
+/**
+ * Partition type
+ */
+typedef enum {
+    PT_FOTA,                    //!< Image
+    PT_FS                       //!< File system
+} PartitionType;
+
+/**
+ * In-place update
+ */
+typedef enum {
+    UT_SELF_UPDATE = 0,         //!< Don't update in place
+    UT_NO_SELF_UPDATE,          //!< Update in place
+    UT_PRIVATE,                 //!< For internal usage
+} UpdateType;
+
+typedef unsigned int SS_UINT32;
+
+typedef enum {
+    FT_REGULAR_FILE,
+    FT_SYMBOLIC_LINK,
+    FT_FOLDER,
+    FT_MISSING
+} enumFileType;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Partition data
+ */
+    typedef struct tagCustomerPartitionData {
+/**
+ * Partition name. Maximum 256 characters. Must match exactly the name used in
+ * the UPG.
+ */
+        const char *partition_name;
+
+/**
+ * Partition flash address. Address must be sector aligned. Relevant only for
+ * R/O partitions of any type; for R/W FS updates, set to 0.
+ */
+        SS_UINT32 rom_start_address;
+
+/**
+ * Mount point or drive letter containing the partition. Maximum size is 256
+ * characters. Relevant only for R/W FS updates; otherwise set to 0.<p>
+ */
+        const char *mount_point;
+
+/**
+ * Source path (input) partition if the update will not be done in place.
+ * Maximum 25 characters. For Image updates, set to 0.
+ */
+        const char *strSourcePath;
+
+/**
+ * Target path (output) partition if the update will not be done in place.
+ * Maximum 25 characters. For Image updates, set to 0.
+ */
+        const char *strTargetPath;
+
+/**
+ * Internal use; leave null.
+ */
+        const void *priv;
+
+/**
+ * Partition type, a \ref PartitionType value.
+ */
+        PartitionType partition_type;
+
+    } CustomerPartitionData;
+
+/**
+ * Device data
+ */
+    typedef struct tag_Engine_DeviceData {
+/**
+ * UPI Mode. One of:
+ * \li 0: scout and update. Verify that the update is applicable to the device
+ * by comparing protocol versions and then install (or continue to install after
+ * an interruption) the update.
+ * \li 1: Scout only. Verify that the update is applicable to the device by
+ * comparing protocol versions.
+ * \li 2: Dry-run. Verify that the update is applicable to the device by
+ * comparing protocol versions and then "run" the update without actually
+ * changing any files on the device. This verification mode checks that files
+ * can be uncompressed and updates can be applied. It does not verify the
+ * results after the updates are applied.
+ * \li 3: Update only. Install (or continue to install after an interruption)
+ * the update without verification. Applying an update will fail
+ * catastrophically if performed on an incompatible firmware. This mode cannot
+ * be chosen if the update was created with an "optional file".
+ * \li 6: Verify post-install: Verify the file contents of the updated firmware
+ * (post-installation). This mode applies only to FS updates. Does not verify
+ * attributes or empty folders.
+ */
+        SS_UINT32 ui32Operation;
+
+/**
+ * Pre-allocated RAM space.
+ */
+        unsigned char *pRam;
+
+/**
+ * Size of pRam in bytes.
+ */
+        SS_UINT32 ui32RamSize;
+
+/**
+ * Number of backup sectors listed in pBufferBlocks.
+ */
+        SS_UINT32 ui32NumberOfBuffers;
+
+/**
+ * List of backup buffer sector addresses. Addresses must be
+ * sector-aligned. If the update consists only of an Image update, pBufferBlocks
+ * is used instead of pTempPath. Otherwise pBufferBlocks should be set to 0
+ * and pTempPath (see below) will be used to allocate the backup as a file.
+ */
+        SS_UINT32 *pBufferBlocks;
+
+/**
+ * Number of partitions listed in pFirstPartitionData.
+ */
+        SS_UINT32 ui32NumberOfPartitions;
+
+/**
+ * List of partition data structures, a list of \ref CustomerPartitionData
+ * values.
+ */
+        CustomerPartitionData *pFirstPartitionData;
+
+/**
+ * Path to temporary storage. If the update contains FS updates, pTempPath is
+ * used instead of pBufferBlocks. If the update consists only of Image updates,
+ * set pTempPath to 0. The path size is limited to 256 characters.
+ * The path must be under the mount point of a partition containing a R/W file system.<p>
+ * The maximum file size will not exceed the sector size x number of backup
+ * sectors.
+ */
+        char *pTempPath;
+
+/**
+ * Whether or not there is a UPI self-update in the update, an
+ * \ref UpdateType value.
+ */
+        UpdateType enmUpdateType;
+
+/**
+ * List of customer-defined installer types. For an Image only update, use a
+ * single element array with the value set to 0.<p>
+ *
+ * For the list of installer types, see the SWM Center documentation.
+ */
+        SS_UINT32 *pComponentInstallerTypes;
+
+/**
+ * Number of installer types in pComponentInstallerTypes.
+ */
+        SS_UINT32 ui32ComponentInstallerTypesNum;
+
+/**
+ * Update flags, if any. Used by the SWM Center.<p>
+ *
+ * For a non-SWM Center update, set to 0xFFFFFFFF.
+ * For the list of update flags, see the SWM Center documentation.
+ */
+        SS_UINT32 ui32ComponentUpdateFlags;
+
+/**
+ * Update number within the DP.
+ */
+        SS_UINT32 ui32OrdinalToUpdate;
+
+/**
+ * Deprecated.
+ */
+        char *pDeltaPath;
+
+/**
+ * Additional data to pass to APIs, if any. Set to null if not used.
+ */
+        void *pbUserData;
+    } Engine_DeviceData;
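+
+/*
+ * Illustrative only: a minimal Engine_DeviceData setup for an image-only
+ * dry run (operation mode 2) might look like the sketch below. Partition
+ * name, RAM buffer and sizes are hypothetical placeholders.
+ *
+ *     CustomerPartitionData part = { "rootfs", 0, 0, 0, 0, 0, PT_FOTA };
+ *     SS_UINT32 inst_types[1] = { 0 };          // image-only update
+ *     Engine_DeviceData dd = { 0 };
+ *     dd.ui32Operation = 2;                     // dry-run
+ *     dd.pRam = ram;                            // caller-allocated buffer
+ *     dd.ui32RamSize = ram_size;
+ *     dd.ui32NumberOfPartitions = 1;
+ *     dd.pFirstPartitionData = &part;
+ *     dd.pComponentInstallerTypes = inst_types;
+ *     dd.ui32ComponentInstallerTypesNum = 1;
+ *     dd.ui32ComponentUpdateFlags = 0xFFFFFFFF; // non SWM Center update
+ *     long rc = SS_Engine_Update(&dd);
+ */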
+
+/**
+ *******************************************************************************
+ * Validate the DP.
+ *
+ * \param      pbUserData                      Optional opaque data-structure to pass to IPL
+ *                                                             functions
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    long SS_CheckDPStructure(void *pbUserData);
+
+/**
+ *******************************************************************************
+ * Get the number of updates in a DP that match the \a installer_type and
+ * \a component_flags values.
+ *
+ * \param      pbUserData                      Optional opaque data-structure to pass to IPL
+ *                                                             functions
+ * \param      num_deltas                      (out) Number of updates
+ * \param      installer_types         List of installer types; for an Image only
+ *                                                             update, this is a one element array with a value
+ *                                                             of 0
+ * \param      installer_types_num     Size of \a installer_types
+ * \param      component_flags         Update flags, if any. Used by the SWM Center.
+ *                                                             For the list of component flags, see the SWM
+ *                                                             Center documentation.
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    long SS_GetNumberOfDeltas(void *pbUserData, SS_UINT32 * num_deltas, SS_UINT32 * installer_types,
+                              SS_UINT32 installer_types_num, SS_UINT32 component_flags);
+
+/**
+ *******************************************************************************
+ * Get the offset in bytes of the specified update in the DP. Consider only
+ * updates that match the \a installer_type and \a component_flags values.
+ *
+ * \param      pbUserData                      Optional opaque data-structure to pass to IPL
+ *                                                             functions
+ * \param      delta_ordinal           Update number
+ * \param      offset                          (out) Bytes from start of DP
+ * \param      size                            (out) Size of update, in bytes
+ * \param      installer_types         List of installer types; for an Image only
+ *                                                             update, this is a one element array with a value
+ *                                                             of 0
+ * \param      installer_types_num     Size of \a installer_types
+ * \param      component_flags         Update flags, if any. Used by the SWM Center.
+ *                                                             For the list of component flags, see the SWM
+ *                                                             Center documentation.
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    long SS_GetSignedDeltaOffset(void *pbUserData, SS_UINT32 delta_ordinal, SS_UINT32 * offset, SS_UINT32 * size,
+                                 SS_UINT32 * installer_types, SS_UINT32 installer_types_num, SS_UINT32 component_flags);
+
+/**
+ *******************************************************************************
+ * Get the offset in bytes of the specified update in the DP. Consider only
+ * updates that match the \a installer_type and \a component_flags values.
+ *
+ * \param      pbUserData                      Optional opaque data-structure to pass to IPL
+ *                                                             functions
+ * \param      delta_ordinal           Update number
+ * \param      offset                          (out) Bytes from start of DP
+ * \param      size                            (out) Size of update, in bytes
+ * \param      installer_types         List of installer types; for an Image only
+ *                                                             update, this is a one element array with a value
+ *                                                             of 0
+ * \param      installer_types_num     Size of \a installer_types
+ * \param      component_flags         Update flags, if any. Used by the SWM Center.
+ *                                                             For the list of component flags, see the SWM
+ *                                                             Center documentation.
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    long SS_GetUnsignedDeltaOffset(void *pbUserData, SS_UINT32 delta_ordinal, SS_UINT32 * offset, SS_UINT32 * size,
+                                   SS_UINT32 * installer_types, SS_UINT32 installer_types_num,
+                                   SS_UINT32 component_flags);
+
+/**
+ *******************************************************************************
+ * Get signed and unsigned update offsets in the DP. Signed/unsigned offsets
+ * are not described in the IG, so they are defined here.
+ *
+ * \param      pbUserData                      Optional data-structure, if required
+ * \param      signed_delta_offset     (out) Signed update offset in DP
+ * \param      delta_offset            (out) Unsigned update offset in DP
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    long SS_GetRBDeltaOffset(void *pbUserData, SS_UINT32 signed_delta_offset, SS_UINT32 * delta_offset);
+
+/**
+ *******************************************************************************
+ * Install an update.<p>
+ *
+ * The update is packaged within a DP, which may contain multiple updates as
+ * well as other components. You must call this function once for each
+ * update.<p>
+ *
+ * Depending on the UPI mode set, this function will scout, scout and update,
+ * update, perform a dry-run, or verify the update post-update.
+ *
+ * \param      pDeviceData             The device data, an \ref Engine_DeviceData value
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    long SS_Engine_Update(Engine_DeviceData * pDeviceData);
+
+/**
+ *******************************************************************************
+ * Get the highest minimum required RAM of all updates that match the
+ * \a installer_type and \a component_flags values in \a pDeviceData. <br>
+ * The returned amount of memory must be pre-allocated and passed to
+ * \ref SS_Engine_Update through \ref Engine_DeviceData.
+ *
+ * \param      ui32pRamUse             (out) Minimum required RAM, in bytes
+ * \param      pDeviceData             The device data, an \ref Engine_DeviceData value
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    long SS_Engine_GetDpRamUse(SS_UINT32 * ui32pRamUse, Engine_DeviceData * pDeviceData);
+
+/**
+ *******************************************************************************
+ * Get protocol version of an update in the DP.<p>
+ *
+ * This returns the version of the first update that matches the
+ * \a installer_type and \a component_flags values. The returned protocol
+ * version must match the UPI protocol version for the update to proceed.
+ *
+ * \param      pbUserData                      Optional opaque data-structure to pass to IPL
+ *                                                             functions
+ * \param      pbyRAM                          (out) Buffer to contain part of the update from
+ *                                                             which the protocol version is extracted
+ * \param      dwRAMSize                       Size of \a pbyRAM; minimum 0x100 bytes
+ * \param      installer_types         List of installer types; for an Image only
+ *                                                             update, this is a one element array with a value
+ *                                                             of 0
+ * \param      installer_types_num     Size of \a installer_types
+ * \param      component_flags         Update flags, if any. Used by the SWM Center.
+ *                                                             For the list of component flags, see the SWM
+ *                                                             Center documentation.
+ * \param      dpProtocolVersion       (out) Buffer to contain DP protocol version
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    long SS_GetDPProtocolVersion(void *pbUserData, void *pbyRAM, SS_UINT32 dwRAMSize,
+                                 SS_UINT32 * installer_types, SS_UINT32 installer_types_num, SS_UINT32 component_flags,
+                                 SS_UINT32 * dpProtocolVersion);
+
+/**
+ *******************************************************************************
+ * Get scout protocol version of an update in the DP.<p>
+ *
+ * This returns the version of the first update that matches the
+ * \a installer_type and \a component_flags values. The returned scout protocol
+ * version must match the UPI scout protocol version for the update to proceed.
+ *
+ * \param      pbUserData                              Optional opaque data-structure to pass to IPL
+ *                                                                     functions
+ * \param      pbyRAM                                  (out) Buffer to contain part of the update
+ *                                                                     from which the protocol version is extracted
+ * \param      dwRAMSize                               Size of \a pbyRAM; minimum 0x100 bytes
+ * \param      installer_types                 List of installer types; for an Image only
+ *                                                                     update, this is a one element array with a
+ *                                                                     value of 0
+ * \param      installer_types_num             Size of \a installer_types
+ * \param      component_flags                 Update flags, if any. Used by the SWM Center.
+ *                                                                     For the list of component flags, see the SWM
+ *                                                                     Center documentation.
+ * \param      dpScoutProtocolVersion  (out) Buffer to contain DP scout protocol
+ *                                                                     version
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    long SS_GetDPScoutProtocolVersion(void *pbUserData, void *pbyRAM, SS_UINT32 dwRAMSize,
+                                      SS_UINT32 * installer_types, SS_UINT32 installer_types_num,
+                                      SS_UINT32 component_flags, SS_UINT32 * dpScoutProtocolVersion);
+
+/**
+ *******************************************************************************
+ * Get UPI version.<p>
+ *
+ * This is not the protocol version number but the vRapid Mobile version number.
+ *
+ * \param      pbVersion       (out) Buffer to contain version
+ *
+ * \return     S_SS_SUCCESS
+ *******************************************************************************
+ */
+    long SS_GetUPIVersion(unsigned char *pbVersion);
+
+/**
+ *******************************************************************************
+ * Get UPI protocol version.<p>
+ *
+ * Do not perform the update if this version does not match the DP protocol
+ * version returned from \ref SS_GetDPProtocolVersion.
+ *
+ * \return     Protocol version, without periods (.) and with the revision number
+ *                     replaced by 0. For example, if the protocol version is 5.0.14.33,
+ *                     this returns 50140.
+ *******************************************************************************
+ */
+    SS_UINT32 SS_GetUPIProtocolVersion(void);
+
+/**
+ *******************************************************************************
+ * Get UPI scout protocol version.<p>
+ *
+ * Do not perform the update if this version does not match the DP protocol
+ * version returned from \ref SS_GetDPScoutProtocolVersion.
+ *
+ * \return     Scout protocol version, without periods (.) and with the revision
+ *                     number replaced by 0. For example, if the scout protocol version is
+ *                     5.0.14.33, this returns 50140.
+ *******************************************************************************
+ */
+    SS_UINT32 SS_GetUPIScoutProtocolVersion(void);
+
+/**
+ *******************************************************************************
+ * Get protocol version of an update.<p>
+ *
+ * \param      pbUserData              Optional opaque data-structure to pass to IPL
+ *                                                     functions
+ * \param      pbyRAM                  Pre-allocated RAM space
+ * \param      dwRAMSize               Size of \a pbyRAM, in bytes
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+
+    SS_UINT32 SS_GetDeltaProtocolVersion(void *pbUserData, void *pbyRAM, SS_UINT32 dwRAMSize);
+/**
+ *******************************************************************************
+ * Get scout protocol version of an update.<p>
+ *
+ * \param      pbUserData              Optional opaque data-structure to pass to IPL
+ *                                                     functions
+ * \param      pbyRAM                  Pre-allocated RAM space
+ * \param      dwRAMSize               Size of \a pbyRAM, in bytes
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+
+    SS_UINT32 SS_GetDeltaScoutProtocolVersion(void *pbUserData, void *pbyRAM, SS_UINT32 dwRAMSize);     /* User data passed to all porting routines, pointer for the ram to use, size of the ram */
+
+/**
+ *******************************************************************************
+ * Reset the watchdog timer.<p>
+ *
+ * This is a Porting Layer function that must be implemented.
+ *
+ * Called periodically to ensure that the bootstrap doesn't believe the boot to
+ * be stalled during an update.
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    long SS_ResetTimerA(void);
+
+/**
+ *******************************************************************************
+ * Display progress information to the end-user.<p>
+ *
+ * This is a Porting Layer function that must be implemented.
+ *
+ * Actually, you can do whatever you want with the progress information.
+ *
+ * \param      pbUserData      Optional opaque data-structure to pass to IPL
+ *                                             functions
+ * \param      uPercent        Update percentage
+ *
+ * \return     None
+ *******************************************************************************
+ */
+    void SS_Progress(void *pbUserData, SS_UINT32 uPercent);
+
+/**
+ *******************************************************************************
+ * Print a debug message formatted using a printf-like string.<p>
+ *
+ * This is a Porting Layer function that must be implemented.
+ *
+ * Supported tags:
+ * \li %x: Hex number
+ * \li %0x: Hex number with leading zeroes
+ * \li %u: Unsigned decimal
+ * \li %s: Null-terminated string
+ * \li %d: Signed decimal integer
+ *
+ * \param      pbUserData      Optional opaque data-structure to pass to IPL
+ *                                             functions
+ * \param      aFormat         Printf-like format string
+ * \param      ...                     Items to insert in \a aFormat
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    SS_UINT32 SS_Trace(void *pbUserData, const char *aFormat, ...);
+
+/**
+ *******************************************************************************
+ * Get update from DP.<p>
+ *
+ * This is a Porting Layer function that must be implemented.
+ * A default implementation of this function is provided.
+ *
+ * \param      pbUserData                              Optional opaque data-structure to pass to IPL
+ *                                                                     functions
+ * \param      pbBuffer                                (out) Buffer to store the update
+ * \param      dwStartAddressOffset    Update offset in DP
+ * \param      dwSize                                  Size of \a pbBuffer
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    long SS_GetDelta(void *pbUserData, unsigned char *pbBuffer, SS_UINT32 dwStartAddressOffset, SS_UINT32 dwSize);
+
+#ifdef __cplusplus
+}
+#endif
+#endif                          // _SS_Engine_UPDATE_H
diff --git a/ss_engine/SS_FSUpdate.h b/ss_engine/SS_FSUpdate.h
new file mode 100755 (executable)
index 0000000..7b5d1a6
--- /dev/null
@@ -0,0 +1,349 @@
+/*
+ * libtota
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*!
+ *******************************************************************************
+ * \file       SS_FSUpdate.h
+ *
+ * \brief      UPI FS Update API
+ *******************************************************************************
+ */
+#ifndef _SS_FILESYSTEM_UPDATE_H_
+#define _SS_FILESYSTEM_UPDATE_H_
+
+#include "SS_Engine_Update.h"
+#include "SS_Engine_Errors.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif                          /* __cplusplus */
+
+/*!
+ * File access modes
+ */
+    typedef enum tag_RW_TYPE {
+        ONLY_R,                 //!< Read-only
+        ONLY_W,                 //!< Write-only
+        BOTH_RW                 //!< Read-write
+    } E_RW_TYPE;
+
+/*!
+ *******************************************************************************
+ * Copy file.<p>
+ *
+ * Must create the path to the new file as required. Must overwrite any existing
+ * contents of the destination file. Must check whether the source file is a
+ * symbolic link; if it is, create a new symbolic link instead using \ref SS_Link.
+ *
+ * \param      pbUserData      Optional opaque data-structure to pass to IPL
+ *                                             functions
+ * \param      strFromPath     Path to old file
+ * \param      strToPath       Path to new file
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+
+    long SS_CopyFile(void *pbUserData, const char *strFromPath, const char *strToPath);
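+
+/*
+ * Illustrative POSIX sketch only: it shows the symbolic-link check delegating to
+ * \ref SS_Link and a plain read/write copy. It does not create missing parent
+ * directories (which the contract above requires), and it uses E_SS_FAILURE as a
+ * generic error code; both are assumptions of this sketch.
+ * \code
+ * #include <fcntl.h>
+ * #include <unistd.h>
+ * #include <sys/stat.h>
+ *
+ * long SS_CopyFile(void *pbUserData, const char *strFromPath, const char *strToPath)
+ * {
+ *     struct stat st;
+ *     char buf[4096];
+ *     ssize_t n = 0;
+ *     int in, out;
+ *
+ *     if (lstat(strFromPath, &st) != 0)
+ *         return E_SS_FAILURE;
+ *     if (S_ISLNK(st.st_mode)) {
+ *         char target[4096] = { 0 };
+ *         if (readlink(strFromPath, target, sizeof(target) - 1) < 0)
+ *             return E_SS_FAILURE;
+ *         return SS_Link(pbUserData, (char *)strToPath, target);
+ *     }
+ *     in = open(strFromPath, O_RDONLY);
+ *     if (in < 0)
+ *         return E_SS_FAILURE;
+ *     out = open(strToPath, O_WRONLY | O_CREAT | O_TRUNC, st.st_mode);
+ *     if (out < 0) {
+ *         close(in);
+ *         return E_SS_FAILURE;
+ *     }
+ *     while ((n = read(in, buf, sizeof(buf))) > 0)
+ *         if (write(out, buf, n) != n)
+ *             break;
+ *     close(in);
+ *     close(out);
+ *     return (n == 0) ? S_SS_SUCCESS : E_SS_FAILURE;
+ * }
+ * \endcode
+ */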
+
+/*!
+ *******************************************************************************
+ * Move (rename) file.<p>
+ *
+ * \param      pbUserData      Optional opaque data-structure to pass to IPL
+ *                                             functions
+ * \param      strFromPath     Path to old file location
+ * \param      strToPath       Path to new file location
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+
+    long SS_MoveFile(void *pbUserData, const char *strFromPath, const char *strToPath);
+
+/*!
+ *******************************************************************************
+ * Delete file.<p>
+ *
+ * Must return success if the file is deleted or didn't exist.
+ *
+ * \param      pbUserData      Optional opaque data-structure to pass to IPL
+ *                                             functions
+ * \param      strPath         Path to file
+ *
+ * \return     S_SS_SUCCESS on success, E_SS_DELETEFILE if the file cannot be
+ *                     deleted, or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+
+    long SS_DeleteFile(void *pbUserData, const char *strPath);
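+
+/*
+ * Illustrative sketch only: unlink() with ENOENT treated as success, per the
+ * contract above. Any handling beyond a plain unlink() is not shown.
+ * \code
+ * #include <errno.h>
+ * #include <unistd.h>
+ *
+ * long SS_DeleteFile(void *pbUserData, const char *strPath)
+ * {
+ *     (void)pbUserData;
+ *     if (unlink(strPath) == 0 || errno == ENOENT)
+ *         return S_SS_SUCCESS;        // an already absent file also counts as success
+ *     return E_SS_DELETEFILE;
+ * }
+ * \endcode
+ */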
+
+/*!
+ *******************************************************************************
+ * Delete folder.<p>
+ *
+ * Must return success if the folder is now deleted or didn't exist.
+ *
+ * \param      pbUserData      Optional opaque data-structure to pass to IPL
+ *                                             functions
+ * \param      strPath         Path to folder
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+
+    long SS_DeleteFolder(void *pbUserData, const char *strPath);
+
+/*!
+ *******************************************************************************
+ * Create folder.<p>
+ *
+ * Must return success if the folder is created or already exists. It is
+ * recommended that the new folder's attributes are a copy of its parent's
+ * attributes.
+ *
+ * \param      pbUserData      Optional opaque data-structure to pass to IPL
+ *                                             functions
+ * \param      strPath         Path to folder
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+
+    long SS_CreateFolder(void *pbUserData, const char *strPath);
+
+/*!
+ *******************************************************************************
+ * Open file.<p>
+ *
+ * Must create the file (and the path to the file) if it doesn't exist. Must
+ * open in binary mode.
+ *
+ * \param      pbUserData      Optional opaque data-structure to pass to IPL
+ *                                             functions
+ * \param      strPath         Path to file
+ * \param      wFlag           Read/write mode, an \ref E_RW_TYPE value
+ * \param      pwHandle        (out) File handle
+ *
+ * \return     S_SS_SUCCESS on success, E_SS_OPENFILE_ONLYR if attempting to open a
+ *                     non-existent file in R/O mode, E_SS_OPENFILE_WRITE if there is an
+ *                     error opening a file for writing, or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+
+    long SS_OpenFile(void *pbUserData, const char *strPath, E_RW_TYPE wFlag, long *pwHandle);
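+
+/*
+ * Illustrative sketch only: maps \ref E_RW_TYPE onto open(2) flags. It does not
+ * create missing parent directories (which the contract above requires), and
+ * the 0644 creation mode is an assumption of this sketch.
+ * \code
+ * #include <fcntl.h>
+ *
+ * long SS_OpenFile(void *pbUserData, const char *strPath, E_RW_TYPE wFlag, long *pwHandle)
+ * {
+ *     int flags, fd;
+ *     (void)pbUserData;
+ *
+ *     switch (wFlag) {
+ *     case ONLY_R:
+ *         flags = O_RDONLY;
+ *         break;
+ *     case ONLY_W:
+ *         flags = O_WRONLY | O_CREAT;
+ *         break;
+ *     default:
+ *         flags = O_RDWR | O_CREAT;
+ *         break;
+ *     }
+ *     fd = open(strPath, flags, 0644);
+ *     if (fd < 0)
+ *         return (wFlag == ONLY_R) ? E_SS_OPENFILE_ONLYR : E_SS_OPENFILE_WRITE;
+ *     *pwHandle = (long)fd;
+ *     return S_SS_SUCCESS;
+ * }
+ * \endcode
+ */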
+
+/*!
+ *******************************************************************************
+ * Set file size.
+ *
+ * \param      pbUserData      Optional opaque data-structure to pass to IPL
+ *                                             functions
+ * \param      wHandle         File handle
+ * \param      dwSize          New file size
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+
+    long SS_ResizeFile(void *pbUserData, long wHandle, SS_UINT32 dwSize);
+
+/*!
+ *******************************************************************************
+ * Close file.
+ *
+ * \param      pbUserData      Optional opaque data-structure to pass to IPL
+ *                                             functions
+ * \param      wHandle         File handle
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+
+    long SS_CloseFile(void *pbUserData, long wHandle);
+
+/*!
+ *******************************************************************************
+ * Write data to a specified position within a file.<p>
+ *
+ * Must return success if the block is written or at least resides in
+ * non-volatile memory. Use \ref SS_SyncFile to commit the file to storage.
+ *
+ * \param      pbUserData      Optional opaque data-structure to pass to IPL
+ *                                             functions
+ * \param      wHandle         File handle
+ * \param      dwPosition      Position within the file to which to write
+ * \param      pbBuffer        Data to write
+ * \param      dwSize          Size of \a pbBuffer
+ *
+ * \return     S_SS_SUCCESS on success, or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+
+    long SS_WriteFile(void *pbUserData, long wHandle, SS_UINT32 dwPosition, unsigned char *pbBuffer, SS_UINT32 dwSize);
+
+/*!
+ *******************************************************************************
+ * Commit file to storage.<p>
+ *
+ * Generally called after \ref SS_WriteFile.
+ *
+ * \param      pbUserData      Optional opaque data-structure to pass to IPL
+ *                                             functions
+ * \param      wHandle         File handle
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    long SS_SyncFile(void *pbUserData, long wHandle);
+
+/*!
+ *******************************************************************************
+ * Read data from a specified position within a file.
+ * If fewer bytes than requested are available in the specified position, this
+ * function should read up to the end of file and return S_SS_SUCCESS.
+ *
+ * \param      pbUserData      Optional opaque data-structure to pass to IPL
+ *                                             functions
+ * \param      wHandle         File handle
+ * \param      dwPosition      Position within the file from which to read
+ * \param      pbBuffer        Buffer to contain data
+ * \param      dwSize          Size of data to read
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+
+    long SS_ReadFile(void *pbUserData, long wHandle, SS_UINT32 dwPosition, unsigned char *pbBuffer, SS_UINT32 dwSize);
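+
+/*
+ * Illustrative sketch only: a positioned read where a short read near the end
+ * of the file still returns success, as required above. E_SS_FAILURE as the
+ * generic error code is an assumption of this sketch.
+ * \code
+ * #include <unistd.h>
+ *
+ * long SS_ReadFile(void *pbUserData, long wHandle, SS_UINT32 dwPosition,
+ *                  unsigned char *pbBuffer, SS_UINT32 dwSize)
+ * {
+ *     ssize_t n = pread((int)wHandle, pbBuffer, dwSize, (off_t)dwPosition);
+ *     (void)pbUserData;
+ *     return (n >= 0) ? S_SS_SUCCESS : E_SS_FAILURE;
+ * }
+ * \endcode
+ */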
+
+/*!
+ *******************************************************************************
+ * Get file size.
+ *
+ * \param      pbUserData      Optional opaque data-structure to pass to IPL
+ *                                             functions
+ * \param      wHandle         File handle
+ *
+ * \return     File size, -1 if file not found, or &lt; -1 on error
+ *******************************************************************************
+ */
+    long SS_GetFileSize(void *pbUserData, long wHandle);
+
+/*!
+ *******************************************************************************
+ * Get free space of a mounted file system.
+ *
+ * \param      pbUserData                              Optional opaque data-structure to pass to
+ *                                                                     IPL functions
+ * \param      path                                Name of any directory within the mounted
+ *                                                                     file system
+ * \param      available_flash_size    (out) Available space
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    long SS_GetAvailableFreeSpace(void *pbUserData, const char *path, SS_UINT32 * available_flash_size);
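+
+/*
+ * Illustrative sketch only, built on statvfs(). Note that the product of
+ * f_bavail and f_bsize can overflow SS_UINT32 on large file systems; clamping
+ * is left out here, and E_SS_FAILURE is an assumed error code.
+ * \code
+ * #include <sys/statvfs.h>
+ *
+ * long SS_GetAvailableFreeSpace(void *pbUserData, const char *path,
+ *                               SS_UINT32 *available_flash_size)
+ * {
+ *     struct statvfs vfs;
+ *     (void)pbUserData;
+ *     if (statvfs(path, &vfs) != 0)
+ *         return E_SS_FAILURE;
+ *     *available_flash_size = (SS_UINT32)(vfs.f_bavail * vfs.f_bsize);
+ *     return S_SS_SUCCESS;
+ * }
+ * \endcode
+ */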
+
+/*!
+ *******************************************************************************
+ * Remove symbolic link.<p>
+ *
+ * Must return an error if the file does not exist or is not a symbolic link.<p>
+ *
+ * If your platform does not support symbolic links, you do not need to
+ * implement this.
+ *
+ * \param      pbUserData      Optional opaque data-structure to pass to IPL
+ *                                             functions
+ * \param      pLinkName       Link
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    long SS_Unlink(void *pbUserData, char *pLinkName);
+
+/*!
+ *******************************************************************************
+ * Create symbolic link.<p>
+ *
+ * Must create the path to the link as required. If a file already exists at the
+ * named location, must return success if the file is a symbolic link or an
+ * error if the file is a regular file. The non-existence of the target of the
+ * link must NOT cause an error.<p>
+ *
+ * If your platform does not support symbolic links, you do not need to
+ * implement this.
+ *
+ * \param      pbUserData                      Optional opaque data-structure to pass to IPL
+ *                                                             functions
+ * \param      pLinkName                       Path to the link file to create
+ * \param      pReferenceFileName      Path to which to point the link
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    long SS_Link(void *pbUserData, char *pLinkName, char *pReferenceFileName);
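+
+/*
+ * Illustrative sketch only: symlink() with the "already a symbolic link" case
+ * treated as success, per the contract above. Creation of missing parent
+ * directories is not shown, and E_SS_FAILURE is an assumed error code.
+ * \code
+ * #include <errno.h>
+ * #include <unistd.h>
+ * #include <sys/stat.h>
+ *
+ * long SS_Link(void *pbUserData, char *pLinkName, char *pReferenceFileName)
+ * {
+ *     struct stat st;
+ *     (void)pbUserData;
+ *     if (symlink(pReferenceFileName, pLinkName) == 0)
+ *         return S_SS_SUCCESS;
+ *     if (errno == EEXIST && lstat(pLinkName, &st) == 0 && S_ISLNK(st.st_mode))
+ *         return S_SS_SUCCESS;        // an existing symbolic link is acceptable
+ *     return E_SS_FAILURE;
+ * }
+ * \endcode
+ */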
+
+/*!
+ *******************************************************************************
+ * Set file attributes.<p>
+ *
+ * The file attributes token (\a ui8pAttribs) is defined at generation time.
+ * If attributes are not defined explicitly, they are given the following,
+ * OS-dependent values:
+ * \li Windows: _redbend_ro_ for R/O files, _redbend_rw_ for R/W files
+ * \li Linux: _redbend_oooooo:xxxx:yyyy indicating the file mode, uid, and gid
+ *             (uid and gid use capitalized hex digits as required)
+ *
+ * \param      pbUserData              Optional opaque data-structure to pass to IPL
+ *                                                     functions
+ * \param      ui16pFilePath   File path
+ * \param      ui32AttribSize  Size of \a ui8pAttribs
+ * \param      ui8pAttribs             Attributes to set
+ *
+ * \return     S_SS_SUCCESS on success or &lt; 0 on error
+ *******************************************************************************
+ */
+
+    long SS_SetFileAttributes(const char *ui16pFilePath,
+                              const SS_UINT32 ui32AttribSize, const unsigned char *ui8pAttribs);
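+
+/*
+ * Illustrative sketch only: parses the Linux-style token described above
+ * (_redbend_<octal mode>:<hex uid>:<hex gid>) and applies it with chmod()/chown().
+ * It assumes \a ui8pAttribs is NUL-terminated; the exact token layout and field
+ * bases should be confirmed against the delta generator.
+ * \code
+ * #include <stdio.h>
+ * #include <unistd.h>
+ * #include <sys/stat.h>
+ *
+ * long SS_SetFileAttributes(const char *ui16pFilePath,
+ *                           const SS_UINT32 ui32AttribSize, const unsigned char *ui8pAttribs)
+ * {
+ *     unsigned int mode = 0, uid = 0, gid = 0;
+ *     (void)ui32AttribSize;
+ *     if (sscanf((const char *)ui8pAttribs, "_redbend_%o:%X:%X", &mode, &uid, &gid) != 3)
+ *         return -1;
+ *     if (chmod(ui16pFilePath, (mode_t)mode) != 0)
+ *         return -1;
+ *     if (chown(ui16pFilePath, (uid_t)uid, (gid_t)gid) != 0)
+ *         return -1;
+ *     return S_SS_SUCCESS;
+ * }
+ * \endcode
+ */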
+
+/*!
+ *******************************************************************************
+ * Print status and debug information.
+ *
+ * \param      pbUserData      Optional opaque data-structure to pass to IPL
+ *                                             functions
+ * \param      aFormat         A NULL-terminated printf-like string with support for
+ *                                             the following tags:
+ *                                             \li %x:  Hex number
+ *                                             \li %0x: Hex number with leading zeros
+ *                                             \li %u:  Unsigned decimal
+ *                                             \li %s:  NULL-terminated string
+ * \param      ...                     Strings to insert in \a aFormat
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    SS_UINT32 SS_Trace(void *pbUserData, const char *aFormat, ...);
+
+#ifdef __cplusplus
+}
+#endif                          /* __cplusplus */
+#endif
diff --git a/ss_engine/SS_ImageUpdate.c b/ss_engine/SS_ImageUpdate.c
new file mode 100755 (executable)
index 0000000..25ceb60
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * libtota
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <stdarg.h>
+#include <fcntl.h>
+#include <string.h>
+#include <sys/stat.h>
+#include "SS_Common.h"
+#include "fota_common.h"
+
+//EMMC, MTD??
+//Write to block
+//IF used
diff --git a/ss_engine/SS_ImageUpdate.h b/ss_engine/SS_ImageUpdate.h
new file mode 100755 (executable)
index 0000000..2923b71
--- /dev/null
@@ -0,0 +1,186 @@
+/*
+ * libtota
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*!
+ *******************************************************************************
+ * \file       SS_ImageUpdate.h
+ *
+ * \brief      UPI Image Update API
+ *******************************************************************************
+ */
+#ifndef        __SS_IMAGEUPDATE_H__
+#define        __SS_IMAGEUPDATE_H__
+
+#include "SS_Engine_Update.h"
+#include "SS_Engine_Errors.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif                          /* __cplusplus */
+/**
+ *******************************************************************************
+ * Get data from temporary storage.
+ *
+ * \param      pbUserData              Optional opaque data-structure to pass to IPL
+ *                                                     functions
+ * \param      pbBuffer                (out) Buffer to store the data
+ * \param      dwBlockAddress  Location of data
+ * \param      dwSize                  Size of data
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    long SS_ReadBackupBlock(void *pbUserData, unsigned char *pbBuffer, SS_UINT32 dwBlockAddress, SS_UINT32 dwSize);
+
+/**
+ *******************************************************************************
+ * Erase sector from temporary storage.<p>
+ *
+ * A default implementation is included. You must use the default implementation
+ * if both of the following are true:
+ * \li <b>InstantFailSafe</b> is set to false
+ * \li Only Image updates are used OR the backup is stored in flash and not on
+ *             the file system
+ * <p>The address must be aligned to the sector size.
+ *
+ * \param      pbUserData              Optional opaque data-structure to pass to IPL
+ *                                                     functions
+ * \param      dwStartAddress  Sector address to erase
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    long SS_EraseBackupBlock(void *pbUserData, SS_UINT32 dwStartAddress);
+
+/**
+ *******************************************************************************
+ * Write complete sector to temporary storage.<p>
+ *
+ * Must overwrite entire sector. To write part of a sector, see
+ * \ref SS_WriteBackupPartOfBlock.
+ *
+ * \param      pbUserData                      Optional opaque data-structure to pass to IPL
+ *                                                             functions
+ * \param      dwBlockStartAddress     Address to which to write
+ * \param      pbBuffer                        Data to write
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    long SS_WriteBackupBlock(void *pbUserData, SS_UINT32 dwBlockStartAddress, unsigned char *pbBuffer);
+
+/**
+ *******************************************************************************
+ * Write data to temporary storage.<p>
+ *
+ * Used to write part of a sector that has already been written to by
+ * \ref SS_WriteBackupBlock. Must NOT overwrite entire sector.<p>
+ *
+ * A default implementation is included. You must use the default implementation
+ * if both of the following are true:
+ * \li <b>InstantFailSafe</b> is set to false
+ * \li Only Image updates are used OR the backup is stored in flash and not on
+ *             the file system
+ *
+ * \param      pbUserData              Optional opaque data-structure to pass to IPL
+ *                                                     functions
+ * \param      dwStartAddress  Address to which to write
+ * \param      dwSize                  Size of \a pbBuffer
+ * \param      pbBuffer                Data to write
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    long SS_WriteBackupPartOfBlock(void *pbUserData,
+                                   SS_UINT32 dwStartAddress, SS_UINT32 dwSize, unsigned char *pbBuffer);
+
+/**
+ *******************************************************************************
+ * Write sector to flash.<p>
+ *
+ * Must overwrite a complete sector.
+ *
+ * \param      pbUserData              Optional opaque data-structure to pass to IPL
+ *                                                     functions
+ * \param      dwBlockAddress  Address to which to write
+ * \param      pbBuffer                Data to write
+ *
+ * \return     S_SS_SUCCESS on success, E_SS_WRITE_ERROR if there is a write error,
+ *                     or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    long SS_WriteBlock(void *pbUserData, SS_UINT32 dwBlockAddress, unsigned char *pbBuffer);
+
+/**
+ *******************************************************************************
+ * Read data from flash.<p>
+ *
+ * See \ref SS_ReadImageNewKey.
+ *
+ * \param      pbUserData              Optional opaque data-structure to pass to IPL
+ *                                                     functions
+ * \param      pbBuffer                (out) Buffer to store data
+ * \param      dwStartAddress  Address from which to read
+ * \param      dwSize                  Size of data to read (must be less than a sector); a
+ *                                                     size of 0 must immediately return success
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    long SS_ReadImage(void *pbUserData, unsigned char *pbBuffer,        /* pointer to user buffer */
+                      SS_UINT32 dwStartAddress, /* memory address to read from */
+                      SS_UINT32 dwSize);        /* number of bytes to copy */
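+
+/*
+ * Illustrative sketch only: when the image lives on a raw partition, SS_ReadImage
+ * can be a positioned read on a descriptor opened elsewhere. The descriptor name
+ * g_image_fd and the use of E_SS_READ_ERROR are assumptions of this sketch.
+ * \code
+ * #include <unistd.h>
+ *
+ * extern int g_image_fd;              // assumed: opened on the target partition during init
+ *
+ * long SS_ReadImage(void *pbUserData, unsigned char *pbBuffer,
+ *                   SS_UINT32 dwStartAddress, SS_UINT32 dwSize)
+ * {
+ *     ssize_t n;
+ *     (void)pbUserData;
+ *     if (dwSize == 0)
+ *         return S_SS_SUCCESS;        // a zero-length read must succeed immediately
+ *     n = pread(g_image_fd, pbBuffer, dwSize, (off_t)dwStartAddress);
+ *     return (n == (ssize_t)dwSize) ? S_SS_SUCCESS : E_SS_READ_ERROR;
+ * }
+ * \endcode
+ */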
+
+/**
+ *******************************************************************************
+ * Read data from flash.<p>
+ *
+ * As opposed to \ref SS_ReadImage, this function is intended to read already
+ * updated data and is used for encrypted images. If no part of the image is
+ * encrypted, simply call \ref SS_ReadImage.
+ *
+ * Used by the UPI when resuming an interrupted update.
+ *
+ * \param      pbUserData              Optional opaque data-structure to pass to IPL
+ *                                                     functions
+ * \param      pbBuffer                (out) Buffer to store data
+ * \param      dwStartAddress  Address from which to read
+ * \param      dwSize                  Size of data to read (must be less than a sector); a
+ *                                                     size of 0 must immediately return success
+ *
+ * \return     S_SS_SUCCESS on success or an \ref SS_vRM_Errors.h error code
+ *******************************************************************************
+ */
+    long SS_ReadImageNewKey(void *pbUserData, unsigned char *pbBuffer, SS_UINT32 dwStartAddress, SS_UINT32 dwSize);
+
+/**
+ *******************************************************************************
+ * Get sector size.<p>
+ *
+ * \param      pbUserData              Optional opaque data-structure to pass to IPL
+ *                                                     functions
+ *
+ * \return     Sector size, in bytes
+ *******************************************************************************
+ */
+    long SS_GetBlockSize(void *pbUserData);
+
+#ifdef __cplusplus
+}
+#endif                          /* __cplusplus */
+#endif                          /*      __SS_IMAGEUPDATE_H__    */
diff --git a/ss_engine/SS_MultiProcessUpdate.h b/ss_engine/SS_MultiProcessUpdate.h
new file mode 100755 (executable)
index 0000000..7b85830
--- /dev/null
@@ -0,0 +1,174 @@
+/*
+ * libtota
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*!
+ *******************************************************************************
+ * \file       SS_MultiProcessUpdate.h
+ *
+ * \brief      UPI MultiProcess API
+ *
+ * Multi-process updates can work on platforms that support multi-processing.
+ * To use the UPI with multi-process support you should implement the following
+ * Porting Layer functions:
+ *             SS_Malloc
+ *             SS_Free
+ *             SS_WaitForProcess
+ *             SS_RunProcess
+ *             SS_GetMaxNumProcess
+ *             SS_GetMaxProcRamSize
+ *
+ * The function SS_RunProcess must run a sub-process (optionally the same
+ * executable running the main process) that is also integrated with the UPI.
+ * This process must call the UPI API function SS_HandleProcessRequest.
+ * <p>
+ * The initial call to SS_vRM_Update does not change. vRapid Mobile calls
+ * SS_GetMaxNumProcess to see if multi-processing is supported and to see how
+ * many processes are supported. If SS_GetMaxNumProcess returns a value greater
+ * than zero, the UPI will run parts of the update in sub-processes.
+ * <p>
+ * The UPI creates a sub-process by calling SS_RunProcess with the arguments
+ * for the request. The new process must call SS_HandleProcessRequest to
+ * handle the request. The UPI manages processes using SS_WaitForProcess.
+ * Each sub-process allocates and frees memories using SS_Malloc and SS_Free.
+ * <p>
+ * Notes:
+ * \li In the function SS_RunProcess it is recommended to flush output streams
+ *             (like stdout or log files) before creating the child process to prevent
+ *             buffer duplication in the child process.
+ * \li SS_HandleProcessRequest starts a UPI operation that requires all
+ *             standard and common IPL functions in SS_vRM_FileSystemUpdate.h and
+ *             SS_vRM_Update.h
+ * \li If your implementation of these functions requires any preparations,
+ *             such as opening the update file for SS_GetDelta or opening a log file
+ *             for SS_Trace, these preparations must be done before calling
+ *             SS_HandleProcessRequest.
+ * \li  To distinguish between processes when running in Multi-Process Update mode,
+ *             it is recommended that you add the Process ID (i.e. getpid()) to the log
+ *             output in SS_Trace. Alternatively, create a log file for each sub-process
+ *             named log_file.[pid]
+ * \li If your implementation uses the optional opaque data structure ('user'
+ *             in the functions below), you may also have to pass it to sub-processes.
+ *             You must implement this functionality.
+ *******************************************************************************
+ */
+#ifndef _REDBEND_MULTIPROCESS_UPDATE_H_
+#define _REDBEND_MULTIPROCESS_UPDATE_H_
+
+#include "SS_Engine_Update.h"
+#include "SS_Engine_Errors.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif                          /* __cplusplus */
+
+/**
+ *******************************************************************************
+ * Allocate a memory block.
+ *
+ * \param      size    Memory block size, in bytes
+ *
+ * \return     A pointer to the memory block on success, NULL on failure
+ *******************************************************************************
+ */
+    void *SS_Malloc(SS_UINT32 size);
+
+/**
+ *******************************************************************************
+ * Free a memory block.
+ *
+ * \param      pMemBlock       Pointer to the memory block
+ *******************************************************************************
+ */
+    void SS_Free(void *pMemBlock);
+
+/**
+ *******************************************************************************
+ * Wait for a process to complete.
+ *
+ * \param      handle                          Process handle. If NULL, wait for any process to
+ *                                                             complete.
+ * \param      process_exit_code       Exit code of the completed process
+ *
+ * \return     Handle of the completed process or NULL on error
+ *******************************************************************************
+ */
+    void *SS_WaitForProcess(const void *handle, SS_UINT32 * process_exit_code);
+
+/**
+ *******************************************************************************
+ * Create a new process.
+ *
+ * The new process must call \ref SS_HandleProcessRequest with the same
+ * arguments.
+ *
+ * \param      user    Optional opaque data-structure passed in during
+ *                                     initialization
+ * \param      argc    Number of \a argv arguments
+ * \param      argv    Arguments to pass to the new process
+ *
+ * \return     Handle to the new process or NULL on error
+ *******************************************************************************
+ */
+    void *SS_RunProcess(void *user, int argc, char *argv[]);
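+
+/*
+ * Illustrative sketch only: on platforms with fork(), the child can call
+ * \ref SS_HandleProcessRequest directly instead of exec'ing a separate binary,
+ * and \ref SS_WaitForProcess reduces to waitpid(). Using the pid cast to a
+ * pointer as the process handle is an assumption of this sketch.
+ * \code
+ * #include <stdio.h>
+ * #include <stdlib.h>
+ * #include <unistd.h>
+ * #include <sys/types.h>
+ * #include <sys/wait.h>
+ *
+ * void *SS_RunProcess(void *user, int argc, char *argv[])
+ * {
+ *     pid_t pid;
+ *
+ *     fflush(NULL);                   // flush streams so the child does not duplicate buffers
+ *     pid = fork();
+ *     if (pid < 0)
+ *         return NULL;
+ *     if (pid == 0)
+ *         exit((int)SS_HandleProcessRequest(user, argc, argv));
+ *     return (void *)(long)pid;       // parent: the pid doubles as the handle
+ * }
+ *
+ * void *SS_WaitForProcess(const void *handle, SS_UINT32 *process_exit_code)
+ * {
+ *     int status = 0;
+ *     pid_t waited = waitpid(handle ? (pid_t)(long)handle : -1, &status, 0);
+ *
+ *     if (waited <= 0)
+ *         return NULL;
+ *     *process_exit_code = WIFEXITED(status) ? (SS_UINT32)WEXITSTATUS(status) : (SS_UINT32)-1;
+ *     return (void *)(long)waited;
+ * }
+ * \endcode
+ */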
+
+/**
+ *******************************************************************************
+ * Get the maximum number of processes that are allowed to run.
+ *
+ * \param      user    Optional opaque data-structure passed in during
+ *                                     initialization
+ *
+ * \return     The number of allowed processes or 0 if multiple processes are not
+ *                     allowed
+ *******************************************************************************
+ */
+    SS_UINT32 SS_GetMaxNumProcess(void *user);
+/**
+ *******************************************************************************
+ * Get the maximum available memory for processes to use.
+ *
+ * \param      user    Optional opaque data-structure passed in during
+ *                                     initialization
+ *
+ * \return     The memory amount available for processes or 0 if there is no available memory
+ *******************************************************************************
+ */
+    unsigned long SS_GetMaxProcRamSize(void *user);
+
+/**
+ *******************************************************************************
+ * Initialize a new process and handle the request sent from an external
+ * process.
+ *
+ * Called from the main function of the new process with the parameters passed to
+ * \ref SS_RunProcess.
+ *
+ * \param      user    Optional opaque data-structure passed in during
+ *                                     initialization
+ * \param      argc    Number of \a argv arguments
+ * \param      argv    Arguments to pass to the new process
+ *
+ * \return     S_SS_SUCCESS on success or an error code
+ *******************************************************************************
+ */
+    long SS_HandleProcessRequest(void *user, int argc, char *argv[]);
+
+#ifdef __cplusplus
+}
+#endif                          /* __cplusplus */
+#endif
diff --git a/ss_engine/SS_Nand.c b/ss_engine/SS_Nand.c
new file mode 100755 (executable)
index 0000000..21d1e65
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * libtota
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <ctype.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <mtd/mtd-user.h>
+#include "SS_Nand.h"
+#include "SS_ImageUpdate.h"
+#include "SS_Engine_Errors.h"
+#include "SS_Common.h"
+#include "ua.h"
+#include "fota_common.h"
+
+static int local_pid;
+static int local_offset;
+
+int nand_init(void *pbUserData, char *block_file, int offset)
+{
+
+       local_pid = open(block_file, O_RDWR, 0777);
+       if (local_pid < 0)
+               return E_SS_FAILURE;
+
+       local_offset = offset;
+       return S_SS_SUCCESS;
+}
+
+void nand_finalize()
+{
+       close(local_pid);
+}
+
+unsigned long nand_block_size()
+{
+       return SS_GetBlockSize(NULL);
+}
+
+unsigned long nand_pages_size()
+{
+       return 1 << NAND_PAGE_BITS;
+}
+
+unsigned long nand_image_size(char *block_file)
+{
+       int fid;
+       struct stat st;
+       fid = open(block_file, O_RDWR, 0777);
+       if (fid < 0)
+               return 0;
+       if (fstat(fid, &st) < 0) {
+               close(fid);
+               return 0;
+       }
+       close(fid);
+       return st.st_size;
+}
+
+int nand_write(void *pbUserData, unsigned char *pbBuffer,
+              unsigned long dwBlockAddress)
+{
+       int ret;
+       ret = lseek(local_pid, dwBlockAddress + local_offset, SEEK_SET);
+       LOG("nand_write lseek =%x\n", ret);
+       ret = write(local_pid, pbBuffer, nand_block_size());
+
+       LOG("nand_write dwStartAddress local_offset =%x\n", local_offset);
+       LOG("nand_write dwStartAddress dwBlockAddress =%lx\n",
+              dwBlockAddress);
+
+       return (ret >= 0) ? S_SS_SUCCESS : E_SS_WRITE_ERROR;
+}
+
+#define LOGICAL_SECTOR 512
+
+int nand_read(void *pbUserData, unsigned char *pbBuffer,
+             unsigned long dwStartAddress, unsigned long dwSize)
+{
+       int align, align2, ret;
+       char *buf;
+       align =
+           dwStartAddress - (dwStartAddress / LOGICAL_SECTOR) * LOGICAL_SECTOR;
+       align2 =
+           (LOGICAL_SECTOR -
+            (dwSize + dwStartAddress) % LOGICAL_SECTOR) % LOGICAL_SECTOR;
+
+       ret = lseek(local_pid, local_offset + (dwStartAddress - align), SEEK_SET);
+       LOG("nand_read lseek =%x\n", ret);
+       buf = malloc(align + align2 + dwSize);
+       if (buf == NULL)
+               return E_SS_MALLOC_ERROR;
+       ret = read(local_pid, buf, align + align2 + dwSize);
+       if (ret > 0)
+               memcpy(pbBuffer, buf + align, dwSize);
+       free(buf);
+
+       return (ret > 0) ? S_SS_SUCCESS : E_SS_READ_ERROR;
+
+}
+*/
diff --git a/ss_engine/SS_Nand.h b/ss_engine/SS_Nand.h
new file mode 100755 (executable)
index 0000000..6e37c4b
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * libtota
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*#ifndef SS_NAND_H
+#define SS_NAND_H
+
+extern unsigned long nand_image_size(char *block_file);
+extern unsigned long nand_block_size(void);
+extern unsigned long nand_pages_size(void);
+extern int nand_init(void *pbUserData,char *block_node,int offset);
+extern void nand_finalize(void);
+extern int nand_write(void *pbUserData,unsigned char *pbBuffer, unsigned long dwBlockAddress);
+extern int nand_read(void *pbUserData,unsigned char *pbBuffer,unsigned long dwStartAddress,unsigned long dwSize);
+
+#endif //SS_NAND_H
+*/
diff --git a/ss_engine/SS_UPI.c b/ss_engine/SS_UPI.c
new file mode 100755 (executable)
index 0000000..0dd61a0
--- /dev/null
@@ -0,0 +1,1987 @@
+/*
+ * libtota
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*HEADER */
+
+/*
+
+Function Prototypes Mandatory
+
+*/
+
+#include <stdio.h>
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <sys/statfs.h>
+#include <mntent.h>
+#include "ua.h"
+#include "SS_Common.h"
+#include "fota_common.h"
+#include "SS_UPI.h"
+#include "ss_patchdelta.h"
+#include "SS_Engine_Errors.h"
+#include "SS_FSUpdate.h"
+
+int gtotalFSCnt = 0;
+int FS_UpgradeState = E_SS_FAILURE;
+int gvalid_session = 0;         //used as a fail-safe in case the device is turned off or the battery is removed during an update
+fs_list *fs_headptr_main = NULL;
+fs_list *headptr_list[UA_PARTI_MAX];
+tar_Data_t *tar_cfg_data = NULL;
+
+#ifdef MEM_PROFILING
+/*
+       Description:
+               Create the profiling script, make it executable, and run it in a child process.
+               Only works if a valid delta.tar is present for the upgrade.
+       Summary:
+               If MEM_PROFILING is enabled, the memory-profiling result of the delta upgrade
+               can be read from the log file defined by SS_MEMORY_USAGE_LOG (the script itself
+               is defined by SS_MEMORY_PROFILING_SCRIPT).
+*/
+int mem_profiling_start = 0;
+int SS_Do_Memory_Profiling()
+{
+    int ret = -1;
+    pid_t pid;
+    char memory_usage_script[1024] = "#!/bin/bash\nlog_file=$1\npid=$2\nmaxmem=0\nwhile [[ -d \"/proc/${pid}\" ]]; do\n\
+   mem=`cat /proc/${pid}/smaps | grep Pss | grep -v Swap|awk '{print $2}'|awk '{s+=$1} END {print s}'`\n\
+        if [[ ${mem} -gt ${maxmem} ]]; then\n          maxmem=${mem}\n\
+               echo -e \"Memory usage till now is: ${maxmem} KB.\" >> $log_file\n      fi\n    sleep 0.01\ndone\n\
+   echo -e \"Max was : ${maxmem} KB.\" >> $log_file\n";
+    char cmd[1024] = { 0, };
+
+    //Open a file and write the script contents in it
+    FILE *fp = fopen(SS_MEMORY_PROFILING_SCRIPT, "w+");
+    fwrite(memory_usage_script, strlen(memory_usage_script), 1, fp);
+    fclose(fp);
+    //make the file executable - 495 decimal is 0757 octal
+    if (chmod(SS_MEMORY_PROFILING_SCRIPT, 495) < 0) {
+        LOGE("Error in chmod(%s, 495) - %d (%s)\n", SS_MEMORY_PROFILING_SCRIPT, errno, strerror(errno));
+        return E_SS_FAILURE;
+    }
+    //calling mem_use.sh
+    //Usage : <location of script> <location of log file to be created> <pid of the calling process>
+    pid = getpid();
+    snprintf(cmd, sizeof(cmd) - 1, "%s %s %d", SS_MEMORY_PROFILING_SCRIPT, SS_MEMORY_USAGE_LOG, pid);
+    ret = _system_cmd_nowait(cmd);
+    sleep(1);
+    LOG("ret for memory profiling cmd is %d\n", ret);
+    if (ret == 0) {
+        mem_profiling_start = 1;
+        return S_SS_SUCCESS;
+    } else {
+        LOGE("Could not start Memory Profiling\n");
+        return E_SS_FAILURE;
+    }
+}
+#endif
+
+#ifdef TIME_PROFILING
+static char ts1[256];
+static double ts2;
+double fast_tar_get_item_size_time = 0.0;
+double SS_LoadFile_time = 0.0;
+double SS_FSBuildNodes_time = 0.0;
+
+static void get_time_stamp1(void)
+{
+    struct timeval tv;
+    int sec, msec;
+
+    gettimeofday(&tv, NULL);
+    sec = (int)tv.tv_sec;
+    msec = (int)(tv.tv_usec / 1000);
+    snprintf(ts1, 256, "%06d.%03d", sec % 100000, msec);
+}
+
+static double get_time_stamp2(void)
+{
+    struct timeval tv;
+
+    gettimeofday(&tv, NULL);
+    ts2 = (double)tv.tv_sec + (double)(tv.tv_usec / 1000000.0);
+    return ts2;
+}
+#endif
+
+long SS_GetUPIVersion(unsigned char *ver_str)
+{
+    if (ver_str) {
+        strncpy((char *)ver_str, SS_TOTA_VERSION, MAX_PATH);
+#ifdef MEM_PROFILING
+        if (!mem_profiling_start)
+            if (!(S_SS_SUCCESS == SS_Do_Memory_Profiling()))
+                LOGE("Unable to start Memory_Profiling");
+#endif
+        return S_SS_SUCCESS;//wgid: 2456
+    } else
+        return E_SS_FAILURE;
+}
+
+int SS_CalculateFileSha(char *filename, int filesize, FileInfo * file)
+{
+
+    FILE *fp = NULL;
+    int ulResult = S_SS_SUCCESS;
+    int chunk = 20 * 1024 * 1024;
+
+    file->data = NULL;          /* ensure Cleanup does not free an uninitialized pointer */
+
+    fp = fopen(filename, "rb");
+    if (fp == NULL) {
+        LOGE("failed to open \"%s\": %s\n", filename, strerror(errno));
+        ulResult = E_SS_FAILURE;
+        goto Cleanup;
+    }
+
+    file->data = SS_Malloc(chunk);
+    if (!file->data) {
+        LOGE("failed to allocate memory for \"%s\": %s\n", filename, strerror(errno));
+        ulResult = E_SS_FAILURE;
+        goto Cleanup;
+    }
+
+    ssize_t bytes_read = 0;
+    sha1_ctx_t sha_ctx;
+    sha1_init(&sha_ctx);
+
+    while( filesize > 0){
+        if(filesize < chunk){
+            bytes_read = fread(file->data, 1, filesize, fp);
+            if (bytes_read != filesize) {
+                LOGE("short read of \"%s\" (%ld bytes of %ld)\n", filename, (long)bytes_read, (long)filesize);
+                ulResult = E_SS_FAILURE;
+                goto Cleanup;
+            }
+            sha1_update(&sha_ctx, file->data, filesize);
+            break;
+        }
+        else {
+            bytes_read = fread(file->data, 1, chunk, fp);
+            if (bytes_read != chunk) {
+                LOGE("short read of \"%s\" (%ld bytes of %ld)\n", filename, (long)bytes_read, (long)filesize);
+                ulResult = E_SS_FAILURE;
+                goto Cleanup;
+            }
+            sha1_update(&sha_ctx, file->data, chunk);
+            filesize -= chunk;
+        }
+    }
+
+    sha1_final(&sha_ctx, (uint32_t *) &file->sha1);
+
+Cleanup:
+    if(fp)
+        fclose(fp);
+    if(file->data)
+        SS_Free(file->data);
+    return ulResult;
+}
+
+int SS_verify_DELTA_image(char *filename)
+{
+
+    FileInfo file;
+    FILE *fp = NULL;
+    char line[SS_TOKEN_MAXLINE_LEN] = { '\0' };
+    char * delta_size = NULL;
+    char *signature = NULL;
+    char * sha1trg = NULL;
+    uint8_t target_sha1[SHA_DIGEST_SIZE] = { 0, };
+    char cmd[512] = { 0, };
+    int udelta_size = 0;
+    int ulResult = S_SS_SUCCESS;
+
+    if (stat(filename, &file.st) != 0) {
+        LOGE("failed to stat \"%s\": %s\n", filename, strerror(errno));
+        return -1;
+    }
+
+    snprintf(cmd, sizeof(cmd) - 1, "grep -o -P '%s' --binary-files=text system/%s > %s",
+                SS_IMAGE_MAGIC_KEY,filename, SS_IMAGE_MAGIC_KEY_VAL);
+
+    ulResult = _system_cmd_wait(cmd);
+    if (ulResult != S_SS_SUCCESS) {
+        LOGE("Grep extraction for [%s] failed, code [%d]\n", cmd, ulResult);
+        ulResult = E_SS_FAILURE;
+        goto Cleanup;
+    }
+
+    fp = fopen(SS_IMAGE_MAGIC_KEY_VAL, "r");
+    if (!fp) {
+        LOGE("Grep extraction for [%s] failed, code [%d]\n", cmd, ulResult);
+        ulResult = E_SS_FAILURE;
+        goto Cleanup;
+    }
+
+    if (fgets(line, SS_TOKEN_MAXLINE_LEN, fp) == NULL){
+        ulResult = E_SS_FAILURE;
+        goto Cleanup;
+    }
+    fclose(fp);
+    fp = NULL;
+
+    signature = strtok(line, SS_TOEKN_COLON);
+    delta_size = strtok(NULL, SS_TOEKN_COLON);
+    sha1trg = strtok(NULL, SS_TOEKN_COLON);
+
+    if (signature && sha1trg && delta_size) {
+        udelta_size = atoi(delta_size);
+        LOGL(LOG_SSENGINE, "delta_size %d sha1trg %s\n", udelta_size, sha1trg);
+    }
+    else {
+        LOGE("Could not parse signature [%s]\n", line);
+        ulResult = E_SS_FAILURE;
+        goto Cleanup;
+    }
+
+    if (ParseSha1(sha1trg, target_sha1) != 0) {
+        LOGE("failed to parse tgt-sha1 \"%s\"\n", sha1trg);
+        ulResult = E_SS_FAILURE;
+        goto Cleanup;
+    }
+
+    ulResult = SS_CalculateFileSha(filename, udelta_size, &file);
+    if (ulResult != S_SS_SUCCESS)
+        goto Cleanup;
+
+    if(memcmp(file.sha1, target_sha1, SHA_DIGEST_SIZE) != 0){
+        LOGE("SHA mismatch -[%s] actual [%x] target [%x]\n", filename, file.sha1, target_sha1);
+        SS_SetUpgradeState(E_SS_FSBADDELTA);
+        ulResult = E_SS_FAILURE;
+        goto Cleanup;
+    }
+    else
+        LOGL(LOG_SSENGINE, "DELTA verified %s \n", sha1trg);
+
+Cleanup:
+    if(fp)
+        fclose(fp);
+    if(file_exist(SS_IMAGE_MAGIC_KEY_VAL, 0))
+        SS_DeleteFile(NULL, SS_IMAGE_MAGIC_KEY_VAL);
+    return ulResult;
+}
+
+
+int SS_GetProgressResolution(int ultotalFSCnt)
+{
+    if (ultotalFSCnt < DISPLAYRESOLUTION_SIZE)
+        return 1;
+    else
+        return (ultotalFSCnt / DISPLAYRESOLUTION_SIZE);
+}
+
+void SS_SetUpgradeState(int Val)
+{
+    LOGE("FAILED to upgrade Cause:[0x%x]\n", Val);
+    FS_UpgradeState = Val;
+    return;
+}
+
+int SS_GetUpgradeState()
+{
+    return FS_UpgradeState;
+}
+
+//Check SS function if available
+int file_exist(char *filename, int type)
+{
+    struct stat buf;
+    int ret = 0;
+
+    ret = lstat(filename, &buf);
+    if (ret < 0) {
+        ret = stat(filename, &buf);
+    }
+    return (ret >= 0) ? (1) : (0);
+}
+
+int SS_rename(const char *old_file_name, const char *new_file_name)
+{
+    if (link(old_file_name, new_file_name) < 0) {
+        if (errno == EEXIST) {
+            if (unlink(new_file_name) < 0 || link(old_file_name, new_file_name) < 0)
+                return -1;
+        } else
+            return -1;
+    }
+    if (unlink(old_file_name) < 0) {
+        if (unlink(new_file_name) == 0)
+            return -1;
+    }
+    return 0;
+}
+
+int SS_rename1(const char *old_file_name, const char *new_file_name)
+{
+
+    int result = E_SS_FAILURE;
+    char *temp_name = NULL;
+    temp_name = (char *)SS_Malloc(strlen(new_file_name) + 10);
+    if (temp_name == NULL)
+        return E_SS_FAILURE;
+    strcpy(temp_name, new_file_name);
+    strcat(temp_name, ".temp");
+    result = rename(new_file_name, temp_name);
+    if (result != 0)
+        goto Cleanup;
+    result = rename(old_file_name, new_file_name);
+    if (result != 0)
+        goto Cleanup;
+    result = unlink(temp_name);
+ Cleanup:
+    if (temp_name)
+        SS_Free(temp_name);
+    return result;
+}
+
+/*!
+ *********************************************************************************
+ *                     SS_FSVerifyNode
+ *********************************************************************************
+ *
+ * @brief
+ *     This is to verify nodes being added to global control structure for diff and delete cases.
+ *     gvalid_session global is used to check if nodes are already verified.
+ *
+ *
+ *     @param
+ *
+ *     @return                         returns S_SS_SUCCESS and
+ *                                             returns E_SS_FAILURE in case any error occurs
+ *
+ *********************************************************************************
+ */
+int SS_FSVerifyNode(const char *ubDeltaPath, const char *path, const char *patchname, const char *sha1src, int type,
+                    char *patchpath_name, int *data_size, int *data_offset)
+{
+    char patch[MAX_FILE_PATH] = { 0 };
+    FileInfo source_file = { 0, };      /* keep .data NULL so a failed SS_LoadFile cannot lead to freeing garbage */
+    uint8_t source_sha1[SHA_DIGEST_SIZE];
+
+    if (gvalid_session) {
+        if ((type == DIFFS || type == DELETES || type == MOVES) && !file_exist((char *)path, type)) {
+            LOGE("failed to verifyNodes [does not exist], Path : [%s] Type[%d]\n", path, type);
+            SS_SetUpgradeState(E_SS_FSBADNODES);
+            return E_SS_FAILURE;
+        }
+        snprintf(patch, MAX_FILE_PATH, "%s%s%s", patchpath_name, "/", patchname);
+        if ((type == DIFFS /*||type == NEWFILES */ ))   // allowing size 0 also for folders
+        {
+            if (tar_get_item_size_from_struct(&tar_cfg_data, patchname, data_size, data_offset) != S_SS_SUCCESS) {
+                LOGE("failed to get item size from struct, Patch : [%s]\n", patchname);
+                SS_SetUpgradeState(E_SS_FSBADNODES);
+                return E_SS_FAILURE;
+            }
+            if (*data_size < 0) {
+                LOGE("failed to verifyNodes [delta absent], Patch : [%s] Type[%d]\n", patch, type);
+                SS_SetUpgradeState(E_SS_FSBADNODES);
+                return E_SS_FAILURE;
+            }
+        }
+        //For other types (NEWs, SYMs, Folders), SHA check not required
+        if ((type == DIFFS || type == MOVES /* ||type == DELETES */ ) && (strcmp(sha1src, SS_NULLENTRY) != 0)) {
+            if (ParseSha1(sha1src, source_sha1) != 0) {
+                LOGE("Failed to parse Src-sha1 \"%s\"\n", sha1src);
+                return E_SS_FAILURE;
+            }
+            if (SS_LoadFile(path, &source_file) == 0) {
+                if (memcmp(source_file.sha1, source_sha1, SHA_DIGEST_SIZE) != 0) {
+                    SS_Free(source_file.data);
+                    LOGE("SS_FSVerifyNode - SHA mismatch with SRC  - PATH [%s]\n", path);
+                    SS_SetUpgradeState(E_SS_FSSRCCURRUPTED);    // Define other error
+                    return E_SS_FAILURE;
+                }
+            }
+            SS_Free(source_file.data);
+        }
+    } else {
+        LOGL(LOG_SSENGINE, "FS partition Already verified - Filling only size and offset \n");
+        snprintf(patch, MAX_FILE_PATH, "%s%s%s", patchpath_name, "/", patchname);
+        if ((type == DIFFS /*||type == NEWFILES */ ))   // allowing size 0 also for folders
+        {
+            if (tar_get_item_size_from_struct(&tar_cfg_data, patchname, data_size, data_offset) != S_SS_SUCCESS) {
+                LOGE("failed to get item size from struct, Patch : [%s] \n", patchname);
+                SS_SetUpgradeState(E_SS_FSBADNODES);
+                return E_SS_FAILURE;
+            }
+            if (*data_size < 0) {
+                LOGE("failed to verifyNodes [delta absent], Patch : [%s] Type[%d]\n", patch, type);
+                SS_SetUpgradeState(E_SS_FSBADNODES);
+                return E_SS_FAILURE;
+            }
+        }
+    }
+    return S_SS_SUCCESS;
+}
+
+/*!
+ *********************************************************************************
+ *                     SS_AppendNode
+ *********************************************************************************
+ *
+ * @brief
+ *     This is to append node to the global control structure for delta files.
+ *
+ *
+ *     @param
+ *
+ *     @return                         returns S_SS_SUCCESS and
+ *                                             returns E_SS_FAILURE in case any error occurs
+ *
+ *********************************************************************************
+ */
+int SS_AppendNode(const char *ubDeltaPath, fs_params ** headparam, fs_params ** tailparam, const char *old_path,
+                  const char *new_path, const char *patchname, const char *sha1src, const char *sha1trg, int type,
+                  char *patchpath_name)
+{
+    fs_params *newnode = NULL;
+    int data_size = 0;
+    int data_offset = 0;
+
+    if (!ubDeltaPath || !old_path || !new_path || !patchname || !sha1src || !sha1trg || !patchpath_name) {
+        LOGE("Bad Nodes, NULL params passed for Appending Nodes \n");
+        SS_SetUpgradeState(E_SS_FSINVALIDNODEPARAMS);
+        return E_SS_FAILURE;
+    }
+    if ((E_SS_FAILURE ==
+         SS_FSVerifyNode(ubDeltaPath, old_path, patchname, sha1src, type, patchpath_name, &data_size, &data_offset))) {
+        LOGE("Bad Nodes, Failed to pass verification - [Delta Path - %s][OldPath - %s] [NewPath - %s] \n", ubDeltaPath,
+             old_path, new_path);
+        return E_SS_FAILURE;
+    }
+    newnode = (fs_params *) SS_Malloc(sizeof(fs_params));
+    if (!newnode)
+        return E_SS_FAILURE;
+    strncpy(newnode->file_old_path, old_path, SS_MAX_NAMELENSUPPORTED);//wgid: 29483
+    strncpy(newnode->file_new_path, new_path, SS_MAX_NAMELENSUPPORTED);//wgid: 29482
+    strncpy(newnode->patch_name, patchname, SS_MAX_NAMELENSUPPORTED);//wgid: 28033
+    strncpy(newnode->sha1src, sha1src, sizeof(newnode->sha1src) -1);//wgid: 25282
+    strncpy(newnode->sha1trg, sha1trg, sizeof(newnode->sha1trg) - 1);//wgid: 25283
+    newnode->type = type;
+    newnode->data_size = data_size;
+    newnode->data_offset = data_offset;
+    newnode->nextnode = NULL;
+
+    //LOG("%s %s %d %s %s \n",newnode->file_path,newnode->patch_name,newnode->type, newnode->sha1src, newnode->sha1trg);
+
+    if (*headparam == NULL) {
+        *headparam = newnode;
+        *tailparam = newnode;
+    } else {
+        (*tailparam)->nextnode = newnode;
+        (*tailparam) = (*tailparam)->nextnode;
+    }
+    return S_SS_SUCCESS;
+
+}
+
+void SS_UpdateUIProgress(ua_dataSS_t * ua_dataSS, int ulTotalFsCnt, int ulDone)
+{
+
+    static int ss_count = 1;
+    int res_val = SS_GetProgressResolution(ulTotalFsCnt);
+    if (!ua_dataSS) {
+        LOGE("Error ua_dataSS\n");
+        return;
+    }
+//LOG("\nvalues are ss_count[%d] total_file_cnt[%d]",ss_count,ulTotalFsCnt);
+    if (ulDone == 1) {
+        if (ua_dataSS->ui_progress)
+            ua_dataSS->ui_progress(ua_dataSS, 100);
+        ss_count = 1;
+    } else if (ss_count < ulTotalFsCnt) {
+
+        if (ss_count % res_val == 0 /* && ss_iterator <= 2 */ ) //Max 50 times display
+        {
+            double data = (double)ss_count / (double)ulTotalFsCnt;
+            if (ua_dataSS->ui_progress)
+                ua_dataSS->ui_progress(ua_dataSS, data * 100);
+        }
+        ss_count++;
+    } else {
+        if (ua_dataSS->ui_progress)
+            ua_dataSS->ui_progress(ua_dataSS, 100);
+        ss_count = 1;
+    }
+
+}
+
+/*!
+ *********************************************************************************
+ *                     SS_FSClearNodes
+ *********************************************************************************
+ *
+ * @brief
+ *     This is to clear the global control structure for delta files.
+ *
+ *
+ *     @param
+ *
+ *     @return
+ *
+ *********************************************************************************
+ */
+void SS_FSClearNodes(int idx)
+{
+    fs_params *local_temp = NULL;
+    fs_params *local_next = NULL;
+    LOGL(LOG_SSENGINE, "Free Nodes idx=%d \n", idx);
+    if (headptr_list[idx]) {
+        if (headptr_list[idx]->del_ref) {
+            local_temp = headptr_list[idx]->del_ref;
+            while (local_temp) {
+                local_next = local_temp->nextnode;
+                SS_Free(local_temp);
+                local_temp = local_next;
+            }
+        }
+        if (headptr_list[idx]->dif_ref) {
+            local_temp = headptr_list[idx]->dif_ref;
+            while (local_temp) {
+                local_next = local_temp->nextnode;
+                SS_Free(local_temp);
+                local_temp = local_next;
+            }
+        }
+        if (headptr_list[idx]->move_ref) {
+            local_temp = headptr_list[idx]->move_ref;
+            while (local_temp) {
+                local_next = local_temp->nextnode;
+                SS_Free(local_temp);
+                local_temp = local_next;
+            }
+        }
+        if (headptr_list[idx]->new_ref) {
+            local_temp = headptr_list[idx]->new_ref;
+            while (local_temp) {
+                local_next = local_temp->nextnode;
+                SS_Free(local_temp);
+                local_temp = local_next;
+            }
+        }
+        if (headptr_list[idx]->sym_difref) {
+            local_temp = headptr_list[idx]->sym_difref;
+            while (local_temp) {
+                local_next = local_temp->nextnode;
+                SS_Free(local_temp);
+                local_temp = local_next;
+            }
+        }
+        if (headptr_list[idx]->sym_newref) {
+            local_temp = headptr_list[idx]->sym_newref;
+            while (local_temp) {
+                local_next = local_temp->nextnode;
+                SS_Free(local_temp);
+                local_temp = local_next;
+            }
+        }
+        SS_Free(headptr_list[idx]);
+        headptr_list[idx] = NULL;
+    }
+}
+
+/*!
+ *********************************************************************************
+ *                     SS_FSGetDeltaCount
+ *********************************************************************************
+ *
+ * @brief
+ *     This is to get the delta counts for diffs, deletes, etc.
+ *
+ *
+ *     @param
+ *
+ *     @return                         returns structure with delta count info
+ *                                             NULL in case of error
+ *
+ *********************************************************************************
+ */
+
+struct details *SS_FSGetDeltaCount(char *ubDeltaPath, char *ubDeltaInfoFile)
+{
+    int size = 0, bckupsize = 0, ret = 1;
+    char *token = NULL;
+    char *FileData = NULL;
+    int data_size = -1;
+    char *line = NULL;
+    struct details *refer_copy = NULL;
+    FILE *filename_bkup = NULL;
+
+    if (!(ubDeltaPath && ubDeltaInfoFile)) {
+        LOGE("failed to Parse DELTA count information: \n");
+        SS_SetUpgradeState(E_SS_BAD_PARAMS);
+        return NULL;
+    }
+    refer_copy = (struct details *)SS_Malloc(sizeof(struct details));
+
+    if (refer_copy == NULL) {
+        LOGE("failed to allocate memory\n");
+        return NULL;
+    }
+
+    LOGL(LOG_SSENGINE, "Delta File Info =%s\n", ubDeltaInfoFile);
+
+    size = tar_get_item_size(ubDeltaPath, ubDeltaInfoFile);
+    if (size < 0) {
+        LOGE("failed to Access DELTA info file DPath:[%s] File: [%s]\n", ubDeltaPath, ubDeltaInfoFile);
+        SS_SetUpgradeState(E_SS_FSBADDELTA);
+        ret = 0;
+        goto cleanup;
+    }
+
+    FileData = SS_Malloc(size + 1);
+    if (FileData == NULL) {
+        LOGE("Failed to Allocate Memory\n");
+        SS_SetUpgradeState(E_SS_MALLOC_ERROR);
+        ret = 0;
+        goto cleanup;
+    }
+
+    data_size = tar_get_cfg_data(ubDeltaPath, ubDeltaInfoFile, FileData, size);
+    if (data_size <= 0) {       // == 0 is NOT okay??
+        LOGE("Failed to read cfg data from Delta\n");
+        SS_SetUpgradeState(E_SS_FSBADDELTA);
+        ret = 0;
+        goto cleanup;
+    }
+    filename_bkup = fopen(SS_PATCHLIST_BKUPLOC, "wb+");
+    if (filename_bkup == NULL) {
+        LOGE("Failed to create BACKUP file Error:[%s]\n", strerror(errno));
+        SS_SetUpgradeState(E_SS_FSFAILEDTOBACKUPPATCHINFO);
+        ret = 0;
+        goto cleanup;
+    }
+
+    bckupsize = fwrite(FileData, 1, data_size, filename_bkup);  //RECHECK SIZE 1
+    if (bckupsize <= 0) {
+        SS_SetUpgradeState(E_SS_FSFAILEDTOBACKUPPATCHINFO);
+        ret = 0;
+        goto cleanup;
+    }
+    LOGL(LOG_SSENGINE, " Size [%d] DataSize [%d] BakUpSize [%d]\n", size, data_size, bckupsize);
+
+    line = strstr(FileData, SS_FSCOUNT_MAGIC_KEY);
+    if (line) {
+        LOGL(LOG_SSENGINE, "SS_FSGetDeltaCount() last line %s \n", line);
+
+        token = strtok(&line[SS_FSCOUNT_MAGIG_KEYLEN], SS_TOKEN_SPACE);
+        if (token)
+            refer_copy->diffs = atoi(token);
+        else {
+            SS_SetUpgradeState(E_SS_FSFAILEDTOPARSEDELTACNT);
+            ret = 0;
+            goto cleanup;
+        }
+        token = strtok(NULL, SS_TOKEN_SPACE);
+        if (token)
+            refer_copy->moves = atoi(token);
+        else {
+            SS_SetUpgradeState(E_SS_FSFAILEDTOPARSEDELTACNT);
+            ret = 0;
+            goto cleanup;
+        }
+
+        token = strtok(NULL, SS_TOKEN_SPACE);
+        if (token)
+            refer_copy->news = atoi(token);
+        else {
+            SS_SetUpgradeState(E_SS_FSFAILEDTOPARSEDELTACNT);
+            ret = 0;
+            goto cleanup;
+        }
+
+        token = strtok(NULL, SS_TOKEN_SPACE);
+        if (token)
+            refer_copy->deletes = atoi(token);
+        else {
+            SS_SetUpgradeState(E_SS_FSFAILEDTOPARSEDELTACNT);
+            ret = 0;
+            goto cleanup;
+        }
+
+        token = strtok(NULL, SS_TOKEN_SPACE);
+        if (token)
+            refer_copy->symdiffs = atoi(token);
+        else {
+            SS_SetUpgradeState(E_SS_FSFAILEDTOPARSEDELTACNT);
+            ret = 0;
+            goto cleanup;
+        }
+
+        token = strtok(NULL, SS_TOKEN_SPACE);
+        if (token)
+            refer_copy->symnews = atoi(token);
+        else {
+            SS_SetUpgradeState(E_SS_FSFAILEDTOPARSEDELTACNT);
+            ret = 0;
+            goto cleanup;
+        }
+
+        gtotalFSCnt =
+            refer_copy->diffs + refer_copy->moves + refer_copy->news + refer_copy->deletes + refer_copy->symdiffs +
+            refer_copy->symnews;
+        LOG("SS_FSGetDeltaCount() total no of file %d\n", gtotalFSCnt);
+
+    } else {
+        SS_SetUpgradeState(E_SS_FSFAILEDTOPARSEDELTACNT);
+        LOG("SS_FSGetDeltaCount() Failed to read last line\n");
+    }
+    if (gtotalFSCnt < 0) {
+        ret = 0;
+        goto cleanup;
+    }
+
+ cleanup:
+    if (ret) {
+        SS_Free(FileData);
+        if (filename_bkup)
+            fclose(filename_bkup);
+        return refer_copy;
+    } else {
+        SS_Free(FileData);
+        SS_Free(refer_copy);
+        if (filename_bkup)
+            fclose(filename_bkup);
+        return NULL;
+    }
+
+}
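+
+/*
+ * Illustrative sketch of the summary line parsed above. The magic keyword and spacing are
+ * assumptions (shown here as "FSCOUNT"); only the order of the six counts matches the
+ * strtok() sequence in SS_FSGetDeltaCount():
+ *
+ *     FSCOUNT 1598 12 47 305 6 3
+ *
+ * would yield diffs=1598, moves=12, news=47, deletes=305, symdiffs=6, symnews=3,
+ * i.e. gtotalFSCnt = 1971.
+ */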
+
+/*!
+ *********************************************************************************
+ *                     SS_FSBuildNodes
+ *********************************************************************************
+ *
+ * @brief
+ *     This is used to build the global control structure for diffs, deletes, etc.
+ *     Every entry in the patch-info config file is parsed into the global control structure.
+ *
+ *
+ *     @param
+ *
+ *     @return                         returns fs_list structure filled with details of all the files to be diffed, deleted, etc.
+ *                                             NULL in case of error
+ *
+ *********************************************************************************
+ */
+fs_list *SS_FSBuildNodes(ua_dataSS_t * ua_dataSS)
+{
+    FILE *fp = NULL;
+    char line[SS_TOKEN_MAXLINE_LEN] = { '\0' };
+    char string_na[] = "NA";
+    char *patch_name = NULL;
+    char *source_name = NULL;
+    char *target_name = NULL;
+    char *sha1src = NULL;
+    char *sha1trg = NULL;
+    char *change_type = NULL;
+    char *file_type = NULL;
+    uint32_t ulPatchCount = 0, del_type = DELETES;
+    fs_params *fs_diffhead = NULL;
+    fs_params *fs_difftail = NULL;
+    fs_params *fs_movehead = NULL;
+    fs_params *fs_movetail = NULL;
+    fs_params *fs_newhead = NULL;
+    fs_params *fs_delhead = NULL;
+    fs_params *fs_deltail = NULL;
+    fs_params *fs_symlinkdiffhead = NULL;
+    fs_params *fs_symlinkdifftail = NULL;
+    fs_params *fs_symlinknewhead = NULL;
+    fs_params *fs_symlinknewtail = NULL;
+
+    struct details *local = NULL;
+    fs_list *fs_head_node = NULL;
+    int i = 0, retval = 0;
+    if (!ua_dataSS) {
+        LOGE("Bad structure ua_dataSS\n");
+        SS_SetUpgradeState(E_SS_BAD_PARAMS);
+        return NULL;
+    }
+    LOGL(LOG_SSENGINE, " Build Nodes Entry \n");
+    if (tar_cfg_data == NULL)
+        tar_cfg_data = tar_build_cfg_table(ua_dataSS->update_data->ua_delta_path);
+    if (!tar_cfg_data) {
+        LOGE(" tar_build_cfg_table  Failed \n");
+        SS_SetUpgradeState(E_SS_BAD_PARAMS);
+        return NULL;
+    }
+    local = SS_FSGetDeltaCount(ua_dataSS->update_data->ua_delta_path, ua_dataSS->update_delta->ua_patch_info);
+    if (local == NULL) {
+        LOGE(" Build Nodes Failed \n");
+        if (tar_cfg_data)
+            tar_free_cfg_table(&tar_cfg_data);
+        SS_SetUpgradeState(E_SS_BAD_PARAMS);
+        return NULL;
+    }
+
+    fp = fopen(SS_PATCHLIST_BKUPLOC, "r");
+    if (!fp) {
+        SS_SetUpgradeState(E_SS_FSFAILEDTOOPENPATCHINFO);
+        if (tar_cfg_data)
+            tar_free_cfg_table(&tar_cfg_data);
+        SS_Free(local);
+        return NULL;
+    }
+
+    ulPatchCount = local->diffs + local->deletes + local->news + local->moves + local->symdiffs + local->symnews;
+    LOG("Total FS count [%d].\n", ulPatchCount);
+/*
+************************************************************************
+Parsing logic implemented for patchlist
+************************************************************************
+Sample entries in patchlist as below :
+<CHANGE-TYPE>:<FILE-TYPE>:<OTHER-DETAILS...>
+************************************************************************
+DIFF:REG:system/bin/vi:system/bin/vi:2f2f3dc6d3ee06af0080ac7975f22941660f2480:78b2d44af32d854c70f1cb7431a60c2682a320cc:diff1_vi.delta
+DIFF:TPK:system/usr/packages/removable/com.samsung.calculator.tpk:system/usr/packages/removable/com.samsung.calculator.tpk:
+                                96fc1bcde30d501ba65ef0038e05da46d255a7b3:fa1d5d9daa4097ac302b69244297f508577c3a01:diff1598_com.samsung.calculator.tpk.delta/
+MOVE:REG:system/etc/smack/accesses.d/heremaps-engine-devel:system/usr/apps/com.samsung.contacts/res/temp:da39a3ee5e6b4b0d3255bfef95601890afd80709
+DEL:REG:system/usr/ug/res/images/ug-phone/contacts/favorites_icon_remove.PNG:38ad8be378506d19b1c769d46be262cf100f6c59
+DEL:SYM:system/usr/apps/com.samsung.message-lite/lib/libmsg-common.so
+SYM:DIFF:system/usr/lib/sync-agent/kies-private/libplugin-na-mobex.so.0:system/usr/lib/sync-agent/kies-private/libplugin-na-mobex.so.0:
+                                       libplugin-na-mobex.so.0.3.57
+SYM:NEW:system/lib/firmware/vbc_eq:/opt/system/vbc_eq
+***********************************************************************
+*/
+    if (local && ((local->diffs) > 0 || (local->moves > 0))) {
+        LOGL(LOG_SSENGINE, "%ss [%d] %ss [%d]\n", SS_STRING_DIFF, local->diffs, SS_STRING_MOVE, local->moves);
+        for (i = 0; i < (local->diffs + local->moves); i++) {
+            if (fgets(line, SS_TOKEN_MAXLINE_LEN, fp) == NULL) {
+                SS_SetUpgradeState(E_SS_FSFAILEDTOPARSEDELTAINFO);
+                break;
+            }
+            //LOGL(LOG_SSENGINE, "DIFF LINE:[%d] [%s] \n",i+1,line);
+
+            change_type = strtok(line, SS_TOEKN_COLON);
+            file_type = strtok(NULL, SS_TOEKN_COLON);
+
+            if (strcmp(change_type, SS_STRING_MOVE) == 0) {     // && strcmp(file_type,"TPK") == 0){
+                source_name = strtok(NULL, SS_TOEKN_COLON);
+                target_name = strtok(NULL, SS_TOEKN_COLON);
+                sha1src = strtok(NULL, SS_TOKEN_NEWLINE);
+                //LOGL(LOG_SSENGINE, "%s Index [%d]\n", SS_STRING_MOVE, i);
+
+                if (!source_name || !target_name || !sha1src) {
+                    SS_SetUpgradeState(E_SS_FSFAILEDTOPARSEDELTAINFO);
+                    //LOGE("Failed to extract Patch Info Type:DELETES \n");
+                    LOGE("Failed to parse DIFFS - LINE:[%d] [%s] \n", i + 1, line);
+                    goto CleanUp;
+                }
+                retval =
+                    SS_AppendNode(ua_dataSS->update_data->ua_delta_path, &fs_movehead, &fs_movetail, source_name,
+                                  target_name, string_na, sha1src, string_na, MOVES,
+                                  ua_dataSS->update_delta->ua_patch_path);
+                if (retval == E_SS_FAILURE)     // ONLY test purpose, should enable this
+                    goto CleanUp;
+                SS_UpdateUIProgress(ua_dataSS, ulPatchCount, 0);
+            } else if (strcmp(change_type, SS_STRING_DIFF) == 0) {      // && strcmp(file_type,"TPK") == 0){
+                source_name = strtok(NULL, SS_TOEKN_COLON);
+                target_name = strtok(NULL, SS_TOEKN_COLON);
+                sha1src = strtok(NULL, SS_TOEKN_COLON);
+                sha1trg = strtok(NULL, SS_TOEKN_COLON);
+                patch_name = strtok(NULL, SS_TOKEN_NEWLINE);
+                //LOGL(LOG_SSENGINE, "%s Index [%d]\n", SS_STRING_DIFF, i);
+
+                if (!source_name || !target_name || !sha1src || !sha1trg || !patch_name) {
+                    SS_SetUpgradeState(E_SS_FSFAILEDTOPARSEDELTAINFO);
+                    LOGE("Failed to parse DIFFS - LINE:[%d] [%s] \n", i + 1, line);
+                    goto CleanUp;
+                }
+                if (strlen(patch_name) <= SS_MAX_NAMELENSUPPORTED) {
+                    retval =
+                        SS_AppendNode(ua_dataSS->update_data->ua_delta_path, &fs_diffhead, &fs_difftail,
+                                      source_name, target_name, patch_name, sha1src, sha1trg, DIFFS,
+                                      ua_dataSS->update_delta->ua_patch_path);
+                    if (retval == E_SS_FAILURE) // ONLY test purpose, should enable this
+                        goto CleanUp;
+                } else {
+                    SS_SetUpgradeState(E_SS_FILENAMELENERROR);
+                    LOGE("File Name length Limitation Error File:[%s]\n", patch_name);
+                    goto CleanUp;
+                }
+                SS_UpdateUIProgress(ua_dataSS, ulPatchCount, 0);
+            } else {
+                SS_SetUpgradeState(E_SS_FSFAILEDTOPARSEDELTAINFO);
+                LOGE("Patch Name format Error File\n");
+                goto CleanUp;
+            }
+        }
+    }
+    if (local && (local->news) > 0) {   //check if new files archive is present in the delta
+        char new_patch_path[MAX_FILE_PATH] = { 0, };
+        snprintf(new_patch_path, MAX_FILE_PATH, "%s%s", ua_dataSS->parti_info->ua_subject_name, SS_COMPRESSED_FILE);
+        if (tar_get_item_size(ua_dataSS->update_data->ua_delta_path, new_patch_path) <= 0) {
+            SS_SetUpgradeState(E_SS_FSFAILEDTOPARSEDELTAINFO);
+            LOGE("New files not present in Patch\n");
+            goto CleanUp;
+        }
+    }
+    if (local && (local->deletes) > 0) {        //this is to group to delete list
+        LOGL(LOG_SSENGINE, "%ss [%d]\n", SS_STRING_DEL, local->deletes);
+        for (i = 0; i < (local->deletes); i++) {
+            if (fgets(line, SS_TOKEN_MAXLINE_LEN, fp) == NULL) {
+                SS_SetUpgradeState(E_SS_FSFAILEDTOPARSEDELTAINFO);
+                break;
+            }
+
+            change_type = strtok(line, SS_TOEKN_COLON);
+            file_type = strtok(NULL, SS_TOEKN_COLON);
+
+            if (strcmp(file_type, SS_STRING_REG) == 0) {
+                source_name = strtok(NULL, SS_TOEKN_COLON);
+                sha1src = strtok(NULL, SS_TOKEN_NEWLINE);
+                del_type = DELETES;
+            } else if (strcmp(file_type, SS_STRING_SYM) == 0) {
+                source_name = strtok(NULL, SS_TOKEN_NEWLINE);
+                sha1src = string_na;
+                del_type = DELETES;
+            } else if (strcmp(file_type, SS_STRING_END) == 0) {
+                source_name = strtok(NULL, SS_TOKEN_NEWLINE);
+                sha1src = string_na;
+                del_type = DELETE_END;
+            }
+            else {
+                LOGE("Failed to parse DELETES - LINE:[%d] [%s] \n", i + 1, line);
+                goto CleanUp;
+            }
+
+            if (!source_name || !sha1src) {
+                SS_SetUpgradeState(E_SS_FSFAILEDTOPARSEDELTAINFO);
+                //LOGE("Failed to extract Patch Info Type:DELETES \n");
+                LOGE("Failed to parse DELETES - LINE:[%d] [%s] \n", i + 1, line);
+                goto CleanUp;
+            }
+            //LOGL(LOG_SSENGINE, "%s Index [%d]\n", SS_STRING_DEL, i);
+            retval =
+                SS_AppendNode(ua_dataSS->update_data->ua_delta_path, &fs_delhead, &fs_deltail, source_name,
+                              string_na, string_na, sha1src, string_na, del_type,
+                              ua_dataSS->update_delta->ua_patch_path);
+            if (retval == E_SS_FAILURE) // ONLY test purpose, should enable this
+                goto CleanUp;
+            SS_UpdateUIProgress(ua_dataSS, ulPatchCount, 0);
+
+        }
+    }                           //For symlink files
+
+    if (local && (local->symdiffs) > 0) {
+        LOGL(LOG_SSENGINE, "%s %ss [%d]\n", SS_STRING_SYM, SS_STRING_DIFF, local->symdiffs);
+        for (i = 0; i < (local->symdiffs); i++) {       //get the count from below function
+            if (fgets(line, SS_TOKEN_MAXLINE_LEN, fp) == NULL) {
+                SS_SetUpgradeState(E_SS_FSFAILEDTOPARSEDELTAINFO);
+                break;
+            }
+            //LOGL(LOG_SSENGINE, "SYMDIFF LINE:[%d] [%s] \n",i+1,line);
+
+            change_type = strtok(line, SS_TOEKN_COLON);
+            file_type = strtok(NULL, SS_TOEKN_COLON);
+
+            if (strcmp(change_type, SS_STRING_SYM) == 0 && strcmp(file_type, SS_STRING_DIFF) == 0) {    // && strcmp(file_type,"TPK") == 0){
+                source_name = strtok(NULL, SS_TOEKN_COLON);
+                target_name = strtok(NULL, SS_TOEKN_COLON);
+                patch_name = strtok(NULL, SS_TOKEN_NEWLINE);
+                //LOGL(LOG_SSENGINE, "%s %s Index [%d]\n", SS_STRING_SYM, SS_STRING_DIFF, i);
+
+                if (!source_name || !target_name || !patch_name) {
+                    SS_SetUpgradeState(E_SS_FSFAILEDTOPARSEDELTAINFO);
+                    //LOGE("Failed to extract Patch Info Type:DELETES \n");
+                    LOGE("Failed to parse SymDiffs - LINE:[%d] [%s] \n", i + 1, line);
+                    goto CleanUp;
+                }
+                retval =
+                    SS_AppendNode(ua_dataSS->update_data->ua_delta_path, &fs_symlinkdiffhead, &fs_symlinkdifftail,
+                                  source_name, target_name, patch_name, string_na, string_na, SYMDIFFS,
+                                  ua_dataSS->update_delta->ua_patch_path);
+                if (retval == E_SS_FAILURE)     // ONLY test purpose, should enable this
+                    goto CleanUp;
+                SS_UpdateUIProgress(ua_dataSS, ulPatchCount, 0);
+            }
+        }
+    }
+    if (local && (local->symnews) > 0) {
+        LOGL(LOG_SSENGINE, "%s %ss [%d]n", SS_STRING_SYM, SS_STRING_NEW, local->symnews);
+        for (i = 0; i < (local->symnews); i++) {
+            if (fgets(line, SS_TOKEN_MAXLINE_LEN, fp) == NULL) {
+                SS_SetUpgradeState(E_SS_FSFAILEDTOPARSEDELTAINFO);
+                break;
+            }
+            //LOGL(LOG_SSENGINE, "SYMNEWS LINE:[%d] [%s] \n",i+1,line);
+
+            change_type = strtok(line, SS_TOEKN_COLON);
+            file_type = strtok(NULL, SS_TOEKN_COLON);
+
+            if (strcmp(change_type, SS_STRING_SYM) == 0 && strcmp(file_type, SS_STRING_NEW) == 0) {
+                source_name = strtok(NULL, SS_TOEKN_COLON);
+                patch_name = strtok(NULL, SS_TOKEN_NEWLINE);
+                //LOGL(LOG_SSENGINE, "%s %s Index [%d]\n", SS_STRING_SYM, SS_STRING_NEW, i);
+
+                if (!source_name || !patch_name) {
+                    SS_SetUpgradeState(E_SS_FSFAILEDTOPARSEDELTAINFO);
+                    //LOGE("Failed to extract Patch Info Type:DELETES \n");
+                    LOGE("Failed to parse SymNews - LINE:[%d] [%s] \n", i + 1, line);
+                    goto CleanUp;
+                }
+                retval =
+                    SS_AppendNode(ua_dataSS->update_data->ua_delta_path, &fs_symlinknewhead, &fs_symlinknewtail,
+                                  source_name, string_na, patch_name, string_na, string_na, SYMNEWFILES,
+                                  ua_dataSS->update_delta->ua_patch_path);
+                if (retval == E_SS_FAILURE)     // ONLY test purpose, should enable this
+                    goto CleanUp;
+                SS_UpdateUIProgress(ua_dataSS, ulPatchCount, 0);
+            }
+        }
+    }
+
+    fs_head_node = (fs_list *) SS_Malloc(sizeof(fs_list));
+    if (!fs_head_node) {
+        SS_SetUpgradeState(E_SS_MALLOC_ERROR);
+        goto CleanUp;
+    }
+    fs_head_node->dif_ref = fs_diffhead;
+    fs_head_node->move_ref = fs_movehead;
+    fs_head_node->new_ref = fs_newhead;
+    fs_head_node->del_ref = fs_delhead;
+    fs_head_node->sym_difref = fs_symlinkdiffhead;
+    fs_head_node->sym_newref = fs_symlinknewhead;
+    fs_head_node->ulPatchCount = ulPatchCount;
+
+    SS_UpdateUIProgress(ua_dataSS, ulPatchCount, 1);
+
+ CleanUp:
+    fclose(fp);
+    SS_Free(local);
+    unlink(SS_PATCHLIST_BKUPLOC);
+    if (retval == E_SS_FAILURE)
+        if (tar_cfg_data)
+            tar_free_cfg_table(&tar_cfg_data);
+    return fs_head_node;
+}
+
+void SS_GetPartition_LocDetails(ua_dataSS_t * ua_dataSS, int part_idx)
+{
+    LOGL(LOG_SSENGINE, "PART NAME: [%s] \n", ua_dataSS->parti_info->ua_parti_name);
+    snprintf(ua_dataSS->update_delta->ua_patch_path, MAX_FILE_PATH, "%s", ua_dataSS->parti_info->ua_subject_name);
+    snprintf(ua_dataSS->update_delta->ua_patch_info, MAX_FILE_PATH, "%s%s%s", ua_dataSS->parti_info->ua_subject_name,
+             ua_dataSS->parti_info->ua_parti_name, SS_PATCHLISTFORMAT);
+    snprintf(ua_dataSS->update_delta->ua_attrib_path, MAX_FILE_PATH, "%s%s%s", ua_dataSS->parti_info->ua_subject_name,
+             ua_dataSS->parti_info->ua_parti_name, SS_PATCH_ATTR_FORMAT);
+    LOGL(LOG_SSENGINE, "PatchPath[%s] PatchInfo [%s] Attributes [%s]\n", ua_dataSS->update_delta->ua_patch_path,
+         ua_dataSS->update_delta->ua_patch_info, ua_dataSS->update_delta->ua_attrib_path);
+
+    return;
+}
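+
+/*
+ * Illustrative only (partition/subject names are hypothetical here): with
+ * ua_subject_name = "rootfs/" and ua_parti_name = "rootfs", the snprintf() calls above
+ * would produce
+ *     ua_patch_path  = "rootfs/"
+ *     ua_patch_info  = "rootfs/rootfs" followed by SS_PATCHLISTFORMAT
+ *     ua_attrib_path = "rootfs/rootfs" followed by SS_PATCH_ATTR_FORMAT
+ */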
+
+//Support functions
+//Change Struct format details (Can include total file count also???)
+/*!
+ *********************************************************************************
+ *                     SS_FSSetAttributes
+ *********************************************************************************
+ *
+ * @brief
+ *     This is used to set the file attributes at the end of application of patches in FS
+ *
+ *
+ *     @param
+ *
+ *     @return                         returns S_SS_SUCCESS
+ *                                             E_SS_FAILURE in case of error
+ *
+ *********************************************************************************
+ */
+int SS_FSSetAttributes(ua_dataSS_t * ua_dataSS)
+{
+    char *pline = NULL;
+    char *psaveptr = NULL;
+    char *pfilePath = NULL;
+    char *pfiletype = NULL;
+    char *attributSize = NULL;
+    char *pattribs = NULL;
+    int ulAttribSize = 0;
+    int result = S_SS_SUCCESS;
+
+    if (!(ua_dataSS && ua_dataSS->update_delta && ua_dataSS->update_data->ua_delta_path)) {
+        LOGE("Bad params for SS_FSSetAttributes\n");
+        SS_SetUpgradeState(E_SS_BAD_PARAMS);
+        return E_SS_FAILURE;
+    }
+    LOGL(LOG_SSENGINE, "ATTRIB PATH: [%s] \n", ua_dataSS->update_delta->ua_attrib_path);
+
+    int item_size = tar_get_item_size(ua_dataSS->update_data->ua_delta_path, ua_dataSS->update_delta->ua_attrib_path);
+
+    if (item_size <= 0) {
+        LOGL(LOG_SSENGINE, "No Attributes to SET\n");
+        return S_SS_SUCCESS;    // Delta with ONLY deletes
+    }
+    char *item_data = SS_Malloc(item_size + 1);
+    if (item_data == NULL) {
+        SS_SetUpgradeState(E_SS_MALLOC_ERROR);
+        return E_SS_FAILURE;
+    }
+    int read_data =
+        tar_get_cfg_data(ua_dataSS->update_data->ua_delta_path, ua_dataSS->update_delta->ua_attrib_path, item_data,
+                         item_size);
+    if (read_data <= 0) {
+        SS_SetUpgradeState(E_SS_FSBADDELTA);
+        if (item_data != NULL)
+            SS_Free(item_data);
+        return E_SS_FAILURE;
+    }
+    pline = strtok_r(item_data, "\n", &psaveptr);
+    if (pline == NULL) {
+        LOGL(LOG_SSENGINE, "No Attributes to SET as no lines in file\n");
+        if (item_data != NULL)
+            SS_Free(item_data);
+        return E_SS_FAILURE;
+    }
+
+    while (pline) {
+        pfilePath = strtok(pline, "\"");
+
+        if (pfilePath && strcmp(pfilePath, SS_FWSLASH) == 0) {
+            LOGE("\n skip root: it is RO");
+            pline = strtok_r(NULL, SS_TOKEN_NEWLINE, &psaveptr);
+            continue;
+        }
+
+        pfiletype = strtok(NULL, SS_TOKEN_SPACE);
+        attributSize = strtok(NULL, SS_TOKEN_SPACE);
+        pattribs = strtok(NULL, SS_TOKEN_NEWLINE);
+        if (pattribs && pfilePath && pfiletype) {
+            ulAttribSize = strlen(pattribs);
+            //LOG("\nSS_SetFileAttributes [%s][%s][%d][%s]",pfilePath,pfiletype,ulAttribSize, pattribs );
+            //LOG("SS_SetFileAttributes [%s]\n", pfilePath);
+
+            result = SS_SetFileAttributes(pfilePath, ulAttribSize, (const unsigned char *)pattribs);
+            if (result != S_SS_SUCCESS) {
+                LOGE("\n Failed to set Attributes %s", pfilePath);
+                SS_SetUpgradeState(E_SS_FSBADATTRIBUTES);
+                if (item_data)
+                    SS_Free(item_data);
+                return E_SS_FAILURE;
+            }
+        } else {
+            LOGE("\n Failed to Parse Attributes - LINE %s", pline);
+            SS_SetUpgradeState(E_SS_FSBADATTRIBUTES);
+            if (item_data)
+                SS_Free(item_data);
+            return E_SS_FAILURE;
+        }
+        pline = strtok_r(NULL, SS_TOKEN_NEWLINE, &psaveptr);
+    }
+    SS_Free(item_data);
+
+    return S_SS_SUCCESS;
+}
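+
+/*
+ * Illustrative sketch of an attribute line as tokenized above (the path, type and size
+ * values are hypothetical; only the tokenization matches the parser):
+ *
+ *     "/usr/bin/vi" REG 44 <serialized-attributes>
+ *
+ * splits into pfilePath = /usr/bin/vi, pfiletype = REG, attributSize = 44 (unused; the
+ * length is recomputed with strlen()) and pattribs = the remainder of the line, which is
+ * handed to SS_SetFileAttributes().
+ */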
+
+/*!
+ *********************************************************************************
+ *                     SS_FSUpdateFile
+ *********************************************************************************
+ *
+ * @brief
+ *     This is used to update individual files, handled per change type (diffs, moves, deletes, new files, symlinks)
+ *
+ *
+ *     @param
+ *
+ *     @return                         returns S_SS_SUCCESS
+ *                                             E_SS_FAILURE in case of error
+ *
+ *********************************************************************************
+ */
+int SS_FSUpdateFile(int ubFileType, ua_dataSS_t * ua_dataSS, int ulPatchCount, fs_params * pFsNode,
+                    const char *patch_path)
+{
+    int ulFileIndex = 1;
+    char ubPatch[SS_MAX_FILE_PATH] = {
+        0
+    };
+    int ulReadCnt = 0;
+    int ulResult = S_SS_SUCCESS;
+
+    if (!patch_path) {
+        LOGE("Bad patch_path name\n");
+        return E_SS_FAILURE;
+    }
+    switch (ubFileType) {
+    case DIFFS:
+        {
+            tar_open(ua_dataSS->update_data->ua_delta_path);
+#ifdef TIME_PROFILING
+            get_time_stamp1();  //total time capturing
+            t1 = atoi(ts1);
+#endif
+            while (pFsNode) {
+                LOGL(LOG_SSENGINE, "DIFFS update Index: [%d] \n", ulFileIndex++);
+                snprintf(ubPatch, SS_MAX_FILE_PATH, "%s%s", patch_path, pFsNode->patch_name);
+                //LOGL(LOG_SSENGINE, "DIFF list --- [File Name %s]\n [Patch Name %s]",pFsNode->file_path, ubPatch);
+                if (pFsNode->data_size > 0) {
+                    ulReadCnt =
+                        fast_tar_extract_file(ua_dataSS->update_data->ua_delta_path, ubPatch, SS_PATCHFILE_SOURCE,
+                                              pFsNode->data_size, pFsNode->data_offset);
+                    if (ulReadCnt < 0) {
+                        ulResult = E_SS_FAILURE;
+                        tar_close();
+                        SS_SetUpgradeState(E_SS_FSUPDATEFAILED);
+                        LOGE("Delta Read Failed\n");
+                        break;
+                    }
+                    //LOGL(LOG_SSENGINE,"Updating [Item - %s]and size is[%d] Read Count[%d]\n",ubPatch, pFsNode->data_size, ulReadCnt);
+
+                    if (ulReadCnt > 0)
+                        ulResult =
+                            SS_UpdateDeltaFS(pFsNode->file_old_path, pFsNode->file_new_path, pFsNode->sha1src,
+                                             pFsNode->sha1trg, pFsNode->data_size);
+                    if (ulResult != S_SS_SUCCESS) {
+                        LOGE("FS update Failed Result : [%d], [Item - %s]and size is[%d] Read Count[%d]\n", ulResult,
+                             ubPatch, pFsNode->data_size, ulReadCnt);
+                        tar_close();
+                        SS_SetUpgradeState(E_SS_FSUPDATEFAILED);
+                        break;
+                    }
+                    SS_UpdateUIProgress(ua_dataSS, ulPatchCount, 0);
+                    pFsNode = pFsNode->nextnode;
+                }
+#ifdef SUPPORT_CONTAINER
+                else if (pFsNode->data_size == 0) {     //need to add logic to identify archive update
+                    ulResult = tar_extract_folder(ua_dataSS->update_data->ua_delta_path, ubPatch,
+                                                  SS_ARCHIVE_DELTA_FOLDER);
+                    if (ulResult != S_SS_SUCCESS) {
+                        LOGE("extraction failed for [%s] result [%d]\n", ubPatch, ulResult);
+                        SS_SetUpgradeState(E_SS_DELTA_IS_CORRUPT);
+                        break;
+                    }
+                    //Copy executables for zip, unzip and find operations - code to be removed after ramdisk upgrade
+                    ulResult = (int)SS_CopyFile(NULL, SS_ZIP_SOURCE, SS_ZIP_TARGET);
+                    if (ulResult != S_SS_SUCCESS) {
+                        LOGE("failed to copy unzip [%d]\n", ulResult);
+                        SS_SetUpgradeState(E_SS_WRITE_ERROR);
+                        break;
+                    }
+                    ulResult = (int)SS_CopyFile(NULL, SS_UNZIP_SOURCE, SS_UNZIP_TARGET);
+                    if (ulResult != S_SS_SUCCESS) {
+                        LOGE("failed to copy unzip [%d]\n", ulResult);
+                        SS_SetUpgradeState(E_SS_WRITE_ERROR);
+                        break;
+                    }
+                    ulResult = (int)SS_CopyFile(NULL, SS_FIND_CMD_SOURCE, SS_FIND_CMD_TARGET);
+                    if (ulResult != S_SS_SUCCESS) {
+                        LOGE("failed to copy find [%d]\n", ulResult);
+                        SS_SetUpgradeState(E_SS_WRITE_ERROR);
+                        break;
+                    }
+                    ulResult =
+                        SS_UpdateArchive(ua_dataSS, pFsNode->file_old_path, pFsNode->file_new_path, pFsNode->sha1src,
+                                         pFsNode->sha1trg);
+                    if (ulResult != S_SS_SUCCESS) {
+                        LOGE("FS update Failed, Unable to extact archive\n");
+                        break;
+                    }
+
+                    SS_UpdateUIProgress(ua_dataSS, ulPatchCount, 0);
+                    pFsNode = pFsNode->nextnode;
+                }
+#endif
+                else {
+                    ulResult = E_SS_FAILURE;
+                    tar_close();
+                    LOGE("size is invalid\n");
+                    SS_SetUpgradeState(E_SS_FSUPDATEFAILED);
+                    break;
+                }
+            }
+#ifdef TIME_PROFILING
+            get_time_stamp1();  //total time capturing
+            t2 = atoi(ts1);
+            LOG("Shirsh time for DIFFS - [%d] \n", (t2 - t1));
+#endif
+#ifdef SUPPORT_CONTAINER
+            SS_DeleteFile(NULL, SS_ZIP_TARGET);
+            SS_DeleteFile(NULL, SS_UNZIP_TARGET);
+#endif
+            tar_close();
+        }
+        break;
+    case MOVES:
+        {
+#ifdef TIME_PROFILING
+            get_time_stamp1();  //total time capturing
+            t1 = atoi(ts1);
+#endif
+            while (pFsNode) {
+                LOGL(LOG_SSENGINE, "MOVES update Index: [%d] \n", ulFileIndex++);
+                ulResult = SS_MoveFile(NULL, pFsNode->file_old_path, pFsNode->file_new_path);
+                if (ulResult != S_SS_SUCCESS) {
+                    LOGE("Move Failed for [%s] to [%s]\n", pFsNode->file_old_path, pFsNode->file_new_path);
+                    SS_SetUpgradeState(ulResult);
+                    break;
+                }
+                SS_UpdateUIProgress(ua_dataSS, ulPatchCount, 0);
+                pFsNode = pFsNode->nextnode;
+            }
+#ifdef TIME_PROFILING
+            get_time_stamp1();  //total time capturing
+            t2 = atoi(ts1);
+            LOG("Shirsh time for DELETES - [%d] \n", (t2 - t1));
+#endif
+        }
+        break;
+    case DELETES:
+        {
+            int ulFiletype = 0;
+#ifdef TIME_PROFILING
+            get_time_stamp1();  //total time capturing
+            t1 = atoi(ts1);
+#endif
+            while (pFsNode) {
+                if (pFsNode->type == DELETES) {
+                    LOGL(LOG_SSENGINE, "DELETES update Index: [%d] \n", ulFileIndex++);
+                    SS_GetFileType(NULL, pFsNode->file_old_path, (enumFileType *) & ulFiletype);
+                    if (ulFiletype == 2)        //FT_FOLDER
+                        ulResult = SS_DeleteFolder(NULL, pFsNode->file_old_path);
+                    else
+                        ulResult = SS_DeleteFile(NULL, pFsNode->file_old_path);
+                    if (ulResult != S_SS_SUCCESS) {
+                        LOGE("Delete Failed\n");
+                        SS_SetUpgradeState(ulResult);
+                        break;
+                    }
+                }
+                SS_UpdateUIProgress(ua_dataSS, ulPatchCount, 0);
+                pFsNode = pFsNode->nextnode;
+            }
+#ifdef TIME_PROFILING
+            get_time_stamp1();  //total time capturing
+            t2 = atoi(ts1);
+            LOG("Shirsh time for DELETES - [%d] \n", (t2 - t1));
+#endif
+        }
+        break;
+    case DELETE_END:
+        {
+            int ulFiletype = 0;
+#ifdef TIME_PROFILING
+            get_time_stamp1();  //total time capturing
+            t1 = atoi(ts1);
+#endif
+            while (pFsNode) {
+                if (pFsNode->type == DELETE_END) {
+                    LOGL(LOG_SSENGINE, "DELETES update Index: [%d] \n", ulFileIndex++);
+                    SS_GetFileType(NULL, pFsNode->file_old_path, (enumFileType *) & ulFiletype);
+                    if (ulFiletype == 2)        //FT_FOLDER
+                        ulResult = SS_DeleteFolder(NULL, pFsNode->file_old_path);
+                    else
+                        ulResult = SS_DeleteFile(NULL, pFsNode->file_old_path);
+                    if (ulResult != S_SS_SUCCESS) {
+                        LOGE("Delete Failed\n");
+                        SS_SetUpgradeState(ulResult);
+                        break;
+                    }
+                }
+                pFsNode = pFsNode->nextnode;
+            }
+#ifdef TIME_PROFILING
+            get_time_stamp1();  //total time capturing
+            t2 = atoi(ts1);
+            LOG("Shirsh time for DELETES - [%d] \n", (t2 - t1));
+#endif
+        }
+        break;
+
+    case NEWFILES:
+        {
+#ifdef TIME_PROFILING
+            get_time_stamp1();  //total time capturing
+            t1 = atoi(ts1);
+#endif
+            LOGL(LOG_SSENGINE, "Starting New file upgrade for   [%s]\n", patch_path);
+            if (tar_extract_file(ua_dataSS->update_data->ua_delta_path, (char *)patch_path, SS_NEW_COMPRESSED_FILE) >=
+                0)
+                if (_7zdecompress(SS_NEW_COMPRESSED_FILE) == 0)
+                    LOGL(LOG_SSENGINE, "7zip extracted  successfully %s\n", ua_dataSS->parti_info->ua_parti_name);
+                else
+                    LOGL(LOG_SSENGINE, "7zip extraction error for %s\n", ua_dataSS->parti_info->ua_parti_name);
+            else
+                LOGL(LOG_SSENGINE, "tar extraction error for %s\n", ua_dataSS->parti_info->ua_parti_name);
+            SS_DeleteFile(NULL, SS_NEW_COMPRESSED_FILE);
+#ifdef TIME_PROFILING
+            get_time_stamp1();  //total time capturing
+            t2 = atoi(ts1);
+            LOG("Shirsh time for NEWFILES - [%d] \n", (t2 - t1));
+#endif
+        }
+        break;
+    case SYMDIFFS:
+        {
+#ifdef TIME_PROFILING
+            get_time_stamp1();  //total time capturing
+            t1 = atoi(ts1);
+#endif
+            while (pFsNode) {
+                LOGL(LOG_SSENGINE, "SYMDIFFS update Index: [%d] \n", ulFileIndex++);
+                //LOG("Sym Diff file paths: [Linkname - %s] [reference file name- %s][]\n", pFsNode->file_path,pFsNode->patch_name);
+                ulResult = SS_Unlink(NULL, pFsNode->file_old_path);
+                if (ulResult == S_SS_SUCCESS)
+                    ulResult = SS_Link(NULL, pFsNode->file_new_path, pFsNode->patch_name);
+                else {
+                    LOGE("Unlink Failed\n");
+                    SS_SetUpgradeState(E_SS_FSUPDATEFAILED);
+                    break;
+                }
+                SS_UpdateUIProgress(ua_dataSS, ulPatchCount, 0);
+                pFsNode = pFsNode->nextnode;
+            }
+#ifdef TIME_PROFILING
+            get_time_stamp1();  //total time capturing
+            t2 = atoi(ts1);
+            LOG("Shirsh time for SYMDIFFS - [%d] \n", (t2 - t1));
+#endif
+        }
+        break;
+    case SYMNEWFILES:
+        {
+            fs_params *head_node;
+            int retry_count = 0, do_retry = 0;
+#ifdef TIME_PROFILING
+            get_time_stamp1();  //total time capturing
+            t1 = atoi(ts1);
+#endif
+ SYMLINK_CREATE:
+            head_node = pFsNode;
+            while (head_node) {
+                LOGL(LOG_SSENGINE, "SYMNEWS update Index: [%d] \n", ulFileIndex++);
+                snprintf(ubPatch, SS_MAX_FILE_PATH, "%s%s%s", patch_path, "/", head_node->patch_name);
+                LOGL(LOG_SSENGINE, "Sym New file paths: [Linkname - %s] [reference file name- %s][]\n",
+                     head_node->file_old_path, head_node->patch_name);
+                ulResult = SS_Link(NULL, head_node->file_old_path, head_node->patch_name);
+                if (ulResult == E_SS_FAILURE) {
+                    LOGE("Link Failed\n");
+                    SS_SetUpgradeState(E_SS_FSUPDATEFAILED);
+                    break;
+                } else if (ulResult == ENOENT)  //to handle cases where new symlink points to a new symlink yet to be created
+                {
+                    do_retry = 1;       //we will retry the failed symlinks with error 2 (no file or dir) again after this cycle
+                    //SS_UpdateUIProgress(ua_dataSS,ulPatchCount);
+                    head_node = head_node->nextnode;
+                    continue;
+
+                }
+                SS_UpdateUIProgress(ua_dataSS, ulPatchCount, 0);
+                head_node = head_node->nextnode;
+            }
+            if (do_retry && (retry_count < 4)) {
+                retry_count++;
+                ulFileIndex = 0;
+                do_retry = 0;
+                goto SYMLINK_CREATE;
+            } else if (do_retry && (retry_count >= 4))  //retry to be done maximum 4 times
+            {
+                LOGE("Link Failed after %d retrys\n", retry_count);
+                //SS_SetUpgradeState(E_SS_FSUPDATEFAILED);
+                break;
+            }
+#ifdef TIME_PROFILING
+            get_time_stamp1();  //total time capturing
+            t2 = atoi(ts1);
+            LOG("Shirsh time for SYMNEWS - [%d] \n", (t2 - t1));
+#endif
+        }
+        break;
+    default:
+        break;
+    }
+    return ulResult;
+}
+
+#ifdef MEM_PROFILING
+extern int max_mem;
+extern int cur_mem;
+#endif
+/*!
+ *********************************************************************************
+ *                     SS_FSUpdatemain
+ *********************************************************************************
+ *
+ * @brief
+ *     This is the API exposed from the engine to update FS.
+ *
+ *
+ *     @param
+ *
+ *     @return                         returns S_SS_SUCCESS
+ *                                             E_SS_FAILURE in case of error
+ *
+ *********************************************************************************
+ */
+
+int SS_FSUpdatemain(ua_dataSS_t * ua_dataSS, int part_idx)
+{
+    int ulResult = S_SS_SUCCESS;
+    fs_list *head_ptr_node = NULL;
+    char new_patch_path[SS_MAX_FILE_PATH] = {
+        0
+    };
+
+    if (!ua_dataSS)
+        return E_SS_BAD_PARAMS; // Set error ??
+    head_ptr_node = headptr_list[part_idx];
+
+    if (!head_ptr_node) {       //in case of power failure, try rebuilding nodes again
+        SS_FSVerifyPartition(ua_dataSS, part_idx);
+        head_ptr_node = headptr_list[part_idx];
+    }
+
+    if (!head_ptr_node)
+        return E_SS_FSBADNODES;
+
+    SS_GetPartition_LocDetails(ua_dataSS, part_idx);
+
+    LOGL(LOG_SSENGINE, "FS Update Entry PartIndex: [%d]\n", part_idx);
+
+    if (head_ptr_node->del_ref == NULL) {
+        LOGL(LOG_SSENGINE, "No DEL header\n");
+    } else if (ulResult == S_SS_SUCCESS) {
+        ulResult =
+            SS_FSUpdateFile(DELETES, ua_dataSS, head_ptr_node->ulPatchCount, head_ptr_node->del_ref,
+                            ua_dataSS->update_delta->ua_patch_path);
+    }
+
+    if (head_ptr_node->dif_ref == NULL) {
+        LOGL(LOG_SSENGINE, "No DIFF header\n");
+    } else if (ulResult == S_SS_SUCCESS) {
+        ulResult =
+            SS_FSUpdateFile(DIFFS, ua_dataSS, head_ptr_node->ulPatchCount, head_ptr_node->dif_ref,
+                            ua_dataSS->update_delta->ua_patch_path);
+    }
+
+    if (head_ptr_node->move_ref == NULL) {
+        LOGL(LOG_SSENGINE, "No MOVE header\n");
+    } else if (ulResult == S_SS_SUCCESS) {
+        ulResult =
+            SS_FSUpdateFile(MOVES, ua_dataSS, head_ptr_node->ulPatchCount, head_ptr_node->move_ref,
+                            ua_dataSS->update_delta->ua_patch_path);
+    }
+
+    if (head_ptr_node->del_ref == NULL) {
+        LOGL(LOG_SSENGINE, "No DEL header\n");
+    } else if (ulResult == S_SS_SUCCESS) {
+        ulResult =
+            SS_FSUpdateFile(DELETE_END, ua_dataSS, head_ptr_node->ulPatchCount, head_ptr_node->del_ref,
+                            ua_dataSS->update_delta->ua_patch_path);
+    }
+
+    if (ulResult == S_SS_SUCCESS) {
+        //new file extraction start
+        snprintf(new_patch_path, SS_MAX_FILE_PATH, "%s%s", ua_dataSS->parti_info->ua_subject_name, SS_COMPRESSED_FILE);     // subject name will have a forward slash as part of the cfg file
+        LOGL(LOG_SSENGINE, "File path created to extract new files : [%s]\n", new_patch_path);
+        ulResult =
+            SS_FSUpdateFile(NEWFILES, ua_dataSS, head_ptr_node->ulPatchCount, head_ptr_node->new_ref, new_patch_path);
+        //new file extraction end
+    }
+
+    if (head_ptr_node->sym_difref == NULL) {
+        LOGL(LOG_SSENGINE, "No SYMDIFF header\n");
+    } else if (ulResult == S_SS_SUCCESS) {
+        ulResult =
+            SS_FSUpdateFile(SYMDIFFS, ua_dataSS, head_ptr_node->ulPatchCount, head_ptr_node->sym_difref,
+                            ua_dataSS->update_delta->ua_patch_path);
+    }
+
+    if (head_ptr_node->sym_newref == NULL) {
+        LOGL(LOG_SSENGINE, "No SYMNEW header\n");
+    } else if (ulResult == S_SS_SUCCESS) {
+        ulResult =
+            SS_FSUpdateFile(SYMNEWFILES, ua_dataSS, head_ptr_node->ulPatchCount, head_ptr_node->sym_newref,
+                            ua_dataSS->update_delta->ua_patch_path);
+    }
+
+    if (ulResult == S_SS_SUCCESS)
+        ulResult = SS_FSSetAttributes(ua_dataSS);
+    sync();
+    sleep(1);
+    SS_FSClearNodes(part_idx);
+
+    if (ulResult == S_SS_SUCCESS)
+        SS_UpdateUIProgress(ua_dataSS, 0, 1);  //fix WGID : 51963, When all updates are done to FS , patchcount is not needed, passing 1 to 3rd arg is enough
+
+    LOGL(LOG_SSENGINE, "FS update Complete PartIndex: [%d]\n", part_idx);
+#ifdef  MEM_PROFILING
+    LOGL(LOG_SSENGINE, "Stats are : Cur Max : [%d] Global Max : [%d]\n", cur_mem, max_mem);
+#endif
+    if (ulResult == S_SS_SUCCESS)
+        return ulResult;
+    else
+        return SS_GetUpgradeState();
+}
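+
+/*
+ * Minimal usage sketch (assumption: the caller runs verification first so the per-partition
+ * node lists are built; error handling elided):
+ *
+ *     if (SS_FSVerifyPartition(ua_dataSS, part_idx) == S_SS_SUCCESS)
+ *         result = SS_FSUpdatemain(ua_dataSS, part_idx); // applies deletes, diffs, moves,
+ *                                                        // new files and symlink changes in order
+ */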
+
+/*!
+ *********************************************************************************
+ *                     SS_FSAvailiableFreeSpace
+ *********************************************************************************
+ *
+ * @brief
+ *     This returns the free space available on the file system mounted from the given block device.
+ *     It is used to check that there is enough room before applying FS updates.
+ *
+ *
+ *     @param                          block_name - block device node of the partition (as listed in /proc/mounts)
+ *
+ *     @return                         returns available free space in bytes
+ *                                             E_SS_FAILURE in case of error
+ *
+ *********************************************************************************
+ */
+size_t SS_FSAvailiableFreeSpace(char *block_name)
+{
+    struct mntent *ent;
+    FILE *aFile;
+    struct statfs sb;
+    size_t free_space = (size_t)E_SS_FAILURE;   //stays E_SS_FAILURE if the block device is not found mounted
+
+    aFile = setmntent("/proc/mounts", "r");
+    if (aFile == NULL) {
+        LOGE("setmntent error");
+        return E_SS_FAILURE;
+    }
+    while (NULL != (ent = getmntent(aFile))) {
+        if (strcmp(ent->mnt_fsname, block_name) == 0) {
+            if (statfs(ent->mnt_dir, &sb) == 0) {
+                free_space = (size_t)sb.f_bsize * sb.f_bavail;
+                LOGL(LOG_SSENGINE, "Total free space = %zu, blocks free = %lu\n", free_space,
+                     (unsigned long)sb.f_bfree);
+            }
+        }
+    }
+    endmntent(aFile);
+    return free_space;
+}
+
+int SS_FSVerifyPartition(ua_dataSS_t * ua_dataSS, int part_idx)
+{
+    int ulResult = S_SS_SUCCESS;
+    size_t free_space = 0;
+    if (!ua_dataSS) {
+        LOGE("Wrong Param for fs verification\n");
+        SS_SetUpgradeState(E_SS_BAD_PARAMS);
+        return E_SS_FAILURE;
+    }
+
+    LOGL(LOG_SSENGINE, "FS max free mem reqired : [%d]\n", ua_dataSS->update_cfg->soure_img_size);
+    free_space = SS_FSAvailiableFreeSpace(ua_dataSS->parti_info->ua_blk_name);
+    if (free_space != E_SS_FAILURE) {
+        //Source img size is max single file size for a file system under upgrade, which is updated in CFG file by UPG
+        if ((free_space) < (ua_dataSS->update_cfg->soure_img_size + ua_dataSS->update_cfg->soure_img_size / 10)) {
+            LOGE("Not enough free space [%d] for max size [%d]\n", free_space,
+                 (ua_dataSS->update_cfg->soure_img_size + ua_dataSS->update_cfg->soure_img_size / 10));
+            //SS_SetUpgradeState(E_SS_FSMEMORYERROR);
+            //return E_SS_FAILURE;
+        } else
+            LOGL(LOG_SSENGINE, "Enough space for Partition [%s]\n", ua_dataSS->parti_info->ua_parti_name);
+    }
+
+    SS_GetAvailableFreeSpace(NULL, SS_COMMON_WORKSPACE, &free_space);
+    //Checking for 2 times the file size free space , as delta can be worst case size of file.
+    if ((free_space) < (2 * ua_dataSS->update_cfg->soure_img_size)) {
+        LOGE("Not enough free space [%d] for max size [%d]\n", free_space, (2 * ua_dataSS->update_cfg->soure_img_size));
+        SS_SetUpgradeState(E_SS_FSMEMORYERROR);
+        return E_SS_FAILURE;
+    }
+#ifdef MEM_PROFILING
+    if (!mem_profiling_start)
+        if (!(S_SS_SUCCESS == SS_Do_Memory_Profiling()))
+            return E_SS_FAILURE;
+#endif
+    SS_GetPartition_LocDetails(ua_dataSS, part_idx);
+    LOGL(LOG_SSENGINE, "FS Verification Start PartIndex:[%d]\n", part_idx);
+    if (ua_dataSS->ua_operation == UI_OP_SCOUT)
+        gvalid_session = TRUE;  // (should be true when called during verification)
+    headptr_list[part_idx] = SS_FSBuildNodes(ua_dataSS);
+#ifdef TIME_PROFILING
+    LOGL(LOG_SSENGINE, "fast_tar_get_item_size_time :[%lf]\n", fast_tar_get_item_size_time);
+    LOGL(LOG_SSENGINE, "SS_LoadFile_time :[%lf]\n", SS_LoadFile_time);
+    LOGL(LOG_SSENGINE, "SS_FSBuildNodes_time :[%lf]\n", SS_FSBuildNodes_time);
+#endif
+    if (!headptr_list[part_idx]) {
+        LOGE("FS Verification Failed PartIndex: [%d]\n", part_idx);
+        SS_FSClearNodes(part_idx);
+        ulResult = E_SS_FAILURE;
+    }
+
+    if (ulResult == S_SS_SUCCESS)
+        return ulResult;
+    else
+        return SS_GetUpgradeState();
+}
+
+//TODO: should check whether enough space is available before backing up
+int SS_BackupSource(const char *source_filename)
+{
+    int ret = E_SS_FAILURE;
+
+    if (source_filename) {
+        ret = (int)SS_CopyFile(NULL, source_filename, SS_BACKUP_SOURCE);
+        if (ret != S_SS_SUCCESS) {
+            LOGE("failed to back up source file  Error [%d]\n", ret);
+            SS_SetUpgradeState(E_SS_FSSRCBACKUPFAILED);
+        }
+    }
+    return ret;
+}
+
+int SS_BackupSourceClear(void)
+{
+    int ret = E_SS_FAILURE;
+    ret = (int)SS_DeleteFile(NULL, SS_BACKUP_SOURCE);
+    if (ret != S_SS_SUCCESS) {
+        LOGE("failed to delete BACKUP file\n");
+        SS_SetUpgradeState(E_SS_FSSRCBACKUPFAILED);
+    }
+    return ret;
+}
+
+int SS_PatchSourceClear(void)
+{
+    int ret = E_SS_FAILURE;
+    ret = (int)SS_DeleteFile(NULL, SS_PATCHFILE_SOURCE);
+    if (ret != S_SS_SUCCESS) {
+        LOGE("failed to delete PATCHFILE file\n");
+        SS_SetUpgradeState(E_SS_PATCHFILE_DEL_ERROR);
+    }
+    return ret;
+}
+int SS_IMGVerifyFullImage(ua_dataSS_t * ua_dataSS)
+{
+    int read_cnt = 0, patch_size = 0;
+    FileInfo source_file;
+    uint8_t target_sha1[SHA_DIGEST_SIZE];
+    int ulResult = S_SS_SUCCESS;
+
+     if (!(ua_dataSS && ua_dataSS->update_cfg && ua_dataSS->parti_info && ua_dataSS->parti_info->ua_blk_name
+            && !(ua_dataSS->update_cfg->skip_verify == 1))) {
+        LOGE("Bad structure or members\n");
+        SS_SetUpgradeState(E_SS_BAD_PARAMS);
+        return E_SS_FAILURE;
+    }
+
+    LOGL(LOG_SSENGINE, "FULL IMG Verification Entry BlkName:[%s]\n", ua_dataSS->parti_info->ua_blk_name);
+
+    if (ua_dataSS->update_data && ua_dataSS->parti_info && ua_dataSS->update_data->ua_delta_path
+        && ua_dataSS->parti_info->ua_subject_name)
+        patch_size = tar_get_item_size(ua_dataSS->update_data->ua_delta_path, ua_dataSS->parti_info->ua_subject_name);
+    else {
+        LOGE("Bad structure members in ua_dataSS\n");
+        SS_SetUpgradeState(E_SS_BAD_PARAMS);
+        ulResult = E_SS_FAILURE;
+        goto Cleanup;
+    }
+
+    if (ua_dataSS->update_cfg && ua_dataSS->update_cfg->soure_img_size && ua_dataSS->update_cfg->target_sha1)
+        LOGL(LOG_SSENGINE, "\nParams -image size [%d] sha1 [%s]\n",
+             ua_dataSS->update_cfg->soure_img_size, ua_dataSS->update_cfg->target_sha1);
+    else {
+        LOGE("Bad structure member update_cfg in ua_dataSS\n");
+        SS_SetUpgradeState(E_SS_BAD_PARAMS);
+        ulResult = E_SS_FAILURE;
+        goto Cleanup;
+    }
+
+    if (ParseSha1(ua_dataSS->update_cfg->target_sha1, target_sha1) != 0) {
+        LOGE("failed to parse tgt-sha1 \"%s\"\n", ua_dataSS->update_cfg->target_sha1);
+        SS_SetUpgradeState(E_SS_IMGBADDELTA);
+        ulResult = E_SS_FAILURE;
+        goto Cleanup;
+    }
+
+    if ((patch_size) > 0)
+        read_cnt =
+            tar_extract_file(ua_dataSS->update_data->ua_delta_path, ua_dataSS->parti_info->ua_subject_name,
+                             SS_PATCHFILE_SOURCE);
+
+    if (read_cnt <= 0) {
+        LOGL(LOG_SSENGINE, "Failed to read delta\n");
+        SS_SetUpgradeState(E_SS_IMGBADDELTA);
+        ulResult = E_SS_FAILURE;
+        goto Cleanup;
+    }
+    if (SS_LoadFile(SS_PATCHFILE_SOURCE, &source_file) == 0) {
+        if (memcmp(source_file.sha1, target_sha1, SHA_DIGEST_SIZE) == 0) {
+            LOGL(LOG_SSENGINE, "Patch Can be applied\n");
+            SS_Free(source_file.data);
+            ulResult = S_SS_SUCCESS;
+        } else {
+            LOGL(LOG_SSENGINE, "Patch Cannot be applied\n");
+            SS_Free(source_file.data);
+            SS_SetUpgradeState(E_SS_IMGBADDELTA);
+            ulResult = E_SS_FAILURE;
+            goto Cleanup;
+        }
+    } else {
+        LOGL(LOG_SSENGINE, "Failed to LoadFile\n");
+        SS_SetUpgradeState(E_SS_IMGBADDELTA);
+        ulResult = E_SS_FAILURE;
+    }
+
+Cleanup:
+    if (file_exist(SS_PATCHFILE_SOURCE, 0))
+        SS_DeleteFile(NULL, SS_PATCHFILE_SOURCE);
+    return ulResult;
+}
+
+int SS_IMGVerfiyPartition(ua_dataSS_t * ua_dataSS)
+{
+    FileInfo source_file;
+    int ulResult = S_SS_SUCCESS;
+    uint8_t source_sha1[SHA_DIGEST_SIZE];
+    uint8_t target_sha1[SHA_DIGEST_SIZE];
+    size_t free_space = 0;
+
+    if (!(ua_dataSS && ua_dataSS->update_cfg && ua_dataSS->parti_info && ua_dataSS->parti_info->ua_blk_name)) {
+        LOGE("Bad structure or members\n");
+        SS_SetUpgradeState(E_SS_BAD_PARAMS);
+        return E_SS_FAILURE;
+    }
+
+    //We verify that twice the image size is free in the BACKUP workspace, not on the partition, as the patch is created in RAM
+    SS_GetAvailableFreeSpace(NULL, SS_COMMON_WORKSPACE, &free_space);
+    if ((free_space) < (2 * ua_dataSS->update_cfg->target_img_size)) {
+        LOGE("Not enough free space [%d] for twice max size [%d]\n", free_space,
+             (2 * ua_dataSS->update_cfg->target_img_size));
+        SS_SetUpgradeState(E_SS_FSMEMORYERROR);
+        return E_SS_FAILURE;
+    }
+
+    if (ParseSha1(ua_dataSS->update_cfg->soure_sha1, source_sha1) != 0) {
+        LOGE("failed to parse Src-sha1 \"%s\"\n", ua_dataSS->update_cfg->soure_sha1);
+        SS_SetUpgradeState(E_SS_SHAPRASE_FAILED);
+        return E_SS_FAILURE;
+    }
+    // corner case, Parsing sha can fail if update.cfg is wrong/manually edited
+    if (ParseSha1(ua_dataSS->update_cfg->target_sha1, target_sha1) != 0) {
+        LOGE("failed to parse Target-sha1 \"%s\"\n", ua_dataSS->update_cfg->target_sha1);
+        SS_SetUpgradeState(E_SS_SHAPRASE_FAILED);
+        return E_SS_FAILURE;
+    }
+
+    source_file.size = ua_dataSS->update_cfg->soure_img_size;
+    source_file.data = NULL;
+    if (SS_LoadPartition(ua_dataSS->parti_info->ua_blk_name, &source_file) == 0) {
+        if (memcmp(source_file.sha1, source_sha1, SHA_DIGEST_SIZE) == 0) {
+            LOGL(LOG_SSENGINE, "SS_IMGVerfiyPartition - SHA matches with source [%s] \n",
+                 ua_dataSS->parti_info->ua_blk_name);
+        } else                  // Need not compare with Target sha as once upgraded, it should NOT verify same partition again.
+        {
+            SS_SetUpgradeState(E_SS_IMGSRCCURRUPTED);
+            ulResult = E_SS_FAILURE;
+        }
+    }
+    SS_Free(source_file.data);
+    if (ulResult == S_SS_SUCCESS) {
+        if (ua_dataSS->ui_progress)
+            ua_dataSS->ui_progress(ua_dataSS, 100);
+        return ulResult;
+    } else
+        return SS_GetUpgradeState();
+}
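+
+/*
+ * Sketch of the typical image-update flow (assumption: the caller drives verification and then
+ * the update, with update_type chosen per the configured image type; error handling elided):
+ *
+ *     if (SS_IMGVerfiyPartition(ua_dataSS) == S_SS_SUCCESS)
+ *         result = SS_IMGUpdatemain(ua_dataSS, DELTA_IMG);
+ */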
+
+/*!
+ *********************************************************************************
+ *                     SS_IMGUpdatemain
+ *********************************************************************************
+ *
+ * @brief
+ *     This is the API exposed from the engine to update images (FULL and DELTA images)
+ *
+ *
+ *     @param
+ *
+ *     @return                         returns S_SS_SUCCESS
+ *                                             E_SS_FAILURE in case of error
+ *
+ *********************************************************************************
+ */
+
+int SS_IMGUpdatemain(ua_dataSS_t * ua_dataSS, int update_type)  //SS_FSUpdatePartition
+{
+    int read_cnt = 0, patch_size;
+    int ulResult = S_SS_SUCCESS;
+
+    //sprintf(Blk_name, "%s%s%s","EMMC",":", ua_dataSS->parti_info->ua_blk_name);
+    //LOGL(LOG_SSENGINE, "IMG Upgrade Entry BlkName:[%s]\n",Blk_name);
+    if (!ua_dataSS) {
+        LOGE("Bad structure ua_dataSS\n");
+        SS_SetUpgradeState(E_SS_BAD_PARAMS);
+        return E_SS_FAILURE;
+    }
+    LOGL(LOG_SSENGINE, "IMG Upgrade Entry BlkName:[%s]\n", ua_dataSS->parti_info->ua_blk_name);
+
+    if (ua_dataSS->update_data && ua_dataSS->parti_info && ua_dataSS->update_data->ua_delta_path
+        && ua_dataSS->parti_info->ua_subject_name)
+        patch_size = tar_get_item_size(ua_dataSS->update_data->ua_delta_path, ua_dataSS->parti_info->ua_subject_name);
+    else {
+        LOGE("Bad structure members in ua_dataSS\n");
+        SS_SetUpgradeState(E_SS_BAD_PARAMS);
+        return E_SS_FAILURE;
+    }
+    if (ua_dataSS->update_cfg && ua_dataSS->update_cfg->soure_img_size && ua_dataSS->update_cfg->target_sha1)
+        LOGL(LOG_SSENGINE, "\n SS_IMGUpdatemain Params -source size [%d] sha1 [%s]",
+             ua_dataSS->update_cfg->soure_img_size, ua_dataSS->update_cfg->target_sha1);
+    else {
+        LOGE("Bad structure member update_cfg in ua_dataSS\n");
+        SS_SetUpgradeState(E_SS_BAD_PARAMS);
+        return E_SS_FAILURE;
+    }
+
+    if ((patch_size) > 0)
+        read_cnt =
+            tar_extract_file(ua_dataSS->update_data->ua_delta_path, ua_dataSS->parti_info->ua_subject_name,
+                             SS_PATCHFILE_SOURCE);
+
+    if (read_cnt <= 0) {
+        ulResult = E_SS_FAILURE;
+        SS_SetUpgradeState(E_SS_IMGBADDELTA);
+        return E_SS_FAILURE;    //ulResult;
+    }
+    if (ua_dataSS->ui_progress)
+        ua_dataSS->ui_progress(ua_dataSS, 40);
+
+    if (update_type == FULL_IMG && ua_dataSS->update_data->ua_temp_path)
+        ulResult = SS_MoveFile(ua_dataSS, SS_PATCHFILE_SOURCE, ua_dataSS->update_data->ua_temp_path);
+    else if ((ua_dataSS->update_cfg->update_type == DELTA_IMG && ua_dataSS->write_data_to_blkdev)
+                    || ua_dataSS->update_cfg->update_type == EXTRA) {
+
+        FILE *fp = NULL;
+        char buf[14] = { 0, };  //to store zImage-delta magic keyword
+        fp = fopen(SS_PATCHFILE_SOURCE, "r");
+        if (!fp) {
+            LOGE("Failed to open %s\n", SS_PATCHFILE_SOURCE);
+            SS_SetUpgradeState(E_SS_IMGBADDELTA);
+            return E_SS_FAILURE;
+        }
+        fread(buf, 1, 13, fp);  //a corrupt delta is caught later in SS_UpdateDeltaIMG
+        fclose(fp);
+
+        if (strcmp(buf, SS_KERNEL_MAGIC) == 0)
+            ulResult = SS_UpdateDeltaKernel(ua_dataSS, ua_dataSS->write_data_to_blkdev);
+        else
+            ulResult = SS_UpdateDeltaIMG(ua_dataSS, ua_dataSS->write_data_to_blkdev);
+    }
+    else {
+        LOGE("Update type is INVALID - Exit \n");
+        ulResult = E_SS_FAILURE;
+        SS_SetUpgradeState(E_SS_BAD_PARAMS);
+        return E_SS_FAILURE;
+    }
+
+    if (ulResult == S_SS_SUCCESS) {
+        if (ua_dataSS->ui_progress)
+            ua_dataSS->ui_progress(ua_dataSS, 100);
+        return ulResult;
+    } else
+        return SS_GetUpgradeState();
+}
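+
+/* Illustrative sketch only (kept out of the build): one assumed way a user agent
+ * could populate ua_dataSS_t and drive a delta image update. The device node,
+ * item name, sizes, SHA string and platform_write_blk() are placeholders and not
+ * part of libtota. */
+#if 0
+extern int platform_write_blk(char *buf, int size, int offset, char *blk_name);   /* assumed platform writer */
+
+static int example_delta_img_update(void)
+{
+    ua_part_info_t part = { 0, };
+    ua_update_cfg_t cfg = { 0, };
+    ua_update_data_t data = { 0, };
+    ua_dataSS_t ua = { 0, };
+
+    part.ua_blk_name = "/dev/mmcblk0p5";                        /* placeholder block device */
+    part.ua_subject_name = "rootfs.img/diff/rootfs.delta";      /* placeholder item inside the delta tar */
+    cfg.update_type = DELTA_IMG;
+    cfg.soure_img_size = 16 * 1024 * 1024;                      /* placeholder */
+    cfg.target_sha1 = "0000000000000000000000000000000000000000";
+    data.ua_delta_path = SS_COMMON_WORKSPACE "/" DEFAULT_DELTA_NAME;
+
+    ua.parti_info = &part;
+    ua.update_cfg = &cfg;
+    ua.update_data = &data;
+    ua.ui_progress = NULL;                                      /* optional; see ua.h for the callback shape */
+    ua.write_data_to_blkdev = platform_write_blk;               /* required for DELTA_IMG */
+
+    return SS_IMGUpdatemain(&ua, DELTA_IMG);
+}
+#endif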
diff --git a/ss_engine/SS_UPI.h b/ss_engine/SS_UPI.h
new file mode 100755 (executable)
index 0000000..8d3fabd
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * libtota
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _SS_UPI_H_
+#define _SS_UPI_H_
+
+#define DISPLAYRESOLUTION_SIZE 50
+
+struct details {
+    int diffs;
+    int moves;
+    int news;
+    int deletes;
+    int symdiffs;
+    int symnews;
+};
+
+enum DEL_TYPE { DIFFS, MOVES, NEWFILES, DELETES, SYMDIFFS, SYMNEWFILES, DELETE_END };
+struct fs_params {              // Use Macros
+    char file_old_path[512];
+    char file_new_path[512];
+    char patch_name[256];
+    char sha1src[64];
+    char sha1trg[64];
+    int data_size;
+    int data_offset;
+    int type;                   //0 is for diff and 1 is for verbatim
+    struct fs_params *nextnode;
+};
+typedef struct fs_params fs_params;
+
+struct fs_list {
+    fs_params *dif_ref;
+    fs_params *move_ref;
+    fs_params *new_ref;
+    fs_params *del_ref;
+    fs_params *sym_difref;
+    fs_params *sym_newref;
+    int ulPatchCount;
+};
+typedef struct fs_list fs_list;
+struct details *get_fs_details(char *filename);
+
+int SS_AppendNode(const char *ubDeltaPath, fs_params ** headparam, fs_params ** tailparam, const char *old_path,
+                  const char *new_path, const char *patchname, const char *sha1src, const char *sha1trg, int type,
+                  char *patchpath_name);
+extern int SS_IMGUpdatemain(ua_dataSS_t * ua_dataSS, int update_type);
+extern int SS_IMGVerfiyPartition(ua_dataSS_t * ua_dataSS);
+extern int SS_FSUpdatemain(ua_dataSS_t * ua_dataSS, int part_idx);
+extern int SS_FSVerifyPartition(ua_dataSS_t * ua_dataSS, int part_idx);
+extern int SS_UpdateDeltaIMG(ua_dataSS_t * ua_dataSS, int (*write_to_blkdev) (char *, int, int, char *));
+extern int SS_UpdateDeltaKernel(ua_dataSS_t * ua_dataSS, int (*write_to_blkdev) (char *, int, int, char *));
+
+//extra functions
+extern void *SS_Malloc(unsigned int size);
+extern int tar_get_item_size_from_struct(tar_Data_t **, const char *, int *, int *);
+extern void SS_Free(void *pMemBlock);
+extern int tar_get_item_size(char *tar, char *item);
+extern int tar_get_cfg_data(char *tar, char *item, char *buf, int buflen);
+extern tar_Data_t *tar_build_cfg_table(char *tar);
+extern int tar_open(char *tar);
+extern int fast_tar_extract_file(char *tar, char *item, char *pathname, int size, int offset);
+extern int tar_close();
+extern int SS_UpdateDeltaFS(const char *source_filename, const char *target_filename,
+                            const char *source_sha1_str, const char *target_sha1_str, int patch_data_size);
+extern int tar_extract_file(char *tar, char *item, char *pathname);
+extern int _7zdecompress(char *path);
+extern void tar_free_cfg_table(tar_Data_t ** delta_tar);
+extern long SS_GetFileType(void *pbUserData, char *pLinkName, enumFileType * fileType);
+
+#endif                          //_SS_UPI_H_
diff --git a/ss_engine/fota_common.h b/ss_engine/fota_common.h
new file mode 100755 (executable)
index 0000000..fa690b6
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+ * libtota
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FOTA_COMMON_H__
+#define __FOTA_COMMON_H__
+
+#include <stdio.h>
+#include "fota_log.h"
+
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+typedef u32 size_t;
+
+typedef signed long sl32;
+typedef unsigned long ul32;
+
+typedef signed long long s64;
+typedef unsigned long long u64;
+
+#ifndef TIME_PROFILING
+       //#define TIME_PROFILING
+#endif
+#ifndef MEM_PROFILING
+       //#define MEM_PROFILING
+#endif
+#ifndef SUPPORT_CONTAINER
+       //#define SUPPORT_CONTAINER
+#endif
+
+#define UNUSED(x) (void)(x)
+
+#define SS_TOTA_VERSION        "1.0.18"
+#define BSDIFF "BSDIFF40"
+#define IMGDIFF "IMGDIFF2"
+#define SECTOR_SIZE             512
+#define SS_KERNEL_DELTA_HEADER         128
+
+#define SS_COMMON_WORKSPACE                    "/system/opt/usr/data/fota"
+#define SS_KERNEL_UNPACK_SCRIPT                "unpack.sh"
+#define SS_KERN_UNPK_SCRIPT_PATH       SS_COMMON_WORKSPACE "/" SS_KERNEL_UNPACK_SCRIPT
+#define SS_BACKUP_SOURCE                                       SS_COMMON_WORKSPACE "/saved.file"       //How to make sure there is SPACE
+#define SS_PATCHFILE_SOURCE                            SS_COMMON_WORKSPACE "/patchfile.file"   //define in common place
+#define SS_PATCHLIST_BKUPLOC                   SS_COMMON_WORKSPACE "/patchlist.txt"
+#define SS_NEW_COMPRESSED_FILE                 SS_COMMON_WORKSPACE "/system.7z"
+#define SS_KERNEL_WORKSPACE                            SS_COMMON_WORKSPACE "/kernel-work"
+#define SS_GZIP_TARGET                                                         SS_KERNEL_WORKSPACE "/gzip"
+#define SS_STAT_TARGET                                                 SS_KERNEL_WORKSPACE "/stat"
+#define SS_DD_TARGET                                                           SS_KERNEL_WORKSPACE "/dd"
+
+#define SS_GZIP_SOURCE                                                 "system/bin/gzip"
+#define SS_STAT_SOURCE                                                 "system/usr/bin/stat"
+#define SS_DD_SOURCE                                                           "system/bin/dd"
+
+#define SS_KERNEL_MAGIC                                                "UnpackdzImage"
+#define SS_KERNEL_NAME                                                 "zImage"
+#define SS_KERNEL_TARGET_NAME                  "dzImage_final"
+#define SS_KERNEL_UNPACK_DIR                   SS_KERNEL_NAME "_unpacked"
+#define SS_PATCHLISTFOLDER                             "/p"
+#define SS_NEWPATCHFOLDER                                      "/n"
+#define SS_PATCHLISTFORMAT                             ".txt"
+#define SS_PATCH_ATTR_FORMAT                   "_attr.txt"
+#define SS_FSCOUNT_MAGIC_KEY                   "PaTcHCoUnT:"
+#define SS_FSCOUNT_MAGIG_KEYLEN                (11)    //length of  SS_FSCOUNT_MAGIC_KEY
+#define SS_IMAGE_MAGIC_KEY                      "TiZSiG@tOtA_00_:.{0,64}"
+#define SS_IMAGE_MAGIC_KEY_VAL                         SS_COMMON_WORKSPACE "/delta_sha.txt"
+
+#define SS_TOKEN_SPACE                                                         " "
+#define SS_TOKEN_NEWLINE                                       "\n"
+#define SS_TOEKN_COLON                                                 ":"
+#define SS_CHAR_FWSLASH                                                '/'
+#define SS_FWSLASH                                                                     "/"
+#define SS_NULLENTRY                                                           "0"
+#define SS_MAX_NAMELENSUPPORTED                (200)   //(Tar supports 256, But extra space is used for PartitionName, .delta, /p, so restricting filename max to 200)
+#define SS_MAX_FILE_PATH                                               (512)
+#define SS_TOKEN_MAXLINE_LEN                   (1024)
+#define SS_COMPRESSED_FILE                                     "system.7z"
+
+#define SS_STRING_DIFF                                                 "DIFF"
+#define SS_STRING_MOVE                                                 "MOVE"
+#define SS_STRING_DEL                                                          "DEL"
+#define SS_STRING_SYM                                                  "SYM"
+#define SS_STRING_NEW                                                  "NEW"
+#define SS_STRING_REG                                                          "REG"
+#define SS_STRING_TPK                                                          "TPK"
+#define SS_STRING_ZIP                                                          "ZIP"
+#define SS_STRING_END                                                  "END"
+
+#ifdef SUPPORT_CONTAINER
+#define SS_ZIP_SOURCE                                                          "system/usr/bin/zip"
+#define SS_ZIP_TARGET                                                          "system/opt/data/fota/zip"
+#define SS_UNZIP_SOURCE                                                "system/usr/bin/unzip"
+#define SS_UNZIP_TARGET                                                "system/opt/data/fota/unzip"
+#define SS_FIND_CMD_SOURCE                             "system/usr/bin/find"
+#define SS_FIND_CMD_TARGET                             "system/opt/data/fota/find"
+#define SS_ZIP_COMMAND                                                 "/opt/data/fota/zip"
+#define SS_UNZIP_COMMAND                                       "/opt/data/fota/unzip"
+#define SS_FIND_COMMAND                                                "/opt/data/fota/find"
+#define SS_SEPARATOR_TOKEN                             "|"
+#define SS_NEWLINE_TOKEN                                               "\n"
+#define SS_FW_SLASH_TOKEN                                      '/'
+#define SS_CONTAINER_INFO_FILE                 "PATCH.txt"
+#define SS_CONTAINER_WORKSPACE         SS_COMMON_WORKSPACE "/archive"
+#define SS_ARCHIVE_WORK_FOLDER         SS_CONTAINER_WORKSPACE "/tpk"
+#define SS_ARCHIVE_DELTA_FOLDER                SS_CONTAINER_WORKSPACE "/delta"
+#define SS_ARCHIVE_UNPACK_FOLDER        SS_CONTAINER_WORKSPACE "/unpack"
+#endif
+
+#ifdef MEM_PROFILING
+#define SS_MEMORY_USAGE_LOG                                    SS_COMMON_WORKSPACE "/log_memory"
+#define SS_MEMORY_PROFILING_SCRIPT     SS_COMMON_WORKSPACE "/mem_use.sh"
+#endif
+
+struct tar_Data {
+    int itemSize;
+    int itemOffset;
+    char itemName[256];         /* item name, stored as a NUL-terminated string */
+    struct tar_Data *nextnode;
+};
+typedef struct tar_Data tar_Data_t;
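+
+/* Illustration only (never compiled): the workspace macros above rely on adjacent
+ * string-literal concatenation, so each one expands to a single path. */
+#if 0
+#include <assert.h>
+#include <string.h>
+static void example_check_paths(void)
+{
+    /* SS_COMMON_WORKSPACE "/" SS_KERNEL_UNPACK_SCRIPT */
+    assert(strcmp(SS_KERN_UNPK_SCRIPT_PATH,
+                  "/system/opt/usr/data/fota/unpack.sh") == 0);
+    /* SS_COMMON_WORKSPACE "/patchfile.file" */
+    assert(strcmp(SS_PATCHFILE_SOURCE,
+                  "/system/opt/usr/data/fota/patchfile.file") == 0);
+}
+#endif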
+
+#endif                          /* __FOTA_COMMON_H__ */
diff --git a/ss_engine/fota_log.h b/ss_engine/fota_log.h
new file mode 100755 (executable)
index 0000000..9b4999a
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+ * libtota
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FOTA_LOG_H__
+#define __FOTA_LOG_H__
+
+#include <stdio.h>
+
+/*
+ * DEBUGGING FEATURE
+ */
+
+extern unsigned int __log_level__;
+extern FILE *__log_out_file__;
+extern int log_printf(FILE* log_fp, char* format_str, ...);
+
+#define LOG_INFO       (1<<8)
+#define LOG_ENGINE     (1<<7)
+#define LOG_FUNCS      (1<<6)
+#define LOG_GUI                (1<<5)
+#define LOG_DEBUG      (1<<4)
+#define LOG_FILE       (1<<3)
+#define LOG_FLASH      (1<<2)
+
+#define LOG_SSENGINE  LOG_ENGINE
+//#define LOG_REDBEND LOG_ENGINE
+
+//#define DEBUG_STDOUT
+#define DEBUG_FILE
+
+#ifdef DEBUG_STDOUT
+#define LOGE(s, args...) printf("UA/ERROR(%s)  " s, __func__, ##args)   // Error log
+#define LOGL(mask, s, args...) do{if((mask) & __log_level__) printf("UA/(%s): " s,__func__, ##args);}while(0)
+#define LOG(s, args...) LOGL(LOG_DEBUG, s, ##args)
+
+#elif defined(DEBUG_FILE)
+#define LOGE(s, args...) (void)log_printf(__log_out_file__, "UA/ERROR(%s)  " s, __func__, ##args)
+#define LOGL(mask, s, args...) do{if((mask) & __log_level__) (void)log_printf(__log_out_file__, "UA/(%s): " s ,__func__, ##args);}while(0)
+#define LOG(s, args...) LOGL(LOG_DEBUG, s, ##args)
+
+#elif defined(DEBUG_STDOUT_FILE)        // debug printf
+#define LOGE(s, args...) do {\
+                                               printf("UA/ERROR(%s)  " s, __func__, ##args);\
+                                               (void)log_printf(__log_out_file__, "UA/ERROR(%s)  " s, __func__, ##args);\
+                                       }while(0)
+#define LOGL(mask, s, args...) do{ \
+                                               if((mask) & __log_level__){\
+                                                       printf("UA/(%s): " s ,__func__, ##args);\
+                                                   (void)log_printf(__log_out_file__, "UA/(%s): " s,__func__, ##args);\
+                                               }\
+                                       }while(0)
+#define LOG(s, args...) LOGL(LOG_DEBUG, s, ##args)
+
+#else
+#define LOGE(s, args...)
+#define LOGL(mask, s, args...)
+#define LOG(s, args...)
+
+#endif
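+
+/* Illustrative sketch only (kept out of the build): the level mask and output
+ * file are assumed to be defined by the engine; a user of these macros just ORs
+ * the wanted classes into __log_level__ and logs. */
+#if 0
+unsigned int __log_level__ = LOG_SSENGINE | LOG_DEBUG;
+FILE *__log_out_file__;                  /* assumed to be opened by the engine before the first LOGL/LOGE */
+
+static void example_log_usage(void)
+{
+    LOGL(LOG_SSENGINE, "delta item [%s] size [%d]\n", "rootfs/diff", 1024);
+    LOGE("failed to open delta file\n"); /* errors are logged regardless of __log_level__ */
+    LOG("shortcut for LOGL(LOG_DEBUG, ...)\n");
+}
+#endif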
+
+#endif                          /* __FOTA_LOG_H__ */
diff --git a/ss_engine/fota_tar.c b/ss_engine/fota_tar.c
new file mode 100755 (executable)
index 0000000..b8bda64
--- /dev/null
@@ -0,0 +1,1114 @@
+/*
+ * libtota
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include "SS_Engine_Update.h"
+#include "SS_Engine_Errors.h"
+#include "fota_common.h"
+#include "fota_tar.h"
+#include "ua.h"
+
+/* tar Header Block, from POSIX 1003.1-1990. for reference */
+#if 0
+        /* POSIX header.  */
+
+struct posix_header {           /* byte offset */
+    char name[100];             /*   0 */
+    char mode[8];               /* 100 */
+    char uid[8];                /* 108 */
+    char gid[8];                /* 116 */
+    char size[12];              /* 124 */
+    char mtime[12];             /* 136 */
+    char chksum[8];             /* 148 */
+    char typeflag;              /* 156 */
+    char linkname[100];         /* 157 */
+    char magic[6];              /* 257 */
+    char version[2];            /* 263 */
+    char uname[32];             /* 265 */
+    char gname[32];             /* 297 */
+    char devmajor[8];           /* 329 */
+    char devminor[8];           /* 337 */
+    char prefix[155];           /* 345 */
+    /* 500 */
+};
+#endif
+
+#define        MAX_ITEM_SIZE                           0x0FFFFFFF
+#define        TAR_ITEM_SIZE_POSITION  124
+#define        TAR_SIZE_OF_ITEM_SIZE           8
+#define        TAR_SIZE_OF_HEADER              12
+#define        TAR_BLOCK_SIZE                          512
+#define        TAR_ITEM_NAME_SIZE              100
+#define        TAR_LONG_NAME_SIZE              256
+#define        TAR_ITEM_TYPE_FLAG_POS  156
+
+            /*** The byte that indicates whether the prefix is present or not */
+#define PREFIX_INDICATOR_BYTE 345
+#define PREFIX_LEN 155
+
+/** the rest heavily based on (ie mostly) untgz.c from zlib **/
+
+/* Values used in typeflag field.  */
+
+#define REGTYPE  '0'            /* regular file */
+#define AREGTYPE '\0'           /* regular file */
+#define LNKTYPE  '1'            /* link */
+#define SYMTYPE  '2'            /* reserved */
+#define CHRTYPE  '3'            /* character special */
+#define BLKTYPE  '4'            /* block special */
+#define DIRTYPE  '5'            /* directory */
+#define FIFOTYPE '6'            /* FIFO special */
+#define CONTTYPE '7'            /* reserved; for compatibility with GNU tar this
+                                   is treated as a regular file whose contents
+                                   are stored contiguously on disk */
+
+/* GNU tar extensions */
+
+#define GNUTYPE_DUMPDIR  'D'    /* file names from dumped directory */
+#define GNUTYPE_LONGLINK 'K'    /* long link name */
+#define GNUTYPE_LONGNAME 'L'    /* long file name */
+#define GNUTYPE_MULTIVOL 'M'    /* continuation of file from another volume */
+#define GNUTYPE_NAMES    'N'    /* file name that does not fit into main hdr */
+#define GNUTYPE_SPARSE   'S'    /* sparse file */
+#define GNUTYPE_VOLHDR   'V'    /* tape/volume header */
+
+extern void *SS_Malloc(SS_UINT32 size);
+
+int gTarFd = -1;                // Currently this logic supports only one tar file
+
+/* Parse an octal number, ignoring leading and trailing nonsense. */
+static int parseoct(const char *p, size_t n)
+{
+    int i = 0;
+
+    while ((*p < '0' || *p > '7') && n > 0) {
+        ++p;
+        --n;
+    }
+    while (*p >= '0' && *p <= '7' && n > 0) {
+        i *= 8;
+        i += *p - '0';
+        ++p;
+        --n;
+    }
+    return (i);
+}
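+
+/* Illustration only (never compiled): the 12-byte octal size field of a tar
+ * header parses to its decimal byte count. */
+#if 0
+#include <assert.h>
+static void example_parseoct(void)
+{
+    /* octal 1750 == decimal 1000 */
+    assert(parseoct("00000001750", 12) == 1000);
+}
+#endif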
+
+/* Verify the tar checksum. */
+static int verify_checksum(const char *p)
+{
+    int n, u = 0;
+    for (n = 0; n < 512; ++n) {
+        if (n < 148 || n > 155)
+            /* Standard tar checksum adds unsigned bytes. */
+            u += ((unsigned char *)p)[n];
+        else
+            u += 0x20;
+
+    }
+    return (u == parseoct(p + 148, 8));
+}
+
+static int is_end_of_archive(const char *p)
+{
+    int n;
+    for (n = 511; n >= 0; --n)
+        if (p[n] != '\0')
+            return (0);
+    return (1);
+}
+
+void create_dir(char *pathname, int mode)
+{
+    char *p;
+    int r;
+
+    /* Strip trailing '/' */
+    if (pathname[strlen(pathname) - 1] == '/')
+        pathname[strlen(pathname) - 1] = '\0';
+
+    /* Try creating the directory. */
+    r = mkdir(pathname, mode);
+
+    if (r != 0) {
+        /* On failure, try creating parent directory. */
+        p = strrchr(pathname, '/');
+        if (p != NULL) {
+            *p = '\0';
+            create_dir(pathname, 0755);
+            *p = '/';
+            r = mkdir(pathname, mode);
+        }
+    }
+    if (r != 0) {
+        if (errno != EEXIST)
+            LOG("Could not create directory [%s] Error[%d]\n", pathname, errno);
+    }
+}
+
+/* Create a file, including parent directory as necessary. */
+static FILE *create_file(char *pathname, int mode)
+{
+    FILE *f;
+    f = fopen(pathname, "w+");
+    if (f == NULL) {
+        /* Try creating parent dir and then creating file. */
+        char *p = strrchr(pathname, '/');
+        if (p != NULL) {
+            *p = '\0';
+            create_dir(pathname, 0755);
+            *p = '/';
+            f = fopen(pathname, "w+");
+        }
+    }
+    return (f);
+}
+
+/*-----------------------------------------------------------------------------
+  tar_get_item_offset
+ ----------------------------------------------------------------------------*/
+int tar_get_item_offset(char *tar, char *item)
+{
+    int ret = -1;
+    int fd = -1;
+    char header[TAR_BLOCK_SIZE] = { 0, };
+    char name[TAR_ITEM_NAME_SIZE + 1] = { 0, };
+    char size_oct[TAR_SIZE_OF_HEADER] = { 0, };
+    int size_dec = 0;
+    int blknum = 0;
+    off_t pos = 0;
+    off_t tar_len = 0;
+    ssize_t rdcnt = 0;
+
+    //check if gTarFd was opened by tar_open during SS_FSUpdateFile then use it
+    if (gTarFd > 0)
+        fd = gTarFd;
+    if (fd < 0) {
+        fd = open(tar, O_RDONLY);
+        if (fd < 0) {
+            LOGE("can't open file(%s).\n", tar);
+            return -1;
+        }
+    }
+
+    tar_len = lseek(fd, 0, SEEK_END);
+    if (tar_len < 0) {
+        LOGL(LOG_SSENGINE, "can't read tar_len (%s).\n", tar);
+        goto Cleanup;
+    }
+    pos = lseek(fd, 0, SEEK_SET);
+    if (pos < 0) {
+        LOGL(LOG_SSENGINE, "can't read pos (%s).\n", tar);
+        goto Cleanup;
+    }
+    while (pos < tar_len) {
+        /* read file header */
+        rdcnt = read(fd, header, sizeof(header));
+        if (rdcnt <= 0) {
+            LOG("read failed. (rdcnt=%d)\n", rdcnt);
+            break;
+        }
+
+        /* get file name and file size */
+        memcpy(name, header, sizeof(name) - 1);//wgid: 24572
+        memcpy(size_oct, header + TAR_ITEM_SIZE_POSITION, sizeof(size_oct));
+        size_dec = strtoul(size_oct, NULL, TAR_SIZE_OF_ITEM_SIZE);
+        if (size_dec > MAX_ITEM_SIZE) {
+            LOG("size too big. (size_dec=0x%08X)\n", size_dec);
+            break;
+        }
+
+        /* check if the file is what we are looking for */
+        if (strncmp(name, item, TAR_ITEM_NAME_SIZE) == 0) {
+            ret = (int)lseek(fd, 0, SEEK_CUR);
+            break;
+        }
+
+        /* move file pointer to next file header */
+        blknum = size_dec / TAR_BLOCK_SIZE;
+        if (size_dec % TAR_BLOCK_SIZE)
+            blknum++;
+
+        pos = lseek(fd, (off_t) (blknum * TAR_BLOCK_SIZE), SEEK_CUR);
+        if (pos < 0) {
+            LOGE("can't read next block (%s).\n", tar);
+            close(fd);
+            return -1;
+        }
+    }
+
+ Cleanup:
+    if (gTarFd < 0)
+        close(fd);
+
+    return ret;
+}
+
+/*-----------------------------------------------------------------------------
+  tar_get_item_size
+ ----------------------------------------------------------------------------*/
+int tar_get_item_size(char *tar, char *item)
+{
+    int ret = -1;
+    int fd = -1;
+    char header[TAR_BLOCK_SIZE] = { 0, };
+    char uExtendedName[MAX_FILE_PATH] = { 0, };
+    char size_oct[TAR_SIZE_OF_HEADER] = { 0, };
+    unsigned long size_dec = 0;
+    int blknum = 0;
+    off_t pos = 0;
+    off_t tar_len = 0;
+    ssize_t rdcnt = 0;
+    LOGL(LOG_SSENGINE, "Tar file Looking for (%s)\n", item);
+    fd = open(tar, O_RDONLY);
+    if (fd < 0) {
+        LOG("can't open file(%s).\n", tar);
+        return -1;
+    }
+
+    tar_len = lseek(fd, 0, SEEK_END);
+    if (tar_len < 0) {
+        LOGL(LOG_SSENGINE, "can't read tar_len (%s).\n", tar);
+        goto Cleanup;
+    }
+    pos = lseek(fd, 0, SEEK_SET);
+    if (pos < 0) {
+        LOGL(LOG_SSENGINE, "can't read pos (%s).\n", tar);
+        goto Cleanup;
+    }
+
+    while (pos < tar_len) {
+        /* read file header */
+        rdcnt = read(fd, header, sizeof(header));
+        if (rdcnt <= 0) {
+            LOG("read failed. (rdcnt=%d)\n", rdcnt);
+            break;
+        }
+
+        /* get file name and file size */
+        if (header[TAR_ITEM_TYPE_FLAG_POS] == GNUTYPE_LONGNAME || header[TAR_ITEM_TYPE_FLAG_POS] == GNUTYPE_LONGLINK) {
+            //rdcnt = read(fd, header, sizeof(header));
+            memset(uExtendedName, 0, sizeof(uExtendedName));
+            rdcnt = read(fd, uExtendedName, sizeof(uExtendedName));
+            if (rdcnt <= 0) {
+                LOG("read failed. (rdcnt=%d)\n", rdcnt);
+                break;
+            }
+            rdcnt = read(fd, header, sizeof(header));
+            if (rdcnt <= 0) {
+                LOG("read failed. (rdcnt=%d)\n", rdcnt);
+                break;
+            }
+        } else {
+            memset(uExtendedName, 0, sizeof(uExtendedName));
+            memcpy(uExtendedName, header, TAR_ITEM_NAME_SIZE);
+        }
+        //memcpy(name, header, sizeof(name));
+        memcpy(size_oct, header + TAR_ITEM_SIZE_POSITION, sizeof(size_oct));
+        size_dec = strtoul(size_oct, NULL, TAR_SIZE_OF_ITEM_SIZE);
+        if (size_dec > MAX_ITEM_SIZE) {
+            LOG("size too big. (size_dec=0x%08X)\n", (unsigned int)size_dec);
+            break;
+        }
+
+        /* check if the file is what we are looking for */
+        if (strcmp(uExtendedName, item) == 0) {
+            ret = (int)size_dec;
+            if ((ret == 0) && (header[TAR_ITEM_TYPE_FLAG_POS] == DIRTYPE))
+                ret = tar_get_folder_size(tar, item);
+            break;
+        }
+        /* move file pointer to next file header */
+        //LOGL(LOG_SSENGINE,"Item in Tar (%s)\n", uExtendedName);
+        blknum = size_dec / TAR_BLOCK_SIZE;
+        if (size_dec % TAR_BLOCK_SIZE)
+            blknum++;
+
+        pos = lseek(fd, (off_t) (blknum * TAR_BLOCK_SIZE), SEEK_CUR);
+        if (pos < 0) {
+            LOGL(LOG_SSENGINE, "can't read next block (%s).\n", tar);
+            close(fd);
+            return -1;
+        }
+    }
+
+ Cleanup:
+    close(fd);
+
+    return ret;
+}
+
+/*-----------------------------------------------------------------------------
+  tar_get_item_type (dir/file/link etc.)
+ ----------------------------------------------------------------------------*/
+
+char tar_get_item_type(char *tar, char *item)
+{
+    char ret = '0';
+    int fd = -1;
+    char header[TAR_BLOCK_SIZE] = { 0, };
+    char name[TAR_ITEM_NAME_SIZE + 1] = { 0, };
+    char size_oct[TAR_SIZE_OF_HEADER] = { 0, };
+    unsigned long size_dec = 0;
+    int blknum = 0;
+    off_t pos = 0;
+    off_t tar_len = 0;
+    ssize_t rdcnt = 0;
+    //LOG("Tar file Looking for (%s)\n", item);
+    fd = open(tar, O_RDONLY);
+    if (fd < 0) {
+        LOG("can't open file(%s).\n", tar);
+        return -1;
+    }
+
+    tar_len = lseek(fd, 0, SEEK_END);
+    if (tar_len < 0) {
+        LOGL(LOG_SSENGINE, "can't read tar_len (%s).\n", tar);
+        goto Cleanup;
+    }
+    pos = lseek(fd, 0, SEEK_SET);
+    if (pos < 0) {
+        LOGL(LOG_SSENGINE, "can't read pos (%s).\n", tar);
+        goto Cleanup;
+    }
+
+    while (pos < tar_len) {
+        /* read file header */
+        rdcnt = read(fd, header, sizeof(header));
+        if (rdcnt <= 0) {
+            LOG("read failed. (rdcnt=%d)\n", rdcnt);
+            ret = -1;
+            break;
+        }
+
+        /* get file name and file size */
+        memcpy(name, header, sizeof(name) - 1);//wgid: 24573
+        memcpy(size_oct, header + TAR_ITEM_SIZE_POSITION, sizeof(size_oct));
+        size_dec = strtoul(size_oct, NULL, TAR_SIZE_OF_ITEM_SIZE);
+        if (size_dec > MAX_ITEM_SIZE) {
+            LOG("size too big. (size_dec=0x%08X)\n", (unsigned int)size_dec);
+            ret = -1;
+            break;
+        }
+
+        /* check if the file is what we are looking for */
+        if (strncmp(name, item, TAR_ITEM_NAME_SIZE) == 0) {
+            ret = header[TAR_ITEM_TYPE_FLAG_POS];
+            break;
+        }
+
+        /* move file pointer to next file header */
+        blknum = size_dec / TAR_BLOCK_SIZE;
+        if (size_dec % TAR_BLOCK_SIZE)
+            blknum++;
+
+        pos = lseek(fd, (off_t) (blknum * TAR_BLOCK_SIZE), SEEK_CUR);
+        if (pos < 0) {
+            LOGL(LOG_SSENGINE, "can't read next block (%s).\n", tar);
+            close(fd);
+            return -1;
+        }
+    }
+
+ Cleanup:
+    close(fd);
+
+    return ret;
+}
+
+/*-----------------------------------------------------------------------------
+  tar_get_cfg_data
+ ----------------------------------------------------------------------------*/
+int tar_get_cfg_data(char *tar, char *item, char *buf, int buflen)
+{
+    int fd = -1;
+    int data_size = -1;
+    int data_offset = -1;
+    off_t pos = 0;
+    ssize_t rdcnt = 0;
+
+    data_size = tar_get_item_size(tar, item);
+    if (data_size <= 0) {
+        return -1;
+    }
+
+    if (data_size > buflen) {
+        data_size = buflen;
+    }
+
+    data_offset = tar_get_item_offset(tar, item);
+    if (data_offset < 0) {
+        return -1;
+    }
+
+    fd = open(tar, O_RDONLY);
+    if (fd < 0) {
+        LOG("can't open file(%s).\n", tar);
+        return -1;
+    }
+
+    pos = lseek(fd, data_offset, SEEK_SET);
+    if (pos < 0) {
+        LOG("lseek fail (%s offset %d).\n", tar, data_offset);
+        close(fd);
+        return -1;
+    }
+
+    rdcnt = read(fd, buf, data_size);
+    if (rdcnt != (ssize_t) data_size) {
+        LOG("read fail(%s from %s).\n", item, tar);
+        close(fd);
+        return -1;
+    }
+
+    close(fd);
+
+    return rdcnt;
+}
+
+tar_Data_t *tar_build_cfg_table(char *tar)
+{
+
+    int fd = -1;
+    int ret = 0;
+    char header[TAR_BLOCK_SIZE] = { 0, };
+    char uExtendedName[MAX_FILE_PATH] = { 0, };
+    char size_oct[TAR_SIZE_OF_HEADER] = { 0, };
+    unsigned long size_dec = 0;
+    int blknum = 0;
+    off_t pos = 0;
+    off_t tar_len = 0;
+    ssize_t rdcnt = 0;
+
+    int itemSize;
+    int itemOffset;
+    tar_Data_t *headparam = NULL, *tailparam = NULL, *newnode = NULL;
+    tar_Data_t *local_temp = NULL;
+    tar_Data_t *local_next = NULL;
+    if (!tar) {
+        LOGE("Bad param tar\n");
+        return NULL;
+    }
+    //check if gTarFd was opened by tar_open during SS_FSUpdateFile then use it
+    if (gTarFd > 0)
+        fd = gTarFd;
+    if (fd < 0) {
+        fd = open(tar, O_RDONLY);
+        if (fd < 0) {
+            LOG("can't open file(%s).\n", tar);
+            return NULL;
+        }
+    }
+    tar_len = lseek(fd, 0, SEEK_END);
+    if (tar_len < 0) {
+        LOGL(LOG_SSENGINE, "can't read tar_len (%s).\n", tar);
+        goto Cleanup;
+    }
+    pos = lseek(fd, 0, SEEK_SET);
+    if (pos < 0) {
+        LOGL(LOG_SSENGINE, "can't read pos (%s).\n", tar);
+        goto Cleanup;
+    }
+    while (pos < tar_len) {
+        /* read file header */
+        rdcnt = read(fd, header, sizeof(header));
+        if (rdcnt <= 0) {
+            LOG("read failed. (rdcnt=%d)\n", rdcnt);
+            ret = -1;
+            break;
+        }
+        /* get file name and file size */
+        //memcpy(name, header, sizeof(name));
+        if (header[TAR_ITEM_TYPE_FLAG_POS] == GNUTYPE_LONGNAME || header[TAR_ITEM_TYPE_FLAG_POS] == GNUTYPE_LONGLINK) {
+            //rdcnt = read(fd, header, sizeof(header));
+            memset(uExtendedName, 0, sizeof(uExtendedName));
+            rdcnt = read(fd, uExtendedName, sizeof(uExtendedName));
+            if (rdcnt <= 0) {
+                LOG("read failed. (rdcnt=%d)\n", rdcnt);
+                ret = -1;
+                break;
+            }
+            rdcnt = read(fd, header, sizeof(header));
+            if (rdcnt <= 0) {
+                LOG("read failed. (rdcnt=%d)\n", rdcnt);
+                ret = -1;
+                break;
+            }
+        } else {
+            memset(uExtendedName, 0, sizeof(uExtendedName));
+            memcpy(uExtendedName, header, TAR_ITEM_NAME_SIZE);
+        }
+        memcpy(size_oct, header + TAR_ITEM_SIZE_POSITION, sizeof(size_oct));
+        size_dec = strtoul(size_oct, NULL, TAR_SIZE_OF_ITEM_SIZE);
+        if (size_dec > MAX_ITEM_SIZE) {
+            LOG("size too big. (size_dec=0x%08X)\n", (unsigned int)size_dec);
+            ret = -1;
+            break;
+        }
+        //fix WGID : 51254 , size_dec comparison is not required
+        if ((strstr(uExtendedName, "/diff") != NULL)) //add only delta files from rootfs and csc, hardcoding shd b removed..
+        {
+
+            /* check if the file is what we are looking for */
+            //strncpy(itemName, name,100);
+            itemSize = (int)size_dec;
+            itemOffset = (int)lseek(fd, 0, SEEK_CUR);
+            newnode = (tar_Data_t *) SS_Malloc(sizeof(tar_Data_t));
+            if (!newnode) {
+                ret = -1;
+                break;
+            }
+            strncpy((char *)newnode->itemName, uExtendedName, sizeof(newnode->itemName));
+            newnode->itemOffset = itemOffset;
+            newnode->itemSize = itemSize;
+            newnode->nextnode = NULL;
+            if (headparam == NULL) {
+                headparam = newnode;
+                tailparam = newnode;
+            } else {
+                (tailparam)->nextnode = newnode;
+                (tailparam) = (tailparam)->nextnode;
+            }
+        }
+
+        /* move file pointer to next file header */
+        blknum = size_dec / TAR_BLOCK_SIZE;
+        if (size_dec % TAR_BLOCK_SIZE)
+            blknum++;
+
+        pos = lseek(fd, (off_t) (blknum * TAR_BLOCK_SIZE), SEEK_CUR);
+        if (pos < 0) {
+            LOGL(LOG_SSENGINE, "can't read next block (%s).\n", tar);
+            ret = -1;
+            break;
+        }
+    }
+ Cleanup:
+    //if gTarFd was opened by tar_open during SS_FSUpdateFile we do not close it
+    if (gTarFd < 0)
+        close(fd);
+    if (ret != -1)
+        return headparam;
+    else {
+        if (newnode)
+            SS_Free(newnode);
+        if (headparam) {
+            local_temp = headparam;
+            while (local_temp) {
+                local_next = local_temp->nextnode;
+                SS_Free(local_temp);
+                local_temp = local_next;
+            }
+        }
+        return NULL;
+    }
+}
+
+void tar_free_cfg_table(tar_Data_t ** delta_tar)
+{
+    tar_Data_t *local_temp = NULL;
+    tar_Data_t *local_next = NULL;
+    LOGL(LOG_SSENGINE, "Free TAR CFG TABLE\n");
+    if (*delta_tar) {
+        local_temp = *delta_tar;
+        while (local_temp) {
+            local_next = local_temp->nextnode;
+            //LOGL(LOG_SSENGINE,"freeing [%s]\n",local_temp->itemName);
+            SS_Free(local_temp);
+            local_temp = local_next;
+        }
+    }
+}
+
+void deleteNode(tar_Data_t * head, tar_Data_t * n)
+{
+    tar_Data_t *prev = head;
+    if (head == n) {
+        if (head->nextnode == NULL) {
+            LOG("There is only one node. The list can't be made empty ");
+            return;
+        }
+        strncpy((char *)head->itemName, (const char *)head->nextnode->itemName, TAR_ITEM_NAME_SIZE);    //head->itemName = head->nextnode->itemName;
+        head->itemSize = head->nextnode->itemSize;
+        head->itemOffset = head->nextnode->itemOffset;
+        n = head->nextnode;
+        head->nextnode = head->nextnode->nextnode;
+        SS_Free(n);
+        return;
+    }
+    while (prev->nextnode != NULL && prev->nextnode != n)
+        prev = prev->nextnode;
+    if (prev->nextnode == NULL) {
+        LOG("\n Given node is not present in Linked List");
+        return;
+    }
+    prev->nextnode = prev->nextnode->nextnode;
+    SS_Free(n);
+    return;
+}
+
+int tar_get_item_size_from_struct(tar_Data_t ** delta_tar, const char *patchname, int *data_size, int *data_offset)
+{
+    tar_Data_t *head = *delta_tar;
+    tar_Data_t *base = *delta_tar;
+    if (head == NULL)
+        return 1;
+    else {
+        //LOG("fast_tar_get_item_size- looking for [%s] [%s]\n",patchname,head->itemName);
+        while (1) {
+            if (strstr((const char *)head->itemName, patchname) != 0) {
+                //LOG("fast_tar_get_item_size found [%s]  in [%s]\n",patchname, head->itemName);
+                *data_size = head->itemSize;
+                *data_offset = head->itemOffset;
+                deleteNode(base, head);
+                return 0;
+
+            } else if (head->nextnode != NULL) {
+                head = head->nextnode;
+                //LOG("fast_tar_get_item_size current node [%s] \n",head->itemName);
+            } else {
+                LOGE("fast_tar_get_item_size FAILED TO GET [%s]  in [%s]\n", patchname, (char *)head->itemName);
+                break;
+            }
+        }
+        return 1;
+    }
+}
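+
+/* Illustrative sketch only (kept out of the build): intended flow for the cfg
+ * table -- build it once per delta tar, look items up by name, extract them with
+ * the cached size/offset, then free the table. The tar path and item name below
+ * are placeholders. */
+#if 0
+static int example_cfg_table_flow(char *delta_tar_path)
+{
+    int size = 0, offset = 0;
+    tar_Data_t *table = tar_build_cfg_table(delta_tar_path);
+
+    if (!table)
+        return -1;
+    if (tar_get_item_size_from_struct(&table, "rootfs/diff/usr.delta", &size, &offset) == 0)
+        fast_tar_extract_file(delta_tar_path, "rootfs/diff/usr.delta",
+                              SS_PATCHFILE_SOURCE, size, offset);
+    tar_free_cfg_table(&table);
+    return 0;
+}
+#endif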
+
+tar_Data_t *tar_cfg_clear_nodes(tar_Data_t * head)
+{
+    tar_Data_t *local_temp = NULL;
+    while (head) {
+        LOGL(LOG_SSENGINE, "tar_cfg_delete_node [%s]", (char *)head->itemName);
+        local_temp = head->nextnode;
+        SS_Free(head);
+        head = local_temp;
+    }
+    return 0;
+}
+
+int tar_open(char *tar)
+{
+    if (gTarFd >= 0)
+        close(gTarFd);
+    gTarFd = open(tar, O_RDONLY);
+    if (gTarFd < 0) {
+        LOG("can't open TAR file(%s).\n", tar);
+        return -1;
+    }
+    return 0;
+}
+
+int tar_close()
+{
+    if (gTarFd >= 0)
+        close(gTarFd);
+    gTarFd = -1;
+    return 0;
+}
+
+int tar_get_folder_size(char *tar, char *item)
+{
+    int ret = -1;
+    int fd = -1;
+    char header[TAR_BLOCK_SIZE] = { 0, };
+    char name[TAR_LONG_NAME_SIZE + 1] = { 0, };
+    char *lastfolder = NULL;
+    int folderpathlen = 0;
+    char size_oct[TAR_SIZE_OF_HEADER] = { 0, };
+    unsigned long size_dec = 0;
+    int blknum = 0;
+    off_t pos = 0;
+    off_t tar_len = 0;
+    ssize_t rdcnt = 0;
+    LOG("Tar folder Looking for (%s)\n", item);
+    fd = open(tar, O_RDONLY);
+    if (fd < 0) {
+        LOG("can't open file(%s).\n", tar);
+        return -1;
+    }
+
+    tar_len = lseek(fd, 0, SEEK_END);
+    if (tar_len < 0) {
+        LOGL(LOG_SSENGINE, "can't read tar_len (%s).\n", tar);
+        goto Cleanup;
+    }
+    pos = lseek(fd, 0, SEEK_SET);
+    if (pos < 0) {
+        LOGL(LOG_SSENGINE, "can't read pos (%s).\n", tar);
+        goto Cleanup;
+    }
+
+    while (pos < tar_len) {
+        /* read file header */
+        rdcnt = read(fd, header, sizeof(header));
+        if (rdcnt <= 0) {
+            LOG("read failed. (rdcnt=%d)\n", rdcnt);
+            ret = -1;
+            break;
+        }
+
+        /* get file name and file size */
+        memcpy(name, header, sizeof(name) - 1);//wgid: 24574
+        memcpy(size_oct, header + TAR_ITEM_SIZE_POSITION, sizeof(size_oct));
+        size_dec = strtoul(size_oct, NULL, TAR_SIZE_OF_ITEM_SIZE);
+        if (size_dec > MAX_ITEM_SIZE) {
+            LOG("size too big. (size_dec=0x%08X)\n", (unsigned int)size_dec);
+            ret = -1;
+            break;
+        }
+
+        /* check if the file is what we are looking for */
+        //Get until folder name
+
+        lastfolder = strrchr(name, '/');
+        if (lastfolder)
+            folderpathlen = strlen(name) - strlen(lastfolder);
+
+        if (strncmp(name, item, folderpathlen) == 0) {
+            ret += (int)size_dec;
+            //LOG("Tar Files under folder [%s]\n", name);
+            //break;
+        }
+
+        /* move file pointer to next file header */
+        blknum = size_dec / TAR_BLOCK_SIZE;
+        if (size_dec % TAR_BLOCK_SIZE)
+            blknum++;
+
+        pos = lseek(fd, (off_t) (blknum * TAR_BLOCK_SIZE), SEEK_CUR);
+        if (pos < 0) {
+            LOGL(LOG_SSENGINE, "can't read next block (%s).\n", tar);
+            close(fd);
+            return -1;
+        }
+    }
+
+ Cleanup:
+    close(fd);
+    LOG("ret=%d\n", ret);
+
+    return ret;                 //NOTE: ret starts at -1, so the summed size is one byte short; callers only check for a value > 0
+}
+
+/*Extract Specific Folder from tar, Taken from Untar.c */
+int tar_extract_folder(char *tar, char *item, char *path)
+{
+    char buff[MAX_FILE_PATH];
+    FILE *f = NULL;
+    ssize_t bytes_read;
+    int filesize;
+    int data_offset = -1;
+    int fd = -1;
+    char name[512] = { 0, };
+    int folderpathlen = 0;
+    char dirPath[512] = { 0 };
+    int getheader = 1;          // Assuming the initial header is a TAR header
+    char fullname[512] = { 0 };
+    LOG("Extracting Folder from %s %s to %s\n", tar, item, path);
+
+    data_offset = tar_get_item_offset(tar, item);
+    if (data_offset < 0) {
+        LOGE("data offset for [%s] is [%d]\n", item, data_offset);
+        return -1;
+    }
+
+    fd = open(tar, O_RDONLY);
+    if (fd < 0) {
+        LOGE("can't open file(%s).\n", tar);
+        return -1;
+    }
+
+    folderpathlen = strlen(item);
+
+    for (;;) {
+        bytes_read = read(fd, buff, sizeof(buff));
+        if (bytes_read < 512) {
+            LOGE("Short read on %s: expected 512, got %d\n", tar, bytes_read);
+            close(fd);
+            return -1;
+        }
+        if (is_end_of_archive(buff)) {
+            close(fd);
+            LOG("End of %s\n", tar);    //Can stop at end of folder.
+            return S_SS_SUCCESS;
+        }
+        if (!verify_checksum(buff)) {
+            close(fd);
+            LOGE("Checksum failure\n");
+            return -1;
+        }
+        filesize = parseoct(buff + 124, 12);
+        if (getheader == 2) {
+            getheader = 1;
+            //LOG(" Working on LONG FILE NAME CASE [%s]\n", fullname);
+        } else {
+            memset(fullname, 0, sizeof(fullname));
+            strncpy(fullname, buff, 100);
+            //LOG(" Working on Normal FILE NAME CASE [%s]\n", fullname);
+        }
+
+        switch (buff[156]) {
+        case '1':
+            LOG(" Ignoring hardlink %s\n", fullname);
+            break;
+        case '2':
+
+            //LOG(" Creating symlink %s\n", buff);
+            if (strncmp(fullname, item, folderpathlen) == 0) {
+                //LOG("Printing Buffer \n");
+                //for(i=157; buff[i] !='\0' ;i++)
+                //{
+                //LOG("%c", buff[i])    ;
+                //}
+                //LOG("\nEnd buffer\n");
+                memset(name, 0, sizeof(name));
+                strncpy(name, buff + 157, 100); //157, target link name will be present
+                memset(dirPath, 0, sizeof(dirPath));
+                sprintf(dirPath, "%s/%s", path, fullname + folderpathlen);
+                LOG(" Creating Symlink [%s][%s]\n", name, dirPath);
+                symlink(name, dirPath); // use ss_link
+            }
+            break;
+        case '3':
+            LOG(" Ignoring character device %s\n", fullname);
+            break;
+        case '4':
+            LOG(" Ignoring block device %s\n", fullname);
+            break;
+        case '5':
+            //break;//delete
+            //LOG(" Dir [%s] Item [%s] Length [%d]\n", fullname, item, folderpathlen);
+            if (strncmp(fullname, item, folderpathlen) == 0) {
+                //LOG(" Extracting dir %s\n", fullname);
+                memset(dirPath, 0, sizeof(dirPath));
+                sprintf(dirPath, "%s/%s", path, fullname + folderpathlen);
+                create_dir(dirPath, parseoct(buff + 100, 8));   //mode field lives at offset 100 of the tar header block
+            }
+
+            filesize = 0;
+            break;
+        case '6':
+            LOG(" Ignoring FIFO %s\n", fullname);
+            break;
+        case GNUTYPE_LONGLINK:
+        case GNUTYPE_LONGNAME:
+            {
+                getheader = 2;
+                memset(fullname, 0, sizeof(fullname));
+                bytes_read = read(fd, fullname, sizeof(fullname));
+                if (bytes_read < 512) {
+                    LOGE("Short read on %s: expected 512, got %d\n", tar, bytes_read);
+                    close(fd);
+                    return -1;
+                }
+                filesize = 0;
+                //LOG("Entered LONG FILE NAME CASE  new NAME is [%s]\n", fullname);
+                break;
+            }
+
+        default:
+            //break;
+            //LOG(" File [%s] Item [%s] Length [%d]\n", fullname, item, folderpathlen);
+            if (strncmp(fullname, item, folderpathlen) == 0) {
+                if (buff[PREFIX_INDICATOR_BYTE] != 0) {
+                    memset(name, 0, sizeof(name));
+                    memset(dirPath, 0, sizeof(dirPath));
+                    strncpy(name, buff, 100);
+                    strcat(name, buff + PREFIX_INDICATOR_BYTE);
+                    sprintf(dirPath, "%s/%s", path, name + folderpathlen);
+                    LOG(" File Name is longer than 100 bytes -Remaining Str [%s]\n Full Str[%s]", dirPath);
+                } else {
+                    //LOG(" Extracting file %s\n", fullname);
+                    memset(dirPath, 0, sizeof(dirPath));
+                    sprintf(dirPath, "%s/%s", path, fullname + folderpathlen);
+                    f = create_file(dirPath, parseoct(buff + 100, 8));
+                }
+            }
+
+            break;
+        }
+
+        while (filesize > 0) {
+            bytes_read = read(fd, buff, sizeof(buff));
+            if (bytes_read < 512) {
+                LOGE("Short read on %s: Expected 512, got %d\n", tar, bytes_read);
+                close(fd);
+                if(f){
+                    fclose(f);//wgid: 16892
+                    f = NULL;
+                }
+                return -1;
+            }
+            if (filesize < 512)
+                bytes_read = filesize;
+            if (f != NULL) {
+                if (fwrite(buff, 1, bytes_read, f)
+                    != bytes_read) {
+                    LOG("Failed write\n");
+                    fclose(f);
+                    f = NULL;
+                    close(fd);//wgid: 59268
+                    return -1;//wgid: 16892
+                }
+            }
+            filesize -= bytes_read;
+        }
+        if (f != NULL) {
+            fclose(f);
+            f = NULL;
+        }
+
+    }
+    close(fd);
+    return S_SS_SUCCESS;
+}
+
+int fast_tar_extract_file(char *tar, char *item, char *pathname, int size, int offset)
+{
+    int fd = -1;
+    int data_size = size;
+    int data_offset = offset;
+    off_t pos = 0;
+    ssize_t rdcnt = 0;
+    ssize_t writeCount = 0;
+    char *buf = NULL;
+    int fd2;
+
+    if (gTarFd > 0)
+        fd = gTarFd;
+    if (fd < 0) {
+        fd = open(tar, O_RDONLY);
+        if (fd < 0) {
+            LOG("can't open file(%s).\n", tar);
+            return -1;
+        }
+    }
+    pos = lseek(fd, data_offset, SEEK_SET);
+    if (pos < 0) {
+        LOG("lseek fail (%s offset %d).\n", tar, data_offset);
+        close(fd);
+        return -1;
+    }
+    buf = SS_Malloc(data_size + 1);
+    if (buf == NULL) {
+        close(fd);
+        LOGE("Failed to Allocate Memory\n");
+        return -1;
+    }
+    rdcnt = read(fd, buf, data_size);
+    if (rdcnt != (ssize_t) data_size) {
+        LOG(" rdcnt read fail(%s from %s).\n", item, tar);
+        SS_Free(buf);
+        close(fd);
+        return -1;
+    }
+    fd2 = open(pathname, O_CREAT | O_WRONLY, 0777);     // The directory that will hold this file must already exist.
+    if (fd2 < 0) {
+        LOG("can't open file(%s).\n", pathname);
+        SS_Free(buf);
+        close(fd);
+        return -1;
+    }
+    writeCount = write(fd2, buf, rdcnt);
+    if (writeCount != rdcnt) {
+        LOG("writeCount write fail(%s from %s).\n", item, tar);
+        LOG("Oh  dear, something went wrong with read()! %s\n", strerror(errno));
+        close(fd);
+        close(fd2);
+        SS_Free(buf);
+        return -1;
+    }
+    SS_Free(buf);
+    if (gTarFd < 0)
+        close(fd);
+    fsync(fd2);
+    close(fd2);
+    return rdcnt;               // or just return success?
+}
+
+int tar_extract_file(char *tar, char *item, char *pathname)
+{
+    int fd = -1;
+    int data_size = -1;
+    int data_offset = -1;
+    off_t pos = 0;
+    ssize_t rdcnt = 0;
+    ssize_t writeCount = 0;
+    char *buf = NULL;
+    int fd2;
+    data_size = tar_get_item_size(tar, item);
+    data_offset = tar_get_item_offset(tar, item);
+
+    if (data_size <= 0 || data_offset < 0) {
+        LOGE("Error Not a file , size is [%d], offset [%d] for item [%s]\n", data_size, data_offset, item);
+        return -1;
+    } else
+        LOGL(LOG_SSENGINE, "extracting file [%s] size [%d]\n", item, data_size);
+    fd = open(tar, O_RDONLY);
+    if (fd < 0) {
+        LOG("can't open file(%s).\n", tar);
+        return -1;
+    }
+    pos = lseek(fd, data_offset, SEEK_SET);
+    if (pos < 0) {
+        LOG("lseek fail (%s offset %d).\n", tar, data_offset);
+        close(fd);
+        return -1;
+    }
+    buf = SS_Malloc(data_size + 1);
+    if (buf == NULL) {
+        close(fd);
+        LOGE("Failed to Allocate Memory\n");
+        return -1;
+    }
+    rdcnt = read(fd, buf, data_size);
+    if (rdcnt != (ssize_t) data_size) {
+        LOG(" rdcnt read fail(%s from %s).\n", item, tar);
+        SS_Free(buf);
+        close(fd);
+        return -1;
+    }
+    fd2 = open(pathname, O_CREAT | O_WRONLY, 0777);     // The directory that will hold this file must already exist.
+    if (fd2 < 0) {
+        LOG("can't open file(%s).\n", pathname);
+        SS_Free(buf);
+        close(fd);
+        return -1;
+    }
+    writeCount = write(fd2, buf, rdcnt);
+    if (writeCount != rdcnt) {
+        LOG("writeCount write fail(%s from %s).\n", item, tar);
+        LOG("Oh  dear, something went wrong with read()! %s\n", strerror(errno));
+        close(fd);
+        close(fd2);
+        SS_Free(buf);
+        return -1;
+    }
+    SS_Free(buf);
+    close(fd);
+    fsync(fd2);
+    close(fd2);
+    return rdcnt;               // or just return success?
+}
diff --git a/ss_engine/fota_tar.h b/ss_engine/fota_tar.h
new file mode 100755 (executable)
index 0000000..e6a528f
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * libtota
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _FOTA_TAR_H_
+#define _FOTA_TAR_H_
+
+extern void SS_Free(void *pMemBlock);
+extern int tar_get_item_offset(char *tar, char *item);
+
+extern int tar_get_item_size(char *tar, char *item);
+
+extern int tar_get_cfg_data(char *tar, char *item, char *buf, int buflen);
+
+extern int tar_get_folder_size(char *tar, char *item);
+
+#endif                          /* _FOTA_TAR_H_ */
diff --git a/ss_engine/ua.h b/ss_engine/ua.h
new file mode 100755 (executable)
index 0000000..7d1e73c
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+ * libtota
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __UA_H__
+#define __UA_H__
+
+#include <stdio.h>
+
+#define MAX_FILE_PATH           512
+#define MAX_PART_NAME           32
+
+/*
+ * FOTA Adaptation header
+ */
+
+#define ERROR -1                // 0XFFFFFFFF
+
+#define INVALID    0
+#define VALID      1
+#define TRUE 1
+#define FALSE 0
+#define OK 0
+#define SUCCESS 0
+#define FAIL 1
+#define RESTORING 2
+#define UA_PARTI_MAX                                   20      //currently supporting max 20 partitions
+#define DEFAULT_DELTA_NAME             "delta.tar"
+#define UPDATTE_CFG_FILE               "update.cfg"
+
+#define UI_OP_SCOUT_UPDATE             0
+#define UI_OP_SCOUT                    1
+#define UI_OP_UPDATE                   3
+//SS_ENGINE && BSDIFF
+#define SS_RECOVERYRAMDISK
+
+
+typedef enum {
+    FULL_IMG,
+    DELTA_IMG,
+    DELTA_FS,
+    EXTRA
+} UA_DATA_FORMAT;
+
+typedef enum {
+    UA_MODE_SCOUT_UPDATE,
+    UA_MODE_SCOUT,
+    UA_MODE_VERIFYTARGET,
+    UA_MODE_UPDATE,
+    UA_MODE_SUPPLYIMFOM = 200
+} UA_OPERATION_MODE;
+
+typedef struct _ua_update_data_t {
+    unsigned int exist_check;
+    unsigned int verify_check;
+    unsigned int update_check;
+    unsigned int weight;        // the sum of weight should be 100
+    unsigned int weight_offset; // start offset
+    unsigned int data_size;     // byte
+    char *ua_delta_path;        // allocated to hold the delta file path; the owner must free it
+    char *ua_temp_path;         // allocated to hold the temp file path; the owner must free it
+} ua_update_data_t;
+
+typedef struct _ua_update_cfg_t {
+    unsigned int update_type;
+    unsigned int part_idx;
+    int skip_verify;
+    int skip_update;
+    int soure_img_size;         //TOTA
+    int target_img_size;
+    char *soure_sha1;
+    char *target_sha1;
+} ua_update_cfg_t;
+
+typedef struct _ua_part_info_t {
+    char *ua_parti_name;
+    char *ua_subject_name;
+    char *ua_blk_name;
+    int ua_blk_offset;
+} ua_part_info_t;
+
+// User data structure
+typedef struct _ua_data_t {     // partition operations
+    ua_part_info_t *parti_info;
+    ua_update_cfg_t *update_cfg;
+    ua_update_data_t *update_data;
+    unsigned long ua_operation;
+
+    int (*ua_op_read_block) (void *, unsigned char *, unsigned long, unsigned long);
+    int (*ua_op_write_block) (void *, unsigned char *, unsigned long);
+    void (*ui_progress) (void *, unsigned long);
+} ua_data_t;
+
+typedef struct _ua_delta_info_t {
+    char ua_patch_path[MAX_FILE_PATH];
+    char ua_patch_info[MAX_FILE_PATH];
+    char ua_delta_path[MAX_FILE_PATH];
+    char ua_attrib_path[MAX_FILE_PATH];
+} ua_delta_info_t;
+
+typedef struct _ua_dataSS_t {   // partition operations
+    ua_part_info_t *parti_info;
+    ua_update_cfg_t *update_cfg;
+    ua_update_data_t *update_data;
+    ua_delta_info_t *update_delta;
+    unsigned long ua_operation;
+    void (*ui_progress) (void *, unsigned long);
+    int (*write_data_to_blkdev) (char *, int, int, char *);
+} ua_dataSS_t;
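+
+/* Illustrative sketch only (kept out of the build): the shape of the callbacks a
+ * user agent plugs into ua_dataSS_t. The engine passes the ua_dataSS_t pointer
+ * and a percentage to ui_progress; the body below is a placeholder. */
+#if 0
+static void example_ui_progress(void *ua_data, unsigned long percent)
+{
+    (void)ua_data;                       /* the ua_dataSS_t being updated */
+    printf("FOTA progress: %lu%%\n", percent);
+}
+
+static void example_bind_callbacks(ua_dataSS_t *ua)
+{
+    ua->ua_operation = UI_OP_UPDATE;
+    ua->ui_progress = example_ui_progress;
+    /* ua->write_data_to_blkdev must be supplied by the platform for image updates */
+}
+#endif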
+
+#endif
diff --git a/ss_patch/sha1.c b/ss_patch/sha1.c
new file mode 100755 (executable)
index 0000000..91ef661
--- /dev/null
@@ -0,0 +1,405 @@
+/*
+ * sha1.c
+ *
+ * an implementation of the Secure Hash Algorithm v.1 (SHA-1),
+ * specified in FIPS 180-1
+ *
+ * David A. McGrew
+ * Cisco Systems, Inc.
+ */
+
+/*
+ *
+ * Copyright (c) 2001-2006, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ *
+ *   Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials provided
+ *   with the distribution.
+ *
+ *   Neither the name of the Cisco Systems, Inc. nor the names of its
+ *   contributors may be used to endorse or promote products derived
+ *   from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+
+#include "sha1.h"
+
+
+
+/* SN == Rotate left N bits */
+#define S1(X)  ((X << 1)  | (X >> 31))
+#define S5(X)  ((X << 5)  | (X >> 27))
+#define S30(X) ((X << 30) | (X >> 2))
+
+#define f0(B,C,D) ((B & C) | (~B & D))
+#define f1(B,C,D) (B ^ C ^ D)
+#define f2(B,C,D) ((B & C) | (B & D) | (C & D))
+#define f3(B,C,D) (B ^ C ^ D)
+
+
+
+/*
+ * nota bene: the variable K0 appears in the curses library, so we
+ * give longer names to these variables to avoid spurious warnings
+ * on systems that use curses
+ */
+
+uint32_t SHA_K0 = 0x5A827999;   /* Kt for 0  <= t <= 19 */
+uint32_t SHA_K1 = 0x6ED9EBA1;   /* Kt for 20 <= t <= 39 */
+uint32_t SHA_K2 = 0x8F1BBCDC;   /* Kt for 40 <= t <= 59 */
+uint32_t SHA_K3 = 0xCA62C1D6;   /* Kt for 60 <= t <= 79 */
+
+void
+sha1(const uint8_t *msg,  int octets_in_msg, uint32_t hash_value[5]) {
+  sha1_ctx_t ctx;
+
+  sha1_init(&ctx);
+  sha1_update(&ctx, msg, octets_in_msg);
+  sha1_final(&ctx, hash_value);
+
+}
+
+/*
+ *  sha1_core(M, H) computes the core compression function, where M is
+ *  the next part of the message (in network byte order) and H is the
+ *  intermediate state { H0, H1, ...} (in host byte order)
+ *
+ *  this function does not do any of the padding required in the
+ *  complete SHA1 function
+ *
+ *  this function is used in the SEAL 3.0 key setup routines
+ *  (crypto/cipher/seal.c)
+ */
+
+void
+sha1_core(const uint32_t M[16], uint32_t hash_value[5]) {
+  uint32_t H0;
+  uint32_t H1;
+  uint32_t H2;
+  uint32_t H3;
+  uint32_t H4;
+  uint32_t W[80];
+  uint32_t A, B, C, D, E, TEMP;
+  int t;
+
+  /* copy hash_value into H0, H1, H2, H3, H4 */
+  H0 = hash_value[0];
+  H1 = hash_value[1];
+  H2 = hash_value[2];
+  H3 = hash_value[3];
+  H4 = hash_value[4];
+
+  /* copy/xor message into array */
+
+  W[0]  = be32_to_cpu(M[0]);
+  W[1]  = be32_to_cpu(M[1]);
+  W[2]  = be32_to_cpu(M[2]);
+  W[3]  = be32_to_cpu(M[3]);
+  W[4]  = be32_to_cpu(M[4]);
+  W[5]  = be32_to_cpu(M[5]);
+  W[6]  = be32_to_cpu(M[6]);
+  W[7]  = be32_to_cpu(M[7]);
+  W[8]  = be32_to_cpu(M[8]);
+  W[9]  = be32_to_cpu(M[9]);
+  W[10] = be32_to_cpu(M[10]);
+  W[11] = be32_to_cpu(M[11]);
+  W[12] = be32_to_cpu(M[12]);
+  W[13] = be32_to_cpu(M[13]);
+  W[14] = be32_to_cpu(M[14]);
+  W[15] = be32_to_cpu(M[15]);
+  TEMP = W[13] ^ W[8]  ^ W[2]  ^ W[0];  W[16] = S1(TEMP);
+  TEMP = W[14] ^ W[9]  ^ W[3]  ^ W[1];  W[17] = S1(TEMP);
+  TEMP = W[15] ^ W[10] ^ W[4]  ^ W[2];  W[18] = S1(TEMP);
+  TEMP = W[16] ^ W[11] ^ W[5]  ^ W[3];  W[19] = S1(TEMP);
+  TEMP = W[17] ^ W[12] ^ W[6]  ^ W[4];  W[20] = S1(TEMP);
+  TEMP = W[18] ^ W[13] ^ W[7]  ^ W[5];  W[21] = S1(TEMP);
+  TEMP = W[19] ^ W[14] ^ W[8]  ^ W[6];  W[22] = S1(TEMP);
+  TEMP = W[20] ^ W[15] ^ W[9]  ^ W[7];  W[23] = S1(TEMP);
+  TEMP = W[21] ^ W[16] ^ W[10] ^ W[8];  W[24] = S1(TEMP);
+  TEMP = W[22] ^ W[17] ^ W[11] ^ W[9];  W[25] = S1(TEMP);
+  TEMP = W[23] ^ W[18] ^ W[12] ^ W[10]; W[26] = S1(TEMP);
+  TEMP = W[24] ^ W[19] ^ W[13] ^ W[11]; W[27] = S1(TEMP);
+  TEMP = W[25] ^ W[20] ^ W[14] ^ W[12]; W[28] = S1(TEMP);
+  TEMP = W[26] ^ W[21] ^ W[15] ^ W[13]; W[29] = S1(TEMP);
+  TEMP = W[27] ^ W[22] ^ W[16] ^ W[14]; W[30] = S1(TEMP);
+  TEMP = W[28] ^ W[23] ^ W[17] ^ W[15]; W[31] = S1(TEMP);
+
+  /* process the remainder of the array */
+  for (t=32; t < 80; t++) {
+    TEMP = W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16];
+    W[t] = S1(TEMP);
+  }
+
+  A = H0; B = H1; C = H2; D = H3; E = H4;
+
+  for (t=0; t < 20; t++) {
+    TEMP = S5(A) + f0(B,C,D) + E + W[t] + SHA_K0;
+    E = D; D = C; C = S30(B); B = A; A = TEMP;
+  }
+  for (   ; t < 40; t++) {
+    TEMP = S5(A) + f1(B,C,D) + E + W[t] + SHA_K1;
+    E = D; D = C; C = S30(B); B = A; A = TEMP;
+  }
+  for (   ; t < 60; t++) {
+    TEMP = S5(A) + f2(B,C,D) + E + W[t] + SHA_K2;
+    E = D; D = C; C = S30(B); B = A; A = TEMP;
+  }
+  for (   ; t < 80; t++) {
+    TEMP = S5(A) + f3(B,C,D) + E + W[t] + SHA_K3;
+    E = D; D = C; C = S30(B); B = A; A = TEMP;
+  }
+
+  hash_value[0] = H0 + A;
+  hash_value[1] = H1 + B;
+  hash_value[2] = H2 + C;
+  hash_value[3] = H3 + D;
+  hash_value[4] = H4 + E;
+
+  return;
+}
+
+void
+sha1_init(sha1_ctx_t *ctx) {
+
+  /* initialize state vector */
+  ctx->H[0] = 0x67452301;
+  ctx->H[1] = 0xefcdab89;
+  ctx->H[2] = 0x98badcfe;
+  ctx->H[3] = 0x10325476;
+  ctx->H[4] = 0xc3d2e1f0;
+
+  /* indicate that message buffer is empty */
+  ctx->octets_in_buffer = 0;
+
+  /* reset message bit-count to zero */
+  ctx->num_bits_in_msg = 0;
+
+}
+
+void
+sha1_update(sha1_ctx_t *ctx, const uint8_t *msg, int octets_in_msg) {
+  int i;
+  uint8_t *buf = (uint8_t *)ctx->M;
+
+  /* update message bit-count */
+  ctx->num_bits_in_msg += octets_in_msg * 8;
+
+  /* loop over 16-word blocks of M */
+  while (octets_in_msg > 0) {
+
+    if (octets_in_msg + ctx->octets_in_buffer >= 64) {
+
+      /*
+       * copy words of M into msg buffer until that buffer is full,
+       * converting them into host byte order as needed
+       */
+      octets_in_msg -= (64 - ctx->octets_in_buffer);
+      for (i=ctx->octets_in_buffer; i < 64; i++)
+       buf[i] = *msg++;
+      ctx->octets_in_buffer = 0;
+
+      /* process a whole block */
+
+      //debug_print(mod_sha1, "(update) running sha1_core()", NULL);
+
+      sha1_core(ctx->M, ctx->H);
+
+    } else {
+
+      //debug_print(mod_sha1, "(update) not running sha1_core()", NULL);
+
+      for (i=ctx->octets_in_buffer;
+          i < (ctx->octets_in_buffer + octets_in_msg); i++)
+       buf[i] = *msg++;
+      ctx->octets_in_buffer += octets_in_msg;
+      octets_in_msg = 0;
+    }
+
+  }
+
+}
+
+/*
+ * sha1_final(ctx, output) computes the result for ctx and copies it
+ * into the twenty octets located at *output
+ */
+
+void
+sha1_final(sha1_ctx_t *ctx, uint32_t *output) {
+  uint32_t A, B, C, D, E, TEMP;
+  uint32_t W[80];
+  int i, t;
+
+  /*
+   * process the remaining octets_in_buffer, padding and terminating as
+   * necessary
+   */
+  {
+    int tail = ctx->octets_in_buffer % 4;
+
+    /* copy/xor message into array */
+    for (i=0; i < (ctx->octets_in_buffer+3)/4; i++)
+      W[i]  = be32_to_cpu(ctx->M[i]);
+
+    /* set the high bit of the octet immediately following the message */
+    switch (tail) {
+    case (3):
+      W[i-1] = (be32_to_cpu(ctx->M[i-1]) & 0xffffff00) | 0x80;
+      W[i] = 0x0;
+      break;
+    case (2):
+      W[i-1] = (be32_to_cpu(ctx->M[i-1]) & 0xffff0000) | 0x8000;
+      W[i] = 0x0;
+      break;
+    case (1):
+      W[i-1] = (be32_to_cpu(ctx->M[i-1]) & 0xff000000) | 0x800000;
+      W[i] = 0x0;
+      break;
+    case (0):
+      W[i] = 0x80000000;
+      break;
+    }
+
+    /* zeroize remaining words */
+    for (i++   ; i < 15; i++)
+      W[i] = 0x0;
+
+    /*
+     * if there is room at the end of the word array, then set the
+     * last word to the bit-length of the message; otherwise, set that
+     * word to zero and then we need to do one more run of the
+     * compression algo.
+     */
+    if (ctx->octets_in_buffer < 56)
+      W[15] = ctx->num_bits_in_msg;
+    else if (ctx->octets_in_buffer < 60)
+      W[15] = 0x0;
+
+    /* process the word array */
+    for (t=16; t < 80; t++) {
+      TEMP = W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16];
+      W[t] = S1(TEMP);
+    }
+
+    A = ctx->H[0];
+    B = ctx->H[1];
+    C = ctx->H[2];
+    D = ctx->H[3];
+    E = ctx->H[4];
+
+    for (t=0; t < 20; t++) {
+      TEMP = S5(A) + f0(B,C,D) + E + W[t] + SHA_K0;
+      E = D; D = C; C = S30(B); B = A; A = TEMP;
+    }
+    for (   ; t < 40; t++) {
+      TEMP = S5(A) + f1(B,C,D) + E + W[t] + SHA_K1;
+      E = D; D = C; C = S30(B); B = A; A = TEMP;
+    }
+    for (   ; t < 60; t++) {
+      TEMP = S5(A) + f2(B,C,D) + E + W[t] + SHA_K2;
+      E = D; D = C; C = S30(B); B = A; A = TEMP;
+    }
+    for (   ; t < 80; t++) {
+      TEMP = S5(A) + f3(B,C,D) + E + W[t] + SHA_K3;
+      E = D; D = C; C = S30(B); B = A; A = TEMP;
+    }
+
+    ctx->H[0] += A;
+    ctx->H[1] += B;
+    ctx->H[2] += C;
+    ctx->H[3] += D;
+    ctx->H[4] += E;
+
+  }
+
+  //debug_print(mod_sha1, "(final) running sha1_core()", NULL);
+
+  if (ctx->octets_in_buffer >= 56) {
+
+
+    //debug_print(mod_sha1, "(final) running sha1_core() again", NULL);
+
+    /* we need to do one final run of the compression algo */
+
+    /*
+     * set initial part of word array to zeros, and set the
+     * final part to the number of bits in the message
+     */
+    for (i=0; i < 15; i++)
+      W[i] = 0x0;
+    W[15] = ctx->num_bits_in_msg;
+
+    /* process the word array */
+    for (t=16; t < 80; t++) {
+      TEMP = W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16];
+      W[t] = S1(TEMP);
+    }
+
+    A = ctx->H[0];
+    B = ctx->H[1];
+    C = ctx->H[2];
+    D = ctx->H[3];
+    E = ctx->H[4];
+
+    for (t=0; t < 20; t++) {
+      TEMP = S5(A) + f0(B,C,D) + E + W[t] + SHA_K0;
+      E = D; D = C; C = S30(B); B = A; A = TEMP;
+    }
+    for (   ; t < 40; t++) {
+      TEMP = S5(A) + f1(B,C,D) + E + W[t] + SHA_K1;
+      E = D; D = C; C = S30(B); B = A; A = TEMP;
+    }
+    for (   ; t < 60; t++) {
+      TEMP = S5(A) + f2(B,C,D) + E + W[t] + SHA_K2;
+      E = D; D = C; C = S30(B); B = A; A = TEMP;
+    }
+    for (   ; t < 80; t++) {
+      TEMP = S5(A) + f3(B,C,D) + E + W[t] + SHA_K3;
+      E = D; D = C; C = S30(B); B = A; A = TEMP;
+    }
+
+    ctx->H[0] += A;
+    ctx->H[1] += B;
+    ctx->H[2] += C;
+    ctx->H[3] += D;
+    ctx->H[4] += E;
+  }
+
+  /* copy result into output buffer */
+  output[0] = be32_to_cpu(ctx->H[0]);
+  output[1] = be32_to_cpu(ctx->H[1]);
+  output[2] = be32_to_cpu(ctx->H[2]);
+  output[3] = be32_to_cpu(ctx->H[3]);
+  output[4] = be32_to_cpu(ctx->H[4]);
+
+  /* indicate that message buffer in context is empty */
+  ctx->octets_in_buffer = 0;
+
+  return;
+}
+
+
+
diff --git a/ss_patch/sha1.h b/ss_patch/sha1.h
new file mode 100755 (executable)
index 0000000..755628a
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * sha1.h
+ *
+ * interface to the Secure Hash Algorithm v.1 (SHA-1), specified in
+ * FIPS 180-1
+ *
+ * David A. McGrew
+ * Cisco Systems, Inc.
+ */
+
+/*
+ *
+ * Copyright (c) 2001-2006, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ *
+ *   Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials provided
+ *   with the distribution.
+ *
+ *   Neither the name of the Cisco Systems, Inc. nor the names of its
+ *   contributors may be used to endorse or promote products derived
+ *   from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef SHA1_H
+#define SHA1_H
+#define SHA_DIGEST_SIZE 20
+#define be32_to_cpu(x) (uint32_t)      ( \
+                               (((uint32_t)(x) & 0xff000000u) >> 24) | \
+                       (((uint32_t)(x) & 0x00ff0000u) >>  8) | \
+                       (((uint32_t)(x) & 0x0000ff00u) <<  8) | \
+                       (((uint32_t)(x) & 0x000000ffu) << 24))
+
+
+typedef unsigned int uint32_t;
+typedef unsigned char uint8_t;
+typedef struct {
+  uint32_t H[5];             /* state vector                    */
+  uint32_t M[16];            /* message buffer                  */
+  int octets_in_buffer;      /* octets of message in buffer     */
+  uint32_t num_bits_in_msg;  /* total number of bits in message */
+} sha1_ctx_t;
+
+/*
+ * sha1(&ctx, msg, len, output) hashes the len octets starting at msg
+ * into the SHA1 context, then writes the result to the 20 octets at
+ * output
+ *
+ */
+
+void
+sha1(const uint8_t *message,  int octets_in_msg, uint32_t output[5]);
+
+/*
+ * sha1_init(&ctx) initializes the SHA1 context ctx
+ *
+ * sha1_update(&ctx, msg, len) hashes the len octets starting at msg
+ * into the SHA1 context
+ *
+ * sha1_final(&ctx, output) performs the final processing of the SHA1
+ * context and writes the result to the 20 octets at output
+ *
+ */
+
+void
+sha1_init(sha1_ctx_t *ctx);
+
+void
+sha1_update(sha1_ctx_t *ctx, const uint8_t *M, int octets_in_msg);
+
+void
+sha1_final(sha1_ctx_t *ctx, uint32_t output[5]);
+
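+/*
+ * Illustrative usage sketch (added for documentation only; chunk1/chunk2 and
+ * their lengths are assumed example buffers): chunked hashing with the
+ * streaming API below yields the same 20-octet digest as one sha1() call
+ * over the whole message.
+ *
+ *   uint32_t digest[5];
+ *   sha1_ctx_t ctx;
+ *   sha1_init(&ctx);
+ *   sha1_update(&ctx, chunk1, chunk1_len);
+ *   sha1_update(&ctx, chunk2, chunk2_len);
+ *   sha1_final(&ctx, digest);
+ */
+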
+/*
+ * The sha1_core function is INTERNAL to SHA-1, but it is declared
+ * here because it is also used by the cipher SEAL 3.0 in its key
+ * setup algorithm.
+ */
+
+/*
+ *  sha1_core(M, H) computes the core sha1 compression function, where M is
+ *  the next part of the message and H is the intermediate state {H0,
+ *  H1, ...}
+ *
+ *  this function does not do any of the padding required in the
+ *  complete sha1 function
+ */
+
+void
+sha1_core(const uint32_t M[16], uint32_t hash_value[5]);
+
+#endif /* SHA1_H */
diff --git a/ss_patch/ss_bspatch.c b/ss_patch/ss_bspatch.c
new file mode 100755 (executable)
index 0000000..be75265
--- /dev/null
@@ -0,0 +1,368 @@
+/*-
+ * Copyright 2003-2005 Colin Percival
+ * Copyright 2012 Matthew Endsley
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted providing that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Modifications: the suffix-sort array generation was reimplemented, and the
+ * way data is read and written was changed. An iterative implementation
+ * replaced the recursive one to avoid buffer overflow problems.
+ */
+//#define ZLIB_MOD                                                                       //not stable yet.
+//#define MAX_MATCH_SIZE                  // define ( MAX_MATCH_SIZE or CONST_MEMORY_USAGE ) or ( none of them )
+#define CONST_MEMORY_USAGE (64*1024)   //tests show smallest time when using 64 kb
+#define PATCH_FILE_FORMAT_MOD
+#define BSDIFF_HEADER "BSDIFF40"
+#define SSDIFF_HEADER "SSDIFF40"
+//#define MULTI_THREADING
+#include <stdbool.h>
+#include <err.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include "ss_patchdelta.h"
+#include "fota_common.h"
+#include "sha1.h"
+#include "SS_Engine_Errors.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <Alloc.h>
+#include <7zFile.h>
+#include <7zVersion.h>
+#include <LzmaDec.h>
+#include <LzmaEnc.h>
+
+static void *SzAlloc(void *p, size_t size)
+{
+       p = p;
+       return MyAlloc(size);
+}
+
+static void SzFree(void *p, void *address)
+{
+       p = p;
+       MyFree(address);
+}
+static ISzAlloc g_Alloc = { SzAlloc, SzFree };
+
+static off_t offtin(u_char *buf)
+{
+       off_t y;
+
+       y = buf[7] & 0x7F;
+       y = y * 256;
+       y += buf[6];
+       y = y * 256;
+       y += buf[5];
+       y = y * 256;
+       y += buf[4];
+       y = y * 256;
+       y += buf[3];
+       y = y * 256;
+       y += buf[2];
+       y = y * 256;
+       y += buf[1];
+       y = y * 256;
+       y += buf[0];
+
+       if (buf[7] & 0x80)
+               y = -y;
+
+       return y;
+}
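+
+/*
+ * Worked example for offtin() above: the encoding is little-endian
+ * sign-and-magnitude, with the top bit of buf[7] carrying the sign.
+ * So {0x05,0,0,0,0,0,0,0x00} decodes to 5, while {0x05,0,0,0,0,0,0,0x80}
+ * decodes to -5 (it is not a two's-complement value).
+ */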
+
+#define IN_BUF_SIZE (1 << 16)
+#define OUT_BUF_SIZE (1 << 16)
+
+static SRes Decode2(CLzmaDec *state, ISeqOutStream *outStream, ISeqInStream *inStream,
+               UInt64 *unpackSize,unsigned char *dec_data)
+{
+       int thereIsSize = (*unpackSize != (UInt64)(Int64) - 1);
+       UInt64 offset = 0;
+       Byte inBuf[IN_BUF_SIZE];
+       Byte outBuf[OUT_BUF_SIZE];
+       size_t inPos = 0, inSize = 0, outPos = 0;
+
+       LzmaDec_Init(state);
+
+       offset = 0;
+
+       for (;;) {
+               if (inPos == inSize) {
+                       inSize = IN_BUF_SIZE;
+                       RINOK(inStream->Read(inStream, inBuf, &inSize));
+                       inPos = 0;
+               }
+
+               SRes res;
+               SizeT inProcessed = inSize - inPos;
+               SizeT outProcessed = OUT_BUF_SIZE - outPos;
+               ELzmaFinishMode finishMode = LZMA_FINISH_ANY;
+               ELzmaStatus status;
+
+               if (thereIsSize && outProcessed > *unpackSize) {
+                       outProcessed = (SizeT) * unpackSize;
+                       finishMode = LZMA_FINISH_END;
+               }
+
+               res = LzmaDec_DecodeToBuf(state, outBuf + outPos, &outProcessed,
+                               inBuf + inPos, &inProcessed, finishMode, &status);
+               inPos += inProcessed;
+               outPos += outProcessed;
+               *unpackSize -= outProcessed;
+               memcpy(dec_data + offset, outBuf, outProcessed);
+               offset += outProcessed;
+
+               outPos = 0;
+
+               if ((res != SZ_OK) || (thereIsSize && *unpackSize == 0))
+                       return res;
+
+               if (inProcessed == 0 && outProcessed == 0) {
+                       if (thereIsSize || status != LZMA_STATUS_FINISHED_WITH_MARK)
+                               return SZ_ERROR_DATA;
+                       return res;
+               }
+       }
+}
+
+int apply_patch(char *oldfile, char *patch_buffer, unsigned char **dest_buf, ssize_t *dest_size)
+{
+       int fd = -1, result = 0;
+       off_t oldsize, newsize;
+       u_char header[16], buf[8];
+       u_char *old = NULL;
+       off_t oldpos, newpos;
+       off_t ctrl[4];          /////////////////////////////////////THREAD
+       off_t total_write;      /////////////////////////////////////////THREAD
+       off_t j;
+       off_t memory_usage = CONST_MEMORY_USAGE;
+       off_t match_size;
+       off_t patch_buffer_offset = 0;
+       bool flag;
+
+       /*
+          Original bsdiff file format:
+          0       8       "BSDIFF40"
+          8       8       X
+          16      8       Y
+          24      8       sizeof(newfile)
+          32      X       bzip2(control block)
+          32+X    Y       bzip2(diff block)
+          32+X+Y  ???     bzip2(extra block)
+          with the control block a set of triples (x,y,z) meaning "add x bytes
+          from oldfile to x bytes from the diff block; copy y bytes from the
+          extra block; seek forwards in oldfile by z bytes".
+
+          The code below parses the modified layout (PATCH_FILE_FORMAT_MOD)
+          out of the already LZMA-decompressed patch buffer: a 16-byte header
+          (8-byte magic, 8-byte sizeof(newfile)) followed by one record per
+          control entry, each record being a 32-byte control quadruple
+          (diff length, extra length, old-file offset, new-file offset),
+          then the diff bytes, then the extra bytes.
+        */
+       // Read header
+       if (patch_buffer)
+               memcpy(header, patch_buffer, 16);
+       else {
+               printf("%s().%d Corrupt decoded patch buffer\n", __FUNCTION__, __LINE__);
+               return 1;
+       }
+
+       /* Check for appropriate magic */
+       if (memcmp(header, BSDIFF_HEADER, 8) != 0 && memcmp(header, SSDIFF_HEADER, 8) != 0) {
+               printf("%s().%d Patch buffer header corrupt\n", __FUNCTION__, __LINE__ );
+               return 1;
+       }
+
+       /* Read lengths from header */
+       newsize = offtin(header + 8);
+
+       if ((newsize < 0)) {
+               printf("%s().%d Patch buffer corrupt\n", __FUNCTION__, __LINE__ );
+               return 1;
+       }
+
+       /* Set patch_buffer_offset to the first byte after the header */
+       patch_buffer_offset += 16;
+
+       if (((fd = open(oldfile, O_RDONLY, 0)) < 0) ||
+                       ((oldsize = lseek(fd, 0, SEEK_END)) == -1) ||
+                       ((old = malloc(memory_usage + 1)) == NULL) ||
+                       (lseek(fd, 0, SEEK_SET) != 0)) {
+               printf("Corruption in old file %s\n", oldfile);
+               result = 1;
+               goto Cleanup;
+       }
+
+       if ((*dest_buf = malloc(newsize + 1)) == NULL) {
+               printf("Corruption in old file %s\n", oldfile);
+               result = 1;
+               goto Cleanup;
+       }
+       oldpos = 0;
+       newpos = 0;
+
+       total_write = 0;
+
+       while (total_write != newsize) {
+               /* Read control data */
+               for (j = 0; j <= 3; j++) {
+                       memcpy(buf, patch_buffer + patch_buffer_offset, 8);
+                       patch_buffer_offset += 8;
+                       ctrl[j] = offtin(buf);
+               };
+
+               total_write += (ctrl[0] + ctrl[1]);
+               newpos = ctrl[3];
+               oldpos = ctrl[2];
+
+               //////////////////////////////////////////////////////////////////////////////////
+               flag = true;
+               match_size = ctrl[0];
+               while (flag == true) {
+                       if (match_size <= memory_usage) {
+                               if (pread(fd, old, match_size, oldpos) != match_size) {
+                                       printf("Corruption in old file %s\n", oldfile);
+                                       result = 1;
+                                       goto Cleanup;
+                               }
+                               if (newpos + match_size > newsize) {
+                                       printf("%s().%d Corrupt patch\n", __FUNCTION__, __LINE__ );
+                                       result = 1;
+                                       goto Cleanup;
+                               }
+                               memcpy((*dest_buf) + newpos, patch_buffer + patch_buffer_offset, match_size);
+                               patch_buffer_offset += match_size;
+                               for (j = 0; j < match_size; j++) {
+                                       (*dest_buf)[newpos + j] += old[j];
+                               }
+                               newpos += match_size;
+                               flag = false;
+                       } else {
+                               if (pread(fd, old, memory_usage, oldpos) != memory_usage) {
+                                       printf("%s().%d Corruption in old file %s\n", __FUNCTION__, __LINE__ , oldfile);
+                                       result = 1;
+                                       goto Cleanup;
+                               }
+                               if (newpos + memory_usage > newsize) {
+                                       printf("%s().%d Corrupt patch\n", __FUNCTION__, __LINE__ );
+                                       result = 1;
+                                       goto Cleanup;
+                               }
+                               memcpy((*dest_buf) + newpos, patch_buffer + patch_buffer_offset, memory_usage);
+                               patch_buffer_offset += memory_usage;
+                               for (j = 0; j < memory_usage; j++)
+                                       (*dest_buf)[newpos + j] += old[j];
+                               match_size -= memory_usage;
+                               oldpos += memory_usage;
+                               newpos += memory_usage;
+                       }
+               }
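+
+               /*
+                * Worked example for the windowed copy above (illustrative
+                * numbers): with CONST_MEMORY_USAGE = 64 KiB, a 150 KiB diff
+                * record is applied in three passes of 64 KiB, 64 KiB and
+                * 22 KiB, so only one 64 KiB window of the old file is held
+                * in memory at a time.
+                */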
+
+               ////////////////////////////////////////////////////////////////////////////////////////
+               /* Sanity-check */
+               if (newpos + ctrl[1] > newsize) {
+                       printf("%s().%d Corrupt patch\n", __FUNCTION__, __LINE__ );
+                       result = 1;
+                       goto Cleanup;
+               }
+               /* Read extra string */
+               memcpy((*dest_buf) + newpos, patch_buffer + patch_buffer_offset, ctrl[1]);
+               patch_buffer_offset += ctrl[1];
+       };
+       *dest_size = newsize;
+Cleanup:
+       //close old file
+       if (fd >= 0)
+               close(fd);
+       if (old)
+               free(old);
+       return result;
+}
+
+int SS_ApplyBsdiff(char *oldfile, char *newfile, char *patch, SinkFn sink, void *token, sha1_ctx_t * ctx1)
+{
+
+    UInt64 unpackSize = 0;
+    CFileSeqInStream inStream;
+    ISeqOutStream outStream;
+    char *buf_res = NULL;
+    unsigned char *new_data = NULL;
+    ssize_t new_size = 0;
+    int result = E_SS_FAILURE;
+
+    FileSeqInStream_CreateVTable(&inStream);
+    File_Construct(&inStream.file);
+    FileOutStream_CreateVTable((CFileOutStream *) & outStream);
+
+    if (InFile_Open(&inStream.file, patch) != 0)
+        return E_SS_FAILURE;
+
+    UInt64 i;
+    CLzmaDec state;
+    unsigned char header[LZMA_PROPS_SIZE + 8];
+
+    RINOK(SeqInStream_Read(&inStream.s, header, sizeof(header)));
+
+    unpackSize = 0;
+    for (i = 0; i < 8; i++)
+        unpackSize += (UInt64) header[LZMA_PROPS_SIZE + i] << (i * 8);
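+
+    /*
+     * Sketch of the header layout assumed by the loop above (a standalone
+     * LZMA stream): LZMA_PROPS_SIZE property bytes followed by an 8-byte
+     * little-endian uncompressed size. For example, a size field of
+     * {0x00, 0x10, 0, 0, 0, 0, 0, 0} would yield unpackSize == 0x1000 (4096).
+     */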
+
+    LzmaDec_Construct(&state);
+    RINOK(LzmaDec_Allocate(&state, header, LZMA_PROPS_SIZE, &g_Alloc));
+
+    //decompress the patch file into buf_res
+    buf_res = (char *)SS_Malloc(unpackSize);
+    if (!buf_res) {
+        LOGE("Bad memory allocation\n");
+        goto Cleanup;
+    }
+    result = Decode2(&state, &outStream, &inStream.s, &unpackSize, (unsigned char *)buf_res);
+
+    LzmaDec_Free(&state, &g_Alloc);
+    File_Close(&inStream.file);
+
+    if (result != S_SS_SUCCESS) {
+        LOGE("Error decompression failed with code : [%d]\n", result);
+        goto Cleanup;
+    }
+    //apply patch using buffer decoded by Decode2
+    result = apply_patch(oldfile, buf_res, &new_data, &new_size);
+    if (result != S_SS_SUCCESS) {
+        goto Cleanup;
+    }
+
+    result = (sink(new_data, new_size, token) < new_size) ? E_SS_FAILURE : S_SS_SUCCESS;
+    if (result != S_SS_SUCCESS) {
+        LOGE("short write of output: %d (%s)\n", errno, strerror(errno));
+        goto Cleanup;
+    }
+
+    if (ctx1) {
+        sha1_update(ctx1, new_data, new_size);
+    }
+ Cleanup:
+    if (new_data)
+        SS_Free(new_data);
+    if (buf_res)
+        SS_Free(buf_res);
+    File_Close(&inStream.file);//wgid: 27007
+    return result;
+
+}
diff --git a/ss_patch/ss_patchdelta.c b/ss_patch/ss_patchdelta.c
new file mode 100755 (executable)
index 0000000..5c4f275
--- /dev/null
@@ -0,0 +1,1230 @@
+/*
+ * libtota
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <errno.h>
+#include <libgen.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/statfs.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include "ua.h"
+#include "sha1.h"
+#include "ss_patchdelta.h"
+#include "fota_common.h"
+#include "SS_Engine_Errors.h"
+
+extern void *SS_Malloc(unsigned int size);
+
+typedef struct {
+    unsigned char *buffer;
+    ssize_t size;
+    ssize_t pos;
+} MemorySinkInfo;
+
+ssize_t ss_memorySink(unsigned char *data, ssize_t len, void *token)
+{
+    MemorySinkInfo *msi = (MemorySinkInfo *) token;
+    if (msi->size - msi->pos < len) {
+        return -1;
+    }
+    memcpy(msi->buffer + msi->pos, data, len);
+    msi->pos += len;
+    return len;
+}
+
+ssize_t ss_fileSink(unsigned char *data, ssize_t len, void *token)
+{
+    int ss_fd = *(int *)token;
+    ssize_t done = 0;
+    ssize_t wrote;
+    while (done < (ssize_t) len) {
+        wrote = write(ss_fd, data + done, len - done);
+        if (wrote <= 0) {
+            if (errno == EINTR || errno == EAGAIN)
+                continue;       // try again
+            LOGE("error writing %d bytes: %s\n", (int)(len - done), strerror(errno));
+            return done;
+        }
+        done += wrote;
+    }
+    return done;
+}
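+
+/*
+ * Sketch of how these sinks are wired up by the callers later in this file
+ * (the file name is only an example):
+ *
+ *   int output = open("/tmp/out.patch", O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU);
+ *   SinkFn sink = ss_fileSink;
+ *   void *token = &output;            // ss_fileSink reads the fd back out of token
+ *   sink(data, len, token);           // returns the number of octets written
+ */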
+
+// Take a string 'str' of 40 hex digits and parse it into the 20
+// byte array 'digest'.  'str' may contain only the digest or be of
+// the form "<digest>:<anything>".  Return 0 on success, -1 on any
+// error.
+int ParseSha1(const char *str, uint8_t * digest)
+{
+    int i;
+    const char *ps = str;
+    uint8_t *pd = digest;
+    for (i = 0; i < SHA_DIGEST_SIZE * 2; ++i, ++ps) {
+        int digit;
+        if (*ps >= '0' && *ps <= '9') {
+            digit = *ps - '0';
+        } else if (*ps >= 'a' && *ps <= 'f') {
+            digit = *ps - 'a' + 10;
+        } else if (*ps >= 'A' && *ps <= 'F') {
+            digit = *ps - 'A' + 10;
+        } else {
+            return -1;
+        }
+        if (i % 2 == 0) {
+            *pd = digit << 4;
+        } else {
+            *pd |= digit;
+            ++pd;
+        }
+    }
+    if (*ps != '\0')
+        return -1;
+    return 0;
+}
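+
+/*
+ * Illustrative use of ParseSha1() (the digest string here is just an example;
+ * it is the SHA-1 of the empty message):
+ *
+ *   uint8_t digest[SHA_DIGEST_SIZE];
+ *   if (ParseSha1("da39a3ee5e6b4b0d3255bfef95601890afd80709", digest) != 0)
+ *       LOGE("malformed sha1 string\n");
+ *   // on success digest[0] == 0xda, digest[1] == 0x39, ...
+ */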
+
+//Find the offset of the gzip-compressed payload inside the compressed kernel image
+int getOffset(char *zimage_path)
+{
+    char gzip_header[] = { 31, -117, 8 };       //gzip magic bytes (0x1f 0x8b 0x08) to search for
+    char buf[4] = { 0, };
+    int offset = 0;//wgid:14074
+
+    FILE *f = fopen(zimage_path, "r");
+    if(!f){
+        LOGE("Fopen failed for path %s\n", zimage_path);
+        SS_SetUpgradeState(E_SS_OPENFILE_ONLYR);
+        return -1;
+    }
+    fseek(f, 0, SEEK_SET);
+    while (fread(buf, 1, 3, f) > 0) {
+        if (gzip_header[0] == buf[0] && gzip_header[1] == buf[1] && gzip_header[2] == buf[2]) {
+            LOGL(LOG_SSENGINE, "match for %d %d %d found at %d\n", buf[0], buf[1], buf[2], ftell(f) - 3);
+            break;
+        } else {
+            fseek(f, -2, SEEK_CUR);
+        }
+    }
+    offset = ftell(f) - 3;
+    fclose(f);
+    return offset;
+}
+
+int SS_LoadPartition(const char *filename, FileInfo * file)
+{
+    size_t read = 0;
+    FILE *dev = NULL;
+    int i;
+
+    dev = fopen(filename, "rb");
+    if (dev == NULL) {
+        LOGE("failed to open partition \"%s\": %s\n", filename, strerror(errno));
+        return -1;
+    }
+
+    sha1_ctx_t sha_ctx;
+    sha1_init(&sha_ctx);
+
+    file->data = SS_Malloc(file->size);
+    if (file->data) {
+        read = fread(file->data, 1, file->size, dev);
+        LOGL(LOG_SSENGINE, "Partition size read %d\n", read);
+        sha1_update(&sha_ctx, file->data, read);
+        file->size = read;
+    }
+
+    const uint8_t sha_final[SHA_DIGEST_SIZE] = { 0, };
+    sha1_final(&sha_ctx, (uint32_t *) & sha_final);
+    for (i = 0; i < SHA_DIGEST_SIZE; ++i) {
+        file->sha1[i] = sha_final[i];
+    }
+    //LOGL(LOG_SSENGINE, "Final SHA of Source (%s)\n", sha_final);
+
+    file->st.st_mode = 0644;
+    file->st.st_uid = 0;
+    file->st.st_gid = 0;
+    fclose(dev);
+    return 0;
+}
+
+//extern int write_data_to_blkdev(char* dev_name, int blk_start, int blk_cnt, char* data);
+
+int SS_LoadFile(const char *filename, FileInfo * file)
+{
+
+    file->data = NULL;
+    //LOGL(LOG_SSENGINE,"SS_LoadFile --- [File name %s]\n",filename);
+
+    if (stat(filename, &file->st) != 0) {
+        LOGE("failed to stat \"%s\": %s\n", filename, strerror(errno));
+        return -1;
+    }
+
+    file->size = file->st.st_size;
+    file->data = SS_Malloc(file->size);
+    if (!file->data) {
+        LOGE("failed to allocate memory for \"%s\": %s\n", filename, strerror(errno));
+        return -1;
+    }
+
+    FILE *f = fopen(filename, "rb");
+    if (f == NULL) {
+        LOGE("failed to open \"%s\": %s\n", filename, strerror(errno));
+        SS_Free(file->data);
+        file->data = NULL;
+        return -1;
+    }
+
+    ssize_t bytes_read = fread(file->data, 1, file->size, f);
+    if (bytes_read != file->size) {
+        LOGE("short read of \"%s\" (%ld bytes of %ld)\n", filename, (long)bytes_read, (long)file->size);
+        SS_Free(file->data);
+        file->data = NULL;
+        fclose(f);
+        return -1;
+    }
+    fclose(f);
+    //LOGL(LOG_SSENGINE,"SS_LoadFile --- [bytes_read %d]\n",bytes_read);
+    sha1(file->data, file->size, (uint32_t *) file->sha1);
+    return 0;
+}
+
+extern int gvalid_session;
+extern void create_dir(char *pathname, int mode);
+#ifdef SUPPORT_CONTAINER
+//unzip the source archive, apply patches from the extracted delta folder, and repack the source archive
+int SS_UpdateArchive(ua_dataSS_t * ua_dataSS, const char *source_filename, const char *target_filename,
+                     const char *source_sha1_str, const char *target_sha1_str)
+{
+    FILE *fp;
+    char *line = NULL, *token = NULL, *source_file = NULL, *new_file = NULL, *dir_to_create = NULL, *patch_file = NULL;
+    char patch_path_full[MAX_FILE_PATH] = { 0, };       //absolute path for patches
+    char source_path_full[MAX_FILE_PATH] = { 0, };      //absolute path for uncompressed source files
+    char target_path_full[MAX_FILE_PATH] = { 0, };
+    char patchlist[MAX_FILE_PATH] = { 0, };
+    char cmd[2 * MAX_FILE_PATH] = { 0, };
+    size_t len = 0, read = 0;
+    int result = S_SS_SUCCESS;
+    uint8_t target_sha1[SHA_DIGEST_SIZE];
+    uint8_t source_sha1[SHA_DIGEST_SIZE] = { 0, };
+    FileInfo source_data = { 0, };
+    int backupsrc = -1;
+    SinkFn sink = NULL;
+    void *tok = NULL;
+    int output = -1;
+    char *outname = NULL;
+
+    if (ParseSha1(target_sha1_str, target_sha1) != 0) {
+        LOGE("failed to parse tgt-sha1 \"%s\"\n", target_sha1_str);
+        return E_SS_FAILURE;
+    }
+
+    if (0 == gvalid_session) {
+        if (ParseSha1(source_sha1_str, source_sha1) != 0) {
+            LOGE("failed to parse tgt-sha1 \"%s\"\n", source_sha1_str);
+            return E_SS_FAILURE;
+        }
+        if (SS_LoadFile(source_filename, &source_data) == 0) {
+            if (memcmp(source_data.sha1, source_sha1, SHA_DIGEST_SIZE) == 0) {
+                LOGL(LOG_SSENGINE, "SS_UpdateDeltaFS - Patch Can be applied\n");
+                if (source_data.data)
+                    SS_Free(source_data.data);
+            } else if (memcmp(source_data.sha1, target_sha1, SHA_DIGEST_SIZE) == 0) {
+                LOGL(LOG_SSENGINE, "SS_UpdateDeltaFS - Patch Already applied\n");
+                if (source_data.data)
+                    SS_Free(source_data.data);
+                return S_SS_SUCCESS;
+            } else {
+                //Check for backup file SHA
+                SS_Free(source_data.data);
+                source_data.data = NULL;
+                LOGL(LOG_SSENGINE, "SS_UpdateDeltaFS - Source was currupted, Try loading from backup source\n");
+                if (SS_LoadFile(SS_BACKUP_SOURCE, &source_data) == 0) {
+                    if (memcmp(source_data.sha1, source_sha1, SHA_DIGEST_SIZE) == 0) {
+                        if (SS_CopyFile(NULL, SS_BACKUP_SOURCE, source_filename) != 0) {
+                            LOGE("copy of backup to \"%s\" failed: %s\n", source_filename, strerror(errno));
+                            SS_SetUpgradeState(E_SS_FSUPDATEFAILED);
+                            if (source_data.data)
+                                SS_Free(source_data.data);
+                            return E_SS_FAILURE;
+                        }
+                        LOGL(LOG_SSENGINE,
+                             "SS_UpdateDeltaFS - Patch Can be applied from using backup file as source\n");
+                    } else {
+                        SS_SetUpgradeState(E_SS_FSSRCCURRUPTED);
+                        if (source_data.data)
+                            SS_Free(source_data.data);
+                        return E_SS_FAILURE;
+                    }
+                } else {
+                    SS_SetUpgradeState(E_SS_FSSRCCURRUPTED);
+                    if (source_data.data)
+                        SS_Free(source_data.data);
+                    return E_SS_FAILURE;
+                }
+            }
+        } else {
+            LOGL(LOG_SSENGINE, "SS_UpdateDeltaFS - Source was deleted, Try loading from backup source\n");
+            if (SS_LoadFile(SS_BACKUP_SOURCE, &source_data) == 0) {
+                if (memcmp(source_data.sha1, source_sha1, SHA_DIGEST_SIZE) == 0) {
+                    if (SS_CopyFile(NULL, SS_BACKUP_SOURCE, source_filename) != 0) {
+                        LOGE("copy of backup to \"%s\" failed: %s\n", source_filename, strerror(errno));
+                        SS_SetUpgradeState(E_SS_FSUPDATEFAILED);
+                        if (source_data.data)
+                            SS_Free(source_data.data);
+                        return E_SS_FAILURE;
+                    }
+                    LOGL(LOG_SSENGINE, "SS_UpdateDeltaFS - Patch Can be applied from using backup file as source\n");
+                } else {
+                    SS_SetUpgradeState(E_SS_FSSRCCURRUPTED);
+                    if (source_data.data)
+                        SS_Free(source_data.data);
+                    return E_SS_FAILURE;
+                }
+            } else {
+                SS_SetUpgradeState(E_SS_FSSRCCURRUPTED);
+                if (source_data.data)
+                    SS_Free(source_data.data);
+                return E_SS_FAILURE;
+            }
+        }
+    }
+#ifndef ENHANCED_BSDIFF
+    backupsrc = SS_BackupSource(source_filename);
+    if (backupsrc != 0) {
+        LOGE("failed to Backup source File:[%s] \n", source_filename);
+        SS_SetUpgradeState(E_SS_FSSRCBACKUPFAILED);
+        return E_SS_FAILURE;
+    }
+#endif
+    //create workspace for processing container upgrade
+    SS_CreateFolder(NULL, SS_ARCHIVE_WORK_FOLDER);
+    SS_CreateFolder(NULL, SS_ARCHIVE_UNPACK_FOLDER);
+
+    //unpack the source container to the unpack workspace
+    snprintf(cmd, sizeof(cmd) - 1, "%s -qo %s -d %s", SS_UNZIP_COMMAND, source_filename, SS_ARCHIVE_UNPACK_FOLDER);
+    result = _system_cmd_wait(cmd);
+    if (result != S_SS_SUCCESS) {
+        LOGE("zip extraction for [%s] failed, code [%d]\n", cmd, result);
+        return E_SS_FAILURE;
+    }
+    //extract the unpack script from delta.tar to process containers later
+    if (tar_get_item_size(ua_dataSS->update_data->ua_delta_path, SS_KERNEL_UNPACK_SCRIPT) > 0)
+        if (tar_extract_file(ua_dataSS->update_data->ua_delta_path, SS_KERNEL_UNPACK_SCRIPT, SS_KERN_UNPK_SCRIPT_PATH) >
+            0)
+            LOGL(LOG_SSENGINE, "Extracted %s successfully\n", SS_KERNEL_UNPACK_SCRIPT);
+        else {
+            LOGE("Error in fn tar_extract_file for item %s", SS_KERNEL_UNPACK_SCRIPT);
+            SS_SetUpgradeState(E_SS_DELTA_IS_CORRUPT);
+            result = E_SS_FAILURE;
+        }
+    //move new tpk extracted in the delta folder to the work folder
+    new_file = strrchr(target_filename, '/');
+    snprintf(source_path_full, sizeof(source_path_full) - 1, "%s/%s", SS_ARCHIVE_DELTA_FOLDER, new_file + 1);
+    snprintf(target_path_full, sizeof(target_path_full) - 1, "%s/%s", SS_ARCHIVE_WORK_FOLDER, new_file + 1);
+
+    result = rename(source_path_full, target_path_full);
+    if (result != 0) {
+        LOGE("fatal error in moving %s to %s\n", source_path_full, target_path_full);
+        return E_SS_FAILURE;
+    }
+    snprintf(cmd, sizeof(cmd) - 1, "%s -qo %s -d %s", SS_UNZIP_COMMAND, target_path_full, SS_ARCHIVE_WORK_FOLDER);
+    result = _system_cmd_wait(cmd);
+    if (result != S_SS_SUCCESS) {
+        LOGE("zip extraction for [%s] failed, code [%d]\n", cmd, result);
+        return E_SS_FAILURE;
+    } else
+        LOGL(LOG_SSENGINE, "Thin zip extracted successfully\n");
+    // open the patch list and start iterating through the changes and the same files
+    snprintf(patchlist, MAX_FILE_PATH, "%s/%s", SS_ARCHIVE_DELTA_FOLDER, SS_CONTAINER_INFO_FILE);
+    fp = fopen(patchlist, "r");
+    if (!fp) {
+        LOGE("file open error [%s]\n", patchlist);
+        return E_SS_FAILURE;
+    }
+
+    while ((read = getline(&line, &len, fp)) != -1) {
+
+        switch (line[0]) {      // '-' == Delete File, 's' == same File, 'c' == Changed File
+        case 's':              //unchanged file: just move it from the unpacked old tpk into the work folder; the new tpk is updated at the end
+            token = strtok(line, SS_SEPARATOR_TOKEN);
+
+            source_file = strtok(NULL, SS_NEWLINE_TOKEN);
+
+            snprintf(source_path_full, sizeof(source_path_full) - 1, "%s/%s", SS_ARCHIVE_UNPACK_FOLDER, source_file);
+            snprintf(target_path_full, sizeof(target_path_full) - 1, "%s/%s", SS_ARCHIVE_WORK_FOLDER, source_file);
+            LOGL(LOG_SSENGINE, "copy %s\n", source_file);
+            result = SS_MoveFile(NULL, source_path_full, target_path_full);
+            if (result != S_SS_SUCCESS) {
+                LOGE("fatal error [%d]\n", errno);
+                goto Cleanup;
+            }
+            break;
+        case 'c':
+            token = strtok(line, SS_SEPARATOR_TOKEN);
+
+            source_file = strtok(NULL, SS_SEPARATOR_TOKEN);
+            patch_file = strtok(NULL, SS_NEWLINE_TOKEN);
+
+            snprintf(source_path_full, sizeof(source_path_full) - 1, "%s/%s", SS_ARCHIVE_UNPACK_FOLDER, source_file);
+            snprintf(target_path_full, sizeof(target_path_full) - 1, "%s/%s", SS_ARCHIVE_WORK_FOLDER, source_file);
+            LOGL(LOG_SSENGINE, "copy %s\n", source_file);
+            result = SS_MoveFile(NULL, source_path_full, target_path_full);
+            if (result != S_SS_SUCCESS) {
+                LOGE("fatal error [%d]\n", errno);
+            }
+
+            snprintf(patch_path_full, sizeof(patch_path_full) - 1, "%s/%s", SS_ARCHIVE_DELTA_FOLDER, patch_file);
+            snprintf(source_path_full, sizeof(source_path_full) - 1, "%s/%s", SS_ARCHIVE_WORK_FOLDER, source_file);
+
+            {
+                // We write the decoded output to "<tgt-file>.patch".
+                //allocate some extra space to allow for concatenating ".patch" with the name
+                outname = (char *)SS_Malloc(strlen(source_path_full) + 10);
+                if (outname == NULL)
+                    goto Cleanup;
+                strcpy(outname, source_path_full);
+                strcat(outname, ".patch");
+
+                output = open(outname, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU);
+                if (output < 0) {
+                    LOGE("failed to open output file %s: %s\n", outname, strerror(errno));
+                    SS_Free(outname);
+                    goto Cleanup;
+                }
+                sink = ss_fileSink;
+                tok = &output;
+            }
+            result = SS_ApplyBsdiff((char *)source_path_full, outname, patch_path_full, sink, tok, NULL);
+            LOGL(LOG_SSENGINE, "GenerateTarget Output is %d and result is %d\n", output, result);
+            if (output >= 0) {
+                fsync(output);
+                close(output);
+            }
+
+            if (result != 0) {
+                LOGE("applying patch failed %s\n", source_path_full);
+                if (outname != NULL) {
+                    unlink(outname);
+                }
+                goto Cleanup;
+            }
+            result = rename(outname, source_path_full);
+            if (result != 0) {
+                LOGE("fatal error %s\n", source_path_full);
+                goto Cleanup;
+            } else {
+                LOGL(LOG_SSENGINE, "Successfully applied patch for [%s]\n", source_path_full);
+            }
+            break;
+        default:
+            break;
+        }
+    }
+
+    new_file = strrchr(target_filename, '/');
+    snprintf(cmd, sizeof(cmd) - 1, "%s -p %s %s /opt/data/fota", SS_KERN_UNPK_SCRIPT_PATH, SS_ARCHIVE_WORK_FOLDER,
+             new_file + 1);
+    int ret = _system_cmd_wait(cmd);
+    LOGL(LOG_SSENGINE, "ret for %s is %d\n", cmd, ret);
+
+    //Apply diff between intermediate new tpk and the new tpk which contains central dir changes only
+    snprintf(patch_path_full, sizeof(patch_path_full) - 1, "%s/New_%s.delta", SS_ARCHIVE_DELTA_FOLDER, new_file + 1);
+    snprintf(source_path_full, sizeof(source_path_full) - 1, "%s/%s", SS_ARCHIVE_WORK_FOLDER, new_file + 1);
+
+    {
+        // We write the decoded output to "<tgt-file>.patch".
+        //allocate some extra space to allow for concatenating ".patch" with the name
+        outname = (char *)SS_Malloc(strlen(source_path_full) + 10);
+        if (outname == NULL)
+            return E_SS_FAILURE;
+        strcpy(outname, source_path_full);
+        strcat(outname, ".patch");
+
+        output = open(outname, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU);
+        if (output < 0) {
+            LOGE("failed to open output file %s: %s\n", outname, strerror(errno));
+            SS_Free(outname);
+            goto Cleanup;
+        }
+        sink = ss_fileSink;
+        tok = &output;
+    }
+    result = SS_ApplyBsdiff((char *)source_path_full, outname, patch_path_full, sink, tok, NULL);
+    LOGL(LOG_SSENGINE, "GenerateTarget Output is %d and result is %d\n", output, result);
+    if (output >= 0) {
+        fsync(output);
+        close(output);
+    }
+
+    if (result != 0) {
+        LOGE("applying patch failed %s\n", source_path_full);
+        if (outname != NULL) {
+            unlink(outname);
+        }
+        goto Cleanup;
+    }
+
+    if (SS_LoadFile(outname, &source_data) == 0)
+        result = memcmp(source_data.sha1, target_sha1, SHA_DIGEST_SIZE);
+    if (result != S_SS_SUCCESS) {
+        LOGE("patch did not produce expected sha1 \n");
+        SS_SetUpgradeState(E_SS_IMGSHA_MISMATCH);
+        goto Cleanup;
+    }
+
+    result = rename(outname, source_path_full);
+    if (result != 0) {
+        LOGE("fatal error %s\n", source_path_full);
+        goto Cleanup;
+    }
+    //Delete the old file and copy the patched archive; can't use rename as the partitions may be different
+    result = unlink(source_filename);
+    if (result != 0) {
+        LOGE("failed to unlink [%s] code [%d]\n", source_filename, errno);
+        goto Cleanup;
+    }
+    result = (int)SS_CopyFile(NULL, source_path_full, target_filename);
+    if (result != S_SS_SUCCESS) {
+        LOGE("failed to copy file [%s] result [%d]\n", source_path_full, result);
+        goto Cleanup;
+    }
+
+ Cleanup:
+    fclose(fp);
+    if (line)
+        SS_Free(line);
+    if (outname)
+        SS_Free(outname);
+    SS_DeleteFile(NULL, SS_KERN_UNPK_SCRIPT_PATH);
+    SS_DeleteFile(NULL, SS_FIND_CMD_TARGET);
+    SS_DeleteFolder(NULL, SS_CONTAINER_WORKSPACE);
+    return result;
+}
+#endif
+/*!
+ *********************************************************************************
+ *                     SS_UpdateDeltaFS
+ *********************************************************************************
+ *
+ * @brief
+ *     This is used to apply a patch to a single file during a delta FS upgrade
+ *
+ *
+ *     @param
+ *
+ *     @return         0 - in case of success
+ *                     1 - in case of error during patch application
+ *
+ *********************************************************************************
+ */
+
+int SS_UpdateDeltaFS(const char *source_filename, const char *target_filename,
+                     const char *source_sha1_str, const char *target_sha1_str, int patch_data_size)
+{
+    uint8_t target_sha1[SHA_DIGEST_SIZE];
+    sha1_ctx_t ctx1;
+    int output;
+    int retry = 1;
+    int use_backup = 0;
+    char *outname = NULL;
+    int backupsrc = -1;
+    int result = 0;
+    FileInfo source_file;
+    uint8_t source_sha1[SHA_DIGEST_SIZE] = { 0, };
+
+    if (ParseSha1(target_sha1_str, target_sha1) != 0) {
+        LOGE("failed to parse tgt-sha1 \"%s\"\n", target_sha1_str);
+        return E_SS_FAILURE;
+    }
+
+    /*
+       if the battery was removed in the middle of an update, gvalid_session becomes 0;
+       file integrity needs to be checked in that case
+     */
+    if (0 == gvalid_session) {
+        if (ParseSha1(source_sha1_str, source_sha1) != 0) {
+            LOGE("failed to parse tgt-sha1 \"%s\"\n", source_sha1_str);
+            return E_SS_FAILURE;
+        }
+        if (SS_LoadFile(source_filename, &source_file) == 0) {
+            if (memcmp(source_file.sha1, source_sha1, SHA_DIGEST_SIZE) == 0) {
+                LOGL(LOG_SSENGINE, "SS_UpdateDeltaFS - Patch Can be applied\n");
+                if (source_file.data)
+                    SS_Free(source_file.data);
+            } else if (memcmp(source_file.sha1, target_sha1, SHA_DIGEST_SIZE) == 0) {
+                LOGL(LOG_SSENGINE, "SS_UpdateDeltaFS - Patch Already applied\n");
+                if (source_file.data)
+                    SS_Free(source_file.data);
+                return S_SS_SUCCESS;
+            } else {
+                //Check for backup file SHA
+                SS_Free(source_file.data);
+                source_file.data = NULL;
+                LOGL(LOG_SSENGINE, "SS_UpdateDeltaFS - Source was currupted, Try loading from backup source\n");
+                if (SS_LoadFile(SS_BACKUP_SOURCE, &source_file) == 0) {
+                    if (memcmp(source_file.sha1, source_sha1, SHA_DIGEST_SIZE) == 0) {
+                        if (SS_CopyFile(NULL, SS_BACKUP_SOURCE, source_filename) != S_SS_SUCCESS) {
+                            LOGE("copy of backup to \"%s\" failed: %s\n", source_filename, strerror(errno));
+                            SS_SetUpgradeState(E_SS_FSUPDATEFAILED);
+                            if (source_file.data)
+                                SS_Free(source_file.data);
+                            return E_SS_FAILURE;
+                        }
+                        LOGL(LOG_SSENGINE,
+                             "SS_UpdateDeltaFS - Patch Can be applied from using backup file as source\n");
+                    } else {
+                        SS_SetUpgradeState(E_SS_FSSRCCURRUPTED);
+                        if (source_file.data)
+                            SS_Free(source_file.data);
+                        return E_SS_FAILURE;
+                    }
+                } else {
+                    SS_SetUpgradeState(E_SS_FSSRCCURRUPTED);
+                    if (source_file.data)
+                        SS_Free(source_file.data);
+                    return E_SS_FAILURE;
+                }
+            }
+        } else {
+            LOGL(LOG_SSENGINE, "SS_UpdateDeltaFS - Source was deleted, Try loading from backup source\n");
+            if (SS_LoadFile(SS_BACKUP_SOURCE, &source_file) == 0) {
+                if (memcmp(source_file.sha1, source_sha1, SHA_DIGEST_SIZE) == 0) {
+                    use_backup = 1;
+                    LOGL(LOG_SSENGINE, "SS_UpdateDeltaFS - Patch Can be applied from using backup file as source\n");
+                } else {
+                    SS_SetUpgradeState(E_SS_FSSRCCURRUPTED);
+                    if (source_file.data)
+                        SS_Free(source_file.data);
+                    return E_SS_FAILURE;
+                }
+            } else {
+                SS_SetUpgradeState(E_SS_FSSRCCURRUPTED);
+                if (source_file.data)
+                    SS_Free(source_file.data);
+                return E_SS_FAILURE;
+            }
+        }
+    }
+    //Now proceed with patch application since the patch can be applied
+    do {
+        int enough_space = 0;
+        size_t free_space;
+        char *tok;
+
+        if (retry > 0) {
+            if (use_backup) {
+                tok = strrchr(source_filename, '/');
+                *tok = '\0';
+            }
+            SS_GetAvailableFreeSpace(NULL, source_filename, &free_space);
+            enough_space = (free_space > (256 << 10)) &&        // 256k (two-block) minimum
+                (free_space > (patch_data_size * 3 / 2));       // 50% margin of error
+            if (use_backup)
+                *tok = '/';
+        }
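+
+        /*
+         * Worked example for the margin check above (illustrative numbers):
+         * with patch_data_size == 1 MiB the in-place path is taken only if
+         * free_space exceeds both 256 KiB and 1 MiB * 3/2, i.e. 1.5 MiB.
+         */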
+
+        if (!use_backup) {
+#ifndef ENHANCED_BSDIFF
+            backupsrc = SS_BackupSource(source_filename);
+            if (backupsrc != 0) {
+                LOGE("failed to Backup source File:[%s] \n", source_filename);
+                SS_SetUpgradeState(E_SS_FSSRCBACKUPFAILED);
+                return E_SS_FAILURE;
+            }
+#endif
+        }
+        if (!enough_space) {
+            LOGL(LOG_SSENGINE, "For %s: free space %ld bytes; enough %d\n", source_filename, (long)free_space,
+                 enough_space);
+            retry = 0;
+            use_backup = 1;
+            unlink(source_filename);
+        }
+        //LOGL(LOG_SSENGINE, "For %s: target %ld bytes; free space %ld bytes; enough %d\n",
+        // source_filename, (long)patch_data_size, (long)free_space, enough_space);
+        //LOGL(LOG_SSENGINE,"Generate Target Space availabitiy [%d]\n", enough_space);
+
+        SinkFn sink = NULL;
+        void *token = NULL;
+        output = -1;
+        outname = NULL;
+
+        {
+            // We write the decoded output to "<tgt-file>.patch".
+            //allocate some extra space to allow for concatenating ".patch" with the name
+            outname = (char *)SS_Malloc(strlen(target_filename) + 10);
+            if (outname == NULL)
+                return -1;
+            strcpy(outname, target_filename);
+            strcat(outname, ".patch");
+
+            output = open(outname, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU);
+            if (output < 0) {
+                if (errno == 2) {       // ENOENT: the target directory does not exist yet
+                    char *dir_path = strrchr(outname, '/');
+                    *dir_path = '\0';
+                    // need to create directory as the target may be different from source
+                    LOGL(LOG_SSENGINE, "need to create directory [%s]\n", outname);
+                    create_dir(outname, 0755);
+                    *dir_path = '/';
+                    output = open(outname, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU);
+                    if (output < 0) {
+                        LOGE("failed to open output file %s: %s\n", outname, strerror(errno));
+                        SS_Free(outname);
+                        return E_SS_FAILURE;
+                    }
+                }
+            }
+            sink = ss_fileSink;
+            token = &output;
+        }
+        sha1_init(&ctx1);
+        if (use_backup)
+            result = SS_ApplyBsdiff(SS_BACKUP_SOURCE, outname, SS_PATCHFILE_SOURCE, sink, token, &ctx1);
+        else
+            result = SS_ApplyBsdiff((char *)source_filename, outname, SS_PATCHFILE_SOURCE, sink, token, &ctx1);
+        if (output >= 0) {
+            fsync(output);
+            close(output);
+        }
+
+        if (result != 0) {
+            if (retry == 0) {
+                LOGE("applying patch failed result : [%d]\n", result);
+                SS_Free(outname);//wgid: 20739
+                SS_SetUpgradeState(E_SS_FSUPDATEFAILED);
+                return E_SS_FAILURE;
+            } else {
+                LOGE("applying patch failed; retrying\n");
+                SS_Free(outname);//wgid: 20739
+            }
+            if (outname != NULL) {
+                unlink(outname);
+            }
+        } else {
+            // succeeded; no need to retry
+            break;
+        }
+    } while (retry-- > 0);
+    const uint8_t current_target_sha1[SHA_DIGEST_SIZE] = { 0, };
+    sha1_final(&ctx1, (uint32_t *) & current_target_sha1);
+    if (memcmp(current_target_sha1, target_sha1, SHA_DIGEST_SIZE) != 0) {
+        LOGE("patch did not produce expected sha1\n");
+        SS_SetUpgradeState(E_SS_FSSHA_MISMATCH);
+        if (outname != NULL) {
+            SS_Free(outname);
+        }
+        return E_SS_FAILURE;
+    }
+    // Finally, rename the .patch file to replace the target file.
+#ifdef ENHANCED_BSDIFF
+    if (SS_rename1(outname, target_filename) != 0) {
+        LOGE("rename of .patch to \"%s\" failed: %s\n", target_filename, strerror(errno));
+        SS_SetUpgradeState(E_SS_FSUPDATEFAILED);
+        if (outname != NULL) {
+            SS_Free(outname);
+        }
+        return E_SS_FAILURE;
+    }
+#else
+    if (rename(outname, target_filename) != 0) {
+        LOGE("rename of .patch to \"%s\" failed: %s\n", target_filename, strerror(errno));
+        SS_SetUpgradeState(E_SS_FSUPDATEFAILED);
+        if (outname != NULL) {
+            SS_Free(outname);
+        }
+        return E_SS_FAILURE;
+    }
+    //remove source file if target is not same
+    if (strcmp(source_filename, target_filename) != 0)
+        unlink(source_filename);
+    SS_BackupSourceClear();
+#endif
+    SS_PatchSourceClear();
+    SS_Free(outname);
+
+    return result;
+}
+
+/*!
+ *********************************************************************************
+ *                     SS_UpdateDeltaKernel
+ *********************************************************************************
+ *
+ * @brief
+ *     Applies the kernel delta patch during a delta image upgrade.
+ *
+ * @param ua_dataSS         update agent data describing the kernel partition and delta
+ * @param write_to_blkdev   callback used to write the patched kernel to the block device
+ *
+ * @return    0 - in case of success
+ *            1 - in case of error during patch application
+ *
+ *********************************************************************************
+ */
+
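+/*
+ * Rough flow (see the code below): back up and dump the source kernel partition into a
+ * workspace, unpack it with SS_KERNEL_UNPACK_SCRIPT, read the per-part deltas from the
+ * kernel delta header, bspatch each part (re-gzipping "piggy"), reassemble the parts
+ * into the target kernel, verify its SHA1 and flash it.
+ */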
+int SS_UpdateDeltaKernel(ua_dataSS_t * ua_dataSS, int (*write_to_blkdev) (char *, int, int, char *))
+{
+    uint8_t target_sha1[SHA_DIGEST_SIZE];
+    uint8_t source_sha1[SHA_DIGEST_SIZE];
+    FileInfo source_file;
+    int result = S_SS_SUCCESS;
+    int blk_cnt, read_count = 0;
+    int blk_start = 0;
+    int backupsrc = -1;
+    int use_backup_img = -1;
+    FILE *fp = NULL, *wp = NULL, *kp = NULL;
+    int i = 0, j = 0, file_len = 0;
+    char *magic = NULL, *file_name = NULL, *buf = NULL;
+    int a = 0;                  // int (not char) so the EOF check below works reliably
+    char cmd[1024] = { 0, };
+    char source_filename[MAX_FILE_PATH] = { 0, };
+    char part_filename[MAX_FILE_PATH] = { 0, };
+    int file_num = 0;
+    SinkFn sink = NULL;
+    void *tok = NULL;
+    int output = -1;
+    char *outname = NULL;
+    // Kernel parts produced by unpacking the kernel; the delta is applied to each of these
+    char *kernel_parts[] = { "decompression_code",
+        "piggy.gz",
+        "padding_piggy",
+        "piggy_trailer"
+    };
+
+    if (ParseSha1(ua_dataSS->update_cfg->target_sha1, target_sha1) != 0) {
+        LOGE("failed to parse tgt-sha1 \"%s\"\n", ua_dataSS->update_cfg->target_sha1);
+        return E_SS_FAILURE;
+    }
+
+    source_file.size = ua_dataSS->update_cfg->soure_img_size;
+    source_file.data = NULL;
+    if (0 == gvalid_session) {
+
+        if (ParseSha1(ua_dataSS->update_cfg->soure_sha1, source_sha1) != 0) {
+            LOGE("failed to parse Src-sha1 \"%s\"\n", ua_dataSS->update_cfg->soure_sha1);
+            return E_SS_FAILURE;
+        }
+
+        if (SS_LoadPartition(ua_dataSS->parti_info->ua_blk_name, &source_file) == 0) {
+            if (memcmp(source_file.sha1, source_sha1, SHA_DIGEST_SIZE) == 0) {
+                LOGL(LOG_SSENGINE, "SS_UpdateDeltaIMG - Patch Can be applied\n");
+
+            } else if (memcmp(source_file.sha1, target_sha1, SHA_DIGEST_SIZE) == 0) {
+                LOGL(LOG_SSENGINE, "SS_UpdateDeltaIMG - Patch Already applied\n");
+                SS_Free(source_file.data);
+                return S_SS_SUCCESS;
+            } else {
+                SS_Free(source_file.data);
+                source_file.data = NULL;
+                LOGL(LOG_SSENGINE, "SS_UpdateDeltaIMG - Source was currupted, Try loading from backup source\n");
+                if (SS_LoadPartition(SS_BACKUP_SOURCE, &source_file) == 0) {
+                    if (memcmp(source_file.sha1, source_sha1, SHA_DIGEST_SIZE) == 0) {
+                        use_backup_img = 1;
+
+                        LOGL(LOG_SSENGINE,
+                             "SS_UpdateDeltaKernel - Patch Can be applied using backup file as source\n");
+                    } else {
+                        SS_SetUpgradeState(E_SS_IMGSRCCURRUPTED);
+                        SS_Free(source_file.data);
+                        return E_SS_FAILURE;
+                    }
+                }
+            }
+        }
+    } else {                    // in case of kernel delta, copy the kernel data from the block device into a buffer
+        if (SS_LoadPartition(ua_dataSS->parti_info->ua_blk_name, &source_file) != 0) {
+            SS_SetUpgradeState(E_SS_IMGSRCCURRUPTED);
+            LOGE("Fatal Error : Kernel block is corrupted\n");
+            return E_SS_FAILURE;
+        }
+    }
+    if (use_backup_img == -1) {
+        backupsrc = SS_BackupSource(ua_dataSS->parti_info->ua_blk_name);
+        if (backupsrc != 0) {
+            LOGE("failed to Backup source File:[%s] \n", ua_dataSS->parti_info->ua_blk_name);
+            SS_SetUpgradeState(E_SS_IMGSRCBACKUPFAILED);
+            return E_SS_FAILURE;
+        }
+    }
+    //Cleanup workspace and copy helper executables to it before proceeding
+    SS_DeleteFolder(NULL, SS_KERNEL_WORKSPACE);
+    create_dir(SS_KERNEL_WORKSPACE, 0755);
+    SS_CopyFile(NULL, SS_GZIP_SOURCE, SS_GZIP_TARGET);
+    SS_CopyFile(NULL, SS_STAT_SOURCE, SS_STAT_TARGET);
+    SS_CopyFile(NULL, SS_DD_SOURCE, SS_DD_TARGET);
+
+    if (tar_get_item_size(ua_dataSS->update_data->ua_delta_path, SS_KERNEL_UNPACK_SCRIPT) > 0) {
+        if (tar_extract_file(ua_dataSS->update_data->ua_delta_path, SS_KERNEL_UNPACK_SCRIPT, SS_KERN_UNPK_SCRIPT_PATH) > 0) {
+            LOGL(LOG_SSENGINE, "Extracted %s successfully\n", SS_KERNEL_UNPACK_SCRIPT);
+        } else {
+            LOGE("Error in fn tar_extract_file for item %s", SS_KERNEL_UNPACK_SCRIPT);
+            SS_SetUpgradeState(E_SS_DELTA_IS_CORRUPT);
+            result = E_SS_FAILURE;
+            goto Cleanup;
+        }
+    } else {
+        LOGE("Error size is not positive for item %s", SS_KERNEL_UNPACK_SCRIPT);
+        SS_SetUpgradeState(E_SS_DELTA_IS_CORRUPT);
+        result = E_SS_FAILURE;
+        goto Cleanup;
+    }
+    // Now write the kernel data to the workspace and start applying the patch
+    snprintf(source_filename, sizeof(source_filename) - 1, "%s/%s", SS_KERNEL_WORKSPACE, SS_KERNEL_NAME);
+    fp = fopen(source_filename, "w");
+    if (!fp) {
+        LOGE("file open error [%s] code [%d]\n", source_filename, errno);
+        SS_Free(source_file.data);
+        result = E_SS_FAILURE;
+        goto Cleanup;
+    }
+    //write source kernel data to workspace
+    read_count = fwrite(source_file.data, 1, source_file.size, fp);
+    if (read_count != source_file.size) {
+        LOGE("file write error read_count = %d for [%s]\n", read_count, source_filename);
+        SS_Free(source_file.data);
+        result = E_SS_FAILURE;
+        goto Cleanup;
+    }
+    SS_Free(source_file.data);
+    fclose(fp);
+    fp = NULL;//wgid: 59313
+
+    //Unpack source kernel
+    int offset = getOffset(source_filename);
+    if(offset < 0){
+        LOGE("Failed to get offset\n");
+        result = E_SS_FAILURE;
+        goto Cleanup;
+    }
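+    // Run the unpack script as "<script> -u <workspace> <kernel-name> <offset>"; it is
+    // expected to split the packed kernel into the parts listed in kernel_parts[]
+    // under "<kernel-name>_unpacked/".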
+    snprintf(cmd, sizeof(cmd) - 1, "%s -u %s %s %d", SS_KERN_UNPK_SCRIPT_PATH, SS_KERNEL_WORKSPACE, SS_KERNEL_NAME,
+             offset);
+    int ret = _system_cmd_wait(cmd);
+    LOGL(LOG_SSENGINE, "ret for %s is %d\n", cmd, ret);
+
+    //open delta file, extract kernel delta parts and apply patch to previously unpacked kernel
+    fp = fopen(SS_PATCHFILE_SOURCE, "r");
+    if (fp == NULL) {
+        LOGE("Failed to open kernel delta patch\n");
+        result = E_SS_FAILURE;
+        goto Cleanup;
+    }
+    // read the kernel delta header to get the part names and sizes
+    buf = SS_Malloc(SS_KERNEL_DELTA_HEADER + 1);        // +1 for the NUL terminator added below
+    if(!buf){//wgid: 13099
+        LOGE("Failed to allocate memory\n");
+        result = E_SS_MALLOC_ERROR;
+        goto Cleanup;
+    }
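+    // The delta header (the first SS_KERNEL_DELTA_HEADER bytes) is parsed below as a
+    // colon-separated list: <magic>:<part-count> followed by <name>:<size> pairs, with
+    // the part payloads starting right after the header.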
+    read_count = fread(buf, 1, SS_KERNEL_DELTA_HEADER, fp);
+    buf[read_count] = '\0';     // NUL-terminate the header before tokenizing it
+    magic = strtok(buf, ":");
+    file_num = atoi(strtok(NULL, ":"));
+
+    //adjust offset to start of data section before proceeding
+    fseek(fp, SS_KERNEL_DELTA_HEADER, SEEK_SET);
+
+    while (file_num-- > 0) {
+        file_name = strtok(NULL, ":");
+        file_len = atoi(strtok(NULL, ":"));
+        j = file_len;
+        snprintf(source_filename, sizeof(source_filename) - 1, "%s/%s_unpacked/%s", SS_KERNEL_WORKSPACE, SS_KERNEL_NAME,
+                 file_name);
+        snprintf(part_filename, sizeof(part_filename) - 1, "%s/%s", SS_KERNEL_WORKSPACE, file_name);
+        wp = fopen(part_filename, "w");
+        while (j-- > 0) {
+            a = fgetc(fp);
+            fputc(a, wp);
+        }
+        fclose(wp);
+
+        //apply bspatch to the unpacked kernel parts
+
+        {
+            // We write the decoded output to "<part-file>.patch".
+            // Allocate some extra space to allow for concatenating ".patch" with the name.
+            outname = (char *)SS_Malloc(strlen(source_filename) + 10);
+            if (outname == NULL) {
+                SS_SetUpgradeState(E_SS_MALLOC_ERROR);
+                result = E_SS_FAILURE;
+                goto Cleanup;
+            }
+            strcpy(outname, source_filename);
+            strcat(outname, ".patch");
+
+            output = open(outname, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU);
+            if (output < 0) {
+                LOGE("failed to open output file %s: %s\n", outname, strerror(errno));
+                SS_Free(outname);
+                result = E_SS_FAILURE;
+                goto Cleanup;
+            }
+            sink = ss_fileSink;
+            tok = &output;
+        }
+        result = SS_ApplyBsdiff(source_filename, outname, part_filename, sink, tok, NULL);
+        LOGL(LOG_SSENGINE, "GenerateTarget Output is %d and result is %d\n", output, result);
+        if (output >= 0) {
+            fsync(output);
+            close(output);
+        }
+
+        if (result != S_SS_SUCCESS) {
+            LOGE("applying patch failed %s\n", source_filename);
+            if (outname != NULL) {
+                unlink(outname);
+            }
+            goto Cleanup;
+        }
+        result = rename(outname, source_filename);
+        if (result != S_SS_SUCCESS) {
+            LOGE("fatal error %s\n", source_filename);
+            goto Cleanup;
+        }
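+        // Special case: a patched "piggy" part is re-gzipped into piggy.gz, since the
+        // reassembled kernel carries the compressed payload (see kernel_parts[]).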
+        if (strcmp(file_name, "piggy") == 0) {
+            snprintf(cmd, sizeof(cmd) - 1,
+                     "%s/gzip -n -9 -c %s/%s/%s > %s/%s/%s.gz",
+                     SS_KERNEL_WORKSPACE, SS_KERNEL_WORKSPACE, SS_KERNEL_UNPACK_DIR, file_name,
+                     SS_KERNEL_WORKSPACE, SS_KERNEL_UNPACK_DIR, file_name);
+            result = _system_cmd_wait(cmd);
+            LOGL(LOG_SSENGINE, "ret for %s = %d\n", cmd, result);
+            unlink(source_filename);
+        }
+        unlink(part_filename);
+    }
+    // open the new kernel file and append the kernel parts to it in order
+    snprintf(source_filename, sizeof(source_filename) - 1, "%s/%s", SS_KERNEL_WORKSPACE, SS_KERNEL_TARGET_NAME);
+    kp = fopen(source_filename, "w");
+    for (i = 0; i < 4; i++) {
+        snprintf(part_filename, sizeof(part_filename) - 1, "%s/%s/%s", SS_KERNEL_WORKSPACE, SS_KERNEL_UNPACK_DIR,
+                 kernel_parts[i]);
+        wp = fopen(part_filename, "r");
+        fseek(wp, 0, SEEK_END);
+        j = ftell(wp);
+        fseek(wp, 0, SEEK_SET);
+        while (j-- > 0) {
+            a = fgetc(wp);
+            if(a != EOF)//wgid: 4428
+                fputc(a, kp);
+            else
+                break;
+        }
+        fclose(wp);
+        unlink(part_filename);
+    }
+    fclose(fp);
+    fp = NULL;
+    fclose(kp);
+
+    if (SS_LoadFile(source_filename, &source_file) == 0)
+        result = memcmp(source_file.sha1, target_sha1, SHA_DIGEST_SIZE);
+    else
+        result = E_SS_FAILURE;  // treat a failed load as a verification failure
+    if (result != S_SS_SUCCESS) {
+        LOGE("patch did not produce expected sha1\n");
+        SS_SetUpgradeState(E_SS_IMGSHA_MISMATCH);
+        goto Cleanup;
+    }
+    // Considering EMMC partition by default
+
+    blk_cnt = ((ua_dataSS->update_cfg->target_img_size - 1) / SECTOR_SIZE) + 1;
+    result = write_to_blkdev((char *)ua_dataSS->parti_info->ua_blk_name, blk_start, blk_cnt, (char *)source_file.data);
+    if (result != S_SS_SUCCESS) {
+        LOGE("write of patched data to %s failed\n", ua_dataSS->parti_info->ua_blk_name);       // All returns should go to CLEAN UP.
+        SS_SetUpgradeState(E_SS_IMGFLASHWRITEFAIL);
+        goto Cleanup;
+    }
+
+ Cleanup:
+    SS_BackupSourceClear();
+    SS_PatchSourceClear();
+    SS_DeleteFile(NULL, SS_KERN_UNPK_SCRIPT_PATH);
+    SS_DeleteFolder(NULL, SS_KERNEL_WORKSPACE);
+    SS_Free(buf);
+    SS_Free(outname);//wgid: 20740
+    if (result == S_SS_SUCCESS)
+        LOGL(LOG_SSENGINE, "************* SS_UpdateDeltaKernel  SUCCESS *****************\n");
+    else {
+        LOGL(LOG_SSENGINE, "************* SS_UpdateDeltaKernel  FAILED *****************\n");
+        if (fp)
+            fclose(fp);         //wgid:14711
+    }
+    return result;
+
+}
+
+/*!
+ *********************************************************************************
+ *                     SS_UpdateDeltaIMG
+ *********************************************************************************
+ *
+ * @brief
+ *     Applies the delta patch for an image during a delta image upgrade.
+ *
+ * @param ua_dataSS         update agent data describing the image partition and delta
+ * @param write_to_blkdev   callback used to write the patched image to the block device
+ *
+ * @return    0 - in case of success
+ *            1 - in case of error during patch application
+ *
+ *********************************************************************************
+ */
+
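+/*
+ * Rough flow: the patched image is generated entirely in memory through the
+ * ss_memorySink, its SHA1 is checked against the expected target SHA1, and the
+ * result is then either written to the block device (DELTA_IMG) or to
+ * ua_temp_path (EXTRA).
+ */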
+int SS_UpdateDeltaIMG(ua_dataSS_t * ua_dataSS, int (*write_to_blkdev) (char *, int, int, char *))
+{
+    uint8_t target_sha1[SHA_DIGEST_SIZE];
+    uint8_t source_sha1[SHA_DIGEST_SIZE];
+    uint8_t current_target_sha1[SHA_DIGEST_SIZE];
+    FileInfo source_file;
+    sha1_ctx_t ctx1;
+    MemorySinkInfo msi;
+    int result = S_SS_SUCCESS;
+    int blk_cnt;
+    int blk_start = 0;
+    int backupsrc = -1;
+    int use_backup_img = -1;
+    int fd = -1;
+    if (ParseSha1(ua_dataSS->update_cfg->target_sha1, target_sha1) != 0) {
+        LOGE("failed to parse tgt-sha1 \"%s\"\n", ua_dataSS->update_cfg->target_sha1);
+        return E_SS_FAILURE;
+    }
+
+    source_file.size = ua_dataSS->update_cfg->soure_img_size;
+    source_file.data = NULL;
+    if (0 == gvalid_session) {
+
+        if (ParseSha1(ua_dataSS->update_cfg->soure_sha1, source_sha1) != 0) {
+            LOGE("failed to parse Src-sha1 \"%s\"\n", ua_dataSS->update_cfg->soure_sha1);
+            return E_SS_FAILURE;
+        }
+
+        if (SS_LoadPartition(ua_dataSS->parti_info->ua_blk_name, &source_file) == 0) {
+            if (memcmp(source_file.sha1, source_sha1, SHA_DIGEST_SIZE) == 0) {
+                LOGL(LOG_SSENGINE, "SS_UpdateDeltaIMG - Patch Can be applied\n");
+                SS_Free(source_file.data);
+            } else if (memcmp(source_file.sha1, target_sha1, SHA_DIGEST_SIZE) == 0) {
+                LOGL(LOG_SSENGINE, "SS_UpdateDeltaIMG - Patch Already applied\n");
+                SS_Free(source_file.data);
+                return S_SS_SUCCESS;
+            } else {
+                SS_Free(source_file.data);
+                source_file.data = NULL;
+                LOGL(LOG_SSENGINE, "SS_UpdateDeltaIMG - Source was currupted, Try loading from backup source\n");
+                if (SS_LoadPartition(SS_BACKUP_SOURCE, &source_file) == 0) {
+                    if (memcmp(source_file.sha1, source_sha1, SHA_DIGEST_SIZE) == 0) {
+                        use_backup_img = 1;
+                        SS_Free(source_file.data);
+                        LOGL(LOG_SSENGINE,
+                             "SS_UpdateDeltaIMG - Patch Can be applied using backup file as source\n");
+                    } else {
+                        SS_SetUpgradeState(E_SS_IMGSRCCURRUPTED);
+                        SS_Free(source_file.data);
+                        return E_SS_FAILURE;
+                    }
+                }
+            }
+        }
+    }
+    if (use_backup_img == -1) {
+        backupsrc = SS_BackupSource(ua_dataSS->parti_info->ua_blk_name);
+        if (backupsrc != 0) {
+            LOGE("failed to Backup source File:[%s] \n", ua_dataSS->parti_info->ua_blk_name);
+            SS_SetUpgradeState(E_SS_IMGSRCBACKUPFAILED);
+            return E_SS_FAILURE;
+        }
+    }
+    SinkFn sink = NULL;
+    void *token = NULL;
+
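+    // Size the output buffer as a whole number of SECTOR_SIZE blocks so the patched
+    // image can later be written block-wise via write_to_blkdev().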
+    blk_cnt = ((ua_dataSS->update_cfg->target_img_size - 1) / SECTOR_SIZE) + 1;
+
+    msi.buffer = SS_Malloc(blk_cnt * SECTOR_SIZE);
+    if (msi.buffer == NULL) {
+        LOGE("failed to alloc %ld bytes for output\n", (long)ua_dataSS->update_cfg->target_img_size);
+        SS_SetUpgradeState(E_SS_MALLOC_ERROR);
+        return E_SS_FAILURE;
+    }
+    msi.pos = 0;
+    msi.size = ua_dataSS->update_cfg->target_img_size;
+    sink = ss_memorySink;
+    token = &msi;
+
+    sha1_init(&ctx1);
+    // if the source was corrupted, use the backup to apply the diff
+    if (use_backup_img == -1)
+        result =
+            SS_ApplyBsdiff((char *)ua_dataSS->parti_info->ua_blk_name, NULL, SS_PATCHFILE_SOURCE, sink, token, &ctx1);
+    else
+        result = SS_ApplyBsdiff(SS_BACKUP_SOURCE, NULL, SS_PATCHFILE_SOURCE, sink, token, &ctx1);
+    if (result != S_SS_SUCCESS) {
+        LOGE("failed to SS_ApplyBsdiff\n");
+        SS_SetUpgradeState(E_SS_IMGRECOVERYWRITEFAILED);
+        goto Cleanup;
+    }
+
+    sha1_final(&ctx1, (uint32_t *)current_target_sha1);
+    result = memcmp(current_target_sha1, target_sha1, SHA_DIGEST_SIZE);
+    if (result != S_SS_SUCCESS) {
+        LOGE("patch did not produce expected sha1 \n");
+        SS_SetUpgradeState(E_SS_IMGSHA_MISMATCH);
+        goto Cleanup;
+    }
+    // Considering EMMC partition by default
+
+    if (ua_dataSS->update_cfg->update_type == DELTA_IMG) {
+        blk_cnt = ((ua_dataSS->update_cfg->target_img_size - 1) / SECTOR_SIZE) + 1;
+        result = write_to_blkdev((char *)ua_dataSS->parti_info->ua_blk_name, blk_start, blk_cnt, (char *)msi.buffer);
+        if (result != S_SS_SUCCESS) {
+            LOGE("write of patched data to %s failed\n", ua_dataSS->parti_info->ua_blk_name);   // All returns should go to CLEAN UP.
+            SS_SetUpgradeState(E_SS_IMGFLASHWRITEFAIL);
+            goto Cleanup;
+        }
+    } else if (ua_dataSS->update_cfg->update_type == EXTRA && ua_dataSS->update_data->ua_temp_path) {
+        fd = open(ua_dataSS->update_data->ua_temp_path, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU);
+        if (fd < 0) {
+            LOGE("failed to open %s for write: %s\n", ua_dataSS->update_data->ua_temp_path, strerror(errno));
+            SS_SetUpgradeState(E_SS_IMGRECOVERYWRITEFAILED);
+            result = E_SS_FAILURE;
+            goto Cleanup;
+        }
+        result = SS_WriteFile(NULL, fd, 0, msi.buffer, msi.size);
+        if (result != S_SS_SUCCESS) {
+            LOGE("failed to write\n");
+            SS_SetUpgradeState(E_SS_IMGRECOVERYWRITEFAILED);
+            goto Cleanup;
+        }
+        fsync(fd);
+    } else {
+        SS_SetUpgradeState(E_SS_IMGUPDATEFAILED);
+        result = E_SS_FAILURE;
+        LOGE("failed to apply patch - Invalid Update type params \n");
+    }
+
+ Cleanup:
+    SS_BackupSourceClear();
+    SS_PatchSourceClear();
+    if (msi.buffer)
+        SS_Free(msi.buffer);
+    if (fd >= 0)
+        close(fd);
+    if (result == S_SS_SUCCESS)
+        LOGL(LOG_SSENGINE, "************* SS_UpdateDeltaIMG  SUCCESS *****************\n");
+    return result;
+
+}
diff --git a/ss_patch/ss_patchdelta.h b/ss_patch/ss_patchdelta.h
new file mode 100755 (executable)
index 0000000..db54078
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * libtota
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _SS_PATCHDELTA_H
+#define _SS_PATCHDELTA_H
+
+#include <stdint.h>
+#include "sha1.h"
+#include "unistd.h"
+#include "fcntl.h"
+#include "errno.h"
+#include "SS_Engine_Update.h"
+
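+// When ENHANCED_BSDIFF is defined, the file-system update path replaces the target
+// via SS_rename1() instead of rename() (see ss_patchdelta.c).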
+//#define ENHANCED_BSDIFF
+#define SS_UPDATE_FS 0
+#define SS_UPDATE_IMG 1
+//#define SHA_DIGEST_SIZE 20
+typedef struct {
+    int type;
+    ssize_t size;
+    char *data;
+} Value;
+typedef struct _Patch {
+    uint8_t sha1[SHA_DIGEST_SIZE];
+    const char *patch_filename;
+} Patch;
+
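+// Filled by SS_LoadPartition()/SS_LoadFile(): data and size hold the loaded contents,
+// and sha1 is expected to hold the SHA1 digest of those contents (see ss_patchdelta.c).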
+typedef struct _FileInfo {
+    unsigned char sha1[20];     //SHA_DIGEST_SIZE 20
+    unsigned char *data;
+    int size;
+    struct stat st;
+} FileInfo;
+
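+// Output sink used while applying a bsdiff patch: it receives (buffer, length, token)
+// chunks of the patched data and presumably returns the number of bytes it handled.
+// token carries sink-specific context, e.g. an output fd for ss_fileSink or a
+// MemorySinkInfo for ss_memorySink (see ss_patchdelta.c).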
+typedef ssize_t(*SinkFn) (unsigned char *, ssize_t, void *);
+
+int ParseSha1(const char *str, uint8_t * digest);
+
+void ShowBSDiffLicense();
+int ApplyBSDiffPatch(const unsigned char *old_data, ssize_t old_size,
+                     const Value * patch, ssize_t patch_offset, SinkFn sink, void *token, sha1_ctx_t * ctx1);
+int ApplyBSDiffPatchMem(const unsigned char *old_data, ssize_t old_size,
+                        const Value * patch, ssize_t patch_offset, unsigned char **new_data, ssize_t * new_size);
+//int ApplyOptimizedBSDiffPatch(const unsigned char* old_data,void* token,
+//                                                                 const Value* patch, SinkFn sink,sha1_ctx_t* ctx1);
+
+int SS_LoadPartition(const char *filename, FileInfo * file);
+int SS_LoadFile(const char *filename, FileInfo * file);
+extern void SS_SetUpgradeState(int Val);
+extern long SS_GetAvailableFreeSpace(void *pbUserData, const char *partition_name, SS_UINT32 * available_flash_size);
+extern int SS_BackupSource(const char *source_filename);
+extern int SS_ApplyBsdiff(char *oldfile, char *newfile, char *patch, SinkFn sink, void *token, sha1_ctx_t * ctx1);
+extern int SS_BackupSourceClear();
+extern int SS_PatchSourceClear();
+extern long SS_WriteFile(void *pbUserData,
+                         long wHandle, SS_UINT32 dwPosition, unsigned char *pbBuffer, SS_UINT32 dwSize);
+extern void SS_Free(void * pMemBlock);
+extern long SS_CopyFile(void * pbUserData, const char * strFromPath, const char * strToPath);
+extern long SS_DeleteFolder(void * pbUserData, const char * strPath);
+extern long SS_DeleteFile(void * pbUserData, const char * strPath);
+extern int tar_get_item_size(char * tar, char * item);
+extern int tar_extract_file(char * tar, char * item, char * pathname);
+extern int _system_cmd_wait(const char * command);
+#endif
diff --git a/tota.pc.in b/tota.pc.in
new file mode 100755 (executable)
index 0000000..02eaba9
--- /dev/null
@@ -0,0 +1,13 @@
+# Package Information for pkg-config
+
+prefix=@PREFIX@
+exec_prefix=@EXEC_PREFIX@
+libdir=@LIBDIR@
+includedir=@INCLUDEDIR@
+
+Name: libtota
+Description: fota function library
+Version: @VERSION@
+Requires:
+Libs: -L${libdir}
+Cflags: -I${includedir}