Initial codes of tota-upg
author Sunmin Lee <sunm.lee@samsung.com>
Fri, 14 Jul 2017 05:34:38 +0000 (14:34 +0900)
committer Sunmin Lee <sunm.lee@samsung.com>
Thu, 24 Aug 2017 06:13:55 +0000 (15:13 +0900)
Change-Id: I5bef4311c1ceb5a91d836f4aed9aefd7d7c0f5af

21 files changed:
mk_delta/common/bin/CreatePatch.py [new file with mode: 0755]
mk_delta/common/bin/chsmack_attr.sh [new file with mode: 0755]
mk_delta/common/bin/dzImagescript.sh [new file with mode: 0755]
mk_delta/common/bin/extended_attr.sh [new file with mode: 0755]
mk_delta/common/bin/unpack.sh [new file with mode: 0755]
mk_delta/rpi3/bin/batch_mk_delta.sh [new file with mode: 0755]
mk_delta/rpi3/bin/mk_delta.sh [new file with mode: 0755]
mk_delta/rpi3/bin/mk_part_delta.sh [new file with mode: 0755]
mk_delta/rpi3/bin/pack_delta_tar.sh [new file with mode: 0755]
mk_delta/rpi3/cfg/default_part.cfg [new file with mode: 0644]
mk_delta/rpi3/cfg/delta.cfg [new file with mode: 0644]
mk_delta/rpi3/data/new_tar/.gitignore [new file with mode: 0755]
mk_delta/rpi3/data/old_tar/.gitignore [new file with mode: 0755]
mk_delta/tw1/bin/batch_mk_delta.sh [new file with mode: 0755]
mk_delta/tw1/bin/mk_delta.sh [new file with mode: 0755]
mk_delta/tw1/bin/mk_part_delta.sh [new file with mode: 0755]
mk_delta/tw1/bin/pack_delta_tar.sh [new file with mode: 0755]
mk_delta/tw1/cfg/default_part.cfg [new file with mode: 0755]
mk_delta/tw1/cfg/delta.cfg [new file with mode: 0755]
mk_delta/tw1/data/new_tar/.gitignore [new file with mode: 0755]
mk_delta/tw1/data/old_tar/.gitignore [new file with mode: 0755]

diff --git a/mk_delta/common/bin/CreatePatch.py b/mk_delta/common/bin/CreatePatch.py
new file mode 100755 (executable)
index 0000000..8af3706
--- /dev/null
@@ -0,0 +1,1245 @@
+#!/usr/bin/python
+
+import sys
+
+
+if sys.hexversion < 0x02040000:
+  print >> sys.stderr, "Python 2.4 or newer is required."
+  sys.exit(1)
+
+import sys
+import os
+import filecmp
+import shutil
+import subprocess
+import re
+import ntpath
+import zipfile
+import datetime
+import hashlib
+import operator
+import locale
+import errno
+import logging
+import glob
+import apt
+import stat
+'''
+Diff two folders and create delta using SS_BSDIFF
+Maintains the same script format that would be generated if we used diffutil
+
+1. Create a list of files in each base folder.
+2. These files will fall into one of the categories below:
+       1) Only in OLD - should be deleted
+       2) Only in NEW - should be added or renamed accordingly
+       3) File exists in both directories but contents differ - create a diff
+       4) File name is the same but the TYPE has changed (file to folder, folder to link, etc.)
+       5) Duplicates in the lists of deletes and news
+       6) Close-matching diffs even when the name changes across directories (for matching extensions)
+       7) Clearing empty directories after moves or diffs under rename
+       8) Supporting verbatim - any entry in Verbatim_List.txt is treated as a NEW file instead of a patch
+
+Current case
+1. Given two folders, from the lists of REMOVED and NEW files, find whether there
+is a version change and create a diff between them
+
+TODO
+Extend the same script to generate the entire DIFF and replace the TOTAlib.sh file
+Catch errors at all stages; SHOULD exit and return an error in case of failure
+'''
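+
+# Illustrative invocations (a sketch based on the argument handling in main(); the
+# partition names and paths below are hypothetical examples, not fixed values):
+#
+#   CreatePatch.py DELTA_FS rootfs old/rootfs new/rootfs result/rootfs \
+#                  old_attr.txt new_attr.txt update_new.cfg
+#   CreatePatch.py DELTA_IMG zImage old/zImage new/zImage result/zImage update_new.cfg
+#   CreatePatch.py FULL_IMG ramdisk old/ramdisk.img new/ramdisk.img result/ramdisk update_new.cfg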
+
+def global_paths():
+       global DIFF_UTIL
+       global ZIPUTIL
+       global NEW_FILES_PATH
+       global NEW_FILES_FOLDER
+       global NEW_FILES_ZIP_NAME
+       global SYMLINK_TYPE
+       global ATTR_DOC_EXT
+       global SYMLINK_DOC_NAME
+       global DIFF_PREFIX
+       global DIFF_SUFFIX
+       global SUPPORT_RENAME
+       global NEW_PREFIX
+       global DIFFPATCH_UTIL
+       global SUPPORT_CONTAINERS
+       global FULL_IMG
+       global DELTA_IMG
+       global DELTA_FS
+       global EXTRA
+       global COMMON_BIN_PATH
+       global MEM_REQ
+       global EMPTY
+       global VERBATIM_LIST
+       global MEM_FILE
+
+COMMON_BIN_PATH = "../../common/bin/"
+DIFF_UTIL = "/usr/local/bin/ss_bsdiff"
+DIFFPATCH_UTIL = "/usr/local/bin/ss_bspatch"
+ZIPUTIL = "p7zip "
+#ZIPUTIL = "7z a system.7z "
+NEW_FILES_PATH = "system"
+NEW_FILES_FOLDER  = "system"
+NEW_FILES_ZIP_NAME = "system.7z"
+SYMLINK_TYPE = "SYM"
+ATTR_DOC_EXT = "_attr.txt"
+SYMLINK_DOC_NAME = "_sym.txt"
+PART_DOC_EXT = ".txt"
+DIFF_PREFIX = "diff"
+DIFF_SUFFIX = ".delta"
+NEW_PREFIX = 'new'
+FULL_IMG = "FULL_IMG"
+DELTA_IMG = "DELTA_IMG"
+DELTA_FS = "DELTA_FS"
+EXTRA = "EXTRA"
+LOGFILE = "Delta.log"
+VERBATIM_LIST = "Verbatim_List.txt"
+EMPTY = ""
+MEM_REQ = 0
+MEM_FILE = "NULL"
+
+SUPPORT_RENAME = "TRUE" #Use appropriate name
+SUPPORT_CONTAINERS = "FALSE"
+SUPPORT_DZIMAGE = "TRUE"
+SUPPORT_VERBATIM = "TRUE"
+
+TEST_MODE = "FALSE"
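+
+# Note: when SUPPORT_VERBATIM is "TRUE", Verbatim_List.txt (if present in the working
+# directory) is read line by line and each line is matched against file paths relative
+# to the partition base directory. A hypothetical example of its contents:
+#   usr/bin/example-daemon
+#   etc/example.conf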
+
+def main():
+       logging.basicConfig(filename=LOGFILE, level=logging.DEBUG)
+       global AttributeFile
+       global GenerateDiffAttr
+       try:
+
+               if len(sys.argv) < 6:
+                       sys.exit('Usage: CreatePatch.py UPDATE_TYPE PARTNAME OLDBASE NEWBASE OUTFOLDER')
+               UPDATE_TYPE = sys.argv[1]
+               PART_NAME = sys.argv[2]  # lets make this also optional
+
+               BASE_OLD = sys.argv[3]
+               BASE_NEW = sys.argv[4]
+               OUT_DIR = sys.argv[5]
+               ATTR_OLD = EMPTY
+               ATTR_NEW = EMPTY
+               UPDATE_CFG_PATH = EMPTY
+               GenerateDiffAttr = "FALSE"
+               if UPDATE_TYPE == DELTA_FS:
+                       #instead of arguments, should we check it in the out directory?
+                       if len(sys.argv) == 9:
+                               ATTR_OLD = sys.argv[6]
+                               ATTR_NEW = sys.argv[7]
+                               UPDATE_CFG_PATH = '../'+sys.argv[8]
+                               GenerateDiffAttr = "TRUE"
+
+               elif UPDATE_TYPE == DELTA_IMG or UPDATE_TYPE == FULL_IMG:
+                       if len(sys.argv) == 7:
+                               #Use path in better way
+                               UPDATE_CFG_PATH = '../'+sys.argv[6]
+
+               global DIFF_UTIL
+               global DIFFPATCH_UTIL
+               if not (os.path.isfile(DIFF_UTIL) and os.access(DIFF_UTIL, os.X_OK)):
+                       DIFF_UTIL = COMMON_BIN_PATH+DIFF_UTIL
+                       DIFFPATCH_UTIL = COMMON_BIN_PATH+DIFFPATCH_UTIL
+                       if not (os.path.isfile(DIFF_UTIL) and os.access(DIFF_UTIL, os.X_OK)):
+                               print >> sys.stderr, "Diff Util Does NOT exist -- ABORT"
+                               logging.info ('Diff Util Does NOT exist -- ABORT')
+                               sys.exit(1)
+
+               start = datetime.datetime.now().time()
+               logging.info('*************** ENTERED PYTHON SCRIPT *****************')
+               logging.info('Arguments Passed: [UpdateType - %s][Part Name - %s] [BaseOld - %s]  [BaseNew - %s] \n [OUTPUTDir - %s] [BASE ATTR - %s] [TARGET ATTR - %s]'% (UPDATE_TYPE, PART_NAME, BASE_OLD, BASE_NEW, OUT_DIR, ATTR_OLD, ATTR_NEW))
+
+               ensure_dir_exists(OUT_DIR)
+               if GenerateDiffAttr == "TRUE":
+                       if not (os.path.isfile(ATTR_OLD) and os.path.isfile(ATTR_NEW)):
+                               print >> sys.stderr, "Attributes missing -- ABORT"
+                               sys.exit(1)
+
+
+               # Should check if APT is supported on other linux flavours
+               cache = apt.Cache()
+               if cache['p7zip'].is_installed and cache['attr'].is_installed and cache['tar'].is_installed:
+                       logging.info ('Basic utils installed')
+               else:
+                       print >> sys.stderr, "Basic utils missing -- ABORT"
+                       sys.exit(1)
+
+               if UPDATE_TYPE == FULL_IMG:
+                       SS_mk_full_img(BASE_OLD, BASE_NEW, OUT_DIR, PART_NAME, UPDATE_CFG_PATH)
+               elif UPDATE_TYPE == DELTA_IMG:
+                       SS_mk_delta_img(BASE_OLD, BASE_NEW, OUT_DIR, PART_NAME, UPDATE_CFG_PATH)
+               elif UPDATE_TYPE == DELTA_FS:
+                       AttributeFile = ATTR_NEW
+                       ATTR_FILE = OUT_DIR+'/'+PART_NAME+ATTR_DOC_EXT
+                       Diff_AttrFiles(ATTR_OLD, ATTR_NEW, ATTR_FILE)
+                       Old_files, Old_dirs = Get_Files(BASE_OLD)
+                       New_files, New_dirs = Get_Files(BASE_NEW)
+                       SS_Generate_Delta(PART_NAME, BASE_OLD, Old_files, Old_dirs, BASE_NEW, New_files, New_dirs, OUT_DIR, ATTR_FILE)
+
+                       if not UPDATE_CFG_PATH == EMPTY:
+                               SS_update_cfg(PART_NAME, UPDATE_CFG_PATH)
+
+
+               elif UPDATE_TYPE == EXTRA:
+                       print('UPDATE_TYPE ---- EXTRA')
+               else:
+                       print('UPDATE_TYPE ---- UNKNOWN FORMAT')
+
+               if GenerateDiffAttr == "TRUE":
+                       if os.path.exists(ATTR_OLD) and os.path.exists(ATTR_NEW):
+                               os.remove(ATTR_OLD)
+                               os.remove(ATTR_NEW)
+               end = datetime.datetime.now().time()
+
+               logging.info('Max Memory required to upgrade [%s] is [%d] for File[%s]' % (PART_NAME, MEM_REQ, MEM_FILE))
+               logging.info('*************** DONE WITH PYTHON SCRIPT ***************')
+               logging.info('Time start [%s] - Time end [%s]' % (start, end))
+               print('Done with [%s][%d]---- Time start [%s] - Time end [%s]' % (PART_NAME, MEM_REQ, start, end))
+
+       except:
+               logging.error('Usage: {} <Update_Type> <Part_Name> <OLD_Base> <NEW_Base> <OUT_DIR>'.format(os.path.basename(sys.argv[0])))
+               raise
+
+
+def SS_update_cfg(DELTA_BIN, UPDATE_CFG_PATH):
+       f = open(UPDATE_CFG_PATH, 'r')
+       lines = f.readlines()
+       f.close()
+       f = open(UPDATE_CFG_PATH, 'w')
+       for line in lines:
+               ConfigItems = line.split()
+               if ConfigItems[0] == DELTA_BIN:
+                       DELTA = ConfigItems[1]
+                       logging.info ('Updating %s config' % DELTA_BIN)
+                       line = line.rstrip('\n')
+                       Value = MEM_REQ
+                       line = line.replace(line, line+'\t'+str(Value)+'\n')
+                       f.write(line)
+               else:
+                       f.write(line)
+       f.close()
+
+def SS_mk_delta_img(BASE_OLD, BASE_NEW, OUT_DIR, DELTA_BIN, UPDATE_CFG_PATH):
+       #for sizes
+
+       ZIMAGE_SCRIPT = COMMON_BIN_PATH+'./dzImagescript.sh'
+       ZIMAGE_OLD = BASE_OLD+'_unpacked'
+       ZIMAGE_NEW = BASE_NEW+'_unpacked'
+       DZIMAGE_HEADER = 'UnpackdzImage'
+       DZIMAGE_SEP = ':'
+
+       oldsize_d= os.path.getsize(BASE_OLD)
+       newsize_d= os.path.getsize(BASE_NEW)
+       SHA_BIN_DEST= hash_file(BASE_NEW)
+       SHA_BIN_BASE=hash_file(BASE_OLD)
+
+       #in case UPDATE_CFG is empty
+       DELTA = DELTA_BIN
+       SS_UpdateSize(BASE_OLD, BASE_NEW)
+       #Should throw error if PART NAME NOT found??
+       if not UPDATE_CFG_PATH == EMPTY:
+               f = open(UPDATE_CFG_PATH, 'r')
+               lines = f.readlines()
+               f.close()
+               f = open(UPDATE_CFG_PATH, 'w')
+               for line in lines:
+                       ConfigItems = line.split()
+                       if ConfigItems[0] == DELTA_BIN:
+                               logging.info ('Updating %s config' % DELTA_BIN)
+                               DELTA = ConfigItems[1]
+                               line = line.rstrip('\n')
+                               line = line.replace(line, line+'\t'+str(oldsize_d)+'\t\t'+str(newsize_d)+'\t\t'+str(SHA_BIN_BASE)+'\t\t'+str(SHA_BIN_DEST)+'\n')
+                               f.write(line)
+                       else:
+                               f.write(line)
+               f.close()
+
+       #Any validation checks required?
+       if (DELTA_BIN == "zImage" or DELTA_BIN == "dzImage" or DELTA_BIN == "KERNEL" or DELTA_BIN == "BOOT") and SUPPORT_DZIMAGE == "TRUE":
+
+               #Unpack Old and New Images for creating delta
+               subprocess.call([ZIMAGE_SCRIPT, '-u', BASE_OLD])
+               subprocess.call([ZIMAGE_SCRIPT, '-u', BASE_NEW])
+
+               DeltaFiles = []
+               Old_files, Old_dirs = Get_Files(ZIMAGE_OLD)
+               New_files, New_dirs = Get_Files(ZIMAGE_NEW)
+
+               patchLoc = '%s/%s_temp' % (OUT_DIR, DELTA_BIN)
+               ensure_dir_exists(patchLoc)
+
+               for elt in New_files:
+                       if elt in Old_files:
+                               src_file = ZIMAGE_OLD+'/'+elt
+                               dst_file = ZIMAGE_NEW+'/'+elt
+                               if not filecmp.cmp(src_file, dst_file):
+                                       patch = '%s/%s' % (patchLoc,elt)
+                                       DeltaFiles.append(patch)
+                                       subprocess.call([DIFF_UTIL,src_file,dst_file,patch])
+                                       logging.info('Make dz Image %s <--> %s ==> %s %s' % (src_file, dst_file , DELTA_BIN, patch))
+
+               #Append all delta files to make image.delta
+
+               #HEADER FORMAT MAGICNAME:FILECOUNT:[FILENAME:FILESIZE:][FILECONTENT/S]
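+               #Illustrative header for two hypothetical delta files (names and sizes are examples
+               #only), padded to 128 bytes with '0':
+               #   UnpackdzImage:2:kernel.delta:4096:dtb.delta:1024:0000...0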
+               HeaderStr = DZIMAGE_HEADER+DZIMAGE_SEP+'%d' % len(DeltaFiles)
+               HeaderStr = HeaderStr+DZIMAGE_SEP
+
+               with open(OUT_DIR+'/'+DELTA, 'w') as DeltaFile:
+                       for fname in DeltaFiles:
+                               DeltaSize = os.path.getsize(fname)
+                               HeaderStr = HeaderStr+path_leaf(fname)+DZIMAGE_SEP+'%d' % DeltaSize
+                               HeaderStr = HeaderStr+DZIMAGE_SEP
+                       #Using 128 bytes as max Header.
+                       logging.info('zImage Header - %s' % HeaderStr.ljust(128,'0'))
+                       DeltaFile.write(HeaderStr.ljust(128,'0'))
+                       for fname in DeltaFiles:
+                               with open(fname) as infile:
+                                       DeltaFile.write(infile.read())
+                                       infile.close()
+
+               DeltaFile.close()
+               shutil.rmtree(patchLoc)
+               shutil.rmtree(ZIMAGE_OLD)
+               shutil.rmtree(ZIMAGE_NEW)
+               #Do we need to incorporate the max memory required for backup??
+
+       else:
+               patchLoc = '%s/%s' % (OUT_DIR, DELTA)
+               logging.info('Make Delta Image %s <--> %s ==> %s %s' % (BASE_OLD, BASE_NEW , DELTA_BIN, patchLoc))
+               subprocess.call([DIFF_UTIL,BASE_OLD,BASE_NEW,patchLoc])
+
+
+
+def SS_mk_full_img(BASE_OLD, BASE_NEW, OUT_DIR, DELTA_BIN, UPDATE_CFG_PATH):
+       logging.info('Make Full Image %s <--> %s ==> %s' % (BASE_OLD, BASE_NEW ,DELTA_BIN))
+       oldsize_d= os.path.getsize(BASE_OLD)
+       newsize_d= os.path.getsize(BASE_NEW)
+       SHA_BIN_DEST= hash_file(BASE_NEW)
+       SHA_BIN_BASE=hash_file(BASE_OLD)
+       #echo -e "\t${oldsize_d}\t\t${newsize_d}\t\t${SHA_BIN_BASE}\t\t${SHA_BIN_DEST}" >> ${DATA_DIR}/update_new.cfg
+       SS_UpdateSize(BASE_OLD, BASE_NEW)
+
+       if not UPDATE_CFG_PATH == EMPTY:
+               f = open(UPDATE_CFG_PATH, 'r')
+               lines = f.readlines()
+               f.close()
+               f = open(UPDATE_CFG_PATH, 'w')
+               for line in lines:
+                       ConfigItems = line.split()
+                       if ConfigItems[0] == DELTA_BIN:
+                               logging.info ('Updating %s config' % DELTA_BIN)
+                               DELTA = ConfigItems[1]
+                               line = line.rstrip('\n')
+                               line = line.replace(line, line+'\t'+str(oldsize_d)+'\t\t'+str(newsize_d)+'\t\t'+str(SHA_BIN_BASE)+'\t\t'+str(SHA_BIN_DEST)+'\n')
+                               f.write(line)
+                       else:
+                               f.write(line)
+               f.close()
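+
+# Sketch of what SS_mk_full_img / SS_mk_delta_img append to the matching line of the
+# update cfg (layout inferred from the code above; names are placeholders):
+#   before:  <DELTA_BIN>  <DELTA>
+#   after:   <DELTA_BIN>  <DELTA>\t<old size>\t\t<new size>\t\t<sha1 of old>\t\t<sha1 of new>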
+
+def zipdir(path, zip):
+    for root, dirs, files in os.walk(path):
+        for file in files:
+            zip.write(os.path.join(root, file))
+
+def ensure_dir_exists(path):
+       if not os.path.exists(path):
+               os.makedirs(path)
+               #shutil.rmtree(path)
+       #os.makedirs(path)
+
+
+def path_leaf(path):
+    head, tail = ntpath.split(path) #This is for windows?? Recheck
+    return tail
+
+def path_head(path):
+    head, tail = ntpath.split(path)
+    return head
+
+def difflines(list1, list2):
+    c = set(list1).union(set(list2))
+    d = set(list1).intersection(set(list2))
+    return list(c-d)
+
+#Creating Diff between OLD and NEW attribute files v12
+def Diff_AttrFiles(ATTR_OLD, ATTR_NEW, ATTR_FILE):
+       if GenerateDiffAttr == "FALSE":
+               return
+       with open(ATTR_OLD, 'r') as f_old:
+               lines1 = set(f_old.read().splitlines())
+
+       with open(ATTR_NEW, 'r') as f_new:
+               lines2 = set(f_new.read().splitlines())
+
+       lines = difflines(lines2, lines1)
+       with open(ATTR_FILE, 'w+') as file_out:
+               for line in lines:
+                       if line not in lines1:
+                               logging.info('Diff_AttrFiles - %s' % line)
+                               file_out.write(line+'\n')
+
+       f_new.close()
+       f_old.close()
+       file_out.close()
+
+
+
+def Update_Attr(RequestedPath, Type, File_Attibutes, Sym_Attibutes):
+       #Full File Path should MATCH
+       if GenerateDiffAttr == "FALSE":
+               return
+       FilePath = '"/'+RequestedPath+'"'
+       #print ('FilePath - %s'% (FilePath))
+       with open(AttributeFile) as f:
+               for line in f:
+                       if FilePath in line:
+                               if Type == SYMLINK_TYPE:
+                                       Sym_Attibutes.append(line)
+                               else:
+                                       File_Attibutes.append(line)
+
+
+'''This function returns the SHA-1 hash of the file passed into it'''
+def hash_file(filename):
+
+   # make a hash object
+   h = hashlib.sha1()
+
+   # open file for reading in binary mode
+   with open(filename,'rb') as file:
+       # loop till the end of the file
+       chunk = 0
+       while chunk != b'':
+           # read 1 MiB at a time
+           chunk = file.read(1024*1024)
+           h.update(chunk)
+
+   # return the hex representation of digest
+   return h.hexdigest()
+
+def find_dupes_dir(BASE_OLD, BASE_NEW):
+       dups = {}
+       fdupes = {}
+       print('Finding Duplicates in - %s %s' % (BASE_OLD, BASE_NEW))
+       logging.info('Finding Duplicates in - %s %s' % (BASE_OLD, BASE_NEW))
+       for rootbase, subdirsB, fileListB in os.walk(BASE_OLD):
+               #print('Scanning %s...' % rootbase)
+               for filename in fileListB:
+                       path = os.path.join(rootbase, filename)
+                       if os.path.islink(path):
+                               continue
+                       # Calculate hash
+                       file_hash = hash_file(path)
+                       dups[file_hash] = path
+
+       for roottarget, subdirsT, fileListT in os.walk(BASE_NEW):
+               #print('Scanning %s...' % roottarget)
+               for filename in fileListT:
+                       # Get the path to the file
+                       path = os.path.join(roottarget, filename)
+                       if os.path.islink(path):
+                               continue
+                       # Calculate hash
+                       file_hash = hash_file(path)
+                       # Add or append the file path
+                       if file_hash in dups:
+                               BaseStr = dups.get(file_hash)
+                               Baseloc = path.find('/')
+                               TarLoc = BaseStr.find('/')
+                               if not path[Baseloc:] == BaseStr[TarLoc:]:
+                                       logging.info('Dupes - %s ==> %s' % (path[Baseloc:], BaseStr[TarLoc:]))
+                                       fdupes[path] = BaseStr
+       logging.info('Total Duplicate files %d' % (len(fdupes)))
+       return fdupes
+
+
+def find_dupes_list(BASE_OLD, BASE_NEW, fileListB, fileListT):
+       dups = {}
+       fdupes = {}
+       print('Finding Duplicates in - %s %s' % (BASE_OLD, BASE_NEW))
+
+       for filename in fileListB:
+               Src_File = BASE_OLD+'/'+filename
+               if os.path.islink(Src_File) or os.path.isdir(Src_File):
+                       continue
+               # Calculate hash
+               file_hash = hash_file(Src_File)
+               dups[file_hash] = Src_File
+
+
+       for filename in fileListT:
+               Dest_File = BASE_NEW+'/'+filename
+               if os.path.islink(Dest_File) or os.path.isdir(Dest_File):
+                       continue
+               # Calculate hash
+               file_hash = hash_file(Dest_File)
+               if file_hash in dups:
+                       BaseStr = dups.get(file_hash)
+                       Baseloc = BaseStr.find('/')
+                       if not BaseStr[Baseloc:] == filename:
+                               #print('Dupes - %s ==> %s' % (BaseStr[Baseloc:], filename))
+                               fdupes[BaseStr] = filename
+
+       logging.info('Total Duplicate files %d' % (len(fdupes)))
+       return fdupes
+
+def SS_UpdateSize(src_file, dst_file):
+       global MEM_REQ
+       global MEM_FILE
+       oldsize_d= os.path.getsize(src_file)
+       newsize_d= os.path.getsize(dst_file)
+       if oldsize_d >= newsize_d:
+               Max = newsize_d
+       else:
+               Max = oldsize_d
+       if MEM_REQ < Max:
+               MEM_REQ = Max
+               MEM_FILE = dst_file
+
+
+
+def SS_Generate_Delta(PART_NAME, BASE_OLD, Old_files, Old_dirs, BASE_NEW, New_files, New_dirs, OUT_DIR, ATTR_FILE):
+       print('Going from %d files to %d files' % (len(Old_files), len(New_files)))
+       logging.info('Going from %d files to %d files' % (len(Old_files), len(New_files)))
+
+       # First let's fill up these categories
+       files_new = []
+       files_removed = []
+       Dir_removed = []
+       Dir_Added = []
+       files_changed = []
+       files_unchanged = []
+       files_renamed = []
+       File_Attibutes = []
+       Sym_Attibutes = []
+
+       files_Del_List = {}
+       files_New_List = {}
+       MyDict_Patches = {}
+
+
+
+       PWD = os.getcwd()
+
+       # Generate NEW List
+       for elt in New_files:
+               if elt not in Old_files:
+                       files_new.append(elt)
+                       logging.info('New files %s' % elt)
+
+       # Generate Delete List
+       for elt in Old_files:
+               if elt not in New_files:
+                       # Can't we just append it here only if this is NOT a directory, so that we have a list of removed files only (without directories)?
+                       files_removed.append(elt)
+                       logging.info('Old files %s' % elt)
+
+
+       for elt in Old_dirs:
+               #print('List of Old Dirs %s' % elt)
+               # Delete END logic goes in hand with UPG, After Diffs and moves, DEL END should be done.
+               if elt not in New_dirs:
+                       Dir_removed.append(elt)
+                       logging.info('Old Dirs %s' % elt+'/')
+
+       for elt in New_dirs:
+               if elt not in Old_dirs:
+                       Dir_Added.append(elt)
+               #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+       # What files have changed contents but not name/path?
+       for elt in New_files:
+               if elt in Old_files:
+                       #Both are symbolic links and they differ
+                       src_file = BASE_OLD+'/'+elt
+                       dst_file = BASE_NEW+'/'+elt
+                       #print('Files Changed - %s -%s' % (src_file,dst_file))
+                       if os.path.islink(src_file) and os.path.islink(dst_file):
+                               if not os.readlink(src_file) == os.readlink(dst_file):
+                                       files_changed.append(elt)
+                                       #print('%d Sym link files changed' % len(files_changed))
+                                       logging.info('Sym links Changed - %s' % elt)
+                               else:
+                                       files_unchanged.append(elt)
+                       #Both are normal files and they differ. (isfile returns True for symlinks too, so an additional check determines whether either file is a symlink)
+                       elif (not (os.path.islink(src_file) or os.path.islink(dst_file))) and os.path.isfile(src_file) and os.path.isfile(dst_file):
+                               if not filecmp.cmp(src_file, dst_file):
+                                       files_changed.append(elt)
+                                       #print('%d Normal files changed' % len(files_changed))
+                                       #print('Files Changed - %s' % elt)
+                               else:
+                                       files_unchanged.append(elt)
+                       #File types differ between BASE and TARGET
+                       else:
+                               logging.info('Files are of diff types but same names  Src- %s Des- %s' % (src_file, dst_file))
+                               #Both file types have changed and they differ
+                               #Case 1: First Delete the OLD entry file type (Be it anything)
+                               #Processing and updating partition txt file will be done under REMOVED case and NEW files case accordingly, we just make an entry here
+                               files_removed.append(elt)
+                               files_new.append(elt)
+
+
+       # HANDLING VERBATIM - Remove from changed list and delete the entries on device first
+       #This script is called per partition, so how do we want to handle it? (especially for the delete case?)
+
+       print("Check for any verbatim under - %s" % VERBATIM_LIST)
+       if SUPPORT_VERBATIM == "TRUE" and os.path.exists(VERBATIM_LIST):
+               with open(VERBATIM_LIST, 'r') as F_News:
+                       lines = set(F_News.read().splitlines())
+               for line in lines:
+                       if line in files_changed:
+                               files_changed.remove(line)
+                               files_removed.append(line)
+                       if line in files_new:
+                               files_new.remove(line)
+
+       #Currently, if a version or number is the first character of the file name, we do NOT make any diffs.
+       if SUPPORT_RENAME == "TRUE":
+               for elt in files_removed:
+                       if os.path.isfile(BASE_OLD+'/'+elt):
+                               FileName = path_leaf(elt)
+                               entries = re.split('[0-9]' , FileName)
+                               #Gives the STRING part of the NAME; if the name starts with a version, then the later part will be the string
+                               #print('Entries under removed list after split - %s %s - %s' % (FileName, entries[0], elt))
+                               #If the version starts at the beginning of the string, should we have an additional check for such cases?
+                               if len(entries[0]) > 0:
+                                       files_Del_List.update({entries[0]: elt})
+
+               for elt in files_new:
+                       if os.path.isfile(BASE_NEW+'/'+elt):
+                               FileName = path_leaf(elt)
+                               entries = re.split('[0-9]' , FileName)
+                               #print('Entries under NEW files list after split - %s %s - %s' % (FileName, entries[0], elt))
+                               if len(entries[0]) > 0:
+                                       files_New_List.update({entries[0]: elt})
+
+               for key, value in files_Del_List.iteritems():
+                       #print('Key value pair -%s -%s' % (key, value))
+                       if key in files_New_List:
+                               # this file is the same name in both!
+                               src_file = BASE_OLD+'/'+value
+                               dst_file = BASE_NEW+'/'+files_New_List[key]
+                               olddirpath = path_head(files_New_List[key])
+                               newdirpath = path_head(value)
+                               if os.path.islink(src_file) or os.path.islink(dst_file):
+                                       logging.debug('Cannot diff as one of them is Symlink')
+                               elif os.path.isdir(src_file) or os.path.isdir(dst_file):
+                                       logging.debug('Cannot diff as one of them is dir')
+                               else:
+                                       #Pick the best diff of same type and diff names
+                                       files_renamed.append([files_New_List[key], value])
+                                       files_removed.remove(value)
+                                       files_new.remove(files_New_List[key])
+
+       '''
+       Patch Section
+               Partition.txt contains Protocol for UPI
+               Types Supported: DIFFS, MOVES, NEWS, DELETES, SYMDIFFS, SYMNEWS.
+       '''
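+       # Token formats written to <PART_NAME>.txt below (values are placeholders; see the
+       # Partition_Doc.write() calls for the exact fields):
+       #   DIFF:REG:<old path>:<new path>:<sha1 old>:<sha1 new>:<patch name>
+       #   MOVE:REG:<old path>:<new path>:<sha1>
+       #   DEL:REG:<path>:<sha1>    DEL:SYM:<path>    DEL:END:<empty dir>
+       #   SYM:DIFF:<path>:<path>:<link target>    SYM:NEW:<path>:<link target>
+       #   PaTcHCoUnT:<diffs> <moves> <news> <deletes> <symdiffs> <symnews>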
+       Sym_Diff_Cnt = 0
+       Sym_New_Cnt = 0;
+       Del_Cnt = 0
+       New_Cnt = 0
+       Diff_Cnt = 0
+       Move_Cnt = 0
+       Verbatim_Cnt = 0
+       SymLinkDoc = OUT_DIR+'/'+PART_NAME+SYMLINK_DOC_NAME
+       Partition_Doc = open(OUT_DIR+'/'+PART_NAME+'.txt','w')
+       Partition_Doc_SymLinks = open(SymLinkDoc,'w')
+
+       print("writing diff'ed changed files...")
+       for elt in files_changed:
+               dst_file = BASE_NEW+'/'+elt
+               src_file = BASE_OLD+'/'+elt
+               #Both files are symbolic links and they differ
+               if os.path.islink(dst_file) and os.path.islink(src_file):
+               #Both are symlinks and they differ
+                       logging.debug(' File Changed is Link %s ' % dst_file)
+                       patch = os.readlink(dst_file)
+                       Sym_Diff_Cnt = Sym_Diff_Cnt + 1
+                       Partition_Doc_SymLinks.write('SYM:DIFF:%s:%s:%s\n' % (elt, elt, patch))
+                       Update_Attr(elt, "SYM", File_Attibutes, Sym_Attibutes)
+               #Both are NORMAL files and they differ
+               elif (not (os.path.islink(src_file) or os.path.islink(dst_file))) and os.path.isfile(dst_file) and os.path.isfile(src_file):
+                       #Both are files and they differ
+                       Diff_Cnt = Diff_Cnt + 1
+                       patchName = (DIFF_PREFIX+'%d_%s_'+PART_NAME+DIFF_SUFFIX) % (Diff_Cnt, path_leaf(elt))
+                       patchLoc = '%s/%s' % (OUT_DIR, patchName)
+                       logging.debug(' File Differ %s %s' % (src_file, dst_file))
+                       SS_UpdateSize(src_file, dst_file)
+                       if SUPPORT_CONTAINERS == "TRUE":
+                               if src_file.endswith('.zip') and dst_file.endswith('.zip'):
+                                       FORMAT = "ZIP"
+                                       Partition_Doc.write('DIFF:ZIP:%s:%s:%s:%s:%s/\n' % (elt, elt, hash_file(src_file), hash_file(dst_file), patchName))
+                                       compute_containerdelta(src_file, dst_file, FORMAT, OUT_DIR+'/'+patchName, Partition_Doc)
+                               elif src_file.endswith('.tpk') and dst_file.endswith('.tpk'):
+                                       FORMAT = "TPK"
+                                       Partition_Doc.write('DIFF:TPK:%s:%s:%s:%s:%s/\n' % (elt, elt, hash_file(src_file), hash_file(dst_file), patchName))
+                                       compute_containerdelta(src_file, dst_file, FORMAT, OUT_DIR+'/'+patchName, Partition_Doc)
+                               else:
+                                       FORMAT = "REG"
+                                       Partition_Doc.write('DIFF:REG:%s:%s:%s:%s:%s\n' % (elt, elt, hash_file(src_file), hash_file(dst_file), patchName))
+                                       subprocess.call([DIFF_UTIL,src_file,dst_file,patchLoc])
+                       else:
+                               FORMAT = "REG"
+                               ret = subprocess.call([DIFF_UTIL,src_file,dst_file,patchLoc])
+                               if ret != 0:
+                                       logging.debug('Failed to create diff %d %s %s\n' % (ret, src_file, dst_file))
+                                       files_new.append(elt)
+                                       Diff_Cnt = Diff_Cnt - 1
+                               else:
+                                       Partition_Doc.write('DIFF:REG:%s:%s:%s:%s:%s\n' % (elt, elt, hash_file(src_file), hash_file(dst_file), patchName))
+
+                       Update_Attr(elt, "FILE", File_Attibutes, Sym_Attibutes)
+               #Both differ but they are of diff types
+               else:
+                       #Processing and updating partition txt file will be done under REMOVED case and NEW files case accordingly, we just make an entry here
+                       files_removed.append(elt)
+                       files_new.append(elt)
+
+       fdupes = find_dupes_list(BASE_OLD, BASE_NEW, files_removed, files_new)
+       for oldpath, newpath in fdupes.iteritems():
+               logging.info('Dupes %s -> %s' % (oldpath, newpath))
+
+       for elt in files_removed[:]:    #iterate over a copy; entries are removed from files_removed below
+               src_file = BASE_OLD+'/'+elt
+               #If parent directory is deleted.. & del end not possible. (==> Moves should be done before deletes in ENGINE)
+               if src_file in fdupes.keys():
+                       dst_file = BASE_NEW+'/'+ fdupes[src_file]
+                       logging.debug(' File Moved %s ==> %s' % (src_file, dst_file))
+                       Move_Cnt = Move_Cnt + 1
+                       Partition_Doc.write('MOVE:REG:%s:%s:%s\n' % (elt, fdupes[src_file], hash_file(src_file)))
+                       files_removed.remove(elt)
+                       files_new.remove(fdupes[src_file])
+
+       #Should be placed after removing duplicates, else they will be filtered here.
+       # The loop should be over all NEW files, rather than all deleted files (current understanding)
+       #       First step: sort & filter out unwanted files
+       # Minimum conditions used (see the illustrative example below):
+       #       1. File name should match 70%
+       #       2. Extensions should be the same
+       #       3. File name length should be greater than 3 characters
+       #       4. As we sort on file names, once a file name does not match and R_Flag is set to true, we need not check the remaining files, so we break.
+       #       5. Should consider edit distance for RENAME LOGIC ==> TBD
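+       # Hypothetical example of the matching heuristic applied below:
+       #   new file: usr/lib/libexample-1.2.so  (leaf "libexample-1.2.so", first 70% = "libexample-")
+       #   old file: usr/lib/libexample-1.1.so  starts with "libexample-" and shares the ".so"
+       #   extension, so a trial diff is measured and the smallest resulting patch wins the rename.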
+
+       Base_DelList = files_removed[:]
+       Base_NewList = files_new[:]
+       DelList = sorted(Base_DelList, key=path_leaf)
+       NewList = sorted(Base_NewList, key=path_leaf)
+       logging.debug('Rename Logic before filter: Delcount -%d NewCount -%d' % (len(DelList), len(NewList)))
+
+       Filter1 = []
+       Filter2 = []
+       #Remove unwanted items which we cant make diff with for rename logic
+       for file in DelList:
+               if os.path.islink(BASE_OLD+'/'+file):
+                       continue
+               elif os.path.isdir(BASE_OLD+'/'+file):
+                       continue
+               else:
+                       Filter1.append(file)
+                       #logging.debug('Sorted del list - %s' % (file))
+
+       DelList = Filter1
+
+       for file in NewList:
+               if os.path.islink(BASE_NEW+'/'+file):
+                       continue
+               elif os.path.isdir(BASE_NEW+'/'+file):
+                       continue
+               elif len(path_leaf(file)) <= 3:
+                       logging.debug('Ignored for best picks -%s ' % (BASE_NEW+'/'+file))
+                       continue
+               else:
+                       Filter2.append(file)
+
+       NewList = Filter2
+
+       logging.debug('Rename Logic After filter: Delcount -%d NewCount -%d' % (len(DelList), len(NewList)))
+
+       for new_file in NewList:
+               R_Flag = 'FALSE';
+               DirPathNew = path_head(new_file)
+               FileNameNew = path_leaf(new_file)
+               DiffSize = 0
+               winning_patch_sz = os.path.getsize(BASE_NEW+'/'+new_file)
+               New_fs = winning_patch_sz
+               winning_file = ''
+
+               for del_file in DelList:
+                       FileNameOld = path_leaf(del_file)
+                       if (FileNameOld.startswith(FileNameNew[:len(FileNameNew)*7/10]) and (os.path.splitext(FileNameNew)[1] == os.path.splitext(del_file)[1])):
+                               #winning_patch_sz = 0.9 * os.path.getsize(BASE_NEW+'/'+new_file)
+                               #If the percentage difference between the two file sizes is within 30%, we consider them for diff generation
+                               Del_fs = os.path.getsize(BASE_OLD+'/'+del_file)
+                               v1 = abs(New_fs-Del_fs)
+                               v2 = (New_fs+Del_fs)/2
+                               if( v2<=0 or ((v1*100)/v2) > 30 ):      #multiply before dividing to avoid integer-division truncation
+                                       logging.debug('Ignore diff generation New_fs - %d Del_Fs - %d' % (New_fs, Del_fs))
+                                       continue
+                               logging.debug('I can compute diff between %s %s Del_Fs - %d New_Fs - %d' % (del_file, new_file, Del_fs, New_fs))
+                               R_Flag = 'TRUE';
+                               DiffSize = measure_two_filediffs(BASE_OLD+'/'+del_file, BASE_NEW+'/'+new_file)
+                               if (DiffSize < 0.8 * winning_patch_sz):
+                                       winning_patch_sz = DiffSize
+                                       winning_file = del_file
+                       elif (not FileNameOld.startswith(FileNameNew[:len(FileNameNew)*7/10]) and R_Flag == 'TRUE'):
+                               logging.debug('Because the next set of files will not have a matching name - break @@ %s %s' % (del_file, new_file))
+                               break
+               if len(winning_file) > 0:
+                       logging.debug('Best Pick -%s ==> %s [%d]' % (winning_file, new_file, DiffSize))
+                       files_renamed.append([new_file, winning_file])
+                       DelList.remove(winning_file)
+                       files_removed.remove(winning_file)
+                       files_new.remove(new_file)
+
+       #********************** Files should NOT be deleted for any such renames ***********************
+
+       if SUPPORT_RENAME == "TRUE":
+               for elt in files_renamed:
+                       src_file = BASE_OLD+'/'+elt[1]
+                       dst_file = BASE_NEW+'/'+elt[0]
+                       Diff_Cnt = Diff_Cnt + 1
+                       patchName = (DIFF_PREFIX+'%d_%s_'+PART_NAME+DIFF_SUFFIX) % (Diff_Cnt, path_leaf(elt[1]))
+                       #patchName = (DIFF_PREFIX+'_%s'+DIFF_SUFFIX) % (path_leaf(elt[0]))
+                       patchLoc = '%s/%s' % (OUT_DIR, patchName)
+                       logging.debug(' File Renamed %s ==> %s' % (src_file, dst_file))
+                       # Should be careful of renaming files??
+                       # Should we consider measure_two_filediffs ?? so that patch size is NOT greater than actual file?
+                       # What if folder path has numerics??
+
+                       if  os.path.isdir(src_file) or os.path.isdir(dst_file):
+                               #This case never occurs??
+                               Partition_Doc.write('"%s" and "%s" renamed 0 0\n' % (elt[0], elt[1]))
+                               Update_Attr(elt[0], "FILE", File_Attibutes, Sym_Attibutes)
+                       else: #Make sure these files are PROPER and they shd NOT be symlinks
+                               if filecmp.cmp(src_file, dst_file):
+                                       Move_Cnt = Move_Cnt + 1
+                                       Diff_Cnt = Diff_Cnt - 1
+                                       Partition_Doc.write('MOVE:REG:%s:%s:%s\n' % (elt[1], elt[0], hash_file(src_file)))
+                               elif SUPPORT_CONTAINERS == "TRUE":
+                                       if src_file.endswith('.zip') and dst_file.endswith('.zip'):
+                                               FORMAT = "ZIP"
+                                               Partition_Doc.write('DIFF:ZIP:%s:%s:%s:%s:%s/\n' % (elt[1], elt[0], hash_file(src_file), hash_file(dst_file), patchName))
+                                               compute_containerdelta(src_file, dst_file, FORMAT, OUT_DIR+'/'+patchName, Partition_Doc)
+                                       elif src_file.endswith('.tpk') and dst_file.endswith('.tpk'):
+                                               FORMAT = "TPK"
+                                               Partition_Doc.write('DIFF:TPK:%s:%s:%s:%s:%s/\n' % (elt[1], elt[0], hash_file(src_file), hash_file(dst_file), patchName))
+                                               compute_containerdelta(src_file, dst_file, FORMAT, OUT_DIR+'/'+patchName, Partition_Doc)
+                                       else:
+                                               FORMAT = "REG"
+                                               Partition_Doc.write('DIFF:REG:%s:%s:%s:%s:%s\n' % (elt[1], elt[0], hash_file(src_file), hash_file(dst_file), patchName))
+                                               subprocess.call([DIFF_UTIL,src_file,dst_file,patchLoc])
+                               else:
+                                       FORMAT = "REG"
+                                       ret = subprocess.call([DIFF_UTIL,src_file,dst_file,patchLoc])
+                                       if ret != 0:
+                                               logging.debug('Failed to create diff %d %s %s\n' % (ret, src_file, dst_file))
+                                               files_new.append(elt[0])
+                                               Diff_Cnt = Diff_Cnt - 1
+                                       else:
+                                               Partition_Doc.write('DIFF:REG:%s:%s:%s:%s:%s\n' % (elt[1], elt[0], hash_file(src_file), hash_file(dst_file), patchName))
+
+                               SS_UpdateSize(src_file, dst_file)
+                               Update_Attr(elt[0], "FILE", File_Attibutes, Sym_Attibutes)
+
+
+       #HANDLING VERBATIM - We process NEWs and DELETEs for the verbatim list ONLY after processing duplicates & the rename functionality,
+       #so that the rename functionality will NOT create a PATCH instead of verbatims.
+
+       if SUPPORT_VERBATIM == "TRUE" and os.path.exists(VERBATIM_LIST):
+               with open(VERBATIM_LIST, 'r') as F_News:
+                       lines = set(F_News.read().splitlines())
+               for line in lines:
+                       if not line in files_new:
+                               if os.path.exists(BASE_NEW+'/'+line):
+                                       files_new.append(line)
+                                       Verbatim_Cnt = Verbatim_Cnt+1
+                                       logging.debug("Added to list of verbatims -%s" % BASE_NEW+'/'+line)
+
+
+
+       for elt in files_removed:
+               #if files are part of patches after renaming, we should remove them from the removed list.
+               src_file = BASE_OLD+'/'+elt
+               if os.path.islink(src_file):
+                       Partition_Doc.write('DEL:SYM:%s\n' % (elt))
+               elif os.path.isdir(src_file):
+                       #If we change to DIR TYPE, then the same token should be modified on UA also and SHA should be accordingly passed.
+                       Partition_Doc.write('DEL:REG:%s:NA\n' % (elt))
+               else:
+                       Partition_Doc.write('DEL:REG:%s:%s\n' % (elt, hash_file(src_file)))
+               logging.debug(' File Deleted %s' % src_file)
+               Del_Cnt = Del_Cnt + 1
+
+       Dir_removed.sort(reverse=True)
+       for elt in Dir_removed:
+               #if Dir is empty, add it to the removed list.
+               src_file = BASE_OLD+'/'+elt
+               #Irrespective of whether files are MOVED or DIFF'ed, we can delete the folders. This action can be performed at the end.
+               #It covers symlinks also, as NEW symlinks cannot point to non-existent folders of TARGET (NEW binary)
+               if os.path.isdir(src_file):
+                       Partition_Doc.write('DEL:END:%s\n' % (elt))
+                       Del_Cnt = Del_Cnt + 1
+                       logging.debug(' Dir Deleted- %s' % src_file)
+
+
+       for elt in files_new:
+               dst_file = BASE_NEW+'/'+elt
+               newfiles_dest_path = 'system/'
+               ensure_dir_exists(newfiles_dest_path)
+               if os.path.islink(dst_file):
+                       patch = os.readlink(dst_file)
+                       logging.debug(' File New Links %s' % elt)
+                       Partition_Doc_SymLinks.write('SYM:NEW:%s:%s\n' % (elt, patch))
+                       #What if this is only a new symlink and the folder already exists? Should recheck
+                       destpath = newfiles_dest_path + elt
+                       if not os.path.exists(path_head(destpath)):
+                               os.makedirs(path_head(destpath))
+                               logging.info('New SymLink - Adding missing Dir')
+                       #Update_Attr(elt, "SYM", File_Attibutes, Sym_Attibutes)
+                       Sym_New_Cnt = Sym_New_Cnt + 1
+               elif os.path.isdir(dst_file): # We create just empty directory here
+                       destpath = newfiles_dest_path + elt
+                       if not os.path.exists(destpath):
+                               os.makedirs(destpath)
+                               logging.debug(' File New Dir %s' % destpath)
+                               New_Cnt = New_Cnt + 1
+               else:
+                       New_Cnt = New_Cnt + 1
+                       #newfiles_dest_path = OUT_DIR + '/system/'
+                       destpath = newfiles_dest_path + elt
+                       destdir = os.path.dirname(destpath)
+                       logging.debug('New files - %s ==> %s' % (dst_file, destdir))
+
+                       if not os.path.isdir(destdir):
+                               try:
+                                       os.makedirs(destdir)
+                               except:
+                                       logging.critical('Error in NEW files DIR entry -%s' % destdir)
+                                       raise
+
+                       try:
+                               if not stat.S_ISFIFO(os.stat(dst_file).st_mode):
+                                       shutil.copy2(dst_file, destpath)
+                                       logging.debug('New files copied from- %s to- %s' % (dst_file, destpath))
+                       except:
+                               logging.critical('Error in NEW files entry -%s -%s' % (dst_file, destpath))
+                               raise
+
+       for elt in Dir_Added:
+               newfiles_dest_path = 'system/'
+               ensure_dir_exists(newfiles_dest_path)
+               destpath = newfiles_dest_path + elt
+               if not os.path.exists(destpath):
+                       os.makedirs(destpath)
+                       logging.debug(' DirList New Dir %s' % destpath)
+                       New_Cnt = New_Cnt + 1
+
+       #Base directory should be system
+       print 'Compressing New files'
+       if (New_Cnt > 0):
+               WorkingDir = os.getcwd()
+               os.chdir(os.getcwd()+"/"+NEW_FILES_PATH)
+               logging.info('Curr Working Dir - %s' % os.getcwd())
+               os.system(ZIPUTIL+NEW_FILES_PATH+" >> " + LOGFILE)
+               shutil.move(NEW_FILES_ZIP_NAME, WorkingDir+"/"+OUT_DIR)
+               #New file size?? because we extract system.7z from delta.tar and then proceed with decompression
+               SS_UpdateSize(WorkingDir+"/"+OUT_DIR+"/"+NEW_FILES_ZIP_NAME, WorkingDir+"/"+OUT_DIR+"/"+NEW_FILES_ZIP_NAME)
+               os.chdir(WorkingDir)
+               shutil.rmtree(NEW_FILES_PATH)
+               # use 7z a system.7z ./*
+
+       #logging.info('%d Dir to be removed' % len(Dir_removed))
+       logging.info('%d files unchanged' % len(files_unchanged))
+       logging.info('%d files renamed' % len(files_renamed))
+       logging.info('%d files NEW' % len(files_new))
+       logging.info('%d File attr' % len(File_Attibutes))
+       logging.info('%d Sym attr' % len(Sym_Attibutes))
+       logging.info('PaTcHCoUnT:Diffs-%d Moves-%d News-%d Deletes-%d SymDiffs-%d SymNews-%d Verbatim-%d\n' % (Diff_Cnt, Move_Cnt, New_Cnt, Del_Cnt, Sym_Diff_Cnt, Sym_New_Cnt, Verbatim_Cnt))
+       print('PaTcHCoUnT:Diffs-%d Moves-%d News-%d Deletes-%d SymDiffs-%d SymNews-%d Verbatim-%d\n' % (Diff_Cnt, Move_Cnt, New_Cnt, Del_Cnt, Sym_Diff_Cnt, Sym_New_Cnt, Verbatim_Cnt))
+
+       #There could be duplicates, TODO, can check before adding..
+       ATTR_FILE_D = open(ATTR_FILE,'a+')
+       for elt in File_Attibutes:
+               ATTR_FILE_D.write(elt)
+       for elt in Sym_Attibutes:
+               ATTR_FILE_D.write(elt)
+
+       ATTR_FILE_D.close()
+
+       Partition_Doc_SymLinks.close()
+       Partition_Read_SymLinks = open(SymLinkDoc,'r+')
+       Partition_Doc.write(Partition_Read_SymLinks.read())
+       Partition_Doc.write('PaTcHCoUnT:%d %d %d %d %d %d\n' % (Diff_Cnt, Move_Cnt, New_Cnt, Del_Cnt, Sym_Diff_Cnt, Sym_New_Cnt))
+       Partition_Read_SymLinks.close()
+       Partition_Doc.close()
+       os.remove(SymLinkDoc)
+
+       if Diff_Cnt + Move_Cnt + New_Cnt+ Del_Cnt + Sym_Diff_Cnt + Sym_New_Cnt + Verbatim_Cnt + os.path.getsize(ATTR_FILE) == 0:
+               print('No Delta Generated for %s - %s' % (PART_NAME,OUT_DIR))
+               logging.info('No Delta Generated for %s' % PART_NAME)
+               shutil.rmtree(OUT_DIR)
+
+
+def Apply_Container_Delta(a_apk, b_apk, new_apk, a_folder, g_output_dir):
+
+       #CONTROL NAMES, AND PRINTS AND ERROR CASES... SHOULD NOT PROCEED.
+       print 'ApplyContainerDelta - ', b_apk, a_folder, g_output_dir
+       shutil.copy2(g_output_dir+'/'+b_apk, g_output_dir+'/temp')
+       temp_apk = '../'+g_output_dir+'/'+b_apk
+       Patch = 'Patch_'+b_apk
+       ensure_dir_exists(Patch)
+       shutil.copy2(g_output_dir+'/'+b_apk, Patch+'/'+b_apk)
+
+       #Size issue on the device side?? should check this
+       subprocess.call(['unzip','-q', Patch+'/'+b_apk, '-d', Patch])
+       with open(g_output_dir+'/PATCH.txt', 'r') as f_new:
+               lines = set(f_new.read().splitlines())
+               for line in lines:
+                       #print('Action ==> %s' % line)
+                       #Action, Path, Patch = line.split('|')
+                       Items = line.split('|')
+                       Action = Items[0]
+                       Path = Items[1]
+                       ActualPath = a_folder+'/'+Path
+                       PatchPath = Patch+'/'+Path
+                       SrcPath = g_output_dir+'/'+path_leaf(Path)
+                       #print('Action ==> %s Path ==> %s ' % (Action, Path))
+                       if line[0] == 'c':
+                               patchName = g_output_dir+'/'+Items[2]
+                               #print('Apply Patch: ActualPath %s SrcPath %s PatchLoc %s ' % (PatchPath, ActualPath, patchName))
+                               subprocess.call([DIFFPATCH_UTIL,ActualPath,ActualPath,patchName])
+                               WorkingDir = os.getcwd()
+                               os.chdir(WorkingDir+"/"+"temp_a")
+                               subprocess.call(['cp', '--parents', Path, '../'+Patch])
+                               os.chdir(WorkingDir)
+                       elif line[0] == 's':
+                               WorkingDir = os.getcwd()
+                               os.chdir(WorkingDir+"/"+"temp_a")
+                               subprocess.call(['cp', '--parents', Path, '../'+Patch])
+                               os.chdir(WorkingDir)
+                       else:
+                               print('Apply_Container_Delta - Unknown Error')
+       #print('Touch all files and set common attributes for DIFF generation')
+       WorkingDir = os.getcwd()
+       os.chdir(WorkingDir+"/"+Patch)
+
+       CONTAINER_DATE = '200011111111.11'
+       CONTAINER_MODE = '0755'
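+       # touch -t takes [[CC]YY]MMDDhhmm[.ss]; '200011111111.11' pins every entry to
+       # 2000-11-11 11:11:11 so both sides produce identical attributes for diff generation.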
+       subprocess.call(['find', '.', '-type', 'l', '-exec', 'rm', '-rf', '{}', ';'])
+       subprocess.call(['find', '.', '-exec', 'touch', '-t', CONTAINER_DATE, '{}', ';'])
+       subprocess.call(['chmod', '-R', CONTAINER_MODE, '../'+Patch])
+
+       print 'Update Intermediate Archive'
+       #subprocess.call(['zip','-ryX', b_apk, '*'])
+       subprocess.call(['zip','-ryX', b_apk] + glob.glob('*'))
+       os.chdir(WorkingDir)
+       #print('Apply Path completed - Now create diff for this and place in patch folder')
+       #print os.getcwd()
+       print('Patch Applied, Create Final Diff - %s %s' % (g_output_dir+'/'+b_apk,new_apk))
+       patchName = ('New'+'_%s'+DIFF_SUFFIX) % (b_apk)
+       patchLoc = '%s/%s' % (g_output_dir, patchName)
+
+       subprocess.call([DIFF_UTIL, Patch+'/'+b_apk ,new_apk,patchLoc])
+
+       #Only on HOST... for testing
+       if TEST_MODE == 'TRUE':
+               UpgradedName = '%s_Upgraded' % (b_apk)
+               subprocess.call([DIFFPATCH_UTIL,Patch+'/'+b_apk,UpgradedName,patchLoc])
+
+       #This is file only with NEWS and empty diffs and same files.
+       if TEST_MODE == 'FALSE':
+               os.remove(g_output_dir+'/'+b_apk)
+               os.rename(g_output_dir+'/temp', g_output_dir+'/'+b_apk)
+               shutil.rmtree(Patch)
+
+def IsSymlink(info):
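+  # The upper 16 bits of external_attr carry the Unix mode bits for entries created on
+  # Unix; 0120777 is S_IFLNK | 0777, i.e. a symbolic link.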
+  return (info.external_attr >> 16) == 0120777
+
+
+def compute_containerdelta(src_file, dst_file, FORMAT, patchName, Partition_Doc):
+
+       a_apk = src_file
+       b_apk = dst_file
+       a_folder = 'temp_a'
+       b_folder = 'temp_b'
+
+       g_output_dir = patchName
+
+       logging.info('Uncompressing Containers... [%s][%s]' % (src_file, dst_file))
+       logging.info('Out Dir -%s' %(g_output_dir))
+       ensure_dir_exists(a_folder)
+       zipf = zipfile.ZipFile(a_apk, 'r');
+       zipf.extractall(a_folder)
+       zipf.close()
+
+       ensure_dir_exists(b_folder)
+       zipf = zipfile.ZipFile(b_apk, 'r');
+       zipf.extractall(b_folder)
+
+
+       symlinks = []
+       for info in zipf.infolist():
+               basefilename = info.filename[7:]
+               if IsSymlink(info):
+                       symlinks.append(info.filename)
+                       os.remove(b_folder+'/'+info.filename)
+       zipf.close()
+
+       a_files, a_dirs = Get_Files(a_folder)
+       b_files, b_dirs = Get_Files(b_folder)
+
+       logging.info('Going from %d files to %d files' % (len(a_files), len(b_files)))
+
+       # First let's fill up these categories
+       C_files_new = []
+       C_files_removed = []
+       C_files_changed = []
+       C_files_unchanged = []
+
+       # What files appear in B but not in A?
+       for elt in b_files:
+               if elt not in a_files:
+                       #if not elt.endswith('.so'):
+                       C_files_new.append(elt)
+
+       # What files appear in A but not in B?
+       for elt in a_files:
+               if elt not in b_files:
+                       C_files_removed.append(elt)
+
+       # What files have changed contents but not name/path?
+       for elt in b_files:
+               if elt in a_files:
+                       if os.path.islink(a_folder+'/'+elt) or os.path.islink(b_folder+'/'+elt):
+                               print 'links - skip'
+                       elif not filecmp.cmp(a_folder+'/'+elt, b_folder+'/'+elt):
+                               C_files_changed.append(elt)
+                       else:
+                               C_files_unchanged.append(elt)
+
+
+       print('%d new files' % len(C_files_new))
+       print('%d removed files' % len(C_files_removed))
+       print('%d files changed' % len(C_files_changed))
+       print('%d files unchanged' % len(C_files_unchanged))
+
+       # temp dir where we're assembling the patch
+       ensure_dir_exists(g_output_dir)
+
+       unique_fileid = 0
+       toc = open(g_output_dir+'/PATCH.txt','w')
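+       # PATCH.txt acts as a table of contents for the container patch:
+       #   c<id>|<relative path>|<patch name>  - file changed, apply the named diff
+       #   s<id>|<relative path>               - file unchanged between versions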
+       print("writing diff'ed changed files...")
+
+       for elt in C_files_changed:
+               dst_file = b_folder+'/'+elt
+               src_file = a_folder+'/'+elt
+               patchName = (DIFF_PREFIX+'%d_%s_'+PART_NAME+DIFF_SUFFIX) % (unique_fileid, path_leaf(elt))
+               patchLoc = '%s/%s' % (g_output_dir, patchName)
+               #print('src - %s dest -%s patch -%s' % (src_file ,dst_file,patchLoc))
+               subprocess.call([DIFF_UTIL,src_file ,dst_file,patchLoc])
+               toc.write('c%d|%s|%s\n' % (unique_fileid, elt, patchName))
+               unique_fileid = unique_fileid + 1
+
+       for elt in C_files_unchanged:
+               dst_file = b_folder+'/'+elt
+               src_file = a_folder+'/'+elt
+               #print('Same Files src - %s dest -%s' % (src_file ,dst_file))
+               toc.write('s%d|%s\n' % (unique_fileid, elt))
+               unique_fileid = unique_fileid + 1
+
+       #Create NEW TPK with empty data for below files and NEW files
+       shutil.copy2(b_apk, g_output_dir)
+
+       #Maybe for host??
+       #temp_apk = '../'+g_output_dir+'/'+b_apk
+       temp_apk = '../'+g_output_dir+'/'+path_leaf(b_apk)
+
+       for elt in C_files_changed:
+               dst_file = b_folder+'/'+elt
+               #print dst_file
+               open(dst_file, 'w').close()
+
+       for elt in C_files_unchanged:
+               dst_file = b_folder+'/'+elt
+               open(dst_file, 'w').close()
+
+       WorkingDir = os.getcwd()
+       os.chdir(WorkingDir+"/"+b_folder)
+
+       #for elt in files_changed:
+       #       subprocess.call(['zip', temp_apk, elt]) # confirm ZIP options, extra fields etc.. jus zip it, shd do all at once.. else time taking
+
+       #for elt in files_unchanged:
+       #       subprocess.call(['zip', temp_apk, elt])
+
+       subprocess.call(['zip','-ryq', temp_apk, '*'])
+       os.chdir(WorkingDir)
+       toc.close()
+
+       Apply_Container_Delta(path_leaf(a_apk), path_leaf(b_apk), b_apk, a_folder, g_output_dir)
+       shutil.rmtree(a_folder)
+       shutil.rmtree(b_folder)
+
+def NewFiles(src, dest):
+       print src,dest
+       subprocess.call(['cp','-rp', src,dest])
+    #try:
+               #shutil.copytree(src, dest)
+    #except OSError as e:
+        # If the error was caused because the source wasn't a directory
+        #if e.errno == errno.ENOTDIR:
+            #shutil.copy2(src, dest)
+        #else:
+            #print('Directory not copied. Error: %s' % e)
+
+def measure_two_filediffs(src, dst):
+       patchLoc = 'temp.patch'
+       subprocess.call([DIFF_UTIL,src,dst,patchLoc])
+       result_size = os.path.getsize(patchLoc)
+       os.remove(patchLoc)
+       return result_size
+
+def Get_Files(path):
+       all_files = []
+       all_dirs = []
+
+       for root, directories, filenames in os.walk(path, topdown=False, followlinks=False):
+               for directory in directories:
+                       #DirName = os.path.join(root+'/',directory)
+                       DirName = os.path.join(root,directory)
+                       if os.path.islink(DirName):
+                               logging.debug('This is symlink pointing to dir -%s' % DirName)
+                               all_files.append(os.path.relpath(DirName, path))
+                       elif not os.listdir(DirName):
+                               #print('*****Empty Directory******* -%s', DirName)
+                               #Should this be appended? Empty dirs should still be considered
+                               all_dirs.append(os.path.relpath(DirName, path))
+                       else:
+                               all_dirs.append(os.path.relpath(DirName, path))
+               for filename in filenames:
+                       FileName = os.path.join(root,filename)
+                       all_files.append(os.path.relpath(FileName, path))
+
+       all_files.sort()
+       all_dirs.sort()
+       return all_files, all_dirs
+
+
+USAGE_DOCSTRING = """
+      Generate delta using BASEOLD and BASENEW
+         Attributes are optional
+         Usage: CreatePatch.py UPDATE_TYPE PARTNAME OLDBASE NEWBASE OUTFOLDER
+"""
+
+def Usage(docstring):
+  print docstring.rstrip("\n")
+  print COMMON_DOCSTRING
+
+
+
+if __name__ == '__main__':
+       main()
+
diff --git a/mk_delta/common/bin/chsmack_attr.sh b/mk_delta/common/bin/chsmack_attr.sh
new file mode 100755 (executable)
index 0000000..7288182
--- /dev/null
@@ -0,0 +1,37 @@
+#!/bin/sh
+
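+# Print the SMACK attributes of the given path in chsmack style, e.g.
+# (illustrative labels): /usr/bin/foo access="System::Shared" execute="_"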
+chsmack_attr() {
+       TARGET="$1"
+       ATTRS=`attr -Slq "${TARGET}"`
+       CHSMACK_STR="${TARGET}"
+       for attrname in ${ATTRS}
+       do
+               value=`attr -Sq -g ${attrname} "${TARGET}"`
+
+               case "${attrname}" in
+               SMACK64)
+                       chsmack_attrname="access"
+                       ;;
+               SMACK64EXEC)
+                       chsmack_attrname="execute"
+                       ;;
+               SMACK64TRANSMUTE)
+                       chsmack_attrname="transmute"
+                       ;;
+               SMACK64MMAP)
+                       chsmack_attrname="mmap"
+                       ;;
+               *)
+                       continue
+               ;;
+               esac
+
+               CHSMACK_STR="${CHSMACK_STR} ${chsmack_attrname}=\"${value}\""
+
+       done
+}
+
+chsmack_attr "$1"
+echo "${CHSMACK_STR}"
+
+
diff --git a/mk_delta/common/bin/dzImagescript.sh b/mk_delta/common/bin/dzImagescript.sh
new file mode 100755 (executable)
index 0000000..f96bc4f
--- /dev/null
@@ -0,0 +1,255 @@
+#!/bin/bash
+
+pname="${0##*/}"
+args=("$@")
+cur_dir="$(pwd)"
+
+# file names:
+decompression_code="decompression_code"
+piggy_gz_piggy_trailer="piggy.gz+piggy_trailer"
+piggy="piggy"
+piggy_gz="piggy.gz"
+padding_piggy="padding_piggy"
+piggy_trailer="piggy_trailer"
+ramfs_gz_part3="initramfs.cpio+part3"
+ramfs_cpio_gz="initramfs.cpio.gz"
+padding3="padding3"
+part3="part3"
+kernel_img="kernel.img"
+ramfs_cpio="initramfs.cpio"
+ramfs_dir="initramfs"
+sizes="sizes"
+ramfs_part3="ramfs+part3"
+ramfs_list="initramfs_list"
+cpio_t="cpio-t"
+
+
+cpio="cpio_set0"
+
+# We dup2 stderr to 3 so an error path is always available (even
+# during commands where stderr is redirected to /dev/null).  If option
+# -v is set, we dup2 stderr to 9 also so commands (and some of their
+# results if redirected to &9) are printed also.
+exec 9>/dev/null                # kill diagnostic output (will be >&2 if -v)
+exec 3>&2                       # an always open error channel
+
+#
+########### Start of functions
+#
+
+# Emit an error message and abort
+fatal(){
+    # Syntax: fatal <string ...>
+    # Output error message, then abort
+    echo >&3
+    echo >&3 "$pname: $*"
+    kill $$
+    exit 1
+}
+
+# Execute a command, displaying the command if -v:
+cmd(){
+    # Syntax: cmd <command> <args...>
+    # Execute <command>, echo command line if -v
+    echo >&9 "$*"
+    "$@"
+}
+
+# Execute a required command, displaying the command if -v, abort on
+# error:
+rqd(){
+    # Syntax: rqd <command> <args...>
+    # Execute <command>, echo commandline if -v, abort on error
+    cmd "$@" || fatal "$* failed."
+}
+
+findByteSequence(){
+    # Syntax: findByteSequence <fname> [<string, default: gzip header>]
+    # Returns: position (offset) on stdout, empty string if nothing found
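+    # e.g. findByteSequence zImage       -> offset of the first gzip magic (\x1f\x8b\x08)
+    #      findByteSequence zImage lzma  -> offset of the first lzma header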
+    file="$1"
+    local opt
+    if [ "$2" = "lzma" ]; then
+        srch=$'\x5d....\xff\xff\xff\xff\xff'
+        opt=
+    else
+        srch="${2:-$'\x1f\x8b\x08'}" # Default: search for gzip header
+        opt="-F"
+    fi
+    pos=$(LC_ALL=C grep $opt -a --byte-offset -m 1 --only-matching -e "$srch" -- "$file")
+    echo ${pos%%:*}
+}
+
+getFileSize(){
+    # Syntax: getFileSize <file>
+    # Returns size of the file on stdout.
+    # Aborts if file doesn't exist.
+    rqd stat -c %s "$1"
+}
+checkNUL(){
+    # Syntax: checkNUL file offset
+    # Returns true (0) if byte there is 0x0.
+    [ "$(rqd 2>/dev/null dd if="$1" skip=$2 bs=1 count=1)" = $'\0' ]
+}
+
+gunzipWithTrailer(){
+    # Syntax gunzipWithTrailer <file> <gzip name, sans .gz> <padding> <trailer>
+    #
+    # <file>: the input file
+    # <gzip name, sans .gz>, <padding>, <trailer>:
+    #   The output files.  For the gzipped part, both the
+    #   compressed and the uncompressed output is generated, so we have
+    #   4 output files.
+    local file="$1"
+    local gz_result="$2.gz"
+    local result="$2"
+    local padding="$3"
+    local trailer="$4"
+    local tmpfile="/tmp/gunzipWithTrailer.$$.gz"
+    local original_size=$(getFileSize "$file")
+    local d=$(( (original_size+1) / 2))
+    local direction fini at_min=0
+    local results_at_min=()
+    local size=$d
+    local at_min=
+    echo "Separating gzipped part from trailer in  "$file""
+    echo -n "Trying size: $size"
+    while :; do
+        rqd dd if="$file" of="$tmpfile" bs=$size count=1 2>/dev/null
+        cmd gunzip >/dev/null 2>&1 -c "$tmpfile"
+        res=$?
+        if [ "$d" -eq 1 ]; then
+            : $((at_min++))
+            results_at_min[$size]=1
+            [ "$at_min" -gt 3 ] && break
+        fi
+        d=$(((d+1)/2))
+        case $res in
+                # 1: too small
+            1) size=$((size+d)); direction="↑";;
+                # 2: trailing garbage
+            2) size=$((size-d)); direction="↓";;
+                # OK
+            0) break;;
+            *) fatal "gunzip returned $res while checking "$file"";;
+        esac
+        echo -n "  $size"
+    done
+    if [ "$at_min" -gt 3 ]; then
+        echo -e "\ngunzip result is oscillating between 'too small' and 'too large' at size: ${!results_at_min[*]}"
+        echo -n "Trying lower nearby values:  "
+        fini=
+        for ((d=1; d < 30; d++)); do
+            : $((size--))
+            echo -n "  $size"
+            rqd dd if="$file" of="$tmpfile" bs=$size count=1 2>/dev/null
+            if cmd gunzip >/dev/null 2>&1 -c "$tmpfile"; then
+                echo -n " - OK"
+                fini=1
+                break
+            fi
+        done
+        [ -z "$fini" ] && fatal 'oscillating gunzip result, giving up.'
+    fi
+    # We've found the end of the gzipped part.  This is not the real
+    # end since gzip allows for some trailing padding to be appended
+    # before it barfs.  First, go back until we find a non-null
+    # character:
+    echo -ne "\npadding check (may take some time): "
+    real_end=$((size-1))
+    while checkNUL "$file" $real_end; do
+        : $((real_end--))
+    done
+    # Second, try if gunzip still succeeds.  If not, add trailing
+    # null(s) until it succeeds:
+    while :; do
+        rqd dd if="$file" of="$tmpfile" bs=$real_end count=1 2>/dev/null
+        gunzip >/dev/null 2>&1 -c "$tmpfile"
+        case $? in
+            # 1: too small
+            1) : $((real_end++));;
+            *) break;;
+        esac
+    done
+    real_next_start=$size
+    # Now, skip NULs forward until we reach a non-null byte.  This is
+    # considered as being the start of the next part.
+    while checkNUL "$file" $real_next_start; do
+        : $((real_next_start++))
+    done
+    echo $((real_next_start - real_end))
+    echo
+    rm "$tmpfile"
+    # Using the numbers we got so far, create the output files which
+    # reflect the parts we've found so far:
+    rqd dd 2>&9 if="$file" of="$gz_result" bs=$real_end count=1
+    rqd dd 2>&9 if="$file" of="$padding" skip=$real_end bs=1 count=$((real_next_start - real_end))
+    rqd dd 2>&9 if="$file" of="$trailer" bs=$real_next_start skip=1
+    rqd gunzip -c "$gz_result" > "$result"
+}
+
+
+unpack()(
+    [ -d "$unpacked" ] && echo "\
+Warning: there is already an unpacking directory.  If you have files added on
+your own there, the repacking result may not reflect the result of the
+current unpacking process."
+    rqd mkdir -p "$unpacked"
+    rqd cd "$unpacked"
+    sizes="$unpacked/sizes"
+    echo "# Unpacking sizes" > "$sizes"
+
+    piggy_start=$(findByteSequence "$cur_dir/$zImage")
+    if [ -z "$piggy_start" ]; then
+        fatal "Can't find a gzip header in file '$zImage'"
+    fi
+
+    rqd dd 2>&9 if="$cur_dir/$zImage" bs="$piggy_start" count=1 of="$decompression_code"
+    rqd dd 2>&9 if="$cur_dir/$zImage" bs="$piggy_start" skip=1 of="$piggy_gz_piggy_trailer"
+
+    gunzipWithTrailer  "$piggy_gz_piggy_trailer" \
+        "$piggy" "$padding_piggy" "$piggy_trailer"
+
+    echo
+       sudo rm -rf "piggy.gz" "piggy.gz+piggy_trailer" "sizes"
+    echo "Success."
+    echo "The unpacked files and the initramfs directory are in "$unpacked""
+)
+
+#### start of main program
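+# Typical use: ./dzImagescript.sh -u [zImage]
+# Unpacks the given zImage (default name "zImage") into <zImage>_unpacked.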
+while getopts xv12345sgrpuhtz-: argv; do
+    case $argv in
+        p|u|z|1|2|3|4|5|t|r|g) eval opt_$argv=1;;
+        v) exec 9>&2; opt_v=1;;
+        s) cpio="cpio";;
+        x) set -x;;
+        -) if [ "$OPTARG" = "version" ]; then
+              echo "$pname $version"
+              exit 0
+           else
+              echo "Wrong Usage, use -u to unpack"
+           fi;;
+        h|-) echo "Wrong Usage, use -u to unpack";;
+        *) fatal "Illegal option";;
+    esac
+done
+shift $((OPTIND-1))
+zImage="${1:-zImage}"
+unpacked="$cur_dir/${zImage}_unpacked"
+packing="$cur_dir/${zImage}_packing"
+shift
+if [ -n "$*" ]; then
+    fatal "Excess arguments: '$*'"
+fi
+
+if [ -n "$opt_u" ]; then
+    [ -f "$zImage" ] || fatal "file '$zImage': not found"
+    unpack
+fi
+if [ -z "$opt_u" ]; then
+    echo >&2 "$pname: Need at least -u option."
+    echo >&2 "$pname: Type '$pname --help' for usage info."
+    exit 1
+fi
+
+exit
+
diff --git a/mk_delta/common/bin/extended_attr.sh b/mk_delta/common/bin/extended_attr.sh
new file mode 100755 (executable)
index 0000000..a1747fd
--- /dev/null
@@ -0,0 +1,48 @@
+#!/bin/sh
+
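+# Print the capability, POSIX ACL and SMACK attributes of the given path,
+# colon separated: <security.capability>:<system.posix_acl_access>:<chsmack string>
+# Attributes that are not set are left empty.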
+capability() {
+       # -n name
+       # -m - -d can dump all extended attributes
+       # -e hex encoding
+       TARGET="$1"
+       CAP_ATTR_STR=`getfattr -hP -n security.capability -e hex "${TARGET}" 2>/dev/null | grep security.capability`
+       ACL_ATTR_STR=`getfattr -hP -n system.posix_acl_access -e hex "${TARGET}" 2>/dev/null | grep system.posix_acl_access`
+}
+
+chsmack_attr() {
+       TARGET="$1"
+       ATTRS=`attr -Slq "${TARGET}"`
+       CHSMACK_STR="${TARGET}"
+       for attrname in ${ATTRS}
+       do
+               value=`attr -Sq -g ${attrname} "${TARGET}"`
+
+               case "${attrname}" in
+               SMACK64)
+                       chsmack_attrname="access"
+                       ;;
+               SMACK64EXEC)
+                       chsmack_attrname="execute"
+                       ;;
+               SMACK64TRANSMUTE)
+                       chsmack_attrname="transmute"
+                       ;;
+               SMACK64MMAP)
+                       chsmack_attrname="mmap"
+                       ;;
+               *)
+                       continue
+               ;;
+               esac
+
+               CHSMACK_STR="${CHSMACK_STR} ${chsmack_attrname}=\"${value}\""
+
+       done
+}
+
+capability "$1"
+chsmack_attr "$1"
+
+echo "${CAP_ATTR_STR}:${ACL_ATTR_STR}:${CHSMACK_STR}"
+
+
diff --git a/mk_delta/common/bin/unpack.sh b/mk_delta/common/bin/unpack.sh
new file mode 100755 (executable)
index 0000000..869881d
--- /dev/null
@@ -0,0 +1,236 @@
+#!/bin/bash
+
+pname="${0##*/}"
+args=("$@")
+cur_dir="$(pwd)"
+
+# file names:
+decompression_code="decompression_code"
+piggy_gz_piggy_trailer="piggy.gz+piggy_trailer"
+piggy="piggy"
+piggy_gz="piggy.gz"
+padding_piggy="padding_piggy"
+piggy_trailer="piggy_trailer"
+padding3="padding3"
+sizes="sizes"
+
+# We dup2 stderr to 3 so an error path is always available (even
+# during commands where stderr is redirected to /dev/null).  If option
+# -v is set, we dup2 stderr to 9 also so commands (and some of their
+# results if redirected to &9) are printed also.
+exec 9>/dev/null                # kill diagnostic output (will be >&2 if -v)
+exec 3>&2                       # an always open error channel
+
+#
+########### Start of functions
+#
+
+# Emit an error message and abort
+fatal(){
+    # Syntax: fatal <string ...>
+    # Output error message, then abort
+    echo >&3
+    echo >&3 "$pname: $*"
+    kill $$
+    exit 1
+}
+
+# Execute a command, displaying the command if -v:
+cmd(){
+    # Syntax: cmd <command> <args...>
+    # Execute <command>, echo command line if -v
+    echo >>"$workspace/log_file" "$*"
+    "$@"
+}
+
+# Execute a required command, displaying the command if -v, abort on
+# error:
+rqd(){
+    # Syntax: rqd <command> <args...>
+    # Execute <command>, echo commandline if -v, abort on error
+    cmd "$@" || fatal "$* failed."
+}
+
+checkNUL(){
+    # Syntax: checkNUL file offset
+    # Returns true (0) if byte there is 0x0.
+    [ "$(rqd 2>>"$workspace/log_file" "$workspace/dd" if="$1" skip=$2 bs=1 count=1)" = $'\0' ]
+}
+
+gunzipWithTrailer(){
+    # Syntax gunzipWithTrailer <file> <gzip name, sans .gz> <padding> <trailer>
+    #
+    # <file>: the input file
+    # <gzip name, sans .gz>, <padding>, <trailer>:
+    #   The output files.  For the gzipped part, both the
+    #   compressed and the uncompressed output is generated, so we have
+    #   4 output files.
+    local file="$1"
+    local gz_result="$2.gz"
+    local result="$2"
+    local padding="$3"
+    local trailer="$4"
+    local tmpfile="/tmp/gunzipWithTrailer.$$.gz"
+       local original_size=$("$workspace/stat" -c %s "$unpacked/$file") 2>>"$workspace/log_file"
+       echo "Original size is $original_size" >> "$workspace/log_file"
+    local d=$(( (original_size+1) / 2))
+    local direction fini at_min=0
+    local results_at_min=()
+    local size=$d
+    local at_min=
+       rm -rf /tmp/test_file
+    echo "Separating gzipped part from trailer in "$unpacked/$file"" >> "$workspace/log_file"
+    echo -n "Trying size: $size"       >> "$workspace/log_file"
+    while :; do
+        rqd "$workspace/dd" if="$unpacked/$file" of="$tmpfile" bs=$size count=1 2>>"$workspace/log_file"
+        cmd "$workspace/gzip" >/tmp/test_file 2>>"$workspace/log_file" -d -c "$tmpfile"
+        res=$?
+               echo "result for gunzip is $res" >>"$workspace/log_file"
+        if [ "$d" -eq 1 ]; then
+            : $((at_min++))
+            results_at_min[$size]=1
+            [ "$at_min" -gt 3 ] && break
+        fi
+        d=$(((d+1)/2))
+        case $res in
+                # 1: too small
+            1)  echo "In case 1" >> "$workspace/log_file"
+                               size=$((size+d)); direction="↑";;
+                # 2: trailing garbage
+            2)         echo "In case 2" >> "$workspace/log_file"
+                               size=$((size-d)); direction="↓";;
+                # OK
+            0)         echo "Breaking" >> "$workspace/log_file"
+                               break;;
+            *)         echo "In case *" >> "$workspace/log_file"
+                               fatal "gunzip returned $res while checking "$unpacked/$file"";;
+        esac
+        echo -n "  $size" >> "$workspace/log_file"
+    done
+    if [ "$at_min" -gt 3 ]; then
+        echo -e "\ngunzip result is oscillating between 'too small' and 'too large' at size: ${!results_at_min[*]}"    >> "$workspace/log_file"
+        echo -n "Trying lower nearby values:  "        >> "$workspace/log_file"
+        fini=
+        for ((d=1; d < 30; d++)); do
+            : $((size--))
+            echo -n "  $size" >> "$workspace/log_file"
+            rqd "$workspace/dd" if="$unpacked/$file" of="$tmpfile" bs=$size count=1 2>/dev/null
+            if cmd "$workspace/gzip" >/dev/null 2>&1 -d -c  "$tmpfile"; then
+                echo -n " - OK"        >> "$workspace/log_file"
+                fini=1
+                break
+            fi
+        done
+        [ -z "$fini" ] && fatal 'oscillating gunzip result, giving up.'
+    fi
+    # We've found the end of the gzipped part.  This is not the real
+    # end since gzip allows for some trailing padding to be appended
+    # before it barfs.  First, go back until we find a non-null
+    # character:
+    echo -ne "\npadding check (may take some time): " >> "$workspace/log_file"
+    real_end=$((size-1))
+    while checkNUL "$unpacked/$file" $real_end; do
+        : $((real_end--))
+    done
+       echo "Found real end at $real_end" >> "$workspace/log_file"
+    # Second, try if gunzip still succeeds.  If not, add trailing
+    # null(s) until it succeeds:
+    while :; do
+        rqd "$workspace/dd" if="$unpacked/$file" of="$tmpfile" bs=$real_end count=1 2>>"$workspace/log_file"
+        "$workspace/gzip" >/tmp/test_file2 2>>"$workspace/log_file" -d -c "$tmpfile"
+        case $? in
+            # 1: too small
+            1)         echo "In case 1" >> "$workspace/log_file"
+                               : $((real_end++));;
+            *)         echo "Case other $?" >> "$workspace/log_file"
+                               break;;
+        esac
+    done
+       echo "Done with add trailing null(s) until it succeeds" >> "$workspace/log_file"
+    real_next_start=$size
+    # Now, skip NULs forward until we reach a non-null byte.  This is
+    # considered as being the start of the next part.
+    while checkNUL "$unpacked/$file" $real_next_start; do
+        : $((real_next_start++))
+    done
+    echo $((real_next_start - real_end))       >> "$workspace/log_file"
+    echo >> "$workspace/log_file"
+    rm "$tmpfile"
+    # Using the numbers we got so far, create the output files which
+    # reflect the parts we've found so far:
+    rqd "$workspace/dd" if="$unpacked/$file" of="$unpacked/$gz_result" bs=$real_end count=1
+    rqd "$workspace/dd" if="$unpacked/$file" of="$unpacked/$padding" skip=$real_end bs=1 count=$((real_next_start - real_end))
+    rqd "$workspace/dd" if="$unpacked/$file" of="$unpacked/$trailer" bs=$real_next_start skip=1
+    rqd "$workspace/gzip" -c -d "$unpacked/$gz_result" > "$unpacked/$result"
+}
+
+unpack()(
+    [ -d "$unpacked" ] && echo "\
+Warning: there is already an unpacking directory.  If you have files added on
+your own there, the repacking result may not reflect the result of the
+current unpacking process."
+    rqd mkdir -p "$unpacked"
+    rqd cd "$unpacked"
+    sizes="$unpacked/sizes"
+    echo "# Unpacking sizes" > "$sizes"
+    log_file="$unpacked/log_file"
+    #piggy_start=$1
+    if [ -z "$piggy_start" ]; then
+               fatal "Can't find a gzip header in file '$zImage'" >> "$workspace/log_file"
+        fatal "Can't find a gzip header in file '$zImage'"
+       else
+               echo "start is $piggy_start" >> "$sizes"
+    fi
+
+    rqd "$workspace/dd" if="$zImage" bs="$piggy_start" count=1 of="$unpacked/$decompression_code"
+    rqd "$workspace/dd" if="$zImage" bs="$piggy_start" skip=1 of="$piggy_gz_piggy_trailer"
+
+    gunzipWithTrailer  "$piggy_gz_piggy_trailer" \
+        "$piggy" "$padding_piggy" "$piggy_trailer"
+
+    echo
+    echo "Success."
+)
+
+#### start of main program
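+# Expected invocations (dd/gzip/find are taken from the given workspace or
+# cmd directory rather than from PATH):
+#   unpack.sh -u <workspace> <zImage name> <piggy offset>   - unpack the kernel image
+#   unpack.sh -p <work dir> <target zip> <cmd dir>          - repack a directory tree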
+while getopts xv12345sgrpuhtz-: argv; do
+    case $argv in
+        p|z|1|2|3|4|5|t|r|g) eval opt_$argv=1;;
+        u)  opt_u=1
+                       workspace=$2
+                   zImage="$2/$3"
+                       piggy_start=$4
+                       unpacked="${zImage}_unpacked"
+                       packing="${zImage}_packing";;
+        -) if [ "$OPTARG" = "version" ]; then
+              echo "$pname $version"
+              exit 0
+           else
+              echo "Wrong Usage, use -u to unpack"
+           fi;;
+        h|-) echo "Wrong Usage, use -u to unpack";;
+        *) fatal "Illegal option";;
+    esac
+done
+if [ -n "$opt_u" ]; then
+    [ -f "$zImage" ] || fatal "file '$zImage': not found"
+    unpack
+fi
+if [ -n "$opt_p" ]; then
+       work_dir=$2
+       tgt_file=$3
+       cmd_dir=$4
+       rqd cd "$work_dir"
+       #remove all links before proceeding with zip processing
+       "$cmd_dir/find" . -type l  -exec rm {} \;
+       "$cmd_dir/find" . -exec touch -t 200011111111.11 {} \;
+       "$cmd_dir/find" . -exec chmod 0755 {} \;
+       "$cmd_dir/zip" -ryX "$tgt_file" *
+fi
+if [ -z "$opt_u$opt_p" ]; then
+    echo >&2 "$pname: Need -u or -p option to work"
+    echo >&2 "$pname: Type '$pname --help' for usage info."
+    exit 1
+fi
+
+exit
diff --git a/mk_delta/rpi3/bin/batch_mk_delta.sh b/mk_delta/rpi3/bin/batch_mk_delta.sh
new file mode 100755 (executable)
index 0000000..a216b40
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+export FW_BW="FW_DELTA"
+
+./bin/mk_delta.sh
+
diff --git a/mk_delta/rpi3/bin/mk_delta.sh b/mk_delta/rpi3/bin/mk_delta.sh
new file mode 100755 (executable)
index 0000000..32eb61c
--- /dev/null
@@ -0,0 +1,366 @@
+#!/bin/bash
+
+
+#==================
+# functions
+#==================
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_read_delta_cfg
+#
+# Description :
+#      read configuration file for delta generation.
+#      config file format : PART_NAME PART_BIN UPDATE_TYPE (delimiter : TAB)
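+#      e.g. a line from cfg/delta.cfg:
+#      ROOTFS          rootfs.img                      DELTA_FS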
+#
+
+fn_read_delta_cfg()
+{
+       in_file=$1
+
+       if [ ! -r ${in_file} ]; then
+               echo "file ${in_file} cannot be read"
+               return 1
+       fi
+
+       line_num=0
+       while read line
+       do
+               # skip comment line
+               if [ "${line:0:1}" = "#" ]; then
+                       continue
+               fi
+
+               # skip empty line
+               line_len=${#line}
+               if [ "${line_len}" = "0" ]; then
+                       continue
+               fi
+
+
+               fields=(${line})
+               PART_NAME_delta[${line_num}]=${fields[0]}
+               PART_BIN_delta[${line_num}]=${fields[1]}
+               UPDATE_TYPE_delta[${line_num}]=${fields[2]}
+
+               line_num=$(($line_num + 1))
+
+       done < ${in_file}
+
+       PART_NUM_delta=${line_num}
+
+       return 0
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_read_part_cfg
+#
+# Description :
+#      read configuration file for partition information.
+#      config file format : PART_NAME DELTA_BIN UPDATE_TYPE BLK_DEV BLK_OFFSET (delimiter : TAB)
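+#      e.g. a line from cfg/default_part.cfg:
+#      ROOTFS          rootfs.img/                     DELTA_FS        /dev/mmcblk0p2          0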
+#
+
+fn_read_part_cfg()
+{
+       in_file=$1
+
+       if [ ! -r ${in_file} ]; then
+               echo "file ${in_file} cannot be read"
+               return 1
+       fi
+
+       line_num=0
+       while read line
+       do
+               # skip comment line
+               if [ "${line:0:1}" = "#" ]; then
+                       continue
+               fi
+
+               # skip empty line
+               line_len=${#line}
+               if [ "${line_len}" = "0" ]; then
+                       continue
+               fi
+
+
+               fields=(${line})
+               PART_NAME_part[${line_num}]=${fields[0]}
+               DELTA_BIN_part[${line_num}]=${fields[1]}
+               UPDATE_TYPE_part[${line_num}]=${fields[2]}
+               BLK_DEV_part[${line_num}]=${fields[3]}
+               BLK_OFFSET_part[${line_num}]=${fields[4]}
+
+               line_num=$(($line_num + 1))
+
+       done < ${in_file}
+
+       PART_NUM_part=${line_num}
+
+       return 0
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_gen_update_cfg
+#
+# Description :
+#      generate configuration file to be used during update.
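+#      each line of the generated update.cfg is tab separated:
+#      PART_NAME  DELTA_BIN  UPDATE_TYPE  BLK_DEV  BLK_OFFSET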
+#
+
+fn_gen_update_cfg()
+{
+       out_file=$1
+
+       update_idx=0
+
+       for ((delta_idx=0;delta_idx<PART_NUM_delta;delta_idx++))
+       do
+               FOUND=0
+               for ((part_idx=0;part_idx<PART_NUM_part;part_idx++))
+               do
+                       if [ "${PART_NAME_part[${part_idx}]}" = "${PART_NAME_delta[${delta_idx}]}" ]; then
+                               FOUND=1
+                               break
+                       fi
+               done
+
+               if [ "${FOUND}" = "1" ]; then
+                       #-- PART_NAME, DELTA_BIN, BLK_DEV, BLK_OFFSET are from part.cfg
+                       PART_NAME[${update_idx}]=${PART_NAME_part[${part_idx}]}
+                       DELTA_BIN[${update_idx}]=${DELTA_BIN_part[${part_idx}]}
+                       BLK_DEV[${update_idx}]=${BLK_DEV_part[${part_idx}]}
+                       BLK_OFFSET[${update_idx}]=${BLK_OFFSET_part[${part_idx}]}
+
+                       #-- PART_BIN, UPDATE_TYPE are from delta.cfg
+                       PART_BIN[${update_idx}]=${PART_BIN_delta[${delta_idx}]}
+                       UPDATE_TYPE[${update_idx}]=${UPDATE_TYPE_delta[${delta_idx}]}
+
+                       update_idx=$(($update_idx + 1))
+               fi
+
+       done
+
+       PART_NUM=${update_idx}
+
+       if [ -f ${out_file} ]; then
+               sudo rm -fr ${out_file}
+       fi
+
+       for ((update_idx=0;update_idx<PART_NUM;update_idx++))
+       do
+               f1=${PART_NAME[${update_idx}]}
+               f2=${DELTA_BIN[${update_idx}]}
+               f3=${UPDATE_TYPE[${update_idx}]}
+               f4=${BLK_DEV[${update_idx}]}
+               f5=${BLK_OFFSET[${update_idx}]}
+
+               echo -e "$f1\t\t$f2\t\t$f3\t\t$f4\t\t$f5" >> ${out_file}
+       done
+
+       return 0
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_set_default_params
+#
+# Description :
+#      set default params used globally in this script
+#
+
+fn_set_default_params()
+{
+       MY_ID=`whoami`
+       MK_PART_DELTA=./bin/mk_part_delta.sh
+       DELTA_CFG_PATH=./cfg/delta.cfg
+       PART_CFG_PATH=./cfg/default_part.cfg
+       UPDATE_CFG_PATH=./cfg/update.cfg
+
+       LOG_PATH=./data/Delta.log
+       # Getting date and minor version
+       MONDATE=$(date +%m%d)
+       i=1
+       while [ -d "result/${MONDATE}_${i}" ]
+       do
+               i=$(($i + 1))
+       done
+       MONDATE="${MONDATE}_${i}"
+       echo "MONDATE = ${MONDATE}"
+
+       if [ "z${OLD_TAR_DIR}" = "z" ]; then
+               export OLD_TAR_DIR="old_tar"
+       fi
+       if [ "z${NEW_TAR_DIR}" = "z" ]; then
+               export NEW_TAR_DIR="new_tar"
+       fi
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_swap_binary_files
+#
+# Description :
+#      change old & new for making backward delta
+#
+
+fn_swap_binary_files()
+{
+       TEMP_TAR_DIR=${OLD_TAR_DIR}
+       export OLD_TAR_DIR=${NEW_TAR_DIR}
+       export NEW_TAR_DIR=${TEMP_TAR_DIR}
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_print_update_cfg
+
+fn_print_update_cfg()
+{
+       for ((update_idx=0;update_idx<PART_NUM;update_idx++))
+       do
+                       echo "------------------------------"
+                       echo "[PART_NAME]    ${PART_NAME[${update_idx}]}"
+                       echo "[DELTA_BIN]    ${DELTA_BIN[${update_idx}]}"
+                       echo "[BLK_DEV]      ${BLK_DEV[${update_idx}]}"
+                       echo "[BLK_OFFSET]   ${BLK_OFFSET[${update_idx}]}"
+                       echo "[PART_BIN]     ${PART_BIN[${update_idx}]}"
+                       echo "[UPDATE_TYPE]  ${UPDATE_TYPE[${update_idx}]}"
+
+       done
+       echo "------------------------------"
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_process_extra_delta
+#
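+# Description :
+#      handle partitions whose update type is EXTRA in delta.cfg
+#      (e.g. the RAMDISK2 recovery image and the RECOVERY marker file)
+#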
+
+fn_process_extra_delta()
+{
+       RAMDISK2_FULL_IMG=0
+       if [ "${part_name}" = "RAMDISK2" ]; then
+               if [ "${RAMDISK2_FULL_IMG}" = "0" ]; then
+                       #--- delta recovery partition ---
+                       ${MK_PART_DELTA} ${part_name} DELTA_IMG ${part_bin} ${delta_bin}
+                       if [ "$?" = "0" ]; then
+                               sudo mv data/${PART_OUT} ${RESULT_DIR}/${part_bin}
+                               sudo mv ${RESULT_DIR}/${part_bin}/${delta_bin} ${DELTA_DIR}
+                       fi
+               fi
+               if [ "${RAMDISK2_FULL_IMG}" = "1" ]; then
+                       #--- full recovery partition ---
+                       ${MK_PART_DELTA} ${part_name} FULL_IMG ${part_bin} ${delta_bin}
+                       if [ "$?" = "0" ]; then
+                               sudo mv data/${PART_OUT} ${RESULT_DIR}/${part_bin}
+                               sudo mv ${RESULT_DIR}/${part_bin}/${part_bin} ${DELTA_DIR}
+                       fi
+               fi
+       elif [ "${part_name}" = "RECOVERY" ]; then
+               #--- recovery kernel ---
+               sudo echo "1" > ${DELTA_DIR}/${delta_bin}
+       elif [ "${part_name}" = "EXTRA" ]; then
+               echo "---"
+       fi
+}
+
+###############################################################################
+#==================
+# Main Start
+#==================
+
+fn_set_default_params
+
+fn_read_delta_cfg ${DELTA_CFG_PATH}
+
+fn_read_part_cfg ${PART_CFG_PATH}
+
+fn_gen_update_cfg ${UPDATE_CFG_PATH}
+
+COMMON_BINDIR=${PWD}/../common/bin
+
+
+################### Make delta Forward and Backward #####################
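+# FW_BW may contain FW_DELTA and/or BW_DELTA; for BW_DELTA the old/new tar
+# directories are swapped (fn_swap_binary_files) so the delta is generated
+# in the reverse direction.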
+if [ "z${FW_BW}" = "z" ]; then
+       #FW_BW="FW_DELTA BW_DELTA"
+       FW_BW="FW_DELTA"
+fi
+
+START_TIMESTAMP="$(date +%T)"
+
+for Make_Delta in $FW_BW
+do
+       #--- archive result directory ---
+       echo "Make $Make_Delta Delta file"
+       RESULT_DIR=result/$MONDATE/$Make_Delta
+       DELTA_DIR=${RESULT_DIR}/DELTA
+       mkdir -p ${DELTA_DIR}
+
+       if [ "$Make_Delta" = "BW_DELTA" ]; then
+               fn_swap_binary_files
+       fi
+
+
+       #--- make delta for each partition ---
+       for ((part_num=0;part_num<PART_NUM;part_num++))
+       do
+               part_name=${PART_NAME[${part_num}]}
+               update_type=${UPDATE_TYPE[${part_num}]}
+               part_bin=${PART_BIN[${part_num}]}
+               delta_bin=${DELTA_BIN[${part_num}]}
+
+               PART_OUT=${part_name}_OUT
+
+               if [ "${update_type}" = "EXTRA" ]; then
+                       fn_process_extra_delta
+               else
+                       ${MK_PART_DELTA} ${part_name} ${update_type} ${part_bin} ${delta_bin}
+                       if [ "$?" = "0" ]; then
+                               if [ "${update_type}" = "DELTA_FS" ]; then
+                                       sudo mv data/${PART_OUT} ${RESULT_DIR}/${part_bin}
+                                       sudo mv ${RESULT_DIR}/${part_bin} ${DELTA_DIR}
+                               else
+                                       sudo mv data/${PART_OUT} ${RESULT_DIR}/${part_bin}
+                                       sudo mv ${RESULT_DIR}/${part_bin}/${delta_bin} ${DELTA_DIR}
+                               fi
+                       else
+                               sudo rm -rf ${RESULT_DIR}/${part_bin}
+                               echo "Error: Abort Delta Generation"
+                               exit 1
+                       fi
+               fi
+               sudo rm -rf ${RESULT_DIR}/${part_bin}
+       done
+
+       #move update.cfg to delta directory
+       if [ -r ${UPDATE_CFG_PATH} ]; then
+               sudo cp ${UPDATE_CFG_PATH} ${DELTA_DIR}/update.cfg
+               sudo rm ${UPDATE_CFG_PATH}
+       fi
+       if [ -r ${LOG_PATH} ]; then
+               sudo cp ${LOG_PATH} ${RESULT_DIR}/Delta.log
+               sudo rm ${LOG_PATH}
+       fi
+
+       #--- archive result directory ---
+       cd result/$MONDATE
+       echo "tar result directory"
+       sudo tar cf ${MONDATE}_${Make_Delta}.tar $Make_Delta
+       cd ../..
+
+       echo "Making delta binary is End!! ($Make_Delta)"
+done
+
+if [ -r ${UPDATE_CFG_PATH} ]; then
+       sudo rm ${UPDATE_CFG_PATH}
+fi
+cd ${DELTA_DIR}
+sudo cp ${COMMON_BINDIR}/unpack.sh ./
+sudo tar --overwrite -cf ../delta.tar *
+cd -
+
+END_TIMESTAMP="$(date +%T)"
+
+echo "OVERALL TIMESTAMP : [START] ${START_TIMESTAMP} ~ [END] ${END_TIMESTAMP}"
+
diff --git a/mk_delta/rpi3/bin/mk_part_delta.sh b/mk_delta/rpi3/bin/mk_part_delta.sh
new file mode 100755 (executable)
index 0000000..7a19337
--- /dev/null
@@ -0,0 +1,534 @@
+#!/bin/bash
+
+
+#==================
+# functions
+#==================
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_print_line
+#
+
+fn_print_line()
+{
+       CHAR=$1
+       NUM=$2
+
+       echo "*******************************************************************"
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_mk_attribute
+#
+# Params :
+#      $1 - PART_NAME, used to distinguish the target partition
+#      $2 - base directory to search files for old partition
+#      $3 - base directory to search files for new partition
+#
+# Description :
+#      extract attribute information of files on the given partition
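+#      each line of the resulting v*_attr.txt files looks like (illustrative):
+#      "/usr/bin/foo" Regular 14 0755:0000:0000:<cap>:<acl>:<chsmack>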
+#
+
+fn_mk_attribute()
+{
+       TARGET=$1
+       BASE_DIR_OLD=$2
+       BASE_DIR_NEW=$3
+       V1_ATTR_FILE=${OUTPUT_DIR}/v1_${TARGET}_attr.txt
+       V1_NAME_FILE=${OUTPUT_DIR}/v1_list.txt
+       V2_ATTR_FILE=${OUTPUT_DIR}/v2_${TARGET}_attr.txt
+       V2_NAME_FILE=${OUTPUT_DIR}/v2_list.txt
+       V23_NAME_FILE=v23_${TARGET}_attr.txt
+       V13_NAME_FILE=v13_${TARGET}_attr.txt
+       ATTR_CMD=extended_attr.sh
+
+       echo "Attribute generation for ${TARGET} [START] $(date +%T)"
+
+       sudo find ./${BASE_DIR_OLD} -type f -printf '"/%P" Regular 14 %04m:%04U:%04G:' -exec ${COMMON_BINDIR}/${ATTR_CMD} {} \; >  ${V1_ATTR_FILE}
+       sudo find ./${BASE_DIR_OLD} -type l -printf '"/%P" SymLink 14 %04m:%04U:%04G:' -exec ${COMMON_BINDIR}/${ATTR_CMD} {} \; >> ${V1_ATTR_FILE}
+       sudo find ./${BASE_DIR_OLD} -type d -printf '"/%P" Regular 14 %04m:%04U:%04G:' -exec ${COMMON_BINDIR}/${ATTR_CMD} {} \; >> ${V1_ATTR_FILE}
+
+       sudo find ./${BASE_DIR_NEW} -type f -printf '"/%P" Regular 14 %04m:%04U:%04G:' -exec ${COMMON_BINDIR}/${ATTR_CMD} {} \; >  ${V2_ATTR_FILE}
+       sudo find ./${BASE_DIR_NEW} -type l -printf '"/%P" SymLink 14 %04m:%04U:%04G:' -exec ${COMMON_BINDIR}/${ATTR_CMD} {} \; >> ${V2_ATTR_FILE}
+       sudo find ./${BASE_DIR_NEW} -type d -printf '"/%P" Regular 14 %04m:%04U:%04G:' -exec ${COMMON_BINDIR}/${ATTR_CMD} {} \; >> ${V2_ATTR_FILE}
+
+       ################ Change user and group permission from '0' to '0000' ############
+       sed -i 's/:   0/:0000/g' ${V1_ATTR_FILE}
+       sed -i 's/:   /:000/g' ${V1_ATTR_FILE}
+       sed -i 's/:  /:00/g' ${V1_ATTR_FILE}
+       sed -i 's/: /:0/g' ${V1_ATTR_FILE}
+
+       sed -i 's/:   0/:0000/g' ${V2_ATTR_FILE}
+       sed -i 's/:   /:000/g' ${V2_ATTR_FILE}
+       sed -i 's/:  /:00/g' ${V2_ATTR_FILE}
+       sed -i 's/: /:0/g' ${V2_ATTR_FILE}
+
+       ################ Change ":./old" to "/" ############
+       sed -i "s/:.\/${BASE_DIR_OLD}\//:\//" ${V1_ATTR_FILE}
+       sed -i "s/:.\/${BASE_DIR_OLD}/:\//" ${V1_ATTR_FILE}
+       sed -i "s/:.\/${BASE_DIR_NEW}\//:\//" ${V2_ATTR_FILE}
+       sed -i "s/:.\/${BASE_DIR_NEW}/:\//" ${V2_ATTR_FILE}
+
+       echo "Attribute generation for ${TARGET} [END] $(date +%T)"
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_mk_full_img
+#
+
+fn_mk_full_img()
+{
+       PART_IMG_ORG=${PART_BIN}
+       PART_IMG_OLD=${PART_IMG_ORG}.old
+       PART_IMG_NEW=${PART_IMG_ORG}.new
+
+       #---- untar partition image and rename for old & new ----
+       echo "untar ${OLD_TAR_FILE}"
+       tar xvf ${OLD_TAR_DIR}/${OLD_TAR_FILE} ${PART_IMG_ORG}
+       if [ "$?" != "0" ]; then
+               return 1;
+       fi
+       sudo mv ${PART_IMG_ORG} ${PART_IMG_OLD}
+
+       echo "untar ${NEW_TAR_FILE}"
+       tar xvf ${NEW_TAR_DIR}/${NEW_TAR_FILE} ${PART_IMG_ORG}
+       if [ "$?" != "0" ]; then
+               return 1;
+       fi
+       sudo mv ${PART_IMG_ORG} ${PART_IMG_NEW}
+
+       #---- check whether the binaries are same ----
+       sudo diff ${PART_IMG_OLD} ${PART_IMG_NEW}
+       if [ "$?" = "0" ]; then
+               echo "[${PART_NAME}] two binaries are same"
+               sudo rm -rf ${PART_IMG_OLD}
+               sudo rm -rf ${PART_IMG_NEW}
+               return 1
+       fi
+       sudo python ${COMMON_BINDIR}/CreatePatch.py ${UPDATE_TYPE} ${PART_NAME} ${PART_IMG_OLD} ${PART_IMG_NEW} ${OUTPUT_DIR} ${UPDATE_CFG_PATH}
+       PythonRet=$?
+       sudo rm ${PART_IMG_OLD}
+       sudo mv ${PART_IMG_NEW} ${OUTPUT_DIR}/${PART_IMG_ORG}
+       sudo chown ${MY_ID}:${MY_ID} ${OUTPUT_DIR}/${PART_IMG_ORG}
+
+       if [ $PythonRet != "0" ]; then
+               echo "[${PART_NAME}] Failed to generate DELTA"
+               exit 1
+       fi
+       return 0
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_mk_delta_img_core
+#
+# Description :
+#      core routine for delta generation
+#      Image Update Type
+#
+
+fn_mk_delta_img_core()
+{
+
+       START_TIMESTAMP="generation of ${DELTA} [START] $(date +%T)"
+
+       #---- untar partition image and rename for old & new ----
+       echo "untar ${OLD_TAR_FILE}"
+       tar xvf ${OLD_TAR_DIR}/${OLD_TAR_FILE} ${PART_IMG_ORG}
+       if [ "$?" != "0" ]; then
+               return 1;
+       fi
+       sudo mv ${PART_IMG_ORG} ${PART_IMG_OLD}
+
+       echo "untar ${NEW_TAR_FILE}"
+       tar xvf ${NEW_TAR_DIR}/${NEW_TAR_FILE} ${PART_IMG_ORG}
+       if [ "$?" != "0" ]; then
+               return 1;
+       fi
+       sudo mv ${PART_IMG_ORG} ${PART_IMG_NEW}
+
+       #---- check whether the binaries are same ----
+       sudo diff ${PART_IMG_OLD} ${PART_IMG_NEW}
+       if [ "$?" = "0" ]; then
+               echo "[${PART_NAME}] two binaries are same"
+               sudo rm -rf ${PART_IMG_OLD}
+               sudo rm -rf ${PART_IMG_NEW}
+               return 1
+       fi
+
+       #---- make delta file ----
+       echo "make ${DELTA}"
+       sudo python ${COMMON_BINDIR}/CreatePatch.py ${UPDATE_TYPE} ${PART_NAME} ${PART_IMG_OLD} ${PART_IMG_NEW} ${OUTPUT_DIR} ${UPDATE_CFG_PATH}
+       PythonRet=$?
+       #sudo xdelta delta ${PART_IMG_OLD} ${PART_IMG_NEW} ./${OUTPUT_DIR}/${PART_NAME}".delta"
+
+       #---- remove files & directories ----
+       echo "remove temp files/directory & move delta/log to result directory"
+       sudo rm -rf ${PART_IMG_OLD}
+       sudo rm -rf ${PART_IMG_NEW}
+       sudo rm -rf x
+
+       if [ $PythonRet != "0" ]; then
+               echo "[${PART_NAME}] Failed to generate DELTA"
+               exit 1
+       fi
+       END_TIMESTAMP="generation of ${DELTA} [END] $(date +%T)"
+       echo "${START_TIMESTAMP}"
+       echo "${END_TIMESTAMP}"
+
+
+       return 0
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_mk_delta_img
+#
+# Description :
+#      generate delta file for image type partition
+#
+
+fn_mk_delta_img()
+{
+       PART_IMG_ORG=${PART_BIN}
+       PART_IMG_OLD=${PART_IMG_ORG}.old
+       PART_IMG_NEW=${PART_IMG_ORG}.new
+
+       DELTA=${DELTA_BIN}
+       DEBUG_DELTA=debug_${DELTA}
+       #CFG_XML=${PART_NAME}_IMG.xml
+       mkdir ${OUTPUT_DIR}/i
+       fn_mk_delta_img_core
+       if [ "$?" != "0" ]; then
+               return 1
+       fi
+
+       return 0
+
+}
+#------------------------------------------------------------------------------
+# Function :
+#      fn_mk_inter_attrib_file
+#
+# Description :
+#      makes a list of attributes for those files which differ between v1 and v2
+#      (creates the intermediate attribute list)
+fn_mk_inter_attrib_file()
+{
+       V1ref_ATTR_FILE=${OUTPUT_DIR}/v1_${TARGET}_attr.txt
+       V2ref_ATTR_FILE=${OUTPUT_DIR}/v2_${TARGET}_attr.txt
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_mk_delta_fs_core
+#
+# Description :
+#      core routine for delta generation
+#      File System Update Type (DELTA_FS)
+#
+
+fn_mk_delta_fs_core()
+{
+       START_TIMESTAMP="generation of ${DELTA} [START] $(date +%T)"
+       FS=ext4
+
+       if [ "${DELTA}" = "boot.img/" ]; then
+               FS=vfat
+       fi
+
+       #---- untar partition image and rename for old & new ----
+       echo "untar ${OLD_TAR_FILE}"
+       tar xvf ${OLD_TAR_DIR}/${OLD_TAR_FILE} ${PART_IMG_ORG}
+       if [ "$?" != "0" ]; then
+               return 1;
+       fi
+       sudo mv ${PART_IMG_ORG} ${PART_IMG_OLD}
+
+       echo "untar ${NEW_TAR_FILE}"
+       tar xvf ${NEW_TAR_DIR}/${NEW_TAR_FILE} ${PART_IMG_ORG}
+       if [ "$?" != "0" ]; then
+               return 1;
+       fi
+       sudo mv ${PART_IMG_ORG} ${PART_IMG_NEW}
+
+       #---- check whether the binaries are same ----
+       sudo diff ${PART_IMG_OLD} ${PART_IMG_NEW}
+       if [ "$?" = "0" ]; then
+               echo "[${PART_NAME}] two binaries are same"
+               sudo rm -rf ${PART_IMG_OLD}
+               sudo rm -rf ${PART_IMG_NEW}
+               sudo rm -rf ${OUTPUT_DIR}
+               return 1
+       fi
+
+       #---- make mount point ----
+       mkdir -p ${MNT_PNT_OLD}
+       mkdir -p ${MNT_PNT_NEW}
+
+       #---- mount partition image to mount point  ----
+       echo "mount ${PART_IMG_OLD} & ${PART_IMG_NEW}"
+       sudo mount -t ${FS} -o loop ${PART_IMG_OLD} ${MNT_PNT_OLD}
+       if [ "$?" != "0" ]; then
+               return 1;
+       fi
+       sudo mount -t ${FS} -o loop ${PART_IMG_NEW} ${MNT_PNT_NEW}
+       if [ "$?" != "0" ]; then
+               return 1;
+       fi
+
+       #---- remove unnecessary files & directories  ----
+       for ff in ${EXCLUDE_FILES}
+       do
+               sudo rm -rf ${MNT_PNT_OLD}/${ff}
+               sudo rm -rf ${MNT_PNT_NEW}/${ff}
+       done
+
+       #---- make attribute ----
+       fn_mk_attribute ${PART_NAME} ${BASE_OLD} ${BASE_NEW}
+
+       #PART_IMG_ORG should match the DELTA name in the default config file; it is used to update the MAX size file present in the respective partition.
+       sudo python ${COMMON_BINDIR}/CreatePatch.py ${UPDATE_TYPE} ${PART_NAME} ${BASE_OLD} ${BASE_NEW} ${OUTPUT_DIR} ${V1_ATTR_FILE} ${V2_ATTR_FILE} ${UPDATE_CFG_PATH}
+       PythonRet=$?
+
+       #---- unmount partition image ----
+       echo "umount ${MNT_PNT_OLD} & ${MNT_PNT_NEW}"
+       sudo umount ${MNT_PNT_OLD}
+       sudo umount ${MNT_PNT_NEW}
+
+       #---- remove files & directories ----
+       echo "remove temp files/directory & move delta/log to result directory"
+       sudo rm -rf ${PART_IMG_OLD}
+       sudo rm -rf ${PART_IMG_NEW}
+       sudo rm -rf ${BASE_OLD} ${BASE_NEW}
+       sudo rm -rf x
+
+       #---- extra cleanup ----
+       sudo mv ./file_attr_updated_image.txt ${OUTPUT_DIR}/${PART_NAME}_file_attr_updated_image.txt
+
+       if [ $PythonRet != "0" ]; then
+               echo "[${PART_NAME}] Failed to generate DELTA"
+               exit 1
+       fi
+       END_TIMESTAMP="generation of ${DELTA} [END] $(date +%T)"
+       echo "${START_TIMESTAMP}"
+       echo "${END_TIMESTAMP}"
+
+       return 0
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_mk_delta_fs
+#
+# Description :
+#      generate delta file for filesystem type partition
+#
+
+fn_mk_delta_fs()
+{
+       PART_IMG_ORG=${PART_BIN}
+       PART_IMG_OLD=${PART_IMG_ORG}.old
+       PART_IMG_NEW=${PART_IMG_ORG}.new
+
+       DELTA=${DELTA_BIN}
+       DEBUG_DELTA=debug_${DELTA}
+       #CFG_XML=${PART_NAME}_FS.xml
+
+       FAKE_ROOT=system
+       BASE_OLD=${PART_NAME}_OLD
+       BASE_NEW=${PART_NAME}_NEW
+
+       #--- !!! this should be modified according to partition ---
+       case "${PART_NAME}" in
+       "ROOTFS")
+               EXCLUDE_FILES="lost+found dev proc tmp var sys csa"
+               TGT_MNT_PNT=${FAKE_ROOT}
+               ;;
+       "CSC")
+               EXCLUDE_FILES="lost+found"
+               TGT_MNT_PNT=${FAKE_ROOT}/opt/system/csc
+               ;;
+       "MODULE")
+               EXCLUDE_FILES="lost+found"
+               TGT_MNT_PNT=${FAKE_ROOT}/lib/modules
+               ;;
+       "RAMDISK2")
+               EXCLUDE_FILES="lost+found"
+               TGT_MNT_PNT=
+               ;;
+       "USER")
+               EXCLUDE_FILES="lost+found"
+               TGT_MNT_PNT=${FAKE_ROOT}/opt/usr
+               ;;
+       "SYSDATA")
+               EXCLUDE_FILES="lost+found"
+               TGT_MNT_PNT=${FAKE_ROOT}/opt
+               ;;
+       "KERNEL")
+               EXCLUDE_FILES=""
+               TGT_MNT_PNT=${FAKE_ROOT}/boot
+               ;;
+       *)
+               echo "${PART_NAME} not supported !!!"
+               return 1
+               ;;
+       esac
+
+       MNT_PNT_OLD=${BASE_OLD}/${TGT_MNT_PNT}
+       MNT_PNT_NEW=${BASE_NEW}/${TGT_MNT_PNT}
+
+
+       fn_mk_delta_fs_core
+       if [ "$?" != "0" ]; then
+               return 1
+       fi
+
+       return 0
+}
+
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_get_tar_file_names
+#
+# Description :
+#      find tar file name (without path) that contains given partition binary
+#
+
+fn_get_tar_file_names()
+{
+       BIN_TO_FIND=$1
+
+       cd ${DATA_DIR}/${OLD_TAR_DIR}
+       OLD_TAR_LIST=`ls *.tar *.tar.gz *.tgz *.tar.md5 2>/dev/null`
+       cd ${CUR_DIR}
+
+       cd ${DATA_DIR}/${NEW_TAR_DIR}
+       NEW_TAR_LIST=`ls *.tar *.tar.gz *.tgz *.tar.md5 2>/dev/null`
+       cd ${CUR_DIR}
+
+       #--- find which tar contains the binary ---
+       for tar_file in ${OLD_TAR_LIST}
+       do
+               result=`tar -tvf ${DATA_DIR}/${OLD_TAR_DIR}/${tar_file} | grep " ${BIN_TO_FIND}$"`
+               if [ "$?" = "0" ]; then
+                       OLD_TAR_FILE=${tar_file}
+                       break
+               fi
+       done
+
+       for tar_file in ${NEW_TAR_LIST}
+       do
+               result=`tar -tvf ${DATA_DIR}/${NEW_TAR_DIR}/${tar_file} | grep " ${BIN_TO_FIND}$"`
+               if [ "$?" = "0" ]; then
+                       NEW_TAR_FILE=${tar_file}
+                       break
+               fi
+       done
+
+}
+
+
+###############################################################################
+#==================
+# Main Start
+#==================
+
+fn_print_line - 80
+
+#--- assign input params to internal variables ---
+param_num=$#
+if [ "${param_num}" != "4" ]; then
+       echo "Invalid param num"
+       echo "[Usage] $0 PART_NAME UPDATE_TYPE PART_BIN DELTA_BIN"
+       exit 1
+fi
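+# e.g. with the values from cfg/delta.cfg and cfg/default_part.cfg:
+#      ./bin/mk_part_delta.sh ROOTFS DELTA_FS rootfs.img rootfs.img/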
+
+PART_NAME=$1
+UPDATE_TYPE=$2
+PART_BIN=$3
+DELTA_BIN=$4
+
+#--- set some default variables ---
+MY_ID=`whoami`
+CUR_DIR=`pwd`
+DATA_DIR=./data
+CFG_DIR=./cfg
+#XML_DIR=./xml
+COMMON_BINDIR=${CUR_DIR}/../common/bin
+UPDATE_CFG_PATH=./cfg/update.cfg
+
+#--- check if the current working directory is the parent directory of bin, data, cfg, xml ---
+TEST_DIR=${DATA_DIR}
+if [ ! -d ${TEST_DIR} ]; then
+       echo "Invalid working directory. ${TEST_DIR} does not exist"
+       exit 1
+fi
+
+TEST_DIR=${CFG_DIR}
+if [ ! -d ${TEST_DIR} ]; then
+       echo "Invalid working directory. ${TEST_DIR} does not exist"
+       exit 1
+fi
+
+#TEST_DIR=${XML_DIR}
+#if [ ! -d ${TEST_DIR} ]; then
+#      echo "Invalid working directory. ${TEST_DIR} does not exist"
+#      exit 1
+#fi
+
+#--- get old/new tar file which includes $PART_BIN ---
+if [ "z${OLD_TAR_DIR}" = "z" ]; then
+       OLD_TAR_DIR="old_tar"
+fi
+if [ "z${NEW_TAR_DIR}" = "z" ]; then
+       NEW_TAR_DIR="new_tar"
+fi
+OLD_TAR_FILE=
+NEW_TAR_FILE=
+
+fn_get_tar_file_names ${PART_BIN}
+
+if [ "z${OLD_TAR_FILE}" = "z" ]; then
+       echo "[old] tar file does not exist in ${DATA_DIR}/${OLD_TAR_DIR} which contains ${PART_BIN}"
+       #exit 0, because this is not an error.
+       exit 0
+fi
+
+if [ "z${NEW_TAR_FILE}" = "z" ]; then
+       echo "[new] tar file does not exist in ${DATA_DIR}/${NEW_TAR_DIR} which contains ${PART_BIN}"
+       exit 0
+fi
+
+echo "[old] ${OLD_TAR_FILE} contains ${PART_BIN}"
+echo "[new] ${NEW_TAR_FILE} contains ${PART_BIN}"
+
+
+#--- generate delta binary ---
+
+cd ${DATA_DIR}
+
+OUTPUT_DIR=${PART_NAME}_OUT
+if [ ! -d ${OUTPUT_DIR} ]; then
+       if [ -f ${OUTPUT_DIR} ]; then
+               sudo rm -f ${OUTPUT_DIR}
+       fi
+       mkdir ${OUTPUT_DIR}
+fi
+
+if [ "${UPDATE_TYPE}" = "FULL_IMG" ]; then
+       fn_mk_full_img
+elif [ "${UPDATE_TYPE}" = "DELTA_IMG" ]; then
+       fn_mk_delta_img
+elif [ "${UPDATE_TYPE}" = "DELTA_FS" ]; then
+       fn_mk_delta_fs
+else
+       echo "Invalid update type"
+       exit 1
+fi
+
+exit 0
diff --git a/mk_delta/rpi3/bin/pack_delta_tar.sh b/mk_delta/rpi3/bin/pack_delta_tar.sh
new file mode 100755 (executable)
index 0000000..9647270
--- /dev/null
@@ -0,0 +1,43 @@
+#!/bin/sh
+
+#------------------------------------------------------------------------------
+# Function :
+#      get_last_result_dir
+#
+
+get_last_result_dir()
+{
+       # Getting date and minor version
+       MONDATE=$(date +%m%d)
+       i=1
+       while [ -d "result/${MONDATE}_${i}" ]
+       do
+               i=$(($i + 1))
+       done
+
+       if [ "$i" = "1" ]; then
+               RESDIR=
+       else
+               j=$(($i - 1))
+               RESDIR=result/${MONDATE}_${j}/FW_DELTA
+       fi
+
+}
+
+###############################################################################
+#==================
+# Main Start
+#==================
+
+get_last_result_dir
+
+echo "Enter last result directory (if no input is given, ${RESDIR} will be used) : "
+read WORK_DIR
+
+if [ "x${WORK_DIR}" = "x" ]; then
+       WORK_DIR=${RESDIR}
+fi
+
+cd ${WORK_DIR}/DELTA
+tar --overwrite -cvf ../delta.tar *
+cd -
diff --git a/mk_delta/rpi3/cfg/default_part.cfg b/mk_delta/rpi3/cfg/default_part.cfg
new file mode 100644 (file)
index 0000000..2f82595
--- /dev/null
@@ -0,0 +1,6 @@
+# Default Partition information file
+# Partition Name (in PIT), delta name, update type, blk dev, blk offset
+
+ROOTFS         rootfs.img/                     DELTA_FS        /dev/mmcblk0p2          0
+RAMDISK1       ramdisk.img/                    DELTA_FS        /dev/mmcblk0p7          0
+RAMDISK2       ramdisk-recovery.img            EXTRA           /dev/mmcblk0p8          0
diff --git a/mk_delta/rpi3/cfg/delta.cfg b/mk_delta/rpi3/cfg/delta.cfg
new file mode 100644 (file)
index 0000000..e3250e7
--- /dev/null
@@ -0,0 +1,6 @@
+# Configuration for generation of delta
+# Partition Name (in PIT), bin name (in tar), update type
+
+ROOTFS         rootfs.img                      DELTA_FS
+RAMDISK1       ramdisk.img                     DELTA_FS
+RAMDISK2       ramdisk-recovery.img            EXTRA
diff --git a/mk_delta/rpi3/data/new_tar/.gitignore b/mk_delta/rpi3/data/new_tar/.gitignore
new file mode 100755 (executable)
index 0000000..e69de29
diff --git a/mk_delta/rpi3/data/old_tar/.gitignore b/mk_delta/rpi3/data/old_tar/.gitignore
new file mode 100755 (executable)
index 0000000..e69de29
diff --git a/mk_delta/tw1/bin/batch_mk_delta.sh b/mk_delta/tw1/bin/batch_mk_delta.sh
new file mode 100755 (executable)
index 0000000..fdf5e1e
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+#export FW_BW="BW_DELTA"
+export FW_BW="FW_DELTA"
+
+./bin/mk_delta.sh
diff --git a/mk_delta/tw1/bin/mk_delta.sh b/mk_delta/tw1/bin/mk_delta.sh
new file mode 100755 (executable)
index 0000000..32eb61c
--- /dev/null
@@ -0,0 +1,366 @@
+#!/bin/bash
+
+
+#==================
+# functions
+#==================
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_read_delta_cfg
+#
+# Description :
+#      read configuration file for delta generation.
+#      config file format : PART_NAME PART_BIN UPDATE_TYPE (delimiter : TAB)
+#
+
+fn_read_delta_cfg()
+{
+       in_file=$1
+
+       if [ ! -r ${in_file} ]; then
+               echo "file ${in_file} cannot be read"
+               return 1
+       fi
+
+       line_num=0
+       while read line
+       do
+               # skip comment line
+               if [ "${line:0:1}" = "#" ]; then
+                       continue
+               fi
+
+               # skip empty line
+               line_len=${#line}
+               if [ "${line_len}" = "0" ]; then
+                       continue
+               fi
+
+
+               fields=(${line})
+               PART_NAME_delta[${line_num}]=${fields[0]}
+               PART_BIN_delta[${line_num}]=${fields[1]}
+               UPDATE_TYPE_delta[${line_num}]=${fields[2]}
+
+               line_num=$(($line_num + 1))
+
+       done < ${in_file}
+
+       PART_NUM_delta=${line_num}
+
+       return 0
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_read_part_cfg
+#
+# Description :
+#      read configuration file for partition information.
+#      config file format : PART_NAME DELTA_BIN UPDATE_TYPE BLK_DEV BLK_OFFSET (delimiter : TAB)
+#
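+# Example (illustrative, taken from the cfg/default_part.cfg entries added in this commit):
+#      a line such as "ROOTFS<TAB>rootfs.img/<TAB>DELTA_FS<TAB>/dev/mmcblk0p13<TAB>0" fills
+#      PART_NAME_part[n], DELTA_BIN_part[n], UPDATE_TYPE_part[n], BLK_DEV_part[n] and
+#      BLK_OFFSET_part[n] in that order; PART_NUM_part holds the number of such lines.
+#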
+
+fn_read_part_cfg()
+{
+       in_file=$1
+
+       if [ ! -r ${in_file} ]; then
+               echo "file ${in_file} cannot be read"
+               return 1
+       fi
+
+       line_num=0
+       while read line
+       do
+               # skip comment line
+               if [ "${line:0:1}" = "#" ]; then
+                       continue
+               fi
+
+               # skip empty line
+               line_len=${#line}
+               if [ "${line_len}" = "0" ]; then
+                       continue
+               fi
+
+
+               fields=(${line})
+               PART_NAME_part[${line_num}]=${fields[0]}
+               DELTA_BIN_part[${line_num}]=${fields[1]}
+               UPDATE_TYPE_part[${line_num}]=${fields[2]}
+               BLK_DEV_part[${line_num}]=${fields[3]}
+               BLK_OFFSET_part[${line_num}]=${fields[4]}
+
+               line_num=$(($line_num + 1))
+
+       done < ${in_file}
+
+       PART_NUM_part=${line_num}
+
+       return 0
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_gen_update_cfg
+#
+# Description :
+#      generate configuration file to be used during update.
+#
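+# Example (illustrative): for every partition listed in both delta.cfg and the part cfg,
+#      one tab-separated line is written in the order
+#      PART_NAME  DELTA_BIN  UPDATE_TYPE  BLK_DEV  BLK_OFFSET
+#      e.g. "ROOTFS  rootfs.img/  DELTA_FS  /dev/mmcblk0p13  0" with the tw1 cfg files in this commit.
+#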
+
+fn_gen_update_cfg()
+{
+       out_file=$1
+
+       update_idx=0
+
+       for ((delta_idx=0;delta_idx<PART_NUM_delta;delta_idx++))
+       do
+               FOUND=0
+               for ((part_idx=0;part_idx<PART_NUM_part;part_idx++))
+               do
+                       if [ "${PART_NAME_part[${part_idx}]}" = "${PART_NAME_delta[${delta_idx}]}" ]; then
+                               FOUND=1
+                               break
+                       fi
+               done
+
+               if [ "${FOUND}" = "1" ]; then
+                       #-- PART_NAME, DELTA_BIN, BLK_DEV, BLK_OFFSET are from part.cfg
+                       PART_NAME[${update_idx}]=${PART_NAME_part[${part_idx}]}
+                       DELTA_BIN[${update_idx}]=${DELTA_BIN_part[${part_idx}]}
+                       BLK_DEV[${update_idx}]=${BLK_DEV_part[${part_idx}]}
+                       BLK_OFFSET[${update_idx}]=${BLK_OFFSET_part[${part_idx}]}
+
+                       #-- PART_BIN, UPDATE_TYPE are from delta.cfg
+                       PART_BIN[${update_idx}]=${PART_BIN_delta[${delta_idx}]}
+                       UPDATE_TYPE[${update_idx}]=${UPDATE_TYPE_delta[${delta_idx}]}
+
+                       update_idx=$(($update_idx + 1))
+               fi
+
+       done
+
+       PART_NUM=${update_idx}
+
+       if [ -f ${out_file} ]; then
+               sudo rm -fr ${out_file}
+       fi
+
+       for ((update_idx=0;update_idx<PART_NUM;update_idx++))
+       do
+               f1=${PART_NAME[${update_idx}]}
+               f2=${DELTA_BIN[${update_idx}]}
+               f3=${UPDATE_TYPE[${update_idx}]}
+               f4=${BLK_DEV[${update_idx}]}
+               f5=${BLK_OFFSET[${update_idx}]}
+
+               echo -e "$f1\t\t$f2\t\t$f3\t\t$f4\t\t$f5" >> ${out_file}
+       done
+
+       return 0
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_set_default_params
+#
+# Description :
+#      set default params used globally in this script
+#
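+# Example (illustrative): when run on July 14 with result/0714_1 already present,
+#      MONDATE becomes "0714_2", so every run writes into a fresh result/<MMDD>_<n>
+#      directory; OLD_TAR_DIR/NEW_TAR_DIR default to "old_tar"/"new_tar" unless exported.
+#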
+
+fn_set_default_params()
+{
+       MY_ID=`whoami`
+       MK_PART_DELTA=./bin/mk_part_delta.sh
+       DELTA_CFG_PATH=./cfg/delta.cfg
+       PART_CFG_PATH=./cfg/default_part.cfg
+       UPDATE_CFG_PATH=./cfg/update.cfg
+
+       LOG_PATH=./data/Delta.log
+       # Getting date and minor version
+       MONDATE=$(date +%m%d)
+       i=1
+       while [ -d "result/${MONDATE}_${i}" ]
+       do
+               i=$(($i + 1))
+       done
+       MONDATE="${MONDATE}_${i}"
+       echo "MONDATE = ${MONDATE}"
+
+       if [ "z${OLD_TAR_DIR}" = "z" ]; then
+               export OLD_TAR_DIR="old_tar"
+       fi
+       if [ "z${NEW_TAR_DIR}" = "z" ]; then
+               export NEW_TAR_DIR="new_tar"
+       fi
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_swap_binary_files
+#
+# Description :
+#      change old & new for making backward delta
+#
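+# Example (illustrative): with the defaults, OLD_TAR_DIR="old_tar"/NEW_TAR_DIR="new_tar"
+#      becomes OLD_TAR_DIR="new_tar"/NEW_TAR_DIR="old_tar", so the subsequent
+#      mk_part_delta.sh runs generate the delta in the reverse (backward) direction.
+#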
+
+fn_swap_binary_files()
+{
+       TEMP_TAR_DIR=${OLD_TAR_DIR}
+       export OLD_TAR_DIR=${NEW_TAR_DIR}
+       export NEW_TAR_DIR=${TEMP_TAR_DIR}
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_print_update_cfg
+
+fn_print_update_cfg()
+{
+       for ((update_idx=0;update_idx<PART_NUM;update_idx++))
+       do
+               echo "------------------------------"
+               echo "[PART_NAME]    ${PART_NAME[${update_idx}]}"
+               echo "[DELTA_BIN]    ${DELTA_BIN[${update_idx}]}"
+               echo "[BLK_DEV]      ${BLK_DEV[${update_idx}]}"
+               echo "[BLK_OFFSET]   ${BLK_OFFSET[${update_idx}]}"
+               echo "[PART_BIN]     ${PART_BIN[${update_idx}]}"
+               echo "[UPDATE_TYPE]  ${UPDATE_TYPE[${update_idx}]}"
+
+       done
+       echo "------------------------------"
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_process_extra_delta
+#
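+# Description (summary of the code below) :
+#      handle partitions whose update type is EXTRA: for RAMDISK2 either a delta image
+#      (RAMDISK2_FULL_IMG=0, the hard-coded default) or a full image (RAMDISK2_FULL_IMG=1)
+#      of the recovery partition is produced via mk_part_delta.sh; for RECOVERY only a
+#      "1" flag file is written to the delta directory.
+#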
+
+fn_process_extra_delta()
+{
+       RAMDISK2_FULL_IMG=0
+       if [ "${part_name}" = "RAMDISK2" ]; then
+               if [ "${RAMDISK2_FULL_IMG}" = "0" ]; then
+                       #--- delta recovery partition ---
+                       ${MK_PART_DELTA} ${part_name} DELTA_IMG ${part_bin} ${delta_bin}
+                       if [ "$?" = "0" ]; then
+                               sudo mv data/${PART_OUT} ${RESULT_DIR}/${part_bin}
+                               sudo mv ${RESULT_DIR}/${part_bin}/${delta_bin} ${DELTA_DIR}
+                       fi
+               fi
+               if [ "${RAMDISK2_FULL_IMG}" = "1" ]; then
+                       #--- full recovery partition ---
+                       ${MK_PART_DELTA} ${part_name} FULL_IMG ${part_bin} ${delta_bin}
+                       if [ "$?" = "0" ]; then
+                               sudo mv data/${PART_OUT} ${RESULT_DIR}/${part_bin}
+                               sudo mv ${RESULT_DIR}/${part_bin}/${part_bin} ${DELTA_DIR}
+                       fi
+               fi
+       elif [ "${part_name}" = "RECOVERY" ]; then
+               #--- recovery kernel ---
+               # write the flag via tee so the redirection is also performed with root privileges
+               echo "1" | sudo tee ${DELTA_DIR}/${delta_bin} > /dev/null
+       elif [ "${part_name}" = "EXTRA" ]; then
+               echo "---"
+       fi
+}
+
+###############################################################################
+#==================
+# Main Start
+#==================
+
+fn_set_default_params
+
+fn_read_delta_cfg ${DELTA_CFG_PATH}
+
+fn_read_part_cfg ${PART_CFG_PATH}
+
+fn_gen_update_cfg ${UPDATE_CFG_PATH}
+
+COMMON_BINDIR=${PWD}/../common/bin
+
+
+################### Make delta Forward and Backward #####################
+if [ "z${FW_BW}" = "z" ]; then
+       #FW_BW="FW_DELTA BW_DELTA"
+       FW_BW="FW_DELTA"
+fi
+
+START_TIMESTAMP="$(date +%T)"
+
+for Make_Delta in $FW_BW
+do
+       #--- archive result directory ---
+       echo "Make $Make_Delta Delta file"
+       RESULT_DIR=result/$MONDATE/$Make_Delta
+       DELTA_DIR=${RESULT_DIR}/DELTA
+       mkdir -p ${DELTA_DIR}
+
+       if [ "$Make_Delta" = "BW_DELTA" ]; then
+               fn_swap_binary_files
+       fi
+
+
+       #--- make delta for each partition ---
+       for ((part_num=0;part_num<PART_NUM;part_num++))
+       do
+               part_name=${PART_NAME[${part_num}]}
+               update_type=${UPDATE_TYPE[${part_num}]}
+               part_bin=${PART_BIN[${part_num}]}
+               delta_bin=${DELTA_BIN[${part_num}]}
+
+               PART_OUT=${part_name}_OUT
+
+               if [ "${update_type}" = "EXTRA" ]; then
+                       fn_process_extra_delta
+               else
+                       ${MK_PART_DELTA} ${part_name} ${update_type} ${part_bin} ${delta_bin}
+                       if [ "$?" = "0" ]; then
+                               if [ "${update_type}" = "DELTA_FS" ]; then
+                                       sudo mv data/${PART_OUT} ${RESULT_DIR}/${part_bin}
+                                       sudo mv ${RESULT_DIR}/${part_bin} ${DELTA_DIR}
+                               else
+                                       sudo mv data/${PART_OUT} ${RESULT_DIR}/${part_bin}
+                                       sudo mv ${RESULT_DIR}/${part_bin}/${delta_bin} ${DELTA_DIR}
+                               fi
+                       else
+                               sudo rm -rf ${RESULT_DIR}/${part_bin}
+                               echo "Error: Abort Delta Generation"
+                               exit 1
+                       fi
+               fi
+               sudo rm -rf ${RESULT_DIR}/${part_bin}
+       done
+
+       #move update.cfg to delta directory
+       if [ -r ${UPDATE_CFG_PATH} ]; then
+               sudo cp ${UPDATE_CFG_PATH} ${DELTA_DIR}/update.cfg
+               sudo rm ${UPDATE_CFG_PATH}
+       fi
+       if [ -r ${LOG_PATH} ]; then
+               sudo cp ${LOG_PATH} ${RESULT_DIR}/Delta.log
+               sudo rm ${LOG_PATH}
+       fi
+
+       #--- archive result directory ---
+       cd result/$MONDATE
+       echo "tar result directory"
+       sudo tar cf ${MONDATE}_${Make_Delta}.tar $Make_Delta
+       cd ../..
+
+       echo "Finished making delta binary ($Make_Delta)"
+done
+
+if [ -r ${UPDATE_CFG_PATH} ]; then
+       sudo rm ${UPDATE_CFG_PATH}
+fi
+cd ${DELTA_DIR}
+sudo cp ${COMMON_BINDIR}/unpack.sh ./
+sudo tar --overwrite -cf ../delta.tar *
+cd -
+
+END_TIMESTAMP="$(date +%T)"
+
+echo "OVERALL TIMESTAMP : [START] ${START_TIMESTAMP} ~ [END] ${END_TIMESTAMP}"
+
diff --git a/mk_delta/tw1/bin/mk_part_delta.sh b/mk_delta/tw1/bin/mk_part_delta.sh
new file mode 100755 (executable)
index 0000000..b6fa8cf
--- /dev/null
@@ -0,0 +1,504 @@
+#!/bin/bash
+
+
+#==================
+# functions
+#==================
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_print_line
+#
+
+fn_print_line()
+{
+       # CHAR and NUM are accepted so the separator could be made configurable,
+       # but they are currently unused; a fixed-width line is printed.
+       CHAR=$1
+       NUM=$2
+
+       echo "*******************************************************************"
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_mk_attribute
+#
+# Params :
+#      $1 - PART_NAME, used to discriminate between partitions
+#      $2 - base directory to search files for old partition
+#      $3 - base directory to search files for new partition
+#
+# Description :
+#      extract attribute information of files on the given partition
+#
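+# Example (illustrative): after the sed post-processing below, a line of the generated
+#      v1/v2_<TARGET>_attr.txt looks roughly like
+#      "/usr/bin/foo" Regular 14 0755:0000:0000:<whatever extended_attr.sh prints>
+#      i.e. find's space-padded mode/uid/gid fields are turned into 4-digit zero-padded
+#      values and ./<BASE_DIR> prefixes are rewritten to /.
+#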
+
+fn_mk_attribute()
+{
+       TARGET=$1
+       BASE_DIR_OLD=$2
+       BASE_DIR_NEW=$3
+       V1_ATTR_FILE=${OUTPUT_DIR}/v1_${TARGET}_attr.txt
+       V1_NAME_FILE=${OUTPUT_DIR}/v1_list.txt
+       V2_ATTR_FILE=${OUTPUT_DIR}/v2_${TARGET}_attr.txt
+       V2_NAME_FILE=${OUTPUT_DIR}/v2_list.txt
+       V23_NAME_FILE=v23_${TARGET}_attr.txt
+       V13_NAME_FILE=v13_${TARGET}_attr.txt
+       ATTR_CMD=extended_attr.sh
+
+       echo "Attribute generation for ${TARGET} [START] $(date +%T)"
+
+       sudo find ./${BASE_DIR_OLD} -type f -printf '"/%P" Regular 14 %04m:%04U:%04G:' -exec ${COMMON_BINDIR}/${ATTR_CMD} {} \; >  ${V1_ATTR_FILE}
+       sudo find ./${BASE_DIR_OLD} -type l -printf '"/%P" SymLink 14 %04m:%04U:%04G:' -exec ${COMMON_BINDIR}/${ATTR_CMD} {} \; >> ${V1_ATTR_FILE}
+       sudo find ./${BASE_DIR_OLD} -type d -printf '"/%P" Regular 14 %04m:%04U:%04G:' -exec ${COMMON_BINDIR}/${ATTR_CMD} {} \; >> ${V1_ATTR_FILE}
+
+       sudo find ./${BASE_DIR_NEW} -type f -printf '"/%P" Regular 14 %04m:%04U:%04G:' -exec ${COMMON_BINDIR}/${ATTR_CMD} {} \; >  ${V2_ATTR_FILE}
+       sudo find ./${BASE_DIR_NEW} -type l -printf '"/%P" SymLink 14 %04m:%04U:%04G:' -exec ${COMMON_BINDIR}/${ATTR_CMD} {} \; >> ${V2_ATTR_FILE}
+       sudo find ./${BASE_DIR_NEW} -type d -printf '"/%P" Regular 14 %04m:%04U:%04G:' -exec ${COMMON_BINDIR}/${ATTR_CMD} {} \; >> ${V2_ATTR_FILE}
+
+       ################ Change user and group permission from '0' to '0000' ############
+       sed -i 's/:   0/:0000/g' ${V1_ATTR_FILE}
+       sed -i 's/:   /:000/g' ${V1_ATTR_FILE}
+       sed -i 's/:  /:00/g' ${V1_ATTR_FILE}
+       sed -i 's/: /:0/g' ${V1_ATTR_FILE}
+
+       sed -i 's/:   0/:0000/g' ${V2_ATTR_FILE}
+       sed -i 's/:   /:000/g' ${V2_ATTR_FILE}
+       sed -i 's/:  /:00/g' ${V2_ATTR_FILE}
+       sed -i 's/: /:0/g' ${V2_ATTR_FILE}
+
+       ################ Change ":./old" to "/" ############
+       sed -i "s/:.\/${BASE_DIR_OLD}\//:\//" ${V1_ATTR_FILE}
+       sed -i "s/:.\/${BASE_DIR_OLD}/:\//" ${V1_ATTR_FILE}
+       sed -i "s/:.\/${BASE_DIR_NEW}\//:\//" ${V2_ATTR_FILE}
+       sed -i "s/:.\/${BASE_DIR_NEW}/:\//" ${V2_ATTR_FILE}
+
+       echo "Attribute generation for ${TARGET} [END] $(date +%T)"
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_mk_full_img
+#
+
+fn_mk_full_img()
+{
+       PART_IMG_ORG=${PART_BIN}
+       PART_IMG_OLD=${PART_IMG_ORG}.old
+       PART_IMG_NEW=${PART_IMG_ORG}.new
+
+       #---- untar partition image and rename for old & new ----
+       echo "untar ${OLD_TAR_FILE}"
+       tar xvf ${OLD_TAR_DIR}/${OLD_TAR_FILE} ${PART_IMG_ORG}
+       if [ "$?" != "0" ]; then
+               return 1;
+       fi
+       sudo mv ${PART_IMG_ORG} ${PART_IMG_OLD}
+
+       echo "untar ${NEW_TAR_FILE}"
+       tar xvf ${NEW_TAR_DIR}/${NEW_TAR_FILE} ${PART_IMG_ORG}
+       if [ "$?" != "0" ]; then
+               return 1;
+       fi
+       sudo mv ${PART_IMG_ORG} ${PART_IMG_NEW}
+
+       #---- check whether the binaries are same ----
+       sudo diff ${PART_IMG_OLD} ${PART_IMG_NEW}
+       if [ "$?" = "0" ]; then
+               echo "[${PART_NAME}] two binaries are same"
+               sudo rm -rf ${PART_IMG_OLD}
+               sudo rm -rf ${PART_IMG_NEW}
+               return 1
+       fi
+       sudo python ${COMMON_BINDIR}/CreatePatch.py ${UPDATE_TYPE} ${PART_NAME} ${PART_IMG_OLD} ${PART_IMG_NEW} ${OUTPUT_DIR} ${UPDATE_CFG_PATH}
+       PythonRet=$?
+       sudo rm ${PART_IMG_OLD}
+       sudo mv ${PART_IMG_NEW} ${OUTPUT_DIR}/${PART_IMG_ORG}
+       sudo chown ${MY_ID}:${MY_ID} ${OUTPUT_DIR}/${PART_IMG_ORG}
+
+       if [ $PythonRet != "0" ]; then
+               echo "[${PART_NAME}] Failed to generate DELTA"
+               exit 1
+       fi
+       return 0
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_mk_delta_img_core
+#
+# Description :
+#      core routine for delta generation
+#      Image Update Type
+#
+
+fn_mk_delta_img_core()
+{
+
+       START_TIMESTAMP="generation of ${DELTA} [START] $(date +%T)"
+
+       #---- untar partition image and rename for old & new ----
+       echo "untar ${OLD_TAR_FILE}"
+       tar xvf ${OLD_TAR_DIR}/${OLD_TAR_FILE} ${PART_IMG_ORG}
+       if [ "$?" != "0" ]; then
+               return 1;
+       fi
+       sudo mv ${PART_IMG_ORG} ${PART_IMG_OLD}
+
+       echo "untar ${NEW_TAR_FILE}"
+       tar xvf ${NEW_TAR_DIR}/${NEW_TAR_FILE} ${PART_IMG_ORG}
+       if [ "$?" != "0" ]; then
+               return 1;
+       fi
+       sudo mv ${PART_IMG_ORG} ${PART_IMG_NEW}
+
+       #---- check whether the binaries are same ----
+       sudo diff ${PART_IMG_OLD} ${PART_IMG_NEW}
+       if [ "$?" = "0" ]; then
+               echo "[${PART_NAME}] two binaries are same"
+               sudo rm -rf ${PART_IMG_OLD}
+               sudo rm -rf ${PART_IMG_NEW}
+               return 1
+       fi
+
+       #---- make delta file ----
+       echo "make ${DELTA}"
+       sudo python ${COMMON_BINDIR}/CreatePatch.py ${UPDATE_TYPE} ${PART_NAME} ${PART_IMG_OLD} ${PART_IMG_NEW} ${OUTPUT_DIR} ${UPDATE_CFG_PATH}
+       PythonRet=$?
+       #sudo xdelta delta ${PART_IMG_OLD} ${PART_IMG_NEW} ./${OUTPUT_DIR}/${PART_NAME}".delta"
+
+       #---- remove files & directories ----
+       echo "remove temp files/directory & move delta/log to result directory"
+       sudo rm -rf ${PART_IMG_OLD}
+       sudo rm -rf ${PART_IMG_NEW}
+       sudo rm -rf x
+
+       if [ $PythonRet != "0" ]; then
+               echo "[${PART_NAME}] Failed to generate DELTA"
+               exit 1
+       fi
+       END_TIMESTAMP="generation of ${DELTA} [END] $(date +%T)"
+       echo "${START_TIMESTAMP}"
+       echo "${END_TIMESTAMP}"
+
+
+       return 0
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_mk_delta_img
+#
+# Description :
+#      generate delta file for image type partition
+#
+
+fn_mk_delta_img()
+{
+       PART_IMG_ORG=${PART_BIN}
+       PART_IMG_OLD=${PART_IMG_ORG}.old
+       PART_IMG_NEW=${PART_IMG_ORG}.new
+
+       DELTA=${DELTA_BIN}
+       DEBUG_DELTA=debug_${DELTA}
+       #CFG_XML=${PART_NAME}_IMG.xml
+       mkdir ${OUTPUT_DIR}/i
+       fn_mk_delta_img_core
+       if [ "$?" != "0" ]; then
+               return 1
+       fi
+
+       return 0
+
+}
+#------------------------------------------------------------------------------
+# Function :
+#      fn_mk_inter_attrib_file
+#
+# Description :
+#      makes a list of attributes for those files which differ between v1 and v2
+#      (creates an intermediate attribute list)
+#
+fn_mk_inter_attrib_file()
+{
+       V1ref_ATTR_FILE=${OUTPUT_DIR}/v1_${TARGET}_attr.txt
+       V2ref_ATTR_FILE=${OUTPUT_DIR}/v2_${TARGET}_attr.txt
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_mk_delta_fs_core
+#
+# Description :
+#      core routine for delta generation
+#      File System Update Type (DELTA_FS)
+#
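+# Steps (summary of the code below) :
+#      extract the old/new partition images from the tars, loop-mount both as ext4,
+#      remove EXCLUDE_FILES from the mount points, generate attribute lists via
+#      fn_mk_attribute, run CreatePatch.py to produce the delta, then unmount and clean up.
+#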
+
+fn_mk_delta_fs_core()
+{
+       START_TIMESTAMP="generation of ${DELTA} [START] $(date +%T)"
+
+       #---- untar partition image and rename for old & new ----
+       echo "untar ${OLD_TAR_FILE}"
+       tar xvf ${OLD_TAR_DIR}/${OLD_TAR_FILE} ${PART_IMG_ORG}
+       if [ "$?" != "0" ]; then
+               return 1;
+       fi
+       sudo mv ${PART_IMG_ORG} ${PART_IMG_OLD}
+
+       echo "untar ${NEW_TAR_FILE}"
+       tar xvf ${NEW_TAR_DIR}/${NEW_TAR_FILE} ${PART_IMG_ORG}
+       if [ "$?" != "0" ]; then
+               return 1;
+       fi
+       sudo mv ${PART_IMG_ORG} ${PART_IMG_NEW}
+
+       #---- check whether the binaries are same ----
+       sudo diff ${PART_IMG_OLD} ${PART_IMG_NEW}
+       if [ "$?" = "0" ]; then
+               echo "[${PART_NAME}] two binaries are same"
+               sudo rm -rf ${PART_IMG_OLD}
+               sudo rm -rf ${PART_IMG_NEW}
+               return 1
+       fi
+
+       #---- make mount point ----
+       mkdir -p ${MNT_PNT_OLD}
+       mkdir -p ${MNT_PNT_NEW}
+
+       #---- mount partition image to mount point  ----
+       echo "mount ${PART_IMG_OLD} & ${PART_IMG_NEW}"
+       sudo mount -t ext4 -o loop ${PART_IMG_OLD} ${MNT_PNT_OLD}
+       if [ "$?" != "0" ]; then
+               return 1;
+       fi
+       sudo mount -t ext4 -o loop ${PART_IMG_NEW} ${MNT_PNT_NEW}
+       if [ "$?" != "0" ]; then
+               return 1;
+       fi
+
+       #---- remove unnecessary files & directories  ----
+       for ff in ${EXCLUDE_FILES}
+       do
+               sudo rm -rf ${MNT_PNT_OLD}/${ff}
+               sudo rm -rf ${MNT_PNT_NEW}/${ff}
+       done
+
+       #---- make attribute ----
+       fn_mk_attribute ${PART_NAME} ${BASE_OLD} ${BASE_NEW}
+
+       # The PART_IMG_ORG parameter should match the DELTA name in the default config file;
+       # it is used to update the MAX size file present in the respective partition.
+       sudo python ${COMMON_BINDIR}/CreatePatch.py ${UPDATE_TYPE} ${PART_NAME} ${BASE_OLD} ${BASE_NEW} ${OUTPUT_DIR} ${V1_ATTR_FILE} ${V2_ATTR_FILE} ${UPDATE_CFG_PATH}
+       PythonRet=$?
+
+       #---- unmount partition image ----
+       echo "umount ${MNT_PNT_OLD} & ${MNT_PNT_NEW}"
+       sudo umount ${MNT_PNT_OLD}
+       sudo umount ${MNT_PNT_NEW}
+
+       #---- remove files & directories ----
+       echo "remove temp files/directory & move delta/log to result directory"
+       sudo rm -rf ${PART_IMG_OLD}
+       sudo rm -rf ${PART_IMG_NEW}
+       sudo rm -rf ${BASE_OLD} ${BASE_NEW}
+       sudo rm -rf x
+
+       #---- extra cleanup ----
+       sudo mv ./file_attr_updated_image.txt ${OUTPUT_DIR}/${PART_NAME}_file_attr_updated_image.txt
+
+       if [ $PythonRet != "0" ]; then
+               echo "[${PART_NAME}] Failed to generate DELTA"
+               exit 1
+       fi
+       END_TIMESTAMP="generation of ${DELTA} [END] $(date +%T)"
+       echo "${START_TIMESTAMP}"
+       echo "${END_TIMESTAMP}"
+
+       return 0
+}
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_mk_delta_fs
+#
+# Description :
+#      generate delta file for filesystem type partition
+#
+
+fn_mk_delta_fs()
+{
+       PART_IMG_ORG=${PART_BIN}
+       PART_IMG_OLD=${PART_IMG_ORG}.old
+       PART_IMG_NEW=${PART_IMG_ORG}.new
+
+       DELTA=${DELTA_BIN}
+       DEBUG_DELTA=debug_${DELTA}
+       #CFG_XML=${PART_NAME}_FS.xml
+
+       FAKE_ROOT=system
+       BASE_OLD=${PART_NAME}_OLD
+       BASE_NEW=${PART_NAME}_NEW
+
+       #--- !!! this should be modified according to partition ---
+       if [ "${PART_NAME}" = "ROOTFS" ]; then
+               EXCLUDE_FILES="lost+found dev proc tmp var sys csa"
+               TGT_MNT_PNT=${FAKE_ROOT}
+       elif [ "${PART_NAME}" = "RAMDISK2" ]; then
+               EXCLUDE_FILES="lost+found"
+               TGT_MNT_PNT=
+       else
+               echo "${PART_NAME} not supported !!!"
+               return 1
+       fi
+
+       MNT_PNT_OLD=${BASE_OLD}/${TGT_MNT_PNT}
+       MNT_PNT_NEW=${BASE_NEW}/${TGT_MNT_PNT}
+
+
+       fn_mk_delta_fs_core
+       if [ "$?" != "0" ]; then
+               return 1
+       fi
+
+       return 0
+}
+
+
+#------------------------------------------------------------------------------
+# Function :
+#      fn_get_tar_file_names
+#
+# Description :
+#      find tar file name (without path) that contains given partition binary
+#
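+# Example (illustrative): for BIN_TO_FIND=rootfs.img, each candidate archive in
+#      data/old_tar and data/new_tar is listed with "tar -tvf", and the first archive
+#      whose listing contains a line ending in " rootfs.img" is stored in OLD_TAR_FILE
+#      or NEW_TAR_FILE respectively.
+#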
+
+fn_get_tar_file_names()
+{
+       BIN_TO_FIND=$1
+
+       cd ${DATA_DIR}/${OLD_TAR_DIR}
+       OLD_TAR_LIST=`ls *.tar *.tar.gz *.tgz *.tar.md5 2>/dev/null`
+       cd ${CUR_DIR}
+
+       cd ${DATA_DIR}/${NEW_TAR_DIR}
+       NEW_TAR_LIST=`ls *.tar *.tar.gz *.tgz *.tar.md5 2>/dev/null`
+       cd ${CUR_DIR}
+
+       #--- find which tar contains the binary ---
+       for tar_file in ${OLD_TAR_LIST}
+       do
+               result=`tar -tvf ${DATA_DIR}/${OLD_TAR_DIR}/${tar_file} | grep " ${BIN_TO_FIND}$"`
+               if [ "$?" = "0" ]; then
+                       OLD_TAR_FILE=${tar_file}
+                       break
+               fi
+       done
+
+       for tar_file in ${NEW_TAR_LIST}
+       do
+               result=`tar -tvf ${DATA_DIR}/${NEW_TAR_DIR}/${tar_file} | grep " ${BIN_TO_FIND}$"`
+               if [ "$?" = "0" ]; then
+                       NEW_TAR_FILE=${tar_file}
+                       break
+               fi
+       done
+
+}
+
+
+###############################################################################
+#==================
+# Main Start
+#==================
+
+fn_print_line - 80
+
+#--- assign input params to internal variables ---
+param_num=$#
+if [ "${param_num}" != "4" ]; then
+       echo "Invalid number of parameters"
+       echo "[Usage] $0 PART_NAME UPDATE_TYPE PART_BIN DELTA_BIN"
+       exit 1
+fi
+
+PART_NAME=$1
+UPDATE_TYPE=$2
+PART_BIN=$3
+DELTA_BIN=$4
+
+#--- set some default variables ---
+MY_ID=`whoami`
+CUR_DIR=`pwd`
+DATA_DIR=./data
+CFG_DIR=./cfg
+#XML_DIR=./xml
+COMMON_BINDIR=${CUR_DIR}/../common/bin
+UPDATE_CFG_PATH=./cfg/update.cfg
+
+#--- check if the current working directory is the parent directory of bin, data, cfg, xml ---
+TEST_DIR=${DATA_DIR}
+if [ ! -d ${TEST_DIR} ]; then
+       echo "Invalid working directory. ${TEST_DIR} does not exist"
+       exit 1
+fi
+
+TEST_DIR=${CFG_DIR}
+if [ ! -d ${TEST_DIR} ]; then
+       echo "Invalid working directory. ${TEST_DIR} does not exist"
+       exit 1
+fi
+
+#TEST_DIR=${XML_DIR}
+#if [ ! -d ${TEST_DIR} ]; then
+#      echo "Invalid working directory. ${TEST_DIR} does not exist"
+#      exit 1
+#fi
+
+#--- get old/new tar file which includes $PART_BIN ---
+if [ "z${OLD_TAR_DIR}" = "z" ]; then
+       OLD_TAR_DIR="old_tar"
+fi
+if [ "z${NEW_TAR_DIR}" = "z" ]; then
+       NEW_TAR_DIR="new_tar"
+fi
+OLD_TAR_FILE=
+NEW_TAR_FILE=
+
+fn_get_tar_file_names ${PART_BIN}
+
+if [ "z${OLD_TAR_FILE}" = "z" ]; then
+       echo "[old] tar file does not exist in ${DATA_DIR}/${OLD_TAR_DIR} which contains ${PART_BIN}"
+       # exit 0, because this is not an error.
+       exit 0
+fi
+
+if [ "z${NEW_TAR_FILE}" = "z" ]; then
+       echo "[new] tar file does not exist in ${DATA_DIR}/${NEW_TAR_DIR} which contains ${PART_BIN}"
+       exit 0
+fi
+
+echo "[old] ${OLD_TAR_FILE} contains ${PART_BIN}"
+echo "[new] ${NEW_TAR_FILE} contains ${PART_BIN}"
+
+
+#--- generate delta binary ---
+
+cd ${DATA_DIR}
+
+OUTPUT_DIR=${PART_NAME}_OUT
+if [ ! -d ${OUTPUT_DIR} ]; then
+       if [ -f ${OUTPUT_DIR} ]; then
+               sudo rm -f ${OUTPUT_DIR}
+       fi
+       mkdir ${OUTPUT_DIR}
+fi
+
+if [ "${UPDATE_TYPE}" = "FULL_IMG" ]; then
+       fn_mk_full_img
+elif [ "${UPDATE_TYPE}" = "DELTA_IMG" ]; then
+       fn_mk_delta_img
+elif [ "${UPDATE_TYPE}" = "DELTA_FS" ]; then
+       fn_mk_delta_fs
+else
+       echo "Invalid update type"
+       exit 1
+fi
+
+exit 0
diff --git a/mk_delta/tw1/bin/pack_delta_tar.sh b/mk_delta/tw1/bin/pack_delta_tar.sh
new file mode 100755 (executable)
index 0000000..9647270
--- /dev/null
@@ -0,0 +1,43 @@
+#!/bin/sh
+
+#------------------------------------------------------------------------------
+# Function :
+#      get_last_result_dir
+#
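+# Description :
+#      find the newest result/<MMDD>_<n> directory created today and default RESDIR to
+#      its FW_DELTA sub-directory; e.g. if result/0714_1 and result/0714_2 exist,
+#      RESDIR becomes result/0714_2/FW_DELTA (left empty when no result directory exists).
+#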
+
+get_last_result_dir()
+{
+       # Getting date and minor version
+       MONDATE=$(date +%m%d)
+       i=1
+       while [ -d "result/${MONDATE}_${i}" ]
+       do
+               i=$(($i + 1))
+       done
+
+       if [ "$i" = "1" ]; then
+               RESDIR=
+       else
+               j=$(($i - 1))
+               RESDIR=result/${MONDATE}_${j}/FW_DELTA
+       fi
+
+}
+
+###############################################################################
+#==================
+# Main Start
+#==================
+
+get_last_result_dir
+
+echo "Enter last result directory (if no input is given, ${RESDIR} will be used) : "
+read WORK_DIR
+
+if [ "x${WORK_DIR}" = "x" ]; then
+       WORK_DIR=${RESDIR}
+fi
+
+cd ${WORK_DIR}/DELTA
+tar --overwrite -cvf ../delta.tar *
+cd -
diff --git a/mk_delta/tw1/cfg/default_part.cfg b/mk_delta/tw1/cfg/default_part.cfg
new file mode 100755 (executable)
index 0000000..ba3c4a6
--- /dev/null
@@ -0,0 +1,7 @@
+# Default Partition information file
+# Partition Name (in PIT), delta name, update type, blk dev, blk offset
+
+ROOTFS         rootfs.img/             DELTA_FS                /dev/mmcblk0p13         0
+BOOT           delta.boot              DELTA_IMG               /dev/mmcblk0p6          0
+RAMDISK1       delta.ramdisk1          DELTA_IMG               /dev/mmcblk0p8          0
+RAMDISK2       delta.ramdisk2          EXTRA                   /dev/mmcblk0p4          0
diff --git a/mk_delta/tw1/cfg/delta.cfg b/mk_delta/tw1/cfg/delta.cfg
new file mode 100755 (executable)
index 0000000..eab8511
--- /dev/null
@@ -0,0 +1,7 @@
+# Configuration for generation of delta
+# Partition Name (in PIT), bin name (in tar), update type
+
+ROOTFS         rootfs.img                      DELTA_FS
+BOOT           zImage                          DELTA_IMG
+RAMDISK1       ramdisk.img                     DELTA_IMG
+RAMDISK2       ramdisk-recovery.img            EXTRA
diff --git a/mk_delta/tw1/data/new_tar/.gitignore b/mk_delta/tw1/data/new_tar/.gitignore
new file mode 100755 (executable)
index 0000000..e69de29
diff --git a/mk_delta/tw1/data/old_tar/.gitignore b/mk_delta/tw1/data/old_tar/.gitignore
new file mode 100755 (executable)
index 0000000..e69de29