[Feature] Add MT2731_MP2_MR2_SVN388 baseline version

Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/support/ab_tools/LICENSE b/src/support/ab_tools/LICENSE
new file mode 100644
index 0000000..d251496
--- /dev/null
+++ b/src/support/ab_tools/LICENSE
@@ -0,0 +1,27 @@
+// Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//    * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//    * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//    * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/support/ab_tools/bin/delta_generator b/src/support/ab_tools/bin/delta_generator
new file mode 100644
index 0000000..df4a1cc
--- /dev/null
+++ b/src/support/ab_tools/bin/delta_generator
Binary files differ
diff --git a/src/support/ab_tools/bin/e2fsdroid b/src/support/ab_tools/bin/e2fsdroid
new file mode 100644
index 0000000..1d4a7d1
--- /dev/null
+++ b/src/support/ab_tools/bin/e2fsdroid
Binary files differ
diff --git a/src/support/ab_tools/bin/signapk.jar b/src/support/ab_tools/bin/signapk.jar
new file mode 100644
index 0000000..17ca7e1
--- /dev/null
+++ b/src/support/ab_tools/bin/signapk.jar
Binary files differ
diff --git a/src/support/ab_tools/lib/libbase.so b/src/support/ab_tools/lib/libbase.so
new file mode 100644
index 0000000..9131fc2
--- /dev/null
+++ b/src/support/ab_tools/lib/libbase.so
Binary files differ
diff --git a/src/support/ab_tools/lib/libbrillo-stream.so b/src/support/ab_tools/lib/libbrillo-stream.so
new file mode 100644
index 0000000..e5d6682
--- /dev/null
+++ b/src/support/ab_tools/lib/libbrillo-stream.so
Binary files differ
diff --git a/src/support/ab_tools/lib/libbrillo.so b/src/support/ab_tools/lib/libbrillo.so
new file mode 100644
index 0000000..c65803c
--- /dev/null
+++ b/src/support/ab_tools/lib/libbrillo.so
Binary files differ
diff --git a/src/support/ab_tools/lib/libbz.so b/src/support/ab_tools/lib/libbz.so
new file mode 100644
index 0000000..1b94c37
--- /dev/null
+++ b/src/support/ab_tools/lib/libbz.so
Binary files differ
diff --git a/src/support/ab_tools/lib/libc++.so b/src/support/ab_tools/lib/libc++.so
new file mode 100644
index 0000000..120b588
--- /dev/null
+++ b/src/support/ab_tools/lib/libc++.so
Binary files differ
diff --git a/src/support/ab_tools/lib/libchrome.so b/src/support/ab_tools/lib/libchrome.so
new file mode 100644
index 0000000..93f2ff8
--- /dev/null
+++ b/src/support/ab_tools/lib/libchrome.so
Binary files differ
diff --git a/src/support/ab_tools/lib/libconscrypt_openjdk_jni.so b/src/support/ab_tools/lib/libconscrypt_openjdk_jni.so
new file mode 100644
index 0000000..db1f6e1
--- /dev/null
+++ b/src/support/ab_tools/lib/libconscrypt_openjdk_jni.so
Binary files differ
diff --git a/src/support/ab_tools/lib/libcrypto-host.so b/src/support/ab_tools/lib/libcrypto-host.so
new file mode 100644
index 0000000..a4b5b71
--- /dev/null
+++ b/src/support/ab_tools/lib/libcrypto-host.so
Binary files differ
diff --git a/src/support/ab_tools/lib/libcrypto_utils.so b/src/support/ab_tools/lib/libcrypto_utils.so
new file mode 100644
index 0000000..a439b29
--- /dev/null
+++ b/src/support/ab_tools/lib/libcrypto_utils.so
Binary files differ
diff --git a/src/support/ab_tools/lib/libevent-host.so b/src/support/ab_tools/lib/libevent-host.so
new file mode 100644
index 0000000..8c0928c
--- /dev/null
+++ b/src/support/ab_tools/lib/libevent-host.so
Binary files differ
diff --git a/src/support/ab_tools/lib/libext2fs-host.so b/src/support/ab_tools/lib/libext2fs-host.so
new file mode 100644
index 0000000..60b0340
--- /dev/null
+++ b/src/support/ab_tools/lib/libext2fs-host.so
Binary files differ
diff --git a/src/support/ab_tools/lib/liblog.so b/src/support/ab_tools/lib/liblog.so
new file mode 100644
index 0000000..ecda0bc
--- /dev/null
+++ b/src/support/ab_tools/lib/liblog.so
Binary files differ
diff --git a/src/support/ab_tools/lib/libprotobuf-cpp-lite.so b/src/support/ab_tools/lib/libprotobuf-cpp-lite.so
new file mode 100644
index 0000000..349da61
--- /dev/null
+++ b/src/support/ab_tools/lib/libprotobuf-cpp-lite.so
Binary files differ
diff --git a/src/support/ab_tools/lib/libsparse-host.so b/src/support/ab_tools/lib/libsparse-host.so
new file mode 100644
index 0000000..65e1f3b
--- /dev/null
+++ b/src/support/ab_tools/lib/libsparse-host.so
Binary files differ
diff --git a/src/support/ab_tools/lib/libssl-host.so b/src/support/ab_tools/lib/libssl-host.so
new file mode 100644
index 0000000..f7c55d5
--- /dev/null
+++ b/src/support/ab_tools/lib/libssl-host.so
Binary files differ
diff --git a/src/support/ab_tools/lib/libz-host.so b/src/support/ab_tools/lib/libz-host.so
new file mode 100644
index 0000000..e6a8970
--- /dev/null
+++ b/src/support/ab_tools/lib/libz-host.so
Binary files differ
diff --git a/src/support/ab_tools/scripts/add_care_map_to_target_files.py b/src/support/ab_tools/scripts/add_care_map_to_target_files.py
new file mode 100644
index 0000000..4617c12
--- /dev/null
+++ b/src/support/ab_tools/scripts/add_care_map_to_target_files.py
@@ -0,0 +1,223 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Given a target-files zipfile that does not contain images (ie, does
+not have an IMAGES/ top-level subdirectory), produce the images and
+add them to the zipfile.
+
+Usage:  add_img_to_target_files [flag] target_files
+
+  -a  (--add_missing)
+      Build and add missing images to "IMAGES/". If this option is
+      not specified, this script will simply exit when the "IMAGES/"
+      directory already exists in the target files.
+
+  -r  (--rebuild_recovery)
+      Rebuild the recovery patch and write it to the system image. Only
+      meaningful when the system image needs to be rebuilt.
+
+  --replace_verity_private_key
+      Replace the private key used for verity signing. (same as the option
+      in sign_target_files_apks)
+
+  --replace_verity_public_key
+      Replace the certificate (public key) used for verity verification.
+      (same as the option in sign_target_files_apks)
+
+  --is_signing
+      Skip building & adding the images for "userdata" and "cache" if we
+      are signing the target files.
+"""
+
+from __future__ import print_function
+
+import datetime
+import os
+import re
+import shlex
+import shutil
+import subprocess
+import sys
+import uuid
+import zipfile
+
+import common
+import rangelib
+import sparse_img
+
+if sys.hexversion < 0x02070000:
+  print("Python 2.7 or newer is required.", file=sys.stderr)
+  sys.exit(1)
+
+OPTIONS = common.OPTIONS
+
+OPTIONS.add_missing = False
+OPTIONS.rebuild_recovery = False
+OPTIONS.replace_updated_files_list = []
+OPTIONS.replace_verity_public_key = False
+OPTIONS.replace_verity_private_key = False
+OPTIONS.is_signing = False
+
+# Partitions that should have their care_map added to META/care_map.txt.
+PARTITIONS_WITH_CARE_MAP = ('system', 'vendor', 'product')
+
+
+class OutputFile(object):
+  def __init__(self, output_zip, input_dir, prefix, name):
+    self._output_zip = output_zip
+    self.input_name = os.path.join(input_dir, prefix, name)
+
+    if self._output_zip:
+      self._zip_name = os.path.join(prefix, name)
+
+      root, suffix = os.path.splitext(name)
+      self.name = common.MakeTempFile(prefix=root + '-', suffix=suffix)
+    else:
+      self.name = self.input_name
+
+  def Write(self):
+    if self._output_zip:
+      common.ZipWrite(self._output_zip, self.name, self._zip_name)
+
+def GetCareMap(which, imgname):
+  """Returns the care_map string for the given partition.
+
+  Args:
+    which: The partition name, must be listed in PARTITIONS_WITH_CARE_MAP.
+    imgname: The filename of the image.
+
+  Returns:
+    (which, care_map_ranges): care_map_ranges is the raw string of the care_map
+    RangeSet.
+  """
+  assert which in PARTITIONS_WITH_CARE_MAP
+
+  simg = sparse_img.SparseImage(imgname)
+  care_map_ranges = simg.care_map
+  key = which + "_adjusted_partition_size"
+  avbtool = os.path.join(os.path.dirname(imgname), "../..", "avbtool")
+  cmd = [avbtool, "info_image", "--image", imgname]
+  p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+  image_info_output, _ = p.communicate()
+  print(image_info_output)
+  adjusted_blocks = int(re.search(r'Original image size:(.*)bytes',
+                                  image_info_output).group(1)) / 4096 - 1
+  print(adjusted_blocks)
+  #adjusted_blocks = OPTIONS.info_dict.get(key)
+  if adjusted_blocks:
+    assert adjusted_blocks > 0, "blocks should be positive for " + which
+    care_map_ranges = care_map_ranges.intersect(rangelib.RangeSet(
+        "0-%d" % (adjusted_blocks,)))
+
+  print(care_map_ranges.to_string_raw())
+  return [which, care_map_ranges.to_string_raw()]
+
+
+def AddCareMapTxtForAbOta(output_zip, ab_partitions, image_paths):
+  """Generates and adds care_map.txt for system and vendor partitions.
+
+  Args:
+    output_zip: The output zip file (needs to be already open), or None to
+        write images to OPTIONS.input_tmp/.
+    ab_partitions: The list of A/B partitions.
+    image_paths: A map from the partition name to the image path.
+  """
+  care_map_list = []
+  for partition in ab_partitions:
+    partition = partition.strip()
+    if partition not in PARTITIONS_WITH_CARE_MAP:
+      continue
+
+    verity_block_device = "{}_verity_block_device".format(partition)
+    avb_hashtree_enable = "avb_{}_hashtree_enable".format(partition)
+    if (verity_block_device in OPTIONS.info_dict or
+        OPTIONS.info_dict.get(avb_hashtree_enable) == "true"):
+      image_path = image_paths[partition]
+      assert os.path.exists(image_path)
+      care_map_list += GetCareMap(partition, image_path)
+
+  if care_map_list:
+    care_map_path = "META/care_map.txt"
+    if output_zip and care_map_path not in output_zip.namelist():
+      common.ZipWriteStr(output_zip, care_map_path, '\n'.join(care_map_list))
+    else:
+      with open(os.path.join(OPTIONS.input_tmp, care_map_path), 'w') as fp:
+        fp.write('\n'.join(care_map_list))
+      if output_zip:
+        OPTIONS.replace_updated_files_list.append(care_map_path)
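+
+  # Illustrative resulting META/care_map.txt for two qualifying partitions
+  # (care_map_list alternates partition-name and raw-RangeSet entries):
+  #   system
+  #   4,0,1024,2048,32768
+  #   vendor
+  #   2,0,8192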
+
+def AddCareMapToTargetFiles(filename):
+  if os.path.isdir(filename):
+    OPTIONS.input_tmp = os.path.abspath(filename)
+
+  OPTIONS.info_dict = common.LoadInfoDict(OPTIONS.input_tmp, OPTIONS.input_tmp)
+  partitions = dict()
+  partitions['system'] = os.path.join(OPTIONS.input_tmp, "IMAGES",
+                                   "system.img")
+  output_zip = None
+  ab_partitions_txt = os.path.join(OPTIONS.input_tmp, "META",
+                                   "ab_partitions.txt")
+  if os.path.exists(ab_partitions_txt):
+    with open(ab_partitions_txt, 'r') as f:
+      ab_partitions = f.readlines()
+
+    # Generate care_map.txt for system and vendor partitions (if present),
+    # then write it to the target_files package.
+    AddCareMapTxtForAbOta(output_zip, ab_partitions, partitions)
+
+def main(argv):
+  def option_handler(o, a):
+    if o in ("-a", "--add_missing"):
+      OPTIONS.add_missing = True
+    elif o in ("-r", "--rebuild_recovery",):
+      OPTIONS.rebuild_recovery = True
+    elif o == "--replace_verity_private_key":
+      OPTIONS.replace_verity_private_key = (True, a)
+    elif o == "--replace_verity_public_key":
+      OPTIONS.replace_verity_public_key = (True, a)
+    elif o == "--is_signing":
+      OPTIONS.is_signing = True
+    else:
+      return False
+    return True
+
+  args = common.ParseOptions(
+      argv, __doc__, extra_opts="ar",
+      extra_long_opts=["add_missing", "rebuild_recovery",
+                       "replace_verity_public_key=",
+                       "replace_verity_private_key=",
+                       "is_signing"],
+      extra_option_handler=option_handler)
+
+  if len(args) < 1:
+    common.Usage(__doc__)
+    sys.exit(1)
+
+  AddCareMapToTargetFiles(args[0])
+
+  print("done.")
+
+if __name__ == '__main__':
+  try:
+    common.CloseInheritedPipes()
+    main(sys.argv[1:])
+  except common.ExternalError as e:
+    print("\n   ERROR: %s\n" % (e,))
+    sys.exit(1)
+  finally:
+    common.Cleanup()
diff --git a/src/support/ab_tools/scripts/blockimgdiff.py b/src/support/ab_tools/scripts/blockimgdiff.py
new file mode 100644
index 0000000..24c5b2d
--- /dev/null
+++ b/src/support/ab_tools/scripts/blockimgdiff.py
@@ -0,0 +1,1645 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import array
+import copy
+import functools
+import heapq
+import itertools
+import multiprocessing
+import os
+import os.path
+import re
+import subprocess
+import sys
+import threading
+from collections import deque, OrderedDict
+from hashlib import sha1
+
+import common
+from rangelib import RangeSet
+
+
+__all__ = ["EmptyImage", "DataImage", "BlockImageDiff"]
+
+
+def compute_patch(srcfile, tgtfile, imgdiff=False):
+  patchfile = common.MakeTempFile(prefix='patch-')
+
+  cmd = ['imgdiff', '-z'] if imgdiff else ['bsdiff']
+  cmd.extend([srcfile, tgtfile, patchfile])
+
+  # Don't dump the bsdiff/imgdiff commands, which are not useful for the case
+  # here, since they contain temp filenames only.
+  p = common.Run(cmd, verbose=False, stdout=subprocess.PIPE,
+                 stderr=subprocess.STDOUT)
+  output, _ = p.communicate()
+
+  if p.returncode != 0:
+    raise ValueError(output)
+
+  with open(patchfile, 'rb') as f:
+    return f.read()
+
+
+class Image(object):
+  def RangeSha1(self, ranges):
+    raise NotImplementedError
+
+  def ReadRangeSet(self, ranges):
+    raise NotImplementedError
+
+  def TotalSha1(self, include_clobbered_blocks=False):
+    raise NotImplementedError
+
+  def WriteRangeDataToFd(self, ranges, fd):
+    raise NotImplementedError
+
+
+class EmptyImage(Image):
+  """A zero-length image."""
+
+  def __init__(self):
+    self.blocksize = 4096
+    self.care_map = RangeSet()
+    self.clobbered_blocks = RangeSet()
+    self.extended = RangeSet()
+    self.total_blocks = 0
+    self.file_map = {}
+
+  def RangeSha1(self, ranges):
+    return sha1().hexdigest()
+
+  def ReadRangeSet(self, ranges):
+    return ()
+
+  def TotalSha1(self, include_clobbered_blocks=False):
+    # EmptyImage always carries empty clobbered_blocks, so
+    # include_clobbered_blocks can be ignored.
+    assert self.clobbered_blocks.size() == 0
+    return sha1().hexdigest()
+
+  def WriteRangeDataToFd(self, ranges, fd):
+    raise ValueError("Can't write data from EmptyImage to file")
+
+
+class DataImage(Image):
+  """An image wrapped around a single string of data."""
+
+  def __init__(self, data, trim=False, pad=False):
+    self.data = data
+    self.blocksize = 4096
+
+    assert not (trim and pad)
+
+    partial = len(self.data) % self.blocksize
+    padded = False
+    if partial > 0:
+      if trim:
+        self.data = self.data[:-partial]
+      elif pad:
+        self.data += '\0' * (self.blocksize - partial)
+        padded = True
+      else:
+        raise ValueError(("data for DataImage must be multiple of %d bytes "
+                          "unless trim or pad is specified") %
+                         (self.blocksize,))
+
+    assert len(self.data) % self.blocksize == 0
+
+    self.total_blocks = len(self.data) / self.blocksize
+    self.care_map = RangeSet(data=(0, self.total_blocks))
+    # When the last block is padded, we always write the whole block even for
+    # incremental OTAs. Because otherwise the last block may get skipped if
+    # unchanged for an incremental, but would fail the post-install
+    # verification if it has non-zero contents in the padding bytes.
+    # Bug: 23828506
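+    # For example (illustrative), 10000 bytes of data with pad=True become
+    # 3 blocks (12288 bytes), and the padded last block is recorded as
+    # clobbered: clobbered_blocks == [2, 3] in start/end pair form.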
+    if padded:
+      clobbered_blocks = [self.total_blocks-1, self.total_blocks]
+    else:
+      clobbered_blocks = []
+    self.clobbered_blocks = clobbered_blocks
+    self.extended = RangeSet()
+
+    zero_blocks = []
+    nonzero_blocks = []
+    reference = '\0' * self.blocksize
+
+    for i in range(self.total_blocks-1 if padded else self.total_blocks):
+      d = self.data[i*self.blocksize : (i+1)*self.blocksize]
+      if d == reference:
+        zero_blocks.append(i)
+        zero_blocks.append(i+1)
+      else:
+        nonzero_blocks.append(i)
+        nonzero_blocks.append(i+1)
+
+    assert zero_blocks or nonzero_blocks or clobbered_blocks
+
+    self.file_map = dict()
+    if zero_blocks:
+      self.file_map["__ZERO"] = RangeSet(data=zero_blocks)
+    if nonzero_blocks:
+      self.file_map["__NONZERO"] = RangeSet(data=nonzero_blocks)
+    if clobbered_blocks:
+      self.file_map["__COPY"] = RangeSet(data=clobbered_blocks)
+
+  def _GetRangeData(self, ranges):
+    for s, e in ranges:
+      yield self.data[s*self.blocksize:e*self.blocksize]
+
+  def RangeSha1(self, ranges):
+    h = sha1()
+    for data in self._GetRangeData(ranges):
+      h.update(data)
+    return h.hexdigest()
+
+  def ReadRangeSet(self, ranges):
+    return [self._GetRangeData(ranges)]
+
+  def TotalSha1(self, include_clobbered_blocks=False):
+    if not include_clobbered_blocks:
+      return self.RangeSha1(self.care_map.subtract(self.clobbered_blocks))
+    else:
+      return sha1(self.data).hexdigest()
+
+  def WriteRangeDataToFd(self, ranges, fd):
+    for data in self._GetRangeData(ranges):
+      fd.write(data)
+
+
+class Transfer(object):
+  def __init__(self, tgt_name, src_name, tgt_ranges, src_ranges, tgt_sha1,
+               src_sha1, style, by_id):
+    self.tgt_name = tgt_name
+    self.src_name = src_name
+    self.tgt_ranges = tgt_ranges
+    self.src_ranges = src_ranges
+    self.tgt_sha1 = tgt_sha1
+    self.src_sha1 = src_sha1
+    self.style = style
+
+    # We use OrderedDict rather than dict so that the output is repeatable;
+    # otherwise it would depend on the hash values of the Transfer objects.
+    self.goes_before = OrderedDict()
+    self.goes_after = OrderedDict()
+
+    self.stash_before = []
+    self.use_stash = []
+
+    self.id = len(by_id)
+    by_id.append(self)
+
+    self._patch = None
+
+  @property
+  def patch(self):
+    return self._patch
+
+  @patch.setter
+  def patch(self, patch):
+    if patch:
+      assert self.style == "diff"
+    self._patch = patch
+
+  def NetStashChange(self):
+    return (sum(sr.size() for (_, sr) in self.stash_before) -
+            sum(sr.size() for (_, sr) in self.use_stash))
+
+  def ConvertToNew(self):
+    assert self.style != "new"
+    self.use_stash = []
+    self.style = "new"
+    self.src_ranges = RangeSet()
+    self.patch = None
+
+  def __str__(self):
+    return (str(self.id) + ": <" + str(self.src_ranges) + " " + self.style +
+            " to " + str(self.tgt_ranges) + ">")
+
+
+@functools.total_ordering
+class HeapItem(object):
+  def __init__(self, item):
+    self.item = item
+    # Negate the score since python's heap is a min-heap and we want the
+    # maximum score.
+    self.score = -item.score
+
+  def clear(self):
+    self.item = None
+
+  def __bool__(self):
+    return self.item is not None
+
+  # Python 2 uses __nonzero__, while Python 3 uses __bool__.
+  __nonzero__ = __bool__
+
+  # The remaining comparison operations are generated by the
+  # functools.total_ordering decorator.
+  def __eq__(self, other):
+    return self.score == other.score
+
+  def __le__(self, other):
+    return self.score <= other.score
+
+
+class ImgdiffStats(object):
+  """A class that collects imgdiff stats.
+
+  It keeps track of the files to which imgdiff will be applied while
+  generating BlockImageDiff, and logs the ones that cannot use imgdiff,
+  with specific reasons. The stats are only meaningful when imgdiff is not
+  disabled by the caller of BlockImageDiff. In addition, only files with
+  supported types (BlockImageDiff.FileTypeSupportedByImgdiff()) are allowed
+  to be logged.
+  """
+
+  USED_IMGDIFF = "APK files diff'd with imgdiff"
+  USED_IMGDIFF_LARGE_APK = "Large APK files split and diff'd with imgdiff"
+
+  # Reasons for not applying imgdiff on APKs.
+  SKIPPED_TRIMMED = "Not used imgdiff due to trimmed RangeSet"
+  SKIPPED_NONMONOTONIC = "Not used imgdiff due to having non-monotonic ranges"
+  SKIPPED_SHARED_BLOCKS = "Not used imgdiff due to using shared blocks"
+  SKIPPED_INCOMPLETE = "Not used imgdiff due to incomplete RangeSet"
+
+  # The list of valid reasons, which will also be the dumped order in a report.
+  REASONS = (
+      USED_IMGDIFF,
+      USED_IMGDIFF_LARGE_APK,
+      SKIPPED_TRIMMED,
+      SKIPPED_NONMONOTONIC,
+      SKIPPED_SHARED_BLOCKS,
+      SKIPPED_INCOMPLETE,
+  )
+
+  def __init__(self):
+    self.stats = {}
+
+  def Log(self, filename, reason):
+    """Logs why imgdiff can or cannot be applied to the given filename.
+
+    Args:
+      filename: The filename string.
+      reason: One of the reason constants listed in REASONS.
+
+    Raises:
+      AssertionError: On unsupported filetypes or invalid reason.
+    """
+    assert BlockImageDiff.FileTypeSupportedByImgdiff(filename)
+    assert reason in self.REASONS
+
+    if reason not in self.stats:
+      self.stats[reason] = set()
+    self.stats[reason].add(filename)
+
+  def Report(self):
+    """Prints a report of the collected imgdiff stats."""
+
+    def print_header(header, separator):
+      print(header)
+      print(separator * len(header) + '\n')
+
+    print_header('  Imgdiff Stats Report  ', '=')
+    for key in self.REASONS:
+      if key not in self.stats:
+        continue
+      values = self.stats[key]
+      section_header = ' {} (count: {}) '.format(key, len(values))
+      print_header(section_header, '-')
+      print(''.join(['  {}\n'.format(name) for name in values]))
+
+
+# BlockImageDiff works on two image objects.  An image object is
+# anything that provides the following attributes:
+#
+#    blocksize: the size in bytes of a block, currently must be 4096.
+#
+#    total_blocks: the total size of the partition/image, in blocks.
+#
+#    care_map: a RangeSet containing which blocks (in the range [0,
+#      total_blocks) we actually care about; i.e. which blocks contain
+#      data.
+#
+#    file_map: a dict that partitions the blocks contained in care_map
+#      into smaller domains that are useful for doing diffs on.
+#      (Typically a domain is a file, and the key in file_map is the
+#      pathname.)
+#
+#    clobbered_blocks: a RangeSet containing which blocks contain data
+#      but may be altered by the FS. They need to be excluded when
+#      verifying the partition integrity.
+#
+#    ReadRangeSet(): a function that takes a RangeSet and returns the
+#      data contained in the image blocks of that RangeSet.  The data
+#      is returned as a list or tuple of strings; concatenating the
+#      elements together should produce the requested data.
+#      Implementations are free to break up the data into list/tuple
+#      elements in any way that is convenient.
+#
+#    RangeSha1(): a function that returns (as a hex string) the SHA-1
+#      hash of all the data in the specified range.
+#
+#    TotalSha1(): a function that returns (as a hex string) the SHA-1
+#      hash of all the data in the image (i.e., all the blocks in the
+#      care_map minus clobbered_blocks, or including the clobbered
+#      blocks if include_clobbered_blocks is True).
+#
+# When creating a BlockImageDiff, the src image may be None, in which
+# case the list of transfers produced will never read from the
+# original image.
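+#
+# A minimal duck-typed sketch (illustrative, not part of this module): an
+# object with blocksize=4096, total_blocks=1, care_map=RangeSet(data=(0, 1)),
+# clobbered_blocks=RangeSet(), extended=RangeSet(), file_map={"f": care_map},
+# plus the Image methods above (ReadRangeSet, RangeSha1, TotalSha1,
+# WriteRangeDataToFd), would be accepted as the 'tgt' argument.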
+
+class BlockImageDiff(object):
+  def __init__(self, tgt, src=None, threads=None, version=4,
+               disable_imgdiff=False):
+    if threads is None:
+      threads = multiprocessing.cpu_count() // 2
+      if threads == 0:
+        threads = 1
+    self.threads = threads
+    self.version = version
+    self.transfers = []
+    self.src_basenames = {}
+    self.src_numpatterns = {}
+    self._max_stashed_size = 0
+    self.touched_src_ranges = RangeSet()
+    self.touched_src_sha1 = None
+    self.disable_imgdiff = disable_imgdiff
+    self.imgdiff_stats = ImgdiffStats() if not disable_imgdiff else None
+
+    assert version in (3, 4)
+
+    self.tgt = tgt
+    if src is None:
+      src = EmptyImage()
+    self.src = src
+
+    # The updater code that installs the patch always uses 4k blocks.
+    assert tgt.blocksize == 4096
+    assert src.blocksize == 4096
+
+    # The range sets in each filemap should comprise a partition of
+    # the care map.
+    self.AssertPartition(src.care_map, src.file_map.values())
+    self.AssertPartition(tgt.care_map, tgt.file_map.values())
+
+  @property
+  def max_stashed_size(self):
+    return self._max_stashed_size
+
+  @staticmethod
+  def FileTypeSupportedByImgdiff(filename):
+    """Returns whether the file type is supported by imgdiff."""
+    return filename.lower().endswith(('.apk', '.jar', '.zip'))
+
+  def CanUseImgdiff(self, name, tgt_ranges, src_ranges, large_apk=False):
+    """Checks whether we can apply imgdiff for the given RangeSets.
+
+    For files in ZIP format (e.g., APKs, JARs, etc.), we would like to use
+    'imgdiff -z' if possible, because it usually produces significantly
+    smaller patches than bsdiff.
+
+    This is permissible if all of the following conditions hold:
+      - imgdiff hasn't been disabled by the caller (e.g. for squashfs);
+      - The file type is supported by imgdiff;
+      - The source and target blocks are monotonic (i.e. the data is stored with
+        blocks in increasing order);
+      - Both files don't contain shared blocks;
+      - Both files have complete lists of blocks;
+      - We haven't removed any blocks from the source set.
+
+    If all these conditions are satisfied, concatenating all the blocks in the
+    RangeSet in order will produce a valid ZIP file (plus possibly extra zeros
+    in the last block). imgdiff is fine with extra zeros at the end of the file.
+
+    Args:
+      name: The filename to be diff'd.
+      tgt_ranges: The target RangeSet.
+      src_ranges: The source RangeSet.
+      large_apk: Whether this is to split a large APK.
+
+    Returns:
+      A boolean result.
+    """
+    if self.disable_imgdiff or not self.FileTypeSupportedByImgdiff(name):
+      return False
+
+    if not tgt_ranges.monotonic or not src_ranges.monotonic:
+      self.imgdiff_stats.Log(name, ImgdiffStats.SKIPPED_NONMONOTONIC)
+      return False
+
+    if (tgt_ranges.extra.get('uses_shared_blocks') or
+        src_ranges.extra.get('uses_shared_blocks')):
+      self.imgdiff_stats.Log(name, ImgdiffStats.SKIPPED_SHARED_BLOCKS)
+      return False
+
+    if tgt_ranges.extra.get('incomplete') or src_ranges.extra.get('incomplete'):
+      self.imgdiff_stats.Log(name, ImgdiffStats.SKIPPED_INCOMPLETE)
+      return False
+
+    if tgt_ranges.extra.get('trimmed') or src_ranges.extra.get('trimmed'):
+      self.imgdiff_stats.Log(name, ImgdiffStats.SKIPPED_TRIMMED)
+      return False
+
+    reason = (ImgdiffStats.USED_IMGDIFF_LARGE_APK if large_apk
+              else ImgdiffStats.USED_IMGDIFF)
+    self.imgdiff_stats.Log(name, reason)
+    return True
+
+  def Compute(self, prefix):
+    # When looking for a source file to use as the diff input for a
+    # target file, we try:
+    #   1) an exact path match if available, otherwise
+    #   2) an exact basename match if available, otherwise
+    #   3) a basename match after all runs of digits are replaced by
+    #      "#" if available, otherwise
+    #   4) we have no source for this target.
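+    #
+    # For example, under rule 3, "app-1013.apk" and "app-1047.apk" both
+    # abbreviate to "app-#.apk" (every run of digits replaced by "#"), so a
+    # renamed-by-version file can still be found as a diff source.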
+    self.AbbreviateSourceNames()
+    self.FindTransfers()
+
+    # Find the ordering dependencies among transfers (this is O(n^2)
+    # in the number of transfers).
+    self.GenerateDigraph()
+    # Find a sequence of transfers that satisfies as many ordering
+    # dependencies as possible (heuristically).
+    self.FindVertexSequence()
+    # Fix up the ordering dependencies that the sequence didn't
+    # satisfy.
+    self.ReverseBackwardEdges()
+    self.ImproveVertexSequence()
+
+    # Ensure the runtime stash size is under the limit.
+    if common.OPTIONS.cache_size is not None:
+      self.ReviseStashSize()
+
+    # Double-check our work.
+    self.AssertSequenceGood()
+    self.AssertSha1Good()
+
+    self.ComputePatches(prefix)
+    self.WriteTransfers(prefix)
+
+    # Report the imgdiff stats.
+    if common.OPTIONS.verbose and not self.disable_imgdiff:
+      self.imgdiff_stats.Report()
+
+  def WriteTransfers(self, prefix):
+    def WriteSplitTransfers(out, style, target_blocks):
+      """Limit the size of operand in command 'new' and 'zero' to 1024 blocks.
+
+      This prevents the target size of one command from being too large; and
+      might help to avoid fsync errors on some devices."""
+
+      assert style == "new" or style == "zero"
+      blocks_limit = 1024
+      total = 0
+      while target_blocks:
+        blocks_to_write = target_blocks.first(blocks_limit)
+        out.append("%s %s\n" % (style, blocks_to_write.to_string_raw()))
+        total += blocks_to_write.size()
+        target_blocks = target_blocks.subtract(blocks_to_write)
+      return total
+
+    out = []
+    total = 0
+
+    # In BBOTA v3+, the hash of the stashed blocks is used as the stash slot
+    # id. 'stashes' maps each hash to its ref count; the stash will be freed
+    # only when the count decrements to zero.
+    stashes = {}
+    stashed_blocks = 0
+    max_stashed_blocks = 0
+
+    for xf in self.transfers:
+
+      for _, sr in xf.stash_before:
+        sh = self.src.RangeSha1(sr)
+        if sh in stashes:
+          stashes[sh] += 1
+        else:
+          stashes[sh] = 1
+          stashed_blocks += sr.size()
+          self.touched_src_ranges = self.touched_src_ranges.union(sr)
+          out.append("stash %s %s\n" % (sh, sr.to_string_raw()))
+
+      if stashed_blocks > max_stashed_blocks:
+        max_stashed_blocks = stashed_blocks
+
+      free_string = []
+      free_size = 0
+
+      #   <# blocks> <src ranges>
+      #     OR
+      #   <# blocks> <src ranges> <src locs> <stash refs...>
+      #     OR
+      #   <# blocks> - <stash refs...>
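+      #
+      # Illustrative example: a command whose source is blocks 20-29 (10
+      # blocks), of which blocks 28-29 come from a stash keyed by sha1 H,
+      # would encode as "10 2,20,28 2,0,8 H:2,8,10" -- the total block
+      # count, the unstashed source ranges, their locations within the
+      # source data, then the stash reference.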
+
+      size = xf.src_ranges.size()
+      src_str_buffer = [str(size)]
+
+      unstashed_src_ranges = xf.src_ranges
+      mapped_stashes = []
+      for _, sr in xf.use_stash:
+        unstashed_src_ranges = unstashed_src_ranges.subtract(sr)
+        sh = self.src.RangeSha1(sr)
+        sr = xf.src_ranges.map_within(sr)
+        mapped_stashes.append(sr)
+        assert sh in stashes
+        src_str_buffer.append("%s:%s" % (sh, sr.to_string_raw()))
+        stashes[sh] -= 1
+        if stashes[sh] == 0:
+          free_string.append("free %s\n" % (sh,))
+          free_size += sr.size()
+          stashes.pop(sh)
+
+      if unstashed_src_ranges:
+        src_str_buffer.insert(1, unstashed_src_ranges.to_string_raw())
+        if xf.use_stash:
+          mapped_unstashed = xf.src_ranges.map_within(unstashed_src_ranges)
+          src_str_buffer.insert(2, mapped_unstashed.to_string_raw())
+          mapped_stashes.append(mapped_unstashed)
+          self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
+      else:
+        src_str_buffer.insert(1, "-")
+        self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
+
+      src_str = " ".join(src_str_buffer)
+
+      # version 3+:
+      #   zero <rangeset>
+      #   new <rangeset>
+      #   erase <rangeset>
+      #   bsdiff patchstart patchlen srchash tgthash <tgt rangeset> <src_str>
+      #   imgdiff patchstart patchlen srchash tgthash <tgt rangeset> <src_str>
+      #   move hash <tgt rangeset> <src_str>
+
+      tgt_size = xf.tgt_ranges.size()
+
+      if xf.style == "new":
+        assert xf.tgt_ranges
+        assert tgt_size == WriteSplitTransfers(out, xf.style, xf.tgt_ranges)
+        total += tgt_size
+      elif xf.style == "move":
+        assert xf.tgt_ranges
+        assert xf.src_ranges.size() == tgt_size
+        if xf.src_ranges != xf.tgt_ranges:
+          # take into account automatic stashing of overlapping blocks
+          if xf.src_ranges.overlaps(xf.tgt_ranges):
+            temp_stash_usage = stashed_blocks + xf.src_ranges.size()
+            if temp_stash_usage > max_stashed_blocks:
+              max_stashed_blocks = temp_stash_usage
+
+          self.touched_src_ranges = self.touched_src_ranges.union(
+              xf.src_ranges)
+
+          out.append("%s %s %s %s\n" % (
+              xf.style,
+              xf.tgt_sha1,
+              xf.tgt_ranges.to_string_raw(), src_str))
+          total += tgt_size
+      elif xf.style in ("bsdiff", "imgdiff"):
+        assert xf.tgt_ranges
+        assert xf.src_ranges
+        # take into account automatic stashing of overlapping blocks
+        if xf.src_ranges.overlaps(xf.tgt_ranges):
+          temp_stash_usage = stashed_blocks + xf.src_ranges.size()
+          if temp_stash_usage > max_stashed_blocks:
+            max_stashed_blocks = temp_stash_usage
+
+        self.touched_src_ranges = self.touched_src_ranges.union(xf.src_ranges)
+
+        out.append("%s %d %d %s %s %s %s\n" % (
+            xf.style,
+            xf.patch_start, xf.patch_len,
+            xf.src_sha1,
+            xf.tgt_sha1,
+            xf.tgt_ranges.to_string_raw(), src_str))
+        total += tgt_size
+      elif xf.style == "zero":
+        assert xf.tgt_ranges
+        to_zero = xf.tgt_ranges.subtract(xf.src_ranges)
+        assert WriteSplitTransfers(out, xf.style, to_zero) == to_zero.size()
+        total += to_zero.size()
+      else:
+        raise ValueError("unknown transfer style '%s'\n" % xf.style)
+
+      if free_string:
+        out.append("".join(free_string))
+        stashed_blocks -= free_size
+
+      if common.OPTIONS.cache_size is not None:
+        # Sanity check: abort if we're going to need more stash space than
+        # the allowed size (cache_size * threshold). The threshold serves two
+        # purposes: a) part of the cache may already be occupied by recovery
+        # logs; b) it buys us some time to deal with the oversize issue.
+        cache_size = common.OPTIONS.cache_size
+        stash_threshold = common.OPTIONS.stash_threshold
+        max_allowed = cache_size * stash_threshold
+        assert max_stashed_blocks * self.tgt.blocksize <= max_allowed, \
+               'Stash size %d (%d * %d) exceeds the limit %d (%d * %.2f)' % (
+                   max_stashed_blocks * self.tgt.blocksize, max_stashed_blocks,
+                   self.tgt.blocksize, max_allowed, cache_size,
+                   stash_threshold)
+
+    self.touched_src_sha1 = self.src.RangeSha1(self.touched_src_ranges)
+
+    # Zero out extended blocks as a workaround for bug 20881595.
+    if self.tgt.extended:
+      assert (WriteSplitTransfers(out, "zero", self.tgt.extended) ==
+              self.tgt.extended.size())
+      total += self.tgt.extended.size()
+
+    # We erase all the blocks on the partition that a) don't contain useful
+    # data in the new image and b) will not be touched by dm-verity. Out of
+    # those blocks, the ones that won't be used in this update are erased at
+    # the beginning of the update; the rest are erased at the end. This is to
+    # work around the eMMC issue observed on some devices, which may otherwise
+    # be starved of clean blocks and thus fail the update. (b/28347095)
+    all_tgt = RangeSet(data=(0, self.tgt.total_blocks))
+    all_tgt_minus_extended = all_tgt.subtract(self.tgt.extended)
+    new_dontcare = all_tgt_minus_extended.subtract(self.tgt.care_map)
+
+    erase_first = new_dontcare.subtract(self.touched_src_ranges)
+    if erase_first:
+      out.insert(0, "erase %s\n" % (erase_first.to_string_raw(),))
+
+    erase_last = new_dontcare.subtract(erase_first)
+    if erase_last:
+      out.append("erase %s\n" % (erase_last.to_string_raw(),))
+
+    out.insert(0, "%d\n" % (self.version,))   # format version number
+    out.insert(1, "%d\n" % (total,))
+    # v3+: the number of stash slots is unused.
+    out.insert(2, "0\n")
+    out.insert(3, str(max_stashed_blocks) + "\n")
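+    # Illustrative header for a v4 transfer list that writes 123456 blocks
+    # and needs at most 8 stashed blocks:
+    #   4
+    #   123456
+    #   0
+    #   8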
+
+    with open(prefix + ".transfer.list", "wb") as f:
+      for i in out:
+        f.write(i)
+
+    self._max_stashed_size = max_stashed_blocks * self.tgt.blocksize
+    OPTIONS = common.OPTIONS
+    if OPTIONS.cache_size is not None:
+      max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold
+      print("max stashed blocks: %d  (%d bytes), "
+            "limit: %d bytes (%.2f%%)\n" % (
+                max_stashed_blocks, self._max_stashed_size, max_allowed,
+                self._max_stashed_size * 100.0 / max_allowed))
+    else:
+      print("max stashed blocks: %d  (%d bytes), limit: <unknown>\n" % (
+          max_stashed_blocks, self._max_stashed_size))
+
+  def ReviseStashSize(self):
+    print("Revising stash size...")
+    stash_map = {}
+
+    # Create the map between a stash and its def/use points. For example, for a
+    # given stash of (raw_id, sr), stash_map[raw_id] = (sr, def_cmd, use_cmd).
+    for xf in self.transfers:
+      # Command xf defines (stores) all the stashes in stash_before.
+      for stash_raw_id, sr in xf.stash_before:
+        stash_map[stash_raw_id] = (sr, xf)
+
+      # Record all the stashes command xf uses.
+      for stash_raw_id, _ in xf.use_stash:
+        stash_map[stash_raw_id] += (xf,)
+
+    # Compute the maximum blocks available for stash based on /cache size and
+    # the threshold.
+    cache_size = common.OPTIONS.cache_size
+    stash_threshold = common.OPTIONS.stash_threshold
+    max_allowed = cache_size * stash_threshold / self.tgt.blocksize
+
+    # See the comments for 'stashes' in WriteTransfers().
+    stashes = {}
+    stashed_blocks = 0
+    new_blocks = 0
+
+    # Now go through all the commands. Compute the required stash size on the
+    # fly. If a command requires more stash than is available, delete the
+    # stash by replacing the command that uses it with a "new" command
+    # instead.
+    for xf in self.transfers:
+      replaced_cmds = []
+
+      # xf.stash_before generates explicit stash commands.
+      for stash_raw_id, sr in xf.stash_before:
+        # Check the post-command stashed_blocks.
+        stashed_blocks_after = stashed_blocks
+        sh = self.src.RangeSha1(sr)
+        if sh not in stashes:
+          stashed_blocks_after += sr.size()
+
+        if stashed_blocks_after > max_allowed:
+          # We cannot stash this one for a later command. Find out the command
+          # that will use this stash and replace the command with "new".
+          use_cmd = stash_map[stash_raw_id][2]
+          replaced_cmds.append(use_cmd)
+          print("%10d  %9s  %s" % (sr.size(), "explicit", use_cmd))
+        else:
+          # Update the stashes map.
+          if sh in stashes:
+            stashes[sh] += 1
+          else:
+            stashes[sh] = 1
+          stashed_blocks = stashed_blocks_after
+
+      # "move" and "diff" may introduce implicit stashes in BBOTA v3. Prior to
+      # ComputePatches(), they both have the style of "diff".
+      if xf.style == "diff":
+        assert xf.tgt_ranges and xf.src_ranges
+        if xf.src_ranges.overlaps(xf.tgt_ranges):
+          if stashed_blocks + xf.src_ranges.size() > max_allowed:
+            replaced_cmds.append(xf)
+            print("%10d  %9s  %s" % (xf.src_ranges.size(), "implicit", xf))
+
+      # Replace the commands in replaced_cmds with "new"s.
+      for cmd in replaced_cmds:
+        # It no longer uses any stashes in "use_stash". Remove the def points
+        # for all those stashes.
+        for stash_raw_id, sr in cmd.use_stash:
+          def_cmd = stash_map[stash_raw_id][1]
+          assert (stash_raw_id, sr) in def_cmd.stash_before
+          def_cmd.stash_before.remove((stash_raw_id, sr))
+
+        # Add up the blocks that violate the space limit, and print the total
+        # number to screen later.
+        new_blocks += cmd.tgt_ranges.size()
+        cmd.ConvertToNew()
+
+      # xf.use_stash may generate free commands.
+      for _, sr in xf.use_stash:
+        sh = self.src.RangeSha1(sr)
+        assert sh in stashes
+        stashes[sh] -= 1
+        if stashes[sh] == 0:
+          stashed_blocks -= sr.size()
+          stashes.pop(sh)
+
+    num_of_bytes = new_blocks * self.tgt.blocksize
+    print("  Total %d blocks (%d bytes) are packed as new blocks due to "
+          "insufficient cache size." % (new_blocks, num_of_bytes))
+    return new_blocks
+
+  def ComputePatches(self, prefix):
+    print("Reticulating splines...")
+    diff_queue = []
+    patch_num = 0
+    with open(prefix + ".new.dat", "wb") as new_f:
+      for index, xf in enumerate(self.transfers):
+        if xf.style == "zero":
+          tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
+          print("%10d %10d (%6.2f%%) %7s %s %s" % (
+              tgt_size, tgt_size, 100.0, xf.style, xf.tgt_name,
+              str(xf.tgt_ranges)))
+
+        elif xf.style == "new":
+          self.tgt.WriteRangeDataToFd(xf.tgt_ranges, new_f)
+          tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
+          print("%10d %10d (%6.2f%%) %7s %s %s" % (
+              tgt_size, tgt_size, 100.0, xf.style,
+              xf.tgt_name, str(xf.tgt_ranges)))
+
+        elif xf.style == "diff":
+          # We can't compare src and tgt directly because they may have
+          # the same content but be broken up into blocks differently, eg:
+          #
+          #    ["he", "llo"]  vs  ["h", "ello"]
+          #
+          # We want those to compare equal, ideally without having to
+          # actually concatenate the strings (these may be tens of
+          # megabytes).
+          if xf.src_sha1 == xf.tgt_sha1:
+            # These are identical; we don't need to generate a patch,
+            # just issue copy commands on the device.
+            xf.style = "move"
+            xf.patch = None
+            tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
+            if xf.src_ranges != xf.tgt_ranges:
+              print("%10d %10d (%6.2f%%) %7s %s %s (from %s)" % (
+                  tgt_size, tgt_size, 100.0, xf.style,
+                  xf.tgt_name if xf.tgt_name == xf.src_name else (
+                      xf.tgt_name + " (from " + xf.src_name + ")"),
+                  str(xf.tgt_ranges), str(xf.src_ranges)))
+          else:
+            if xf.patch:
+              # We have already generated the patch with imgdiff. Check if the
+              # transfer is intact.
+              assert not self.disable_imgdiff
+              imgdiff = True
+              if (xf.src_ranges.extra.get('trimmed') or
+                  xf.tgt_ranges.extra.get('trimmed')):
+                imgdiff = False
+                xf.patch = None
+            else:
+              imgdiff = self.CanUseImgdiff(
+                  xf.tgt_name, xf.tgt_ranges, xf.src_ranges)
+            xf.style = "imgdiff" if imgdiff else "bsdiff"
+            diff_queue.append((index, imgdiff, patch_num))
+            patch_num += 1
+
+        else:
+          assert False, "unknown style " + xf.style
+
+    if diff_queue:
+      if self.threads > 1:
+        print("Computing patches (using %d threads)..." % (self.threads,))
+      else:
+        print("Computing patches...")
+
+      diff_total = len(diff_queue)
+      patches = [None] * diff_total
+      error_messages = []
+
+      # Using multiprocessing doesn't give additional benefits, due to the
+      # pattern of the code. The diffing work is done by subprocess.call, which
+      # already runs in a separate process (not affected much by the GIL -
+      # Global Interpreter Lock). Using multiprocessing would also require
+      # either a) writing the diff input files in the main process before
+      # forking, or b) reopening the image file (SparseImage) in the worker
+      # processes. Neither option improves the performance further.
+      lock = threading.Lock()
+      def diff_worker():
+        while True:
+          with lock:
+            if not diff_queue:
+              return
+            xf_index, imgdiff, patch_index = diff_queue.pop()
+            xf = self.transfers[xf_index]
+
+            if sys.stdout.isatty():
+              diff_left = len(diff_queue)
+              progress = (diff_total - diff_left) * 100 / diff_total
+              # '\033[K' is to clear to EOL.
+              print(' [%3d%%] %s\033[K' % (progress, xf.tgt_name), end='\r')
+              sys.stdout.flush()
+
+          patch = xf.patch
+          if not patch:
+            src_ranges = xf.src_ranges
+            tgt_ranges = xf.tgt_ranges
+
+            src_file = common.MakeTempFile(prefix="src-")
+            with open(src_file, "wb") as fd:
+              self.src.WriteRangeDataToFd(src_ranges, fd)
+
+            tgt_file = common.MakeTempFile(prefix="tgt-")
+            with open(tgt_file, "wb") as fd:
+              self.tgt.WriteRangeDataToFd(tgt_ranges, fd)
+
+            message = []
+            try:
+              patch = compute_patch(src_file, tgt_file, imgdiff)
+            except ValueError as e:
+              message.append(
+                  "Failed to generate %s for %s: tgt=%s, src=%s:\n%s" % (
+                      "imgdiff" if imgdiff else "bsdiff",
+                      xf.tgt_name if xf.tgt_name == xf.src_name else
+                      xf.tgt_name + " (from " + xf.src_name + ")",
+                      xf.tgt_ranges, xf.src_ranges, e.message))
+            if message:
+              with lock:
+                error_messages.extend(message)
+
+          with lock:
+            patches[patch_index] = (xf_index, patch)
+
+      threads = [threading.Thread(target=diff_worker)
+                 for _ in range(self.threads)]
+      for th in threads:
+        th.start()
+      while threads:
+        threads.pop().join()
+
+      if sys.stdout.isatty():
+        print('\n')
+
+      if error_messages:
+        print('ERROR:')
+        print('\n'.join(error_messages))
+        print('\n\n\n')
+        sys.exit(1)
+    else:
+      patches = []
+
+    offset = 0
+    with open(prefix + ".patch.dat", "wb") as patch_fd:
+      for index, patch in patches:
+        xf = self.transfers[index]
+        xf.patch_len = len(patch)
+        xf.patch_start = offset
+        offset += xf.patch_len
+        patch_fd.write(patch)
+
+        if common.OPTIONS.verbose:
+          tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
+          print("%10d %10d (%6.2f%%) %7s %s %s %s" % (
+              xf.patch_len, tgt_size, xf.patch_len * 100.0 / tgt_size,
+              xf.style,
+              xf.tgt_name if xf.tgt_name == xf.src_name else (
+                  xf.tgt_name + " (from " + xf.src_name + ")"),
+              xf.tgt_ranges, xf.src_ranges))
+
+  def AssertSha1Good(self):
+    """Check the SHA-1 of the src & tgt blocks in the transfer list.
+
+    Double check the SHA-1 value to avoid the issue in b/71908713, where
+    SparseImage.RangeSha1() messed up with the hash calculation in multi-thread
+    environment. That specific problem has been fixed by protecting the
+    underlying generator function 'SparseImage._GetRangeData()' with lock.
+    """
+    for xf in self.transfers:
+      tgt_sha1 = self.tgt.RangeSha1(xf.tgt_ranges)
+      assert xf.tgt_sha1 == tgt_sha1
+      if xf.style == "diff":
+        src_sha1 = self.src.RangeSha1(xf.src_ranges)
+        assert xf.src_sha1 == src_sha1
+
+  def AssertSequenceGood(self):
+    # Simulate the sequences of transfers we will output, and check that:
+    # - we never read a block after writing it, and
+    # - we write every block we care about exactly once.
+
+    # Start with no blocks having been touched yet.
+    touched = array.array("B", "\0" * self.tgt.total_blocks)
+
+    # Imagine processing the transfers in order.
+    for xf in self.transfers:
+      # Check that the input blocks for this transfer haven't yet been touched.
+
+      x = xf.src_ranges
+      for _, sr in xf.use_stash:
+        x = x.subtract(sr)
+
+      for s, e in x:
+        # The source image could be larger. Don't check blocks that are in the
+        # source image only, since they are not in 'touched' and won't ever
+        # be touched.
+        for i in range(s, min(e, self.tgt.total_blocks)):
+          assert touched[i] == 0
+
+      # Check that the output blocks for this transfer haven't yet
+      # been touched, and touch all the blocks written by this
+      # transfer.
+      for s, e in xf.tgt_ranges:
+        for i in range(s, e):
+          assert touched[i] == 0
+          touched[i] = 1
+
+    # Check that we've written every target block.
+    for s, e in self.tgt.care_map:
+      for i in range(s, e):
+        assert touched[i] == 1
+
+  def ImproveVertexSequence(self):
+    print("Improving vertex order...")
+
+    # At this point our digraph is acyclic; we reversed any edges that
+    # were backwards in the heuristically-generated sequence.  The
+    # previously-generated order is still acceptable, but we hope to
+    # find a better order that needs less memory for stashed data.
+    # Now we do a topological sort to generate a new vertex order,
+    # using a greedy algorithm to choose which vertex goes next
+    # whenever we have a choice.
+
+    # Make a copy of the edge set; this copy will get destroyed by the
+    # algorithm.
+    for xf in self.transfers:
+      xf.incoming = xf.goes_after.copy()
+      xf.outgoing = xf.goes_before.copy()
+
+    L = []   # the new vertex order
+
+    # S is the set of sources in the remaining graph; we always choose
+    # the one that leaves the least amount of stashed data after it's
+    # executed.
+    S = [(u.NetStashChange(), u.order, u) for u in self.transfers
+         if not u.incoming]
+    heapq.heapify(S)
+
+    while S:
+      _, _, xf = heapq.heappop(S)
+      L.append(xf)
+      for u in xf.outgoing:
+        del u.incoming[xf]
+        if not u.incoming:
+          heapq.heappush(S, (u.NetStashChange(), u.order, u))
+
+    # if this fails then our graph had a cycle.
+    assert len(L) == len(self.transfers)
+
+    self.transfers = L
+    for i, xf in enumerate(L):
+      xf.order = i
+
+  def RemoveBackwardEdges(self):
+    print("Removing backward edges...")
+    in_order = 0
+    out_of_order = 0
+    lost_source = 0
+
+    for xf in self.transfers:
+      lost = 0
+      size = xf.src_ranges.size()
+      for u in xf.goes_before:
+        # xf should go before u
+        if xf.order < u.order:
+          # it does, hurray!
+          in_order += 1
+        else:
+          # it doesn't, boo.  trim the blocks that u writes from xf's
+          # source, so that xf can go after u.
+          out_of_order += 1
+          assert xf.src_ranges.overlaps(u.tgt_ranges)
+          xf.src_ranges = xf.src_ranges.subtract(u.tgt_ranges)
+          xf.src_ranges.extra['trimmed'] = True
+
+      if xf.style == "diff" and not xf.src_ranges:
+        # nothing left to diff from; treat as new data
+        xf.style = "new"
+
+      lost = size - xf.src_ranges.size()
+      lost_source += lost
+
+    print(("  %d/%d dependencies (%.2f%%) were violated; "
+           "%d source blocks removed.") %
+          (out_of_order, in_order + out_of_order,
+           (out_of_order * 100.0 / (in_order + out_of_order))
+           if (in_order + out_of_order) else 0.0,
+           lost_source))
+
+  def ReverseBackwardEdges(self):
+    """Reverse unsatisfying edges and compute pairs of stashed blocks.
+
+    For each transfer, make sure it properly stashes the blocks it touches
+    that will be used by later transfers. It uses pairs of (stash_raw_id,
+    range) to record the blocks to be stashed. 'stash_raw_id' uniquely
+    identifies each pair. Note that for the same range (e.g. RangeSet("1-5")),
+    it is possible to have multiple pairs with different 'stash_raw_id's. Each
+    'stash_raw_id' will be consumed by one transfer. In BBOTA v3+, identical
+    blocks will be written to the same stash slot in WriteTransfers().
+    """
+
+    print("Reversing backward edges...")
+    in_order = 0
+    out_of_order = 0
+    stash_raw_id = 0
+    stash_size = 0
+
+    for xf in self.transfers:
+      for u in xf.goes_before.copy():
+        # xf should go before u
+        if xf.order < u.order:
+          # it does, hurray!
+          in_order += 1
+        else:
+          # it doesn't, boo.  modify u to stash the blocks that it
+          # writes that xf wants to read, and then require u to go
+          # before xf.
+          out_of_order += 1
+
+          overlap = xf.src_ranges.intersect(u.tgt_ranges)
+          assert overlap
+
+          u.stash_before.append((stash_raw_id, overlap))
+          xf.use_stash.append((stash_raw_id, overlap))
+          stash_raw_id += 1
+          stash_size += overlap.size()
+
+          # reverse the edge direction; now xf must go after u
+          del xf.goes_before[u]
+          del u.goes_after[xf]
+          xf.goes_after[u] = None    # value doesn't matter
+          u.goes_before[xf] = None
+
+    print(("  %d/%d dependencies (%.2f%%) were violated; "
+           "%d source blocks stashed.") %
+          (out_of_order, in_order + out_of_order,
+           (out_of_order * 100.0 / (in_order + out_of_order))
+           if (in_order + out_of_order) else 0.0,
+           stash_size))
+
+  def FindVertexSequence(self):
+    print("Finding vertex sequence...")
+
+    # This is based on "A Fast & Effective Heuristic for the Feedback
+    # Arc Set Problem" by P. Eades, X. Lin, and W.F. Smyth.  Think of
+    # it as starting with the digraph G and moving all the vertices to
+    # be on a horizontal line in some order, trying to minimize the
+    # number of edges that end up pointing to the left.  Left-pointing
+    # edges will get removed to turn the digraph into a DAG.  In this
+    # case each edge has a weight which is the number of source blocks
+    # we'll lose if that edge is removed; we try to minimize the total
+    # weight rather than just the number of edges.
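+    #
+    # For example, a transfer with outgoing edge weights {4, 2} and a single
+    # incoming edge of weight 1 gets score (4 + 2) - 1 = 5; repeatedly
+    # favoring high-score vertices as sources tends to keep the heavy edges
+    # pointing to the right.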
+
+    # Make a copy of the edge set; this copy will get destroyed by the
+    # algorithm.
+    for xf in self.transfers:
+      xf.incoming = xf.goes_after.copy()
+      xf.outgoing = xf.goes_before.copy()
+      xf.score = sum(xf.outgoing.values()) - sum(xf.incoming.values())
+
+    # We use an OrderedDict instead of just a set so that the output
+    # is repeatable; otherwise it would depend on the hash values of
+    # the transfer objects.
+    G = OrderedDict()
+    for xf in self.transfers:
+      G[xf] = None
+    s1 = deque()  # the left side of the sequence, built from left to right
+    s2 = deque()  # the right side of the sequence, built from right to left
+
+    heap = []
+    for xf in self.transfers:
+      xf.heap_item = HeapItem(xf)
+      heap.append(xf.heap_item)
+    heapq.heapify(heap)
+
+    # Use OrderedDict() instead of set() to preserve the insertion order. Need
+    # to use 'sinks[key] = None' to add key into the set. sinks will look like
+    # { key1: None, key2: None, ... }.
+    sinks = OrderedDict.fromkeys(u for u in G if not u.outgoing)
+    sources = OrderedDict.fromkeys(u for u in G if not u.incoming)
+
+    def adjust_score(iu, delta):
+      iu.score += delta
+      iu.heap_item.clear()
+      iu.heap_item = HeapItem(iu)
+      heapq.heappush(heap, iu.heap_item)
+
+    while G:
+      # Put all sinks at the end of the sequence.
+      while sinks:
+        new_sinks = OrderedDict()
+        for u in sinks:
+          if u not in G:
+            continue
+          s2.appendleft(u)
+          del G[u]
+          for iu in u.incoming:
+            adjust_score(iu, -iu.outgoing.pop(u))
+            if not iu.outgoing:
+              new_sinks[iu] = None
+        sinks = new_sinks
+
+      # Put all the sources at the beginning of the sequence.
+      while sources:
+        new_sources = OrderedDict()
+        for u in sources:
+          if u not in G:
+            continue
+          s1.append(u)
+          del G[u]
+          for iu in u.outgoing:
+            adjust_score(iu, +iu.incoming.pop(u))
+            if not iu.incoming:
+              new_sources[iu] = None
+        sources = new_sources
+
+      if not G:
+        break
+
+      # Find the "best" vertex to put next.  "Best" is the one that
+      # maximizes the net difference in source blocks saved we get by
+      # pretending it's a source rather than a sink.
+
+      while True:
+        u = heapq.heappop(heap)
+        if u and u.item in G:
+          u = u.item
+          break
+
+      s1.append(u)
+      del G[u]
+      for iu in u.outgoing:
+        adjust_score(iu, +iu.incoming.pop(u))
+        if not iu.incoming:
+          sources[iu] = None
+
+      for iu in u.incoming:
+        adjust_score(iu, -iu.outgoing.pop(u))
+        if not iu.outgoing:
+          sinks[iu] = None
+
+    # Now record the sequence in the 'order' field of each transfer,
+    # and rearrange self.transfers to be in the chosen sequence.
+
+    new_transfers = []
+    for x in itertools.chain(s1, s2):
+      x.order = len(new_transfers)
+      new_transfers.append(x)
+      del x.incoming
+      del x.outgoing
+
+    self.transfers = new_transfers
+
+  def GenerateDigraph(self):
+    print("Generating digraph...")
+
+    # Each item of source_ranges will be:
+    #   - None, if that block is not used as a source,
+    #   - an ordered set of transfers.
+    source_ranges = []
+    for b in self.transfers:
+      for s, e in b.src_ranges:
+        if e > len(source_ranges):
+          source_ranges.extend([None] * (e-len(source_ranges)))
+        for i in range(s, e):
+          if source_ranges[i] is None:
+            source_ranges[i] = OrderedDict.fromkeys([b])
+          else:
+            source_ranges[i][b] = None
+
+    for a in self.transfers:
+      intersections = OrderedDict()
+      for s, e in a.tgt_ranges:
+        for i in range(s, e):
+          if i >= len(source_ranges):
+            break
+          # Add all the Transfers in source_ranges[i] to the (ordered) set.
+          if source_ranges[i] is not None:
+            for j in source_ranges[i]:
+              intersections[j] = None
+
+      for b in intersections:
+        if a is b:
+          continue
+
+        # If the blocks written by A are read by B, then B needs to go before A.
+        i = a.tgt_ranges.intersect(b.src_ranges)
+        if i:
+          if b.src_name == "__ZERO":
+            # the cost of removing source blocks for the __ZERO domain
+            # is (nearly) zero.
+            size = 0
+          else:
+            size = i.size()
+          b.goes_before[a] = size
+          a.goes_after[b] = size
+
+  def FindTransfers(self):
+    """Parse the file_map to generate all the transfers."""
+
+    def AddSplitTransfersWithFixedSizeChunks(tgt_name, src_name, tgt_ranges,
+                                             src_ranges, style, by_id):
+      """Add one or multiple Transfer()s by splitting large files.
+
+      For BBOTA v3, we need to stash source blocks to support the resumable
+      update feature. However, as file sizes grow and the cache partition
+      shrinks, the source blocks of a large file may be too large to stash.
+      If a file occupies too many blocks, we split it into smaller pieces,
+      creating multiple Transfer()s.
+
+      The downside is that splitting may increase the package size, since the
+      split pieces don't align well. According to our experiments, 1/8 of the
+      cache size as the per-piece limit appears to be optimal. Compared to the
+      fixed 1024-block limit, it reduces the overall package size by 30% for
+      volantis, and by 20% for angler and bullhead."""
+
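+      # Illustrative numbers (hypothetical): with a max_blocks_per_transfer of
+      # 1024, a file whose target and source both span 2500 blocks becomes
+      # three Transfer()s named "<name>-0" (1024 blocks), "<name>-1" (1024)
+      # and "<name>-2" (452), the last emitted by the remainder handling below.
+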
+      pieces = 0
+      while (tgt_ranges.size() > max_blocks_per_transfer and
+             src_ranges.size() > max_blocks_per_transfer):
+        tgt_split_name = "%s-%d" % (tgt_name, pieces)
+        src_split_name = "%s-%d" % (src_name, pieces)
+        tgt_first = tgt_ranges.first(max_blocks_per_transfer)
+        src_first = src_ranges.first(max_blocks_per_transfer)
+
+        Transfer(tgt_split_name, src_split_name, tgt_first, src_first,
+                 self.tgt.RangeSha1(tgt_first), self.src.RangeSha1(src_first),
+                 style, by_id)
+
+        tgt_ranges = tgt_ranges.subtract(tgt_first)
+        src_ranges = src_ranges.subtract(src_first)
+        pieces += 1
+
+      # Handle remaining blocks.
+      if tgt_ranges.size() or src_ranges.size():
+        # Must be both non-empty.
+        assert tgt_ranges.size() and src_ranges.size()
+        tgt_split_name = "%s-%d" % (tgt_name, pieces)
+        src_split_name = "%s-%d" % (src_name, pieces)
+        Transfer(tgt_split_name, src_split_name, tgt_ranges, src_ranges,
+                 self.tgt.RangeSha1(tgt_ranges), self.src.RangeSha1(src_ranges),
+                 style, by_id)
+
+    def AddSplitTransfers(tgt_name, src_name, tgt_ranges, src_ranges, style,
+                          by_id):
+      """Find all the zip files and split the others with a fixed chunk size.
+
+      This function will construct a list of zip archives, which will later be
+      split by imgdiff to reduce the final patch size. For the other files,
+      we will plainly split them based on a fixed chunk size with the potential
+      patch size penalty.
+      """
+
+      assert style == "diff"
+
+      # Change nothing for small files.
+      if (tgt_ranges.size() <= max_blocks_per_transfer and
+          src_ranges.size() <= max_blocks_per_transfer):
+        Transfer(tgt_name, src_name, tgt_ranges, src_ranges,
+                 self.tgt.RangeSha1(tgt_ranges), self.src.RangeSha1(src_ranges),
+                 style, by_id)
+        return
+
+      # Split large APKs with imgdiff, if possible. We're intentionally checking
+      # file types one more time (CanUseImgdiff() checks that as well), before
+      # calling the costly RangeSha1()s.
+      if (self.FileTypeSupportedByImgdiff(tgt_name) and
+          self.tgt.RangeSha1(tgt_ranges) != self.src.RangeSha1(src_ranges)):
+        if self.CanUseImgdiff(tgt_name, tgt_ranges, src_ranges, True):
+          large_apks.append((tgt_name, src_name, tgt_ranges, src_ranges))
+          return
+
+      AddSplitTransfersWithFixedSizeChunks(tgt_name, src_name, tgt_ranges,
+                                           src_ranges, style, by_id)
+
+    def AddTransfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id,
+                    split=False):
+      """Wrapper function for adding a Transfer()."""
+
+      # We specialize diff transfers only (which covers bsdiff/imgdiff/move);
+      # otherwise add the Transfer() as is.
+      if style != "diff" or not split:
+        Transfer(tgt_name, src_name, tgt_ranges, src_ranges,
+                 self.tgt.RangeSha1(tgt_ranges), self.src.RangeSha1(src_ranges),
+                 style, by_id)
+        return
+
+      # Handle .odex files specially to analyze the block-wise difference. If
+      # most of the blocks are identical with only a few changes (e.g. the
+      # header), we will patch the changed blocks only. This avoids stashing
+      # unchanged blocks while patching. We limit the analysis to files whose
+      # size is unchanged, to keep the OTA generation cost reasonable.
+      if (tgt_name.split(".")[-1].lower() == 'odex' and
+          tgt_ranges.size() == src_ranges.size()):
+
+        # The 0.5 threshold can be further tuned. The tradeoff: if only very
+        # few blocks remain identical, we lose the opportunity to use imgdiff,
+        # which may have a better compression ratio than bsdiff.
+        crop_threshold = 0.5
+
+        tgt_skipped = RangeSet()
+        src_skipped = RangeSet()
+        tgt_size = tgt_ranges.size()
+        tgt_changed = 0
+        for src_block, tgt_block in zip(src_ranges.next_item(),
+                                        tgt_ranges.next_item()):
+          src_rs = RangeSet(str(src_block))
+          tgt_rs = RangeSet(str(tgt_block))
+          if self.src.ReadRangeSet(src_rs) == self.tgt.ReadRangeSet(tgt_rs):
+            tgt_skipped = tgt_skipped.union(tgt_rs)
+            src_skipped = src_skipped.union(src_rs)
+          else:
+            tgt_changed += tgt_rs.size()
+
+          # Terminate early if there is no clear sign of benefit.
+          if tgt_changed > tgt_size * crop_threshold:
+            break
+
+        if tgt_changed < tgt_size * crop_threshold:
+          assert tgt_changed + tgt_skipped.size() == tgt_size
+          print('%10d %10d (%6.2f%%) %s' % (
+              tgt_skipped.size(), tgt_size,
+              tgt_skipped.size() * 100.0 / tgt_size, tgt_name))
+          AddSplitTransfers(
+              "%s-skipped" % (tgt_name,),
+              "%s-skipped" % (src_name,),
+              tgt_skipped, src_skipped, style, by_id)
+
+          # Intentionally change the file extension to avoid being imgdiff'd as
+          # the files are no longer in their original format.
+          tgt_name = "%s-cropped" % (tgt_name,)
+          src_name = "%s-cropped" % (src_name,)
+          tgt_ranges = tgt_ranges.subtract(tgt_skipped)
+          src_ranges = src_ranges.subtract(src_skipped)
+
+          # There may be no changed blocks left.
+          if not tgt_ranges:
+            return
+
+      # Add the transfer(s).
+      AddSplitTransfers(
+          tgt_name, src_name, tgt_ranges, src_ranges, style, by_id)
+
+    def ParseAndValidateSplitInfo(patch_size, tgt_ranges, src_ranges,
+                                  split_info):
+      """Parse the split_info and return a list of info tuples.
+
+      Args:
+        patch_size: total size of the patch file.
+        tgt_ranges: Ranges of the target file within the original image.
+        src_ranges: Ranges of the source file within the original image.
+        split_info format:
+          imgdiff version#
+          count of pieces
+          <patch_size_1> <tgt_size_1> <src_ranges_1>
+          ...
+          <patch_size_n> <tgt_size_n> <src_ranges_n>
+
+      Returns:
+        A list of (patch_start, patch_length, split_tgt_ranges,
+        split_src_ranges) tuples.
+      """
+
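+      # Hypothetical example: a split_info list of ["2", "2",
+      # "1000 8192 <src_ranges_1>", "2000 4096 <src_ranges_2>"] describes an
+      # imgdiff version-2 patch with two pieces: piece 0 is a 1000-byte patch
+      # at offset 0 producing an 8192-byte (two-block) target, and piece 1 is
+      # a 2000-byte patch starting at offset 1000.
+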
+      version = int(split_info[0])
+      assert version == 2
+      count = int(split_info[1])
+      assert len(split_info) - 2 == count
+
+      split_info_list = []
+      patch_start = 0
+      tgt_remain = copy.deepcopy(tgt_ranges)
+      # Each line has the format: <patch_size> <tgt_size> <src_ranges>
+      for line in split_info[2:]:
+        info = line.split()
+        assert len(info) == 3
+        patch_length = int(info[0])
+
+        split_tgt_size = int(info[1])
+        assert split_tgt_size % 4096 == 0
+        assert split_tgt_size / 4096 <= tgt_remain.size()
+        split_tgt_ranges = tgt_remain.first(split_tgt_size / 4096)
+        tgt_remain = tgt_remain.subtract(split_tgt_ranges)
+
+        # Find the split_src_ranges within the image file from their relative
+        # positions in the file.
+        split_src_indices = RangeSet.parse_raw(info[2])
+        split_src_ranges = RangeSet()
+        for r in split_src_indices:
+          curr_range = src_ranges.first(r[1]).subtract(src_ranges.first(r[0]))
+          assert not split_src_ranges.overlaps(curr_range)
+          split_src_ranges = split_src_ranges.union(curr_range)
+
+        split_info_list.append((patch_start, patch_length,
+                                split_tgt_ranges, split_src_ranges))
+        patch_start += patch_length
+
+      # Check that the sizes of all the split pieces add up to the final file
+      # size for patch and target.
+      assert tgt_remain.size() == 0
+      assert patch_start == patch_size
+      return split_info_list
+
+    def SplitLargeApks():
+      """Split the large apks files.
+
+      Example: Chrome.apk will be split into
+        src-0: Chrome.apk-0, tgt-0: Chrome.apk-0
+        src-1: Chrome.apk-1, tgt-1: Chrome.apk-1
+        ...
+
+      After the split, the target pieces are contiguous and block-aligned, and
+      the source pieces are mutually exclusive. During the split, we also
+      generate and save the image patch between src-X & tgt-X. This patch will
+      be valid because the block ranges of src-X & tgt-X always stay the same
+      afterwards; but there's a chance we won't use the patch, if we later
+      convert the "diff" command into "new" or "move".
+      """
+
+      while True:
+        with transfer_lock:
+          if not large_apks:
+            return
+          tgt_name, src_name, tgt_ranges, src_ranges = large_apks.pop(0)
+
+        src_file = common.MakeTempFile(prefix="src-")
+        tgt_file = common.MakeTempFile(prefix="tgt-")
+        with open(src_file, "wb") as src_fd:
+          self.src.WriteRangeDataToFd(src_ranges, src_fd)
+        with open(tgt_file, "wb") as tgt_fd:
+          self.tgt.WriteRangeDataToFd(tgt_ranges, tgt_fd)
+
+        patch_file = common.MakeTempFile(prefix="patch-")
+        patch_info_file = common.MakeTempFile(prefix="split_info-")
+        cmd = ["imgdiff", "-z",
+               "--block-limit={}".format(max_blocks_per_transfer),
+               "--split-info=" + patch_info_file,
+               src_file, tgt_file, patch_file]
+        p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+        imgdiff_output, _ = p.communicate()
+        assert p.returncode == 0, \
+            "Failed to create imgdiff patch between {} and {}:\n{}".format(
+                src_name, tgt_name, imgdiff_output)
+
+        with open(patch_info_file) as patch_info:
+          lines = patch_info.readlines()
+
+        patch_size_total = os.path.getsize(patch_file)
+        split_info_list = ParseAndValidateSplitInfo(patch_size_total,
+                                                    tgt_ranges, src_ranges,
+                                                    lines)
+        for index, (patch_start, patch_length, split_tgt_ranges,
+                    split_src_ranges) in enumerate(split_info_list):
+          with open(patch_file) as f:
+            f.seek(patch_start)
+            patch_content = f.read(patch_length)
+
+          split_src_name = "{}-{}".format(src_name, index)
+          split_tgt_name = "{}-{}".format(tgt_name, index)
+          split_large_apks.append((split_tgt_name,
+                                   split_src_name,
+                                   split_tgt_ranges,
+                                   split_src_ranges,
+                                   patch_content))
+
+    print("Finding transfers...")
+
+    large_apks = []
+    split_large_apks = []
+    cache_size = common.OPTIONS.cache_size
+    split_threshold = 0.125
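+    # Hypothetical numbers for the computation below: a 512 MiB cache with a
+    # 4096-byte block size gives
+    # max_blocks_per_transfer = int(536870912 * 0.125 / 4096) = 16384 blocks,
+    # i.e. 1/8 of the cache, matching the experiments cited in
+    # AddSplitTransfersWithFixedSizeChunks above.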
+    max_blocks_per_transfer = int(cache_size * split_threshold /
+                                  self.tgt.blocksize)
+    empty = RangeSet()
+    for tgt_fn, tgt_ranges in sorted(self.tgt.file_map.items()):
+      if tgt_fn == "__ZERO":
+        # the special "__ZERO" domain is all the blocks not contained
+        # in any file and that are filled with zeros.  We have a
+        # special transfer style for zero blocks.
+        src_ranges = self.src.file_map.get("__ZERO", empty)
+        AddTransfer(tgt_fn, "__ZERO", tgt_ranges, src_ranges,
+                    "zero", self.transfers)
+        continue
+
+      elif tgt_fn == "__COPY":
+        # "__COPY" domain includes all the blocks not contained in any
+        # file and that need to be copied unconditionally to the target.
+        AddTransfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
+        continue
+
+      elif tgt_fn in self.src.file_map:
+        # Look for an exact pathname match in the source.
+        AddTransfer(tgt_fn, tgt_fn, tgt_ranges, self.src.file_map[tgt_fn],
+                    "diff", self.transfers, True)
+        continue
+
+      b = os.path.basename(tgt_fn)
+      if b in self.src_basenames:
+        # Look for an exact basename match in the source.
+        src_fn = self.src_basenames[b]
+        AddTransfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
+                    "diff", self.transfers, True)
+        continue
+
+      b = re.sub("[0-9]+", "#", b)
+      if b in self.src_numpatterns:
+        # Look for a 'number pattern' match (a basename match after
+        # all runs of digits are replaced by "#").  (This is useful
+        # for .so files that contain version numbers in the filename
+        # that get bumped.)
+        src_fn = self.src_numpatterns[b]
+        AddTransfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
+                    "diff", self.transfers, True)
+        continue
+
+      AddTransfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
+
+    transfer_lock = threading.Lock()
+    threads = [threading.Thread(target=SplitLargeApks)
+               for _ in range(self.threads)]
+    for th in threads:
+      th.start()
+    while threads:
+      threads.pop().join()
+
+    # Sort the split transfers for large APKs to make the package deterministic.
+    split_large_apks.sort()
+    for (tgt_name, src_name, tgt_ranges, src_ranges,
+         patch) in split_large_apks:
+      transfer_split = Transfer(tgt_name, src_name, tgt_ranges, src_ranges,
+                                self.tgt.RangeSha1(tgt_ranges),
+                                self.src.RangeSha1(src_ranges),
+                                "diff", self.transfers)
+      transfer_split.patch = patch
+
+  def AbbreviateSourceNames(self):
+    for k in self.src.file_map.keys():
+      b = os.path.basename(k)
+      self.src_basenames[b] = k
+      b = re.sub("[0-9]+", "#", b)
+      self.src_numpatterns[b] = k
+
+  @staticmethod
+  def AssertPartition(total, seq):
+    """Assert that all the RangeSets in 'seq' form a partition of the
+    'total' RangeSet (ie, they are nonintersecting and their union
+    equals 'total')."""
+
+    so_far = RangeSet()
+    for i in seq:
+      assert not so_far.overlaps(i)
+      so_far = so_far.union(i)
+    assert so_far == total
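+
+  # Hedged usage sketch (values are illustrative): AssertPartition(
+  #     RangeSet("0-9"), [RangeSet("0-4"), RangeSet("5-9")])
+  # passes, while overlapping or incomplete pieces trip the assertions above.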
diff --git a/src/support/ab_tools/scripts/brillo_update_payload b/src/support/ab_tools/scripts/brillo_update_payload
new file mode 100644
index 0000000..65c63f5
--- /dev/null
+++ b/src/support/ab_tools/scripts/brillo_update_payload
@@ -0,0 +1,771 @@
+#!/bin/bash
+
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Script to generate a Brillo update for use by the update engine.
+#
+# usage: brillo_update_payload COMMAND [ARGS]
+# The following commands are supported:
+#  generate    generate an unsigned payload
+#  hash        generate a payload or metadata hash
+#  sign        generate a signed payload
+#  properties  generate a properties file from a payload
+#  verify      verify a payload by recreating a target image.
+#
+#  Generate command arguments:
+#  --payload             generated unsigned payload output file
+#  --source_image        if defined, generate a delta payload from the specified
+#                        image to the target_image
+#  --target_image        the target image that should be sent to clients
+#  --metadata_size_file  if defined, generate a file containing the size of the
+#                        payload metadata in bytes to the specified file
+#
+#  Hash command arguments:
+#  --unsigned_payload    the input unsigned payload to generate the hash from
+#  --signature_size      signature sizes in bytes in the following format:
+#                        "size1:size2[:...]"
+#  --payload_hash_file   if defined, generate a payload hash and output to the
+#                        specified file
+#  --metadata_hash_file  if defined, generate a metadata hash and output to the
+#                        specified file
+#
+#  Sign command arguments:
+#  --unsigned_payload        the input unsigned payload to insert the signatures
+#  --payload                 the output signed payload
+#  --signature_size          signature sizes in bytes in the following format:
+#                            "size1:size2[:...]"
+#  --payload_signature_file  the payload signature files in the following
+#                            format:
+#                            "payload_signature1:payload_signature2[:...]"
+#  --metadata_signature_file the metadata signature files in the following
+#                            format:
+#                            "metadata_signature1:metadata_signature2[:...]"
+#  --metadata_size_file      if defined, generate a file containing the size of
+#                            the signed payload metadata in bytes to the
+#                            specified file
+#  Note that the number of signature sizes and payload signatures must match.
+#
+#  Properties command arguments:
+#  --payload                 the input signed or unsigned payload
+#  --properties_file         the output path where to write the properties, or
+#                            '-' for stdout.
+#  Verify command arguments:
+#  --payload             payload input file
+#  --source_image        if defined, apply the delta payload to the specified
+#                        source image.
+#  --target_image        the target image to verify against.
+
+
+# Exit codes:
+EX_UNSUPPORTED_DELTA=100
+
+warn() {
+  echo "brillo_update_payload: warning: $*" >&2
+}
+
+die() {
+  echo "brillo_update_payload: error: $*" >&2
+  exit 1
+}
+
+# Loads shflags. We first look at the default install location; then look for
+# crosutils (chroot); finally check our own directory (au-generator zipfile).
+load_shflags() {
+  local my_dir="$(dirname "$(readlink -f "$0")")"
+  local path
+  for path in /usr/share/misc {/usr/lib/crosutils,"${my_dir}"}/lib/shflags; do
+    if [[ -r "${path}/shflags" ]]; then
+      . "${path}/shflags" || die "Could not load ${path}/shflags."
+      return
+    fi
+  done
+  die "Could not find shflags."
+}
+
+load_shflags
+
+HELP_GENERATE="generate: Generate an unsigned update payload."
+HELP_HASH="hash: Generate the hashes of the unsigned payload and metadata used \
+for signing."
+HELP_SIGN="sign: Insert the signatures into the unsigned payload."
+HELP_PROPERTIES="properties: Extract payload properties to a file."
+HELP_VERIFY="verify: Verify a (signed) update payload."
+
+usage() {
+  echo "Supported commands:"
+  echo
+  echo "${HELP_GENERATE}"
+  echo "${HELP_HASH}"
+  echo "${HELP_SIGN}"
+  echo "${HELP_PROPERTIES}"
+  echo "${HELP_VERIFY}"
+  echo
+  echo "Use: \"$0 <command> --help\" for more options."
+}
+
+# Check that a command is specified.
+if [[ $# -lt 1 ]]; then
+  echo "Please specify a command [generate|hash|sign|properties]"
+  exit 1
+fi
+
+# Parse command.
+COMMAND="${1:-}"
+shift
+
+case "${COMMAND}" in
+  generate)
+    FLAGS_HELP="${HELP_GENERATE}"
+    ;;
+
+  hash)
+    FLAGS_HELP="${HELP_HASH}"
+    ;;
+
+  sign)
+    FLAGS_HELP="${HELP_SIGN}"
+    ;;
+
+  properties)
+    FLAGS_HELP="${HELP_PROPERTIES}"
+    ;;
+
+  verify)
+    FLAGS_HELP="${HELP_VERIFY}"
+    ;;
+
+  *)
+    echo "Unrecognized command: \"${COMMAND}\"" >&2
+    usage >&2
+    exit 1
+    ;;
+esac
+
+# Flags
+FLAGS_HELP="Usage: $0 ${COMMAND} [flags]
+${FLAGS_HELP}"
+
+if [[ "${COMMAND}" == "generate" ]]; then
+  DEFINE_string payload "" \
+    "Path to output the generated unsigned payload file."
+  DEFINE_string target_image "" \
+    "Path to the target image that should be sent to clients."
+  DEFINE_string source_image "" \
+    "Optional: Path to a source image. If specified, this makes a delta update."
+  DEFINE_string metadata_size_file "" \
+    "Optional: Path to output metadata size."
+  DEFINE_string max_timestamp "" \
+    "Optional: The maximum unix timestamp of the OS allowed to apply this \
+payload, should be set to a number higher than the build timestamp of the \
+system running on the device, 0 if not specified."
+fi
+if [[ "${COMMAND}" == "hash" || "${COMMAND}" == "sign" ]]; then
+  DEFINE_string unsigned_payload "" "Path to the input unsigned payload."
+  DEFINE_string signature_size "" \
+    "Signature sizes in bytes in the following format: size1:size2[:...]"
+fi
+if [[ "${COMMAND}" == "hash" ]]; then
+  DEFINE_string metadata_hash_file "" \
+    "Optional: Path to output metadata hash file."
+  DEFINE_string payload_hash_file "" \
+    "Optional: Path to output payload hash file."
+fi
+if [[ "${COMMAND}" == "sign" ]]; then
+  DEFINE_string payload "" \
+    "Path to output the generated unsigned payload file."
+  DEFINE_string metadata_signature_file "" \
+    "The metatada signatures in the following format: \
+metadata_signature1:metadata_signature2[:...]"
+  DEFINE_string payload_signature_file "" \
+    "The payload signatures in the following format: \
+payload_signature1:payload_signature2[:...]"
+  DEFINE_string metadata_size_file "" \
+    "Optional: Path to output metadata size."
+fi
+if [[ "${COMMAND}" == "properties" ]]; then
+  DEFINE_string payload "" \
+    "Path to the input signed or unsigned payload file."
+  DEFINE_string properties_file "-" \
+    "Path to output the extracted property files. If '-' is passed stdout will \
+be used."
+fi
+if [[ "${COMMAND}" == "verify" ]]; then
+  DEFINE_string payload "" \
+    "Path to the input payload file."
+  DEFINE_string target_image "" \
+    "Path to the target image to verify upon."
+  DEFINE_string source_image "" \
+    "Optional: Path to a source image. If specified, the delta update is \
+applied to this."
+fi
+
+DEFINE_string work_dir "${TMPDIR:-/tmp}" "Where to dump temporary files."
+
+# Parse command line flag arguments
+FLAGS "$@" || exit 1
+eval set -- "${FLAGS_ARGV}"
+set -e
+
+# Override TMPDIR with the passed work_dir flag, which itself defaults to
+# ${TMPDIR}.
+TMPDIR="${FLAGS_work_dir}"
+export TMPDIR
+
+# Associative arrays from partition name to file in the source and target
+# images. The size of the updated area must be the size of the file.
+declare -A SRC_PARTITIONS
+declare -A DST_PARTITIONS
+
+# Associative arrays for the .map files associated with each src/dst partition
+# file in SRC_PARTITIONS and DST_PARTITIONS.
+declare -A SRC_PARTITIONS_MAP
+declare -A DST_PARTITIONS_MAP
+
+# List of partition names in order.
+declare -a PARTITIONS_ORDER
+
+# A list of temporary files to remove during cleanup.
+CLEANUP_FILES=()
+
+# Global options to force the version of the payload.
+FORCE_MAJOR_VERSION=""
+FORCE_MINOR_VERSION=""
+
+# Path to the postinstall config file in target image if exists.
+POSTINSTALL_CONFIG_FILE=""
+
+# read_option_uint <file.txt> <option_key> [default_value]
+#
+# Reads the unsigned integer value associated with |option_key| in a key=value
+# file |file.txt|. Prints the read value if found and valid, otherwise prints
+# the |default_value|.
+read_option_uint() {
+  local file_txt="$1"
+  local option_key="$2"
+  local default_value="${3:-}"
+  local value
+  if value=$(look "${option_key}=" "${file_txt}" | tail -n 1); then
+    if value=$(echo "${value}" | cut -f 2- -d "=" | grep -E "^[0-9]+$"); then
+      echo "${value}"
+      return
+    fi
+  fi
+  echo "${default_value}"
+}
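+# Hypothetical usage: given a file containing "PAYLOAD_MINOR_VERSION=3",
+#   read_option_uint "${ue_config}" "PAYLOAD_MINOR_VERSION" 2
+# prints "3"; if the key is missing or its value is not a non-negative
+# integer, it prints the default "2".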
+
+# truncate_file <file_path> <file_size>
+#
+# Truncate the given |file_path| to |file_size| using perl.
+# The truncate binary might not be available.
+truncate_file() {
+  local file_path="$1"
+  local file_size="$2"
+  perl -e "open(FILE, \"+<\", \$ARGV[0]); \
+           truncate(FILE, ${file_size}); \
+           close(FILE);" "${file_path}"
+}
+
+# Create a temporary file in the work_dir with an optional pattern name.
+# Prints the name of the newly created file.
+create_tempfile() {
+  local pattern="${1:-tempfile.XXXXXX}"
+  mktemp --tmpdir="${FLAGS_work_dir}" "${pattern}"
+}
+
+cleanup() {
+  local err=""
+  rm -f "${CLEANUP_FILES[@]}" || err=1
+
+  # If we are cleaning up after an error, or if we got an error during
+  # cleanup (even if we eventually succeeded) return a non-zero exit
+  # code. This triggers additional logging in most environments that call
+  # this script.
+  if [[ -n "${err}" ]]; then
+    die "Cleanup encountered an error."
+  fi
+}
+
+cleanup_on_error() {
+  trap - INT TERM ERR EXIT
+  cleanup
+  die "Cleanup success after an error."
+}
+
+cleanup_on_exit() {
+  trap - INT TERM ERR EXIT
+  cleanup
+}
+
+trap cleanup_on_error INT TERM ERR
+trap cleanup_on_exit EXIT
+
+
+# extract_image <image> <partitions_array> [partitions_order]
+#
+# Detect the format of the |image| file and extract its updatable partitions
+# into new temporary files. Add the list of partition names and their files to
+# the associative array passed in |partitions_array|. If |partitions_order| is
+# passed, set it to the list of partition names in order.
+extract_image() {
+  local image="$1"
+
+  # Brillo images are zip files. We detect the 4-byte magic header of the zip
+  # file.
+  local magic=$(head --bytes=4 "${image}" | hexdump -e '1/1 "%.2x"')
+  if [[ "${magic}" == "504b0304" ]]; then
+    echo "Detected .zip file, extracting Brillo image."
+    extract_image_brillo "$@"
+    return
+  fi
+
+  # Chrome OS images are GPT partitioned disks. We should have the cgpt binary
+  # bundled here and we will use it to extract the partitions, so the GPT
+  # headers must be valid.
+  if cgpt show -q -n "${image}" >/dev/null; then
+    echo "Detected GPT image, extracting Chrome OS image."
+    extract_image_cros "$@"
+    return
+  fi
+
+  die "Couldn't detect the image format of ${image}"
+}
+
+# extract_image_cros <image.bin> <partitions_array> [partitions_order]
+#
+# Extract Chromium OS recovery images into new temporary files.
+extract_image_cros() {
+  local image="$1"
+  local partitions_array="$2"
+  local partitions_order="${3:-}"
+
+  local kernel root
+  kernel=$(create_tempfile "kernel.bin.XXXXXX")
+  CLEANUP_FILES+=("${kernel}")
+  root=$(create_tempfile "root.bin.XXXXXX")
+  CLEANUP_FILES+=("${root}")
+
+  cros_generate_update_payload --extract \
+    --image "${image}" \
+    --kern_path "${kernel}" --root_path "${root}" \
+    --work_dir "${FLAGS_work_dir}" --outside_chroot
+
+  # Chrome OS uses major_version 1 payloads for all versions, even if the
+  # updater supports a newer major version.
+  FORCE_MAJOR_VERSION="1"
+
+  # When generating legacy Chrome OS images, we need to use "boot" and "system"
+  # for the partition names to be compatible with updating Brillo devices with
+  # Chrome OS images.
+  eval ${partitions_array}[boot]=\""${kernel}"\"
+  eval ${partitions_array}[system]=\""${root}"\"
+
+  if [[ -n "${partitions_order}" ]]; then
+    eval "${partitions_order}=( \"system\" \"boot\" )"
+  fi
+
+  local part varname
+  for part in boot system; do
+    varname="${partitions_array}[${part}]"
+    printf "md5sum of %s: " "${varname}"
+    md5sum "${!varname}"
+  done
+}
+
+# extract_image_brillo <target_files.zip> <partitions_array> [partitions_order]
+#
+# Extract the A/B updated partitions from a Brillo target_files zip file into
+# new temporary files.
+extract_image_brillo() {
+  local image="$1"
+  local partitions_array="$2"
+  local partitions_order="${3:-}"
+
+  local partitions=( "boot" "system" )
+  local ab_partitions_list
+  ab_partitions_list=$(create_tempfile "ab_partitions_list.XXXXXX")
+  CLEANUP_FILES+=("${ab_partitions_list}")
+  if unzip -p "${image}" "META/ab_partitions.txt" >"${ab_partitions_list}"; then
+    if grep -v -E '^[a-zA-Z0-9_-]*$' "${ab_partitions_list}" >&2; then
+      die "Invalid partition names found in the partition list."
+    fi
+    # Get partition list without duplicates.
+    partitions=($(awk '!seen[$0]++' "${ab_partitions_list}"))
+    if [[ ${#partitions[@]} -eq 0 ]]; then
+      die "The list of partitions is empty. Can't generate a payload."
+    fi
+  else
+    warn "No ab_partitions.txt found. Using default."
+  fi
+  echo "List of A/B partitions: ${partitions[@]}"
+
+  if [[ -n "${partitions_order}" ]]; then
+    eval "${partitions_order}=(${partitions[@]})"
+  fi
+
+  # All Brillo updaters support major version 2.
+  FORCE_MAJOR_VERSION="2"
+
+  if [[ "${partitions_array}" == "SRC_PARTITIONS" ]]; then
+    # Source image
+    local ue_config=$(create_tempfile "ue_config.XXXXXX")
+    CLEANUP_FILES+=("${ue_config}")
+    if ! unzip -p "${image}" "META/update_engine_config.txt" \
+        >"${ue_config}"; then
+      warn "No update_engine_config.txt found. Assuming pre-release image, \
+using payload minor version 2"
+    fi
+    # For delta payloads, we use the major and minor version supported by the
+    # old updater.
+    FORCE_MINOR_VERSION=$(read_option_uint "${ue_config}" \
+      "PAYLOAD_MINOR_VERSION" 2)
+    FORCE_MAJOR_VERSION=$(read_option_uint "${ue_config}" \
+      "PAYLOAD_MAJOR_VERSION" 2)
+
+    # Brillo support for deltas started with minor version 3.
+    if [[ "${FORCE_MINOR_VERSION}" -le 2 ]]; then
+      warn "No delta support from minor version ${FORCE_MINOR_VERSION}. \
+Disabling deltas for this source version."
+      exit ${EX_UNSUPPORTED_DELTA}
+    fi
+  else
+    # Target image
+    local postinstall_config=$(create_tempfile "postinstall_config.XXXXXX")
+    CLEANUP_FILES+=("${postinstall_config}")
+    if unzip -p "${image}" "META/postinstall_config.txt" \
+        >"${postinstall_config}"; then
+      POSTINSTALL_CONFIG_FILE="${postinstall_config}"
+    fi
+  fi
+
+  local part part_file temp_raw filesize
+  for part in "${partitions[@]}"; do
+    part_file=$(create_tempfile "${part}.img.XXXXXX")
+    CLEANUP_FILES+=("${part_file}")
+    unzip -p "${image}" "IMAGES/${part}.img" >"${part_file}"
+
+    # If the partition is stored as an Android sparse image file, we need to
+    # convert it to a raw image for the update.
+    local magic=$(head --bytes=4 "${part_file}" | hexdump -e '1/1 "%.2x"')
+    if [[ "${magic}" == "3aff26ed" ]]; then
+      temp_raw=$(create_tempfile "${part}.raw.XXXXXX")
+      CLEANUP_FILES+=("${temp_raw}")
+      echo "Converting Android sparse image ${part}.img to RAW."
+      simg2img "${part_file}" "${temp_raw}"
+      # At this point, we can drop the contents of the old part_file file, but
+      # we can't delete the file because it will be deleted in cleanup.
+      true >"${part_file}"
+      part_file="${temp_raw}"
+    fi
+
+    # Extract the .map file (if one is available).
+    part_map_file=$(create_tempfile "${part}.map.XXXXXX")
+    CLEANUP_FILES+=("${part_map_file}")
+    unzip -p "${image}" "IMAGES/${part}.map" >"${part_map_file}" || \
+      part_map_file=""
+
+    # delta_generator only supports images whose size is a multiple of 4 KiB.
+    # For target images we pad the data with zeros if needed, but for source
+    # images we truncate the data down, since the last block of the old image
+    # could be padded on disk with unknown data.
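+    # Numeric illustration (hypothetical sizes): a 10000-byte source file is
+    # truncated down to 8192 bytes (10000 & -4096), while a 10000-byte target
+    # file is zero-padded up to 12288 bytes ((10000 + 4095) & -4096).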
+    filesize=$(stat -c%s "${part_file}")
+    if [[ $(( filesize % 4096 )) -ne 0 ]]; then
+      if [[ "${partitions_array}" == "SRC_PARTITIONS" ]]; then
+        echo "Rounding DOWN partition ${part}.img to a multiple of 4 KiB."
+        : $(( filesize = filesize & -4096 ))
+        if [[ ${filesize} == 0 ]]; then
+          echo "Source partition ${part}.img is empty after rounding down," \
+            "skipping."
+          continue
+        fi
+      else
+        echo "Rounding UP partition ${part}.img to a multiple of 4 KiB."
+        : $(( filesize = (filesize + 4095) & -4096 ))
+      fi
+      truncate_file "${part_file}" "${filesize}"
+    fi
+
+    eval "${partitions_array}[\"${part}\"]=\"${part_file}\""
+    eval "${partitions_array}_MAP[\"${part}\"]=\"${part_map_file}\""
+    echo "Extracted ${partitions_array}[${part}]: ${filesize} bytes"
+  done
+}
+
+validate_generate() {
+  [[ -n "${FLAGS_payload}" ]] ||
+    die "You must specify an output filename with --payload FILENAME"
+
+  [[ -n "${FLAGS_target_image}" ]] ||
+    die "You must specify a target image with --target_image FILENAME"
+}
+
+cmd_generate() {
+  local payload_type="delta"
+  if [[ -z "${FLAGS_source_image}" ]]; then
+    payload_type="full"
+  fi
+
+  echo "Extracting images for ${payload_type} update."
+
+  extract_image "${FLAGS_target_image}" DST_PARTITIONS PARTITIONS_ORDER
+  if [[ "${payload_type}" == "delta" ]]; then
+    extract_image "${FLAGS_source_image}" SRC_PARTITIONS
+  fi
+
+  echo "Generating ${payload_type} update."
+  # Common payload args:
+  GENERATOR_ARGS=( -out_file="${FLAGS_payload}" )
+
+  local part old_partitions="" new_partitions="" partition_names=""
+  local old_mapfiles="" new_mapfiles=""
+  for part in "${PARTITIONS_ORDER[@]}"; do
+    if [[ -n "${partition_names}" ]]; then
+      partition_names+=":"
+      new_partitions+=":"
+      old_partitions+=":"
+      new_mapfiles+=":"
+      old_mapfiles+=":"
+    fi
+    partition_names+="${part}"
+    new_partitions+="${DST_PARTITIONS[${part}]}"
+    old_partitions+="${SRC_PARTITIONS[${part}]:-}"
+    new_mapfiles+="${DST_PARTITIONS_MAP[${part}]:-}"
+    old_mapfiles+="${SRC_PARTITIONS_MAP[${part}]:-}"
+  done
+
+  # Target image args:
+  GENERATOR_ARGS+=(
+    -partition_names="${partition_names}"
+    -new_partitions="${new_partitions}"
+    -new_mapfiles="${new_mapfiles}"
+  )
+
+  if [[ "${payload_type}" == "delta" ]]; then
+    # Source image args:
+    GENERATOR_ARGS+=(
+      -old_partitions="${old_partitions}"
+      -old_mapfiles="${old_mapfiles}"
+    )
+    if [[ -n "${FORCE_MINOR_VERSION}" ]]; then
+      GENERATOR_ARGS+=( --minor_version="${FORCE_MINOR_VERSION}" )
+    fi
+  fi
+
+  if [[ -n "${FORCE_MAJOR_VERSION}" ]]; then
+    GENERATOR_ARGS+=( --major_version="${FORCE_MAJOR_VERSION}" )
+  fi
+
+  if [[ -n "${FLAGS_metadata_size_file}" ]]; then
+    GENERATOR_ARGS+=( --out_metadata_size_file="${FLAGS_metadata_size_file}" )
+  fi
+
+  if [[ -n "${FLAGS_max_timestamp}" ]]; then
+    GENERATOR_ARGS+=( --max_timestamp="${FLAGS_max_timestamp}" )
+  fi
+
+  if [[ -n "${POSTINSTALL_CONFIG_FILE}" ]]; then
+    GENERATOR_ARGS+=(
+      --new_postinstall_config_file="${POSTINSTALL_CONFIG_FILE}"
+    )
+  fi
+
+  echo "Running delta_generator with args: ${GENERATOR_ARGS[@]}"
+  "${GENERATOR}" "${GENERATOR_ARGS[@]}"
+
+  echo "Done generating ${payload_type} update."
+}
+
+validate_hash() {
+  [[ -n "${FLAGS_signature_size}" ]] ||
+    die "You must specify signature size with --signature_size SIZES"
+
+  [[ -n "${FLAGS_unsigned_payload}" ]] ||
+    die "You must specify the input unsigned payload with \
+--unsigned_payload FILENAME"
+
+  [[ -n "${FLAGS_payload_hash_file}" ]] ||
+    die "You must specify --payload_hash_file FILENAME"
+
+  [[ -n "${FLAGS_metadata_hash_file}" ]] ||
+    die "You must specify --metadata_hash_file FILENAME"
+}
+
+cmd_hash() {
+  "${GENERATOR}" \
+      -in_file="${FLAGS_unsigned_payload}" \
+      -signature_size="${FLAGS_signature_size}" \
+      -out_hash_file="${FLAGS_payload_hash_file}" \
+      -out_metadata_hash_file="${FLAGS_metadata_hash_file}"
+
+  echo "Done generating hash."
+}
+
+validate_sign() {
+  [[ -n "${FLAGS_signature_size}" ]] ||
+    die "You must specify signature size with --signature_size SIZES"
+
+  [[ -n "${FLAGS_unsigned_payload}" ]] ||
+    die "You must specify the input unsigned payload with \
+--unsigned_payload FILENAME"
+
+  [[ -n "${FLAGS_payload}" ]] ||
+    die "You must specify the output signed payload with --payload FILENAME"
+
+  [[ -n "${FLAGS_payload_signature_file}" ]] ||
+    die "You must specify the payload signature file with \
+--payload_signature_file SIGNATURES"
+
+  [[ -n "${FLAGS_metadata_signature_file}" ]] ||
+    die "You must specify the metadata signature file with \
+--metadata_signature_file SIGNATURES"
+}
+
+cmd_sign() {
+  GENERATOR_ARGS=(
+    -in_file="${FLAGS_unsigned_payload}"
+    -signature_size="${FLAGS_signature_size}"
+    -signature_file="${FLAGS_payload_signature_file}"
+    -metadata_signature_file="${FLAGS_metadata_signature_file}"
+    -out_file="${FLAGS_payload}"
+  )
+
+  if [[ -n "${FLAGS_metadata_size_file}" ]]; then
+    GENERATOR_ARGS+=( --out_metadata_size_file="${FLAGS_metadata_size_file}" )
+  fi
+
+  "${GENERATOR}" "${GENERATOR_ARGS[@]}"
+  echo "Done signing payload."
+}
+
+validate_properties() {
+  [[ -n "${FLAGS_payload}" ]] ||
+    die "You must specify the payload file with --payload FILENAME"
+
+  [[ -n "${FLAGS_properties_file}" ]] ||
+    die "You must specify a non empty --properties_file FILENAME"
+}
+
+cmd_properties() {
+  "${GENERATOR}" \
+      -in_file="${FLAGS_payload}" \
+      -properties_file="${FLAGS_properties_file}"
+}
+
+validate_verify() {
+  [[ -n "${FLAGS_payload}" ]] ||
+    die "Error: you must specify an input filename with --payload FILENAME"
+
+  [[ -n "${FLAGS_target_image}" ]] ||
+    die "Error: you must specify a target image with --target_image FILENAME"
+}
+
+cmd_verify() {
+  local payload_type="delta"
+  if [[ -z "${FLAGS_source_image}" ]]; then
+    payload_type="full"
+  fi
+
+  echo "Extracting images for ${payload_type} update."
+
+  if [[ "${payload_type}" == "delta" ]]; then
+    extract_image "${FLAGS_source_image}" SRC_PARTITIONS
+  fi
+  extract_image "${FLAGS_target_image}" DST_PARTITIONS PARTITIONS_ORDER
+
+  declare -A TMP_PARTITIONS
+  for part in "${PARTITIONS_ORDER[@]}"; do
+    local tmp_part=$(create_tempfile "tmp_part.bin.XXXXXX")
+    echo "Creating temporary target partition ${tmp_part} for ${part}"
+    CLEANUP_FILES+=("${tmp_part}")
+    TMP_PARTITIONS[${part}]=${tmp_part}
+    local FILESIZE=$(stat -c%s "${DST_PARTITIONS[${part}]}")
+    echo "Truncating ${TMP_PARTITIONS[${part}]} to ${FILESIZE}"
+    truncate_file "${TMP_PARTITIONS[${part}]}" "${FILESIZE}"
+  done
+
+  echo "Verifying ${payload_type} update."
+  # Common payload args:
+  GENERATOR_ARGS=( -in_file="${FLAGS_payload}" )
+
+  local part old_partitions="" new_partitions="" partition_names=""
+  for part in "${PARTITIONS_ORDER[@]}"; do
+    if [[ -n "${partition_names}" ]]; then
+      partition_names+=":"
+      new_partitions+=":"
+      old_partitions+=":"
+    fi
+    partition_names+="${part}"
+    new_partitions+="${TMP_PARTITIONS[${part}]}"
+    old_partitions+="${SRC_PARTITIONS[${part}]:-}"
+  done
+
+  # Target image args:
+  GENERATOR_ARGS+=(
+    -partition_names="${partition_names}"
+    -new_partitions="${new_partitions}"
+  )
+
+  if [[ "${payload_type}" == "delta" ]]; then
+    # Source image args:
+    GENERATOR_ARGS+=(
+      -old_partitions="${old_partitions}"
+    )
+  fi
+
+  if [[ -n "${FORCE_MAJOR_VERSION}" ]]; then
+    GENERATOR_ARGS+=( --major_version="${FORCE_MAJOR_VERSION}" )
+  fi
+
+  echo "Running delta_generator to verify ${payload_type} payload with args: \
+${GENERATOR_ARGS[@]}"
+  "${GENERATOR}" "${GENERATOR_ARGS[@]}"
+
+  if [[ $? -eq 0 ]]; then
+    echo "Done applying ${payload_type} update."
+    echo "Checking the newly generated partitions against the target partitions"
+    for part in "${PARTITIONS_ORDER[@]}"; do
+      cmp "${TMP_PARTITIONS[${part}]}" "${DST_PARTITIONS[${part}]}"
+      local not_str=""
+      if [[ $? -ne 0 ]]; then
+        not_str="in"
+      fi
+      echo "The new partition (${part}) is ${not_str}valid."
+    done
+  else
+    echo "Failed to apply ${payload_type} update."
+  fi
+}
+
+# Sanity check that the real generator exists:
+GENERATOR="$(which delta_generator || true)"
+[[ -x "${GENERATOR}" ]] || die "can't find delta_generator"
+
+case "$COMMAND" in
+  generate) validate_generate
+            cmd_generate
+            ;;
+  hash) validate_hash
+        cmd_hash
+        ;;
+  sign) validate_sign
+        cmd_sign
+        ;;
+  properties) validate_properties
+              cmd_properties
+              ;;
+  verify) validate_verify
+          cmd_verify
+          ;;
+esac
diff --git a/src/support/ab_tools/scripts/common.py b/src/support/ab_tools/scripts/common.py
new file mode 100644
index 0000000..4d00169
--- /dev/null
+++ b/src/support/ab_tools/scripts/common.py
@@ -0,0 +1,1924 @@
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import copy
+import errno
+import getopt
+import getpass
+import gzip
+import imp
+import os
+import platform
+import re
+import shlex
+import shutil
+import string
+import subprocess
+import sys
+import tempfile
+import threading
+import time
+import zipfile
+from hashlib import sha1, sha256
+
+import blockimgdiff
+import sparse_img
+
+class Options(object):
+  def __init__(self):
+    platform_search_path = {
+        "linux2": "out/host/linux-x86",
+        "darwin": "out/host/darwin-x86",
+    }
+
+    self.search_path = platform_search_path.get(sys.platform, None)
+    self.signapk_path = "usr/bin/signapk.jar"  # Relative to search_path
+    self.signapk_shared_library_path = "lib64"   # Relative to search_path
+    self.extra_signapk_args = []
+    self.java_path = "java"  # Use the one on the path by default.
+    self.java_args = ["-Xmx2048m"]  # The default JVM args.
+    self.public_key_suffix = ".x509.pem"
+    self.private_key_suffix = ".pk8"
+    # use otatools built boot_signer by default
+    self.boot_signer_path = "boot_signer"
+    self.boot_signer_args = []
+    self.verity_signer_path = None
+    self.verity_signer_args = []
+    self.verbose = False
+    self.tempfiles = []
+    self.device_specific = None
+    self.extras = {}
+    self.info_dict = None
+    self.source_info_dict = None
+    self.target_info_dict = None
+    self.worker_threads = None
+    # Stash size cannot exceed cache_size * threshold.
+    self.cache_size = None
+    self.stash_threshold = 0.8
+
+
+OPTIONS = Options()
+
+
+# Values for "certificate" in apkcerts that mean special things.
+SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
+
+
+# The partitions allowed to be signed by AVB (Android verified boot 2.0).
+AVB_PARTITIONS = ('boot', 'recovery', 'system', 'vendor', 'product', 'dtbo')
+
+
+class ErrorCode(object):
+  """Define error_codes for failures that happen during the actual
+  update package installation.
+
+  Error codes 0-999 are reserved for failures before the package
+  installation (e.g. low battery, package verification failure).
+  Detailed codes are in 'bootable/recovery/error_code.h'. """
+
+  SYSTEM_VERIFICATION_FAILURE = 1000
+  SYSTEM_UPDATE_FAILURE = 1001
+  SYSTEM_UNEXPECTED_CONTENTS = 1002
+  SYSTEM_NONZERO_CONTENTS = 1003
+  SYSTEM_RECOVER_FAILURE = 1004
+  VENDOR_VERIFICATION_FAILURE = 2000
+  VENDOR_UPDATE_FAILURE = 2001
+  VENDOR_UNEXPECTED_CONTENTS = 2002
+  VENDOR_NONZERO_CONTENTS = 2003
+  VENDOR_RECOVER_FAILURE = 2004
+  OEM_PROP_MISMATCH = 3000
+  FINGERPRINT_MISMATCH = 3001
+  THUMBPRINT_MISMATCH = 3002
+  OLDER_BUILD = 3003
+  DEVICE_MISMATCH = 3004
+  BAD_PATCH_FILE = 3005
+  INSUFFICIENT_CACHE_SPACE = 3006
+  TUNE_PARTITION_FAILURE = 3007
+  APPLY_PATCH_FAILURE = 3008
+
+class ExternalError(RuntimeError):
+  pass
+
+
+def Run(args, verbose=None, **kwargs):
+  """Create and return a subprocess.Popen object.
+
+  Caller can specify if the command line should be printed. The global
+  OPTIONS.verbose will be used if not specified.
+  """
+  if verbose is None:
+    verbose = OPTIONS.verbose
+  if verbose:
+    print("  running: ", " ".join(args))
+  return subprocess.Popen(args, **kwargs)
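+
+# Typical usage (mirrors SplitLargeApks in blockimgdiff.py):
+#   p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+#   output, _ = p.communicate()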
+
+
+def RoundUpTo4K(value):
+  rounded_up = value + 4095
+  return rounded_up - (rounded_up % 4096)
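+
+# Illustrative values: RoundUpTo4K(0) == 0, RoundUpTo4K(1) == 4096,
+# RoundUpTo4K(4096) == 4096, and RoundUpTo4K(4097) == 8192.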
+
+
+def CloseInheritedPipes():
+  """ Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
+  before doing other work."""
+  if platform.system() != "Darwin":
+    return
+  for d in range(3, 1025):
+    try:
+      stat = os.fstat(d)
+      if stat is not None:
+        pipebit = stat[0] & 0x1000
+        if pipebit != 0:
+          os.close(d)
+    except OSError:
+      pass
+
+
+def LoadInfoDict(input_file, input_dir=None):
+  """Read and parse the META/misc_info.txt key/value pairs from the
+  input target files and return a dict."""
+
+  def read_helper(fn):
+    if isinstance(input_file, zipfile.ZipFile):
+      return input_file.read(fn)
+    else:
+      path = os.path.join(input_file, *fn.split("/"))
+      try:
+        with open(path) as f:
+          return f.read()
+      except IOError as e:
+        if e.errno == errno.ENOENT:
+          raise KeyError(fn)
+
+  try:
+    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
+  except KeyError:
+    raise ValueError("can't find META/misc_info.txt in input target-files")
+
+  assert "recovery_api_version" in d
+  assert "fstab_version" in d
+
+  # A few properties are stored as links to the files in the out/ directory.
+  # It works fine with the build system. However, they are no longer available
+  # when (re)generating from target_files zip. If input_dir is not None, we
+  # are doing repacking. Redirect those properties to the actual files in the
+  # unzipped directory.
+  if input_dir is not None:
+    # We carry a copy of file_contexts.bin under META/. If not available,
+    # search BOOT/RAMDISK/. Note that sometimes we may need a different file
+    # to build images than the one running on device, such as when enabling
+    # system_root_image. In that case, we must have the one for image
+    # generation copied to META/.
+    fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
+    fc_config = os.path.join(input_dir, "META", fc_basename)
+    if d.get("system_root_image") == "true":
+      assert os.path.exists(fc_config)
+    if not os.path.exists(fc_config):
+      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename)
+      if not os.path.exists(fc_config):
+        fc_config = None
+
+    if fc_config:
+      d["selinux_fc"] = fc_config
+
+    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
+    if d.get("system_root_image") == "true":
+      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
+      d["ramdisk_fs_config"] = os.path.join(
+          input_dir, "META", "root_filesystem_config.txt")
+
+    # Redirect {system,vendor}_base_fs_file.
+    if "system_base_fs_file" in d:
+      basename = os.path.basename(d["system_base_fs_file"])
+      system_base_fs_file = os.path.join(input_dir, "META", basename)
+      if os.path.exists(system_base_fs_file):
+        d["system_base_fs_file"] = system_base_fs_file
+      else:
+        print("Warning: failed to find system base fs file: %s" % (
+            system_base_fs_file,))
+        del d["system_base_fs_file"]
+
+    if "vendor_base_fs_file" in d:
+      basename = os.path.basename(d["vendor_base_fs_file"])
+      vendor_base_fs_file = os.path.join(input_dir, "META", basename)
+      if os.path.exists(vendor_base_fs_file):
+        d["vendor_base_fs_file"] = vendor_base_fs_file
+      else:
+        print("Warning: failed to find vendor base fs file: %s" % (
+            vendor_base_fs_file,))
+        del d["vendor_base_fs_file"]
+
+  def makeint(key):
+    if key in d:
+      d[key] = int(d[key], 0)
+
+  makeint("recovery_api_version")
+  makeint("blocksize")
+  makeint("system_size")
+  makeint("vendor_size")
+  makeint("userdata_size")
+  makeint("cache_size")
+  makeint("recovery_size")
+  makeint("boot_size")
+  makeint("fstab_version")
+
+  system_root_image = d.get("system_root_image", None) == "true"
+  if d.get("no_recovery", None) != "true":
+    recovery_fstab_path = "RECOVERY/RAMDISK/etc/recovery.fstab"
+    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
+        recovery_fstab_path, system_root_image)
+  elif d.get("recovery_as_boot", None) == "true":
+    recovery_fstab_path = "BOOT/RAMDISK/etc/recovery.fstab"
+    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
+        recovery_fstab_path, system_root_image)
+  else:
+    d["fstab"] = None
+
+  d["build.prop"] = LoadBuildProp(read_helper, 'SYSTEM/build.prop')
+  d["vendor.build.prop"] = LoadBuildProp(read_helper, 'VENDOR/build.prop')
+
+  # Set up the salt (based on fingerprint or thumbprint) that will be used when
+  # adding AVB footer.
+  if d.get("avb_enable") == "true":
+    fp = None
+    if "build.prop" in d:
+      build_prop = d["build.prop"]
+      if "ro.build.fingerprint" in build_prop:
+        fp = build_prop["ro.build.fingerprint"]
+      elif "ro.build.thumbprint" in build_prop:
+        fp = build_prop["ro.build.thumbprint"]
+    if fp:
+      d["avb_salt"] = sha256(fp).hexdigest()
+
+  return d
+
+
+def LoadBuildProp(read_helper, prop_file):
+  try:
+    data = read_helper(prop_file)
+  except KeyError:
+    print("Warning: could not read %s" % (prop_file,))
+    data = ""
+  return LoadDictionaryFromLines(data.split("\n"))
+
+
+def LoadDictionaryFromLines(lines):
+  d = {}
+  for line in lines:
+    line = line.strip()
+    if not line or line.startswith("#"):
+      continue
+    if "=" in line:
+      name, value = line.split("=", 1)
+      d[name] = value
+  return d
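+
+# For example (hypothetical input), LoadDictionaryFromLines(
+#     ["a=1", "# comment", "", "b=2=3"]) returns {"a": "1", "b": "2=3"};
+# comment and blank lines are skipped, and only the first '=' splits.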
+
+
+def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path,
+                      system_root_image=False):
+  class Partition(object):
+    def __init__(self, mount_point, fs_type, device, length, context):
+      self.mount_point = mount_point
+      self.fs_type = fs_type
+      self.device = device
+      self.length = length
+      self.context = context
+
+  try:
+    data = read_helper(recovery_fstab_path)
+  except KeyError:
+    print("Warning: could not find {}".format(recovery_fstab_path))
+    data = ""
+
+  assert fstab_version == 2
+
+  d = {}
+  for line in data.split("\n"):
+    line = line.strip()
+    if not line or line.startswith("#"):
+      continue
+
+    # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
+    pieces = line.split()
+    if len(pieces) != 5:
+      raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
+
+    # Ignore entries that are managed by vold.
+    options = pieces[4]
+    if "voldmanaged=" in options:
+      continue
+
+    # It's a good line, parse it.
+    length = 0
+    options = options.split(",")
+    for i in options:
+      if i.startswith("length="):
+        length = int(i[7:])
+      else:
+        # Ignore all unknown options in the unified fstab.
+        continue
+
+    mount_flags = pieces[3]
+    # Honor the SELinux context if present.
+    context = None
+    for i in mount_flags.split(","):
+      if i.startswith("context="):
+        context = i
+
+    mount_point = pieces[1]
+    d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
+                               device=pieces[0], length=length, context=context)
+
+  # / is used for the system mount point when the root directory is included
+  # in system. Other areas assume system is always at "/system", so point
+  # /system at /.
+  if system_root_image:
+    assert "/system" not in d and "/" in d
+    d["/system"] = d["/"]
+  return d
+
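+# Example of a v2 recovery.fstab line accepted above (hypothetical device
+# path): unknown fs_mgr options are ignored, "length=" is honored, and a
+# "context=" mount flag is kept as the SELinux context.
+#
+#   /dev/block/by-name/system  /system  ext4  ro,barrier=1  wait
+#
+# parses into d["/system"] with fs_type "ext4", length 0 and context None.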
+
+def DumpInfoDict(d):
+  for k, v in sorted(d.items()):
+    print("%-25s = (%s) %s" % (k, type(v).__name__, v))
+
+
+def AppendAVBSigningArgs(cmd, partition):
+  """Append signing arguments for avbtool."""
+  # e.g., "--key path/to/signing_key --algorithm SHA256_RSA4096"
+  key_path = OPTIONS.info_dict.get("avb_" + partition + "_key_path")
+  algorithm = OPTIONS.info_dict.get("avb_" + partition + "_algorithm")
+  if key_path and algorithm:
+    cmd.extend(["--key", key_path, "--algorithm", algorithm])
+  avb_salt = OPTIONS.info_dict.get("avb_salt")
+  # make_vbmeta_image doesn't like "--salt" (and it's not needed).
+  if avb_salt and partition != "vbmeta":
+    cmd.extend(["--salt", avb_salt])
+
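+# Sketch of the arguments appended above (hypothetical key path), assuming
+# the info dict defines avb_boot_key_path and avb_boot_algorithm:
+#
+#   cmd = ["avbtool", "add_hash_footer"]
+#   AppendAVBSigningArgs(cmd, "boot")
+#   # cmd now ends with: --key <key path> --algorithm SHA256_RSA4096
+#   #                    --salt <hex sha256 of the build fingerprint>
+#
+# For the "vbmeta" partition, "--salt" is intentionally omitted.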
+
+def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
+                        has_ramdisk=False, two_step_image=False):
+  """Build a bootable image from the specified sourcedir.
+
+  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
+  'sourcedir'), and turn them into a boot image. 'two_step_image' indicates if
+  we are building a two-step special image (i.e. building a recovery image to
+  be loaded into /boot in two-step OTAs).
+
+  Return the image data, or None if sourcedir does not appear to contain files
+  for building the requested image.
+  """
+
+  def make_ramdisk():
+    ramdisk_img = tempfile.NamedTemporaryFile()
+
+    if os.access(fs_config_file, os.F_OK):
+      cmd = ["mkbootfs", "-f", fs_config_file,
+             os.path.join(sourcedir, "RAMDISK")]
+    else:
+      cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
+    p1 = Run(cmd, stdout=subprocess.PIPE)
+    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
+
+    p2.wait()
+    p1.wait()
+    assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
+    assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)
+
+    return ramdisk_img
+
+  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
+    return None
+
+  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
+    return None
+
+  if info_dict is None:
+    info_dict = OPTIONS.info_dict
+
+  img = tempfile.NamedTemporaryFile()
+
+  if has_ramdisk:
+    ramdisk_img = make_ramdisk()
+
+  # Use MKBOOTIMG from the environment, or "mkbootimg" if unset or empty.
+  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
+
+  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]
+
+  fn = os.path.join(sourcedir, "second")
+  if os.access(fn, os.F_OK):
+    cmd.append("--second")
+    cmd.append(fn)
+
+  fn = os.path.join(sourcedir, "cmdline")
+  if os.access(fn, os.F_OK):
+    cmd.append("--cmdline")
+    cmd.append(open(fn).read().rstrip("\n"))
+
+  fn = os.path.join(sourcedir, "base")
+  if os.access(fn, os.F_OK):
+    cmd.append("--base")
+    cmd.append(open(fn).read().rstrip("\n"))
+
+  fn = os.path.join(sourcedir, "pagesize")
+  if os.access(fn, os.F_OK):
+    cmd.append("--pagesize")
+    cmd.append(open(fn).read().rstrip("\n"))
+
+  args = info_dict.get("mkbootimg_args", None)
+  if args and args.strip():
+    cmd.extend(shlex.split(args))
+
+  args = info_dict.get("mkbootimg_version_args", None)
+  if args and args.strip():
+    cmd.extend(shlex.split(args))
+
+  if has_ramdisk:
+    cmd.extend(["--ramdisk", ramdisk_img.name])
+
+  img_unsigned = None
+  if info_dict.get("vboot", None):
+    img_unsigned = tempfile.NamedTemporaryFile()
+    cmd.extend(["--output", img_unsigned.name])
+  else:
+    cmd.extend(["--output", img.name])
+
+  # "boot" or "recovery", without extension.
+  partition_name = os.path.basename(sourcedir).lower()
+
+  if (partition_name == "recovery" and
+      info_dict.get("include_recovery_dtbo") == "true"):
+    fn = os.path.join(sourcedir, "recovery_dtbo")
+    cmd.extend(["--recovery_dtbo", fn])
+
+  p = Run(cmd, stdout=subprocess.PIPE)
+  p.communicate()
+  assert p.returncode == 0, "mkbootimg of %s image failed" % (partition_name,)
+
+  if (info_dict.get("boot_signer", None) == "true" and
+      info_dict.get("verity_key", None)):
+    # Hard-code the path as "/boot" for two-step special recovery image (which
+    # will be loaded into /boot during the two-step OTA).
+    if two_step_image:
+      path = "/boot"
+    else:
+      path = "/" + partition_name
+    cmd = [OPTIONS.boot_signer_path]
+    cmd.extend(OPTIONS.boot_signer_args)
+    cmd.extend([path, img.name,
+                info_dict["verity_key"] + ".pk8",
+                info_dict["verity_key"] + ".x509.pem", img.name])
+    p = Run(cmd, stdout=subprocess.PIPE)
+    p.communicate()
+    assert p.returncode == 0, "boot_signer of %s image failed" % path
+
+  # Sign the image if vboot is non-empty.
+  elif info_dict.get("vboot", None):
+    path = "/" + partition_name
+    img_keyblock = tempfile.NamedTemporaryFile()
+    # We have switched from the prebuilt futility binary to using the tool
+    # (futility-host) built from the source. Override the setting in the old
+    # TF.zip.
+    futility = info_dict["futility"]
+    if futility.startswith("prebuilts/"):
+      futility = "futility-host"
+    cmd = [info_dict["vboot_signer_cmd"], futility,
+           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
+           info_dict["vboot_key"] + ".vbprivk",
+           info_dict["vboot_subkey"] + ".vbprivk",
+           img_keyblock.name,
+           img.name]
+    p = Run(cmd, stdout=subprocess.PIPE)
+    p.communicate()
+    assert p.returncode == 0, "vboot_signer of %s image failed" % path
+
+    # Clean up the temp files.
+    img_unsigned.close()
+    img_keyblock.close()
+
+  # AVB: if enabled, calculate and add hash to boot.img or recovery.img.
+  if info_dict.get("avb_enable") == "true":
+    avbtool = os.getenv('AVBTOOL') or info_dict["avb_avbtool"]
+    part_size = info_dict[partition_name + "_size"]
+    cmd = [avbtool, "add_hash_footer", "--image", img.name,
+           "--partition_size", str(part_size), "--partition_name",
+           partition_name]
+    AppendAVBSigningArgs(cmd, partition_name)
+    args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
+    if args and args.strip():
+      cmd.extend(shlex.split(args))
+    p = Run(cmd, stdout=subprocess.PIPE)
+    p.communicate()
+    assert p.returncode == 0, "avbtool add_hash_footer of %s failed" % (
+        partition_name,)
+
+  # Rewind before reading the image data back.
+  img.seek(0, os.SEEK_SET)
+  data = img.read()
+
+  if has_ramdisk:
+    ramdisk_img.close()
+  img.close()
+
+  return data
+
+
+def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
+                     info_dict=None, two_step_image=False):
+  """Return a File object with the desired bootable image.
+
+  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
+  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
+  the source files in 'unpack_dir'/'tree_subdir'."""
+
+  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
+  if os.path.exists(prebuilt_path):
+    print("using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,))
+    return File.FromLocalFile(name, prebuilt_path)
+
+  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
+  if os.path.exists(prebuilt_path):
+    print("using prebuilt %s from IMAGES..." % (prebuilt_name,))
+    return File.FromLocalFile(name, prebuilt_path)
+
+  print("building image from target_files %s..." % (tree_subdir,))
+
+  if info_dict is None:
+    info_dict = OPTIONS.info_dict
+
+  # When "system_root_image" is "true", the boot image is packed without the
+  # ramdisk, unless "recovery_as_boot" is also "true", in which case the boot
+  # image carries the ramdisk for recovery.
+  has_ramdisk = (info_dict.get("system_root_image") != "true" or
+                 prebuilt_name != "boot.img" or
+                 info_dict.get("recovery_as_boot") == "true")
+
+  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
+  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
+                             os.path.join(unpack_dir, fs_config),
+                             info_dict, has_ramdisk, two_step_image)
+  if data:
+    return File(name, data)
+  return None
+
+
+def Gunzip(in_filename, out_filename):
+  """Gunzip the given gzip compressed file to a given output file.
+  """
+  with gzip.open(in_filename, "rb") as in_file, open(out_filename, "wb") as out_file:
+    shutil.copyfileobj(in_file, out_file)
+
+
+def UnzipTemp(filename, pattern=None):
+  """Unzips the given archive into a temporary directory and returns the name.
+
+  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a temp dir,
+  then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
+
+  Returns:
+    The name of the temporary directory.
+  """
+
+  def unzip_to_dir(filename, dirname):
+    cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
+    if pattern is not None:
+      cmd.extend(pattern)
+    p = Run(cmd, stdout=subprocess.PIPE)
+    p.communicate()
+    if p.returncode != 0:
+      raise ExternalError("failed to unzip input target-files \"%s\"" %
+                          (filename,))
+
+  tmp = MakeTempDir(prefix="targetfiles-")
+  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
+  if m:
+    unzip_to_dir(m.group(1), tmp)
+    unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
+    filename = m.group(1)
+  else:
+    unzip_to_dir(filename, tmp)
+
+  return tmp
+
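+# Usage sketch (hypothetical filenames): a plain archive goes into the temp
+# dir, while the "a.zip+b.zip" form triggers two extractions:
+#
+#   tmp = UnzipTemp("target-files.zip+bootimgs.zip")
+#   # target-files.zip -> tmp/, bootimgs.zip -> tmp/BOOTABLE_IMAGES/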
+
+def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks):
+  """Returns a SparseImage object suitable for passing to BlockImageDiff.
+
+  This function loads the specified sparse image from the given path, and
+  performs additional processing for OTA purposes. For example, it always adds
+  block 0 to the clobbered blocks list. It also detects files that cannot be
+  reconstructed from the block list, for which we should avoid applying
+  imgdiff.
+
+  Args:
+    which: The partition name, which must be "system" or "vendor".
+    tmpdir: The directory that contains the prebuilt image and block map file.
+    input_zip: The target-files ZIP archive.
+    allow_shared_blocks: Whether having shared blocks is allowed.
+
+  Returns:
+    A SparseImage object, with file_map info loaded.
+  """
+  assert which in ("system", "vendor")
+
+  path = os.path.join(tmpdir, "IMAGES", which + ".img")
+  mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
+
+  # The image and map files must have been created prior to calling
+  # ota_from_target_files.py (since LMP).
+  assert os.path.exists(path) and os.path.exists(mappath)
+
+  # In ext4 filesystems, block 0 may be changed even when mounted R/O. We add
+  # it to clobbered_blocks so that it will be written to the target
+  # unconditionally. Note that clobbered blocks are still part of the
+  # care_map. (Bug: 20939131)
+  clobbered_blocks = "0"
+
+  image = sparse_img.SparseImage(path, mappath, clobbered_blocks,
+                                 allow_shared_blocks=allow_shared_blocks)
+
+  # block.map may contain fewer blocks, because mke2fs may skip allocating
+  # blocks if they contain all zeros. We can't reconstruct such a file from
+  # its block list. Tag such entries accordingly. (Bug: 65213616)
+  for entry in image.file_map:
+    # "/system/framework/am.jar" => "SYSTEM/framework/am.jar".
+    arcname = entry.replace(which, which.upper(), 1)[1:]
+    # Skip artificial names, such as "__ZERO", "__NONZERO-1".
+    if arcname not in input_zip.namelist():
+      continue
+
+    info = input_zip.getinfo(arcname)
+    ranges = image.file_map[entry]
+
+    # If a RangeSet has been tagged as using shared blocks while loading the
+    # image, its block list must be already incomplete due to that reason. Don't
+    # give it 'incomplete' tag to avoid messing up the imgdiff stats.
+    if ranges.extra.get('uses_shared_blocks'):
+      continue
+
+    if RoundUpTo4K(info.file_size) > ranges.size() * 4096:
+      ranges.extra['incomplete'] = True
+
+  return image
+
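+# The 'incomplete' check above compares sizes in whole 4096-byte blocks: a
+# file whose size rounds up to more blocks than its block list covers cannot
+# be reconstructed from that list. E.g. (made-up numbers), a 10,000-byte file
+# rounds up to 12,288 bytes (3 blocks); if its RangeSet spans only 2 blocks
+# (8,192 bytes), the entry is tagged incomplete.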
+
+def GetKeyPasswords(keylist):
+  """Given a list of keys, prompt the user to enter passwords for
+  those which require them.  Return a {key: password} dict.  password
+  will be None if the key has no password."""
+
+  no_passwords = []
+  need_passwords = []
+  key_passwords = {}
+  devnull = open("/dev/null", "w+b")
+  for k in sorted(keylist):
+    # We don't need a password for things that aren't really keys.
+    if k in SPECIAL_CERT_STRINGS:
+      no_passwords.append(k)
+      continue
+
+    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
+             "-inform", "DER", "-nocrypt"],
+            stdin=devnull.fileno(),
+            stdout=devnull.fileno(),
+            stderr=subprocess.STDOUT)
+    p.communicate()
+    if p.returncode == 0:
+      # Definitely an unencrypted key.
+      no_passwords.append(k)
+    else:
+      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
+               "-inform", "DER", "-passin", "pass:"],
+              stdin=devnull.fileno(),
+              stdout=devnull.fileno(),
+              stderr=subprocess.PIPE)
+      _, stderr = p.communicate()
+      if p.returncode == 0:
+        # Encrypted key with empty string as password.
+        key_passwords[k] = ''
+      elif stderr.startswith('Error decrypting key'):
+        # Definitely encrypted key.
+        # It would have said "Error reading key" if it didn't parse correctly.
+        need_passwords.append(k)
+      else:
+        # Potentially, a type of key that openssl doesn't understand.
+        # We'll let the routines in signapk.jar handle it.
+        no_passwords.append(k)
+  devnull.close()
+
+  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
+  key_passwords.update(dict.fromkeys(no_passwords, None))
+  return key_passwords
+
+
+def GetMinSdkVersion(apk_name):
+  """Get the minSdkVersion delared in the APK. This can be both a decimal number
+  (API Level) or a codename.
+  """
+
+  p = Run(["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE)
+  output, _ = p.communicate()
+  # stderr is not piped here, so check the return code (not stderr) for
+  # failure; the old "if err" test could never fire.
+  if p.returncode != 0:
+    raise ExternalError("Failed to obtain minSdkVersion: aapt return code %s"
+        % (p.returncode,))
+
+  for line in output.split("\n"):
+    # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'
+    m = re.match(r'sdkVersion:\'([^\']*)\'', line)
+    if m:
+      return m.group(1)
+  raise ExternalError("No minSdkVersion returned by aapt")
+
+
+def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
+  """Get the minSdkVersion declared in the APK as a number (API Level). If
+  minSdkVersion is set to a codename, it is translated to a number using the
+  provided map.
+  """
+
+  version = GetMinSdkVersion(apk_name)
+  try:
+    return int(version)
+  except ValueError:
+    # Not a decimal number. Codename?
+    if version in codename_to_api_level_map:
+      return codename_to_api_level_map[version]
+    else:
+      raise ExternalError("Unknown minSdkVersion: '%s'. Known codenames: %s"
+                          % (version, codename_to_api_level_map))
+
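+# Usage sketch (hypothetical map): a numeric minSdkVersion is returned as-is;
+# a codename is translated through the provided map.
+#
+#   GetMinSdkVersionInt("app.apk", {"O": 26})  # -> 26 for minSdkVersion 'O'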
+
+def SignFile(input_name, output_name, key, password, min_api_level=None,
+    codename_to_api_level_map=dict(),
+    whole_file=False):
+  """Sign the input_name zip/jar/apk, producing output_name.  Use the
+  given key and password (the latter may be None if the key does not
+  have a password).
+
+  If whole_file is true, use the "-w" option to SignApk to embed a
+  signature that covers the whole file in the archive comment of the
+  zip file.
+
+  min_api_level is the API Level (int) of the oldest platform this file may end
+  up on. If not specified for an APK, the API Level is obtained by interpreting
+  the minSdkVersion attribute of the APK's AndroidManifest.xml.
+
+  codename_to_api_level_map is needed to translate the codename which may be
+  encountered as the APK's minSdkVersion.
+  """
+
+  java_library_path = os.path.join(
+      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)
+
+  cmd = ([OPTIONS.java_path] + OPTIONS.java_args +
+         ["-Djava.library.path=" + java_library_path,
+          "-jar", os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)] +
+         OPTIONS.extra_signapk_args)
+  if whole_file:
+    cmd.append("-w")
+
+  min_sdk_version = min_api_level
+  if min_sdk_version is None:
+    if not whole_file:
+      min_sdk_version = GetMinSdkVersionInt(
+          input_name, codename_to_api_level_map)
+  if min_sdk_version is not None:
+    cmd.extend(["--min-sdk-version", str(min_sdk_version)])
+
+  cmd.extend([key + OPTIONS.public_key_suffix,
+              key + OPTIONS.private_key_suffix,
+              input_name, output_name])
+
+  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+  if password is not None:
+    password += "\n"
+  p.communicate(password)
+  if p.returncode != 0:
+    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
+
+
+def CheckSize(data, target, info_dict):
+  """Checks the data string passed against the max size limit.
+
+  For non-AVB images, raises an exception if the data is too big, and prints
+  a warning if the data is nearing the maximum size.
+
+  For AVB images, the actual image size should be identical to the limit.
+
+  Args:
+    data: A string that contains all the data for the partition.
+    target: The partition name. The ".img" suffix is optional.
+    info_dict: The dict to be looked up for relevant info.
+  """
+  if target.endswith(".img"):
+    target = target[:-4]
+  mount_point = "/" + target
+
+  fs_type = None
+  limit = None
+  if info_dict["fstab"]:
+    if mount_point == "/userdata":
+      mount_point = "/data"
+    p = info_dict["fstab"][mount_point]
+    fs_type = p.fs_type
+    device = p.device
+    if "/" in device:
+      device = device[device.rfind("/")+1:]
+    limit = info_dict.get(device + "_size", None)
+  if not fs_type or not limit:
+    return
+
+  size = len(data)
+  # target could be 'userdata' or 'cache'. They should follow the non-AVB image
+  # path.
+  if info_dict.get("avb_enable") == "true" and target in AVB_PARTITIONS:
+    if size != limit:
+      raise ExternalError(
+          "Mismatching image size for %s: expected %d actual %d" % (
+              target, limit, size))
+  else:
+    pct = float(size) * 100.0 / limit
+    msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
+    if pct >= 99.0:
+      raise ExternalError(msg)
+    elif pct >= 95.0:
+      print("\n  WARNING: %s\n" % (msg,))
+    elif OPTIONS.verbose:
+      print("  ", msg)
+
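+# Worked example of the non-AVB threshold math above (made-up sizes): with a
+# 980 MiB image and a 1024 MiB limit, pct = 980 * 100.0 / 1024 = 95.70, which
+# is >= 95 but < 99, so a WARNING is printed rather than raising ExternalError.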
+
+def ReadApkCerts(tf_zip):
+  """Parses the APK certs info from a given target-files zip.
+
+  Given a target-files ZipFile, parses the META/apkcerts.txt entry and returns
+  a tuple with the following elements: (1) a dictionary that maps packages to
+  certs (based on the "certificate" and "private_key" attributes in the file);
+  (2) a string representing the extension of compressed APKs in the target
+  files (e.g. ".gz", ".bro").
+
+  Args:
+    tf_zip: The input target_files ZipFile (already open).
+
+  Returns:
+    (certmap, ext): certmap is a dictionary that maps packages to certs; ext is
+        the extension string of compressed APKs (e.g. ".gz"), or None if there's
+        no compressed APKs.
+  """
+  certmap = {}
+  compressed_extension = None
+
+  # META/apkcerts.txt contains the info for _all_ the packages known at build
+  # time. Filter out the ones that are not installed.
+  installed_files = set()
+  for name in tf_zip.namelist():
+    basename = os.path.basename(name)
+    if basename:
+      installed_files.add(basename)
+
+  for line in tf_zip.read("META/apkcerts.txt").split("\n"):
+    line = line.strip()
+    if not line:
+      continue
+    m = re.match(
+        r'^name="(?P<NAME>.*)"\s+certificate="(?P<CERT>.*)"\s+'
+        r'private_key="(?P<PRIVKEY>.*?)"(\s+compressed="(?P<COMPRESSED>.*)")?$',
+        line)
+    if not m:
+      continue
+
+    matches = m.groupdict()
+    cert = matches["CERT"]
+    privkey = matches["PRIVKEY"]
+    name = matches["NAME"]
+    this_compressed_extension = matches["COMPRESSED"]
+
+    public_key_suffix_len = len(OPTIONS.public_key_suffix)
+    private_key_suffix_len = len(OPTIONS.private_key_suffix)
+    if cert in SPECIAL_CERT_STRINGS and not privkey:
+      certmap[name] = cert
+    elif (cert.endswith(OPTIONS.public_key_suffix) and
+          privkey.endswith(OPTIONS.private_key_suffix) and
+          cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
+      certmap[name] = cert[:-public_key_suffix_len]
+    else:
+      raise ValueError("Failed to parse line from apkcerts.txt:\n" + line)
+
+    if not this_compressed_extension:
+      continue
+
+    # Only count the installed files.
+    filename = name + '.' + this_compressed_extension
+    if filename not in installed_files:
+      continue
+
+    # Make sure that all the values in the compression map have the same
+    # extension. We don't support multiple compression methods in the same
+    # system image.
+    if compressed_extension:
+      if this_compressed_extension != compressed_extension:
+        raise ValueError(
+            "Multiple compressed extensions: {} vs {}".format(
+                compressed_extension, this_compressed_extension))
+    else:
+      compressed_extension = this_compressed_extension
+
+  return (certmap,
+          ("." + compressed_extension) if compressed_extension else None)
+
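+# Example apkcerts.txt line accepted above (hypothetical paths), assuming the
+# default ".x509.pem" / ".pk8" key suffixes:
+#
+#   name="App.apk" certificate="certs/app.x509.pem" private_key="certs/app.pk8"
+#
+# yields certmap["App.apk"] == "certs/app"; with no compressed="..." attribute,
+# the compressed extension is left unchanged.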
+
+COMMON_DOCSTRING = """
+  -p  (--path)  <dir>
+      Prepend <dir>/bin to the list of places to search for binaries
+      run by this script, and expect to find jars in <dir>/framework.
+
+  -s  (--device_specific) <file>
+      Path to the python module containing device-specific
+      releasetools code.
+
+  -x  (--extra)  <key=value>
+      Add a key/value pair to the 'extras' dict, which device-specific
+      extension code may look at.
+
+  -v  (--verbose)
+      Show command lines being executed.
+
+  -h  (--help)
+      Display this usage message and exit.
+"""
+
+def Usage(docstring):
+  print(docstring.rstrip("\n"))
+  print(COMMON_DOCSTRING)
+
+
+def ParseOptions(argv,
+                 docstring,
+                 extra_opts="", extra_long_opts=(),
+                 extra_option_handler=None):
+  """Parse the options in argv and return any arguments that aren't
+  flags.  docstring is the calling module's docstring, to be displayed
+  for errors and -h.  extra_opts and extra_long_opts are for flags
+  defined by the caller, which are processed by passing them to
+  extra_option_handler."""
+
+  try:
+    opts, args = getopt.getopt(
+        argv, "hvp:s:x:" + extra_opts,
+        ["help", "verbose", "path=", "signapk_path=",
+         "signapk_shared_library_path=", "extra_signapk_args=",
+         "java_path=", "java_args=", "public_key_suffix=",
+         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
+         "verity_signer_path=", "verity_signer_args=", "device_specific=",
+         "extra="] +
+        list(extra_long_opts))
+  except getopt.GetoptError as err:
+    Usage(docstring)
+    print("**", str(err), "**")
+    sys.exit(2)
+
+  for o, a in opts:
+    if o in ("-h", "--help"):
+      Usage(docstring)
+      sys.exit()
+    elif o in ("-v", "--verbose"):
+      OPTIONS.verbose = True
+    elif o in ("-p", "--path"):
+      OPTIONS.search_path = a
+    elif o in ("--signapk_path",):
+      OPTIONS.signapk_path = a
+    elif o in ("--signapk_shared_library_path",):
+      OPTIONS.signapk_shared_library_path = a
+    elif o in ("--extra_signapk_args",):
+      OPTIONS.extra_signapk_args = shlex.split(a)
+    elif o in ("--java_path",):
+      OPTIONS.java_path = a
+    elif o in ("--java_args",):
+      OPTIONS.java_args = shlex.split(a)
+    elif o in ("--public_key_suffix",):
+      OPTIONS.public_key_suffix = a
+    elif o in ("--private_key_suffix",):
+      OPTIONS.private_key_suffix = a
+    elif o in ("--boot_signer_path",):
+      OPTIONS.boot_signer_path = a
+    elif o in ("--boot_signer_args",):
+      OPTIONS.boot_signer_args = shlex.split(a)
+    elif o in ("--verity_signer_path",):
+      OPTIONS.verity_signer_path = a
+    elif o in ("--verity_signer_args",):
+      OPTIONS.verity_signer_args = shlex.split(a)
+    elif o in ("-s", "--device_specific"):
+      OPTIONS.device_specific = a
+    elif o in ("-x", "--extra"):
+      key, value = a.split("=", 1)
+      OPTIONS.extras[key] = value
+    else:
+      if extra_option_handler is None or not extra_option_handler(o, a):
+        assert False, "unknown option \"%s\"" % (o,)
+
+  if OPTIONS.search_path:
+    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
+                          os.pathsep + os.environ["PATH"])
+
+  return args
+
+
+def MakeTempFile(prefix='tmp', suffix=''):
+  """Make a temp file and add it to the list of things to be deleted
+  when Cleanup() is called.  Return the filename."""
+  fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
+  os.close(fd)
+  OPTIONS.tempfiles.append(fn)
+  return fn
+
+
+def MakeTempDir(prefix='tmp', suffix=''):
+  """Makes a temporary dir that will be cleaned up with a call to Cleanup().
+
+  Returns:
+    The absolute pathname of the new directory.
+  """
+  dir_name = tempfile.mkdtemp(suffix=suffix, prefix=prefix)
+  OPTIONS.tempfiles.append(dir_name)
+  return dir_name
+
+
+def Cleanup():
+  for i in OPTIONS.tempfiles:
+    if os.path.isdir(i):
+      shutil.rmtree(i, ignore_errors=True)
+    else:
+      os.remove(i)
+  del OPTIONS.tempfiles[:]
+
+
+class PasswordManager(object):
+  def __init__(self):
+    self.editor = os.getenv("EDITOR", None)
+    self.pwfile = os.getenv("ANDROID_PW_FILE", None)
+
+  def GetPasswords(self, items):
+    """Get passwords corresponding to each string in 'items',
+    returning a dict.  (The dict may have keys in addition to the
+    values in 'items'.)
+
+    Uses the passwords in $ANDROID_PW_FILE if available, letting the
+    user edit that file to add more needed passwords.  If no editor is
+    available, or $ANDROID_PW_FILE isn't defined, prompts the user
+    interactively in the ordinary way.
+    """
+
+    current = self.ReadFile()
+
+    first = True
+    while True:
+      missing = []
+      for i in items:
+        if i not in current or not current[i]:
+          missing.append(i)
+      # Are all the passwords already in the file?
+      if not missing:
+        return current
+
+      for i in missing:
+        current[i] = ""
+
+      if not first:
+        print("key file %s still missing some passwords." % (self.pwfile,))
+        answer = raw_input("try to edit again? [y]> ").strip()
+        if answer and answer[0] not in 'yY':
+          raise RuntimeError("key passwords unavailable")
+      first = False
+
+      current = self.UpdateAndReadFile(current)
+
+  def PromptResult(self, current): # pylint: disable=no-self-use
+    """Prompt the user to enter a value (password) for each key in
+    'current' whose value is false.  Returns a new dict with all the
+    values.
+    """
+    result = {}
+    for k, v in sorted(current.iteritems()):
+      if v:
+        result[k] = v
+      else:
+        while True:
+          result[k] = getpass.getpass(
+              "Enter password for %s key> " % k).strip()
+          if result[k]:
+            break
+    return result
+
+  def UpdateAndReadFile(self, current):
+    if not self.editor or not self.pwfile:
+      return self.PromptResult(current)
+
+    f = open(self.pwfile, "w")
+    os.chmod(self.pwfile, 0o600)
+    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
+    f.write("# (Additional spaces are harmless.)\n\n")
+
+    first_line = None
+    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
+    for i, (_, k, v) in enumerate(sorted_list):
+      f.write("[[[  %s  ]]] %s\n" % (v, k))
+      if not v and first_line is None:
+        # position cursor on first line with no password.
+        first_line = i + 4
+    f.close()
+
+    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
+    _, _ = p.communicate()
+
+    return self.ReadFile()
+
+  def ReadFile(self):
+    result = {}
+    if self.pwfile is None:
+      return result
+    try:
+      f = open(self.pwfile, "r")
+      for line in f:
+        line = line.strip()
+        if not line or line[0] == '#':
+          continue
+        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
+        if not m:
+          print("failed to parse password file: ", line)
+        else:
+          result[m.group(2)] = m.group(1)
+      f.close()
+    except IOError as e:
+      if e.errno != errno.ENOENT:
+        print("error reading password file: ", str(e))
+    return result
+
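+# The password file parsed by ReadFile() above is the same format that
+# UpdateAndReadFile() writes: one entry per line, e.g. (hypothetical key name)
+#
+#   [[[  secret123  ]]] build/target/product/security/releasekey
+#
+# which is read back as {"build/target/product/security/releasekey":
+# "secret123"}.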
+
+def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
+             compress_type=None):
+  import datetime
+
+  # http://b/18015246
+  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
+  # for files larger than 2GiB. We can work around this by adjusting the
+  # limit. Note that `zipfile.writestr()` will not work for strings larger than
+  # 2GiB. The Python interpreter sometimes rejects strings that large (though
+  # it isn't clear to me exactly what circumstances cause this).
+  # `zipfile.write()` must be used directly to work around this.
+  #
+  # This mess can be avoided if we port to python3.
+  saved_zip64_limit = zipfile.ZIP64_LIMIT
+  zipfile.ZIP64_LIMIT = (1 << 32) - 1
+
+  if compress_type is None:
+    compress_type = zip_file.compression
+  if arcname is None:
+    arcname = filename
+
+  saved_stat = os.stat(filename)
+
+  try:
+    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
+    # file to be zipped and reset it when we're done.
+    os.chmod(filename, perms)
+
+    # Use a fixed timestamp so the output is repeatable.
+    epoch = datetime.datetime.fromtimestamp(0)
+    timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
+    os.utime(filename, (timestamp, timestamp))
+
+    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
+  finally:
+    os.chmod(filename, saved_stat.st_mode)
+    os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
+    zipfile.ZIP64_LIMIT = saved_zip64_limit
+
+
+def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
+                compress_type=None):
+  """Wrap zipfile.writestr() function to work around the zip64 limit.
+
+  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
+  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
+  when calling crc32(bytes).
+
+  But it still works fine to write a shorter string into a large zip file.
+  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
+  when we know the string won't be too long.
+  """
+
+  saved_zip64_limit = zipfile.ZIP64_LIMIT
+  zipfile.ZIP64_LIMIT = (1 << 32) - 1
+
+  if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
+    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
+    zinfo.compress_type = zip_file.compression
+    if perms is None:
+      perms = 0o100644
+  else:
+    zinfo = zinfo_or_arcname
+
+  # If compress_type is given, it overrides the value in zinfo.
+  if compress_type is not None:
+    zinfo.compress_type = compress_type
+
+  # If perms is given, it takes priority.
+  if perms is not None:
+    # If perms doesn't set the file type, mark it as a regular file.
+    if perms & 0o770000 == 0:
+      perms |= 0o100000
+    zinfo.external_attr = perms << 16
+
+  # Use a fixed timestamp so the output is repeatable.
+  zinfo.date_time = (2009, 1, 1, 0, 0, 0)
+
+  zip_file.writestr(zinfo, data)
+  zipfile.ZIP64_LIMIT = saved_zip64_limit
+
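+# Example of the permission encoding above (standard zip convention): perms
+# 0o644 has no file-type bits, so it becomes 0o100644 (regular file), and is
+# stored in the high 16 bits, i.e. zinfo.external_attr == 0o100644 << 16.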
+
+def ZipDelete(zip_filename, entries):
+  """Deletes entries from a ZIP file.
+
+  Since deleting entries from a ZIP file is not supported, it shells out to
+  'zip -d'.
+
+  Args:
+    zip_filename: The name of the ZIP file.
+    entries: The name of the entry, or the list of names to be deleted.
+
+  Raises:
+    AssertionError: In case of non-zero return from 'zip'.
+  """
+  if isinstance(entries, basestring):
+    entries = [entries]
+  cmd = ["zip", "-d", zip_filename] + entries
+  proc = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+  stdoutdata, _ = proc.communicate()
+  assert proc.returncode == 0, "Failed to delete %s:\n%s" % (entries,
+                                                             stdoutdata)
+
+
+def ZipClose(zip_file):
+  # http://b/18015246
+  # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
+  # central directory.
+  saved_zip64_limit = zipfile.ZIP64_LIMIT
+  zipfile.ZIP64_LIMIT = (1 << 32) - 1
+
+  zip_file.close()
+
+  zipfile.ZIP64_LIMIT = saved_zip64_limit
+
+
+class DeviceSpecificParams(object):
+  module = None
+  def __init__(self, **kwargs):
+    """Keyword arguments to the constructor become attributes of this
+    object, which is passed to all functions in the device-specific
+    module."""
+    for k, v in kwargs.iteritems():
+      setattr(self, k, v)
+    self.extras = OPTIONS.extras
+
+    if self.module is None:
+      path = OPTIONS.device_specific
+      if not path:
+        return
+      try:
+        if os.path.isdir(path):
+          info = imp.find_module("releasetools", [path])
+        else:
+          d, f = os.path.split(path)
+          b, x = os.path.splitext(f)
+          if x == ".py":
+            f = b
+          info = imp.find_module(f, [d])
+        print("loaded device-specific extensions from", path)
+        self.module = imp.load_module("device_specific", *info)
+      except ImportError:
+        print("unable to load device-specific module; assuming none")
+
+  def _DoCall(self, function_name, *args, **kwargs):
+    """Call the named function in the device-specific module, passing
+    the given args and kwargs.  The first argument to the call will be
+    the DeviceSpecific object itself.  If there is no module, or the
+    module does not define the function, return the value of the
+    'default' kwarg (which itself defaults to None)."""
+    if self.module is None or not hasattr(self.module, function_name):
+      return kwargs.get("default", None)
+    return getattr(self.module, function_name)(*((self,) + args), **kwargs)
+
+  def FullOTA_Assertions(self):
+    """Called after emitting the block of assertions at the top of a
+    full OTA package.  Implementations can add whatever additional
+    assertions they like."""
+    return self._DoCall("FullOTA_Assertions")
+
+  def FullOTA_InstallBegin(self):
+    """Called at the start of full OTA installation."""
+    return self._DoCall("FullOTA_InstallBegin")
+
+  def FullOTA_InstallEnd(self):
+    """Called at the end of full OTA installation; typically this is
+    used to install the image for the device's baseband processor."""
+    return self._DoCall("FullOTA_InstallEnd")
+
+  def IncrementalOTA_Assertions(self):
+    """Called after emitting the block of assertions at the top of an
+    incremental OTA package.  Implementations can add whatever
+    additional assertions they like."""
+    return self._DoCall("IncrementalOTA_Assertions")
+
+  def IncrementalOTA_VerifyBegin(self):
+    """Called at the start of the verification phase of incremental
+    OTA installation; additional checks can be placed here to abort
+    the script before any changes are made."""
+    return self._DoCall("IncrementalOTA_VerifyBegin")
+
+  def IncrementalOTA_VerifyEnd(self):
+    """Called at the end of the verification phase of incremental OTA
+    installation; additional checks can be placed here to abort the
+    script before any changes are made."""
+    return self._DoCall("IncrementalOTA_VerifyEnd")
+
+  def IncrementalOTA_InstallBegin(self):
+    """Called at the start of incremental OTA installation (after
+    verification is complete)."""
+    return self._DoCall("IncrementalOTA_InstallBegin")
+
+  def IncrementalOTA_InstallEnd(self):
+    """Called at the end of incremental OTA installation; typically
+    this is used to install the image for the device's baseband
+    processor."""
+    return self._DoCall("IncrementalOTA_InstallEnd")
+
+  def VerifyOTA_Assertions(self):
+    return self._DoCall("VerifyOTA_Assertions")
+
+class File(object):
+  def __init__(self, name, data, compress_size=None):
+    self.name = name
+    self.data = data
+    self.size = len(data)
+    self.compress_size = compress_size or self.size
+    self.sha1 = sha1(data).hexdigest()
+
+  @classmethod
+  def FromLocalFile(cls, name, diskname):
+    f = open(diskname, "rb")
+    data = f.read()
+    f.close()
+    return File(name, data)
+
+  def WriteToTemp(self):
+    t = tempfile.NamedTemporaryFile()
+    t.write(self.data)
+    t.flush()
+    return t
+
+  def WriteToDir(self, d):
+    with open(os.path.join(d, self.name), "wb") as fp:
+      fp.write(self.data)
+
+  def AddToZip(self, z, compression=None):
+    ZipWriteStr(z, self.name, self.data, compress_type=compression)
+
+DIFF_PROGRAM_BY_EXT = {
+    ".gz" : "imgdiff",
+    ".zip" : ["imgdiff", "-z"],
+    ".jar" : ["imgdiff", "-z"],
+    ".apk" : ["imgdiff", "-z"],
+    ".img" : "imgdiff",
+    }
+
+class Difference(object):
+  def __init__(self, tf, sf, diff_program=None):
+    self.tf = tf
+    self.sf = sf
+    self.patch = None
+    self.diff_program = diff_program
+
+  def ComputePatch(self):
+    """Compute the patch (as a string of data) needed to turn sf into
+    tf.  Returns the same tuple as GetPatch()."""
+
+    tf = self.tf
+    sf = self.sf
+
+    if self.diff_program:
+      diff_program = self.diff_program
+    else:
+      ext = os.path.splitext(tf.name)[1]
+      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
+
+    ttemp = tf.WriteToTemp()
+    stemp = sf.WriteToTemp()
+
+    ext = os.path.splitext(tf.name)[1]
+
+    try:
+      ptemp = tempfile.NamedTemporaryFile()
+      if isinstance(diff_program, list):
+        cmd = copy.copy(diff_program)
+      else:
+        cmd = [diff_program]
+      cmd.append(stemp.name)
+      cmd.append(ttemp.name)
+      cmd.append(ptemp.name)
+      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+      err = []
+      def run():
+        _, e = p.communicate()
+        if e:
+          err.append(e)
+      th = threading.Thread(target=run)
+      th.start()
+      th.join(timeout=300)   # 5 mins
+      if th.is_alive():
+        print("WARNING: diff command timed out")
+        p.terminate()
+        th.join(5)
+        if th.is_alive():
+          p.kill()
+          th.join()
+
+      if p.returncode != 0:
+        print("WARNING: failure running %s:\n%s\n" % (
+            diff_program, "".join(err)))
+        self.patch = None
+        return None, None, None
+      diff = ptemp.read()
+    finally:
+      ptemp.close()
+      stemp.close()
+      ttemp.close()
+
+    self.patch = diff
+    return self.tf, self.sf, self.patch
+
+
+  def GetPatch(self):
+    """Return a tuple (target_file, source_file, patch_data).
+    patch_data may be None if ComputePatch hasn't been called, or if
+    computing the patch failed."""
+    return self.tf, self.sf, self.patch
+
+
+def ComputeDifferences(diffs):
+  """Call ComputePatch on all the Difference objects in 'diffs'."""
+  print(len(diffs), "diffs to compute")
+
+  # Do the largest files first, to try and reduce the long-pole effect.
+  by_size = [(i.tf.size, i) for i in diffs]
+  by_size.sort(reverse=True)
+  by_size = [i[1] for i in by_size]
+
+  lock = threading.Lock()
+  diff_iter = iter(by_size)   # accessed under lock
+
+  def worker():
+    try:
+      lock.acquire()
+      for d in diff_iter:
+        lock.release()
+        start = time.time()
+        d.ComputePatch()
+        dur = time.time() - start
+        lock.acquire()
+
+        tf, sf, patch = d.GetPatch()
+        if sf.name == tf.name:
+          name = tf.name
+        else:
+          name = "%s (%s)" % (tf.name, sf.name)
+        if patch is None:
+          print("patching failed!                                  %s" % (name,))
+        else:
+          print("%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
+              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name))
+      lock.release()
+    except Exception as e:
+      print(e)
+      raise
+
+  # start worker threads; wait for them all to finish.
+  threads = [threading.Thread(target=worker)
+             for i in range(OPTIONS.worker_threads)]
+  for th in threads:
+    th.start()
+  while threads:
+    threads.pop().join()
+
+
+class BlockDifference(object):
+  def __init__(self, partition, tgt, src=None, check_first_block=False,
+               version=None, disable_imgdiff=False):
+    self.tgt = tgt
+    self.src = src
+    self.partition = partition
+    self.check_first_block = check_first_block
+    self.disable_imgdiff = disable_imgdiff
+
+    if version is None:
+      version = max(
+          int(i) for i in
+          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
+    assert version >= 3
+    self.version = version
+
+    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
+                                    version=self.version,
+                                    disable_imgdiff=self.disable_imgdiff)
+    self.path = os.path.join(MakeTempDir(), partition)
+    b.Compute(self.path)
+    self._required_cache = b.max_stashed_size
+    self.touched_src_ranges = b.touched_src_ranges
+    self.touched_src_sha1 = b.touched_src_sha1
+
+    if src is None:
+      _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
+    else:
+      _, self.device = GetTypeAndDevice("/" + partition,
+                                        OPTIONS.source_info_dict)
+
+  @property
+  def required_cache(self):
+    return self._required_cache
+
+  def WriteScript(self, script, output_zip, progress=None):
+    if not self.src:
+      # write the output unconditionally
+      script.Print("Patching %s image unconditionally..." % (self.partition,))
+    else:
+      script.Print("Patching %s image after verification." % (self.partition,))
+
+    if progress:
+      script.ShowProgress(progress, 0)
+    self._WriteUpdate(script, output_zip)
+    if OPTIONS.verify:
+      self._WritePostInstallVerifyScript(script)
+
+  def WriteStrictVerifyScript(self, script):
+    """Verify all the blocks in the care_map, including clobbered blocks.
+
+    This differs from the WriteVerifyScript() function: a) it prints different
+    error messages; b) it doesn't allow half-way updated images to pass the
+    verification."""
+
+    partition = self.partition
+    script.Print("Verifying %s..." % (partition,))
+    ranges = self.tgt.care_map
+    ranges_str = ranges.to_string_raw()
+    script.AppendExtra('range_sha1("%s", "%s") == "%s" && '
+                       'ui_print("    Verified.") || '
+                       'ui_print("\\"%s\\" has unexpected contents.");' % (
+                       self.device, ranges_str,
+                       self.tgt.TotalSha1(include_clobbered_blocks=True),
+                       self.device))
+    script.AppendExtra("")
+
+  def WriteVerifyScript(self, script, touched_blocks_only=False):
+    partition = self.partition
+
+    # full OTA
+    if not self.src:
+      script.Print("Image %s will be patched unconditionally." % (partition,))
+
+    # incremental OTA
+    else:
+      if touched_blocks_only:
+        ranges = self.touched_src_ranges
+        expected_sha1 = self.touched_src_sha1
+      else:
+        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
+        expected_sha1 = self.src.TotalSha1()
+
+      # No blocks to be checked, skipping.
+      if not ranges:
+        return
+
+      ranges_str = ranges.to_string_raw()
+      script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
+                          'block_image_verify("%s", '
+                          'package_extract_file("%s.transfer.list"), '
+                          '"%s.new.dat", "%s.patch.dat")) then') % (
+                          self.device, ranges_str, expected_sha1,
+                          self.device, partition, partition, partition))
+      script.Print('Verified %s image...' % (partition,))
+      script.AppendExtra('else')
+
+      if self.version >= 4:
+
+        # Bug: 21124327
+        # When generating incrementals for the system and vendor partitions in
+        # version 4 or newer, explicitly check the first block (which contains
+        # the superblock) of the partition to see if it's what we expect. If
+        # this check fails, give an explicit log message about the partition
+        # having been remounted R/W (the most likely explanation).
+        if self.check_first_block:
+          script.AppendExtra('check_first_block("%s");' % (self.device,))
+
+        # If version >= 4, try block recovery before abort update
+        if partition == "system":
+          code = ErrorCode.SYSTEM_RECOVER_FAILURE
+        else:
+          code = ErrorCode.VENDOR_RECOVER_FAILURE
+        script.AppendExtra((
+            'ifelse (block_image_recover("{device}", "{ranges}") && '
+            'block_image_verify("{device}", '
+            'package_extract_file("{partition}.transfer.list"), '
+            '"{partition}.new.dat", "{partition}.patch.dat"), '
+            'ui_print("{partition} recovered successfully."), '
+            'abort("E{code}: {partition} partition fails to recover"));\n'
+            'endif;').format(device=self.device, ranges=ranges_str,
+                             partition=partition, code=code))
+
+      # Abort the OTA update. Note that the incremental OTA cannot be applied
+      # even if it may match the checksum of the target partition.
+      # a) If version < 3, operations like move and erase will make changes
+      #    unconditionally and damage the partition.
+      # b) If version >= 3, it won't even reach here.
+      else:
+        if partition == "system":
+          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
+        else:
+          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
+        script.AppendExtra((
+            'abort("E%d: %s partition has unexpected contents");\n'
+            'endif;') % (code, partition))
+
+  def _WritePostInstallVerifyScript(self, script):
+    partition = self.partition
+    script.Print('Verifying the updated %s image...' % (partition,))
+    # Unlike pre-install verification, clobbered_blocks should not be ignored.
+    ranges = self.tgt.care_map
+    ranges_str = ranges.to_string_raw()
+    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
+                       self.device, ranges_str,
+                       self.tgt.TotalSha1(include_clobbered_blocks=True)))
+
+    # Bug: 20881595
+    # Verify that extended blocks are really zeroed out.
+    if self.tgt.extended:
+      ranges_str = self.tgt.extended.to_string_raw()
+      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
+                         self.device, ranges_str,
+                         self._HashZeroBlocks(self.tgt.extended.size())))
+      script.Print('Verified the updated %s image.' % (partition,))
+      if partition == "system":
+        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
+      else:
+        code = ErrorCode.VENDOR_NONZERO_CONTENTS
+      script.AppendExtra(
+          'else\n'
+          '  abort("E%d: %s partition has unexpected non-zero contents after '
+          'OTA update");\n'
+          'endif;' % (code, partition))
+    else:
+      script.Print('Verified the updated %s image.' % (partition,))
+
+    if partition == "system":
+      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
+    else:
+      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS
+
+    script.AppendExtra(
+        'else\n'
+        '  abort("E%d: %s partition has unexpected contents after OTA '
+        'update");\n'
+        'endif;' % (code, partition))
+
+  def _WriteUpdate(self, script, output_zip):
+    ZipWrite(output_zip,
+             '{}.transfer.list'.format(self.path),
+             '{}.transfer.list'.format(self.partition))
+
+    # For full OTA, compress the new.dat with brotli at quality 6 to reduce
+    # its size. Quality 9 almost triples the compression time but doesn't
+    # reduce the size much further. For a typical 1.8G system.new.dat:
+    #
+    #                       zip  | brotli(quality 6)  | brotli(quality 9)
+    #   compressed_size:    942M | 869M (~8% reduced) | 854M
+    #   compression_time:   75s  | 265s               | 719s
+    #   decompression_time: 15s  | 25s                | 25s
+
+    if not self.src:
+      brotli_cmd = ['brotli', '--quality=6',
+                    '--output={}.new.dat.br'.format(self.path),
+                    '{}.new.dat'.format(self.path)]
+      print("Compressing {}.new.dat with brotli".format(self.partition))
+      p = Run(brotli_cmd, stdout=subprocess.PIPE)
+      p.communicate()
+      assert p.returncode == 0,\
+          'compression of {}.new.dat failed'.format(self.partition)
+
+      new_data_name = '{}.new.dat.br'.format(self.partition)
+      ZipWrite(output_zip,
+               '{}.new.dat.br'.format(self.path),
+               new_data_name,
+               compress_type=zipfile.ZIP_STORED)
+    else:
+      new_data_name = '{}.new.dat'.format(self.partition)
+      ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)
+
+    ZipWrite(output_zip,
+             '{}.patch.dat'.format(self.path),
+             '{}.patch.dat'.format(self.partition),
+             compress_type=zipfile.ZIP_STORED)
+
+    if self.partition == "system":
+      code = ErrorCode.SYSTEM_UPDATE_FAILURE
+    else:
+      code = ErrorCode.VENDOR_UPDATE_FAILURE
+
+    call = ('block_image_update("{device}", '
+            'package_extract_file("{partition}.transfer.list"), '
+            '"{new_data_name}", "{partition}.patch.dat") ||\n'
+            '  abort("E{code}: Failed to update {partition} image.");'.format(
+                device=self.device, partition=self.partition,
+                new_data_name=new_data_name, code=code))
+    script.AppendExtra(script.WordWrap(call))
+
+  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
+    data = source.ReadRangeSet(ranges)
+    ctx = sha1()
+
+    for p in data:
+      ctx.update(p)
+
+    return ctx.hexdigest()
+
+  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
+    """Return the hash value for all zero blocks."""
+    zero_block = '\x00' * 4096
+    ctx = sha1()
+    for _ in range(num_blocks):
+      ctx.update(zero_block)
+
+    return ctx.hexdigest()
+
+
+DataImage = blockimgdiff.DataImage
+
+# map recovery.fstab's fs_types to mount/format "partition types"
+PARTITION_TYPES = {
+    "ext4": "EMMC",
+    "emmc": "EMMC",
+    "f2fs": "EMMC",
+    "squashfs": "EMMC"
+}
+
+def GetTypeAndDevice(mount_point, info):
+  fstab = info["fstab"]
+  if fstab:
+    return (PARTITION_TYPES[fstab[mount_point].fs_type],
+            fstab[mount_point].device)
+  else:
+    raise KeyError
+
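+# Sketch (hypothetical fstab entry): for a recovery.fstab line mounting /boot
+# with fs_type "emmc", GetTypeAndDevice("/boot", info) returns
+# ("EMMC", "/dev/block/by-name/boot") via the PARTITION_TYPES map above.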
+
+def ParseCertificate(data):
+  """Parses and converts a PEM-encoded certificate into DER-encoded.
+
+  This gives the same result as `openssl x509 -in <filename> -outform DER`.
+
+  Returns:
+    The decoded certificate string.
+  """
+  cert_buffer = []
+  save = False
+  for line in data.split("\n"):
+    if "--END CERTIFICATE--" in line:
+      break
+    if save:
+      cert_buffer.append(line)
+    if "--BEGIN CERTIFICATE--" in line:
+      save = True
+  cert = "".join(cert_buffer).decode('base64')
+  return cert
+
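+# Minimal usage sketch (hypothetical certificate): only the base64 payload
+# between the BEGIN/END markers is kept and decoded.
+#
+#   pem = ("-----BEGIN CERTIFICATE-----\n"
+#          "MIIB...\n"
+#          "-----END CERTIFICATE-----\n")
+#   der = ParseCertificate(pem)  # DER bytes, as with `openssl x509 -outform DER`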
+
+def ExtractPublicKey(cert):
+  """Extracts the public key (PEM-encoded) from the given certificate file.
+
+  Args:
+    cert: The certificate filename.
+
+  Returns:
+    The public key string.
+
+  Raises:
+    AssertionError: On non-zero return from 'openssl'.
+  """
+  # The behavior of '-out' differs between openssl 1.1 and openssl 1.0. While
+  # openssl 1.1 writes the key into the file given after '-out', openssl 1.0
+  # (both 1.0.1 and 1.0.2) doesn't. So we collect the output from stdout
+  # instead.
+  cmd = ['openssl', 'x509', '-pubkey', '-noout', '-in', cert]
+  proc = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+  pubkey, stderrdata = proc.communicate()
+  assert proc.returncode == 0, \
+      'Failed to dump public key from certificate: %s\n%s' % (cert, stderrdata)
+  return pubkey
+
+
+def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
+                      info_dict=None):
+  """Generates the recovery-from-boot patch and writes the script to output.
+
+  Most of the space in the boot and recovery images is just the kernel, which is
+  identical for the two, so the resulting patch should be efficient. Add it to
+  the output zip, along with a shell script that is run from init.rc on first
+  boot to actually do the patching and install the new recovery image.
+
+  Args:
+    input_dir: The top-level input directory of the target-files.zip.
+    output_sink: The callback function that writes the result.
+    recovery_img: File object for the recovery image.
+    boot_img: File object for the boot image.
+    info_dict: A dict returned by common.LoadInfoDict() on the input
+        target_files. Will use OPTIONS.info_dict if None has been given.
+  """
+  if info_dict is None:
+    info_dict = OPTIONS.info_dict
+
+  full_recovery_image = info_dict.get("full_recovery_image") == "true"
+
+  if full_recovery_image:
+    output_sink("etc/recovery.img", recovery_img.data)
+
+  else:
+    system_root_image = info_dict.get("system_root_image") == "true"
+    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
+    # With system-root-image, boot and recovery images will have mismatching
+    # entries (only recovery has the ramdisk entry) (Bug: 72731506). Use bsdiff
+    # to handle such a case.
+    if system_root_image:
+      diff_program = ["bsdiff"]
+      bonus_args = ""
+      assert not os.path.exists(path)
+    else:
+      diff_program = ["imgdiff"]
+      if os.path.exists(path):
+        diff_program.append("-b")
+        diff_program.append(path)
+        bonus_args = "-b /system/etc/recovery-resource.dat"
+      else:
+        bonus_args = ""
+
+    d = Difference(recovery_img, boot_img, diff_program=diff_program)
+    _, _, patch = d.ComputePatch()
+    output_sink("recovery-from-boot.p", patch)
+
+  try:
+    # The following GetTypeAndDevice()s need to use the path in the target
+    # info_dict instead of source_info_dict.
+    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
+    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
+  except KeyError:
+    return
+
+  if full_recovery_image:
+    sh = """#!/system/bin/sh
+if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
+  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
+else
+  log -t recovery "Recovery image already installed"
+fi
+""" % {'type': recovery_type,
+       'device': recovery_device,
+       'sha1': recovery_img.sha1,
+       'size': recovery_img.size}
+  else:
+    sh = """#!/system/bin/sh
+if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
+  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
+else
+  log -t recovery "Recovery image already installed"
+fi
+""" % {'boot_size': boot_img.size,
+       'boot_sha1': boot_img.sha1,
+       'recovery_size': recovery_img.size,
+       'recovery_sha1': recovery_img.sha1,
+       'boot_type': boot_type,
+       'boot_device': boot_device,
+       'recovery_type': recovery_type,
+       'recovery_device': recovery_device,
+       'bonus_args': bonus_args}
+
+  # The install script location moved from /system/etc to /system/bin
+  # in the L release.
+  sh_location = "bin/install-recovery.sh"
+
+  print("putting script in", sh_location)
+
+  output_sink(sh_location, sh)
diff --git a/src/support/ab_tools/scripts/edify_generator.py b/src/support/ab_tools/scripts/edify_generator.py
new file mode 100644
index 0000000..7a81928
--- /dev/null
+++ b/src/support/ab_tools/scripts/edify_generator.py
@@ -0,0 +1,340 @@
+# Copyright (C) 2009 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+
+import common
+
+class EdifyGenerator(object):
+  """Class to generate scripts in the 'edify' recovery script language
+  used from donut onwards."""
+
+  def __init__(self, version, info, fstab=None):
+    self.script = []
+    self.mounts = set()
+    self._required_cache = 0
+    self.version = version
+    self.info = info
+    if fstab is None:
+      self.fstab = self.info.get("fstab", None)
+    else:
+      self.fstab = fstab
+
+  def MakeTemporary(self):
+    """Make a temporary script object whose commands can latter be
+    appended to the parent script with AppendScript().  Used when the
+    caller wants to generate script commands out-of-order."""
+    x = EdifyGenerator(self.version, self.info)
+    x.mounts = self.mounts
+    return x
+
+  @property
+  def required_cache(self):
+    """Return the minimum cache size to apply the update."""
+    return self._required_cache
+
+  @staticmethod
+  def WordWrap(cmd, linelen=80):
+    """'cmd' should be a function call with null characters after each
+    parameter (eg, "somefun(foo,\0bar,\0baz)").  This function wraps cmd
+    to a given line length, replacing nulls with spaces and/or newlines
+    to format it nicely."""
+    indent = cmd.index("(")+1
+    out = []
+    first = True
+    x = re.compile("^(.{,%d})\0" % (linelen-indent,))
+    while True:
+      if not first:
+        out.append(" " * indent)
+      first = False
+      m = x.search(cmd)
+      if not m:
+        parts = cmd.split("\0", 1)
+        out.append(parts[0]+"\n")
+        if len(parts) == 1:
+          break
+        else:
+          cmd = parts[1]
+          continue
+      out.append(m.group(1)+"\n")
+      cmd = cmd[m.end():]
+
+    return "".join(out).replace("\0", " ").rstrip("\n")
+
+  def AppendScript(self, other):
+    """Append the contents of another script (which should be created
+    with temporary=True) to this one."""
+    self.script.extend(other.script)
+
+  def AssertOemProperty(self, name, values, oem_no_mount):
+    """Assert that a property on the OEM paritition matches allowed values."""
+    if not name:
+      raise ValueError("must specify an OEM property")
+    if not values:
+      raise ValueError("must specify the OEM value")
+
+    if oem_no_mount:
+      get_prop_command = 'getprop("%s")' % name
+    else:
+      get_prop_command = 'file_getprop("/oem/oem.prop", "%s")' % name
+
+    cmd = ''
+    for value in values:
+      cmd += '%s == "%s" || ' % (get_prop_command, value)
+    cmd += (
+        'abort("E{code}: This package expects the value \\"{values}\\" for '
+        '\\"{name}\\"; this has value \\"" + '
+        '{get_prop_command} + "\\".");').format(
+            code=common.ErrorCode.OEM_PROP_MISMATCH,
+            get_prop_command=get_prop_command, name=name,
+            values='\\" or \\"'.join(values))
+    self.script.append(cmd)
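+
+  # For instance (illustrative values), AssertOemProperty("ro.oem.id",
+  # ["dev1"], False) appends a command along the lines of:
+  #   file_getprop("/oem/oem.prop", "ro.oem.id") == "dev1" ||
+  #       abort("E...: This package expects the value \"dev1\" for
+  #       \"ro.oem.id\"; this has value \"" +
+  #       file_getprop("/oem/oem.prop", "ro.oem.id") + "\".");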
+
+  def AssertSomeFingerprint(self, *fp):
+    """Assert that the current recovery build fingerprint is one of *fp."""
+    if not fp:
+      raise ValueError("must specify some fingerprints")
+    cmd = (' ||\n    '.join([('getprop("ro.build.fingerprint") == "%s"') % i
+                             for i in fp]) +
+           ' ||\n    abort("E%d: Package expects build fingerprint of %s; '
+           'this device has " + getprop("ro.build.fingerprint") + ".");') % (
+               common.ErrorCode.FINGERPRINT_MISMATCH, " or ".join(fp))
+    self.script.append(cmd)
+
+  def AssertSomeThumbprint(self, *fp):
+    """Assert that the current recovery build thumbprint is one of *fp."""
+    if not fp:
+      raise ValueError("must specify some thumbprints")
+    cmd = (' ||\n    '.join([('getprop("ro.build.thumbprint") == "%s"') % i
+                             for i in fp]) +
+           ' ||\n    abort("E%d: Package expects build thumbprint of %s; this '
+           'device has " + getprop("ro.build.thumbprint") + ".");') % (
+               common.ErrorCode.THUMBPRINT_MISMATCH, " or ".join(fp))
+    self.script.append(cmd)
+
+  def AssertFingerprintOrThumbprint(self, fp, tp):
+    """Assert that the current recovery build fingerprint is fp, or thumbprint
+       is tp."""
+    cmd = ('getprop("ro.build.fingerprint") == "{fp}" ||\n'
+           '    getprop("ro.build.thumbprint") == "{tp}" ||\n'
+           '    abort("Package expects build fingerprint of {fp} or '
+           'thumbprint of {tp}; this device has a fingerprint of " '
+           '+ getprop("ro.build.fingerprint") + " and a thumbprint of " '
+           '+ getprop("ro.build.thumbprint") + ".");').format(fp=fp, tp=tp)
+    self.script.append(cmd)
+
+  def AssertOlderBuild(self, timestamp, timestamp_text):
+    """Assert that the build on the device is older (or the same as)
+    the given timestamp."""
+    self.script.append(
+        ('(!less_than_int(%s, getprop("ro.build.date.utc"))) || '
+         'abort("E%d: Can\'t install this package (%s) over newer '
+         'build (" + getprop("ro.build.date") + ").");') % (timestamp,
+             common.ErrorCode.OLDER_BUILD, timestamp_text))
+
+  def AssertDevice(self, device):
+    """Assert that the device identifier is the given string."""
+    cmd = ('getprop("ro.product.device") == "%s" || '
+           'abort("E%d: This package is for \\"%s\\" devices; '
+           'this is a \\"" + getprop("ro.product.device") + "\\".");') % (
+               device, common.ErrorCode.DEVICE_MISMATCH, device)
+    self.script.append(cmd)
+
+  def AssertSomeBootloader(self, *bootloaders):
+    """Asert that the bootloader version is one of *bootloaders."""
+    cmd = ("assert(" +
+           " ||\0".join(['getprop("ro.bootloader") == "%s"' % (b,)
+                         for b in bootloaders]) +
+           ");")
+    self.script.append(self.WordWrap(cmd))
+
+  def ShowProgress(self, frac, dur):
+    """Update the progress bar, advancing it over 'frac' over the next
+    'dur' seconds.  'dur' may be zero to advance it via SetProgress
+    commands instead of by time."""
+    self.script.append("show_progress(%f, %d);" % (frac, int(dur)))
+
+  def SetProgress(self, frac):
+    """Set the position of the progress bar within the chunk defined
+    by the most recent ShowProgress call.  'frac' should be in
+    [0,1]."""
+    self.script.append("set_progress(%f);" % (frac,))
+
+  def PatchCheck(self, filename, *sha1):
+    """Check that the given file has one of the
+    given *sha1 hashes, checking the version saved in cache if the
+    file does not match."""
+    self.script.append(
+        'apply_patch_check("%s"' % (filename,) +
+        "".join([', "%s"' % (i,) for i in sha1]) +
+        ') || abort("E%d: \\"%s\\" has unexpected contents.");' % (
+            common.ErrorCode.BAD_PATCH_FILE, filename))
+
+  def Verify(self, filename):
+    """Check that the given file has one of the
+    given hashes (encoded in the filename)."""
+    self.script.append(
+        'apply_patch_check("{filename}") && '
+        'ui_print("    Verified.") || '
+        'ui_print("\\"{filename}\\" has unexpected contents.");'.format(
+            filename=filename))
+
+  def FileCheck(self, filename, *sha1):
+    """Check that the given file has one of the
+    given *sha1 hashes."""
+    self.script.append('assert(sha1_check(read_file("%s")' % (filename,) +
+                       "".join([', "%s"' % (i,) for i in sha1]) +
+                       '));')
+
+  def CacheFreeSpaceCheck(self, amount):
+    """Check that there's at least 'amount' space that can be made
+    available on /cache."""
+    self._required_cache = max(self._required_cache, amount)
+    self.script.append(('apply_patch_space(%d) || abort("E%d: Not enough free '
+                        'space on /cache to apply patches.");') % (
+                            amount,
+                            common.ErrorCode.INSUFFICIENT_CACHE_SPACE))
+
+  def Mount(self, mount_point, mount_options_by_format=""):
+    """Mount the partition with the given mount_point.
+      mount_options_by_format:
+      [fs_type=option[,option]...[|fs_type=option[,option]...]...]
+      where option is optname[=optvalue]
+      E.g. ext4=barrier=1,nodelalloc,errors=panic|f2fs=errors=recover
+    """
+    fstab = self.fstab
+    if fstab:
+      p = fstab[mount_point]
+      mount_dict = {}
+      if mount_options_by_format is not None:
+        for option in mount_options_by_format.split("|"):
+          if "=" in option:
+            key, value = option.split("=", 1)
+            mount_dict[key] = value
+      mount_flags = mount_dict.get(p.fs_type, "")
+      if p.context is not None:
+        mount_flags = p.context + ("," + mount_flags if mount_flags else "")
+      self.script.append('mount("%s", "%s", "%s", "%s", "%s");' % (
+          p.fs_type, common.PARTITION_TYPES[p.fs_type], p.device,
+          p.mount_point, mount_flags))
+      self.mounts.add(p.mount_point)
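+
+  # Example (illustrative): with an fstab entry mapping /system to an ext4
+  # partition at /dev/block/system, Mount("/system", "ext4=barrier=1") would
+  # append something like:
+  #   mount("ext4", "EMMC", "/dev/block/system", "/system", "barrier=1");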
+
+  def Comment(self, comment):
+    """Write a comment into the update script."""
+    self.script.append("")
+    for i in comment.split("\n"):
+      self.script.append("# " + i)
+    self.script.append("")
+
+  def Print(self, message):
+    """Log a message to the screen (if the logs are visible)."""
+    self.script.append('ui_print("%s");' % (message,))
+
+  def TunePartition(self, partition, *options):
+    """Run tune2fs with the given options on the partition specified by its
+    mount point (ext2/ext3/ext4 partitions only)."""
+    fstab = self.fstab
+    if fstab:
+      p = fstab[partition]
+      if p.fs_type not in ("ext2", "ext3", "ext4"):
+        raise ValueError("Partition %s cannot be tuned\n" % (partition,))
+    self.script.append(
+        'tune2fs(' + "".join(['"%s", ' % (i,) for i in options]) +
+        '"%s") || abort("E%d: Failed to tune partition %s");' % (
+            p.device, common.ErrorCode.TUNE_PARTITION_FAILURE, partition))
+
+  def FormatPartition(self, partition):
+    """Format the given partition, specified by its mount point (eg,
+    "/system")."""
+
+    fstab = self.fstab
+    if fstab:
+      p = fstab[partition]
+      self.script.append('format("%s", "%s", "%s", "%s", "%s");' %
+                         (p.fs_type, common.PARTITION_TYPES[p.fs_type],
+                          p.device, p.length, p.mount_point))
+
+  def WipeBlockDevice(self, partition):
+    """Wipe the block device backing the given mount point; only "/system"
+    and "/vendor" are supported."""
+    if partition not in ("/system", "/vendor"):
+      raise ValueError(("WipeBlockDevice doesn't work on %s\n") % (partition,))
+    fstab = self.fstab
+    size = self.info.get(partition.lstrip("/") + "_size", None)
+    device = fstab[partition].device
+
+    self.script.append('wipe_block_device("%s", %s);' % (device, size))
+
+  def ApplyPatch(self, srcfile, tgtfile, tgtsize, tgtsha1, *patchpairs):
+    """Apply binary patches (in *patchpairs) to the given srcfile to
+    produce tgtfile (which may be "-" to indicate overwriting the
+    source file)."""
+    if len(patchpairs) % 2 != 0 or len(patchpairs) == 0:
+      raise ValueError("bad patches given to ApplyPatch")
+    cmd = ['apply_patch("%s",\0"%s",\0%s,\0%d'
+           % (srcfile, tgtfile, tgtsha1, tgtsize)]
+    for i in range(0, len(patchpairs), 2):
+      cmd.append(',\0%s,\0package_extract_file("%s")' % patchpairs[i:i+2])
+    cmd.append(') ||\n    abort("E%d: Failed to apply patch to %s");' % (
+        common.ErrorCode.APPLY_PATCH_FAILURE, srcfile))
+    cmd = "".join(cmd)
+    self.script.append(self.WordWrap(cmd))
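+
+  # Illustrative call (hypothetical values); *patchpairs alternates source
+  # sha1 strings with patch file names inside the package:
+  #   script.ApplyPatch("EMMC:/dev/block/boot:100:srcsha", "-", 120, "tgtsha",
+  #                     "srcsha", "patch/boot.img.p")
+  # This appends a word-wrapped apply_patch(...) edify command.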
+
+  def WriteRawImage(self, mount_point, fn, mapfn=None):
+    """Write the given package file into the partition for the given
+    mount point."""
+
+    fstab = self.fstab
+    if fstab:
+      p = fstab[mount_point]
+      partition_type = common.PARTITION_TYPES[p.fs_type]
+      args = {'device': p.device, 'fn': fn}
+      if partition_type == "EMMC":
+        if mapfn:
+          args["map"] = mapfn
+          self.script.append(
+              'package_extract_file("%(fn)s", "%(device)s", "%(map)s");' % args)
+        else:
+          self.script.append(
+              'package_extract_file("%(fn)s", "%(device)s");' % args)
+      else:
+        raise ValueError(
+            "don't know how to write \"%s\" partitions" % p.fs_type)
+
+  def AppendExtra(self, extra):
+    """Append text verbatim to the output script."""
+    self.script.append(extra)
+
+  def Unmount(self, mount_point):
+    """Unmount the partition with the given mount_point."""
+    self.script.append('unmount("%s");' % mount_point)
+    self.mounts.remove(mount_point)
+
+  def UnmountAll(self):
+    for p in sorted(self.mounts):
+      self.script.append('unmount("%s");' % (p,))
+    self.mounts = set()
+
+  def AddToZip(self, input_zip, output_zip, input_path=None):
+    """Write the accumulated script to the output_zip file.  input_zip
+    is used as the source for the 'updater' binary needed to run
+    script.  If input_path is not None, it will be used as a local
+    path for the binary instead of input_zip."""
+
+    self.UnmountAll()
+
+    common.ZipWriteStr(output_zip, "META-INF/com/google/android/updater-script",
+                       "\n".join(self.script) + "\n")
+
+    if input_path is None:
+      data = input_zip.read("OTA/bin/updater")
+    else:
+      data = open(input_path, "rb").read()
+    common.ZipWriteStr(output_zip, "META-INF/com/google/android/update-binary",
+                       data, perms=0o755)
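+
+# A minimal end-to-end sketch (illustrative; assumes an info dict loaded via
+# common.LoadInfoDict() and open input/output zips):
+#
+#   script = EdifyGenerator(3, OPTIONS.info_dict)
+#   script.Comment("Example update script")
+#   script.Print("Patching system image...")
+#   script.ShowProgress(0.8, 30)
+#   script.AddToZip(input_zip, output_zip)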
diff --git a/src/support/ab_tools/scripts/lib/shflags/shflags b/src/support/ab_tools/scripts/lib/shflags/shflags
new file mode 100644
index 0000000..11d3060
--- /dev/null
+++ b/src/support/ab_tools/scripts/lib/shflags/shflags
@@ -0,0 +1,1155 @@
+# vim:et:ft=sh:sts=2:sw=2
+#
+# Copyright 2008-2016 Kate Ward. All Rights Reserved.
+# Released under the Apache License 2.0.
+#
+# shFlags -- Advanced command-line flag library for Unix shell scripts.
+# http://code.google.com/p/shflags/
+#
+# Author: kate.ward@forestent.com (Kate Ward)
+#
+# This module implements something like the google-gflags library available
+# from http://code.google.com/p/google-gflags/.
+#
+# FLAG TYPES: This is a list of the DEFINE_*'s that you can do.  All flags take
+# a name, default value, help-string, and optional 'short' name (one-letter
+# name).  Some flags have other arguments, which are described with the flag.
+#
+# DEFINE_string: takes any input, and interprets it as a string.
+#
+# DEFINE_boolean: does not take any arguments. Say --myflag to set
+#   FLAGS_myflag to true, or --nomyflag to set FLAGS_myflag to false. For short
+#   flags, passing the flag on the command-line negates the default value, i.e.
+#   if the default is true, passing the flag sets the value to false.
+#
+# DEFINE_float: takes an input and interprets it as a floating point number. As
+#   shell does not support floats per-se, the input is merely validated as
+#   being a valid floating point value.
+#
+# DEFINE_integer: takes an input and interprets it as an integer.
+#
+# SPECIAL FLAGS: There are a few flags that have special meaning:
+#   --help (or -?)  prints a list of all the flags in a human-readable fashion
+#   --flagfile=foo  read flags from foo.  (not implemented yet)
+#   --              as in getopt(), terminates flag-processing
+#
+# EXAMPLE USAGE:
+#
+#   -- begin hello.sh --
+#   #! /bin/sh
+#   . ./shflags
+#   DEFINE_string name 'world' "somebody's name" n
+#   FLAGS "$@" || exit $?
+#   eval set -- "${FLAGS_ARGV}"
+#   echo "Hello, ${FLAGS_name}."
+#   -- end hello.sh --
+#
+#   $ ./hello.sh -n Kate
+#   Hello, Kate.
+#
+# CUSTOMIZABLE BEHAVIOR:
+#
+# A script can override the default 'getopt' command by providing the path to
+# an alternate implementation by defining the FLAGS_GETOPT_CMD variable.
+#
+# NOTES:
+#
+# * Not all systems include a getopt version that supports long flags. On these
+#   systems, only short flags are recognized.
+
+#==============================================================================
+# shFlags
+#
+# Shared attributes:
+#   flags_error:  last error message
+#   flags_output: last function output (rarely valid)
+#   flags_return: last return value
+#
+#   __flags_longNames: list of long names for all flags
+#   __flags_shortNames: list of short names for all flags
+#   __flags_boolNames: list of boolean flag names
+#
+#   __flags_opts: options parsed by getopt
+#
+# Per-flag attributes:
+#   FLAGS_<flag_name>: contains value of flag named 'flag_name'
+#   __flags_<flag_name>_default: the default flag value
+#   __flags_<flag_name>_help: the flag help string
+#   __flags_<flag_name>_short: the flag short name
+#   __flags_<flag_name>_type: the flag type
+#
+# Notes:
+# - lists of strings are space separated, and a null value is the '~' char.
+
+# return if FLAGS already loaded
+[ -n "${FLAGS_VERSION:-}" ] && return 0
+FLAGS_VERSION='1.2.0'
+
+# return values that scripts can use
+FLAGS_TRUE=0
+FLAGS_FALSE=1
+FLAGS_ERROR=2
+
+# determine some reasonable command defaults
+__FLAGS_UNAME_S=`uname -s`
+case "${__FLAGS_UNAME_S}" in
+  BSD) __FLAGS_EXPR_CMD='gexpr' ;;
+  *) __FLAGS_EXPR_CMD='expr' ;;
+esac
+
+# commands a user can override if needed
+FLAGS_EXPR_CMD=${FLAGS_EXPR_CMD:-${__FLAGS_EXPR_CMD}}
+FLAGS_GETOPT_CMD=${FLAGS_GETOPT_CMD:-getopt}
+
+# specific shell checks
+if [ -n "${ZSH_VERSION:-}" ]; then
+  setopt |grep "^shwordsplit$" >/dev/null
+  if [ $? -ne ${FLAGS_TRUE} ]; then
+    _flags_fatal 'zsh shwordsplit option is required for proper zsh operation'
+  fi
+  if [ -z "${FLAGS_PARENT:-}" ]; then
+    _flags_fatal "zsh does not pass \$0 through properly. please declare' \
+\"FLAGS_PARENT=\$0\" before calling shFlags"
+  fi
+fi
+
+# can we use built-ins?
+( echo "${FLAGS_TRUE#0}"; ) >/dev/null 2>&1
+if [ $? -eq ${FLAGS_TRUE} ]; then
+  __FLAGS_USE_BUILTIN=${FLAGS_TRUE}
+else
+  __FLAGS_USE_BUILTIN=${FLAGS_FALSE}
+fi
+
+#
+# constants
+#
+
+# reserved flag names
+__FLAGS_RESERVED_LIST=' ARGC ARGV ERROR FALSE GETOPT_CMD HELP PARENT TRUE '
+__FLAGS_RESERVED_LIST="${__FLAGS_RESERVED_LIST} VERSION "
+
+# getopt version
+__FLAGS_GETOPT_VERS_STD=0
+__FLAGS_GETOPT_VERS_ENH=1
+__FLAGS_GETOPT_VERS_BSD=2
+
+${FLAGS_GETOPT_CMD} >/dev/null 2>&1
+case $? in
+  0) __FLAGS_GETOPT_VERS=${__FLAGS_GETOPT_VERS_STD} ;;  # bsd getopt
+  2)
+    # TODO(kward): look into '-T' option to test the internal getopt() version
+    if [ "`${FLAGS_GETOPT_CMD} --version`" = '-- ' ]; then
+      __FLAGS_GETOPT_VERS=${__FLAGS_GETOPT_VERS_STD}
+    else
+      __FLAGS_GETOPT_VERS=${__FLAGS_GETOPT_VERS_ENH}
+    fi
+    ;;
+  *) _flags_fatal 'unable to determine getopt version' ;;
+esac
+
+# getopt optstring lengths
+__FLAGS_OPTSTR_SHORT=0
+__FLAGS_OPTSTR_LONG=1
+
+__FLAGS_NULL='~'
+
+# flag info strings
+__FLAGS_INFO_DEFAULT='default'
+__FLAGS_INFO_HELP='help'
+__FLAGS_INFO_SHORT='short'
+__FLAGS_INFO_TYPE='type'
+
+# flag lengths
+__FLAGS_LEN_SHORT=0
+__FLAGS_LEN_LONG=1
+
+# flag types
+__FLAGS_TYPE_NONE=0
+__FLAGS_TYPE_BOOLEAN=1
+__FLAGS_TYPE_FLOAT=2
+__FLAGS_TYPE_INTEGER=3
+__FLAGS_TYPE_STRING=4
+
+# set the constants readonly
+__flags_constants=`set |awk -F= '/^FLAGS_/ || /^__FLAGS_/ {print $1}'`
+for __flags_const in ${__flags_constants}; do
+  # skip certain flags
+  case ${__flags_const} in
+    FLAGS_HELP) continue ;;
+    FLAGS_PARENT) continue ;;
+  esac
+  # set flag readonly
+  if [ -z "${ZSH_VERSION:-}" ]; then
+    readonly ${__flags_const}
+  else  # handle zsh
+    case ${ZSH_VERSION} in
+      [123].*) readonly ${__flags_const} ;;
+      *) readonly -g ${__flags_const} ;;  # declare readonly constants globally
+    esac
+  fi
+done
+unset __flags_const __flags_constants
+
+#
+# internal variables
+#
+
+# space separated lists
+__flags_boolNames=' '  # boolean flag names
+__flags_longNames=' '  # long flag names
+__flags_shortNames=' '  # short flag names
+__flags_definedNames=' ' # defined flag names (used for validation)
+
+__flags_columns=''  # screen width in columns
+__flags_opts=''  # temporary storage for parsed getopt flags
+
+#------------------------------------------------------------------------------
+# private functions
+#
+
+# logging functions
+_flags_debug() { echo "flags:DEBUG $@" >&2; }
+_flags_warn() { echo "flags:WARN $@" >&2; }
+_flags_error() { echo "flags:ERROR $@" >&2; }
+_flags_fatal() { echo "flags:FATAL $@" >&2; exit ${FLAGS_ERROR}; }
+
+# Define a flag.
+#
+# Calling this function will define the following info variables for the
+# specified flag:
+#   FLAGS_flagname - the name for this flag (based upon the long flag name)
+#   __flags_<flag_name>_default - the default value
+#   __flags_flagname_help - the help string
+#   __flags_flagname_short - the single letter alias
+#   __flags_flagname_type - the type of flag (one of __FLAGS_TYPE_*)
+#
+# Args:
+#   _flags__type: integer: internal type of flag (__FLAGS_TYPE_*)
+#   _flags__name: string: long flag name
+#   _flags__default: default flag value
+#   _flags__help: string: help string
+#   _flags__short: string: (optional) short flag name
+# Returns:
+#   integer: success of operation, or error
+_flags_define()
+{
+  if [ $# -lt 4 ]; then
+    flags_error='DEFINE error: too few arguments'
+    flags_return=${FLAGS_ERROR}
+    _flags_error "${flags_error}"
+    return ${flags_return}
+  fi
+
+  _flags_type_=$1
+  _flags_name_=$2
+  _flags_default_=$3
+  _flags_help_=$4
+  _flags_short_=${5:-${__FLAGS_NULL}}
+
+  _flags_return_=${FLAGS_TRUE}
+  _flags_usName_=`_flags_underscoreName ${_flags_name_}`
+
+  # check whether the flag name is reserved
+  _flags_itemInList ${_flags_usName_} "${__FLAGS_RESERVED_LIST}"
+  if [ $? -eq ${FLAGS_TRUE} ]; then
+    flags_error="flag name (${_flags_name_}) is reserved"
+    _flags_return_=${FLAGS_ERROR}
+  fi
+
+  # require a short option for getopt versions that don't support long options
+  if [ ${_flags_return_} -eq ${FLAGS_TRUE} \
+      -a ${__FLAGS_GETOPT_VERS} -ne ${__FLAGS_GETOPT_VERS_ENH} \
+      -a "${_flags_short_}" = "${__FLAGS_NULL}" ]
+  then
+    flags_error="short flag required for (${_flags_name_}) on this platform"
+    _flags_return_=${FLAGS_ERROR}
+  fi
+
+  # check for existing long name definition
+  if [ ${_flags_return_} -eq ${FLAGS_TRUE} ]; then
+    if _flags_itemInList ${_flags_usName_} ${__flags_definedNames}; then
+      flags_error="definition for ([no]${_flags_name_}) already exists"
+      _flags_warn "${flags_error}"
+      _flags_return_=${FLAGS_FALSE}
+    fi
+  fi
+
+  # check for existing short name definition
+  if [ ${_flags_return_} -eq ${FLAGS_TRUE} \
+      -a "${_flags_short_}" != "${__FLAGS_NULL}" ]
+  then
+    if _flags_itemInList "${_flags_short_}" ${__flags_shortNames}; then
+      flags_error="flag short name (${_flags_short_}) already defined"
+      _flags_warn "${flags_error}"
+      _flags_return_=${FLAGS_FALSE}
+    fi
+  fi
+
+  # handle default value. note, on several occasions the 'if' portion of an
+  # if/then/else contains just a ':' which does nothing. a binary reversal via
+  # '!' is not done because it does not work on all shells.
+  if [ ${_flags_return_} -eq ${FLAGS_TRUE} ]; then
+    case ${_flags_type_} in
+      ${__FLAGS_TYPE_BOOLEAN})
+        if _flags_validBool "${_flags_default_}"; then
+          case ${_flags_default_} in
+            true|t|0) _flags_default_=${FLAGS_TRUE} ;;
+            false|f|1) _flags_default_=${FLAGS_FALSE} ;;
+          esac
+        else
+          flags_error="invalid default flag value '${_flags_default_}'"
+          _flags_return_=${FLAGS_ERROR}
+        fi
+        ;;
+
+      ${__FLAGS_TYPE_FLOAT})
+        if _flags_validFloat "${_flags_default_}"; then
+          :
+        else
+          flags_error="invalid default flag value '${_flags_default_}'"
+          _flags_return_=${FLAGS_ERROR}
+        fi
+        ;;
+
+      ${__FLAGS_TYPE_INTEGER})
+        if _flags_validInt "${_flags_default_}"; then
+          :
+        else
+          flags_error="invalid default flag value '${_flags_default_}'"
+          _flags_return_=${FLAGS_ERROR}
+        fi
+        ;;
+
+      ${__FLAGS_TYPE_STRING}) ;;  # everything in shell is a valid string
+
+      *)
+        flags_error="unrecognized flag type '${_flags_type_}'"
+        _flags_return_=${FLAGS_ERROR}
+        ;;
+    esac
+  fi
+
+  if [ ${_flags_return_} -eq ${FLAGS_TRUE} ]; then
+    # store flag information
+    eval "FLAGS_${_flags_usName_}='${_flags_default_}'"
+    eval "__flags_${_flags_usName_}_${__FLAGS_INFO_TYPE}=${_flags_type_}"
+    eval "__flags_${_flags_usName_}_${__FLAGS_INFO_DEFAULT}=\
+\"${_flags_default_}\""
+    eval "__flags_${_flags_usName_}_${__FLAGS_INFO_HELP}=\"${_flags_help_}\""
+    eval "__flags_${_flags_usName_}_${__FLAGS_INFO_SHORT}='${_flags_short_}'"
+
+    # append flag names to name lists
+    __flags_shortNames="${__flags_shortNames}${_flags_short_} "
+    __flags_longNames="${__flags_longNames}${_flags_name_} "
+    [ ${_flags_type_} -eq ${__FLAGS_TYPE_BOOLEAN} ] && \
+        __flags_boolNames="${__flags_boolNames}no${_flags_name_} "
+
+    # append flag names to defined names for later validation checks
+    __flags_definedNames="${__flags_definedNames}${_flags_usName_} "
+    [ ${_flags_type_} -eq ${__FLAGS_TYPE_BOOLEAN} ] && \
+        __flags_definedNames="${__flags_definedNames}no${_flags_usName_} "
+  fi
+
+  flags_return=${_flags_return_}
+  unset _flags_default_ _flags_help_ _flags_name_ _flags_return_ \
+      _flags_short_ _flags_type_ _flags_usName_
+  [ ${flags_return} -eq ${FLAGS_ERROR} ] && _flags_error "${flags_error}"
+  return ${flags_return}
+}
+
+# Underscore a flag name by replacing dashes with underscores.
+#
+# Args:
+#   unnamed: string: long flag name
+# Output:
+#   string: underscored name
+_flags_underscoreName()
+{
+  echo $1 |tr '-' '_'
+}
+
+# Return valid getopt options using currently defined list of long options.
+#
+# This function builds a proper getopt option string for short (and long)
+# options, using the current list of long options for reference.
+#
+# Args:
+#   _flags_optStr: integer: option string type (__FLAGS_OPTSTR_*)
+# Output:
+#   string: generated option string for getopt
+# Returns:
+#   boolean: success of operation (always returns True)
+_flags_genOptStr()
+{
+  _flags_optStrType_=$1
+
+  _flags_opts_=''
+
+  for _flags_name_ in ${__flags_longNames}; do
+    _flags_usName_=`_flags_underscoreName ${_flags_name_}`
+    _flags_type_=`_flags_getFlagInfo ${_flags_usName_} ${__FLAGS_INFO_TYPE}`
+    [ $? -eq ${FLAGS_TRUE} ] || _flags_fatal 'call to _flags_type_ failed'
+    case ${_flags_optStrType_} in
+      ${__FLAGS_OPTSTR_SHORT})
+        _flags_shortName_=`_flags_getFlagInfo \
+            ${_flags_usName_} ${__FLAGS_INFO_SHORT}`
+        if [ "${_flags_shortName_}" != "${__FLAGS_NULL}" ]; then
+          _flags_opts_="${_flags_opts_}${_flags_shortName_}"
+          # getopt needs a trailing ':' to indicate a required argument
+          [ ${_flags_type_} -ne ${__FLAGS_TYPE_BOOLEAN} ] && \
+              _flags_opts_="${_flags_opts_}:"
+        fi
+        ;;
+
+      ${__FLAGS_OPTSTR_LONG})
+        _flags_opts_="${_flags_opts_:+${_flags_opts_},}${_flags_name_}"
+        # getopt needs a trailing ':' to indicate a required argument
+        [ ${_flags_type_} -ne ${__FLAGS_TYPE_BOOLEAN} ] && \
+            _flags_opts_="${_flags_opts_}:"
+        ;;
+    esac
+  done
+
+  echo "${_flags_opts_}"
+  unset _flags_name_ _flags_opts_ _flags_optStrType_ _flags_shortName_ \
+      _flags_type_ _flags_usName_
+  return ${FLAGS_TRUE}
+}
+
+# Returns flag details based on a flag name and flag info.
+#
+# Args:
+#   string: underscored flag name
+#   string: flag info (see the _flags_define function for valid info types)
+# Output:
+#   string: value of dereferenced flag variable
+# Returns:
+#   integer: one of FLAGS_{TRUE|FALSE|ERROR}
+_flags_getFlagInfo()
+{
+  # note: adding gFI to variable names to prevent naming conflicts with calling
+  # functions
+  _flags_gFI_usName_=$1
+  _flags_gFI_info_=$2
+
+  _flags_infoVar_="__flags_${_flags_gFI_usName_}_${_flags_gFI_info_}"
+  _flags_strToEval_="_flags_infoValue_=\"\${${_flags_infoVar_}:-}\""
+  eval "${_flags_strToEval_}"
+  if [ -n "${_flags_infoValue_}" ]; then
+    flags_return=${FLAGS_TRUE}
+  else
+    # see if the _flags_gFI_usName_ variable is a string as strings can be
+    # empty...
+    # note: the DRY principle would say to have this function call itself for
+    # the next three lines, but doing so results in an infinite loop as an
+    # invalid _flags_name_ will also not have the associated _type variable.
+    # Because it doesn't (it will evaluate to an empty string) the logic will
+    # try to find the _type variable of the _type variable, and so on. Not so
+    # good ;-)
+    _flags_typeVar_="__flags_${_flags_gFI_usName_}_${__FLAGS_INFO_TYPE}"
+    _flags_strToEval_="_flags_typeValue_=\"\${${_flags_typeVar_}:-}\""
+    eval "${_flags_strToEval_}"
+    if [ "${_flags_typeValue_}" = "${__FLAGS_TYPE_STRING}" ]; then
+      flags_return=${FLAGS_TRUE}
+    else
+      flags_return=${FLAGS_ERROR}
+      flags_error="missing flag info variable (${_flags_infoVar_})"
+    fi
+  fi
+
+  echo "${_flags_infoValue_}"
+  unset _flags_gFI_usName_ _flags_gFI_info_ _flags_infoValue_ _flags_infoVar_ \
+      _flags_strToEval_ _flags_typeValue_ _flags_typeVar_
+  [ ${flags_return} -eq ${FLAGS_ERROR} ] && _flags_error "${flags_error}"
+  return ${flags_return}
+}
+
+# Check for presence of item in a list.
+#
+# Passed a string (e.g. 'abc'), this function will determine if the string is
+# present in the list of strings (e.g.  ' foo bar abc ').
+#
+# Args:
+#   _flags_str_: string: string to search for in a list of strings
+#   unnamed: list: list of strings
+# Returns:
+#   boolean: true if item is in the list
+_flags_itemInList() {
+  _flags_str_=$1
+  shift
+
+  echo " ${*:-} " |grep " ${_flags_str_} " >/dev/null
+  if [ $? -eq 0 ]; then
+    flags_return=${FLAGS_TRUE}
+  else
+    flags_return=${FLAGS_FALSE}
+  fi
+
+  unset _flags_str_
+  return ${flags_return}
+}
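+
+# e.g. (illustrative): `_flags_itemInList 'abc' ' foo bar abc '` returns
+# ${FLAGS_TRUE}, while `_flags_itemInList 'baz' ' foo bar abc '` returns
+# ${FLAGS_FALSE}.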
+
+# Returns the width of the current screen.
+#
+# Output:
+#   integer: width in columns of the current screen.
+_flags_columns()
+{
+  if [ -z "${__flags_columns}" ]; then
+    # determine the value and store it
+    if eval stty size >/dev/null 2>&1; then
+      # stty size worked :-)
+      set -- `stty size`
+      __flags_columns=$2
+    elif eval tput cols >/dev/null 2>&1; then
+      set -- `tput cols`
+      __flags_columns=$1
+    else
+      __flags_columns=80  # default terminal width
+    fi
+  fi
+  echo ${__flags_columns}
+}
+
+# Validate a boolean.
+#
+# Args:
+#   _flags__bool: boolean: value to validate
+# Returns:
+#   bool: true if the value is a valid boolean
+_flags_validBool()
+{
+  _flags_bool_=$1
+
+  flags_return=${FLAGS_TRUE}
+  case "${_flags_bool_}" in
+    true|t|0) ;;
+    false|f|1) ;;
+    *) flags_return=${FLAGS_FALSE} ;;
+  esac
+
+  unset _flags_bool_
+  return ${flags_return}
+}
+
+# Validate a float.
+#
+# Args:
+#   _flags_float_: float: value to validate
+# Returns:
+#   bool: true if the value is a valid float
+_flags_validFloat()
+{
+  flags_return=${FLAGS_FALSE}
+  [ -n "$1" ] || return ${flags_return}
+  _flags_float_=$1
+
+  if _flags_validInt ${_flags_float_}; then
+    flags_return=${FLAGS_TRUE}
+  elif _flags_useBuiltin; then
+    _flags_float_whole_=${_flags_float_%.*}
+    _flags_float_fraction_=${_flags_float_#*.}
+    if _flags_validInt ${_flags_float_whole_:-0} && \
+      _flags_validInt ${_flags_float_fraction_}; then
+      flags_return=${FLAGS_TRUE}
+    fi
+    unset _flags_float_whole_ _flags_float_fraction_
+  else
+    flags_return=${FLAGS_TRUE}
+    case ${_flags_float_} in
+      -*)  # negative floats
+        _flags_test_=`${FLAGS_EXPR_CMD} -- "${_flags_float_}" :\
+            '\(-[0-9]*\.[0-9]*\)'`
+        ;;
+      *)  # positive floats
+        _flags_test_=`${FLAGS_EXPR_CMD} -- "${_flags_float_}" :\
+            '\([0-9]*\.[0-9]*\)'`
+        ;;
+    esac
+    [ "${_flags_test_}" != "${_flags_float_}" ] && flags_return=${FLAGS_FALSE}
+    unset _flags_test_
+  fi
+
+  unset _flags_float_ _flags_float_whole_ _flags_float_fraction_
+  return ${flags_return}
+}
+
+# Validate an integer.
+#
+# Args:
+#   _flags_int_: integer: value to validate
+# Returns:
+#   bool: true if the value is a valid integer
+_flags_validInt()
+{
+  flags_return=${FLAGS_FALSE}
+  [ -n "$1" ] || return ${flags_return}
+  _flags_int_=$1
+
+  case ${_flags_int_} in
+    -*.*) ;;  # ignore negative floats (we'll invalidate them later)
+    -*)  # strip possible leading negative sign
+      if _flags_useBuiltin; then
+        _flags_int_=${_flags_int_#-}
+      else
+        _flags_int_=`${FLAGS_EXPR_CMD} -- "${_flags_int_}" : '-\([0-9][0-9]*\)'`
+      fi
+      ;;
+  esac
+
+  case ${_flags_int_} in
+    *[!0-9]*) flags_return=${FLAGS_FALSE} ;;
+    *) flags_return=${FLAGS_TRUE} ;;
+  esac
+
+  unset _flags_int_
+  return ${flags_return}
+}
+
+# Parse command-line options using the standard getopt.
+#
+# Note: the flag options are passed around in the global __flags_opts so that
+# the formatting is not lost due to shell parsing and such.
+#
+# Args:
+#   @: varies: command-line options to parse
+# Returns:
+#   integer: a FLAGS success condition
+_flags_getoptStandard()
+{
+  flags_return=${FLAGS_TRUE}
+  _flags_shortOpts_=`_flags_genOptStr ${__FLAGS_OPTSTR_SHORT}`
+
+  # check for spaces in passed options
+  for _flags_opt_ in "$@"; do
+    # note: the silliness with the x's is purely for ksh93 on Ubuntu 6.06
+    _flags_match_=`echo "x${_flags_opt_}x" |sed 's/ //g'`
+    if [ "${_flags_match_}" != "x${_flags_opt_}x" ]; then
+      flags_error='the available getopt does not support spaces in options'
+      flags_return=${FLAGS_ERROR}
+      break
+    fi
+  done
+
+  if [ ${flags_return} -eq ${FLAGS_TRUE} ]; then
+    __flags_opts=`getopt ${_flags_shortOpts_} $@ 2>&1`
+    _flags_rtrn_=$?
+    if [ ${_flags_rtrn_} -ne ${FLAGS_TRUE} ]; then
+      _flags_warn "${__flags_opts}"
+      flags_error='unable to parse provided options with getopt.'
+      flags_return=${FLAGS_ERROR}
+    fi
+  fi
+
+  unset _flags_match_ _flags_opt_ _flags_rtrn_ _flags_shortOpts_
+  return ${flags_return}
+}
+
+# Parse command-line options using the enhanced getopt.
+#
+# Note: the flag options are passed around in the global __flags_opts so that
+# the formatting is not lost due to shell parsing and such.
+#
+# Args:
+#   @: varies: command-line options to parse
+# Returns:
+#   integer: a FLAGS success condition
+_flags_getoptEnhanced()
+{
+  flags_return=${FLAGS_TRUE}
+  _flags_shortOpts_=`_flags_genOptStr ${__FLAGS_OPTSTR_SHORT}`
+  _flags_boolOpts_=`echo "${__flags_boolNames}" \
+      |sed 's/^ *//;s/ *$//;s/ /,/g'`
+  _flags_longOpts_=`_flags_genOptStr ${__FLAGS_OPTSTR_LONG}`
+
+  __flags_opts=`${FLAGS_GETOPT_CMD} \
+      -o ${_flags_shortOpts_} \
+      -l "${_flags_longOpts_},${_flags_boolOpts_}" \
+      -- "$@" 2>&1`
+  _flags_rtrn_=$?
+  if [ ${_flags_rtrn_} -ne ${FLAGS_TRUE} ]; then
+    _flags_warn "${__flags_opts}"
+    flags_error='unable to parse provided options with getopt.'
+    flags_return=${FLAGS_ERROR}
+  fi
+
+  unset _flags_boolOpts_ _flags_longOpts_ _flags_rtrn_ _flags_shortOpts_
+  return ${flags_return}
+}
+
+# Dynamically parse a getopt result and set appropriate variables.
+#
+# This function does the actual conversion of getopt output and runs it through
+# the standard case structure for parsing. The case structure is actually quite
+# dynamic to support any number of flags.
+#
+# Args:
+#   argc: int: original command-line argument count
+#   @: varies: output from getopt parsing
+# Returns:
+#   integer: a FLAGS success condition
+_flags_parseGetopt()
+{
+  _flags_argc_=$1
+  shift
+
+  flags_return=${FLAGS_TRUE}
+
+  if [ ${__FLAGS_GETOPT_VERS} -ne ${__FLAGS_GETOPT_VERS_ENH} ]; then
+    set -- $@
+  else
+    # note the quotes around the `$@' -- they are essential!
+    eval set -- "$@"
+  fi
+
+  # Provide user with the number of arguments to shift by later.
+  # NOTE: the FLAGS_ARGC variable is obsolete as of 1.0.3 because it does not
+  # properly give user access to non-flag arguments mixed in between flag
+  # arguments. Its usage was replaced by FLAGS_ARGV, and it is being kept only
+  # for backwards compatibility reasons.
+  FLAGS_ARGC=`_flags_math "$# - 1 - ${_flags_argc_}"`
+
+  # handle options. note options with values must do an additional shift
+  while true; do
+    _flags_opt_=$1
+    _flags_arg_=${2:-}
+    _flags_type_=${__FLAGS_TYPE_NONE}
+    _flags_name_=''
+
+    # determine long flag name
+    case "${_flags_opt_}" in
+      --) shift; break ;;  # discontinue option parsing
+
+      --*)  # long option
+        if _flags_useBuiltin; then
+          _flags_opt_=${_flags_opt_#*--}
+        else
+          _flags_opt_=`${FLAGS_EXPR_CMD} -- "${_flags_opt_}" : '--\(.*\)'`
+        fi
+        _flags_len_=${__FLAGS_LEN_LONG}
+        if _flags_itemInList "${_flags_opt_}" ${__flags_longNames}; then
+          _flags_name_=${_flags_opt_}
+        else
+          # check for negated long boolean version
+          if _flags_itemInList "${_flags_opt_}" ${__flags_boolNames}; then
+            if _flags_useBuiltin; then
+              _flags_name_=${_flags_opt_#*no}
+            else
+              _flags_name_=`${FLAGS_EXPR_CMD} -- "${_flags_opt_}" : 'no\(.*\)'`
+            fi
+            _flags_type_=${__FLAGS_TYPE_BOOLEAN}
+            _flags_arg_=${__FLAGS_NULL}
+          fi
+        fi
+        ;;
+
+      -*)  # short option
+        if _flags_useBuiltin; then
+          _flags_opt_=${_flags_opt_#*-}
+        else
+          _flags_opt_=`${FLAGS_EXPR_CMD} -- "${_flags_opt_}" : '-\(.*\)'`
+        fi
+        _flags_len_=${__FLAGS_LEN_SHORT}
+        if _flags_itemInList "${_flags_opt_}" ${__flags_shortNames}; then
+          # yes. match short name to long name. note purposeful off-by-one
+          # (too high) with awk calculations.
+          _flags_pos_=`echo "${__flags_shortNames}" \
+              |awk 'BEGIN{RS=" ";rn=0}$0==e{rn=NR}END{print rn}' \
+                  e=${_flags_opt_}`
+          _flags_name_=`echo "${__flags_longNames}" \
+              |awk 'BEGIN{RS=" "}rn==NR{print $0}' rn="${_flags_pos_}"`
+        fi
+        ;;
+    esac
+
+    # die if the flag was unrecognized
+    if [ -z "${_flags_name_}" ]; then
+      flags_error="unrecognized option (${_flags_opt_})"
+      flags_return=${FLAGS_ERROR}
+      break
+    fi
+
+    # set new flag value
+    _flags_usName_=`_flags_underscoreName ${_flags_name_}`
+    [ ${_flags_type_} -eq ${__FLAGS_TYPE_NONE} ] && \
+        _flags_type_=`_flags_getFlagInfo \
+            "${_flags_usName_}" ${__FLAGS_INFO_TYPE}`
+    case ${_flags_type_} in
+      ${__FLAGS_TYPE_BOOLEAN})
+        if [ ${_flags_len_} -eq ${__FLAGS_LEN_LONG} ]; then
+          if [ "${_flags_arg_}" != "${__FLAGS_NULL}" ]; then
+            eval "FLAGS_${_flags_usName_}=${FLAGS_TRUE}"
+          else
+            eval "FLAGS_${_flags_usName_}=${FLAGS_FALSE}"
+          fi
+        else
+          _flags_strToEval_="_flags_val_=\
+\${__flags_${_flags_usName_}_${__FLAGS_INFO_DEFAULT}}"
+          eval "${_flags_strToEval_}"
+          if [ ${_flags_val_} -eq ${FLAGS_FALSE} ]; then
+            eval "FLAGS_${_flags_usName_}=${FLAGS_TRUE}"
+          else
+            eval "FLAGS_${_flags_usName_}=${FLAGS_FALSE}"
+          fi
+        fi
+        ;;
+
+      ${__FLAGS_TYPE_FLOAT})
+        if _flags_validFloat "${_flags_arg_}"; then
+          eval "FLAGS_${_flags_usName_}='${_flags_arg_}'"
+        else
+          flags_error="invalid float value (${_flags_arg_})"
+          flags_return=${FLAGS_ERROR}
+          break
+        fi
+        ;;
+
+      ${__FLAGS_TYPE_INTEGER})
+        if _flags_validInt "${_flags_arg_}"; then
+          eval "FLAGS_${_flags_usName_}='${_flags_arg_}'"
+        else
+          flags_error="invalid integer value (${_flags_arg_})"
+          flags_return=${FLAGS_ERROR}
+          break
+        fi
+        ;;
+
+      ${__FLAGS_TYPE_STRING})
+        eval "FLAGS_${_flags_usName_}='${_flags_arg_}'"
+        ;;
+    esac
+
+    # handle special case help flag
+    if [ "${_flags_usName_}" = 'help' ]; then
+      if [ ${FLAGS_help} -eq ${FLAGS_TRUE} ]; then
+        flags_help
+        flags_error='help requested'
+        flags_return=${FLAGS_TRUE}
+        break
+      fi
+    fi
+
+    # shift the option and non-boolean arguments out.
+    shift
+    [ ${_flags_type_} != ${__FLAGS_TYPE_BOOLEAN} ] && shift
+  done
+
+  # give user back non-flag arguments
+  FLAGS_ARGV=''
+  while [ $# -gt 0 ]; do
+    FLAGS_ARGV="${FLAGS_ARGV:+${FLAGS_ARGV} }'$1'"
+    shift
+  done
+
+  unset _flags_arg_ _flags_len_ _flags_name_ _flags_opt_ _flags_pos_ \
+      _flags_strToEval_ _flags_type_ _flags_usName_ _flags_val_
+  return ${flags_return}
+}
+
+# Perform some math using built-ins.
+#
+# Args:
+#   $@: string: math expression to evaluate
+# Output:
+#   integer: the result
+# Returns:
+#   bool: success of math evaluation
+_flags_math()
+{
+  if [ $# -eq 0 ]; then
+    flags_return=${FLAGS_FALSE}
+  elif _flags_useBuiltin; then
+    # Variable assignment is needed as workaround for Solaris Bourne shell,
+    # which cannot parse a bare $((expression)).
+    _flags_expr_='$(($@))'
+    eval echo ${_flags_expr_}
+    flags_return=$?
+    unset _flags_expr_
+  else
+    eval expr $@
+    flags_return=$?
+  fi
+
+  return ${flags_return}
+}
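+
+# e.g. (illustrative): `_flags_math '3 - 1'` echoes '2', using $((...)) when
+# shell built-ins are usable and falling back to expr otherwise.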
+
+# Cross-platform strlen() implementation.
+#
+# Args:
+#   _flags_str: string: to determine length of
+# Output:
+#   integer: length of string
+# Returns:
+#   bool: success of strlen evaluation
+_flags_strlen()
+{
+  _flags_str_=${1:-}
+
+  if [ -z "${_flags_str_}" ]; then
+    flags_output=0
+  elif _flags_useBuiltin; then
+    flags_output=${#_flags_str_}
+  else
+    flags_output=`${FLAGS_EXPR_CMD} -- "${_flags_str_}" : '.*'`
+  fi
+  flags_return=$?
+
+  unset _flags_str_
+  echo ${flags_output}
+  return ${flags_return}
+}
+
+# Use built-in helper function to enable unit testing.
+#
+# Args:
+#   None
+# Returns:
+#   bool: true if built-ins should be used
+_flags_useBuiltin()
+{
+  return ${__FLAGS_USE_BUILTIN}
+}
+
+#------------------------------------------------------------------------------
+# public functions
+#
+
+# A basic boolean flag. Boolean flags do not take any arguments, and their
+# value is either 1 (false) or 0 (true). For long flags, the false value is
+# specified on the command line by prepending the word 'no'. With short flags,
+# the presence of the flag toggles the current value between true and false.
+# Specifying a short boolean flag twice on the command line toggles the
+# value back to the default value.
+#
+# A default value is required for boolean flags.
+#
+# For example, let's say a Boolean flag was created whose long name was 'update'
+# and whose short name was 'x', and the default value was 'false'. This flag
+# could be explicitly set to 'true' with '--update' or by '-x', and it could be
+# explicitly set to 'false' with '--noupdate'.
+DEFINE_boolean() { _flags_define ${__FLAGS_TYPE_BOOLEAN} "$@"; }
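+
+# Illustrative definition matching the description above:
+#   DEFINE_boolean 'update' false 'apply the update step' 'x'
+# After FLAGS "$@", FLAGS_update equals ${FLAGS_TRUE} when --update or -x was
+# passed, and ${FLAGS_FALSE} for --noupdate or when left at the default.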
+
+# Other basic flags.
+DEFINE_float()   { _flags_define ${__FLAGS_TYPE_FLOAT} "$@"; }
+DEFINE_integer() { _flags_define ${__FLAGS_TYPE_INTEGER} "$@"; }
+DEFINE_string()  { _flags_define ${__FLAGS_TYPE_STRING} "$@"; }
+
+# Parse the flags.
+#
+# Args:
+#   unnamed: list: command-line flags to parse
+# Returns:
+#   integer: success of operation, or error
+FLAGS()
+{
+  # define a standard 'help' flag if one isn't already defined
+  [ -z "${__flags_help_type:-}" ] && \
+      DEFINE_boolean 'help' false 'show this help' 'h'
+
+  # parse options
+  if [ $# -gt 0 ]; then
+    if [ ${__FLAGS_GETOPT_VERS} -ne ${__FLAGS_GETOPT_VERS_ENH} ]; then
+      _flags_getoptStandard "$@"
+    else
+      _flags_getoptEnhanced "$@"
+    fi
+    flags_return=$?
+  else
+    # nothing passed; won't bother running getopt
+    __flags_opts='--'
+    flags_return=${FLAGS_TRUE}
+  fi
+
+  if [ ${flags_return} -eq ${FLAGS_TRUE} ]; then
+    _flags_parseGetopt $# "${__flags_opts}"
+    flags_return=$?
+  fi
+
+  [ ${flags_return} -eq ${FLAGS_ERROR} ] && _flags_fatal "${flags_error}"
+  return ${flags_return}
+}
+
+# This is a helper function for determining the 'getopt' version for platforms
+# where the detection isn't working. It simply outputs debug information that
+# can be included in a bug report.
+#
+# Args:
+#   none
+# Output:
+#   debug info that can be included in a bug report
+# Returns:
+#   nothing
+flags_getoptInfo()
+{
+  # platform info
+  _flags_debug "uname -a: `uname -a`"
+  _flags_debug "PATH: ${PATH}"
+
+  # shell info
+  if [ -n "${BASH_VERSION:-}" ]; then
+    _flags_debug 'shell: bash'
+    _flags_debug "BASH_VERSION: ${BASH_VERSION}"
+  elif [ -n "${ZSH_VERSION:-}" ]; then
+    _flags_debug 'shell: zsh'
+    _flags_debug "ZSH_VERSION: ${ZSH_VERSION}"
+  fi
+
+  # getopt info
+  ${FLAGS_GETOPT_CMD} >/dev/null
+  _flags_getoptReturn=$?
+  _flags_debug "getopt return: ${_flags_getoptReturn}"
+  _flags_debug "getopt --version: `${FLAGS_GETOPT_CMD} --version 2>&1`"
+
+  unset _flags_getoptReturn
+}
+
+# Returns whether the detected getopt version is the enhanced version.
+#
+# Args:
+#   none
+# Output:
+#   none
+# Returns:
+#   bool: true if getopt is the enhanced version
+flags_getoptIsEnh()
+{
+  test ${__FLAGS_GETOPT_VERS} -eq ${__FLAGS_GETOPT_VERS_ENH}
+}
+
+# Returns whether the detected getopt version is the standard version.
+#
+# Args:
+#   none
+# Returns:
+#   bool: true if getopt is the standard version
+flags_getoptIsStd()
+{
+  test ${__FLAGS_GETOPT_VERS} -eq ${__FLAGS_GETOPT_VERS_STD}
+}
+
+# This is effectively a 'usage()' function. It prints usage information and
+# exits the program with ${FLAGS_FALSE} if it is ever found in the command line
+# arguments. Note this function can be overridden so other apps can define
+# their own --help flag, replacing this one, if they want.
+#
+# Args:
+#   none
+# Returns:
+#   integer: success of operation (always returns true)
+flags_help()
+{
+  if [ -n "${FLAGS_HELP:-}" ]; then
+    echo "${FLAGS_HELP}" >&2
+  else
+    echo "USAGE: ${FLAGS_PARENT:-$0} [flags] args" >&2
+  fi
+  if [ -n "${__flags_longNames}" ]; then
+    echo 'flags:' >&2
+    for flags_name_ in ${__flags_longNames}; do
+      flags_flagStr_=''
+      flags_boolStr_=''
+      flags_usName_=`_flags_underscoreName ${flags_name_}`
+
+      flags_default_=`_flags_getFlagInfo \
+          "${flags_usName_}" ${__FLAGS_INFO_DEFAULT}`
+      flags_help_=`_flags_getFlagInfo \
+          "${flags_usName_}" ${__FLAGS_INFO_HELP}`
+      flags_short_=`_flags_getFlagInfo \
+          "${flags_usName_}" ${__FLAGS_INFO_SHORT}`
+      flags_type_=`_flags_getFlagInfo \
+          "${flags_usName_}" ${__FLAGS_INFO_TYPE}`
+
+      [ "${flags_short_}" != "${__FLAGS_NULL}" ] && \
+          flags_flagStr_="-${flags_short_}"
+
+      if [ ${__FLAGS_GETOPT_VERS} -eq ${__FLAGS_GETOPT_VERS_ENH} ]; then
+        [ "${flags_short_}" != "${__FLAGS_NULL}" ] && \
+            flags_flagStr_="${flags_flagStr_},"
+        # add [no] to long boolean flag names, except the 'help' flag
+        [ ${flags_type_} -eq ${__FLAGS_TYPE_BOOLEAN} \
+          -a "${flags_usName_}" != 'help' ] && \
+            flags_boolStr_='[no]'
+        flags_flagStr_="${flags_flagStr_}--${flags_boolStr_}${flags_name_}:"
+      fi
+
+      case ${flags_type_} in
+        ${__FLAGS_TYPE_BOOLEAN})
+          if [ ${flags_default_} -eq ${FLAGS_TRUE} ]; then
+            flags_defaultStr_='true'
+          else
+            flags_defaultStr_='false'
+          fi
+          ;;
+        ${__FLAGS_TYPE_FLOAT}|${__FLAGS_TYPE_INTEGER})
+          flags_defaultStr_=${flags_default_} ;;
+        ${__FLAGS_TYPE_STRING}) flags_defaultStr_="'${flags_default_}'" ;;
+      esac
+      flags_defaultStr_="(default: ${flags_defaultStr_})"
+
+      flags_helpStr_="  ${flags_flagStr_}  ${flags_help_} ${flags_defaultStr_}"
+      _flags_strlen "${flags_helpStr_}" >/dev/null
+      flags_helpStrLen_=${flags_output}
+      flags_columns_=`_flags_columns`
+
+      if [ ${flags_helpStrLen_} -lt ${flags_columns_} ]; then
+        echo "${flags_helpStr_}" >&2
+      else
+        echo "  ${flags_flagStr_}  ${flags_help_}" >&2
+        # note: the silliness with the x's is purely for ksh93 on Ubuntu 6.06
+        # because it doesn't like empty strings when used in this manner.
+        flags_emptyStr_="`echo \"x${flags_flagStr_}x\" \
+            |awk '{printf "%"length($0)-2"s", ""}'`"
+        flags_helpStr_="  ${flags_emptyStr_}  ${flags_defaultStr_}"
+        _flags_strlen "${flags_helpStr_}" >/dev/null
+        flags_helpStrLen_=${flags_output}
+
+        if [ ${__FLAGS_GETOPT_VERS} -eq ${__FLAGS_GETOPT_VERS_STD} \
+            -o ${flags_helpStrLen_} -lt ${flags_columns_} ]; then
+          # indented to match help string
+          echo "${flags_helpStr_}" >&2
+        else
+          # indented four from left to allow for longer defaults as long flag
+          # names might be used too, making things too long
+          echo "    ${flags_defaultStr_}" >&2
+        fi
+      fi
+    done
+  fi
+
+  unset flags_boolStr_ flags_default_ flags_defaultStr_ flags_emptyStr_ \
+      flags_flagStr_ flags_help_ flags_helpStr_ flags_helpStrLen_ flags_name_ \
+      flags_columns_ flags_short_ flags_type_ flags_usName_
+  return ${FLAGS_TRUE}
+}
+
+# Reset shflags back to an uninitialized state.
+#
+# Args:
+#   none
+# Returns:
+#   nothing
+flags_reset()
+{
+  for flags_name_ in ${__flags_longNames}; do
+    flags_usName_=`_flags_underscoreName ${flags_name_}`
+    flags_strToEval_="unset FLAGS_${flags_usName_}"
+    for flags_type_ in \
+        ${__FLAGS_INFO_DEFAULT} \
+        ${__FLAGS_INFO_HELP} \
+        ${__FLAGS_INFO_SHORT} \
+        ${__FLAGS_INFO_TYPE}
+    do
+      flags_strToEval_=\
+"${flags_strToEval_} __flags_${flags_usName_}_${flags_type_}"
+    done
+    eval ${flags_strToEval_}
+  done
+
+  # reset internal variables
+  __flags_boolNames=' '
+  __flags_longNames=' '
+  __flags_shortNames=' '
+  __flags_definedNames=' '
+
+  unset flags_name_ flags_type_ flags_strToEval_ flags_usName_
+}
diff --git a/src/support/ab_tools/scripts/ota_from_target_files b/src/support/ab_tools/scripts/ota_from_target_files
new file mode 100644
index 0000000..10a19b3
--- /dev/null
+++ b/src/support/ab_tools/scripts/ota_from_target_files
@@ -0,0 +1,2001 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Given a target-files zipfile, produces an OTA package that installs
+that build.  An incremental OTA is produced if -i is given, otherwise
+a full OTA is produced.
+
+Usage:  ota_from_target_files [flags] input_target_files output_ota_package
+
+  -k (--package_key) <key> Key to use to sign the package (default is
+      the value of default_system_dev_certificate from the input
+      target-files's META/misc_info.txt, or
+      "build/target/product/security/testkey" if that value is not
+      specified).
+
+      For incremental OTAs, the default value is based on the source
+      target-file, not the target build.
+
+  -i  (--incremental_from)  <file>
+      Generate an incremental OTA using the given target-files zip as
+      the starting build.
+
+  --full_radio
+      When generating an incremental OTA, always include a full copy of
+      the radio image. This option is only meaningful when -i is specified,
+      because a full radio is always included in a full OTA if applicable.
+
+  --full_bootloader
+      Similar to --full_radio. When generating an incremental OTA, always
+      include a full copy of the bootloader image.
+
+  --verify
+      Remount and verify the checksums of the files written to the system and
+      vendor (if used) partitions. Non-A/B incremental OTAs only.
+
+  -o  (--oem_settings)  <main_file[,additional_files...]>
+      Comma separated list of files used to specify the expected OEM-specific
+      properties on the OEM partition of the intended device. Multiple expected
+      values can be used by providing multiple files. Only the first dict will
+      be used to compute fingerprint, while the rest will be used to assert
+      OEM-specific properties.
+
+  --oem_no_mount
+      For devices with OEM-specific properties but without an OEM partition,
+      do not mount the OEM partition in the updater-script. This should be
+      very rarely used, since it's expected to have a dedicated OEM partition
+      for OEM-specific properties. Only meaningful when -o is specified.
+
+  --wipe_user_data
+      Generate an OTA package that will wipe the user data partition
+      when installed.
+
+  --downgrade
+      Intentionally generate an incremental OTA that updates from a newer build
+      to an older one (e.g. downgrading from P preview back to O MR1).
+      "ota-downgrade=yes" will be set in the package metadata file. A data wipe
+      will always be enforced when using this flag, so "ota-wipe=yes" will also
+      be included in the metadata file. The update-binary in the source build
+      will be used in the OTA package, unless the --binary flag is specified.
+      Please also check the comment for --override_timestamp below.
+
+  --override_timestamp
+      Intentionally generate an incremental OTA that updates from a newer build
+      to an older one (based on timestamp comparison), by setting the downgrade
+      flag in the package metadata. This differs from the --downgrade flag, as
+      we don't enforce a data wipe with this flag, because we know for sure
+      this is NOT an actual downgrade case; rather, the two builds happen to
+      have been cut in reverse order (e.g. from two branches). A legit use case
+      is that we cut a new build C (after having A and B), but want to enforce
+      an update path of A -> C -> B. Specifying --downgrade may not help since
+      that would enforce a data wipe for the C -> B update.
+
+      We used to set a fake timestamp in the package metadata for this flow. But
+      now we consolidate the two cases (i.e. an actual downgrade, or a downgrade
+      based on timestamp) with the same "ota-downgrade=yes" flag, with the
+      difference being whether "ota-wipe=yes" is set.
+
+  -e  (--extra_script)  <file>
+      Insert the contents of file at the end of the update script.
+
+  -2  (--two_step)
+      Generate a 'two-step' OTA package, where recovery is updated
+      first, so that any changes made to the system partition are done
+      using the new recovery (new kernel, etc.).
+
+  --include_secondary
+      Additionally include the payload for secondary slot images (default:
+      False). Only meaningful when generating A/B OTAs.
+
+      By default, an A/B OTA package doesn't contain the images for the
+      secondary slot (e.g. system_other.img). Specifying this flag allows
+      generating a separate payload that will install secondary slot images.
+
+      Such a package needs to be applied in a two-stage manner, with a reboot
+      in-between. During the first stage, the updater applies the primary
+      payload only. Upon finishing, it reboots the device into the newly updated
+      slot. It then continues to install the secondary payload to the inactive
+      slot, but without switching the active slot at the end (needs the matching
+      support in update_engine, i.e. SWITCH_SLOT_ON_REBOOT flag).
+
+      Due to the special install procedure, the secondary payload will always
+      be generated as a full payload.
+
+  --block
+      Generate a block-based OTA for a non-A/B device. We have deprecated
+      support for file-based OTAs since O, and block-based OTAs are used by
+      default for all non-A/B devices; this flag is kept only so as not to
+      break existing callers.
+
+  -b  (--binary)  <file>
+      Use the given binary as the update-binary in the output package,
+      instead of the binary in the build's target_files.  Use for
+      development only.
+
+  -t  (--worker_threads) <int>
+      Specifies the number of worker-threads that will be used when
+      generating patches for incremental updates (defaults to half the number
+      of CPU cores, with a minimum of 1).
+
+  --stash_threshold <float>
+      Specifies the threshold that will be used to compute the maximum
+      allowed stash size (defaults to 0.8).
+
+  --log_diff <file>
+      Generate a log file that shows the differences in the source and target
+      builds for an incremental package. This option is only meaningful when
+      -i is specified.
+
+  --payload_signer <signer>
+      Specify the signer when signing the payload and metadata for A/B OTAs.
+      By default (i.e. without this flag), it calls 'openssl pkeyutl' to sign
+      with the package private key. If the private key cannot be accessed
+      directly, a payload signer that knows how to do that should be specified.
+      The signer will be supplied with "-inkey <path_to_key>",
+      "-in <input_file>" and "-out <output_file>" parameters.
+
+  --payload_signer_args <args>
+      Specify the arguments needed for payload signer.
+
+  --skip_postinstall
+      Skip the postinstall hooks when generating an A/B OTA package (default:
+      False). Note that this discards ALL the hooks, including non-optional
+      ones. Should only be used if the caller knows it's safe to do so (e.g.
+      all the postinstall work is to dexopt apps and a data wipe will happen
+      immediately after). Only meaningful when generating A/B OTAs.
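+
+  For illustration, a minimal invocation might look like the following
+  (hypothetical file and key names; -k/--package_key selects the package
+  signing key):
+
+    ota_from_target_files -k build/target/product/security/testkey \
+        target-files.zip ota-update.zip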
+"""
+
+from __future__ import print_function
+
+import multiprocessing
+import os.path
+import shlex
+import shutil
+import struct
+import subprocess
+import sys
+import tempfile
+import zipfile
+
+import common
+import edify_generator
+
+if sys.hexversion < 0x02070000:
+  print("Python 2.7 or newer is required.", file=sys.stderr)
+  sys.exit(1)
+
+
+OPTIONS = common.OPTIONS
+OPTIONS.package_key = None
+OPTIONS.incremental_source = None
+OPTIONS.verify = False
+OPTIONS.patch_threshold = 0.95
+OPTIONS.wipe_user_data = False
+OPTIONS.downgrade = False
+OPTIONS.extra_script = None
+OPTIONS.worker_threads = multiprocessing.cpu_count() // 2
+if OPTIONS.worker_threads == 0:
+  OPTIONS.worker_threads = 1
+OPTIONS.two_step = False
+OPTIONS.include_secondary = False
+OPTIONS.no_signing = False
+OPTIONS.block_based = True
+OPTIONS.updater_binary = None
+OPTIONS.oem_source = None
+OPTIONS.oem_no_mount = False
+OPTIONS.full_radio = False
+OPTIONS.full_bootloader = False
+# Stash size cannot exceed cache_size * threshold.
+OPTIONS.cache_size = None
+OPTIONS.stash_threshold = 0.8
+OPTIONS.log_diff = None
+OPTIONS.payload_signer = None
+OPTIONS.payload_signer_args = []
+OPTIONS.extracted_input = None
+OPTIONS.key_passwords = []
+OPTIONS.skip_postinstall = False
+
+
+METADATA_NAME = 'META-INF/com/android/metadata'
+POSTINSTALL_CONFIG = 'META/postinstall_config.txt'
+UNZIP_PATTERN = ['IMAGES/*', 'META/*']
+
+
+class BuildInfo(object):
+  """A class that holds the information for a given build.
+
+  This class wraps up the property querying for a given source or target build.
+  It abstracts away the logic of handling OEM-specific properties, and caches
+  the commonly used properties such as fingerprint.
+
+  There are two types of info dicts: a) build-time info dict, which is generated
+  at build time (i.e. included in a target_files zip); b) OEM info dict that is
+  specified at package generation time (via command line argument
+  '--oem_settings'). If a build doesn't use OEM-specific properties (i.e.
+  doesn't have "oem_fingerprint_properties" in its build-time info dict), all
+  the queries would be answered based on the build-time info dict only.
+  Otherwise, if using OEM-specific properties, some of them will be calculated
+  from the two info dicts.
+
+  Users can query properties as they would with a dict() (e.g. info['fstab']),
+  or query build properties via GetBuildProp() or GetVendorBuildProp().
+
+  Attributes:
+    info_dict: The build-time info dict.
+    is_ab: Whether it's a build that uses A/B OTA.
+    oem_dicts: A list of OEM dicts.
+    oem_props: A list of OEM properties that should be read from OEM dicts; None
+        if the build doesn't use any OEM-specific property.
+    fingerprint: The fingerprint of the build, which would be calculated based
+        on OEM properties if applicable.
+    device: The device name, which could come from OEM dicts if applicable.
+  """
+
+  def __init__(self, info_dict, oem_dicts):
+    """Initializes a BuildInfo instance with the given dicts.
+
+    Arguments:
+      info_dict: The build-time info dict.
+      oem_dicts: A list of OEM dicts (which is parsed from --oem_settings). Note
+          that it always uses the first dict to calculate the fingerprint or the
+          device name. The rest would be used for asserting OEM properties only
+          (e.g. one package can be installed on any of these devices).
+    """
+    self.info_dict = info_dict
+    self.oem_dicts = oem_dicts
+
+    self._is_ab = info_dict.get("ab_update") == "true"
+    self._oem_props = info_dict.get("oem_fingerprint_properties")
+
+    if self._oem_props:
+      assert oem_dicts, "OEM source required for this build"
+
+    # These two should be computed only after setting self._oem_props.
+    self._device = self.GetOemProperty("ro.product.device")
+    self._fingerprint = self.CalculateFingerprint()
+
+  @property
+  def is_ab(self):
+    return self._is_ab
+
+  @property
+  def device(self):
+    return self._device
+
+  @property
+  def fingerprint(self):
+    return self._fingerprint
+
+  @property
+  def oem_props(self):
+    return self._oem_props
+
+  def __getitem__(self, key):
+    return self.info_dict[key]
+
+  def get(self, key, default=None):
+    return self.info_dict.get(key, default)
+
+  def GetBuildProp(self, prop):
+    """Returns the inquired build property."""
+    try:
+      return self.info_dict.get("build.prop", {})[prop]
+    except KeyError:
+      raise common.ExternalError("couldn't find %s in build.prop" % (prop,))
+
+  def GetVendorBuildProp(self, prop):
+    """Returns the inquired vendor build property."""
+    try:
+      return self.info_dict.get("vendor.build.prop", {})[prop]
+    except KeyError:
+      raise common.ExternalError(
+          "couldn't find %s in vendor.build.prop" % (prop,))
+
+  def GetOemProperty(self, key):
+    if self.oem_props is not None and key in self.oem_props:
+      return self.oem_dicts[0][key]
+    return self.GetBuildProp(key)
+
+  def CalculateFingerprint(self):
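+    # Sketch of the two paths (illustrative values): without OEM properties,
+    # this returns ro.build.fingerprint as-is, e.g.
+    #   "brand/product/device:9/BUILDID/1234:user/release-keys".
+    # With OEM properties, it composes "brand/product/device:<thumbprint>",
+    # reading each part from the first OEM dict when listed in oem_props and
+    # from build.prop otherwise.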
+    if self.oem_props is None:
+      return self.GetBuildProp("ro.build.fingerprint")
+    return "%s/%s/%s:%s" % (
+        self.GetOemProperty("ro.product.brand"),
+        self.GetOemProperty("ro.product.name"),
+        self.GetOemProperty("ro.product.device"),
+        self.GetBuildProp("ro.build.thumbprint"))
+
+  def WriteMountOemScript(self, script):
+    assert self.oem_props is not None
+    recovery_mount_options = self.info_dict.get("recovery_mount_options")
+    script.Mount("/oem", recovery_mount_options)
+
+  def WriteDeviceAssertions(self, script, oem_no_mount):
+    # Read the property directly if not using OEM properties.
+    if not self.oem_props:
+      script.AssertDevice(self.device)
+      return
+
+    # Otherwise assert OEM properties.
+    if not self.oem_dicts:
+      raise common.ExternalError(
+          "No OEM file provided to answer expected assertions")
+
+    for prop in self.oem_props.split():
+      values = []
+      for oem_dict in self.oem_dicts:
+        if prop in oem_dict:
+          values.append(oem_dict[prop])
+      if not values:
+        raise common.ExternalError(
+            "The OEM file is missing the property %s" % (prop,))
+      script.AssertOemProperty(prop, values, oem_no_mount)
+
+
+class PayloadSigner(object):
+  """A class that wraps the payload signing works.
+
+  When generating a Payload, hashes of the payload and metadata files will be
+  signed with the device key, either by calling an external payload signer or
+  by calling openssl with the package key. This class provides a unified
+  interface, so that callers can just call PayloadSigner.Sign().
+
+  If an external payload signer has been specified (OPTIONS.payload_signer), it
+  calls the signer with the provided args (OPTIONS.payload_signer_args). Note
+  that the signing key should be provided as part of the payload_signer_args.
+  Otherwise without an external signer, it uses the package key
+  (OPTIONS.package_key) and calls openssl for the signing work.
+  """
+
+  def __init__(self):
+    if OPTIONS.payload_signer is None:
+      # Prepare the payload signing key.
+      private_key = OPTIONS.package_key + OPTIONS.private_key_suffix
+      pw = OPTIONS.key_passwords[OPTIONS.package_key]
+
+      cmd = ["openssl", "pkcs8", "-in", private_key, "-inform", "DER"]
+      cmd.extend(["-passin", "pass:" + pw] if pw else ["-nocrypt"])
+      signing_key = common.MakeTempFile(prefix="key-", suffix=".key")
+      cmd.extend(["-out", signing_key])
+
+      get_signing_key = common.Run(cmd, verbose=False, stdout=subprocess.PIPE,
+                                   stderr=subprocess.STDOUT)
+      stdoutdata, _ = get_signing_key.communicate()
+      assert get_signing_key.returncode == 0, \
+          "Failed to get signing key: {}".format(stdoutdata)
+
+      self.signer = "openssl"
+      self.signer_args = ["pkeyutl", "-sign", "-inkey", signing_key,
+                          "-pkeyopt", "digest:sha256"]
+    else:
+      self.signer = OPTIONS.payload_signer
+      self.signer_args = OPTIONS.payload_signer_args
+
+  def Sign(self, in_file):
+    """Signs the given input file. Returns the output filename."""
+    out_file = common.MakeTempFile(prefix="signed-", suffix=".bin")
+    cmd = [self.signer] + self.signer_args + ['-in', in_file, '-out', out_file]
+    signing = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+    stdoutdata, _ = signing.communicate()
+    assert signing.returncode == 0, \
+        "Failed to sign the input file: {}".format(stdoutdata)
+    return out_file
+
+
+class Payload(object):
+  """Manages the creation and the signing of an A/B OTA Payload."""
+
+  PAYLOAD_BIN = 'payload.bin'
+  PAYLOAD_PROPERTIES_TXT = 'payload_properties.txt'
+  SECONDARY_PAYLOAD_BIN = 'secondary/payload.bin'
+  SECONDARY_PAYLOAD_PROPERTIES_TXT = 'secondary/payload_properties.txt'
+
+  def __init__(self, secondary=False):
+    """Initializes a Payload instance.
+
+    Args:
+      secondary: Whether it's generating a secondary payload (default: False).
+    """
+    # The place where the output from the subprocess should go.
+    self._log_file = sys.stdout if OPTIONS.verbose else subprocess.PIPE
+    self.payload_file = None
+    self.payload_properties = None
+    self.secondary = secondary
+
+  def Generate(self, target_file, source_file=None, additional_args=None):
+    """Generates a payload from the given target-files zip(s).
+
+    Args:
+      target_file: The filename of the target build target-files zip.
+      source_file: The filename of the source build target-files zip; or None if
+          generating a full OTA.
+      additional_args: A list of additional args that should be passed to
+          brillo_update_payload script; or None.
+    """
+    if additional_args is None:
+      additional_args = []
+
+    payload_file = common.MakeTempFile(prefix="payload-", suffix=".bin")
+    cmd = ["brillo_update_payload", "generate",
+           "--payload", payload_file,
+           "--target_image", target_file]
+    if source_file is not None:
+      cmd.extend(["--source_image", source_file])
+    cmd.extend(additional_args)
+    p = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
+    stdoutdata, _ = p.communicate()
+    assert p.returncode == 0, \
+        "brillo_update_payload generate failed: {}".format(stdoutdata)
+
+    self.payload_file = payload_file
+    self.payload_properties = None
+
+  def Sign(self, payload_signer):
+    """Generates and signs the hashes of the payload and metadata.
+
+    Args:
+      payload_signer: A PayloadSigner() instance that serves the signing work.
+
+    Raises:
+      AssertionError: On any failure when calling brillo_update_payload script.
+    """
+    assert isinstance(payload_signer, PayloadSigner)
+
+    # 1. Generate hashes of the payload and metadata files.
+    payload_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin")
+    metadata_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin")
+    cmd = ["brillo_update_payload", "hash",
+           "--unsigned_payload", self.payload_file,
+           "--signature_size", "256",
+           "--metadata_hash_file", metadata_sig_file,
+           "--payload_hash_file", payload_sig_file]
+    p1 = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
+    p1.communicate()
+    assert p1.returncode == 0, "brillo_update_payload hash failed"
+
+    # 2. Sign the hashes.
+    signed_payload_sig_file = payload_signer.Sign(payload_sig_file)
+    signed_metadata_sig_file = payload_signer.Sign(metadata_sig_file)
+
+    # 3. Insert the signatures back into the payload file.
+    signed_payload_file = common.MakeTempFile(prefix="signed-payload-",
+                                              suffix=".bin")
+    cmd = ["brillo_update_payload", "sign",
+           "--unsigned_payload", self.payload_file,
+           "--payload", signed_payload_file,
+           "--signature_size", "256",
+           "--metadata_signature_file", signed_metadata_sig_file,
+           "--payload_signature_file", signed_payload_sig_file]
+    p1 = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
+    p1.communicate()
+    assert p1.returncode == 0, "brillo_update_payload sign failed"
+
+    # 4. Dump the signed payload properties.
+    properties_file = common.MakeTempFile(prefix="payload-properties-",
+                                          suffix=".txt")
+    cmd = ["brillo_update_payload", "properties",
+           "--payload", signed_payload_file,
+           "--properties_file", properties_file]
+    p1 = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
+    p1.communicate()
+    assert p1.returncode == 0, "brillo_update_payload properties failed"
+
+    if self.secondary:
+      with open(properties_file, "a") as f:
+        f.write("SWITCH_SLOT_ON_REBOOT=0\n")
+
+    if OPTIONS.wipe_user_data:
+      with open(properties_file, "a") as f:
+        f.write("POWERWASH=1\n")
+
+    self.payload_file = signed_payload_file
+    self.payload_properties = properties_file
+
+  def WriteToZip(self, output_zip):
+    """Writes the payload to the given zip.
+
+    Args:
+      output_zip: The output ZipFile instance.
+    """
+    assert self.payload_file is not None
+    assert self.payload_properties is not None
+
+    if self.secondary:
+      payload_arcname = Payload.SECONDARY_PAYLOAD_BIN
+      payload_properties_arcname = Payload.SECONDARY_PAYLOAD_PROPERTIES_TXT
+    else:
+      payload_arcname = Payload.PAYLOAD_BIN
+      payload_properties_arcname = Payload.PAYLOAD_PROPERTIES_TXT
+
+    # Add the signed payload file and properties into the zip. In order to
+    # support streaming, we pack them as ZIP_STORED. So these entries can be
+    # read directly with the offset and length pairs.
+    common.ZipWrite(output_zip, self.payload_file, arcname=payload_arcname,
+                    compress_type=zipfile.ZIP_STORED)
+    common.ZipWrite(output_zip, self.payload_properties,
+                    arcname=payload_properties_arcname,
+                    compress_type=zipfile.ZIP_STORED)
+
+
+def SignOutput(temp_zip_name, output_zip_name):
+  pw = OPTIONS.key_passwords[OPTIONS.package_key]
+
+  common.SignFile(temp_zip_name, output_zip_name, OPTIONS.package_key, pw,
+                  whole_file=True)
+
+
+def _LoadOemDicts(oem_source):
+  """Returns the list of loaded OEM properties dict."""
+  if not oem_source:
+    return None
+
+  oem_dicts = []
+  for oem_file in oem_source:
+    with open(oem_file) as fp:
+      oem_dicts.append(common.LoadDictionaryFromLines(fp.readlines()))
+  return oem_dicts
+
+
+def _WriteRecoveryImageToBoot(script, output_zip):
+  """Find and write recovery image to /boot in two-step OTA.
+
+  In two-step OTAs, we write the recovery image to /boot as the first step, so
+  that we can reboot into it and install a new recovery image to /recovery.
+  A special "recovery-two-step.img" is preferred if available, as it encodes
+  the correct path of "/boot". Otherwise the device may show a "device is
+  corrupt" message when booting into /boot.
+
+  Fall back to using the regular recovery.img if the two-step recovery image
+  doesn't exist. Note that rebuilding the special image at this point may be
+  infeasible, because we don't have the desired boot signer and keys when
+  calling ota_from_target_files.py.
+  """
+
+  recovery_two_step_img_name = "recovery-two-step.img"
+  recovery_two_step_img_path = os.path.join(
+      OPTIONS.input_tmp, "IMAGES", recovery_two_step_img_name)
+  if os.path.exists(recovery_two_step_img_path):
+    recovery_two_step_img = common.GetBootableImage(
+        recovery_two_step_img_name, recovery_two_step_img_name,
+        OPTIONS.input_tmp, "RECOVERY")
+    common.ZipWriteStr(
+        output_zip, recovery_two_step_img_name, recovery_two_step_img.data)
+    print("two-step package: using %s in stage 1/3" % (
+        recovery_two_step_img_name,))
+    script.WriteRawImage("/boot", recovery_two_step_img_name)
+  else:
+    print("two-step package: using recovery.img in stage 1/3")
+    # The "recovery.img" entry has been written into package earlier.
+    script.WriteRawImage("/boot", "recovery.img")
+
+
+def HasRecoveryPatch(target_files_zip):
+  namelist = target_files_zip.namelist()
+  return ("SYSTEM/recovery-from-boot.p" in namelist or
+          "SYSTEM/etc/recovery.img" in namelist)
+
+
+def HasVendorPartition(target_files_zip):
+  try:
+    target_files_zip.getinfo("VENDOR/")
+    return True
+  except KeyError:
+    return False
+
+
+def HasTrebleEnabled(target_files_zip, target_info):
+  return (HasVendorPartition(target_files_zip) and
+          target_info.GetBuildProp("ro.treble.enabled") == "true")
+
+
+def WriteFingerprintAssertion(script, target_info, source_info):
+  source_oem_props = source_info.oem_props
+  target_oem_props = target_info.oem_props
+
+  if source_oem_props is None and target_oem_props is None:
+    script.AssertSomeFingerprint(
+        source_info.fingerprint, target_info.fingerprint)
+  elif source_oem_props is not None and target_oem_props is not None:
+    script.AssertSomeThumbprint(
+        target_info.GetBuildProp("ro.build.thumbprint"),
+        source_info.GetBuildProp("ro.build.thumbprint"))
+  elif source_oem_props is None and target_oem_props is not None:
+    script.AssertFingerprintOrThumbprint(
+        source_info.fingerprint,
+        target_info.GetBuildProp("ro.build.thumbprint"))
+  else:
+    script.AssertFingerprintOrThumbprint(
+        target_info.fingerprint,
+        source_info.GetBuildProp("ro.build.thumbprint"))
+
+
+def AddCompatibilityArchiveIfTrebleEnabled(target_zip, output_zip, target_info,
+                                           source_info=None):
+  """Adds compatibility info into the output zip if it's Treble-enabled target.
+
+  Metadata used for on-device compatibility verification is retrieved from
+  target_zip then added to compatibility.zip which is added to the output_zip
+  archive.
+
+  Compatibility archive should only be included for devices that have enabled
+  Treble support.
+
+  Args:
+    target_zip: Zip file containing the source files to be included for OTA.
+    output_zip: Zip file that will be sent for OTA.
+    target_info: The BuildInfo instance that holds the target build info.
+    source_info: The BuildInfo instance that holds the source build info, if
+        generating an incremental OTA; None otherwise.
+  """
+
+  def AddCompatibilityArchive(system_updated, vendor_updated):
+    """Adds compatibility info based on system/vendor update status.
+
+    Args:
+      system_updated: If True, the system image will be updated and therefore
+          its metadata should be included.
+      vendor_updated: If True, the vendor image will be updated and therefore
+          its metadata should be included.
+    """
+    # Determine what metadata we need. File names are relative to META/.
+    compatibility_files = []
+    vendor_metadata = ("vendor_manifest.xml", "vendor_matrix.xml")
+    system_metadata = ("system_manifest.xml", "system_matrix.xml")
+    if vendor_updated:
+      compatibility_files += vendor_metadata
+    if system_updated:
+      compatibility_files += system_metadata
+
+    # Create new archive.
+    compatibility_archive = tempfile.NamedTemporaryFile()
+    compatibility_archive_zip = zipfile.ZipFile(
+        compatibility_archive, "w", compression=zipfile.ZIP_DEFLATED)
+
+    # Add metadata.
+    for file_name in compatibility_files:
+      target_file_name = "META/" + file_name
+
+      if target_file_name in target_zip.namelist():
+        data = target_zip.read(target_file_name)
+        common.ZipWriteStr(compatibility_archive_zip, file_name, data)
+
+    # Ensure files are written before we copy into output_zip.
+    compatibility_archive_zip.close()
+
+    # Only add the archive if we have any compatibility info.
+    if compatibility_archive_zip.namelist():
+      common.ZipWrite(output_zip, compatibility_archive.name,
+                      arcname="compatibility.zip",
+                      compress_type=zipfile.ZIP_STORED)
+
+  # Will only proceed if the target has enabled the Treble support (as well as
+  # having a /vendor partition).
+  if not HasTrebleEnabled(target_zip, target_info):
+    return
+
+  # We don't support OEM thumbprint in Treble world (which calculates
+  # fingerprints in a different way as shown in CalculateFingerprint()).
+  assert not target_info.oem_props
+
+  # Full OTA carries the info for system/vendor both.
+  if source_info is None:
+    AddCompatibilityArchive(True, True)
+    return
+
+  assert not source_info.oem_props
+
+  source_fp = source_info.fingerprint
+  target_fp = target_info.fingerprint
+  system_updated = source_fp != target_fp
+
+  source_fp_vendor = source_info.GetVendorBuildProp(
+      "ro.vendor.build.fingerprint")
+  target_fp_vendor = target_info.GetVendorBuildProp(
+      "ro.vendor.build.fingerprint")
+  vendor_updated = source_fp_vendor != target_fp_vendor
+
+  AddCompatibilityArchive(system_updated, vendor_updated)
+
+
+def WriteFullOTAPackage(input_zip, output_file):
+  target_info = BuildInfo(OPTIONS.info_dict, OPTIONS.oem_dicts)
+
+  # We don't know what version it will be installed on top of. We expect the API
+  # just won't change very often. Similarly for fstab, it might have changed in
+  # the target build.
+  target_api_version = target_info["recovery_api_version"]
+  script = edify_generator.EdifyGenerator(target_api_version, target_info)
+
+  if target_info.oem_props and not OPTIONS.oem_no_mount:
+    target_info.WriteMountOemScript(script)
+
+  metadata = GetPackageMetadata(target_info)
+
+  if not OPTIONS.no_signing:
+    staging_file = common.MakeTempFile(suffix='.zip')
+  else:
+    staging_file = output_file
+
+  output_zip = zipfile.ZipFile(
+      staging_file, "w", compression=zipfile.ZIP_DEFLATED)
+
+  device_specific = common.DeviceSpecificParams(
+      input_zip=input_zip,
+      input_version=target_api_version,
+      output_zip=output_zip,
+      script=script,
+      input_tmp=OPTIONS.input_tmp,
+      metadata=metadata,
+      info_dict=OPTIONS.info_dict)
+
+  assert HasRecoveryPatch(input_zip)
+
+  # Assertions (e.g. downgrade check, device properties check).
+  ts = target_info.GetBuildProp("ro.build.date.utc")
+  ts_text = target_info.GetBuildProp("ro.build.date")
+  script.AssertOlderBuild(ts, ts_text)
+
+  target_info.WriteDeviceAssertions(script, OPTIONS.oem_no_mount)
+  device_specific.FullOTA_Assertions()
+
+  # Two-step package strategy (in chronological order, which is *not*
+  # the order in which the generated script has things):
+  #
+  # if stage is not "2/3" or "3/3":
+  #    write recovery image to boot partition
+  #    set stage to "2/3"
+  #    reboot to boot partition and restart recovery
+  # else if stage is "2/3":
+  #    write recovery image to recovery partition
+  #    set stage to "3/3"
+  #    reboot to recovery partition and restart recovery
+  # else:
+  #    (stage must be "3/3")
+  #    set stage to ""
+  #    do normal full package installation:
+  #       wipe and install system, boot image, etc.
+  #       set up system to update recovery partition on first boot
+  #    complete script normally
+  #    (allow recovery to mark itself finished and reboot)
+
+  recovery_img = common.GetBootableImage("recovery.img", "recovery.img",
+                                         OPTIONS.input_tmp, "RECOVERY")
+  if OPTIONS.two_step:
+    assert target_info.get("multistage_support"), \
+        "two-step packages not supported by this build"
+    fs = target_info["fstab"]["/misc"]
+    assert fs.fs_type.upper() == "EMMC", \
+        "two-step packages only supported on devices with EMMC /misc partitions"
+    bcb_dev = {"bcb_dev": fs.device}
+    common.ZipWriteStr(output_zip, "recovery.img", recovery_img.data)
+    script.AppendExtra("""
+if get_stage("%(bcb_dev)s") == "2/3" then
+""" % bcb_dev)
+
+    # Stage 2/3: Write recovery image to /recovery (currently running /boot).
+    script.Comment("Stage 2/3")
+    script.WriteRawImage("/recovery", "recovery.img")
+    script.AppendExtra("""
+set_stage("%(bcb_dev)s", "3/3");
+reboot_now("%(bcb_dev)s", "recovery");
+else if get_stage("%(bcb_dev)s") == "3/3" then
+""" % bcb_dev)
+
+    # Stage 3/3: Make changes.
+    script.Comment("Stage 3/3")
+
+  # Dump fingerprints
+  script.Print("Target: {}".format(target_info.fingerprint))
+
+  device_specific.FullOTA_InstallBegin()
+
+  system_progress = 0.75
+
+  if OPTIONS.wipe_user_data:
+    system_progress -= 0.1
+  if HasVendorPartition(input_zip):
+    system_progress -= 0.1
+
+  script.ShowProgress(system_progress, 0)
+
+  # See the notes in WriteBlockIncrementalOTAPackage().
+  allow_shared_blocks = target_info.get('ext4_share_dup_blocks') == "true"
+
+  # Full OTA is done as an "incremental" against an empty source image. This
+  # has the effect of writing new data from the package to the entire
+  # partition, but lets us reuse the updater code that writes incrementals to
+  # do it.
+  system_tgt = common.GetSparseImage("system", OPTIONS.input_tmp, input_zip,
+                                     allow_shared_blocks)
+  system_tgt.ResetFileMap()
+  system_diff = common.BlockDifference("system", system_tgt, src=None)
+  system_diff.WriteScript(script, output_zip)
+
+  boot_img = common.GetBootableImage(
+      "boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")
+
+  if HasVendorPartition(input_zip):
+    script.ShowProgress(0.1, 0)
+
+    vendor_tgt = common.GetSparseImage("vendor", OPTIONS.input_tmp, input_zip,
+                                       allow_shared_blocks)
+    vendor_tgt.ResetFileMap()
+    vendor_diff = common.BlockDifference("vendor", vendor_tgt)
+    vendor_diff.WriteScript(script, output_zip)
+
+  AddCompatibilityArchiveIfTrebleEnabled(input_zip, output_zip, target_info)
+
+  common.CheckSize(boot_img.data, "boot.img", target_info)
+  common.ZipWriteStr(output_zip, "boot.img", boot_img.data)
+
+  script.ShowProgress(0.05, 5)
+  script.WriteRawImage("/boot", "boot.img")
+
+  script.ShowProgress(0.2, 10)
+  device_specific.FullOTA_InstallEnd()
+
+  if OPTIONS.extra_script is not None:
+    script.AppendExtra(OPTIONS.extra_script)
+
+  script.UnmountAll()
+
+  if OPTIONS.wipe_user_data:
+    script.ShowProgress(0.1, 10)
+    script.FormatPartition("/data")
+
+  if OPTIONS.two_step:
+    script.AppendExtra("""
+set_stage("%(bcb_dev)s", "");
+""" % bcb_dev)
+    script.AppendExtra("else\n")
+
+    # Stage 1/3: Nothing to verify for full OTA. Write recovery image to /boot.
+    script.Comment("Stage 1/3")
+    _WriteRecoveryImageToBoot(script, output_zip)
+
+    script.AppendExtra("""
+set_stage("%(bcb_dev)s", "2/3");
+reboot_now("%(bcb_dev)s", "");
+endif;
+endif;
+""" % bcb_dev)
+
+  script.SetProgress(1)
+  script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary)
+  metadata["ota-required-cache"] = str(script.required_cache)
+
+  # We haven't written the metadata entry, which will be done in
+  # FinalizeMetadata.
+  common.ZipClose(output_zip)
+
+  needed_property_files = (
+      NonAbOtaPropertyFiles(),
+  )
+  FinalizeMetadata(metadata, staging_file, output_file, needed_property_files)
+
+
+def WriteMetadata(metadata, output_zip):
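+  # Serializes the metadata dict as sorted 'key=value' lines, e.g.
+  # (illustrative values):
+  #   ota-type=BLOCK
+  #   post-build=brand/product/device:...
+  #   post-timestamp=1500000000
+  #   pre-device=device_name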
+  value = "".join(["%s=%s\n" % kv for kv in sorted(metadata.iteritems())])
+  common.ZipWriteStr(output_zip, METADATA_NAME, value,
+                     compress_type=zipfile.ZIP_STORED)
+
+
+def HandleDowngradeMetadata(metadata, target_info, source_info):
+  # Only incremental OTAs are allowed to reach here.
+  assert OPTIONS.incremental_source is not None
+
+  post_timestamp = target_info.GetBuildProp("ro.build.date.utc")
+  pre_timestamp = source_info.GetBuildProp("ro.build.date.utc")
+  is_downgrade = long(post_timestamp) < long(pre_timestamp)
+
+  if OPTIONS.downgrade:
+    if not is_downgrade:
+      raise RuntimeError(
+          "--downgrade or --override_timestamp specified but no downgrade "
+          "detected: pre: %s, post: %s" % (pre_timestamp, post_timestamp))
+    metadata["ota-downgrade"] = "yes"
+  else:
+    if is_downgrade:
+      raise RuntimeError(
+          "Downgrade detected based on timestamp check: pre: %s, post: %s. "
+          "Need to specify --override_timestamp OR --downgrade to allow "
+          "building the incremental." % (pre_timestamp, post_timestamp))
+
+
+def GetPackageMetadata(target_info, source_info=None):
+  """Generates and returns the metadata dict.
+
+  It generates a dict() that contains the info to be written into an OTA
+  package (META-INF/com/android/metadata). It also handles the detection of
+  downgrade / data wipe based on the global options.
+
+  Args:
+    target_info: The BuildInfo instance that holds the target build info.
+    source_info: The BuildInfo instance that holds the source build info, or
+        None if generating full OTA.
+
+  Returns:
+    A dict to be written into package metadata entry.
+  """
+  assert isinstance(target_info, BuildInfo)
+  assert source_info is None or isinstance(source_info, BuildInfo)
+
+  metadata = {
+      'post-build' : target_info.fingerprint,
+      'post-build-incremental' : target_info.GetBuildProp(
+          'ro.build.version.incremental'),
+      'post-sdk-level' : target_info.GetBuildProp(
+          'ro.build.version.sdk'),
+      'post-security-patch-level' : target_info.GetBuildProp(
+          'ro.build.version.security_patch'),
+  }
+
+  if target_info.is_ab:
+    metadata['ota-type'] = 'AB'
+    metadata['ota-required-cache'] = '0'
+  else:
+    metadata['ota-type'] = 'BLOCK'
+
+  if OPTIONS.wipe_user_data:
+    metadata['ota-wipe'] = 'yes'
+
+  is_incremental = source_info is not None
+  if is_incremental:
+    metadata['pre-build'] = source_info.fingerprint
+    metadata['pre-build-incremental'] = source_info.GetBuildProp(
+        'ro.build.version.incremental')
+    metadata['pre-device'] = source_info.device
+  else:
+    metadata['pre-device'] = target_info.device
+
+  # Use the actual post-timestamp, even for a downgrade case.
+  metadata['post-timestamp'] = target_info.GetBuildProp('ro.build.date.utc')
+
+  # Detect downgrades and set up downgrade flags accordingly.
+  if is_incremental:
+    HandleDowngradeMetadata(metadata, target_info, source_info)
+
+  return metadata
+
+
+class PropertyFiles(object):
+  """A class that computes the property-files string for an OTA package.
+
+  A property-files string is a comma-separated string that contains the
+  offset/size info for an OTA package. The entries, which must be ZIP_STORED,
+  can be fetched directly with the package URL along with the offset/size info.
+  These strings can be used for streaming A/B OTAs, or for allowing an updater
+  to download the package metadata entry directly, without paying the cost of
+  downloading the entire package.
+
+  Computing the final property-files string requires two passes, because
+  signing the whole package (with signapk.jar) may reorder the ZIP entries,
+  which would in turn invalidate previously computed ZIP entry offset/size
+  values.
+
+  This class provides functions to be called for each pass. The general flow is
+  as follows.
+
+    property_files = PropertyFiles()
+    # The first pass, which writes placeholders before doing initial signing.
+    property_files.Compute()
+    SignOutput()
+
+    # The second pass, by replacing the placeholders with actual data.
+    property_files.Finalize()
+    SignOutput()
+
+  And the caller can additionally verify the final result.
+
+    property_files.Verify()
+  """
+
+  def __init__(self):
+    self.name = None
+    self.required = ()
+    self.optional = ()
+
+  def Compute(self, input_zip):
+    """Computes and returns a property-files string with placeholders.
+
+    We reserve extra space for the offset and size of the metadata entry itself,
+    although we don't know the final values until the package gets signed.
+
+    Args:
+      input_zip: The input ZIP file.
+
+    Returns:
+      A string with placeholders for the metadata offset/size info, e.g.
+      "payload.bin:679:343,payload_properties.txt:378:45,metadata:        ".
+    """
+    return self._GetPropertyFilesString(input_zip, reserve_space=True)
+
+  class InsufficientSpaceException(Exception):
+    pass
+
+  def Finalize(self, input_zip, reserved_length):
+    """Finalizes a property-files string with actual METADATA offset/size info.
+
+    The input ZIP file has been signed, with the ZIP entries in the desired
+    place (signapk.jar will possibly reorder the ZIP entries). Now we compute
+    the ZIP entry offsets and construct the property-files string with actual
+    data. Note that during this process, we must pad the property-files string
+    to the reserved length, so that the METADATA entry size remains the same.
+    Otherwise the entries' offsets and sizes may change again.
+
+    Args:
+      input_zip: The input ZIP file.
+      reserved_length: The reserved length of the property-files string during
+          the call to Compute(). The final string must be no more than this
+          size.
+
+    Returns:
+      A property-files string including the metadata offset/size info, e.g.
+      "payload.bin:679:343,payload_properties.txt:378:45,metadata:69:379  ".
+
+    Raises:
+      InsufficientSpaceException: If the reserved length is insufficient to hold
+          the final string.
+    """
+    result = self._GetPropertyFilesString(input_zip, reserve_space=False)
+    if len(result) > reserved_length:
+      raise self.InsufficientSpaceException(
+          'Insufficient reserved space: reserved={}, actual={}'.format(
+              reserved_length, len(result)))
+
+    result += ' ' * (reserved_length - len(result))
+    return result
+
+  def Verify(self, input_zip, expected):
+    """Verifies the input ZIP file contains the expected property-files string.
+
+    Args:
+      input_zip: The input ZIP file.
+      expected: The property-files string that's computed from Finalize().
+
+    Raises:
+      AssertionError: On finding a mismatch.
+    """
+    actual = self._GetPropertyFilesString(input_zip)
+    assert actual == expected, \
+        "Mismatching streaming metadata: {} vs {}.".format(actual, expected)
+
+  def _GetPropertyFilesString(self, zip_file, reserve_space=False):
+    """Constructs the property-files string per request."""
+
+    def ComputeEntryOffsetSize(name):
+      """Computes the zip entry offset and size."""
+      info = zip_file.getinfo(name)
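+      # The entry's payload starts right after its ZIP local file header,
+      # which is what info.FileHeader() serializes (a fixed 30 bytes plus the
+      # file name and extra field).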
+      offset = info.header_offset + len(info.FileHeader())
+      size = info.file_size
+      return '%s:%d:%d' % (os.path.basename(name), offset, size)
+
+    tokens = []
+    tokens.extend(self._GetPrecomputed(zip_file))
+    for entry in self.required:
+      tokens.append(ComputeEntryOffsetSize(entry))
+    for entry in self.optional:
+      if entry in zip_file.namelist():
+        tokens.append(ComputeEntryOffsetSize(entry))
+
+    # 'META-INF/com/android/metadata' is required. We don't know its actual
+    # offset and length (nor the final values for other entries) yet, so we
+    # reserve 15 bytes as a placeholder ('offset:length'), which is sufficient
+    # for the metadata entry: 'offset' allows up to 10 digits (i.e. ~9 GiB)
+    # and 'length' up to 4 digits. Note that all the reserved space serves the
+    # metadata entry only.
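+    # For example, the placeholder token 'metadata:' followed by 15 spaces
+    # may later be finalized to 'metadata:69:379', padded with trailing
+    # spaces up to the originally reserved length (see Finalize()).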
+    if reserve_space:
+      tokens.append('metadata:' + ' ' * 15)
+    else:
+      tokens.append(ComputeEntryOffsetSize(METADATA_NAME))
+
+    return ','.join(tokens)
+
+  def _GetPrecomputed(self, input_zip):
+    """Computes the additional tokens to be included into the property-files.
+
+    This applies to tokens without actual ZIP entries, such as
+    payload_metadata.bin. We want to expose the offset/size to updaters, so
+    that they can download the payload metadata directly with the info.
+
+    Args:
+      input_zip: The input zip file.
+
+    Returns:
+      A list of strings (tokens) to be added to the property-files string.
+    """
+    # pylint: disable=no-self-use
+    # pylint: disable=unused-argument
+    return []
+
+
+class StreamingPropertyFiles(PropertyFiles):
+  """A subclass for computing the property-files for streaming A/B OTAs."""
+
+  def __init__(self):
+    super(StreamingPropertyFiles, self).__init__()
+    self.name = 'ota-streaming-property-files'
+    self.required = (
+        # payload.bin and payload_properties.txt must exist.
+        'payload.bin',
+        'payload_properties.txt',
+    )
+    self.optional = (
+        # care_map.txt is available only if dm-verity is enabled.
+        'care_map.txt',
+        # compatibility.zip is available only if target supports Treble.
+        'compatibility.zip',
+    )
+
+
+class AbOtaPropertyFiles(StreamingPropertyFiles):
+  """The property-files for A/B OTA that includes payload_metadata.bin info.
+
+  Since P, we expose one more token (aka property-file), in addition to the ones
+  for streaming A/B OTA, for a virtual entry of 'payload_metadata.bin'.
+  'payload_metadata.bin' is the header part of a payload ('payload.bin'), which
+  doesn't exist as a separate ZIP entry, but can be used to verify if the
+  payload can be applied on the given device.
+
+  For backward compatibility, we keep both of the 'ota-streaming-property-files'
+  and the newly added 'ota-property-files' in P. The new token will only be
+  available in 'ota-property-files'.
+  """
+
+  def __init__(self):
+    super(AbOtaPropertyFiles, self).__init__()
+    self.name = 'ota-property-files'
+
+  def _GetPrecomputed(self, input_zip):
+    offset, size = self._GetPayloadMetadataOffsetAndSize(input_zip)
+    return ['payload_metadata.bin:{}:{}'.format(offset, size)]
+
+  @staticmethod
+  def _GetPayloadMetadataOffsetAndSize(input_zip):
+    """Computes the offset and size of the payload metadata for a given package.
+
+    (From system/update_engine/update_metadata.proto)
+    A delta update file contains all the deltas needed to update a system from
+    one specific version to another specific version. The update format is
+    represented by this struct pseudocode:
+
+    struct delta_update_file {
+      char magic[4] = "CrAU";
+      uint64 file_format_version;
+      uint64 manifest_size;  // Size of protobuf DeltaArchiveManifest
+
+      // Only present if format_version > 1:
+      uint32 metadata_signature_size;
+
+      // The Bzip2 compressed DeltaArchiveManifest
+      char manifest[manifest_size];
+
+      // The signature of the metadata (from the beginning of the payload up to
+      // this location, not including the signature itself). This is a
+      // serialized Signatures message.
+      char metadata_signature_message[metadata_signature_size];
+
+      // Data blobs for files, no specific format. The specific offset
+      // and length of each data blob is recorded in the DeltaArchiveManifest.
+      struct {
+        char data[];
+      } blobs[];
+
+      // These two are not signed:
+      uint64 payload_signatures_message_size;
+      char payload_signatures_message[];
+    };
+
+    'payload_metadata.bin' contains all the bytes from the beginning of the
+    payload, up to the end of 'metadata_signature_message'.
+    """
+    payload_info = input_zip.getinfo('payload.bin')
+    payload_offset = payload_info.header_offset + len(payload_info.FileHeader())
+    payload_size = payload_info.file_size
+
+    with input_zip.open('payload.bin', 'r') as payload_fp:
+      header_bin = payload_fp.read(24)
+
+    # network byte order (big-endian)
+    header = struct.unpack("!IQQL", header_bin)
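+    # Fields, per the struct above: magic ("CrAU"), file_format_version,
+    # manifest_size, metadata_signature_size.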
+
+    # 'CrAU'
+    magic = header[0]
+    assert magic == 0x43724155, "Invalid magic: {:x}".format(magic)
+
+    manifest_size = header[2]
+    metadata_signature_size = header[3]
+    metadata_total = 24 + manifest_size + metadata_signature_size
+    assert metadata_total < payload_size
+
+    return (payload_offset, metadata_total)
+
+
+class NonAbOtaPropertyFiles(PropertyFiles):
+  """The property-files for non-A/B OTA.
+
+  For non-A/B OTA, the property-files string contains the info for the METADATA
+  entry, with which a system updater can fetch the package metadata prior to
+  downloading the entire package.
+  """
+
+  def __init__(self):
+    super(NonAbOtaPropertyFiles, self).__init__()
+    self.name = 'ota-property-files'
+
+
+def FinalizeMetadata(metadata, input_file, output_file, needed_property_files):
+  """Finalizes the metadata and signs an A/B OTA package.
+
+  In order to stream an A/B OTA package, we need 'ota-streaming-property-files'
+  that contains the offsets and sizes for the ZIP entries. An example
+  property-files string is as follows.
+
+    "payload.bin:679:343,payload_properties.txt:378:45,metadata:69:379"
+
+  The OTA server can pass down this string, in addition to the package URL, to
+  the system update client. The client can then fetch individual ZIP entries
+  (ZIP_STORED) directly at the given offsets of the URL.
+
+  Args:
+    metadata: The metadata dict for the package.
+    input_file: The input ZIP filename that doesn't contain the package METADATA
+        entry yet.
+    output_file: The final output ZIP filename.
+    needed_property_files: The list of PropertyFiles instances to be generated.
+  """
+
+  def ComputeAllPropertyFiles(input_file, needed_property_files):
+    # Write the current metadata entry with placeholders.
+    with zipfile.ZipFile(input_file) as input_zip:
+      for property_files in needed_property_files:
+        metadata[property_files.name] = property_files.Compute(input_zip)
+      namelist = input_zip.namelist()
+
+    if METADATA_NAME in namelist:
+      common.ZipDelete(input_file, METADATA_NAME)
+    output_zip = zipfile.ZipFile(input_file, 'a')
+    WriteMetadata(metadata, output_zip)
+    common.ZipClose(output_zip)
+
+    if OPTIONS.no_signing:
+      return input_file
+
+    prelim_signing = common.MakeTempFile(suffix='.zip')
+    SignOutput(input_file, prelim_signing)
+    return prelim_signing
+
+  def FinalizeAllPropertyFiles(prelim_signing, needed_property_files):
+    with zipfile.ZipFile(prelim_signing) as prelim_signing_zip:
+      for property_files in needed_property_files:
+        metadata[property_files.name] = property_files.Finalize(
+            prelim_signing_zip, len(metadata[property_files.name]))
+
+  # SignOutput(), which in turn calls signapk.jar, will possibly reorder the ZIP
+  # entries, as well as padding the entry headers. We do a preliminary signing
+  # (with an incomplete metadata entry) to allow that to happen. Then compute
+  # the ZIP entry offsets, write back the final metadata and do the final
+  # signing.
+  prelim_signing = ComputeAllPropertyFiles(input_file, needed_property_files)
+  try:
+    FinalizeAllPropertyFiles(prelim_signing, needed_property_files)
+  except PropertyFiles.InsufficientSpaceException:
+    # Even with the preliminary signing, the entry orders may change
+    # dramatically, which leads to insufficiently reserved space during the
+    # first call to ComputeAllPropertyFiles(). In that case, we redo all the
+    # preliminary signing work, based on the already ordered ZIP entries, to
+    # address the issue.
+    prelim_signing = ComputeAllPropertyFiles(
+        prelim_signing, needed_property_files)
+    FinalizeAllPropertyFiles(prelim_signing, needed_property_files)
+
+  # Replace the METADATA entry.
+  common.ZipDelete(prelim_signing, METADATA_NAME)
+  output_zip = zipfile.ZipFile(prelim_signing, 'a')
+  WriteMetadata(metadata, output_zip)
+  common.ZipClose(output_zip)
+
+  # Re-sign the package after updating the metadata entry.
+  if OPTIONS.no_signing:
+    output_file = prelim_signing
+  else:
+    SignOutput(prelim_signing, output_file)
+
+  # Reopen the final signed zip to double check the streaming metadata.
+  with zipfile.ZipFile(output_file) as output_zip:
+    for property_files in needed_property_files:
+      property_files.Verify(output_zip, metadata[property_files.name].strip())
+
+
+def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_file):
+  target_info = BuildInfo(OPTIONS.target_info_dict, OPTIONS.oem_dicts)
+  source_info = BuildInfo(OPTIONS.source_info_dict, OPTIONS.oem_dicts)
+
+  target_api_version = target_info["recovery_api_version"]
+  source_api_version = source_info["recovery_api_version"]
+  if source_api_version == 0:
+    print("WARNING: generating edify script for a source that "
+          "can't install it.")
+
+  script = edify_generator.EdifyGenerator(
+      source_api_version, target_info, fstab=source_info["fstab"])
+
+  if target_info.oem_props or source_info.oem_props:
+    if not OPTIONS.oem_no_mount:
+      source_info.WriteMountOemScript(script)
+
+  metadata = GetPackageMetadata(target_info, source_info)
+
+  if not OPTIONS.no_signing:
+    staging_file = common.MakeTempFile(suffix='.zip')
+  else:
+    staging_file = output_file
+
+  output_zip = zipfile.ZipFile(
+      staging_file, "w", compression=zipfile.ZIP_DEFLATED)
+
+  device_specific = common.DeviceSpecificParams(
+      source_zip=source_zip,
+      source_version=source_api_version,
+      target_zip=target_zip,
+      target_version=target_api_version,
+      output_zip=output_zip,
+      script=script,
+      metadata=metadata,
+      info_dict=source_info)
+
+  source_boot = common.GetBootableImage(
+      "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT", source_info)
+  target_boot = common.GetBootableImage(
+      "/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT", target_info)
+  updating_boot = (not OPTIONS.two_step and
+                   (source_boot.data != target_boot.data))
+
+  target_recovery = common.GetBootableImage(
+      "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")
+
+  # When target uses 'BOARD_EXT4_SHARE_DUP_BLOCKS := true', images may contain
+  # shared blocks (i.e. some blocks will show up in multiple files' block
+  # list). We can only allocate such shared blocks to the first "owner", and
+  # disable imgdiff for all later occurrences.
+  allow_shared_blocks = (source_info.get('ext4_share_dup_blocks') == "true" or
+                         target_info.get('ext4_share_dup_blocks') == "true")
+  system_src = common.GetSparseImage("system", OPTIONS.source_tmp, source_zip,
+                                     allow_shared_blocks)
+  system_tgt = common.GetSparseImage("system", OPTIONS.target_tmp, target_zip,
+                                     allow_shared_blocks)
+
+  blockimgdiff_version = max(
+      int(i) for i in target_info.get("blockimgdiff_versions", "1").split(","))
+  assert blockimgdiff_version >= 3
+
+  # Check the first block of the source system partition for remount R/W only
+  # if the filesystem is ext4.
+  system_src_partition = source_info["fstab"]["/system"]
+  check_first_block = system_src_partition.fs_type == "ext4"
+  # Disable using imgdiff for squashfs. 'imgdiff -z' expects input files to be
+  # in zip formats. However with squashfs, a) all files are compressed in LZ4;
+  # b) the blocks listed in block map may not contain all the bytes for a given
+  # file (because they're rounded to be 4K-aligned).
+  system_tgt_partition = target_info["fstab"]["/system"]
+  disable_imgdiff = (system_src_partition.fs_type == "squashfs" or
+                     system_tgt_partition.fs_type == "squashfs")
+  system_diff = common.BlockDifference("system", system_tgt, system_src,
+                                       check_first_block,
+                                       version=blockimgdiff_version,
+                                       disable_imgdiff=disable_imgdiff)
+
+  if HasVendorPartition(target_zip):
+    if not HasVendorPartition(source_zip):
+      raise RuntimeError("can't generate incremental that adds /vendor")
+    vendor_src = common.GetSparseImage("vendor", OPTIONS.source_tmp, source_zip,
+                                       allow_shared_blocks)
+    vendor_tgt = common.GetSparseImage("vendor", OPTIONS.target_tmp, target_zip,
+                                       allow_shared_blocks)
+
+    # Check the first block of the vendor partition for remount R/W only if
+    # the filesystem is ext4.
+    vendor_partition = source_info["fstab"]["/vendor"]
+    check_first_block = vendor_partition.fs_type == "ext4"
+    disable_imgdiff = vendor_partition.fs_type == "squashfs"
+    vendor_diff = common.BlockDifference("vendor", vendor_tgt, vendor_src,
+                                         check_first_block,
+                                         version=blockimgdiff_version,
+                                         disable_imgdiff=disable_imgdiff)
+  else:
+    vendor_diff = None
+
+  AddCompatibilityArchiveIfTrebleEnabled(
+      target_zip, output_zip, target_info, source_info)
+
+  # Assertions (e.g. device properties check).
+  target_info.WriteDeviceAssertions(script, OPTIONS.oem_no_mount)
+  device_specific.IncrementalOTA_Assertions()
+
+  # Two-step incremental package strategy (in chronological order,
+  # which is *not* the order in which the generated script has
+  # things):
+  #
+  # if stage is not "2/3" or "3/3":
+  #    do verification on current system
+  #    write recovery image to boot partition
+  #    set stage to "2/3"
+  #    reboot to boot partition and restart recovery
+  # else if stage is "2/3":
+  #    write recovery image to recovery partition
+  #    set stage to "3/3"
+  #    reboot to recovery partition and restart recovery
+  # else:
+  #    (stage must be "3/3")
+  #    perform update:
+  #       patch system files, etc.
+  #       force full install of new boot image
+  #       set up system to update recovery partition on first boot
+  #    complete script normally
+  #    (allow recovery to mark itself finished and reboot)
+
+  if OPTIONS.two_step:
+    assert source_info.get("multistage_support"), \
+        "two-step packages not supported by this build"
+    fs = source_info["fstab"]["/misc"]
+    assert fs.fs_type.upper() == "EMMC", \
+        "two-step packages only supported on devices with EMMC /misc partitions"
+    bcb_dev = {"bcb_dev" : fs.device}
+    common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data)
+    script.AppendExtra("""
+if get_stage("%(bcb_dev)s") == "2/3" then
+""" % bcb_dev)
+
+    # Stage 2/3: Write recovery image to /recovery (currently running /boot).
+    script.Comment("Stage 2/3")
+    script.AppendExtra("sleep(20);\n")
+    script.WriteRawImage("/recovery", "recovery.img")
+    script.AppendExtra("""
+set_stage("%(bcb_dev)s", "3/3");
+reboot_now("%(bcb_dev)s", "recovery");
+else if get_stage("%(bcb_dev)s") != "3/3" then
+""" % bcb_dev)
+
+    # Stage 1/3: (a) Verify the current system.
+    script.Comment("Stage 1/3")
+
+  # Dump fingerprints
+  script.Print("Source: {}".format(source_info.fingerprint))
+  script.Print("Target: {}".format(target_info.fingerprint))
+
+  script.Print("Verifying current system...")
+
+  device_specific.IncrementalOTA_VerifyBegin()
+
+  WriteFingerprintAssertion(script, target_info, source_info)
+
+  # Check the required cache size (i.e. stashed blocks).
+  size = []
+  if system_diff:
+    size.append(system_diff.required_cache)
+  if vendor_diff:
+    size.append(vendor_diff.required_cache)
+
+  if updating_boot:
+    boot_type, boot_device = common.GetTypeAndDevice("/boot", source_info)
+    d = common.Difference(target_boot, source_boot)
+    _, _, d = d.ComputePatch()
+    if d is None:
+      include_full_boot = True
+      common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
+    else:
+      include_full_boot = False
+
+      print("boot      target: %d  source: %d  diff: %d" % (
+          target_boot.size, source_boot.size, len(d)))
+
+      common.ZipWriteStr(output_zip, "patch/boot.img.p", d)
+
+      script.PatchCheck("%s:%s:%d:%s:%d:%s" %
+                        (boot_type, boot_device,
+                         source_boot.size, source_boot.sha1,
+                         target_boot.size, target_boot.sha1))
+      size.append(target_boot.size)
+
+  if size:
+    script.CacheFreeSpaceCheck(max(size))
+
+  device_specific.IncrementalOTA_VerifyEnd()
+
+  if OPTIONS.two_step:
+    # Stage 1/3: (b) Write recovery image to /boot.
+    _WriteRecoveryImageToBoot(script, output_zip)
+
+    script.AppendExtra("""
+set_stage("%(bcb_dev)s", "2/3");
+reboot_now("%(bcb_dev)s", "");
+else
+""" % bcb_dev)
+
+    # Stage 3/3: Make changes.
+    script.Comment("Stage 3/3")
+
+  # Verify the existing partitions.
+  system_diff.WriteVerifyScript(script, touched_blocks_only=True)
+  if vendor_diff:
+    vendor_diff.WriteVerifyScript(script, touched_blocks_only=True)
+
+  script.Comment("---- start making changes here ----")
+
+  device_specific.IncrementalOTA_InstallBegin()
+
+  system_diff.WriteScript(script, output_zip,
+                          progress=0.8 if vendor_diff else 0.9)
+
+  if vendor_diff:
+    vendor_diff.WriteScript(script, output_zip, progress=0.1)
+
+  if OPTIONS.two_step:
+    common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
+    script.WriteRawImage("/boot", "boot.img")
+    print("writing full boot image (forced by two-step mode)")
+
+  if not OPTIONS.two_step:
+    if updating_boot:
+      if include_full_boot:
+        print("boot image changed; including full.")
+        script.Print("Installing boot image...")
+        script.WriteRawImage("/boot", "boot.img")
+      else:
+        # Produce the boot image by applying a patch to the current
+        # contents of the boot partition, and write it back to the
+        # partition.
+        print("boot image changed; including patch.")
+        script.Print("Patching boot image...")
+        script.ShowProgress(0.1, 10)
+        script.ApplyPatch("%s:%s:%d:%s:%d:%s"
+                          % (boot_type, boot_device,
+                             source_boot.size, source_boot.sha1,
+                             target_boot.size, target_boot.sha1),
+                          "-",
+                          target_boot.size, target_boot.sha1,
+                          source_boot.sha1, "patch/boot.img.p")
+    else:
+      print("boot image unchanged; skipping.")
+
+  # Do device-specific installation (eg, write radio image).
+  device_specific.IncrementalOTA_InstallEnd()
+
+  if OPTIONS.extra_script is not None:
+    script.AppendExtra(OPTIONS.extra_script)
+
+  if OPTIONS.wipe_user_data:
+    script.Print("Erasing user data...")
+    script.FormatPartition("/data")
+
+  if OPTIONS.two_step:
+    script.AppendExtra("""
+set_stage("%(bcb_dev)s", "");
+endif;
+endif;
+""" % bcb_dev)
+
+  script.SetProgress(1)
+  # For downgrade OTAs, we prefer to use the update-binary in the source
+  # build that is actually newer than the one in the target build.
+  if OPTIONS.downgrade:
+    script.AddToZip(source_zip, output_zip, input_path=OPTIONS.updater_binary)
+  else:
+    script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary)
+  metadata["ota-required-cache"] = str(script.required_cache)
+
+  # We haven't written the metadata entry yet, which will be handled in
+  # FinalizeMetadata().
+  common.ZipClose(output_zip)
+
+  # Sign the generated zip package unless no_signing is specified.
+  needed_property_files = (
+      NonAbOtaPropertyFiles(),
+  )
+  FinalizeMetadata(metadata, staging_file, output_file, needed_property_files)
+
+
+def GetTargetFilesZipForSecondaryImages(input_file, skip_postinstall=False):
+  """Returns a target-files.zip file for generating secondary payload.
+
+  Although the original target-files.zip already contains secondary slot
+  images (i.e. IMAGES/system_other.img), we need to rename the files to the
+  ones without the _other suffix. Note that we cannot instead modify the names
+  in META/ab_partitions.txt, because there are no matching partitions on
+  device.
+
+  For the partitions that don't have secondary images, the ones for primary
+  slot will be used. This is to ensure that we always have valid boot, vbmeta,
+  bootloader images in the inactive slot.
+
+  Args:
+    input_file: The input target-files.zip file.
+    skip_postinstall: Whether to skip copying the postinstall config file.
+
+  Returns:
+    The filename of the target-files.zip for generating secondary payload.
+  """
+  target_file = common.MakeTempFile(prefix="targetfiles-", suffix=".zip")
+  target_zip = zipfile.ZipFile(target_file, 'w', allowZip64=True)
+
+  input_tmp = common.UnzipTemp(input_file, UNZIP_PATTERN)
+  with zipfile.ZipFile(input_file, 'r') as input_zip:
+    infolist = input_zip.infolist()
+
+  for info in infolist:
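+    # Zip entry names are always '/'-separated; map each one to the
+    # corresponding path under the extraction directory.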
+    unzipped_file = os.path.join(input_tmp, *info.filename.split('/'))
+    if info.filename == 'IMAGES/system_other.img':
+      common.ZipWrite(target_zip, unzipped_file, arcname='IMAGES/system.img')
+
+    # Primary images and friends need to be skipped explicitly.
+    elif info.filename in ('IMAGES/system.img',
+                           'IMAGES/system.map'):
+      pass
+
+    # Skip copying the postinstall config if requested.
+    elif skip_postinstall and info.filename == POSTINSTALL_CONFIG:
+      pass
+
+    elif info.filename.startswith(('META/', 'IMAGES/')):
+      common.ZipWrite(target_zip, unzipped_file, arcname=info.filename)
+
+  common.ZipClose(target_zip)
+
+  return target_file
+
+
+def GetTargetFilesZipWithoutPostinstallConfig(input_file):
+  """Returns a target-files.zip that's not containing postinstall_config.txt.
+
+  This allows brillo_update_payload script to skip writing all the postinstall
+  hooks in the generated payload. The input target-files.zip file will be
+  duplicated, with 'META/postinstall_config.txt' skipped. If input_file doesn't
+  contain the postinstall_config.txt entry, the input file will be returned.
+
+  Args:
+    input_file: The input target-files.zip filename.
+
+  Returns:
+    The filename of target-files.zip that doesn't contain postinstall config.
+  """
+  # We should only make a copy if the postinstall_config entry exists.
+  with zipfile.ZipFile(input_file, 'r') as input_zip:
+    if POSTINSTALL_CONFIG not in input_zip.namelist():
+      return input_file
+
+  target_file = common.MakeTempFile(prefix="targetfiles-", suffix=".zip")
+  shutil.copyfile(input_file, target_file)
+  common.ZipDelete(target_file, POSTINSTALL_CONFIG)
+  return target_file
+
+
+def WriteABOTAPackageWithBrilloScript(target_file, output_file,
+                                      source_file=None):
+  """Generates an Android OTA package that has A/B update payload."""
+  # Stage the output zip package for package signing.
+  if not OPTIONS.no_signing:
+    staging_file = common.MakeTempFile(suffix='.zip')
+  else:
+    staging_file = output_file
+  output_zip = zipfile.ZipFile(staging_file, "w",
+                               compression=zipfile.ZIP_DEFLATED)
+
+  if source_file is not None:
+    target_info = BuildInfo(OPTIONS.target_info_dict, OPTIONS.oem_dicts)
+    source_info = BuildInfo(OPTIONS.source_info_dict, OPTIONS.oem_dicts)
+  else:
+    target_info = BuildInfo(OPTIONS.info_dict, OPTIONS.oem_dicts)
+    source_info = None
+
+  # Metadata to comply with Android OTA package format.
+  metadata = GetPackageMetadata(target_info, source_info)
+
+  if OPTIONS.skip_postinstall:
+    target_file = GetTargetFilesZipWithoutPostinstallConfig(target_file)
+
+  # Generate payload.
+  payload = Payload()
+
+  # Enforce a max timestamp this payload can be applied on top of.
+  if OPTIONS.downgrade:
+    max_timestamp = source_info.GetBuildProp("ro.build.date.utc")
+  else:
+    max_timestamp = metadata["post-timestamp"]
+  additional_args = ["--max_timestamp", max_timestamp]
+
+  payload.Generate(target_file, source_file, additional_args)
+
+  # Sign the payload.
+  payload_signer = PayloadSigner()
+  payload.Sign(payload_signer)
+
+  # Write the payload into output zip.
+  payload.WriteToZip(output_zip)
+
+  # Generate and include the secondary payload that installs secondary images
+  # (e.g. system_other.img).
+  if OPTIONS.include_secondary:
+    # We always include a full payload for the secondary slot, even when
+    # building an incremental OTA. See the comments for "--include_secondary".
+    secondary_target_file = GetTargetFilesZipForSecondaryImages(
+        target_file, OPTIONS.skip_postinstall)
+    secondary_payload = Payload(secondary=True)
+    secondary_payload.Generate(secondary_target_file,
+                               additional_args=additional_args)
+    secondary_payload.Sign(payload_signer)
+    secondary_payload.WriteToZip(output_zip)
+
+  # If dm-verity is supported for the device, copy the contents of care_map
+  # into the A/B OTA package.
+  target_zip = zipfile.ZipFile(target_file, "r")
+  if (target_info.get("verity") == "true" or
+      target_info.get("avb_enable") == "true"):
+    care_map_path = "META/care_map.txt"
+    namelist = target_zip.namelist()
+    if care_map_path in namelist:
+      care_map_data = target_zip.read(care_map_path)
+      # In order to support streaming, care_map.txt needs to be packed as
+      # ZIP_STORED.
+      common.ZipWriteStr(output_zip, "care_map.txt", care_map_data,
+                         compress_type=zipfile.ZIP_STORED)
+    else:
+      print("Warning: cannot find care map file in target_file package")
+
+  AddCompatibilityArchiveIfTrebleEnabled(
+      target_zip, output_zip, target_info, source_info)
+
+  common.ZipClose(target_zip)
+
+  # We haven't written the metadata entry yet, which will be handled in
+  # FinalizeMetadata().
+  common.ZipClose(output_zip)
+
+  # AbOtaPropertyFiles intends to replace StreamingPropertyFiles, as it covers
+  # all the info of the latter. However, system updaters and OTA servers need
+  # time to switch to the new flag. We keep both flags for the P timeframe,
+  # and will remove StreamingPropertyFiles in a later release.
+  needed_property_files = (
+      AbOtaPropertyFiles(),
+      StreamingPropertyFiles(),
+  )
+  FinalizeMetadata(metadata, staging_file, output_file, needed_property_files)
+
+
+def main(argv):
+
+  def option_handler(o, a):
+    if o in ("-k", "--package_key"):
+      OPTIONS.package_key = a
+    elif o in ("-i", "--incremental_from"):
+      OPTIONS.incremental_source = a
+    elif o == "--full_radio":
+      OPTIONS.full_radio = True
+    elif o == "--full_bootloader":
+      OPTIONS.full_bootloader = True
+    elif o == "--wipe_user_data":
+      OPTIONS.wipe_user_data = True
+    elif o == "--downgrade":
+      OPTIONS.downgrade = True
+      OPTIONS.wipe_user_data = True
+    elif o == "--override_timestamp":
+      OPTIONS.downgrade = True
+    elif o in ("-o", "--oem_settings"):
+      OPTIONS.oem_source = a.split(',')
+    elif o == "--oem_no_mount":
+      OPTIONS.oem_no_mount = True
+    elif o in ("-e", "--extra_script"):
+      OPTIONS.extra_script = a
+    elif o in ("-t", "--worker_threads"):
+      if a.isdigit():
+        OPTIONS.worker_threads = int(a)
+      else:
+        raise ValueError("Cannot parse value %r for option %r - only "
+                         "integers are allowed." % (a, o))
+    elif o in ("-2", "--two_step"):
+      OPTIONS.two_step = True
+    elif o == "--include_secondary":
+      OPTIONS.include_secondary = True
+    elif o == "--no_signing":
+      OPTIONS.no_signing = True
+    elif o == "--verify":
+      OPTIONS.verify = True
+    elif o == "--block":
+      OPTIONS.block_based = True
+    elif o in ("-b", "--binary"):
+      OPTIONS.updater_binary = a
+    elif o == "--stash_threshold":
+      try:
+        OPTIONS.stash_threshold = float(a)
+      except ValueError:
+        raise ValueError("Cannot parse value %r for option %r - expecting "
+                         "a float" % (a, o))
+    elif o == "--log_diff":
+      OPTIONS.log_diff = a
+    elif o == "--payload_signer":
+      OPTIONS.payload_signer = a
+    elif o == "--payload_signer_args":
+      OPTIONS.payload_signer_args = shlex.split(a)
+    elif o == "--extracted_input_target_files":
+      OPTIONS.extracted_input = a
+    elif o == "--skip_postinstall":
+      OPTIONS.skip_postinstall = True
+    else:
+      return False
+    return True
+
+  args = common.ParseOptions(argv, __doc__,
+                             extra_opts="b:k:i:d:e:t:2o:",
+                             extra_long_opts=[
+                                 "package_key=",
+                                 "incremental_from=",
+                                 "full_radio",
+                                 "full_bootloader",
+                                 "wipe_user_data",
+                                 "downgrade",
+                                 "override_timestamp",
+                                 "extra_script=",
+                                 "worker_threads=",
+                                 "two_step",
+                                 "include_secondary",
+                                 "no_signing",
+                                 "block",
+                                 "binary=",
+                                 "oem_settings=",
+                                 "oem_no_mount",
+                                 "verify",
+                                 "stash_threshold=",
+                                 "log_diff=",
+                                 "payload_signer=",
+                                 "payload_signer_args=",
+                                 "extracted_input_target_files=",
+                                 "skip_postinstall",
+                             ], extra_option_handler=option_handler)
+
+  if len(args) != 2:
+    common.Usage(__doc__)
+    sys.exit(1)
+
+  if OPTIONS.downgrade:
+    # We should only allow downgrading incrementals (as opposed to full OTAs).
+    # Otherwise the device may go back from an arbitrary build with this full
+    # OTA package.
+    if OPTIONS.incremental_source is None:
+      raise ValueError("Cannot generate downgradable full OTAs")
+
+  # Load the build info dicts from the zip directly or the extracted input
+  # directory. We don't need to unzip the entire target-files zips, because they
+  # won't be needed for A/B OTAs (brillo_update_payload does that on its own).
+  # When loading the info dicts, we don't need to provide the second parameter
+  # to common.LoadInfoDict(). Specifying the second parameter allows replacing
+  # some properties with their actual paths, such as 'selinux_fc',
+  # 'ramdisk_dir', which won't be used during OTA generation.
+  if OPTIONS.extracted_input is not None:
+    OPTIONS.info_dict = common.LoadInfoDict(OPTIONS.extracted_input)
+  else:
+    with zipfile.ZipFile(args[0], 'r') as input_zip:
+      OPTIONS.info_dict = common.LoadInfoDict(input_zip)
+
+  if OPTIONS.verbose:
+    print("--- target info ---")
+    common.DumpInfoDict(OPTIONS.info_dict)
+
+  # Load the source build dict if applicable.
+  if OPTIONS.incremental_source is not None:
+    OPTIONS.target_info_dict = OPTIONS.info_dict
+    with zipfile.ZipFile(OPTIONS.incremental_source, 'r') as source_zip:
+      OPTIONS.source_info_dict = common.LoadInfoDict(source_zip)
+
+    if OPTIONS.verbose:
+      print("--- source info ---")
+      common.DumpInfoDict(OPTIONS.source_info_dict)
+
+  # Load OEM dicts if provided.
+  OPTIONS.oem_dicts = _LoadOemDicts(OPTIONS.oem_source)
+
+  ab_update = OPTIONS.info_dict.get("ab_update") == "true"
+
+  # Use the default key to sign the package if not specified with package_key.
+  # package_keys are needed for ab_updates, so always define them if an
+  # ab_update is being created.
+  if not OPTIONS.no_signing or ab_update:
+    if OPTIONS.package_key is None:
+      OPTIONS.package_key = OPTIONS.info_dict.get(
+          "default_system_dev_certificate",
+          "build/target/product/security/testkey")
+    # Get signing keys
+    OPTIONS.key_passwords = common.GetKeyPasswords([OPTIONS.package_key])
+
+  if ab_update:
+    WriteABOTAPackageWithBrilloScript(
+        target_file=args[0],
+        output_file=args[1],
+        source_file=OPTIONS.incremental_source)
+
+    print("done.")
+    return
+
+  # Sanity check the loaded info dicts first.
+  if OPTIONS.info_dict.get("no_recovery") == "true":
+    raise common.ExternalError(
+        "--- target build has specified no recovery ---")
+
+  # Non-A/B OTAs rely on /cache partition to store temporary files.
+  cache_size = OPTIONS.info_dict.get("cache_size")
+  if cache_size is None:
+    print("--- can't determine the cache partition size ---")
+  OPTIONS.cache_size = cache_size
+
+  if OPTIONS.extra_script is not None:
+    with open(OPTIONS.extra_script) as fp:
+      OPTIONS.extra_script = fp.read()
+
+  if OPTIONS.extracted_input is not None:
+    OPTIONS.input_tmp = OPTIONS.extracted_input
+  else:
+    print("unzipping target target-files...")
+    OPTIONS.input_tmp = common.UnzipTemp(args[0], UNZIP_PATTERN)
+  OPTIONS.target_tmp = OPTIONS.input_tmp
+
+  # If the caller explicitly specified the device-specific extensions path via
+  # -s / --device_specific, use that. Otherwise, use META/releasetools.py if it
+  # is present in the target target_files. Otherwise, take the path of the file
+  # from 'tool_extensions' in the info dict and look for that in the local
+  # filesystem, relative to the current directory.
+  if OPTIONS.device_specific is None:
+    from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py")
+    if os.path.exists(from_input):
+      print("(using device-specific extensions from target_files)")
+      OPTIONS.device_specific = from_input
+    else:
+      OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions")
+
+  if OPTIONS.device_specific is not None:
+    OPTIONS.device_specific = os.path.abspath(OPTIONS.device_specific)
+
+  # Generate a full OTA.
+  if OPTIONS.incremental_source is None:
+    with zipfile.ZipFile(args[0], 'r') as input_zip:
+      WriteFullOTAPackage(
+          input_zip,
+          output_file=args[1])
+
+  # Generate an incremental OTA.
+  else:
+    print("unzipping source target-files...")
+    OPTIONS.source_tmp = common.UnzipTemp(
+        OPTIONS.incremental_source, UNZIP_PATTERN)
+    with zipfile.ZipFile(args[0], 'r') as input_zip, \
+        zipfile.ZipFile(OPTIONS.incremental_source, 'r') as source_zip:
+      WriteBlockIncrementalOTAPackage(
+          input_zip,
+          source_zip,
+          output_file=args[1])
+
+    if OPTIONS.log_diff:
+      with open(OPTIONS.log_diff, 'w') as out_file:
+        import target_files_diff
+        target_files_diff.recursiveDiff(
+            '', OPTIONS.source_tmp, OPTIONS.input_tmp, out_file)
+
+  print("done.")
+
+
+if __name__ == '__main__':
+  try:
+    common.CloseInheritedPipes()
+    main(sys.argv[1:])
+  except common.ExternalError as e:
+    print("\n   ERROR: %s\n" % (e,))
+    sys.exit(1)
+  finally:
+    common.Cleanup()
diff --git a/src/support/ab_tools/scripts/ota_from_target_files.py b/src/support/ab_tools/scripts/ota_from_target_files.py
new file mode 100644
index 0000000..10a19b3
--- /dev/null
+++ b/src/support/ab_tools/scripts/ota_from_target_files.py
@@ -0,0 +1,2001 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Given a target-files zipfile, produces an OTA package that installs
+that build.  An incremental OTA is produced if -i is given, otherwise
+a full OTA is produced.
+
+Usage:  ota_from_target_files [flags] input_target_files output_ota_package
+
+  -k (--package_key) <key> Key to use to sign the package (default is
+      the value of default_system_dev_certificate from the input
+      target-files package's META/misc_info.txt, or
+      "build/target/product/security/testkey" if that value is not
+      specified).
+
+      For incremental OTAs, the default value is based on the source
+      target-files, not the target build.
+
+  -i  (--incremental_from)  <file>
+      Generate an incremental OTA using the given target-files zip as
+      the starting build.
+
+  --full_radio
+      When generating an incremental OTA, always include a full copy of
+      radio image. This option is only meaningful when -i is specified,
+      because a full radio is always included in a full OTA if applicable.
+
+  --full_bootloader
+      Similar to --full_radio. When generating an incremental OTA, always
+      include a full copy of bootloader image.
+
+  --verify
+      Remount and verify the checksums of the files written to the system and
+      vendor (if used) partitions. Non-A/B incremental OTAs only.
+
+  -o  (--oem_settings)  <main_file[,additional_files...]>
+      Comma-separated list of files used to specify the expected OEM-specific
+      properties on the OEM partition of the intended device. Multiple expected
+      values can be used by providing multiple files. Only the first dict will
+      be used to compute fingerprint, while the rest will be used to assert
+      OEM-specific properties.
+
+  --oem_no_mount
+      For devices with OEM-specific properties but without an OEM partition,
+      do not mount the OEM partition in the updater-script. This should be
+      very rarely used, since it's expected to have a dedicated OEM partition
+      for OEM-specific properties. Only meaningful when -o is specified.
+
+  --wipe_user_data
+      Generate an OTA package that will wipe the user data partition
+      when installed.
+
+  --downgrade
+      Intentionally generate an incremental OTA that updates from a newer build
+      to an older one (e.g. downgrading from P preview back to O MR1).
+      "ota-downgrade=yes" will be set in the package metadata file. A data wipe
+      will always be enforced when using this flag, so "ota-wipe=yes" will also
+      be included in the metadata file. The update-binary in the source build
+      will be used in the OTA package, unless --binary flag is specified. Please
+      also check the comment for --override_timestamp below.
+
+  --override_timestamp
+      Intentionally generate an incremental OTA that updates from a newer build
+      to an older one (based on timestamp comparison), by setting the downgrade
+      flag in the package metadata. This differs from the --downgrade flag, as
+      it doesn't enforce a data wipe, because we know for sure this is NOT an
+      actual downgrade case; the two builds merely happen to have been cut in
+      reverse order (e.g. from two branches). A legitimate use case is that we
+      cut a new build C (after having A and B), but want to enforce an update
+      path of A -> C -> B. Specifying --downgrade may not help, since that
+      would enforce a data wipe for the C -> B update.
+
+      We used to set a fake timestamp in the package metadata for this flow. But
+      now we consolidate the two cases (i.e. an actual downgrade, or a downgrade
+      based on timestamp) with the same "ota-downgrade=yes" flag, with the
+      difference being whether "ota-wipe=yes" is set.
+
+  -e  (--extra_script)  <file>
+      Insert the contents of file at the end of the update script.
+
+  -2  (--two_step)
+      Generate a 'two-step' OTA package, where recovery is updated
+      first, so that any changes made to the system partition are done
+      using the new recovery (new kernel, etc.).
+
+  --include_secondary
+      Additionally include the payload for secondary slot images (default:
+      False). Only meaningful when generating A/B OTAs.
+
+      By default, an A/B OTA package doesn't contain the images for the
+      secondary slot (e.g. system_other.img). Specifying this flag allows
+      generating a separate payload that will install secondary slot images.
+
+      Such a package needs to be applied in a two-stage manner, with a reboot
+      in-between. During the first stage, the updater applies the primary
+      payload only. Upon finishing, it reboots the device into the newly updated
+      slot. It then continues to install the secondary payload to the inactive
+      slot, but without switching the active slot at the end (needs the matching
+      support in update_engine, i.e. SWITCH_SLOT_ON_REBOOT flag).
+
+      Due to the special install procedure, the secondary payload will always
+      be generated as a full payload.
+
+  --block
+      Generate a block-based OTA for a non-A/B device. Support for file-based
+      OTAs has been deprecated since O, and block-based OTAs are used by
+      default for all non-A/B devices. This flag is kept only to avoid
+      breaking existing callers.
+
+  -b  (--binary)  <file>
+      Use the given binary as the update-binary in the output package,
+      instead of the binary in the build's target_files.  Use for
+      development only.
+
+  -t  (--worker_threads) <int>
+      Specifies the number of worker threads that will be used when
+      generating patches for incremental updates (defaults to half the
+      number of CPU cores, with a minimum of one).
+
+  --stash_threshold <float>
+      Specifies the threshold that will be used to compute the maximum
+      allowed stash size (defaults to 0.8).
+
+  --log_diff <file>
+      Generate a log file that shows the differences in the source and target
+      builds for an incremental package. This option is only meaningful when
+      -i is specified.
+
+  --payload_signer <signer>
+      Specify the signer when signing the payload and metadata for A/B OTAs.
+      By default (i.e. without this flag), it calls 'openssl pkeyutl' to sign
+      with the package private key. If the private key cannot be accessed
+      directly, a payload signer that knows how to do that should be specified.
+      The signer will be supplied with "-inkey <path_to_key>",
+      "-in <input_file>" and "-out <output_file>" parameters.
+
+  --payload_signer_args <args>
+      Specify the arguments needed for payload signer.
+
+  --skip_postinstall
+      Skip the postinstall hooks when generating an A/B OTA package (default:
+      False). Note that this discards ALL the hooks, including non-optional
+      ones. Should only be used if the caller knows it's safe to do so (e.g.
+      all the postinstall work is to dexopt apps and a data wipe will happen
+      immediately after). Only meaningful when generating A/B OTAs.
+"""
+
+from __future__ import print_function
+
+import multiprocessing
+import os.path
+import shlex
+import shutil
+import struct
+import subprocess
+import sys
+import tempfile
+import zipfile
+
+import common
+import edify_generator
+
+if sys.hexversion < 0x02070000:
+  print("Python 2.7 or newer is required.", file=sys.stderr)
+  sys.exit(1)
+
+
+OPTIONS = common.OPTIONS
+OPTIONS.package_key = None
+OPTIONS.incremental_source = None
+OPTIONS.verify = False
+OPTIONS.patch_threshold = 0.95
+OPTIONS.wipe_user_data = False
+OPTIONS.downgrade = False
+OPTIONS.extra_script = None
+OPTIONS.worker_threads = multiprocessing.cpu_count() // 2
+if OPTIONS.worker_threads == 0:
+  OPTIONS.worker_threads = 1
+OPTIONS.two_step = False
+OPTIONS.include_secondary = False
+OPTIONS.no_signing = False
+OPTIONS.block_based = True
+OPTIONS.updater_binary = None
+OPTIONS.oem_source = None
+OPTIONS.oem_no_mount = False
+OPTIONS.full_radio = False
+OPTIONS.full_bootloader = False
+# Stash size cannot exceed cache_size * threshold.
+OPTIONS.cache_size = None
+OPTIONS.stash_threshold = 0.8
+OPTIONS.log_diff = None
+OPTIONS.payload_signer = None
+OPTIONS.payload_signer_args = []
+OPTIONS.extracted_input = None
+OPTIONS.key_passwords = []
+OPTIONS.skip_postinstall = False
+
+
+METADATA_NAME = 'META-INF/com/android/metadata'
+POSTINSTALL_CONFIG = 'META/postinstall_config.txt'
+UNZIP_PATTERN = ['IMAGES/*', 'META/*']
+
+
+class BuildInfo(object):
+  """A class that holds the information for a given build.
+
+  This class wraps up the property querying for a given source or target build.
+  It abstracts away the logic of handling OEM-specific properties, and caches
+  the commonly used properties such as fingerprint.
+
+  There are two types of info dicts: a) build-time info dict, which is generated
+  at build time (i.e. included in a target_files zip); b) OEM info dict that is
+  specified at package generation time (via command line argument
+  '--oem_settings'). If a build doesn't use OEM-specific properties (i.e. not
+  having "oem_fingerprint_properties" in build-time info dict), all the queries
+  would be answered based on build-time info dict only. Otherwise if using
+  OEM-specific properties, some of them will be calculated from two info dicts.
+
+  Users can query properties just as with a dict() (e.g. info['fstab']), or
+  query build properties via GetBuildProp() or GetVendorBuildProp().
+
+  Attributes:
+    info_dict: The build-time info dict.
+    is_ab: Whether it's a build that uses A/B OTA.
+    oem_dicts: A list of OEM dicts.
+    oem_props: A list of OEM properties that should be read from OEM dicts; None
+        if the build doesn't use any OEM-specific property.
+    fingerprint: The fingerprint of the build, which would be calculated based
+        on OEM properties if applicable.
+    device: The device name, which could come from OEM dicts if applicable.
+  """
+
+  def __init__(self, info_dict, oem_dicts):
+    """Initializes a BuildInfo instance with the given dicts.
+
+    Arguments:
+      info_dict: The build-time info dict.
+      oem_dicts: A list of OEM dicts (which is parsed from --oem_settings). Note
+          that it always uses the first dict to calculate the fingerprint or the
+          device name. The rest would be used for asserting OEM properties only
+          (e.g.  one package can be installed on one of these devices).
+    """
+    self.info_dict = info_dict
+    self.oem_dicts = oem_dicts
+
+    self._is_ab = info_dict.get("ab_update") == "true"
+    self._oem_props = info_dict.get("oem_fingerprint_properties")
+
+    if self._oem_props:
+      assert oem_dicts, "OEM source required for this build"
+
+    # These two should be computed only after setting self._oem_props.
+    self._device = self.GetOemProperty("ro.product.device")
+    self._fingerprint = self.CalculateFingerprint()
+
+  @property
+  def is_ab(self):
+    return self._is_ab
+
+  @property
+  def device(self):
+    return self._device
+
+  @property
+  def fingerprint(self):
+    return self._fingerprint
+
+  @property
+  def oem_props(self):
+    return self._oem_props
+
+  def __getitem__(self, key):
+    return self.info_dict[key]
+
+  def get(self, key, default=None):
+    return self.info_dict.get(key, default)
+
+  def GetBuildProp(self, prop):
+    """Returns the inquired build property."""
+    try:
+      return self.info_dict.get("build.prop", {})[prop]
+    except KeyError:
+      raise common.ExternalError("couldn't find %s in build.prop" % (prop,))
+
+  def GetVendorBuildProp(self, prop):
+    """Returns the inquired vendor build property."""
+    try:
+      return self.info_dict.get("vendor.build.prop", {})[prop]
+    except KeyError:
+      raise common.ExternalError(
+          "couldn't find %s in vendor.build.prop" % (prop,))
+
+  def GetOemProperty(self, key):
+    if self.oem_props is not None and key in self.oem_props:
+      return self.oem_dicts[0][key]
+    return self.GetBuildProp(key)
+
+  def CalculateFingerprint(self):
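+    # Without OEM properties this is simply ro.build.fingerprint; with them,
+    # the result takes the form 'brand/name/device:<ro.build.thumbprint>'
+    # (values illustrative), composed from the first OEM dict.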
+    if self.oem_props is None:
+      return self.GetBuildProp("ro.build.fingerprint")
+    return "%s/%s/%s:%s" % (
+        self.GetOemProperty("ro.product.brand"),
+        self.GetOemProperty("ro.product.name"),
+        self.GetOemProperty("ro.product.device"),
+        self.GetBuildProp("ro.build.thumbprint"))
+
+  def WriteMountOemScript(self, script):
+    assert self.oem_props is not None
+    recovery_mount_options = self.info_dict.get("recovery_mount_options")
+    script.Mount("/oem", recovery_mount_options)
+
+  def WriteDeviceAssertions(self, script, oem_no_mount):
+    # Read the property directly if not using OEM properties.
+    if not self.oem_props:
+      script.AssertDevice(self.device)
+      return
+
+    # Otherwise assert OEM properties.
+    if not self.oem_dicts:
+      raise common.ExternalError(
+          "No OEM file provided to answer expected assertions")
+
+    for prop in self.oem_props.split():
+      values = []
+      for oem_dict in self.oem_dicts:
+        if prop in oem_dict:
+          values.append(oem_dict[prop])
+      if not values:
+        raise common.ExternalError(
+            "The OEM file is missing the property %s" % (prop,))
+      script.AssertOemProperty(prop, values, oem_no_mount)
+
+
+class PayloadSigner(object):
+  """A class that wraps the payload signing works.
+
+  When generating a Payload, hashes of the payload and metadata files will be
+  signed with the device key, either by calling an external payload signer or
+  by calling openssl with the package key. This class provides a unified
+  interface, so that callers can just call PayloadSigner.Sign().
+
+  If an external payload signer has been specified (OPTIONS.payload_signer), it
+  calls the signer with the provided args (OPTIONS.payload_signer_args). Note
+  that the signing key should be provided as part of the payload_signer_args.
+  Otherwise, without an external signer, it uses the package key
+  (OPTIONS.package_key) and calls openssl for the signing work.
+  """
+
+  def __init__(self):
+    if OPTIONS.payload_signer is None:
+      # Prepare the payload signing key.
+      private_key = OPTIONS.package_key + OPTIONS.private_key_suffix
+      pw = OPTIONS.key_passwords[OPTIONS.package_key]
+
+      cmd = ["openssl", "pkcs8", "-in", private_key, "-inform", "DER"]
+      cmd.extend(["-passin", "pass:" + pw] if pw else ["-nocrypt"])
+      signing_key = common.MakeTempFile(prefix="key-", suffix=".key")
+      cmd.extend(["-out", signing_key])
+
+      get_signing_key = common.Run(cmd, verbose=False, stdout=subprocess.PIPE,
+                                   stderr=subprocess.STDOUT)
+      stdoutdata, _ = get_signing_key.communicate()
+      assert get_signing_key.returncode == 0, \
+          "Failed to get signing key: {}".format(stdoutdata)
+
+      self.signer = "openssl"
+      self.signer_args = ["pkeyutl", "-sign", "-inkey", signing_key,
+                          "-pkeyopt", "digest:sha256"]
+    else:
+      self.signer = OPTIONS.payload_signer
+      self.signer_args = OPTIONS.payload_signer_args
+
+  def Sign(self, in_file):
+    """Signs the given input file. Returns the output filename."""
+    out_file = common.MakeTempFile(prefix="signed-", suffix=".bin")
+    cmd = [self.signer] + self.signer_args + ['-in', in_file, '-out', out_file]
+    signing = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+    stdoutdata, _ = signing.communicate()
+    assert signing.returncode == 0, \
+        "Failed to sign the input file: {}".format(stdoutdata)
+    return out_file
+
+
+class Payload(object):
+  """Manages the creation and the signing of an A/B OTA Payload."""
+
+  PAYLOAD_BIN = 'payload.bin'
+  PAYLOAD_PROPERTIES_TXT = 'payload_properties.txt'
+  SECONDARY_PAYLOAD_BIN = 'secondary/payload.bin'
+  SECONDARY_PAYLOAD_PROPERTIES_TXT = 'secondary/payload_properties.txt'
+
+  def __init__(self, secondary=False):
+    """Initializes a Payload instance.
+
+    Args:
+      secondary: Whether it's generating a secondary payload (default: False).
+    """
+    # The place where the output from the subprocess should go.
+    self._log_file = sys.stdout if OPTIONS.verbose else subprocess.PIPE
+    self.payload_file = None
+    self.payload_properties = None
+    self.secondary = secondary
+
+  def Generate(self, target_file, source_file=None, additional_args=None):
+    """Generates a payload from the given target-files zip(s).
+
+    Args:
+      target_file: The filename of the target build target-files zip.
+      source_file: The filename of the source build target-files zip; or None if
+          generating a full OTA.
+      additional_args: A list of additional args that should be passed to
+          brillo_update_payload script; or None.
+    """
+    if additional_args is None:
+      additional_args = []
+
+    payload_file = common.MakeTempFile(prefix="payload-", suffix=".bin")
+    cmd = ["brillo_update_payload", "generate",
+           "--payload", payload_file,
+           "--target_image", target_file]
+    if source_file is not None:
+      cmd.extend(["--source_image", source_file])
+    cmd.extend(additional_args)
+    p = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
+    stdoutdata, _ = p.communicate()
+    assert p.returncode == 0, \
+        "brillo_update_payload generate failed: {}".format(stdoutdata)
+
+    self.payload_file = payload_file
+    self.payload_properties = None
+
+  def Sign(self, payload_signer):
+    """Generates and signs the hashes of the payload and metadata.
+
+    Args:
+      payload_signer: A PayloadSigner() instance that serves the signing work.
+
+    Raises:
+      AssertionError: On any failure when calling brillo_update_payload script.
+    """
+    assert isinstance(payload_signer, PayloadSigner)
+
+    # 1. Generate hashes of the payload and metadata files.
+    payload_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin")
+    metadata_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin")
+    cmd = ["brillo_update_payload", "hash",
+           "--unsigned_payload", self.payload_file,
+           "--signature_size", "256",
+           "--metadata_hash_file", metadata_sig_file,
+           "--payload_hash_file", payload_sig_file]
+    p1 = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
+    p1.communicate()
+    assert p1.returncode == 0, "brillo_update_payload hash failed"
+
+    # 2. Sign the hashes.
+    signed_payload_sig_file = payload_signer.Sign(payload_sig_file)
+    signed_metadata_sig_file = payload_signer.Sign(metadata_sig_file)
+
+    # 3. Insert the signatures back into the payload file.
+    signed_payload_file = common.MakeTempFile(prefix="signed-payload-",
+                                              suffix=".bin")
+    cmd = ["brillo_update_payload", "sign",
+           "--unsigned_payload", self.payload_file,
+           "--payload", signed_payload_file,
+           "--signature_size", "256",
+           "--metadata_signature_file", signed_metadata_sig_file,
+           "--payload_signature_file", signed_payload_sig_file]
+    p1 = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
+    p1.communicate()
+    assert p1.returncode == 0, "brillo_update_payload sign failed"
+
+    # 4. Dump the signed payload properties.
+    properties_file = common.MakeTempFile(prefix="payload-properties-",
+                                          suffix=".txt")
+    cmd = ["brillo_update_payload", "properties",
+           "--payload", signed_payload_file,
+           "--properties_file", properties_file]
+    p1 = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
+    p1.communicate()
+    assert p1.returncode == 0, "brillo_update_payload properties failed"
+
+    if self.secondary:
+      with open(properties_file, "a") as f:
+        f.write("SWITCH_SLOT_ON_REBOOT=0\n")
+
+    if OPTIONS.wipe_user_data:
+      with open(properties_file, "a") as f:
+        f.write("POWERWASH=1\n")
+
+    self.payload_file = signed_payload_file
+    self.payload_properties = properties_file
+
+  def WriteToZip(self, output_zip):
+    """Writes the payload to the given zip.
+
+    Args:
+      output_zip: The output ZipFile instance.
+    """
+    assert self.payload_file is not None
+    assert self.payload_properties is not None
+
+    if self.secondary:
+      payload_arcname = Payload.SECONDARY_PAYLOAD_BIN
+      payload_properties_arcname = Payload.SECONDARY_PAYLOAD_PROPERTIES_TXT
+    else:
+      payload_arcname = Payload.PAYLOAD_BIN
+      payload_properties_arcname = Payload.PAYLOAD_PROPERTIES_TXT
+
+    # Add the signed payload file and properties into the zip. In order to
+    # support streaming, we pack them as ZIP_STORED. So these entries can be
+    # read directly with the offset and length pairs.
+    common.ZipWrite(output_zip, self.payload_file, arcname=payload_arcname,
+                    compress_type=zipfile.ZIP_STORED)
+    common.ZipWrite(output_zip, self.payload_properties,
+                    arcname=payload_properties_arcname,
+                    compress_type=zipfile.ZIP_STORED)
+
+
+def SignOutput(temp_zip_name, output_zip_name):
+  pw = OPTIONS.key_passwords[OPTIONS.package_key]
+
+  common.SignFile(temp_zip_name, output_zip_name, OPTIONS.package_key, pw,
+                  whole_file=True)
+
+
+def _LoadOemDicts(oem_source):
+  """Returns the list of loaded OEM properties dict."""
+  if not oem_source:
+    return None
+
+  oem_dicts = []
+  for oem_file in oem_source:
+    with open(oem_file) as fp:
+      oem_dicts.append(common.LoadDictionaryFromLines(fp.readlines()))
+  return oem_dicts
+
+
+def _WriteRecoveryImageToBoot(script, output_zip):
+  """Find and write recovery image to /boot in two-step OTA.
+
+  In two-step OTAs, we write the recovery image to /boot as the first step so
+  that we can reboot into it and install a new recovery image to /recovery.
+  A special "recovery-two-step.img" will be preferred, which encodes the
+  correct path of "/boot". Otherwise the device may show a "device is corrupt"
+  message when booting into /boot.
+
+  Fall back to using the regular recovery.img if the two-step recovery image
+  doesn't exist. Note that rebuilding the special image at this point may be
+  infeasible, because we don't have the desired boot signer and keys when
+  calling ota_from_target_files.py.
+  """
+
+  recovery_two_step_img_name = "recovery-two-step.img"
+  recovery_two_step_img_path = os.path.join(
+      OPTIONS.input_tmp, "IMAGES", recovery_two_step_img_name)
+  if os.path.exists(recovery_two_step_img_path):
+    recovery_two_step_img = common.GetBootableImage(
+        recovery_two_step_img_name, recovery_two_step_img_name,
+        OPTIONS.input_tmp, "RECOVERY")
+    common.ZipWriteStr(
+        output_zip, recovery_two_step_img_name, recovery_two_step_img.data)
+    print("two-step package: using %s in stage 1/3" % (
+        recovery_two_step_img_name,))
+    script.WriteRawImage("/boot", recovery_two_step_img_name)
+  else:
+    print("two-step package: using recovery.img in stage 1/3")
+    # The "recovery.img" entry has been written into package earlier.
+    script.WriteRawImage("/boot", "recovery.img")
+
+
+def HasRecoveryPatch(target_files_zip):
+  namelist = target_files_zip.namelist()
+  return ("SYSTEM/recovery-from-boot.p" in namelist or
+          "SYSTEM/etc/recovery.img" in namelist)
+
+
+def HasVendorPartition(target_files_zip):
+  try:
+    target_files_zip.getinfo("VENDOR/")
+    return True
+  except KeyError:
+    return False
+
+
+def HasTrebleEnabled(target_files_zip, target_info):
+  return (HasVendorPartition(target_files_zip) and
+          target_info.GetBuildProp("ro.treble.enabled") == "true")
+
+
+def WriteFingerprintAssertion(script, target_info, source_info):
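+  # Pick the assertion based on which side uses OEM properties: plain
+  # fingerprints when neither side does, thumbprints when both do, and a
+  # mixed fingerprint-or-thumbprint check when only one side does.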
+  source_oem_props = source_info.oem_props
+  target_oem_props = target_info.oem_props
+
+  if source_oem_props is None and target_oem_props is None:
+    script.AssertSomeFingerprint(
+        source_info.fingerprint, target_info.fingerprint)
+  elif source_oem_props is not None and target_oem_props is not None:
+    script.AssertSomeThumbprint(
+        target_info.GetBuildProp("ro.build.thumbprint"),
+        source_info.GetBuildProp("ro.build.thumbprint"))
+  elif source_oem_props is None and target_oem_props is not None:
+    script.AssertFingerprintOrThumbprint(
+        source_info.fingerprint,
+        target_info.GetBuildProp("ro.build.thumbprint"))
+  else:
+    script.AssertFingerprintOrThumbprint(
+        target_info.fingerprint,
+        source_info.GetBuildProp("ro.build.thumbprint"))
+
+
+def AddCompatibilityArchiveIfTrebleEnabled(target_zip, output_zip, target_info,
+                                           source_info=None):
+  """Adds compatibility info into the output zip if it's Treble-enabled target.
+
+  Metadata used for on-device compatibility verification is retrieved from
+  target_zip, then added to compatibility.zip, which is in turn added to the
+  output_zip archive.
+
+  The compatibility archive should only be included for devices that have
+  enabled Treble support.
+
+  Args:
+    target_zip: Zip file containing the source files to be included for OTA.
+    output_zip: Zip file that will be sent for OTA.
+    target_info: The BuildInfo instance that holds the target build info.
+    source_info: The BuildInfo instance that holds the source build info, if
+        generating an incremental OTA; None otherwise.
+  """
+
+  def AddCompatibilityArchive(system_updated, vendor_updated):
+    """Adds compatibility info based on system/vendor update status.
+
+    Args:
+      system_updated: If True, the system image will be updated and therefore
+          its metadata should be included.
+      vendor_updated: If True, the vendor image will be updated and therefore
+          its metadata should be included.
+    """
+    # Determine what metadata we need. File names are relative to META/.
+    compatibility_files = []
+    vendor_metadata = ("vendor_manifest.xml", "vendor_matrix.xml")
+    system_metadata = ("system_manifest.xml", "system_matrix.xml")
+    if vendor_updated:
+      compatibility_files += vendor_metadata
+    if system_updated:
+      compatibility_files += system_metadata
+
+    # Create new archive.
+    compatibility_archive = tempfile.NamedTemporaryFile()
+    compatibility_archive_zip = zipfile.ZipFile(
+        compatibility_archive, "w", compression=zipfile.ZIP_DEFLATED)
+
+    # Add metadata.
+    for file_name in compatibility_files:
+      target_file_name = "META/" + file_name
+
+      if target_file_name in target_zip.namelist():
+        data = target_zip.read(target_file_name)
+        common.ZipWriteStr(compatibility_archive_zip, file_name, data)
+
+    # Ensure files are written before we copy into output_zip.
+    compatibility_archive_zip.close()
+
+    # Only add the archive if we have any compatibility info.
+    if compatibility_archive_zip.namelist():
+      common.ZipWrite(output_zip, compatibility_archive.name,
+                      arcname="compatibility.zip",
+                      compress_type=zipfile.ZIP_STORED)
+
+  # Will only proceed if the target has enabled the Treble support (as well as
+  # having a /vendor partition).
+  if not HasTrebleEnabled(target_zip, target_info):
+    return
+
+  # We don't support OEM thumbprints in the Treble world (they calculate
+  # fingerprints in a different way, as shown in CalculateFingerprint()).
+  assert not target_info.oem_props
+
+  # Full OTA carries the info for system/vendor both.
+  if source_info is None:
+    AddCompatibilityArchive(True, True)
+    return
+
+  assert not source_info.oem_props
+
+  source_fp = source_info.fingerprint
+  target_fp = target_info.fingerprint
+  system_updated = source_fp != target_fp
+
+  source_fp_vendor = source_info.GetVendorBuildProp(
+      "ro.vendor.build.fingerprint")
+  target_fp_vendor = target_info.GetVendorBuildProp(
+      "ro.vendor.build.fingerprint")
+  vendor_updated = source_fp_vendor != target_fp_vendor
+
+  AddCompatibilityArchive(system_updated, vendor_updated)
+
+
+def WriteFullOTAPackage(input_zip, output_file):
+  target_info = BuildInfo(OPTIONS.info_dict, OPTIONS.oem_dicts)
+
+  # We don't know what version it will be installed on top of. We expect the API
+  # just won't change very often. Similarly for fstab, it might have changed in
+  # the target build.
+  target_api_version = target_info["recovery_api_version"]
+  script = edify_generator.EdifyGenerator(target_api_version, target_info)
+
+  if target_info.oem_props and not OPTIONS.oem_no_mount:
+    target_info.WriteMountOemScript(script)
+
+  metadata = GetPackageMetadata(target_info)
+
+  if not OPTIONS.no_signing:
+    staging_file = common.MakeTempFile(suffix='.zip')
+  else:
+    staging_file = output_file
+
+  output_zip = zipfile.ZipFile(
+      staging_file, "w", compression=zipfile.ZIP_DEFLATED)
+
+  device_specific = common.DeviceSpecificParams(
+      input_zip=input_zip,
+      input_version=target_api_version,
+      output_zip=output_zip,
+      script=script,
+      input_tmp=OPTIONS.input_tmp,
+      metadata=metadata,
+      info_dict=OPTIONS.info_dict)
+
+  assert HasRecoveryPatch(input_zip)
+
+  # Assertions (e.g. downgrade check, device properties check).
+  ts = target_info.GetBuildProp("ro.build.date.utc")
+  ts_text = target_info.GetBuildProp("ro.build.date")
+  script.AssertOlderBuild(ts, ts_text)
+
+  target_info.WriteDeviceAssertions(script, OPTIONS.oem_no_mount)
+  device_specific.FullOTA_Assertions()
+
+  # Two-step package strategy (in chronological order, which is *not*
+  # the order in which the generated script has things):
+  #
+  # if stage is not "2/3" or "3/3":
+  #    write recovery image to boot partition
+  #    set stage to "2/3"
+  #    reboot to boot partition and restart recovery
+  # else if stage is "2/3":
+  #    write recovery image to recovery partition
+  #    set stage to "3/3"
+  #    reboot to recovery partition and restart recovery
+  # else:
+  #    (stage must be "3/3")
+  #    set stage to ""
+  #    do normal full package installation:
+  #       wipe and install system, boot image, etc.
+  #       set up system to update recovery partition on first boot
+  #    complete script normally
+  #    (allow recovery to mark itself finished and reboot)
+
+  recovery_img = common.GetBootableImage("recovery.img", "recovery.img",
+                                         OPTIONS.input_tmp, "RECOVERY")
+  if OPTIONS.two_step:
+    if not target_info.get("multistage_support"):
+      assert False, "two-step packages not supported by this build"
+    fs = target_info["fstab"]["/misc"]
+    assert fs.fs_type.upper() == "EMMC", \
+        "two-step packages only supported on devices with EMMC /misc partitions"
+    bcb_dev = {"bcb_dev": fs.device}
+    common.ZipWriteStr(output_zip, "recovery.img", recovery_img.data)
+    script.AppendExtra("""
+if get_stage("%(bcb_dev)s") == "2/3" then
+""" % bcb_dev)
+
+    # Stage 2/3: Write recovery image to /recovery (currently running /boot).
+    script.Comment("Stage 2/3")
+    script.WriteRawImage("/recovery", "recovery.img")
+    script.AppendExtra("""
+set_stage("%(bcb_dev)s", "3/3");
+reboot_now("%(bcb_dev)s", "recovery");
+else if get_stage("%(bcb_dev)s") == "3/3" then
+""" % bcb_dev)
+
+    # Stage 3/3: Make changes.
+    script.Comment("Stage 3/3")
+
+  # Dump fingerprints
+  script.Print("Target: {}".format(target_info.fingerprint))
+
+  device_specific.FullOTA_InstallBegin()
+
+  system_progress = 0.75
+
+  if OPTIONS.wipe_user_data:
+    system_progress -= 0.1
+  if HasVendorPartition(input_zip):
+    system_progress -= 0.1
+
+  script.ShowProgress(system_progress, 0)
+
+  # See the notes in WriteBlockIncrementalOTAPackage().
+  allow_shared_blocks = target_info.get('ext4_share_dup_blocks') == "true"
+
+  # Full OTA is done as an "incremental" against an empty source image. This
+  # has the effect of writing new data from the package to the entire
+  # partition, but lets us reuse the updater code that writes incrementals to
+  # do it.
+  system_tgt = common.GetSparseImage("system", OPTIONS.input_tmp, input_zip,
+                                     allow_shared_blocks)
+  system_tgt.ResetFileMap()
+  system_diff = common.BlockDifference("system", system_tgt, src=None)
+  system_diff.WriteScript(script, output_zip)
+
+  boot_img = common.GetBootableImage(
+      "boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")
+
+  if HasVendorPartition(input_zip):
+    script.ShowProgress(0.1, 0)
+
+    vendor_tgt = common.GetSparseImage("vendor", OPTIONS.input_tmp, input_zip,
+                                       allow_shared_blocks)
+    vendor_tgt.ResetFileMap()
+    vendor_diff = common.BlockDifference("vendor", vendor_tgt)
+    vendor_diff.WriteScript(script, output_zip)
+
+  AddCompatibilityArchiveIfTrebleEnabled(input_zip, output_zip, target_info)
+
+  common.CheckSize(boot_img.data, "boot.img", target_info)
+  common.ZipWriteStr(output_zip, "boot.img", boot_img.data)
+
+  script.ShowProgress(0.05, 5)
+  script.WriteRawImage("/boot", "boot.img")
+
+  script.ShowProgress(0.2, 10)
+  device_specific.FullOTA_InstallEnd()
+
+  if OPTIONS.extra_script is not None:
+    script.AppendExtra(OPTIONS.extra_script)
+
+  script.UnmountAll()
+
+  if OPTIONS.wipe_user_data:
+    script.ShowProgress(0.1, 10)
+    script.FormatPartition("/data")
+
+  if OPTIONS.two_step:
+    script.AppendExtra("""
+set_stage("%(bcb_dev)s", "");
+""" % bcb_dev)
+    script.AppendExtra("else\n")
+
+    # Stage 1/3: Nothing to verify for full OTA. Write recovery image to /boot.
+    script.Comment("Stage 1/3")
+    _WriteRecoveryImageToBoot(script, output_zip)
+
+    script.AppendExtra("""
+set_stage("%(bcb_dev)s", "2/3");
+reboot_now("%(bcb_dev)s", "");
+endif;
+endif;
+""" % bcb_dev)
+
+  script.SetProgress(1)
+  script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary)
+  metadata["ota-required-cache"] = str(script.required_cache)
+
+  # We haven't written the metadata entry, which will be done in
+  # FinalizeMetadata.
+  common.ZipClose(output_zip)
+
+  needed_property_files = (
+      NonAbOtaPropertyFiles(),
+  )
+  FinalizeMetadata(metadata, staging_file, output_file, needed_property_files)
+
+
+def WriteMetadata(metadata, output_zip):
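+  # Serializes the dict as sorted 'key=value' lines; an illustrative result:
+  #   ota-required-cache=0
+  #   ota-type=AB
+  #   post-build=<target fingerprint>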
+  value = "".join(["%s=%s\n" % kv for kv in sorted(metadata.iteritems())])
+  common.ZipWriteStr(output_zip, METADATA_NAME, value,
+                     compress_type=zipfile.ZIP_STORED)
+
+
+def HandleDowngradeMetadata(metadata, target_info, source_info):
+  # Only incremental OTAs are allowed to reach here.
+  assert OPTIONS.incremental_source is not None
+
+  post_timestamp = target_info.GetBuildProp("ro.build.date.utc")
+  pre_timestamp = source_info.GetBuildProp("ro.build.date.utc")
+  is_downgrade = int(post_timestamp) < int(pre_timestamp)
+
+  if OPTIONS.downgrade:
+    if not is_downgrade:
+      raise RuntimeError(
+          "--downgrade or --override_timestamp specified but no downgrade "
+          "detected: pre: %s, post: %s" % (pre_timestamp, post_timestamp))
+    metadata["ota-downgrade"] = "yes"
+  else:
+    if is_downgrade:
+      raise RuntimeError(
+          "Downgrade detected based on timestamp check: pre: %s, post: %s. "
+          "Need to specify --override_timestamp OR --downgrade to allow "
+          "building the incremental." % (pre_timestamp, post_timestamp))
+
+
+def GetPackageMetadata(target_info, source_info=None):
+  """Generates and returns the metadata dict.
+
+  It generates a dict() that contains the info to be written into an OTA
+  package (META-INF/com/android/metadata). It also handles the detection of
+  downgrade / data wipe based on the global options.
+
+  Args:
+    target_info: The BuildInfo instance that holds the target build info.
+    source_info: The BuildInfo instance that holds the source build info, or
+        None if generating full OTA.
+
+  Returns:
+    A dict to be written into package metadata entry.
+  """
+  assert isinstance(target_info, BuildInfo)
+  assert source_info is None or isinstance(source_info, BuildInfo)
+
+  metadata = {
+      'post-build' : target_info.fingerprint,
+      'post-build-incremental' : target_info.GetBuildProp(
+          'ro.build.version.incremental'),
+      'post-sdk-level' : target_info.GetBuildProp(
+          'ro.build.version.sdk'),
+      'post-security-patch-level' : target_info.GetBuildProp(
+          'ro.build.version.security_patch'),
+  }
+
+  if target_info.is_ab:
+    metadata['ota-type'] = 'AB'
+    metadata['ota-required-cache'] = '0'
+  else:
+    metadata['ota-type'] = 'BLOCK'
+
+  if OPTIONS.wipe_user_data:
+    metadata['ota-wipe'] = 'yes'
+
+  is_incremental = source_info is not None
+  if is_incremental:
+    metadata['pre-build'] = source_info.fingerprint
+    metadata['pre-build-incremental'] = source_info.GetBuildProp(
+        'ro.build.version.incremental')
+    metadata['pre-device'] = source_info.device
+  else:
+    metadata['pre-device'] = target_info.device
+
+  # Use the actual post-timestamp, even for a downgrade case.
+  metadata['post-timestamp'] = target_info.GetBuildProp('ro.build.date.utc')
+
+  # Detect downgrades and set up downgrade flags accordingly.
+  if is_incremental:
+    HandleDowngradeMetadata(metadata, target_info, source_info)
+
+  return metadata
+
+
+class PropertyFiles(object):
+  """A class that computes the property-files string for an OTA package.
+
+  A property-files string is a comma-separated string that contains the
+  offset/size info for an OTA package. The entries, which must be ZIP_STORED,
+  can be fetched directly with the package URL along with the offset/size info.
+  These strings can be used for streaming A/B OTAs, or for allowing an updater
+  to download the package metadata entry directly, without paying the cost of
+  downloading the entire package.
+
+  Computing the final property-files string requires two passes, because
+  signing the whole package (with signapk.jar) may reorder the ZIP entries,
+  which would in turn invalidate the previously computed ZIP entry offset/size
+  values.
+
+  This class provides functions to be called for each pass. The general flow is
+  as follows.
+
+    property_files = PropertyFiles()
+    # The first pass, which writes placeholders before doing initial signing.
+    property_files.Compute()
+    SignOutput()
+
+    # The second pass, by replacing the placeholders with actual data.
+    property_files.Finalize()
+    SignOutput()
+
+  And the caller can additionally verify the final result.
+
+    property_files.Verify()
+  """
+
+  def __init__(self):
+    self.name = None
+    self.required = ()
+    self.optional = ()
+
+  def Compute(self, input_zip):
+    """Computes and returns a property-files string with placeholders.
+
+    We reserve extra space for the offset and size of the metadata entry itself,
+    although we don't know the final values until the package gets signed.
+
+    Args:
+      input_zip: The input ZIP file.
+
+    Returns:
+      A string with placeholders for the metadata offset/size info, e.g.
+      "payload.bin:679:343,payload_properties.txt:378:45,metadata:        ".
+    """
+    return self._GetPropertyFilesString(input_zip, reserve_space=True)
+
+  class InsufficientSpaceException(Exception):
+    pass
+
+  def Finalize(self, input_zip, reserved_length):
+    """Finalizes a property-files string with actual METADATA offset/size info.
+
+    The input ZIP file has been signed, with the ZIP entries in the desired
+    place (signapk.jar will possibly reorder the ZIP entries). Now we compute
+    the ZIP entry offsets and construct the property-files string with actual
+    data. Note that during this process, we must pad the property-files string
+    to the reserved length, so that the METADATA entry size remains the same.
+    Otherwise the entries' offsets and sizes may change again.
+
+    Args:
+      input_zip: The input ZIP file.
+      reserved_length: The reserved length of the property-files string during
+          the call to Compute(). The final string must be no more than this
+          size.
+
+    Returns:
+      A property-files string including the metadata offset/size info, e.g.
+      "payload.bin:679:343,payload_properties.txt:378:45,metadata:69:379  ".
+
+    Raises:
+      InsufficientSpaceException: If the reserved length is insufficient to hold
+          the final string.
+    """
+    result = self._GetPropertyFilesString(input_zip, reserve_space=False)
+    if len(result) > reserved_length:
+      raise self.InsufficientSpaceException(
+          'Insufficient reserved space: reserved={}, actual={}'.format(
+              reserved_length, len(result)))
+
+    result += ' ' * (reserved_length - len(result))
+    return result
+
+  def Verify(self, input_zip, expected):
+    """Verifies the input ZIP file contains the expected property-files string.
+
+    Args:
+      input_zip: The input ZIP file.
+      expected: The property-files string that's computed from Finalize().
+
+    Raises:
+      AssertionError: On finding a mismatch.
+    """
+    actual = self._GetPropertyFilesString(input_zip)
+    assert actual == expected, \
+        "Mismatching streaming metadata: {} vs {}.".format(actual, expected)
+
+  def _GetPropertyFilesString(self, zip_file, reserve_space=False):
+    """Constructs the property-files string per request."""
+
+    def ComputeEntryOffsetSize(name):
+      """Computes the zip entry offset and size."""
+      info = zip_file.getinfo(name)
+      offset = info.header_offset + len(info.FileHeader())
+      size = info.file_size
+      return '%s:%d:%d' % (os.path.basename(name), offset, size)
+
+    tokens = []
+    tokens.extend(self._GetPrecomputed(zip_file))
+    for entry in self.required:
+      tokens.append(ComputeEntryOffsetSize(entry))
+    for entry in self.optional:
+      if entry in zip_file.namelist():
+        tokens.append(ComputeEntryOffsetSize(entry))
+
+    # 'META-INF/com/android/metadata' is required. We don't know its actual
+    # offset and length (nor the values for the other entries) until the
+    # package gets signed. So we reserve 15 bytes as a placeholder
+    # ('offset:length'): up to 10 digits for the offset (i.e. ~9 GiB), a
+    # colon, and up to 4 digits for the length. Note that all the reserved
+    # space serves the metadata entry only.
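+    #
+    # For example, Compute() emits the token 'metadata:' followed by 15
+    # spaces, and Finalize() later rewrites it with the actual values (e.g.
+    # 'metadata:69:379'), padding the full string back to the reserved length
+    # with trailing spaces.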
+    if reserve_space:
+      tokens.append('metadata:' + ' ' * 15)
+    else:
+      tokens.append(ComputeEntryOffsetSize(METADATA_NAME))
+
+    return ','.join(tokens)
+
+  def _GetPrecomputed(self, input_zip):
+    """Computes the additional tokens to be included into the property-files.
+
+    This applies to tokens without actual ZIP entries, such as
+    payload_metadadata.bin. We want to expose the offset/size to updaters, so
+    that they can download the payload metadata directly with the info.
+
+    Args:
+      input_zip: The input zip file.
+
+    Returns:
+      A list of strings (tokens) to be added to the property-files string.
+    """
+    # pylint: disable=no-self-use
+    # pylint: disable=unused-argument
+    return []
+
+
+class StreamingPropertyFiles(PropertyFiles):
+  """A subclass for computing the property-files for streaming A/B OTAs."""
+
+  def __init__(self):
+    super(StreamingPropertyFiles, self).__init__()
+    self.name = 'ota-streaming-property-files'
+    self.required = (
+        # payload.bin and payload_properties.txt must exist.
+        'payload.bin',
+        'payload_properties.txt',
+    )
+    self.optional = (
+        # care_map.txt is available only if dm-verity is enabled.
+        'care_map.txt',
+        # compatibility.zip is available only if target supports Treble.
+        'compatibility.zip',
+    )
+
+
+class AbOtaPropertyFiles(StreamingPropertyFiles):
+  """The property-files for A/B OTA that includes payload_metadata.bin info.
+
+  Since P, we expose one more token (aka property-file), in addition to the
+  ones for streaming A/B OTA, for a virtual entry of 'payload_metadata.bin'.
+  'payload_metadata.bin' is the header part of a payload ('payload.bin'), which
+  doesn't exist as a separate ZIP entry, but can be used to verify whether the
+  payload can be applied to the given device.
+
+  For backward compatibility, we keep both of the 'ota-streaming-property-files'
+  and the newly added 'ota-property-files' in P. The new token will only be
+  available in 'ota-property-files'.
+  """
+
+  def __init__(self):
+    super(AbOtaPropertyFiles, self).__init__()
+    self.name = 'ota-property-files'
+
+  def _GetPrecomputed(self, input_zip):
+    offset, size = self._GetPayloadMetadataOffsetAndSize(input_zip)
+    return ['payload_metadata.bin:{}:{}'.format(offset, size)]
+
+  @staticmethod
+  def _GetPayloadMetadataOffsetAndSize(input_zip):
+    """Computes the offset and size of the payload metadata for a given package.
+
+    (From system/update_engine/update_metadata.proto)
+    A delta update file contains all the deltas needed to update a system from
+    one specific version to another specific version. The update format is
+    represented by this struct pseudocode:
+
+    struct delta_update_file {
+      char magic[4] = "CrAU";
+      uint64 file_format_version;
+      uint64 manifest_size;  // Size of protobuf DeltaArchiveManifest
+
+      // Only present if format_version > 1:
+      uint32 metadata_signature_size;
+
+      // The Bzip2 compressed DeltaArchiveManifest
+      char manifest[manifest_size];
+
+      // The signature of the metadata (from the beginning of the payload up to
+      // this location, not including the signature itself). This is a
+      // serialized Signatures message.
+      char metadata_signature_message[metadata_signature_size];
+
+      // Data blobs for files, no specific format. The specific offset
+      // and length of each data blob is recorded in the DeltaArchiveManifest.
+      struct {
+        char data[];
+      } blobs[];
+
+      // These two are not signed:
+      uint64 payload_signatures_message_size;
+      char payload_signatures_message[];
+    };
+
+    'payload_metadata.bin' contains all the bytes from the beginning of the
+    payload to the end of 'metadata_signature_message'.
+    """
+    payload_info = input_zip.getinfo('payload.bin')
+    payload_offset = payload_info.header_offset + len(payload_info.FileHeader())
+    payload_size = payload_info.file_size
+
+    with input_zip.open('payload.bin', 'r') as payload_fp:
+      header_bin = payload_fp.read(24)
+
+    # The 24 header bytes, in network byte order (big-endian): 4-byte magic,
+    # 8-byte file_format_version, 8-byte manifest_size and 4-byte
+    # metadata_signature_size.
+    header = struct.unpack("!IQQL", header_bin)
+
+    # 'CrAU'
+    magic = header[0]
+    assert magic == 0x43724155, "Invalid magic: {:x}".format(magic)
+
+    manifest_size = header[2]
+    metadata_signature_size = header[3]
+    metadata_total = 24 + manifest_size + metadata_signature_size
+    assert metadata_total < payload_size
+
+    return (payload_offset, metadata_total)
+
+
+class NonAbOtaPropertyFiles(PropertyFiles):
+  """The property-files for non-A/B OTA.
+
+  For non-A/B OTA, the property-files string contains the info for the METADATA
+  entry, with which a system updater can fetch the package metadata prior
+  to downloading the entire package.
+  """
+
+  def __init__(self):
+    super(NonAbOtaPropertyFiles, self).__init__()
+    self.name = 'ota-property-files'
+
+
+def FinalizeMetadata(metadata, input_file, output_file, needed_property_files):
+  """Finalizes the metadata and signs an A/B OTA package.
+
+  In order to stream an A/B OTA package, we need 'ota-streaming-property-files'
+  that contains the offsets and sizes for the ZIP entries. An example
+  property-files string is as follows.
+
+    "payload.bin:679:343,payload_properties.txt:378:45,metadata:69:379"
+
+  The OTA server can pass this string, in addition to the package URL, down to
+  the system update client, which can then fetch individual ZIP entries
+  (ZIP_STORED) directly at the given offsets within the package.
+
+  Args:
+    metadata: The metadata dict for the package.
+    input_file: The input ZIP filename that doesn't contain the package METADATA
+        entry yet.
+    output_file: The final output ZIP filename.
+    needed_property_files: The list of PropertyFiles instances to be generated.
+  """
+
+  def ComputeAllPropertyFiles(input_file, needed_property_files):
+    # Write the current metadata entry with placeholders.
+    with zipfile.ZipFile(input_file) as input_zip:
+      for property_files in needed_property_files:
+        metadata[property_files.name] = property_files.Compute(input_zip)
+      namelist = input_zip.namelist()
+
+    if METADATA_NAME in namelist:
+      common.ZipDelete(input_file, METADATA_NAME)
+    output_zip = zipfile.ZipFile(input_file, 'a')
+    WriteMetadata(metadata, output_zip)
+    common.ZipClose(output_zip)
+
+    if OPTIONS.no_signing:
+      return input_file
+
+    prelim_signing = common.MakeTempFile(suffix='.zip')
+    SignOutput(input_file, prelim_signing)
+    return prelim_signing
+
+  def FinalizeAllPropertyFiles(prelim_signing, needed_property_files):
+    with zipfile.ZipFile(prelim_signing) as prelim_signing_zip:
+      for property_files in needed_property_files:
+        metadata[property_files.name] = property_files.Finalize(
+            prelim_signing_zip, len(metadata[property_files.name]))
+
+  # SignOutput(), which in turn calls signapk.jar, will possibly reorder the ZIP
+  # entries, as well as padding the entry headers. We do a preliminary signing
+  # (with an incomplete metadata entry) to allow that to happen. Then compute
+  # the ZIP entry offsets, write back the final metadata and do the final
+  # signing.
+  prelim_signing = ComputeAllPropertyFiles(input_file, needed_property_files)
+  try:
+    FinalizeAllPropertyFiles(prelim_signing, needed_property_files)
+  except PropertyFiles.InsufficientSpaceException:
+    # Even with the preliminary signing, the entry order may still change
+    # dramatically, which leaves the space reserved during the first call to
+    # ComputeAllPropertyFiles() insufficient. In that case, we redo all the
+    # preliminary signing work, based on the already reordered ZIP entries, to
+    # address the issue.
+    prelim_signing = ComputeAllPropertyFiles(
+        prelim_signing, needed_property_files)
+    FinalizeAllPropertyFiles(prelim_signing, needed_property_files)
+
+  # Replace the METADATA entry.
+  common.ZipDelete(prelim_signing, METADATA_NAME)
+  output_zip = zipfile.ZipFile(prelim_signing, 'a')
+  WriteMetadata(metadata, output_zip)
+  common.ZipClose(output_zip)
+
+  # Re-sign the package after updating the metadata entry.
+  if OPTIONS.no_signing:
+    output_file = prelim_signing
+  else:
+    SignOutput(prelim_signing, output_file)
+
+  # Reopen the final signed zip to double check the streaming metadata.
+  with zipfile.ZipFile(output_file) as output_zip:
+    for property_files in needed_property_files:
+      property_files.Verify(output_zip, metadata[property_files.name].strip())
+
+
+def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_file):
+  target_info = BuildInfo(OPTIONS.target_info_dict, OPTIONS.oem_dicts)
+  source_info = BuildInfo(OPTIONS.source_info_dict, OPTIONS.oem_dicts)
+
+  target_api_version = target_info["recovery_api_version"]
+  source_api_version = source_info["recovery_api_version"]
+  if source_api_version == 0:
+    print("WARNING: generating edify script for a source that "
+          "can't install it.")
+
+  script = edify_generator.EdifyGenerator(
+      source_api_version, target_info, fstab=source_info["fstab"])
+
+  if target_info.oem_props or source_info.oem_props:
+    if not OPTIONS.oem_no_mount:
+      source_info.WriteMountOemScript(script)
+
+  metadata = GetPackageMetadata(target_info, source_info)
+
+  if not OPTIONS.no_signing:
+    staging_file = common.MakeTempFile(suffix='.zip')
+  else:
+    staging_file = output_file
+
+  output_zip = zipfile.ZipFile(
+      staging_file, "w", compression=zipfile.ZIP_DEFLATED)
+
+  device_specific = common.DeviceSpecificParams(
+      source_zip=source_zip,
+      source_version=source_api_version,
+      target_zip=target_zip,
+      target_version=target_api_version,
+      output_zip=output_zip,
+      script=script,
+      metadata=metadata,
+      info_dict=source_info)
+
+  source_boot = common.GetBootableImage(
+      "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT", source_info)
+  target_boot = common.GetBootableImage(
+      "/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT", target_info)
+  updating_boot = (not OPTIONS.two_step and
+                   (source_boot.data != target_boot.data))
+
+  target_recovery = common.GetBootableImage(
+      "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")
+
+  # When target uses 'BOARD_EXT4_SHARE_DUP_BLOCKS := true', images may contain
+  # shared blocks (i.e. some blocks will show up in multiple files' block
+  # list). We can only allocate such shared blocks to the first "owner", and
+  # disable imgdiff for all later occurrences.
+  allow_shared_blocks = (source_info.get('ext4_share_dup_blocks') == "true" or
+                         target_info.get('ext4_share_dup_blocks') == "true")
+  system_src = common.GetSparseImage("system", OPTIONS.source_tmp, source_zip,
+                                     allow_shared_blocks)
+  system_tgt = common.GetSparseImage("system", OPTIONS.target_tmp, target_zip,
+                                     allow_shared_blocks)
+
+  blockimgdiff_version = max(
+      int(i) for i in target_info.get("blockimgdiff_versions", "1").split(","))
+  assert blockimgdiff_version >= 3
+
+  # Check the first block of the source system partition for remount R/W only
+  # if the filesystem is ext4.
+  system_src_partition = source_info["fstab"]["/system"]
+  check_first_block = system_src_partition.fs_type == "ext4"
+  # Disable using imgdiff for squashfs. 'imgdiff -z' expects input files to be
+  # in zip format. However with squashfs, a) all files are compressed with LZ4;
+  # b) the blocks listed in the block map may not contain all the bytes for a
+  # given file (because they're rounded to be 4K-aligned).
+  system_tgt_partition = target_info["fstab"]["/system"]
+  disable_imgdiff = (system_src_partition.fs_type == "squashfs" or
+                     system_tgt_partition.fs_type == "squashfs")
+  system_diff = common.BlockDifference("system", system_tgt, system_src,
+                                       check_first_block,
+                                       version=blockimgdiff_version,
+                                       disable_imgdiff=disable_imgdiff)
+
+  if HasVendorPartition(target_zip):
+    if not HasVendorPartition(source_zip):
+      raise RuntimeError("can't generate incremental that adds /vendor")
+    vendor_src = common.GetSparseImage("vendor", OPTIONS.source_tmp, source_zip,
+                                       allow_shared_blocks)
+    vendor_tgt = common.GetSparseImage("vendor", OPTIONS.target_tmp, target_zip,
+                                       allow_shared_blocks)
+
+    # Check the first block of the source vendor partition for remount R/W
+    # only if the filesystem is ext4.
+    vendor_partition = source_info["fstab"]["/vendor"]
+    check_first_block = vendor_partition.fs_type == "ext4"
+    disable_imgdiff = vendor_partition.fs_type == "squashfs"
+    vendor_diff = common.BlockDifference("vendor", vendor_tgt, vendor_src,
+                                         check_first_block,
+                                         version=blockimgdiff_version,
+                                         disable_imgdiff=disable_imgdiff)
+  else:
+    vendor_diff = None
+
+  AddCompatibilityArchiveIfTrebleEnabled(
+      target_zip, output_zip, target_info, source_info)
+
+  # Assertions (e.g. device properties check).
+  target_info.WriteDeviceAssertions(script, OPTIONS.oem_no_mount)
+  device_specific.IncrementalOTA_Assertions()
+
+  # Two-step incremental package strategy (in chronological order,
+  # which is *not* the order in which the generated script has
+  # things):
+  #
+  # if stage is not "2/3" or "3/3":
+  #    do verification on current system
+  #    write recovery image to boot partition
+  #    set stage to "2/3"
+  #    reboot to boot partition and restart recovery
+  # else if stage is "2/3":
+  #    write recovery image to recovery partition
+  #    set stage to "3/3"
+  #    reboot to recovery partition and restart recovery
+  # else:
+  #    (stage must be "3/3")
+  #    perform update:
+  #       patch system files, etc.
+  #       force full install of new boot image
+  #       set up system to update recovery partition on first boot
+  #    complete script normally
+  #    (allow recovery to mark itself finished and reboot)
+
+  if OPTIONS.two_step:
+    if not source_info.get("multistage_support"):
+      assert False, "two-step packages not supported by this build"
+    fs = source_info["fstab"]["/misc"]
+    assert fs.fs_type.upper() == "EMMC", \
+        "two-step packages only supported on devices with EMMC /misc partitions"
+    bcb_dev = {"bcb_dev" : fs.device}
+    common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data)
+    script.AppendExtra("""
+if get_stage("%(bcb_dev)s") == "2/3" then
+""" % bcb_dev)
+
+    # Stage 2/3: Write recovery image to /recovery (currently running /boot).
+    script.Comment("Stage 2/3")
+    script.AppendExtra("sleep(20);\n")
+    script.WriteRawImage("/recovery", "recovery.img")
+    script.AppendExtra("""
+set_stage("%(bcb_dev)s", "3/3");
+reboot_now("%(bcb_dev)s", "recovery");
+else if get_stage("%(bcb_dev)s") != "3/3" then
+""" % bcb_dev)
+
+    # Stage 1/3: (a) Verify the current system.
+    script.Comment("Stage 1/3")
+
+  # Dump fingerprints
+  script.Print("Source: {}".format(source_info.fingerprint))
+  script.Print("Target: {}".format(target_info.fingerprint))
+
+  script.Print("Verifying current system...")
+
+  device_specific.IncrementalOTA_VerifyBegin()
+
+  WriteFingerprintAssertion(script, target_info, source_info)
+
+  # Check the required cache size (i.e. stashed blocks).
+  size = []
+  if system_diff:
+    size.append(system_diff.required_cache)
+  if vendor_diff:
+    size.append(vendor_diff.required_cache)
+
+  if updating_boot:
+    boot_type, boot_device = common.GetTypeAndDevice("/boot", source_info)
+    d = common.Difference(target_boot, source_boot)
+    _, _, d = d.ComputePatch()
+    if d is None:
+      include_full_boot = True
+      common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
+    else:
+      include_full_boot = False
+
+      print("boot      target: %d  source: %d  diff: %d" % (
+          target_boot.size, source_boot.size, len(d)))
+
+      common.ZipWriteStr(output_zip, "patch/boot.img.p", d)
+
+      script.PatchCheck("%s:%s:%d:%s:%d:%s" %
+                        (boot_type, boot_device,
+                         source_boot.size, source_boot.sha1,
+                         target_boot.size, target_boot.sha1))
+      size.append(target_boot.size)
+
+  if size:
+    script.CacheFreeSpaceCheck(max(size))
+
+  device_specific.IncrementalOTA_VerifyEnd()
+
+  if OPTIONS.two_step:
+    # Stage 1/3: (b) Write recovery image to /boot.
+    _WriteRecoveryImageToBoot(script, output_zip)
+
+    script.AppendExtra("""
+set_stage("%(bcb_dev)s", "2/3");
+reboot_now("%(bcb_dev)s", "");
+else
+""" % bcb_dev)
+
+    # Stage 3/3: Make changes.
+    script.Comment("Stage 3/3")
+
+  # Verify the existing partitions.
+  system_diff.WriteVerifyScript(script, touched_blocks_only=True)
+  if vendor_diff:
+    vendor_diff.WriteVerifyScript(script, touched_blocks_only=True)
+
+  script.Comment("---- start making changes here ----")
+
+  device_specific.IncrementalOTA_InstallBegin()
+
+  system_diff.WriteScript(script, output_zip,
+                          progress=0.8 if vendor_diff else 0.9)
+
+  if vendor_diff:
+    vendor_diff.WriteScript(script, output_zip, progress=0.1)
+
+  if OPTIONS.two_step:
+    common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
+    script.WriteRawImage("/boot", "boot.img")
+    print("writing full boot image (forced by two-step mode)")
+
+  if not OPTIONS.two_step:
+    if updating_boot:
+      if include_full_boot:
+        print("boot image changed; including full.")
+        script.Print("Installing boot image...")
+        script.WriteRawImage("/boot", "boot.img")
+      else:
+        # Produce the boot image by applying a patch to the current
+        # contents of the boot partition, and write it back to the
+        # partition.
+        print("boot image changed; including patch.")
+        script.Print("Patching boot image...")
+        script.ShowProgress(0.1, 10)
+        script.ApplyPatch("%s:%s:%d:%s:%d:%s"
+                          % (boot_type, boot_device,
+                             source_boot.size, source_boot.sha1,
+                             target_boot.size, target_boot.sha1),
+                          "-",
+                          target_boot.size, target_boot.sha1,
+                          source_boot.sha1, "patch/boot.img.p")
+    else:
+      print("boot image unchanged; skipping.")
+
+  # Do device-specific installation (eg, write radio image).
+  device_specific.IncrementalOTA_InstallEnd()
+
+  if OPTIONS.extra_script is not None:
+    script.AppendExtra(OPTIONS.extra_script)
+
+  if OPTIONS.wipe_user_data:
+    script.Print("Erasing user data...")
+    script.FormatPartition("/data")
+
+  if OPTIONS.two_step:
+    script.AppendExtra("""
+set_stage("%(bcb_dev)s", "");
+endif;
+endif;
+""" % bcb_dev)
+
+  script.SetProgress(1)
+  # For downgrade OTAs, we prefer to use the update-binary in the source
+  # build that is actually newer than the one in the target build.
+  if OPTIONS.downgrade:
+    script.AddToZip(source_zip, output_zip, input_path=OPTIONS.updater_binary)
+  else:
+    script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary)
+  metadata["ota-required-cache"] = str(script.required_cache)
+
+  # We haven't written the metadata entry yet, which will be handled in
+  # FinalizeMetadata().
+  common.ZipClose(output_zip)
+
+  # Sign the generated zip package unless no_signing is specified.
+  needed_property_files = (
+      NonAbOtaPropertyFiles(),
+  )
+  FinalizeMetadata(metadata, staging_file, output_file, needed_property_files)
+
+
+def GetTargetFilesZipForSecondaryImages(input_file, skip_postinstall=False):
+  """Returns a target-files.zip file for generating secondary payload.
+
+  Although the original target-files.zip already contains secondary slot
+  images (i.e. IMAGES/system_other.img), we need to rename the files to the
+  ones without the _other suffix. Note that we cannot instead modify the names
+  in META/ab_partitions.txt, because there are no matching partitions on
+  device.
+
+  For the partitions that don't have secondary images, the ones for the primary
+  slot will be used. This ensures that we always have valid boot, vbmeta and
+  bootloader images in the inactive slot.
+
+  Args:
+    input_file: The input target-files.zip file.
+    skip_postinstall: Whether to skip copying the postinstall config file.
+
+  Returns:
+    The filename of the target-files.zip for generating secondary payload.
+  """
+  target_file = common.MakeTempFile(prefix="targetfiles-", suffix=".zip")
+  target_zip = zipfile.ZipFile(target_file, 'w', allowZip64=True)
+
+  input_tmp = common.UnzipTemp(input_file, UNZIP_PATTERN)
+  with zipfile.ZipFile(input_file, 'r') as input_zip:
+    infolist = input_zip.infolist()
+
+  for info in infolist:
+    unzipped_file = os.path.join(input_tmp, *info.filename.split('/'))
+    if info.filename == 'IMAGES/system_other.img':
+      common.ZipWrite(target_zip, unzipped_file, arcname='IMAGES/system.img')
+
+    # Primary images and friends need to be skipped explicitly.
+    elif info.filename in ('IMAGES/system.img',
+                           'IMAGES/system.map'):
+      pass
+
+    # Skip copying the postinstall config if requested.
+    elif skip_postinstall and info.filename == POSTINSTALL_CONFIG:
+      pass
+
+    elif info.filename.startswith(('META/', 'IMAGES/')):
+      common.ZipWrite(target_zip, unzipped_file, arcname=info.filename)
+
+  common.ZipClose(target_zip)
+
+  return target_file
+
+
+def GetTargetFilesZipWithoutPostinstallConfig(input_file):
+  """Returns a target-files.zip that's not containing postinstall_config.txt.
+
+  This allows brillo_update_payload script to skip writing all the postinstall
+  hooks in the generated payload. The input target-files.zip file will be
+  duplicated, with 'META/postinstall_config.txt' skipped. If input_file doesn't
+  contain the postinstall_config.txt entry, the input file will be returned.
+
+  Args:
+    input_file: The input target-files.zip filename.
+
+  Returns:
+    The filename of target-files.zip that doesn't contain postinstall config.
+  """
+  # We should only make a copy if postinstall_config entry exists.
+  with zipfile.ZipFile(input_file, 'r') as input_zip:
+    if POSTINSTALL_CONFIG not in input_zip.namelist():
+      return input_file
+
+  target_file = common.MakeTempFile(prefix="targetfiles-", suffix=".zip")
+  shutil.copyfile(input_file, target_file)
+  common.ZipDelete(target_file, POSTINSTALL_CONFIG)
+  return target_file
+
+
+def WriteABOTAPackageWithBrilloScript(target_file, output_file,
+                                      source_file=None):
+  """Generates an Android OTA package that has A/B update payload."""
+  # Stage the output zip package for package signing.
+  if not OPTIONS.no_signing:
+    staging_file = common.MakeTempFile(suffix='.zip')
+  else:
+    staging_file = output_file
+  output_zip = zipfile.ZipFile(staging_file, "w",
+                               compression=zipfile.ZIP_DEFLATED)
+
+  if source_file is not None:
+    target_info = BuildInfo(OPTIONS.target_info_dict, OPTIONS.oem_dicts)
+    source_info = BuildInfo(OPTIONS.source_info_dict, OPTIONS.oem_dicts)
+  else:
+    target_info = BuildInfo(OPTIONS.info_dict, OPTIONS.oem_dicts)
+    source_info = None
+
+  # Metadata to comply with Android OTA package format.
+  metadata = GetPackageMetadata(target_info, source_info)
+
+  if OPTIONS.skip_postinstall:
+    target_file = GetTargetFilesZipWithoutPostinstallConfig(target_file)
+
+  # Generate payload.
+  payload = Payload()
+
+  # Enforce a max timestamp this payload can be applied on top of.
+  if OPTIONS.downgrade:
+    max_timestamp = source_info.GetBuildProp("ro.build.date.utc")
+  else:
+    max_timestamp = metadata["post-timestamp"]
+  additional_args = ["--max_timestamp", max_timestamp]
+
+  payload.Generate(target_file, source_file, additional_args)
+
+  # Sign the payload.
+  payload_signer = PayloadSigner()
+  payload.Sign(payload_signer)
+
+  # Write the payload into output zip.
+  payload.WriteToZip(output_zip)
+
+  # Generate and include the secondary payload that installs secondary images
+  # (e.g. system_other.img).
+  if OPTIONS.include_secondary:
+    # We always include a full payload for the secondary slot, even when
+    # building an incremental OTA. See the comments for "--include_secondary".
+    secondary_target_file = GetTargetFilesZipForSecondaryImages(
+        target_file, OPTIONS.skip_postinstall)
+    secondary_payload = Payload(secondary=True)
+    secondary_payload.Generate(secondary_target_file,
+                               additional_args=additional_args)
+    secondary_payload.Sign(payload_signer)
+    secondary_payload.WriteToZip(output_zip)
+
+  # If dm-verity is supported for the device, copy the contents of the
+  # care_map into the A/B OTA package.
+  target_zip = zipfile.ZipFile(target_file, "r")
+  if (target_info.get("verity") == "true" or
+      target_info.get("avb_enable") == "true"):
+    care_map_path = "META/care_map.txt"
+    namelist = target_zip.namelist()
+    if care_map_path in namelist:
+      care_map_data = target_zip.read(care_map_path)
+      # In order to support streaming, care_map.txt needs to be packed as
+      # ZIP_STORED.
+      common.ZipWriteStr(output_zip, "care_map.txt", care_map_data,
+                         compress_type=zipfile.ZIP_STORED)
+    else:
+      print("Warning: cannot find care map file in target_file package")
+
+  AddCompatibilityArchiveIfTrebleEnabled(
+      target_zip, output_zip, target_info, source_info)
+
+  common.ZipClose(target_zip)
+
+  # We haven't written the metadata entry yet, which will be handled in
+  # FinalizeMetadata().
+  common.ZipClose(output_zip)
+
+  # AbOtaPropertyFiles intends to replace StreamingPropertyFiles, as it covers
+  # all the info of the latter. However, system updaters and OTA servers need
+  # time to switch to the new flag. We keep both of the flags for the
+  # P timeframe, and will remove StreamingPropertyFiles in a later release.
+  needed_property_files = (
+      AbOtaPropertyFiles(),
+      StreamingPropertyFiles(),
+  )
+  FinalizeMetadata(metadata, staging_file, output_file, needed_property_files)
+
+
+def main(argv):
+
+  def option_handler(o, a):
+    if o in ("-k", "--package_key"):
+      OPTIONS.package_key = a
+    elif o in ("-i", "--incremental_from"):
+      OPTIONS.incremental_source = a
+    elif o == "--full_radio":
+      OPTIONS.full_radio = True
+    elif o == "--full_bootloader":
+      OPTIONS.full_bootloader = True
+    elif o == "--wipe_user_data":
+      OPTIONS.wipe_user_data = True
+    elif o == "--downgrade":
+      OPTIONS.downgrade = True
+      OPTIONS.wipe_user_data = True
+    elif o == "--override_timestamp":
+      OPTIONS.downgrade = True
+    elif o in ("-o", "--oem_settings"):
+      OPTIONS.oem_source = a.split(',')
+    elif o == "--oem_no_mount":
+      OPTIONS.oem_no_mount = True
+    elif o in ("-e", "--extra_script"):
+      OPTIONS.extra_script = a
+    elif o in ("-t", "--worker_threads"):
+      if a.isdigit():
+        OPTIONS.worker_threads = int(a)
+      else:
+        raise ValueError("Cannot parse value %r for option %r - only "
+                         "integers are allowed." % (a, o))
+    elif o in ("-2", "--two_step"):
+      OPTIONS.two_step = True
+    elif o == "--include_secondary":
+      OPTIONS.include_secondary = True
+    elif o == "--no_signing":
+      OPTIONS.no_signing = True
+    elif o == "--verify":
+      OPTIONS.verify = True
+    elif o == "--block":
+      OPTIONS.block_based = True
+    elif o in ("-b", "--binary"):
+      OPTIONS.updater_binary = a
+    elif o == "--stash_threshold":
+      try:
+        OPTIONS.stash_threshold = float(a)
+      except ValueError:
+        raise ValueError("Cannot parse value %r for option %r - expecting "
+                         "a float" % (a, o))
+    elif o == "--log_diff":
+      OPTIONS.log_diff = a
+    elif o == "--payload_signer":
+      OPTIONS.payload_signer = a
+    elif o == "--payload_signer_args":
+      OPTIONS.payload_signer_args = shlex.split(a)
+    elif o == "--extracted_input_target_files":
+      OPTIONS.extracted_input = a
+    elif o == "--skip_postinstall":
+      OPTIONS.skip_postinstall = True
+    else:
+      return False
+    return True
+
+  args = common.ParseOptions(argv, __doc__,
+                             extra_opts="b:k:i:d:e:t:2o:",
+                             extra_long_opts=[
+                                 "package_key=",
+                                 "incremental_from=",
+                                 "full_radio",
+                                 "full_bootloader",
+                                 "wipe_user_data",
+                                 "downgrade",
+                                 "override_timestamp",
+                                 "extra_script=",
+                                 "worker_threads=",
+                                 "two_step",
+                                 "include_secondary",
+                                 "no_signing",
+                                 "block",
+                                 "binary=",
+                                 "oem_settings=",
+                                 "oem_no_mount",
+                                 "verify",
+                                 "stash_threshold=",
+                                 "log_diff=",
+                                 "payload_signer=",
+                                 "payload_signer_args=",
+                                 "extracted_input_target_files=",
+                                 "skip_postinstall",
+                             ], extra_option_handler=option_handler)
+
+  if len(args) != 2:
+    common.Usage(__doc__)
+    sys.exit(1)
+
+  if OPTIONS.downgrade:
+    # We should only allow downgrading incrementals (as opposed to full).
+    # Otherwise the device may go back to an arbitrary build with this full
+    # OTA package.
+    if OPTIONS.incremental_source is None:
+      raise ValueError("Cannot generate downgradable full OTAs")
+
+  # Load the build info dicts from the zip directly or the extracted input
+  # directory. We don't need to unzip the entire target-files zips, because
+  # they won't be needed for A/B OTAs (brillo_update_payload does that on its
+  # own). When loading the info dicts, we don't need to provide the second
+  # parameter to common.LoadInfoDict(); that parameter only allows replacing
+  # some properties, such as 'selinux_fc' and 'ramdisk_dir', with their actual
+  # paths, none of which is used during OTA generation.
+  if OPTIONS.extracted_input is not None:
+    OPTIONS.info_dict = common.LoadInfoDict(OPTIONS.extracted_input)
+  else:
+    with zipfile.ZipFile(args[0], 'r') as input_zip:
+      OPTIONS.info_dict = common.LoadInfoDict(input_zip)
+
+  if OPTIONS.verbose:
+    print("--- target info ---")
+    common.DumpInfoDict(OPTIONS.info_dict)
+
+  # Load the source build dict if applicable.
+  if OPTIONS.incremental_source is not None:
+    OPTIONS.target_info_dict = OPTIONS.info_dict
+    with zipfile.ZipFile(OPTIONS.incremental_source, 'r') as source_zip:
+      OPTIONS.source_info_dict = common.LoadInfoDict(source_zip)
+
+    if OPTIONS.verbose:
+      print("--- source info ---")
+      common.DumpInfoDict(OPTIONS.source_info_dict)
+
+  # Load OEM dicts if provided.
+  OPTIONS.oem_dicts = _LoadOemDicts(OPTIONS.oem_source)
+
+  ab_update = OPTIONS.info_dict.get("ab_update") == "true"
+
+  # Use the default key to sign the package if not specified with package_key.
+  # package_keys are needed on ab_updates, so always define them if an
+  # ab_update is getting created.
+  if not OPTIONS.no_signing or ab_update:
+    if OPTIONS.package_key is None:
+      OPTIONS.package_key = OPTIONS.info_dict.get(
+          "default_system_dev_certificate",
+          "build/target/product/security/testkey")
+    # Get signing keys
+    OPTIONS.key_passwords = common.GetKeyPasswords([OPTIONS.package_key])
+
+  if ab_update:
+    WriteABOTAPackageWithBrilloScript(
+        target_file=args[0],
+        output_file=args[1],
+        source_file=OPTIONS.incremental_source)
+
+    print("done.")
+    return
+
+  # Sanity check the loaded info dicts first.
+  if OPTIONS.info_dict.get("no_recovery") == "true":
+    raise common.ExternalError(
+        "--- target build has specified no recovery ---")
+
+  # Non-A/B OTAs rely on /cache partition to store temporary files.
+  cache_size = OPTIONS.info_dict.get("cache_size")
+  if cache_size is None:
+    print("--- can't determine the cache partition size ---")
+  OPTIONS.cache_size = cache_size
+
+  if OPTIONS.extra_script is not None:
+    OPTIONS.extra_script = open(OPTIONS.extra_script).read()
+
+  if OPTIONS.extracted_input is not None:
+    OPTIONS.input_tmp = OPTIONS.extracted_input
+  else:
+    print("unzipping target target-files...")
+    OPTIONS.input_tmp = common.UnzipTemp(args[0], UNZIP_PATTERN)
+  OPTIONS.target_tmp = OPTIONS.input_tmp
+
+  # If the caller explicitly specified the device-specific extensions path via
+  # -s / --device_specific, use that. Otherwise, use META/releasetools.py if it
+  # is present in the target target_files. Otherwise, take the path of the file
+  # from 'tool_extensions' in the info dict and look for that in the local
+  # filesystem, relative to the current directory.
+  if OPTIONS.device_specific is None:
+    from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py")
+    if os.path.exists(from_input):
+      print("(using device-specific extensions from target_files)")
+      OPTIONS.device_specific = from_input
+    else:
+      OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions")
+
+  if OPTIONS.device_specific is not None:
+    OPTIONS.device_specific = os.path.abspath(OPTIONS.device_specific)
+
+  # Generate a full OTA.
+  if OPTIONS.incremental_source is None:
+    with zipfile.ZipFile(args[0], 'r') as input_zip:
+      WriteFullOTAPackage(
+          input_zip,
+          output_file=args[1])
+
+  # Generate an incremental OTA.
+  else:
+    print("unzipping source target-files...")
+    OPTIONS.source_tmp = common.UnzipTemp(
+        OPTIONS.incremental_source, UNZIP_PATTERN)
+    with zipfile.ZipFile(args[0], 'r') as input_zip, \
+        zipfile.ZipFile(OPTIONS.incremental_source, 'r') as source_zip:
+      WriteBlockIncrementalOTAPackage(
+          input_zip,
+          source_zip,
+          output_file=args[1])
+
+    if OPTIONS.log_diff:
+      with open(OPTIONS.log_diff, 'w') as out_file:
+        import target_files_diff
+        target_files_diff.recursiveDiff(
+            '', OPTIONS.source_tmp, OPTIONS.input_tmp, out_file)
+
+  print("done.")
+
+
+if __name__ == '__main__':
+  try:
+    common.CloseInheritedPipes()
+    main(sys.argv[1:])
+  except common.ExternalError as e:
+    print("\n   ERROR: %s\n" % (e,))
+    sys.exit(1)
+  finally:
+    common.Cleanup()
diff --git a/src/support/ab_tools/scripts/rangelib.py b/src/support/ab_tools/scripts/rangelib.py
new file mode 100644
index 0000000..36becf4
--- /dev/null
+++ b/src/support/ab_tools/scripts/rangelib.py
@@ -0,0 +1,347 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import heapq
+import itertools
+
+
+__all__ = ["RangeSet"]
+
+
+class RangeSet(object):
+  """A RangeSet represents a set of non-overlapping ranges on integers.
+
+  Attributes:
+    monotonic: Whether the input has all its integers in increasing order.
+    extra: A dict that can be used by the caller, e.g. to store info that's
+        only meaningful to the caller.
+  """
+
+  def __init__(self, data=None):
+    self.monotonic = False
+    self._extra = {}
+    if isinstance(data, str):
+      self._parse_internal(data)
+    elif data:
+      assert len(data) % 2 == 0
+      self.data = tuple(self._remove_pairs(data))
+      self.monotonic = all(x < y for x, y in zip(self.data, self.data[1:]))
+    else:
+      self.data = ()
+
+  def __iter__(self):
+    for i in range(0, len(self.data), 2):
+      yield self.data[i:i+2]
+
+  def __eq__(self, other):
+    return self.data == other.data
+
+  def __ne__(self, other):
+    return self.data != other.data
+
+  def __nonzero__(self):
+    return bool(self.data)
+
+  __bool__ = __nonzero__  # Python 3 renamed __nonzero__ to __bool__.
+
+  def __str__(self):
+    if not self.data:
+      return "empty"
+    else:
+      return self.to_string()
+
+  def __repr__(self):
+    return '<RangeSet("' + self.to_string() + '")>'
+
+  @property
+  def extra(self):
+    return self._extra
+
+  @classmethod
+  def parse(cls, text):
+    """Parses a text string into a RangeSet.
+
+    The input text string consists of a space-separated list of blocks and
+    ranges, e.g. "10-20 30 35-40". Ranges are interpreted to include both their
+    ends (so the above example represents 18 individual blocks). Returns a
+    RangeSet object.
+
+    If the input has all its blocks in increasing order, then the 'monotonic'
+    attribute of the returned RangeSet will be set to True. For example the
+    input "10-20 30" is monotonic, but the input "15-20 30 10-14" is not, even
+    though they represent the same set of blocks (and the two RangeSets will
+    compare equal with ==).
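+
+    >>> RangeSet.parse("10-20 30").monotonic
+    True
+    >>> RangeSet.parse("15-20 30 10-14").monotonic
+    False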
+    """
+    return cls(text)
+
+  @classmethod
+  def parse_raw(cls, text):
+    """Parse a string generated by RangeSet.to_string_raw().
+
+    >>> RangeSet.parse_raw(RangeSet("0-9").to_string_raw())
+    <RangeSet("0-9")>
+    """
+
+    raw = [int(i) for i in text.split(',')]
+    assert raw[0] == len(raw[1:]), "Invalid raw string."
+
+    return cls(data=raw[1:])
+
+  def _parse_internal(self, text):
+    data = []
+    last = -1
+    monotonic = True
+    for p in text.split():
+      if "-" in p:
+        s, e = (int(x) for x in p.split("-"))
+        data.append(s)
+        data.append(e+1)
+        if last <= s <= e:
+          last = e
+        else:
+          monotonic = False
+      else:
+        s = int(p)
+        data.append(s)
+        data.append(s+1)
+        if last <= s:
+          last = s+1
+        else:
+          monotonic = False
+    data.sort()
+    self.data = tuple(self._remove_pairs(data))
+    self.monotonic = monotonic
+
+  @staticmethod
+  def _remove_pairs(source):
+    """Remove consecutive duplicate items to simplify the result.
+
+    [1, 2, 2, 5, 5, 10] will become [1, 10].
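+
+    >>> list(RangeSet._remove_pairs([1, 2, 2, 5, 5, 10]))
+    [1, 10]
+    """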
+    last = None
+    for i in source:
+      if i == last:
+        last = None
+      else:
+        if last is not None:
+          yield last
+        last = i
+    if last is not None:
+      yield last
+
+  def to_string(self):
+    out = []
+    for i in range(0, len(self.data), 2):
+      s, e = self.data[i:i+2]
+      if e == s+1:
+        out.append(str(s))
+      else:
+        out.append(str(s) + "-" + str(e-1))
+    return " ".join(out)
+
+  def to_string_raw(self):
+    assert self.data
+    return str(len(self.data)) + "," + ",".join(str(i) for i in self.data)
+
+  def union(self, other):
+    """Return a new RangeSet representing the union of this RangeSet
+    with the argument.
+
+    >>> RangeSet("10-19 30-34").union(RangeSet("18-29"))
+    <RangeSet("10-34")>
+    >>> RangeSet("10-19 30-34").union(RangeSet("22 32"))
+    <RangeSet("10-19 22 30-34")>
+    """
+    out = []
+    z = 0
+    for p, d in heapq.merge(zip(self.data, itertools.cycle((+1, -1))),
+                            zip(other.data, itertools.cycle((+1, -1)))):
+      if (z == 0 and d == 1) or (z == 1 and d == -1):
+        out.append(p)
+      z += d
+    return RangeSet(data=out)
+
+  def intersect(self, other):
+    """Return a new RangeSet representing the intersection of this
+    RangeSet with the argument.
+
+    >>> RangeSet("10-19 30-34").intersect(RangeSet("18-32"))
+    <RangeSet("18-19 30-32")>
+    >>> RangeSet("10-19 30-34").intersect(RangeSet("22-28"))
+    <RangeSet("")>
+    """
+    out = []
+    z = 0
+    for p, d in heapq.merge(zip(self.data, itertools.cycle((+1, -1))),
+                            zip(other.data, itertools.cycle((+1, -1)))):
+      if (z == 1 and d == 1) or (z == 2 and d == -1):
+        out.append(p)
+      z += d
+    return RangeSet(data=out)
+
+  def subtract(self, other):
+    """Return a new RangeSet representing subtracting the argument
+    from this RangeSet.
+
+    >>> RangeSet("10-19 30-34").subtract(RangeSet("18-32"))
+    <RangeSet("10-17 33-34")>
+    >>> RangeSet("10-19 30-34").subtract(RangeSet("22-28"))
+    <RangeSet("10-19 30-34")>
+    """
+
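+    # Same sweep, with the signs of the 'other' boundaries flipped so that its
+    # ranges count as -1 coverage; boundaries are emitted where only 'self'
+    # covers the point.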
+    out = []
+    z = 0
+    for p, d in heapq.merge(zip(self.data, itertools.cycle((+1, -1))),
+                            zip(other.data, itertools.cycle((-1, +1)))):
+      if (z == 0 and d == 1) or (z == 1 and d == -1):
+        out.append(p)
+      z += d
+    return RangeSet(data=out)
+
+  def overlaps(self, other):
+    """Returns true if the argument has a nonempty overlap with this
+    RangeSet.
+
+    >>> RangeSet("10-19 30-34").overlaps(RangeSet("18-32"))
+    True
+    >>> RangeSet("10-19 30-34").overlaps(RangeSet("22-28"))
+    False
+    """
+
+    # This is like intersect, but we can stop as soon as we discover the
+    # output is going to be nonempty.
+    z = 0
+    for _, d in heapq.merge(zip(self.data, itertools.cycle((+1, -1))),
+                            zip(other.data, itertools.cycle((+1, -1)))):
+      if (z == 1 and d == 1) or (z == 2 and d == -1):
+        return True
+      z += d
+    return False
+
+  def size(self):
+    """Returns the total size of the RangeSet (ie, how many integers
+    are in the set).
+
+    >>> RangeSet("10-19 30-34").size()
+    15
+    """
+
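+    # self.data alternates start/end points; summing the ends and subtracting
+    # the starts yields the total count of covered integers.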
+    total = 0
+    for i, p in enumerate(self.data):
+      if i % 2:
+        total += p
+      else:
+        total -= p
+    return total
+
+  def map_within(self, other):
+    """'other' should be a subset of 'self'.  Returns a RangeSet
+    representing what 'other' would get translated to if the integers
+    of 'self' were translated down to be contiguous starting at zero.
+
+    >>> RangeSet("0-9").map_within(RangeSet("3-4"))
+    <RangeSet("3-4")>
+    >>> RangeSet("10-19").map_within(RangeSet("13-14"))
+    <RangeSet("3-4")>
+    >>> RangeSet("10-19 30-39").map_within(RangeSet("17-19 30-32"))
+    <RangeSet("7-12")>
+    >>> RangeSet("10-19 30-39").map_within(RangeSet("12-13 17-19 30-32"))
+    <RangeSet("2-3 7-12")>
+    """
+
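+    # Merge the boundary points of both sets. 'self' boundaries are tagged
+    # -5/+5 and 'other' boundaries -1/+1, so at equal positions a 'self' start
+    # sorts first and a 'self' end sorts last. 'offset' accumulates the sizes
+    # of the 'self' ranges already passed; an 'other' boundary thus maps to
+    # that accumulated size plus its distance into the current 'self' range.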
+    out = []
+    offset = 0
+    start = None
+    for p, d in heapq.merge(zip(self.data, itertools.cycle((-5, +5))),
+                            zip(other.data, itertools.cycle((-1, +1)))):
+      if d == -5:
+        start = p
+      elif d == +5:
+        offset += p-start
+        start = None
+      else:
+        out.append(offset + p - start)
+    return RangeSet(data=out)
+
+  def extend(self, n):
+    """Extend the RangeSet by 'n' blocks.
+
+    The lower bound is guaranteed to be non-negative.
+
+    >>> RangeSet("0-9").extend(1)
+    <RangeSet("0-10")>
+    >>> RangeSet("10-19").extend(15)
+    <RangeSet("0-34")>
+    >>> RangeSet("10-19 30-39").extend(4)
+    <RangeSet("6-23 26-43")>
+    >>> RangeSet("10-19 30-39").extend(10)
+    <RangeSet("0-49")>
+    """
+    out = self
+    for i in range(0, len(self.data), 2):
+      s, e = self.data[i:i+2]
+      s1 = max(0, s - n)
+      e1 = e + n
+      out = out.union(RangeSet(str(s1) + "-" + str(e1-1)))
+    return out
+
+  def first(self, n):
+    """Return the RangeSet that contains at most the first 'n' integers.
+
+    >>> RangeSet("0-9").first(1)
+    <RangeSet("0")>
+    >>> RangeSet("10-19").first(5)
+    <RangeSet("10-14")>
+    >>> RangeSet("10-19").first(15)
+    <RangeSet("10-19")>
+    >>> RangeSet("10-19 30-39").first(3)
+    <RangeSet("10-12")>
+    >>> RangeSet("10-19 30-39").first(15)
+    <RangeSet("10-19 30-34")>
+    >>> RangeSet("10-19 30-39").first(30)
+    <RangeSet("10-19 30-39")>
+    >>> RangeSet("0-9").first(0)
+    <RangeSet("")>
+    """
+
+    if self.size() <= n:
+      return self
+
+    out = []
+    for s, e in self:
+      if e - s >= n:
+        out += (s, s+n)
+        break
+      else:
+        out += (s, e)
+        n -= e - s
+    return RangeSet(data=out)
+
+  def next_item(self):
+    """Return the next integer represented by the RangeSet.
+
+    >>> list(RangeSet("0-9").next_item())
+    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+    >>> list(RangeSet("10-19 3-5").next_item())
+    [3, 4, 5, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
+    >>> list(RangeSet("10-19 3 5 7").next_item())
+    [3, 5, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
+    """
+    for s, e in self:
+      for element in range(s, e):
+        yield element
+
+
+if __name__ == "__main__":
+  import doctest
+  doctest.testmod()
diff --git a/src/support/ab_tools/scripts/sparse_img.py b/src/support/ab_tools/scripts/sparse_img.py
new file mode 100644
index 0000000..6432ca4
--- /dev/null
+++ b/src/support/ab_tools/scripts/sparse_img.py
@@ -0,0 +1,323 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import bisect
+import os
+import struct
+import threading
+from hashlib import sha1
+
+import rangelib
+
+
+class SparseImage(object):
+  """Wraps a sparse image file into an image object.
+
+  Wraps a sparse image file (and optional file map and clobbered_blocks) into
+  an image object suitable for passing to BlockImageDiff. file_map contains
+  the mapping between files and their blocks. clobbered_blocks contains the set
+  of blocks that should always be written to the target regardless of the old
+  contents (i.e. copying instead of patching). clobbered_blocks should be in
+  the form of a string like "0" or "0 1-5 8".
+  """
+
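+  # A hedged usage sketch (the file names are hypothetical):
+  #
+  #   simg = SparseImage("system.img", file_map_fn="system.map")
+  #   print(simg.care_map)      # RangeSet of blocks that carry data
+  #   print(simg.TotalSha1())   # SHA-1 hex digest over the 'care' regions
+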
+  def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None,
+               mode="rb", build_map=True, allow_shared_blocks=False):
+    self.simg_f = f = open(simg_fn, mode)
+
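+    # The 28-byte sparse image header, little-endian: a 4-byte magic; 16-bit
+    # major/minor versions, file header size and chunk header size; then
+    # 32-bit block size, total blocks, total chunks and image checksum (the
+    # checksum, header[8], is unused here).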
+    header_bin = f.read(28)
+    header = struct.unpack("<I4H4I", header_bin)
+
+    magic = header[0]
+    major_version = header[1]
+    minor_version = header[2]
+    file_hdr_sz = header[3]
+    chunk_hdr_sz = header[4]
+    self.blocksize = blk_sz = header[5]
+    self.total_blocks = total_blks = header[6]
+    self.total_chunks = total_chunks = header[7]
+
+    if magic != 0xED26FF3A:
+      raise ValueError("Magic should be 0xED26FF3A but is 0x%08X" % (magic,))
+    if major_version != 1 or minor_version != 0:
+      raise ValueError("I know about version 1.0, but this is version %u.%u" %
+                       (major_version, minor_version))
+    if file_hdr_sz != 28:
+      raise ValueError("File header size was expected to be 28, but is %u." %
+                       (file_hdr_sz,))
+    if chunk_hdr_sz != 12:
+      raise ValueError("Chunk header size was expected to be 12, but is %u." %
+                       (chunk_hdr_sz,))
+
+    print("Total of %u %u-byte output blocks in %u input chunks."
+          % (total_blks, blk_sz, total_chunks))
+
+    if not build_map:
+      return
+
+    pos = 0   # in blocks
+    care_data = []
+    self.offset_map = offset_map = []
+    self.clobbered_blocks = rangelib.RangeSet(data=clobbered_blocks)
+
+    for i in range(total_chunks):
+      header_bin = f.read(12)
+      header = struct.unpack("<2H2I", header_bin)
+      chunk_type = header[0]
+      chunk_sz = header[2]
+      total_sz = header[3]
+      data_sz = total_sz - 12
+
+      if chunk_type == 0xCAC1:
+        if data_sz != (chunk_sz * blk_sz):
+          raise ValueError(
+              "Raw chunk input size (%u) does not match output size (%u)" %
+              (data_sz, chunk_sz * blk_sz))
+        else:
+          care_data.append(pos)
+          care_data.append(pos + chunk_sz)
+          offset_map.append((pos, chunk_sz, f.tell(), None))
+          pos += chunk_sz
+          f.seek(data_sz, os.SEEK_CUR)
+
+      elif chunk_type == 0xCAC2:
+        fill_data = f.read(4)
+        care_data.append(pos)
+        care_data.append(pos + chunk_sz)
+        offset_map.append((pos, chunk_sz, None, fill_data))
+        pos += chunk_sz
+
+      elif chunk_type == 0xCAC3:
+        if data_sz != 0:
+          raise ValueError("Don't care chunk input size is non-zero (%u)" %
+                           (data_sz))
+        else:
+          pos += chunk_sz
+
+      elif chunk_type == 0xCAC4:
+        raise ValueError("CRC32 chunks are not supported")
+
+      else:
+        raise ValueError("Unknown chunk type 0x%04X not supported" %
+                         (chunk_type,))
+
+    self.generator_lock = threading.Lock()
+
+    self.care_map = rangelib.RangeSet(care_data)
+    print("care data: %s" % (self.care_map.to_string_raw(),))
+    self.offset_index = [i[0] for i in offset_map]
+
+    # Bug: 20881595
+    # Introduce extended blocks as a workaround for the bug. dm-verity may
+    # touch blocks that are not in the care_map due to block device
+    # read-ahead. It will fail if such blocks contain non-zeroes. We zero out
+    # the extended blocks explicitly to avoid dm-verity failures. 512 blocks
+    # are the maximum read-ahead we configure for dm-verity block devices.
+    extended = self.care_map.extend(512)
+    all_blocks = rangelib.RangeSet(data=(0, self.total_blocks))
+    extended = extended.intersect(all_blocks).subtract(self.care_map)
+    self.extended = extended
+
+    if file_map_fn:
+      self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks,
+                            allow_shared_blocks)
+    else:
+      self.file_map = {"__DATA": self.care_map}
+
+  def AppendFillChunk(self, data, blocks):
+    f = self.simg_f
+
+    # Append a fill chunk
+    f.seek(0, os.SEEK_END)
+    f.write(struct.pack("<2H3I", 0xCAC2, 0, blocks, 16, data))
+
+    # Update the sparse header
+    self.total_blocks += blocks
+    self.total_chunks += 1
+
+    f.seek(16, os.SEEK_SET)
+    f.write(struct.pack("<2I", self.total_blocks, self.total_chunks))
+
+  def RangeSha1(self, ranges):
+    h = sha1()
+    for data in self._GetRangeData(ranges):
+      h.update(data)
+    return h.hexdigest()
+
+  def ReadRangeSet(self, ranges):
+    return [d for d in self._GetRangeData(ranges)]
+
+  def TotalSha1(self, include_clobbered_blocks=False):
+    """Return the SHA-1 hash of all data in the 'care' regions.
+
+    If include_clobbered_blocks is True, it returns the hash including the
+    clobbered_blocks."""
+    ranges = self.care_map
+    if not include_clobbered_blocks:
+      ranges = ranges.subtract(self.clobbered_blocks)
+    return self.RangeSha1(ranges)
+
+  def WriteRangeDataToFd(self, ranges, fd):
+    for data in self._GetRangeData(ranges):
+      fd.write(data)
+
+  def _GetRangeData(self, ranges):
+    """Generator that produces all the image data in 'ranges'.  The
+    number of individual pieces returned is arbitrary (and in
+    particular is not necessarily equal to the number of ranges in
+    'ranges'.
+
+    Use a lock to protect the generator so that we will not run two
+    instances of this generator on the same object simultaneously."""
+
+    f = self.simg_f
+    with self.generator_lock:
+      for s, e in ranges:
+        to_read = e-s
+        idx = bisect.bisect_right(self.offset_index, s) - 1
+        chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
+
+        # for the first chunk we may be starting partway through it.
+        remain = chunk_len - (s - chunk_start)
+        this_read = min(remain, to_read)
+        if filepos is not None:
+          p = filepos + ((s - chunk_start) * self.blocksize)
+          f.seek(p, os.SEEK_SET)
+          yield f.read(this_read * self.blocksize)
+        else:
+          yield fill_data * (this_read * (self.blocksize >> 2))
+        to_read -= this_read
+
+        while to_read > 0:
+          # continue with following chunks if this range spans multiple chunks.
+          idx += 1
+          chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
+          this_read = min(chunk_len, to_read)
+          if filepos is not None:
+            f.seek(filepos, os.SEEK_SET)
+            yield f.read(this_read * self.blocksize)
+          else:
+            yield fill_data * (this_read * (self.blocksize >> 2))
+          to_read -= this_read
+
+  def LoadFileBlockMap(self, fn, clobbered_blocks, allow_shared_blocks):
+    """Loads the given block map file.
+
+    Args:
+      fn: The filename of the block map file.
+      clobbered_blocks: A RangeSet instance for the clobbered blocks.
+      allow_shared_blocks: Whether having shared blocks is allowed.
+    """
+    remaining = self.care_map
+    self.file_map = out = {}
+
+    with open(fn) as f:
+      for line in f:
+        fn, ranges = line.split(None, 1)
+        ranges = rangelib.RangeSet.parse(ranges)
+
+        if allow_shared_blocks:
+          # Find the shared blocks that have been claimed by others.
+          shared_blocks = ranges.subtract(remaining)
+          if shared_blocks:
+            ranges = ranges.subtract(shared_blocks)
+            if not ranges:
+              continue
+
+            # Tag the entry so that we can skip applying imgdiff on this file.
+            ranges.extra['uses_shared_blocks'] = True
+
+        out[fn] = ranges
+        assert ranges.size() == ranges.intersect(remaining).size()
+
+        # Currently we assume that blocks in clobbered_blocks are not part of
+        # any file.
+        assert not clobbered_blocks.overlaps(ranges)
+        remaining = remaining.subtract(ranges)
+
+    remaining = remaining.subtract(clobbered_blocks)
+
+    # For all the remaining blocks in the care_map (i.e., those that
+    # aren't part of the data for any file nor part of the clobbered_blocks),
+    # divide them into blocks that are all zero and blocks that aren't.
+    # (Zero blocks are handled specially because (1) there are usually
+    # a lot of them and (2) bsdiff handles files with long sequences of
+    # repeated bytes especially poorly.)
+
+    zero_blocks = []
+    nonzero_blocks = []
+    reference = '\0' * self.blocksize
+
+    # Workaround for bug 23227672. For squashfs, we don't have a system.map,
+    # so the whole system image will be treated as a single file. But due to
+    # an unknown bug, the updater gets killed by the OOM killer when writing
+    # the patched image back to flash (observed on lenok-userdebug MEA49).
+    # Until there is a real fix, we evenly divide the non-zero blocks into
+    # smaller groups. Note that each block contributes a (start, end) pair to
+    # the list below, so the 1024-entry threshold yields groups of 512 blocks
+    # (2 MiB at a 4 KiB block size).
+    # Bug: 23227672
+    MAX_BLOCKS_PER_GROUP = 1024
+    nonzero_groups = []
+
+    f = self.simg_f
+    for s, e in remaining:
+      for b in range(s, e):
+        idx = bisect.bisect_right(self.offset_index, b) - 1
+        chunk_start, _, filepos, fill_data = self.offset_map[idx]
+        if filepos is not None:
+          filepos += (b-chunk_start) * self.blocksize
+          f.seek(filepos, os.SEEK_SET)
+          data = f.read(self.blocksize)
+        else:
+          if fill_data == reference[:4]:   # fill with all zeros
+            data = reference
+          else:
+            data = None
+
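+        # RangeSet(data=...) consumes a flat list of half-open [start, end)
+        # pairs, so each single block b is recorded as the pair (b, b+1).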
+        if data == reference:
+          zero_blocks.append(b)
+          zero_blocks.append(b+1)
+        else:
+          nonzero_blocks.append(b)
+          nonzero_blocks.append(b+1)
+
+          if len(nonzero_blocks) >= MAX_BLOCKS_PER_GROUP:
+            nonzero_groups.append(nonzero_blocks)
+            # Clear the list.
+            nonzero_blocks = []
+
+    if nonzero_blocks:
+      nonzero_groups.append(nonzero_blocks)
+      nonzero_blocks = []
+
+    assert zero_blocks or nonzero_groups or clobbered_blocks
+
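+    # The resulting file_map uses reserved pseudo-filenames for regions that
+    # belong to no real file: "__ZERO" for the all-zero blocks,
+    # "__NONZERO-<i>" for the grouped non-zero blocks, and "__COPY" for the
+    # clobbered blocks ("__DATA" covers the whole care_map when no block map
+    # file is given).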
+    if zero_blocks:
+      out["__ZERO"] = rangelib.RangeSet(data=zero_blocks)
+    if nonzero_groups:
+      for i, blocks in enumerate(nonzero_groups):
+        out["__NONZERO-%d" % i] = rangelib.RangeSet(data=blocks)
+    if clobbered_blocks:
+      out["__COPY"] = clobbered_blocks
+
+  def ResetFileMap(self):
+    """Throw away the file map and treat the entire image as
+    undifferentiated data."""
+    self.file_map = {"__DATA": self.care_map}
diff --git a/src/support/ab_tools/security/testkey.pk8 b/src/support/ab_tools/security/testkey.pk8
new file mode 100644
index 0000000..586c1bd
--- /dev/null
+++ b/src/support/ab_tools/security/testkey.pk8
Binary files differ
diff --git a/src/support/ab_tools/security/testkey.x509.pem b/src/support/ab_tools/security/testkey.x509.pem
new file mode 100644
index 0000000..e242d83
--- /dev/null
+++ b/src/support/ab_tools/security/testkey.x509.pem
@@ -0,0 +1,27 @@
+-----BEGIN CERTIFICATE-----
+MIIEqDCCA5CgAwIBAgIJAJNurL4H8gHfMA0GCSqGSIb3DQEBBQUAMIGUMQswCQYD
+VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNTW91bnRhaW4g
+VmlldzEQMA4GA1UEChMHQW5kcm9pZDEQMA4GA1UECxMHQW5kcm9pZDEQMA4GA1UE
+AxMHQW5kcm9pZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTAe
+Fw0wODAyMjkwMTMzNDZaFw0zNTA3MTcwMTMzNDZaMIGUMQswCQYDVQQGEwJVUzET
+MBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzEQMA4G
+A1UEChMHQW5kcm9pZDEQMA4GA1UECxMHQW5kcm9pZDEQMA4GA1UEAxMHQW5kcm9p
+ZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTCCASAwDQYJKoZI
+hvcNAQEBBQADggENADCCAQgCggEBANaTGQTexgskse3HYuDZ2CU+Ps1s6x3i/waM
+qOi8qM1r03hupwqnbOYOuw+ZNVn/2T53qUPn6D1LZLjk/qLT5lbx4meoG7+yMLV4
+wgRDvkxyGLhG9SEVhvA4oU6Jwr44f46+z4/Kw9oe4zDJ6pPQp8PcSvNQIg1QCAcy
+4ICXF+5qBTNZ5qaU7Cyz8oSgpGbIepTYOzEJOmc3Li9kEsBubULxWBjf/gOBzAzU
+RNps3cO4JFgZSAGzJWQTT7/emMkod0jb9WdqVA2BVMi7yge54kdVMxHEa5r3b97s
+zI5p58ii0I54JiCUP5lyfTwE/nKZHZnfm644oLIXf6MdW2r+6R8CAQOjgfwwgfkw
+HQYDVR0OBBYEFEhZAFY9JyxGrhGGBaR0GawJyowRMIHJBgNVHSMEgcEwgb6AFEhZ
+AFY9JyxGrhGGBaR0GawJyowRoYGapIGXMIGUMQswCQYDVQQGEwJVUzETMBEGA1UE
+CBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzEQMA4GA1UEChMH
+QW5kcm9pZDEQMA4GA1UECxMHQW5kcm9pZDEQMA4GA1UEAxMHQW5kcm9pZDEiMCAG
+CSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbYIJAJNurL4H8gHfMAwGA1Ud
+EwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAHqvlozrUMRBBVEY0NqrrwFbinZa
+J6cVosK0TyIUFf/azgMJWr+kLfcHCHJsIGnlw27drgQAvilFLAhLwn62oX6snb4Y
+LCBOsVMR9FXYJLZW2+TcIkCRLXWG/oiVHQGo/rWuWkJgU134NDEFJCJGjDbiLCpe
++ZTWHdcwauTJ9pUbo8EvHRkU3cYfGmLaLfgn9gP+pWA7LFQNvXwBnDa6sppCccEX
+31I828XzgXpJ4O+mDL1/dBd+ek8ZPUP0IgdyZm5MTYPhvVqGCHzzTy3sIeJFymwr
+sBbmg2OAUNLEMO6nwmocSdN2ClirfxqCzJOLSDE4QyS9BAH6EhY6UFcOaE0=
+-----END CERTIFICATE-----
diff --git a/src/support/bootctrl/Makefile.am b/src/support/bootctrl/Makefile.am
new file mode 100644
index 0000000..900a7f8
--- /dev/null
+++ b/src/support/bootctrl/Makefile.am
@@ -0,0 +1,3 @@
+SUBDIRS = \
+  include \
+  mediatek/bootctrl
\ No newline at end of file
diff --git a/src/support/bootctrl/configure.ac b/src/support/bootctrl/configure.ac
new file mode 100644
index 0000000..ddac88c
--- /dev/null
+++ b/src/support/bootctrl/configure.ac
@@ -0,0 +1,11 @@
+AC_INIT([bootctrl], [1.0])
+AM_INIT_AUTOMAKE([foreign subdir-objects])
+AC_PROG_CXX
+AM_PROG_AS
+LT_INIT
+AC_CONFIG_FILES([
+  Makefile
+  include/Makefile
+  mediatek/bootctrl/Makefile
+])
+AC_OUTPUT
diff --git a/src/support/bootctrl/include/Makefile.am b/src/support/bootctrl/include/Makefile.am
new file mode 100644
index 0000000..41115f0
--- /dev/null
+++ b/src/support/bootctrl/include/Makefile.am
@@ -0,0 +1,3 @@
+nobase_include_HEADERS = \
+	hardware/boot_control.h \
+	hardware/hardware.h
diff --git a/src/support/bootctrl/mediatek/bootctrl/Makefile.am b/src/support/bootctrl/mediatek/bootctrl/Makefile.am
new file mode 100644
index 0000000..676c581
--- /dev/null
+++ b/src/support/bootctrl/mediatek/bootctrl/Makefile.am
@@ -0,0 +1,12 @@
+lib_LIBRARIES = libbootctrl.a
+
+libbootctrl_a_SOURCES = \
+	bootctrl.cpp \
+	avb/libavb/avb_util.c \
+	avb/libavb/avb_crc32.c
+
+libbootctrl_a_CFLAGS = -Wall \
+		       -Werror
+
+libbootctrl_a_CPPFLAGS = \
+	-I$(top_srcdir)/include
diff --git a/src/support/bootctrl/mediatek/bootctrl/bootctrl.cpp b/src/support/bootctrl/mediatek/bootctrl/bootctrl.cpp
new file mode 100644
index 0000000..8f0e948
--- /dev/null
+++ b/src/support/bootctrl/mediatek/bootctrl/bootctrl.cpp
@@ -0,0 +1,530 @@
+/* Copyright Statement:
+ *
+ * This software/firmware and related documentation ("MediaTek Software") are
+ * protected under relevant copyright laws. The information contained herein is
+ * confidential and proprietary to MediaTek Inc. and/or its licensors. Without
+ * the prior written permission of MediaTek inc. and/or its licensors, any
+ * reproduction, modification, use or disclosure of MediaTek Software, and
+ * information contained herein, in whole or in part, shall be strictly
+ * prohibited.
+ *
+ * SPDX-License-Identifier: MediaTekProprietary
+ * MediaTek Inc. (C) 2016. All rights reserved.
+ *
+ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
+ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
+ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER
+ * ON AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL
+ * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR
+ * NONINFRINGEMENT. NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH
+ * RESPECT TO THE SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY,
+ * INCORPORATED IN, OR SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES
+ * TO LOOK ONLY TO SUCH THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO.
+ * RECEIVER EXPRESSLY ACKNOWLEDGES THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO
+ * OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES CONTAINED IN MEDIATEK
+ * SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE
+ * RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
+ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S
+ * ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE
+ * RELEASED HEREUNDER WILL BE, AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE
+ * MEDIATEK SOFTWARE AT ISSUE, OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE
+ * CHARGE PAID BY RECEIVER TO MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
+ *
+ * The following software/firmware and/or related documentation ("MediaTek
+ * Software") have been modified by MediaTek Inc. All revisions are subject to
+ * any receiver's applicable license agreements with MediaTek Inc.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <hardware/hardware.h>
+#include <hardware/boot_control.h>
+//#include <android-base/logging.h>
+//#include <cutils/properties.h>
+#define LOG_TAG "bootctrlHAL"
+//#include <log/log.h>
+//#include <utils/Log.h>
+
+#include "bootctrl.h"
+#if !defined(ARCH_X86)
+#include "sd_misc.h"
+#endif
+//#include <fstab/fstab.h>
+#include "avb/libavb/avb_util.h"
+
+#if MTK_BOOTDEV_TYPE == BOOTDEV_TYPE_NAND
+extern "C"{
+    #include "mtk_device_wrap.h"
+}
+#endif
+
+#define FSTAB_SUPPORT 0
+// Debug for update_engine_sideload
+#define ALOGE printf
+#define ALOGI printf
+static struct fstab* fstab = NULL;
+static const char *blk_dev_path = "/dev/disk/by-partlabel/misc";
+#if FSTAB_SUPPORT
+static void free_fstab(void)
+{
+    fs_mgr_free_fstab(fstab);
+}
+
+
+static char *get_device_path(const char *mount_point)
+{
+    struct fstab_rec *rec = NULL;
+    char *source = NULL;
+
+    rec = fs_mgr_get_entry_for_mount_point(fstab, mount_point);
+    if (!rec) {
+        ALOGE("%s failed to get entry for %s \n", __func__ , mount_point);
+        return NULL;
+    }
+
+    source = strdup(rec->blk_device);
+    return source;
+}
+#endif
+static int bootctrl_read_metadata(AvbABData *bctrl)
+{
+    int fd, err;
+    ssize_t sz, size;
+    char *buf = (char *)bctrl;
+
+#if MTK_BOOTDEV_TYPE == BOOTDEV_TYPE_NAND
+    fd = mtk_device_wrap_open(blk_dev_path, O_RDONLY);
+#else
+    fd = open(blk_dev_path, O_RDONLY);
+#endif
+
+    if (fd < 0) {
+        err = errno;
+        ALOGE("%s Error opening metadata file: %s\n", __func__ ,strerror(errno));
+        return -err;
+    }
+#if MTK_BOOTDEV_TYPE == BOOTDEV_TYPE_NAND
+    if (mtk_device_wrap_seek(fd, OFFSETOF_SLOT_SUFFIX, SEEK_SET) < 0)
+#else
+    if (lseek(fd, OFFSETOF_SLOT_SUFFIX, SEEK_SET) < 0)
+#endif
+    {
+        err = errno;
+        ALOGE("%s Error seeking to metadata offset: %s\n", __func__ ,strerror(errno));
+#if MTK_BOOTDEV_TYPE == BOOTDEV_TYPE_NAND
+        mtk_device_wrap_close(fd);
+#else
+        close(fd);
+#endif
+        return -err;
+    }
+    size = sizeof(AvbABData);
+    do {
+#if MTK_BOOTDEV_TYPE == BOOTDEV_TYPE_NAND
+        sz = mtk_device_wrap_read(fd, buf, size);
+#else
+        sz = read(fd, buf, size);
+#endif
+        if (sz == 0) {
+            break;
+        } else if (sz < 0) {
+            if (errno == EINTR) {
+                continue;
+            }
+            err = -errno;
+            ALOGE("%s Error reading metadata file\n", __func__);
+#if MTK_BOOTDEV_TYPE == BOOTDEV_TYPE_NAND
+            mtk_device_wrap_close(fd);
+#else
+            close(fd);
+#endif
+            return err;
+        }
+        size -= sz;
+        buf += sz;
+    } while(size > 0);
+
+#if MTK_BOOTDEV_TYPE == BOOTDEV_TYPE_NAND
+    mtk_device_wrap_close(fd);
+#else
+    close(fd);
+#endif
+
+    /* Check bootctrl magic number */
+    if (memcmp(bctrl->magic, AVB_AB_MAGIC, AVB_AB_MAGIC_LEN) != 0) {
+        /* magic is a 4-byte array with no terminator, so cap the print. */
+        ALOGE("metadata is not initialised or is corrupted (magic %.4s).\n",
+              (const char *)bctrl->magic);
+        return -EIO;
+    }
+    return 0;
+}
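+
+/*
+ * Layout note: the AvbABData blob above is read from byte offset
+ * OFFSETOF_SLOT_SUFFIX (2048) of the misc partition, i.e. from the
+ * slot_suffix area of the bootloader message, and is only trusted after
+ * its "\0AB0" magic has been verified.
+ */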
+
+static int bootctrl_write_metadata(AvbABData *bctrl)
+{
+    int fd, err;
+    ssize_t sz, size;
+    char *buf = (char *)bctrl;
+#if MTK_BOOTDEV_TYPE == BOOTDEV_TYPE_NAND
+    char *tmpbuf = NULL;
+#endif
+
+    ALOGI("Enter in bootctrl: bootctrl_write_metadata \n");
+
+#if MTK_BOOTDEV_TYPE == BOOTDEV_TYPE_NAND
+    fd = mtk_device_wrap_open(blk_dev_path, O_RDWR);
+#else
+    fd = open(blk_dev_path, O_RDWR);
+#endif
+    if (fd < 0) {
+        err = errno;
+        ALOGE("%s Error opening metadata file: %s\n", __func__,strerror(errno));
+        return -err;
+    }
+
+#if MTK_BOOTDEV_TYPE == BOOTDEV_TYPE_NAND
+    if (mtk_device_wrap_seek(fd, OFFSETOF_SLOT_SUFFIX, SEEK_SET) < 0)
+#else
+    if (lseek(fd, OFFSETOF_SLOT_SUFFIX, SEEK_SET) < 0)
+#endif
+    {
+        err = errno;
+        ALOGE("%s Error seeking to metadata offset: %s\n", __func__ ,strerror(errno));
+#if MTK_BOOTDEV_TYPE == BOOTDEV_TYPE_NAND
+        mtk_device_wrap_close(fd);
+#else
+        close(fd);
+#endif
+        return -err;
+    }
+
+    bctrl->crc32 = avb_htobe32(
+             avb_crc32((const uint8_t*)bctrl, sizeof(AvbABData) - sizeof(uint32_t)));
+
+    size = sizeof(AvbABData);
+#if MTK_BOOTDEV_TYPE == BOOTDEV_TYPE_NAND
+    tmpbuf = (char *)malloc(sizeof(char)* OFFSETOF_SLOT_SUFFIX);
+    if (tmpbuf == NULL){
+        /* Capture errno before further calls can clobber it. */
+        err = -errno;
+        ALOGE("Error Writing metadata: malloc tmpbuf failed\n");
+        mtk_device_wrap_close(fd);
+        return err;
+    }
+#endif
+
+    do {
+#if MTK_BOOTDEV_TYPE == BOOTDEV_TYPE_NAND
+        memset(tmpbuf, 0xFF, OFFSETOF_SLOT_SUFFIX);
+        if (mtk_device_wrap_seek(fd, 0, SEEK_SET) < 0){
+            err = errno;
+            ALOGE("%s Error seeking to metadata offset: %s\n", __func__ ,strerror(errno));
+            mtk_device_wrap_close(fd);
+            free(tmpbuf);
+            return -err;
+        }
+        if (mtk_device_wrap_write_force(fd, tmpbuf, OFFSETOF_SLOT_SUFFIX) < 0){
+            err = -errno;
+            ALOGE("%s Error Writing metadata file\n",__func__);
+            mtk_device_wrap_close(fd);
+            free(tmpbuf);
+            return err;
+        }
+        memcpy(tmpbuf, bctrl, size);
+        sz = mtk_device_wrap_write_force(fd, tmpbuf, sizeof(char)* OFFSETOF_SLOT_SUFFIX);
+#else
+        sz = write(fd, buf, size);
+#endif
+        if (sz == 0) {
+            break;
+        } else if (sz < 0) {
+            if (errno == EINTR) {
+                continue;
+            }
+            err = -errno;
+            ALOGE("%s Error Writing metadata file\n",__func__);
+#if MTK_BOOTDEV_TYPE == BOOTDEV_TYPE_NAND
+            mtk_device_wrap_close(fd);
+            free(tmpbuf);
+#else
+            close(fd);
+#endif
+            return err;
+        }
+        size -= sz;
+        buf += sz;
+    } while(size > 0);
+
+#if MTK_BOOTDEV_TYPE == BOOTDEV_TYPE_NAND
+    free(tmpbuf);
+    mtk_device_wrap_close(fd);
+#else
+    close(fd);
+#endif
+
+    return 0;
+}
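+
+/*
+ * The crc32 field is computed over sizeof(AvbABData) - sizeof(uint32_t)
+ * bytes (everything before the field itself) and stored big-endian via
+ * avb_htobe32(), matching the "network byte-order" note in bootctrl.h.
+ * On the NAND path, the 2048 bytes in front of the metadata are first
+ * rewritten with 0xFF and the metadata is then flushed as one full
+ * buffer through mtk_device_wrap_write_force().
+ */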
+
+void bootctrl_init(boot_control_module_t  *module )
+{
+    ALOGI("boot control HAL init");
+
+    if(blk_dev_path == NULL) {
+#if FSTAB_SUPPORT
+        /* Initial read fstab */
+        fstab = fs_mgr_read_fstab_default();
+        if (!fstab) {
+            ALOGE("failed to read default fstab");
+        }
+        blk_dev_path = get_device_path("/misc");
+
+        /* Free fstab */
+        free_fstab();
+#endif
+    }
+
+    ALOGI("%s misc blk device path = %s\n", __func__ ,blk_dev_path);
+}
+
+unsigned bootctrl_get_number_slots(boot_control_module_t  *module )
+{
+    return 2;
+}
+
+int bootctrl_get_active_slot()
+{
+    int fd, err, slot;
+    ssize_t size = COMMAND_LINE_SIZE, sz;
+    char *buf, *ptr;
+    char *str;
+
+    fd = open(COMMAND_LINE_PATH, O_RDONLY);
+
+    if (fd < 0) {
+        err = -errno;
+        ALOGE("%s error reading commandline\n", __func__);
+        return err;
+    }
+    ptr = buf = (char *)malloc(size + 1);   /* +1 for the NUL terminator below */
+    if (!buf) {
+        err = -errno;
+        ALOGE("%s Error allocating memory\n", __func__);
+        close(fd);
+        return err;
+    }
+    do {
+        sz = read(fd, buf, size);
+        if (sz == 0) {
+            break;
+        } else if (sz < 0) {
+            if (errno == EINTR) {
+                continue;
+            }
+            err = -errno;
+            ALOGE("%s Error reading file\n",__func__);
+            free(ptr);
+            close(fd);
+            return err;
+        }
+        size -= sz;
+        buf += sz;
+    } while(size > 0);
+    /* NUL-terminate: read() of /proc/cmdline does not terminate the buffer,
+     * and strstr() below requires a proper C string. */
+    ptr[COMMAND_LINE_SIZE - size] = '\0';
+    str = strstr((char *)ptr, SLOT_SUFFIX_STR);
+    if (!str) {
+        err = -EIO;
+        ALOGE("%s cannot find %s in kernel commandline.\n", __func__ , SLOT_SUFFIX_STR);
+        free(ptr);
+        close(fd);
+        return err;
+    }
+    str += strlen(SLOT_SUFFIX_STR) + 1;
+    //str += strlen(SLOT_SUFFIX_STR);
+    slot = (*str == 'a') ? 0 : 1;
+    free(ptr);
+    close(fd);
+    return slot;
+}
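+
+/*
+ * Example: a kernel cmdline containing "androidboot.slot_suffix=_a" makes
+ * bootctrl_get_active_slot() return 0; any other suffix character (e.g.
+ * "_b") yields 1. The "+ 1" above skips the leading underscore of the
+ * suffix.
+ */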
+
+
+uint32_t bootctrl_get_current_slot(boot_control_module_t  *module )
+{
+    ALOGI("boot control bootctrl_get_current_slot\n");
+
+    int active = bootctrl_get_active_slot();
+    /* Fall back to slot 0 if the active slot cannot be determined. */
+    uint32_t slot = (active < 0) ? 0 : (uint32_t)active;
+
+    ALOGI("bootctrl_get_current_slot %u\n", slot);
+    return slot;
+}
+
+int bootctrl_mark_boot_successful(boot_control_module_t  *module )
+{
+    ALOGI("boot control bootctrl_mark_boot_successful\n");
+    int ret;
+    int slot = 0;
+    AvbABData metadata;
+    AvbABSlotData *slotp;
+
+    ret = bootctrl_read_metadata(&metadata);
+    if (ret < 0) {
+        return ret;
+    }
+
+    slot = bootctrl_get_active_slot();
+    if (slot < 0) {
+        ALOGE("bootctrl_mark_boot_successful fail , slot = \n");
+        return slot;
+    }
+    slotp = &metadata.slots[slot];
+    slotp->successful_boot = 1;
+    slotp->tries_remaining = 0;
+
+    return bootctrl_write_metadata(&metadata);
+}
+
+int bootctrl_set_active_boot_slot(boot_control_module_t  *module ,
+    unsigned slot)
+{
+    ALOGI("boot control bootctrl_set_active_boot_slot , slot is %d\n", slot);
+    int ret, slot2;
+    AvbABData metadata;
+    AvbABSlotData *slotp;
+
+    if (slot >= 2) {
+        ALOGE("%s Wrong Slot value %u\n", __func__ , slot);
+        return -EINVAL;
+    }
+    ret = bootctrl_read_metadata(&metadata);
+    if (ret < 0) {
+        return ret;
+    }
+    /* Set highest priority and reset retry count */
+    slotp = &metadata.slots[slot];
+    slotp->successful_boot = 0;
+    slotp->priority = AVB_AB_MAX_PRIORITY;
+    slotp->tries_remaining = AVB_AB_MAX_TRIES_REMAINING;
+
+    /* Ensure other slot doesn't have as high a priority. */
+    slot2 = (slot == 0) ? 1 : 0;
+    slotp = &metadata.slots[slot2];
+    if(slotp->priority == AVB_AB_MAX_PRIORITY)
+        slotp->priority = AVB_AB_MAX_PRIORITY - 1;
+    ret = bootctrl_write_metadata(&metadata);
+    if (ret < 0) {
+        return ret;
+    }
+
+    return 0;
+}
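+
+/*
+ * Priority scheme sketch: the newly selected slot gets priority
+ * AVB_AB_MAX_PRIORITY (15) and AVB_AB_MAX_TRIES_REMAINING (7) boot
+ * attempts with successful_boot cleared; if the other slot also sits at
+ * priority 15, it is demoted to 14 so the bootloader deterministically
+ * picks the new slot first.
+ */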
+
+int bootctrl_set_slot_as_unbootable(boot_control_module_t  *module ,
+    unsigned slot)
+{
+    ALOGI("boot control bootctrl_set_slot_as_unbootable\n");
+    int ret;
+    AvbABData metadata;
+    AvbABSlotData *slotp;
+
+    if (slot >= 2) {
+        ALOGE("%s Wrong Slot value %u\n", __func__ , slot);
+        return -EINVAL;
+    }
+    ret = bootctrl_read_metadata(&metadata);
+    if (ret < 0) {
+        return ret;
+    }
+    /* Zero out priority, successful_boot and tries_remaining. */
+    slotp = &metadata.slots[slot];
+    slotp->successful_boot = 0;
+    slotp->priority = 0;
+    slotp->tries_remaining = 0;
+    ret = bootctrl_write_metadata(&metadata);
+    if (ret < 0) {
+        return ret;
+    }
+
+    return 0;
+}
+
+int bootctrl_is_slot_bootable(boot_control_module_t  *module ,
+    unsigned slot)
+{
+    ALOGI("boot control bootctrl_is_slot_bootable\n");
+    int ret;
+    AvbABData metadata;
+
+    /* slot 0 is A , slot 1 is B */
+    if (slot >= 2) {
+        ALOGE("%s Wrong slot value %u\n", __func__,slot);
+        return -EINVAL;
+    }
+    ret = bootctrl_read_metadata(&metadata);
+    if (ret < 0) {
+        return ret;
+    }
+
+    return (metadata.slots[slot].priority != 0);
+}
+
+int bootctrl_get_bootup_status(boot_control_module_t  *module ,
+    unsigned slot)
+{
+    ALOGI("bootctrl bootctrl_get_bootup_status\n");
+    int ret = -1;
+    AvbABSlotData *slotp;
+    AvbABData metadata;
+
+    if(slot >= 2) {
+        ALOGE("%s Wrong slot value %u\n", __func__,slot);
+        return -1;
+    }
+
+    ret = bootctrl_read_metadata(&metadata);
+    if (ret < 0) {
+        return ret;
+    }
+
+    slotp = &metadata.slots[slot];
+
+    ALOGI("bootctrl bootctrl_get_bootup_status = %d\n", slotp->successful_boot);
+    return slotp->successful_boot;
+}
+
+const char *bootctrl_get_suffix(boot_control_module_t  *module ,
+    unsigned slot)
+{
+    ALOGI("boot control bootctrl_get_suffix %d\n",slot);
+    static const char* suffix[2] = {BOOTCTRL_SUFFIX_A, BOOTCTRL_SUFFIX_B};
+    if (slot >= 2)
+        return NULL;
+    return suffix[slot];
+}
+
+static struct hw_module_methods_t bootctrl_methods = {
+    .open  = NULL,
+};
+
+/* Boot Control Module implementation */
+boot_control_module_t HAL_MODULE_INFO_SYM = {
+    .common = {
+        .tag                 = HARDWARE_MODULE_TAG,
+        .module_api_version  = BOOT_CONTROL_MODULE_API_VERSION_0_1,
+        .hal_api_version     = HARDWARE_HAL_API_VERSION,
+        .id                  = BOOT_CONTROL_HARDWARE_MODULE_ID,
+        .name                = "boot_control HAL",
+        .author              = "Mediatek Corporation",
+        .methods             = &bootctrl_methods,
+    },
+    .init                 = bootctrl_init,
+    .getNumberSlots       = bootctrl_get_number_slots,
+    .getCurrentSlot       = bootctrl_get_current_slot,
+    .markBootSuccessful   = bootctrl_mark_boot_successful,
+    .setActiveBootSlot    = bootctrl_set_active_boot_slot,
+    .setSlotAsUnbootable  = bootctrl_set_slot_as_unbootable,
+    .isSlotBootable       = bootctrl_is_slot_bootable,
+    .getSuffix            = bootctrl_get_suffix,
+    .isSlotMarkedSuccessful = bootctrl_get_bootup_status,
+};
diff --git a/src/support/bootctrl/mediatek/bootctrl/bootctrl.h b/src/support/bootctrl/mediatek/bootctrl/bootctrl.h
new file mode 100644
index 0000000..b2a5c51
--- /dev/null
+++ b/src/support/bootctrl/mediatek/bootctrl/bootctrl.h
@@ -0,0 +1,131 @@
+/* Copyright Statement:
+ *
+ * This software/firmware and related documentation ("MediaTek Software") are
+ * protected under relevant copyright laws. The information contained herein is
+ * confidential and proprietary to MediaTek Inc. and/or its licensors. Without
+ * the prior written permission of MediaTek inc. and/or its licensors, any
+ * reproduction, modification, use or disclosure of MediaTek Software, and
+ * information contained herein, in whole or in part, shall be strictly
+ * prohibited.
+ *
+ * SPDX-License-Identifier: MediaTekProprietary
+ * MediaTek Inc. (C) 2016. All rights reserved.
+ *
+ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
+ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
+ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER
+ * ON AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL
+ * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR
+ * NONINFRINGEMENT. NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH
+ * RESPECT TO THE SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY,
+ * INCORPORATED IN, OR SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES
+ * TO LOOK ONLY TO SUCH THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO.
+ * RECEIVER EXPRESSLY ACKNOWLEDGES THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO
+ * OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES CONTAINED IN MEDIATEK
+ * SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE
+ * RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
+ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S
+ * ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE
+ * RELEASED HEREUNDER WILL BE, AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE
+ * MEDIATEK SOFTWARE AT ISSUE, OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE
+ * CHARGE PAID BY RECEIVER TO MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
+ *
+ * The following software/firmware and/or related documentation ("MediaTek
+ * Software") have been modified by MediaTek Inc. All revisions are subject to
+ * any receiver's applicable license agreements with MediaTek Inc.
+ */
+
+
+/* THE HAL BOOTCTRL HEADER MUST BE IN SYNC WITH THE UBOOT BOOTCTRL HEADER */
+
+#ifndef _BOOTCTRL_H_
+#define _BOOTCTRL_H_
+
+#include <stdint.h>
+
+/* struct boot_ctrl occupies the slot_suffix field of
+ * struct bootloader_message */
+
+#define BOOTCTRL_SUFFIX_A           "_a"
+#define BOOTCTRL_SUFFIX_B           "_b"
+#define BOOT_CONTROL_VERSION    1
+
+#define BOOTCTRL_PROPERTY "ro.boot.slot_suffix"
+#define SLOT_SUFFIX_STR "androidboot.slot_suffix="
+#define COMMAND_LINE_PATH "/proc/cmdline"
+#define COMMAND_LINE_SIZE 2048
+
+/* AVB20 */
+/* Magic for the A/B struct when serialized. */
+#define AVB_AB_MAGIC "\0AB0"
+#define AVB_AB_MAGIC_LEN 4
+
+/* Versioning for the on-disk A/B metadata - keep in sync with avbtool. */
+#define AVB_AB_MAJOR_VERSION 1
+#define AVB_AB_MINOR_VERSION 0
+
+/* Size of the upstream AvbABData struct. Note: the MediaTek-extended struct
+ * defined below is 40 bytes, so prefer sizeof(AvbABData) when sizing buffers. */
+#define AVB_AB_DATA_SIZE 32
+
+/* Maximum values for slot data */
+#define AVB_AB_MAX_PRIORITY 15
+#define AVB_AB_MAX_TRIES_REMAINING 7
+
+#define BOOTDEV_TYPE_NAND 1
+#define BOOTDEV_TYPE_EMMC 2
+
+#define OFFSETOF_SLOT_SUFFIX 2048
+
+/* Struct used for recording per-slot metadata.
+ *
+ * When serialized, data is stored in network byte-order.
+ */
+typedef struct AvbABSlotData {
+  /* Slot priority. Valid values range from 0 to AVB_AB_MAX_PRIORITY,
+   * both inclusive with 1 being the lowest and AVB_AB_MAX_PRIORITY
+   * being the highest. The special value 0 is used to indicate the
+   * slot is unbootable.
+   */
+  uint8_t priority;
+
+  /* Number of times left attempting to boot this slot ranging from 0
+   * to AVB_AB_MAX_TRIES_REMAINING.
+   */
+  uint8_t tries_remaining;
+
+  /* Non-zero if this slot has booted successfully, 0 otherwise. */
+  uint8_t successful_boot;
+  /* For LK anti-rollback support. */
+  uint8_t efuse_write;
+  uint8_t bl_ver;
+  /* Reserved for future use. */
+  uint8_t reserved[3];
+
+} AvbABSlotData;
+
+/* Struct used for recording A/B metadata.
+ *
+ * When serialized, data is stored in network byte-order.
+ */
+typedef struct AvbABData {
+  /* Magic number used for identification - see AVB_AB_MAGIC. */
+  uint8_t magic[AVB_AB_MAGIC_LEN];
+
+  /* Version of on-disk struct - see AVB_AB_{MAJOR,MINOR}_VERSION. */
+  uint8_t version_major;
+  uint8_t version_minor;
+
+  /* Padding to ensure |slots| field starts eight bytes in. */
+  uint8_t reserved1[2];
+
+  /* Per-slot metadata. */
+  AvbABSlotData slots[2];
+
+  /* Reserved for future use. */
+  uint8_t reserved2[12];
+
+  /* CRC32 of all 36 bytes preceding this field. */
+  uint32_t crc32;
+} AvbABData;
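+
+/* Layout check: 4 (magic) + 1 + 1 + 2 (reserved1) + 2 * 8 (slots) +
+ * 12 (reserved2) + 4 (crc32) = 40 bytes. The extra per-slot fields
+ * (efuse_write, bl_ver, reserved[3]) grow this struct beyond the upstream
+ * 32-byte AvbABData, which is why code here uses sizeof(AvbABData) rather
+ * than AVB_AB_DATA_SIZE. */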
+#endif /* _BOOTCTRL_H_ */
diff --git a/src/support/bootctrl/mediatek/bootctrl/sd_misc.h b/src/support/bootctrl/mediatek/bootctrl/sd_misc.h
new file mode 100644
index 0000000..8dfc5d3
--- /dev/null
+++ b/src/support/bootctrl/mediatek/bootctrl/sd_misc.h
@@ -0,0 +1,239 @@
+/* Copyright Statement:
+ *
+ * This software/firmware and related documentation ("MediaTek Software") are
+ * protected under relevant copyright laws. The information contained herein is
+ * confidential and proprietary to MediaTek Inc. and/or its licensors. Without
+ * the prior written permission of MediaTek inc. and/or its licensors, any
+ * reproduction, modification, use or disclosure of MediaTek Software, and
+ * information contained herein, in whole or in part, shall be strictly
+ * prohibited.
+ *
+ * SPDX-License-Identifier: MediaTekProprietary
+ * MediaTek Inc. (C) 2016. All rights reserved.
+ *
+ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
+ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
+ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER
+ * ON AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL
+ * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR
+ * NONINFRINGEMENT. NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH
+ * RESPECT TO THE SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY,
+ * INCORPORATED IN, OR SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES
+ * TO LOOK ONLY TO SUCH THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO.
+ * RECEIVER EXPRESSLY ACKNOWLEDGES THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO
+ * OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES CONTAINED IN MEDIATEK
+ * SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE
+ * RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
+ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S
+ * ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE
+ * RELEASED HEREUNDER WILL BE, AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE
+ * MEDIATEK SOFTWARE AT ISSUE, OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE
+ * CHARGE PAID BY RECEIVER TO MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
+ *
+ * The following software/firmware and/or related documentation ("MediaTek
+ * Software") have been modified by MediaTek Inc. All revisions are subject to
+ * any receiver's applicable license agreements with MediaTek Inc.
+ */
+
+#ifndef SD_MISC_H
+#define SD_MISC_H
+
+#ifdef __KERNEL__
+#include <linux/bitops.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#endif
+
+
+
+struct msdc_ioctl {
+    int opcode;
+    int host_num;
+    int iswrite;
+    int trans_type;
+    unsigned int total_size;
+    unsigned int address;
+    unsigned int *buffer;
+    int cmd_pu_driving;
+    int cmd_pd_driving;
+    int dat_pu_driving;
+    int dat_pd_driving;
+    int clk_pu_driving;
+    int clk_pd_driving;
+    int ds_pu_driving;
+    int ds_pd_driving;
+    int rst_pu_driving;
+    int rst_pd_driving;
+    int clock_freq;
+    int partition;
+    int hopping_bit;
+    int hopping_time;
+    int result;
+    int sd30_mode;
+    int sd30_max_current;
+    int sd30_drive;
+    int sd30_power_control;
+};
+
+/**************for msdc_ssc***********************/
+#define AUDPLL_CTL_REG12                   (0xF0007070)
+#define AUDPLL_CTL_REG01                   (0xF00071E0)
+#define AUDPLL_CTL_REG02                   (0xF100000C)
+
+#define AUDPLL_TSEL_MASK                   (1792)   /* MASK = 00000111 00000000 */
+#define AUDPLL_TSEL_RESULT1                (0)  /* REG = 00000000 00000000   30.5us */
+#define AUDPLL_TSEL_RESULT2                (256)    /* REG = 00000001 00000000   61.0us */
+#define AUDPLL_TSEL_RESULT3                (512)    /* REG = 00000010 00000000   122.1us */
+#define AUDPLL_TSEL_RESULT4                (768)    /* REG = 00000011 00000000   244.1us */
+#define AUDPLL_TSEL_RESULT5                (1024)   /* REG = 00000100 00000000   448.3us */
+
+#define AUDPLL_BSEL_MASK                   (7)  /* MASK = 00000000 00000111 */
+#define AUDPLL_BSEL_RESULT0                (0)  /* REG = 00000000 00000000   REG init val */
+#define AUDPLL_BSEL_RESULT1                (1)  /* REG = 00000000 00000001   2.26MHz */
+#define AUDPLL_BSEL_RESULT2                (2)  /* REG = 00000000 00000010   4.52MHz */
+#define AUDPLL_BSEL_RESULT3                (4)  /* REG = 00000000 00000100   9.04MHz */
+
+#define SET_HOP_BIT_NONE                   (0)
+#define SET_HOP_BIT1                       (1)
+#define SET_HOP_BIT2                       (2)
+#define SET_HOP_BIT3                       (3)
+
+#define SET_HOP_TIME0                      (0)
+#define SET_HOP_TIME1                      (1)
+#define SET_HOP_TIME2                      (2)
+#define SET_HOP_TIME3                      (3)
+#define SET_HOP_TIME4                      (4)
+
+
+/**************for msdc_ssc***********************/
+
+#define MSDC_DRIVING_SETTING                (0)
+#define MSDC_CLOCK_FREQUENCY                (1)
+#define MSDC_SINGLE_READ_WRITE              (2)
+#define MSDC_MULTIPLE_READ_WRITE            (3)
+#define MSDC_GET_CID                        (4)
+#define MSDC_GET_CSD                        (5)
+#define MSDC_GET_EXCSD                      (6)
+#define MSDC_ERASE_PARTITION                (7)
+#define MSDC_HOPPING_SETTING                (8)
+
+#define MSDC_REINIT_SDCARD            _IOW('r', 9, int)
+
+#define MSDC_SD30_MODE_SWITCH               (10)
+#define MSDC_GET_BOOTPART                   (11)
+#define MSDC_SET_BOOTPART                   (12)
+#define MSDC_GET_PARTSIZE                   (13)
+#define MSDC_ERASE_SELECTED_AREA            (0x20)
+#define MSDC_CARD_DUNM_FUNC                 (0xff)
+
+typedef enum {
+    USER_PARTITION = 0,
+    BOOT_PARTITION_1,
+    BOOT_PARTITION_2,
+    RPMB_PARTITION,
+    GP_PARTITION_1,
+    GP_PARTITION_2,
+    GP_PARTITION_3,
+    GP_PARTITION_4,
+} PARTITON_ACCESS_T;
+
+typedef enum {
+    SDHC_HIGHSPEED = 0, /* 0 Host supports HS mode */
+    UHS_SDR12,      /* 1 Host supports UHS SDR12 mode */
+    UHS_SDR25,      /* 2 Host supports UHS SDR25 mode */
+    UHS_SDR50,      /* 3 Host supports UHS SDR50 mode */
+    UHS_SDR104,     /* 4 Host supports UHS SDR104 mode */
+    UHS_DDR50,      /* 5 Host supports UHS DDR50 mode */
+} SD3_MODE;
+
+typedef enum {
+    DRIVER_TYPE_A = 0,  /* 0 Host supports Driver Type A */
+    DRIVER_TYPE_B,      /* 1 Host supports Driver Type B */
+    DRIVER_TYPE_C,      /* 2 Host supports Driver Type C */
+    DRIVER_TYPE_D,      /* 3 Host supports Driver Type D */
+} SD3_DRIVE;
+
+typedef enum {
+    MAX_CURRENT_200 = 0,    /* 0 Host max current limit is 200mA */
+    MAX_CURRENT_400,    /* 1 Host max current limit is 400mA */
+    MAX_CURRENT_600,    /* 2 Host max current limit is 600mA */
+    MAX_CURRENT_800,    /* 3 Host max current limit is 800mA */
+} SD3_MAX_CURRENT;
+
+typedef enum {
+    SDXC_NO_POWER_CONTROL = 0,  /* 0 Host does not support >150mA current at 3.3V/3.0V/1.8V */
+    SDXC_POWER_CONTROL, /* 1 Host supports >150mA current at 3.3V/3.0V/1.8V */
+} SD3_POWER_CONTROL;
+
+typedef enum {
+    DUMP_INTO_BOOT_CARD_IPANIC = 0,
+    DUMP_INTO_BOOT_CARD_KDUMP = 1,
+    DUMP_INTO_EXTERN_CARD = 2,
+} DUMP_STORAGE_TYPE;
+
+typedef enum {
+    EMMC_CARD_BOOT = 0,
+    SD_CARD_BOOT,
+    EMMC_CARD,
+    SD_CARD,
+} STORAGE_TPYE;
+
+#define EXT_CSD_BOOT_SIZE_MULT           (226) /* R */
+#define EXT_CSD_HC_ERASE_GRP_SIZE        (224) /* RO */
+#define EXT_CSD_HC_WP_GPR_SIZE           (221) /* RO */
+#define EXT_CSD_RPMB_SIZE_MULT           (168) /* R */
+#define EXT_CSD_GP1_SIZE_MULT            (143) /* R/W 3 bytes */
+#define EXT_CSD_GP2_SIZE_MULT            (146) /* R/W 3 bytes */
+#define EXT_CSD_GP3_SIZE_MULT            (149) /* R/W 3 bytes */
+#define EXT_CSD_GP4_SIZE_MULT            (152) /* R/W 3 bytes */
+#define EXT_CSD_PART_CFG                 (179) /* R/W/E & R/W/E_P */
+#define EXT_CSD_CACHE_FLUSH              (32)
+#define EXT_CSD_CACHE_CTRL               (33)
+#define CAPACITY_2G                      (2 * 1024 * 1024 * 1024ULL)
+
+typedef enum {
+    EMMC_BOOT_NO_EN = 0,
+    EMMC_BOOT1_EN,
+    EMMC_BOOT2_EN,
+    EMMC_BOOT_USER = 7,
+    EMMC_BOOT_END
+} BOOT_PARTITION_EN;
+
+#ifdef CONFIG_MTK_GPT_SCHEME_SUPPORT
+typedef enum {
+    EMMC_PART_UNKNOWN=0
+    ,EMMC_PART_BOOT1
+    ,EMMC_PART_BOOT2
+    ,EMMC_PART_RPMB
+    ,EMMC_PART_GP1
+    ,EMMC_PART_GP2
+    ,EMMC_PART_GP3
+    ,EMMC_PART_GP4
+    ,EMMC_PART_USER
+    ,EMMC_PART_END
+} Region;
+#endif
+
+typedef enum {
+    CARD_INFO = 0,
+    DISK_INFO,
+    EMMC_USER_CAPACITY,
+    EMMC_CAPACITY,
+    EMMC_RESERVE,
+} GET_STORAGE_INFO;
+
+struct storage_info {
+    struct mmc_card *card;
+    struct gendisk *disk;
+    unsigned long long emmc_user_capacity;
+    unsigned long long emmc_capacity;
+    int emmc_reserve;
+};
+
+int msdc_get_info(STORAGE_TPYE storage_type, GET_STORAGE_INFO info_type, struct storage_info *info);
+
+#endif              /* end of SD_MISC_H */
diff --git a/src/support/libnandapi/Makefile b/src/support/libnandapi/Makefile
new file mode 100644
index 0000000..da3b8ff
--- /dev/null
+++ b/src/support/libnandapi/Makefile
@@ -0,0 +1,23 @@
+
+TARGET := libnandapi.so
+
+INCLUDE :=
+
+CFLAGS += -fPIC -O2 $(INCLUDE) -D_LARGEFILE64_SOURCE
+SHARE := -shared
+
+SOURCE := $(wildcard src/*.c)
+OBJS := $(patsubst %.c,%.o,$(SOURCE))
+
+.PHONY : all clean install
+
+all : $(TARGET)
+
+$(TARGET) : $(OBJS)
+	$(CC) $(CFLAGS) $(SHARE) -o $@ $(OBJS)
+
+clean :
+	rm -fr src/*.o *.so
+
+install : $(TARGET)
+	install -d $(DESTDIR)$(PREFIX)/lib/
+	install -m 0755 $(TARGET) $(DESTDIR)$(PREFIX)/lib/
diff --git a/src/support/libnandapi/inc/common.h b/src/support/libnandapi/inc/common.h
new file mode 100644
index 0000000..817d5e7
--- /dev/null
+++ b/src/support/libnandapi/inc/common.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) Artem Bityutskiy, 2007, 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __MTD_UTILS_COMMON_H__
+#define __MTD_UTILS_COMMON_H__
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <features.h>
+#include <inttypes.h>
+
+#ifndef PROGRAM_NAME
+# error "You must define PROGRAM_NAME before including this header"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef MIN	/* some C lib headers define this for us */
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#endif
+#ifndef MAX
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+#endif
+#define min(a, b) MIN(a, b) /* glue for linux kernel source */
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
+#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
+
+#define min_t(t,x,y) ({ \
+	typeof((x)) _x = (x); \
+	typeof((y)) _y = (y); \
+	(_x < _y) ? _x : _y; \
+})
+
+#define max_t(t,x,y) ({ \
+	typeof((x)) _x = (x); \
+	typeof((y)) _y = (y); \
+	(_x > _y) ? _x : _y; \
+})
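+
+/* Note: the type parameter 't' is unused above; typeof() of the operands is
+ * used instead, so these kernel-style helpers behave like plain MIN/MAX. */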
+
+#ifndef O_CLOEXEC
+#define O_CLOEXEC 0
+#endif
+
+/* define a print format specifier for off_t */
+#ifdef __USE_FILE_OFFSET64
+#define PRIxoff_t PRIx64
+#define PRIdoff_t PRId64
+#else
+#define PRIxoff_t "l"PRIx32
+#define PRIdoff_t "l"PRId32
+#endif
+
+/* Verbose messages */
+#define bareverbose(verbose, fmt, ...) do {                        \
+	if (verbose)                                               \
+		printf(fmt, ##__VA_ARGS__);                        \
+} while(0)
+#define verbose(verbose, fmt, ...) \
+	bareverbose(verbose, "%s: " fmt "\n", PROGRAM_NAME, ##__VA_ARGS__)
+
+/* Normal messages */
+#define normsg_cont(fmt, ...) do {                                 \
+	printf("%s: " fmt, PROGRAM_NAME, ##__VA_ARGS__);           \
+} while(0)
+#define normsg(fmt, ...) do {                                      \
+	normsg_cont(fmt "\n", ##__VA_ARGS__);                      \
+} while(0)
+
+/* Error messages */
+#define errmsg(fmt, ...)  ({                                                \
+	fprintf(stderr, "%s: error!: " fmt "\n", PROGRAM_NAME, ##__VA_ARGS__); \
+	-1;                                                                 \
+})
+#define errmsg_die(fmt, ...) do {                                           \
+	exit(errmsg(fmt, ##__VA_ARGS__));                                   \
+} while(0)
+
+/* System error messages */
+#define sys_errmsg(fmt, ...)  ({                                            \
+	int _err = errno;                                                   \
+	errmsg(fmt, ##__VA_ARGS__);                                         \
+	fprintf(stderr, "%*serror %d (%s)\n", (int)sizeof(PROGRAM_NAME) + 1,\
+		"", _err, strerror(_err));                                  \
+	-1;                                                                 \
+})
+#define sys_errmsg_die(fmt, ...) do {                                       \
+	exit(sys_errmsg(fmt, ##__VA_ARGS__));                               \
+} while(0)
+
+/* Warnings */
+#define warnmsg(fmt, ...) do {                                                \
+	fprintf(stderr, "%s: warning!: " fmt "\n", PROGRAM_NAME, ##__VA_ARGS__); \
+} while(0)
+
+/* uClibc versions before 0.9.34 and musl don't have rpmatch() */
+#if defined(__UCLIBC__) && \
+		(__UCLIBC_MAJOR__ == 0 && \
+		(__UCLIBC_MINOR__ < 9 || \
+		(__UCLIBC_MINOR__ == 9 && __UCLIBC_SUBLEVEL__ < 34))) || \
+	!defined(__GLIBC__)
+#undef rpmatch
+#define rpmatch __rpmatch
+static inline int __rpmatch(const char *resp)
+{
+    return (resp[0] == 'y' || resp[0] == 'Y') ? 1 :
+	(resp[0] == 'n' || resp[0] == 'N') ? 0 : -1;
+}
+#endif
+
+/**
+ * prompt the user for confirmation
+ */
+static inline bool prompt(const char *msg, bool def)
+{
+	char *line = NULL;
+	size_t len = 0;	/* initialised for portable use with getline() */
+	bool ret = def;
+
+	do {
+		normsg_cont("%s (%c/%c) ", msg, def ? 'Y' : 'y', def ? 'n' : 'N');
+		fflush(stdout);
+
+		while (getline(&line, &len, stdin) == -1) {
+			printf("failed to read prompt; assuming '%s'\n",
+				def ? "yes" : "no");
+			break;
+		}
+
+		if (strcmp("\n", line) != 0) {
+			switch (rpmatch(line)) {
+			case 0: ret = false; break;
+			case 1: ret = true; break;
+			case -1:
+				puts("unknown response; please try again");
+				continue;
+			}
+		}
+		break;
+	} while (1);
+
+	free(line);
+
+	return ret;
+}
+
+static inline int is_power_of_2(unsigned long long n)
+{
+	return (n != 0 && ((n & (n - 1)) == 0));
+}
+
+/**
+ * simple_strtoX - convert a hex/dec/oct string into a number
+ * @snum: buffer to convert
+ * @error: set to 1 when buffer isn't fully consumed
+ *
+ * These functions are similar to the standard strtoX() functions, but they are
+ * a little bit easier to use if you want to convert full string of digits into
+ * the binary form. The typical usage:
+ *
+ * int error = 0;
+ * unsigned long num;
+ *
+ * num = simple_strtoul(str, &error);
+ * if (error || ... if needed, your check that num is not out of range ...)
+ * 	error_happened();
+ */
+#define simple_strtoX(func, type) \
+static inline type simple_##func(const char *snum, int *error) \
+{ \
+	char *endptr; \
+	type ret = func(snum, &endptr, 0); \
+ \
+	if (error && (!*snum || *endptr)) { \
+		errmsg("%s: unable to parse the number '%s'", #func, snum); \
+		*error = 1; \
+	} \
+ \
+	return ret; \
+}
+simple_strtoX(strtol, long int)
+simple_strtoX(strtoll, long long int)
+simple_strtoX(strtoul, unsigned long int)
+simple_strtoX(strtoull, unsigned long long int)
+
+/* Simple version-printing for utils */
+#define common_print_version() \
+do { \
+	printf("%s %s\n", PROGRAM_NAME, VERSION); \
+} while (0)
+
+#include "xalloc.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !__MTD_UTILS_COMMON_H__ */
diff --git a/src/support/libnandapi/inc/libmtd.h b/src/support/libnandapi/inc/libmtd.h
new file mode 100644
index 0000000..a78c8cb
--- /dev/null
+++ b/src/support/libnandapi/inc/libmtd.h
@@ -0,0 +1,352 @@
+/*
+ * Copyright (C) 2008, 2009 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Artem Bityutskiy
+ *
+ * MTD library.
+ */
+
+#ifndef __LIBMTD_H__
+#define __LIBMTD_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Maximum MTD device name length */
+#define MTD_NAME_MAX 127
+/* Maximum MTD device type string length */
+#define MTD_TYPE_MAX 64
+
+/* MTD library descriptor */
+typedef void * libmtd_t;
+
+/* Forward decls */
+struct region_info_user;
+
+/**
+ * @mtd_dev_cnt: count of MTD devices in system
+ * @lowest_mtd_num: lowest MTD device number in system
+ * @highest_mtd_num: highest MTD device number in system
+ * @sysfs_supported: non-zero if sysfs is supported by MTD
+ */
+struct mtd_info
+{
+	int mtd_dev_cnt;
+	int lowest_mtd_num;
+	int highest_mtd_num;
+	unsigned int sysfs_supported:1;
+};
+
+/**
+ * struct mtd_dev_info - information about an MTD device.
+ * @mtd_num: MTD device number
+ * @major: major number of corresponding character device
+ * @minor: minor number of corresponding character device
+ * @type: flash type (constants like %MTD_NANDFLASH defined in mtd-abi.h)
+ * @type_str: static R/O flash type string
+ * @name: device name
+ * @size: device size in bytes
+ * @eb_cnt: count of eraseblocks
+ * @eb_size: eraseblock size
+ * @min_io_size: minimum input/output unit size
+ * @subpage_size: sub-page size
+ * @oob_size: OOB size (zero if the device does not have OOB area)
+ * @region_cnt: count of additional erase regions
+ * @writable: zero if the device is read-only
+ * @bb_allowed: non-zero if the MTD device may have bad eraseblocks
+ */
+struct mtd_dev_info
+{
+	int mtd_num;
+	int major;
+	int minor;
+	int type;
+	const char type_str[MTD_TYPE_MAX + 1];
+	const char name[MTD_NAME_MAX + 1];
+	long long size;
+	int eb_cnt;
+	int eb_size;
+	int min_io_size;
+	int subpage_size;
+	int oob_size;
+	int region_cnt;
+	unsigned int writable:1;
+	unsigned int bb_allowed:1;
+};
+
+/**
+ * libmtd_open - open MTD library.
+ *
+ * This function initializes and opens the MTD library and returns MTD library
+ * descriptor in case of success and %NULL in case of failure. In case of
+ * failure, errno contains zero if MTD is not present in the system, or
+ * contains the error code if a real error happened.
+ */
+libmtd_t libmtd_open(void);
+
+/**
+ * libmtd_close - close MTD library.
+ * @desc: MTD library descriptor
+ */
+void libmtd_close(libmtd_t desc);
+
+/**
+ * mtd_dev_present - check whether an MTD device is present.
+ * @desc: MTD library descriptor
+ * @mtd_num: MTD device number to check
+ *
+ * This function returns %1 if MTD device is present and %0 if not.
+ */
+int mtd_dev_present(libmtd_t desc, int mtd_num);
+
+/**
+ * mtd_get_info - get general MTD information.
+ * @desc: MTD library descriptor
+ * @info: the MTD device information is returned here
+ *
+ * This function fills the passed @info object with general MTD information and
+ * returns %0 in case of success and %-1 in case of failure. If MTD subsystem is
+ * not present in the system, errno is set to @ENODEV.
+ */
+int mtd_get_info(libmtd_t desc, struct mtd_info *info);
+
+/**
+ * mtd_get_dev_info - get information about an MTD device.
+ * @desc: MTD library descriptor
+ * @node: name of the MTD device node
+ * @mtd: the MTD device information is returned here
+ *
+ * This function gets information about MTD device defined by the @node device
+ * node file and saves this information in the @mtd object. Returns %0 in case
+ * of success and %-1 in case of failure. If MTD subsystem is not present in the
+ * system, or the MTD device does not exist, errno is set to @ENODEV.
+ */
+int mtd_get_dev_info(libmtd_t desc, const char *node, struct mtd_dev_info *mtd);
+
+/**
+ * mtd_get_dev_info1 - get information about an MTD device.
+ * @desc: MTD library descriptor
+ * @mtd_num: MTD device number to fetch information about
+ * @mtd: the MTD device information is returned here
+ *
+ * This function is identical to 'mtd_get_dev_info()' except that it accepts
+ * MTD device number, not MTD character device.
+ */
+int mtd_get_dev_info1(libmtd_t desc, int mtd_num, struct mtd_dev_info *mtd);
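+
+/*
+ * Typical usage (illustrative sketch using only the functions declared in
+ * this header; "/dev/mtd0" is an example node):
+ *
+ *   libmtd_t lib = libmtd_open();
+ *   struct mtd_dev_info mtd;
+ *   if (lib && mtd_get_dev_info(lib, "/dev/mtd0", &mtd) == 0)
+ *       printf("%s: %d eraseblocks of %d bytes\n",
+ *              mtd.name, mtd.eb_cnt, mtd.eb_size);
+ *   libmtd_close(lib);
+ */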
+
+/**
+ * mtd_lock - lock eraseblocks.
+ * @desc: MTD library descriptor
+ * @mtd: MTD device description object
+ * @fd: MTD device node file descriptor
+ * @eb: eraseblock to lock
+ *
+ * This function locks eraseblock @eb. Returns %0 in case of success and %-1
+ * in case of failure.
+ */
+int mtd_lock(const struct mtd_dev_info *mtd, int fd, int eb);
+
+/**
+ * mtd_unlock - unlock eraseblocks.
+ * @desc: MTD library descriptor
+ * @mtd: MTD device description object
+ * @fd: MTD device node file descriptor
+ * @eb: eraseblock to lock
+ *
+ * This function unlocks eraseblock @eb. Returns %0 in case of success and %-1
+ * in case of failure.
+ */
+int mtd_unlock(const struct mtd_dev_info *mtd, int fd, int eb);
+
+/**
+ * mtd_erase - erase an eraseblock.
+ * @desc: MTD library descriptor
+ * @mtd: MTD device description object
+ * @fd: MTD device node file descriptor
+ * @eb: eraseblock to erase
+ *
+ * This function erases eraseblock @eb of MTD device described by @fd. Returns
+ * %0 in case of success and %-1 in case of failure.
+ */
+int mtd_erase(libmtd_t desc, const struct mtd_dev_info *mtd, int fd, int eb);
+
+/**
+ * mtd_regioninfo - get information about an erase region.
+ * @fd: MTD device node file descriptor
+ * @regidx: index of region to look up
+ * @reginfo: the region information is returned here
+ *
+ * This function gets information about an erase region defined by the
+ * @regidx index and saves this information in the @reginfo object.
+ * Returns %0 in case of success and %-1 in case of failure. If the
+ * @regidx is not valid or unavailable, errno is set to @ENODEV.
+ */
+int mtd_regioninfo(int fd, int regidx, struct region_info_user *reginfo);
+
+/**
+ * mtd_is_locked - see if the specified eraseblock is locked.
+ * @mtd: MTD device description object
+ * @fd: MTD device node file descriptor
+ * @eb: eraseblock to check
+ *
+ * This function checks to see if eraseblock @eb of MTD device described
+ * by @fd is locked. Returns %0 if it is unlocked, %1 if it is locked, and
+ * %-1 in case of failure. If the ioctl is not supported (support was added in
+ * Linux kernel 2.6.36) or this particular device does not support it, errno is
+ * set to @ENOTSUPP.
+ */
+int mtd_is_locked(const struct mtd_dev_info *mtd, int fd, int eb);
+
+/**
+ * mtd_torture - torture an eraseblock.
+ * @desc: MTD library descriptor
+ * @mtd: MTD device description object
+ * @fd: MTD device node file descriptor
+ * @eb: eraseblock to torture
+ *
+ * This function tortures eraseblock @eb. Returns %0 in case of success and %-1
+ * in case of failure.
+ */
+int mtd_torture(libmtd_t desc, const struct mtd_dev_info *mtd, int fd, int eb);
+
+/**
+ * mtd_is_bad - check if eraseblock is bad.
+ * @mtd: MTD device description object
+ * @fd: MTD device node file descriptor
+ * @eb: eraseblock to check
+ *
+ * This function checks if eraseblock @eb is bad. Returns %0 if not, %1 if yes,
+ * and %-1 in case of failure.
+ */
+int mtd_is_bad(const struct mtd_dev_info *mtd, int fd, int eb);
+
+/**
+ * mtd_mark_bad - mark an eraseblock as bad.
+ * @mtd: MTD device description object
+ * @fd: MTD device node file descriptor
+ * @eb: eraseblock to mark as bad
+ *
+ * This function marks eraseblock @eb as bad. Returns %0 in case of success and
+ * %-1 in case of failure.
+ */
+int mtd_mark_bad(const struct mtd_dev_info *mtd, int fd, int eb);
+
+/**
+ * mtd_read - read data from an MTD device.
+ * @mtd: MTD device description object
+ * @fd: MTD device node file descriptor
+ * @eb: eraseblock to read from
+ * @offs: offset within the eraseblock to read from
+ * @buf: buffer to read data to
+ * @len: how many bytes to read
+ *
+ * This function reads @len bytes of data from eraseblock @eb and offset @offs
+ * of the MTD device defined by @mtd and stores the read data at buffer @buf.
+ * Returns %0 in case of success and %-1 in case of failure.
+ */
+int mtd_read(const struct mtd_dev_info *mtd, int fd, int eb, int offs,
+	     void *buf, int len);
+
+/**
+ * mtd_write - write data to an MTD device.
+ * @desc: MTD library descriptor
+ * @mtd: MTD device description object
+ * @fd: MTD device node file descriptor
+ * @eb: eraseblock to write to
+ * @offs: offset within the eraseblock to write to
+ * @data: data buffer to write
+ * @len: how many data bytes to write
+ * @oob: OOB buffer to write
+ * @ooblen: how many OOB bytes to write
+ * @mode: write mode (e.g., %MTD_OOB_PLACE, %MTD_OOB_RAW)
+ *
+ * This function writes @len bytes of data to eraseblock @eb and offset @offs
+ * of the MTD device defined by @mtd. Returns %0 in case of success and %-1 in
+ * case of failure.
+ *
+ * Can only write to a single page at a time if writing to OOB.
+ */
+int mtd_write(libmtd_t desc, const struct mtd_dev_info *mtd, int fd, int eb,
+	      int offs, void *data, int len, void *oob, int ooblen,
+	      uint8_t mode);
+
+/**
+ * mtd_read_oob - read out-of-band area.
+ * @desc: MTD library descriptor
+ * @mtd: MTD device description object
+ * @fd: MTD device node file descriptor
+ * @start: page-aligned start address
+ * @length: number of OOB bytes to read
+ * @data: read buffer
+ *
+ * This function reads @length OOB bytes starting from address @start on
+ * MTD device described by @fd. The address is specified as page byte offset
+ * from the beginning of the MTD device. This function returns %0 in case of
+ * success and %-1 in case of failure.
+ */
+int mtd_read_oob(libmtd_t desc, const struct mtd_dev_info *mtd, int fd,
+		 uint64_t start, uint64_t length, void *data);
+
+/**
+ * mtd_write_oob - write out-of-band area.
+ * @desc: MTD library descriptor
+ * @mtd: MTD device description object
+ * @fd: MTD device node file descriptor
+ * @start: page-aligned start address
+ * @length: number of OOB bytes to write
+ * @data: write buffer
+ *
+ * This function writes @length OOB bytes starting from address @start on
+ * MTD device described by @fd. The address is specified as page byte offset
+ * from the beginning of the MTD device. Returns %0 in case of success and %-1
+ * in case of failure.
+ */
+int mtd_write_oob(libmtd_t desc, const struct mtd_dev_info *mtd, int fd,
+		  uint64_t start, uint64_t length, void *data);
+
+/**
+ * mtd_write_img - write a file to MTD device.
+ * @mtd: MTD device description object
+ * @fd: MTD device node file descriptor
+ * @eb: eraseblock to write to
+ * @offs: offset within the eraseblock to write to
+ * @img_name: the file to write
+ *
+ * This function writes the image @img_name to the MTD device defined by
+ * @mtd. @eb and @offs are the starting eraseblock and offset on the MTD
+ * device. Returns %0 in case of success and %-1 in case of failure.
+ */
+int mtd_write_img(const struct mtd_dev_info *mtd, int fd, int eb, int offs,
+		  const char *img_name);
+
+/**
+ * mtd_probe_node - test MTD node.
+ * @desc: MTD library descriptor
+ * @node: the node to test
+ *
+ * This function tests whether @node is an MTD device node and returns %1 if it
+ * is, and %-1 if it is not (errno is %ENODEV in this case) or if an error
+ * occurred.
+ */
+int mtd_probe_node(libmtd_t desc, const char *node);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __LIBMTD_H__ */
diff --git a/src/support/libnandapi/inc/libmtd_int.h b/src/support/libnandapi/inc/libmtd_int.h
new file mode 100644
index 0000000..7913e67
--- /dev/null
+++ b/src/support/libnandapi/inc/libmtd_int.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Artem Bityutskiy
+ *
+ * MTD library.
+ */
+
+#ifndef __LIBMTD_INT_H__
+#define __LIBMTD_INT_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define PROGRAM_NAME "libmtd"
+
+#define SYSFS_MTD        "class/mtd"
+#define MTD_NAME_PATT    "mtd%d"
+#define MTD_DEV          "dev"
+#define MTD_NAME         "name"
+#define MTD_TYPE         "type"
+#define MTD_EB_SIZE      "erasesize"
+#define MTD_SIZE         "size"
+#define MTD_MIN_IO_SIZE  "writesize"
+#define MTD_SUBPAGE_SIZE "subpagesize"
+#define MTD_OOB_SIZE     "oobsize"
+#define MTD_REGION_CNT   "numeraseregions"
+#define MTD_FLAGS        "flags"
+
+#define OFFS64_IOCTLS_UNKNOWN       0
+#define OFFS64_IOCTLS_NOT_SUPPORTED 1
+#define OFFS64_IOCTLS_SUPPORTED     2
+
+/**
+ * libmtd - MTD library description data structure.
+ * @sysfs_mtd: MTD directory in sysfs
+ * @mtd: MTD device sysfs directory pattern
+ * @mtd_dev: MTD device major/minor numbers file pattern
+ * @mtd_name: MTD device name file pattern
+ * @mtd_type: MTD device type file pattern
+ * @mtd_eb_size: MTD device eraseblock size file pattern
+ * @mtd_size: MTD device size file pattern
+ * @mtd_min_io_size: minimum I/O unit size file pattern
+ * @mtd_subpage_size: sub-page size file pattern
+ * @mtd_oob_size: MTD device OOB size file pattern
+ * @mtd_region_cnt: count of additional erase regions file pattern
+ * @mtd_flags: MTD device flags file pattern
+ * @sysfs_supported: non-zero if sysfs is supported by MTD
+ * @offs64_ioctls: %OFFS64_IOCTLS_SUPPORTED if 64-bit %MEMERASE64,
+ *                 %MEMREADOOB64, %MEMWRITEOOB64 MTD device ioctls are
+ *                 supported, %OFFS64_IOCTLS_NOT_SUPPORTED if not, and
+ *                 %OFFS64_IOCTLS_UNKNOWN if it is not known yet;
+ *
+ *  Note, we cannot find out whether 64-bit ioctls are supported by MTD when we
+ *  are initializing the library, because this requires an MTD device node.
+ *  Indeed, we have to actually call the ioctl and check for %ENOTTY to find
+ *  out whether it is supported or not.
+ *
+ *  Thus, we leave %offs64_ioctls uninitialized in 'libmtd_open()', and
+ *  initialize it later, when corresponding libmtd function is used, and when
+ *  we actually have a device node and can invoke an ioctl command on it.
+ */
+struct libmtd
+{
+	char *sysfs_mtd;
+	char *mtd;
+	char *mtd_dev;
+	char *mtd_name;
+	char *mtd_type;
+	char *mtd_eb_size;
+	char *mtd_size;
+	char *mtd_min_io_size;
+	char *mtd_subpage_size;
+	char *mtd_oob_size;
+	char *mtd_region_cnt;
+	char *mtd_flags;
+	unsigned int sysfs_supported:1;
+	unsigned int offs64_ioctls:2;
+};
+
+int legacy_libmtd_open(void);
+int legacy_dev_present(int mtd_num);
+int legacy_mtd_get_info(struct mtd_info *info);
+int legacy_get_dev_info(const char *node, struct mtd_dev_info *mtd);
+int legacy_get_dev_info1(int dev_num, struct mtd_dev_info *mtd);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !__LIBMTD_INT_H__ */
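
The lazy @offs64_ioctls probing documented above boils down to a
try-once-then-fall-back idiom: issue the 64-bit ioctl, and only downgrade the
cached state when the kernel answers ENOTTY. A condensed sketch of the
pattern (the real logic lives in mtd_erase() and do_oob_op() below; this is
not a third call site):

    ret = ioctl(fd, MEMERASE64, &ei64);
    if (ret == 0)
            return 0;                       /* 64-bit path works */
    if (errno != ENOTTY ||
        lib->offs64_ioctls != OFFS64_IOCTLS_UNKNOWN)
            return -1;                      /* real I/O error */
    /* old kernel (< 2.6.31): remember and retry with the 32-bit ioctls */
    lib->offs64_ioctls = OFFS64_IOCTLS_NOT_SUPPORTED;
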
diff --git a/src/support/libnandapi/inc/mtk_device_wrap.h b/src/support/libnandapi/inc/mtk_device_wrap.h
new file mode 100644
index 0000000..cd4da87
--- /dev/null
+++ b/src/support/libnandapi/inc/mtk_device_wrap.h
@@ -0,0 +1,30 @@
+
+#ifndef __MTK_DEVICE_WRAP_H__
+#define __MTK_DEVICE_WRAP_H__
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#define BOOTDEV_TYPE_NAND 1
+#define BOOTDEV_TYPE_EMMC 2
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int mtk_device_wrap_m_open(const char *pathname, int flags, mode_t mode);
+int mtk_device_wrap_open(const char *pathname, int flags);
+off_t mtk_device_wrap_seek(int fd, off_t offset, int whence);
+off64_t mtk_device_wrap_seek64(int fd, off64_t offset, int whence);
+ssize_t mtk_device_wrap_read(int fd, void *buf, size_t count);
+ssize_t mtk_device_wrap_write(int fd, void *buf, size_t count);
+ssize_t mtk_device_wrap_write_force(int fd, void *buf, size_t count);
+int mtk_device_wrap_close(int fd);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MTK_DEVICE_WRAP_H__ */
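
The wrapper API mirrors the POSIX file calls one-for-one, so call sites can
switch between raw block-device access and the NAND-aware path without
restructuring. A hedged usage sketch (the node path and page size are
illustrative only):

    char page[2048];
    int fd = mtk_device_wrap_open("/dev/mtd3", O_RDWR);

    if (fd >= 0) {
            mtk_device_wrap_seek(fd, 0, SEEK_SET);
            mtk_device_wrap_read(fd, page, sizeof(page));
            mtk_device_wrap_close(fd);
    }
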
diff --git a/src/support/libnandapi/inc/nand_api.h b/src/support/libnandapi/inc/nand_api.h
new file mode 100644
index 0000000..1d365ab
--- /dev/null
+++ b/src/support/libnandapi/inc/nand_api.h
@@ -0,0 +1,25 @@
+#ifndef __NAND_API_H__
+#define __NAND_API_H__
+
+#include <sys/types.h>	/* off_t, off64_t, size_t, ssize_t used below */
+
+#define PROGRAM_NAME "libnandapi"
+
+int nand_open(const char *pathname, int flags);
+
+off_t nand_seek(int fd, off_t offset, int whence);
+
+off64_t nand_seek64(int fd, off64_t offset, int whence);
+
+ssize_t nand_read(int fd, void *buf, size_t count);
+
+ssize_t nand_write(int fd, void *buf, size_t count);
+
+ssize_t nand_write_force(int fd, void *buf, size_t count);
+
+int nand_close(int fd);
+
+off_t nand_query_offset(int fd);
+
+size_t nand_query_blk_size(int fd);
+
+#endif
+
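nand_query_offset() and nand_query_blk_size() are the only departures from
the POSIX surface; they exist so a caller can align read-modify-write cycles
to the erase block, which is exactly what mtk_device_wrap_write_force() does
later in this change. A sketch of that alignment arithmetic (values are
illustrative, assuming a power-of-two block size):

    off_t  pos  = nand_query_offset(fd);    /* current logical offset */
    size_t blk  = nand_query_blk_size(fd);  /* erase-block size, e.g. 128 KiB */
    off_t  base = pos / blk * blk;          /* block-aligned start */
    size_t skew = (size_t)(pos - base);     /* bytes already into that block */
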
diff --git a/src/support/libnandapi/inc/xalloc.h b/src/support/libnandapi/inc/xalloc.h
new file mode 100644
index 0000000..532b80f
--- /dev/null
+++ b/src/support/libnandapi/inc/xalloc.h
@@ -0,0 +1,106 @@
+/*
+ * memory wrappers
+ *
+ * Copyright (c) Artem Bityutskiy, 2007, 2008
+ * Copyright 2001, 2002 Red Hat, Inc.
+ *           2001 David A. Schleef <ds@lineo.com>
+ *           2002 Axis Communications AB
+ *           2001, 2002 Erik Andersen <andersen@codepoet.org>
+ *           2004 University of Szeged, Hungary
+ *           2006 KaiGai Kohei <kaigai@ak.jp.nec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __MTD_UTILS_XALLOC_H__
+#define __MTD_UTILS_XALLOC_H__
+
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+
+/*
+ * Mark these functions as unused so that gcc does not emit warnings
+ * when people include this header but don't use every function.
+ */
+
+__attribute__((unused))
+static void *xmalloc(size_t size)
+{
+	void *ptr = malloc(size);
+
+	if (ptr == NULL && size != 0)
+		sys_errmsg_die("out of memory");
+	return ptr;
+}
+
+__attribute__((unused))
+static void *xcalloc(size_t nmemb, size_t size)
+{
+	void *ptr = calloc(nmemb, size);
+
+	if (ptr == NULL && nmemb != 0 && size != 0)
+		sys_errmsg_die("out of memory");
+	return ptr;
+}
+
+__attribute__((unused))
+static void *xzalloc(size_t size)
+{
+	return xcalloc(1, size);
+}
+
+__attribute__((unused))
+static void *xrealloc(void *ptr, size_t size)
+{
+	ptr = realloc(ptr, size);
+	if (ptr == NULL && size != 0)
+		sys_errmsg_die("out of memory");
+	return ptr;
+}
+
+__attribute__((unused))
+static char *xstrdup(const char *s)
+{
+	char *t;
+
+	if (s == NULL)
+		return NULL;
+	t = strdup(s);
+	if (t == NULL)
+		sys_errmsg_die("out of memory");
+	return t;
+}
+
+#ifdef _GNU_SOURCE
+
+__attribute__((unused))
+static int xasprintf(char **strp, const char *fmt, ...)
+{
+	int cnt;
+	va_list ap;
+
+	va_start(ap, fmt);
+	cnt = vasprintf(strp, fmt, ap);
+	va_end(ap);
+
+	if (cnt == -1)
+		sys_errmsg_die("out of memory");
+
+	return cnt;
+}
+#endif
+
+#endif /* !__MTD_UTILS_XALLOC_H__ */
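
Because every wrapper above terminates the process via sys_errmsg_die()
rather than returning NULL, callers can skip per-allocation checks; the
header is expected to be included after common.h, which supplies
sys_errmsg_die(). A short sketch:

    #include "common.h"   /* provides sys_errmsg_die() */
    #include "xalloc.h"

    char *sysfs = xstrdup("/sys/class/mtd");  /* never NULL */
    void *page  = xzalloc(2048);              /* zero-filled, or the process dies */

    /* ... use sysfs and page ... */
    free(page);
    free(sysfs);
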
diff --git a/src/support/libnandapi/src/libmtd.c b/src/support/libnandapi/src/libmtd.c
new file mode 100644
index 0000000..516e4e7
--- /dev/null
+++ b/src/support/libnandapi/src/libmtd.c
@@ -0,0 +1,1423 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Artem Bityutskiy
+ *
+ * MTD library.
+ */
+
+#include <limits.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <dirent.h>
+#include <sys/types.h>
+#include <sys/sysmacros.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <inttypes.h>
+
+#include <mtd/mtd-user.h>
+#include "../inc/libmtd.h"
+
+#include "../inc/libmtd_int.h"
+#include "../inc/common.h"
+
+/**
+ * mkpath - compose full path from 2 given components.
+ * @path: the first component
+ * @name: the second component
+ *
+ * This function returns the resulting path in case of success and %NULL in
+ * case of failure.
+ */
+static char *mkpath(const char *path, const char *name)
+{
+	char *n;
+	size_t len1 = strlen(path);
+	size_t len2 = strlen(name);
+
+	n = xmalloc(len1 + len2 + 2);
+
+	memcpy(n, path, len1);
+	if (n[len1 - 1] != '/')
+		n[len1++] = '/';
+
+	memcpy(n + len1, name, len2 + 1);
+	return n;
+}
+
+/**
+ * read_data - read data from a file.
+ * @file: the file to read from
+ * @buf: the buffer to read to
+ * @buf_len: buffer length
+ *
+ * This function returns the number of read bytes in case of success and %-1
+ * in case of failure. Note, if the file contains more than @buf_len bytes of
+ * data, this function fails with %EINVAL error code.
+ */
+static int read_data(const char *file, void *buf, int buf_len)
+{
+	int fd, rd, tmp, tmp1;
+
+	fd = open(file, O_RDONLY | O_CLOEXEC);
+	if (fd == -1)
+		return -1;
+
+	rd = read(fd, buf, buf_len);
+	if (rd == -1) {
+		sys_errmsg("cannot read \"%s\"", file);
+		goto out_error;
+	}
+
+	if (rd == buf_len) {
+		errmsg("contents of \"%s\" is too long", file);
+		errno = EINVAL;
+		goto out_error;
+	}
+
+	((char *)buf)[rd] = '\0';
+
+	/* Make sure all data is read */
+	tmp1 = read(fd, &tmp, 1);
+	if (tmp1 == -1) {
+		sys_errmsg("cannot read \"%s\"", file);
+		goto out_error;
+	}
+	if (tmp1) {
+		errmsg("file \"%s\" contains too much data (> %d bytes)",
+		       file, buf_len);
+		errno = EINVAL;
+		goto out_error;
+	}
+
+	if (close(fd)) {
+		sys_errmsg("close failed on \"%s\"", file);
+		return -1;
+	}
+
+	return rd;
+
+out_error:
+	close(fd);
+	return -1;
+}
+
+/**
+ * read_major - read major and minor numbers from a file.
+ * @file: name of the file to read from
+ * @major: major number is returned here
+ * @minor: minor number is returned here
+ *
+ * This function returns %0 in case of success, and %-1 in case of failure.
+ */
+static int read_major(const char *file, int *major, int *minor)
+{
+	int ret;
+	char buf[50];
+
+	ret = read_data(file, buf, 50);
+	if (ret < 0)
+		return ret;
+
+	ret = sscanf(buf, "%d:%d\n", major, minor);
+	if (ret != 2) {
+		errno = EINVAL;
+		return errmsg("\"%s\" does not have major:minor format", file);
+	}
+
+	if (*major < 0 || *minor < 0) {
+		errno = EINVAL;
+		return errmsg("bad major:minor %d:%d in \"%s\"",
+			      *major, *minor, file);
+	}
+
+	return 0;
+}
+
+/**
+ * dev_get_major - get major and minor numbers of an MTD device.
+ * @lib: libmtd descriptor
+ * @mtd_num: MTD device number
+ * @major: major number is returned here
+ * @minor: minor number is returned here
+ *
+ * This function returns zero in case of success and %-1 in case of failure.
+ */
+static int dev_get_major(struct libmtd *lib, int mtd_num, int *major, int *minor)
+{
+	char file[strlen(lib->mtd_dev) + 50];
+
+	sprintf(file, lib->mtd_dev, mtd_num);
+	return read_major(file, major, minor);
+}
+
+/**
+ * dev_read_data - read data from an MTD device's sysfs file.
+ * @patt: file pattern to read from
+ * @mtd_num: MTD device number
+ * @buf: buffer to read to
+ * @buf_len: buffer length
+ *
+ * This function returns number of read bytes in case of success and %-1 in
+ * case of failure.
+ */
+static int dev_read_data(const char *patt, int mtd_num, void *buf, int buf_len)
+{
+	char file[strlen(patt) + 100];
+
+	sprintf(file, patt, mtd_num);
+	return read_data(file, buf, buf_len);
+}
+
+/**
+ * read_hex_ll - read a hex 'long long' value from a file.
+ * @file: the file to read from
+ * @value: the result is stored here
+ *
+ * This function reads file @file and interprets its contents as hexadecimal
+ * 'long long' integer. If this is not true, it fails with %EINVAL error code.
+ * Returns %0 in case of success and %-1 in case of failure.
+ */
+static int read_hex_ll(const char *file, long long *value)
+{
+	int fd, rd;
+	char buf[50];
+
+	fd = open(file, O_RDONLY | O_CLOEXEC);
+	if (fd == -1)
+		return -1;
+
+	rd = read(fd, buf, sizeof(buf));
+	if (rd == -1) {
+		sys_errmsg("cannot read \"%s\"", file);
+		goto out_error;
+	}
+	if (rd == sizeof(buf)) {
+		errmsg("contents of \"%s\" is too long", file);
+		errno = EINVAL;
+		goto out_error;
+	}
+	buf[rd] = '\0';
+
+	if (sscanf(buf, "%llx\n", value) != 1) {
+		errmsg("cannot read integer from \"%s\"\n", file);
+		errno = EINVAL;
+		goto out_error;
+	}
+
+	if (*value < 0) {
+		errmsg("negative value %lld in \"%s\"", *value, file);
+		errno = EINVAL;
+		goto out_error;
+	}
+
+	if (close(fd))
+		return sys_errmsg("close failed on \"%s\"", file);
+
+	return 0;
+
+out_error:
+	close(fd);
+	return -1;
+}
+
+/**
+ * read_pos_ll - read a positive 'long long' value from a file.
+ * @file: the file to read from
+ * @value: the result is stored here
+ *
+ * This function reads file @file and interprets its contents as a positive
+ * 'long long' integer. If this is not true, it fails with %EINVAL error code.
+ * Returns %0 in case of success and %-1 in case of failure.
+ */
+static int read_pos_ll(const char *file, long long *value)
+{
+	int fd, rd;
+	char buf[50];
+
+	fd = open(file, O_RDONLY | O_CLOEXEC);
+	if (fd == -1)
+		return -1;
+
+	rd = read(fd, buf, 50);
+	if (rd == -1) {
+		sys_errmsg("cannot read \"%s\"", file);
+		goto out_error;
+	}
+	if (rd == 50) {
+		errmsg("contents of \"%s\" is too long", file);
+		errno = EINVAL;
+		goto out_error;
+	}
+
+	if (sscanf(buf, "%lld\n", value) != 1) {
+		errmsg("cannot read integer from \"%s\"\n", file);
+		errno = EINVAL;
+		goto out_error;
+	}
+
+	if (*value < 0) {
+		errmsg("negative value %lld in \"%s\"", *value, file);
+		errno = EINVAL;
+		goto out_error;
+	}
+
+	if (close(fd))
+		return sys_errmsg("close failed on \"%s\"", file);
+
+	return 0;
+
+out_error:
+	close(fd);
+	return -1;
+}
+
+/**
+ * read_hex_int - read a hex 'int' value from a file.
+ * @file: the file to read from
+ * @value: the result is stored here
+ *
+ * This function is the same as 'read_hex_ll()', but it reads an 'int'
+ * value, not 'long long'.
+ */
+static int read_hex_int(const char *file, int *value)
+{
+	long long res;
+
+	if (read_hex_ll(file, &res))
+		return -1;
+
+	/* Make sure the value has correct range */
+	if (res > INT_MAX || res < INT_MIN) {
+		errmsg("value %lld read from file \"%s\" is out of range",
+		       res, file);
+		errno = EINVAL;
+		return -1;
+	}
+
+	*value = res;
+	return 0;
+}
+
+/**
+ * read_pos_int - read a positive 'int' value from a file.
+ * @file: the file to read from
+ * @value: the result is stored here
+ *
+ * This function is the same as 'read_pos_ll()', but it reads an 'int'
+ * value, not 'long long'.
+ */
+static int read_pos_int(const char *file, int *value)
+{
+	long long res;
+
+	if (read_pos_ll(file, &res))
+		return -1;
+
+	/* Make sure the value is not too big */
+	if (res > INT_MAX) {
+		errmsg("value %lld read from file \"%s\" is out of range",
+		       res, file);
+		errno = EINVAL;
+		return -1;
+	}
+
+	*value = res;
+	return 0;
+}
+
+/**
+ * dev_read_hex_int - read an hex 'int' value from an MTD device sysfs file.
+ * @patt: file pattern to read from
+ * @mtd_num: MTD device number
+ * @value: the result is stored here
+ *
+ * This function returns %0 in case of success and %-1 in case of failure.
+ */
+static int dev_read_hex_int(const char *patt, int mtd_num, int *value)
+{
+	char file[strlen(patt) + 50];
+
+	sprintf(file, patt, mtd_num);
+	return read_hex_int(file, value);
+}
+
+/**
+ * dev_read_pos_int - read a positive 'int' value from an MTD device sysfs file.
+ * @patt: file pattern to read from
+ * @mtd_num: MTD device number
+ * @value: the result is stored here
+ *
+ * This function returns %0 in case of success and %-1 in case of failure.
+ */
+static int dev_read_pos_int(const char *patt, int mtd_num, int *value)
+{
+	char file[strlen(patt) + 50];
+
+	sprintf(file, patt, mtd_num);
+	return read_pos_int(file, value);
+}
+
+/**
+ * dev_read_pos_ll - read a positive 'long long' value from an MTD device sysfs file.
+ * @patt: file pattern to read from
+ * @mtd_num: MTD device number
+ * @value: the result is stored here
+ *
+ * This function returns %0 in case of success and %-1 in case of failure.
+ */
+static int dev_read_pos_ll(const char *patt, int mtd_num, long long *value)
+{
+	char file[strlen(patt) + 50];
+
+	sprintf(file, patt, mtd_num);
+	return read_pos_ll(file, value);
+}
+
+/**
+ * type_str2int - convert MTD device type to integer.
+ * @str: MTD device type string to convert
+ *
+ * This function converts MTD device type string @str, read from sysfs, into an
+ * integer.
+ */
+static int type_str2int(const char *str)
+{
+	if (!strcmp(str, "nand"))
+		return MTD_NANDFLASH;
+	if (!strcmp(str, "mlc-nand"))
+		return MTD_MLCNANDFLASH;
+	if (!strcmp(str, "nor"))
+		return MTD_NORFLASH;
+	if (!strcmp(str, "rom"))
+		return MTD_ROM;
+	if (!strcmp(str, "absent"))
+		return MTD_ABSENT;
+	if (!strcmp(str, "dataflash"))
+		return MTD_DATAFLASH;
+	if (!strcmp(str, "ram"))
+		return MTD_RAM;
+	if (!strcmp(str, "ubi"))
+		return MTD_UBIVOLUME;
+	return -1;
+}
+
+/**
+ * dev_node2num - find MTD device number by its character device node.
+ * @lib: MTD library descriptor
+ * @node: name of the MTD device node
+ * @mtd_num: MTD device number is returned here
+ *
+ * This function returns %0 in case of success and %-1 in case of failure.
+ */
+static int dev_node2num(struct libmtd *lib, const char *node, int *mtd_num)
+{
+	struct stat st;
+	int i, mjr, mnr;
+	struct mtd_info info;
+
+	if (stat(node, &st))
+		return sys_errmsg("cannot get information about \"%s\"", node);
+
+	if (!S_ISCHR(st.st_mode)) {
+		errmsg("\"%s\" is not a character device", node);
+		errno = EINVAL;
+		return -1;
+	}
+
+	mjr = major(st.st_rdev);
+	mnr = minor(st.st_rdev);
+
+	if (mtd_get_info((libmtd_t)lib, &info))
+		return -1;
+
+	for (i = info.lowest_mtd_num; i <= info.highest_mtd_num; i++) {
+		int mjr1, mnr1, ret;
+
+		ret = dev_get_major(lib, i, &mjr1, &mnr1);
+		if (ret) {
+			if (errno == ENOENT)
+				continue;
+			if (!errno)
+				break;
+			return -1;
+		}
+
+		if (mjr1 == mjr && mnr1 == mnr) {
+			errno = 0;
+			*mtd_num = i;
+			return 0;
+		}
+	}
+
+	errno = ENODEV;
+	return -1;
+}
+
+/**
+ * sysfs_is_supported - check whether the MTD sub-system supports sysfs.
+ * @lib: MTD library descriptor
+ *
+ * The Linux kernel MTD subsystem gained sysfs support starting from kernel
+ * 2.6.30 and libmtd tries to use sysfs interface if possible, because the NAND
+ * sub-page size is available there (and not available at all in pre-sysfs
+ * kernels).
+ *
+ * Very old kernels did not have "/sys/class/mtd" directory. Not very old
+ * kernels (e.g., 2.6.29) did have "/sys/class/mtd/mtdX" directories, but there
+ * were no files there, e.g., the "name" file was not present. So all we can do
+ * is to check for a "/sys/class/mtd/mtdX/name" file. But this is not a
+ * reliable check, because if this is a new system with no MTD devices - we'll
+ * treat it as a pre-sysfs system.
+ */
+static int sysfs_is_supported(struct libmtd *lib)
+{
+	int fd, num = -1;
+	DIR *sysfs_mtd;
+	char file[strlen(lib->mtd_name) + 10];
+
+	sysfs_mtd = opendir(lib->sysfs_mtd);
+	if (!sysfs_mtd) {
+		if (errno == ENOENT) {
+			errno = 0;
+			return 0;
+		}
+		return sys_errmsg("cannot open \"%s\"", lib->sysfs_mtd);
+	}
+
+	/*
+	 * First of all find an "mtdX" directory. This is needed because there
+	 * may be, for example, mtd1 but no mtd0.
+	 */
+	while (1) {
+		int ret, mtd_num;
+		char tmp_buf[256];
+		struct dirent *dirent;
+
+		dirent = readdir(sysfs_mtd);
+		if (!dirent)
+			break;
+
+		if (strlen(dirent->d_name) >= 255) {
+			errmsg("invalid entry in %s: \"%s\"",
+			       lib->sysfs_mtd, dirent->d_name);
+			errno = EINVAL;
+			closedir(sysfs_mtd);
+			return -1;
+		}
+
+		ret = sscanf(dirent->d_name, MTD_NAME_PATT"%s",
+			     &mtd_num, tmp_buf);
+		if (ret == 1) {
+			num = mtd_num;
+			break;
+		}
+	}
+
+	if (closedir(sysfs_mtd))
+		return sys_errmsg("closedir failed on \"%s\"", lib->sysfs_mtd);
+
+	if (num == -1)
+		/* No mtd device, treat this as pre-sysfs system */
+		return 0;
+
+	sprintf(file, lib->mtd_name, num);
+	fd = open(file, O_RDONLY | O_CLOEXEC);
+	if (fd == -1)
+		return 0;
+
+	if (close(fd)) {
+		sys_errmsg("close failed on \"%s\"", file);
+		return -1;
+	}
+
+	return 1;
+}
+
+libmtd_t libmtd_open(void)
+{
+	struct libmtd *lib;
+
+	lib = xzalloc(sizeof(*lib));
+
+	lib->offs64_ioctls = OFFS64_IOCTLS_UNKNOWN;
+
+	lib->sysfs_mtd = mkpath("/sys", SYSFS_MTD);
+	if (!lib->sysfs_mtd)
+		goto out_error;
+
+	lib->mtd = mkpath(lib->sysfs_mtd, MTD_NAME_PATT);
+	if (!lib->mtd)
+		goto out_error;
+
+	lib->mtd_name = mkpath(lib->mtd, MTD_NAME);
+	if (!lib->mtd_name)
+		goto out_error;
+
+	if (!sysfs_is_supported(lib)) {
+		free(lib->mtd);
+		free(lib->sysfs_mtd);
+		free(lib->mtd_name);
+		lib->mtd_name = lib->mtd = lib->sysfs_mtd = NULL;
+		return lib;
+	}
+
+	lib->mtd_dev = mkpath(lib->mtd, MTD_DEV);
+	if (!lib->mtd_dev)
+		goto out_error;
+
+	lib->mtd_type = mkpath(lib->mtd, MTD_TYPE);
+	if (!lib->mtd_type)
+		goto out_error;
+
+	lib->mtd_eb_size = mkpath(lib->mtd, MTD_EB_SIZE);
+	if (!lib->mtd_eb_size)
+		goto out_error;
+
+	lib->mtd_size = mkpath(lib->mtd, MTD_SIZE);
+	if (!lib->mtd_size)
+		goto out_error;
+
+	lib->mtd_min_io_size = mkpath(lib->mtd, MTD_MIN_IO_SIZE);
+	if (!lib->mtd_min_io_size)
+		goto out_error;
+
+	lib->mtd_subpage_size = mkpath(lib->mtd, MTD_SUBPAGE_SIZE);
+	if (!lib->mtd_subpage_size)
+		goto out_error;
+
+	lib->mtd_oob_size = mkpath(lib->mtd, MTD_OOB_SIZE);
+	if (!lib->mtd_oob_size)
+		goto out_error;
+
+	lib->mtd_region_cnt = mkpath(lib->mtd, MTD_REGION_CNT);
+	if (!lib->mtd_region_cnt)
+		goto out_error;
+
+	lib->mtd_flags = mkpath(lib->mtd, MTD_FLAGS);
+	if (!lib->mtd_flags)
+		goto out_error;
+
+	lib->sysfs_supported = 1;
+	return lib;
+
+out_error:
+	libmtd_close((libmtd_t)lib);
+	return NULL;
+}
+
+void libmtd_close(libmtd_t desc)
+{
+	struct libmtd *lib = (struct libmtd *)desc;
+
+	free(lib->mtd_flags);
+	free(lib->mtd_region_cnt);
+	free(lib->mtd_oob_size);
+	free(lib->mtd_subpage_size);
+	free(lib->mtd_min_io_size);
+	free(lib->mtd_size);
+	free(lib->mtd_eb_size);
+	free(lib->mtd_type);
+	free(lib->mtd_dev);
+	free(lib->mtd_name);
+	free(lib->mtd);
+	free(lib->sysfs_mtd);
+	free(lib);
+}
+
+int mtd_dev_present(libmtd_t desc, int mtd_num)
+{
+	struct stat st;
+	struct libmtd *lib = (struct libmtd *)desc;
+
+	if (!lib->sysfs_supported) {
+		return legacy_dev_present(mtd_num) == 1;
+	} else {
+		char file[strlen(lib->mtd) + 10];
+
+		sprintf(file, lib->mtd, mtd_num);
+		return !stat(file, &st);
+	}
+}
+
+int mtd_get_info(libmtd_t desc, struct mtd_info *info)
+{
+	DIR *sysfs_mtd;
+	struct dirent *dirent;
+	struct libmtd *lib = (struct libmtd *)desc;
+
+	memset(info, 0, sizeof(struct mtd_info));
+
+	if (!lib->sysfs_supported)
+		return legacy_mtd_get_info(info);
+
+	info->sysfs_supported = 1;
+
+	/*
+	 * We have to scan the MTD sysfs directory to identify how many MTD
+	 * devices are present.
+	 */
+	sysfs_mtd = opendir(lib->sysfs_mtd);
+	if (!sysfs_mtd) {
+		if (errno == ENOENT) {
+			errno = ENODEV;
+			return -1;
+		}
+		return sys_errmsg("cannot open \"%s\"", lib->sysfs_mtd);
+	}
+
+	info->lowest_mtd_num = INT_MAX;
+	while (1) {
+		int mtd_num, ret;
+		char tmp_buf[256];
+
+		errno = 0;
+		dirent = readdir(sysfs_mtd);
+		if (!dirent)
+			break;
+
+		if (strlen(dirent->d_name) >= 255) {
+			errmsg("invalid entry in %s: \"%s\"",
+			       lib->sysfs_mtd, dirent->d_name);
+			errno = EINVAL;
+			goto out_close;
+		}
+
+		ret = sscanf(dirent->d_name, MTD_NAME_PATT"%s",
+			     &mtd_num, tmp_buf);
+		if (ret == 1) {
+			info->mtd_dev_cnt += 1;
+			if (mtd_num > info->highest_mtd_num)
+				info->highest_mtd_num = mtd_num;
+			if (mtd_num < info->lowest_mtd_num)
+				info->lowest_mtd_num = mtd_num;
+		}
+	}
+
+	if (!dirent && errno) {
+		sys_errmsg("readdir failed on \"%s\"", lib->sysfs_mtd);
+		goto out_close;
+	}
+
+	if (closedir(sysfs_mtd))
+		return sys_errmsg("closedir failed on \"%s\"", lib->sysfs_mtd);
+
+	if (info->lowest_mtd_num == INT_MAX)
+		info->lowest_mtd_num = 0;
+
+	return 0;
+
+out_close:
+	closedir(sysfs_mtd);
+	return -1;
+}
+
+int mtd_get_dev_info1(libmtd_t desc, int mtd_num, struct mtd_dev_info *mtd)
+{
+	int ret;
+	struct libmtd *lib = (struct libmtd *)desc;
+
+	memset(mtd, 0, sizeof(struct mtd_dev_info));
+	mtd->mtd_num = mtd_num;
+
+	if (!mtd_dev_present(desc, mtd_num)) {
+		errno = ENODEV;
+		return -1;
+	} else if (!lib->sysfs_supported)
+		return legacy_get_dev_info1(mtd_num, mtd);
+
+	if (dev_get_major(lib, mtd_num, &mtd->major, &mtd->minor))
+		return -1;
+
+	ret = dev_read_data(lib->mtd_name, mtd_num, &mtd->name,
+			    MTD_NAME_MAX + 1);
+	if (ret < 0)
+		return -1;
+	((char *)mtd->name)[ret - 1] = '\0';
+
+	ret = dev_read_data(lib->mtd_type, mtd_num, &mtd->type_str,
+			    MTD_TYPE_MAX + 1);
+	if (ret < 0)
+		return -1;
+	((char *)mtd->type_str)[ret - 1] = '\0';
+
+	if (dev_read_pos_int(lib->mtd_eb_size, mtd_num, &mtd->eb_size))
+		return -1;
+	if (dev_read_pos_ll(lib->mtd_size, mtd_num, &mtd->size))
+		return -1;
+	if (dev_read_pos_int(lib->mtd_min_io_size, mtd_num, &mtd->min_io_size))
+		return -1;
+	if (dev_read_pos_int(lib->mtd_subpage_size, mtd_num, &mtd->subpage_size))
+		return -1;
+	if (dev_read_pos_int(lib->mtd_oob_size, mtd_num, &mtd->oob_size))
+		return -1;
+	if (dev_read_pos_int(lib->mtd_region_cnt, mtd_num, &mtd->region_cnt))
+		return -1;
+	if (dev_read_hex_int(lib->mtd_flags, mtd_num, &ret))
+		return -1;
+	mtd->writable = !!(ret & MTD_WRITEABLE);
+
+	mtd->eb_cnt = mtd->size / mtd->eb_size;
+	mtd->type = type_str2int(mtd->type_str);
+	mtd->bb_allowed = !!(mtd->type == MTD_NANDFLASH ||
+				mtd->type == MTD_MLCNANDFLASH);
+
+	return 0;
+}
+
+int mtd_get_dev_info(libmtd_t desc, const char *node, struct mtd_dev_info *mtd)
+{
+	int mtd_num;
+	struct libmtd *lib = (struct libmtd *)desc;
+
+	if (!lib->sysfs_supported)
+		return legacy_get_dev_info(node, mtd);
+
+	if (dev_node2num(lib, node, &mtd_num))
+		return -1;
+
+	return mtd_get_dev_info1(desc, mtd_num, mtd);
+}
+
+static inline int mtd_ioctl_error(const struct mtd_dev_info *mtd, int eb,
+				  const char *sreq)
+{
+	return sys_errmsg("%s ioctl failed for eraseblock %d (mtd%d)",
+			  sreq, eb, mtd->mtd_num);
+}
+
+static int mtd_valid_erase_block(const struct mtd_dev_info *mtd, int eb)
+{
+	if (eb < 0 || eb >= mtd->eb_cnt) {
+		errmsg("bad eraseblock number %d, mtd%d has %d eraseblocks",
+		       eb, mtd->mtd_num, mtd->eb_cnt);
+		errno = EINVAL;
+		return -1;
+	}
+	return 0;
+}
+
+static int mtd_xlock(const struct mtd_dev_info *mtd, int fd, int eb, int req,
+		     const char *sreq)
+{
+	int ret;
+	struct erase_info_user ei;
+
+	ret = mtd_valid_erase_block(mtd, eb);
+	if (ret)
+		return ret;
+
+	ei.start = eb * mtd->eb_size;
+	ei.length = mtd->eb_size;
+
+	ret = ioctl(fd, req, &ei);
+	if (ret < 0)
+		return mtd_ioctl_error(mtd, eb, sreq);
+
+	return 0;
+}
+#define mtd_xlock(mtd, fd, eb, req) mtd_xlock(mtd, fd, eb, req, #req)
+
+int mtd_lock(const struct mtd_dev_info *mtd, int fd, int eb)
+{
+	return mtd_xlock(mtd, fd, eb, MEMLOCK);
+}
+
+int mtd_unlock(const struct mtd_dev_info *mtd, int fd, int eb)
+{
+	return mtd_xlock(mtd, fd, eb, MEMUNLOCK);
+}
+
+int mtd_erase(libmtd_t desc, const struct mtd_dev_info *mtd, int fd, int eb)
+{
+	int ret;
+	struct libmtd *lib = (struct libmtd *)desc;
+	struct erase_info_user64 ei64;
+	struct erase_info_user ei;
+
+	ret = mtd_valid_erase_block(mtd, eb);
+	if (ret)
+		return ret;
+
+	ei64.start = (__u64)eb * mtd->eb_size;
+	ei64.length = mtd->eb_size;
+
+	if (lib->offs64_ioctls == OFFS64_IOCTLS_SUPPORTED ||
+	    lib->offs64_ioctls == OFFS64_IOCTLS_UNKNOWN) {
+		ret = ioctl(fd, MEMERASE64, &ei64);
+		if (ret == 0)
+			return ret;
+
+		if (errno != ENOTTY ||
+		    lib->offs64_ioctls != OFFS64_IOCTLS_UNKNOWN)
+			return mtd_ioctl_error(mtd, eb, "MEMERASE64");
+
+		/*
+		 * MEMERASE64 support was added in kernel version 2.6.31, so
+		 * probably we are working with older kernel and this ioctl is
+		 * not supported.
+		 */
+		lib->offs64_ioctls = OFFS64_IOCTLS_NOT_SUPPORTED;
+	}
+
+	if (ei64.start + ei64.length > 0xFFFFFFFF) {
+		errmsg("this system can address only %u eraseblocks",
+		       0xFFFFFFFFU / mtd->eb_size);
+		errno = EINVAL;
+		return -1;
+	}
+
+	ei.start = ei64.start;
+	ei.length = ei64.length;
+	ret = ioctl(fd, MEMERASE, &ei);
+	if (ret < 0)
+		return mtd_ioctl_error(mtd, eb, "MEMERASE");
+	return 0;
+}
+
+int mtd_regioninfo(int fd, int regidx, struct region_info_user *reginfo)
+{
+	int ret;
+
+	if (regidx < 0) {
+		errno = ENODEV;
+		return -1;
+	}
+
+	reginfo->regionindex = regidx;
+
+	ret = ioctl(fd, MEMGETREGIONINFO, reginfo);
+	if (ret < 0)
+		return sys_errmsg("%s ioctl failed for erase region %d",
+			"MEMGETREGIONINFO", regidx);
+
+	return 0;
+}
+
+int mtd_is_locked(const struct mtd_dev_info *mtd, int fd, int eb)
+{
+	int ret;
+	erase_info_t ei;
+
+	ei.start = eb * mtd->eb_size;
+	ei.length = mtd->eb_size;
+
+	ret = ioctl(fd, MEMISLOCKED, &ei);
+	if (ret < 0) {
+		if (errno != ENOTTY && errno != EOPNOTSUPP)
+			return mtd_ioctl_error(mtd, eb, "MEMISLOCKED");
+		else
+			errno = EOPNOTSUPP;
+	}
+
+	return ret;
+}
+
+/* Patterns to write to a physical eraseblock when torturing it */
+static uint8_t patterns[] = {0xa5, 0x5a, 0x0};
+
+/**
+ * check_pattern - check if buffer contains only a certain byte pattern.
+ * @buf: buffer to check
+ * @patt: the pattern to check
+ * @size: buffer size in bytes
+ *
+ * This function returns %1 if there are only @patt bytes in @buf, and %0 if
+ * something else was also found.
+ */
+static int check_pattern(const void *buf, uint8_t patt, int size)
+{
+	int i;
+
+	for (i = 0; i < size; i++)
+		if (((const uint8_t *)buf)[i] != patt)
+			return 0;
+	return 1;
+}
+
+int mtd_torture(libmtd_t desc, const struct mtd_dev_info *mtd, int fd, int eb)
+{
+	int err, i, patt_count;
+	void *buf;
+
+	normsg("run torture test for PEB %d", eb);
+	patt_count = ARRAY_SIZE(patterns);
+
+	buf = xmalloc(mtd->eb_size);
+
+	for (i = 0; i < patt_count; i++) {
+		err = mtd_erase(desc, mtd, fd, eb);
+		if (err)
+			goto out;
+
+		/* Make sure the PEB contains only 0xFF bytes */
+		err = mtd_read(mtd, fd, eb, 0, buf, mtd->eb_size);
+		if (err)
+			goto out;
+
+		err = check_pattern(buf, 0xFF, mtd->eb_size);
+		if (err == 0) {
+			errmsg("erased PEB %d, but a non-0xFF byte found", eb);
+			errno = EIO;
+			err = -1;
+			goto out;
+		}
+
+		/* Write a pattern and check it */
+		memset(buf, patterns[i], mtd->eb_size);
+		err = mtd_write(desc, mtd, fd, eb, 0, buf, mtd->eb_size, NULL,
+				0, 0);
+		if (err)
+			goto out;
+
+		memset(buf, ~patterns[i], mtd->eb_size);
+		err = mtd_read(mtd, fd, eb, 0, buf, mtd->eb_size);
+		if (err)
+			goto out;
+
+		err = check_pattern(buf, patterns[i], mtd->eb_size);
+		if (err == 0) {
+			errmsg("pattern %x checking failed for PEB %d",
+				patterns[i], eb);
+			errno = EIO;
+			err = -1;
+			goto out;
+		}
+	}
+
+	err = 0;
+	normsg("PEB %d passed torture test, do not mark it as bad", eb);
+
+out:
+	free(buf);
+	return err;
+}
+
+int mtd_is_bad(const struct mtd_dev_info *mtd, int fd, int eb)
+{
+	int ret;
+	loff_t seek;
+
+	ret = mtd_valid_erase_block(mtd, eb);
+	if (ret)
+		return ret;
+
+	if (!mtd->bb_allowed)
+		return 0;
+
+	seek = (loff_t)eb * mtd->eb_size;
+	ret = ioctl(fd, MEMGETBADBLOCK, &seek);
+	if (ret == -1)
+		return mtd_ioctl_error(mtd, eb, "MEMGETBADBLOCK");
+	return ret;
+}
+
+int mtd_mark_bad(const struct mtd_dev_info *mtd, int fd, int eb)
+{
+	int ret;
+	loff_t seek;
+
+	if (!mtd->bb_allowed) {
+		errno = EINVAL;
+		return -1;
+	}
+
+	ret = mtd_valid_erase_block(mtd, eb);
+	if (ret)
+		return ret;
+
+	seek = (loff_t)eb * mtd->eb_size;
+	ret = ioctl(fd, MEMSETBADBLOCK, &seek);
+	if (ret == -1)
+		return mtd_ioctl_error(mtd, eb, "MEMSETBADBLOCK");
+	return 0;
+}
+
+int mtd_read(const struct mtd_dev_info *mtd, int fd, int eb, int offs,
+	     void *buf, int len)
+{
+	int ret, rd = 0;
+	off_t seek;
+
+	ret = mtd_valid_erase_block(mtd, eb);
+	if (ret)
+		return ret;
+
+	if (offs < 0 || offs + len > mtd->eb_size) {
+		errmsg("bad offset %d or length %d, mtd%d eraseblock size is %d",
+		       offs, len, mtd->mtd_num, mtd->eb_size);
+		errno = EINVAL;
+		return -1;
+	}
+
+	/* Seek to the beginning of the eraseblock */
+	seek = (off_t)eb * mtd->eb_size + offs;
+	if (lseek(fd, seek, SEEK_SET) != seek)
+		return sys_errmsg("cannot seek mtd%d to offset %"PRIdoff_t,
+				  mtd->mtd_num, seek);
+
+	while (rd < len) {
+		ret = read(fd, buf + rd, len - rd);
+		if (ret < 0)
+			return sys_errmsg("cannot read %d bytes from mtd%d (eraseblock %d, offset %d)",
+					  len, mtd->mtd_num, eb, offs);
+		rd += ret;
+	}
+
+	return 0;
+}
+
+static int legacy_auto_oob_layout(const struct mtd_dev_info *mtd, int fd,
+				  int ooblen, void *oob)
+{
+	struct nand_oobinfo old_oobinfo;
+	int start, len;
+	uint8_t *tmp_buf;
+
+	/* Read the current oob info */
+	if (ioctl(fd, MEMGETOOBSEL, &old_oobinfo))
+		return sys_errmsg("MEMGETOOBSEL failed");
+
+	tmp_buf = xmalloc(ooblen);
+	memcpy(tmp_buf, oob, ooblen);
+
+	/*
+	 * We use autoplacement and have the oobinfo with the autoplacement
+	 * information from the kernel available
+	 */
+	if (old_oobinfo.useecc == MTD_NANDECC_AUTOPLACE) {
+		int i, tags_pos = 0;
+		for (i = 0; old_oobinfo.oobfree[i][1]; i++) {
+			/* Set the reserved bytes to 0xff */
+			start = old_oobinfo.oobfree[i][0];
+			len = old_oobinfo.oobfree[i][1];
+			memcpy(oob + start, tmp_buf + tags_pos, len);
+			tags_pos += len;
+		}
+	} else {
+		/* Set at least the ecc byte positions to 0xff */
+		start = old_oobinfo.eccbytes;
+		len = mtd->oob_size - start;
+		memcpy(oob + start, tmp_buf + start, len);
+	}
+
+	free(tmp_buf);
+	return 0;
+}
+
+int mtd_write(libmtd_t desc, const struct mtd_dev_info *mtd, int fd, int eb,
+	      int offs, void *data, int len, void *oob, int ooblen,
+	      uint8_t mode)
+{
+	int ret;
+	off_t seek;
+	struct mtd_write_req ops;
+
+	ret = mtd_valid_erase_block(mtd, eb);
+	if (ret)
+		return ret;
+
+	if (offs < 0 || offs + len > mtd->eb_size) {
+		errmsg("bad offset %d or length %d, mtd%d eraseblock size is %d",
+		       offs, len, mtd->mtd_num, mtd->eb_size);
+		errno = EINVAL;
+		return -1;
+	}
+	if (offs % mtd->subpage_size) {
+		errmsg("write offset %d is not aligned to mtd%d min. I/O size %d",
+		       offs, mtd->mtd_num, mtd->subpage_size);
+		errno = EINVAL;
+		return -1;
+	}
+	if (len % mtd->subpage_size) {
+		errmsg("write length %d is not aligned to mtd%d min. I/O size %d",
+		       len, mtd->mtd_num, mtd->subpage_size);
+		errno = EINVAL;
+		return -1;
+	}
+
+	/* Calculate seek address */
+	seek = (off_t)eb * mtd->eb_size + offs;
+
+	if (oob) {
+		ops.start = seek;
+		ops.len = len;
+		ops.ooblen = ooblen;
+		ops.usr_data = (uint64_t)(unsigned long)data;
+		ops.usr_oob = (uint64_t)(unsigned long)oob;
+		ops.mode = mode;
+
+		ret = ioctl(fd, MEMWRITE, &ops);
+		if (ret == 0)
+			return 0;
+		else if (errno != ENOTTY && errno != EOPNOTSUPP)
+			return mtd_ioctl_error(mtd, eb, "MEMWRITE");
+
+		/* Fall back to old OOB ioctl() if necessary */
+		if (mode == MTD_OPS_AUTO_OOB)
+			if (legacy_auto_oob_layout(mtd, fd, ooblen, oob))
+				return -1;
+		if (mtd_write_oob(desc, mtd, fd, seek, ooblen, oob) < 0)
+			return sys_errmsg("cannot write to OOB");
+	}
+	if (data) {
+		/* Seek to the beginning of the eraseblock */
+		if (lseek(fd, seek, SEEK_SET) != seek)
+			return sys_errmsg("cannot seek mtd%d to offset %"PRIdoff_t,
+					mtd->mtd_num, seek);
+		ret = write(fd, data, len);
+		if (ret != len)
+			return sys_errmsg("cannot write %d bytes to mtd%d "
+					  "(eraseblock %d, offset %d)",
+					  len, mtd->mtd_num, eb, offs);
+	}
+
+	return 0;
+}
+
+int do_oob_op(libmtd_t desc, const struct mtd_dev_info *mtd, int fd,
+	      uint64_t start, uint64_t length, void *data, unsigned int cmd64,
+	      unsigned int cmd)
+{
+	int ret, oob_offs;
+	struct mtd_oob_buf64 oob64;
+	struct mtd_oob_buf oob;
+	unsigned long long max_offs;
+	const char *cmd64_str, *cmd_str;
+	struct libmtd *lib = (struct libmtd *)desc;
+
+	if (cmd64 ==  MEMREADOOB64) {
+		cmd64_str = "MEMREADOOB64";
+		cmd_str   = "MEMREADOOB";
+	} else {
+		cmd64_str = "MEMWRITEOOB64";
+		cmd_str   = "MEMWRITEOOB";
+	}
+
+	max_offs = (unsigned long long)mtd->eb_cnt * mtd->eb_size;
+	if (start >= max_offs) {
+		errmsg("bad page address %" PRIu64 ", mtd%d has %d eraseblocks (%llu bytes)",
+		       start, mtd->mtd_num, mtd->eb_cnt, max_offs);
+		errno = EINVAL;
+		return -1;
+	}
+
+	oob_offs = start & (mtd->min_io_size - 1);
+	if (oob_offs + length > mtd->oob_size || length == 0) {
+		errmsg("Cannot write %" PRIu64 " OOB bytes to address %" PRIu64 " (OOB offset %u) - mtd%d OOB size is only %d bytes",
+		       length, start, oob_offs, mtd->mtd_num,  mtd->oob_size);
+		errno = EINVAL;
+		return -1;
+	}
+
+	oob64.start = start;
+	oob64.length = length;
+	oob64.usr_ptr = (uint64_t)(unsigned long)data;
+
+	if (lib->offs64_ioctls == OFFS64_IOCTLS_SUPPORTED ||
+	    lib->offs64_ioctls == OFFS64_IOCTLS_UNKNOWN) {
+		ret = ioctl(fd, cmd64, &oob64);
+		if (ret == 0)
+			return ret;
+
+		if (errno != ENOTTY ||
+		    lib->offs64_ioctls != OFFS64_IOCTLS_UNKNOWN) {
+			sys_errmsg("%s ioctl failed for mtd%d, offset %" PRIu64 " (eraseblock %" PRIu64 ")",
+				   cmd64_str, mtd->mtd_num, start, start / mtd->eb_size);
+			return -1;
+		}
+
+		/*
+		 * MEMREADOOB64/MEMWRITEOOB64 support was added in kernel
+		 * version 2.6.31, so probably we are working with older kernel
+		 * and these ioctls are not supported.
+		 */
+		lib->offs64_ioctls = OFFS64_IOCTLS_NOT_SUPPORTED;
+	}
+
+	if (oob64.start > 0xFFFFFFFFULL) {
+		errmsg("this system can address only up to address %lu",
+		       0xFFFFFFFFUL);
+		errno = EINVAL;
+		return -1;
+	}
+
+	oob.start = oob64.start;
+	oob.length = oob64.length;
+	oob.ptr = data;
+
+	ret = ioctl(fd, cmd, &oob);
+	if (ret < 0)
+		sys_errmsg("%s ioctl failed for mtd%d, offset %" PRIu64 " (eraseblock %" PRIu64 ")",
+			   cmd_str, mtd->mtd_num, start, start / mtd->eb_size);
+	return ret;
+}
+
+int mtd_read_oob(libmtd_t desc, const struct mtd_dev_info *mtd, int fd,
+		 uint64_t start, uint64_t length, void *data)
+{
+	return do_oob_op(desc, mtd, fd, start, length, data,
+			 MEMREADOOB64, MEMREADOOB);
+}
+
+int mtd_write_oob(libmtd_t desc, const struct mtd_dev_info *mtd, int fd,
+		  uint64_t start, uint64_t length, void *data)
+{
+	return do_oob_op(desc, mtd, fd, start, length, data,
+			 MEMWRITEOOB64, MEMWRITEOOB);
+}
+
+int mtd_write_img(const struct mtd_dev_info *mtd, int fd, int eb, int offs,
+		  const char *img_name)
+{
+	int tmp, ret, in_fd, len, written = 0;
+	off_t seek;
+	struct stat st;
+	char *buf;
+
+	ret = mtd_valid_erase_block(mtd, eb);
+	if (ret)
+		return ret;
+
+	if (offs < 0 || offs >= mtd->eb_size) {
+		errmsg("bad offset %d, mtd%d eraseblock size is %d",
+		       offs, mtd->mtd_num, mtd->eb_size);
+		errno = EINVAL;
+		return -1;
+	}
+	if (offs % mtd->subpage_size) {
+		errmsg("write offset %d is not aligned to mtd%d min. I/O size %d",
+		       offs, mtd->mtd_num, mtd->subpage_size);
+		errno = EINVAL;
+		return -1;
+	}
+
+	in_fd = open(img_name, O_RDONLY | O_CLOEXEC);
+	if (in_fd == -1)
+		return sys_errmsg("cannot open \"%s\"", img_name);
+
+	if (fstat(in_fd, &st)) {
+		sys_errmsg("cannot stat %s", img_name);
+		goto out_close;
+	}
+
+	len = st.st_size;
+	if (len % mtd->subpage_size) {
+		errmsg("size of \"%s\" is %d byte, which is not aligned to "
+		       "mtd%d min. I/O size %d", img_name, len, mtd->mtd_num,
+		       mtd->subpage_size);
+		errno = EINVAL;
+		goto out_close;
+	}
+	tmp = (offs + len + mtd->eb_size - 1) / mtd->eb_size;
+	if (eb + tmp > mtd->eb_cnt) {
+		errmsg("\"%s\" image size is %d bytes, mtd%d size is %d "
+		       "eraseblocks, the image does not fit if we write it "
+		       "starting from eraseblock %d, offset %d",
+		       img_name, len, mtd->mtd_num, mtd->eb_cnt, eb, offs);
+		errno = EINVAL;
+		goto out_close;
+	}
+
+	/* Seek to the beginning of the eraseblock */
+	seek = (off_t)eb * mtd->eb_size + offs;
+	if (lseek(fd, seek, SEEK_SET) != seek) {
+		sys_errmsg("cannot seek mtd%d to offset %"PRIdoff_t,
+			    mtd->mtd_num, seek);
+		goto out_close;
+	}
+
+	buf = xmalloc(mtd->eb_size);
+
+	while (written < len) {
+		int rd = 0;
+
+		do {
+			ret = read(in_fd, buf + rd, mtd->eb_size - offs - rd);
+			if (ret == -1) {
+				sys_errmsg("cannot read \"%s\"", img_name);
+				goto out_free;
+			}
+			rd += ret;
+		} while (ret && rd < mtd->eb_size - offs);
+
+		ret = write(fd, buf, rd);
+		if (ret != rd) {
+			sys_errmsg("cannot write %d bytes to mtd%d (eraseblock %d, offset %d)",
+				   rd, mtd->mtd_num, eb, offs);
+			goto out_free;
+		}
+
+		offs = 0;
+		eb += 1;
+		written += rd;
+	}
+
+	free(buf);
+	close(in_fd);
+	return 0;
+
+out_free:
+	free(buf);
+out_close:
+	close(in_fd);
+	return -1;
+}
+
+int mtd_probe_node(libmtd_t desc, const char *node)
+{
+	struct stat st;
+	struct mtd_info info;
+	int i, mjr, mnr;
+	struct libmtd *lib = (struct libmtd *)desc;
+
+	if (stat(node, &st))
+		return sys_errmsg("cannot get information about \"%s\"", node);
+
+	if (!S_ISCHR(st.st_mode)) {
+		errmsg("\"%s\" is not a character device", node);
+		errno = EINVAL;
+		return -1;
+	}
+
+	mjr = major(st.st_rdev);
+	mnr = minor(st.st_rdev);
+
+	if (mtd_get_info((libmtd_t)lib, &info))
+		return -1;
+
+	if (!lib->sysfs_supported)
+		return 0;
+
+	for (i = info.lowest_mtd_num; i <= info.highest_mtd_num; i++) {
+		int mjr1, mnr1, ret;
+
+		ret = dev_get_major(lib, i, &mjr1, &mnr1);
+		if (ret) {
+			if (errno == ENOENT)
+				continue;
+			if (!errno)
+				break;
+			return -1;
+		}
+
+		if (mjr1 == mjr && mnr1 == mnr)
+			return 1;
+	}
+
+	errno = ENODEV;
+	return -1;
+}
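
Taken together, mtd_write() and the OOB helpers above let a flasher program a
NAND page and its spare area in one pass, transparently falling back to the
legacy ioctls on pre-2.6.31 kernels. A minimal sketch (assumes an open
library descriptor `desc`, a device node `fd`, and a filled-in `mtd` for a
NAND chip with 2048-byte pages; MTD_OPS_PLACE_OOB comes from the kernel
mtd-abi.h header):

    unsigned char page[2048], spare[64];

    memset(page, 0xA5, sizeof(page));
    memset(spare, 0xFF, sizeof(spare));

    /* program page 0 of eraseblock 10: data plus raw-placed OOB */
    if (mtd_write(desc, &mtd, fd, 10, 0, page, sizeof(page),
                  spare, sizeof(spare), MTD_OPS_PLACE_OOB))
            return -1;
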
diff --git a/src/support/libnandapi/src/libmtd_legacy.c b/src/support/libnandapi/src/libmtd_legacy.c
new file mode 100644
index 0000000..a0756de
--- /dev/null
+++ b/src/support/libnandapi/src/libmtd_legacy.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Artem Bityutskiy
+ *
+ * This file is part of the MTD library. It implements support for pre-2.6.30
+ * kernels, where MTD did not have a sysfs interface. The main limitation of
+ * the old kernels was that the sub-page size was not exported to user-space,
+ * so it was not possible to get the sub-page size.
+ */
+
+#include <inttypes.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/sysmacros.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <mtd/mtd-user.h>
+
+#include "../inc/libmtd.h"
+#include "../inc/libmtd_int.h"
+#include "../inc/common.h"
+
+#define MTD_PROC_FILE "/proc/mtd"
+#define MTD_DEV_PATT  "/dev/mtd%d"
+#define MTD_DEV_MAJOR 90
+
+#define PROC_MTD_FIRST     "dev:    size   erasesize  name\n"
+#define PROC_MTD_FIRST_LEN (sizeof(PROC_MTD_FIRST) - 1)
+#define PROC_MTD_MAX_LEN   4096
+#define PROC_MTD_PATT      "mtd%d: %llx %x"
+
+/**
+ * struct proc_parse_info - /proc/mtd parsing information.
+ * @mtd_num: MTD device number
+ * @size: device size
+ * @eb_size: eraseblock size
+ * @name: device name
+ * @buf: contents of /proc/mtd
+ * @data_size: how much data was read into @buf
+ * @pos: next string in @buf to parse
+ */
+struct proc_parse_info
+{
+	int mtd_num;
+	long long size;
+	char name[MTD_NAME_MAX + 1];
+	int eb_size;
+	char *buf;
+	int data_size;
+	char *next;
+};
+
+static int proc_parse_start(struct proc_parse_info *pi)
+{
+	int fd, ret;
+
+	fd = open(MTD_PROC_FILE, O_RDONLY);
+	if (fd == -1)
+		return -1;
+
+	pi->buf = xmalloc(PROC_MTD_MAX_LEN);
+
+	ret = read(fd, pi->buf, PROC_MTD_MAX_LEN);
+	if (ret == -1) {
+		sys_errmsg("cannot read \"%s\"", MTD_PROC_FILE);
+		goto out_free;
+	}
+
+	if (ret < PROC_MTD_FIRST_LEN ||
+	    memcmp(pi->buf, PROC_MTD_FIRST, PROC_MTD_FIRST_LEN)) {
+		errmsg("\"%s\" does not start with \"%s\"", MTD_PROC_FILE,
+		       PROC_MTD_FIRST);
+		goto out_free;
+	}
+
+	pi->data_size = ret;
+	pi->next = pi->buf + PROC_MTD_FIRST_LEN;
+
+	close(fd);
+	return 0;
+
+out_free:
+	free(pi->buf);
+	close(fd);
+	return -1;
+}
+
+static int proc_parse_next(struct proc_parse_info *pi)
+{
+	int ret, len, pos = pi->next - pi->buf;
+	char *p, *p1;
+
+	if (pos >= pi->data_size) {
+		free(pi->buf);
+		return 0;
+	}
+
+	ret = sscanf(pi->next, PROC_MTD_PATT, &pi->mtd_num, &pi->size,
+		     &pi->eb_size);
+	if (ret != 3)
+		return errmsg("\"%s\" pattern not found", PROC_MTD_PATT);
+
+	p = memchr(pi->next, '\"', pi->data_size - pos);
+	if (!p)
+		return errmsg("opening \" not found");
+	p += 1;
+	pos = p - pi->buf;
+	if (pos >= pi->data_size)
+		return errmsg("opening \" not found");
+
+	p1 = memchr(p, '\"', pi->data_size - pos);
+	if (!p1)
+		return errmsg("closing \" not found");
+	pos = p1 - pi->buf;
+	if (pos >= pi->data_size)
+		return errmsg("closing \" not found");
+
+	len = p1 - p;
+	if (len > MTD_NAME_MAX)
+		return errmsg("too long mtd%d device name", pi->mtd_num);
+
+	memcpy(pi->name, p, len);
+	pi->name[len] = '\0';
+
+	if (p1[1] != '\n')
+		return errmsg("newline after closing \" not found");
+	pi->next = p1 + 2;
+	return 1;
+}
+
+/**
+ * legacy_libmtd_open - legacy version of 'libmtd_open()'.
+ *
+ * This function just checks that MTD is present in the system. Returns
+ * zero in case of success and %-1 in case of failure. In case of failure,
+ * errno contains zero if MTD is not present in the system, or contains the
+ * error code if a real error happened. This is similar to the 'libmtd_open()'
+ * return conventions.
+ */
+int legacy_libmtd_open(void)
+{
+	int fd;
+
+	fd = open(MTD_PROC_FILE, O_RDONLY);
+	if (fd == -1) {
+		if (errno == ENOENT)
+			errno = 0;
+		return -1;
+	}
+
+	close(fd);
+	return 0;
+}
+
+/**
+ * legacy_dev_present - legacy version of 'mtd_dev_present()'.
+ * @mtd_num: MTD device number to check
+ *
+ * When the kernel does not provide sysfs files for the MTD subsystem, fall
+ * back to parsing the /proc/mtd file to determine whether an MTD device
+ * number @mtd_num is present.
+ */
+int legacy_dev_present(int mtd_num)
+{
+	int ret;
+	struct proc_parse_info pi;
+
+	ret = proc_parse_start(&pi);
+	if (ret)
+		return -1;
+
+	while (proc_parse_next(&pi)) {
+		if (pi.mtd_num == mtd_num)
+			return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * legacy_mtd_get_info - legacy version of 'mtd_get_info()'.
+ * @info: the MTD device information is returned here
+ *
+ * This function is similar to 'mtd_get_info()' and has the same conventions.
+ */
+int legacy_mtd_get_info(struct mtd_info *info)
+{
+	int ret;
+	struct proc_parse_info pi;
+
+	ret = proc_parse_start(&pi);
+	if (ret)
+		return -1;
+
+	info->lowest_mtd_num = INT_MAX;
+	while (proc_parse_next(&pi)) {
+		info->mtd_dev_cnt += 1;
+		if (pi.mtd_num > info->highest_mtd_num)
+			info->highest_mtd_num = pi.mtd_num;
+		if (pi.mtd_num < info->lowest_mtd_num)
+			info->lowest_mtd_num = pi.mtd_num;
+	}
+
+	if (info->lowest_mtd_num == INT_MAX)
+		info->lowest_mtd_num = 0;
+
+	return 0;
+}
+
+/**
+ * legacy_get_dev_info - legacy version of 'mtd_get_dev_info()'.
+ * @node: name of the MTD device node
+ * @mtd: the MTD device information is returned here
+ *
+ * This function is similar to 'mtd_get_dev_info()' and has the same
+ * conventions.
+ */
+int legacy_get_dev_info(const char *node, struct mtd_dev_info *mtd)
+{
+	struct stat st;
+	struct mtd_info_user ui;
+	int fd, ret;
+	loff_t offs = 0;
+	struct proc_parse_info pi;
+
+	if (stat(node, &st)) {
+		sys_errmsg("cannot open \"%s\"", node);
+		if (errno == ENOENT)
+			normsg("MTD subsystem is old and does not support "
+			       "sysfs, so MTD character device nodes have "
+			       "to exist");
+		return -1;
+	}
+
+	if (!S_ISCHR(st.st_mode)) {
+		errno = EINVAL;
+		return errmsg("\"%s\" is not a character device", node);
+	}
+
+	memset(mtd, '\0', sizeof(struct mtd_dev_info));
+	mtd->major = major(st.st_rdev);
+	mtd->minor = minor(st.st_rdev);
+
+	if (mtd->major != MTD_DEV_MAJOR) {
+		errno = EINVAL;
+		return errmsg("\"%s\" has major number %d, MTD devices have "
+			      "major %d", node, mtd->major, MTD_DEV_MAJOR);
+	}
+
+	mtd->mtd_num = mtd->minor / 2;
+
+	fd = open(node, O_RDONLY);
+	if (fd == -1)
+		return sys_errmsg("cannot open \"%s\"", node);
+
+	if (ioctl(fd, MEMGETINFO, &ui)) {
+		sys_errmsg("MEMGETINFO ioctl request failed");
+		goto out_close;
+	}
+
+	ret = ioctl(fd, MEMGETBADBLOCK, &offs);
+	if (ret == -1) {
+		if (errno != EOPNOTSUPP) {
+			sys_errmsg("MEMGETBADBLOCK ioctl failed");
+			goto out_close;
+		}
+		errno = 0;
+		mtd->bb_allowed = 0;
+	} else
+		mtd->bb_allowed = 1;
+
+	mtd->type = ui.type;
+	mtd->size = ui.size;
+	mtd->eb_size = ui.erasesize;
+	mtd->min_io_size = ui.writesize;
+	mtd->oob_size = ui.oobsize;
+
+	if (mtd->min_io_size <= 0) {
+		errmsg("mtd%d (%s) has insane min. I/O unit size %d",
+		       mtd->mtd_num, node, mtd->min_io_size);
+		goto out_close;
+	}
+	if (mtd->eb_size <= 0 || mtd->eb_size < mtd->min_io_size) {
+		errmsg("mtd%d (%s) has insane eraseblock size %d",
+		       mtd->mtd_num, node, mtd->eb_size);
+		goto out_close;
+	}
+	if (mtd->size <= 0 || mtd->size < mtd->eb_size) {
+		errmsg("mtd%d (%s) has insane size %lld",
+		       mtd->mtd_num, node, mtd->size);
+		goto out_close;
+	}
+	mtd->eb_cnt = mtd->size / mtd->eb_size;
+
+	switch(mtd->type) {
+	case MTD_ABSENT:
+		errmsg("mtd%d (%s) is removable and is not present",
+		       mtd->mtd_num, node);
+		goto out_close;
+	case MTD_RAM:
+		strcpy((char *)mtd->type_str, "ram");
+		break;
+	case MTD_ROM:
+		strcpy((char *)mtd->type_str, "rom");
+		break;
+	case MTD_NORFLASH:
+		strcpy((char *)mtd->type_str, "nor");
+		break;
+	case MTD_NANDFLASH:
+		strcpy((char *)mtd->type_str, "nand");
+		break;
+	case MTD_MLCNANDFLASH:
+		strcpy((char *)mtd->type_str, "mlc-nand");
+		break;
+	case MTD_DATAFLASH:
+		strcpy((char *)mtd->type_str, "dataflash");
+		break;
+	case MTD_UBIVOLUME:
+		strcpy((char *)mtd->type_str, "ubi");
+		break;
+	default:
+		goto out_close;
+	}
+
+	if (ui.flags & MTD_WRITEABLE)
+		mtd->writable = 1;
+	mtd->subpage_size = mtd->min_io_size;
+
+	close(fd);
+
+	/*
+	 * Unfortunately, the device name is not available via ioctl, and
+	 * we have to parse /proc/mtd to get it.
+	 */
+	ret = proc_parse_start(&pi);
+	if (ret)
+		return -1;
+
+	while (proc_parse_next(&pi)) {
+		if (pi.mtd_num == mtd->mtd_num) {
+			strcpy((char *)mtd->name, pi.name);
+			return 0;
+		}
+	}
+
+	errmsg("mtd%d not found in \"%s\"", mtd->mtd_num, MTD_PROC_FILE);
+	errno = ENOENT;
+	return -1;
+
+out_close:
+	close(fd);
+	return -1;
+}
+
+/**
+ * legacy_get_dev_info1 - legacy version of 'mtd_get_dev_info1()'.
+ * @mtd_num: MTD device number
+ * @mtd: the MTD device information is returned here
+ *
+ * This function is similar to 'mtd_get_dev_info1()' and has the same
+ * conventions.
+ */
+int legacy_get_dev_info1(int mtd_num, struct mtd_dev_info *mtd)
+{
+	char node[sizeof(MTD_DEV_PATT) + 20];
+
+	sprintf(node, MTD_DEV_PATT, mtd_num);
+	return legacy_get_dev_info(node, mtd);
+}
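
The /proc/mtd parser above keys off a fixed first line (PROC_MTD_FIRST) and a
fixed per-device pattern (PROC_MTD_PATT), so it is worth seeing the exact
file shape it expects (sizes and partition names here are illustrative):

    dev:    size   erasesize  name
    mtd0: 00800000 00020000 "bootloader"
    mtd1: 07800000 00020000 "rootfs"
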
diff --git a/src/support/libnandapi/src/mtk_device_wrap.c b/src/support/libnandapi/src/mtk_device_wrap.c
new file mode 100644
index 0000000..66280b5
--- /dev/null
+++ b/src/support/libnandapi/src/mtk_device_wrap.c
@@ -0,0 +1,224 @@
+

+#include "../inc/mtk_device_wrap.h"

+

+#if MTK_BOOTDEV_TYPE == BOOTDEV_TYPE_NAND

+

+#include <stdio.h>

+#include <stdlib.h>

+#include <string.h>

+

+#include "../inc/nand_api.h"

+#include "../inc/common.h"

+#include "../inc/xalloc.h"

+

+

+int mtk_device_wrap_m_open(const char *pathname, int flags, mode_t mode)

+{

+    return nand_open(pathname, flags);

+}

+

+int mtk_device_wrap_open(const char *pathname, int flags)

+{

+    return nand_open(pathname, flags);

+}

+

+off_t mtk_device_wrap_seek(int fd, off_t offset, int whence)

+{

+    return nand_seek(fd, offset, whence);

+}

+

+off64_t mtk_device_wrap_seek64(int fd, off64_t offset, int whence)

+{

+    return nand_seek64(fd, offset, whence);

+}

+

+ssize_t mtk_device_wrap_read(int fd, void *buf, size_t count)

+{

+    return nand_read(fd, buf, count);

+}

+

+ssize_t mtk_device_wrap_write(int fd, void *buf, size_t count)

+{

+    return nand_write(fd, buf, count);

+}

+

+

+#define ALIGN_LOWER(x, a) ((x)&~(a-1))

+

+struct nandx_split32 {

+    off_t  head;

+    size_t head_len;

+    off_t  body;

+    size_t body_len;

+    off_t  tail;

+    size_t tail_len;

+};

+

+static void nandx_split(struct nandx_split32 *split, off_t offset, size_t len, size_t align)

+{

+    size_t val;

+

+    split->head = offset;

+    val = ALIGN_LOWER(offset, align);

+    val = align - (offset - val);

+    if (val == align)

+        split->head_len = 0;

+    else if (val > len)

+        split->head_len = len;

+    else

+        split->head_len = val;

+

+    split->body = offset + split->head_len;

+    split->body_len = ALIGN_LOWER(len - split->head_len, align);

+

+    split->tail = split->body + split->body_len;

+    split->tail_len = len - split->head_len - split->body_len;

+}
+
+ssize_t mtk_device_wrap_write_force(int fd, void *buf, size_t count)
+{
+    struct nandx_split32 split = {0};
+    off_t offset, record_offset;
+    size_t block_size;
+    char *temp_buf;
+    size_t acc_count = 0;
+    size_t result;
+    char *lbuf = buf;
+
+    offset = nand_query_offset(fd);
+    record_offset = offset + count;
+    block_size = nand_query_blk_size(fd);
+    if (block_size == 0) {
+        printf("write_force wrong block size\n");
+        return 0;
+    }
+
+    //split head, body, and tail
+    nandx_split(&split, offset, count, block_size);
+
+    //allocate buffer
+    temp_buf = xmalloc(block_size);
+    if (temp_buf == NULL) {
+        printf("write_force alloc fail\n");
+        return 0;
+    }
+
+    //handle head: read-modify-write the first, partially covered block
+    if (split.head_len) {
+        off_t head_base = ALIGN_LOWER(split.head, block_size);
+
+        // read the whole block
+        nand_seek(fd, head_base, SEEK_SET);
+        nand_read(fd, temp_buf, block_size);
+
+        //update the temp_buf
+        memcpy(temp_buf + (offset - head_base), lbuf, split.head_len);
+
+        //write to flash
+        nand_seek(fd, head_base, SEEK_SET);
+        result = nand_write(fd, temp_buf, block_size);
+        if (result != block_size) {
+            printf("write_force head fail\n");
+            goto out;
+        }
+        acc_count += split.head_len;
+
+        //update
+        lbuf += split.head_len;
+    }
+
+    //handle body: whole blocks can be written directly
+    if (split.body_len) {
+        nand_seek(fd, split.body, SEEK_SET);
+        result = nand_write(fd, lbuf, split.body_len);
+        if (result != split.body_len) {
+            printf("write_force body fail\n");
+            goto out;
+        }
+        acc_count += result;
+
+        //update
+        lbuf += split.body_len;
+    }
+
+    //handle tail: read-modify-write the last, partially covered block
+    if (split.tail_len) {
+        // read the whole block
+        nand_seek(fd, split.tail, SEEK_SET);
+        nand_read(fd, temp_buf, block_size);
+
+        //update the temp_buf
+        memcpy(temp_buf, lbuf, split.tail_len);
+
+        //write to flash
+        nand_seek(fd, split.tail, SEEK_SET);
+        result = nand_write(fd, temp_buf, block_size);
+        if (result != block_size) {
+            printf("write_force tail fail\n");
+            goto out;
+        }
+        acc_count += split.tail_len;
+
+        //update
+        lbuf += split.tail_len;
+    }
+
+out:
+    //restore the file offset the caller expects after a full write
+    nand_seek(fd, record_offset, SEEK_SET);
+
+    //free buffer
+    free(temp_buf);
+
+    return acc_count;
+}
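+
+/*
+ * Usage sketch (hypothetical caller and device node, not from this
+ * tree): write a buffer at an arbitrary byte offset; the partially
+ * covered head and tail blocks are read back, patched, and rewritten
+ * so neighbouring data in the same erase blocks survives.
+ *
+ *   int fd = mtk_device_wrap_open("/dev/mtd3", O_RDWR);
+ *   mtk_device_wrap_seek(fd, 5000, SEEK_SET);
+ *   mtk_device_wrap_write_force(fd, data, data_len);
+ *   mtk_device_wrap_close(fd);
+ */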
+
+int mtk_device_wrap_close(int fd)
+{
+    return nand_close(fd);
+}
+
+#else
+
+int mtk_device_wrap_m_open(const char *pathname, int flags, mode_t mode)
+{
+    return open(pathname, flags, mode);
+}
+
+int mtk_device_wrap_open(const char *pathname, int flags)
+{
+    return open(pathname, flags);
+}
+
+off_t mtk_device_wrap_seek(int fd, off_t offset, int whence)
+{
+    return lseek(fd, offset, whence);
+}
+
+off64_t mtk_device_wrap_seek64(int fd, off64_t offset, int whence)
+{
+    return lseek64(fd, offset, whence);
+}
+
+ssize_t mtk_device_wrap_read(int fd, void *buf, size_t count)
+{
+    return read(fd, buf, count);
+}
+
+ssize_t mtk_device_wrap_write(int fd, void *buf, size_t count)
+{
+    return write(fd, buf, count);
+}
+
+ssize_t mtk_device_wrap_write_force(int fd, void *buf, size_t count)
+{
+    /* block devices allow byte-granular writes; no read-modify-write needed */
+    return write(fd, buf, count);
+}
+
+int mtk_device_wrap_close(int fd)
+{
+    return close(fd);
+}
+
+#endif
\ No newline at end of file
diff --git a/src/support/libnandapi/src/nand_api.c b/src/support/libnandapi/src/nand_api.c
new file mode 100644
index 0000000..d256a84
--- /dev/null
+++ b/src/support/libnandapi/src/nand_api.c
@@ -0,0 +1,399 @@
+#include <inttypes.h>
+#include <ctype.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <getopt.h>
+#include <asm/types.h>
+#include <pthread.h>
+
+#include "../inc/nand_api.h"
+#include "../inc/common.h"
+#include "../inc/xalloc.h"
+#include "../inc/libmtd.h"
+
+struct nand_ops {
+	struct mtd_dev_info mtd;
+	unsigned int *eb_map;
+	unsigned int valid_eb_cnt;
+	int fd;
+	off_t offset;
+};
+
+/* As discussed with the upper layers,
+ * at most 10 files are opened simultaneously.
+ * To keep the code simple,
+ * the open files' objects are kept in a fixed-size array.
+ */
+#define OPEN_FILES_MAX 10
+
+struct nand_handler {
+	libmtd_t mtd_desc;
+	struct nand_ops nops[OPEN_FILES_MAX];
+	int nops_count;
+	unsigned char *buf;
+	int eb_size;
+};
+
+static struct nand_handler nhandler;
+static pthread_mutex_t nmutex = PTHREAD_MUTEX_INITIALIZER;
+
+struct split_info {
+	off_t head;
+	size_t head_len;
+	off_t body;
+	size_t body_len;
+	off_t tail;
+	size_t tail_len;
+};
+
+static void data_split(struct split_info *split, off_t offset,
+			size_t len, unsigned int align)
+{
+	size_t val;
+
+	split->head = offset;
+	val = offset / align * align;
+	val = align - (offset - val);
+	if (val == align)
+		split->head_len = 0;
+	else if (val > len)
+		split->head_len = len;
+	else
+		split->head_len = val;
+
+	split->body = offset + split->head_len;
+	split->body_len = (len - split->head_len) / align * align;
+
+	split->tail = split->body + split->body_len;
+	split->tail_len = len - split->head_len - split->body_len;
+}
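+
+/*
+ * Same head/body/tail decomposition as nandx_split() in
+ * mtk_device_wrap.c; e.g. offset = 5000, len = 10000, align = 4096
+ * gives head_len 3192, body_len 4096, tail_len 2712.
+ */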
+
+static inline int offset_to_eb(off_t offset)
+{
+	return offset / nhandler.eb_size;
+}
+
+static inline int offset_to_eb_offset(off_t offset)
+{
+	return offset % nhandler.eb_size;
+}
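+
+/*
+ * Illustration (example values): with eb_size = 0x20000 (128 KiB),
+ * offset 0x43000 maps to eb 2 with eb_offset 0x3000.
+ */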
+
+static inline unsigned int map_eb(unsigned int eb, int nops_idx)
+{
+	/* this case should rarely happen */
+	if (eb >= nhandler.nops[nops_idx].valid_eb_cnt)
+		printf("warning: the block maybe not valid\n");
+
+	return nhandler.nops[nops_idx].eb_map[eb];
+}
+
+static int create_eb_map(struct nand_ops *ops)
+{
+	int eb_cnt = 0;
+	int ret = 0, i;
+
+	ops->eb_map = xzalloc((size_t)ops->mtd.eb_cnt * sizeof(unsigned int));
+
+	for (i = 0; i < ops->mtd.eb_cnt; i++) {
+		ret = mtd_is_bad(&ops->mtd, ops->fd, i);
+		if (ret == 0)
+			ops->eb_map[eb_cnt++] = i;
+		else if (ret > 0)
+			printf("eb: %d is bad\n", i);
+		else
+			return ret;
+	}
+
+	ops->valid_eb_cnt = eb_cnt;
+	printf("valid eb count: %u\n", ops->valid_eb_cnt);
+
+	return ret;
+}
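+
+/*
+ * Illustration (example values): if physical block 2 is bad, eb_map
+ * becomes {0, 1, 3, 4, ...}, so map_eb(2, idx) transparently redirects
+ * logical block 2 to physical block 3.
+ */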
+
+static int get_current_ops(int fd)
+{
+	int i;
+
+	for (i = 0; i < OPEN_FILES_MAX; i++) {
+		if (nhandler.nops[i].fd == fd)
+			return i;
+	}
+
+	printf("Failed to find an open file for this fd; this indicates a bug.\n");
+	/* callers must pass an fd returned by nand_open(); -1 flags misuse */
+	return -1;
+}
+
+static int get_empty_ops(void)
+{
+	int i;
+
+	for (i = 0; i < OPEN_FILES_MAX; i++) {
+		if (nhandler.nops[i].fd == 0)
+			return i;
+	}
+
+	printf("Fail to get an empty ops object, open too many files.\n");
+	return -1;
+}
+
+int nand_open(const char *pathname, int flags)
+{
+	struct mtd_dev_info *mtd;
+	int current, fd;
+	int ret = -1;
+
+	pthread_mutex_lock(&nmutex);
+
+	if (nhandler.nops_count == 0) {
+		nhandler.mtd_desc = libmtd_open();
+		if (!nhandler.mtd_desc) {
+			printf("libmtd initialize fail\n");
+			goto out;
+		}
+	}
+
+	fd = open(pathname, flags);
+	if (fd == -1) {
+		printf("open file: %s fail\n", pathname);
+		goto out;
+	}
+
+	current = get_empty_ops();
+	if (current < 0) {
+		/* no free slot; avoid indexing nops[-1] */
+		close(fd);
+		goto out;
+	}
+	nhandler.nops[current].fd = fd;
+
+	mtd = &(nhandler.nops[current].mtd);
+	ret = mtd_get_dev_info(nhandler.mtd_desc, pathname, mtd);
+	if (ret < 0) {
+		printf("get mtd info fail\n");
+		goto err_release;
+	}
+
+	ret = create_eb_map(&(nhandler.nops[current]));
+	if (ret < 0) {
+		printf("check bad block fail\n");
+		goto err_release;
+	}
+
+	if (nhandler.nops_count == 0) {
+		nhandler.buf = xmalloc(mtd->eb_size);
+		nhandler.eb_size = mtd->eb_size;
+	}
+
+	ret = fd;
+	nhandler.nops_count++;
+	goto out;
+
+err_release:
+	/* release the partially initialized slot so it can be reused */
+	free(nhandler.nops[current].eb_map);
+	memset(&nhandler.nops[current], 0, sizeof(struct nand_ops));
+	close(fd);
+
+out:
+	pthread_mutex_unlock(&nmutex);
+
+	return ret;
+}
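+
+/*
+ * Minimal usage sketch (hypothetical MTD node, error handling
+ * abbreviated):
+ *
+ *   int fd = nand_open("/dev/mtd2", O_RDWR);
+ *   if (fd >= 0) {
+ *       nand_seek(fd, 0, SEEK_SET);
+ *       nand_read(fd, buf, len);
+ *       nand_close(fd);
+ *   }
+ */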
+
+ssize_t nand_read(int fd, void *buf, size_t count)
+{
+	struct nand_ops *nops;
+	struct split_info split;
+	int eb, eb_cnt, eb_off;
+	char *lbuf = buf;
+	int current, ret, i;
+
+	current = get_current_ops(fd);
+	if (current < 0)
+		return -1;
+	nops = &(nhandler.nops[current]);
+
+	data_split(&split, nops->offset, count, nops->mtd.eb_size);
+
+	if (split.head_len) {
+		eb = offset_to_eb(split.head);
+		eb_off = offset_to_eb_offset(split.head);
+
+		ret = mtd_read(&nops->mtd, fd, map_eb(eb, current), eb_off,
+			       lbuf, split.head_len);
+		if (ret < 0)
+			return ret;
+
+		lbuf += split.head_len;
+	}
+
+	if (split.body_len) {
+		eb = offset_to_eb(split.body);
+		eb_cnt = split.body_len / nops->mtd.eb_size;
+
+		for (i = 0; i < eb_cnt; i++) {
+			ret = mtd_read(&nops->mtd, fd, map_eb(eb + i, current),
+				       0, lbuf, nops->mtd.eb_size);
+			if (ret < 0)
+				return ret;
+
+			lbuf += nops->mtd.eb_size;
+		}
+
+	}
+
+	if (split.tail_len) {
+		eb = offset_to_eb(split.tail);
+
+		ret = mtd_read(&nops->mtd, fd, map_eb(eb, current), 0,
+			       lbuf, split.tail_len);
+		if (ret < 0)
+			return ret;
+	}
+
+	nops->offset += count;
+	return count;
+}
+
+/* count must be page-size aligned; partial pages are not handled here */
+ssize_t nand_write(int fd, void *buf, size_t count)
+{
+	struct nand_ops *nops;
+	struct split_info split;
+	int eb, eb_cnt, eb_off;
+	char *lbuf = buf;
+	int current, ret, i;
+
+	current = get_current_ops(fd);
+	if (current < 0)
+		return -1;
+	nops = &(nhandler.nops[current]);
+
+	data_split(&split, nops->offset, count, nops->mtd.eb_size);
+
+	if (split.head_len) {
+		eb = offset_to_eb(split.head);
+		eb_off = offset_to_eb_offset(split.head);
+
+		/* this eb should be already erased, so write directly */
+		ret = mtd_write(nhandler.mtd_desc, &nops->mtd, fd,
+				map_eb(eb, current), eb_off, lbuf,
+				split.head_len, NULL, 0, 0);
+		if (ret < 0)
+			return ret;
+
+		lbuf += split.head_len;
+	}
+
+	if (split.body_len) {
+		eb = offset_to_eb(split.body);
+		eb_cnt = split.body_len / nops->mtd.eb_size;
+
+		for (i = 0; i < eb_cnt; i++) {
+			ret = mtd_erase(nhandler.mtd_desc, &nops->mtd, fd,
+					map_eb(eb + i, current));
+			if (ret < 0)
+				return ret;
+
+			ret = mtd_write(nhandler.mtd_desc, &nops->mtd, fd,
+					map_eb(eb + i, current), 0, lbuf,
+					nops->mtd.eb_size, NULL, 0, 0);
+			if (ret < 0)
+				return ret;
+
+			lbuf += nops->mtd.eb_size;
+		}
+
+	}
+
+	if (split.tail_len) {
+		eb = offset_to_eb(split.tail);
+
+		ret = mtd_erase(nhandler.mtd_desc, &nops->mtd, fd,
+				map_eb(eb, current));
+		if (ret < 0)
+			return ret;
+
+		ret = mtd_write(nhandler.mtd_desc, &nops->mtd, fd,
+				map_eb(eb, current), 0, lbuf,
+				split.tail_len, NULL, 0, 0);
+		if (ret < 0)
+			return ret;
+	}
+
+	nops->offset += count;
+	return count;
+}
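+
+/*
+ * Note: the body and tail paths erase each block before writing, but
+ * the head path writes into a block that is assumed to be erased
+ * already (see the comment above). Callers that cannot guarantee this
+ * can use mtk_device_wrap_write_force() for a read-modify-write.
+ */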
+
+off_t nand_seek(int fd, off_t offset, int whence)
+{
+	int current = get_current_ops(fd);
+
+	if (current < 0)
+		return -1;
+
+	switch (whence) {
+	case SEEK_SET:
+		nhandler.nops[current].offset = offset;
+		break;
+
+	case SEEK_CUR:
+		nhandler.nops[current].offset += offset;
+		break;
+
+	case SEEK_END:
+	default:
+		printf("invalid parameter: SEEK_END is not supported\n");
+		return -1;
+	}
+
+	return lseek(fd, nhandler.nops[current].offset, SEEK_SET);
+}
+
+/* off64_t is narrowed to off_t here; offsets beyond the off_t range
+ * of this build are not representable */
+off64_t nand_seek64(int fd, off64_t offset, int whence)
+{
+	return (off64_t)nand_seek(fd, (off_t)offset, whence);
+}
+
+int nand_close(int fd)
+{
+	int ret;
+	int current;
+
+	pthread_mutex_lock(&nmutex);
+
+	/* look up the slot under the lock so a concurrent open/close
+	 * cannot change the table underneath us */
+	current = get_current_ops(fd);
+	if (current < 0) {
+		pthread_mutex_unlock(&nmutex);
+		return -1;
+	}
+
+	nhandler.nops_count--;
+
+	if (nhandler.nops_count == 0) {
+		libmtd_close(nhandler.mtd_desc);
+		free(nhandler.buf);
+	}
+
+	free(nhandler.nops[current].eb_map);
+	memset(&nhandler.nops[current], 0, sizeof(struct nand_ops));
+	ret = close(fd);
+
+	pthread_mutex_unlock(&nmutex);
+
+	return ret;
+}
+
+off_t nand_query_offset(int fd)
+{
+	int current = get_current_ops(fd);
+	struct nand_ops *nops;
+
+	if (current == -1) {
+		printf("nand_query_offset fail\n");
+		return 0;
+	}
+
+	nops = &(nhandler.nops[current]);
+	return nops->offset;
+}
+
+size_t nand_query_blk_size(int fd)
+{
+	int current = get_current_ops(fd);
+	struct nand_ops *nops;
+
+	if (current == -1) {
+		printf("nand_query_blk_size fail\n");
+		return 0;
+	}
+
+	nops = &(nhandler.nops[current]);
+	return nops->mtd.eb_size;
+}