releasetools: Switch to Python logging.

Converts the following files to Python logging.

add_img_to_target_files.py
blockimgdiff.py
build_image.py
check_ota_package_signature.py
common.py
img_from_target_files.py
make_recovery_patch.py
ota_from_target_files.py
sparse_img.py
verity_utils.py

This separates logging output from normal output, and makes it easier
to switch between different verbosity levels. It also supports
adjusting the logging config via an environment variable
(LOGGING_CONFIG).
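
For example, a config file like the following (an illustrative sketch;
the filename is arbitrary, and the keys use the standard
logging.config.dictConfig schema, which InitLogging() loads via
json.load) raises the root logger to DEBUG:

  $ cat debug_logging.json
  {
      "version": 1,
      "handlers": {
          "default": {"class": "logging.StreamHandler"}
      },
      "loggers": {
          "": {"handlers": ["default"], "level": "DEBUG"}
      }
  }
  $ LOGGING_CONFIG=debug_logging.json ./ota_from_target_files.py <args>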

Test: `m dist`
Test: `python -m unittest discover build/make/tools/releasetools`
Change-Id: Idfc3f7aff83e03f41f3c9b5067d64fd595d5192d
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index 1b1ec20..1e8677c 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -46,6 +46,7 @@
 from __future__ import print_function
 
 import datetime
+import logging
 import os
 import shlex
 import shutil
@@ -62,8 +63,9 @@
   print("Python 2.7 or newer is required.", file=sys.stderr)
   sys.exit(1)
 
-OPTIONS = common.OPTIONS
+logger = logging.getLogger(__name__)
 
+OPTIONS = common.OPTIONS
 OPTIONS.add_missing = False
 OPTIONS.rebuild_recovery = False
 OPTIONS.replace_updated_files_list = []
@@ -127,7 +129,7 @@
 
   img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "system.img")
   if os.path.exists(img.input_name):
-    print("system.img already exists; no need to rebuild...")
+    logger.info("system.img already exists; no need to rebuild...")
     return img.input_name
 
   def output_sink(fn, data):
@@ -142,7 +144,7 @@
       common.ZipWrite(output_zip, ofile.name, arc_name)
 
   if OPTIONS.rebuild_recovery:
-    print("Building new recovery patch")
+    logger.info("Building new recovery patch")
     common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img,
                              boot_img, info_dict=OPTIONS.info_dict)
 
@@ -159,7 +161,7 @@
 
   img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "system_other.img")
   if os.path.exists(img.input_name):
-    print("system_other.img already exists; no need to rebuild...")
+    logger.info("system_other.img already exists; no need to rebuild...")
     return
 
   CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "system_other", img)
@@ -171,7 +173,7 @@
 
   img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "vendor.img")
   if os.path.exists(img.input_name):
-    print("vendor.img already exists; no need to rebuild...")
+    logger.info("vendor.img already exists; no need to rebuild...")
     return img.input_name
 
   block_list = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "vendor.map")
@@ -186,7 +188,7 @@
 
   img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "product.img")
   if os.path.exists(img.input_name):
-    print("product.img already exists; no need to rebuild...")
+    logger.info("product.img already exists; no need to rebuild...")
     return img.input_name
 
   block_list = OutputFile(
@@ -204,7 +206,7 @@
   img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES",
                    "product_services.img")
   if os.path.exists(img.input_name):
-    print("product_services.img already exists; no need to rebuild...")
+    logger.info("product_services.img already exists; no need to rebuild...")
     return img.input_name
 
   block_list = OutputFile(
@@ -220,7 +222,7 @@
 
   img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "odm.img")
   if os.path.exists(img.input_name):
-    print("odm.img already exists; no need to rebuild...")
+    logger.info("odm.img already exists; no need to rebuild...")
     return img.input_name
 
   block_list = OutputFile(
@@ -239,7 +241,7 @@
   """
   img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "dtbo.img")
   if os.path.exists(img.input_name):
-    print("dtbo.img already exists; no need to rebuild...")
+    logger.info("dtbo.img already exists; no need to rebuild...")
     return img.input_name
 
   dtbo_prebuilt_path = os.path.join(
@@ -269,7 +271,7 @@
 
 
 def CreateImage(input_dir, info_dict, what, output_file, block_list=None):
-  print("creating " + what + ".img...")
+  logger.info("creating " + what + ".img...")
 
   image_props = build_image.ImagePropFromGlobalDict(info_dict, what)
   fstab = info_dict["fstab"]
@@ -340,7 +342,7 @@
 
   img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "userdata.img")
   if os.path.exists(img.input_name):
-    print("userdata.img already exists; no need to rebuild...")
+    logger.info("userdata.img already exists; no need to rebuild...")
     return
 
   # Skip userdata.img if no size.
@@ -348,7 +350,7 @@
   if not image_props.get("partition_size"):
     return
 
-  print("creating userdata.img...")
+  logger.info("creating userdata.img...")
 
   image_props["timestamp"] = FIXED_FILE_TIMESTAMP
 
@@ -411,7 +413,7 @@
   img = OutputFile(
       output_zip, OPTIONS.input_tmp, "IMAGES", "{}.img".format(name))
   if os.path.exists(img.input_name):
-    print("{}.img already exists; not rebuilding...".format(name))
+    logger.info("%s.img already exists; not rebuilding...", name)
     return img.input_name
 
   avbtool = os.getenv('AVBTOOL') or OPTIONS.info_dict["avb_avbtool"]
@@ -495,7 +497,7 @@
 
   img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "cache.img")
   if os.path.exists(img.input_name):
-    print("cache.img already exists; no need to rebuild...")
+    logger.info("cache.img already exists; no need to rebuild...")
     return
 
   image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict, "cache")
@@ -503,7 +505,7 @@
   if "fs_type" not in image_props:
     return
 
-  print("creating cache.img...")
+  logger.info("creating cache.img...")
 
   image_props["timestamp"] = FIXED_FILE_TIMESTAMP
 
@@ -580,8 +582,7 @@
 
       present_props = [x for x in prop_name_list if x in build_props]
       if not present_props:
-        print("Warning: fingerprint is not present for partition {}".
-              format(partition))
+        logger.warning("fingerprint is not present for partition %s", partition)
         property_id, fingerprint = "unknown", "unknown"
       else:
         property_id = present_props[0]
@@ -633,7 +634,7 @@
 
     prebuilt_path = os.path.join(OPTIONS.input_tmp, "IMAGES", img_name)
     if os.path.exists(prebuilt_path):
-      print("%s already exists, no need to overwrite..." % (img_name,))
+      logger.info("%s already exists, no need to overwrite...", img_name)
       continue
 
     img_radio_path = os.path.join(OPTIONS.input_tmp, "RADIO", img_name)
@@ -698,7 +699,7 @@
 
   if not OPTIONS.add_missing:
     if os.path.isdir(os.path.join(OPTIONS.input_tmp, "IMAGES")):
-      print("target_files appears to already contain images.")
+      logger.warning("target_files appears to already contain images.")
       sys.exit(1)
 
   OPTIONS.info_dict = common.LoadInfoDict(OPTIONS.input_tmp, repacking=True)
@@ -748,7 +749,7 @@
   partitions = dict()
 
   def banner(s):
-    print("\n\n++++ " + s + " ++++\n\n")
+    logger.info("\n\n++++ " + s + " ++++\n\n")
 
   banner("boot")
   # common.GetBootableImage() returns the image directly if present.
@@ -912,20 +913,21 @@
                        "is_signing"],
       extra_option_handler=option_handler)
 
-
   if len(args) != 1:
     common.Usage(__doc__)
     sys.exit(1)
 
+  common.InitLogging()
+
   AddImagesToTargetFiles(args[0])
-  print("done.")
+  logger.info("done.")
 
 if __name__ == '__main__':
   try:
     common.CloseInheritedPipes()
     main(sys.argv[1:])
-  except common.ExternalError as e:
-    print("\n   ERROR: %s\n" % (e,))
+  except common.ExternalError:
+    logger.exception("\n   ERROR:\n")
     sys.exit(1)
   finally:
     common.Cleanup()
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index 189dba2..2d20e23 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -19,6 +19,7 @@
 import functools
 import heapq
 import itertools
+import logging
 import multiprocessing
 import os
 import os.path
@@ -33,6 +34,8 @@
 
 __all__ = ["EmptyImage", "DataImage", "BlockImageDiff"]
 
+logger = logging.getLogger(__name__)
+
 
 def compute_patch(srcfile, tgtfile, imgdiff=False):
   patchfile = common.MakeTempFile(prefix='patch-')
@@ -304,8 +307,8 @@
     """Prints a report of the collected imgdiff stats."""
 
     def print_header(header, separator):
-      print(header)
-      print(separator * len(header) + '\n')
+      logger.info(header)
+      logger.info(separator * len(header) + '\n')
 
     print_header('  Imgdiff Stats Report  ', '=')
     for key in self.REASONS:
@@ -314,7 +317,7 @@
       values = self.stats[key]
       section_header = ' {} (count: {}) '.format(key, len(values))
       print_header(section_header, '-')
-      print(''.join(['  {}\n'.format(name) for name in values]))
+      logger.info(''.join(['  {}\n'.format(name) for name in values]))
 
 
 class BlockImageDiff(object):
@@ -482,7 +485,7 @@
     self.WriteTransfers(prefix)
 
     # Report the imgdiff stats.
-    if common.OPTIONS.verbose and not self.disable_imgdiff:
+    if not self.disable_imgdiff:
       self.imgdiff_stats.Report()
 
   def WriteTransfers(self, prefix):
@@ -692,16 +695,17 @@
     OPTIONS = common.OPTIONS
     if OPTIONS.cache_size is not None:
       max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold
-      print("max stashed blocks: %d  (%d bytes), "
-            "limit: %d bytes (%.2f%%)\n" % (
-                max_stashed_blocks, self._max_stashed_size, max_allowed,
-                self._max_stashed_size * 100.0 / max_allowed))
+      logger.info(
+          "max stashed blocks: %d  (%d bytes), limit: %d bytes (%.2f%%)\n",
+          max_stashed_blocks, self._max_stashed_size, max_allowed,
+          self._max_stashed_size * 100.0 / max_allowed)
     else:
-      print("max stashed blocks: %d  (%d bytes), limit: <unknown>\n" % (
-          max_stashed_blocks, self._max_stashed_size))
+      logger.info(
+          "max stashed blocks: %d  (%d bytes), limit: <unknown>\n",
+          max_stashed_blocks, self._max_stashed_size)
 
   def ReviseStashSize(self):
-    print("Revising stash size...")
+    logger.info("Revising stash size...")
     stash_map = {}
 
     # Create the map between a stash and its def/use points. For example, for a
@@ -746,7 +750,7 @@
           # that will use this stash and replace the command with "new".
           use_cmd = stash_map[stash_raw_id][2]
           replaced_cmds.append(use_cmd)
-          print("%10d  %9s  %s" % (sr.size(), "explicit", use_cmd))
+          logger.info("%10d  %9s  %s", sr.size(), "explicit", use_cmd)
         else:
           # Update the stashes map.
           if sh in stashes:
@@ -762,7 +766,7 @@
         if xf.src_ranges.overlaps(xf.tgt_ranges):
           if stashed_blocks + xf.src_ranges.size() > max_allowed:
             replaced_cmds.append(xf)
-            print("%10d  %9s  %s" % (xf.src_ranges.size(), "implicit", xf))
+            logger.info("%10d  %9s  %s", xf.src_ranges.size(), "implicit", xf)
 
       # Replace the commands in replaced_cmds with "new"s.
       for cmd in replaced_cmds:
@@ -788,28 +792,29 @@
           stashes.pop(sh)
 
     num_of_bytes = new_blocks * self.tgt.blocksize
-    print("  Total %d blocks (%d bytes) are packed as new blocks due to "
-          "insufficient cache size." % (new_blocks, num_of_bytes))
+    logger.info(
+        "  Total %d blocks (%d bytes) are packed as new blocks due to "
+        "insufficient cache size.", new_blocks, num_of_bytes)
     return new_blocks
 
   def ComputePatches(self, prefix):
-    print("Reticulating splines...")
+    logger.info("Reticulating splines...")
     diff_queue = []
     patch_num = 0
     with open(prefix + ".new.dat", "wb") as new_f:
       for index, xf in enumerate(self.transfers):
         if xf.style == "zero":
           tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
-          print("%10d %10d (%6.2f%%) %7s %s %s" % (
-              tgt_size, tgt_size, 100.0, xf.style, xf.tgt_name,
-              str(xf.tgt_ranges)))
+          logger.info(
+              "%10d %10d (%6.2f%%) %7s %s %s", tgt_size, tgt_size, 100.0,
+              xf.style, xf.tgt_name, str(xf.tgt_ranges))
 
         elif xf.style == "new":
           self.tgt.WriteRangeDataToFd(xf.tgt_ranges, new_f)
           tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
-          print("%10d %10d (%6.2f%%) %7s %s %s" % (
-              tgt_size, tgt_size, 100.0, xf.style,
-              xf.tgt_name, str(xf.tgt_ranges)))
+          logger.info(
+              "%10d %10d (%6.2f%%) %7s %s %s", tgt_size, tgt_size, 100.0,
+              xf.style, xf.tgt_name, str(xf.tgt_ranges))
 
         elif xf.style == "diff":
           # We can't compare src and tgt directly because they may have
@@ -827,11 +832,12 @@
             xf.patch = None
             tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
             if xf.src_ranges != xf.tgt_ranges:
-              print("%10d %10d (%6.2f%%) %7s %s %s (from %s)" % (
-                  tgt_size, tgt_size, 100.0, xf.style,
+              logger.info(
+                  "%10d %10d (%6.2f%%) %7s %s %s (from %s)", tgt_size, tgt_size,
+                  100.0, xf.style,
                   xf.tgt_name if xf.tgt_name == xf.src_name else (
                       xf.tgt_name + " (from " + xf.src_name + ")"),
-                  str(xf.tgt_ranges), str(xf.src_ranges)))
+                  str(xf.tgt_ranges), str(xf.src_ranges))
           else:
             if xf.patch:
               # We have already generated the patch with imgdiff, while
@@ -850,9 +856,9 @@
 
     if diff_queue:
       if self.threads > 1:
-        print("Computing patches (using %d threads)..." % (self.threads,))
+        logger.info("Computing patches (using %d threads)...", self.threads)
       else:
-        print("Computing patches...")
+        logger.info("Computing patches...")
 
       diff_total = len(diff_queue)
       patches = [None] * diff_total
@@ -874,13 +880,6 @@
             xf_index, imgdiff, patch_index = diff_queue.pop()
             xf = self.transfers[xf_index]
 
-            if sys.stdout.isatty():
-              diff_left = len(diff_queue)
-              progress = (diff_total - diff_left) * 100 / diff_total
-              # '\033[K' is to clear to EOL.
-              print(' [%3d%%] %s\033[K' % (progress, xf.tgt_name), end='\r')
-              sys.stdout.flush()
-
           patch = xf.patch
           if not patch:
             src_ranges = xf.src_ranges
@@ -918,13 +917,10 @@
       while threads:
         threads.pop().join()
 
-      if sys.stdout.isatty():
-        print('\n')
-
       if error_messages:
-        print('ERROR:')
-        print('\n'.join(error_messages))
-        print('\n\n\n')
+        logger.error('ERROR:')
+        logger.error('\n'.join(error_messages))
+        logger.error('\n\n\n')
         sys.exit(1)
     else:
       patches = []
@@ -938,14 +934,13 @@
         offset += xf.patch_len
         patch_fd.write(patch)
 
-        if common.OPTIONS.verbose:
-          tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
-          print("%10d %10d (%6.2f%%) %7s %s %s %s" % (
-              xf.patch_len, tgt_size, xf.patch_len * 100.0 / tgt_size,
-              xf.style,
-              xf.tgt_name if xf.tgt_name == xf.src_name else (
-                  xf.tgt_name + " (from " + xf.src_name + ")"),
-              xf.tgt_ranges, xf.src_ranges))
+        tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
+        logger.info(
+            "%10d %10d (%6.2f%%) %7s %s %s %s", xf.patch_len, tgt_size,
+            xf.patch_len * 100.0 / tgt_size, xf.style,
+            xf.tgt_name if xf.tgt_name == xf.src_name else (
+                xf.tgt_name + " (from " + xf.src_name + ")"),
+            xf.tgt_ranges, xf.src_ranges)
 
   def AssertSha1Good(self):
     """Check the SHA-1 of the src & tgt blocks in the transfer list.
@@ -1005,7 +1000,7 @@
         assert touched[i] == 1
 
   def ImproveVertexSequence(self):
-    print("Improving vertex order...")
+    logger.info("Improving vertex order...")
 
     # At this point our digraph is acyclic; we reversed any edges that
     # were backwards in the heuristically-generated sequence.  The
@@ -1057,7 +1052,7 @@
     blocks will be written to the same stash slot in WriteTransfers().
     """
 
-    print("Reversing backward edges...")
+    logger.info("Reversing backward edges...")
     in_order = 0
     out_of_order = 0
     stash_raw_id = 0
@@ -1089,15 +1084,15 @@
           xf.goes_after[u] = None    # value doesn't matter
           u.goes_before[xf] = None
 
-    print(("  %d/%d dependencies (%.2f%%) were violated; "
-           "%d source blocks stashed.") %
-          (out_of_order, in_order + out_of_order,
-           (out_of_order * 100.0 / (in_order + out_of_order))
-           if (in_order + out_of_order) else 0.0,
-           stash_size))
+    logger.info(
+        "  %d/%d dependencies (%.2f%%) were violated; %d source blocks "
+        "stashed.", out_of_order, in_order + out_of_order,
+        (out_of_order * 100.0 / (in_order + out_of_order)) if (
+            in_order + out_of_order) else 0.0,
+        stash_size)
 
   def FindVertexSequence(self):
-    print("Finding vertex sequence...")
+    logger.info("Finding vertex sequence...")
 
     # This is based on "A Fast & Effective Heuristic for the Feedback
     # Arc Set Problem" by P. Eades, X. Lin, and W.F. Smyth.  Think of
@@ -1210,7 +1205,7 @@
     self.transfers = new_transfers
 
   def GenerateDigraph(self):
-    print("Generating digraph...")
+    logger.info("Generating digraph...")
 
     # Each item of source_ranges will be:
     #   - None, if that block is not used as a source,
@@ -1376,9 +1371,9 @@
 
         if tgt_changed < tgt_size * crop_threshold:
           assert tgt_changed + tgt_skipped.size() == tgt_size
-          print('%10d %10d (%6.2f%%) %s' % (
-              tgt_skipped.size(), tgt_size,
-              tgt_skipped.size() * 100.0 / tgt_size, tgt_name))
+          logger.info(
+              '%10d %10d (%6.2f%%) %s', tgt_skipped.size(), tgt_size,
+              tgt_skipped.size() * 100.0 / tgt_size, tgt_name)
           AddSplitTransfers(
               "%s-skipped" % (tgt_name,),
               "%s-skipped" % (src_name,),
@@ -1519,7 +1514,7 @@
                                    split_src_ranges,
                                    patch_content))
 
-    print("Finding transfers...")
+    logger.info("Finding transfers...")
 
     large_apks = []
     split_large_apks = []
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 43c91da..4a013c2 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -26,6 +26,7 @@
 
 from __future__ import print_function
 
+import logging
 import os
 import os.path
 import re
@@ -35,6 +36,8 @@
 import common
 import verity_utils
 
+logger = logging.getLogger(__name__)
+
 OPTIONS = common.OPTIONS
 BLOCK_SIZE = common.BLOCK_SIZE
 BYTES_IN_MB = 1024 * 1024
@@ -228,8 +231,8 @@
       "partition_size" not in prop_dict):
     # If partition_size is not defined, use output of `du' + reserved_size.
     size = GetDiskUsage(in_dir)
-    if OPTIONS.verbose:
-      print("The tree size of %s is %d MB." % (in_dir, size // BYTES_IN_MB))
+    logger.info(
+        "The tree size of %s is %d MB.", in_dir, size // BYTES_IN_MB)
     size += int(prop_dict.get("partition_reserved_size", 0))
     # Round this up to a multiple of 4K so that avbtool works
     size = common.RoundUpTo4K(size)
@@ -241,8 +244,8 @@
           lambda x: verity_utils.AVBCalcMaxImageSize(
               avbtool, avb_footer_type, x, avb_signing_args))
     prop_dict["partition_size"] = str(size)
-    if OPTIONS.verbose:
-      print("Allocating %d MB for %s." % (size // BYTES_IN_MB, out_file))
+    logger.info(
+        "Allocating %d MB for %s.", size // BYTES_IN_MB, out_file)
 
   prop_dict["image_size"] = prop_dict["partition_size"]
 
@@ -350,8 +353,8 @@
       du_str = "{} bytes ({} MB)".format(du, du // BYTES_IN_MB)
     # Suppress any errors from GetDiskUsage() to avoid hiding the real errors
     # from common.RunAndCheckOutput().
-    except Exception as e:  # pylint: disable=broad-except
-      print(e, file=sys.stderr)
+    except Exception:  # pylint: disable=broad-except
+      logger.exception("Failed to compute disk usage with du")
       du_str = "unknown"
     print(
         "Out of space? The tree size of {} is {}, with reserved space of {} "
@@ -664,6 +667,8 @@
     print(__doc__)
     sys.exit(1)
 
+  common.InitLogging()
+
   in_dir = argv[0]
   glob_dict_file = argv[1]
   out_file = argv[2]
@@ -697,7 +702,7 @@
     elif image_filename == "product_services.img":
       mount_point = "product_services"
     else:
-      print("error: unknown image file name ", image_filename, file=sys.stderr)
+      logger.error("Unknown image file name %s", image_filename)
       sys.exit(1)
 
     image_properties = ImagePropFromGlobalDict(glob_dict, mount_point)
@@ -705,14 +710,14 @@
   try:
     BuildImage(in_dir, image_properties, out_file, target_out)
   except:
-    print("Error: Failed to build {} from {}".format(out_file, in_dir),
-          file=sys.stderr)
+    logger.error("Failed to build %s from %s", out_file, in_dir)
     raise
 
   if prop_file_out:
     glob_dict_out = GlobalDictFromImageProp(image_properties, mount_point)
     SaveGlobalDict(prop_file_out, glob_dict_out)
 
+
 if __name__ == '__main__':
   try:
     main(sys.argv[1:])
diff --git a/tools/releasetools/check_ota_package_signature.py b/tools/releasetools/check_ota_package_signature.py
index a580709..7d3424b 100755
--- a/tools/releasetools/check_ota_package_signature.py
+++ b/tools/releasetools/check_ota_package_signature.py
@@ -21,16 +21,18 @@
 from __future__ import print_function
 
 import argparse
+import logging
 import re
 import subprocess
 import sys
 import zipfile
-
 from hashlib import sha1
 from hashlib import sha256
 
 import common
 
+logger = logging.getLogger(__name__)
+
 
 def CertUsesSha256(cert):
   """Check if the cert uses SHA-256 hashing algorithm."""
@@ -181,6 +183,8 @@
   parser.add_argument('package', help='The OTA package to be verified.')
   args = parser.parse_args()
 
+  common.InitLogging()
+
   VerifyPackage(args.certificate, args.package)
   VerifyAbOtaPayload(args.certificate, args.package)
 
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 7cca766..fe63458 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -20,6 +20,9 @@
 import getpass
 import gzip
 import imp
+import json
+import logging
+import logging.config
 import os
 import platform
 import re
@@ -37,6 +40,8 @@
 import blockimgdiff
 import sparse_img
 
+logger = logging.getLogger(__name__)
+
 
 class Options(object):
   def __init__(self):
@@ -121,13 +126,53 @@
   pass
 
 
+def InitLogging():
+  DEFAULT_LOGGING_CONFIG = {
+      'version': 1,
+      'disable_existing_loggers': False,
+      'formatters': {
+          'standard': {
+              'format':
+                  '%(asctime)s - %(filename)s - %(levelname)-8s: %(message)s',
+              'datefmt': '%Y-%m-%d %H:%M:%S',
+          },
+      },
+      'handlers': {
+          'default': {
+              'class': 'logging.StreamHandler',
+              'formatter': 'standard',
+          },
+      },
+      'loggers': {
+          '': {
+              'handlers': ['default'],
+              'level': 'WARNING',
+              'propagate': True,
+          }
+      }
+  }
+  env_config = os.getenv('LOGGING_CONFIG')
+  if env_config:
+    with open(env_config) as f:
+      config = json.load(f)
+  else:
+    config = DEFAULT_LOGGING_CONFIG
+
+    # Increase the logging level for verbose mode.
+    if OPTIONS.verbose:
+      config = copy.deepcopy(DEFAULT_LOGGING_CONFIG)
+      config['loggers']['']['level'] = 'INFO'
+
+  logging.config.dictConfig(config)
+
+
 def Run(args, verbose=None, **kwargs):
   """Creates and returns a subprocess.Popen object.
 
   Args:
     args: The command represented as a list of strings.
-    verbose: Whether the commands should be shown (default to OPTIONS.verbose
-        if unspecified).
+    verbose: Whether the commands should be shown. Defaults to the global
+        verbosity if unspecified.
     kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
         stdin, etc. stdout and stderr will default to subprocess.PIPE and
         subprocess.STDOUT respectively unless caller specifies any of them.
@@ -135,13 +180,12 @@
   Returns:
     A subprocess.Popen object.
   """
-  if verbose is None:
-    verbose = OPTIONS.verbose
   if 'stdout' not in kwargs and 'stderr' not in kwargs:
     kwargs['stdout'] = subprocess.PIPE
     kwargs['stderr'] = subprocess.STDOUT
-  if verbose:
-    print("  Running: \"{}\"".format(" ".join(args)))
+  # Don't log anything if the caller explicitly says so.
+  if verbose is not False:
+    logger.info("  Running: \"%s\"", " ".join(args))
   return subprocess.Popen(args, **kwargs)
 
 
@@ -150,8 +194,8 @@
 
   Args:
     args: The command represented as a list of strings.
-    verbose: Whether the commands should be shown (default to OPTIONS.verbose
-        if unspecified).
+    verbose: Whether the commands should be shown. Defaults to the global
+        verbosity if unspecified.
     kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
         stdin, etc. stdout and stderr will default to subprocess.PIPE and
         subprocess.STDOUT respectively unless caller specifies any of them.
@@ -162,12 +206,11 @@
   Raises:
     ExternalError: On non-zero exit from the command.
   """
-  if verbose is None:
-    verbose = OPTIONS.verbose
   proc = Run(args, verbose=verbose, **kwargs)
   output, _ = proc.communicate()
-  if verbose:
-    print("{}".format(output.rstrip()))
+  # Don't log anything if the caller explicitly says so.
+  if verbose is not False:
+    logger.info("%s", output.rstrip())
   if proc.returncode != 0:
     raise ExternalError(
         "Failed to run command '{}' (exit code {}):\n{}".format(
@@ -277,8 +320,8 @@
       if os.path.exists(system_base_fs_file):
         d["system_base_fs_file"] = system_base_fs_file
       else:
-        print("Warning: failed to find system base fs file: %s" % (
-            system_base_fs_file,))
+        logger.warning(
+            "Failed to find system base fs file: %s", system_base_fs_file)
         del d["system_base_fs_file"]
 
     if "vendor_base_fs_file" in d:
@@ -287,8 +330,8 @@
       if os.path.exists(vendor_base_fs_file):
         d["vendor_base_fs_file"] = vendor_base_fs_file
       else:
-        print("Warning: failed to find vendor base fs file: %s" % (
-            vendor_base_fs_file,))
+        logger.warning(
+            "Failed to find vendor base fs file: %s", vendor_base_fs_file)
         del d["vendor_base_fs_file"]
 
   def makeint(key):
@@ -364,7 +407,7 @@
   try:
     data = read_helper(prop_file)
   except KeyError:
-    print("Warning: could not read %s" % (prop_file,))
+    logger.warning("Failed to read %s", prop_file)
     data = ""
   return LoadDictionaryFromLines(data.split("\n"))
 
@@ -394,7 +437,7 @@
   try:
     data = read_helper(recovery_fstab_path)
   except KeyError:
-    print("Warning: could not find {}".format(recovery_fstab_path))
+    logger.warning("Failed to find %s", recovery_fstab_path)
     data = ""
 
   assert fstab_version == 2
@@ -447,7 +490,7 @@
 
 def DumpInfoDict(d):
   for k, v in sorted(d.items()):
-    print("%-25s = (%s) %s" % (k, type(v).__name__, v))
+    logger.info("%-25s = (%s) %s", k, type(v).__name__, v)
 
 
 def AppendAVBSigningArgs(cmd, partition):
@@ -657,15 +700,15 @@
 
   prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
   if os.path.exists(prebuilt_path):
-    print("using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,))
+    logger.info("using prebuilt %s from BOOTABLE_IMAGES...", prebuilt_name)
     return File.FromLocalFile(name, prebuilt_path)
 
   prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
   if os.path.exists(prebuilt_path):
-    print("using prebuilt %s from IMAGES..." % (prebuilt_name,))
+    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
     return File.FromLocalFile(name, prebuilt_path)
 
-  print("building image from target_files %s..." % (tree_subdir,))
+  logger.info("building image from target_files %s...", tree_subdir)
 
   if info_dict is None:
     info_dict = OPTIONS.info_dict
@@ -1001,9 +1044,9 @@
     if pct >= 99.0:
       raise ExternalError(msg)
     elif pct >= 95.0:
-      print("\n  WARNING: %s\n" % (msg,))
-    elif OPTIONS.verbose:
-      print("  ", msg)
+      logger.warning("\n  WARNING: %s\n", msg)
+    else:
+      logger.info("  %s", msg)
 
 
 def ReadApkCerts(tf_zip):
@@ -1302,13 +1345,13 @@
           continue
         m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
         if not m:
-          print("failed to parse password file: ", line)
+          logger.warning("Failed to parse password file: %s", line)
         else:
           result[m.group(2)] = m.group(1)
       f.close()
     except IOError as e:
       if e.errno != errno.ENOENT:
-        print("error reading password file: ", str(e))
+        logger.exception("Error reading password file:")
     return result
 
 
@@ -1452,10 +1495,10 @@
           if x == ".py":
             f = b
           info = imp.find_module(f, [d])
-        print("loaded device-specific extensions from", path)
+        logger.info("loaded device-specific extensions from %s", path)
         self.module = imp.load_module("device_specific", *info)
       except ImportError:
-        print("unable to load device-specific module; assuming none")
+        logger.info("unable to load device-specific module; assuming none")
 
   def _DoCall(self, function_name, *args, **kwargs):
     """Call the named function in the device-specific module, passing
@@ -1597,7 +1640,7 @@
       th.start()
       th.join(timeout=300)   # 5 mins
       if th.is_alive():
-        print("WARNING: diff command timed out")
+        logger.warning("diff command timed out")
         p.terminate()
         th.join(5)
         if th.is_alive():
@@ -1605,8 +1648,7 @@
           th.join()
 
       if p.returncode != 0:
-        print("WARNING: failure running %s:\n%s\n" % (
-            diff_program, "".join(err)))
+        logger.warning("Failure running %s:\n%s\n", diff_program, "".join(err))
         self.patch = None
         return None, None, None
       diff = ptemp.read()
@@ -1630,7 +1672,7 @@
 
 def ComputeDifferences(diffs):
   """Call ComputePatch on all the Difference objects in 'diffs'."""
-  print(len(diffs), "diffs to compute")
+  logger.info("%d diffs to compute", len(diffs))
 
   # Do the largest files first, to try and reduce the long-pole effect.
   by_size = [(i.tf.size, i) for i in diffs]
@@ -1656,14 +1698,14 @@
         else:
           name = "%s (%s)" % (tf.name, sf.name)
         if patch is None:
-          print(
-              "patching failed!                                  %s" % (name,))
+          logger.error("patching failed! %40s", name)
         else:
-          print("%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
-              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name))
+          logger.info(
+              "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
+              tf.size, 100.0 * len(patch) / tf.size, name)
       lock.release()
-    except Exception as e:
-      print(e)
+    except Exception:
+      logger.exception("Failed to compute diff from worker")
       raise
 
   # start worker threads; wait for them all to finish.
@@ -2086,6 +2128,6 @@
   # in the L release.
   sh_location = "bin/install-recovery.sh"
 
-  print("putting script in", sh_location)
+  logger.info("putting script in %s", sh_location)
 
   output_sink(sh_location, sh)
diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py
index 01ff149..0156b72 100755
--- a/tools/releasetools/img_from_target_files.py
+++ b/tools/releasetools/img_from_target_files.py
@@ -28,6 +28,7 @@
 
 from __future__ import print_function
 
+import logging
 import os
 import shutil
 import sys
@@ -39,6 +40,7 @@
   print("Python 2.7 or newer is required.", file=sys.stderr)
   sys.exit(1)
 
+logger = logging.getLogger(__name__)
 
 OPTIONS = common.OPTIONS
 
@@ -72,6 +74,8 @@
     common.Usage(__doc__)
     sys.exit(1)
 
+  common.InitLogging()
+
   OPTIONS.input_tmp = common.UnzipTemp(args[0], ["IMAGES/*", "OTA/*"])
   output_zip = zipfile.ZipFile(args[1], "w", compression=zipfile.ZIP_DEFLATED)
   CopyInfo(output_zip)
@@ -90,11 +94,11 @@
       common.ZipWrite(output_zip, os.path.join(images_path, image), image)
 
   finally:
-    print("cleaning up...")
+    logger.info("cleaning up...")
     common.ZipClose(output_zip)
     shutil.rmtree(OPTIONS.input_tmp)
 
-  print("done.")
+  logger.info("done.")
 
 
 if __name__ == '__main__':
@@ -102,5 +106,5 @@
     common.CloseInheritedPipes()
     main(sys.argv[1:])
   except common.ExternalError as e:
-    print("\n   ERROR: %s\n" % (e,))
+    logger.exception("\n   ERROR:\n")
     sys.exit(1)
diff --git a/tools/releasetools/make_recovery_patch.py b/tools/releasetools/make_recovery_patch.py
index 7c6007e..725b355 100755
--- a/tools/releasetools/make_recovery_patch.py
+++ b/tools/releasetools/make_recovery_patch.py
@@ -16,24 +16,27 @@
 
 from __future__ import print_function
 
+import logging
+import os
 import sys
 
+import common
+
 if sys.hexversion < 0x02070000:
   print("Python 2.7 or newer is required.", file=sys.stderr)
   sys.exit(1)
 
-import os
-import common
+logger = logging.getLogger(__name__)
 
 OPTIONS = common.OPTIONS
 
-def main(argv):
-  # def option_handler(o, a):
-  #   return False
 
+def main(argv):
   args = common.ParseOptions(argv, __doc__)
   input_dir, output_dir = args
 
+  common.InitLogging()
+
   OPTIONS.info_dict = common.LoadInfoDict(input_dir)
 
   recovery_img = common.GetBootableImage("recovery.img", "recovery.img",
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index daf959f..2264655 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -164,6 +164,7 @@
 
 from __future__ import print_function
 
+import logging
 import multiprocessing
 import os.path
 import shlex
@@ -181,6 +182,7 @@
   print("Python 2.7 or newer is required.", file=sys.stderr)
   sys.exit(1)
 
+logger = logging.getLogger(__name__)
 
 OPTIONS = common.OPTIONS
 OPTIONS.package_key = None
@@ -576,11 +578,11 @@
         OPTIONS.input_tmp, "RECOVERY")
     common.ZipWriteStr(
         output_zip, recovery_two_step_img_name, recovery_two_step_img.data)
-    print("two-step package: using %s in stage 1/3" % (
-        recovery_two_step_img_name,))
+    logger.info(
+        "two-step package: using %s in stage 1/3", recovery_two_step_img_name)
     script.WriteRawImage("/boot", recovery_two_step_img_name)
   else:
-    print("two-step package: using recovery.img in stage 1/3")
+    logger.info("two-step package: using recovery.img in stage 1/3")
     # The "recovery.img" entry has been written into package earlier.
     script.WriteRawImage("/boot", "recovery.img")
 
@@ -1344,8 +1346,8 @@
   target_api_version = target_info["recovery_api_version"]
   source_api_version = source_info["recovery_api_version"]
   if source_api_version == 0:
-    print("WARNING: generating edify script for a source that "
-          "can't install it.")
+    logger.warning(
+        "Generating edify script for a source that can't install it.")
 
   script = edify_generator.EdifyGenerator(
       source_api_version, target_info, fstab=source_info["fstab"])
@@ -1523,8 +1525,9 @@
     else:
       include_full_boot = False
 
-      print("boot      target: %d  source: %d  diff: %d" % (
-          target_boot.size, source_boot.size, len(d)))
+      logger.info(
+          "boot      target: %d  source: %d  diff: %d", target_boot.size,
+          source_boot.size, len(d))
 
       common.ZipWriteStr(output_zip, "boot.img.p", d)
 
@@ -1574,19 +1577,19 @@
   if OPTIONS.two_step:
     common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
     script.WriteRawImage("/boot", "boot.img")
-    print("writing full boot image (forced by two-step mode)")
+    logger.info("writing full boot image (forced by two-step mode)")
 
   if not OPTIONS.two_step:
     if updating_boot:
       if include_full_boot:
-        print("boot image changed; including full.")
+        logger.info("boot image changed; including full.")
         script.Print("Installing boot image...")
         script.WriteRawImage("/boot", "boot.img")
       else:
         # Produce the boot image by applying a patch to the current
         # contents of the boot partition, and write it back to the
         # partition.
-        print("boot image changed; including patch.")
+        logger.info("boot image changed; including patch.")
         script.Print("Patching boot image...")
         script.ShowProgress(0.1, 10)
         script.PatchPartition(
@@ -1596,7 +1599,7 @@
                 boot_type, boot_device, source_boot.size, source_boot.sha1),
             'boot.img.p')
     else:
-      print("boot image unchanged; skipping.")
+      logger.info("boot image unchanged; skipping.")
 
   # Do device-specific installation (eg, write radio image).
   device_specific.IncrementalOTA_InstallEnd()
@@ -1787,7 +1790,7 @@
       common.ZipWriteStr(output_zip, care_map_name, care_map_data,
                          compress_type=zipfile.ZIP_STORED)
     else:
-      print("Warning: cannot find care map file in target_file package")
+      logger.warning("Cannot find care map file in target_file package")
 
   AddCompatibilityArchiveIfTrebleEnabled(
       target_zip, output_zip, target_info, source_info)
@@ -1903,6 +1906,8 @@
     common.Usage(__doc__)
     sys.exit(1)
 
+  common.InitLogging()
+
   if OPTIONS.downgrade:
     # We should only allow downgrading incrementals (as opposed to full).
     # Otherwise the device may go back from arbitrary build with this full
@@ -1923,9 +1928,8 @@
     with zipfile.ZipFile(args[0], 'r') as input_zip:
       OPTIONS.info_dict = common.LoadInfoDict(input_zip)
 
-  if OPTIONS.verbose:
-    print("--- target info ---")
-    common.DumpInfoDict(OPTIONS.info_dict)
+  logger.info("--- target info ---")
+  common.DumpInfoDict(OPTIONS.info_dict)
 
   # Load the source build dict if applicable.
   if OPTIONS.incremental_source is not None:
@@ -1933,9 +1937,8 @@
     with zipfile.ZipFile(OPTIONS.incremental_source, 'r') as source_zip:
       OPTIONS.source_info_dict = common.LoadInfoDict(source_zip)
 
-    if OPTIONS.verbose:
-      print("--- source info ---")
-      common.DumpInfoDict(OPTIONS.source_info_dict)
+    logger.info("--- source info ---")
+    common.DumpInfoDict(OPTIONS.source_info_dict)
 
   # Load OEM dicts if provided.
   OPTIONS.oem_dicts = _LoadOemDicts(OPTIONS.oem_source)
@@ -1959,7 +1962,7 @@
         output_file=args[1],
         source_file=OPTIONS.incremental_source)
 
-    print("done.")
+    logger.info("done.")
     return
 
   # Sanity check the loaded info dicts first.
@@ -1970,7 +1973,7 @@
   # Non-A/B OTAs rely on /cache partition to store temporary files.
   cache_size = OPTIONS.info_dict.get("cache_size")
   if cache_size is None:
-    print("--- can't determine the cache partition size ---")
+    logger.warning("--- can't determine the cache partition size ---")
   OPTIONS.cache_size = cache_size
 
   if OPTIONS.extra_script is not None:
@@ -1979,7 +1982,7 @@
   if OPTIONS.extracted_input is not None:
     OPTIONS.input_tmp = OPTIONS.extracted_input
   else:
-    print("unzipping target target-files...")
+    logger.info("unzipping target target-files...")
     OPTIONS.input_tmp = common.UnzipTemp(args[0], UNZIP_PATTERN)
   OPTIONS.target_tmp = OPTIONS.input_tmp
 
@@ -1991,7 +1994,7 @@
   if OPTIONS.device_specific is None:
     from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py")
     if os.path.exists(from_input):
-      print("(using device-specific extensions from target_files)")
+      logger.info("(using device-specific extensions from target_files)")
       OPTIONS.device_specific = from_input
     else:
       OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions")
@@ -2008,7 +2011,7 @@
 
   # Generate an incremental OTA.
   else:
-    print("unzipping source target-files...")
+    logger.info("unzipping source target-files...")
     OPTIONS.source_tmp = common.UnzipTemp(
         OPTIONS.incremental_source, UNZIP_PATTERN)
     with zipfile.ZipFile(args[0], 'r') as input_zip, \
@@ -2024,15 +2027,15 @@
         target_files_diff.recursiveDiff(
             '', OPTIONS.source_tmp, OPTIONS.input_tmp, out_file)
 
-  print("done.")
+  logger.info("done.")
 
 
 if __name__ == '__main__':
   try:
     common.CloseInheritedPipes()
     main(sys.argv[1:])
-  except common.ExternalError as e:
-    print("\n   ERROR: %s\n" % (e,))
+  except common.ExternalError:
+    logger.exception("\n   ERROR:\n")
     sys.exit(1)
   finally:
     common.Cleanup()
diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py
index ca53ae1..5ebb1f0 100644
--- a/tools/releasetools/sparse_img.py
+++ b/tools/releasetools/sparse_img.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 import bisect
+import logging
 import os
 import struct
 import threading
@@ -20,6 +21,8 @@
 
 import rangelib
 
+logger = logging.getLogger(__name__)
+
 
 class SparseImage(object):
   """Wraps a sparse image file into an image object.
@@ -61,8 +64,9 @@
       raise ValueError("Chunk header size was expected to be 12, but is %u." %
                        (chunk_hdr_sz,))
 
-    print("Total of %u %u-byte output blocks in %u input chunks."
-          % (total_blks, blk_sz, total_chunks))
+    logger.info(
+        "Total of %u %u-byte output blocks in %u input chunks.", total_blks,
+        blk_sz, total_chunks)
 
     if not build_map:
       assert not hashtree_info_generator, \
diff --git a/tools/releasetools/verity_utils.py b/tools/releasetools/verity_utils.py
index 626a1dd..00af296 100644
--- a/tools/releasetools/verity_utils.py
+++ b/tools/releasetools/verity_utils.py
@@ -16,6 +16,7 @@
 
 from __future__ import print_function
 
+import logging
 import os.path
 import shlex
 import struct
@@ -24,6 +25,8 @@
 import sparse_img
 from rangelib import RangeSet
 
+logger = logging.getLogger(__name__)
+
 OPTIONS = common.OPTIONS
 BLOCK_SIZE = common.BLOCK_SIZE
 FIXED_SALT = "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7"
@@ -71,7 +74,7 @@
 
 def ZeroPadSimg(image_file, pad_size):
   blocks = pad_size // BLOCK_SIZE
-  print("Padding %d blocks (%d bytes)" % (blocks, pad_size))
+  logger.info("Padding %d blocks (%d bytes)", blocks, pad_size)
   simg = sparse_img.SparseImage(image_file, mode="r+b", build_map=False)
   simg.AppendFillChunk(0, blocks)
 
@@ -114,9 +117,9 @@
     else:
       hi = i
 
-  if OPTIONS.verbose:
-    print("Adjusted partition size for verity, partition_size: {},"
-          " verity_size: {}".format(result, verity_size))
+  logger.info(
+      "Adjusted partition size for verity, partition_size: %s, verity_size: %s",
+      result, verity_size)
   AdjustPartitionSizeForVerity.results[key] = (result, verity_size)
   return (result, verity_size)
 
@@ -326,9 +329,9 @@
     else:
       lo = mid + BLOCK_SIZE
 
-  if OPTIONS.verbose:
-    print("AVBCalcMinPartitionSize({}): partition_size: {}.".format(
-        image_size, partition_size))
+  logger.info(
+      "AVBCalcMinPartitionSize(%d): partition_size: %d.",
+      image_size, partition_size)
 
   return partition_size
 
@@ -514,9 +517,9 @@
             salt, self.hashtree_info.salt)
 
     if root_hash != self.hashtree_info.root_hash:
-      print(
-          "Calculated root hash {} doesn't match the one in metadata {}".format(
-              root_hash, self.hashtree_info.root_hash))
+      logger.warning(
+          "Calculated root hash %s doesn't match the one in metadata %s",
+          root_hash, self.hashtree_info.root_hash)
       return False
 
     # Reads the generated hash tree and checks if it has the exact same bytes