Protect SparseImage._GetRangeData() with lock

The generator function is not thread-safe and is prone to race
conditions. This CL uses a lock to protect the generator and removes
the now-redundant locks elsewhere, e.g. in 'WriteRangeDataToFd()'.
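
As a minimal sketch of the locking pattern (hypothetical
_SharedFileReader/ReadBlocks names, not the actual SparseImage code):
the generator seeks and reads a shared file object, so it holds a lock
for its entire iteration to keep two concurrent iterations from
interleaving seek() and read() calls.

  import threading

  class _SharedFileReader(object):
    """Hypothetical reader illustrating the locking pattern in this CL."""

    def __init__(self, f, blocksize=4096):
      self._f = f                    # shared file object, like simg_f
      self._blocksize = blocksize
      self._lock = threading.Lock()  # like generator_lock

    def ReadBlocks(self, ranges):
      """Yields the data for each (start, end) block range in 'ranges'."""
      # The lock is taken when iteration starts and released only when the
      # generator is exhausted or closed, so a single iteration owns the
      # shared file position for its whole lifetime.
      with self._lock:
        for s, e in ranges:
          self._f.seek(s * self._blocksize)
          yield self._f.read((e - s) * self._blocksize)

Because the lock stays held across yields, callers no longer need their
own locking around the generator, which is why the per-call locks, e.g.
in 'WriteRangeDataToFd()', can be dropped.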

Bug: 71908713
Test: Generate an incremental package several times for angler 4208095 to 4442250.
Change-Id: I9e6f0a182a1ba7904a597f403f2b12fe05016513
diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py
index 7eb60d9..c978be8 100644
--- a/tools/releasetools/sparse_img.py
+++ b/tools/releasetools/sparse_img.py
@@ -15,6 +15,7 @@
 import bisect
 import os
 import struct
+import threading
 from hashlib import sha1
 
 import rangelib
@@ -111,6 +112,8 @@
         raise ValueError("Unknown chunk type 0x%04X not supported" %
                          (chunk_type,))
 
+    self.generator_lock = threading.Lock()
+
     self.care_map = rangelib.RangeSet(care_data)
     self.offset_index = [i[0] for i in offset_map]
 
@@ -173,39 +176,39 @@
     particular is not necessarily equal to the number of ranges in
     'ranges'.
 
-    This generator is stateful -- it depends on the open file object
-    contained in this SparseImage, so you should not try to run two
+    Use a lock to protect the generator so that we will not run two
     instances of this generator on the same object simultaneously."""
 
     f = self.simg_f
-    for s, e in ranges:
-      to_read = e-s
-      idx = bisect.bisect_right(self.offset_index, s) - 1
-      chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
-
-      # for the first chunk we may be starting partway through it.
-      remain = chunk_len - (s - chunk_start)
-      this_read = min(remain, to_read)
-      if filepos is not None:
-        p = filepos + ((s - chunk_start) * self.blocksize)
-        f.seek(p, os.SEEK_SET)
-        yield f.read(this_read * self.blocksize)
-      else:
-        yield fill_data * (this_read * (self.blocksize >> 2))
-      to_read -= this_read
-
-      while to_read > 0:
-        # continue with following chunks if this range spans multiple chunks.
-        idx += 1
+    with self.generator_lock:
+      for s, e in ranges:
+        to_read = e-s
+        idx = bisect.bisect_right(self.offset_index, s) - 1
         chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
-        this_read = min(chunk_len, to_read)
+
+        # for the first chunk we may be starting partway through it.
+        remain = chunk_len - (s - chunk_start)
+        this_read = min(remain, to_read)
         if filepos is not None:
-          f.seek(filepos, os.SEEK_SET)
+          p = filepos + ((s - chunk_start) * self.blocksize)
+          f.seek(p, os.SEEK_SET)
           yield f.read(this_read * self.blocksize)
         else:
           yield fill_data * (this_read * (self.blocksize >> 2))
         to_read -= this_read
 
+        while to_read > 0:
+          # continue with following chunks if this range spans multiple chunks.
+          idx += 1
+          chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
+          this_read = min(chunk_len, to_read)
+          if filepos is not None:
+            f.seek(filepos, os.SEEK_SET)
+            yield f.read(this_read * self.blocksize)
+          else:
+            yield fill_data * (this_read * (self.blocksize >> 2))
+          to_read -= this_read
+
   def LoadFileBlockMap(self, fn, clobbered_blocks):
     remaining = self.care_map
     self.file_map = out = {}