1 # Copyright (C) 2008 The Android Open Source Project
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
34 from hashlib import sha1 as sha1
class Options(object):
  # Global option holder shared by all releasetools scripts (an instance is
  # normally bound to the module-level OPTIONS name).
  # NOTE(review): this block appears truncated in the provided source -- the
  # __init__ header and the closing brace of the dict literal are not
  # visible, so the code below does not parse as-is.
  platform_search_path = {
      "linux2": "out/host/linux-x86",
      "darwin": "out/host/darwin-x86",

  # Host-tool locations; relative to search_path unless noted otherwise.
  self.search_path = platform_search_path.get(sys.platform, None)
  self.signapk_path = "framework/signapk.jar"  # Relative to search_path
  self.signapk_shared_library_path = "lib64"   # Relative to search_path
  self.extra_signapk_args = []
  self.java_path = "java"  # Use the one on the path by default.
  self.java_args = ["-Xmx2048m"]  # The default JVM args.
  self.public_key_suffix = ".x509.pem"
  self.private_key_suffix = ".pk8"
  # use otatools built boot_signer by default
  self.boot_signer_path = "boot_signer"
  self.boot_signer_args = []
  self.verity_signer_path = None
  self.verity_signer_args = []
  self.device_specific = None
  # Info dicts for the source (incremental base) and target builds.
  self.source_info_dict = None
  self.target_info_dict = None
  self.worker_threads = None
  # Stash size cannot exceed cache_size * threshold.
  self.cache_size = None
  self.stash_threshold = 0.8
# Values for "certificate" in apkcerts that mean special things.
# NOTE(review): presumably "PRESIGNED" means the apk arrives already signed
# and "EXTERNAL" means it is signed outside this build -- confirm against
# the apkcerts.txt producers.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
class ErrorCode(object):
  """Error codes reported for failures during actual update package
  installation.

  Codes 0-999 are reserved for failures that happen before package
  installation begins (e.g. low battery, package verification failure).
  Detailed definitions live in 'bootable/recovery/error_code.h'.
  """
  # System partition errors (1000-1999).
  SYSTEM_VERIFICATION_FAILURE = 1000
  SYSTEM_UPDATE_FAILURE = 1001
  SYSTEM_UNEXPECTED_CONTENTS = 1002
  SYSTEM_NONZERO_CONTENTS = 1003
  SYSTEM_RECOVER_FAILURE = 1004
  # Vendor partition errors (2000-2999).
  VENDOR_VERIFICATION_FAILURE = 2000
  VENDOR_UPDATE_FAILURE = 2001
  VENDOR_UNEXPECTED_CONTENTS = 2002
  VENDOR_NONZERO_CONTENTS = 2003
  VENDOR_RECOVER_FAILURE = 2004
  # Device/build compatibility errors (3000+).
  OEM_PROP_MISMATCH = 3000
  FINGERPRINT_MISMATCH = 3001
  THUMBPRINT_MISMATCH = 3002
  DEVICE_MISMATCH = 3004
  INSUFFICIENT_CACHE_SPACE = 3006
  TUNE_PARTITION_FAILURE = 3007
  APPLY_PATCH_FAILURE = 3008
class ExternalError(RuntimeError):
  """Raised when an external tool invocation (mkbootimg, signapk, unzip,
  openssl, ...) fails or returns a nonzero exit status."""
def Run(args, **kwargs):
  """Create and return a subprocess.Popen object, printing the command
  line on the terminal if -v was specified."""
  # NOTE(review): the verbosity check the docstring mentions is not visible
  # in the provided source -- as written, the command is always printed.
  print " running: ", " ".join(args)
  return subprocess.Popen(args, **kwargs)
def CloseInheritedPipes():
  """ Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
  before doing other work."""
  # The leak only manifests on macOS; other platforms need no action.
  # NOTE(review): the early return, the fstat call that fills 'stat', and
  # the close logic are not visible in the provided source (truncated).
  if platform.system() != "Darwin":
  # Scan the plausible fd range above stdin/stdout/stderr.
  for d in range(3, 1025):
      # 0x1000 is the FIFO bit of st_mode: only pipe descriptors qualify.
      pipebit = stat[0] & 0x1000
def LoadInfoDict(input_file, input_dir=None):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict."""
  # NOTE(review): several lines of this function (the read_helper/makeint
  # 'def' headers, try/except scaffolding, 'continue' statements, and the
  # final return) are not visible in the provided source; comments below
  # annotate only what is visible.

  # Reads one entry either from an open target-files zip or from an
  # extracted directory tree.
  if isinstance(input_file, zipfile.ZipFile):
    return input_file.read(fn)
    path = os.path.join(input_file, *fn.split("/"))
    with open(path) as f:
      if e.errno == errno.ENOENT:

  d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  # ok if misc_info.txt doesn't exist

  # backwards compatibility: These values used to be in their own
  # files. Look for them, in case we're processing an old
  if "mkyaffs2_extra_flags" not in d:
    d["mkyaffs2_extra_flags"] = read_helper(
        "META/mkyaffs2-extra-flags.txt").strip()
  # ok if flags don't exist

  if "recovery_api_version" not in d:
    d["recovery_api_version"] = read_helper(
        "META/recovery-api-version.txt").strip()
    raise ValueError("can't find recovery API version in input target-files")

  if "tool_extensions" not in d:
    d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
  # ok if extensions don't exist

  if "fstab_version" not in d:
    d["fstab_version"] = "1"

  # A few properties are stored as links to the files in the out/ directory.
  # It works fine with the build system. However, they are no longer available
  # when (re)generating from target_files zip. If input_dir is not None, we
  # are doing repacking. Redirect those properties to the actual files in the
  # unzipped directory.
  if input_dir is not None:
    # We carry a copy of file_contexts.bin under META/. If not available,
    # search BOOT/RAMDISK/. Note that sometimes we may need a different file
    # to build images than the one running on device, such as when enabling
    # system_root_image. In that case, we must have the one for image
    # generation copied to META/.
    fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
    fc_config = os.path.join(input_dir, "META", fc_basename)
    if d.get("system_root_image") == "true":
      assert os.path.exists(fc_config)
    if not os.path.exists(fc_config):
      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename)
      if not os.path.exists(fc_config):
    d["selinux_fc"] = fc_config

    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
    if d.get("system_root_image") == "true":
      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
      d["ramdisk_fs_config"] = os.path.join(
          input_dir, "META", "root_filesystem_config.txt")

    # Redirect {system,vendor}_base_fs_file.
    if "system_base_fs_file" in d:
      basename = os.path.basename(d["system_base_fs_file"])
      system_base_fs_file = os.path.join(input_dir, "META", basename)
      if os.path.exists(system_base_fs_file):
        d["system_base_fs_file"] = system_base_fs_file
        print "Warning: failed to find system base fs file: %s" % (
            system_base_fs_file,)
        del d["system_base_fs_file"]

    if "vendor_base_fs_file" in d:
      basename = os.path.basename(d["vendor_base_fs_file"])
      vendor_base_fs_file = os.path.join(input_dir, "META", basename)
      if os.path.exists(vendor_base_fs_file):
        d["vendor_base_fs_file"] = vendor_base_fs_file
        print "Warning: failed to find vendor base fs file: %s" % (
            vendor_base_fs_file,)
        del d["vendor_base_fs_file"]

  # Image sizes recorded at build time, one "<name> <value>" per line.
  data = read_helper("META/imagesizes.txt")
  for line in data.split("\n"):
    name, value = line.split(" ", 1)
    if name == "blocksize":
      d[name + "_size"] = value

  # Coerce known integer-valued keys; base 0 accepts hex/octal strings.
  d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("fstab_version")

  # Parse the recovery fstab (from RECOVERY/, or BOOT/ when recovery is
  # carried inside the boot image).
  system_root_image = d.get("system_root_image", None) == "true"
  if d.get("no_recovery", None) != "true":
    recovery_fstab_path = "RECOVERY/RAMDISK/etc/recovery.fstab"
    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
                                   recovery_fstab_path, system_root_image)
  elif d.get("recovery_as_boot", None) == "true":
    recovery_fstab_path = "BOOT/RAMDISK/etc/recovery.fstab"
    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
                                   recovery_fstab_path, system_root_image)

  d["build.prop"] = LoadBuildProp(read_helper)
def LoadBuildProp(read_helper):
  """Return SYSTEM/build.prop parsed into a dict via read_helper."""
  # NOTE(review): the try/except around the read is not visible here.
  data = read_helper("SYSTEM/build.prop")
  # NOTE(review): 'zip' below is the Python builtin, not a filename -- the
  # warning message prints something like "<built-in function zip>".
  print "Warning: could not find SYSTEM/build.prop in %s" % zip
  return LoadDictionaryFromLines(data.split("\n"))
def LoadDictionaryFromLines(lines):
  # Parses "name=value" lines into a dict, skipping blank lines and '#'
  # comments.  NOTE(review): the dict initialisation, the loop header and
  # the return statement are not visible in the provided source.
  if not line or line.startswith("#"):
  name, value = line.split("=", 1)
def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path,
                      system_root_image=False):
  # Parses recovery.fstab (format v1 or v2) into {mount_point: Partition}.
  # NOTE(review): loop headers, 'continue' statements, several assignments
  # (d, options, length, context) and the final return are not visible in
  # the provided source; comments annotate only what is visible.
  class Partition(object):
    # Plain record describing one fstab entry.
    def __init__(self, mount_point, fs_type, device, length, device2, context):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device2 = device2
      self.context = context

  data = read_helper(recovery_fstab_path)
  print "Warning: could not find {}".format(recovery_fstab_path)

  if fstab_version == 1:
    # v1 format: <mount_point> <fs_type> <device> [<device2>|<options>]
    for line in data.split("\n"):
      if not line or line.startswith("#"):
      pieces = line.split()
      if not 3 <= len(pieces) <= 4:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
      # A 4th field starting with "/" is device2; otherwise it is options.
      if pieces[3].startswith("/"):
      mount_point = pieces[0]
      options = options.split(",")
      if i.startswith("length="):
      print "%s: unknown option \"%s\"" % (mount_point, i)
      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
                                 device=pieces[2], length=length,

  elif fstab_version == 2:
    for line in data.split("\n"):
      if not line or line.startswith("#"):
      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
      pieces = line.split()
      raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
      # Ignore entries that are managed by vold
      if "voldmanaged=" in options:
      # It's a good line, parse it
      options = options.split(",")
      if i.startswith("length="):
      # Ignore all unknown options in the unified fstab
      mount_flags = pieces[3]
      # Honor the SELinux context if present.
      for i in mount_flags.split(","):
        if i.startswith("context="):
      mount_point = pieces[1]
      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                                 device=pieces[0], length=length,
                                 device2=None, context=context)

    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))

  # / is used for the system mount point when the root directory is included in
  # system. Other areas assume system is always at "/system" so point /system
  if system_root_image:
    assert not d.has_key("/system") and d.has_key("/")
    d["/system"] = d["/"]
399 for k, v in sorted(d.items()):
400 print "%-25s = (%s) %s" % (k, type(v).__name__, v)
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
                        has_ramdisk=False, two_step_image=False):
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
  'sourcedir'), and turn them into a boot image. 'two_step_image' indicates if
  we are building a two-step special image (i.e. building a recovery image to
  be loaded into /boot in two-step OTAs).

  Return the image data, or None if sourcedir does not appear to contains files
  for building the requested image.
  """
  # NOTE(review): the make_ramdisk() helper header, several 'return None'
  # lines, cmd.append() calls for optional file contents, and the temp-file
  # cleanup are not visible in the provided source.

  # Build a gzip'd cpio ramdisk from sourcedir/RAMDISK: mkbootfs | minigzip.
  ramdisk_img = tempfile.NamedTemporaryFile()

  if os.access(fs_config_file, os.F_OK):
    cmd = ["mkbootfs", "-f", fs_config_file,
           os.path.join(sourcedir, "RAMDISK")]
    cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
  p1 = Run(cmd, stdout=subprocess.PIPE)
  p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

  assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
  assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

  # A kernel is mandatory; a ramdisk only when has_ramdisk is set.
  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):

  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  img = tempfile.NamedTemporaryFile()

  ramdisk_img = make_ramdisk()

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

  # Optional inputs: each extra file in sourcedir maps to a mkbootimg flag.
  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")

  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(open(fn).read().rstrip("\n"))

  args = info_dict.get("mkbootimg_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  args = info_dict.get("mkbootimg_version_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  cmd.extend(["--ramdisk", ramdisk_img.name])

  # With vboot, mkbootimg writes an unsigned image that is signed below.
  if info_dict.get("vboot", None):
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--output", img_unsigned.name])
    cmd.extend(["--output", img.name])

  p = Run(cmd, stdout=subprocess.PIPE)
  assert p.returncode == 0, "mkbootimg of %s image failed" % (
      os.path.basename(sourcedir),)

  if (info_dict.get("boot_signer", None) == "true" and
      info_dict.get("verity_key", None)):
    # Hard-code the path as "/boot" for two-step special recovery image (which
    # will be loaded into /boot during the two-step OTA).
    path = "/" + os.path.basename(sourcedir).lower()
    cmd = [OPTIONS.boot_signer_path]
    cmd.extend(OPTIONS.boot_signer_args)
    cmd.extend([path, img.name,
                info_dict["verity_key"] + ".pk8",
                info_dict["verity_key"] + ".x509.pem", img.name])
    p = Run(cmd, stdout=subprocess.PIPE)
    assert p.returncode == 0, "boot_signer of %s image failed" % path

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot", None):
    path = "/" + os.path.basename(sourcedir).lower()
    img_keyblock = tempfile.NamedTemporaryFile()
    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk",
           info_dict["vboot_subkey"] + ".vbprivk",
    p = Run(cmd, stdout=subprocess.PIPE)
    assert p.returncode == 0, "vboot_signer of %s image failed" % path

  # Clean up the temp files.

  # NOTE(review): arguments look swapped -- file.seek(offset, whence)
  # expects (0, os.SEEK_SET).  As written it only works because
  # os.SEEK_SET == 0; confirm intent.
  img.seek(os.SEEK_SET, 0)
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                     info_dict=None, two_step_image=False):
  """Return a File object with the desired bootable image.

  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
  the source files in 'unpack_dir'/'tree_subdir'."""

  # 1. Prebuilt image supplied alongside the target-files (foo.zip+bar.zip).
  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)

  # 2. Image already built into the target-files' IMAGES/ directory.
  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)

  # 3. Build it from the unpacked tree.
  print "building image from target_files %s..." % (tree_subdir,)

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # With system_root_image == "true", we don't pack ramdisk into the boot image.
  # Unless "recovery_as_boot" is specified, in which case we carry the ramdisk
  has_ramdisk = (info_dict.get("system_root_image") != "true" or
                 prebuilt_name != "boot.img" or
                 info_dict.get("recovery_as_boot") == "true")

  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
                             os.path.join(unpack_dir, fs_config),
                             info_dict, has_ramdisk, two_step_image)
  # NOTE(review): _BuildBootableImage may return None; as written this would
  # build File(name, None).  A guard ('if data: ... / return None') is
  # presumably missing from the provided source.
  return File(name, data)
def UnzipTemp(filename, pattern=None):
  """Unzip the given archive into a temporary directory and return the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
  main file), open for reading.
  """
  # NOTE(review): a few branch lines (pattern extension, the else for the
  # '+'-combined case) are not visible in the provided source.

  # Registered in OPTIONS.tempfiles so Cleanup() can remove it later.
  tmp = tempfile.mkdtemp(prefix="targetfiles-")
  OPTIONS.tempfiles.append(tmp)

  def unzip_to_dir(filename, dirname):
    # Shells out to the 'unzip' tool; -o overwrites, -q keeps it quiet.
    cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
    if pattern is not None:
    p = Run(cmd, stdout=subprocess.PIPE)
    if p.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %

  # "foo.zip+bar.zip": bar.zip supplies prebuilt BOOTABLE_IMAGES.
  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  unzip_to_dir(m.group(1), tmp)
  unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
  filename = m.group(1)
  unzip_to_dir(filename, tmp)

  return tmp, zipfile.ZipFile(filename, "r")
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them. Return a {key: password} dict. password
  will be None if the key has no password."""
  # NOTE(review): the initialisation of no_passwords / need_passwords /
  # key_passwords, some 'continue' statements and the return are not
  # visible in the provided source.

  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)

    # Probe 1: try reading the key with no password at all.
    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
      # Probe 2: try the empty password to classify encrypted keys.
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)

  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
def GetMinSdkVersion(apk_name):
  """Get the minSdkVersion declared in the APK. This can be both a decimal number
  (API Level) or a codename.
  """
  # NOTE(review): the returncode check guarding the first raise and the
  # 'return m.group(1)' inside the loop are not visible in the provided
  # source.

  p = Run(["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE)
  output, err = p.communicate()
  raise ExternalError("Failed to obtain minSdkVersion: aapt return code %s"

  for line in output.split("\n"):
    # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'
    m = re.match(r'sdkVersion:\'([^\']*)\'', line)
  raise ExternalError("No minSdkVersion returned by aapt")
def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
  """Get the minSdkVersion declared in the APK as a number (API Level). If
  minSdkVersion is set to a codename, it is translated to a number using the
  """
  # NOTE(review): the decimal fast path (try: return int(version) except
  # ValueError:) is not visible in the provided source.

  version = GetMinSdkVersion(apk_name)
  # Not a decimal number. Codename?
  if version in codename_to_api_level_map:
    return codename_to_api_level_map[version]

  raise ExternalError("Unknown minSdkVersion: '%s'. Known codenames: %s"
                      % (version, codename_to_api_level_map))
def SignFile(input_name, output_name, key, password, min_api_level=None,
             codename_to_api_level_map=dict(),
  # NOTE(review): the signature's trailing parameter(s) (e.g. whole_file)
  # and the '-w' option handling are not visible in the provided source.
  # Also note: dict() as a default argument is shared across calls --
  # harmless only while it is never mutated.
  """Sign the input_name zip/jar/apk, producing output_name. Use the
  given key and password (the latter may be None if the key does not

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the

  min_api_level is the API Level (int) of the oldest platform this file may end
  up on. If not specified for an APK, the API Level is obtained by interpreting
  the minSdkVersion attribute of the APK's AndroidManifest.xml.

  codename_to_api_level_map is needed to translate the codename which may be
  encountered as the APK's minSdkVersion.
  """

  # signapk.jar needs its JNI libraries on java.library.path.
  java_library_path = os.path.join(
      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)

  cmd = ([OPTIONS.java_path] + OPTIONS.java_args +
         ["-Djava.library.path=" + java_library_path,
          "-jar", os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)] +
         OPTIONS.extra_signapk_args)

  # Explicit caller-provided level wins; otherwise read it from the APK.
  min_sdk_version = min_api_level
  if min_sdk_version is None:
    min_sdk_version = GetMinSdkVersionInt(
        input_name, codename_to_api_level_map)
  if min_sdk_version is not None:
    cmd.extend(["--min-sdk-version", str(min_sdk_version)])

  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, output_name])

  # The password (if any) is fed to signapk on stdin.
  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  if password is not None:
    p.communicate(password)
  if p.returncode != 0:
    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
def CheckSize(data, target, info_dict):
  """Check the data string passed against the max size limit, if
  any, for the given target. Raise exception if the data is too big.
  Print a warning if the data is nearing the maximum size."""
  # NOTE(review): the 'size = len(data)' assignment, fs_type/device
  # defaults, early returns, and the threshold comparison guarding the
  # raise/warning branches are not visible in the provided source.

  if target.endswith(".img"):
    # Map "foo.img" to its mount point "/foo" for the fstab lookup.
    mount_point = "/" + target

  if info_dict["fstab"]:
    # The build system calls the userdata partition "data".
    if mount_point == "/userdata":
      mount_point = "/data"
    p = info_dict["fstab"][mount_point]
    # Size limits are keyed by the basename of the block device.
    device = device[device.rfind("/")+1:]
    limit = info_dict.get(device + "_size", None)
  if not fs_type or not limit:

  if fs_type == "yaffs2":
    # image size should be increased by 1/64th to account for the
    # spare area (64 bytes per 2k page)
    limit = limit / 2048 * (2048+64)

  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
  raise ExternalError(msg)
  print "  WARNING: ", msg
  elif OPTIONS.verbose:
def ReadApkCerts(tf_zip):
  """Given a target_files ZipFile, parse the META/apkcerts.txt file
  and return a {package: cert} dict."""
  # NOTE(review): the 'certmap' initialisation, line stripping/skipping,
  # the no-match 'continue', and the final return are not visible in the
  # provided source.
  for line in tf_zip.read("META/apkcerts.txt").split("\n"):
    m = re.match(r'^name="(.*)"\s+certificate="(.*)"\s+'
                 r'private_key="(.*)"$', line)
    name, cert, privkey = m.groups()
    public_key_suffix_len = len(OPTIONS.public_key_suffix)
    private_key_suffix_len = len(OPTIONS.private_key_suffix)
    if cert in SPECIAL_CERT_STRINGS and not privkey:
    # Cert and key must share the same basename; store that basename.
    elif (cert.endswith(OPTIONS.public_key_suffix) and
          privkey.endswith(OPTIONS.private_key_suffix) and
          cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
      certmap[name] = cert[:-public_key_suffix_len]
    raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
# Usage text shared by every releasetools script; Usage() appends it to the
# calling module's own docstring.  NOTE(review): several option headers
# (e.g. -p/--path, -v, -h) and the closing triple-quote are not visible in
# the provided source.
COMMON_DOCSTRING = """
  Prepend <dir>/bin to the list of places to search for binaries
  run by this script, and expect to find jars in <dir>/framework.

  -s  (--device_specific) <file>
      Path to the python module containing device-specific

  -x  (--extra) <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific
      extension code may look at.

      Show command lines being executed.

      Display this usage message and exit.
def Usage(docstring):
  """Print the given module docstring followed by the shared option help."""
  print docstring.rstrip("\n")
  print COMMON_DOCSTRING
def ParseOptions(argv,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags. docstring is the calling module's docstring, to be displayed
  for errors and -h. extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler."""
  # NOTE(review): the 'docstring' parameter mentioned above, the try:
  # around getopt, the option loop header, Usage()/sys.exit() calls, the
  # final 'else:' and the return of args are not visible in the provided
  # source.

  opts, args = getopt.getopt(
      argv, "hvp:s:x:" + extra_opts,
      ["help", "verbose", "path=", "signapk_path=",
       "signapk_shared_library_path=", "extra_signapk_args=",
       "java_path=", "java_args=", "public_key_suffix=",
       "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
       "verity_signer_path=", "verity_signer_args=", "device_specific=",
      list(extra_long_opts))
  except getopt.GetoptError as err:
    print "**", str(err), "**"

  # Each recognised flag updates the global OPTIONS object.
  if o in ("-h", "--help"):
  elif o in ("-v", "--verbose"):
    OPTIONS.verbose = True
  elif o in ("-p", "--path"):
    OPTIONS.search_path = a
  elif o in ("--signapk_path",):
    OPTIONS.signapk_path = a
  elif o in ("--signapk_shared_library_path",):
    OPTIONS.signapk_shared_library_path = a
  elif o in ("--extra_signapk_args",):
    OPTIONS.extra_signapk_args = shlex.split(a)
  elif o in ("--java_path",):
    OPTIONS.java_path = a
  elif o in ("--java_args",):
    OPTIONS.java_args = shlex.split(a)
  elif o in ("--public_key_suffix",):
    OPTIONS.public_key_suffix = a
  elif o in ("--private_key_suffix",):
    OPTIONS.private_key_suffix = a
  elif o in ("--boot_signer_path",):
    OPTIONS.boot_signer_path = a
  elif o in ("--boot_signer_args",):
    OPTIONS.boot_signer_args = shlex.split(a)
  elif o in ("--verity_signer_path",):
    OPTIONS.verity_signer_path = a
  elif o in ("--verity_signer_args",):
    OPTIONS.verity_signer_args = shlex.split(a)
  elif o in ("-s", "--device_specific"):
    OPTIONS.device_specific = a
  elif o in ("-x", "--extra"):
    key, value = a.split("=", 1)
    OPTIONS.extras[key] = value
    # Unrecognised flags are offered to the caller's handler.
    if extra_option_handler is None or not extra_option_handler(o, a):
      assert False, "unknown option \"%s\"" % (o,)

  # Make <search_path>/bin take precedence for the tools we shell out to.
  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])
def MakeTempFile(prefix=None, suffix=None):
  """Make a temp file and add it to the list of things to be deleted
  when Cleanup() is called. Return the filename."""
  fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  # mkstemp() hands back an open OS-level fd; close it so the descriptor
  # isn't leaked -- callers only want the path.
  os.close(fd)
  OPTIONS.tempfiles.append(fn)
  # Return the path as the docstring promises (the visible original never
  # returned it).
  return fn
920 for i in OPTIONS.tempfiles:
class PasswordManager(object):
  # Collects key passwords, preferring an $ANDROID_PW_FILE edited via
  # $EDITOR over interactive prompting.
  # NOTE(review): the __init__ and ReadFile 'def' headers, plus several
  # loop headers, accumulator initialisations and returns, are not visible
  # in the provided source.
  self.editor = os.getenv("EDITOR", None)
  self.pwfile = os.getenv("ANDROID_PW_FILE", None)

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict. (The dict may have keys in addition to the

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords. If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """

    current = self.ReadFile()
    # Loop until every requested item has a non-empty password.
    if i not in current or not current[i]:
    # Are all the passwords already in the file?
    print "key file %s still missing some passwords." % (self.pwfile,)
    answer = raw_input("try to edit again? [y]> ").strip()
    if answer and answer[0] not in 'yY':
      raise RuntimeError("key passwords unavailable")
    current = self.UpdateAndReadFile(current)

  def PromptResult(self, current): # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false. Returns a new dict with all the
    """
    for k, v in sorted(current.iteritems()):
      result[k] = getpass.getpass(
          "Enter password for %s key> " % k).strip()

  def UpdateAndReadFile(self, current):
    # Write current passwords to pwfile, let the user edit it, re-read it.
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    # Password file must not be world-readable.
    os.chmod(self.pwfile, 0o600)
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    # Entries without a password sort first so the cursor lands on them.
    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[ %s ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.

    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
    _, _ = p.communicate()

    return self.ReadFile()

  # ReadFile body follows; its 'def ReadFile(self):' header is not visible
  # in the provided source.
  if self.pwfile is None:
  f = open(self.pwfile, "r")
  if not line or line[0] == '#':
  m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
  print "failed to parse password file: ", line
  result[m.group(2)] = m.group(1)
  except IOError as e:
    # A missing file is fine (first run); anything else is reported.
    if e.errno != errno.ENOENT:
      print "error reading password file: ", str(e)
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  # Adds 'filename' to 'zip_file' with fixed perms/timestamp so archives
  # are reproducible.  NOTE(review): the docstring and the try/finally
  # scaffolding around the write are not visible in the provided source.

  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
  # for files larger than 2GiB. We can work around this by adjusting their
  # limit. Note that `zipfile.writestr()` will not work for strings larger than
  # 2GiB. The Python interpreter sometimes rejects strings that large (though
  # it isn't clear to me exactly what circumstances cause this).
  # `zipfile.write()` must be used directly to work around this.
  #
  # This mess can be avoided if we port to python3.
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if compress_type is None:
    compress_type = zip_file.compression

  # Remember the file's real mode/times so they can be restored below.
  saved_stat = os.stat(filename)

  # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
  # file to be zipped and reset it when we're done.
  os.chmod(filename, perms)

  # Use a fixed timestamp so the output is repeatable.
  epoch = datetime.datetime.fromtimestamp(0)
  timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
  os.utime(filename, (timestamp, timestamp))

  zip_file.write(filename, arcname=arcname, compress_type=compress_type)

  # Restore the original permissions/times on the source file.
  os.chmod(filename, saved_stat.st_mode)
  os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
  zipfile.ZIP64_LIMIT = saved_zip64_limit
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.
  """

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  # Accept either an arcname string or a pre-built ZipInfo.
  if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
    zinfo.compress_type = zip_file.compression
    # NOTE(review): the default-perms assignment and the 'else:' branch
    # header are not visible in the provided source.
    zinfo = zinfo_or_arcname

  # If compress_type is given, it overrides the value in zinfo.
  if compress_type is not None:
    zinfo.compress_type = compress_type

  # If perms is given, it has a priority.
  if perms is not None:
    # If perms doesn't set the file type, mark it as a regular file.
    if perms & 0o770000 == 0:
    # Unix mode bits live in the top 16 bits of external_attr.
    zinfo.external_attr = perms << 16

  # Use a fixed timestamp so the output is repeatable.
  zinfo.date_time = (2009, 1, 1, 0, 0, 0)

  zip_file.writestr(zinfo, data)
  zipfile.ZIP64_LIMIT = saved_zip64_limit
def ZipClose(zip_file):
  """Close 'zip_file', working around Python 2.7's spurious zip64 limit.

  zipfile also refers to ZIP64_LIMIT during close() when it writes out the
  central directory, so the limit is raised for the duration of the call.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  try:
    # The visible original never actually closed the file; without this
    # call the archive's central directory is never written.
    zip_file.close()
  finally:
    # Restore the limit even if close() raises.
    zipfile.ZIP64_LIMIT = saved_zip64_limit
class DeviceSpecificParams(object):
  # Loads the optional device-specific extension module and dispatches OTA
  # lifecycle hooks into it.
  # NOTE(review): the 'module' class attribute, the try/except around module
  # loading, and the final VerifyOTA_Assertions method are not visible in
  # the provided source.

  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    """
    for k, v in kwargs.iteritems():
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      if os.path.isdir(path):
        # A directory: look for a 'releasetools' module inside it.
        info = imp.find_module("releasetools", [path])
        # A file path: import it by its own basename.
        d, f = os.path.split(path)
        b, x = os.path.splitext(f)
        info = imp.find_module(f, [d])
      print "loaded device-specific extensions from", path
      self.module = imp.load_module("device_specific", *info)
      print "unable to load device-specific module; assuming none"

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs. The first argument to the call will be
    the DeviceSpecific object itself. If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default", None)
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package. Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package. Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    """
    return self._DoCall("IncrementalOTA_InstallEnd")
1205 def VerifyOTA_Assertions(self):
1206 return self._DoCall("VerifyOTA_Assertions")
  def __init__(self, name, data):
    """Wrap an in-memory file: record its size and SHA-1.

    NOTE(review): the assignments of self.name and self.data appear
    elided in this chunk (AddToZip reads both) -- confirm upstream.
    """
    self.size = len(data)
    # Hex SHA-1 of the raw contents; used for OTA verification strings.
    self.sha1 = sha1(data).hexdigest()
  def FromLocalFile(cls, name, diskname):
    """Build a File named *name* from the contents of *diskname*.

    NOTE(review): the @classmethod decorator and the read/close of *f*
    (which should populate *data*) appear elided here -- confirm upstream.
    """
    f = open(diskname, "rb")
    return File(name, data)
  def WriteToTemp(self):
    """Write self.data to a NamedTemporaryFile and return it.

    NOTE(review): the write/flush and the return of *t* appear elided in
    this chunk -- confirm upstream.  Caller owns the temp file's lifetime.
    """
    t = tempfile.NamedTemporaryFile()
1228 def AddToZip(self, z, compression=None):
1229 ZipWriteStr(z, self.name, self.data, compress_type=compression)
# Maps a target file's extension to the external diff tool used to build
# its patch; extensions not listed fall back to plain "bsdiff" (see
# Difference.ComputePatch).  Zip-structured formats use "imgdiff -z",
# which understands archive layout and produces smaller patches.
# NOTE(review): additional entries and the closing brace of this dict
# appear elided in this chunk -- confirm upstream.
DIFF_PROGRAM_BY_EXT = {
    ".zip" : ["imgdiff", "-z"],
    ".jar" : ["imgdiff", "-z"],
    ".apk" : ["imgdiff", "-z"],
class Difference(object):
  """Computes a binary patch turning source file *sf* into target *tf*.

  NOTE(review): this chunk has elided lines throughout (assignments of
  self.tf/self.sf, the nested run() closure, the `def GetPatch` line).
  Gaps are flagged inline; confirm against upstream.
  """

  def __init__(self, tf, sf, diff_program=None):
    # NOTE(review): self.tf/self.sf/self.patch assignments appear elided.
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf. Returns the same tuple as GetPatch()."""
    # Choose the diff tool: an explicit override wins, otherwise pick by
    # the target's extension (zip-like formats get imgdiff -z).
    if self.diff_program:
      diff_program = self.diff_program
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    # Write both images to temp files so the external tool can read them.
    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    ext = os.path.splitext(tf.name)[1]

    ptemp = tempfile.NamedTemporaryFile()
    # diff_program is either a list (tool + flags) or a bare tool name.
    if isinstance(diff_program, list):
      cmd = copy.copy(diff_program)
      cmd = [diff_program]
    cmd.append(stemp.name)
    cmd.append(ttemp.name)
    cmd.append(ptemp.name)
    # NOTE(review): the nested `def run():` wrapper around the subprocess
    # appears elided; Run/communicate below are its visible body.
    p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    _, e = p.communicate()
    # Run the diff on a thread so we can enforce a wall-clock timeout.
    th = threading.Thread(target=run)
    th.join(timeout=300)   # 5 mins
    print "WARNING: diff command timed out"
    if err or p.returncode != 0:
      print "WARNING: failure running %s:\n%s\n" % (
          diff_program, "".join(err))
      return None, None, None
    return self.tf, self.sf, self.patch

  # NOTE(review): the `def GetPatch(self):` line appears elided above
  # this docstring -- confirm upstream.
    """Return a tuple (target_file, source_file, patch_data).
    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed."""
    return self.tf, self.sf, self.patch
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'."""
  print len(diffs), "diffs to compute"

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  # NOTE(review): the nested `def worker():` and its try/loop structure
  # appear elided in this chunk; the lines below are its visible body.
  dur = time.time() - start
  tf, sf, patch = d.GetPatch()
  # Report the target name, plus the source name when they differ.
  if sf.name == tf.name:
    name = "%s (%s)" % (tf.name, sf.name)
  print "patching failed! %s" % (name,)
  print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
      dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
  except Exception as e:

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  threads.pop().join()
class BlockDifference(object):
  """Computes a block-based image diff and emits its edify script.

  NOTE(review): many lines are elided in this chunk (else-branches,
  version-selection logic, a probable @property decorator, sha1 context
  setup).  Gaps are flagged inline; confirm against upstream.
  """

  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None, disable_imgdiff=False):
    self.partition = partition
    self.check_first_block = check_first_block
    self.disable_imgdiff = disable_imgdiff

    # Select the blockimgdiff version.  NOTE(review): the surrounding
    # logic (taking the max of supported "blockimgdiff_versions") is
    # partially elided here.
    if OPTIONS.info_dict:
          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    self.version = version

    # Run the block image differ; its outputs drive script emission.
    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version,
                                    disable_imgdiff=self.disable_imgdiff)
    tmpdir = tempfile.mkdtemp()
    # Registered for later cleanup by the caller that owns OPTIONS.tempfiles.
    OPTIONS.tempfiles.append(tmpdir)
    self.path = os.path.join(tmpdir, partition)
    b.Compute(self.path)
    self._required_cache = b.max_stashed_size
    self.touched_src_ranges = b.touched_src_ranges
    self.touched_src_sha1 = b.touched_src_sha1

    # Resolve the block device for this partition.  NOTE(review): the
    # branch choosing between the target and source info dicts is elided.
    _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
    _, self.device = GetTypeAndDevice("/" + partition,
                                      OPTIONS.source_info_dict)

  # NOTE(review): an @property decorator appears elided above this def.
  def required_cache(self):
    # Largest number of stashed bytes needed while applying the transfer
    # list (from BlockImageDiff.max_stashed_size).
    return self._required_cache

  def WriteScript(self, script, output_zip, progress=None):
    """Emit the update (and post-install verify) commands for this
    partition into *script*.

    NOTE(review): the branch between unconditional and after-verification
    patching appears elided; both Print calls are visible below.
    """
    # write the output unconditionally
    script.Print("Patching %s image unconditionally..." % (self.partition,))
    script.Print("Patching %s image after verification." % (self.partition,))

    script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)
    self._WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""
    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    # NOTE(review): the final argument(s) closing this % tuple appear
    # elided (likely `self.device))`).
    script.AppendExtra('range_sha1("%s", "%s") == "%s" && '
                       'ui_print(" Verified.") || '
                       'ui_print("\\"%s\\" has unexpected contents.");' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True),
    script.AppendExtra("")

  def WriteVerifyScript(self, script, touched_blocks_only=False):
    """Emit pre-install source verification for this partition.

    NOTE(review): several guards, else-branches and an early return
    appear elided in this chunk; gaps are flagged inline.
    """
    partition = self.partition
    script.Print("Image %s will be patched unconditionally." % (partition,))
    # With touched_blocks_only (v3+), verify only the source blocks the
    # patch actually reads; otherwise fall back to the whole source care
    # map minus clobbered blocks (else-branch elided).
    if touched_blocks_only and self.version >= 3:
      ranges = self.touched_src_ranges
      expected_sha1 = self.touched_src_sha1
      ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
      expected_sha1 = self.src.TotalSha1()

      # No blocks to be checked, skipping.

      ranges_str = ranges.to_string_raw()
      if self.version >= 4:
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, expected_sha1,
                            self.device, partition, partition, partition))
      elif self.version == 3:
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, expected_sha1,
                            self.device, partition, partition, partition))
        # NOTE(review): an else-branch (pre-v3 plain range_sha1 check)
        # appears elided around this AppendExtra.
        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
            self.device, ranges_str, self.src.TotalSha1()))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      if self.version >= 4:
        # When generating incrementals for the system and vendor partitions in
        # version 4 or newer, explicitly check the first block (which contains
        # the superblock) of the partition to see if it's what we expect. If
        # this check fails, give an explicit log message about the partition
        # having been remounted R/W (the most likely explanation).
        if self.check_first_block:
          script.AppendExtra('check_first_block("%s");' % (self.device,))

        # If version >= 4, try block recovery before abort update
        # NOTE(review): the else between these two code assignments is elided.
        if partition == "system":
          code = ErrorCode.SYSTEM_RECOVER_FAILURE
          code = ErrorCode.VENDOR_RECOVER_FAILURE
        script.AppendExtra((
            'ifelse (block_image_recover("{device}", "{ranges}") && '
            'block_image_verify("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat"), '
            'ui_print("{partition} recovered successfully."), '
            'abort("E{code}: {partition} partition fails to recover"));\n'
            'endif;').format(device=self.device, ranges=ranges_str,
                             partition=partition, code=code))

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      # unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      # NOTE(review): the else between these two code assignments is elided.
      if partition == "system":
        code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
        code = ErrorCode.VENDOR_VERIFICATION_FAILURE
      script.AppendExtra((
          'abort("E%d: %s partition has unexpected contents");\n'
          'endif;') % (code, partition))

  def _WritePostInstallVerifyScript(self, script):
    """Emit checks that the freshly written target image matches the
    expected SHA-1 (and that extended blocks are zeroed).

    NOTE(review): else-branches and the AppendExtra call openings around
    the abort strings are elided in this chunk.
    """
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
        self.device, ranges_str,
        self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
          self.device, ranges_str,
          self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      # NOTE(review): the else between these code assignments is elided.
      if partition == "system":
        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
        code = ErrorCode.VENDOR_NONZERO_CONTENTS
      # NOTE(review): the AppendExtra( opening for this abort is elided.
        ' abort("E%d: %s partition has unexpected non-zero contents after '
        'endif;' % (code, partition))
      script.Print('Verified the updated %s image.' % (partition,))

      # NOTE(review): the else between these code assignments is elided.
      if partition == "system":
        code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
        code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS
      # NOTE(review): the AppendExtra( opening for this abort is elided.
        ' abort("E%d: %s partition has unexpected contents after OTA '
        'endif;' % (code, partition))

  def _WriteUpdate(self, script, output_zip):
    """Package the transfer list / new / patch data into the OTA zip and
    emit the block_image_update call that applies them."""
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))
    ZipWrite(output_zip,
             '{}.new.dat'.format(self.path),
             '{}.new.dat'.format(self.partition))
    # Patch data is already compressed; store it uncompressed in the zip.
    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    # NOTE(review): the else between these code assignments is elided.
    if self.partition == "system":
      code = ErrorCode.SYSTEM_UPDATE_FAILURE
      code = ErrorCode.VENDOR_UPDATE_FAILURE

    call = ('block_image_update("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat") ||\n'
            '  abort("E{code}: Failed to update {partition} image.");'.format(
                device=self.device, partition=self.partition, code=code))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
    """Return the hex SHA-1 of the given block ranges read from *source*.

    NOTE(review): the sha1 context creation and update loop appear
    elided before the hexdigest below -- confirm upstream.
    """
    data = source.ReadRangeSet(ranges)
    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
    """Return the hash value for all zero blocks.

    NOTE(review): the sha1 context creation (ctx = sha1()) appears
    elided before the loop -- confirm upstream.
    """
    # One 4096-byte block of zeros, hashed num_blocks times.
    zero_block = '\x00' * 4096
    for _ in range(num_blocks):
      ctx.update(zero_block)
    return ctx.hexdigest()
# Re-export blockimgdiff's DataImage so callers of this module can build
# images from raw data without importing blockimgdiff directly.
DataImage = blockimgdiff.DataImage

# map recovery.fstab's fs_types to mount/format "partition types"
# NOTE(review): the PARTITION_TYPES dict that this comment introduces
# (read by GetTypeAndDevice below) appears elided in this chunk.
def GetTypeAndDevice(mount_point, info):
  """Look up *mount_point* in info["fstab"] and return the pair
  (partition type, block device), with the fs_type mapped through
  PARTITION_TYPES.

  NOTE(review): a guard (likely `if fstab:` with a None fallback)
  appears elided between the lookup and the return -- confirm upstream.
  """
  fstab = info["fstab"]
  return (PARTITION_TYPES[fstab[mount_point].fs_type],
          fstab[mount_point].device)
def ParseCertificate(data):
  """Parse a PEM-format certificate.

  Scans the text line by line for the BEGIN/END CERTIFICATE markers and
  base64-decodes the body (Python 2 str.decode('base64')).

  NOTE(review): the accumulator initialization, the bodies of both
  marker checks, and the final return appear elided -- confirm upstream.
  """
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
    if "--BEGIN CERTIFICATE--" in line:
  cert = "".join(cert).decode('base64')
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
  # NOTE(review): the tail of this signature (likely `info_dict=None):`)
  # appears elided -- confirm upstream.
  """Generate a binary patch that creates the recovery image starting
  with the boot image. (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.) Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images. info should be the dictionary returned by
  common.LoadInfoDict() on the input target_files.
  """
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"
  system_root_image = info_dict.get("system_root_image", None) == "true"

  # Either ship the full recovery image, or compute a patch from boot to
  # recovery.  NOTE(review): the else separating the two cases is elided.
  if full_recovery_image:
    output_sink("etc/recovery.img", recovery_img.data)

    diff_program = ["imgdiff"]
    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
    if os.path.exists(path):
      diff_program.append("-b")
      diff_program.append(path)
      bonus_args = "-b /system/etc/recovery-resource.dat"

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  # The following GetTypeAndDevice()s need to use the path in the target
  # info_dict instead of source_info_dict.
  boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
  recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)

  # Build the first-boot shell script that installs recovery on-device.
  # NOTE(review): in both heredocs below the shell `else`/`fi` lines
  # appear elided -- confirm upstream before relying on the script text.
  if full_recovery_image:
    sh = """#!/system/bin/sh
if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
log -t recovery "Recovery image already installed"
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
    sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
log -t recovery "Recovery image already installed"
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release. Parse init.*.rc files to find out where the
  # target-files expects it to be, and put it there.
  sh_location = "etc/install-recovery.sh"
  # NOTE(review): the else between the two init_rc_dir assignments, the
  # `continue` after the name filter, and the per-line read loop around
  # re.match appear elided below.
  if system_root_image:
    init_rc_dir = os.path.join(input_dir, "ROOT")
    init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
  init_rc_files = os.listdir(init_rc_dir)
  for init_rc_file in init_rc_files:
    # Only init.*.rc files can declare the flash_recovery service.
    if (not init_rc_file.startswith('init.') or
        not init_rc_file.endswith('.rc')):
    with open(os.path.join(init_rc_dir, init_rc_file)) as f:
      m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
      sh_location = m.group(1)

  print "putting script in", sh_location

  output_sink(sh_location, sh)