1 # Copyright (C) 2008 The Android Open Source Project
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
34 from hashlib import sha1 as sha1
# Global configuration shared across the release tools: host-tool search
# path, signing-tool locations, JVM settings, and OTA-generation knobs.
# NOTE(review): this listing has lines elided (the `__init__` header and the
# closing brace of `platform_search_path` are not visible) -- verify the
# structure against the upstream file.
37 class Options(object):
39 platform_search_path = {
40 "linux2": "out/host/linux-x86",
41 "darwin": "out/host/darwin-x86",
# Fall back to None when running on a platform not listed above.
44 self.search_path = platform_search_path.get(sys.platform, None)
45 self.signapk_path = "framework/signapk.jar" # Relative to search_path
46 self.signapk_shared_library_path = "lib64" # Relative to search_path
47 self.extra_signapk_args = []
48 self.java_path = "java" # Use the one on the path by default.
49 self.java_args = "-Xmx2048m" # JVM Args
50 self.public_key_suffix = ".x509.pem"
51 self.private_key_suffix = ".pk8"
52 # use otatools built boot_signer by default
53 self.boot_signer_path = "boot_signer"
54 self.boot_signer_args = []
55 self.verity_signer_path = None
56 self.verity_signer_args = []
59 self.device_specific = None
62 self.source_info_dict = None
63 self.target_info_dict = None
64 self.worker_threads = None
65 # Stash size cannot exceed cache_size * threshold.
66 self.cache_size = None
67 self.stash_threshold = 0.8
73 # Values for "certificate" in apkcerts that mean special things.
# These entries are skipped by GetKeyPasswords (no password needed) and are
# accepted without a private key when parsing META/apkcerts.txt.
74 SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
# Raised when an external tool invoked by these scripts (unzip, signapk,
# aapt, ...) fails.  NOTE(review): the class body (likely just `pass`) is
# not visible in this listing.
77 class ExternalError(RuntimeError):
81 def Run(args, **kwargs):
82   """Create and return a subprocess.Popen object, printing the command
83   line on the terminal if -v was specified."""
# NOTE(review): the `if OPTIONS.verbose:` guard mentioned by the docstring
# is elided from this listing -- the print below is presumably conditional.
85 print " running: ", " ".join(args)
86 return subprocess.Popen(args, **kwargs)
89 def CloseInheritedPipes():
90   """ Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
91   before doing other work."""
# No-op on anything other than macOS.
92 if platform.system() != "Darwin":
# Scan fds above stdin/stdout/stderr; the stat/close logic between these
# lines is elided in this listing (os.fstat + error handling expected).
94 for d in range(3, 1025):
# 0x1000 is S_IFIFO: only FIFOs (pipes) are targeted for closing.
98 pipebit = stat[0] & 0x1000
105 def LoadInfoDict(input_file, input_dir=None):
106   """Read and parse the META/misc_info.txt key/value pairs from the
107   input target files and return a dict."""
# read_helper: reads a file either from an open target-files zip or from an
# unzipped directory tree (its `def` line is elided in this listing).
110 if isinstance(input_file, zipfile.ZipFile):
111 return input_file.read(fn)
113 path = os.path.join(input_file, *fn.split("/"))
115 with open(path) as f:
# Translate a missing file into the caller-visible KeyError (the raise is
# elided here); other I/O errors propagate.
118 if e.errno == errno.ENOENT:
122 d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
124 # ok if misc_info.txt doesn't exist
127 # backwards compatibility: These values used to be in their own
128 # files. Look for them, in case we're processing an old
# ...target-files package (comment continuation elided in this listing).
131 if "mkyaffs2_extra_flags" not in d:
133 d["mkyaffs2_extra_flags"] = read_helper(
134 "META/mkyaffs2-extra-flags.txt").strip()
136 # ok if flags don't exist
139 if "recovery_api_version" not in d:
141 d["recovery_api_version"] = read_helper(
142 "META/recovery-api-version.txt").strip()
# Unlike the optional keys above, the recovery API version is mandatory.
144 raise ValueError("can't find recovery API version in input target-files")
146 if "tool_extensions" not in d:
148 d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
150 # ok if extensions don't exist
153 if "fstab_version" not in d:
154 d["fstab_version"] = "1"
156 # A few properties are stored as links to the files in the out/ directory.
157 # It works fine with the build system. However, they are no longer available
158 # when (re)generating from target_files zip. If input_dir is not None, we
159 # are doing repacking. Redirect those properties to the actual files in the
160 # unzipped directory.
161 if input_dir is not None:
162 # We carry a copy of file_contexts.bin under META/. If not available,
163 # search BOOT/RAMDISK/. Note that sometimes we may need a different file
164 # to build images than the one running on device, such as when enabling
165 # system_root_image. In that case, we must have the one for image
166 # generation copied to META/.
167 fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
168 fc_config = os.path.join(input_dir, "META", fc_basename)
169 if d.get("system_root_image") == "true":
170 assert os.path.exists(fc_config)
171 if not os.path.exists(fc_config):
172 fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename)
173 if not os.path.exists(fc_config):
# NOTE(review): the fallback when neither location exists is elided here.
177 d["selinux_fc"] = fc_config
179 # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
180 if d.get("system_root_image") == "true":
181 d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
182 d["ramdisk_fs_config"] = os.path.join(
183 input_dir, "META", "root_filesystem_config.txt")
185 # Redirect {system,vendor}_base_fs_file.
186 if "system_base_fs_file" in d:
187 basename = os.path.basename(d["system_base_fs_file"])
188 system_base_fs_file = os.path.join(input_dir, "META", basename)
189 if os.path.exists(system_base_fs_file):
190 d["system_base_fs_file"] = system_base_fs_file
# Missing file: warn and drop the key rather than fail the build.
192 print "Warning: failed to find system base fs file: %s" % (
193 system_base_fs_file,)
194 del d["system_base_fs_file"]
196 if "vendor_base_fs_file" in d:
197 basename = os.path.basename(d["vendor_base_fs_file"])
198 vendor_base_fs_file = os.path.join(input_dir, "META", basename)
199 if os.path.exists(vendor_base_fs_file):
200 d["vendor_base_fs_file"] = vendor_base_fs_file
202 print "Warning: failed to find vendor base fs file: %s" % (
203 vendor_base_fs_file,)
204 del d["vendor_base_fs_file"]
# Legacy META/imagesizes.txt: "name value" pairs become <name>_size entries.
207 data = read_helper("META/imagesizes.txt")
208 for line in data.split("\n"):
211 name, value = line.split(" ", 1)
214 if name == "blocksize":
217 d[name + "_size"] = value
# makeint: coerce a key's value to int in place (its `def` line is elided).
# Base 0 lets misc_info use hex (0x...) or decimal values.
223 d[key] = int(d[key], 0)
225 makeint("recovery_api_version")
227 makeint("system_size")
228 makeint("vendor_size")
229 makeint("userdata_size")
230 makeint("cache_size")
231 makeint("recovery_size")
233 makeint("fstab_version")
235 if d.get("no_recovery", False) == "true":
238 d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
239 d.get("system_root_image", False))
240 d["build.prop"] = LoadBuildProp(read_helper)
# Parse SYSTEM/build.prop from the target-files into a dict.  The read is
# wrapped in error handling elided from this listing (the warning below is
# presumably the missing-file branch).
243 def LoadBuildProp(read_helper):
245 data = read_helper("SYSTEM/build.prop")
247 print "Warning: could not find SYSTEM/build.prop in %s" % zip
249 return LoadDictionaryFromLines(data.split("\n"))
# Build a dict from "name=value" lines, skipping blanks and '#' comments.
# NOTE(review): the loop header, dict initialization and return are elided
# from this listing.
251 def LoadDictionaryFromLines(lines):
255 if not line or line.startswith("#"):
258 name, value = line.split("=", 1)
# Parse RECOVERY/RAMDISK/etc/recovery.fstab into {mount_point: Partition}.
# Supports fstab v1 ("<dev> <mnt> <type> [options]") and v2
# ("<src> <mnt_point> <type> <mnt_flags> <fs_mgr_flags>").
262 def LoadRecoveryFSTab(read_helper, fstab_version, system_root_image=False):
263 class Partition(object):
264 def __init__(self, mount_point, fs_type, device, length, device2, context):
265 self.mount_point = mount_point
266 self.fs_type = fs_type
# NOTE(review): assignments for device/length are elided in this listing.
269 self.device2 = device2
270 self.context = context
273 data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
275 print "Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab"
278 if fstab_version == 1:
280 for line in data.split("\n"):
282 if not line or line.startswith("#"):
284 pieces = line.split()
285 if not 3 <= len(pieces) <= 4:
286 raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
# v1: an optional 4th field starting with "/" is a second device path;
# otherwise it holds comma-separated options like "length=".
289 if pieces[3].startswith("/"):
299 mount_point = pieces[0]
302 options = options.split(",")
304 if i.startswith("length="):
307 print "%s: unknown option \"%s\"" % (mount_point, i)
309 d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
310 device=pieces[2], length=length,
313 elif fstab_version == 2:
315 for line in data.split("\n"):
317 if not line or line.startswith("#"):
319 # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
320 pieces = line.split()
322 raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
324 # Ignore entries that are managed by vold
326 if "voldmanaged=" in options:
329 # It's a good line, parse it
331 options = options.split(",")
333 if i.startswith("length="):
336 # Ignore all unknown options in the unified fstab
339 mount_flags = pieces[3]
340 # Honor the SELinux context if present.
342 for i in mount_flags.split(","):
343 if i.startswith("context="):
346 mount_point = pieces[1]
347 d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
348 device=pieces[0], length=length,
349 device2=None, context=context)
352 raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))
354 # / is used for the system mount point when the root directory is included in
355 # system. Other areas assume system is always at "/system" so point /system
# ...at the "/" entry in that case (comment continuation elided here).
357 if system_root_image:
358 assert not d.has_key("/system") and d.has_key("/")
359 d["/system"] = d["/"]
# NOTE(review): the two lines below belong to DumpInfoDict, whose `def`
# line is elided from this listing; they pretty-print an info dict.
364 for k, v in sorted(d.items()):
365 print "%-25s = (%s) %s" % (k, type(v).__name__, v)
368 def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
370   """Build a bootable image from the specified sourcedir.
372   Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
373   'sourcedir'), and turn them into a boot image. Return the image data, or
374   None if sourcedir does not appear to contains files for building the
# ...requested image (docstring closing is elided in this listing).
# make_ramdisk: pack sourcedir/RAMDISK with mkbootfs and gzip it via
# minigzip into a NamedTemporaryFile (the `def` line is elided here).
378 ramdisk_img = tempfile.NamedTemporaryFile()
380 if os.access(fs_config_file, os.F_OK):
381 cmd = ["mkbootfs", "-f", fs_config_file,
382 os.path.join(sourcedir, "RAMDISK")]
384 cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
385 p1 = Run(cmd, stdout=subprocess.PIPE)
386 p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
390 assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
391 assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)
# Bail out (return None) when the required inputs are absent; the actual
# `return None` lines are elided from this listing.
395 if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
398 if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
401 if info_dict is None:
402 info_dict = OPTIONS.info_dict
404 img = tempfile.NamedTemporaryFile()
407 ramdisk_img = make_ramdisk()
409 # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
410 mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
412 cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]
# Optional per-image files: each one adds a flag + value to the command.
414 fn = os.path.join(sourcedir, "second")
415 if os.access(fn, os.F_OK):
416 cmd.append("--second")
419 fn = os.path.join(sourcedir, "cmdline")
420 if os.access(fn, os.F_OK):
421 cmd.append("--cmdline")
422 cmd.append(open(fn).read().rstrip("\n"))
424 fn = os.path.join(sourcedir, "base")
425 if os.access(fn, os.F_OK):
427 cmd.append(open(fn).read().rstrip("\n"))
429 fn = os.path.join(sourcedir, "pagesize")
430 if os.access(fn, os.F_OK):
431 cmd.append("--pagesize")
432 cmd.append(open(fn).read().rstrip("\n"))
434 args = info_dict.get("mkbootimg_args", None)
435 if args and args.strip():
436 cmd.extend(shlex.split(args))
438 args = info_dict.get("mkbootimg_version_args", None)
439 if args and args.strip():
440 cmd.extend(shlex.split(args))
443 cmd.extend(["--ramdisk", ramdisk_img.name])
# With vboot, mkbootimg writes to an unsigned temp image that the vboot
# signer later turns into the final img.
446 if info_dict.get("vboot", None):
447 img_unsigned = tempfile.NamedTemporaryFile()
448 cmd.extend(["--output", img_unsigned.name])
450 cmd.extend(["--output", img.name])
452 p = Run(cmd, stdout=subprocess.PIPE)
454 assert p.returncode == 0, "mkbootimg of %s image failed" % (
455 os.path.basename(sourcedir),)
457 if (info_dict.get("boot_signer", None) == "true" and
458 info_dict.get("verity_key", None)):
# e.g. "/boot" or "/recovery", derived from the source directory name.
459 path = "/" + os.path.basename(sourcedir).lower()
460 cmd = [OPTIONS.boot_signer_path]
461 cmd.extend(OPTIONS.boot_signer_args)
462 cmd.extend([path, img.name,
463 info_dict["verity_key"] + ".pk8",
464 info_dict["verity_key"] + ".x509.pem", img.name])
465 p = Run(cmd, stdout=subprocess.PIPE)
467 assert p.returncode == 0, "boot_signer of %s image failed" % path
469 # Sign the image if vboot is non-empty.
470 elif info_dict.get("vboot", None):
471 path = "/" + os.path.basename(sourcedir).lower()
472 img_keyblock = tempfile.NamedTemporaryFile()
473 cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
474 img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
475 info_dict["vboot_key"] + ".vbprivk",
476 info_dict["vboot_subkey"] + ".vbprivk",
479 p = Run(cmd, stdout=subprocess.PIPE)
481 assert p.returncode == 0, "vboot_signer of %s image failed" % path
483 # Clean up the temp files.
# Rewind before reading the finished image back (the read/return lines are
# elided from this listing).
487 img.seek(os.SEEK_SET, 0)
497 def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
499   """Return a File object with the desired bootable image.
501   Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
502   otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
503   the source files in 'unpack_dir'/'tree_subdir'."""
# Prebuilt lookup, in priority order: BOOTABLE_IMAGES/, then IMAGES/.
505 prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
506 if os.path.exists(prebuilt_path):
507 print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
508 return File.FromLocalFile(name, prebuilt_path)
510 prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
511 if os.path.exists(prebuilt_path):
512 print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
513 return File.FromLocalFile(name, prebuilt_path)
515 print "building image from target_files %s..." % (tree_subdir,)
517 if info_dict is None:
518 info_dict = OPTIONS.info_dict
520 # With system_root_image == "true", we don't pack ramdisk into the boot image.
521 # Unless "recovery_as_boot" is specified, in which case we carry the ramdisk
# ...in the boot image anyway (comment continuation elided in this listing).
523 has_ramdisk = (info_dict.get("system_root_image") != "true" or
524 prebuilt_name != "boot.img" or
525 info_dict.get("recovery_as_boot") == "true")
# e.g. META/boot_filesystem_config.txt for tree_subdir "BOOT".
527 fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
528 data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
529 os.path.join(unpack_dir, fs_config),
530 info_dict, has_ramdisk)
532 return File(name, data)
536 def UnzipTemp(filename, pattern=None):
537   """Unzip the given archive into a temporary directory and return the name.
539   If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
540   temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
542   Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
543   main file), open for reading.
# (Docstring closing elided in this listing.)
546 tmp = tempfile.mkdtemp(prefix="targetfiles-")
# Register for later removal by the tools' cleanup pass.
547 OPTIONS.tempfiles.append(tmp)
549 def unzip_to_dir(filename, dirname):
550 cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
551 if pattern is not None:
553 p = Run(cmd, stdout=subprocess.PIPE)
555 if p.returncode != 0:
556 raise ExternalError("failed to unzip input target-files \"%s\"" %
# Detect the two-zip "foo.zip+bar.zip" form.
559 m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
561 unzip_to_dir(m.group(1), tmp)
562 unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
# The returned ZipFile is opened on the main (first) zip only.
563 filename = m.group(1)
565 unzip_to_dir(filename, tmp)
567 return tmp, zipfile.ZipFile(filename, "r")
570 def GetKeyPasswords(keylist):
571   """Given a list of keys, prompt the user to enter passwords for
572   those which require them. Return a {key: password} dict. password
573   will be None if the key has no password."""
# NOTE(review): initialization of no_passwords/need_passwords/key_passwords
# is elided from this listing.
578 devnull = open("/dev/null", "w+b")
579 for k in sorted(keylist):
580 # We don't need a password for things that aren't really keys.
581 if k in SPECIAL_CERT_STRINGS:
582 no_passwords.append(k)
# First probe: can openssl read the .pk8 without any decryption?
585 p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
586 "-inform", "DER", "-nocrypt"],
587 stdin=devnull.fileno(),
588 stdout=devnull.fileno(),
589 stderr=subprocess.STDOUT)
591 if p.returncode == 0:
592 # Definitely an unencrypted key.
593 no_passwords.append(k)
# Second probe: try decrypting with the empty password.
595 p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
596 "-inform", "DER", "-passin", "pass:"],
597 stdin=devnull.fileno(),
598 stdout=devnull.fileno(),
599 stderr=subprocess.PIPE)
600 _, stderr = p.communicate()
601 if p.returncode == 0:
602 # Encrypted key with empty string as password.
603 key_passwords[k] = ''
604 elif stderr.startswith('Error decrypting key'):
605 # Definitely encrypted key.
606 # It would have said "Error reading key" if it didn't parse correctly.
607 need_passwords.append(k)
609 # Potentially, a type of key that openssl doesn't understand.
610 # We'll let the routines in signapk.jar handle it.
611 no_passwords.append(k)
# Prompt (or read $ANDROID_PW_FILE) only for keys that truly need one.
614 key_passwords.update(PasswordManager().GetPasswords(need_passwords))
615 key_passwords.update(dict.fromkeys(no_passwords, None))
619 def GetMinSdkVersion(apk_name):
620   """Get the minSdkVersion delared in the APK. This can be both a decimal number
621   (API Level) or a codename.
# (Docstring closing elided in this listing.)
624 p = Run(["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE)
625 output, err = p.communicate()
# Fail loudly if aapt itself failed (the returncode check is elided here).
627 raise ExternalError("Failed to obtain minSdkVersion: aapt return code %s"
630 for line in output.split("\n"):
631 # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'
632 m = re.match(r'sdkVersion:\'([^\']*)\'', line)
# Reached only when no sdkVersion line matched.
635 raise ExternalError("No minSdkVersion returned by aapt")
638 def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
639   """Get the minSdkVersion declared in the APK as a number (API Level). If
640   minSdkVersion is set to a codename, it is translated to a number using the
# ...provided codename_to_api_level_map (docstring continuation elided).
644 version = GetMinSdkVersion(apk_name)
# The int() attempt (try/except around it) is elided from this listing.
648 # Not a decimal number. Codename?
649 if version in codename_to_api_level_map:
650 return codename_to_api_level_map[version]
652 raise ExternalError("Unknown minSdkVersion: '%s'. Known codenames: %s"
653 % (version, codename_to_api_level_map))
656 def SignFile(input_name, output_name, key, password, min_api_level=None,
657              codename_to_api_level_map=dict(),
659   """Sign the input_name zip/jar/apk, producing output_name. Use the
660   given key and password (the latter may be None if the key does not
# ...have a password (docstring continuation elided in this listing).
663   If whole_file is true, use the "-w" option to SignApk to embed a
664   signature that covers the whole file in the archive comment of the
# ...zip file.
667   min_api_level is the API Level (int) of the oldest platform this file may end
668   up on. If not specified for an APK, the API Level is obtained by interpreting
669   the minSdkVersion attribute of the APK's AndroidManifest.xml.
671   codename_to_api_level_map is needed to translate the codename which may be
672   encountered as the APK's minSdkVersion.
# (Docstring closing elided.)
675 java_library_path = os.path.join(
676 OPTIONS.search_path, OPTIONS.signapk_shared_library_path)
678 cmd = [OPTIONS.java_path, OPTIONS.java_args,
679 "-Djava.library.path=" + java_library_path,
681 os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
682 cmd.extend(OPTIONS.extra_signapk_args)
# Resolve the min SDK: explicit argument wins, otherwise ask aapt.
686 min_sdk_version = min_api_level
687 if min_sdk_version is None:
689 min_sdk_version = GetMinSdkVersionInt(
690 input_name, codename_to_api_level_map)
691 if min_sdk_version is not None:
692 cmd.extend(["--min-sdk-version", str(min_sdk_version)])
694 cmd.extend([key + OPTIONS.public_key_suffix,
695 key + OPTIONS.private_key_suffix,
696 input_name, output_name])
# The key password, if any, is fed to signapk on stdin.
698 p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
699 if password is not None:
701 p.communicate(password)
702 if p.returncode != 0:
703 raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
706 def CheckSize(data, target, info_dict):
707   """Check the data string passed against the max size limit, if
708   any, for the given target. Raise exception if the data is too big.
709   Print a warning if the data is nearing the maximum size."""
# Map an image filename to its mount point to look up the fstab entry.
711 if target.endswith(".img"):
713 mount_point = "/" + target
717 if info_dict["fstab"]:
718 if mount_point == "/userdata":
719 mount_point = "/data"
720 p = info_dict["fstab"][mount_point]
# Strip the directory part of the device path, then look up "<dev>_size".
724 device = device[device.rfind("/")+1:]
725 limit = info_dict.get(device + "_size", None)
726 if not fs_type or not limit:
729 if fs_type == "yaffs2":
730 # image size should be increased by 1/64th to account for the
731 # spare area (64 bytes per 2k page)
732 limit = limit / 2048 * (2048+64)
734 pct = float(size) * 100.0 / limit
735 msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
# NOTE(review): the pct-threshold conditions around these branches are
# elided from this listing.
737 raise ExternalError(msg)
740 print "  WARNING: ", msg
742 elif OPTIONS.verbose:
746 def ReadApkCerts(tf_zip):
747   """Given a target_files ZipFile, parse the META/apkcerts.txt file
748   and return a {package: cert} dict."""
750 for line in tf_zip.read("META/apkcerts.txt").split("\n"):
# Each line: name="..." certificate="..." private_key="..."
754 m = re.match(r'^name="(.*)"\s+certificate="(.*)"\s+'
755 r'private_key="(.*)"$', line)
757 name, cert, privkey = m.groups()
758 public_key_suffix_len = len(OPTIONS.public_key_suffix)
759 private_key_suffix_len = len(OPTIONS.private_key_suffix)
# PRESIGNED/EXTERNAL entries carry no private key and are stored as-is
# (the assignment branch is elided from this listing).
760 if cert in SPECIAL_CERT_STRINGS and not privkey:
762 elif (cert.endswith(OPTIONS.public_key_suffix) and
763 privkey.endswith(OPTIONS.private_key_suffix) and
764 cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
# Store the shared key basename (suffixes stripped).
765 certmap[name] = cert[:-public_key_suffix_len]
767 raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
# Usage text for the options handled by ParseOptions, appended to each
# tool's own docstring by Usage().  NOTE(review): the closing triple-quote
# is not visible in this listing.
771 COMMON_DOCSTRING = """
773       Prepend <dir>/bin to the list of places to search for binaries
774       run by this script, and expect to find jars in <dir>/framework.
776   -s  (--device_specific) <file>
777       Path to the python module containing device-specific
780   -x  (--extra) <key=value>
781       Add a key/value pair to the 'extras' dict, which device-specific
782       extension code may look at.
785       Show command lines being executed.
788       Display this usage message and exit.
# Print the calling tool's docstring followed by the common option help.
791 def Usage(docstring):
792 print docstring.rstrip("\n")
793 print COMMON_DOCSTRING
796 def ParseOptions(argv,
798                  extra_opts="", extra_long_opts=(),
799                  extra_option_handler=None):
800   """Parse the options in argv and return any arguments that aren't
801   flags. docstring is the calling module's docstring, to be displayed
802   for errors and -h. extra_opts and extra_long_opts are for flags
803   defined by the caller, which are processed by passing them to
804   extra_option_handler."""
807 opts, args = getopt.getopt(
808 argv, "hvp:s:x:" + extra_opts,
809 ["help", "verbose", "path=", "signapk_path=",
810 "signapk_shared_library_path=", "extra_signapk_args=",
811 "java_path=", "java_args=", "public_key_suffix=",
812 "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
813 "verity_signer_path=", "verity_signer_args=", "device_specific=",
815 list(extra_long_opts))
816 except getopt.GetoptError as err:
818 print "**", str(err), "**"
# Option dispatch: each flag updates the corresponding OPTIONS attribute.
822 if o in ("-h", "--help"):
825 elif o in ("-v", "--verbose"):
826 OPTIONS.verbose = True
827 elif o in ("-p", "--path"):
828 OPTIONS.search_path = a
829 elif o in ("--signapk_path",):
830 OPTIONS.signapk_path = a
831 elif o in ("--signapk_shared_library_path",):
832 OPTIONS.signapk_shared_library_path = a
833 elif o in ("--extra_signapk_args",):
834 OPTIONS.extra_signapk_args = shlex.split(a)
835 elif o in ("--java_path",):
836 OPTIONS.java_path = a
837 elif o in ("--java_args",):
838 OPTIONS.java_args = a
839 elif o in ("--public_key_suffix",):
840 OPTIONS.public_key_suffix = a
841 elif o in ("--private_key_suffix",):
842 OPTIONS.private_key_suffix = a
843 elif o in ("--boot_signer_path",):
844 OPTIONS.boot_signer_path = a
845 elif o in ("--boot_signer_args",):
846 OPTIONS.boot_signer_args = shlex.split(a)
847 elif o in ("--verity_signer_path",):
848 OPTIONS.verity_signer_path = a
849 elif o in ("--verity_signer_args",):
850 OPTIONS.verity_signer_args = shlex.split(a)
851 elif o in ("-s", "--device_specific"):
852 OPTIONS.device_specific = a
853 elif o in ("-x", "--extra"):
854 key, value = a.split("=", 1)
855 OPTIONS.extras[key] = value
# Unknown flags fall through to the caller's handler, if any.
857 if extra_option_handler is None or not extra_option_handler(o, a):
858 assert False, "unknown option \"%s\"" % (o,)
# Make <search_path>/bin visible to subprocesses launched by Run().
860 if OPTIONS.search_path:
861 os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
862 os.pathsep + os.environ["PATH"])
867 def MakeTempFile(prefix=None, suffix=None):
868   """Make a temp file and add it to the list of things to be deleted
869   when Cleanup() is called. Return the filename."""
870 fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
# Registering in OPTIONS.tempfiles defers deletion to Cleanup().
872 OPTIONS.tempfiles.append(fn)
# NOTE(review): the line below belongs to Cleanup(), whose `def` line is
# elided from this listing; it iterates the registered temp paths.
877 for i in OPTIONS.tempfiles:
# Collects key passwords, preferring a user-editable $ANDROID_PW_FILE over
# interactive prompting.
884 class PasswordManager(object):
# (__init__ header elided from this listing.)
886 self.editor = os.getenv("EDITOR", None)
887 self.pwfile = os.getenv("ANDROID_PW_FILE", None)
889 def GetPasswords(self, items):
890     """Get passwords corresponding to each string in 'items',
891     returning a dict. (The dict may have keys in addition to the
# ...values in 'items' -- docstring continuation elided.)
894     Uses the passwords in $ANDROID_PW_FILE if available, letting the
895     user edit that file to add more needed passwords. If no editor is
896     available, or $ANDROID_PW_FILE isn't define, prompts the user
897     interactively in the ordinary way.
# (Docstring closing elided.)
900 current = self.ReadFile()
# Loop until every requested item has a non-empty password.
906 if i not in current or not current[i]:
908 # Are all the passwords already in the file?
916 print "key file %s still missing some passwords." % (self.pwfile,)
917 answer = raw_input("try to edit again? [y]> ").strip()
918 if answer and answer[0] not in 'yY':
919 raise RuntimeError("key passwords unavailable")
922 current = self.UpdateAndReadFile(current)
924 def PromptResult(self, current): # pylint: disable=no-self-use
925     """Prompt the user to enter a value (password) for each key in
926     'current' whose value is fales. Returns a new dict with all the
# ...values filled in (docstring continuation elided; "fales" is a typo
# for "false" in the original text, preserved here).
930 for k, v in sorted(current.iteritems()):
935 result[k] = getpass.getpass(
936 "Enter password for %s key> " % k).strip()
941 def UpdateAndReadFile(self, current):
942 if not self.editor or not self.pwfile:
943 return self.PromptResult(current)
945 f = open(self.pwfile, "w")
# Password file must not be world-readable.
946 os.chmod(self.pwfile, 0o600)
947 f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
948 f.write("# (Additional spaces are harmless.)\n\n")
# Sort so that entries still missing a password come first.
951 sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
952 for i, (_, k, v) in enumerate(sorted_list):
953 f.write("[[[  %s  ]]] %s\n" % (v, k))
954 if not v and first_line is None:
955 # position cursor on first line with no password.
# Launch $EDITOR positioned at that line, then re-read the file.
959 p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
960 _, _ = p.communicate()
962 return self.ReadFile()
# (ReadFile `def` line elided from this listing.)
966 if self.pwfile is None:
969 f = open(self.pwfile, "r")
972 if not line or line[0] == '#':
974 m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
976 print "failed to parse password file: ", line
978 result[m.group(2)] = m.group(1)
# Missing password file is fine; any other I/O error is reported.
981 if e.errno != errno.ENOENT:
982 print "error reading password file: ", str(e)
# Add a file to a zip with normalized perms/timestamp, working around the
# Python 2.7 zip64 limit described below.
986 def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
991   # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
992   # for files larger than 2GiB. We can work around this by adjusting their
993   # limit. Note that `zipfile.writestr()` will not work for strings larger than
994   # 2GiB. The Python interpreter sometimes rejects strings that large (though
995   # it isn't clear to me exactly what circumstances cause this).
996   # `zipfile.write()` must be used directly to work around this.
998   # This mess can be avoided if we port to python3.
999 saved_zip64_limit = zipfile.ZIP64_LIMIT
1000 zipfile.ZIP64_LIMIT = (1 << 32) - 1
1002 if compress_type is None:
1003 compress_type = zip_file.compression
# Remember the on-disk mode/times so they can be restored afterwards.
1007 saved_stat = os.stat(filename)
1010 # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
1011 # file to be zipped and reset it when we're done.
1012 os.chmod(filename, perms)
1014 # Use a fixed timestamp so the output is repeatable.
1015 epoch = datetime.datetime.fromtimestamp(0)
1016 timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
1017 os.utime(filename, (timestamp, timestamp))
1019 zip_file.write(filename, arcname=arcname, compress_type=compress_type)
# Restore the file's original mode and times, and the module-level limit
# (presumably inside a `finally:` whose header is elided from this listing).
1021 os.chmod(filename, saved_stat.st_mode)
1022 os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
1023 zipfile.ZIP64_LIMIT = saved_zip64_limit
1026 def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
1027                 compress_type=None):
1028   """Wrap zipfile.writestr() function to work around the zip64 limit.
1030   Even with the ZIP64_LIMIT workaround, it won't allow writing a string
1031   longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
1032   when calling crc32(bytes).
1034   But it still works fine to write a shorter string into a large zip file.
1035   We should use ZipWrite() whenever possible, and only use ZipWriteStr()
1036   when we know the string won't be too long.
# (Docstring closing elided in this listing.)
1039 saved_zip64_limit = zipfile.ZIP64_LIMIT
1040 zipfile.ZIP64_LIMIT = (1 << 32) - 1
# Accept either an arcname string or a pre-built ZipInfo.
1042 if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
1043 zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
1044 zinfo.compress_type = zip_file.compression
1048 zinfo = zinfo_or_arcname
1050 # If compress_type is given, it overrides the value in zinfo.
1051 if compress_type is not None:
1052 zinfo.compress_type = compress_type
1054 # If perms is given, it has a priority.
1055 if perms is not None:
1056 # If perms doesn't set the file type, mark it as a regular file.
1057 if perms & 0o770000 == 0:
# Unix mode lives in the high 16 bits of external_attr.
1059 zinfo.external_attr = perms << 16
1061 # Use a fixed timestamp so the output is repeatable.
1062 zinfo.date_time = (2009, 1, 1, 0, 0, 0)
1064 zip_file.writestr(zinfo, data)
# Restore the module-level limit when done.
1065 zipfile.ZIP64_LIMIT = saved_zip64_limit
1068 def ZipClose(zip_file):
1070   # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
1071   # central directory.
1072 saved_zip64_limit = zipfile.ZIP64_LIMIT
1073 zipfile.ZIP64_LIMIT = (1 << 32) - 1
# NOTE(review): the zip_file.close() call between raising and restoring the
# limit is elided from this listing.
1077 zipfile.ZIP64_LIMIT = saved_zip64_limit
# Loads an optional device-specific extension module and forwards OTA
# lifecycle hooks to it.
1080 class DeviceSpecificParams(object):
1082 def __init__(self, **kwargs):
1083     """Keyword arguments to the constructor become attributes of this
1084     object, which is passed to all functions in the device-specific
# ...module (docstring continuation elided in this listing).
1086 for k, v in kwargs.iteritems():
1088 self.extras = OPTIONS.extras
1090 if self.module is None:
1091 path = OPTIONS.device_specific
# A directory is searched for a "releasetools" module; a file path is
# loaded by splitting off its directory and extension.
1095 if os.path.isdir(path):
1096 info = imp.find_module("releasetools", [path])
1098 d, f = os.path.split(path)
1099 b, x = os.path.splitext(f)
1102 info = imp.find_module(f, [d])
1103 print "loaded device-specific extensions from", path
1104 self.module = imp.load_module("device_specific", *info)
# Load failures are non-fatal: hooks simply become no-ops.
1106 print "unable to load device-specific module; assuming none"
1108 def _DoCall(self, function_name, *args, **kwargs):
1109     """Call the named function in the device-specific module, passing
1110     the given args and kwargs. The first argument to the call will be
1111     the DeviceSpecific object itself. If there is no module, or the
1112     module does not define the function, return the value of the
1113     'default' kwarg (which itself defaults to None)."""
1114 if self.module is None or not hasattr(self.module, function_name):
1115 return kwargs.get("default", None)
1116 return getattr(self.module, function_name)(*((self,) + args), **kwargs)
1118 def FullOTA_Assertions(self):
1119     """Called after emitting the block of assertions at the top of a
1120     full OTA package. Implementations can add whatever additional
1121     assertions they like."""
1122 return self._DoCall("FullOTA_Assertions")
1124 def FullOTA_InstallBegin(self):
1125     """Called at the start of full OTA installation."""
1126 return self._DoCall("FullOTA_InstallBegin")
1128 def FullOTA_InstallEnd(self):
1129     """Called at the end of full OTA installation; typically this is
1130     used to install the image for the device's baseband processor."""
1131 return self._DoCall("FullOTA_InstallEnd")
1133 def IncrementalOTA_Assertions(self):
1134     """Called after emitting the block of assertions at the top of an
1135     incremental OTA package. Implementations can add whatever
1136     additional assertions they like."""
1137 return self._DoCall("IncrementalOTA_Assertions")
1139 def IncrementalOTA_VerifyBegin(self):
1140     """Called at the start of the verification phase of incremental
1141     OTA installation; additional checks can be placed here to abort
1142     the script before any changes are made."""
1143 return self._DoCall("IncrementalOTA_VerifyBegin")
1145 def IncrementalOTA_VerifyEnd(self):
1146     """Called at the end of the verification phase of incremental OTA
1147     installation; additional checks can be placed here to abort the
1148     script before any changes are made."""
1149 return self._DoCall("IncrementalOTA_VerifyEnd")
1151 def IncrementalOTA_InstallBegin(self):
1152     """Called at the start of incremental OTA installation (after
1153     verification is complete)."""
1154 return self._DoCall("IncrementalOTA_InstallBegin")
1156 def IncrementalOTA_InstallEnd(self):
1157     """Called at the end of incremental OTA installation; typically
1158     this is used to install the image for the device's baseband
# ...processor (docstring continuation elided).
1160 return self._DoCall("IncrementalOTA_InstallEnd")
1162 def VerifyOTA_Assertions(self):
1163 return self._DoCall("VerifyOTA_Assertions")
1166 def __init__(self, name, data):
1169 self.size = len(data)
1170 self.sha1 = sha1(data).hexdigest()
1173 def FromLocalFile(cls, name, diskname):
1174 f = open(diskname, "rb")
1177 return File(name, data)
1179 def WriteToTemp(self):
1180 t = tempfile.NamedTemporaryFile()
1185 def AddToZip(self, z, compression=None):
1186 ZipWriteStr(z, self.name, self.data, compress_type=compression)
# Map a target file's extension to the diff program best suited to it.
# zip-like containers are handled by imgdiff in -z mode; extensions not
# listed here fall back to plain "bsdiff" (see Difference.ComputePatch).
# The literal was left unterminated in this copy of the file; closed here.
DIFF_PROGRAM_BY_EXT = {
    ".zip": ["imgdiff", "-z"],
    ".jar": ["imgdiff", "-z"],
    ".apk": ["imgdiff", "-z"],
}
class Difference(object):
  """Computes the patch that turns a source File into a target File,
  using bsdiff or imgdiff depending on the target's extension."""

  def __init__(self, tf, sf, diff_program=None):
    # tf/sf are the target and source File-like objects; patch stays
    # None until ComputePatch succeeds.
    self.tf = tf
    self.sf = sf
    self.patch = None
    # Optional explicit diff tool (string or argv list); when None the
    # tool is chosen from the target's extension in ComputePatch.
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf.  Returns the same tuple as GetPatch()."""
    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    # Created outside the try so the finally block can always close it.
    ptemp = tempfile.NamedTemporaryFile()
    try:
      if isinstance(diff_program, list):
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []

      def run():
        # Drain the child's output on a helper thread so the join()
        # below can bound the diff with a timeout.
        _, e = p.communicate()
        if e:
          err.append(e)

      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        print("WARNING: diff command timed out")
        p.terminate()
        th.join(5)
        if th.is_alive():
          p.kill()
          th.join()

      if err or p.returncode != 0:
        print("WARNING: failure running %s:\n%s\n" % (
            diff_program, "".join(err)))
        self.patch = None
        return None, None, None
      diff = ptemp.read()
    finally:
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch

  def GetPatch(self):
    """Return a tuple (target_file, source_file, patch_data).
    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed."""
    return self.tf, self.sf, self.patch
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs',
  fanning the work out over OPTIONS.worker_threads threads."""
  print("%d diffs to compute" % (len(diffs),))

  # Do the largest files first, to try and reduce the long-pole effect.
  # Sorting by an explicit key avoids comparing Difference objects
  # themselves when two targets have the same size.
  by_size = sorted(diffs, key=lambda d: d.tf.size, reverse=True)

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    try:
      # The lock protects diff_iter and serializes the progress prints;
      # it is released around the (slow) ComputePatch call.
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          print("patching failed! %s" % (name,))
        else:
          print("%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name))
      lock.release()
    except Exception as e:
      print(e)
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
class BlockDifference(object):
  """Computes a block-based diff for one partition (via
  blockimgdiff.BlockImageDiff) and emits the edify script fragments
  that verify, apply and post-verify the update.

  NOTE(review): this copy of the file has lost lines throughout the
  class (unbalanced parentheses, missing if/else headers).  The code
  below is kept byte-for-byte as found; comments flag the apparent
  gaps instead of guessing at the missing statements.
  """

  def __init__(self, partition, tgt, src=None, check_first_block=False,
    # NOTE(review): the signature's continuation/closing line is
    # missing here.
    self.partition = partition
    self.check_first_block = check_first_block

    if OPTIONS.info_dict:
      # NOTE(review): the head of the expression assigning 'version' is
      # missing; only this continuation line (reading the
      # "blockimgdiff_versions" key, defaulting to "1") survives.
          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
      self.version = version

    # Run the block image differ and write its artifacts (transfer
    # list plus new/patch data) under a fresh temp dir, which is
    # registered in OPTIONS.tempfiles for later cleanup.
    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version)
    tmpdir = tempfile.mkdtemp()
    OPTIONS.tempfiles.append(tmpdir)
    self.path = os.path.join(tmpdir, partition)
    b.Compute(self.path)
    self._required_cache = b.max_stashed_size
    self.touched_src_ranges = b.touched_src_ranges
    self.touched_src_sha1 = b.touched_src_sha1

    # NOTE(review): both assignments survive but the if/else that chose
    # between the target and source info dicts is missing; as written
    # the second assignment always wins.
    _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
    _, self.device = GetTypeAndDevice("/" + partition,
                                      OPTIONS.source_info_dict)

  def required_cache(self):
    # Maximum stash space the computed transfer list needs on-device.
    # NOTE(review): presumably decorated with @property in the original
    # (the decorator line is not visible here) — confirm.
    return self._required_cache

  def WriteScript(self, script, output_zip, progress=None):
    # Emit the edify commands that apply this partition's update.
    # NOTE(review): the if/else scaffolding that selects between the
    # unconditional and after-verification Print calls is missing, as
    # are the guards around ShowProgress and the post-install verify.
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
      script.Print("Patching %s image after verification." % (self.partition,))

      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)
      self._WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""
    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    # Non-aborting check over the whole care map: prints "Verified." on
    # match, otherwise an "unexpected contents" message.
    script.AppendExtra('range_sha1("%s", "%s") == "%s" && '
                       'ui_print("    Verified.") || '
                       'ui_print("\\"%s\\" has unexpected contents.");' % (
                           self.device, ranges_str,
                           self.tgt.TotalSha1(include_clobbered_blocks=True),
    # NOTE(review): the final argument and closing parens of this
    # AppendExtra call are missing from this chunk.
    script.AppendExtra("")

  def WriteVerifyScript(self, script, touched_blocks_only=False):
    partition = self.partition
    # NOTE(review): the branch header(s) around this Print (full-image
    # vs. incremental path) are missing.
    script.Print("Image %s will be patched unconditionally." % (partition,))

    if touched_blocks_only and self.version >= 3:
      ranges = self.touched_src_ranges
      expected_sha1 = self.touched_src_sha1
      # NOTE(review): else-branch header missing before the two
      # fallback assignments below.
      ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
      expected_sha1 = self.src.TotalSha1()

      # No blocks to be checked, skipping.

    ranges_str = ranges.to_string_raw()
    if self.version >= 4:
      script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                          'block_image_verify("%s", '
                          'package_extract_file("%s.transfer.list"), '
                          '"%s.new.dat", "%s.patch.dat")) then') % (
                              self.device, ranges_str, expected_sha1,
                              self.device, partition, partition, partition))
    elif self.version == 3:
      script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                          'block_image_verify("%s", '
                          'package_extract_file("%s.transfer.list"), '
                          '"%s.new.dat", "%s.patch.dat")) then') % (
                              self.device, ranges_str, expected_sha1,
                              self.device, partition, partition, partition))
      # NOTE(review): the else header for the legacy (version < 3)
      # range_sha1-only check is missing.
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
          self.device, ranges_str, self.src.TotalSha1()))
    script.Print('Verified %s image...' % (partition,))
    script.AppendExtra('else')

    if self.version >= 4:
      # When generating incrementals for the system and vendor partitions in
      # version 4 or newer, explicitly check the first block (which contains
      # the superblock) of the partition to see if it's what we expect. If
      # this check fails, give an explicit log message about the partition
      # having been remounted R/W (the most likely explanation).
      if self.check_first_block:
        script.AppendExtra('check_first_block("%s");' % (self.device,))

      # If version >= 4, try block recovery before abort update
      script.AppendExtra((
          'ifelse (block_image_recover("{device}", "{ranges}") && '
          'block_image_verify("{device}", '
          'package_extract_file("{partition}.transfer.list"), '
          '"{partition}.new.dat", "{partition}.patch.dat"), '
          'ui_print("{partition} recovered successfully."), '
          'abort("{partition} partition fails to recover"));\n'
          'endif;').format(device=self.device, ranges=ranges_str,
                           partition=partition))

    # Abort the OTA update. Note that the incremental OTA cannot be applied
    # even if it may match the checksum of the target partition.
    # a) If version < 3, operations like move and erase will make changes
    #    unconditionally and damage the partition.
    # b) If version >= 3, it won't even reach here.
    # NOTE(review): an else header appears to be missing before this
    # abort branch.
    script.AppendExtra(('abort("%s partition has unexpected contents");\n'
                        'endif;') % (partition,))

  def _WritePostInstallVerifyScript(self, script):
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
        self.device, ranges_str,
        self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
          self.device, ranges_str,
          self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      # NOTE(review): the AppendExtra header and else scaffolding around
      # the two abort string fragments below are missing; only the
      # string-literal pieces survive.
      ' abort("%s partition has unexpected non-zero contents after OTA '
      'endif;' % (partition,))
      script.Print('Verified the updated %s image.' % (partition,))
      ' abort("%s partition has unexpected contents after OTA update");\n'
      'endif;' % (partition,))

  def _WriteUpdate(self, script, output_zip):
    # Ship the transfer list plus the new/patch data blobs in the
    # package, renamed after the partition.  The patch data is stored
    # without zip compression (ZIP_STORED).
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))
    ZipWrite(output_zip,
             '{}.new.dat'.format(self.path),
             '{}.new.dat'.format(self.partition))
    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    # The edify call that applies the transfer list, aborting the
    # script on failure.
    call = ('block_image_update("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat") ||\n'
            '  abort("Failed to update {partition} image.");'.format(
                device=self.device, partition=self.partition))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
    data = source.ReadRangeSet(ranges)
    # NOTE(review): the lines creating the sha1 context ('ctx') and
    # feeding 'data' into it are missing from this chunk.
    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    zero_block = '\x00' * 4096
    # NOTE(review): the line creating the sha1 context ('ctx') is
    # missing from this chunk.
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()
# Convenience re-export: DataImage is implemented in blockimgdiff.
DataImage = blockimgdiff.DataImage

# map recovery.fstab's fs_types to mount/format "partition types"
def GetTypeAndDevice(mount_point, info):
  """Look up mount_point in the fstab carried in info["fstab"] and
  return the pair (partition_type, device)."""
  entry = info["fstab"][mount_point]
  return (PARTITION_TYPES[entry.fs_type], entry.device)
def ParseCertificate(data):
  """Parse a PEM-format certificate.

  Collects the base64 payload lines found between the BEGIN CERTIFICATE
  and END CERTIFICATE markers of 'data' and returns the decoded DER
  bytes.
  """
  # Local import: base64 replaces the Python-2-only
  # "".join(cert).decode('base64') idiom with an equivalent call that
  # also works on Python 3.
  import base64
  cert = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      # Start collecting on the *next* line (the marker itself is not
      # part of the payload).
      save = True
  return base64.b64decode("".join(cert))
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
  # NOTE(review): the signature's continuation line (presumably
  # "info_dict=None):") is missing from this chunk; the body reads
  # 'info_dict' below.
  """Generate a binary patch that creates the recovery image starting
  with the boot image. (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.) Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images. info should be the dictionary returned by
  common.LoadInfoDict() on the input target_files.
  """
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"
  system_root_image = info_dict.get("system_root_image", None) == "true"

  if full_recovery_image:
    # Full image: ship recovery.img verbatim, no patch needed.
    output_sink("etc/recovery.img", recovery_img.data)
    # NOTE(review): the else header is missing; the imgdiff setup below
    # belongs to the patch-from-boot path.
    diff_program = ["imgdiff"]
    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
    if os.path.exists(path):
      # Feed the shared resource file to imgdiff as a bonus input, and
      # pass the same file to applypatch at install time.
      diff_program.append("-b")
      diff_program.append(path)
      bonus_args = "-b /system/etc/recovery-resource.dat"
      # NOTE(review): the else branch (presumably clearing bonus_args)
      # is missing.

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  # The following GetTypeAndDevice()s need to use the path in the target
  # info_dict instead of source_info_dict.
  boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
  recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)

  if full_recovery_image:
    # First-boot script: install the full recovery image if the one on
    # the partition doesn't already match.
    # NOTE(review): the shell script's "else"/"fi" lines appear to be
    # missing from this string literal (cannot be repaired in a
    # comments-only change).
    sh = """#!/system/bin/sh
if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
  log -t recovery "Recovery image already installed"
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
    # NOTE(review): else header missing; this second script is the
    # patch-from-boot variant.  Its "else"/"fi" lines also appear to be
    # missing from the string literal.
    sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
  log -t recovery "Recovery image already installed"
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release. Parse init.*.rc files to find out where the
  # target-files expects it to be, and put it there.
  sh_location = "etc/install-recovery.sh"
  if system_root_image:
    init_rc_dir = os.path.join(input_dir, "ROOT")
    # NOTE(review): else header missing before the non-system-root path.
    init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
  init_rc_files = os.listdir(init_rc_dir)
  for init_rc_file in init_rc_files:
    # Skip anything that isn't an init.*.rc file.
    if (not init_rc_file.startswith('init.') or
        not init_rc_file.endswith('.rc')):
      # NOTE(review): the 'continue' statement appears to be missing.

    with open(os.path.join(init_rc_dir, init_rc_file)) as f:
      # NOTE(review): the 'for line in f:' loop header and the 'if m:'
      # guard around the group(1) access appear to be missing.
      m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
      sh_location = m.group(1)

  print "putting script in", sh_location

  output_sink(sh_location, sh)