1 # Copyright (C) 2008 The Android Open Source Project
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
35 from hashlib import sha1 as sha1
# Global option container shared by all releasetools commands; a single
# module-level instance (OPTIONS) is created elsewhere in this file.
# NOTE(review): the embedded original line numbers are non-contiguous --
# extraction dropped lines here (e.g. the "def __init__(self):" header and
# the closing brace of platform_search_path); code kept verbatim.
38 class Options(object):
# Maps sys.platform values to the default host-tools output directory.
40 platform_search_path = {
41 "linux2": "out/host/linux-x86",
42 "darwin": "out/host/darwin-x86",
# Where to look for host binaries/jars; None when the platform is unknown.
45 self.search_path = platform_search_path.get(sys.platform, None)
46 self.signapk_path = "framework/signapk.jar" # Relative to search_path
47 self.extra_signapk_args = []
48 self.java_path = "java" # Use the one on the path by default.
49 self.java_args = "-Xmx2048m" # JVM Args
50 self.public_key_suffix = ".x509.pem"
51 self.private_key_suffix = ".pk8"
52 # use otatools built boot_signer by default
53 self.boot_signer_path = "boot_signer"
54 self.boot_signer_args = []
55 self.verity_signer_path = None
56 self.verity_signer_args = []
# Path to a device-specific extensions module/dir (see DeviceSpecificParams).
59 self.device_specific = None
# Info dicts for incremental OTA generation; filled in by the callers.
62 self.source_info_dict = None
63 self.target_info_dict = None
64 self.worker_threads = None
70 # Values for "certificate" in apkcerts that mean special things.
# NOTE(review): presumably PRESIGNED = APK is already signed, EXTERNAL =
# signed outside this flow -- confirm against ReadApkCerts callers.
71 SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
# Raised when an external tool invoked by this module (unzip, signapk,
# zipalign, ...) fails; callers catch this rather than RuntimeError.
# NOTE(review): the class body (original line 75, likely "pass") was lost
# in extraction.
74 class ExternalError(RuntimeError):
78 def Run(args, **kwargs):
79 """Create and return a subprocess.Popen object, printing the command
80 line on the terminal if -v was specified."""
# NOTE(review): original line 81 (presumably "if OPTIONS.verbose:", per the
# docstring) was dropped by extraction; as shown the print is unconditional.
82 print " running: ", " ".join(args)
83 return subprocess.Popen(args, **kwargs)
86 def CloseInheritedPipes():
87 """ Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
88 before doing other work."""
# Only needed on macOS; other platforms bail out here.
89 if platform.system() != "Darwin":
# NOTE(review): the early "return" and the os.fstat/os.close try/except
# around the loop body were lost in extraction.
91 for d in range(3, 1025):
# Close only descriptors whose stat mode marks them as pipes (S_IFIFO bit).
95 pipebit = stat[0] & 0x1000
102 def LoadInfoDict(input_file):
103 """Read and parse the META/misc_info.txt key/value pairs from the
104 input target files and return a dict."""
# read_helper(fn): fetch a path either from an open ZipFile or from an
# extracted target-files directory on disk.
# NOTE(review): the "def read_helper(fn):" header and the try/except
# scaffolding throughout this function were lost in extraction.
107 if isinstance(input_file, zipfile.ZipFile):
108 return input_file.read(fn)
110 path = os.path.join(input_file, *fn.split("/"))
112 with open(path) as f:
115 if e.errno == errno.ENOENT:
119 d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
121 # ok if misc_info.txt doesn't exist
124 # backwards compatibility: These values used to be in their own
125 # files. Look for them, in case we're processing an old
128 if "mkyaffs2_extra_flags" not in d:
130 d["mkyaffs2_extra_flags"] = read_helper(
131 "META/mkyaffs2-extra-flags.txt").strip()
133 # ok if flags don't exist
136 if "recovery_api_version" not in d:
138 d["recovery_api_version"] = read_helper(
139 "META/recovery-api-version.txt").strip()
141 raise ValueError("can't find recovery API version in input target-files")
143 if "tool_extensions" not in d:
145 d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
147 # ok if extensions don't exist
150 if "fstab_version" not in d:
151 d["fstab_version"] = "1"
# Legacy META/imagesizes.txt: "name value" pairs, one per line.
154 data = read_helper("META/imagesizes.txt")
155 for line in data.split("\n"):
158 name, value = line.split(" ", 1)
161 if name == "blocksize":
164 d[name + "_size"] = value
# makeint(key): coerce a known numeric key in place; int(..., 0) auto-detects
# the base, so hex "0x..." values are accepted.
170 d[key] = int(d[key], 0)
172 makeint("recovery_api_version")
174 makeint("system_size")
175 makeint("vendor_size")
176 makeint("userdata_size")
177 makeint("cache_size")
178 makeint("recovery_size")
180 makeint("fstab_version")
182 d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"])
183 d["build.prop"] = LoadBuildProp(read_helper)
# NOTE(review): the trailing "return d" was lost in extraction.
186 def LoadBuildProp(read_helper):
# Parse SYSTEM/build.prop from the target-files into a {key: value} dict.
# NOTE(review): the try/except around read_helper (which sets data = "" on
# KeyError) was lost in extraction.
188 data = read_helper("SYSTEM/build.prop")
190 print "Warning: could not find SYSTEM/build.prop in %s" % zip
192 return LoadDictionaryFromLines(data.split("\n"))
194 def LoadDictionaryFromLines(lines):
# Turn "key=value" lines into a dict, skipping blanks and '#' comments.
# NOTE(review): the dict initialization, the for-loop/strip lines and the
# trailing "return d" were lost in extraction.
198 if not line or line.startswith("#"):
201 name, value = line.split("=", 1)
205 def LoadRecoveryFSTab(read_helper, fstab_version):
# Parse RECOVERY/RAMDISK/etc/recovery.fstab into {mount_point: Partition},
# supporting the v1 and v2 (fs_mgr) fstab formats.
# Lightweight record describing one fstab entry.
206 class Partition(object):
207 def __init__(self, mount_point, fs_type, device, length, device2, context):
208 self.mount_point = mount_point
209 self.fs_type = fs_type
# NOTE(review): assignments for self.device and self.length (original
# lines 210-211) were lost in extraction.
212 self.device2 = device2
213 self.context = context
216 data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
218 print "Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab"
# v1 format: <mount_point> <fs_type> <device> [<device2>] [options]
221 if fstab_version == 1:
223 for line in data.split("\n"):
225 if not line or line.startswith("#"):
227 pieces = line.split()
228 if not 3 <= len(pieces) <= 4:
229 raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
# A 4th field starting with "/" is an alternate device, else options.
232 if pieces[3].startswith("/"):
242 mount_point = pieces[0]
245 options = options.split(",")
247 if i.startswith("length="):
250 print "%s: unknown option \"%s\"" % (mount_point, i)
252 d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
253 device=pieces[2], length=length,
256 elif fstab_version == 2:
258 for line in data.split("\n"):
260 if not line or line.startswith("#"):
262 # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
263 pieces = line.split()
265 raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
267 # Ignore entries that are managed by vold
269 if "voldmanaged=" in options:
272 # It's a good line, parse it
274 options = options.split(",")
276 if i.startswith("length="):
279 # Ignore all unknown options in the unified fstab
282 mount_flags = pieces[3]
283 # Honor the SELinux context if present.
285 for i in mount_flags.split(","):
286 if i.startswith("context="):
289 mount_point = pieces[1]
290 d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
291 device=pieces[0], length=length,
292 device2=None, context=context)
295 raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))
# NOTE(review): the trailing "return d" was lost in extraction.
# NOTE(review): the enclosing "def DumpInfoDict(d):" header was lost in
# extraction; this pretty-prints a loaded info dict, one key per line.
301 for k, v in sorted(d.items()):
302 print "%-25s = (%s) %s" % (k, type(v).__name__, v)
305 def BuildBootableImage(sourcedir, fs_config_file, info_dict=None):
306 """Take a kernel, cmdline, and ramdisk directory from the input (in
307 'sourcedir'), and turn them into a boot image. Return the image
308 data, or None if sourcedir does not appear to contains files for
309 building the requested image."""
# Both a RAMDISK dir and a kernel must exist, else bail (return lost in view).
311 if (not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK) or
312 not os.access(os.path.join(sourcedir, "kernel"), os.F_OK)):
315 if info_dict is None:
316 info_dict = OPTIONS.info_dict
318 ramdisk_img = tempfile.NamedTemporaryFile()
319 img = tempfile.NamedTemporaryFile()
# Build the ramdisk: mkbootfs (with optional fs_config) piped into minigzip.
321 if os.access(fs_config_file, os.F_OK):
322 cmd = ["mkbootfs", "-f", fs_config_file, os.path.join(sourcedir, "RAMDISK")]
324 cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
325 p1 = Run(cmd, stdout=subprocess.PIPE)
326 p2 = Run(["minigzip"],
327 stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
# NOTE(review): the p1/p2 wait() calls (original ~328-330) were lost in
# extraction; the returncode asserts below depend on them.
331 assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
332 assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)
334 # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
335 mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
337 cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]
# Optional inputs alongside the kernel: second-stage, cmdline, base, pagesize.
339 fn = os.path.join(sourcedir, "second")
340 if os.access(fn, os.F_OK):
341 cmd.append("--second")
344 fn = os.path.join(sourcedir, "cmdline")
345 if os.access(fn, os.F_OK):
346 cmd.append("--cmdline")
347 cmd.append(open(fn).read().rstrip("\n"))
349 fn = os.path.join(sourcedir, "base")
350 if os.access(fn, os.F_OK):
352 cmd.append(open(fn).read().rstrip("\n"))
354 fn = os.path.join(sourcedir, "pagesize")
355 if os.access(fn, os.F_OK):
356 cmd.append("--pagesize")
357 cmd.append(open(fn).read().rstrip("\n"))
359 args = info_dict.get("mkbootimg_args", None)
360 if args and args.strip():
361 cmd.extend(shlex.split(args))
# With vboot the unsigned image goes to a temp file to be signed afterwards.
364 if info_dict.get("vboot", None):
365 img_unsigned = tempfile.NamedTemporaryFile()
366 cmd.extend(["--ramdisk", ramdisk_img.name,
367 "--output", img_unsigned.name])
369 cmd.extend(["--ramdisk", ramdisk_img.name,
370 "--output", img.name])
372 p = Run(cmd, stdout=subprocess.PIPE)
374 assert p.returncode == 0, "mkbootimg of %s image failed" % (
375 os.path.basename(sourcedir),)
# Sign in place with boot_signer when enabled and a verity key is set.
377 if (info_dict.get("boot_signer", None) == "true" and
378 info_dict.get("verity_key", None)):
379 path = "/" + os.path.basename(sourcedir).lower()
380 cmd = [OPTIONS.boot_signer_path]
381 cmd.extend(OPTIONS.boot_signer_args)
382 cmd.extend([path, img.name,
383 info_dict["verity_key"] + ".pk8",
384 info_dict["verity_key"] + ".x509.pem", img.name])
385 p = Run(cmd, stdout=subprocess.PIPE)
387 assert p.returncode == 0, "boot_signer of %s image failed" % path
389 # Sign the image if vboot is non-empty.
390 elif info_dict.get("vboot", None):
391 path = "/" + os.path.basename(sourcedir).lower()
392 img_keyblock = tempfile.NamedTemporaryFile()
393 cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
394 img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
395 info_dict["vboot_key"] + ".vbprivk", img_keyblock.name,
397 p = Run(cmd, stdout=subprocess.PIPE)
399 assert p.returncode == 0, "vboot_signer of %s image failed" % path
401 # Clean up the temp files.
# NOTE(review): argument order looks swapped -- should presumably be
# img.seek(0, os.SEEK_SET); harmless only because os.SEEK_SET == 0.
405 img.seek(os.SEEK_SET, 0)
# NOTE(review): the trailing read/close/return of the image data was lost
# in extraction.
414 def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
416 """Return a File object (with name 'name') with the desired bootable
417 image. Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name
418 'prebuilt_name', otherwise look for it under 'unpack_dir'/IMAGES,
419 otherwise construct it from the source files in
420 'unpack_dir'/'tree_subdir'."""
# First preference: a prebuilt image shipped inside the target-files.
422 prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
423 if os.path.exists(prebuilt_path):
424 print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
425 return File.FromLocalFile(name, prebuilt_path)
427 prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
428 if os.path.exists(prebuilt_path):
429 print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
430 return File.FromLocalFile(name, prebuilt_path)
# Fall back to building the image from sources in the tree.
432 print "building image from target_files %s..." % (tree_subdir,)
433 fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
434 data = BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
435 os.path.join(unpack_dir, fs_config),
438 return File(name, data)
442 def UnzipTemp(filename, pattern=None):
443 """Unzip the given archive into a temporary directory and return the name.
445 If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
446 temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
448 Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
449 main file), open for reading.
# Temp dir is recorded so Cleanup() removes it on exit.
452 tmp = tempfile.mkdtemp(prefix="targetfiles-")
453 OPTIONS.tempfiles.append(tmp)
# Helper: shell out to `unzip` (quiet, overwrite) into dirname.
455 def unzip_to_dir(filename, dirname):
456 cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
457 if pattern is not None:
459 p = Run(cmd, stdout=subprocess.PIPE)
461 if p.returncode != 0:
462 raise ExternalError("failed to unzip input target-files \"%s\"" %
# Handle the "foo.zip+bar.zip" composite form described in the docstring.
465 m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
467 unzip_to_dir(m.group(1), tmp)
468 unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
469 filename = m.group(1)
471 unzip_to_dir(filename, tmp)
473 return tmp, zipfile.ZipFile(filename, "r")
476 def GetKeyPasswords(keylist):
477 """Given a list of keys, prompt the user to enter passwords for
478 those which require them. Return a {key: password} dict. password
479 will be None if the key has no password."""
# NOTE(review): initialization of no_passwords/need_passwords/key_passwords
# (original ~480-483) was lost in extraction.
484 devnull = open("/dev/null", "w+b")
485 for k in sorted(keylist):
486 # We don't need a password for things that aren't really keys.
487 if k in SPECIAL_CERT_STRINGS:
488 no_passwords.append(k)
# Probe with openssl: first try reading the key as unencrypted PKCS#8.
491 p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
492 "-inform", "DER", "-nocrypt"],
493 stdin=devnull.fileno(),
494 stdout=devnull.fileno(),
495 stderr=subprocess.STDOUT)
497 if p.returncode == 0:
498 # Definitely an unencrypted key.
499 no_passwords.append(k)
# Second probe: try decrypting with an empty password to distinguish
# "encrypted" from "unparseable".
501 p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
502 "-inform", "DER", "-passin", "pass:"],
503 stdin=devnull.fileno(),
504 stdout=devnull.fileno(),
505 stderr=subprocess.PIPE)
506 _, stderr = p.communicate()
507 if p.returncode == 0:
508 # Encrypted key with empty string as password.
509 key_passwords[k] = ''
510 elif stderr.startswith('Error decrypting key'):
511 # Definitely encrypted key.
512 # It would have said "Error reading key" if it didn't parse correctly.
513 need_passwords.append(k)
515 # Potentially, a type of key that openssl doesn't understand.
516 # We'll let the routines in signapk.jar handle it.
517 no_passwords.append(k)
# Prompt (or read $ANDROID_PW_FILE) only for the keys that need it.
520 key_passwords.update(PasswordManager().GetPasswords(need_passwords))
521 key_passwords.update(dict.fromkeys(no_passwords, None))
# NOTE(review): the trailing "return key_passwords" was lost in extraction.
525 def SignFile(input_name, output_name, key, password, align=None,
527 """Sign the input_name zip/jar/apk, producing output_name. Use the
528 given key and password (the latter may be None if the key does not
531 If align is an integer > 1, zipalign is run to align stored files in
532 the output zip on 'align'-byte boundaries.
534 If whole_file is true, use the "-w" option to SignApk to embed a
535 signature that covers the whole file in the archive comment of the
# Alignments of 0/1 are no-ops; treat them as "no zipalign pass".
539 if align == 0 or align == 1:
# When aligning, sign into a temp file first; zipalign then writes output.
543 temp = tempfile.NamedTemporaryFile()
544 sign_name = temp.name
546 sign_name = output_name
548 cmd = [OPTIONS.java_path, OPTIONS.java_args, "-jar",
549 os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
550 cmd.extend(OPTIONS.extra_signapk_args)
553 cmd.extend([key + OPTIONS.public_key_suffix,
554 key + OPTIONS.private_key_suffix,
555 input_name, sign_name])
# The password is fed to signapk on stdin, never on the command line.
557 p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
558 if password is not None:
560 p.communicate(password)
561 if p.returncode != 0:
562 raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
565 p = Run(["zipalign", "-f", "-p", str(align), sign_name, output_name])
# NOTE(review): the p.communicate()/wait before this check was lost in
# extraction.
567 if p.returncode != 0:
568 raise ExternalError("zipalign failed: return code %s" % (p.returncode,))
572 def CheckSize(data, target, info_dict):
573 """Check the data string passed against the max size limit, if
574 any, for the given target. Raise exception if the data is too big.
575 Print a warning if the data is nearing the maximum size."""
# Strip the ".img" suffix to derive the mount point name.
577 if target.endswith(".img"):
579 mount_point = "/" + target
# Prefer fstab-derived fs_type/device when an fstab was loaded.
583 if info_dict["fstab"]:
584 if mount_point == "/userdata":
585 mount_point = "/data"
586 p = info_dict["fstab"][mount_point]
# Size limits in the info dict are keyed by bare device name, e.g.
# "system_size", so keep only the final path component.
590 device = device[device.rfind("/")+1:]
591 limit = info_dict.get(device + "_size", None)
592 if not fs_type or not limit:
595 if fs_type == "yaffs2":
596 # image size should be increased by 1/64th to account for the
597 # spare area (64 bytes per 2k page)
598 limit = limit / 2048 * (2048+64)
600 pct = float(size) * 100.0 / limit
601 msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
# NOTE(review): the pct thresholds guarding these branches (original
# ~602, 605) were lost in extraction.
603 raise ExternalError(msg)
606 print " WARNING: ", msg
608 elif OPTIONS.verbose:
612 def ReadApkCerts(tf_zip):
613 """Given a target_files ZipFile, parse the META/apkcerts.txt file
614 and return a {package: cert} dict."""
616 for line in tf_zip.read("META/apkcerts.txt").split("\n"):
620 m = re.match(r'^name="(.*)"\s+certificate="(.*)"\s+'
621 r'private_key="(.*)"$', line)
623 name, cert, privkey = m.groups()
624 public_key_suffix_len = len(OPTIONS.public_key_suffix)
625 private_key_suffix_len = len(OPTIONS.private_key_suffix)
# Special cert strings (PRESIGNED/EXTERNAL) carry no private key.
626 if cert in SPECIAL_CERT_STRINGS and not privkey:
# Normal case: cert/key pair must share a basename; map to the stem.
628 elif (cert.endswith(OPTIONS.public_key_suffix) and
629 privkey.endswith(OPTIONS.private_key_suffix) and
630 cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
631 certmap[name] = cert[:-public_key_suffix_len]
633 raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
# NOTE(review): certmap initialization and "return certmap" were lost in
# extraction.
# Usage text for the flags shared by every releasetools command; appended
# to each tool's own docstring by Usage().
637 COMMON_DOCSTRING = """
639 Prepend <dir>/bin to the list of places to search for binaries
640 run by this script, and expect to find jars in <dir>/framework.
642 -s (--device_specific) <file>
643 Path to the python module containing device-specific
646 -x (--extra) <key=value>
647 Add a key/value pair to the 'extras' dict, which device-specific
648 extension code may look at.
651 Show command lines being executed.
654 Display this usage message and exit.
657 def Usage(docstring):
# Print the calling tool's docstring followed by the common-flags help.
658 print docstring.rstrip("\n")
659 print COMMON_DOCSTRING
662 def ParseOptions(argv,
664 extra_opts="", extra_long_opts=(),
665 extra_option_handler=None):
666 """Parse the options in argv and return any arguments that aren't
667 flags. docstring is the calling module's docstring, to be displayed
668 for errors and -h. extra_opts and extra_long_opts are for flags
669 defined by the caller, which are processed by passing them to
670 extra_option_handler."""
# NOTE(review): the enclosing try (original ~672) and the error-path
# Usage()/sys.exit lines (~684-685) were lost in extraction.
673 opts, args = getopt.getopt(
674 argv, "hvp:s:x:" + extra_opts,
675 ["help", "verbose", "path=", "signapk_path=", "extra_signapk_args=",
676 "java_path=", "java_args=", "public_key_suffix=",
677 "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
678 "verity_signer_path=", "verity_signer_args=", "device_specific=",
680 list(extra_long_opts))
681 except getopt.GetoptError as err:
683 print "**", str(err), "**"
# Dispatch each recognized flag onto the global OPTIONS object.
687 if o in ("-h", "--help"):
690 elif o in ("-v", "--verbose"):
691 OPTIONS.verbose = True
692 elif o in ("-p", "--path"):
693 OPTIONS.search_path = a
694 elif o in ("--signapk_path",):
695 OPTIONS.signapk_path = a
696 elif o in ("--extra_signapk_args",):
697 OPTIONS.extra_signapk_args = shlex.split(a)
698 elif o in ("--java_path",):
699 OPTIONS.java_path = a
700 elif o in ("--java_args",):
701 OPTIONS.java_args = a
702 elif o in ("--public_key_suffix",):
703 OPTIONS.public_key_suffix = a
704 elif o in ("--private_key_suffix",):
705 OPTIONS.private_key_suffix = a
706 elif o in ("--boot_signer_path",):
707 OPTIONS.boot_signer_path = a
708 elif o in ("--boot_signer_args",):
709 OPTIONS.boot_signer_args = shlex.split(a)
710 elif o in ("--verity_signer_path",):
711 OPTIONS.verity_signer_path = a
712 elif o in ("--verity_signer_args",):
713 OPTIONS.verity_signer_args = shlex.split(a)
714 elif o in ("-s", "--device_specific"):
715 OPTIONS.device_specific = a
716 elif o in ("-x", "--extra"):
717 key, value = a.split("=", 1)
718 OPTIONS.extras[key] = value
# Unknown flags are offered to the caller's handler before failing.
720 if extra_option_handler is None or not extra_option_handler(o, a):
721 assert False, "unknown option \"%s\"" % (o,)
# Make tools under <search_path>/bin win over the ambient PATH.
723 if OPTIONS.search_path:
724 os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
725 os.pathsep + os.environ["PATH"])
# NOTE(review): the trailing "return args" was lost in extraction.
730 def MakeTempFile(prefix=None, suffix=None):
731 """Make a temp file and add it to the list of things to be deleted
732 when Cleanup() is called. Return the filename."""
733 fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
# NOTE(review): the os.close(fd) and "return fn" around this line were
# lost in extraction; only the path (not the fd) is kept.
735 OPTIONS.tempfiles.append(fn)
# NOTE(review): the enclosing "def Cleanup():" header was lost in
# extraction; this removes every temp file/dir recorded in OPTIONS.tempfiles.
740 for i in OPTIONS.tempfiles:
# Collects key passwords, either interactively or via a user-editable
# $ANDROID_PW_FILE. NOTE(review): embedded original line numbers are
# non-contiguous; several headers (e.g. "def __init__(self):" and
# "def ReadFile(self):") and loop scaffolding were lost in extraction.
747 class PasswordManager(object):
749 self.editor = os.getenv("EDITOR", None)
750 self.pwfile = os.getenv("ANDROID_PW_FILE", None)
752 def GetPasswords(self, items):
753 """Get passwords corresponding to each string in 'items',
754 returning a dict. (The dict may have keys in addition to the
757 Uses the passwords in $ANDROID_PW_FILE if available, letting the
758 user edit that file to add more needed passwords. If no editor is
759 available, or $ANDROID_PW_FILE isn't define, prompts the user
760 interactively in the ordinary way.
763 current = self.ReadFile()
# Loop until every requested item has a non-empty password.
769 if i not in current or not current[i]:
771 # Are all the passwords already in the file?
779 print "key file %s still missing some passwords." % (self.pwfile,)
780 answer = raw_input("try to edit again? [y]> ").strip()
781 if answer and answer[0] not in 'yY':
782 raise RuntimeError("key passwords unavailable")
785 current = self.UpdateAndReadFile(current)
787 def PromptResult(self, current): # pylint: disable=no-self-use
788 """Prompt the user to enter a value (password) for each key in
789 'current' whose value is fales. Returns a new dict with all the
# Keys that already have a value are carried over unchanged.
793 for k, v in sorted(current.iteritems()):
798 result[k] = getpass.getpass(
799 "Enter password for %s key> " % k).strip()
804 def UpdateAndReadFile(self, current):
# Without an editor + pw file, fall back to interactive prompting.
805 if not self.editor or not self.pwfile:
806 return self.PromptResult(current)
808 f = open(self.pwfile, "w")
# 0600: the file holds plaintext passwords.
809 os.chmod(self.pwfile, 0o600)
810 f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
811 f.write("# (Additional spaces are harmless.)\n\n")
# Sort so entries missing a password come first.
814 sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
815 for i, (_, k, v) in enumerate(sorted_list):
816 f.write("[[[ %s ]]] %s\n" % (v, k))
817 if not v and first_line is None:
818 # position cursor on first line with no password.
# Launch $EDITOR positioned on the first empty entry, then re-read.
822 p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
823 _, _ = p.communicate()
825 return self.ReadFile()
829 if self.pwfile is None:
832 f = open(self.pwfile, "r")
835 if not line or line[0] == '#':
837 m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
839 print "failed to parse password file: ", line
841 result[m.group(2)] = m.group(1)
# A missing password file is fine; any other I/O error is reported.
844 if e.errno != errno.ENOENT:
845 print "error reading password file: ", str(e)
849 def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
854 # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
855 # for files larger than 2GiB. We can work around this by adjusting their
856 # limit. Note that `zipfile.writestr()` will not work for strings larger than
857 # 2GiB. The Python interpreter sometimes rejects strings that large (though
858 # it isn't clear to me exactly what circumstances cause this).
859 # `zipfile.write()` must be used directly to work around this.
861 # This mess can be avoided if we port to python3.
862 saved_zip64_limit = zipfile.ZIP64_LIMIT
863 zipfile.ZIP64_LIMIT = (1 << 32) - 1
865 if compress_type is None:
866 compress_type = zip_file.compression
# NOTE(review): the "if arcname is None: arcname = filename" default and
# the try/finally protecting the restore lines below were lost in extraction.
870 saved_stat = os.stat(filename)
873 # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
874 # file to be zipped and reset it when we're done.
875 os.chmod(filename, perms)
877 # Use a fixed timestamp so the output is repeatable.
878 epoch = datetime.datetime.fromtimestamp(0)
879 timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
880 os.utime(filename, (timestamp, timestamp))
882 zip_file.write(filename, arcname=arcname, compress_type=compress_type)
# Restore the file's original mode/times and the zipfile module limit.
884 os.chmod(filename, saved_stat.st_mode)
885 os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
886 zipfile.ZIP64_LIMIT = saved_zip64_limit
889 def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
891 """Wrap zipfile.writestr() function to work around the zip64 limit.
893 Even with the ZIP64_LIMIT workaround, it won't allow writing a string
894 longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
895 when calling crc32(bytes).
897 But it still works fine to write a shorter string into a large zip file.
898 We should use ZipWrite() whenever possible, and only use ZipWriteStr()
899 when we know the string won't be too long.
902 saved_zip64_limit = zipfile.ZIP64_LIMIT
903 zipfile.ZIP64_LIMIT = (1 << 32) - 1
# Accept either a ZipInfo or a plain archive name.
905 if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
906 zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
907 zinfo.compress_type = zip_file.compression
# NOTE(review): the default-perms assignment for the name case (original
# ~908-910) was lost in extraction.
911 zinfo = zinfo_or_arcname
913 # If compress_type is given, it overrides the value in zinfo.
914 if compress_type is not None:
915 zinfo.compress_type = compress_type
917 # If perms is given, it has a priority.
918 if perms is not None:
# Unix permission bits live in the high 16 bits of external_attr.
919 zinfo.external_attr = perms << 16
921 # Use a fixed timestamp so the output is repeatable.
922 zinfo.date_time = (2009, 1, 1, 0, 0, 0)
924 zip_file.writestr(zinfo, data)
925 zipfile.ZIP64_LIMIT = saved_zip64_limit
928 def ZipClose(zip_file):
930 # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
# (central directory) -- so the same workaround applies while closing.
932 saved_zip64_limit = zipfile.ZIP64_LIMIT
933 zipfile.ZIP64_LIMIT = (1 << 32) - 1
# NOTE(review): the zip_file.close() call (original ~935) was lost in
# extraction.
937 zipfile.ZIP64_LIMIT = saved_zip64_limit
# Loads an optional device-specific extensions module and exposes OTA
# lifecycle hooks that delegate to it. NOTE(review): embedded original line
# numbers are non-contiguous; several lines (module default, error
# handling around imp.find_module) were lost in extraction.
940 class DeviceSpecificParams(object):
942 def __init__(self, **kwargs):
943 """Keyword arguments to the constructor become attributes of this
944 object, which is passed to all functions in the device-specific
946 for k, v in kwargs.iteritems():
948 self.extras = OPTIONS.extras
950 if self.module is None:
951 path = OPTIONS.device_specific
# A directory means "load its releasetools.py"; otherwise treat path as
# a module file and import it by basename.
955 if os.path.isdir(path):
956 info = imp.find_module("releasetools", [path])
958 d, f = os.path.split(path)
959 b, x = os.path.splitext(f)
962 info = imp.find_module(f, [d])
963 print "loaded device-specific extensions from", path
964 self.module = imp.load_module("device_specific", *info)
966 print "unable to load device-specific module; assuming none"
968 def _DoCall(self, function_name, *args, **kwargs):
969 """Call the named function in the device-specific module, passing
970 the given args and kwargs. The first argument to the call will be
971 the DeviceSpecific object itself. If there is no module, or the
972 module does not define the function, return the value of the
973 'default' kwarg (which itself defaults to None)."""
974 if self.module is None or not hasattr(self.module, function_name):
975 return kwargs.get("default", None)
976 return getattr(self.module, function_name)(*((self,) + args), **kwargs)
978 def FullOTA_Assertions(self):
979 """Called after emitting the block of assertions at the top of a
980 full OTA package. Implementations can add whatever additional
981 assertions they like."""
982 return self._DoCall("FullOTA_Assertions")
984 def FullOTA_InstallBegin(self):
985 """Called at the start of full OTA installation."""
986 return self._DoCall("FullOTA_InstallBegin")
988 def FullOTA_InstallEnd(self):
989 """Called at the end of full OTA installation; typically this is
990 used to install the image for the device's baseband processor."""
991 return self._DoCall("FullOTA_InstallEnd")
993 def IncrementalOTA_Assertions(self):
994 """Called after emitting the block of assertions at the top of an
995 incremental OTA package. Implementations can add whatever
996 additional assertions they like."""
997 return self._DoCall("IncrementalOTA_Assertions")
999 def IncrementalOTA_VerifyBegin(self):
1000 """Called at the start of the verification phase of incremental
1001 OTA installation; additional checks can be placed here to abort
1002 the script before any changes are made."""
1003 return self._DoCall("IncrementalOTA_VerifyBegin")
1005 def IncrementalOTA_VerifyEnd(self):
1006 """Called at the end of the verification phase of incremental OTA
1007 installation; additional checks can be placed here to abort the
1008 script before any changes are made."""
1009 return self._DoCall("IncrementalOTA_VerifyEnd")
1011 def IncrementalOTA_InstallBegin(self):
1012 """Called at the start of incremental OTA installation (after
1013 verification is complete)."""
1014 return self._DoCall("IncrementalOTA_InstallBegin")
1016 def IncrementalOTA_InstallEnd(self):
1017 """Called at the end of incremental OTA installation; typically
1018 this is used to install the image for the device's baseband
1020 return self._DoCall("IncrementalOTA_InstallEnd")
# NOTE(review): the enclosing "class File(object):" header (original ~1022)
# was lost in extraction. A File is an in-memory named blob with cached
# size and SHA-1, with helpers to spill to a temp file or a zip.
1023 def __init__(self, name, data):
1026 self.size = len(data)
1027 self.sha1 = sha1(data).hexdigest()
# Alternate constructor: slurp a file from disk.
1030 def FromLocalFile(cls, name, diskname):
1031 f = open(diskname, "rb")
1034 return File(name, data)
# Write self.data to a NamedTemporaryFile and return it (caller keeps the
# object alive so the temp file persists).
1036 def WriteToTemp(self):
1037 t = tempfile.NamedTemporaryFile()
1042 def AddToZip(self, z, compression=None):
1043 ZipWriteStr(z, self.name, self.data, compress_type=compression)
# Chooses the diff tool by target extension: zip-like archives use
# imgdiff -z; everything else falls back to bsdiff (see ComputePatch).
1045 DIFF_PROGRAM_BY_EXT = {
1047 ".zip" : ["imgdiff", "-z"],
1048 ".jar" : ["imgdiff", "-z"],
1049 ".apk" : ["imgdiff", "-z"],
# Computes a binary patch turning source file sf into target file tf.
# NOTE(review): embedded original line numbers are non-contiguous; the
# self.tf/self.sf/self.patch assignments and parts of the worker-thread
# plumbing were lost in extraction.
1053 class Difference(object):
1054 def __init__(self, tf, sf, diff_program=None):
1058 self.diff_program = diff_program
1060 def ComputePatch(self):
1061 """Compute the patch (as a string of data) needed to turn sf into
1062 tf. Returns the same tuple as GetPatch()."""
# Explicit diff_program wins; otherwise pick by target file extension.
1067 if self.diff_program:
1068 diff_program = self.diff_program
1070 ext = os.path.splitext(tf.name)[1]
1071 diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
# Spill both files to disk so the external diff tool can read them.
1073 ttemp = tf.WriteToTemp()
1074 stemp = sf.WriteToTemp()
1076 ext = os.path.splitext(tf.name)[1]
1079 ptemp = tempfile.NamedTemporaryFile()
1080 if isinstance(diff_program, list):
1081 cmd = copy.copy(diff_program)
1083 cmd = [diff_program]
1084 cmd.append(stemp.name)
1085 cmd.append(ttemp.name)
1086 cmd.append(ptemp.name)
1087 p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
1090 _, e = p.communicate()
# Run the diff in a thread so a hung tool can be abandoned after 5 min.
1093 th = threading.Thread(target=run)
1095 th.join(timeout=300) # 5 mins
1097 print "WARNING: diff command timed out"
1104 if err or p.returncode != 0:
1105 print "WARNING: failure running %s:\n%s\n" % (
1106 diff_program, "".join(err))
1108 return None, None, None
1116 return self.tf, self.sf, self.patch
1120 """Return a tuple (target_file, source_file, patch_data).
1121 patch_data may be None if ComputePatch hasn't been called, or if
1122 computing the patch failed."""
1123 return self.tf, self.sf, self.patch
1126 def ComputeDifferences(diffs):
1127 """Call ComputePatch on all the Difference objects in 'diffs'."""
1128 print len(diffs), "diffs to compute"
1130 # Do the largest files first, to try and reduce the long-pole effect.
1131 by_size = [(i.tf.size, i) for i in diffs]
1132 by_size.sort(reverse=True)
1133 by_size = [i[1] for i in by_size]
# Worker threads pull the next Difference from diff_iter under the lock.
1135 lock = threading.Lock()
1136 diff_iter = iter(by_size) # accessed under lock
# NOTE(review): the worker() definition header, the next() call and the
# d.ComputePatch() invocation were lost in extraction.
1145 dur = time.time() - start
1148 tf, sf, patch = d.GetPatch()
1149 if sf.name == tf.name:
1152 name = "%s (%s)" % (tf.name, sf.name)
1154 print "patching failed! %s" % (name,)
1156 print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
1157 dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
1159 except Exception as e:
1163 # start worker threads; wait for them all to finish.
1164 threads = [threading.Thread(target=worker)
1165 for i in range(OPTIONS.worker_threads)]
1169 threads.pop().join()
1172 class BlockDifference(object):
1173 def __init__(self, partition, tgt, src=None, check_first_block=False,
1177 self.partition = partition
1178 self.check_first_block = check_first_block
1180 # Due to http://b/20939131, check_first_block is disabled temporarily.
1181 assert not self.check_first_block
1185 if OPTIONS.info_dict:
1188 OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
1189 self.version = version
1191 b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
1192 version=self.version)
1193 tmpdir = tempfile.mkdtemp()
1194 OPTIONS.tempfiles.append(tmpdir)
1195 self.path = os.path.join(tmpdir, partition)
1196 b.Compute(self.path)
1199 _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
1201 _, self.device = GetTypeAndDevice("/" + partition,
1202 OPTIONS.source_info_dict)
1204 def WriteScript(self, script, output_zip, progress=None):
1206 # write the output unconditionally
1207 script.Print("Patching %s image unconditionally..." % (self.partition,))
1209 script.Print("Patching %s image after verification." % (self.partition,))
1212 script.ShowProgress(progress, 0)
1213 self._WriteUpdate(script, output_zip)
1214 self._WritePostInstallVerifyScript(script)
  def WriteVerifyScript(self, script):
    """Emit commands that verify the source partition before patching.

    For version >= 3 the emitted check also accepts a partition that
    passes block_image_verify against the packaged transfer list, so a
    partially-applied update can still be accepted.
    """
    partition = self.partition
    script.Print("Image %s will be patched unconditionally." % (partition,))
    # Exclude clobbered_blocks: their on-device contents are not expected
    # to match the source image.
    ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
    ranges_str = ranges.to_string_raw()
    if self.version >= 3:
      script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                          'block_image_verify("%s", '
                          'package_extract_file("%s.transfer.list"), '
                          '"%s.new.dat", "%s.patch.dat")) then') % (
                          self.device, ranges_str, self.src.TotalSha1(),
                          self.device, partition, partition, partition))
      # NOTE(review): an `else:` likely preceded this call in the full
      # source; for version < 3 only the plain range_sha1 check is used.
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
          self.device, ranges_str, self.src.TotalSha1()))
    script.Print('Verified %s image...' % (partition,))
    script.AppendExtra('else')

    # When generating incrementals for the system and vendor partitions,
    # explicitly check the first block (which contains the superblock) of
    # the partition to see if it's what we expect. If this check fails,
    # give an explicit log message about the partition having been
    # remounted R/W (the most likely explanation) and the need to flash to
    # get OTAs working again.
    if self.check_first_block:
      self._CheckFirstBlock(script)

    # Abort the OTA update. Note that the incremental OTA cannot be applied
    # even if it may match the checksum of the target partition.
    # a) If version < 3, operations like move and erase will make changes
    # unconditionally and damage the partition.
    # b) If version >= 3, it won't even reach here.
    script.AppendExtra(('abort("%s partition has unexpected contents");\n'
                        'endif;') % (partition,))
  def _WritePostInstallVerifyScript(self, script):
    """Emit commands that verify the partition after it has been updated."""
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
        self.device, ranges_str,
        self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
          self.device, ranges_str,
          self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      # NOTE(review): the AppendExtra/else/endif scaffolding around the
      # string fragments below appears to be elided from this excerpt.
      ' abort("%s partition has unexpected non-zero contents after OTA '
      'endif;' % (partition,))
      script.Print('Verified the updated %s image.' % (partition,))
      ' abort("%s partition has unexpected contents after OTA update");\n'
      'endif;' % (partition,))
1284 def _WriteUpdate(self, script, output_zip):
1285 ZipWrite(output_zip,
1286 '{}.transfer.list'.format(self.path),
1287 '{}.transfer.list'.format(self.partition))
1288 ZipWrite(output_zip,
1289 '{}.new.dat'.format(self.path),
1290 '{}.new.dat'.format(self.partition))
1291 ZipWrite(output_zip,
1292 '{}.patch.dat'.format(self.path),
1293 '{}.patch.dat'.format(self.partition),
1294 compress_type=zipfile.ZIP_STORED)
1296 call = ('block_image_update("{device}", '
1297 'package_extract_file("{partition}.transfer.list"), '
1298 '"{partition}.new.dat", "{partition}.patch.dat");\n'.format(
1299 device=self.device, partition=self.partition))
1300 script.AppendExtra(script.WordWrap(call))
  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
    """Return the hex digest of the given block ranges read from source."""
    data = source.ReadRangeSet(ranges)
    # NOTE(review): `ctx` is not initialized in the visible lines —
    # presumably a sha1() context updated with `data` in lines elided from
    # this excerpt; confirm against the full source.
    return ctx.hexdigest()
1311 def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
1312 """Return the hash value for all zero blocks."""
1313 zero_block = '\x00' * 4096
1315 for _ in range(num_blocks):
1316 ctx.update(zero_block)
1318 return ctx.hexdigest()
  # TODO(tbao): Due to http://b/20939131, block 0 may be changed without
  # remounting R/W. Will change the checking to a finer-grained way to
  # mask off those bits.
  def _CheckFirstBlock(self, script):
    """Emit a check that block 0 of the source partition still matches the
    source image, aborting with a "remounted R/W" message otherwise.

    NOTE(review): the final argument line of the AppendExtra call appears
    to be elided from this excerpt.
    """
    r = rangelib.RangeSet((0, 1))
    srchash = self._HashBlocks(self.src, r)

    script.AppendExtra(('(range_sha1("%s", "%s") == "%s") || '
                        'abort("%s has been remounted R/W; '
                        'reflash device to reenable OTA updates");')
                       % (self.device, r.to_string_raw(), srchash,
# Re-export blockimgdiff's DataImage under this module's namespace so
# callers can use common.DataImage directly.
DataImage = blockimgdiff.DataImage


# map recovery.fstab's fs_types to mount/format "partition types"
def GetTypeAndDevice(mount_point, info):
  """Return (partition type, block device) for the given mount point.

  Looks mount_point up in info["fstab"] and maps the entry's filesystem
  type through PARTITION_TYPES.
  """
  entry = info["fstab"][mount_point]
  return (PARTITION_TYPES[entry.fs_type], entry.device)
def ParseCertificate(data):
  """Parse a PEM-format certificate.

  Collects the lines between the BEGIN/END CERTIFICATE markers and
  base64-decodes them into raw DER bytes.
  """
  # NOTE(review): the accumulator setup (cert/save) and the loop bodies
  # appear to be elided from this excerpt; confirm against the full source.
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
    if "--BEGIN CERTIFICATE--" in line:
  # NOTE(review): str.decode('base64') is Python-2-only.
  cert = "".join(cert).decode('base64')
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
  """Generate a binary patch that creates the recovery image starting
  with the boot image. (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.) Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images. info should be the dictionary returned by
  common.LoadInfoDict() on the input target_files.

  NOTE(review): the signature continuation (likely an info_dict=None
  parameter) and several scaffolding lines appear to be elided from
  this excerpt.
  """
  # Fall back to the global info dict when the caller didn't supply one.
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  diff_program = ["imgdiff"]
  # If the target files include recovery-resource.dat, feed it to imgdiff
  # as a bonus file and tell applypatch where to find it on the device.
  path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
  if os.path.exists(path):
    diff_program.append("-b")
    diff_program.append(path)
    bonus_args = "-b /system/etc/recovery-resource.dat"

  # Compute the recovery-from-boot patch and hand it to the caller's sink.
  d = Difference(recovery_img, boot_img, diff_program=diff_program)
  _, _, patch = d.ComputePatch()
  output_sink("recovery-from-boot.p", patch)

  # The following GetTypeAndDevice()s need to use the path in the target
  # info_dict instead of source_info_dict.
  boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
  recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)

  # First-boot shell script: if the installed recovery doesn't match the
  # expected sha1, re-create it by patching the boot image.
  sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
  log -t recovery "Recovery image already installed"
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release. Parse the init.rc file to find out where the
  # target-files expects it to be, and put it there.
  sh_location = "etc/install-recovery.sh"
  # NOTE(review): the try/for-loop scaffolding around this parse appears
  # elided in this excerpt (`line` and `except` have no visible context).
  with open(os.path.join(input_dir, "BOOT", "RAMDISK", "init.rc")) as f:
    m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
    sh_location = m.group(1)
    print "putting script in", sh_location
  except (OSError, IOError) as e:
    print "failed to read init.rc: %s" % (e,)

  output_sink(sh_location, sh)