
Merge WebKit at r73109: Initial merge by git.
android-x86/external-webkit.git
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
index b26bc6c..f4e92a6 100755
--- a/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -1,5 +1,6 @@
 #!/usr/bin/env python
 # Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
@@ -53,7 +54,6 @@ import logging
 import math
 import optparse
 import os
-import pdb
 import platform
 import Queue
 import random
@@ -66,17 +66,16 @@ import traceback
 
 from layout_package import dump_render_tree_thread
 from layout_package import json_layout_results_generator
+from layout_package import message_broker
 from layout_package import printing
 from layout_package import test_expectations
 from layout_package import test_failures
-from layout_package import test_files
+from layout_package import test_results
 from layout_package import test_results_uploader
-from test_types import image_diff
-from test_types import text_diff
-from test_types import test_type_base
 
-from webkitpy.common.system.executive import Executive
+from webkitpy.common.system import user
 from webkitpy.thirdparty import simplejson
+from webkitpy.tool import grammar
 
 import port
 
@@ -85,42 +84,27 @@ _log = logging.getLogger("webkitpy.layout_tests.run_webkit_tests")
 # Builder base URL where we have the archived test results.
 BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
 
+LAYOUT_TESTS_DIRECTORY = "LayoutTests" + os.sep
+
 TestExpectationsFile = test_expectations.TestExpectationsFile
 
 
-class TestInfo:
+class TestInput:
     """Groups information about a test for easy passing of data."""
 
-    def __init__(self, port, filename, timeout):
-        """Generates the URI and stores the filename and timeout for this test.
+    def __init__(self, filename, timeout):
+        """Holds the input parameters for a test.
         Args:
           filename: Full path to the test.
-          timeout: Timeout for running the test in TestShell.
+          timeout: Timeout in msecs the driver should use while running the test.
           """
+        # FIXME: filename should really be test_name as a relative path.
         self.filename = filename
-        self.uri = port.filename_to_uri(filename)
         self.timeout = timeout
-        # FIXME: Confusing that the file is .checksum and we call it "hash"
-        self._expected_hash_path = port.expected_filename(filename, '.checksum')
-        self._have_read_expected_hash = False
-        self._image_hash = None
-
-    def _read_image_hash(self):
-        try:
-            with codecs.open(self._expected_hash_path, "r", "ascii") as hash_file:
-                return hash_file.read()
-        except IOError, e:
-            if errno.ENOENT != e.errno:
-                raise
-
-    def image_hash(self):
-        # Read the image_hash lazily to reduce startup time.
-        # This class is accessed across threads, but only one thread should
-        # ever be dealing with any given TestInfo so no locking is needed.
-        if not self._have_read_expected_hash:
-            self._have_read_expected_hash = True
-            self._image_hash = self._read_image_hash()
-        return self._image_hash
+        # The image_hash is used to avoid doing an image dump if the
+        # checksums match. The image_hash is set later, and only if it is needed
+        # for the test.
+        self.image_hash = None
 
 
 class ResultSummary(object):
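
For orientation, a minimal sketch of how the new TestInput above is used (the path and timeout are made up, and the snippet assumes this module's context):

    # Illustrative only; mirrors _get_test_input_for_file() further down.
    test_file = "/checkout/LayoutTests/fast/js/example.html"  # hypothetical
    test_input = TestInput(test_file, 6000)       # timeout is in msecs
    # image_hash stays None until (and unless) pixel testing needs it; the
    # worker side is expected to fill it in from the stored checksum.
    assert test_input.image_hash is None
    assert test_input.timeout == 6000
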
@@ -256,51 +240,60 @@ class TestRunner:
     # in DumpRenderTree.
     DEFAULT_TEST_TIMEOUT_MS = 6 * 1000
 
-    def __init__(self, port, options, printer):
+    def __init__(self, port, options, printer, message_broker):
         """Initialize test runner data structures.
 
         Args:
           port: an object implementing port-specific
           options: a dictionary of command line options
           printer: a Printer object to record updates to.
+          message_broker: object used to communicate with workers.
         """
         self._port = port
         self._options = options
         self._printer = printer
+        self._message_broker = message_broker
 
         # disable wss server. need to install pyOpenSSL on buildbots.
         # self._websocket_secure_server = websocket_server.PyWebSocket(
         #        options.results_directory, use_tls=True, port=9323)
 
-        # a list of TestType objects
-        self._test_types = []
-
         # a set of test files, and the same tests as a list
         self._test_files = set()
         self._test_files_list = None
         self._result_queue = Queue.Queue()
-
         self._retrying = False
 
-        # Hack for dumping threads on the bots
-        self._last_thread_dump = None
-
-    def __del__(self):
-        _log.debug("flushing stdout")
-        sys.stdout.flush()
-        _log.debug("flushing stderr")
-        sys.stderr.flush()
-        _log.debug("stopping http server")
-        self._port.stop_http_server()
-        _log.debug("stopping websocket server")
-        self._port.stop_websocket_server()
-
-    def gather_file_paths(self, paths):
+    def collect_tests(self, args, last_unexpected_results):
         """Find all the files to test.
 
         Args:
-          paths: a list of globs to use instead of the defaults."""
-        self._test_files = test_files.gather_test_files(self._port, paths)
+          args: list of test arguments from the command line
+          last_unexpected_results: list of unexpected results to retest, if any
+
+        """
+        paths = [self._strip_test_dir_prefix(arg) for arg in args if arg and arg != '']
+        paths += last_unexpected_results
+        if self._options.test_list:
+            paths += read_test_files(self._options.test_list)
+        self._test_files = self._port.tests(paths)
+
+    def _strip_test_dir_prefix(self, path):
+        if path.startswith(LAYOUT_TESTS_DIRECTORY):
+            return path[len(LAYOUT_TESTS_DIRECTORY):]
+        return path
+
+    def lint(self):
+        # Creating the expectations for each platform/configuration pair does
+        # all the test list parsing and ensures the syntax is correct (e.g.
+        # no dupes).
+        for platform_name in self._port.test_platform_names():
+            self.parse_expectations(platform_name, is_debug_mode=True)
+            self.parse_expectations(platform_name, is_debug_mode=False)
+        self._printer.write("")
+        _log.info("If there are no fail messages, errors or exceptions, "
+                  "then the lint succeeded.")
+        return 0
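
A standalone sketch of what the prefix stripping used by collect_tests() does to command-line paths (same logic, reproduced outside the class so it can be run directly):

    import os

    LAYOUT_TESTS_DIRECTORY = "LayoutTests" + os.sep

    def strip_test_dir_prefix(path):
        # Same check as TestRunner._strip_test_dir_prefix() above.
        if path.startswith(LAYOUT_TESTS_DIRECTORY):
            return path[len(LAYOUT_TESTS_DIRECTORY):]
        return path

    assert (strip_test_dir_prefix(os.path.join("LayoutTests", "fast", "js", "x.html"))
            == os.path.join("fast", "js", "x.html"))
    assert (strip_test_dir_prefix(os.path.join("fast", "js", "x.html"))
            == os.path.join("fast", "js", "x.html"))
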
 
     def parse_expectations(self, test_platform_name, is_debug_mode):
         """Parse the expectations from the test_list files and return a data
@@ -317,7 +310,7 @@ class TestRunner:
             self._expectations = test_expectations.TestExpectations(
                 self._port, test_files, expectations_str, test_platform_name,
                 is_debug_mode, self._options.lint_test_files,
-                tests_are_present=True, overrides=overrides_str)
+                overrides=overrides_str)
             return self._expectations
         except SyntaxError, err:
             if self._options.lint_test_files:
@@ -336,8 +329,8 @@ class TestRunner:
         self._printer.print_expected("Found:  %d tests" %
                                      (len(self._test_files)))
         if not num_all_test_files:
-            _log.critical("No tests to run.")
-            sys.exit(1)
+            _log.critical('No tests to run.')
+            return None
 
         skipped = set()
         if num_all_test_files > 1 and not self._options.force:
@@ -366,7 +359,7 @@ class TestRunner:
                 assert(test_size > 0)
             except:
                 _log.critical("invalid chunk '%s'" % chunk_value)
-                sys.exit(1)
+                return None
 
             # Get the number of tests
             num_tests = len(test_files)
@@ -405,9 +398,8 @@ class TestRunner:
 
             # If we reached the end and we don't have enough tests, we run some
             # from the beginning.
-            if (self._options.run_chunk and
-                (slice_end - slice_start < chunk_len)):
-                extra = 1 + chunk_len - (slice_end - slice_start)
+            if slice_end - slice_start < chunk_len:
+                extra = chunk_len - (slice_end - slice_start)
                 extra_msg = ('   last chunk is partial, appending [0:%d]' %
                             extra)
                 self._printer.print_expected(extra_msg)
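
The wrap-around can be reproduced in isolation. How slice_start and slice_end are derived is not shown in this hunk, so the values below are purely illustrative; the point is the appended [0:extra] slice:

    # Ten hypothetical tests, with a chunk that runs past the end of the list.
    tests = ["t%d" % i for i in range(10)]
    chunk_len = 3
    slice_start, slice_end = 9, 10           # assumed to come from --run-chunk
    files = tests[slice_start:slice_end]     # ['t9']
    if slice_end - slice_start < chunk_len:
        extra = chunk_len - (slice_end - slice_start)   # 2
        files.extend(tests[0:extra])         # ['t9', 't0', 't1']
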
@@ -459,7 +451,7 @@ class TestRunner:
             # subtracted out of self._test_files, above), but we stub out the
             # results here so the statistics can remain accurate.
             for test in skip_chunk:
-                result = dump_render_tree_thread.TestResult(test,
+                result = test_results.TestResult(test,
                     failures=[], test_run_time=0, total_time_for_all_diffs=0,
                     time_for_diffs=0)
                 result.type = test_expectations.SKIP
@@ -468,16 +460,12 @@ class TestRunner:
 
         return result_summary
 
-    def add_test_type(self, test_type):
-        """Add a TestType to the TestRunner."""
-        self._test_types.append(test_type)
-
     def _get_dir_for_test_file(self, test_file):
         """Returns the highest-level directory by which to shard the given
         test file."""
-        index = test_file.rfind(os.sep + 'LayoutTests' + os.sep)
+        index = test_file.rfind(os.sep + LAYOUT_TESTS_DIRECTORY)
 
-        test_file = test_file[index + len('LayoutTests/'):]
+        test_file = test_file[index + len(LAYOUT_TESTS_DIRECTORY):]
         test_file_parts = test_file.split(os.sep, 1)
         directory = test_file_parts[0]
         test_file = test_file_parts[1]
@@ -496,95 +484,83 @@ class TestRunner:
 
         return return_value
 
-    def _get_test_info_for_file(self, test_file):
-        """Returns the appropriate TestInfo object for the file. Mostly this
+    def _get_test_input_for_file(self, test_file):
+        """Returns the appropriate TestInput object for the file. Mostly this
         is used for looking up the timeout value (in ms) to use for the given
         test."""
-        if self._expectations.has_modifier(test_file, test_expectations.SLOW):
-            return TestInfo(self._port, test_file,
-                            self._options.slow_time_out_ms)
-        return TestInfo(self._port, test_file, self._options.time_out_ms)
-
-    def _get_test_file_queue(self, test_files):
-        """Create the thread safe queue of lists of (test filenames, test URIs)
-        tuples. Each TestShellThread pulls a list from this queue and runs
-        those tests in order before grabbing the next available list.
-
-        Shard the lists by directory. This helps ensure that tests that depend
-        on each other (aka bad tests!) continue to run together as most
-        cross-tests dependencies tend to occur within the same directory.
+        if self._test_is_slow(test_file):
+            return TestInput(test_file, self._options.slow_time_out_ms)
+        return TestInput(test_file, self._options.time_out_ms)
+
+    def _test_requires_lock(self, test_file):
+        """Return True if the test needs to be locked when
+        running multiple copies of NRWTs."""
+        split_path = test_file.split(os.sep)
+        return 'http' in split_path or 'websocket' in split_path
+
+    def _test_is_slow(self, test_file):
+        return self._expectations.has_modifier(test_file,
+                                               test_expectations.SLOW)
+
+    def _shard_tests(self, test_files, use_real_shards):
+        """Groups tests into batches.
+        This helps ensure that tests that depend on each other (aka bad tests!)
+        continue to run together as most cross-tests dependencies tend to
+        occur within the same directory. If use_real_shards is false, we
+        put each (non-HTTP/websocket) test into its own shard for maximum
+        concurrency instead of trying to do any sort of real sharding.
 
         Return:
-          The Queue of lists of TestInfo objects.
+            A list of lists of TestInput objects.
         """
+        # FIXME: when we added http locking, we changed how this works such
+        # that we always lump all of the HTTP tests into a single shard.
+        # That will slow down experimental-fully-parallel, but it's unclear
+        # what the best alternative is, short of completely revamping how we
+        # track when to grab the lock.
 
-        if (self._options.experimental_fully_parallel or
-            self._is_single_threaded()):
-            filename_queue = Queue.Queue()
-            for test_file in test_files:
-                filename_queue.put(
-                    ('.', [self._get_test_info_for_file(test_file)]))
-            return filename_queue
-
-        tests_by_dir = {}
-        for test_file in test_files:
-            directory = self._get_dir_for_test_file(test_file)
-            tests_by_dir.setdefault(directory, [])
-            tests_by_dir[directory].append(
-                self._get_test_info_for_file(test_file))
-
-        # Sort by the number of tests in the dir so that the ones with the
-        # most tests get run first in order to maximize parallelization.
-        # Number of tests is a good enough, but not perfect, approximation
-        # of how long that set of tests will take to run. We can't just use
-        # a PriorityQueue until we move # to Python 2.6.
         test_lists = []
-        http_tests = None
-        for directory in tests_by_dir:
-            test_list = tests_by_dir[directory]
-            # Keep the tests in alphabetical order.
-            # TODO: Remove once tests are fixed so they can be run in any
-            # order.
-            test_list.reverse()
-            test_list_tuple = (directory, test_list)
-            if directory == 'LayoutTests' + os.sep + 'http':
-                http_tests = test_list_tuple
-            else:
+        tests_to_http_lock = []
+        if not use_real_shards:
+            for test_file in test_files:
+                test_input = self._get_test_input_for_file(test_file)
+                if self._test_requires_lock(test_file):
+                    tests_to_http_lock.append(test_input)
+                else:
+                    test_lists.append((".", [test_input]))
+        else:
+            tests_by_dir = {}
+            for test_file in test_files:
+                directory = self._get_dir_for_test_file(test_file)
+                test_input = self._get_test_input_for_file(test_file)
+                if self._test_requires_lock(test_file):
+                    tests_to_http_lock.append(test_input)
+                else:
+                    tests_by_dir.setdefault(directory, [])
+                    tests_by_dir[directory].append(test_input)
+            # Sort by the number of tests in the dir so that the ones with the
+            # most tests get run first in order to maximize parallelization.
+            # Number of tests is a good enough, but not perfect, approximation
+            # of how long that set of tests will take to run. We can't just use
+            # a PriorityQueue until we move to Python 2.6.
+            for directory in tests_by_dir:
+                test_list = tests_by_dir[directory]
+                # Keep the tests in alphabetical order.
+                # FIXME: Remove once tests are fixed so they can be run in any
+                # order.
+                test_list.reverse()
+                test_list_tuple = (directory, test_list)
                 test_lists.append(test_list_tuple)
-        test_lists.sort(lambda a, b: cmp(len(b[1]), len(a[1])))
+            test_lists.sort(lambda a, b: cmp(len(b[1]), len(a[1])))
 
         # Put the http tests first. There are only a couple hundred of them,
         # but each http test takes a very long time to run, so sorting by the
         # number of tests doesn't accurately capture how long they take to run.
-        if http_tests:
-            test_lists.insert(0, http_tests)
+        if tests_to_http_lock:
+            tests_to_http_lock.reverse()
+            test_lists.insert(0, ("tests_to_http_lock", tests_to_http_lock))
 
-        filename_queue = Queue.Queue()
-        for item in test_lists:
-            filename_queue.put(item)
-        return filename_queue
-
-    def _get_dump_render_tree_args(self, index):
-        """Returns the tuple of arguments for tests and for DumpRenderTree."""
-        shell_args = []
-        test_args = test_type_base.TestArguments()
-        png_path = None
-        if self._options.pixel_tests:
-            png_path = os.path.join(self._options.results_directory,
-                                    "png_result%s.png" % index)
-            shell_args.append("--pixel-tests=" + png_path)
-            test_args.png_path = png_path
-
-        test_args.new_baseline = self._options.new_baseline
-        test_args.reset_results = self._options.reset_results
-
-        if self._options.startup_dialog:
-            shell_args.append('--testshell-startup-dialog')
-
-        if self._options.gp_fault_error_box:
-            shell_args.append('--gp-fault-error-box')
-
-        return test_args, png_path, shell_args
+        return test_lists
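
A condensed, runnable sketch of the use_real_shards branch above (file names are made up, '/' is used instead of os.sep, and the directory lookup is simplified to the first path component):

    tests = ["fast/js/a.html", "fast/js/b.html", "svg/c.html",
             "http/tests/d.html"]

    tests_by_dir, tests_to_http_lock = {}, []
    for test in tests:
        if "http" in test.split("/"):        # cf. _test_requires_lock()
            tests_to_http_lock.append(test)
        else:
            tests_by_dir.setdefault(test.split("/", 1)[0], []).append(test)

    # Biggest directories first, HTTP-locked tests ahead of everything else.
    shards = sorted(tests_by_dir.items(), key=lambda item: -len(item[1]))
    if tests_to_http_lock:
        shards.insert(0, ("tests_to_http_lock", tests_to_http_lock))
    # shards == [('tests_to_http_lock', ['http/tests/d.html']),
    #            ('fast', ['fast/js/a.html', 'fast/js/b.html']),
    #            ('svg', ['svg/c.html'])]
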
 
     def _contains_tests(self, subdir):
         for test_file in self._test_files:
@@ -592,40 +568,8 @@ class TestRunner:
                 return True
         return False
 
-    def _instantiate_dump_render_tree_threads(self, test_files,
-                                              result_summary):
-        """Instantitates and starts the TestShellThread(s).
-
-        Return:
-          The list of threads.
-        """
-        filename_queue = self._get_test_file_queue(test_files)
-
-        # Instantiate TestShellThreads and start them.
-        threads = []
-        for i in xrange(int(self._options.child_processes)):
-            # Create separate TestTypes instances for each thread.
-            test_types = []
-            for test_type in self._test_types:
-                test_types.append(test_type(self._port,
-                                    self._options.results_directory))
-
-            test_args, png_path, shell_args = \
-                self._get_dump_render_tree_args(i)
-            thread = dump_render_tree_thread.TestShellThread(self._port,
-                filename_queue, self._result_queue, test_types, test_args,
-                png_path, shell_args, self._options)
-            if self._is_single_threaded():
-                thread.run_in_main_thread(self, result_summary)
-            else:
-                thread.start()
-            threads.append(thread)
-
-        return threads
-
-    def _is_single_threaded(self):
-        """Returns whether we should run all the tests in the main thread."""
-        return int(self._options.child_processes) == 1
+    def _num_workers(self):
+        return int(self._options.child_processes)
 
     def _run_tests(self, file_list, result_summary):
         """Runs the tests in the file_list.
@@ -641,60 +585,48 @@ class TestRunner:
               in the form {filename:filename, test_run_time:test_run_time}
             result_summary: summary object to populate with the results
         """
-        # FIXME: We should use webkitpy.tool.grammar.pluralize here.
-        plural = ""
-        if self._options.child_processes > 1:
-            plural = "s"
-        self._printer.print_update('Starting %s%s ...' %
-                                   (self._port.driver_name(), plural))
-        threads = self._instantiate_dump_render_tree_threads(file_list,
-                                                             result_summary)
+
+        self._printer.print_update('Sharding tests ...')
+        num_workers = self._num_workers()
+        test_lists = self._shard_tests(file_list,
+            num_workers > 1 and not self._options.experimental_fully_parallel)
+        filename_queue = Queue.Queue()
+        for item in test_lists:
+            filename_queue.put(item)
+
+        self._printer.print_update('Starting %s ...' %
+                                   grammar.pluralize('worker', num_workers))
+        message_broker = self._message_broker
+        self._current_filename_queue = filename_queue
+        self._current_result_summary = result_summary
+
+        if not self._options.dry_run:
+            threads = message_broker.start_workers(self)
+        else:
+            threads = {}
+
         self._printer.print_update("Starting testing ...")
+        keyboard_interrupted = False
+        if not self._options.dry_run:
+            try:
+                message_broker.run_message_loop()
+            except KeyboardInterrupt:
+                _log.info("Interrupted, exiting")
+                message_broker.cancel_workers()
+                keyboard_interrupted = True
+            except:
+                # Unexpected exception; don't try to clean up workers.
+                _log.info("Exception raised, exiting")
+                raise
 
-        keyboard_interrupted = self._wait_for_threads_to_finish(threads,
-                                                                result_summary)
-        (thread_timings, test_timings, individual_test_timings) = \
+        thread_timings, test_timings, individual_test_timings = \
             self._collect_timing_info(threads)
 
         return (keyboard_interrupted, thread_timings, test_timings,
                 individual_test_timings)
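
The shards are handed to workers over a thread-safe queue; the real worker loop lives in message_broker/dump_render_tree_thread, but the consumption pattern is roughly the following (the "run the test" step is stubbed out here):

    import Queue
    import threading

    def worker(filename_queue):
        # Each worker repeatedly pulls one (shard_name, [test_input, ...]) tuple.
        while True:
            try:
                shard_name, test_inputs = filename_queue.get_nowait()
            except Queue.Empty:
                return
            for test_input in test_inputs:
                pass    # the real worker drives DumpRenderTree here

    filename_queue = Queue.Queue()
    filename_queue.put((".", ["fast/js/example.html"]))   # hypothetical shard
    workers = [threading.Thread(target=worker, args=(filename_queue,))
               for _ in xrange(2)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
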
 
-    def _wait_for_threads_to_finish(self, threads, result_summary):
-        keyboard_interrupted = False
-        try:
-            # Loop through all the threads waiting for them to finish.
-            some_thread_is_alive = True
-            while some_thread_is_alive:
-                some_thread_is_alive = False
-                t = time.time()
-                for thread in threads:
-                    exception_info = thread.exception_info()
-                    if exception_info is not None:
-                        # Re-raise the thread's exception here to make it
-                        # clear that testing was aborted. Otherwise,
-                        # the tests that did not run would be assumed
-                        # to have passed.
-                        raise (exception_info[0], exception_info[1],
-                               exception_info[2])
-
-                    if thread.isAlive():
-                        some_thread_is_alive = True
-                        next_timeout = thread.next_timeout()
-                        if (next_timeout and t > next_timeout):
-                            _log_wedged_thread(thread)
-                            thread.clear_next_timeout()
-
-                self.update_summary(result_summary)
-
-                if some_thread_is_alive:
-                    time.sleep(0.1)
-
-        except KeyboardInterrupt:
-            keyboard_interrupted = True
-            for thread in threads:
-                thread.cancel()
-
-        return keyboard_interrupted
+    def update(self):
+        self.update_summary(self._current_result_summary)
 
     def _collect_timing_info(self, threads):
         test_timings = {}
@@ -705,7 +637,7 @@ class TestRunner:
             thread_timings.append({'name': thread.getName(),
                                    'num_tests': thread.get_num_tests(),
                                    'total_time': thread.get_total_time()})
-            test_timings.update(thread.get_directory_timing_stats())
+            test_timings.update(thread.get_test_group_timing_stats())
             individual_test_timings.extend(thread.get_test_results())
 
         return (thread_timings, test_timings, individual_test_timings)
@@ -714,6 +646,44 @@ class TestRunner:
         """Returns whether the test runner needs an HTTP server."""
         return self._contains_tests(self.HTTP_SUBDIR)
 
+    def needs_websocket(self):
+        """Returns whether the test runner needs a WEBSOCKET server."""
+        return self._contains_tests(self.WEBSOCKET_SUBDIR)
+
+    def set_up_run(self):
+        """Configures the system to be ready to run tests.
+
+        Returns a ResultSummary object if we should continue to run tests,
+        or None if we should abort.
+
+        """
+        # This must be started before we check the system dependencies,
+        # since the helper may do things to make the setup correct.
+        self._printer.print_update("Starting helper ...")
+        self._port.start_helper()
+
+        # Check that the system dependencies (themes, fonts, ...) are correct.
+        if not self._options.nocheck_sys_deps:
+            self._printer.print_update("Checking system dependencies ...")
+            if not self._port.check_sys_deps(self.needs_http()):
+                self._port.stop_helper()
+                return None
+
+        if self._options.clobber_old_results:
+            self._clobber_old_results()
+
+        # Create the output directory if it doesn't already exist.
+        self._port.maybe_make_directory(self._options.results_directory)
+
+        self._port.setup_test_run()
+
+        self._printer.print_update("Preparing tests ...")
+        result_summary = self.prepare_lists_and_print_output()
+        if not result_summary:
+            return None
+
+        return result_summary
+
     def run(self, result_summary):
         """Run all our tests on all our test files.
 
@@ -726,19 +696,12 @@ class TestRunner:
         Return:
           The number of unexpected results (0 == success)
         """
-        if not self._test_files:
-            return 0
-        start_time = time.time()
-
-        if self.needs_http():
-            self._printer.print_update('Starting HTTP server ...')
+        # collect_tests() must have been called first to initialize us.
+        # If we didn't find any files to test, we've errored out already in
+        # prepare_lists_and_print_output().
+        assert(len(self._test_files))
 
-            self._port.start_http_server()
-
-        if self._contains_tests(self.WEBSOCKET_SUBDIR):
-            self._printer.print_update('Starting WebSocket server ...')
-            self._port.start_websocket_server()
-            # self._websocket_secure_server.Start()
+        start_time = time.time()
 
         keyboard_interrupted, thread_timings, test_timings, \
             individual_test_timings = (
@@ -779,17 +742,18 @@ class TestRunner:
             self._expectations, result_summary, retry_summary)
         self._printer.print_unexpected_results(unexpected_results)
 
-        # Write the same data to log files.
-        self._write_json_files(unexpected_results, result_summary,
-                             individual_test_timings)
-
-        # Upload generated JSON files to appengine server.
-        self._upload_json_files()
+        if (self._options.record_results and not self._options.dry_run and
+            not keyboard_interrupted):
+            # Write the same data to log files and upload generated JSON files
+            # to appengine server.
+            self._upload_json_files(unexpected_results, result_summary,
+                                    individual_test_timings)
 
         # Write the summary to disk (results.html) and display it if requested.
-        wrote_results = self._write_results_html_file(result_summary)
-        if self._options.show_results and wrote_results:
-            self._show_results_html_file()
+        if not self._options.dry_run:
+            wrote_results = self._write_results_html_file(result_summary)
+            if self._options.show_results and wrote_results:
+                self._show_results_html_file()
 
         # Now that we've completed all the processing we can, we re-raise
         # a KeyboardInterrupt if necessary so the caller can handle it.
@@ -800,11 +764,21 @@ class TestRunner:
         # bot red for those.
         return unexpected_results['num_regressions']
 
+    def clean_up_run(self):
+        """Restores the system after we're done running tests."""
+
+        _log.debug("flushing stdout")
+        sys.stdout.flush()
+        _log.debug("flushing stderr")
+        sys.stderr.flush()
+        _log.debug("stopping helper")
+        self._port.stop_helper()
+
     def update_summary(self, result_summary):
         """Update the summary and print results with any completed tests."""
         while True:
             try:
-                result = self._result_queue.get_nowait()
+                result = test_results.TestResult.loads(self._result_queue.get_nowait())
             except Queue.Empty:
                 return
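
update_summary() now expects serialized TestResult objects on the queue so they can cross the worker boundary. A rough round-trip sketch, assuming TestResult also exposes a dumps() counterpart to the loads() used above (the constructor arguments mirror the stubbed-out SKIP results earlier in this file; the test name is hypothetical):

    import Queue
    from layout_package import test_results

    result_queue = Queue.Queue()
    result = test_results.TestResult("fast/js/example.html",
                                     failures=[], test_run_time=0,
                                     total_time_for_all_diffs=0,
                                     time_for_diffs=0)
    result_queue.put(result.dumps())        # worker side; dumps() is assumed
    same = test_results.TestResult.loads(result_queue.get_nowait())
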
 
@@ -818,6 +792,20 @@ class TestRunner:
             self._printer.print_progress(result_summary, self._retrying,
                                          self._test_files_list)
 
+    def _clobber_old_results(self):
+        # Just clobber the actual test results directories since the other
+        # files in the results directory are explicitly used for cross-run
+        # tracking.
+        self._printer.print_update("Clobbering old results in %s" %
+                                   self._options.results_directory)
+        layout_tests_dir = self._port.layout_tests_dir()
+        possible_dirs = self._port.test_dirs()
+        for dirname in possible_dirs:
+            if os.path.isdir(os.path.join(layout_tests_dir, dirname)):
+                shutil.rmtree(os.path.join(self._options.results_directory,
+                                           dirname),
+                              ignore_errors=True)
+
     def _get_failures(self, result_summary, include_crashes):
         """Filters a dict of results and returns only the failures.
 
@@ -839,10 +827,10 @@ class TestRunner:
 
         return failed_results
 
-    def _write_json_files(self, unexpected_results, result_summary,
+    def _upload_json_files(self, unexpected_results, result_summary,
                         individual_test_timings):
         """Writes the results of the test run as JSON files into the results
-        dir.
+        dir and uploads the files to the appengine server.
 
         There are three different files written into the results dir:
           unexpected_results.json: A short list of any unexpected results.
@@ -871,44 +859,55 @@ class TestRunner:
         with codecs.open(expectations_path, "w", "utf-8") as file:
             file.write(u"ADD_EXPECTATIONS(%s);" % expectations_json)
 
-        json_layout_results_generator.JSONLayoutResultsGenerator(
+        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
             self._port, self._options.builder_name, self._options.build_name,
             self._options.build_number, self._options.results_directory,
             BUILDER_BASE_URL, individual_test_timings,
             self._expectations, result_summary, self._test_files_list,
             not self._options.upload_full_results,
-            self._options.test_results_server)
+            self._options.test_results_server,
+            "layout-tests",
+            self._options.master_name)
 
         _log.debug("Finished writing JSON files.")
 
-    def _upload_json_files(self):
-        if not self._options.test_results_server:
-            return
-
-        _log.info("Uploading JSON files for builder: %s",
-                   self._options.builder_name)
-
-        attrs = [("builder", self._options.builder_name)]
         json_files = ["expectations.json"]
         if self._options.upload_full_results:
             json_files.append("results.json")
         else:
             json_files.append("incremental_results.json")
 
-        files = [(file, os.path.join(self._options.results_directory, file))
-            for file in json_files]
+        generator.upload_json_files(json_files)
+
+    def _print_config(self):
+        """Prints the configuration for the test run."""
+        p = self._printer
+        p.print_config("Using port '%s'" % self._port.name())
+        p.print_config("Placing test results in %s" %
+                       self._options.results_directory)
+        if self._options.new_baseline:
+            p.print_config("Placing new baselines in %s" %
+                           self._port.baseline_path())
+        p.print_config("Using %s build" % self._options.configuration)
+        if self._options.pixel_tests:
+            p.print_config("Pixel tests enabled")
+        else:
+            p.print_config("Pixel tests disabled")
 
-        uploader = test_results_uploader.TestResultsUploader(
-            self._options.test_results_server)
-        try:
-            # Set uploading timeout in case appengine server is having problem.
-            # 120 seconds are more than enough to upload test results.
-            uploader.upload(attrs, files, 120)
-        except Exception, err:
-            _log.error("Upload failed: %s" % err)
-            return
+        p.print_config("Regular timeout: %s, slow test timeout: %s" %
+                       (self._options.time_out_ms,
+                        self._options.slow_time_out_ms))
 
-        _log.info("JSON files uploaded.")
+        if self._num_workers() == 1:
+            p.print_config("Running one %s" % self._port.driver_name())
+        else:
+            p.print_config("Running %s %ss in parallel" %
+                           (self._options.child_processes,
+                            self._port.driver_name()))
+        p.print_config('Command line: ' +
+                       ' '.join(self._port.driver_cmd_line()))
+        p.print_config("Worker model: %s" % self._options.worker_model)
+        p.print_config("")
 
     def _print_expected_results_of_type(self, result_summary,
                                         result_type, result_type_str):
@@ -922,16 +921,13 @@ class TestRunner:
         tests = self._expectations.get_tests_with_result_type(result_type)
         now = result_summary.tests_by_timeline[test_expectations.NOW]
         wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX]
-        defer = result_summary.tests_by_timeline[test_expectations.DEFER]
 
         # We use a fancy format string in order to print the data out in a
         # nicely-aligned table.
-        fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd defer, %%%dd wontfix)"
-                  % (self._num_digits(now), self._num_digits(defer),
-                  self._num_digits(wontfix)))
+        fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)"
+                  % (self._num_digits(now), self._num_digits(wontfix)))
         self._printer.print_expected(fmtstr %
-            (len(tests), result_type_str, len(tests & now),
-             len(tests & defer), len(tests & wontfix)))
+            (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))
 
     def _num_digits(self, num):
         """Returns the number of digits needed to represent the length of a
@@ -1025,8 +1021,7 @@ class TestRunner:
         for test_tuple in individual_test_timings:
             filename = test_tuple.filename
             is_timeout_crash_or_slow = False
-            if self._expectations.has_modifier(filename,
-                                               test_expectations.SLOW):
+            if self._test_is_slow(filename):
                 is_timeout_crash_or_slow = True
                 slow_tests.append(test_tuple)
 
@@ -1152,12 +1147,7 @@ class TestRunner:
                      (passed, total, pct_passed))
         self._printer.print_actual("")
         self._print_result_summary_entry(result_summary,
-            test_expectations.NOW, "Tests to be fixed for the current release")
-
-        self._printer.print_actual("")
-        self._print_result_summary_entry(result_summary,
-            test_expectations.DEFER,
-            "Tests we'll fix in the future if they fail (DEFER)")
+            test_expectations.NOW, "Tests to be fixed")
 
         self._printer.print_actual("")
         self._print_result_summary_entry(result_summary,
@@ -1212,7 +1202,8 @@ class TestRunner:
             page += u"<p><a href='%s'>%s</a><br />\n" % (test_url, test_name)
             test_failures = failures.get(test_file, [])
             for failure in test_failures:
-                page += u"&nbsp;&nbsp;%s<br/>" % failure.result_html_output(test_name)
+                page += (u"&nbsp;&nbsp;%s<br/>" %
+                         failure.result_html_output(test_name))
             page += "</p>\n"
         page += "</body></html>\n"
         return page
@@ -1257,20 +1248,27 @@ class TestRunner:
 def read_test_files(files):
     tests = []
     for file in files:
-        # FIXME: This could be cleaner using a list comprehension.
-        for line in codecs.open(file, "r", "utf-8"):
-            line = test_expectations.strip_comments(line)
-            if line:
-                tests.append(line)
+        try:
+            with codecs.open(file, 'r', 'utf-8') as file_contents:
+                # FIXME: This could be cleaner using a list comprehension.
+                for line in file_contents:
+                    line = test_expectations.strip_comments(line)
+                    if line:
+                        tests.append(line)
+        except IOError, e:
+            if e.errno == errno.ENOENT:
+                _log.critical('')
+                _log.critical('--test-list file "%s" not found' % file)
+            raise
     return tests
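
A small usage sketch for the rewritten read_test_files(); the file name is throwaway, and strip_comments() is assumed to drop //-style comment text and blank lines as it does for test_expectations files:

    import codecs

    with codecs.open("/tmp/example_test_list.txt", "w", "utf-8") as f:
        f.write(u"// comment lines are stripped\n")
        f.write(u"fast/js/example.html\n")
        f.write(u"\n")

    tests = read_test_files(["/tmp/example_test_list.txt"])
    # Expected: tests == ["fast/js/example.html"]
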
 
 
-def run(port_obj, options, args, regular_output=sys.stderr,
+def run(port, options, args, regular_output=sys.stderr,
         buildbot_output=sys.stdout):
     """Run the tests.
 
     Args:
-      port_obj: Port object for port-specific behavior
+      port: Port object for port-specific behavior
       options: a dictionary of command line options
       args: a list of sub directories or files to test
       regular_output: a stream-like object that we can send logging/debug
@@ -1280,24 +1278,74 @@ def run(port_obj, options, args, regular_output=sys.stderr,
     Returns:
       the number of unexpected results that occurred, or -1 if there is an
           error.
-    """
-
-    # Configure the printing subsystem for printing output, logging debug
-    # info, and tracing tests.
 
-    if not options.child_processes:
-        # FIXME: Investigate perf/flakiness impact of using cpu_count + 1.
-        options.child_processes = port_obj.default_child_processes()
+    """
+    _set_up_derived_options(port, options)
 
-    printer = printing.Printer(port_obj, options, regular_output=regular_output,
-        buildbot_output=buildbot_output,
-        child_processes=int(options.child_processes),
-        is_fully_parallel=options.experimental_fully_parallel)
+    printer = printing.Printer(port, options, regular_output, buildbot_output,
+        int(options.child_processes), options.experimental_fully_parallel)
     if options.help_printing:
         printer.help_printing()
+        printer.cleanup()
+        return 0
+
+    last_unexpected_results = _gather_unexpected_results(options)
+    if options.print_last_failures:
+        printer.write("\n".join(last_unexpected_results) + "\n")
+        printer.cleanup()
         return 0
 
-    executive = Executive()
+    broker = message_broker.get(port, options)
+
+    # We wrap any parts of the run that are slow or likely to raise exceptions
+    # in a try/finally to ensure that we clean up the logging configuration.
+    num_unexpected_results = -1
+    try:
+        test_runner = TestRunner(port, options, printer, broker)
+        test_runner._print_config()
+
+        printer.print_update("Collecting tests ...")
+        try:
+            test_runner.collect_tests(args, last_unexpected_results)
+        except IOError, e:
+            if e.errno == errno.ENOENT:
+                return -1
+            raise
+
+        printer.print_update("Parsing expectations ...")
+        if options.lint_test_files:
+            return test_runner.lint()
+        test_runner.parse_expectations(port.test_platform_name(),
+                                       options.configuration == 'Debug')
+
+        printer.print_update("Checking build ...")
+        if not port.check_build(test_runner.needs_http()):
+            _log.error("Build check failed")
+            return -1
+
+        result_summary = test_runner.set_up_run()
+        if result_summary:
+            num_unexpected_results = test_runner.run(result_summary)
+            test_runner.clean_up_run()
+            _log.debug("Testing completed, Exit status: %d" %
+                       num_unexpected_results)
+    finally:
+        broker.cleanup()
+        printer.cleanup()
+
+    return num_unexpected_results
+
+
+def _set_up_derived_options(port_obj, options):
+    """Sets the options values that depend on other options values."""
+
+    if options.worker_model == 'inline':
+        if options.child_processes and int(options.child_processes) > 1:
+            _log.warning("--worker-model=inline overrides --child-processes")
+        options.child_processes = "1"
+    if not options.child_processes:
+        options.child_processes = os.environ.get("WEBKIT_TEST_CHILD_PROCESSES",
+                                                 str(port_obj.default_child_processes()))
 
     if not options.configuration:
         options.configuration = port_obj.default_configuration()
@@ -1308,39 +1356,12 @@ def run(port_obj, options, args, regular_output=sys.stderr,
     if not options.use_apache:
         options.use_apache = sys.platform in ('darwin', 'linux2')
 
-    if options.results_directory.startswith("/"):
-        # Assume it's an absolute path and normalize.
-        options.results_directory = port_obj.get_absolute_path(
-            options.results_directory)
-    else:
-        # If it's a relative path, make the output directory relative to
-        # Debug or Release.
+    if not os.path.isabs(options.results_directory):
+        # This normalizes the path to the build dir.
+        # FIXME: how this happens is not at all obvious; this is a dumb
+        # interface and should be cleaned up.
         options.results_directory = port_obj.results_directory()
 
-    last_unexpected_results = []
-    if options.print_last_failures or options.retest_last_failures:
-        unexpected_results_filename = os.path.join(
-           options.results_directory, "unexpected_results.json")
-        with codecs.open(unexpected_results_filename, "r", "utf-8") as file:
-            results = simplejson.load(file)
-        last_unexpected_results = results['tests'].keys()
-        if options.print_last_failures:
-            printer.write("\n".join(last_unexpected_results) + "\n")
-            return 0
-
-    if options.clobber_old_results:
-        # Just clobber the actual test results directories since the other
-        # files in the results directory are explicitly used for cross-run
-        # tracking.
-        printer.print_update("Clobbering old results in %s" %
-                             options.results_directory)
-        layout_tests_dir = port_obj.layout_tests_dir()
-        possible_dirs = os.listdir(layout_tests_dir)
-        for dirname in possible_dirs:
-            if os.path.isdir(os.path.join(layout_tests_dir, dirname)):
-                shutil.rmtree(os.path.join(options.results_directory, dirname),
-                              ignore_errors=True)
-
     if not options.time_out_ms:
         if options.configuration == "Debug":
             options.time_out_ms = str(2 * TestRunner.DEFAULT_TEST_TIMEOUT_MS)
@@ -1348,92 +1369,18 @@ def run(port_obj, options, args, regular_output=sys.stderr,
             options.time_out_ms = str(TestRunner.DEFAULT_TEST_TIMEOUT_MS)
 
     options.slow_time_out_ms = str(5 * int(options.time_out_ms))
-    printer.print_config("Regular timeout: %s, slow test timeout: %s" %
-                   (options.time_out_ms, options.slow_time_out_ms))
-
-    if int(options.child_processes) == 1:
-        printer.print_config("Running one %s" % port_obj.driver_name())
-    else:
-        printer.print_config("Running %s %ss in parallel" % (
-                       options.child_processes, port_obj.driver_name()))
-
-    # Include all tests if none are specified.
-    new_args = []
-    for arg in args:
-        if arg and arg != '':
-            new_args.append(arg)
-
-    paths = new_args
-    if not paths:
-        paths = []
-    paths += last_unexpected_results
-    if options.test_list:
-        paths += read_test_files(options.test_list)
-
-    # Create the output directory if it doesn't already exist.
-    port_obj.maybe_make_directory(options.results_directory)
-    printer.print_update("Collecting tests ...")
-
-    test_runner = TestRunner(port_obj, options, printer)
-    test_runner.gather_file_paths(paths)
-
-    if options.lint_test_files:
-        # Creating the expecations for each platform/configuration pair does
-        # all the test list parsing and ensures it's correct syntax (e.g. no
-        # dupes).
-        for platform_name in port_obj.test_platform_names():
-            test_runner.parse_expectations(platform_name, is_debug_mode=True)
-            test_runner.parse_expectations(platform_name, is_debug_mode=False)
-        printer.write("")
-        _log.info("If there are no fail messages, errors or exceptions, "
-                  "then the lint succeeded.")
-        return 0
-
-    printer.print_config("Using port '%s'" % port_obj.name())
-    printer.print_config("Placing test results in %s" %
-                         options.results_directory)
-    if options.new_baseline:
-        printer.print_config("Placing new baselines in %s" %
-                             port_obj.baseline_path())
-    printer.print_config("Using %s build" % options.configuration)
-    if options.pixel_tests:
-        printer.print_config("Pixel tests enabled")
-    else:
-        printer.print_config("Pixel tests disabled")
-    printer.print_config("")
-
-    printer.print_update("Parsing expectations ...")
-    test_runner.parse_expectations(port_obj.test_platform_name(),
-                                   options.configuration == 'Debug')
-
-    printer.print_update("Checking build ...")
-    if not port_obj.check_build(test_runner.needs_http()):
-        return -1
-
-    printer.print_update("Starting helper ...")
-    port_obj.start_helper()
-
-    # Check that the system dependencies (themes, fonts, ...) are correct.
-    if not options.nocheck_sys_deps:
-        printer.print_update("Checking system dependencies ...")
-        if not port_obj.check_sys_deps(test_runner.needs_http()):
-            return -1
-
-    printer.print_update("Preparing tests ...")
-    result_summary = test_runner.prepare_lists_and_print_output()
-
-    port_obj.setup_test_run()
-
-    test_runner.add_test_type(text_diff.TestTextDiff)
-    if options.pixel_tests:
-        test_runner.add_test_type(image_diff.ImageDiff)
 
-    num_unexpected_results = test_runner.run(result_summary)
 
-    port_obj.stop_helper()
-
-    _log.debug("Exit status: %d" % num_unexpected_results)
-    return num_unexpected_results
+def _gather_unexpected_results(options):
+    """Returns the unexpected results from the previous run, if any."""
+    last_unexpected_results = []
+    if options.print_last_failures or options.retest_last_failures:
+        unexpected_results_filename = os.path.join(
+            options.results_directory, "unexpected_results.json")
+        with codecs.open(unexpected_results_filename, "r", "utf-8") as file:
+            results = simplejson.load(file)
+        last_unexpected_results = results['tests'].keys()
+    return last_unexpected_results
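
_gather_unexpected_results() only touches the top-level "tests" dictionary of unexpected_results.json; a fabricated minimal example of what it consumes (the per-test values are ignored here, so their real shape is glossed over):

    from webkitpy.thirdparty import simplejson

    fake_json = u'{"tests": {"fast/js/example.html": {}, "svg/example.svg": {}}}'
    results = simplejson.loads(fake_json)
    last_unexpected_results = results['tests'].keys()
    # e.g. ["fast/js/example.html", "svg/example.svg"] (dict order not guaranteed)
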
 
 
 def _compat_shim_callback(option, opt_str, value, parser):
@@ -1477,12 +1424,30 @@ def parse_args(args=None):
             default=False, help="create a dialog on DumpRenderTree startup"),
         optparse.make_option("--gp-fault-error-box", action="store_true",
             default=False, help="enable Windows GP fault error box"),
+        optparse.make_option("--multiple-loads",
+            type="int", help="turn on multiple loads of each test"),
+        optparse.make_option("--js-flags",
+            type="string", help="JavaScript flags to pass to tests"),
         optparse.make_option("--nocheck-sys-deps", action="store_true",
             default=False,
             help="Don't check the system dependencies (themes)"),
         optparse.make_option("--use-drt", action="store_true",
-            default=False,
+            default=None,
             help="Use DumpRenderTree instead of test_shell"),
+        optparse.make_option("--accelerated-compositing",
+            action="store_true",
+            help="Use hardware-accelated compositing for rendering"),
+        optparse.make_option("--no-accelerated-compositing",
+            action="store_false",
+            dest="accelerated_compositing",
+            help="Don't use hardware-accelerated compositing for rendering"),
+        optparse.make_option("--accelerated-2d-canvas",
+            action="store_true",
+            help="Use hardware-accelerated 2D Canvas calls"),
+        optparse.make_option("--no-accelerated-2d-canvas",
+            action="store_false",
+            dest="accelerated_2d_canvas",
+            help="Don't use hardware-accelerated 2D Canvas calls"),
     ]
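
The paired --accelerated-*/--no-accelerated-* options above share a dest and leave the default at None, presumably so the port can tell "not specified" apart from an explicit choice. A standalone optparse sketch of that pattern:

    import optparse

    parser = optparse.OptionParser()
    parser.add_option("--accelerated-compositing", action="store_true",
                      dest="accelerated_compositing")
    parser.add_option("--no-accelerated-compositing", action="store_false",
                      dest="accelerated_compositing")

    options, _ = parser.parse_args([])
    assert options.accelerated_compositing is None      # port decides
    options, _ = parser.parse_args(["--no-accelerated-compositing"])
    assert options.accelerated_compositing is False
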
 
     # Missing Mac-specific old-run-webkit-tests options:
@@ -1509,8 +1474,9 @@ def parse_args(args=None):
             dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"),
         optparse.make_option("--no-pixel-tests", action="store_false",
             dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"),
-        # old-run-webkit-tests allows a specific tolerance: --tolerance t
-        # Ignore image differences less than this percentage (default: 0.1)
+        optparse.make_option("--tolerance",
+            help="Ignore image differences less than this percentage (some "
+                "ports may ignore this option)", type="float"),
         optparse.make_option("--results-directory",
             default="layout-test-results",
             help="Output results directory source dir, relative to Debug or "
@@ -1545,13 +1511,12 @@ def parse_args(args=None):
             default=False, help="Clobbers test results from previous runs."),
         optparse.make_option("--platform",
             help="Override the platform for expected results"),
+        optparse.make_option("--no-record-results", action="store_false",
+            default=True, dest="record_results",
+            help="Don't record the results."),
         # old-run-webkit-tests also has HTTP toggle options:
         # --[no-]http                     Run (or do not run) http tests
         #                                 (default: run)
-        # --[no-]wait-for-httpd           Wait for httpd if some other test
-        #                                 session is using it already (same
-        #                                 as WEBKIT_WAIT_FOR_HTTPD=1).
-        #                                 (default: 0)
     ]
 
     test_options = [
@@ -1562,6 +1527,9 @@ def parse_args(args=None):
         optparse.make_option("--no-build", dest="build",
             action="store_false", help="Don't check to see if the "
                                        "DumpRenderTree build is up-to-date."),
+        optparse.make_option("-n", "--dry-run", action="store_true",
+            default=False,
+            help="Do everything but actually run the tests or upload results."),
         # old-run-webkit-tests has --valgrind instead of wrapper.
         optparse.make_option("--wrapper",
             help="wrapper command to insert before invocations of "
@@ -1593,7 +1561,7 @@ def parse_args(args=None):
         #   Restart DumpRenderTree every n tests (default: 1000)
         optparse.make_option("--batch-size",
             help=("Run a the tests in batches (n), after every n tests, "
-                  "DumpRenderTree is relaunched.")),
+                  "DumpRenderTree is relaunched."), type="int", default=0),
         # old-run-webkit-tests calls --run-singly: -1|--singly
         # Isolate each test case run (implies --nthly 1 --verbose)
         optparse.make_option("--run-singly", action="store_true",
@@ -1601,6 +1569,9 @@ def parse_args(args=None):
         optparse.make_option("--child-processes",
             help="Number of DumpRenderTrees to run in parallel."),
         # FIXME: Display default number of child processes that will run.
+        optparse.make_option("--worker-model", action="store",
+            default="threads", help=("controls worker model. Valid values are "
+            "'inline' and 'threads' (default).")),
         optparse.make_option("--experimental-fully-parallel",
             action="store_true", default=False,
             help="run all tests in parallel"),
@@ -1612,7 +1583,7 @@ def parse_args(args=None):
         #      Number of times to run the set of tests (e.g. ABCABCABC)
         optparse.make_option("--print-last-failures", action="store_true",
             default=False, help="Print the tests in the last run that "
-            "had unexpected failures (or passes)."),
+            "had unexpected failures (or passes) and then exit."),
         optparse.make_option("--retest-last-failures", action="store_true",
             default=False, help="re-test the tests in the last run that "
             "had unexpected failures (or passes)."),
@@ -1632,6 +1603,7 @@ def parse_args(args=None):
 
     # FIXME: Move these into json_results_generator.py
     results_json_options = [
+        optparse.make_option("--master-name", help="The name of the buildbot master."),
         optparse.make_option("--builder-name", default="DUMMY_BUILDER_NAME",
             help=("The name of the builder shown on the waterfall running "
                   "this script e.g. WebKit.")),
@@ -1655,37 +1627,7 @@ def parse_args(args=None):
                    old_run_webkit_tests_compat)
     option_parser = optparse.OptionParser(option_list=option_list)
 
-    options, args = option_parser.parse_args(args)
-
-    return options, args
-
-
-def _find_thread_stack(id):
-    """Returns a stack object that can be used to dump a stack trace for
-    the given thread id (or None if the id is not found)."""
-    for thread_id, stack in sys._current_frames().items():
-        if thread_id == id:
-            return stack
-    return None
-
-
-def _log_stack(stack):
-    """Log a stack trace to log.error()."""
-    for filename, lineno, name, line in traceback.extract_stack(stack):
-        _log.error('File: "%s", line %d, in %s' % (filename, lineno, name))
-        if line:
-            _log.error('  %s' % line.strip())
-
-
-def _log_wedged_thread(thread):
-    """Log information about the given thread state."""
-    id = thread.id()
-    stack = _find_thread_stack(id)
-    assert(stack is not None)
-    _log.error("")
-    _log.error("thread %s (%d) is wedged" % (thread.getName(), id))
-    _log_stack(stack)
-    _log.error("")
+    return option_parser.parse_args(args)
 
 
 def main():
@@ -1693,6 +1635,7 @@ def main():
     port_obj = port.get(options.platform, options)
     return run(port_obj, options, args)
 
+
 if '__main__' == __name__:
     try:
         sys.exit(main())
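
Finally, the whole pipeline can be driven programmatically the same way main() does. A hedged sketch, assuming webkitpy is on sys.path (the checkout path and test arguments are made up):

    import sys
    sys.path.insert(0, "/path/to/WebKitTools/Scripts")   # hypothetical checkout

    from webkitpy.layout_tests import port
    from webkitpy.layout_tests import run_webkit_tests

    options, args = run_webkit_tests.parse_args(
        ["--worker-model", "threads", "fast/js"])
    port_obj = port.get(options.platform, options)
    sys.exit(run_webkit_tests.run(port_obj, options, args))
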