startop: Rewrite app startup runner to use the new Python run_app_with_prefetch module.

Test: pytest app_startup_runner_test.py
Test: pytest lib/args_utils_test.py
Test: pytest lib/data_frame_test.py
Test: ./app_startup_runner.py --package com.google.android.music --readahead fadvise --readahead cold --inodes textcache --output output.txt -d -lc 3
Bug: 137216480
Change-Id: Ide9abe4ff3d7179e6830a7866b0eb90cc67d8e40
diff --git a/startop/scripts/app_startup/app_startup_runner.py b/startop/scripts/app_startup/app_startup_runner.py
index 9a608af..7cba780 100755
--- a/startop/scripts/app_startup/app_startup_runner.py
+++ b/startop/scripts/app_startup/app_startup_runner.py
@@ -27,28 +27,40 @@
 #
 
 import argparse
-import asyncio
 import csv
 import itertools
 import os
 import sys
 import tempfile
-import time
-from typing import Any, Callable, Dict, Generic, Iterable, List, NamedTuple, TextIO, Tuple, TypeVar, Optional, Union
+from typing import Any, Callable, Iterable, List, NamedTuple, TextIO, Tuple, \
+    TypeVar, Union
+
+# local import
+DIR = os.path.abspath(os.path.dirname(__file__))
+sys.path.append(os.path.dirname(DIR))
+import app_startup.run_app_with_prefetch as run_app_with_prefetch
+import app_startup.lib.args_utils as args_utils
+from app_startup.lib.data_frame import DataFrame
+import lib.cmd_utils as cmd_utils
+import lib.print_utils as print_utils
 
 # The following command line options participate in the combinatorial generation.
 # All other arguments have a global effect.
-_COMBINATORIAL_OPTIONS=['packages', 'readaheads', 'compiler_filters']
-_TRACING_READAHEADS=['mlock', 'fadvise']
-_FORWARD_OPTIONS={'loop_count': '--count'}
-_RUN_SCRIPT=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'run_app_with_prefetch')
+_COMBINATORIAL_OPTIONS = ['package', 'readahead', 'compiler_filter', 'activity']
+_TRACING_READAHEADS = ['mlock', 'fadvise']
+_FORWARD_OPTIONS = {'loop_count': '--count'}
+_RUN_SCRIPT = os.path.join(os.path.dirname(os.path.realpath(__file__)),
+                           'run_app_with_prefetch.py')
 
-RunCommandArgs = NamedTuple('RunCommandArgs', [('package', str), ('readahead', str), ('compiler_filter', Optional[str])])
-CollectorPackageInfo = NamedTuple('CollectorPackageInfo', [('package', str), ('compiler_filter', str)])
-_COLLECTOR_SCRIPT=os.path.join(os.path.dirname(os.path.realpath(__file__)), '../iorap/collector')
-_COLLECTOR_TIMEOUT_MULTIPLIER = 2 # take the regular --timeout and multiply by 2; systrace starts up slowly.
+CollectorPackageInfo = NamedTuple('CollectorPackageInfo',
+                                  [('package', str), ('compiler_filter', str)])
+_COLLECTOR_SCRIPT = os.path.join(os.path.dirname(os.path.realpath(__file__)),
+                                 '../iorap/collector')
+_COLLECTOR_TIMEOUT_MULTIPLIER = 10  # take the regular --timeout and multiply
+# by 10; systrace starts up slowly.
 
-_UNLOCK_SCREEN_SCRIPT=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'unlock_screen')
+_UNLOCK_SCREEN_SCRIPT = os.path.join(
+    os.path.dirname(os.path.realpath(__file__)), 'unlock_screen')
 
 # This must be the only mutable global variable. All other global variables are constants to avoid magic literals.
 _debug = False  # See -d/--debug flag.
@@ -56,105 +68,70 @@
 
 # Type hinting names.
 T = TypeVar('T')
-NamedTupleMeta = Callable[..., T]  # approximation of a (S : NamedTuple<T> where S() == T) metatype.
+# approximation of a (S : NamedTuple<T> where S() == T) metatype.
+NamedTupleMeta = Callable[..., T]
 
 def parse_options(argv: List[str] = None):
   """Parse command line arguments and return an argparse Namespace object."""
-  parser = argparse.ArgumentParser(description="Run one or more Android applications under various settings in order to measure startup time.")
+  parser = argparse.ArgumentParser(description="Run one or more Android "
+                                               "applications under various "
+                                               "settings in order to measure "
+                                               "startup time.")
   # argparse considers args starting with - and -- optional in --help, even though required=True.
   # by using a named argument group --help will clearly say that it's required instead of optional.
   required_named = parser.add_argument_group('required named arguments')
-  required_named.add_argument('-p', '--package', action='append', dest='packages', help='package of the application', required=True)
-  required_named.add_argument('-r', '--readahead', action='append', dest='readaheads', help='which readahead mode to use', choices=('warm', 'cold', 'mlock', 'fadvise'), required=True)
+  required_named.add_argument('-p', '--package', action='append',
+                              dest='packages',
+                              help='package of the application', required=True)
+  required_named.add_argument('-r', '--readahead', action='append',
+                              dest='readaheads',
+                              help='which readahead mode to use',
+                              choices=('warm', 'cold', 'mlock', 'fadvise'),
+                              required=True)
 
   # optional arguments
   # use a group here to get the required arguments to appear 'above' the optional arguments in help.
   optional_named = parser.add_argument_group('optional named arguments')
-  optional_named.add_argument('-c', '--compiler-filter', action='append', dest='compiler_filters', help='which compiler filter to use. if omitted it does not enforce the app\'s compiler filter', choices=('speed', 'speed-profile', 'quicken'))
-  optional_named.add_argument('-s', '--simulate', dest='simulate', action='store_true', help='Print which commands will run, but don\'t run the apps')
-  optional_named.add_argument('-d', '--debug', dest='debug', action='store_true', help='Add extra debugging output')
-  optional_named.add_argument('-o', '--output', dest='output', action='store', help='Write CSV output to file.')
-  optional_named.add_argument('-t', '--timeout', dest='timeout', action='store', type=int, help='Timeout after this many seconds when executing a single run.')
-  optional_named.add_argument('-lc', '--loop-count', dest='loop_count', default=1, type=int, action='store', help='How many times to loop a single run.')
-  optional_named.add_argument('-in', '--inodes', dest='inodes', type=str, action='store', help='Path to inodes file (system/extras/pagecache/pagecache.py -d inodes)')
+  optional_named.add_argument('-c', '--compiler-filter', action='append',
+                              dest='compiler_filters',
+                              help='which compiler filter to use. if omitted it does not enforce the app\'s compiler filter',
+                              choices=('speed', 'speed-profile', 'quicken'))
+  optional_named.add_argument('-s', '--simulate', dest='simulate',
+                              action='store_true',
+                              help='Print which commands will run, but don\'t run the apps')
+  optional_named.add_argument('-d', '--debug', dest='debug',
+                              action='store_true',
+                              help='Add extra debugging output')
+  optional_named.add_argument('-o', '--output', dest='output', action='store',
+                              help='Write CSV output to file.')
+  optional_named.add_argument('-t', '--timeout', dest='timeout', action='store',
+                              type=int, default=10,
+                              help='Timeout after this many seconds when executing a single run.')
+  optional_named.add_argument('-lc', '--loop-count', dest='loop_count',
+                              default=1, type=int, action='store',
+                              help='How many times to loop a single run.')
+  optional_named.add_argument('-in', '--inodes', dest='inodes', type=str,
+                              action='store',
+                              help='Path to inodes file (system/extras/pagecache/pagecache.py -d inodes)')
 
   return parser.parse_args(argv)
 
-# TODO: refactor this with a common library file with analyze_metrics.py
-def _debug_print(*args, **kwargs):
-  """Print the args to sys.stderr if the --debug/-d flag was passed in."""
+def make_script_command_with_temp_output(script: str,
+                                         args: List[str],
+                                         **kwargs) -> Tuple[str, TextIO]:
+  """
+  Create a command to run a script with the given args.
+  Appends '--<key> <value>' for each keyword argument, '--verbose' if
+  debugging is enabled, and finally '--output <tmp-file-name>'.
+  Returns a tuple (cmd, tmp_file).
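+
+  Example with illustrative values (the temp-file name is generated at
+  runtime by tempfile):
+    make_script_command_with_temp_output('s.py', ['-p', 'foo'], count=2)
+    -> (['s.py', '-p', 'foo', '--count', '2', '--output', '<tmp-name>'],
+        <tmp-file>)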
+  """
+  tmp_output_file = tempfile.NamedTemporaryFile(mode='r')
+  cmd = [script] + args
+  for key, value in kwargs.items():
+    cmd += ['--%s' % (key), "%s" % (value)]
   if _debug:
-    print(*args, **kwargs, file=sys.stderr)
-
-def _expand_gen_repr(args):
-  """Like repr but any generator-like object has its iterator consumed
-  and then called repr on."""
-  new_args_list = []
-  for i in args:
-    # detect iterable objects that do not have their own override of __str__
-    if hasattr(i, '__iter__'):
-      to_str = getattr(i, '__str__')
-      if to_str.__objclass__ == object:
-        # the repr for a generator is just type+address, expand it out instead.
-        new_args_list.append([_expand_gen_repr([j])[0] for j in i])
-        continue
-    # normal case: uses the built-in to-string
-    new_args_list.append(i)
-  return new_args_list
-
-def _debug_print_gen(*args, **kwargs):
-  """Like _debug_print but will turn any iterable args into a list."""
-  if not _debug:
-    return
-
-  new_args_list = _expand_gen_repr(args)
-  _debug_print(*new_args_list, **kwargs)
-
-def _debug_print_nd(*args, **kwargs):
-  """Like _debug_print but will turn any NamedTuple-type args into a string."""
-  if not _debug:
-    return
-
-  new_args_list = []
-  for i in args:
-    if hasattr(i, '_field_types'):
-      new_args_list.append("%s: %s" %(i.__name__, i._field_types))
-    else:
-      new_args_list.append(i)
-
-  _debug_print(*new_args_list, **kwargs)
-
-def dict_lookup_any_key(dictionary: dict, *keys: List[Any]):
-  for k in keys:
-    if k in dictionary:
-      return dictionary[k]
-  raise KeyError("None of the keys %s were in the dictionary" %(keys))
-
-def generate_run_combinations(named_tuple: NamedTupleMeta[T], opts_dict: Dict[str, List[Optional[str]]])\
-    -> Iterable[T]:
-  """
-  Create all possible combinations given the values in opts_dict[named_tuple._fields].
-
-  :type T: type annotation for the named_tuple type.
-  :param named_tuple: named tuple type, whose fields are used to make combinations for
-  :param opts_dict: dictionary of keys to value list. keys correspond to the named_tuple fields.
-  :return: an iterable over named_tuple instances.
-  """
-  combinations_list = []
-  for k in named_tuple._fields:
-    # the key can be either singular or plural , e.g. 'package' or 'packages'
-    val = dict_lookup_any_key(opts_dict, k, k + "s")
-
-    # treat {'x': None} key value pairs as if it was [None]
-    # otherwise itertools.product throws an exception about not being able to iterate None.
-    combinations_list.append(val or [None])
-
-  _debug_print("opts_dict: ", opts_dict)
-  _debug_print_nd("named_tuple: ", named_tuple)
-  _debug_print("combinations_list: ", combinations_list)
-
-  for combo in itertools.product(*combinations_list):
-    yield named_tuple(*combo)
+    cmd += ['--verbose']
+  cmd = cmd + ["--output", tmp_output_file.name]
+  return cmd, tmp_output_file
 
 def key_to_cmdline_flag(key: str) -> str:
   """Convert key into a command line flag, e.g. 'foo-bars' -> '--foo-bar' """
@@ -176,230 +153,26 @@
     args.append(value)
   return args
 
-def generate_group_run_combinations(run_combinations: Iterable[NamedTuple], dst_nt: NamedTupleMeta[T])\
-    -> Iterable[Tuple[T, Iterable[NamedTuple]]]:
+def run_collector_script(collector_info: CollectorPackageInfo,
+                         inodes_path: str,
+                         timeout: int,
+                         simulate: bool) -> Tuple[bool, TextIO]:
+  """Run collector to collect prefetching trace. """
+  # collector_args = ["--package", package_name]
+  collector_args = as_run_command(collector_info)
+  # TODO: forward --wait_time for how long systrace runs?
+  # TODO: forward --trace_buffer_size for size of systrace buffer size?
+  collector_cmd, collector_tmp_output_file = make_script_command_with_temp_output(
+      _COLLECTOR_SCRIPT, collector_args, inodes=inodes_path)
 
-  def group_by_keys(src_nt):
-    src_d = src_nt._asdict()
-    # now remove the keys that aren't legal in dst.
-    for illegal_key in set(src_d.keys()) - set(dst_nt._fields):
-      if illegal_key in src_d:
-        del src_d[illegal_key]
+  collector_timeout = timeout and _COLLECTOR_TIMEOUT_MULTIPLIER * timeout
+  (collector_passed, collector_script_output) = \
+    cmd_utils.execute_arbitrary_command(collector_cmd,
+                                        collector_timeout,
+                                        shell=False,
+                                        simulate=simulate)
 
-    return dst_nt(**src_d)
-
-  for args_list_it in itertools.groupby(run_combinations, group_by_keys):
-    (group_key_value, args_it) = args_list_it
-    yield (group_key_value, args_it)
-
-class DataFrame:
-  """Table-like class for storing a 2D cells table with named columns."""
-  def __init__(self, data: Dict[str, List[object]] = {}):
-    """
-    Create a new DataFrame from a dictionary (keys = headers,
-    values = columns).
-    """
-    self._headers = [i for i in data.keys()]
-    self._rows = []
-
-    row_num = 0
-
-    def get_data_row(idx):
-      r = {}
-      for header, header_data in data.items():
-
-        if not len(header_data) > idx:
-          continue
-
-        r[header] = header_data[idx]
-
-      return r
-
-    while True:
-      row_dict = get_data_row(row_num)
-      if len(row_dict) == 0:
-        break
-
-      self._append_row(row_dict.keys(), row_dict.values())
-      row_num = row_num + 1
-
-  def concat_rows(self, other: 'DataFrame') -> None:
-    """
-    In-place concatenate rows of other into the rows of the
-    current DataFrame.
-
-    None is added in pre-existing cells if new headers
-    are introduced.
-    """
-    other_datas = other._data_only()
-
-    other_headers = other.headers
-
-    for d in other_datas:
-      self._append_row(other_headers, d)
-
-  def _append_row(self, headers: List[str], data: List[object]):
-    new_row = {k:v for k,v in zip(headers, data)}
-    self._rows.append(new_row)
-
-    for header in headers:
-      if not header in self._headers:
-        self._headers.append(header)
-
-  def __repr__(self):
-#     return repr(self._rows)
-    repr = ""
-
-    header_list = self._headers_only()
-
-    row_format = u""
-    for header in header_list:
-      row_format = row_format + u"{:>%d}" %(len(header) + 1)
-
-    repr = row_format.format(*header_list) + "\n"
-
-    for v in self._data_only():
-      repr = repr + row_format.format(*v) + "\n"
-
-    return repr
-
-  def __eq__(self, other):
-    if isinstance(other, self.__class__):
-      return self.headers == other.headers and self.data_table == other.data_table
-    else:
-      print("wrong instance", other.__class__)
-      return False
-
-  @property
-  def headers(self) -> List[str]:
-    return [i for i in self._headers_only()]
-
-  @property
-  def data_table(self) -> List[List[object]]:
-    return list(self._data_only())
-
-  @property
-  def data_table_transposed(self) -> List[List[object]]:
-    return list(self._transposed_data())
-
-  @property
-  def data_row_len(self) -> int:
-    return len(self._rows)
-
-  def data_row_at(self, idx) -> List[object]:
-    """
-    Return a single data row at the specified index (0th based).
-
-    Accepts negative indices, e.g. -1 is last row.
-    """
-    row_dict = self._rows[idx]
-    l = []
-
-    for h in self._headers_only():
-      l.append(row_dict.get(h)) # Adds None in blank spots.
-
-    return l
-
-  def copy(self) -> 'DataFrame':
-    """
-    Shallow copy of this DataFrame.
-    """
-    return self.repeat(count=0)
-
-  def repeat(self, count: int) -> 'DataFrame':
-    """
-    Returns a new DataFrame where each row of this dataframe is repeated count times.
-    A repeat of a row is adjacent to other repeats of that same row.
-    """
-    df = DataFrame()
-    df._headers = self._headers.copy()
-
-    rows = []
-    for row in self._rows:
-      for i in range(count):
-        rows.append(row.copy())
-
-    df._rows = rows
-
-    return df
-
-  def merge_data_columns(self, other: 'DataFrame'):
-    """
-    Merge self and another DataFrame by adding the data from other column-wise.
-    For any headers that are the same, data from 'other' is preferred.
-    """
-    for h in other._headers:
-      if not h in self._headers:
-        self._headers.append(h)
-
-    append_rows = []
-
-    for self_dict, other_dict in itertools.zip_longest(self._rows, other._rows):
-      if not self_dict:
-        d = {}
-        append_rows.append(d)
-      else:
-        d = self_dict
-
-      d_other = other_dict
-      if d_other:
-        for k,v in d_other.items():
-          d[k] = v
-
-    for r in append_rows:
-      self._rows.append(r)
-
-  def data_row_reduce(self, fnc) -> 'DataFrame':
-    """
-    Reduces the data row-wise by applying the fnc to each row (column-wise).
-    Empty cells are skipped.
-
-    fnc(Iterable[object]) -> object
-    fnc is applied over every non-empty cell in that column (descending row-wise).
-
-    Example:
-      DataFrame({'a':[1,2,3]}).data_row_reduce(sum) == DataFrame({'a':[6]})
-
-    Returns a new single-row DataFrame.
-    """
-    df = DataFrame()
-    df._headers = self._headers.copy()
-
-    def yield_by_column(header_key):
-      for row_dict in self._rows:
-        val = row_dict.get(header_key)
-        if val:
-          yield val
-
-    new_row_dict = {}
-    for h in df._headers:
-      cell_value = fnc(yield_by_column(h))
-      new_row_dict[h] = cell_value
-
-    df._rows = [new_row_dict]
-    return df
-
-  def _headers_only(self):
-    return self._headers
-
-  def _data_only(self):
-    row_len = len(self._rows)
-
-    for i in range(row_len):
-      yield self.data_row_at(i)
-
-  def _transposed_data(self):
-    return zip(*self._data_only())
-
-def parse_run_script_csv_file_flat(csv_file: TextIO) -> List[int]:
-  """Parse a CSV file full of integers into a flat int list."""
-  csv_reader = csv.reader(csv_file)
-  arr = []
-  for row in csv_reader:
-    for i in row:
-      if i:
-        arr.append(int(i))
-  return arr
+  return collector_passed, collector_tmp_output_file
 
 def parse_run_script_csv_file(csv_file: TextIO) -> DataFrame:
   """Parse a CSV file full of integers into a DataFrame."""
@@ -433,160 +206,52 @@
 
   return DataFrame(d)
 
-def make_script_command_with_temp_output(script: str, args: List[str], **kwargs)\
-    -> Tuple[str, TextIO]:
-  """
-  Create a command to run a script given the args.
-  Appends --count <loop_count> --output <tmp-file-name>.
-  Returns a tuple (cmd, tmp_file)
-  """
-  tmp_output_file = tempfile.NamedTemporaryFile(mode='r')
-  cmd = [script] + args
-  for key, value in kwargs.items():
-    cmd += ['--%s' %(key), "%s" %(value)]
-  if _debug:
-    cmd += ['--verbose']
-  cmd = cmd + ["--output", tmp_output_file.name]
-  return cmd, tmp_output_file
-
-async def _run_command(*args : List[str], timeout: Optional[int] = None) -> Tuple[int, bytes]:
-  # start child process
-  # NOTE: universal_newlines parameter is not supported
-  process = await asyncio.create_subprocess_exec(*args,
-      stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT)
-
-  script_output = b""
-
-  _debug_print("[PID]", process.pid)
-
-#hack
-#  stdout, stderr = await process.communicate()
-#  return (process.returncode, stdout)
-
-  timeout_remaining = timeout
-  time_started = time.time()
-
-  # read line (sequence of bytes ending with b'\n') asynchronously
-  while True:
-    try:
-      line = await asyncio.wait_for(process.stdout.readline(), timeout_remaining)
-      _debug_print("[STDOUT]", line)
-      script_output += line
-
-      if timeout_remaining:
-        time_elapsed = time.time() - time_started
-        timeout_remaining = timeout - time_elapsed
-    except asyncio.TimeoutError:
-      _debug_print("[TIMEDOUT] Process ", process.pid)
-
-#      if process.returncode is not None:
-#        #_debug_print("[WTF] can-write-eof?", process.stdout.can_write_eof())
-#
-#        _debug_print("[TIMEDOUT] Process already terminated")
-#        (remaining_stdout, remaining_stderr) = await process.communicate()
-#        script_output += remaining_stdout
-#
-#        code = await process.wait()
-#        return (code, script_output)
-
-      _debug_print("[TIMEDOUT] Sending SIGTERM.")
-      process.terminate()
-
-      # 1 second timeout for process to handle SIGTERM nicely.
-      try:
-       (remaining_stdout, remaining_stderr) = await asyncio.wait_for(process.communicate(), 5)
-       script_output += remaining_stdout
-      except asyncio.TimeoutError:
-        _debug_print("[TIMEDOUT] Sending SIGKILL.")
-        process.kill()
-
-      # 1 second timeout to finish with SIGKILL.
-      try:
-        (remaining_stdout, remaining_stderr) = await asyncio.wait_for(process.communicate(), 5)
-        script_output += remaining_stdout
-      except asyncio.TimeoutError:
-        # give up, this will leave a zombie process.
-        _debug_print("[TIMEDOUT] SIGKILL failed for process ", process.pid)
-        time.sleep(100)
-        #await process.wait()
-
-      return (-1, script_output)
-    else:
-      if not line: # EOF
-        break
-
-      #if process.returncode is not None:
-      #  _debug_print("[WTF] can-write-eof?", process.stdout.can_write_eof())
-      #  process.stdout.write_eof()
-
-      #if process.stdout.at_eof():
-      #  break
-
-  code = await process.wait() # wait for child process to exit
-  return (code, script_output)
-
-def execute_arbitrary_command(cmd: List[str], simulate: bool, timeout: Optional[int]) -> Tuple[bool, str]:
-  if simulate:
-    print(" ".join(cmd))
-    return (True, "")
-  else:
-    _debug_print("[EXECUTE]", cmd)
-
-    # block until either command finishes or the timeout occurs.
-    loop = asyncio.get_event_loop()
-    (return_code, script_output) = loop.run_until_complete(_run_command(*cmd, timeout=timeout))
-
-    script_output = script_output.decode() # convert bytes to str
-
-    passed = (return_code == 0)
-    _debug_print("[$?]", return_code)
-    if not passed:
-      print("[FAILED, code:%s]" %(return_code), script_output, file=sys.stderr)
-
-    return (passed, script_output)
-
-def execute_run_combos(grouped_run_combos: Iterable[Tuple[CollectorPackageInfo, Iterable[RunCommandArgs]]], simulate: bool, inodes_path: str, timeout: int, loop_count: int, need_trace: bool):
+def execute_run_combos(
+    grouped_run_combos: Iterable[Tuple[CollectorPackageInfo, Iterable[
+      run_app_with_prefetch.RunCommandArgs]]],
+    simulate: bool,
+    inodes_path: str,
+    timeout: int):
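+  """Unlock the screen, then execute each grouped run combination.
+
+  For tracing readaheads (mlock, fadvise), the collector script is run first
+  to produce a prefetch trace, which is passed on via the combo's 'input'
+  field. Yields a one-row DataFrame per run, or None if there was no output.
+  """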
   # nothing will work if the screen isn't unlocked first.
-  execute_arbitrary_command([_UNLOCK_SCREEN_SCRIPT], simulate, timeout)
+  cmd_utils.execute_arbitrary_command([_UNLOCK_SCREEN_SCRIPT],
+                                      timeout,
+                                      simulate=simulate,
+                                      shell=False)
 
   for collector_info, run_combos in grouped_run_combos:
-    #collector_args = ["--package", package_name]
-    collector_args = as_run_command(collector_info)
-    # TODO: forward --wait_time for how long systrace runs?
-    # TODO: forward --trace_buffer_size for size of systrace buffer size?
-    collector_cmd, collector_tmp_output_file = make_script_command_with_temp_output(_COLLECTOR_SCRIPT, collector_args, inodes=inodes_path)
+    for combos in run_combos:
+      args = as_run_command(combos)
+      if combos.readahead in _TRACING_READAHEADS:
+        passed, collector_tmp_output_file = run_collector_script(collector_info,
+                                                                 inodes_path,
+                                                                 timeout,
+                                                                 simulate)
+        combos = combos._replace(input=collector_tmp_output_file.name)
 
-    with collector_tmp_output_file:
-      collector_passed = True
-      if need_trace:
-        collector_timeout = timeout and _COLLECTOR_TIMEOUT_MULTIPLIER * timeout
-        (collector_passed, collector_script_output) = execute_arbitrary_command(collector_cmd, simulate, collector_timeout)
-        # TODO: consider to print a ; collector wrote file to <...> into the CSV file so we know it was ran.
+      print_utils.debug_print(combos)
+      output = run_app_with_prefetch.run_test(combos)
 
-      for combos in run_combos:
-        args = as_run_command(combos)
+      yield DataFrame(dict((x, [y]) for x, y in output)) if output else None
 
-        cmd, tmp_output_file = make_script_command_with_temp_output(_RUN_SCRIPT, args, count=loop_count, input=collector_tmp_output_file.name)
-        with tmp_output_file:
-          (passed, script_output) = execute_arbitrary_command(cmd, simulate, timeout)
-          parsed_output = simulate and DataFrame({'fake_ms':[1,2,3]}) or parse_run_script_csv_file(tmp_output_file)
-          yield (passed, script_output, parsed_output)
-
-def gather_results(commands: Iterable[Tuple[bool, str, DataFrame]], key_list: List[str], value_list: List[Tuple[str, ...]]):
-  _debug_print("gather_results: key_list = ", key_list)
-#  yield key_list + ["time(ms)"]
-
+def gather_results(commands: Iterable[DataFrame],
+                   key_list: List[str], value_list: List[Tuple[str, ...]]):
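+  """Pair each run's result DataFrame with the combination that produced it.
+
+  Yields one DataFrame per successful run, with the combination's key/value
+  columns merged in; runs without results are skipped.
+  """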
+  print_utils.debug_print("gather_results: key_list = ", key_list)
   stringify_none = lambda s: s is None and "<none>" or s
+  #  yield key_list + ["time(ms)"]
+  for (run_result_list, values) in itertools.zip_longest(commands, value_list):
+    print_utils.debug_print("run_result_list = ", run_result_list)
+    print_utils.debug_print("values = ", values)
 
-  for ((passed, script_output, run_result_list), values) in itertools.zip_longest(commands, value_list):
-    _debug_print("run_result_list = ", run_result_list)
-    _debug_print("values = ", values)
-    if not passed:
+    if not run_result_list:
       continue
 
     # RunCommandArgs(package='com.whatever', readahead='warm', compiler_filter=None)
     # -> {'package':['com.whatever'], 'readahead':['warm'], 'compiler_filter':[None]}
-    values_dict = {k:[v] for k,v in values._asdict().items()}
+    values_dict = {}
+    for k, v in values._asdict().items():
+      if not k in key_list:
+        continue
+      values_dict[k] = [stringify_none(v)]
 
     values_df = DataFrame(values_dict)
     # project 'values_df' to be same number of rows as run_result_list.
@@ -598,7 +263,6 @@
     yield values_df
 
 def eval_and_save_to_csv(output, annotated_result_values):
-
   printed_header = False
 
   csv_writer = csv.writer(output)
@@ -610,9 +274,22 @@
       # TODO: what about when headers change?
 
     for data_row in row.data_table:
+      data_row = [d for d in data_row]
       csv_writer.writerow(data_row)
 
-    output.flush() # see the output live.
+    output.flush()  # see the output live.
+
+def coerce_to_list(opts: dict):
+  """Tranform values of the dictionary to list.
+  For example:
+  1 -> [1], None -> [None], [1,2,3] -> [1,2,3]
+  [[1],[2]] -> [[1],[2]], {1:1, 2:2} -> [{1:1, 2:2}]
+  """
+  result = {}
+  for key in opts:
+    val = opts[key]
+    result[key] = val if issubclass(type(val), list) else [val]
+  return result
 
 def main():
   global _debug
@@ -621,26 +298,34 @@
   _debug = opts.debug
   if _DEBUG_FORCE is not None:
     _debug = _DEBUG_FORCE
-  _debug_print("parsed options: ", opts)
-  need_trace = not not set(opts.readaheads).intersection(set(_TRACING_READAHEADS))
-  if need_trace and not opts.inodes:
-    print("Error: Missing -in/--inodes, required when using a readahead of %s" %(_TRACING_READAHEADS), file=sys.stderr)
-    return 1
+
+  print_utils.DEBUG = _debug
+  cmd_utils.SIMULATE = opts.simulate
+
+  print_utils.debug_print("parsed options: ", opts)
 
   output_file = opts.output and open(opts.output, 'w') or sys.stdout
 
-  combos = lambda: generate_run_combinations(RunCommandArgs, vars(opts))
-  _debug_print_gen("run combinations: ", combos())
+  combos = lambda: args_utils.generate_run_combinations(
+      run_app_with_prefetch.RunCommandArgs,
+      coerce_to_list(vars(opts)),
+      opts.loop_count)
+  print_utils.debug_print_gen("run combinations: ", combos())
 
-  grouped_combos = lambda: generate_group_run_combinations(combos(), CollectorPackageInfo)
-  _debug_print_gen("grouped run combinations: ", grouped_combos())
+  grouped_combos = lambda: args_utils.generate_group_run_combinations(combos(),
+                                                                      CollectorPackageInfo)
 
-  exec = execute_run_combos(grouped_combos(), opts.simulate, opts.inodes, opts.timeout, opts.loop_count, need_trace)
+  print_utils.debug_print_gen("grouped run combinations: ", grouped_combos())
+  exec = execute_run_combos(grouped_combos(),
+                            opts.simulate,
+                            opts.inodes,
+                            opts.timeout)
+
   results = gather_results(exec, _COMBINATORIAL_OPTIONS, combos())
+
   eval_and_save_to_csv(output_file, results)
 
-  return 0
-
+  return 0
 
 if __name__ == '__main__':
   sys.exit(main())
diff --git a/startop/scripts/app_startup/app_startup_runner_test.py b/startop/scripts/app_startup/app_startup_runner_test.py
index fd81667..9aa7014 100755
--- a/startop/scripts/app_startup/app_startup_runner_test.py
+++ b/startop/scripts/app_startup/app_startup_runner_test.py
@@ -31,18 +31,17 @@
 See also https://docs.pytest.org/en/latest/usage.html
 """
 
-# global imports
-from contextlib import contextmanager
 import io
 import shlex
 import sys
 import typing
-
-# pip imports
-import pytest
+# global imports
+from contextlib import contextmanager
 
 # local imports
 import app_startup_runner as asr
+# pip imports
+import pytest
 
 #
 # Argument Parsing Helpers
@@ -91,7 +90,8 @@
   """
   # Combine it with all of the "optional" parameters' default values.
   """
-  d = {'compiler_filters': None, 'simulate': False, 'debug': False, 'output': None, 'timeout': None, 'loop_count': 1, 'inodes': None}
+  d = {'compiler_filters': None, 'simulate': False, 'debug': False,
+       'output': None, 'timeout': 10, 'loop_count': 1, 'inodes': None}
   d.update(kwargs)
   return d
 
@@ -111,7 +111,7 @@
   in default_mock_dict_for_parsed_args.
   """
   req = "--package com.fake.package --readahead warm"
-  return parse_args("%s %s" %(req, str))
+  return parse_args("%s %s" % (req, str))
 
 def test_argparse():
   # missing arguments
@@ -124,15 +124,22 @@
   # required arguments are parsed correctly
   ad = default_dict_for_parsed_args  # assert dict
 
-  assert parse_args("--package xyz --readahead warm") == ad(packages=['xyz'], readaheads=['warm'])
-  assert parse_args("-p xyz -r warm") == ad(packages=['xyz'], readaheads=['warm'])
+  assert parse_args("--package xyz --readahead warm") == ad(packages=['xyz'],
+                                                            readaheads=['warm'])
+  assert parse_args("-p xyz -r warm") == ad(packages=['xyz'],
+                                            readaheads=['warm'])
 
-  assert parse_args("-p xyz -r warm -s") == ad(packages=['xyz'], readaheads=['warm'], simulate=True)
-  assert parse_args("-p xyz -r warm --simulate") == ad(packages=['xyz'], readaheads=['warm'], simulate=True)
+  assert parse_args("-p xyz -r warm -s") == ad(packages=['xyz'],
+                                               readaheads=['warm'],
+                                               simulate=True)
+  assert parse_args("-p xyz -r warm --simulate") == ad(packages=['xyz'],
+                                                       readaheads=['warm'],
+                                                       simulate=True)
 
   # optional arguments are parsed correctly.
   mad = default_mock_dict_for_parsed_args  # mock assert dict
-  assert parse_optional_args("--output filename.csv") == mad(output='filename.csv')
+  assert parse_optional_args("--output filename.csv") == mad(
+    output='filename.csv')
   assert parse_optional_args("-o filename.csv") == mad(output='filename.csv')
 
   assert parse_optional_args("--timeout 123") == mad(timeout=123)
@@ -145,36 +152,6 @@
   assert parse_optional_args("-in baz") == mad(inodes="baz")
 
 
-def generate_run_combinations(*args):
-  # expand out the generator values so that assert x == y works properly.
-  return [i for i in asr.generate_run_combinations(*args)]
-
-def test_generate_run_combinations():
-  blank_nd = typing.NamedTuple('Blank')
-  assert generate_run_combinations(blank_nd, {}) == [()], "empty"
-  assert generate_run_combinations(blank_nd, {'a' : ['a1', 'a2']}) == [()], "empty filter"
-  a_nd = typing.NamedTuple('A', [('a', str)])
-  assert generate_run_combinations(a_nd, {'a': None}) == [(None,)], "None"
-  assert generate_run_combinations(a_nd, {'a': ['a1', 'a2']}) == [('a1',), ('a2',)], "one item"
-  assert generate_run_combinations(a_nd,
-                                   {'a' : ['a1', 'a2'], 'b': ['b1', 'b2']}) == [('a1',), ('a2',)],\
-      "one item filter"
-  ab_nd = typing.NamedTuple('AB', [('a', str), ('b', str)])
-  assert generate_run_combinations(ab_nd,
-                                   {'a': ['a1', 'a2'],
-                                    'b': ['b1', 'b2']}) == [ab_nd('a1', 'b1'),
-                                                            ab_nd('a1', 'b2'),
-                                                            ab_nd('a2', 'b1'),
-                                                            ab_nd('a2', 'b2')],\
-      "two items"
-
-  assert generate_run_combinations(ab_nd,
-                                   {'as': ['a1', 'a2'],
-                                    'bs': ['b1', 'b2']}) == [ab_nd('a1', 'b1'),
-                                                             ab_nd('a1', 'b2'),
-                                                             ab_nd('a2', 'b1'),
-                                                             ab_nd('a2', 'b2')],\
-      "two items plural"
 
 def test_key_to_cmdline_flag():
   assert asr.key_to_cmdline_flag("abc") == "--abc"
@@ -182,138 +159,18 @@
   assert asr.key_to_cmdline_flag("ba_r") == "--ba-r"
   assert asr.key_to_cmdline_flag("ba_zs") == "--ba-z"
 
-
 def test_make_script_command_with_temp_output():
-  cmd_str, tmp_file = asr.make_script_command_with_temp_output("fake_script", args=[], count=1)
+  cmd_str, tmp_file = asr.make_script_command_with_temp_output("fake_script",
+                                                               args=[], count=1)
   with tmp_file:
     assert cmd_str == ["fake_script", "--count", "1", "--output", tmp_file.name]
 
-  cmd_str, tmp_file = asr.make_script_command_with_temp_output("fake_script", args=['a', 'b'], count=2)
+  cmd_str, tmp_file = asr.make_script_command_with_temp_output("fake_script",
+                                                               args=['a', 'b'],
+                                                               count=2)
   with tmp_file:
-    assert cmd_str == ["fake_script", "a", "b", "--count", "2", "--output", tmp_file.name]
-
-def test_parse_run_script_csv_file_flat():
-  # empty file -> empty list
-  f = io.StringIO("")
-  assert asr.parse_run_script_csv_file_flat(f) == []
-
-  # common case
-  f = io.StringIO("1,2,3")
-  assert asr.parse_run_script_csv_file_flat(f) == [1,2,3]
-
-  # ignore trailing comma
-  f = io.StringIO("1,2,3,4,5,")
-  assert asr.parse_run_script_csv_file_flat(f) == [1,2,3,4,5]
-
-def test_data_frame():
-  # trivial empty data frame
-  df = asr.DataFrame()
-  assert df.headers == []
-  assert df.data_table == []
-  assert df.data_table_transposed == []
-
-  # common case, same number of values in each place.
-  df = asr.DataFrame({'TotalTime_ms':[1,2,3], 'Displayed_ms':[4,5,6]})
-  assert df.headers == ['TotalTime_ms', 'Displayed_ms']
-  assert df.data_table == [[1, 4], [2, 5], [3, 6]]
-  assert df.data_table_transposed == [(1, 2, 3), (4, 5, 6)]
-
-  # varying num values.
-  df = asr.DataFrame({'many':[1,2], 'none': []})
-  assert df.headers == ['many', 'none']
-  assert df.data_table == [[1, None], [2, None]]
-  assert df.data_table_transposed == [(1, 2), (None, None)]
-
-  df = asr.DataFrame({'many':[], 'none': [1,2]})
-  assert df.headers == ['many', 'none']
-  assert df.data_table == [[None, 1], [None, 2]]
-  assert df.data_table_transposed == [(None, None), (1, 2)]
-
-  # merge multiple data frames
-  df = asr.DataFrame()
-  df.concat_rows(asr.DataFrame())
-  assert df.headers == []
-  assert df.data_table == []
-  assert df.data_table_transposed == []
-
-  df = asr.DataFrame()
-  df2 = asr.DataFrame({'TotalTime_ms':[1,2,3], 'Displayed_ms':[4,5,6]})
-
-  df.concat_rows(df2)
-  assert df.headers == ['TotalTime_ms', 'Displayed_ms']
-  assert df.data_table == [[1, 4], [2, 5], [3, 6]]
-  assert df.data_table_transposed == [(1, 2, 3), (4, 5, 6)]
-
-  df = asr.DataFrame({'TotalTime_ms':[1,2]})
-  df2 = asr.DataFrame({'Displayed_ms':[4,5]})
-
-  df.concat_rows(df2)
-  assert df.headers == ['TotalTime_ms', 'Displayed_ms']
-  assert df.data_table == [[1, None], [2, None], [None, 4], [None, 5]]
-
-  df = asr.DataFrame({'TotalTime_ms':[1,2]})
-  df2 = asr.DataFrame({'TotalTime_ms': [3, 4], 'Displayed_ms':[5, 6]})
-
-  df.concat_rows(df2)
-  assert df.headers == ['TotalTime_ms', 'Displayed_ms']
-  assert df.data_table == [[1, None], [2, None], [3, 5], [4, 6]]
-
-  # data_row_at
-  df = asr.DataFrame({'TotalTime_ms':[1,2,3], 'Displayed_ms':[4,5,6]})
-  assert df.data_row_at(-1) == [3,6]
-  assert df.data_row_at(2) == [3,6]
-  assert df.data_row_at(1) == [2,5]
-
-  # repeat
-  df = asr.DataFrame({'TotalTime_ms':[1], 'Displayed_ms':[4]})
-  df2 = asr.DataFrame({'TotalTime_ms':[1,1,1], 'Displayed_ms':[4,4,4]})
-  assert df.repeat(3) == df2
-
-  # repeat
-  df = asr.DataFrame({'TotalTime_ms':[1,1,1], 'Displayed_ms':[4,4,4]})
-  assert df.data_row_len == 3
-  df = asr.DataFrame({'TotalTime_ms':[1,1]})
-  assert df.data_row_len == 2
-
-  # repeat
-  df = asr.DataFrame({'TotalTime_ms':[1,1,1], 'Displayed_ms':[4,4,4]})
-  assert df.data_row_len == 3
-  df = asr.DataFrame({'TotalTime_ms':[1,1]})
-  assert df.data_row_len == 2
-
-  # data_row_reduce
-  df = asr.DataFrame({'TotalTime_ms':[1,1,1], 'Displayed_ms':[4,4,4]})
-  df_sum = asr.DataFrame({'TotalTime_ms':[3], 'Displayed_ms':[12]})
-  assert df.data_row_reduce(sum) == df_sum
-
-  # merge_data_columns
-  df = asr.DataFrame({'TotalTime_ms':[1,2,3]})
-  df2 = asr.DataFrame({'Displayed_ms':[3,4,5,6]})
-
-  df.merge_data_columns(df2)
-  assert df == asr.DataFrame({'TotalTime_ms':[1,2,3], 'Displayed_ms':[3,4,5,6]})
-
-  df = asr.DataFrame({'TotalTime_ms':[1,2,3]})
-  df2 = asr.DataFrame({'Displayed_ms':[3,4]})
-
-  df.merge_data_columns(df2)
-  assert df == asr.DataFrame({'TotalTime_ms':[1,2,3], 'Displayed_ms':[3,4]})
-
-  df = asr.DataFrame({'TotalTime_ms':[1,2,3]})
-  df2 = asr.DataFrame({'TotalTime_ms':[10,11]})
-
-  df.merge_data_columns(df2)
-  assert df == asr.DataFrame({'TotalTime_ms':[10,11,3]})
-
-  df = asr.DataFrame({'TotalTime_ms':[]})
-  df2 = asr.DataFrame({'TotalTime_ms':[10,11]})
-
-  df.merge_data_columns(df2)
-  assert df == asr.DataFrame({'TotalTime_ms':[10,11]})
-
-
-
-
+    assert cmd_str == ["fake_script", "a", "b", "--count", "2", "--output",
+                       tmp_file.name]
 
 def test_parse_run_script_csv_file():
   # empty file -> empty list
diff --git a/startop/scripts/app_startup/lib/args_utils.py b/startop/scripts/app_startup/lib/args_utils.py
new file mode 100644
index 0000000..080f3b5
--- /dev/null
+++ b/startop/scripts/app_startup/lib/args_utils.py
@@ -0,0 +1,77 @@
+import itertools
+import os
+import sys
+from typing import Any, Callable, Dict, Iterable, List, NamedTuple, Tuple, \
+    TypeVar, Optional
+
+# local import
+sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__)))))
+import lib.print_utils as print_utils
+
+T = TypeVar('T')
+# approximation of a (S : NamedTuple<T> where S() == T) metatype.
+NamedTupleMeta = Callable[..., T]
+FilterFuncType = Callable[[NamedTuple], bool]
+
+def dict_lookup_any_key(dictionary: dict, *keys: List[Any]):
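+  """Return the value of the first key that is present in dictionary.
+
+  Falls back to [None] (with a debug message) when none of the keys match.
+  """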
+  for k in keys:
+    if k in dictionary:
+      return dictionary[k]
+
+  print_utils.debug_print("None of the keys {} were in the dictionary".format(
+      keys))
+  return [None]
+
+def generate_run_combinations(named_tuple: NamedTupleMeta[T],
+                              opts_dict: Dict[str, List[Optional[object]]],
+                              loop_count: int = 1) -> Iterable[T]:
+  """
+  Create all possible combinations given the values in opts_dict[named_tuple._fields].
+
+  :type T: type annotation for the named_tuple type.
+  :param named_tuple: named tuple type, whose fields are used to make combinations for
+  :param opts_dict: dictionary of keys to value list. keys correspond to the named_tuple fields.
+  :param loop_count: number of repetitions.
+  :return: an iterable over named_tuple instances.
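+
+  Example (mirrors the unit tests): for a named tuple with fields ('a',),
+  opts_dict={'a': ['a1', 'a2']} and loop_count=2 yields
+  ('a1',), ('a2',), ('a1',), ('a2',).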
+  """
+  combinations_list = []
+  for k in named_tuple._fields:
+    # the key can be either singular or plural, e.g. 'package' or 'packages'
+    val = dict_lookup_any_key(opts_dict, k, k + "s")
+
+    # treat {'x': None} key value pairs as if it was [None]
+    # otherwise itertools.product throws an exception about not being able to iterate None.
+    combinations_list.append(val or [None])
+
+  print_utils.debug_print("opts_dict: ", opts_dict)
+  print_utils.debug_print_nd("named_tuple: ", named_tuple)
+  print_utils.debug_print("combinations_list: ", combinations_list)
+
+  for i in range(loop_count):
+    for combo in itertools.product(*combinations_list):
+      yield named_tuple(*combo)
+
+def filter_run_combinations(named_tuple: NamedTuple,
+                            filters: List[FilterFuncType]) -> bool:
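+  """Return True iff named_tuple is not rejected by any of the filters."""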
+  for filter in filters:
+    if filter(named_tuple):
+      return False
+  return True
+
+def generate_group_run_combinations(run_combinations: Iterable[NamedTuple],
+                                    dst_nt: NamedTupleMeta[T]) \
+    -> Iterable[Tuple[T, Iterable[NamedTuple]]]:
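+  """Group run combinations by the fields of dst_nt.
+
+  Yields (dst_nt instance, iterator over the combinations in that group)
+  tuples.
+  """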
+  def group_by_keys(src_nt):
+    src_d = src_nt._asdict()
+    # now remove the keys that aren't legal in dst.
+    for illegal_key in set(src_d.keys()) - set(dst_nt._fields):
+      if illegal_key in src_d:
+        del src_d[illegal_key]
+
+    return dst_nt(**src_d)
+
+  for args_list_it in itertools.groupby(run_combinations, group_by_keys):
+    (group_key_value, args_it) = args_list_it
+    yield (group_key_value, args_it)
diff --git a/startop/scripts/app_startup/lib/args_utils_test.py b/startop/scripts/app_startup/lib/args_utils_test.py
new file mode 100644
index 0000000..4b7e0fa
--- /dev/null
+++ b/startop/scripts/app_startup/lib/args_utils_test.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+#
+# Copyright 2018, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Unit tests for the args_utils.py script."""
+
+import typing
+
+import args_utils
+
+def generate_run_combinations(*args):
+  # expand out the generator values so that assert x == y works properly.
+  return [i for i in args_utils.generate_run_combinations(*args)]
+
+def test_generate_run_combinations():
+  blank_nd = typing.NamedTuple('Blank')
+  assert generate_run_combinations(blank_nd, {}, 1) == [()], "empty"
+  assert generate_run_combinations(blank_nd, {'a': ['a1', 'a2']}) == [
+    ()], "empty filter"
+  a_nd = typing.NamedTuple('A', [('a', str)])
+  assert generate_run_combinations(a_nd, {'a': None}) == [(None,)], "None"
+  assert generate_run_combinations(a_nd, {'a': ['a1', 'a2']}) == [('a1',), (
+    'a2',)], "one item"
+  assert generate_run_combinations(a_nd,
+                                   {'a': ['a1', 'a2'], 'b': ['b1', 'b2']}) == [
+           ('a1',), ('a2',)], \
+    "one item filter"
+  assert generate_run_combinations(a_nd, {'a': ['a1', 'a2']}, 2) == [('a1',), (
+    'a2',), ('a1',), ('a2',)], "one item, loop count 2"
+  ab_nd = typing.NamedTuple('AB', [('a', str), ('b', str)])
+  assert generate_run_combinations(ab_nd,
+                                   {'a': ['a1', 'a2'],
+                                    'b': ['b1', 'b2']}) == [ab_nd('a1', 'b1'),
+                                                            ab_nd('a1', 'b2'),
+                                                            ab_nd('a2', 'b1'),
+                                                            ab_nd('a2', 'b2')], \
+    "two items"
+
+  assert generate_run_combinations(ab_nd,
+                                   {'as': ['a1', 'a2'],
+                                    'bs': ['b1', 'b2']}) == [ab_nd('a1', 'b1'),
+                                                             ab_nd('a1', 'b2'),
+                                                             ab_nd('a2', 'b1'),
+                                                             ab_nd('a2', 'b2')], \
+    "two items plural"
diff --git a/startop/scripts/app_startup/lib/data_frame.py b/startop/scripts/app_startup/lib/data_frame.py
new file mode 100644
index 0000000..20a2308
--- /dev/null
+++ b/startop/scripts/app_startup/lib/data_frame.py
@@ -0,0 +1,201 @@
+import itertools
+from typing import Dict, List
+
+class DataFrame:
+  """Table-like class for storing a 2D cells table with named columns."""
+  def __init__(self, data: Dict[str, List[object]] = {}):
+    """
+    Create a new DataFrame from a dictionary (keys = headers,
+    values = columns).
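+
+    Example:
+      DataFrame({'a': [1, 2]}) is a two-row table with the single
+      header 'a'.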
+    """
+    self._headers = [i for i in data.keys()]
+    self._rows = []
+
+    row_num = 0
+
+    def get_data_row(idx):
+      r = {}
+      for header, header_data in data.items():
+
+        if not len(header_data) > idx:
+          continue
+
+        r[header] = header_data[idx]
+
+      return r
+
+    while True:
+      row_dict = get_data_row(row_num)
+      if len(row_dict) == 0:
+        break
+
+      self._append_row(row_dict.keys(), row_dict.values())
+      row_num = row_num + 1
+
+  def concat_rows(self, other: 'DataFrame') -> None:
+    """
+    In-place concatenate rows of other into the rows of the
+    current DataFrame.
+
+    None is added in pre-existing cells if new headers
+    are introduced.
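+
+    Example (mirrors the unit tests):
+      concatenating rows of {'b': [2]} onto {'a': [1]} gives the
+      data table [[1, None], [None, 2]].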
+    """
+    other_datas = other._data_only()
+
+    other_headers = other.headers
+
+    for d in other_datas:
+      self._append_row(other_headers, d)
+
+  def _append_row(self, headers: List[str], data: List[object]):
+    new_row = {k:v for k,v in zip(headers, data)}
+    self._rows.append(new_row)
+
+    for header in headers:
+      if not header in self._headers:
+        self._headers.append(header)
+
+  def __repr__(self):
+#     return repr(self._rows)
+    repr = ""
+
+    header_list = self._headers_only()
+
+    row_format = u""
+    for header in header_list:
+      row_format = row_format + u"{:>%d}" %(len(header) + 1)
+
+    repr = row_format.format(*header_list) + "\n"
+
+    for v in self._data_only():
+      repr = repr + row_format.format(*v) + "\n"
+
+    return repr
+
+  def __eq__(self, other):
+    if isinstance(other, self.__class__):
+      return self.headers == other.headers and self.data_table == other.data_table
+    else:
+      print("wrong instance", other.__class__)
+      return False
+
+  @property
+  def headers(self) -> List[str]:
+    return [i for i in self._headers_only()]
+
+  @property
+  def data_table(self) -> List[List[object]]:
+    return list(self._data_only())
+
+  @property
+  def data_table_transposed(self) -> List[List[object]]:
+    return list(self._transposed_data())
+
+  @property
+  def data_row_len(self) -> int:
+    return len(self._rows)
+
+  def data_row_at(self, idx) -> List[object]:
+    """
+    Return a single data row at the specified index (0th based).
+
+    Accepts negative indices, e.g. -1 is last row.
+    """
+    row_dict = self._rows[idx]
+    l = []
+
+    for h in self._headers_only():
+      l.append(row_dict.get(h)) # Adds None in blank spots.
+
+    return l
+
+  def copy(self) -> 'DataFrame':
+    """
+    Shallow copy of this DataFrame.
+    """
+    return self.repeat(count=0)
+
+  def repeat(self, count: int) -> 'DataFrame':
+    """
+    Returns a new DataFrame where each row of this dataframe is repeated count times.
+    A repeat of a row is adjacent to other repeats of that same row.
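+
+    Example:
+      DataFrame({'a': [1]}).repeat(3) == DataFrame({'a': [1, 1, 1]})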
+    """
+    df = DataFrame()
+    df._headers = self._headers.copy()
+
+    rows = []
+    for row in self._rows:
+      for i in range(count):
+        rows.append(row.copy())
+
+    df._rows = rows
+
+    return df
+
+  def merge_data_columns(self, other: 'DataFrame'):
+    """
+    Merge self and another DataFrame by adding the data from other column-wise.
+    For any headers that are the same, data from 'other' is preferred.
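+
+    Example (mirrors the unit tests):
+      merging {'a': [1, 2, 3]} with {'a': [10, 11]} yields
+      {'a': [10, 11, 3]}.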
+    """
+    for h in other._headers:
+      if not h in self._headers:
+        self._headers.append(h)
+
+    append_rows = []
+
+    for self_dict, other_dict in itertools.zip_longest(self._rows, other._rows):
+      if not self_dict:
+        d = {}
+        append_rows.append(d)
+      else:
+        d = self_dict
+
+      d_other = other_dict
+      if d_other:
+        for k,v in d_other.items():
+          d[k] = v
+
+    for r in append_rows:
+      self._rows.append(r)
+
+  def data_row_reduce(self, fnc) -> 'DataFrame':
+    """
+    Reduces the data row-wise by applying the fnc to each row (column-wise).
+    Empty cells are skipped.
+
+    fnc(Iterable[object]) -> object
+    fnc is applied over every non-empty cell in that column (descending row-wise).
+
+    Example:
+      DataFrame({'a':[1,2,3]}).data_row_reduce(sum) == DataFrame({'a':[6]})
+
+    Returns a new single-row DataFrame.
+    """
+    df = DataFrame()
+    df._headers = self._headers.copy()
+
+    def yield_by_column(header_key):
+      for row_dict in self._rows:
+        val = row_dict.get(header_key)
+        if val:
+          yield val
+
+    new_row_dict = {}
+    for h in df._headers:
+      cell_value = fnc(yield_by_column(h))
+      new_row_dict[h] = cell_value
+
+    df._rows = [new_row_dict]
+    return df
+
+  def _headers_only(self):
+    return self._headers
+
+  def _data_only(self):
+    row_len = len(self._rows)
+
+    for i in range(row_len):
+      yield self.data_row_at(i)
+
+  def _transposed_data(self):
+    return zip(*self._data_only())
diff --git a/startop/scripts/app_startup/lib/data_frame_test.py b/startop/scripts/app_startup/lib/data_frame_test.py
new file mode 100644
index 0000000..1cbc1cb
--- /dev/null
+++ b/startop/scripts/app_startup/lib/data_frame_test.py
@@ -0,0 +1,128 @@
+#!/usr/bin/env python3
+#
+# Copyright 2018, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Unit tests for the data_frame.py script."""
+
+from data_frame import DataFrame
+
+def test_data_frame():
+  # trivial empty data frame
+  df = DataFrame()
+  assert df.headers == []
+  assert df.data_table == []
+  assert df.data_table_transposed == []
+
+  # common case, same number of values in each place.
+  df = DataFrame({'TotalTime_ms': [1, 2, 3], 'Displayed_ms': [4, 5, 6]})
+  assert df.headers == ['TotalTime_ms', 'Displayed_ms']
+  assert df.data_table == [[1, 4], [2, 5], [3, 6]]
+  assert df.data_table_transposed == [(1, 2, 3), (4, 5, 6)]
+
+  # varying num values.
+  df = DataFrame({'many': [1, 2], 'none': []})
+  assert df.headers == ['many', 'none']
+  assert df.data_table == [[1, None], [2, None]]
+  assert df.data_table_transposed == [(1, 2), (None, None)]
+
+  df = DataFrame({'many': [], 'none': [1, 2]})
+  assert df.headers == ['many', 'none']
+  assert df.data_table == [[None, 1], [None, 2]]
+  assert df.data_table_transposed == [(None, None), (1, 2)]
+
+  # merge multiple data frames
+  df = DataFrame()
+  df.concat_rows(DataFrame())
+  assert df.headers == []
+  assert df.data_table == []
+  assert df.data_table_transposed == []
+
+  df = DataFrame()
+  df2 = DataFrame({'TotalTime_ms': [1, 2, 3], 'Displayed_ms': [4, 5, 6]})
+
+  df.concat_rows(df2)
+  assert df.headers == ['TotalTime_ms', 'Displayed_ms']
+  assert df.data_table == [[1, 4], [2, 5], [3, 6]]
+  assert df.data_table_transposed == [(1, 2, 3), (4, 5, 6)]
+
+  df = DataFrame({'TotalTime_ms': [1, 2]})
+  df2 = DataFrame({'Displayed_ms': [4, 5]})
+
+  df.concat_rows(df2)
+  assert df.headers == ['TotalTime_ms', 'Displayed_ms']
+  assert df.data_table == [[1, None], [2, None], [None, 4], [None, 5]]
+
+  df = DataFrame({'TotalTime_ms': [1, 2]})
+  df2 = DataFrame({'TotalTime_ms': [3, 4], 'Displayed_ms': [5, 6]})
+
+  df.concat_rows(df2)
+  assert df.headers == ['TotalTime_ms', 'Displayed_ms']
+  assert df.data_table == [[1, None], [2, None], [3, 5], [4, 6]]
+
+  # data_row_at
+  df = DataFrame({'TotalTime_ms': [1, 2, 3], 'Displayed_ms': [4, 5, 6]})
+  assert df.data_row_at(-1) == [3, 6]
+  assert df.data_row_at(2) == [3, 6]
+  assert df.data_row_at(1) == [2, 5]
+
+  # repeat
+  df = DataFrame({'TotalTime_ms': [1], 'Displayed_ms': [4]})
+  df2 = DataFrame({'TotalTime_ms': [1, 1, 1], 'Displayed_ms': [4, 4, 4]})
+  assert df.repeat(3) == df2
+
+  # repeat
+  df = DataFrame({'TotalTime_ms': [1, 1, 1], 'Displayed_ms': [4, 4, 4]})
+  assert df.data_row_len == 3
+  df = DataFrame({'TotalTime_ms': [1, 1]})
+  assert df.data_row_len == 2
+
+  # repeat
+  df = DataFrame({'TotalTime_ms': [1, 1, 1], 'Displayed_ms': [4, 4, 4]})
+  assert df.data_row_len == 3
+  df = DataFrame({'TotalTime_ms': [1, 1]})
+  assert df.data_row_len == 2
+
+  # data_row_reduce
+  df = DataFrame({'TotalTime_ms': [1, 1, 1], 'Displayed_ms': [4, 4, 4]})
+  df_sum = DataFrame({'TotalTime_ms': [3], 'Displayed_ms': [12]})
+  assert df.data_row_reduce(sum) == df_sum
+
+  # merge_data_columns
+  df = DataFrame({'TotalTime_ms': [1, 2, 3]})
+  df2 = DataFrame({'Displayed_ms': [3, 4, 5, 6]})
+
+  df.merge_data_columns(df2)
+  assert df == DataFrame(
+    {'TotalTime_ms': [1, 2, 3], 'Displayed_ms': [3, 4, 5, 6]})
+
+  df = DataFrame({'TotalTime_ms': [1, 2, 3]})
+  df2 = DataFrame({'Displayed_ms': [3, 4]})
+
+  df.merge_data_columns(df2)
+  assert df == DataFrame(
+    {'TotalTime_ms': [1, 2, 3], 'Displayed_ms': [3, 4]})
+
+  df = DataFrame({'TotalTime_ms': [1, 2, 3]})
+  df2 = DataFrame({'TotalTime_ms': [10, 11]})
+
+  df.merge_data_columns(df2)
+  assert df == DataFrame({'TotalTime_ms': [10, 11, 3]})
+
+  df = DataFrame({'TotalTime_ms': []})
+  df2 = DataFrame({'TotalTime_ms': [10, 11]})
+
+  df.merge_data_columns(df2)
+  assert df == DataFrame({'TotalTime_ms': [10, 11]})
diff --git a/startop/scripts/app_startup/query_compiler_filter.py b/startop/scripts/app_startup/query_compiler_filter.py
index dc97c66..ea14264 100755
--- a/startop/scripts/app_startup/query_compiler_filter.py
+++ b/startop/scripts/app_startup/query_compiler_filter.py
@@ -31,13 +31,15 @@
 #
 
 import argparse
-import sys
+import os
 import re
+import sys
 
 # TODO: refactor this with a common library file with analyze_metrics.py
-import app_startup_runner
-from app_startup_runner import _debug_print
-from app_startup_runner import execute_arbitrary_command
+DIR = os.path.abspath(os.path.dirname(__file__))
+sys.path.append(os.path.dirname(DIR))
+import lib.cmd_utils as cmd_utils
+import lib.print_utils as print_utils
 
 from typing import List, NamedTuple, Iterable
 
@@ -74,7 +76,11 @@
       x86: [status=quicken] [reason=install]
 """ %(package, package, package, package)
 
-  code, res = execute_arbitrary_command(['adb', 'shell', 'dumpsys', 'package', package], simulate=False, timeout=5)
+  code, res = cmd_utils.execute_arbitrary_command(['adb', 'shell', 'dumpsys',
+                                                   'package', package],
+                                                  simulate=False,
+                                                  timeout=5,
+                                                  shell=False)
   if code:
     return res
   else:
@@ -115,7 +121,7 @@
       line = str_lines[line_num]
       current_indent = get_indent_level(line)
 
-      _debug_print("INDENT=%d, LINE=%s" %(current_indent, line))
+      print_utils.debug_print("INDENT=%d, LINE=%s" %(current_indent, line))
 
       current_label = line.lstrip()
 
@@ -135,7 +141,7 @@
       break
 
   new_remainder = str_lines[line_num::]
-  _debug_print("NEW REMAINDER: ", new_remainder)
+  print_utils.debug_print("NEW REMAINDER: ", new_remainder)
 
   parse_tree = ParseTree(label, children)
   return ParseResult(new_remainder, parse_tree)
@@ -159,7 +165,7 @@
 def find_first_compiler_filter(dexopt_state: DexoptState, package: str, instruction_set: str) -> str:
   lst = find_all_compiler_filters(dexopt_state, package)
 
-  _debug_print("all compiler filters: ", lst)
+  print_utils.debug_print("all compiler filters: ", lst)
 
   for compiler_filter_info in lst:
     if not instruction_set:
@@ -180,10 +186,10 @@
   if not package_tree:
     raise AssertionError("Could not find any package subtree for package %s" %(package))
 
-  _debug_print("package tree: ", package_tree)
+  print_utils.debug_print("package tree: ", package_tree)
 
   for path_tree in find_parse_children(package_tree, "path: "):
-    _debug_print("path tree: ", path_tree)
+    print_utils.debug_print("path tree: ", path_tree)
 
     matchre = re.compile("([^:]+):\s+\[status=([^\]]+)\]\s+\[reason=([^\]]+)\]")
 
@@ -198,16 +204,16 @@
 
 def main() -> int:
   opts = parse_options()
-  app_startup_runner._debug = opts.debug
+  print_utils.DEBUG = opts.debug
   if _DEBUG_FORCE is not None:
-    app_startup_runner._debug = _DEBUG_FORCE
-  _debug_print("parsed options: ", opts)
+    print_utils.DEBUG = _DEBUG_FORCE
+  print_utils.debug_print("parsed options: ", opts)
 
   # Note: This can often 'fail' if the package isn't actually installed.
   package_dumpsys = remote_dumpsys_package(opts.package, opts.simulate)
-  _debug_print("package dumpsys: ", package_dumpsys)
+  print_utils.debug_print("package dumpsys: ", package_dumpsys)
   dumpsys_parse_tree = parse_tab_tree(package_dumpsys, package_dumpsys)
-  _debug_print("parse tree: ", dumpsys_parse_tree)
+  print_utils.debug_print("parse tree: ", dumpsys_parse_tree)
   dexopt_state = parse_dexopt_state(dumpsys_parse_tree)
 
   filter = find_first_compiler_filter(dexopt_state, opts.package, opts.instruction_set)
diff --git a/startop/scripts/app_startup/run_app_with_prefetch.py b/startop/scripts/app_startup/run_app_with_prefetch.py
index 052db9d..8a9135b 100644
--- a/startop/scripts/app_startup/run_app_with_prefetch.py
+++ b/startop/scripts/app_startup/run_app_with_prefetch.py
@@ -30,7 +30,7 @@
 import os
 import sys
 import time
-from typing import List, Tuple, Optional
+from typing import List, Tuple, Optional, NamedTuple
 
 # local imports
 import lib.adb_utils as adb_utils
@@ -47,10 +47,20 @@
 import lib.cmd_utils as cmd_utils
 import iorap.lib.iorapd_utils as iorapd_utils
 
+RunCommandArgs = NamedTuple('RunCommandArgs',
+                            [('package', str),
+                             ('readahead', str),
+                             ('activity', Optional[str]),
+                             ('compiler_filter', Optional[str]),
+                             ('timeout', Optional[int]),
+                             ('debug', bool),
+                             ('simulate', bool),
+                             ('input', Optional[str])])
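+
+# A hypothetical example of constructing RunCommandArgs directly (values are
+# illustrative only, not defaults):
+#   RunCommandArgs(package='com.example.app', readahead='cold', activity=None,
+#                  compiler_filter=None, timeout=10, debug=False,
+#                  simulate=False, input=None)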
+
 def parse_options(argv: List[str] = None):
   """Parses command line arguments and return an argparse Namespace object."""
   parser = argparse.ArgumentParser(
-    description='Run an Android application once and measure startup time.'
+      description='Run an Android application once and measure startup time.'
   )
 
   required_named = parser.add_argument_group('required named arguments')
@@ -91,43 +101,44 @@
 
   return parser.parse_args(argv)
 
-def validate_options(opts: argparse.Namespace) -> bool:
+def validate_options(args: RunCommandArgs) -> Tuple[bool, RunCommandArgs]:
   """Validates the activity and trace file if needed.
 
   Returns:
-    A bool indicates whether the activity is valid and trace file exists if
-    necessary.
+    A (passed, args) tuple: whether the activity is valid (and the trace
+    file exists when needed), plus the possibly-updated RunCommandArgs.
   """
-  needs_trace_file = (opts.readahead != 'cold' and opts.readahead != 'warm')
-  if needs_trace_file and (opts.input is None or
-                           not os.path.exists(opts.input)):
+  needs_trace_file = (args.readahead != 'cold' and args.readahead != 'warm')
+  if needs_trace_file and (args.input is None or
+                           not os.path.exists(args.input)):
     print_utils.error_print('--input not specified!')
-    return False
+    return False, args
 
-  if opts.simulate:
-    opts.activity = 'act'
+  if args.simulate:
+    args = args._replace(activity='act')
 
-  if not opts.activity:
-    _, opts.activity = cmd_utils.run_shell_func(IORAP_COMMON_BASH_SCRIPT,
-                                                'get_activity_name',
-                                                [opts.package])
+  if not args.activity:
+    _, activity = cmd_utils.run_shell_func(IORAP_COMMON_BASH_SCRIPT,
+                                           'get_activity_name',
+                                           [args.package])
+    args = args._replace(activity=activity)
 
-  if not opts.activity:
+  if not args.activity:
     print_utils.error_print('Activity name could not be found, '
-                              'invalid package name?!')
-    return False
+                            'invalid package name?!')
+    return False, args
 
   # Install necessary trace file. This must be after the activity checking.
   if needs_trace_file:
     passed = iorapd_utils.iorapd_compiler_install_trace_file(
-      opts.package, opts.activity, opts.input)
+        args.package, args.activity, args.input)
     if not cmd_utils.SIMULATE and not passed:
       print_utils.error_print('Failed to install compiled TraceFile.pb for '
                               '"{}/{}"'.
-                                format(opts.package, opts.activity))
-      return False
+                                format(args.package, args.activity))
+      return False, args
 
-  return True
+  return True, args
 
 def set_up_adb_env():
   """Sets up adb environment."""
@@ -149,17 +160,18 @@
 
   passed, current_compiler_filter_info = \
     cmd_utils.run_shell_command(
-      '{} --package {}'.format(os.path.join(DIR, 'query_compiler_filter.py'),
-                               package))
+        '{} --package {}'.format(os.path.join(DIR, 'query_compiler_filter.py'),
+                                 package))
 
   if passed != 0:
     return passed
 
   # TODO: call query_compiler_filter directly as a python function instead of
   #  these shell calls.
-  current_compiler_filter, current_reason, current_isa = current_compiler_filter_info.split(' ')
+  current_compiler_filter, current_reason, current_isa = \
+    current_compiler_filter_info.split(' ')
   print_utils.debug_print('Compiler Filter={} Reason={} Isa={}'.format(
-    current_compiler_filter, current_reason, current_isa))
+      current_compiler_filter, current_reason, current_isa))
 
   # Don't trust reasons that aren't 'unknown' because that means
   #  we didn't manually force the compilation filter.
@@ -202,7 +214,7 @@
     print_utils.debug_print('metric: "{metric_name}", '
                             'value: "{metric_value}" '.
                               format(metric_name=metric_name,
-                                     metric_value=metric_value))
+                                     metric_value=metric_value))
 
     all_metrics.append((metric_name, metric_value))
   return all_metrics
@@ -230,7 +242,7 @@
   """
   total_time = _parse_total_time(am_start_output)
   displayed_time = adb_utils.blocking_wait_for_logcat_displayed_time(
-    pre_launch_timestamp, package, timeout)
+      pre_launch_timestamp, package, timeout)
 
   return 'TotalTime={}\nDisplayedTime={}'.format(total_time, displayed_time)
 
@@ -268,10 +280,10 @@
                                                '"{DIR}/launch_application" '
                                                '"{package}" '
                                                '"{activity}"'
-                                                .format(timeout=timeout,
-                                                        DIR=DIR,
-                                                        package=package,
-                                                        activity=activity))
+                                                 .format(timeout=timeout,
+                                                         DIR=DIR,
+                                                         package=package,
+                                                         activity=activity))
   if not passed and not simulate:
     return None
 
@@ -285,7 +297,7 @@
     results = parse_metrics_output(output, simulate)
 
   passed = perform_post_launch_cleanup(
-    readahead, package, activity, timeout, debug, pre_launch_timestamp)
+      readahead, package, activity, timeout, debug, pre_launch_timestamp)
   if not passed and not simulate:
     print_utils.error_print('Cannot perform post launch cleanup!')
     return None
@@ -306,10 +318,10 @@
   """
   if readahead != 'warm' and readahead != 'cold':
     passed = iorapd_utils.wait_for_iorapd_finish(package,
-                                               activity,
-                                               timeout,
-                                               debug,
-                                               logcat_timestamp)
+                                                 activity,
+                                                 timeout,
+                                                 debug,
+                                                 logcat_timestamp)
 
     if not passed:
       return passed
@@ -319,16 +331,16 @@
   # Don't need to do anything for warm or cold.
   return True
 
-def run_test(opts: argparse.Namespace) -> List[Tuple[str, str]]:
+def run_test(args: RunCommandArgs) -> List[Tuple[str, str]]:
   """Runs one test using given options.
 
   Returns:
-    A list of tuples that including metric name, metric value and anything left.
+    A list of (metric name, metric value) tuples.
   """
-  print_utils.DEBUG = opts.debug
-  cmd_utils.SIMULATE = opts.simulate
+  print_utils.DEBUG = args.debug
+  cmd_utils.SIMULATE = args.simulate
 
-  passed = validate_options(opts)
+  passed, args = validate_options(args)
   if not passed:
     return None
 
@@ -337,15 +349,22 @@
   # Ensure the APK is currently compiled with whatever we passed in
   # via --compiler-filter.
   # No-op if this option was not passed in.
-  if not configure_compiler_filter(opts.compiler_filter, opts.package,
-                                   opts.activity):
+  if not configure_compiler_filter(args.compiler_filter, args.package,
+                                   args.activity):
     return None
 
-  return run(opts.readahead, opts.package, opts.activity, opts.timeout,
-             opts.simulate, opts.debug)
+  return run(args.readahead, args.package, args.activity, args.timeout,
+             args.simulate, args.debug)
+
+def get_args_from_opts(opts: argparse.Namespace) -> RunCommandArgs:
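+  # Assumes every RunCommandArgs field was declared as a parse_options flag;
+  # getattr without a default raises AttributeError for any missing field.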
+  kwargs = {}
+  for field in RunCommandArgs._fields:
+    kwargs[field] = getattr(opts, field)
+  return RunCommandArgs(**kwargs)
 
 def main():
-  args = parse_options()
+  opts = parse_options()
+  args = get_args_from_opts(opts)
   result = run_test(args)
 
   if result is None:
diff --git a/startop/scripts/lib/logcat_utils.py b/startop/scripts/lib/logcat_utils.py
index 13b1c3a..8a3d00b 100644
--- a/startop/scripts/lib/logcat_utils.py
+++ b/startop/scripts/lib/logcat_utils.py
@@ -22,7 +22,7 @@
 from typing import Optional, Pattern
 
 # local import
-import print_utils
+import lib.print_utils as print_utils
 
 def parse_logcat_datetime(timestamp: str) -> Optional[datetime]:
   """Parses the timestamp of logcat.
@@ -61,7 +61,7 @@
                                             pattern: Pattern,
                                             timeout: datetime) -> Optional[str]:
-  # Show the year in the timestampe.
+  # Show the year and a UTC timezone in the timestamp.
-  logcat_cmd = 'adb logcat -v year -v threadtime -T'.split()
+  logcat_cmd = 'adb logcat -v UTC -v year -v threadtime -T'.split()
   logcat_cmd.append(str(timestamp))
   print_utils.debug_print('[LOGCAT]:' + ' '.join(logcat_cmd))
 
diff --git a/startop/scripts/lib/print_utils.py b/startop/scripts/lib/print_utils.py
index c33e0f9..8c5999d 100644
--- a/startop/scripts/lib/print_utils.py
+++ b/startop/scripts/lib/print_utils.py
@@ -27,3 +27,41 @@
 
 def error_print(*args, **kwargs):
   print('[ERROR]:', *args, file=sys.stderr, **kwargs)
+
+def _expand_gen_repr(args):
+  """Like repr but any generator-like object has its iterator consumed
+  and then called repr on."""
+  new_args_list = []
+  for i in args:
+    # detect iterable objects that do not have their own override of __str__
+    if hasattr(i, '__iter__'):
+      to_str = getattr(i, '__str__')
+      # only built-in method wrappers expose __objclass__; a Python-level
+      # __str__ override does not, so default to None instead of raising
+      if getattr(to_str, '__objclass__', None) == object:
+        # the repr for a generator is just type+address, expand it out instead.
+        new_args_list.append([_expand_gen_repr([j])[0] for j in i])
+        continue
+    # normal case: uses the built-in to-string
+    new_args_list.append(i)
+  return new_args_list
+
+def debug_print_gen(*args, **kwargs):
+  """Like _debug_print but will turn any iterable args into a list."""
+  if not DEBUG:
+    return
+
+  new_args_list = _expand_gen_repr(args)
+  debug_print(*new_args_list, **kwargs)
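+
+# For example, with DEBUG enabled,
+#   debug_print_gen('squares:', (x * x for x in range(3)))
+# would log the expanded list [0, 1, 4] instead of an opaque
+# '<generator object ...>' repr.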
+
+def debug_print_nd(*args, **kwargs):
+  """Like _debug_print but will turn any NamedTuple-type args into a string."""
+  if not DEBUG:
+    return
+
+  new_args_list = []
+  for i in args:
+    if hasattr(i, '_field_types'):
+      new_args_list.append("%s: %s" % (i.__name__, i._field_types))
+    else:
+      new_args_list.append(i)
+
+  debug_print(*new_args_list, **kwargs)
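+
+# For example, given Point = NamedTuple('Point', [('x', int), ('y', int)]),
+# debug_print_nd(Point) would print something like
+#   "Point: OrderedDict([('x', <class 'int'>), ('y', <class 'int'>)])"
+# since typing.NamedTuple classes expose _field_types (pre-Python 3.9).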