-rwxr-xr-x  client/bin/autotest | 16
-rwxr-xr-x  client/bin/autotest.py | 22
-rwxr-xr-x  client/bin/autotest_client | 4
-rwxr-xr-x  client/bin/autotest_utils.py | 792
-rw-r--r--  client/bin/boottool.py | 145
-rw-r--r--  client/bin/common.py | 2
-rw-r--r--  client/bin/config.py | 62
-rw-r--r--  client/bin/cpuset.py | 438
-rwxr-xr-x  client/bin/fd_stack.py | 250
-rwxr-xr-x  client/bin/filesystem.py | 322
-rwxr-xr-x  client/bin/grub.py | 350
-rwxr-xr-x  client/bin/harness.py | 102
-rwxr-xr-x  client/bin/harness_ABAT.py | 222
-rwxr-xr-x  client/bin/harness_simple.py | 56
-rw-r--r--  client/bin/harness_standalone.py | 74
-rwxr-xr-x  client/bin/job.py | 1934
-rwxr-xr-x  client/bin/kernel.py | 1400
-rwxr-xr-x  client/bin/kernel_config.py | 212
-rw-r--r--  client/bin/kernel_versions.py | 140
-rwxr-xr-x  client/bin/kernel_versions_unittest.py | 140
-rwxr-xr-x  client/bin/kernelexpand-test.py | 254
-rwxr-xr-x  client/bin/kernelexpand.py | 302
-rw-r--r--  client/bin/os_dep.py | 36
-rw-r--r--  client/bin/package.py | 520
-rw-r--r--  client/bin/parallel.py | 56
-rwxr-xr-x  client/bin/profiler.py | 27
-rwxr-xr-x  client/bin/profilers.py | 112
-rwxr-xr-x  client/bin/sysinfo.py | 140
-rwxr-xr-x  client/bin/test.py | 46
-rw-r--r--  client/bin/test_config.py | 132
-rw-r--r--  client/bin/xen.py | 294
-rwxr-xr-x  client/common_lib/barrier.py | 956
-rwxr-xr-x  client/common_lib/check_version.py | 56
-rw-r--r--  client/common_lib/error.py | 138
-rw-r--r--  client/common_lib/global_config.py | 204
-rw-r--r--  client/common_lib/global_config_unittest.py | 160
-rw-r--r--  client/common_lib/logging.py | 80
-rw-r--r--  client/common_lib/mail.py | 66
-rw-r--r--  client/common_lib/mail_unittest.py | 98
-rw-r--r--  client/common_lib/pexpect.py | 12
-rw-r--r--  client/common_lib/pxssh.py | 20
-rw-r--r--  client/common_lib/test.py | 476
-rw-r--r--  client/common_lib/test_utils/mock.py | 678
-rw-r--r--  client/common_lib/test_utils/mock_demo.py | 182
-rw-r--r--  client/common_lib/test_utils/mock_demo_MUT.py | 4
-rw-r--r--  client/common_lib/utils.py | 818
-rwxr-xr-x  client/deps/boottool/boottool.py | 20
-rwxr-xr-x  client/deps/libaio/libaio.py | 12
-rwxr-xr-x  client/deps/libnet/libnet.py | 24
-rwxr-xr-x  client/deps/mysql/mysql.py | 50
-rwxr-xr-x  client/deps/pgpool/pgpool.py | 32
-rwxr-xr-x  client/deps/pgsql/pgsql.py | 24
-rwxr-xr-x  client/profilers/catprofile/catprofile.py | 75
-rwxr-xr-x  client/profilers/iostat/iostat.py | 27
-rwxr-xr-x  client/profilers/lockmeter/lockmeter.py | 50
-rw-r--r--  client/profilers/mpstat/mpstat.py | 26
-rwxr-xr-x  client/profilers/oprofile/oprofile.py | 198
-rwxr-xr-x  client/profilers/readprofile/readprofile.py | 60
-rwxr-xr-x  client/profilers/vmstat/vmstat.py | 27
-rw-r--r--  client/setup_modules.py | 152
-rwxr-xr-x  client/tests/aborttest/aborttest.py | 6
-rw-r--r--  client/tests/aio_dio_bugs/aio_dio_bugs.py | 50
-rwxr-xr-x  client/tests/aiostress/aiostress.py | 118
-rw-r--r--  client/tests/barriertest/barriertest.py | 38
-rwxr-xr-x  client/tests/bash_shared_mapping/bash_shared_mapping.py | 72
-rwxr-xr-x  client/tests/bonnie/bonnie.py | 114
-rw-r--r--  client/tests/btreplay/btreplay.py | 247
-rw-r--r--  client/tests/container_functional/container_functional.py | 108
-rw-r--r--  client/tests/cpu_hotplug/cpu_hotplug.py | 78
-rwxr-xr-x  client/tests/cyclictest/cyclictest.py | 16
-rw-r--r--  client/tests/dacapo/dacapo.py | 185
-rwxr-xr-x  client/tests/dbench/dbench.py | 88
-rw-r--r--  client/tests/dbt2/dbt2.py | 112
-rwxr-xr-x  client/tests/disktest/disktest.py | 109
-rw-r--r--  client/tests/fio/fio.py | 56
-rw-r--r--  client/tests/fs_mark/fs_mark.py | 48
-rwxr-xr-x  client/tests/fsfuzzer/fsfuzzer.py | 42
-rw-r--r--  client/tests/fsstress/fsstress.py | 46
-rwxr-xr-x  client/tests/fsx/fsx.py | 82
-rw-r--r--  client/tests/interbench/interbench.py | 48
-rw-r--r--  client/tests/iozone/iozone.py | 110
-rw-r--r--  client/tests/isic/isic.py | 30
-rwxr-xr-x  client/tests/kernbench/kernbench.py | 208
-rwxr-xr-x  client/tests/kernelbuild/kernelbuild.py | 14
-rw-r--r--  client/tests/kvmtest/control.with_modbuild | 70
-rw-r--r--  client/tests/kvmtest/kvmtest.py | 310
-rw-r--r--  client/tests/libhugetlbfs/libhugetlbfs.py | 110
-rwxr-xr-x  client/tests/linus_stress/linus_stress.py | 56
-rwxr-xr-x  client/tests/lmbench/lmbench.py | 82
-rw-r--r--  client/tests/lsb_dtk/lsb_dtk.py | 302
-rw-r--r--  client/tests/ltp/ltp-diff.py | 170
-rwxr-xr-x  client/tests/ltp/ltp.py | 86
-rw-r--r--  client/tests/netperf2/control.client | 1
-rw-r--r--  client/tests/netperf2/control.parallel | 4
-rwxr-xr-x  client/tests/netperf2/netperf2.py | 144
-rwxr-xr-x  client/tests/parallel_dd/parallel_dd.py | 232
-rw-r--r--  client/tests/pi_tests/pi_tests.py | 24
-rwxr-xr-x  client/tests/pktgen/pktgen.py | 73
-rwxr-xr-x  client/tests/posixtest/posixtest.py | 33
-rwxr-xr-x  client/tests/raisetest/raisetest.py | 6
-rwxr-xr-x  client/tests/reaim/reaim.py | 164
-rw-r--r--  client/tests/rmaptest/rmaptest.py | 28
-rw-r--r--  client/tests/rtlinuxtests/rtlinuxtests.py | 34
-rw-r--r--  client/tests/rttester/rttester.py | 18
-rw-r--r--  client/tests/scrashme/scrashme.py | 48
-rw-r--r--  client/tests/selftest/selftest.py | 92
-rw-r--r--  client/tests/signaltest/signaltest.py | 16
-rwxr-xr-x  client/tests/sleeptest/sleeptest.py | 14
-rwxr-xr-x  client/tests/sparse/sparse.py | 38
-rwxr-xr-x  client/tests/spew/spew.py | 76
-rw-r--r--  client/tests/stress/stress.py | 102
-rw-r--r--  client/tests/sysbench/sysbench.py | 370
-rwxr-xr-x  client/tests/tbench/tbench.py | 110
-rw-r--r--  client/tests/tiobench/tiobench.py | 51
-rwxr-xr-x  client/tests/tsc/tsc.py | 22
-rwxr-xr-x  client/tests/unixbench/unixbench.py | 164
-rw-r--r--  client/tests/xmtest/xmtest.py | 42
-rwxr-xr-x  client/tools/autotest | 20
-rwxr-xr-x  client/tools/avgtime | 43
-rwxr-xr-x  client/tools/diffprofile | 78
-rwxr-xr-x  client/tools/make_clean | 35
-rw-r--r--  conmux/contrib/console_check.py | 516
-rw-r--r--  frontend/afe/control_file.py | 114
-rw-r--r--  frontend/afe/enum.py | 110
-rw-r--r--  frontend/afe/feeds/feed.py | 82
-rw-r--r--  frontend/afe/json_rpc/proxy.py | 34
-rw-r--r--  frontend/afe/json_rpc/serviceHandler.py | 4
-rw-r--r--  frontend/afe/management.py | 52
-rw-r--r--  frontend/afe/model_logic.py | 1120
-rw-r--r--  frontend/afe/models.py | 864
-rw-r--r--  frontend/afe/rpc_client_lib.py | 2
-rw-r--r--  frontend/afe/rpc_handler.py | 108
-rw-r--r--  frontend/afe/rpc_interface.py | 442
-rw-r--r--  frontend/afe/rpc_utils.py | 222
-rw-r--r--  frontend/afe/simplejson/__init__.py | 20
-rw-r--r--  frontend/afe/simplejson/decoder.py | 8
-rw-r--r--  frontend/afe/simplejson/encoder.py | 10
-rw-r--r--  frontend/afe/simplejson/scanner.py | 2
-rw-r--r--  frontend/afe/test.py | 68
-rw-r--r--  frontend/afe/urls.py | 2
-rw-r--r--  frontend/afe/views.py | 44
-rw-r--r--  frontend/apache_auth.py | 96
-rw-r--r--  frontend/frontend_unittest.py | 10
-rwxr-xr-x  frontend/manage.py | 10
-rw-r--r--  frontend/migrations/001_initial_db.py | 42
-rw-r--r--  frontend/migrations/002_cleanup_fields.py | 18
-rw-r--r--  frontend/migrations/003_test_synch_type.py | 10
-rw-r--r--  frontend/migrations/004_add_indexes.py | 14
-rw-r--r--  frontend/migrations/005_one_more_index.py | 6
-rw-r--r--  frontend/migrations/006_host_label_invalid.py | 8
-rw-r--r--  frontend/migrations/007_indexes_on_acl_tables.py | 14
-rw-r--r--  frontend/settings.py | 8
-rwxr-xr-x  migrate/migrate.py | 570
-rw-r--r--  migrate/migrate_unittest.py | 218
-rwxr-xr-x  mirror/mirror | 390
-rwxr-xr-x  mirror/rsync.py | 46
-rwxr-xr-x  mirror/trigger.py | 94
-rw-r--r--  scheduler/monitor_db.py | 2860
-rw-r--r--  scheduler/monitor_db_unittest.py | 980
-rwxr-xr-x  scheduler/monitor_queue | 206
-rwxr-xr-x  scheduler/start_all_queues | 38
-rwxr-xr-x  server/autoserv | 280
-rw-r--r--  server/autoserv_parser.py | 166
-rw-r--r--  server/autotest.py | 890
-rw-r--r--  server/autotest_unittest.py | 516
-rw-r--r--  server/deb_kernel.py | 260
-rw-r--r--  server/git.py | 318
-rw-r--r--  server/git_kernel.py | 80
-rw-r--r--  server/hosts/base_classes.py | 114
-rw-r--r--  server/hosts/bootloader.py | 298
-rw-r--r--  server/hosts/bootloader_unittest.py | 676
-rw-r--r--  server/hosts/guest.py | 94
-rw-r--r--  server/hosts/kvm_guest.py | 48
-rw-r--r--  server/hosts/remote.py | 38
-rw-r--r--  server/hosts/site_host.py | 24
-rw-r--r--  server/hosts/ssh_host.py | 1728
-rw-r--r--  server/hypervisor.py | 42
-rw-r--r--  server/installable_object.py | 70
-rw-r--r--  server/kernel.py | 50
-rw-r--r--  server/kvm.py | 848
-rw-r--r--  server/rpm_kernel.py | 290
-rwxr-xr-x  server/samples/autoserv_console.srv | 12
-rw-r--r--  server/samples/continuous_reboot.srv | 6
-rw-r--r--  server/samples/failtest.srv | 6
-rw-r--r--  server/samples/info.srv | 10
-rw-r--r--  server/samples/kernbench.srv | 6
-rw-r--r--  server/samples/netperf-guest-to-host-far.srv | 20
-rw-r--r--  server/samples/parallel.srv | 4
-rw-r--r--  server/samples/parallel_kvm.srv | 12
-rw-r--r--  server/samples/parallel_sleeptest.srv | 48
-rw-r--r--  server/samples/profilertest.srv | 78
-rw-r--r--  server/samples/reboot.srv | 6
-rw-r--r--  server/samples/run_test.srv | 52
-rw-r--r--  server/samples/sleeptest.srv | 6
-rw-r--r--  server/samples/uname.srv | 4
-rw-r--r--  server/self-test/alltests_suite.py | 8
-rw-r--r--  server/self-test/autotest_test.py | 224
-rw-r--r--  server/self-test/utils_test.py | 72
-rwxr-xr-x  server/server_job.py | 1418
-rw-r--r--  server/site_autoserv_parser.py | 6
-rw-r--r--  server/source_kernel.py | 88
-rw-r--r--  server/standalone_profiler.py | 40
-rw-r--r--  server/status.py | 352
-rw-r--r--  server/subcommand.py | 350
-rwxr-xr-x  server/test.py | 6
-rwxr-xr-x  server/tests/sleeptest/sleeptest.py | 6
-rw-r--r--  server/utils.py | 548
-rw-r--r--  server/utils_unittest.py | 30
-rw-r--r--  server/warning_monitor.py | 26
-rw-r--r--  tko/db.py | 942
-rw-r--r--  tko/db_mysql.py | 6
-rw-r--r--  tko/db_postgres.py | 6
-rw-r--r--  tko/delete_job_results | 7
-rwxr-xr-x  tko/display.py | 539
-rwxr-xr-x  tko/frontend.py | 496
-rwxr-xr-x  tko/machine_load | 10
-rwxr-xr-x  tko/migrations/001_initial_db.py | 180
-rw-r--r--  tko/migrations/002_add_job_timestamps.py | 176
-rw-r--r--  tko/migrations/003_add_test_timestamps.py | 192
-rw-r--r--  tko/migrations/004_add_test_started.py | 196
-rw-r--r--  tko/migrations/005_add_testna_status.py | 4
-rw-r--r--  tko/migrations/006_add_table_query_history.py | 5
-rw-r--r--  tko/migrations/007_widen_reason_field.py | 4
-rw-r--r--  tko/migrations/008_add_iteration_attributes.py | 12
-rw-r--r--  tko/models.py | 238
-rwxr-xr-x  tko/parse.py | 346
-rw-r--r--  tko/parsers/base.py | 112
-rw-r--r--  tko/parsers/version_0.py | 736
-rw-r--r--  tko/parsers/version_0_unittest.py | 224
-rw-r--r--  tko/parsers/version_1.py | 376
-rw-r--r--  tko/parsers/version_1_unittest.py | 362
-rwxr-xr-x  tko/plotgraph.py | 181
-rw-r--r--  tko/query_lib.py | 125
-rwxr-xr-x  tko/reason_qualifier.py | 103
-rwxr-xr-x  tko/retrieve_jobs | 8
-rw-r--r--  tko/status_lib.py | 72
-rw-r--r--  tko/status_lib_unittest.py | 312
-rw-r--r--  tko/unique_cookie.py | 55
-rw-r--r--  tko/utils.py | 14
-rw-r--r--  tko/utils_unittest.py | 52
-rwxr-xr-x  tko/vertical_text.py | 1
-rw-r--r--  ui/dialog.py | 164
-rwxr-xr-x  ui/menu | 578
-rw-r--r--  ui/menu_lib.py | 128
-rw-r--r--  unittest_suite.py | 24
245 files changed, 22657 insertions, 22685 deletions
diff --git a/client/bin/autotest b/client/bin/autotest
index 9d3a968f..34c1e263 100755
--- a/client/bin/autotest
+++ b/client/bin/autotest
@@ -28,27 +28,27 @@ os.environ['PYTHONPATH'] = autodirbin
parser = OptionParser()
parser.add_option("-c", "--continue", dest="cont", action="store_true",
- default=False, help="continue previously started job")
+ default=False, help="continue previously started job")
parser.add_option("-t", "--tag", dest="tag", type="string", default="default",
- help="set the job tag")
+ help="set the job tag")
parser.add_option("-H", "--harness", dest="harness", type="string", default='',
- help="set the harness type")
+ help="set the harness type")
parser.add_option("-l", "--external_logging", dest="log", action="store_true",
- default=False, help="enable external logging")
+ default=False, help="enable external logging")
def usage():
- parser.print_help()
- sys.exit(1)
+ parser.print_help()
+ sys.exit(1)
options, args = parser.parse_args()
# Check for a control file.
if len(args) != 1:
- usage()
+ usage()
# JOB: run the specified job control file.
job.runjob(os.path.abspath(args[0]), options.cont, options.tag, options.harness,
- options.log)
+ options.log)
diff --git a/client/bin/autotest.py b/client/bin/autotest.py
index 9d4faa7a..76958924 100755
--- a/client/bin/autotest.py
+++ b/client/bin/autotest.py
@@ -1,17 +1,17 @@
import os, sys
class system:
- def __init__(self):
- self.autodir = os.environ['AUTODIR']
- self.resultdir = self.autodir + '/results'
- self.tmpdir = self.autodir + '/tmp'
+ def __init__(self):
+ self.autodir = os.environ['AUTODIR']
+ self.resultdir = self.autodir + '/results'
+ self.tmpdir = self.autodir + '/tmp'
- if not os.path.isdir(self.resultdir):
- os.mkdir(self.resultdir)
- if not os.path.isdir(self.tmpdir):
- os.mkdir(self.tmpdir)
- return None
+ if not os.path.isdir(self.resultdir):
+ os.mkdir(self.resultdir)
+ if not os.path.isdir(self.tmpdir):
+ os.mkdir(self.tmpdir)
+ return None
- def boot(self, tag=None):
- print "I OUGHT TO REBOOT NOW!"
+ def boot(self, tag=None):
+ print "I OUGHT TO REBOOT NOW!"
diff --git a/client/bin/autotest_client b/client/bin/autotest_client
index 762d0cb0..1e275a4b 100755
--- a/client/bin/autotest_client
+++ b/client/bin/autotest_client
@@ -17,11 +17,11 @@ os.dup2(1,2)
# If we're using cpusets, run inside the root one by default
if os.path.exists("/dev/cpuset/tasks") and getpass.getuser() == "root":
- utils.write_one_line("/dev/cpuset/tasks", str(os.getpid()))
+ utils.write_one_line("/dev/cpuset/tasks", str(os.getpid()))
autodir = os.path.dirname(sys.argv[0])
autotest = os.path.join(autodir, 'autotest')
cmd = ' '.join([autotest, '-H simple'] + sys.argv[1:])
exit_code = subprocess.call(cmd, shell=True, stderr=subprocess.STDOUT,
- close_fds=False)
+ close_fds=False)
sys.exit(exit_code) # pass on the exit status from autotest
diff --git a/client/bin/autotest_utils.py b/client/bin/autotest_utils.py
index 1e6c7250..f6eb86e8 100755
--- a/client/bin/autotest_utils.py
+++ b/client/bin/autotest_utils.py
@@ -8,593 +8,593 @@ from autotest_lib.client.common_lib import error, utils
def grep(pattern, file):
- """
- This is mainly to fix the return code inversion from grep
- Also handles compressed files.
+ """
+ This is mainly to fix the return code inversion from grep
+ Also handles compressed files.
- returns 1 if the pattern is present in the file, 0 if not.
- """
- command = 'grep "%s" > /dev/null' % pattern
- ret = cat_file_to_cmd(file, command, ignore_status=True)
- return not ret
+ returns 1 if the pattern is present in the file, 0 if not.
+ """
+ command = 'grep "%s" > /dev/null' % pattern
+ ret = cat_file_to_cmd(file, command, ignore_status=True)
+ return not ret
def difflist(list1, list2):
- """returns items in list2 that are not in list1"""
- diff = [];
- for x in list2:
- if x not in list1:
- diff.append(x)
- return diff
+ """returns items in list2 that are not in list1"""
+ diff = [];
+ for x in list2:
+ if x not in list1:
+ diff.append(x)
+ return diff
def cat_file_to_cmd(file, command, ignore_status=0, return_output=False):
- """
- equivalent to 'cat file | command' but knows to use
- zcat or bzcat if appropriate
- """
- if return_output:
- run_cmd = utils.system_output
- else:
- run_cmd = utils.system
-
- if not os.path.isfile(file):
- raise NameError('invalid file %s to cat to command %s'
- % (file, command))
- if file.endswith('.bz2'):
- return run_cmd('bzcat ' + file + ' | ' + command, ignore_status)
- elif (file.endswith('.gz') or file.endswith('.tgz')):
- return run_cmd('zcat ' + file + ' | ' + command, ignore_status)
- else:
- return run_cmd('cat ' + file + ' | ' + command, ignore_status)
+ """
+ equivalent to 'cat file | command' but knows to use
+ zcat or bzcat if appropriate
+ """
+ if return_output:
+ run_cmd = utils.system_output
+ else:
+ run_cmd = utils.system
+
+ if not os.path.isfile(file):
+ raise NameError('invalid file %s to cat to command %s'
+ % (file, command))
+ if file.endswith('.bz2'):
+ return run_cmd('bzcat ' + file + ' | ' + command, ignore_status)
+ elif (file.endswith('.gz') or file.endswith('.tgz')):
+ return run_cmd('zcat ' + file + ' | ' + command, ignore_status)
+ else:
+ return run_cmd('cat ' + file + ' | ' + command, ignore_status)
def extract_tarball_to_dir(tarball, dir):
- """
- Extract a tarball to a specified directory name instead of whatever
- the top level of a tarball is - useful for versioned directory names, etc
- """
- if os.path.exists(dir):
- raise NameError, 'target %s already exists' % dir
- pwd = os.getcwd()
- os.chdir(os.path.dirname(os.path.abspath(dir)))
- newdir = extract_tarball(tarball)
- os.rename(newdir, dir)
- os.chdir(pwd)
+ """
+ Extract a tarball to a specified directory name instead of whatever
+ the top level of a tarball is - useful for versioned directory names, etc
+ """
+ if os.path.exists(dir):
+ raise NameError, 'target %s already exists' % dir
+ pwd = os.getcwd()
+ os.chdir(os.path.dirname(os.path.abspath(dir)))
+ newdir = extract_tarball(tarball)
+ os.rename(newdir, dir)
+ os.chdir(pwd)
def extract_tarball(tarball):
- """Returns the directory extracted by the tarball."""
- extracted = cat_file_to_cmd(tarball, 'tar xvf - 2>/dev/null',
- return_output=True).splitlines()
-
- dir = None
-
- for line in extracted:
- line = re.sub(r'^./', '', line)
- if not line or line == '.':
- continue
- topdir = line.split('/')[0]
- if os.path.isdir(topdir):
- if dir:
- assert(dir == topdir)
- else:
- dir = topdir
- if dir:
- return dir
- else:
- raise NameError('extracting tarball produced no dir')
+ """Returns the directory extracted by the tarball."""
+ extracted = cat_file_to_cmd(tarball, 'tar xvf - 2>/dev/null',
+ return_output=True).splitlines()
+
+ dir = None
+
+ for line in extracted:
+ line = re.sub(r'^./', '', line)
+ if not line or line == '.':
+ continue
+ topdir = line.split('/')[0]
+ if os.path.isdir(topdir):
+ if dir:
+ assert(dir == topdir)
+ else:
+ dir = topdir
+ if dir:
+ return dir
+ else:
+ raise NameError('extracting tarball produced no dir')
def get_md5sum(file_path):
- """Gets the md5sum of a file. You must provide a valid path to the file"""
- if not os.path.isfile(file_path):
- raise ValueError, 'invalid file %s to verify' % file_path
- return utils.system_output("md5sum " + file_path + " | awk '{print $1}'")
+ """Gets the md5sum of a file. You must provide a valid path to the file"""
+ if not os.path.isfile(file_path):
+ raise ValueError, 'invalid file %s to verify' % file_path
+ return utils.system_output("md5sum " + file_path + " | awk '{print $1}'")
def unmap_url_cache(cachedir, url, expected_md5):
- """\
- Downloads a file from a URL to a cache directory. If the file is already
- at the expected position and has the expected md5 number, let's not
- download it again.
- """
- # Let's convert cachedir to a canonical path, if it's not already
- cachedir = os.path.realpath(cachedir)
- if not os.path.isdir(cachedir):
- try:
- system('mkdir -p ' + cachedir)
- except:
- raise ValueError('Could not create cache directory %s' % cachedir)
- file_from_url = os.path.basename(url)
- file_local_path = os.path.join(cachedir, file_from_url)
- if os.path.isfile(file_local_path):
- file_md5 = get_md5sum(file_local_path)
- if file_md5 == expected_md5:
- # File is already at the expected position and ready to go
- src = file_from_url
- else:
- # Let's download the package again, it's corrupted...
- src = url
- else:
- # File is not there, let's download it
- src = url
- return utils.unmap_url(cachedir, src, cachedir)
+ """\
+ Downloads a file from a URL to a cache directory. If the file is already
+ at the expected position and has the expected md5 number, let's not
+ download it again.
+ """
+ # Let's convert cachedir to a canonical path, if it's not already
+ cachedir = os.path.realpath(cachedir)
+ if not os.path.isdir(cachedir):
+ try:
+ system('mkdir -p ' + cachedir)
+ except:
+ raise ValueError('Could not create cache directory %s' % cachedir)
+ file_from_url = os.path.basename(url)
+ file_local_path = os.path.join(cachedir, file_from_url)
+ if os.path.isfile(file_local_path):
+ file_md5 = get_md5sum(file_local_path)
+ if file_md5 == expected_md5:
+ # File is already at the expected position and ready to go
+ src = file_from_url
+ else:
+ # Let's download the package again, it's corrupted...
+ src = url
+ else:
+ # File is not there, let's download it
+ src = url
+ return utils.unmap_url(cachedir, src, cachedir)
def basename(path):
- i = path.rfind('/');
- return path[i+1:]
+ i = path.rfind('/');
+ return path[i+1:]
def force_copy(src, dest):
- """Replace dest with a new copy of src, even if it exists"""
- if os.path.isfile(dest):
- os.remove(dest)
- if os.path.isdir(dest):
- dest = os.path.join(dest, os.path.basename(src))
- shutil.copyfile(src, dest)
- return dest
+ """Replace dest with a new copy of src, even if it exists"""
+ if os.path.isfile(dest):
+ os.remove(dest)
+ if os.path.isdir(dest):
+ dest = os.path.join(dest, os.path.basename(src))
+ shutil.copyfile(src, dest)
+ return dest
def force_link(src, dest):
- """Link src to dest, overwriting it if it exists"""
- return utils.system("ln -sf %s %s" % (src, dest))
+ """Link src to dest, overwriting it if it exists"""
+ return utils.system("ln -sf %s %s" % (src, dest))
def file_contains_pattern(file, pattern):
- """Return true if file contains the specified egrep pattern"""
- if not os.path.isfile(file):
- raise NameError('file %s does not exist' % file)
- return not utils.system('egrep -q "' + pattern + '" ' + file, ignore_status=True)
+ """Return true if file contains the specified egrep pattern"""
+ if not os.path.isfile(file):
+ raise NameError('file %s does not exist' % file)
+ return not utils.system('egrep -q "' + pattern + '" ' + file, ignore_status=True)
def list_grep(list, pattern):
- """True if any item in list matches the specified pattern."""
- compiled = re.compile(pattern)
- for line in list:
- match = compiled.search(line)
- if (match):
- return 1
- return 0
+ """True if any item in list matches the specified pattern."""
+ compiled = re.compile(pattern)
+ for line in list:
+ match = compiled.search(line)
+ if (match):
+ return 1
+ return 0
def get_os_vendor():
- """Try to guess what's the os vendor
- """
- issue = '/etc/issue'
-
- if not os.path.isfile(issue):
- return 'Unknown'
-
- if file_contains_pattern(issue, 'Red Hat'):
- return 'Red Hat'
- elif file_contains_pattern(issue, 'Fedora Core'):
- return 'Fedora Core'
- elif file_contains_pattern(issue, 'SUSE'):
- return 'SUSE'
- elif file_contains_pattern(issue, 'Ubuntu'):
- return 'Ubuntu'
- elif file_contains_pattern(issue, 'Debian'):
- return 'Debian'
- else:
- return 'Unknown'
+ """Try to guess what's the os vendor
+ """
+ issue = '/etc/issue'
+
+ if not os.path.isfile(issue):
+ return 'Unknown'
+
+ if file_contains_pattern(issue, 'Red Hat'):
+ return 'Red Hat'
+ elif file_contains_pattern(issue, 'Fedora Core'):
+ return 'Fedora Core'
+ elif file_contains_pattern(issue, 'SUSE'):
+ return 'SUSE'
+ elif file_contains_pattern(issue, 'Ubuntu'):
+ return 'Ubuntu'
+ elif file_contains_pattern(issue, 'Debian'):
+ return 'Debian'
+ else:
+ return 'Unknown'
def get_vmlinux():
- """Return the full path to vmlinux
+ """Return the full path to vmlinux
- Ahem. This is crap. Pray harder. Bad Martin.
- """
- vmlinux = '/boot/vmlinux-%s' % utils.system_output('uname -r')
- if os.path.isfile(vmlinux):
- return vmlinux
- vmlinux = '/lib/modules/%s/build/vmlinux' % utils.system_output('uname -r')
- if os.path.isfile(vmlinux):
- return vmlinux
- return None
+ Ahem. This is crap. Pray harder. Bad Martin.
+ """
+ vmlinux = '/boot/vmlinux-%s' % utils.system_output('uname -r')
+ if os.path.isfile(vmlinux):
+ return vmlinux
+ vmlinux = '/lib/modules/%s/build/vmlinux' % utils.system_output('uname -r')
+ if os.path.isfile(vmlinux):
+ return vmlinux
+ return None
def get_systemmap():
- """Return the full path to System.map
+ """Return the full path to System.map
- Ahem. This is crap. Pray harder. Bad Martin.
- """
- map = '/boot/System.map-%s' % utils.system_output('uname -r')
- if os.path.isfile(map):
- return map
- map = '/lib/modules/%s/build/System.map' % utils.system_output('uname -r')
- if os.path.isfile(map):
- return map
- return None
+ Ahem. This is crap. Pray harder. Bad Martin.
+ """
+ map = '/boot/System.map-%s' % utils.system_output('uname -r')
+ if os.path.isfile(map):
+ return map
+ map = '/lib/modules/%s/build/System.map' % utils.system_output('uname -r')
+ if os.path.isfile(map):
+ return map
+ return None
def get_modules_dir():
- """Return the modules dir for the running kernel version"""
- kernel_version = utils.system_output('uname -r')
- return '/lib/modules/%s/kernel' % kernel_version
+ """Return the modules dir for the running kernel version"""
+ kernel_version = utils.system_output('uname -r')
+ return '/lib/modules/%s/kernel' % kernel_version
def get_cpu_arch():
- """Work out which CPU architecture we're running on"""
- f = open('/proc/cpuinfo', 'r')
- cpuinfo = f.readlines()
- f.close()
- if list_grep(cpuinfo, '^cpu.*(RS64|POWER3|Broadband Engine)'):
- return 'power'
- elif list_grep(cpuinfo, '^cpu.*POWER4'):
- return 'power4'
- elif list_grep(cpuinfo, '^cpu.*POWER5'):
- return 'power5'
- elif list_grep(cpuinfo, '^cpu.*POWER6'):
- return 'power6'
- elif list_grep(cpuinfo, '^cpu.*PPC970'):
- return 'power970'
- elif list_grep(cpuinfo, 'Opteron'):
- return 'x86_64'
- elif list_grep(cpuinfo, 'GenuineIntel') and list_grep(cpuinfo, '48 bits virtual'):
- return 'x86_64'
- else:
- return 'i386'
+ """Work out which CPU architecture we're running on"""
+ f = open('/proc/cpuinfo', 'r')
+ cpuinfo = f.readlines()
+ f.close()
+ if list_grep(cpuinfo, '^cpu.*(RS64|POWER3|Broadband Engine)'):
+ return 'power'
+ elif list_grep(cpuinfo, '^cpu.*POWER4'):
+ return 'power4'
+ elif list_grep(cpuinfo, '^cpu.*POWER5'):
+ return 'power5'
+ elif list_grep(cpuinfo, '^cpu.*POWER6'):
+ return 'power6'
+ elif list_grep(cpuinfo, '^cpu.*PPC970'):
+ return 'power970'
+ elif list_grep(cpuinfo, 'Opteron'):
+ return 'x86_64'
+ elif list_grep(cpuinfo, 'GenuineIntel') and list_grep(cpuinfo, '48 bits virtual'):
+ return 'x86_64'
+ else:
+ return 'i386'
def get_current_kernel_arch():
- """Get the machine architecture, now just a wrap of 'uname -m'."""
- return os.popen('uname -m').read().rstrip()
+ """Get the machine architecture, now just a wrap of 'uname -m'."""
+ return os.popen('uname -m').read().rstrip()
def get_file_arch(filename):
- # -L means follow symlinks
- file_data = utils.system_output('file -L ' + filename)
- if file_data.count('80386'):
- return 'i386'
- return None
+ # -L means follow symlinks
+ file_data = utils.system_output('file -L ' + filename)
+ if file_data.count('80386'):
+ return 'i386'
+ return None
def count_cpus():
- """number of CPUs in the local machine according to /proc/cpuinfo"""
- f = file('/proc/cpuinfo', 'r')
- cpus = 0
- for line in f.readlines():
- if line.startswith('processor'):
- cpus += 1
- return cpus
+ """number of CPUs in the local machine according to /proc/cpuinfo"""
+ f = file('/proc/cpuinfo', 'r')
+ cpus = 0
+ for line in f.readlines():
+ if line.startswith('processor'):
+ cpus += 1
+ return cpus
# Returns total memory in kb
def read_from_meminfo(key):
- meminfo = utils.system_output('grep %s /proc/meminfo' % key)
- return int(re.search(r'\d+', meminfo).group(0))
+ meminfo = utils.system_output('grep %s /proc/meminfo' % key)
+ return int(re.search(r'\d+', meminfo).group(0))
def memtotal():
- return read_from_meminfo('MemTotal')
+ return read_from_meminfo('MemTotal')
def freememtotal():
- return read_from_meminfo('MemFree')
+ return read_from_meminfo('MemFree')
def sysctl_kernel(key, value=None):
- """(Very) partial implementation of sysctl, for kernel params"""
- if value:
- # write
- utils.write_one_line('/proc/sys/kernel/%s' % key, str(value))
- else:
- # read
- out = utils.read_one_line('/proc/sys/kernel/%s' % key)
- return int(re.search(r'\d+', out).group(0))
+ """(Very) partial implementation of sysctl, for kernel params"""
+ if value:
+ # write
+ utils.write_one_line('/proc/sys/kernel/%s' % key, str(value))
+ else:
+ # read
+ out = utils.read_one_line('/proc/sys/kernel/%s' % key)
+ return int(re.search(r'\d+', out).group(0))
def _convert_exit_status(sts):
- if os.WIFSIGNALED(sts):
- return -os.WTERMSIG(sts)
- elif os.WIFEXITED(sts):
- return os.WEXITSTATUS(sts)
- else:
- # impossible?
- raise RuntimeError("Unknown exit status %d!" % sts)
+ if os.WIFSIGNALED(sts):
+ return -os.WTERMSIG(sts)
+ elif os.WIFEXITED(sts):
+ return os.WEXITSTATUS(sts)
+ else:
+ # impossible?
+ raise RuntimeError("Unknown exit status %d!" % sts)
def where_art_thy_filehandles():
- """Dump the current list of filehandles"""
- os.system("ls -l /proc/%d/fd >> /dev/tty" % os.getpid())
+ """Dump the current list of filehandles"""
+ os.system("ls -l /proc/%d/fd >> /dev/tty" % os.getpid())
def print_to_tty(string):
- """Output string straight to the tty"""
- open('/dev/tty', 'w').write(string + '\n')
+ """Output string straight to the tty"""
+ open('/dev/tty', 'w').write(string + '\n')
def dump_object(object):
- """Dump an object's attributes and methods
+ """Dump an object's attributes and methods
- kind of like dir()
- """
- for item in object.__dict__.iteritems():
- print item
- try:
- (key,value) = item
- dump_object(value)
- except:
- continue
+ kind of like dir()
+ """
+ for item in object.__dict__.iteritems():
+ print item
+ try:
+ (key,value) = item
+ dump_object(value)
+ except:
+ continue
def environ(env_key):
- """return the requested environment variable, or '' if unset"""
- if (os.environ.has_key(env_key)):
- return os.environ[env_key]
- else:
- return ''
+ """return the requested environment variable, or '' if unset"""
+ if (os.environ.has_key(env_key)):
+ return os.environ[env_key]
+ else:
+ return ''
def prepend_path(newpath, oldpath):
- """prepend newpath to oldpath"""
- if (oldpath):
- return newpath + ':' + oldpath
- else:
- return newpath
+ """prepend newpath to oldpath"""
+ if (oldpath):
+ return newpath + ':' + oldpath
+ else:
+ return newpath
def append_path(oldpath, newpath):
- """append newpath to oldpath"""
- if (oldpath):
- return oldpath + ':' + newpath
- else:
- return newpath
+ """append newpath to oldpath"""
+ if (oldpath):
+ return oldpath + ':' + newpath
+ else:
+ return newpath
def avgtime_print(dir):
- """ Calculate some benchmarking statistics.
- Input is a directory containing a file called 'time'.
- File contains one-per-line results of /usr/bin/time.
- Output is average Elapsed, User, and System time in seconds,
- and average CPU percentage.
- """
- f = open(dir + "/time")
- user = system = elapsed = cpu = count = 0
- r = re.compile('([\d\.]*)user ([\d\.]*)system (\d*):([\d\.]*)elapsed (\d*)%CPU')
- for line in f.readlines():
- try:
- s = r.match(line);
- user += float(s.group(1))
- system += float(s.group(2))
- elapsed += (float(s.group(3)) * 60) + float(s.group(4))
- cpu += float(s.group(5))
- count += 1
- except:
- raise ValueError("badly formatted times")
-
- f.close()
- return "Elapsed: %0.2fs User: %0.2fs System: %0.2fs CPU: %0.0f%%" % \
- (elapsed/count, user/count, system/count, cpu/count)
+ """ Calculate some benchmarking statistics.
+ Input is a directory containing a file called 'time'.
+ File contains one-per-line results of /usr/bin/time.
+ Output is average Elapsed, User, and System time in seconds,
+ and average CPU percentage.
+ """
+ f = open(dir + "/time")
+ user = system = elapsed = cpu = count = 0
+ r = re.compile('([\d\.]*)user ([\d\.]*)system (\d*):([\d\.]*)elapsed (\d*)%CPU')
+ for line in f.readlines():
+ try:
+ s = r.match(line);
+ user += float(s.group(1))
+ system += float(s.group(2))
+ elapsed += (float(s.group(3)) * 60) + float(s.group(4))
+ cpu += float(s.group(5))
+ count += 1
+ except:
+ raise ValueError("badly formatted times")
+
+ f.close()
+ return "Elapsed: %0.2fs User: %0.2fs System: %0.2fs CPU: %0.0f%%" % \
+ (elapsed/count, user/count, system/count, cpu/count)
def running_config():
- """
- Return path of config file of the currently running kernel
- """
- version = utils.system_output('uname -r')
- for config in ('/proc/config.gz', \
- '/boot/config-%s' % version,
- '/lib/modules/%s/build/.config' % version):
- if os.path.isfile(config):
- return config
- return None
+ """
+ Return path of config file of the currently running kernel
+ """
+ version = utils.system_output('uname -r')
+ for config in ('/proc/config.gz', \
+ '/boot/config-%s' % version,
+ '/lib/modules/%s/build/.config' % version):
+ if os.path.isfile(config):
+ return config
+ return None
def check_for_kernel_feature(feature):
- config = running_config()
+ config = running_config()
- if not config:
- raise TypeError("Can't find kernel config file")
+ if not config:
+ raise TypeError("Can't find kernel config file")
- if config.endswith('.gz'):
- grep = 'zgrep'
- else:
- grep = 'grep'
- grep += ' ^CONFIG_%s= %s' % (feature, config)
+ if config.endswith('.gz'):
+ grep = 'zgrep'
+ else:
+ grep = 'grep'
+ grep += ' ^CONFIG_%s= %s' % (feature, config)
- if not utils.system_output(grep, ignore_status=True):
- raise ValueError("Kernel doesn't have a %s feature" % (feature))
+ if not utils.system_output(grep, ignore_status=True):
+ raise ValueError("Kernel doesn't have a %s feature" % (feature))
def cpu_online_map():
- """
- Check out the available cpu online map
- """
- cpus = []
- for line in open('/proc/cpuinfo', 'r').readlines():
- if line.startswith('processor'):
- cpus.append(line.split()[2]) # grab cpu number
- return cpus
+ """
+ Check out the available cpu online map
+ """
+ cpus = []
+ for line in open('/proc/cpuinfo', 'r').readlines():
+ if line.startswith('processor'):
+ cpus.append(line.split()[2]) # grab cpu number
+ return cpus
def check_glibc_ver(ver):
- glibc_ver = commands.getoutput('ldd --version').splitlines()[0]
- glibc_ver = re.search(r'(\d+\.\d+(\.\d+)?)', glibc_ver).group()
- if glibc_ver.split('.') < ver.split('.'):
- raise error.TestError("Glibc is too old (%s). Glibc >= %s is needed." % \
- (glibc_ver, ver))
+ glibc_ver = commands.getoutput('ldd --version').splitlines()[0]
+ glibc_ver = re.search(r'(\d+\.\d+(\.\d+)?)', glibc_ver).group()
+ if glibc_ver.split('.') < ver.split('.'):
+ raise error.TestError("Glibc is too old (%s). Glibc >= %s is needed." % \
+ (glibc_ver, ver))
def check_kernel_ver(ver):
- kernel_ver = utils.system_output('uname -r')
- kv_tmp = re.split(r'[-]', kernel_ver)[0:3]
- if kv_tmp[0].split('.') < ver.split('.'):
- raise error.TestError("Kernel is too old (%s). Kernel > %s is needed." % \
- (kernel_ver, ver))
+ kernel_ver = utils.system_output('uname -r')
+ kv_tmp = re.split(r'[-]', kernel_ver)[0:3]
+ if kv_tmp[0].split('.') < ver.split('.'):
+ raise error.TestError("Kernel is too old (%s). Kernel > %s is needed." % \
+ (kernel_ver, ver))
def human_format(number):
- # Convert number to kilo / mega / giga format.
- if number < 1024:
- return "%d" % number
- kilo = float(number) / 1024.0
- if kilo < 1024:
- return "%.2fk" % kilo
- meg = kilo / 1024.0
- if meg < 1024:
- return "%.2fM" % meg
- gig = meg / 1024.0
- return "%.2fG" % gig
+ # Convert number to kilo / mega / giga format.
+ if number < 1024:
+ return "%d" % number
+ kilo = float(number) / 1024.0
+ if kilo < 1024:
+ return "%.2fk" % kilo
+ meg = kilo / 1024.0
+ if meg < 1024:
+ return "%.2fM" % meg
+ gig = meg / 1024.0
+ return "%.2fG" % gig
def numa_nodes():
- node_paths = glob.glob('/sys/devices/system/node/node*')
- nodes = [int(re.sub(r'.*node(\d+)', r'\1', x)) for x in node_paths]
- return (sorted(nodes))
+ node_paths = glob.glob('/sys/devices/system/node/node*')
+ nodes = [int(re.sub(r'.*node(\d+)', r'\1', x)) for x in node_paths]
+ return (sorted(nodes))
def node_size():
- nodes = max(len(numa_nodes()), 1)
- return ((memtotal() * 1024) / nodes)
+ nodes = max(len(numa_nodes()), 1)
+ return ((memtotal() * 1024) / nodes)
def to_seconds(time_string):
- """Converts a string in M+:SS.SS format to S+.SS"""
- elts = time_string.split(':')
- if len(elts) == 1:
- return time_string
- return str(int(elts[0]) * 60 + float(elts[1]))
+ """Converts a string in M+:SS.SS format to S+.SS"""
+ elts = time_string.split(':')
+ if len(elts) == 1:
+ return time_string
+ return str(int(elts[0]) * 60 + float(elts[1]))
def extract_all_time_results(results_string):
- """Extract user, system, and elapsed times into a list of tuples"""
- pattern = re.compile(r"(.*?)user (.*?)system (.*?)elapsed")
- results = []
- for result in pattern.findall(results_string):
- results.append(tuple([to_seconds(elt) for elt in result]))
- return results
+ """Extract user, system, and elapsed times into a list of tuples"""
+ pattern = re.compile(r"(.*?)user (.*?)system (.*?)elapsed")
+ results = []
+ for result in pattern.findall(results_string):
+ results.append(tuple([to_seconds(elt) for elt in result]))
+ return results
def pickle_load(filename):
- return pickle.load(open(filename, 'r'))
+ return pickle.load(open(filename, 'r'))
# Return the kernel version and build timestamp.
def running_os_release():
- return os.uname()[2:4]
+ return os.uname()[2:4]
def running_os_ident():
- (version, timestamp) = running_os_release()
- return version + '::' + timestamp
+ (version, timestamp) = running_os_release()
+ return version + '::' + timestamp
# much like find . -name 'pattern'
def locate(pattern, root=os.getcwd()):
- for path, dirs, files in os.walk(root):
- for f in [os.path.abspath(os.path.join(path, f))
- for f in files if fnmatch.fnmatch(f, pattern)]:
- yield f
+ for path, dirs, files in os.walk(root):
+ for f in [os.path.abspath(os.path.join(path, f))
+ for f in files if fnmatch.fnmatch(f, pattern)]:
+ yield f
def freespace(path):
- """Return the disk free space, in bytes"""
- s = os.statvfs(path)
- return s.f_bavail * s.f_bsize
+ """Return the disk free space, in bytes"""
+ s = os.statvfs(path)
+ return s.f_bavail * s.f_bsize
def disk_block_size(path):
- """Return the disk block size, in bytes"""
- return os.statvfs(path).f_bsize
+ """Return the disk block size, in bytes"""
+ return os.statvfs(path).f_bsize
def get_cpu_family():
- procinfo = utils.system_output('cat /proc/cpuinfo')
- CPU_FAMILY_RE = re.compile(r'^cpu family\s+:\s+(\S+)', re.M)
- matches = CPU_FAMILY_RE.findall(procinfo)
- if matches:
- return int(matches[0])
- else:
- raise error.TestError('Could not get valid cpu family data')
+ procinfo = utils.system_output('cat /proc/cpuinfo')
+ CPU_FAMILY_RE = re.compile(r'^cpu family\s+:\s+(\S+)', re.M)
+ matches = CPU_FAMILY_RE.findall(procinfo)
+ if matches:
+ return int(matches[0])
+ else:
+ raise error.TestError('Could not get valid cpu family data')
def get_disks():
- df_output = utils.system_output('df')
- disk_re = re.compile(r'^(/dev/hd[a-z]+)3', re.M)
- return disk_re.findall(df_output)
+ df_output = utils.system_output('df')
+ disk_re = re.compile(r'^(/dev/hd[a-z]+)3', re.M)
+ return disk_re.findall(df_output)
def load_module(module_name):
- # Checks if a module has already been loaded
- if module_is_loaded(module_name):
- return False
+ # Checks if a module has already been loaded
+ if module_is_loaded(module_name):
+ return False
- utils.system('/sbin/modprobe ' + module_name)
- return True
+ utils.system('/sbin/modprobe ' + module_name)
+ return True
def unload_module(module_name):
- utils.system('/sbin/rmmod ' + module_name)
+ utils.system('/sbin/rmmod ' + module_name)
def module_is_loaded(module_name):
- module_name = module_name.replace('-', '_')
- modules = utils.system_output('/sbin/lsmod').splitlines()
- for module in modules:
- if module.startswith(module_name) and module[len(module_name)] == ' ':
- return True
- return False
+ module_name = module_name.replace('-', '_')
+ modules = utils.system_output('/sbin/lsmod').splitlines()
+ for module in modules:
+ if module.startswith(module_name) and module[len(module_name)] == ' ':
+ return True
+ return False
def get_loaded_modules():
- lsmod_output = utils.system_output('/sbin/lsmod').splitlines()[1:]
- return [line.split(None, 1)[0] for line in lsmod_output]
+ lsmod_output = utils.system_output('/sbin/lsmod').splitlines()[1:]
+ return [line.split(None, 1)[0] for line in lsmod_output]
def get_huge_page_size():
- output = utils.system_output('grep Hugepagesize /proc/meminfo')
- return int(output.split()[1]) # Assumes units always in kB. :(
+ output = utils.system_output('grep Hugepagesize /proc/meminfo')
+ return int(output.split()[1]) # Assumes units always in kB. :(
def get_num_huge_pages():
- raw_hugepages = utils.system_output('/sbin/sysctl vm.nr_hugepages')
- return int(raw_hugepages.split()[2])
+ raw_hugepages = utils.system_output('/sbin/sysctl vm.nr_hugepages')
+ return int(raw_hugepages.split()[2])
def set_num_huge_pages(num):
- utils.system('/sbin/sysctl vm.nr_hugepages=%d' % num)
+ utils.system('/sbin/sysctl vm.nr_hugepages=%d' % num)
def get_system_nodes():
- nodes = os.listdir('/sys/devices/system/node')
- nodes.sort()
- return nodes
+ nodes = os.listdir('/sys/devices/system/node')
+ nodes.sort()
+ return nodes
def get_cpu_vendor():
- cpuinfo = open('/proc/cpuinfo').read()
- vendors = re.findall(r'(?m)^vendor_id\s*:\s*(\S+)\s*$', cpuinfo)
- for i in xrange(1, len(vendors)):
- if vendors[i] != vendors[0]:
- raise error.TestError('multiple cpu vendors found: ' + str(vendors))
- return vendors[0]
+ cpuinfo = open('/proc/cpuinfo').read()
+ vendors = re.findall(r'(?m)^vendor_id\s*:\s*(\S+)\s*$', cpuinfo)
+ for i in xrange(1, len(vendors)):
+ if vendors[i] != vendors[0]:
+ raise error.TestError('multiple cpu vendors found: ' + str(vendors))
+ return vendors[0]
def probe_cpus():
- """
- This routine returns a list of cpu devices found under /sys/devices/system/cpu.
- """
- output = utils.system_output(
- 'find /sys/devices/system/cpu/ -maxdepth 1 -type d -name cpu*')
- return output.splitlines()
+ """
+ This routine returns a list of cpu devices found under /sys/devices/system/cpu.
+ """
+ output = utils.system_output(
+ 'find /sys/devices/system/cpu/ -maxdepth 1 -type d -name cpu*')
+ return output.splitlines()
def ping_default_gateway():
- """Ping the default gateway."""
-
- network = open('/etc/sysconfig/network')
- m = re.search('GATEWAY=(\S+)', network.read())
+ """Ping the default gateway."""
- if m:
- gw = m.group(1)
- cmd = 'ping %s -c 5 > /dev/null' % gw
- return utils.system(cmd, ignore_status=True)
-
- raise error.TestError('Unable to find default gateway')
+ network = open('/etc/sysconfig/network')
+ m = re.search('GATEWAY=(\S+)', network.read())
+
+ if m:
+ gw = m.group(1)
+ cmd = 'ping %s -c 5 > /dev/null' % gw
+ return utils.system(cmd, ignore_status=True)
+
+ raise error.TestError('Unable to find default gateway')
try:
- from site_utils import *
+ from site_utils import *
except ImportError:
- pass
+ pass
diff --git a/client/bin/boottool.py b/client/bin/boottool.py
index 43f7f39f..7c106182 100644
--- a/client/bin/boottool.py
+++ b/client/bin/boottool.py
@@ -2,125 +2,124 @@ import shutil, re, os, string
from autotest_lib.client.common_lib import utils, error
class boottool:
- def __init__(self, boottool_exec=None):
- #variable to indicate if in mode to write entries for Xen
- self.xen_mode = False
+ def __init__(self, boottool_exec=None):
+ #variable to indicate if in mode to write entries for Xen
+ self.xen_mode = False
- if boottool_exec:
- self.boottool_exec = boottool_exec
- else:
- autodir = os.environ['AUTODIR']
- self.boottool_exec = autodir + '/tools/boottool'
+ if boottool_exec:
+ self.boottool_exec = boottool_exec
+ else:
+ autodir = os.environ['AUTODIR']
+ self.boottool_exec = autodir + '/tools/boottool'
- if not self.boottool_exec:
- raise error.AutotestError('Failed to set boottool_exec')
+ if not self.boottool_exec:
+ raise error.AutotestError('Failed to set boottool_exec')
- def run_boottool(self, params):
- return utils.system_output('%s %s' % (self.boottool_exec, params))
+ def run_boottool(self, params):
+ return utils.system_output('%s %s' % (self.boottool_exec, params))
- def bootloader(self):
- return self.run_boottool('--bootloader-probe')
+ def bootloader(self):
+ return self.run_boottool('--bootloader-probe')
- def architecture(self):
- return self.run_boottool('--arch-probe')
+ def architecture(self):
+ return self.run_boottool('--arch-probe')
- def list_titles(self):
- print self.run_boottool('--info all | grep title')
+ def list_titles(self):
+ print self.run_boottool('--info all | grep title')
- def print_entry(self, index):
- print self.run_boottool('--info=%s' % index)
+ def print_entry(self, index):
+ print self.run_boottool('--info=%s' % index)
- def get_default(self):
- self.run_boottool('--default')
+ def get_default(self):
+ self.run_boottool('--default')
- def set_default(self, index):
- print self.run_boottool('--set-default=%s' % index)
+ def set_default(self, index):
+ print self.run_boottool('--set-default=%s' % index)
- def enable_xen_mode(self):
- self.xen_mode = True
+ def enable_xen_mode(self):
+ self.xen_mode = True
- def disable_xen_mode(self):
- self.xen_mode = False
+ def disable_xen_mode(self):
+ self.xen_mode = False
- def get_xen_mode(self):
- return self.xen_mode
+ def get_xen_mode(self):
+ return self.xen_mode
- # 'kernel' can be an position number or a title
- def add_args(self, kernel, args):
- parameters = '--update-kernel=%s --args="%s"' % (kernel, args)
+ # 'kernel' can be an position number or a title
+ def add_args(self, kernel, args):
+ parameters = '--update-kernel=%s --args="%s"' % (kernel, args)
- #add parameter if this is a Xen entry
- if self.xen_mode:
- parameters += ' --xen'
+ #add parameter if this is a Xen entry
+ if self.xen_mode:
+ parameters += ' --xen'
- print self.run_boottool(parameters)
+ print self.run_boottool(parameters)
- def add_xen_hypervisor_args(self, kernel, args):
- self.run_boottool('--xen --update-xenhyper=%s --xha="%s"') %(kernel, args)
-
+ def add_xen_hypervisor_args(self, kernel, args):
+ self.run_boottool('--xen --update-xenhyper=%s --xha="%s"') %(kernel, args)
- def remove_args(self, kernel, args):
- parameters = '--update-kernel=%s --remove-args=%s' % (kernel, args)
- #add parameter if this is a Xen entry
- if self.xen_mode:
- parameters += ' --xen'
+ def remove_args(self, kernel, args):
+ parameters = '--update-kernel=%s --remove-args=%s' % (kernel, args)
- print self.run_boottool(parameters)
+ #add parameter if this is a Xen entry
+ if self.xen_mode:
+ parameters += ' --xen'
+ print self.run_boottool(parameters)
- def remove_xen_hypervisor_args(self, kernel, args):
- self.run_boottool('--xen --update-xenhyper=%s --remove-args="%s"') \
- % (kernel, args)
+ def remove_xen_hypervisor_args(self, kernel, args):
+ self.run_boottool('--xen --update-xenhyper=%s --remove-args="%s"') \
+ % (kernel, args)
- def add_kernel(self, path, title='autotest', initrd='', xen_hypervisor='', args=None, root=None, position='end'):
- parameters = '--add-kernel=%s --title=%s' % (path, title)
- # add an initrd now or forever hold your peace
- if initrd:
- parameters += ' --initrd=%s' % initrd
+ def add_kernel(self, path, title='autotest', initrd='', xen_hypervisor='', args=None, root=None, position='end'):
+ parameters = '--add-kernel=%s --title=%s' % (path, title)
- # add parameter if this is a Xen entry
- if self.xen_mode:
- parameters += ' --xen'
- if xen_hypervisor:
- parameters += ' --xenhyper=%s' % xen_hypervisor
+ # add an initrd now or forever hold your peace
+ if initrd:
+ parameters += ' --initrd=%s' % initrd
- if args:
- parameters += ' --args="%s"' % args
- if root:
- parameters += ' --root="%s"' % root
- if position:
- parameters += ' --position="%s"' % position
+ # add parameter if this is a Xen entry
+ if self.xen_mode:
+ parameters += ' --xen'
+ if xen_hypervisor:
+ parameters += ' --xenhyper=%s' % xen_hypervisor
- print self.run_boottool(parameters)
+ if args:
+ parameters += ' --args="%s"' % args
+ if root:
+ parameters += ' --root="%s"' % root
+ if position:
+ parameters += ' --position="%s"' % position
+ print self.run_boottool(parameters)
- def remove_kernel(self, kernel):
- print self.run_boottool('--remove-kernel=%s' % kernel)
+ def remove_kernel(self, kernel):
+ print self.run_boottool('--remove-kernel=%s' % kernel)
- def boot_once(self, title):
- print self.run_boottool('--boot-once --title=%s' % title)
+ def boot_once(self, title):
+ print self.run_boottool('--boot-once --title=%s' % title)
- def info(self, index):
- return self.run_boottool('--info=%s' % index)
+
+ def info(self, index):
+ return self.run_boottool('--info=%s' % index)
# TODO: backup()
# TODO: set_timeout()
-
diff --git a/client/bin/common.py b/client/bin/common.py
index 74ed7597..6881386b 100644
--- a/client/bin/common.py
+++ b/client/bin/common.py
@@ -5,4 +5,4 @@ sys.path.insert(0, client_dir)
import setup_modules
sys.path.pop(0)
setup_modules.setup(base_path=client_dir,
- root_module_name="autotest_lib.client")
+ root_module_name="autotest_lib.client")
diff --git a/client/bin/config.py b/client/bin/config.py
index 59b8279f..477becef 100644
--- a/client/bin/config.py
+++ b/client/bin/config.py
@@ -8,7 +8,7 @@ are required at a level they should be separated by underscores (_).
Please no StudlyCaps.
For example:
- boot.default_args
+ boot.default_args
"""
__author__ = """Copyright Andy Whitcroft 2006"""
@@ -16,33 +16,33 @@ __author__ = """Copyright Andy Whitcroft 2006"""
import os
class config:
- """The BASIC job configuration
-
- Properties:
- job
- The job object for this job
- config
- The job configuration dictionary
- """
-
- def __init__(self, job):
- """
- job
- The job object for this job
- """
- self.job = job
- self.config = {}
-
-
- def set(self, name, value):
- if name == "proxy":
- os.environ['http_proxy'] = value
- os.environ['ftp_proxy'] = value
-
- self.config[name] = value
-
- def get(self, name):
- if name in self.config:
- return self.config[name]
- else:
- return None
+ """The BASIC job configuration
+
+ Properties:
+ job
+ The job object for this job
+ config
+ The job configuration dictionary
+ """
+
+ def __init__(self, job):
+ """
+ job
+ The job object for this job
+ """
+ self.job = job
+ self.config = {}
+
+
+ def set(self, name, value):
+ if name == "proxy":
+ os.environ['http_proxy'] = value
+ os.environ['ftp_proxy'] = value
+
+ self.config[name] = value
+
+ def get(self, name):
+ if name in self.config:
+ return self.config[name]
+ else:
+ return None
diff --git a/client/bin/cpuset.py b/client/bin/cpuset.py
index a870e762..fe583ac9 100644
--- a/client/bin/cpuset.py
+++ b/client/bin/cpuset.py
@@ -9,265 +9,265 @@ super_root = "/dev/cpuset"
# Convert '1-3,7,9-12' to [1,2,3,7,9,10,11,12]
def rangelist_to_list(rangelist):
- result = []
- if not rangelist:
- return result
- for x in rangelist.split(','):
- if re.match(r'^(\d+)$', x):
- result.append(int(x))
- continue
- m = re.match(r'^(\d+)-(\d+)$', x)
- if m:
- start = int(m.group(1))
- end = int(m.group(2))
- result += range(start, end+1)
- continue
- msg = 'Cannot understand data input: %s %s' % (x, rangelist)
- raise ValueError(msg)
- return result
+ result = []
+ if not rangelist:
+ return result
+ for x in rangelist.split(','):
+ if re.match(r'^(\d+)$', x):
+ result.append(int(x))
+ continue
+ m = re.match(r'^(\d+)-(\d+)$', x)
+ if m:
+ start = int(m.group(1))
+ end = int(m.group(2))
+ result += range(start, end+1)
+ continue
+ msg = 'Cannot understand data input: %s %s' % (x, rangelist)
+ raise ValueError(msg)
+ return result
def rounded_memtotal():
- # Get total of all physical mem, in Kbytes
- usable_Kbytes = autotest_utils.memtotal()
- # usable_Kbytes is system's usable DRAM in Kbytes,
- # as reported by memtotal() from device /proc/meminfo memtotal
- # after Linux deducts 1.5% to 5.1% for system table overhead
- # Undo the unknown actual deduction by rounding up
- # to next small multiple of a big power-of-two
- # eg 12GB - 5.1% gets rounded back up to 12GB
- mindeduct = 0.015 # 1.5 percent
- maxdeduct = 0.055 # 5.5 percent
- # deduction range 1.5% .. 5.5% supports physical mem sizes
- # 6GB .. 12GB in steps of .5GB
- # 12GB .. 24GB in steps of 1 GB
- # 24GB .. 48GB in steps of 2 GB ...
- # Finer granularity in physical mem sizes would require
- # tighter spread between min and max possible deductions
-
- # increase mem size by at least min deduction, without rounding
- min_Kbytes = int(usable_Kbytes / (1.0 - mindeduct))
- # increase mem size further by 2**n rounding, by 0..roundKb or more
- round_Kbytes = int(usable_Kbytes / (1.0 - maxdeduct)) - min_Kbytes
- # find least binary roundup 2**n that covers worst-cast roundKb
- mod2n = 1 << int(math.ceil(math.log(round_Kbytes, 2)))
- # have round_Kbytes <= mod2n < round_Kbytes*2
- # round min_Kbytes up to next multiple of mod2n
- phys_Kbytes = min_Kbytes + mod2n - 1
- phys_Kbytes = phys_Kbytes - (phys_Kbytes % mod2n) # clear low bits
- return phys_Kbytes
+ # Get total of all physical mem, in Kbytes
+ usable_Kbytes = autotest_utils.memtotal()
+ # usable_Kbytes is system's usable DRAM in Kbytes,
+ # as reported by memtotal() from device /proc/meminfo memtotal
+ # after Linux deducts 1.5% to 5.1% for system table overhead
+ # Undo the unknown actual deduction by rounding up
+ # to next small multiple of a big power-of-two
+ # eg 12GB - 5.1% gets rounded back up to 12GB
+ mindeduct = 0.015 # 1.5 percent
+ maxdeduct = 0.055 # 5.5 percent
+ # deduction range 1.5% .. 5.5% supports physical mem sizes
+ # 6GB .. 12GB in steps of .5GB
+ # 12GB .. 24GB in steps of 1 GB
+ # 24GB .. 48GB in steps of 2 GB ...
+ # Finer granularity in physical mem sizes would require
+ # tighter spread between min and max possible deductions
+
+ # increase mem size by at least min deduction, without rounding
+ min_Kbytes = int(usable_Kbytes / (1.0 - mindeduct))
+ # increase mem size further by 2**n rounding, by 0..roundKb or more
+ round_Kbytes = int(usable_Kbytes / (1.0 - maxdeduct)) - min_Kbytes
+ # find least binary roundup 2**n that covers worst-cast roundKb
+ mod2n = 1 << int(math.ceil(math.log(round_Kbytes, 2)))
+ # have round_Kbytes <= mod2n < round_Kbytes*2
+ # round min_Kbytes up to next multiple of mod2n
+ phys_Kbytes = min_Kbytes + mod2n - 1
+ phys_Kbytes = phys_Kbytes - (phys_Kbytes % mod2n) # clear low bits
+ return phys_Kbytes
def my_container_name():
- # Get current process's inherited or self-built container name
- # within /dev/cpuset. Is '/' for root container, '/sys', etc.
- return utils.read_one_line('/proc/%i/cpuset' % os.getpid())
+ # Get current process's inherited or self-built container name
+ # within /dev/cpuset. Is '/' for root container, '/sys', etc.
+ return utils.read_one_line('/proc/%i/cpuset' % os.getpid())
def get_mem_nodes(container_full_name):
- file_name = os.path.join(container_full_name, "mems")
- if os.path.exists(file_name):
- return rangelist_to_list(utils.read_one_line(file_name))
- else:
- return []
+ file_name = os.path.join(container_full_name, "mems")
+ if os.path.exists(file_name):
+ return rangelist_to_list(utils.read_one_line(file_name))
+ else:
+ return []
def available_exclusive_mem_nodes(parent_container):
- # Get list of numa memory nodes of parent container which could
- # be allocated exclusively to new child containers.
- # This excludes any nodes now allocated (exclusively or not)
- # to existing children.
- available = set(get_mem_nodes(parent_container))
- for child_container in glob.glob('%s/*/mems' % parent_container):
- child_container = os.path.dirname(child_container)
- busy = set(get_mem_nodes(child_container))
- available -= busy
- return list(available)
+ # Get list of numa memory nodes of parent container which could
+ # be allocated exclusively to new child containers.
+ # This excludes any nodes now allocated (exclusively or not)
+ # to existing children.
+ available = set(get_mem_nodes(parent_container))
+ for child_container in glob.glob('%s/*/mems' % parent_container):
+ child_container = os.path.dirname(child_container)
+ busy = set(get_mem_nodes(child_container))
+ available -= busy
+ return list(available)
def my_mem_nodes():
- # Get list of numa memory nodes owned by current process's container.
- return get_mem_nodes('/dev/cpuset%s' % my_container_name())
+ # Get list of numa memory nodes owned by current process's container.
+ return get_mem_nodes('/dev/cpuset%s' % my_container_name())
def my_available_exclusive_mem_nodes():
- # Get list of numa memory nodes owned by current process's
- # container, which could be allocated exclusively to new child
- # containers. This excludes any nodes now allocated
- # (exclusively or not) to existing children.
- return available_exclusive_mem_nodes('/dev/cpuset%s' % my_container_name())
+ # Get list of numa memory nodes owned by current process's
+ # container, which could be allocated exclusively to new child
+ # containers. This excludes any nodes now allocated
+ # (exclusively or not) to existing children.
+ return available_exclusive_mem_nodes('/dev/cpuset%s' % my_container_name())
def mbytes_per_mem_node():
- # Get mbyte size of each numa mem node, as float
- # Replaces autotest_utils.node_size().
- # Based on guessed total physical mem size, not on kernel's
- # lesser 'available memory' after various system tables.
- # Can be non-integer when kernel sets up 15 nodes instead of 16.
- return rounded_memtotal() / (len(autotest_utils.numa_nodes()) * 1024.0)
+ # Get mbyte size of each numa mem node, as float
+ # Replaces autotest_utils.node_size().
+ # Based on guessed total physical mem size, not on kernel's
+ # lesser 'available memory' after various system tables.
+ # Can be non-integer when kernel sets up 15 nodes instead of 16.
+ return rounded_memtotal() / (len(autotest_utils.numa_nodes()) * 1024.0)
def get_cpus(container_full_name):
- file_name = os.path.join(container_full_name, "cpus")
- if os.path.exists(file_name):
- return rangelist_to_list(utils.read_one_line(file_name))
- else:
- return []
+ file_name = os.path.join(container_full_name, "cpus")
+ if os.path.exists(file_name):
+ return rangelist_to_list(utils.read_one_line(file_name))
+ else:
+ return []
def my_cpus():
- # Get list of cpu cores owned by current process's container.
- return get_cpus('/dev/cpuset%s' % my_container_name())
+ # Get list of cpu cores owned by current process's container.
+ return get_cpus('/dev/cpuset%s' % my_container_name())
def get_tasks(setname):
- return [x.rstrip() for x in open(setname+'/tasks').readlines()]
+ return [x.rstrip() for x in open(setname+'/tasks').readlines()]
def print_one_cpuset(name):
- dir = os.path.join('/dev/cpuset', name)
- cpus = utils.read_one_line(dir + '/cpus')
- mems = utils.read_one_line(dir + '/mems')
- node_size_ = int(mbytes_per_mem_node()) << 20
- memtotal = node_size_ * len(rangelist_to_list(mems))
- tasks = ','.join(get_tasks(dir))
- print "cpuset %s: size %s; tasks %s; cpus %s; mems %s" % \
- (name, autotest_utils.human_format(memtotal), tasks, cpus, mems)
+ dir = os.path.join('/dev/cpuset', name)
+ cpus = utils.read_one_line(dir + '/cpus')
+ mems = utils.read_one_line(dir + '/mems')
+ node_size_ = int(mbytes_per_mem_node()) << 20
+ memtotal = node_size_ * len(rangelist_to_list(mems))
+ tasks = ','.join(get_tasks(dir))
+ print "cpuset %s: size %s; tasks %s; cpus %s; mems %s" % \
+ (name, autotest_utils.human_format(memtotal), tasks, cpus, mems)
def print_all_cpusets():
- for cpuset in glob.glob('/dev/cpuset/*'):
- print_one_cpuset(re.sub(r'.*/', '', cpuset))
+ for cpuset in glob.glob('/dev/cpuset/*'):
+ print_one_cpuset(re.sub(r'.*/', '', cpuset))
def release_dead_containers(parent=super_root):
- # Delete temp subcontainers nested within parent container
- # that are now dead (having no tasks and no sub-containers)
- # and recover their cpu and mem resources.
- # Must not call when a parallel task may be allocating containers!
- # Limit to test* names to preserve permanent containers.
- for child in glob.glob('%s/test*' % parent):
- print 'releasing dead container', child
- release_dead_containers(child) # bottom-up tree walk
- # rmdir has no effect when container still
- # has tasks or sub-containers
- os.rmdir(child)
+ # Delete temp subcontainers nested within parent container
+ # that are now dead (having no tasks and no sub-containers)
+ # and recover their cpu and mem resources.
+ # Must not call when a parallel task may be allocating containers!
+ # Limit to test* names to preserve permanent containers.
+ for child in glob.glob('%s/test*' % parent):
+ print 'releasing dead container', child
+ release_dead_containers(child) # bottom-up tree walk
+ # rmdir has no effect when container still
+ # has tasks or sub-containers
+ os.rmdir(child)
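
The helpers above all read the flat file interface the kernel exposes under /dev/cpuset. A minimal sketch of how they compose, assuming /dev/cpuset is mounted and a container named 'test1' (a placeholder name) already exists:

    # Inspect an existing container; 'test1' is a placeholder name.
    full_name = '/dev/cpuset/test1'
    print "mems :", get_mem_nodes(full_name)   # e.g. [0, 1]
    print "cpus :", get_cpus(full_name)        # e.g. [0, 1, 2, 3]
    print "tasks:", get_tasks(full_name)       # pids bound to the container
    print_one_cpuset('test1')                  # one-line summary of the above
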
class cpuset:
- def display(self):
- print_one_cpuset(os.path.join(self.root, self.name))
-
-
- def release(self):
- print "releasing ", self.cpudir
- parent_t = os.path.join(self.root, 'tasks')
- # Transfer survivors (and self) to parent
- for task in get_tasks(self.cpudir):
- utils.write_one_line(parent_t, task)
- os.rmdir(self.cpudir)
- if os.path.exists(self.cpudir):
- raise error.AutotestError('Could not delete container '
- + self.cpudir)
-
-
- def __init__(self, name, job_size=None, job_pid=None, cpus=None,
- root=None):
- """\
- Create a cpuset container and move job_pid into it
- Allocate the list "cpus" of cpus to that container
-
- name = arbitrary string tag
- job_size = reqested memory for job in megabytes
- job_pid = pid of job we're putting into the container
- cpu = list of cpu indicies to associate with the cpuset
- root = the cpuset to create this new set in
- """
- if not os.path.exists(os.path.join(super_root, "cpus")):
- raise error.AutotestError('Root container /dev/cpuset '
- 'is empty; please reboot')
-
- self.name = name
-
- if root == None:
- # default to nested in process's current container
- root = my_container_name()[1:]
- self.root = os.path.join(super_root, root)
- if not os.path.exists(self.root):
- raise error.AutotestError(('Parent container %s'
- ' does not exist')
- % self.root)
-
- if job_size == None:
- # default to biggest container we can make under root
- job_size = int( mbytes_per_mem_node() *
- len(available_exclusive_mem_nodes(self.root)) )
- if not job_size:
- raise error.AutotestError('Creating container '
- 'with no mem')
- self.memory = job_size
-
- if cpus == None:
- # default to biggest container we can make under root
- cpus = get_cpus(self.root)
- if not cpus:
- raise error.AutotestError('Creating container '
- 'with no cpus')
- self.cpus = cpus
-
- # default to the current pid
- if not job_pid:
- job_pid = os.getpid()
-
- print "cpuset(name=%s, root=%s, job_size=%d, pid=%d)" % \
- (name, root, job_size, job_pid)
-
- self.cpudir = os.path.join(self.root, name)
- if os.path.exists(self.cpudir):
- self.release() # destructively replace old
-
- nodes_needed = int(math.ceil( float(job_size) /
- math.ceil(mbytes_per_mem_node()) ))
-
- if nodes_needed > len(get_mem_nodes(self.root)):
- raise error.AutotestError("Container's memory "
- "is bigger than parent's")
-
- while True:
- # Pick specific free mem nodes for this cpuset
- mems = available_exclusive_mem_nodes(self.root)
- if len(mems) < nodes_needed:
- raise error.AutotestError(('Existing container'
- ' hold %d mem nodes'
- ' needed by new'
- 'container')
- % (nodes_needed
- - len(mems)))
- mems = mems[-nodes_needed:]
- mems_spec = ','.join(['%d' % x for x in mems])
- os.mkdir(self.cpudir)
- utils.write_one_line(os.path.join(self.cpudir,
- 'mem_exclusive'), '1')
- utils.write_one_line(os.path.join(self.cpudir,
- 'mems'),
- mems_spec)
- # Above sends err msg to client.log.0, but no exception,
- # if mems_spec contained any now-taken nodes
- # Confirm that siblings didn't grab our chosen mems:
- nodes_gotten = len(get_mem_nodes(self.cpudir))
- if nodes_gotten >= nodes_needed:
- break # success
- print "cpuset %s lost race for nodes" % name, mems_spec
- # Return any mem we did get, and try again
- os.rmdir(self.cpudir)
-
- # add specified cpu cores and own task pid to container:
- cpu_spec = ','.join(['%d' % x for x in cpus])
- utils.write_one_line(os.path.join(self.cpudir,
- 'cpus'),
- cpu_spec)
- utils.write_one_line(os.path.join(self.cpudir,
- 'tasks'),
- "%d" % job_pid)
- self.display()
+ def display(self):
+ print_one_cpuset(os.path.join(self.root, self.name))
+
+
+ def release(self):
+ print "releasing ", self.cpudir
+ parent_t = os.path.join(self.root, 'tasks')
+ # Transfer survivors (and self) to parent
+ for task in get_tasks(self.cpudir):
+ utils.write_one_line(parent_t, task)
+ os.rmdir(self.cpudir)
+ if os.path.exists(self.cpudir):
+ raise error.AutotestError('Could not delete container '
+ + self.cpudir)
+
+
+ def __init__(self, name, job_size=None, job_pid=None, cpus=None,
+ root=None):
+ """\
+ Create a cpuset container and move job_pid into it
+ Allocate the list "cpus" of cpus to that container
+
+ name = arbitrary string tag
+ job_size = requested memory for job in megabytes
+ job_pid = pid of job we're putting into the container
+ cpus = list of cpu indices to associate with the cpuset
+ root = the cpuset to create this new set in
+ """
+ if not os.path.exists(os.path.join(super_root, "cpus")):
+ raise error.AutotestError('Root container /dev/cpuset '
+ 'is empty; please reboot')
+
+ self.name = name
+
+ if root == None:
+ # default to nested in process's current container
+ root = my_container_name()[1:]
+ self.root = os.path.join(super_root, root)
+ if not os.path.exists(self.root):
+ raise error.AutotestError(('Parent container %s'
+ ' does not exist')
+ % self.root)
+
+ if job_size == None:
+ # default to biggest container we can make under root
+ job_size = int( mbytes_per_mem_node() *
+ len(available_exclusive_mem_nodes(self.root)) )
+ if not job_size:
+ raise error.AutotestError('Creating container '
+ 'with no mem')
+ self.memory = job_size
+
+ if cpus == None:
+ # default to biggest container we can make under root
+ cpus = get_cpus(self.root)
+ if not cpus:
+ raise error.AutotestError('Creating container '
+ 'with no cpus')
+ self.cpus = cpus
+
+ # default to the current pid
+ if not job_pid:
+ job_pid = os.getpid()
+
+ print "cpuset(name=%s, root=%s, job_size=%d, pid=%d)" % \
+ (name, root, job_size, job_pid)
+
+ self.cpudir = os.path.join(self.root, name)
+ if os.path.exists(self.cpudir):
+ self.release() # destructively replace old
+
+ nodes_needed = int(math.ceil( float(job_size) /
+ math.ceil(mbytes_per_mem_node()) ))
+
+ if nodes_needed > len(get_mem_nodes(self.root)):
+ raise error.AutotestError("Container's memory "
+ "is bigger than parent's")
+
+ while True:
+ # Pick specific free mem nodes for this cpuset
+ mems = available_exclusive_mem_nodes(self.root)
+ if len(mems) < nodes_needed:
+ raise error.AutotestError(('Existing containers'
+ ' hold %d mem nodes'
+ ' needed by new'
+ ' container')
+ % (nodes_needed
+ - len(mems)))
+ mems = mems[-nodes_needed:]
+ mems_spec = ','.join(['%d' % x for x in mems])
+ os.mkdir(self.cpudir)
+ utils.write_one_line(os.path.join(self.cpudir,
+ 'mem_exclusive'), '1')
+ utils.write_one_line(os.path.join(self.cpudir,
+ 'mems'),
+ mems_spec)
+ # Above sends err msg to client.log.0, but no exception,
+ # if mems_spec contained any now-taken nodes
+ # Confirm that siblings didn't grab our chosen mems:
+ nodes_gotten = len(get_mem_nodes(self.cpudir))
+ if nodes_gotten >= nodes_needed:
+ break # success
+ print "cpuset %s lost race for nodes" % name, mems_spec
+ # Return any mem we did get, and try again
+ os.rmdir(self.cpudir)
+
+ # add specified cpu cores and own task pid to container:
+ cpu_spec = ','.join(['%d' % x for x in cpus])
+ utils.write_one_line(os.path.join(self.cpudir,
+ 'cpus'),
+ cpu_spec)
+ utils.write_one_line(os.path.join(self.cpudir,
+ 'tasks'),
+ "%d" % job_pid)
+ self.display()
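
For orientation, a minimal usage sketch of the class above. It assumes root privileges, a kernel with cpusets enabled and /dev/cpuset mounted, and uses an arbitrary container name; the 'test' prefix matters because release_dead_containers() only reclaims containers named test*.

    # Sketch only: put the current process into a 2048 MB, 2-CPU container,
    # run a workload, then hand the nodes and cpus back to the parent.
    c = cpuset('test_example', job_size=2048, cpus=[0, 1])
    try:
        pass        # memory/cpu-bound workload runs here, inside the container
    finally:
        c.release()
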
diff --git a/client/bin/fd_stack.py b/client/bin/fd_stack.py
index c377cf05..e0f3f4c7 100755
--- a/client/bin/fd_stack.py
+++ b/client/bin/fd_stack.py
@@ -3,137 +3,137 @@ __author__ = """Copyright Martin J. Bligh, Andy Whitcroft, 2005, 2006"""
import sys, os
class fd_stack:
- """a stack of fd redirects
-
- Redirects cause existing fd's to be pushed on the stack; restore()
- causes the current set of redirects to be popped, restoring the previous
- filehandle destinations.
-
- Note that we need to redirect both the sys.stdout type descriptor
- (which print, etc use) and the low level OS numbered descriptor
- which os.system() etc use.
- """
-
- def __init__(self, fd, filehandle):
- self.fd = fd # eg 1
- self.filehandle = filehandle # eg sys.stdout
- self.stack = [(fd, filehandle)]
-
-
- def update_handle(self, new):
- if (self.filehandle == sys.stdout):
- sys.stdout = new
- if (self.filehandle == sys.stderr):
- sys.stderr = new
- self.filehandle = new
-
- def redirect(self, filename):
- """Redirect output to the specified file
-
- Overwrites the previous contents, if any.
- """
- self.filehandle.flush()
- fdcopy = os.dup(self.fd)
- self.stack.append( (fdcopy, self.filehandle, 0) )
- # self.filehandle = file(filename, 'w')
- if (os.path.isfile(filename)):
- newfd = os.open(filename, os.O_WRONLY)
- else:
- newfd = os.open(filename, os.O_WRONLY | os.O_CREAT)
- os.dup2(newfd, self.fd)
- os.close(newfd)
- self.update_handle(os.fdopen(self.fd, 'w'))
-
-
- def tee_redirect(self, filename):
- """Tee output to the specified file
-
- Overwrites the previous contents, if any.
- """
- self.filehandle.flush()
- #print_to_tty("tee_redirect to " + filename)
- #where_art_thy_filehandles()
- fdcopy = os.dup(self.fd)
- r, w = os.pipe()
- pid = os.fork()
- if pid: # parent
- os.close(r)
- os.dup2(w, self.fd)
- os.close(w)
- self.stack.append( (fdcopy, self.filehandle, pid) )
- self.update_handle(os.fdopen(self.fd, 'w', 0))
- #where_art_thy_filehandles()
- #print_to_tty("done tee_redirect to " + filename)
- else: # child
- os.close(w)
- os.dup2(r, 0)
- os.dup2(fdcopy, 1)
- os.close(r)
- os.close(fdcopy)
- os.execlp('tee', 'tee', '-a', filename)
-
-
- def restore(self):
- """unredirect one level"""
- self.filehandle.flush()
- # print_to_tty("ENTERING RESTORE %d" % self.fd)
- # where_art_thy_filehandles()
- (old_fd, old_filehandle, pid) = self.stack.pop()
- # print_to_tty("old_fd %d" % old_fd)
- # print_to_tty("self.fd %d" % self.fd)
- self.filehandle.close() # seems to close old_fd as well.
- if pid:
- os.waitpid(pid, 0)
- # where_art_thy_filehandles()
- os.dup2(old_fd, self.fd)
- # print_to_tty("CLOSING FD %d" % old_fd)
- os.close(old_fd)
- # where_art_thy_filehandles()
- self.update_handle(old_filehandle)
- # where_art_thy_filehandles()
- # print_to_tty("EXIT RESTORE %d" % self.fd)
+ """a stack of fd redirects
+
+ Redirects cause existing fd's to be pushed on the stack; restore()
+ causes the current set of redirects to be popped, restoring the previous
+ filehandle destinations.
+
+ Note that we need to redirect both the sys.stdout type descriptor
+ (which print, etc use) and the low level OS numbered descriptor
+ which os.system() etc use.
+ """
+
+ def __init__(self, fd, filehandle):
+ self.fd = fd # eg 1
+ self.filehandle = filehandle # eg sys.stdout
+ self.stack = [(fd, filehandle)]
+
+
+ def update_handle(self, new):
+ if (self.filehandle == sys.stdout):
+ sys.stdout = new
+ if (self.filehandle == sys.stderr):
+ sys.stderr = new
+ self.filehandle = new
+
+ def redirect(self, filename):
+ """Redirect output to the specified file
+
+ Overwrites the previous contents, if any.
+ """
+ self.filehandle.flush()
+ fdcopy = os.dup(self.fd)
+ self.stack.append( (fdcopy, self.filehandle, 0) )
+ # self.filehandle = file(filename, 'w')
+ if (os.path.isfile(filename)):
+ newfd = os.open(filename, os.O_WRONLY)
+ else:
+ newfd = os.open(filename, os.O_WRONLY | os.O_CREAT)
+ os.dup2(newfd, self.fd)
+ os.close(newfd)
+ self.update_handle(os.fdopen(self.fd, 'w'))
+
+
+ def tee_redirect(self, filename):
+ """Tee output to the specified file
+
+ Overwrites the previous contents, if any.
+ """
+ self.filehandle.flush()
+ #print_to_tty("tee_redirect to " + filename)
+ #where_art_thy_filehandles()
+ fdcopy = os.dup(self.fd)
+ r, w = os.pipe()
+ pid = os.fork()
+ if pid: # parent
+ os.close(r)
+ os.dup2(w, self.fd)
+ os.close(w)
+ self.stack.append( (fdcopy, self.filehandle, pid) )
+ self.update_handle(os.fdopen(self.fd, 'w', 0))
+ #where_art_thy_filehandles()
+ #print_to_tty("done tee_redirect to " + filename)
+ else: # child
+ os.close(w)
+ os.dup2(r, 0)
+ os.dup2(fdcopy, 1)
+ os.close(r)
+ os.close(fdcopy)
+ os.execlp('tee', 'tee', '-a', filename)
+
+
+ def restore(self):
+ """unredirect one level"""
+ self.filehandle.flush()
+ # print_to_tty("ENTERING RESTORE %d" % self.fd)
+ # where_art_thy_filehandles()
+ (old_fd, old_filehandle, pid) = self.stack.pop()
+ # print_to_tty("old_fd %d" % old_fd)
+ # print_to_tty("self.fd %d" % self.fd)
+ self.filehandle.close() # seems to close old_fd as well.
+ if pid:
+ os.waitpid(pid, 0)
+ # where_art_thy_filehandles()
+ os.dup2(old_fd, self.fd)
+ # print_to_tty("CLOSING FD %d" % old_fd)
+ os.close(old_fd)
+ # where_art_thy_filehandles()
+ self.update_handle(old_filehandle)
+ # where_art_thy_filehandles()
+ # print_to_tty("EXIT RESTORE %d" % self.fd)
def tee_output_logdir(fn):
- """\
- Method decorator for a class to tee the output to the objects log_dir.
- """
- def tee_logdir_wrapper(self, *args, **dargs):
- self.job.stdout.tee_redirect(os.path.join(self.log_dir, 'stdout'))
- self.job.stderr.tee_redirect(os.path.join(self.log_dir, 'stderr'))
- try:
- result = fn(self, *args, **dargs)
- finally:
- self.job.stderr.restore()
- self.job.stdout.restore()
- return result
- return tee_logdir_wrapper
+ """\
+ Method decorator for a class to tee the output to the object's log_dir.
+ """
+ def tee_logdir_wrapper(self, *args, **dargs):
+ self.job.stdout.tee_redirect(os.path.join(self.log_dir, 'stdout'))
+ self.job.stderr.tee_redirect(os.path.join(self.log_dir, 'stderr'))
+ try:
+ result = fn(self, *args, **dargs)
+ finally:
+ self.job.stderr.restore()
+ self.job.stdout.restore()
+ return result
+ return tee_logdir_wrapper
def __mark(filename, msg):
- file = open(filename, 'a')
- file.write(msg)
- file.close()
+ file = open(filename, 'a')
+ file.write(msg)
+ file.close()
def tee_output_logdir_mark(fn):
- def tee_logdir_mark_wrapper(self, *args, **dargs):
- mark = self.__class__.__name__ + "." + fn.__name__
- outfile = os.path.join(self.log_dir, 'stdout')
- errfile = os.path.join(self.log_dir, 'stderr')
- __mark(outfile, "--- START " + mark + " ---\n")
- __mark(errfile, "--- START " + mark + " ---\n")
- self.job.stdout.tee_redirect(outfile)
- self.job.stderr.tee_redirect(errfile)
- try:
- result = fn(self, *args, **dargs)
- finally:
- self.job.stderr.restore()
- self.job.stdout.restore()
- __mark(outfile, "--- END " + mark + " ---\n")
- __mark(errfile, "--- END " + mark + " ---\n")
-
- return result
-
- tee_logdir_mark_wrapper.__name__ = fn.__name__
- return tee_logdir_mark_wrapper
+ def tee_logdir_mark_wrapper(self, *args, **dargs):
+ mark = self.__class__.__name__ + "." + fn.__name__
+ outfile = os.path.join(self.log_dir, 'stdout')
+ errfile = os.path.join(self.log_dir, 'stderr')
+ __mark(outfile, "--- START " + mark + " ---\n")
+ __mark(errfile, "--- START " + mark + " ---\n")
+ self.job.stdout.tee_redirect(outfile)
+ self.job.stderr.tee_redirect(errfile)
+ try:
+ result = fn(self, *args, **dargs)
+ finally:
+ self.job.stderr.restore()
+ self.job.stdout.restore()
+ __mark(outfile, "--- END " + mark + " ---\n")
+ __mark(errfile, "--- END " + mark + " ---\n")
+
+ return result
+
+ tee_logdir_mark_wrapper.__name__ = fn.__name__
+ return tee_logdir_mark_wrapper
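
A short sketch of the stack in use outside of a job object; fd 1 / sys.stdout mirror the defaults shown in __init__, and the log path is a placeholder.

    # Duplicate everything written to stdout into a file, then pop back.
    stdout_stack = fd_stack(1, sys.stdout)
    stdout_stack.tee_redirect('/tmp/example-stdout.log')  # placeholder path
    print "reaches both the terminal and the log"
    os.system('echo "raw fd 1 output is captured as well"')
    stdout_stack.restore()
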
diff --git a/client/bin/filesystem.py b/client/bin/filesystem.py
index 2c241418..47c8c443 100755
--- a/client/bin/filesystem.py
+++ b/client/bin/filesystem.py
@@ -5,170 +5,170 @@ from autotest_lib.client.bin import autotest_utils
from autotest_lib.client.common_lib import error, utils
def list_mount_devices():
- devices = []
- # list mounted filesystems
- for line in utils.system_output('mount').splitlines():
- devices.append(line.split()[0])
- # list mounted swap devices
- for line in utils.system_output('swapon -s').splitlines():
- if line.startswith('/'): # skip header line
- devices.append(line.split()[0])
- return devices
+ devices = []
+ # list mounted filesystems
+ for line in utils.system_output('mount').splitlines():
+ devices.append(line.split()[0])
+ # list mounted swap devices
+ for line in utils.system_output('swapon -s').splitlines():
+ if line.startswith('/'): # skip header line
+ devices.append(line.split()[0])
+ return devices
def list_mount_points():
- mountpoints = []
- for line in utils.system_output('mount').splitlines():
- mountpoints.append(line.split()[2])
- return mountpoints
+ mountpoints = []
+ for line in utils.system_output('mount').splitlines():
+ mountpoints.append(line.split()[2])
+ return mountpoints
class filesystem:
- """
- Class for handling filesystems
- """
-
- def __init__(self, job, device, mountpoint, loop_size = 0):
- """
- device should be able to be a file as well
- which we mount as loopback
-
- device
- The device in question (eg "/dev/hda2")
- mountpoint
- Default mountpoint for the device.
- loop_size
- size of loopback device (in MB)
- """
-
- part = re.compile(r'^part(\d+)$')
- m = part.match(device)
- if m:
- number = int(m.groups()[0])
- partitions = job.config_get('filesystem.partitions')
- try:
- device = partitions[number]
- except:
- raise NameError("Partition '" + device + "' not available")
-
- self.device = device
- self.mountpoint = mountpoint
- self.job = job
- self.fstype = None
- self.loop = loop_size
- if self.loop:
- utils.system('dd if=/dev/zero of=%s bs=1M count=%d' % \
- (device, loop_size))
-
-
- def mkfs(self, fstype = 'ext2', args = ''):
- """
- Format a partition to fstype
- """
- if list_mount_devices().count(self.device):
- raise NameError('Attempted to format mounted device')
- if fstype == 'xfs':
- args += ' -f'
- if self.loop:
- # BAH. Inconsistent mkfs syntax SUCKS.
- if fstype == 'ext2' or fstype == 'ext3':
- args += ' -F'
- if fstype == 'reiserfs':
- args += ' -f'
- args = args.lstrip()
- mkfs_cmd = "mkfs -t %s %s %s" % (fstype, args, self.device)
- print mkfs_cmd
- sys.stdout.flush()
- try:
- utils.system("yes | " + mkfs_cmd)
- except:
- self.job.record('FAIL', None, mkfs_cmd, error.format_error())
- raise
- else:
- self.job.record('GOOD', None, mkfs_cmd)
- self.fstype = fstype
-
-
- def fsck(self, args = '-n'):
- # I hate reiserfstools.
- # Requires an explit Yes for some inane reason
- fsck_cmd = 'fsck %s %s' % (self.device, args)
- if self.fstype == 'reiserfs':
- fsck_cmd = 'yes "Yes" | ' + fsck_cmd
- print fsck_cmd
- sys.stdout.flush()
- try:
- utils.system("yes | " + fsck_cmd)
- except:
- self.job.record('FAIL', None, fsck_cmd, error.format_error())
- raise
- else:
- self.job.record('GOOD', None, fsck_cmd)
-
-
- def mount(self, mountpoint = None, args = ''):
- if self.fstype:
- args += ' -t ' + self.fstype
- if self.loop:
- args += ' -o loop'
- args = args.lstrip()
-
- if not mountpoint:
- mountpoint = self.mountpoint
- mount_cmd = "mount %s %s %s" % (args, self.device, mountpoint)
-
- if list_mount_devices().count(self.device):
- err = 'Attempted to mount mounted device'
- self.job.record('FAIL', None, mount_cmd, err)
- raise NameError(err)
- if list_mount_points().count(mountpoint):
- err = 'Attempted to mount busy mountpoint'
- self.job.record('FAIL', None, mount_cmd, err)
- raise NameError(err)
-
- print mount_cmd
- sys.stdout.flush()
- try:
- utils.system(mount_cmd)
- except:
- self.job.record('FAIL', None, mount_cmd, error.format_error())
- raise
- else:
- self.job.record('GOOD', None, mount_cmd)
-
-
- def unmount(self, handle=None):
- if not handle:
- handle = self.device
- umount_cmd = "umount " + handle
- print umount_cmd
- sys.stdout.flush()
- try:
- utils.system(umount_cmd)
- except:
- self.job.record('FAIL', None, umount_cmd, error.format_error())
- raise
- else:
- self.job.record('GOOD', None, umount_cmd)
-
-
- def get_io_scheduler_list(self, device_name):
- names = open(self.__sched_path(device_name)).read()
- return names.translate(string.maketrans('[]', ' ')).split()
-
-
- def get_io_scheduler(self, device_name):
- return re.split('[\[\]]',
- open(self.__sched_path(device_name)).read())[1]
-
-
- def set_io_scheduler(self, device_name, name):
- if name not in self.get_io_scheduler_list(device_name):
- raise NameError('No such IO scheduler: %s' % name)
- f = open(self.__sched_path(device_name), 'w')
- print >> f, name
- f.close()
-
-
- def __sched_path(self, device_name):
- return '/sys/block/%s/queue/scheduler' % device_name
+ """
+ Class for handling filesystems
+ """
+
+ def __init__(self, job, device, mountpoint, loop_size = 0):
+ """
+ device may also be a regular file,
+ which we then mount as a loopback device
+
+ device
+ The device in question (eg "/dev/hda2")
+ mountpoint
+ Default mountpoint for the device.
+ loop_size
+ size of loopback device (in MB)
+ """
+
+ part = re.compile(r'^part(\d+)$')
+ m = part.match(device)
+ if m:
+ number = int(m.groups()[0])
+ partitions = job.config_get('filesystem.partitions')
+ try:
+ device = partitions[number]
+ except:
+ raise NameError("Partition '" + device + "' not available")
+
+ self.device = device
+ self.mountpoint = mountpoint
+ self.job = job
+ self.fstype = None
+ self.loop = loop_size
+ if self.loop:
+ utils.system('dd if=/dev/zero of=%s bs=1M count=%d' % \
+ (device, loop_size))
+
+
+ def mkfs(self, fstype = 'ext2', args = ''):
+ """
+ Format a partition to fstype
+ """
+ if list_mount_devices().count(self.device):
+ raise NameError('Attempted to format mounted device')
+ if fstype == 'xfs':
+ args += ' -f'
+ if self.loop:
+ # BAH. Inconsistent mkfs syntax SUCKS.
+ if fstype == 'ext2' or fstype == 'ext3':
+ args += ' -F'
+ if fstype == 'reiserfs':
+ args += ' -f'
+ args = args.lstrip()
+ mkfs_cmd = "mkfs -t %s %s %s" % (fstype, args, self.device)
+ print mkfs_cmd
+ sys.stdout.flush()
+ try:
+ utils.system("yes | " + mkfs_cmd)
+ except:
+ self.job.record('FAIL', None, mkfs_cmd, error.format_error())
+ raise
+ else:
+ self.job.record('GOOD', None, mkfs_cmd)
+ self.fstype = fstype
+
+
+ def fsck(self, args = '-n'):
+ # I hate reiserfstools.
+ # Requires an explicit Yes for some inane reason
+ fsck_cmd = 'fsck %s %s' % (self.device, args)
+ if self.fstype == 'reiserfs':
+ fsck_cmd = 'yes "Yes" | ' + fsck_cmd
+ print fsck_cmd
+ sys.stdout.flush()
+ try:
+ utils.system("yes | " + fsck_cmd)
+ except:
+ self.job.record('FAIL', None, fsck_cmd, error.format_error())
+ raise
+ else:
+ self.job.record('GOOD', None, fsck_cmd)
+
+
+ def mount(self, mountpoint = None, args = ''):
+ if self.fstype:
+ args += ' -t ' + self.fstype
+ if self.loop:
+ args += ' -o loop'
+ args = args.lstrip()
+
+ if not mountpoint:
+ mountpoint = self.mountpoint
+ mount_cmd = "mount %s %s %s" % (args, self.device, mountpoint)
+
+ if list_mount_devices().count(self.device):
+ err = 'Attempted to mount mounted device'
+ self.job.record('FAIL', None, mount_cmd, err)
+ raise NameError(err)
+ if list_mount_points().count(mountpoint):
+ err = 'Attempted to mount busy mountpoint'
+ self.job.record('FAIL', None, mount_cmd, err)
+ raise NameError(err)
+
+ print mount_cmd
+ sys.stdout.flush()
+ try:
+ utils.system(mount_cmd)
+ except:
+ self.job.record('FAIL', None, mount_cmd, error.format_error())
+ raise
+ else:
+ self.job.record('GOOD', None, mount_cmd)
+
+
+ def unmount(self, handle=None):
+ if not handle:
+ handle = self.device
+ umount_cmd = "umount " + handle
+ print umount_cmd
+ sys.stdout.flush()
+ try:
+ utils.system(umount_cmd)
+ except:
+ self.job.record('FAIL', None, umount_cmd, error.format_error())
+ raise
+ else:
+ self.job.record('GOOD', None, umount_cmd)
+
+
+ def get_io_scheduler_list(self, device_name):
+ names = open(self.__sched_path(device_name)).read()
+ return names.translate(string.maketrans('[]', ' ')).split()
+
+
+ def get_io_scheduler(self, device_name):
+ return re.split('[\[\]]',
+ open(self.__sched_path(device_name)).read())[1]
+
+
+ def set_io_scheduler(self, device_name, name):
+ if name not in self.get_io_scheduler_list(device_name):
+ raise NameError('No such IO scheduler: %s' % name)
+ f = open(self.__sched_path(device_name), 'w')
+ print >> f, name
+ f.close()
+
+
+ def __sched_path(self, device_name):
+ return '/sys/block/%s/queue/scheduler' % device_name
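
A usage sketch for the class above, as it would appear in a control file where 'job' is already in scope; the device and mountpoint are placeholders for a real spare partition.

    # mkfs / mount / unmount / fsck cycle on a scratch partition (placeholders).
    fs = filesystem(job, '/dev/sdb1', '/mnt/scratch')
    fs.mkfs('ext3')
    fs.mount()
    try:
        pass            # filesystem workload runs against /mnt/scratch here
    finally:
        fs.unmount()
        fs.fsck()
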
diff --git a/client/bin/grub.py b/client/bin/grub.py
index 68b9bc5b..81ea3de1 100755
--- a/client/bin/grub.py
+++ b/client/bin/grub.py
@@ -4,139 +4,139 @@
import shutil
import re
-import os
+import os
import os.path
import string
class grub:
- config_locations = ['/boot/grub/grub.conf', '/boot/grub/menu.lst',
- '/etc/grub.conf']
-
- def __init__(self, config_file=None):
- if config_file:
- self.config = config_file
- else:
- self.config = self.detect()
- self.read()
-
-
- def read(self):
- conf_file = file(self.config, 'r')
- self.lines = conf_file.readlines()
- conf_file.close()
-
- self.entries = [] # list of stanzas
- self.titles = {} # dictionary of titles
- entry = grub_entry(-1)
- count = 0
- for line in self.lines:
- if re.match(r'\s*title', line):
- self.entries.append(entry)
- entry = grub_entry(count)
- count = count + 1
- title = line.replace('title ', '')
- title = title.rstrip('\n')
- entry.set('title', title)
- self.titles[title] = entry
- # if line.startswith('initrd'):
- if re.match(r'\s*initrd', line):
- entry.set('initrd',
- re.sub(r'\s*initrd\s+', '', line))
- if re.match(r'\s*kernel', line):
- entry.set('kernel',
- re.sub(r'\s*kernel\s+', '', line))
- entry.lines.append(line)
- self.entries.append(entry)
- self.preamble = self.entries.pop(0) # separate preamble
-
-
- def write(self):
- conf_file = file(self.config, 'w')
- conf_file.write(self.preamble)
- for entry in self.entries:
- conf_file.write(entry.lines)
- conf_file.close()
-
-
- def dump(self):
- for line in self.preamble.lines:
- print line,
- for entry in self.entries:
- for line in entry.lines:
- print line,
-
- def backup(self):
- shutil.copyfile(self.config, self.config+'.bak')
- restore = file(autodir + '/var/autotest.boot.restore', 'w')
- restore.write('cp ' + self.config+'.bak ' + self.config + '\n')
- restore.close()
-
-
- def bootloader(self):
- return 'grub'
-
-
- def detect(self):
- for config in grub.config_locations:
- if os.path.isfile(config) and not os.path.islink(config):
- return config
-
-
- def list_titles(self):
- list = []
- for entry in self.entries:
- list.append(entry.get('title'))
- return list
-
-
- def print_entry(self, index):
- entry = self.entries[index]
- entry.print_entry()
-
-
- def renamed_entry(self, index, newname, args=False):
- "print a specified entry, renaming it as specified"
- entry = self.entries[index]
- entry.set('title', newname)
- if args:
- entry.set_autotest_kernel()
- entry.print_entry()
-
-
- def omit_markers(self, marker):
- # print, ommitting entries between specified markers
- print_state = True
- for line in lines:
- if line.count(marker):
- print_state = not print_state
- else:
- if print_state:
- print line
-
-
- def select(self, title, boot_options=None):
- entry = self.titles[title]
- print "grub: will boot entry %d (0-based)" % entry.index
- self.set_default(entry.index)
- self.set_timeout()
-
-
- def set_default(self, index):
- lines = (self.preamble).lines
- for i in range(len(lines)):
- default = 'default %d' % index
- lines[i] = re.sub(r'^\s*default.*',
- default, lines[i])
-
-
- def set_timeout(self):
- lines = (self.preamble).lines
- for i in range(len(lines)):
- lines[i] = re.sub(r'^timeout.*/',
- 'timeout 60', lines[i])
- lines[i] = re.sub(r'^(\s*terminal .*--timeout)=\d+',
- r'\1=30', lines[i])
-
+ config_locations = ['/boot/grub/grub.conf', '/boot/grub/menu.lst',
+ '/etc/grub.conf']
+
+ def __init__(self, config_file=None):
+ if config_file:
+ self.config = config_file
+ else:
+ self.config = self.detect()
+ self.read()
+
+
+ def read(self):
+ conf_file = file(self.config, 'r')
+ self.lines = conf_file.readlines()
+ conf_file.close()
+
+ self.entries = [] # list of stanzas
+ self.titles = {} # dictionary of titles
+ entry = grub_entry(-1)
+ count = 0
+ for line in self.lines:
+ if re.match(r'\s*title', line):
+ self.entries.append(entry)
+ entry = grub_entry(count)
+ count = count + 1
+ title = line.replace('title ', '')
+ title = title.rstrip('\n')
+ entry.set('title', title)
+ self.titles[title] = entry
+ # if line.startswith('initrd'):
+ if re.match(r'\s*initrd', line):
+ entry.set('initrd',
+ re.sub(r'\s*initrd\s+', '', line))
+ if re.match(r'\s*kernel', line):
+ entry.set('kernel',
+ re.sub(r'\s*kernel\s+', '', line))
+ entry.lines.append(line)
+ self.entries.append(entry)
+ self.preamble = self.entries.pop(0) # separate preamble
+
+
+ def write(self):
+ conf_file = file(self.config, 'w')
+ conf_file.write(self.preamble)
+ for entry in self.entries:
+ conf_file.write(entry.lines)
+ conf_file.close()
+
+
+ def dump(self):
+ for line in self.preamble.lines:
+ print line,
+ for entry in self.entries:
+ for line in entry.lines:
+ print line,
+
+ def backup(self):
+ shutil.copyfile(self.config, self.config+'.bak')
+ restore = file(autodir + '/var/autotest.boot.restore', 'w')
+ restore.write('cp ' + self.config+'.bak ' + self.config + '\n')
+ restore.close()
+
+
+ def bootloader(self):
+ return 'grub'
+
+
+ def detect(self):
+ for config in grub.config_locations:
+ if os.path.isfile(config) and not os.path.islink(config):
+ return config
+
+
+ def list_titles(self):
+ list = []
+ for entry in self.entries:
+ list.append(entry.get('title'))
+ return list
+
+
+ def print_entry(self, index):
+ entry = self.entries[index]
+ entry.print_entry()
+
+
+ def renamed_entry(self, index, newname, args=False):
+ "print a specified entry, renaming it as specified"
+ entry = self.entries[index]
+ entry.set('title', newname)
+ if args:
+ entry.set_autotest_kernel()
+ entry.print_entry()
+
+
+ def omit_markers(self, marker):
+ # print, omitting entries between specified markers
+ print_state = True
+ for line in lines:
+ if line.count(marker):
+ print_state = not print_state
+ else:
+ if print_state:
+ print line
+
+
+ def select(self, title, boot_options=None):
+ entry = self.titles[title]
+ print "grub: will boot entry %d (0-based)" % entry.index
+ self.set_default(entry.index)
+ self.set_timeout()
+
+
+ def set_default(self, index):
+ lines = (self.preamble).lines
+ for i in range(len(lines)):
+ default = 'default %d' % index
+ lines[i] = re.sub(r'^\s*default.*',
+ default, lines[i])
+
+
+ def set_timeout(self):
+ lines = (self.preamble).lines
+ for i in range(len(lines)):
+ lines[i] = re.sub(r'^timeout.*/',
+ 'timeout 60', lines[i])
+ lines[i] = re.sub(r'^(\s*terminal .*--timeout)=\d+',
+ r'\1=30', lines[i])
+
# ----------------------------------------------------------------------
@@ -145,49 +145,49 @@ class grub:
# and bits we don't understand.
class grub_entry:
- def __init__(self, count):
- self.lines = []
- self.fields = {} # title, initrd, kernel, etc
- self.index = count
-
-
- def set(self, field, value):
- print "setting '%s' to '%s'" % (field, value)
- self.fields[field] = value
- for i in range(len(self.lines)):
- m = re.match(r'\s*' + field + r'\s+', self.lines[i])
- if m:
- self.lines[i] = m.group() + value + '\n'
-
-
- def get(self, field):
- return self.fields[field]
-
-
- def print_entry(self):
- print self.lines
-
-
- def set_kernel_options(self, options):
- kernel = self.get('kernel')
- re.sub(r'(autotest_args:).*', r'\1'+options, kernel)
- self.set('kernel', kernel)
-
- def set_autotest_kernel(self):
- kernel_words = []
- found_path = False
- # Want to copy most of the entry, replacing the 'path'
- # part of the entry with vmlinux-autotest in the same
- # dir, and make sure autotest_args: is (uniquely) added
- for word in (self.get('kernel')).split():
- if word.startswith('--'):
- kernel_words.append(word)
- continue
- if not found_path:
- word = os.path.dirname(word)+'vmlinuz-autotest'
- found_path = True
- if re.match(r'auto(bench|test)_args:', word):
- break
- kernel_words.append(word)
- kernel_words.append('autotest_args: ')
- self.set('kernel', string.join(kernel_words))
+ def __init__(self, count):
+ self.lines = []
+ self.fields = {} # title, initrd, kernel, etc
+ self.index = count
+
+
+ def set(self, field, value):
+ print "setting '%s' to '%s'" % (field, value)
+ self.fields[field] = value
+ for i in range(len(self.lines)):
+ m = re.match(r'\s*' + field + r'\s+', self.lines[i])
+ if m:
+ self.lines[i] = m.group() + value + '\n'
+
+
+ def get(self, field):
+ return self.fields[field]
+
+
+ def print_entry(self):
+ print self.lines
+
+
+ def set_kernel_options(self, options):
+ kernel = self.get('kernel')
+ re.sub(r'(autotest_args:).*', r'\1'+options, kernel)
+ self.set('kernel', kernel)
+
+ def set_autotest_kernel(self):
+ kernel_words = []
+ found_path = False
+ # Want to copy most of the entry, replacing the 'path'
+ # part of the entry with vmlinuz-autotest in the same
+ # dir, and make sure autotest_args: is (uniquely) added
+ for word in (self.get('kernel')).split():
+ if word.startswith('--'):
+ kernel_words.append(word)
+ continue
+ if not found_path:
+ word = os.path.dirname(word)+'vmlinuz-autotest'
+ found_path = True
+ if re.match(r'auto(bench|test)_args:', word):
+ break
+ kernel_words.append(word)
+ kernel_words.append('autotest_args: ')
+ self.set('kernel', string.join(kernel_words))
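
A sketch of driving the two classes above; it assumes a readable config in one of config_locations and that an entry titled 'autotest' (a hypothetical title) exists in it.

    # List the menu entries, then point the default boot at the 'autotest' one.
    g = grub()
    for title in g.list_titles():
        print title
    g.select('autotest')    # rewrites 'default N' and the timeouts in memory
    g.dump()                # echo the (modified) configuration for inspection
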
diff --git a/client/bin/harness.py b/client/bin/harness.py
index 2a6378f0..86441b86 100755
--- a/client/bin/harness.py
+++ b/client/bin/harness.py
@@ -8,77 +8,77 @@ __author__ = """Copyright Andy Whitcroft 2006"""
import os, sys
class harness:
- """The NULL server harness
+ """The NULL server harness
- Properties:
- job
- The job object for this job
- """
+ Properties:
+ job
+ The job object for this job
+ """
- def __init__(self, job):
- """
- job
- The job object for this job
- """
- self.setup(job)
+ def __init__(self, job):
+ """
+ job
+ The job object for this job
+ """
+ self.setup(job)
- def setup(self, job):
- """
- job
- The job object for this job
- """
- self.job = job
+ def setup(self, job):
+ """
+ job
+ The job object for this job
+ """
+ self.job = job
- configd = os.path.join(os.environ['AUTODIR'], 'configs')
- if os.path.isdir(configd):
- (name, dirs, files) = os.walk(configd).next()
- job.config_set('kernel.default_config_set',
- [ configd + '/' ] + files)
+ configd = os.path.join(os.environ['AUTODIR'], 'configs')
+ if os.path.isdir(configd):
+ (name, dirs, files) = os.walk(configd).next()
+ job.config_set('kernel.default_config_set',
+ [ configd + '/' ] + files)
- def run_start(self):
- """A run within this job is starting"""
- pass
+ def run_start(self):
+ """A run within this job is starting"""
+ pass
- def run_pause(self):
- """A run within this job is completing (expect continue)"""
- pass
+ def run_pause(self):
+ """A run within this job is completing (expect continue)"""
+ pass
- def run_reboot(self):
- """A run within this job is performing a reboot
- (expect continue following reboot)
- """
- pass
+ def run_reboot(self):
+ """A run within this job is performing a reboot
+ (expect continue following reboot)
+ """
+ pass
- def run_abort(self):
- """A run within this job is aborting. It all went wrong"""
- pass
+ def run_abort(self):
+ """A run within this job is aborting. It all went wrong"""
+ pass
- def run_complete(self):
- """A run within this job is completing (all done)"""
- pass
+ def run_complete(self):
+ """A run within this job is completing (all done)"""
+ pass
- def test_status(self, status, tag):
- """A test within this job is completing"""
- pass
+ def test_status(self, status, tag):
+ """A test within this job is completing"""
+ pass
- def test_status_detail(self, code, subdir, operation, status, tag):
- """A test within this job is completing (detail)"""
- pass
+ def test_status_detail(self, code, subdir, operation, status, tag):
+ """A test within this job is completing (detail)"""
+ pass
def select(which, job):
- if not which:
- which = 'standalone'
-
- exec "import harness_%s" % (which)
- exec "myharness = harness_%s.harness_%s(job)" % (which, which)
+ if not which:
+ which = 'standalone'
- return myharness
+ exec "import harness_%s" % (which)
+ exec "myharness = harness_%s.harness_%s(job)" % (which, which)
+
+ return myharness
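
select() builds the module and class name from the 'which' string, so adding a site harness is just a naming convention. A sketch of what such a module could look like (the 'mylab' name and its behaviour are hypothetical):

    # client/bin/harness_mylab.py -- picked up by harness.select('mylab', job)
    import harness

    class harness_mylab(harness.harness):
        def __init__(self, job):
            self.setup(job)

        def test_status(self, status, tag):
            # forward each status line to a site-specific collector
            for line in status.split('\n'):
                print "MYLAB %s: %s" % (tag, line)
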
diff --git a/client/bin/harness_ABAT.py b/client/bin/harness_ABAT.py
index e5f2f632..8fadb2a9 100755
--- a/client/bin/harness_ABAT.py
+++ b/client/bin/harness_ABAT.py
@@ -10,145 +10,145 @@ from autotest_lib.client.common_lib import utils
import os, harness, time, re
def autobench_load(fn):
- disks = re.compile(r'^\s*DATS_FREE_DISKS\s*=(.*\S)\s*$')
- parts = re.compile(r'^\s*DATS_FREE_PARTITIONS\s*=(.*\S)\s*$')
- modules = re.compile(r'^\s*INITRD_MODULES\s*=(.*\S)\s*$')
-
- conf = {}
-
- try:
- fd = file(fn, "r")
- except:
- return conf
- for ln in fd.readlines():
- m = disks.match(ln)
- if m:
- val = m.groups()[0]
- conf['disks'] = val.strip('"').split()
- m = parts.match(ln)
- if m:
- val = m.groups()[0]
- conf['partitions'] = val.strip('"').split()
- m = modules.match(ln)
- if m:
- val = m.groups()[0]
- conf['modules'] = val.strip('"').split()
- fd.close()
-
- return conf
+ disks = re.compile(r'^\s*DATS_FREE_DISKS\s*=(.*\S)\s*$')
+ parts = re.compile(r'^\s*DATS_FREE_PARTITIONS\s*=(.*\S)\s*$')
+ modules = re.compile(r'^\s*INITRD_MODULES\s*=(.*\S)\s*$')
+
+ conf = {}
+
+ try:
+ fd = file(fn, "r")
+ except:
+ return conf
+ for ln in fd.readlines():
+ m = disks.match(ln)
+ if m:
+ val = m.groups()[0]
+ conf['disks'] = val.strip('"').split()
+ m = parts.match(ln)
+ if m:
+ val = m.groups()[0]
+ conf['partitions'] = val.strip('"').split()
+ m = modules.match(ln)
+ if m:
+ val = m.groups()[0]
+ conf['modules'] = val.strip('"').split()
+ fd.close()
+
+ return conf
class harness_ABAT(harness.harness):
- """The ABAT server harness
+ """The ABAT server harness
- Properties:
- job
- The job object for this job
- """
+ Properties:
+ job
+ The job object for this job
+ """
- def __init__(self, job):
- """
- job
- The job object for this job
- """
- self.setup(job)
+ def __init__(self, job):
+ """
+ job
+ The job object for this job
+ """
+ self.setup(job)
- if 'ABAT_STATUS' in os.environ:
- self.status = file(os.environ['ABAT_STATUS'], "w")
- else:
- self.status = None
+ if 'ABAT_STATUS' in os.environ:
+ self.status = file(os.environ['ABAT_STATUS'], "w")
+ else:
+ self.status = None
- def __send(self, msg):
- if self.status:
- msg = msg.rstrip()
- self.status.write(msg + "\n")
- self.status.flush()
+ def __send(self, msg):
+ if self.status:
+ msg = msg.rstrip()
+ self.status.write(msg + "\n")
+ self.status.flush()
- def __send_status(self, code, subdir, operation, msg):
- self.__send("STATUS %s %s %s %s" % \
- (code, subdir, operation, msg))
+ def __send_status(self, code, subdir, operation, msg):
+ self.__send("STATUS %s %s %s %s" % \
+ (code, subdir, operation, msg))
- def __root_device(self):
- device = None
- root = re.compile(r'^\S*(/dev/\S+).*\s/\s*$')
-
- df = utils.system_output('df -lP')
- for line in df.split("\n"):
- m = root.match(line)
- if m:
- device = m.groups()[0]
+ def __root_device(self):
+ device = None
+ root = re.compile(r'^\S*(/dev/\S+).*\s/\s*$')
- return device
+ df = utils.system_output('df -lP')
+ for line in df.split("\n"):
+ m = root.match(line)
+ if m:
+ device = m.groups()[0]
+ return device
- def run_start(self):
- """A run within this job is starting"""
- self.__send_status('GOOD', '----', '----', 'run starting')
- # Load up the autobench.conf if it exists.
- conf = autobench_load("/etc/autobench.conf")
- if 'partitions' in conf:
- self.job.config_set('filesystem.partitions',
- conf['partitions'])
+ def run_start(self):
+ """A run within this job is starting"""
+ self.__send_status('GOOD', '----', '----', 'run starting')
- # Search the boot loader configuration for the autobench entry,
- # and extract its args.
- entry_args = None
- args = None
- for line in self.job.bootloader.info('all').split('\n'):
- if line.startswith('args'):
- entry_args = line.split(None, 2)[2]
- if line.startswith('title'):
- title = line.split()[2]
- if title == 'autobench':
- args = entry_args
+ # Load up the autobench.conf if it exists.
+ conf = autobench_load("/etc/autobench.conf")
+ if 'partitions' in conf:
+ self.job.config_set('filesystem.partitions',
+ conf['partitions'])
- if args:
- args = re.sub(r'autobench_args:.*', '', args)
- args = re.sub(r'root=\S*', '', args)
- args += " root=" + self.__root_device()
+ # Search the boot loader configuration for the autobench entry,
+ # and extract its args.
+ entry_args = None
+ args = None
+ for line in self.job.bootloader.info('all').split('\n'):
+ if line.startswith('args'):
+ entry_args = line.split(None, 2)[2]
+ if line.startswith('title'):
+ title = line.split()[2]
+ if title == 'autobench':
+ args = entry_args
- self.job.config_set('boot.default_args', args)
+ if args:
+ args = re.sub(r'autobench_args:.*', '', args)
+ args = re.sub(r'root=\S*', '', args)
+ args += " root=" + self.__root_device()
- # Turn off boot_once semantics.
- self.job.config_set('boot.set_default', True)
+ self.job.config_set('boot.default_args', args)
- # For RedHat installs we do not load up the module.conf
- # as they cannot be builtin. Pass them as arguments.
- vendor = autotest_utils.get_os_vendor()
- if vendor in ['Red Hat', 'Fedora Core'] and 'modules' in conf:
- args = '--allow-missing'
- for mod in conf['modules']:
- args += " --with " + mod
- self.job.config_set('kernel.mkinitrd_extra_args', args)
+ # Turn off boot_once semantics.
+ self.job.config_set('boot.set_default', True)
+ # For RedHat installs we do not load up the module.conf
+ # as they cannot be builtin. Pass them as arguments.
+ vendor = autotest_utils.get_os_vendor()
+ if vendor in ['Red Hat', 'Fedora Core'] and 'modules' in conf:
+ args = '--allow-missing'
+ for mod in conf['modules']:
+ args += " --with " + mod
+ self.job.config_set('kernel.mkinitrd_extra_args', args)
- def run_reboot(self):
- """A run within this job is performing a reboot
- (expect continue following reboot)
- """
- self.__send("REBOOT")
+ def run_reboot(self):
+ """A run within this job is performing a reboot
+ (expect continue following reboot)
+ """
+ self.__send("REBOOT")
- def run_complete(self):
- """A run within this job is completing (all done)"""
- self.__send("DONE")
+ def run_complete(self):
+ """A run within this job is completing (all done)"""
+ self.__send("DONE")
- def test_status_detail(self, code, subdir, operation, msg, tag):
- """A test within this job is completing (detail)"""
- # Send the first line with the status code as a STATUS message.
- lines = msg.split("\n")
- self.__send_status(code, subdir, operation, lines[0])
+ def test_status_detail(self, code, subdir, operation, msg, tag):
+ """A test within this job is completing (detail)"""
+ # Send the first line with the status code as a STATUS message.
+ lines = msg.split("\n")
+ self.__send_status(code, subdir, operation, lines[0])
- def test_status(self, msg, tag):
- lines = msg.split("\n")
- # Send each line as a SUMMARY message.
- for line in lines:
- self.__send("SUMMARY :" + line)
+ def test_status(self, msg, tag):
+ lines = msg.split("\n")
+
+ # Send each line as a SUMMARY message.
+ for line in lines:
+ self.__send("SUMMARY :" + line)
diff --git a/client/bin/harness_simple.py b/client/bin/harness_simple.py
index 7f104da4..5ff90d4a 100755
--- a/client/bin/harness_simple.py
+++ b/client/bin/harness_simple.py
@@ -7,31 +7,31 @@ __author__ = """Copyright Andy Whitcroft, Martin J. Bligh 2006"""
import os, harness, time
class harness_simple(harness.harness):
- """
- The simple server harness
-
- Properties:
- job
- The job object for this job
- """
-
- def __init__(self, job):
- """
- job
- The job object for this job
- """
- self.setup(job)
-
- self.status = os.fdopen(3, 'w')
-
-
- def test_status(self, status, tag):
- """A test within this job is completing"""
- if self.status:
- for line in status.split('\n'):
- # prepend status messages with
- # AUTOTEST_STATUS:tag: so that we can tell
- # which lines were sent by the autotest client
- pre = 'AUTOTEST_STATUS:%s:' % (tag,)
- self.status.write(pre + line + '\n')
- self.status.flush()
+ """
+ The simple server harness
+
+ Properties:
+ job
+ The job object for this job
+ """
+
+ def __init__(self, job):
+ """
+ job
+ The job object for this job
+ """
+ self.setup(job)
+
+ self.status = os.fdopen(3, 'w')
+
+
+ def test_status(self, status, tag):
+ """A test within this job is completing"""
+ if self.status:
+ for line in status.split('\n'):
+ # prepend status messages with
+ # AUTOTEST_STATUS:tag: so that we can tell
+ # which lines were sent by the autotest client
+ pre = 'AUTOTEST_STATUS:%s:' % (tag,)
+ self.status.write(pre + line + '\n')
+ self.status.flush()
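
The prefix written above is what lets the server side separate client status lines from ordinary output on fd 3. A sketch of the matching consumer, assuming the stream was captured to a file and using a placeholder tag:

    # Filter harness_simple output back out of a captured stream.
    prefix = 'AUTOTEST_STATUS:%s:' % 'mytag'          # placeholder tag
    for line in open('/tmp/client-fd3.log'):          # placeholder capture file
        if line.startswith(prefix):
            print line[len(prefix):],
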
diff --git a/client/bin/harness_standalone.py b/client/bin/harness_standalone.py
index dccdcabf..d6a46877 100644
--- a/client/bin/harness_standalone.py
+++ b/client/bin/harness_standalone.py
@@ -9,40 +9,40 @@ from autotest_lib.client.common_lib import utils
import os, harness, shutil
class harness_standalone(harness.harness):
- """The standalone server harness
-
- Properties:
- job
- The job object for this job
- """
-
- def __init__(self, job):
- """
- job
- The job object for this job
- """
- self.autodir = os.path.abspath(os.environ['AUTODIR'])
- self.setup(job)
-
- src = job.control_get()
- dest = os.path.join(self.autodir, 'control')
- if os.path.abspath(src) != os.path.abspath(dest):
- shutil.copyfile(src, dest)
- job.control_set(dest)
-
- print 'Symlinking init scripts'
- rc = os.path.join(self.autodir, 'tools/autotest')
- # see if system supports event.d versus inittab
- if os.path.exists('/etc/event.d'):
- # NB: assuming current runlevel is default
- initdefault = utils.system_output('runlevel').split()[1]
- else:
- initdefault = utils.system_output('grep :initdefault: /etc/inittab')
- initdefault = initdefault.split(':')[1]
-
- try:
- utils.system('ln -sf %s /etc/init.d/autotest' % rc)
- utils.system('ln -sf %s /etc/rc%s.d/S99autotest' % \
- (rc, initdefault))
- except:
- print "WARNING: linking init scripts failed"
+ """The standalone server harness
+
+ Properties:
+ job
+ The job object for this job
+ """
+
+ def __init__(self, job):
+ """
+ job
+ The job object for this job
+ """
+ self.autodir = os.path.abspath(os.environ['AUTODIR'])
+ self.setup(job)
+
+ src = job.control_get()
+ dest = os.path.join(self.autodir, 'control')
+ if os.path.abspath(src) != os.path.abspath(dest):
+ shutil.copyfile(src, dest)
+ job.control_set(dest)
+
+ print 'Symlinking init scripts'
+ rc = os.path.join(self.autodir, 'tools/autotest')
+ # see if system supports event.d versus inittab
+ if os.path.exists('/etc/event.d'):
+ # NB: assuming current runlevel is default
+ initdefault = utils.system_output('runlevel').split()[1]
+ else:
+ initdefault = utils.system_output('grep :initdefault: /etc/inittab')
+ initdefault = initdefault.split(':')[1]
+
+ try:
+ utils.system('ln -sf %s /etc/init.d/autotest' % rc)
+ utils.system('ln -sf %s /etc/rc%s.d/S99autotest' % \
+ (rc, initdefault))
+ except:
+ print "WARNING: linking init scripts failed"
diff --git a/client/bin/job.py b/client/bin/job.py
index ac2745b2..870b63e3 100755
--- a/client/bin/job.py
+++ b/client/bin/job.py
@@ -20,995 +20,995 @@ from autotest_utils import *
"""
class StepError(error.AutotestError):
- pass
+ pass
class base_job:
- """The actual job against which we do everything.
-
- Properties:
- autodir
- The top level autotest directory (/usr/local/autotest).
- Comes from os.environ['AUTODIR'].
- bindir
- <autodir>/bin/
- libdir
- <autodir>/lib/
- testdir
- <autodir>/tests/
- site_testdir
- <autodir>/site_tests/
- profdir
- <autodir>/profilers/
- tmpdir
- <autodir>/tmp/
- resultdir
- <autodir>/results/<jobtag>
- stdout
- fd_stack object for stdout
- stderr
- fd_stack object for stderr
- profilers
- the profilers object for this job
- harness
- the server harness object for this job
- config
- the job configuration for this job
- """
-
- DEFAULT_LOG_FILENAME = "status"
-
- def __init__(self, control, jobtag, cont, harness_type=None,
- use_external_logging = False):
- """
- control
- The control file (pathname of)
- jobtag
- The job tag string (eg "default")
- cont
- If this is the continuation of this job
- harness_type
- An alternative server harness
- """
- self.autodir = os.environ['AUTODIR']
- self.bindir = os.path.join(self.autodir, 'bin')
- self.libdir = os.path.join(self.autodir, 'lib')
- self.testdir = os.path.join(self.autodir, 'tests')
- self.site_testdir = os.path.join(self.autodir, 'site_tests')
- self.profdir = os.path.join(self.autodir, 'profilers')
- self.tmpdir = os.path.join(self.autodir, 'tmp')
- self.resultdir = os.path.join(self.autodir, 'results', jobtag)
- self.sysinfodir = os.path.join(self.resultdir, 'sysinfo')
- self.control = os.path.abspath(control)
- self.state_file = self.control + '.state'
- self.current_step_ancestry = []
- self.next_step_index = 0
- self.__load_state()
-
- if not cont:
- """
- Don't cleanup the tmp dir (which contains the lockfile)
- in the constructor, this would be a problem for multiple
- jobs starting at the same time on the same client. Instead
- do the delete at the server side. We simply create the tmp
- directory here if it does not already exist.
- """
- if not os.path.exists(self.tmpdir):
- os.mkdir(self.tmpdir)
-
- results = os.path.join(self.autodir, 'results')
- if not os.path.exists(results):
- os.mkdir(results)
-
- download = os.path.join(self.testdir, 'download')
- if not os.path.exists(download):
- os.mkdir(download)
-
- if os.path.exists(self.resultdir):
- utils.system('rm -rf '
- + self.resultdir)
- os.mkdir(self.resultdir)
- os.mkdir(self.sysinfodir)
-
- os.mkdir(os.path.join(self.resultdir, 'debug'))
- os.mkdir(os.path.join(self.resultdir, 'analysis'))
-
- shutil.copyfile(self.control,
- os.path.join(self.resultdir, 'control'))
-
-
- self.control = control
- self.jobtag = jobtag
- self.log_filename = self.DEFAULT_LOG_FILENAME
- self.container = None
-
- self.stdout = fd_stack.fd_stack(1, sys.stdout)
- self.stderr = fd_stack.fd_stack(2, sys.stderr)
-
- self._init_group_level()
-
- self.config = config.config(self)
-
- self.harness = harness.select(harness_type, self)
-
- self.profilers = profilers.profilers(self)
-
- try:
- tool = self.config_get('boottool.executable')
- self.bootloader = boottool.boottool(tool)
- except:
- pass
-
- sysinfo.log_per_reboot_data(self.sysinfodir)
-
- if not cont:
- self.record('START', None, None)
- self._increment_group_level()
-
- self.harness.run_start()
-
- if use_external_logging:
- self.enable_external_logging()
-
- # load the max disk usage rate - default to no monitoring
- self.max_disk_usage_rate = self.get_state('__monitor_disk',
- default=0.0)
-
-
- def monitor_disk_usage(self, max_rate):
- """\
- Signal that the job should monitor disk space usage on /
- and generate a warning if a test uses up disk space at a
- rate exceeding 'max_rate'.
-
- Parameters:
- max_rate - the maximium allowed rate of disk consumption
- during a test, in MB/hour, or 0 to indicate
- no limit.
- """
- self.set_state('__monitor_disk', max_rate)
- self.max_disk_usage_rate = max_rate
-
-
- def relative_path(self, path):
- """\
- Return a patch relative to the job results directory
- """
- head = len(self.resultdir) + 1 # remove the / inbetween
- return path[head:]
-
-
- def control_get(self):
- return self.control
-
-
- def control_set(self, control):
- self.control = os.path.abspath(control)
-
-
- def harness_select(self, which):
- self.harness = harness.select(which, self)
-
-
- def config_set(self, name, value):
- self.config.set(name, value)
-
-
- def config_get(self, name):
- return self.config.get(name)
-
- def setup_dirs(self, results_dir, tmp_dir):
- if not tmp_dir:
- tmp_dir = os.path.join(self.tmpdir, 'build')
- if not os.path.exists(tmp_dir):
- os.mkdir(tmp_dir)
- if not os.path.isdir(tmp_dir):
- e_msg = "Temp dir (%s) is not a dir - args backwards?" % self.tmpdir
- raise ValueError(e_msg)
-
- # We label the first build "build" and then subsequent ones
- # as "build.2", "build.3", etc. Whilst this is a little bit
- # inconsistent, 99.9% of jobs will only have one build
- # (that's not done as kernbench, sparse, or buildtest),
- # so it works out much cleaner. One of life's comprimises.
- if not results_dir:
- results_dir = os.path.join(self.resultdir, 'build')
- i = 2
- while os.path.exists(results_dir):
- results_dir = os.path.join(self.resultdir, 'build.%d' % i)
- i += 1
- if not os.path.exists(results_dir):
- os.mkdir(results_dir)
-
- return (results_dir, tmp_dir)
-
-
- def xen(self, base_tree, results_dir = '', tmp_dir = '', leave = False, \
- kjob = None ):
- """Summon a xen object"""
- (results_dir, tmp_dir) = self.setup_dirs(results_dir, tmp_dir)
- build_dir = 'xen'
- return xen.xen(self, base_tree, results_dir, tmp_dir, build_dir, leave, kjob)
-
-
- def kernel(self, base_tree, results_dir = '', tmp_dir = '', leave = False):
- """Summon a kernel object"""
- (results_dir, tmp_dir) = self.setup_dirs(results_dir, tmp_dir)
- build_dir = 'linux'
- return kernel.auto_kernel(self, base_tree, results_dir,
- tmp_dir, build_dir, leave)
-
-
- def barrier(self, *args, **kwds):
- """Create a barrier object"""
- return barrier.barrier(*args, **kwds)
-
-
- def setup_dep(self, deps):
- """Set up the dependencies for this test.
-
- deps is a list of libraries required for this test.
- """
- for dep in deps:
- try:
- os.chdir(os.path.join(self.autodir, 'deps', dep))
- utils.system('./' + dep + '.py')
- except:
- err = "setting up dependency " + dep + "\n"
- raise error.UnhandledError(err)
-
-
- def __runtest(self, url, tag, args, dargs):
- try:
- l = lambda : test.runtest(self, url, tag, args, dargs)
- pid = parallel.fork_start(self.resultdir, l)
- parallel.fork_waitfor(self.resultdir, pid)
- except error.AutotestError:
- raise
- except Exception, e:
- msg = "Unhandled %s error occured during test\n"
- msg %= str(e.__class__.__name__)
- raise error.UnhandledError(msg)
-
-
- def run_test(self, url, *args, **dargs):
- """Summon a test object and run it.
-
- tag
- tag to add to testname
- url
- url of the test to run
- """
-
- if not url:
- raise TypeError("Test name is invalid. "
- "Switched arguments?")
- (group, testname) = test.testname(url)
- namelen = len(testname)
- dargs = dargs.copy()
- tntag = dargs.pop('tag', None)
- if tntag: # testname tag is included in reported test name
- testname += '.' + tntag
- subdir = testname
- sdtag = dargs.pop('subdir_tag', None)
- if sdtag: # subdir-only tag is not included in reports
- subdir = subdir + '.' + sdtag
- tag = subdir[namelen+1:] # '' if none
-
- outputdir = os.path.join(self.resultdir, subdir)
- if os.path.exists(outputdir):
- msg = ("%s already exists, test <%s> may have"
- " already run with tag <%s>"
- % (outputdir, testname, tag) )
- raise error.TestError(msg)
- os.mkdir(outputdir)
-
- container = dargs.pop('container', None)
- if container:
- cname = container.get('name', None)
- if not cname: # get old name
- cname = container.get('container_name', None)
- mbytes = container.get('mbytes', None)
- if not mbytes: # get old name
- mbytes = container.get('mem', None)
- cpus = container.get('cpus', None)
- if not cpus: # get old name
- cpus = container.get('cpu', None)
- root = container.get('root', None)
- self.new_container(mbytes=mbytes, cpus=cpus,
- root=root, name=cname)
- # We are running in a container now...
-
- def log_warning(reason):
- self.record("WARN", subdir, testname, reason)
- @disk_usage_monitor.watch(log_warning, "/",
- self.max_disk_usage_rate)
- def group_func():
- try:
- self.__runtest(url, tag, args, dargs)
- except error.TestNAError, detail:
- self.record('TEST_NA', subdir, testname,
- str(detail))
- raise
- except Exception, detail:
- self.record('FAIL', subdir, testname,
- str(detail))
- raise
- else:
- self.record('GOOD', subdir, testname,
- 'completed successfully')
-
- result, exc_info = self.__rungroup(subdir, testname, group_func)
- if container:
- self.release_container()
- if exc_info and isinstance(exc_info[1], error.TestError):
- return False
- elif exc_info:
- raise exc_info[0], exc_info[1], exc_info[2]
- else:
- return True
-
-
- def __rungroup(self, subdir, testname, function, *args, **dargs):
- """\
- subdir:
- name of the group
- testname:
- name of the test to run, or support step
- function:
- subroutine to run
- *args:
- arguments for the function
-
- Returns a 2-tuple (result, exc_info) where result
- is the return value of function, and exc_info is
- the sys.exc_info() of the exception thrown by the
- function (which may be None).
- """
-
- result, exc_info = None, None
- try:
- self.record('START', subdir, testname)
- self._increment_group_level()
- result = function(*args, **dargs)
- self._decrement_group_level()
- self.record('END GOOD', subdir, testname)
- except error.TestNAError, e:
- self._decrement_group_level()
- self.record('END TEST_NA', subdir, testname, str(e))
- except Exception, e:
- exc_info = sys.exc_info()
- self._decrement_group_level()
- err_msg = str(e) + '\n' + traceback.format_exc()
- self.record('END FAIL', subdir, testname, err_msg)
-
- return result, exc_info
-
-
- def run_group(self, function, *args, **dargs):
- """\
- function:
- subroutine to run
- *args:
- arguments for the function
- """
-
- # Allow the tag for the group to be specified
- name = function.__name__
- tag = dargs.pop('tag', None)
- if tag:
- name = tag
-
- outputdir = os.path.join(self.resultdir, name)
- if os.path.exists(outputdir):
- msg = ("%s already exists, test <%s> may have"
- " already run with tag <%s>"
- % (outputdir, name, name) )
- raise error.TestError(msg)
- os.mkdir(outputdir)
-
- result, exc_info = self.__rungroup(name, name, function,
- *args, **dargs)
-
- # if there was a non-TestError exception, raise it
- if exc_info and not isinstance(exc_info[1], error.TestError):
- err = ''.join(traceback.format_exception(*exc_info))
- raise error.TestError(name + ' failed\n' + err)
-
- # pass back the actual return value from the function
- return result
-
-
- def new_container(self, mbytes=None, cpus=None, root=None, name=None):
- if not autotest_utils.grep('cpuset', '/proc/filesystems'):
- print "Containers not enabled by latest reboot"
- return # containers weren't enabled in this kernel boot
- pid = os.getpid()
- if not name:
- name = 'test%d' % pid # make arbitrary unique name
- self.container = cpuset.cpuset(name, job_size=mbytes,
- job_pid=pid, cpus=cpus, root=root)
- # This job's python shell is now running in the new container
- # and all forked test processes will inherit that container
-
-
- def release_container(self):
- if self.container:
- self.container.release()
- self.container = None
-
-
- def cpu_count(self):
- if self.container:
- return len(self.container.cpus)
- return autotest_utils.count_cpus() # use total system count
-
-
- # Check the passed kernel identifier against the command line
-    # and the running kernel, abort the job on mismatch.
- def kernel_check_ident(self, expected_when, expected_id, subdir,
- type = 'src', patches=[]):
- print (("POST BOOT: checking booted kernel " +
- "mark=%d identity='%s' type='%s'") %
- (expected_when, expected_id, type))
-
- running_id = autotest_utils.running_os_ident()
-
- cmdline = utils.read_one_line("/proc/cmdline")
-
- find_sum = re.compile(r'.*IDENT=(\d+)')
- m = find_sum.match(cmdline)
- cmdline_when = -1
- if m:
- cmdline_when = int(m.groups()[0])
-
- # We have all the facts, see if they indicate we
- # booted the requested kernel or not.
- bad = False
- if (type == 'src' and expected_id != running_id or
- type == 'rpm' and
- not running_id.startswith(expected_id + '::')):
- print "check_kernel_ident: kernel identifier mismatch"
- bad = True
- if expected_when != cmdline_when:
- print "check_kernel_ident: kernel command line mismatch"
- bad = True
-
- if bad:
- print " Expected Ident: " + expected_id
- print " Running Ident: " + running_id
- print " Expected Mark: %d" % (expected_when)
- print "Command Line Mark: %d" % (cmdline_when)
- print " Command Line: " + cmdline
-
- raise error.JobError("boot failure", "reboot.verify")
-
- kernel_info = {'kernel': expected_id}
- for i, patch in enumerate(patches):
- kernel_info["patch%d" % i] = patch
- self.record('GOOD', subdir, 'reboot.verify', expected_id)
- self._decrement_group_level()
- self.record('END GOOD', subdir, 'reboot',
- optional_fields=kernel_info)
-
-
- def filesystem(self, device, mountpoint = None, loop_size = 0):
- if not mountpoint:
- mountpoint = self.tmpdir
- return filesystem.filesystem(self, device, mountpoint,loop_size)
-
-
- def enable_external_logging(self):
- pass
-
-
- def disable_external_logging(self):
- pass
-
-
- def reboot_setup(self):
- pass
-
-
- def reboot(self, tag='autotest'):
- self.reboot_setup()
- self.record('START', None, 'reboot')
- self._increment_group_level()
- self.record('GOOD', None, 'reboot.start')
- self.harness.run_reboot()
- default = self.config_get('boot.set_default')
- if default:
- self.bootloader.set_default(tag)
- else:
- self.bootloader.boot_once(tag)
- cmd = "(sleep 5; reboot) </dev/null >/dev/null 2>&1 &"
- utils.system(cmd)
- self.quit()
-
-
- def noop(self, text):
- print "job: noop: " + text
-
-
- def parallel(self, *tasklist):
- """Run tasks in parallel"""
-
- pids = []
- old_log_filename = self.log_filename
- for i, task in enumerate(tasklist):
- self.log_filename = old_log_filename + (".%d" % i)
- task_func = lambda: task[0](*task[1:])
- pids.append(parallel.fork_start(self.resultdir,
- task_func))
-
- old_log_path = os.path.join(self.resultdir, old_log_filename)
- old_log = open(old_log_path, "a")
- exceptions = []
- for i, pid in enumerate(pids):
- # wait for the task to finish
- try:
- parallel.fork_waitfor(self.resultdir, pid)
- except Exception, e:
- exceptions.append(e)
- # copy the logs from the subtask into the main log
- new_log_path = old_log_path + (".%d" % i)
- if os.path.exists(new_log_path):
- new_log = open(new_log_path)
- old_log.write(new_log.read())
- new_log.close()
- old_log.flush()
- os.remove(new_log_path)
- old_log.close()
-
- self.log_filename = old_log_filename
-
- # handle any exceptions raised by the parallel tasks
- if exceptions:
- msg = "%d task(s) failed" % len(exceptions)
- raise error.JobError(msg, str(exceptions), exceptions)
-
-
- def quit(self):
- # XXX: should have a better name.
- self.harness.run_pause()
- raise error.JobContinue("more to come")
-
-
- def complete(self, status):
- """Clean up and exit"""
-        # We are about to exit 'complete' so clean up the state file.
- try:
- os.unlink(self.state_file)
- except:
- pass
-
- self.harness.run_complete()
- self.disable_external_logging()
- sys.exit(status)
-
-
- def set_state(self, var, val):
- # Deep copies make sure that the state can't be altered
- # without it being re-written. Perf wise, deep copies
- # are overshadowed by pickling/loading.
- self.state[var] = copy.deepcopy(val)
- pickle.dump(self.state, open(self.state_file, 'w'))
-
-
- def __load_state(self):
- assert not hasattr(self, "state")
- try:
- self.state = pickle.load(open(self.state_file, 'r'))
- self.state_existed = True
- except Exception:
- print "Initializing the state engine."
- self.state = {}
- self.set_state('__steps', []) # writes pickle file
- self.state_existed = False
-
-
- def get_state(self, var, default=None):
- if var in self.state or default == None:
- val = self.state[var]
- else:
- val = default
- return copy.deepcopy(val)
-
-
- def __create_step_tuple(self, fn, args, dargs):
- # Legacy code passes in an array where the first arg is
- # the function or its name.
- if isinstance(fn, list):
- assert(len(args) == 0)
- assert(len(dargs) == 0)
- args = fn[1:]
- fn = fn[0]
-        # Pickling actual functions is hairy, thus we have to call
- # them by name. Unfortunately, this means only functions
- # defined globally can be used as a next step.
- if callable(fn):
- fn = fn.__name__
- if not isinstance(fn, types.StringTypes):
- raise StepError("Next steps must be functions or "
- "strings containing the function name")
- ancestry = copy.copy(self.current_step_ancestry)
- return (ancestry, fn, args, dargs)
-
-
- def next_step_append(self, fn, *args, **dargs):
- """Define the next step and place it at the end"""
- steps = self.get_state('__steps')
- steps.append(self.__create_step_tuple(fn, args, dargs))
- self.set_state('__steps', steps)
-
-
- def next_step(self, fn, *args, **dargs):
- """Create a new step and place it after any steps added
- while running the current step but before any steps added in
- previous steps"""
- steps = self.get_state('__steps')
- steps.insert(self.next_step_index,
- self.__create_step_tuple(fn, args, dargs))
- self.next_step_index += 1
- self.set_state('__steps', steps)
-
-
- def next_step_prepend(self, fn, *args, **dargs):
- """Insert a new step, executing first"""
- steps = self.get_state('__steps')
- steps.insert(0, self.__create_step_tuple(fn, args, dargs))
- self.next_step_index += 1
- self.set_state('__steps', steps)
-
-
- def _run_step_fn(self, local_vars, fn, args, dargs):
- """Run a (step) function within the given context"""
-
- local_vars['__args'] = args
- local_vars['__dargs'] = dargs
- exec('__ret = %s(*__args, **__dargs)' % fn,
- local_vars, local_vars)
- return local_vars['__ret']
-
-
- def _create_frame(self, global_vars, ancestry, fn_name):
- """Set up the environment like it would have been when this
- function was first defined.
-
- Child step engine 'implementations' must have 'return locals()'
-        at the end of their steps. Because of this, we can call the
- parent function and get back all child functions (i.e. those
- defined within it).
-
- Unfortunately, the call stack of the function calling
- job.next_step might have been deeper than the function it
- added. In order to make sure that the environment is what it
- should be, we need to then pop off the frames we built until
- we find the frame where the function was first defined."""
-
- # The copies ensure that the parent frames are not modified
- # while building child frames. This matters if we then
- # pop some frames in the next part of this function.
- current_frame = copy.copy(global_vars)
- frames = [current_frame]
- for steps_fn_name in ancestry:
- ret = self._run_step_fn(current_frame,
- steps_fn_name, [], {})
- current_frame = copy.copy(ret)
- frames.append(current_frame)
-
- while len(frames) > 2:
- if fn_name not in frames[-2]:
- break
- if frames[-2][fn_name] != frames[-1][fn_name]:
- break
- frames.pop()
- ancestry.pop()
-
- return (frames[-1], ancestry)
-
-
- def _add_step_init(self, local_vars, current_function):
- """If the function returned a dictionary that includes a
- function named 'step_init', prepend it to our list of steps.
- This will only get run the first time a function with a nested
- use of the step engine is run."""
-
- if (isinstance(local_vars, dict) and
- 'step_init' in local_vars and
- callable(local_vars['step_init'])):
- # The init step is a child of the function
- # we were just running.
- self.current_step_ancestry.append(current_function)
- self.next_step_prepend('step_init')
-
-
- def step_engine(self):
- """the stepping engine -- if the control file defines
- step_init we will be using this engine to drive multiple runs.
- """
- """Do the next step"""
-
- # Set up the environment and then interpret the control file.
- # Some control files will have code outside of functions,
- # which means we need to have our state engine initialized
- # before reading in the file.
- global_control_vars = {'job': self}
- exec(JOB_PREAMBLE, global_control_vars, global_control_vars)
- execfile(self.control, global_control_vars, global_control_vars)
-
- # If we loaded in a mid-job state file, then we presumably
- # know what steps we have yet to run.
- if not self.state_existed:
- if global_control_vars.has_key('step_init'):
- self.next_step(global_control_vars['step_init'])
-
- # Iterate through the steps. If we reboot, we'll simply
- # continue iterating on the next step.
- while len(self.get_state('__steps')) > 0:
- steps = self.get_state('__steps')
- (ancestry, fn_name, args, dargs) = steps.pop(0)
- self.set_state('__steps', steps)
-
- self.next_step_index = 0
- ret = self._create_frame(global_control_vars, ancestry,
- fn_name)
- local_vars, self.current_step_ancestry = ret
- local_vars = self._run_step_fn(local_vars, fn_name,
- args, dargs)
- self._add_step_init(local_vars, fn_name)
-
-
- def _init_group_level(self):
- self.group_level = self.get_state("__group_level", default=0)
-
-
- def _increment_group_level(self):
- self.group_level += 1
- self.set_state("__group_level", self.group_level)
-
-
- def _decrement_group_level(self):
- self.group_level -= 1
- self.set_state("__group_level", self.group_level)
-
-
- def record(self, status_code, subdir, operation, status = '',
- optional_fields=None):
- """
- Record job-level status
-
- The intent is to make this file both machine parseable and
- human readable. That involves a little more complexity, but
- really isn't all that bad ;-)
-
- Format is <status code>\t<subdir>\t<operation>\t<status>
-
- status code: (GOOD|WARN|FAIL|ABORT)
- or START
- or END (GOOD|WARN|FAIL|ABORT)
-
- subdir: MUST be a relevant subdirectory in the results,
- or None, which will be represented as '----'
-
- operation: description of what you ran (e.g. "dbench", or
- "mkfs -t foobar /dev/sda9")
-
-        status: error message or "completed successfully"
-
- ------------------------------------------------------------
-
-        Initial tabs indicate indent levels for grouping, and are
-        governed by self.group_level
-
- multiline messages have secondary lines prefaced by a double
- space (' ')
- """
-
- if subdir:
- if re.match(r'[\n\t]', subdir):
- raise ValueError("Invalid character in "
- "subdir string")
- substr = subdir
- else:
- substr = '----'
-
- if not logging.is_valid_status(status_code):
- raise ValueError("Invalid status code supplied: %s" %
- status_code)
- if not operation:
- operation = '----'
-
- if re.match(r'[\n\t]', operation):
- raise ValueError("Invalid character in "
- "operation string")
- operation = operation.rstrip()
-
- if not optional_fields:
- optional_fields = {}
-
- status = status.rstrip()
- status = re.sub(r"\t", " ", status)
- # Ensure any continuation lines are marked so we can
- # detect them in the status file to ensure it is parsable.
- status = re.sub(r"\n", "\n" + "\t" * self.group_level + " ",
- status)
+ """The actual job against which we do everything.
+
+ Properties:
+ autodir
+ The top level autotest directory (/usr/local/autotest).
+ Comes from os.environ['AUTODIR'].
+ bindir
+ <autodir>/bin/
+ libdir
+ <autodir>/lib/
+ testdir
+ <autodir>/tests/
+ site_testdir
+ <autodir>/site_tests/
+ profdir
+ <autodir>/profilers/
+ tmpdir
+ <autodir>/tmp/
+ resultdir
+ <autodir>/results/<jobtag>
+ stdout
+ fd_stack object for stdout
+ stderr
+ fd_stack object for stderr
+ profilers
+ the profilers object for this job
+ harness
+ the server harness object for this job
+ config
+ the job configuration for this job
+ """
+
+ DEFAULT_LOG_FILENAME = "status"
+
+ def __init__(self, control, jobtag, cont, harness_type=None,
+ use_external_logging = False):
+ """
+ control
+ The control file (pathname of)
+ jobtag
+ The job tag string (eg "default")
+ cont
+        Whether this is a continuation of a previous run of this job
+ harness_type
+ An alternative server harness
+ """
+ self.autodir = os.environ['AUTODIR']
+ self.bindir = os.path.join(self.autodir, 'bin')
+ self.libdir = os.path.join(self.autodir, 'lib')
+ self.testdir = os.path.join(self.autodir, 'tests')
+ self.site_testdir = os.path.join(self.autodir, 'site_tests')
+ self.profdir = os.path.join(self.autodir, 'profilers')
+ self.tmpdir = os.path.join(self.autodir, 'tmp')
+ self.resultdir = os.path.join(self.autodir, 'results', jobtag)
+ self.sysinfodir = os.path.join(self.resultdir, 'sysinfo')
+ self.control = os.path.abspath(control)
+ self.state_file = self.control + '.state'
+ self.current_step_ancestry = []
+ self.next_step_index = 0
+ self.__load_state()
+
+ if not cont:
+ """
+ Don't cleanup the tmp dir (which contains the lockfile)
+ in the constructor, this would be a problem for multiple
+ jobs starting at the same time on the same client. Instead
+ do the delete at the server side. We simply create the tmp
+ directory here if it does not already exist.
+ """
+ if not os.path.exists(self.tmpdir):
+ os.mkdir(self.tmpdir)
+
+ results = os.path.join(self.autodir, 'results')
+ if not os.path.exists(results):
+ os.mkdir(results)
+
+ download = os.path.join(self.testdir, 'download')
+ if not os.path.exists(download):
+ os.mkdir(download)
+
+ if os.path.exists(self.resultdir):
+ utils.system('rm -rf '
+ + self.resultdir)
+ os.mkdir(self.resultdir)
+ os.mkdir(self.sysinfodir)
+
+ os.mkdir(os.path.join(self.resultdir, 'debug'))
+ os.mkdir(os.path.join(self.resultdir, 'analysis'))
+
+ shutil.copyfile(self.control,
+ os.path.join(self.resultdir, 'control'))
+
+
+ self.control = control
+ self.jobtag = jobtag
+ self.log_filename = self.DEFAULT_LOG_FILENAME
+ self.container = None
+
+ self.stdout = fd_stack.fd_stack(1, sys.stdout)
+ self.stderr = fd_stack.fd_stack(2, sys.stderr)
+
+ self._init_group_level()
+
+ self.config = config.config(self)
+
+ self.harness = harness.select(harness_type, self)
+
+ self.profilers = profilers.profilers(self)
+
+ try:
+ tool = self.config_get('boottool.executable')
+ self.bootloader = boottool.boottool(tool)
+ except:
+ pass
+
+ sysinfo.log_per_reboot_data(self.sysinfodir)
+
+ if not cont:
+ self.record('START', None, None)
+ self._increment_group_level()
+
+ self.harness.run_start()
+
+ if use_external_logging:
+ self.enable_external_logging()
+
+ # load the max disk usage rate - default to no monitoring
+ self.max_disk_usage_rate = self.get_state('__monitor_disk',
+ default=0.0)
+
+
+ def monitor_disk_usage(self, max_rate):
+ """\
+ Signal that the job should monitor disk space usage on /
+ and generate a warning if a test uses up disk space at a
+ rate exceeding 'max_rate'.
+
+ Parameters:
+    max_rate - the maximum allowed rate of disk consumption
+ during a test, in MB/hour, or 0 to indicate
+ no limit.
+ """
+ self.set_state('__monitor_disk', max_rate)
+ self.max_disk_usage_rate = max_rate
+
+
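As an illustration, a control file could turn this monitoring on before running tests; the 100 MB/hour threshold below is an arbitrary example value, not a shipped default:

    job.monitor_disk_usage(100)   # warn if a test consumes more than 100 MB/hour on /
    job.run_test('sleeptest')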
+ def relative_path(self, path):
+ """\
+        Return a path relative to the job results directory
+ """
+        head = len(self.resultdir) + 1 # remove the / in between
+ return path[head:]
- # Generate timestamps for inclusion in the logs
- epoch_time = int(time.time()) # seconds since epoch, in UTC
- local_time = time.localtime(epoch_time)
- optional_fields["timestamp"] = str(epoch_time)
- optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
- local_time)
- fields = [status_code, substr, operation]
- fields += ["%s=%s" % x for x in optional_fields.iteritems()]
- fields.append(status)
+ def control_get(self):
+ return self.control
- msg = '\t'.join(str(x) for x in fields)
- msg = '\t' * self.group_level + msg
- msg_tag = ""
- if "." in self.log_filename:
- msg_tag = self.log_filename.split(".", 1)[1]
+ def control_set(self, control):
+ self.control = os.path.abspath(control)
- self.harness.test_status_detail(status_code, substr,
- operation, status, msg_tag)
- self.harness.test_status(msg, msg_tag)
- # log to stdout (if enabled)
- #if self.log_filename == self.DEFAULT_LOG_FILENAME:
- print msg
+ def harness_select(self, which):
+ self.harness = harness.select(which, self)
- # log to the "root" status log
- status_file = os.path.join(self.resultdir, self.log_filename)
- open(status_file, "a").write(msg + "\n")
- # log to the subdir status log (if subdir is set)
- if subdir:
- dir = os.path.join(self.resultdir, subdir)
- status_file = os.path.join(dir,
- self.DEFAULT_LOG_FILENAME)
- open(status_file, "a").write(msg + "\n")
+ def config_set(self, name, value):
+ self.config.set(name, value)
+
+
+ def config_get(self, name):
+ return self.config.get(name)
+
+ def setup_dirs(self, results_dir, tmp_dir):
+ if not tmp_dir:
+ tmp_dir = os.path.join(self.tmpdir, 'build')
+ if not os.path.exists(tmp_dir):
+ os.mkdir(tmp_dir)
+ if not os.path.isdir(tmp_dir):
+            e_msg = "Temp dir (%s) is not a dir - args backwards?" % tmp_dir
+ raise ValueError(e_msg)
+
+ # We label the first build "build" and then subsequent ones
+ # as "build.2", "build.3", etc. Whilst this is a little bit
+ # inconsistent, 99.9% of jobs will only have one build
+ # (that's not done as kernbench, sparse, or buildtest),
+        # so it works out much cleaner. One of life's compromises.
+ if not results_dir:
+ results_dir = os.path.join(self.resultdir, 'build')
+ i = 2
+ while os.path.exists(results_dir):
+ results_dir = os.path.join(self.resultdir, 'build.%d' % i)
+ i += 1
+ if not os.path.exists(results_dir):
+ os.mkdir(results_dir)
+
+ return (results_dir, tmp_dir)
+
+
+ def xen(self, base_tree, results_dir = '', tmp_dir = '', leave = False, \
+ kjob = None ):
+ """Summon a xen object"""
+ (results_dir, tmp_dir) = self.setup_dirs(results_dir, tmp_dir)
+ build_dir = 'xen'
+ return xen.xen(self, base_tree, results_dir, tmp_dir, build_dir, leave, kjob)
+
+
+ def kernel(self, base_tree, results_dir = '', tmp_dir = '', leave = False):
+ """Summon a kernel object"""
+ (results_dir, tmp_dir) = self.setup_dirs(results_dir, tmp_dir)
+ build_dir = 'linux'
+ return kernel.auto_kernel(self, base_tree, results_dir,
+ tmp_dir, build_dir, leave)
+
+
+ def barrier(self, *args, **kwds):
+ """Create a barrier object"""
+ return barrier.barrier(*args, **kwds)
+
+
+ def setup_dep(self, deps):
+ """Set up the dependencies for this test.
+
+ deps is a list of libraries required for this test.
+ """
+ for dep in deps:
+ try:
+ os.chdir(os.path.join(self.autodir, 'deps', dep))
+ utils.system('./' + dep + '.py')
+ except:
+ err = "setting up dependency " + dep + "\n"
+ raise error.UnhandledError(err)
+
+
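For example, a control file (or a test, via self.job) could pull in prebuilt dependencies shipped under <autodir>/deps like this; libaio and libnet are deps that ship with the client:

    job.setup_dep(['libaio', 'libnet'])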
+ def __runtest(self, url, tag, args, dargs):
+ try:
+ l = lambda : test.runtest(self, url, tag, args, dargs)
+ pid = parallel.fork_start(self.resultdir, l)
+ parallel.fork_waitfor(self.resultdir, pid)
+ except error.AutotestError:
+ raise
+ except Exception, e:
+            msg = "Unhandled %s error occurred during test\n"
+ msg %= str(e.__class__.__name__)
+ raise error.UnhandledError(msg)
+
+
+ def run_test(self, url, *args, **dargs):
+ """Summon a test object and run it.
+
+ tag
+ tag to add to testname
+ url
+ url of the test to run
+ """
+
+ if not url:
+ raise TypeError("Test name is invalid. "
+ "Switched arguments?")
+ (group, testname) = test.testname(url)
+ namelen = len(testname)
+ dargs = dargs.copy()
+ tntag = dargs.pop('tag', None)
+ if tntag: # testname tag is included in reported test name
+ testname += '.' + tntag
+ subdir = testname
+ sdtag = dargs.pop('subdir_tag', None)
+ if sdtag: # subdir-only tag is not included in reports
+ subdir = subdir + '.' + sdtag
+ tag = subdir[namelen+1:] # '' if none
+
+ outputdir = os.path.join(self.resultdir, subdir)
+ if os.path.exists(outputdir):
+ msg = ("%s already exists, test <%s> may have"
+ " already run with tag <%s>"
+ % (outputdir, testname, tag) )
+ raise error.TestError(msg)
+ os.mkdir(outputdir)
+
+ container = dargs.pop('container', None)
+ if container:
+ cname = container.get('name', None)
+ if not cname: # get old name
+ cname = container.get('container_name', None)
+ mbytes = container.get('mbytes', None)
+ if not mbytes: # get old name
+ mbytes = container.get('mem', None)
+ cpus = container.get('cpus', None)
+ if not cpus: # get old name
+ cpus = container.get('cpu', None)
+ root = container.get('root', None)
+ self.new_container(mbytes=mbytes, cpus=cpus,
+ root=root, name=cname)
+ # We are running in a container now...
+
+ def log_warning(reason):
+ self.record("WARN", subdir, testname, reason)
+ @disk_usage_monitor.watch(log_warning, "/",
+ self.max_disk_usage_rate)
+ def group_func():
+ try:
+ self.__runtest(url, tag, args, dargs)
+ except error.TestNAError, detail:
+ self.record('TEST_NA', subdir, testname,
+ str(detail))
+ raise
+ except Exception, detail:
+ self.record('FAIL', subdir, testname,
+ str(detail))
+ raise
+ else:
+ self.record('GOOD', subdir, testname,
+ 'completed successfully')
+
+ result, exc_info = self.__rungroup(subdir, testname, group_func)
+ if container:
+ self.release_container()
+ if exc_info and isinstance(exc_info[1], error.TestError):
+ return False
+ elif exc_info:
+ raise exc_info[0], exc_info[1], exc_info[2]
+ else:
+ return True
+
+
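A rough sketch of how control files typically call run_test; the test names, tags and container sizes here are only examples:

    job.run_test('dbench')                            # results land in <resultdir>/dbench/
    job.run_test('dbench', tag='ext3')                # results land in <resultdir>/dbench.ext3/
    job.run_test('dbench', container={'mbytes': 512, 'cpus': 2})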
+ def __rungroup(self, subdir, testname, function, *args, **dargs):
+ """\
+ subdir:
+ name of the group
+ testname:
+ name of the test to run, or support step
+ function:
+ subroutine to run
+ *args:
+ arguments for the function
+
+ Returns a 2-tuple (result, exc_info) where result
+ is the return value of function, and exc_info is
+ the sys.exc_info() of the exception thrown by the
+ function (which may be None).
+ """
+
+ result, exc_info = None, None
+ try:
+ self.record('START', subdir, testname)
+ self._increment_group_level()
+ result = function(*args, **dargs)
+ self._decrement_group_level()
+ self.record('END GOOD', subdir, testname)
+ except error.TestNAError, e:
+ self._decrement_group_level()
+ self.record('END TEST_NA', subdir, testname, str(e))
+ except Exception, e:
+ exc_info = sys.exc_info()
+ self._decrement_group_level()
+ err_msg = str(e) + '\n' + traceback.format_exc()
+ self.record('END FAIL', subdir, testname, err_msg)
+
+ return result, exc_info
+
+
+ def run_group(self, function, *args, **dargs):
+ """\
+ function:
+ subroutine to run
+ *args:
+ arguments for the function
+ """
+
+ # Allow the tag for the group to be specified
+ name = function.__name__
+ tag = dargs.pop('tag', None)
+ if tag:
+ name = tag
+
+ outputdir = os.path.join(self.resultdir, name)
+ if os.path.exists(outputdir):
+ msg = ("%s already exists, test <%s> may have"
+ " already run with tag <%s>"
+ % (outputdir, name, name) )
+ raise error.TestError(msg)
+ os.mkdir(outputdir)
+
+ result, exc_info = self.__rungroup(name, name, function,
+ *args, **dargs)
+
+ # if there was a non-TestError exception, raise it
+ if exc_info and not isinstance(exc_info[1], error.TestError):
+ err = ''.join(traceback.format_exception(*exc_info))
+ raise error.TestError(name + ' failed\n' + err)
+
+ # pass back the actual return value from the function
+ return result
+
+
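run_group() is useful for wrapping several related steps under one START/END block in the status log; a hypothetical example, with the kernel tarball path and tag chosen purely for illustration:

    def build_and_test():
        testkernel = job.kernel('/usr/src/linux-2.6.18.tar.bz2')
        testkernel.config()
        testkernel.build()
        job.run_test('kernbench')

    job.run_group(build_and_test, tag='mainline')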
+ def new_container(self, mbytes=None, cpus=None, root=None, name=None):
+ if not autotest_utils.grep('cpuset', '/proc/filesystems'):
+ print "Containers not enabled by latest reboot"
+ return # containers weren't enabled in this kernel boot
+ pid = os.getpid()
+ if not name:
+ name = 'test%d' % pid # make arbitrary unique name
+ self.container = cpuset.cpuset(name, job_size=mbytes,
+ job_pid=pid, cpus=cpus, root=root)
+ # This job's python shell is now running in the new container
+ # and all forked test processes will inherit that container
+
+
+ def release_container(self):
+ if self.container:
+ self.container.release()
+ self.container = None
+
+
+ def cpu_count(self):
+ if self.container:
+ return len(self.container.cpus)
+ return autotest_utils.count_cpus() # use total system count
+
+
+ # Check the passed kernel identifier against the command line
+    # and the running kernel, abort the job on mismatch.
+ def kernel_check_ident(self, expected_when, expected_id, subdir,
+ type = 'src', patches=[]):
+ print (("POST BOOT: checking booted kernel " +
+ "mark=%d identity='%s' type='%s'") %
+ (expected_when, expected_id, type))
+
+ running_id = autotest_utils.running_os_ident()
+
+ cmdline = utils.read_one_line("/proc/cmdline")
+
+ find_sum = re.compile(r'.*IDENT=(\d+)')
+ m = find_sum.match(cmdline)
+ cmdline_when = -1
+ if m:
+ cmdline_when = int(m.groups()[0])
+
+ # We have all the facts, see if they indicate we
+ # booted the requested kernel or not.
+ bad = False
+ if (type == 'src' and expected_id != running_id or
+ type == 'rpm' and
+ not running_id.startswith(expected_id + '::')):
+ print "check_kernel_ident: kernel identifier mismatch"
+ bad = True
+ if expected_when != cmdline_when:
+ print "check_kernel_ident: kernel command line mismatch"
+ bad = True
+
+ if bad:
+ print " Expected Ident: " + expected_id
+ print " Running Ident: " + running_id
+ print " Expected Mark: %d" % (expected_when)
+ print "Command Line Mark: %d" % (cmdline_when)
+ print " Command Line: " + cmdline
+
+ raise error.JobError("boot failure", "reboot.verify")
+
+ kernel_info = {'kernel': expected_id}
+ for i, patch in enumerate(patches):
+ kernel_info["patch%d" % i] = patch
+ self.record('GOOD', subdir, 'reboot.verify', expected_id)
+ self._decrement_group_level()
+ self.record('END GOOD', subdir, 'reboot',
+ optional_fields=kernel_info)
+
+
+ def filesystem(self, device, mountpoint = None, loop_size = 0):
+ if not mountpoint:
+ mountpoint = self.tmpdir
+ return filesystem.filesystem(self, device, mountpoint,loop_size)
+
+
+ def enable_external_logging(self):
+ pass
+
+
+ def disable_external_logging(self):
+ pass
+
+
+ def reboot_setup(self):
+ pass
+
+
+ def reboot(self, tag='autotest'):
+ self.reboot_setup()
+ self.record('START', None, 'reboot')
+ self._increment_group_level()
+ self.record('GOOD', None, 'reboot.start')
+ self.harness.run_reboot()
+ default = self.config_get('boot.set_default')
+ if default:
+ self.bootloader.set_default(tag)
+ else:
+ self.bootloader.boot_once(tag)
+ cmd = "(sleep 5; reboot) </dev/null >/dev/null 2>&1 &"
+ utils.system(cmd)
+ self.quit()
+
+
+ def noop(self, text):
+ print "job: noop: " + text
+
+
+ def parallel(self, *tasklist):
+ """Run tasks in parallel"""
+
+ pids = []
+ old_log_filename = self.log_filename
+ for i, task in enumerate(tasklist):
+ self.log_filename = old_log_filename + (".%d" % i)
+ task_func = lambda: task[0](*task[1:])
+ pids.append(parallel.fork_start(self.resultdir,
+ task_func))
+
+ old_log_path = os.path.join(self.resultdir, old_log_filename)
+ old_log = open(old_log_path, "a")
+ exceptions = []
+ for i, pid in enumerate(pids):
+ # wait for the task to finish
+ try:
+ parallel.fork_waitfor(self.resultdir, pid)
+ except Exception, e:
+ exceptions.append(e)
+ # copy the logs from the subtask into the main log
+ new_log_path = old_log_path + (".%d" % i)
+ if os.path.exists(new_log_path):
+ new_log = open(new_log_path)
+ old_log.write(new_log.read())
+ new_log.close()
+ old_log.flush()
+ os.remove(new_log_path)
+ old_log.close()
+
+ self.log_filename = old_log_filename
+
+ # handle any exceptions raised by the parallel tasks
+ if exceptions:
+ msg = "%d task(s) failed" % len(exceptions)
+ raise error.JobError(msg, str(exceptions), exceptions)
+
+
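A minimal sketch of parallel(): each argument is a list whose first element is a callable and whose remaining elements are its positional arguments; the tests and tags are arbitrary examples:

    def run_one(name, tag):
        job.run_test(name, tag=tag)

    job.parallel([run_one, 'fsx', 'a'],
                 [run_one, 'bonnie', 'b'])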
+ def quit(self):
+ # XXX: should have a better name.
+ self.harness.run_pause()
+ raise error.JobContinue("more to come")
+
+
+ def complete(self, status):
+ """Clean up and exit"""
+        # We are about to exit 'complete' so clean up the state file.
+ try:
+ os.unlink(self.state_file)
+ except:
+ pass
+
+ self.harness.run_complete()
+ self.disable_external_logging()
+ sys.exit(status)
+
+
+ def set_state(self, var, val):
+ # Deep copies make sure that the state can't be altered
+ # without it being re-written. Perf wise, deep copies
+ # are overshadowed by pickling/loading.
+ self.state[var] = copy.deepcopy(val)
+ pickle.dump(self.state, open(self.state_file, 'w'))
+
+
+ def __load_state(self):
+ assert not hasattr(self, "state")
+ try:
+ self.state = pickle.load(open(self.state_file, 'r'))
+ self.state_existed = True
+ except Exception:
+ print "Initializing the state engine."
+ self.state = {}
+ self.set_state('__steps', []) # writes pickle file
+ self.state_existed = False
+
+
+ def get_state(self, var, default=None):
+ if var in self.state or default == None:
+ val = self.state[var]
+ else:
+ val = default
+ return copy.deepcopy(val)
+
+
+ def __create_step_tuple(self, fn, args, dargs):
+ # Legacy code passes in an array where the first arg is
+ # the function or its name.
+ if isinstance(fn, list):
+ assert(len(args) == 0)
+ assert(len(dargs) == 0)
+ args = fn[1:]
+ fn = fn[0]
+        # Pickling actual functions is hairy, thus we have to call
+ # them by name. Unfortunately, this means only functions
+ # defined globally can be used as a next step.
+ if callable(fn):
+ fn = fn.__name__
+ if not isinstance(fn, types.StringTypes):
+ raise StepError("Next steps must be functions or "
+ "strings containing the function name")
+ ancestry = copy.copy(self.current_step_ancestry)
+ return (ancestry, fn, args, dargs)
+
+
+ def next_step_append(self, fn, *args, **dargs):
+ """Define the next step and place it at the end"""
+ steps = self.get_state('__steps')
+ steps.append(self.__create_step_tuple(fn, args, dargs))
+ self.set_state('__steps', steps)
+
+
+ def next_step(self, fn, *args, **dargs):
+ """Create a new step and place it after any steps added
+ while running the current step but before any steps added in
+ previous steps"""
+ steps = self.get_state('__steps')
+ steps.insert(self.next_step_index,
+ self.__create_step_tuple(fn, args, dargs))
+ self.next_step_index += 1
+ self.set_state('__steps', steps)
+
+
+ def next_step_prepend(self, fn, *args, **dargs):
+ """Insert a new step, executing first"""
+ steps = self.get_state('__steps')
+ steps.insert(0, self.__create_step_tuple(fn, args, dargs))
+ self.next_step_index += 1
+ self.set_state('__steps', steps)
+
+
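Putting the three next_step variants in context, a reboot-surviving control file usually looks something like the sketch below; the kernel version, test name, and the boot() call are placeholders for whatever the job actually builds and runs:

    def step_init():
        job.next_step([step_test])
        testkernel = job.kernel('2.6.18')
        testkernel.config()
        testkernel.build()
        testkernel.boot()          # triggers a reboot; the job resumes at step_test

    def step_test():
        job.run_test('kernbench')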
+ def _run_step_fn(self, local_vars, fn, args, dargs):
+ """Run a (step) function within the given context"""
+
+ local_vars['__args'] = args
+ local_vars['__dargs'] = dargs
+ exec('__ret = %s(*__args, **__dargs)' % fn,
+ local_vars, local_vars)
+ return local_vars['__ret']
+
+
+ def _create_frame(self, global_vars, ancestry, fn_name):
+ """Set up the environment like it would have been when this
+ function was first defined.
+
+ Child step engine 'implementations' must have 'return locals()'
+        at the end of their steps. Because of this, we can call the
+ parent function and get back all child functions (i.e. those
+ defined within it).
+
+ Unfortunately, the call stack of the function calling
+ job.next_step might have been deeper than the function it
+ added. In order to make sure that the environment is what it
+ should be, we need to then pop off the frames we built until
+ we find the frame where the function was first defined."""
+
+ # The copies ensure that the parent frames are not modified
+ # while building child frames. This matters if we then
+ # pop some frames in the next part of this function.
+ current_frame = copy.copy(global_vars)
+ frames = [current_frame]
+ for steps_fn_name in ancestry:
+ ret = self._run_step_fn(current_frame,
+ steps_fn_name, [], {})
+ current_frame = copy.copy(ret)
+ frames.append(current_frame)
+
+ while len(frames) > 2:
+ if fn_name not in frames[-2]:
+ break
+ if frames[-2][fn_name] != frames[-1][fn_name]:
+ break
+ frames.pop()
+ ancestry.pop()
+
+ return (frames[-1], ancestry)
+
+
+ def _add_step_init(self, local_vars, current_function):
+ """If the function returned a dictionary that includes a
+ function named 'step_init', prepend it to our list of steps.
+ This will only get run the first time a function with a nested
+ use of the step engine is run."""
+
+ if (isinstance(local_vars, dict) and
+ 'step_init' in local_vars and
+ callable(local_vars['step_init'])):
+ # The init step is a child of the function
+ # we were just running.
+ self.current_step_ancestry.append(current_function)
+ self.next_step_prepend('step_init')
+
+
+ def step_engine(self):
+        """The stepping engine -- if the control file defines
+        step_init we will be using this engine to drive multiple runs.
+        """
+
+ # Set up the environment and then interpret the control file.
+ # Some control files will have code outside of functions,
+ # which means we need to have our state engine initialized
+ # before reading in the file.
+ global_control_vars = {'job': self}
+ exec(JOB_PREAMBLE, global_control_vars, global_control_vars)
+ execfile(self.control, global_control_vars, global_control_vars)
+
+ # If we loaded in a mid-job state file, then we presumably
+ # know what steps we have yet to run.
+ if not self.state_existed:
+ if global_control_vars.has_key('step_init'):
+ self.next_step(global_control_vars['step_init'])
+
+ # Iterate through the steps. If we reboot, we'll simply
+ # continue iterating on the next step.
+ while len(self.get_state('__steps')) > 0:
+ steps = self.get_state('__steps')
+ (ancestry, fn_name, args, dargs) = steps.pop(0)
+ self.set_state('__steps', steps)
+
+ self.next_step_index = 0
+ ret = self._create_frame(global_control_vars, ancestry,
+ fn_name)
+ local_vars, self.current_step_ancestry = ret
+ local_vars = self._run_step_fn(local_vars, fn_name,
+ args, dargs)
+ self._add_step_init(local_vars, fn_name)
+
+
+ def _init_group_level(self):
+ self.group_level = self.get_state("__group_level", default=0)
+
+
+ def _increment_group_level(self):
+ self.group_level += 1
+ self.set_state("__group_level", self.group_level)
+
+
+ def _decrement_group_level(self):
+ self.group_level -= 1
+ self.set_state("__group_level", self.group_level)
+
+
+ def record(self, status_code, subdir, operation, status = '',
+ optional_fields=None):
+ """
+ Record job-level status
+
+ The intent is to make this file both machine parseable and
+ human readable. That involves a little more complexity, but
+ really isn't all that bad ;-)
+
+ Format is <status code>\t<subdir>\t<operation>\t<status>
+
+ status code: (GOOD|WARN|FAIL|ABORT)
+ or START
+ or END (GOOD|WARN|FAIL|ABORT)
+
+ subdir: MUST be a relevant subdirectory in the results,
+ or None, which will be represented as '----'
+
+ operation: description of what you ran (e.g. "dbench", or
+ "mkfs -t foobar /dev/sda9")
+
+        status: error message or "completed successfully"
+
+ ------------------------------------------------------------
+
+        Initial tabs indicate indent levels for grouping, and are
+        governed by self.group_level
+
+ multiline messages have secondary lines prefaced by a double
+ space (' ')
+ """
+
+ if subdir:
+ if re.match(r'[\n\t]', subdir):
+ raise ValueError("Invalid character in "
+ "subdir string")
+ substr = subdir
+ else:
+ substr = '----'
+
+ if not logging.is_valid_status(status_code):
+ raise ValueError("Invalid status code supplied: %s" %
+ status_code)
+ if not operation:
+ operation = '----'
+
+ if re.match(r'[\n\t]', operation):
+ raise ValueError("Invalid character in "
+ "operation string")
+ operation = operation.rstrip()
+
+ if not optional_fields:
+ optional_fields = {}
+
+ status = status.rstrip()
+ status = re.sub(r"\t", " ", status)
+ # Ensure any continuation lines are marked so we can
+ # detect them in the status file to ensure it is parsable.
+ status = re.sub(r"\n", "\n" + "\t" * self.group_level + " ",
+ status)
+
+ # Generate timestamps for inclusion in the logs
+ epoch_time = int(time.time()) # seconds since epoch, in UTC
+ local_time = time.localtime(epoch_time)
+ optional_fields["timestamp"] = str(epoch_time)
+ optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
+ local_time)
+
+ fields = [status_code, substr, operation]
+ fields += ["%s=%s" % x for x in optional_fields.iteritems()]
+ fields.append(status)
+
+ msg = '\t'.join(str(x) for x in fields)
+ msg = '\t' * self.group_level + msg
+
+ msg_tag = ""
+ if "." in self.log_filename:
+ msg_tag = self.log_filename.split(".", 1)[1]
+
+ self.harness.test_status_detail(status_code, substr,
+ operation, status, msg_tag)
+ self.harness.test_status(msg, msg_tag)
+
+ # log to stdout (if enabled)
+ #if self.log_filename == self.DEFAULT_LOG_FILENAME:
+ print msg
+
+ # log to the "root" status log
+ status_file = os.path.join(self.resultdir, self.log_filename)
+ open(status_file, "a").write(msg + "\n")
+
+ # log to the subdir status log (if subdir is set)
+ if subdir:
+ dir = os.path.join(self.resultdir, subdir)
+ status_file = os.path.join(dir,
+ self.DEFAULT_LOG_FILENAME)
+ open(status_file, "a").write(msg + "\n")
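To make the format above concrete, a successful dbench run inside a job might append status lines roughly like these (fields are tab-separated; the GOOD line is indented one extra tab because it is inside the group, and the timestamps are made up for the example):

    START           dbench  dbench  timestamp=1215500000    localtime=Jul 08 06:53:20
            GOOD    dbench  dbench  timestamp=1215500060    localtime=Jul 08 06:54:20        completed successfully
    END GOOD        dbench  dbench  timestamp=1215500061    localtime=Jul 08 06:54:21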
class disk_usage_monitor:
- def __init__(self, logging_func, device, max_mb_per_hour):
- self.func = logging_func
- self.device = device
- self.max_mb_per_hour = max_mb_per_hour
-
-
- def start(self):
- self.initial_space = autotest_utils.freespace(self.device)
- self.start_time = time.time()
-
-
- def stop(self):
- # if no maximum usage rate was set, we don't need to
- # generate any warnings
- if not self.max_mb_per_hour:
- return
-
- final_space = autotest_utils.freespace(self.device)
- used_space = self.initial_space - final_space
- stop_time = time.time()
- total_time = stop_time - self.start_time
- # round up the time to one minute, to keep extremely short
- # tests from generating false positives due to short, badly
- # timed bursts of activity
- total_time = max(total_time, 60.0)
-
- # determine the usage rate
- bytes_per_sec = used_space / total_time
- mb_per_sec = bytes_per_sec / 1024**2
- mb_per_hour = mb_per_sec * 60 * 60
-
- if mb_per_hour > self.max_mb_per_hour:
- msg = ("disk space on %s was consumed at a rate of "
- "%.2f MB/hour")
- msg %= (self.device, mb_per_hour)
- self.func(msg)
-
-
- @classmethod
- def watch(cls, *monitor_args, **monitor_dargs):
- """ Generic decorator to wrap a function call with the
- standard create-monitor -> start -> call -> stop idiom."""
- def decorator(func):
- def watched_func(*args, **dargs):
- monitor = cls(*monitor_args, **monitor_dargs)
- monitor.start()
- try:
- func(*args, **dargs)
- finally:
- monitor.stop()
- return watched_func
- return decorator
+ def __init__(self, logging_func, device, max_mb_per_hour):
+ self.func = logging_func
+ self.device = device
+ self.max_mb_per_hour = max_mb_per_hour
+
+
+ def start(self):
+ self.initial_space = autotest_utils.freespace(self.device)
+ self.start_time = time.time()
+
+
+ def stop(self):
+ # if no maximum usage rate was set, we don't need to
+ # generate any warnings
+ if not self.max_mb_per_hour:
+ return
+
+ final_space = autotest_utils.freespace(self.device)
+ used_space = self.initial_space - final_space
+ stop_time = time.time()
+ total_time = stop_time - self.start_time
+ # round up the time to one minute, to keep extremely short
+ # tests from generating false positives due to short, badly
+ # timed bursts of activity
+ total_time = max(total_time, 60.0)
+
+ # determine the usage rate
+ bytes_per_sec = used_space / total_time
+ mb_per_sec = bytes_per_sec / 1024**2
+ mb_per_hour = mb_per_sec * 60 * 60
+
+ if mb_per_hour > self.max_mb_per_hour:
+ msg = ("disk space on %s was consumed at a rate of "
+ "%.2f MB/hour")
+ msg %= (self.device, mb_per_hour)
+ self.func(msg)
+
+
+ @classmethod
+ def watch(cls, *monitor_args, **monitor_dargs):
+ """ Generic decorator to wrap a function call with the
+ standard create-monitor -> start -> call -> stop idiom."""
+ def decorator(func):
+ def watched_func(*args, **dargs):
+ monitor = cls(*monitor_args, **monitor_dargs)
+ monitor.start()
+ try:
+ func(*args, **dargs)
+ finally:
+ monitor.stop()
+ return watched_func
+ return decorator
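A self-contained sketch of using the watch() idiom directly, outside of run_test; the device, threshold, and workload are arbitrary examples:

    def warn(msg):
        print "WARN: " + msg

    @disk_usage_monitor.watch(warn, "/", 500)     # warn above 500 MB/hour on /
    def scratch_workload():
        utils.system('dd if=/dev/zero of=/tmp/scratch bs=1M count=256')

    scratch_workload()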
def runjob(control, cont = False, tag = "default", harness_type = '',
- use_external_logging = False):
- """The main interface to this module
-
- control
- The control file to use for this job.
- cont
- Whether this is the continuation of a previously started job
- """
- control = os.path.abspath(control)
- state = control + '.state'
-
- # instantiate the job object ready for the control file.
- myjob = None
- try:
- # Check that the control file is valid
- if not os.path.exists(control):
- raise error.JobError(control +
- ": control file not found")
-
- # When continuing, the job is complete when there is no
-        # state file; ensure we don't try to continue.
- if cont and not os.path.exists(state):
- raise error.JobComplete("all done")
- if cont == False and os.path.exists(state):
- os.unlink(state)
-
- myjob = job(control, tag, cont, harness_type,
- use_external_logging)
-
-        # Load in the user's control file, may do any one of:
- # 1) execute in toto
- # 2) define steps, and select the first via next_step()
- myjob.step_engine()
-
- except error.JobContinue:
- sys.exit(5)
-
- except error.JobComplete:
- sys.exit(1)
-
- except error.JobError, instance:
- print "JOB ERROR: " + instance.args[0]
- if myjob:
- command = None
- if len(instance.args) > 1:
- command = instance.args[1]
- myjob.record('ABORT', None, command, instance.args[0])
- myjob._decrement_group_level()
- myjob.record('END ABORT', None, None)
- assert(myjob.group_level == 0)
- myjob.complete(1)
- else:
- sys.exit(1)
-
- except Exception, e:
- msg = str(e) + '\n' + traceback.format_exc()
- print "JOB ERROR: " + msg
- if myjob:
- myjob.record('ABORT', None, None, msg)
- myjob._decrement_group_level()
- myjob.record('END ABORT', None, None)
- assert(myjob.group_level == 0)
- myjob.complete(1)
- else:
- sys.exit(1)
-
- # If we get here, then we assume the job is complete and good.
- myjob._decrement_group_level()
- myjob.record('END GOOD', None, None)
- assert(myjob.group_level == 0)
-
- myjob.complete(0)
+ use_external_logging = False):
+ """The main interface to this module
+
+ control
+ The control file to use for this job.
+ cont
+ Whether this is the continuation of a previously started job
+ """
+ control = os.path.abspath(control)
+ state = control + '.state'
+
+ # instantiate the job object ready for the control file.
+ myjob = None
+ try:
+ # Check that the control file is valid
+ if not os.path.exists(control):
+ raise error.JobError(control +
+ ": control file not found")
+
+ # When continuing, the job is complete when there is no
+        # state file; ensure we don't try to continue.
+ if cont and not os.path.exists(state):
+ raise error.JobComplete("all done")
+ if cont == False and os.path.exists(state):
+ os.unlink(state)
+
+ myjob = job(control, tag, cont, harness_type,
+ use_external_logging)
+
+        # Load in the user's control file, may do any one of:
+ # 1) execute in toto
+ # 2) define steps, and select the first via next_step()
+ myjob.step_engine()
+
+ except error.JobContinue:
+ sys.exit(5)
+
+ except error.JobComplete:
+ sys.exit(1)
+
+ except error.JobError, instance:
+ print "JOB ERROR: " + instance.args[0]
+ if myjob:
+ command = None
+ if len(instance.args) > 1:
+ command = instance.args[1]
+ myjob.record('ABORT', None, command, instance.args[0])
+ myjob._decrement_group_level()
+ myjob.record('END ABORT', None, None)
+ assert(myjob.group_level == 0)
+ myjob.complete(1)
+ else:
+ sys.exit(1)
+
+ except Exception, e:
+ msg = str(e) + '\n' + traceback.format_exc()
+ print "JOB ERROR: " + msg
+ if myjob:
+ myjob.record('ABORT', None, None, msg)
+ myjob._decrement_group_level()
+ myjob.record('END ABORT', None, None)
+ assert(myjob.group_level == 0)
+ myjob.complete(1)
+ else:
+ sys.exit(1)
+
+ # If we get here, then we assume the job is complete and good.
+ myjob._decrement_group_level()
+ myjob.record('END GOOD', None, None)
+ assert(myjob.group_level == 0)
+
+ myjob.complete(0)
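For context, the autotest client entry point ends up calling into this module roughly like so; the import path, control file path, and tag are illustrative:

    from autotest_lib.client.bin import job
    job.runjob('/usr/local/autotest/control', cont=False, tag='default')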
# site_job.py may be non-existent or empty, make sure that an appropriate
# site_job class is created nevertheless
try:
- from site_job import site_job
+ from site_job import site_job
except ImportError:
- class site_job(base_job):
- pass
+ class site_job(base_job):
+ pass
class job(site_job):
- pass
+ pass
diff --git a/client/bin/kernel.py b/client/bin/kernel.py
index fca8f7ef..ad65ae9b 100755
--- a/client/bin/kernel.py
+++ b/client/bin/kernel.py
@@ -8,727 +8,727 @@ from autotest_lib.client.common_lib import logging, utils
class kernel:
- """ Class for compiling kernels.
-
- Data for the object includes the src files
- used to create the kernel, patches applied, config (base + changes),
- the build directory itself, and logged output
-
- Properties:
- job
- Backpointer to the job object we're part of
- autodir
- Path to the top level autotest dir (/usr/local/autotest)
- src_dir
- <tmp_dir>/src/
- build_dir
- <tmp_dir>/linux/
- config_dir
- <results_dir>/config/
- log_dir
- <results_dir>/debug/
- results_dir
- <results_dir>/results/
- """
-
- autodir = ''
-
- def __init__(self, job, base_tree, subdir, tmp_dir, build_dir, leave = False):
- """Initialize the kernel build environment
-
- job
- which job this build is part of
- base_tree
- base kernel tree. Can be one of the following:
- 1. A local tarball
- 2. A URL to a tarball
- 3. A local directory (will symlink it)
- 4. A shorthand expandable (eg '2.6.11-git3')
- subdir
- subdir in the results directory (eg "build")
- (holds config/, debug/, results/)
- tmp_dir
-
- leave
- Boolean, whether to leave existing tmpdir or not
- """
- self.job = job
- self.autodir = job.autodir
-
- self.src_dir = os.path.join(tmp_dir, 'src')
- self.build_dir = os.path.join(tmp_dir, build_dir)
- # created by get_kernel_tree
- self.config_dir = os.path.join(subdir, 'config')
- self.log_dir = os.path.join(subdir, 'debug')
- self.results_dir = os.path.join(subdir, 'results')
- self.subdir = os.path.basename(subdir)
-
- self.installed_as = None
-
- if not leave:
- if os.path.isdir(self.src_dir):
- utils.system('rm -rf ' + self.src_dir)
- if os.path.isdir(self.build_dir):
- utils.system('rm -rf ' + self.build_dir)
-
- if not os.path.exists(self.src_dir):
- os.mkdir(self.src_dir)
- for path in [self.config_dir, self.log_dir, self.results_dir]:
- if os.path.exists(path):
- utils.system('rm -rf ' + path)
- os.mkdir(path)
-
- logpath = os.path.join(self.log_dir, 'build_log')
- self.logfile = open(logpath, 'w+')
- self.applied_patches = []
-
- self.target_arch = None
- self.build_target = 'bzImage'
- self.build_image = None
-
- arch = autotest_utils.get_current_kernel_arch()
- if arch == 's390' or arch == 's390x':
- self.build_target = 'image'
- elif arch == 'ia64':
- self.build_target = 'all'
- self.build_image = 'vmlinux.gz'
-
- if leave:
- return
-
- self.logfile.write('BASE: %s\n' % base_tree)
-
-        # Where we have a direct version hint, record it
-        # for later configuration selection.
- shorthand = re.compile(r'^\d+\.\d+\.\d+')
- if shorthand.match(base_tree):
- self.base_tree_version = base_tree
- else:
- self.base_tree_version = None
-
-        # Actually extract the tree. Make sure we know it occurred
- self.extract(base_tree)
-
-
- def kernelexpand(self, kernel):
- # If we have something like a path, just use it as it is
- if '/' in kernel:
- return [kernel]
-
- # Find the configured mirror list.
- mirrors = self.job.config_get('mirror.mirrors')
- if not mirrors:
- # LEGACY: convert the kernel.org mirror
- mirror = self.job.config_get('mirror.ftp_kernel_org')
- if mirror:
- korg = 'http://www.kernel.org/pub/linux/kernel'
- mirrors = [
- [ korg + '/v2.6', mirror + '/v2.6' ],
- [ korg + '/people/akpm/patches/2.6',
- mirror + '/akpm' ],
- [ korg + '/people/mbligh',
- mirror + '/mbligh' ],
- ]
-
- patches = kernelexpand.expand_classic(kernel, mirrors)
- print patches
-
- return patches
-
-
- @logging.record
- @tee_output_logdir_mark
- def extract(self, base_tree):
- if os.path.exists(base_tree):
- self.get_kernel_tree(base_tree)
- else:
- base_components = self.kernelexpand(base_tree)
- print 'kernelexpand: '
- print base_components
- self.get_kernel_tree(base_components.pop(0))
- if base_components: # apply remaining patches
- self.patch(*base_components)
-
-
- @logging.record
- @tee_output_logdir_mark
- def patch(self, *patches):
- """Apply a list of patches (in order)"""
- if not patches:
- return
- print 'Applying patches: ', patches
- self.apply_patches(self.get_patches(patches))
-
-
- @logging.record
- @tee_output_logdir_mark
- def config(self, config_file = '', config_list = None, defconfig = False):
- self.set_cross_cc()
- config = kernel_config.kernel_config(self.job, self.build_dir,
- self.config_dir, config_file, config_list,
- defconfig, self.base_tree_version)
-
-
- def get_patches(self, patches):
- """fetch the patches to the local src_dir"""
- local_patches = []
- for patch in patches:
- dest = os.path.join(self.src_dir, basename(patch))
- # FIXME: this isn't unique. Append something to it
- # like wget does if it's not there?
- print "get_file %s %s %s %s" % (patch, dest, self.src_dir, basename(patch))
- utils.get_file(patch, dest)
- # probably safer to use the command, not python library
- md5sum = utils.system_output('md5sum ' + dest).split()[0]
- local_patches.append((patch, dest, md5sum))
- return local_patches
-
-
- def apply_patches(self, local_patches):
- """apply the list of patches, in order"""
- builddir = self.build_dir
- os.chdir(builddir)
-
- if not local_patches:
- return None
- for (spec, local, md5sum) in local_patches:
- if local.endswith('.bz2') or local.endswith('.gz'):
- ref = spec
- else:
- ref = force_copy(local, self.results_dir)
- ref = self.job.relative_path(ref)
- patch_id = "%s %s %s" % (spec, ref, md5sum)
- log = "PATCH: " + patch_id + "\n"
- print log
- cat_file_to_cmd(local, 'patch -p1 > /dev/null')
- self.logfile.write(log)
- self.applied_patches.append(patch_id)
-
-
- def get_kernel_tree(self, base_tree):
- """Extract/link base_tree to self.build_dir"""
-
- # if base_tree is a dir, assume uncompressed kernel
- if os.path.isdir(base_tree):
- print 'Symlinking existing kernel source'
- os.symlink(base_tree, self.build_dir)
-
- # otherwise, extract tarball
- else:
- os.chdir(os.path.dirname(self.src_dir))
- # Figure out local destination for tarball
- tarball = os.path.join(self.src_dir, os.path.basename(base_tree))
- utils.get_file(base_tree, tarball)
- print 'Extracting kernel tarball:', tarball, '...'
- autotest_utils.extract_tarball_to_dir(tarball,
- self.build_dir)
-
-
- def extraversion(self, tag, append=1):
- os.chdir(self.build_dir)
- extraversion_sub = r's/^EXTRAVERSION =\s*\(.*\)/EXTRAVERSION = '
- if append:
- p = extraversion_sub + '\\1-%s/' % tag
- else:
- p = extraversion_sub + '-%s/' % tag
- utils.system('mv Makefile Makefile.old')
- utils.system('sed "%s" < Makefile.old > Makefile' % p)
-
-
- @logging.record
- @tee_output_logdir_mark
- def build(self, make_opts = '', logfile = '', extraversion='autotest'):
- """build the kernel
-
- make_opts
- additional options to make, if any
- """
- os_dep.commands('gcc', 'make')
- if logfile == '':
- logfile = os.path.join(self.log_dir, 'kernel_build')
- os.chdir(self.build_dir)
- if extraversion:
- self.extraversion(extraversion)
- self.set_cross_cc()
- # setup_config_file(config_file, config_overrides)
-
- # Not needed on 2.6, but hard to tell -- handle failure
- utils.system('make dep', ignore_status=True)
- threads = 2 * autotest_utils.count_cpus()
- build_string = 'make -j %d %s %s' % (threads, make_opts,
- self.build_target)
- # eg make bzImage, or make zImage
- print build_string
- system(build_string)
- if kernel_config.modules_needed('.config'):
- utils.system('make -j %d modules' % (threads))
-
- kernel_version = self.get_kernel_build_ver()
- kernel_version = re.sub('-autotest', '', kernel_version)
- self.logfile.write('BUILD VERSION: %s\n' % kernel_version)
-
- force_copy(self.build_dir+'/System.map', self.results_dir)
-
-
- def build_timed(self, threads, timefile = '/dev/null', make_opts = '',
- output = '/dev/null'):
-        """time the building of the kernel"""
- os.chdir(self.build_dir)
- self.set_cross_cc()
-
- self.clean(logged=False)
- build_string = "/usr/bin/time -o %s make %s -j %s vmlinux" \
- % (timefile, make_opts, threads)
- build_string += ' > %s 2>&1' % output
- print build_string
- utils.system(build_string)
-
- if (not os.path.isfile('vmlinux')):
- errmsg = "no vmlinux found, kernel build failed"
- raise error.TestError(errmsg)
-
-
- @logging.record
- @tee_output_logdir_mark
- def clean(self):
- """make clean in the kernel tree"""
- os.chdir(self.build_dir)
- print "make clean"
- utils.system('make clean > /dev/null 2> /dev/null')
-
-
- @logging.record
- @tee_output_logdir_mark
- def mkinitrd(self, version, image, system_map, initrd):
- """Build kernel initrd image.
- Try to use distro specific way to build initrd image.
- Parameters:
- version
- new kernel version
- image
- new kernel image file
- system_map
- System.map file
- initrd
- initrd image file to build
- """
- vendor = autotest_utils.get_os_vendor()
-
- if os.path.isfile(initrd):
- print "Existing %s file, will remove it." % initrd
- os.remove(initrd)
-
- args = self.job.config_get('kernel.mkinitrd_extra_args')
-
- # don't leak 'None' into mkinitrd command
- if not args:
- args = ''
-
- if vendor in ['Red Hat', 'Fedora Core']:
- utils.system('mkinitrd %s %s %s' % (args, initrd, version))
- elif vendor in ['SUSE']:
- utils.system('mkinitrd %s -k %s -i %s -M %s' % (args, image, initrd, system_map))
- elif vendor in ['Debian', 'Ubuntu']:
- if os.path.isfile('/usr/sbin/mkinitrd'):
- cmd = '/usr/sbin/mkinitrd'
- elif os.path.isfile('/usr/sbin/mkinitramfs'):
- cmd = '/usr/sbin/mkinitramfs'
- else:
- raise error.TestError('No Debian initrd builder')
- utils.system('%s %s -o %s %s' % (cmd, args, initrd, version))
- else:
- raise error.TestError('Unsupported vendor %s' % vendor)
-
-
- def set_build_image(self, image):
- self.build_image = image
-
-
- @logging.record
- @tee_output_logdir_mark
- def install(self, tag='autotest', prefix = '/'):
- """make install in the kernel tree"""
-
- # Record that we have installed the kernel, and
- # the tag under which we installed it.
- self.installed_as = tag
-
- os.chdir(self.build_dir)
-
- if not os.path.isdir(prefix):
- os.mkdir(prefix)
- self.boot_dir = os.path.join(prefix, 'boot')
- if not os.path.isdir(self.boot_dir):
- os.mkdir(self.boot_dir)
-
- if not self.build_image:
- images = glob.glob('arch/*/boot/' + self.build_target)
- if len(images):
- self.build_image = images[0]
- else:
- self.build_image = self.build_target
-
- # remember installed files
- self.vmlinux = self.boot_dir + '/vmlinux-' + tag
- if (self.build_image != 'vmlinux'):
- self.image = self.boot_dir + '/vmlinuz-' + tag
- else:
- self.image = self.vmlinux
- self.system_map = self.boot_dir + '/System.map-' + tag
- self.config = self.boot_dir + '/config-' + tag
- self.initrd = ''
-
- # copy to boot dir
- autotest_utils.force_copy('vmlinux', self.vmlinux)
- if (self.build_image != 'vmlinux'):
- force_copy(self.build_image, self.image)
- autotest_utils.force_copy('System.map', self.system_map)
- autotest_utils.force_copy('.config', self.config)
-
- if not kernel_config.modules_needed('.config'):
- return
-
- utils.system('make modules_install INSTALL_MOD_PATH=%s' % prefix)
- if prefix == '/':
- self.initrd = self.boot_dir + '/initrd-' + tag
- self.mkinitrd(self.get_kernel_build_ver(), self.image,
- self.system_map, self.initrd)
-
-
- def add_to_bootloader(self, tag='autotest', args=''):
- """ add this kernel to bootloader, taking an
- optional parameter of space separated parameters
- e.g.: kernel.add_to_bootloader('mykernel', 'ro acpi=off')
- """
-
- # remove existing entry if present
- self.job.bootloader.remove_kernel(tag)
-
- # pull the base argument set from the job config,
- baseargs = self.job.config_get('boot.default_args')
- if baseargs:
- args = baseargs + " " + args
-
- # otherwise populate from /proc/cmdline
- # if not baseargs:
- # baseargs = open('/proc/cmdline', 'r').readline().strip()
- # NOTE: This is unnecessary, because boottool does it.
-
- root = None
- roots = [x for x in args.split() if x.startswith('root=')]
- if roots:
- root = re.sub('^root=', '', roots[0])
- arglist = [x for x in args.split() if not x.startswith('root=')]
- args = ' '.join(arglist)
-
- # add the kernel entry
- # add_kernel(image, title='autotest', initrd='')
- self.job.bootloader.add_kernel(self.image, tag, self.initrd, \
- args = args, root = root)
-
-
- def get_kernel_build_arch(self, arch=None):
- """
- Work out the current kernel architecture (as a kernel arch)
- """
- if not arch:
- arch = autotest_utils.get_current_kernel_arch()
- if re.match('i.86', arch):
- return 'i386'
- elif re.match('sun4u', arch):
- return 'sparc64'
- elif re.match('arm.*', arch):
- return 'arm'
- elif re.match('sa110', arch):
- return 'arm'
- elif re.match('s390x', arch):
- return 's390'
- elif re.match('parisc64', arch):
- return 'parisc'
- elif re.match('ppc.*', arch):
- return 'powerpc'
- elif re.match('mips.*', arch):
- return 'mips'
- else:
- return arch
-
-
- def get_kernel_build_release(self):
- releasem = re.compile(r'.*UTS_RELEASE\s+"([^"]+)".*');
- versionm = re.compile(r'.*UTS_VERSION\s+"([^"]+)".*');
-
- release = None
- version = None
-
- for file in [ self.build_dir + "/include/linux/version.h",
- self.build_dir + "/include/linux/utsrelease.h",
- self.build_dir + "/include/linux/compile.h" ]:
- if os.path.exists(file):
- fd = open(file, 'r')
- for line in fd.readlines():
- m = releasem.match(line)
- if m:
- release = m.groups()[0]
- m = versionm.match(line)
- if m:
- version = m.groups()[0]
- fd.close()
-
- return (release, version)
-
-
- def get_kernel_build_ident(self):
- (release, version) = self.get_kernel_build_release()
-
- if not release or not version:
- raise error.JobError('kernel has no identity')
-
- return release + '::' + version
-
-
- def boot(self, args='', ident=1):
- """ install and boot this kernel, do not care how
- just make it happen.
- """
-
- # If we can check the kernel identity do so.
- if ident:
- when = int(time.time())
- ident = self.get_kernel_build_ident()
- args += " IDENT=%d" % (when)
-
- self.job.next_step_prepend(["job.kernel_check_ident",
- when, ident, self.subdir,
- self.applied_patches])
-
- # Check if the kernel has been installed, if not install
- # as the default tag and boot that.
- if not self.installed_as:
- self.install()
-
- # Boot the selected tag.
- self.add_to_bootloader(args=args, tag=self.installed_as)
-
- # Boot it.
- self.job.reboot(tag=self.installed_as)
-
-
- def get_kernel_build_ver(self):
- """Check Makefile and .config to return kernel version"""
- version = patchlevel = sublevel = extraversion = localversion = ''
-
- for line in open(self.build_dir + '/Makefile', 'r').readlines():
- if line.startswith('VERSION'):
- version = line[line.index('=') + 1:].strip()
- if line.startswith('PATCHLEVEL'):
- patchlevel = line[line.index('=') + 1:].strip()
- if line.startswith('SUBLEVEL'):
- sublevel = line[line.index('=') + 1:].strip()
- if line.startswith('EXTRAVERSION'):
- extraversion = line[line.index('=') + 1:].strip()
-
- for line in open(self.build_dir + '/.config', 'r').readlines():
- if line.startswith('CONFIG_LOCALVERSION='):
- localversion = line.rstrip().split('"')[1]
-
- return "%s.%s.%s%s%s" %(version, patchlevel, sublevel, extraversion, localversion)
-
-
- def set_build_target(self, build_target):
- if build_target:
- self.build_target = build_target
- print 'BUILD TARGET: %s' % self.build_target
-
-
- def set_cross_cc(self, target_arch=None, cross_compile=None,
- build_target='bzImage'):
- """Set up to cross-compile.
- This is broken. We need to work out what the default
- compile produces, and if not, THEN set the cross
- compiler.
- """
+ """ Class for compiling kernels.
+
+ Data for the object includes the src files
+ used to create the kernel, patches applied, config (base + changes),
+ the build directory itself, and logged output
+
+ Properties:
+ job
+ Backpointer to the job object we're part of
+ autodir
+ Path to the top level autotest dir (/usr/local/autotest)
+ src_dir
+ <tmp_dir>/src/
+ build_dir
+ <tmp_dir>/linux/
+ config_dir
+ <results_dir>/config/
+ log_dir
+ <results_dir>/debug/
+ results_dir
+ <results_dir>/results/
+ """
+
+ autodir = ''
+
+ def __init__(self, job, base_tree, subdir, tmp_dir, build_dir, leave = False):
+ """Initialize the kernel build environment
+
+ job
+ which job this build is part of
+ base_tree
+ base kernel tree. Can be one of the following:
+ 1. A local tarball
+ 2. A URL to a tarball
+ 3. A local directory (will symlink it)
+ 4. A shorthand expandable (eg '2.6.11-git3')
+ subdir
+ subdir in the results directory (eg "build")
+ (holds config/, debug/, results/)
+ tmp_dir
+
+ leave
+ Boolean, whether to leave existing tmpdir or not
+ """
+ self.job = job
+ self.autodir = job.autodir
+
+ self.src_dir = os.path.join(tmp_dir, 'src')
+ self.build_dir = os.path.join(tmp_dir, build_dir)
+ # created by get_kernel_tree
+ self.config_dir = os.path.join(subdir, 'config')
+ self.log_dir = os.path.join(subdir, 'debug')
+ self.results_dir = os.path.join(subdir, 'results')
+ self.subdir = os.path.basename(subdir)
+
+ self.installed_as = None
+
+ if not leave:
+ if os.path.isdir(self.src_dir):
+ utils.system('rm -rf ' + self.src_dir)
+ if os.path.isdir(self.build_dir):
+ utils.system('rm -rf ' + self.build_dir)
+
+ if not os.path.exists(self.src_dir):
+ os.mkdir(self.src_dir)
+ for path in [self.config_dir, self.log_dir, self.results_dir]:
+ if os.path.exists(path):
+ utils.system('rm -rf ' + path)
+ os.mkdir(path)
+
+ logpath = os.path.join(self.log_dir, 'build_log')
+ self.logfile = open(logpath, 'w+')
+ self.applied_patches = []
+
+ self.target_arch = None
+ self.build_target = 'bzImage'
+ self.build_image = None
+
+ arch = autotest_utils.get_current_kernel_arch()
+ if arch == 's390' or arch == 's390x':
+ self.build_target = 'image'
+ elif arch == 'ia64':
+ self.build_target = 'all'
+ self.build_image = 'vmlinux.gz'
+
+ if leave:
+ return
+
+ self.logfile.write('BASE: %s\n' % base_tree)
+
+        # Where we have a direct version hint, record that
+        # for later configuration selection.
+ shorthand = re.compile(r'^\d+\.\d+\.\d+')
+ if shorthand.match(base_tree):
+ self.base_tree_version = base_tree
+ else:
+ self.base_tree_version = None
+
+        # Actually extract the tree. Make sure we know it occurred
+ self.extract(base_tree)
+
+
+ def kernelexpand(self, kernel):
+ # If we have something like a path, just use it as it is
+ if '/' in kernel:
+ return [kernel]
+
+ # Find the configured mirror list.
+ mirrors = self.job.config_get('mirror.mirrors')
+ if not mirrors:
+ # LEGACY: convert the kernel.org mirror
+ mirror = self.job.config_get('mirror.ftp_kernel_org')
+ if mirror:
+ korg = 'http://www.kernel.org/pub/linux/kernel'
+ mirrors = [
+ [ korg + '/v2.6', mirror + '/v2.6' ],
+ [ korg + '/people/akpm/patches/2.6',
+ mirror + '/akpm' ],
+ [ korg + '/people/mbligh',
+ mirror + '/mbligh' ],
+ ]
+
+ patches = kernelexpand.expand_classic(kernel, mirrors)
+ print patches
+
+ return patches
+
+
+ @logging.record
+ @tee_output_logdir_mark
+ def extract(self, base_tree):
+ if os.path.exists(base_tree):
+ self.get_kernel_tree(base_tree)
+ else:
+ base_components = self.kernelexpand(base_tree)
+ print 'kernelexpand: '
+ print base_components
+ self.get_kernel_tree(base_components.pop(0))
+ if base_components: # apply remaining patches
+ self.patch(*base_components)
+
+
+ @logging.record
+ @tee_output_logdir_mark
+ def patch(self, *patches):
+ """Apply a list of patches (in order)"""
+ if not patches:
+ return
+ print 'Applying patches: ', patches
+ self.apply_patches(self.get_patches(patches))
+
+
+ @logging.record
+ @tee_output_logdir_mark
+ def config(self, config_file = '', config_list = None, defconfig = False):
+ self.set_cross_cc()
+ config = kernel_config.kernel_config(self.job, self.build_dir,
+ self.config_dir, config_file, config_list,
+ defconfig, self.base_tree_version)
+
+
+ def get_patches(self, patches):
+ """fetch the patches to the local src_dir"""
+ local_patches = []
+ for patch in patches:
+ dest = os.path.join(self.src_dir, basename(patch))
+ # FIXME: this isn't unique. Append something to it
+ # like wget does if it's not there?
+ print "get_file %s %s %s %s" % (patch, dest, self.src_dir, basename(patch))
+ utils.get_file(patch, dest)
+ # probably safer to use the command, not python library
+ md5sum = utils.system_output('md5sum ' + dest).split()[0]
+ local_patches.append((patch, dest, md5sum))
+ return local_patches
+
+
+ def apply_patches(self, local_patches):
+ """apply the list of patches, in order"""
+ builddir = self.build_dir
+ os.chdir(builddir)
+
+ if not local_patches:
+ return None
+ for (spec, local, md5sum) in local_patches:
+ if local.endswith('.bz2') or local.endswith('.gz'):
+ ref = spec
+ else:
+ ref = force_copy(local, self.results_dir)
+ ref = self.job.relative_path(ref)
+ patch_id = "%s %s %s" % (spec, ref, md5sum)
+ log = "PATCH: " + patch_id + "\n"
+ print log
+ cat_file_to_cmd(local, 'patch -p1 > /dev/null')
+ self.logfile.write(log)
+ self.applied_patches.append(patch_id)
+
+
+ def get_kernel_tree(self, base_tree):
+ """Extract/link base_tree to self.build_dir"""
+
+ # if base_tree is a dir, assume uncompressed kernel
+ if os.path.isdir(base_tree):
+ print 'Symlinking existing kernel source'
+ os.symlink(base_tree, self.build_dir)
+
+ # otherwise, extract tarball
+ else:
+ os.chdir(os.path.dirname(self.src_dir))
+ # Figure out local destination for tarball
+ tarball = os.path.join(self.src_dir, os.path.basename(base_tree))
+ utils.get_file(base_tree, tarball)
+ print 'Extracting kernel tarball:', tarball, '...'
+ autotest_utils.extract_tarball_to_dir(tarball,
+ self.build_dir)
+
+
+ def extraversion(self, tag, append=1):
+ os.chdir(self.build_dir)
+ extraversion_sub = r's/^EXTRAVERSION =\s*\(.*\)/EXTRAVERSION = '
+ if append:
+ p = extraversion_sub + '\\1-%s/' % tag
+ else:
+ p = extraversion_sub + '-%s/' % tag
+ utils.system('mv Makefile Makefile.old')
+ utils.system('sed "%s" < Makefile.old > Makefile' % p)
+
+
+ @logging.record
+ @tee_output_logdir_mark
+ def build(self, make_opts = '', logfile = '', extraversion='autotest'):
+ """build the kernel
+
+ make_opts
+ additional options to make, if any
+ """
+ os_dep.commands('gcc', 'make')
+ if logfile == '':
+ logfile = os.path.join(self.log_dir, 'kernel_build')
+ os.chdir(self.build_dir)
+ if extraversion:
+ self.extraversion(extraversion)
+ self.set_cross_cc()
+ # setup_config_file(config_file, config_overrides)
+
+ # Not needed on 2.6, but hard to tell -- handle failure
+ utils.system('make dep', ignore_status=True)
+ threads = 2 * autotest_utils.count_cpus()
+ build_string = 'make -j %d %s %s' % (threads, make_opts,
+ self.build_target)
+ # eg make bzImage, or make zImage
+ print build_string
+ system(build_string)
+ if kernel_config.modules_needed('.config'):
+ utils.system('make -j %d modules' % (threads))
+
+ kernel_version = self.get_kernel_build_ver()
+ kernel_version = re.sub('-autotest', '', kernel_version)
+ self.logfile.write('BUILD VERSION: %s\n' % kernel_version)
+
+ force_copy(self.build_dir+'/System.map', self.results_dir)
+
+
+ def build_timed(self, threads, timefile = '/dev/null', make_opts = '',
+ output = '/dev/null'):
+ """time the bulding of the kernel"""
+ os.chdir(self.build_dir)
+ self.set_cross_cc()
+
+ self.clean(logged=False)
+ build_string = "/usr/bin/time -o %s make %s -j %s vmlinux" \
+ % (timefile, make_opts, threads)
+ build_string += ' > %s 2>&1' % output
+ print build_string
+ utils.system(build_string)
+
+ if (not os.path.isfile('vmlinux')):
+ errmsg = "no vmlinux found, kernel build failed"
+ raise error.TestError(errmsg)
+
+
+ @logging.record
+ @tee_output_logdir_mark
+ def clean(self):
+ """make clean in the kernel tree"""
+ os.chdir(self.build_dir)
+ print "make clean"
+ utils.system('make clean > /dev/null 2> /dev/null')
+
+
+ @logging.record
+ @tee_output_logdir_mark
+ def mkinitrd(self, version, image, system_map, initrd):
+ """Build kernel initrd image.
+        Try to use a distro-specific way to build the initrd image.
+ Parameters:
+ version
+ new kernel version
+ image
+ new kernel image file
+ system_map
+ System.map file
+ initrd
+ initrd image file to build
+ """
+ vendor = autotest_utils.get_os_vendor()
+
+ if os.path.isfile(initrd):
+ print "Existing %s file, will remove it." % initrd
+ os.remove(initrd)
+
+ args = self.job.config_get('kernel.mkinitrd_extra_args')
+
+ # don't leak 'None' into mkinitrd command
+ if not args:
+ args = ''
+
+ if vendor in ['Red Hat', 'Fedora Core']:
+ utils.system('mkinitrd %s %s %s' % (args, initrd, version))
+ elif vendor in ['SUSE']:
+ utils.system('mkinitrd %s -k %s -i %s -M %s' % (args, image, initrd, system_map))
+ elif vendor in ['Debian', 'Ubuntu']:
+ if os.path.isfile('/usr/sbin/mkinitrd'):
+ cmd = '/usr/sbin/mkinitrd'
+ elif os.path.isfile('/usr/sbin/mkinitramfs'):
+ cmd = '/usr/sbin/mkinitramfs'
+ else:
+ raise error.TestError('No Debian initrd builder')
+ utils.system('%s %s -o %s %s' % (cmd, args, initrd, version))
+ else:
+ raise error.TestError('Unsupported vendor %s' % vendor)
+
+
+ def set_build_image(self, image):
+ self.build_image = image
+
+
+ @logging.record
+ @tee_output_logdir_mark
+ def install(self, tag='autotest', prefix = '/'):
+ """make install in the kernel tree"""
+
+ # Record that we have installed the kernel, and
+ # the tag under which we installed it.
+ self.installed_as = tag
+
+ os.chdir(self.build_dir)
+
+ if not os.path.isdir(prefix):
+ os.mkdir(prefix)
+ self.boot_dir = os.path.join(prefix, 'boot')
+ if not os.path.isdir(self.boot_dir):
+ os.mkdir(self.boot_dir)
+
+ if not self.build_image:
+ images = glob.glob('arch/*/boot/' + self.build_target)
+ if len(images):
+ self.build_image = images[0]
+ else:
+ self.build_image = self.build_target
+
+ # remember installed files
+ self.vmlinux = self.boot_dir + '/vmlinux-' + tag
+ if (self.build_image != 'vmlinux'):
+ self.image = self.boot_dir + '/vmlinuz-' + tag
+ else:
+ self.image = self.vmlinux
+ self.system_map = self.boot_dir + '/System.map-' + tag
+ self.config = self.boot_dir + '/config-' + tag
+ self.initrd = ''
+
+ # copy to boot dir
+ autotest_utils.force_copy('vmlinux', self.vmlinux)
+ if (self.build_image != 'vmlinux'):
+ force_copy(self.build_image, self.image)
+ autotest_utils.force_copy('System.map', self.system_map)
+ autotest_utils.force_copy('.config', self.config)
+
+ if not kernel_config.modules_needed('.config'):
+ return
+
+ utils.system('make modules_install INSTALL_MOD_PATH=%s' % prefix)
+ if prefix == '/':
+ self.initrd = self.boot_dir + '/initrd-' + tag
+ self.mkinitrd(self.get_kernel_build_ver(), self.image,
+ self.system_map, self.initrd)
+
+
+ def add_to_bootloader(self, tag='autotest', args=''):
+ """ add this kernel to bootloader, taking an
+ optional parameter of space separated parameters
+ e.g.: kernel.add_to_bootloader('mykernel', 'ro acpi=off')
+ """
+
+ # remove existing entry if present
+ self.job.bootloader.remove_kernel(tag)
+
+ # pull the base argument set from the job config,
+ baseargs = self.job.config_get('boot.default_args')
+ if baseargs:
+ args = baseargs + " " + args
+
+ # otherwise populate from /proc/cmdline
+ # if not baseargs:
+ # baseargs = open('/proc/cmdline', 'r').readline().strip()
+ # NOTE: This is unnecessary, because boottool does it.
+
+ root = None
+ roots = [x for x in args.split() if x.startswith('root=')]
+ if roots:
+ root = re.sub('^root=', '', roots[0])
+ arglist = [x for x in args.split() if not x.startswith('root=')]
+ args = ' '.join(arglist)
+
+ # add the kernel entry
+ # add_kernel(image, title='autotest', initrd='')
+ self.job.bootloader.add_kernel(self.image, tag, self.initrd, \
+ args = args, root = root)
+
+
+ def get_kernel_build_arch(self, arch=None):
+ """
+ Work out the current kernel architecture (as a kernel arch)
+ """
+ if not arch:
+ arch = autotest_utils.get_current_kernel_arch()
+ if re.match('i.86', arch):
+ return 'i386'
+ elif re.match('sun4u', arch):
+ return 'sparc64'
+ elif re.match('arm.*', arch):
+ return 'arm'
+ elif re.match('sa110', arch):
+ return 'arm'
+ elif re.match('s390x', arch):
+ return 's390'
+ elif re.match('parisc64', arch):
+ return 'parisc'
+ elif re.match('ppc.*', arch):
+ return 'powerpc'
+ elif re.match('mips.*', arch):
+ return 'mips'
+ else:
+ return arch
+
+
+ def get_kernel_build_release(self):
+ releasem = re.compile(r'.*UTS_RELEASE\s+"([^"]+)".*');
+ versionm = re.compile(r'.*UTS_VERSION\s+"([^"]+)".*');
+
+ release = None
+ version = None
+
+ for file in [ self.build_dir + "/include/linux/version.h",
+ self.build_dir + "/include/linux/utsrelease.h",
+ self.build_dir + "/include/linux/compile.h" ]:
+ if os.path.exists(file):
+ fd = open(file, 'r')
+ for line in fd.readlines():
+ m = releasem.match(line)
+ if m:
+ release = m.groups()[0]
+ m = versionm.match(line)
+ if m:
+ version = m.groups()[0]
+ fd.close()
+
+ return (release, version)
+
+
+ def get_kernel_build_ident(self):
+ (release, version) = self.get_kernel_build_release()
+
+ if not release or not version:
+ raise error.JobError('kernel has no identity')
+
+ return release + '::' + version
+
+
+ def boot(self, args='', ident=1):
+ """ install and boot this kernel, do not care how
+ just make it happen.
+ """
+
+ # If we can check the kernel identity do so.
+ if ident:
+ when = int(time.time())
+ ident = self.get_kernel_build_ident()
+ args += " IDENT=%d" % (when)
+
+ self.job.next_step_prepend(["job.kernel_check_ident",
+ when, ident, self.subdir,
+ self.applied_patches])
+
+ # Check if the kernel has been installed, if not install
+ # as the default tag and boot that.
+ if not self.installed_as:
+ self.install()
+
+ # Boot the selected tag.
+ self.add_to_bootloader(args=args, tag=self.installed_as)
+
+ # Boot it.
+ self.job.reboot(tag=self.installed_as)
+
+
+ def get_kernel_build_ver(self):
+ """Check Makefile and .config to return kernel version"""
+ version = patchlevel = sublevel = extraversion = localversion = ''
+
+ for line in open(self.build_dir + '/Makefile', 'r').readlines():
+ if line.startswith('VERSION'):
+ version = line[line.index('=') + 1:].strip()
+ if line.startswith('PATCHLEVEL'):
+ patchlevel = line[line.index('=') + 1:].strip()
+ if line.startswith('SUBLEVEL'):
+ sublevel = line[line.index('=') + 1:].strip()
+ if line.startswith('EXTRAVERSION'):
+ extraversion = line[line.index('=') + 1:].strip()
+
+ for line in open(self.build_dir + '/.config', 'r').readlines():
+ if line.startswith('CONFIG_LOCALVERSION='):
+ localversion = line.rstrip().split('"')[1]
+
+ return "%s.%s.%s%s%s" %(version, patchlevel, sublevel, extraversion, localversion)
+
+
+ def set_build_target(self, build_target):
+ if build_target:
+ self.build_target = build_target
+ print 'BUILD TARGET: %s' % self.build_target
- if self.target_arch:
- return
- # if someone has set build_target, don't clobber in set_cross_cc
- # run set_build_target before calling set_cross_cc
- if not self.build_target:
- self.set_build_target(build_target)
+ def set_cross_cc(self, target_arch=None, cross_compile=None,
+ build_target='bzImage'):
+ """Set up to cross-compile.
+ This is broken. We need to work out what the default
+ compile produces, and if not, THEN set the cross
+ compiler.
+ """
- # If no 'target_arch' given assume native compilation
- if target_arch == None:
- target_arch = autotest_utils.get_current_kernel_arch()
- if target_arch == 'ppc64':
- if self.build_target == 'bzImage':
- self.build_target = 'vmlinux'
+ if self.target_arch:
+ return
- if not cross_compile:
- cross_compile = self.job.config_get('kernel.cross_cc')
+ # if someone has set build_target, don't clobber in set_cross_cc
+ # run set_build_target before calling set_cross_cc
+ if not self.build_target:
+ self.set_build_target(build_target)
- if cross_compile:
- os.environ['CROSS_COMPILE'] = cross_compile
- else:
- if os.environ.has_key('CROSS_COMPILE'):
- del os.environ['CROSS_COMPILE']
-
- return # HACK. Crap out for now.
+ # If no 'target_arch' given assume native compilation
+ if target_arch == None:
+ target_arch = autotest_utils.get_current_kernel_arch()
+ if target_arch == 'ppc64':
+ if self.build_target == 'bzImage':
+ self.build_target = 'vmlinux'
- # At this point I know what arch I *want* to build for
- # but have no way of working out what arch the default
- # compiler DOES build for.
+ if not cross_compile:
+ cross_compile = self.job.config_get('kernel.cross_cc')
- # Oh, and BTW, install_package() doesn't exist yet.
+ if cross_compile:
+ os.environ['CROSS_COMPILE'] = cross_compile
+ else:
+ if os.environ.has_key('CROSS_COMPILE'):
+ del os.environ['CROSS_COMPILE']
- if target_arch == 'ppc64':
- install_package('ppc64-cross')
- cross_compile = os.path.join(self.autodir, 'sources/ppc64-cross/bin')
+ return # HACK. Crap out for now.
- elif target_arch == 'x86_64':
- install_package('x86_64-cross')
- cross_compile = os.path.join(self.autodir, 'sources/x86_64-cross/bin')
+ # At this point I know what arch I *want* to build for
+ # but have no way of working out what arch the default
+ # compiler DOES build for.
- os.environ['ARCH'] = self.target_arch = target_arch
+ # Oh, and BTW, install_package() doesn't exist yet.
- self.cross_compile = cross_compile
- if self.cross_compile:
- os.environ['CROSS_COMPILE'] = self.cross_compile
+ if target_arch == 'ppc64':
+ install_package('ppc64-cross')
+ cross_compile = os.path.join(self.autodir, 'sources/ppc64-cross/bin')
+ elif target_arch == 'x86_64':
+ install_package('x86_64-cross')
+ cross_compile = os.path.join(self.autodir, 'sources/x86_64-cross/bin')
- def pickle_dump(self, filename):
- """dump a pickle of ourself out to the specified filename
+ os.environ['ARCH'] = self.target_arch = target_arch
- we can't pickle the backreference to job (it contains fd's),
- nor would we want to. Same for logfile (fd's).
- """
- temp = copy.copy(self)
- temp.job = None
- temp.logfile = None
- pickle.dump(temp, open(filename, 'w'))
+ self.cross_compile = cross_compile
+ if self.cross_compile:
+ os.environ['CROSS_COMPILE'] = self.cross_compile
+
+
+ def pickle_dump(self, filename):
+ """dump a pickle of ourself out to the specified filename
+
+ we can't pickle the backreference to job (it contains fd's),
+ nor would we want to. Same for logfile (fd's).
+ """
+ temp = copy.copy(self)
+ temp.job = None
+ temp.logfile = None
+ pickle.dump(temp, open(filename, 'w'))
class rpm_kernel:
- """ Class for installing rpm kernel package
- """
-
- def __init__(self, job, rpm_package, subdir):
- self.job = job
- self.rpm_package = rpm_package
- self.log_dir = os.path.join(subdir, 'debug')
- self.subdir = os.path.basename(subdir)
- if os.path.exists(self.log_dir):
- utils.system('rm -rf ' + self.log_dir)
- os.mkdir(self.log_dir)
- self.installed_as = None
-
-
- @logging.record
- @tee_output_logdir_mark
- def install(self, tag='autotest'):
- self.installed_as = tag
-
- self.rpm_name = utils.system_output('rpm -qp ' + self.rpm_package)
-
- # install
- utils.system('rpm -i --force ' + self.rpm_package)
-
- # get file list
- files = utils.system_output('rpm -ql ' + self.rpm_name).splitlines()
-
- # search for vmlinuz
- for file in files:
- if file.startswith('/boot/vmlinuz'):
- self.image = file
- break
- else:
- errmsg = "%s doesn't contain /boot/vmlinuz"
- errmsg %= self.rpm_package
- raise error.TestError(errmsg)
-
- # search for initrd
- self.initrd = ''
- for file in files:
- if file.startswith('/boot/initrd'):
- self.initrd = file
- break
-
- # get version and release number
- self.version, self.release = utils.system_output(
- 'rpm --queryformat="%{VERSION}\\n%{RELEASE}\\n" -q ' + self.rpm_name).splitlines()[0:2]
-
-
- def add_to_bootloader(self, tag='autotest', args=''):
- """ Add this kernel to bootloader
- """
-
- # remove existing entry if present
- self.job.bootloader.remove_kernel(tag)
-
- # pull the base argument set from the job config
- baseargs = self.job.config_get('boot.default_args')
- if baseargs:
- args = baseargs + ' ' + args
-
- # otherwise populate from /proc/cmdline
- # if not baseargs:
- # baseargs = open('/proc/cmdline', 'r').readline().strip()
- # NOTE: This is unnecessary, because boottool does it.
-
- root = None
- roots = [x for x in args.split() if x.startswith('root=')]
- if roots:
- root = re.sub('^root=', '', roots[0])
- arglist = [x for x in args.split() if not x.startswith('root=')]
- args = ' '.join(arglist)
-
- # add the kernel entry
- self.job.bootloader.add_kernel(self.image, tag, self.initrd, args = args, root = root)
-
-
- def boot(self, args='', ident=1):
- """ install and boot this kernel
- """
-
- # Check if the kernel has been installed, if not install
- # as the default tag and boot that.
- if not self.installed_as:
- self.install()
-
- # If we can check the kernel identity do so.
- if ident:
- when = int(time.time())
- ident = '-'.join([self.version,
- self.rpm_name.split('-')[1],
- self.release])
- args += " IDENT=%d" % (when)
-
- self.job.next_step_prepend(["job.kernel_check_ident",
- when, ident, self.subdir, 'rpm'])
-
- # Boot the selected tag.
- self.add_to_bootloader(args=args, tag=self.installed_as)
-
- # Boot it.
- self.job.reboot(tag=self.installed_as)
+ """ Class for installing rpm kernel package
+ """
+
+ def __init__(self, job, rpm_package, subdir):
+ self.job = job
+ self.rpm_package = rpm_package
+ self.log_dir = os.path.join(subdir, 'debug')
+ self.subdir = os.path.basename(subdir)
+ if os.path.exists(self.log_dir):
+ utils.system('rm -rf ' + self.log_dir)
+ os.mkdir(self.log_dir)
+ self.installed_as = None
+
+
+ @logging.record
+ @tee_output_logdir_mark
+ def install(self, tag='autotest'):
+ self.installed_as = tag
+
+ self.rpm_name = utils.system_output('rpm -qp ' + self.rpm_package)
+
+ # install
+ utils.system('rpm -i --force ' + self.rpm_package)
+
+ # get file list
+ files = utils.system_output('rpm -ql ' + self.rpm_name).splitlines()
+
+ # search for vmlinuz
+ for file in files:
+ if file.startswith('/boot/vmlinuz'):
+ self.image = file
+ break
+ else:
+ errmsg = "%s doesn't contain /boot/vmlinuz"
+ errmsg %= self.rpm_package
+ raise error.TestError(errmsg)
+
+ # search for initrd
+ self.initrd = ''
+ for file in files:
+ if file.startswith('/boot/initrd'):
+ self.initrd = file
+ break
+
+ # get version and release number
+ self.version, self.release = utils.system_output(
+ 'rpm --queryformat="%{VERSION}\\n%{RELEASE}\\n" -q ' + self.rpm_name).splitlines()[0:2]
+
+
+ def add_to_bootloader(self, tag='autotest', args=''):
+ """ Add this kernel to bootloader
+ """
+
+ # remove existing entry if present
+ self.job.bootloader.remove_kernel(tag)
+
+ # pull the base argument set from the job config
+ baseargs = self.job.config_get('boot.default_args')
+ if baseargs:
+ args = baseargs + ' ' + args
+
+ # otherwise populate from /proc/cmdline
+ # if not baseargs:
+ # baseargs = open('/proc/cmdline', 'r').readline().strip()
+ # NOTE: This is unnecessary, because boottool does it.
+
+ root = None
+ roots = [x for x in args.split() if x.startswith('root=')]
+ if roots:
+ root = re.sub('^root=', '', roots[0])
+ arglist = [x for x in args.split() if not x.startswith('root=')]
+ args = ' '.join(arglist)
+
+ # add the kernel entry
+ self.job.bootloader.add_kernel(self.image, tag, self.initrd, args = args, root = root)
+
+
+ def boot(self, args='', ident=1):
+ """ install and boot this kernel
+ """
+
+ # Check if the kernel has been installed, if not install
+ # as the default tag and boot that.
+ if not self.installed_as:
+ self.install()
+
+ # If we can check the kernel identity do so.
+ if ident:
+ when = int(time.time())
+ ident = '-'.join([self.version,
+ self.rpm_name.split('-')[1],
+ self.release])
+ args += " IDENT=%d" % (when)
+
+ self.job.next_step_prepend(["job.kernel_check_ident",
+ when, ident, self.subdir, 'rpm'])
+
+ # Boot the selected tag.
+ self.add_to_bootloader(args=args, tag=self.installed_as)
+
+ # Boot it.
+ self.job.reboot(tag=self.installed_as)
# pull in some optional site-specific path pre-processing
try:
- import site_kernel
- preprocess_path = site_kernel.preprocess_path
- del site_kernel
+ import site_kernel
+ preprocess_path = site_kernel.preprocess_path
+ del site_kernel
except ImportError:
- # just make the preprocessor a nop
- def preprocess_path(path):
- return path
+ # just make the preprocessor a nop
+ def preprocess_path(path):
+ return path
def auto_kernel(job, path, subdir, tmp_dir, build_dir, leave=False):
- """\
- Create a kernel object, dynamically selecting the appropriate class to use
- based on the path provided.
- """
- path = preprocess_path(path)
- if path.endswith('.rpm'):
- return rpm_kernel(job, path, subdir)
- else:
- return kernel(job, path, subdir, tmp_dir, build_dir, leave)
+ """\
+ Create a kernel object, dynamically selecting the appropriate class to use
+ based on the path provided.
+ """
+ path = preprocess_path(path)
+ if path.endswith('.rpm'):
+ return rpm_kernel(job, path, subdir)
+ else:
+ return kernel(job, path, subdir, tmp_dir, build_dir, leave)
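
A minimal usage sketch, not part of the patch: how a control file might drive the reindented kernel helpers above. The autotest_lib import path, the job object and job.tmpdir, and the '2.6.23'/'build' arguments are assumptions for illustration.

    from autotest_lib.client.bin import kernel

    def build_and_boot(job):
        # auto_kernel() picks rpm_kernel for *.rpm paths, kernel otherwise
        k = kernel.auto_kernel(job, '2.6.23', 'build', job.tmpdir, 'linux')
        k.config()   # default config from the job config, or defconfig
        k.build()    # make -j <2*ncpus> <build_target>, plus modules if needed
        k.boot()     # install, add to bootloader, reboot with an IDENT check
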
diff --git a/client/bin/kernel_config.py b/client/bin/kernel_config.py
index ed6321ad..9b471b51 100755
--- a/client/bin/kernel_config.py
+++ b/client/bin/kernel_config.py
@@ -5,122 +5,122 @@ from autotest_lib.client.bin import autotest_utils, kernel_versions
from autotest_lib.client.common_lib import error, utils
def apply_overrides(orig_file, changes_file, output_file):
- override = dict()
-
- # First suck all the changes into a dictionary.
- input = file(changes_file, 'r')
- for line in input.readlines():
- if line.startswith('CONFIG_'):
- key = line.split('=')[0]
- override[key] = line;
- elif line.startswith('# CONFIG_'):
- key = line.split(' ')[1]
- override[key] = line;
- input.close()
-
- # Now go through the input file, overriding lines where need be
- input = file(orig_file, 'r')
- output = file(output_file, 'w')
- for line in input.readlines():
- if line.startswith('CONFIG_'):
- key = line.split('=')[0]
- elif line.startswith('# CONFIG_'):
- key = line.split(' ')[1]
- else:
- key = None
- if key and key in override:
- output.write(override[key])
- else:
- output.write(line)
- input.close()
- output.close()
+ override = dict()
+
+ # First suck all the changes into a dictionary.
+ input = file(changes_file, 'r')
+ for line in input.readlines():
+ if line.startswith('CONFIG_'):
+ key = line.split('=')[0]
+ override[key] = line;
+ elif line.startswith('# CONFIG_'):
+ key = line.split(' ')[1]
+ override[key] = line;
+ input.close()
+
+ # Now go through the input file, overriding lines where need be
+ input = file(orig_file, 'r')
+ output = file(output_file, 'w')
+ for line in input.readlines():
+ if line.startswith('CONFIG_'):
+ key = line.split('=')[0]
+ elif line.startswith('# CONFIG_'):
+ key = line.split(' ')[1]
+ else:
+ key = None
+ if key and key in override:
+ output.write(override[key])
+ else:
+ output.write(line)
+ input.close()
+ output.close()
def diff_configs(old, new):
- utils.system('diff -u %s %s > %s' % (old, new, new + '.diff'),
- ignore_status=True)
+ utils.system('diff -u %s %s > %s' % (old, new, new + '.diff'),
+ ignore_status=True)
def modules_needed(config):
- return (autotest_utils.grep('CONFIG_MODULES=y', config)
- and autotest_utils.grep('=m', config))
+ return (autotest_utils.grep('CONFIG_MODULES=y', config)
+ and autotest_utils.grep('=m', config))
def config_by_name(name, set):
- version = kernel_versions.version_choose_config(name, set[1:])
- if version:
- return set[0] + version
- return None
+ version = kernel_versions.version_choose_config(name, set[1:])
+ if version:
+ return set[0] + version
+ return None
class kernel_config:
- # Build directory must be ready before init'ing config.
- #
- # Stages:
- # 1. Get original config file
- # 2. Apply overrides
- # 3. Do 'make oldconfig' to update it to current source code
- # (gets done implicitly during the process)
- #
- # You may specifiy the a defconfig within the tree to build,
- # or a custom config file you want, or None, to get machine's
- # default config file from the repo.
-
- build_dir = '' # the directory we're building in
- config_dir = '' # local repository for config_file data
-
- build_config = '' # the config file in the build directory
- orig_config = '' # the original config file
- over_config = '' # config file + overrides
-
-
- def __init__(self, job, build_dir, config_dir, orig_file,
- overrides, defconfig = False, name = None):
- self.build_dir = build_dir
- self.config_dir = config_dir
-
- # 1. Get original config file
- self.build_config = build_dir + '/.config'
- if (orig_file == '' and not defconfig): # use user default
- set = job.config_get("kernel.default_config_set")
- defconf = None
- if set and name:
- defconf = config_by_name(name, set)
- if not defconf:
- defconf = job.config_get("kernel.default_config")
- if defconf:
- orig_file = defconf
- if (orig_file == '' or defconfig): # use defconfig
- print "kernel_config: using defconfig to configure kernel"
- os.chdir(build_dir)
- utils.system('make defconfig')
- else:
- print "kernel_config: using " + orig_file + \
- " to configure kernel"
- self.orig_config = config_dir + '/config.orig'
- utils.get_file(orig_file, self.orig_config)
- self.update_config(self.orig_config, self.orig_config+'.new')
- diff_configs(self.orig_config, self.orig_config+'.new')
-
-
- # 2. Apply overrides
- if overrides:
- print "kernel_config: using " + overrides + \
- " to re-configure kernel"
- self.over_config = config_dir + '/config.over'
- overrides_local = self.over_config + '.changes'
- get_file(overrides, overrides_local)
- apply_overrides(self.build_config, overrides_local, self.over_config)
- self.update_config(self.over_config, self.over_config+'.new')
- diff_configs(self.over_config, self.over_config+'.new')
- else:
- self.over_config = self.orig_config
-
-
- def update_config(self, old_config, new_config = 'None'):
- os.chdir(self.build_dir)
- shutil.copyfile(old_config, self.build_config)
- utils.system('yes "" | make oldconfig > /dev/null')
- if new_config:
- shutil.copyfile(self.build_config, new_config)
+ # Build directory must be ready before init'ing config.
+ #
+ # Stages:
+ # 1. Get original config file
+ # 2. Apply overrides
+ # 3. Do 'make oldconfig' to update it to current source code
+ # (gets done implicitly during the process)
+ #
+    # You may specify a defconfig within the tree to build,
+    # or a custom config file you want, or None, to get the machine's
+    # default config file from the repo.
+
+ build_dir = '' # the directory we're building in
+ config_dir = '' # local repository for config_file data
+
+ build_config = '' # the config file in the build directory
+ orig_config = '' # the original config file
+ over_config = '' # config file + overrides
+
+
+ def __init__(self, job, build_dir, config_dir, orig_file,
+ overrides, defconfig = False, name = None):
+ self.build_dir = build_dir
+ self.config_dir = config_dir
+
+ # 1. Get original config file
+ self.build_config = build_dir + '/.config'
+ if (orig_file == '' and not defconfig): # use user default
+ set = job.config_get("kernel.default_config_set")
+ defconf = None
+ if set and name:
+ defconf = config_by_name(name, set)
+ if not defconf:
+ defconf = job.config_get("kernel.default_config")
+ if defconf:
+ orig_file = defconf
+ if (orig_file == '' or defconfig): # use defconfig
+ print "kernel_config: using defconfig to configure kernel"
+ os.chdir(build_dir)
+ utils.system('make defconfig')
+ else:
+ print "kernel_config: using " + orig_file + \
+ " to configure kernel"
+ self.orig_config = config_dir + '/config.orig'
+ utils.get_file(orig_file, self.orig_config)
+ self.update_config(self.orig_config, self.orig_config+'.new')
+ diff_configs(self.orig_config, self.orig_config+'.new')
+
+
+ # 2. Apply overrides
+ if overrides:
+ print "kernel_config: using " + overrides + \
+ " to re-configure kernel"
+ self.over_config = config_dir + '/config.over'
+ overrides_local = self.over_config + '.changes'
+ get_file(overrides, overrides_local)
+ apply_overrides(self.build_config, overrides_local, self.over_config)
+ self.update_config(self.over_config, self.over_config+'.new')
+ diff_configs(self.over_config, self.over_config+'.new')
+ else:
+ self.over_config = self.orig_config
+
+
+ def update_config(self, old_config, new_config = 'None'):
+ os.chdir(self.build_dir)
+ shutil.copyfile(old_config, self.build_config)
+ utils.system('yes "" | make oldconfig > /dev/null')
+ if new_config:
+ shutil.copyfile(self.build_config, new_config)
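
A hedged sketch, not part of the patch, of what apply_overrides() above does; the import path and file names are assumptions:

    from autotest_lib.client.bin.kernel_config import apply_overrides

    # config.orig:                      overrides.changes:
    #   CONFIG_SMP=y                      # CONFIG_SMP is not set
    #   CONFIG_NUMA=y                     CONFIG_DEBUG_INFO=y
    #   # CONFIG_DEBUG_INFO is not set
    apply_overrides('config.orig', 'overrides.changes', 'config.over')
    # In config.over, CONFIG_NUMA=y is untouched, the SMP and DEBUG_INFO
    # lines are replaced by the override versions, and options that appear
    # only in the overrides file are not appended.
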
diff --git a/client/bin/kernel_versions.py b/client/bin/kernel_versions.py
index 3af08fe7..9b04bebd 100644
--- a/client/bin/kernel_versions.py
+++ b/client/bin/kernel_versions.py
@@ -5,111 +5,109 @@ __author__ = """Copyright Andy Whitcroft 2007"""
import sys,re
-#
+#
# Sort key for ordering versions chronologically. The key ordering
# problem is between that introduced by -rcN. These come _before_
# their accompanying version.
-#
+#
# 2.6.0 -> 2.6.1-rc1 -> 2.6.1
-#
+#
# In order to sort them we convert all non-rc releases to a pseudo
# -rc99 release. We also convert all numbers to two digits. The
# result is then sortable textually.
-#
+#
# 02.06.00-rc99 -> 02.06.01-rc01 -> 02.06.01-rc99
-#
+#
encode_sep = re.compile(r'(\D+)')
def version_encode(version):
- bits = encode_sep.split(version)
- n = 9
- if len(bits[0]) == 0:
- n += 2
- if len(bits) == n or (len(bits) > n and bits[n] != '_rc'):
- # Insert missing _rc99 after 2 . 6 . 18 -smp- 220 . 0
- bits.insert(n, '_rc')
- bits.insert(n+1, '99')
- n = 5
- if len(bits[0]) == 0:
- n += 2
- if len(bits) <= n or bits[n] != '-rc':
- bits.insert(n, '-rc')
- bits.insert(n+1, '99')
- for n in range(0, len(bits), 2):
- if len(bits[n]) == 1:
- bits[n] = '0' + bits[n]
-
- return ''.join(bits)
+ bits = encode_sep.split(version)
+ n = 9
+ if len(bits[0]) == 0:
+ n += 2
+ if len(bits) == n or (len(bits) > n and bits[n] != '_rc'):
+ # Insert missing _rc99 after 2 . 6 . 18 -smp- 220 . 0
+ bits.insert(n, '_rc')
+ bits.insert(n+1, '99')
+ n = 5
+ if len(bits[0]) == 0:
+ n += 2
+ if len(bits) <= n or bits[n] != '-rc':
+ bits.insert(n, '-rc')
+ bits.insert(n+1, '99')
+ for n in range(0, len(bits), 2):
+ if len(bits[n]) == 1:
+ bits[n] = '0' + bits[n]
+
+ return ''.join(bits)
def version_limit(version, n):
- bits = encode_sep.split(version)
- return ''.join(bits[0:n])
+ bits = encode_sep.split(version)
+ return ''.join(bits[0:n])
def version_len(version):
- return len(encode_sep.split(version))
+ return len(encode_sep.split(version))
#
# Given a list of versions find the nearest version which is deemed
# less than or equal to the target. Versions are in linux order
# as follows:
-#
+#
# 2.6.0 -> 2.6.1 -> 2.6.2-rc1 -> 2.6.2-rc2 -> 2.6.2 -> 2.6.3-rc1
# | |\
# | | 2.6.2-rc1-mm1 -> 2.6.2-rc1-mm2
# | \
# | 2.6.2-rc1-ac1 -> 2.6.2-rc1-ac2
-# \
+# \
# 2.6.1-mm1 -> 2.6.1-mm2
-#
+#
# Note that a 2.6.1-mm1 is not a predecessor of 2.6.2-rc1-mm1.
#
def version_choose_config(version, candidates):
- # Check if we have an exact match ... if so magic
- if version in candidates:
- return version
-
- # Sort the search key into the list ordered by 'age'
- deco = [ (version_encode(v), i, v) for i, v in
- enumerate(candidates + [ version ]) ]
- deco.sort()
- versions = [ v for _, _, v in deco ]
-
- # Everything sorted below us is of interst.
- for n in range(len(versions) - 1, -1, -1):
- if versions[n] == version:
- break
- n -= 1
-
- # Try ever shorter 'prefixes' 2.6.20-rc3-mm, 2.6.20-rc, 2.6. etc
- # to match against the ordered list newest to oldest.
- length = version_len(version) - 1
- version = version_limit(version, length)
- while length > 1:
- for o in range(n, -1, -1):
- if version_len(versions[o]) == (length + 1) and \
- version_limit(versions[o], length) == version:
- return versions[o]
- length -= 2
- version = version_limit(version, length)
-
- return None
+ # Check if we have an exact match ... if so magic
+ if version in candidates:
+ return version
+
+ # Sort the search key into the list ordered by 'age'
+ deco = [ (version_encode(v), i, v) for i, v in
+ enumerate(candidates + [ version ]) ]
+ deco.sort()
+ versions = [ v for _, _, v in deco ]
+
+    # Everything sorted below us is of interest.
+ for n in range(len(versions) - 1, -1, -1):
+ if versions[n] == version:
+ break
+ n -= 1
+
+ # Try ever shorter 'prefixes' 2.6.20-rc3-mm, 2.6.20-rc, 2.6. etc
+ # to match against the ordered list newest to oldest.
+ length = version_len(version) - 1
+ version = version_limit(version, length)
+ while length > 1:
+ for o in range(n, -1, -1):
+ if version_len(versions[o]) == (length + 1) and \
+ version_limit(versions[o], length) == version:
+ return versions[o]
+ length -= 2
+ version = version_limit(version, length)
+
+ return None
def is_released_kernel(version):
- # True if version name suggests a released kernel,
- # not some release candidate or experimental kernel name
- # e.g. 2.6.18-smp-200.0 includes no other text, underscores, etc
- version = version.strip('01234567890.-')
- return version in ['', 'smp', 'smpx', 'pae']
+ # True if version name suggests a released kernel,
+ # not some release candidate or experimental kernel name
+ # e.g. 2.6.18-smp-200.0 includes no other text, underscores, etc
+ version = version.strip('01234567890.-')
+ return version in ['', 'smp', 'smpx', 'pae']
def is_release_candidate(version):
- # True if version names a released kernel or release candidate,
- # not some experimental name containing arbitrary text
- # e.g. 2.6.18-smp-220.0_rc3 but not 2.6.18_patched
- version = re.sub(r'[_-]rc\d+', '', version)
- return is_released_kernel(version)
-
-
+ # True if version names a released kernel or release candidate,
+ # not some experimental name containing arbitrary text
+ # e.g. 2.6.18-smp-220.0_rc3 but not 2.6.18_patched
+ version = re.sub(r'[_-]rc\d+', '', version)
+ return is_released_kernel(version)
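
A short sketch, not part of the patch, of the encoding described in the comments above; the import path is an assumption:

    from autotest_lib.client.bin.kernel_versions import version_encode, \
        version_choose_config

    version_encode('2.6.1-rc1')   # -> '02.06.01-rc01'
    version_encode('2.6.1')       # -> '02.06.01-rc99' (pseudo -rc99 release)
    # Plain string comparison of the encoded forms sorts kernels
    # chronologically, which version_choose_config() uses to pick the
    # nearest older config name when there is no exact match:
    version_choose_config('2.6.23-rc2', ['2.6.22', '2.6.23-rc1'])
    # -> '2.6.23-rc1'
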
diff --git a/client/bin/kernel_versions_unittest.py b/client/bin/kernel_versions_unittest.py
index fc21a653..b63ecc88 100755
--- a/client/bin/kernel_versions_unittest.py
+++ b/client/bin/kernel_versions_unittest.py
@@ -5,83 +5,83 @@ from kernel_versions import *
class kernel_versions_test(unittest.TestCase):
- def increases(self, kernels):
- for i in xrange(len(kernels)-1):
- k1 = kernels[i]
- k2 = kernels[i+1]
- ek1 = version_encode(k1)
- ek2 = version_encode(k2)
- self.assert_(ek1 < ek2,
- '%s (-> %s) should sort < %s (-> %s)'
- % (k1, ek1, k2, ek2) )
+ def increases(self, kernels):
+ for i in xrange(len(kernels)-1):
+ k1 = kernels[i]
+ k2 = kernels[i+1]
+ ek1 = version_encode(k1)
+ ek2 = version_encode(k2)
+ self.assert_(ek1 < ek2,
+ '%s (-> %s) should sort < %s (-> %s)'
+ % (k1, ek1, k2, ek2) )
- def test_version_encode(self):
- series1 = [
- '2.6',
- '2.6.0',
- '2.6.1-rc1',
- '2.6.1-rc1_fix',
- '2.6.1-rc1_patch',
- '2.6.1-rc9',
- '2.6.1-rc9-mm1',
- '2.6.1-rc9-mm2',
- '2.6.1-rc10',
- '2.6.1-rc98',
- '2.6.1',
- '2.6.1_patch',
- '2.6.9',
- '2.6.10',
- '2.6.99',
- '2.7',
- '2.9.99',
- '2.10.0',
- '99.99.99',
- 'UNKNOWN',
- ]
- self.increases(series1)
- self.increases(['pathX'+k for k in series1])
- series2 = [
- '2.6.18-smp-220',
- '2.6.18-smp-220.0',
- '2.6.18-smp-220.1_rc1',
- '2.6.18-smp-220.1_rc1_fix',
- '2.6.18-smp-220.1_rc1_patch',
- '2.6.18-smp-220.1_rc9',
- '2.6.18-smp-220.1_rc9_mm1',
- '2.6.18-smp-220.1_rc9_mm2',
- '2.6.18-smp-220.1_rc10',
- '2.6.18-smp-220.1_rc98',
- '2.6.18-smp-220.1',
- '2.6.18-smp-220.1_patch',
- '2.6.18-smp-220.9',
- '2.6.18-smp-220.10',
- '2.6.18-smp-220.99',
- '2.6.18-smp-221',
- 'UNKNOWN',
- ]
- self.increases(series2)
- self.increases(['pathX'+k for k in series2])
+ def test_version_encode(self):
+ series1 = [
+ '2.6',
+ '2.6.0',
+ '2.6.1-rc1',
+ '2.6.1-rc1_fix',
+ '2.6.1-rc1_patch',
+ '2.6.1-rc9',
+ '2.6.1-rc9-mm1',
+ '2.6.1-rc9-mm2',
+ '2.6.1-rc10',
+ '2.6.1-rc98',
+ '2.6.1',
+ '2.6.1_patch',
+ '2.6.9',
+ '2.6.10',
+ '2.6.99',
+ '2.7',
+ '2.9.99',
+ '2.10.0',
+ '99.99.99',
+ 'UNKNOWN',
+ ]
+ self.increases(series1)
+ self.increases(['pathX'+k for k in series1])
+ series2 = [
+ '2.6.18-smp-220',
+ '2.6.18-smp-220.0',
+ '2.6.18-smp-220.1_rc1',
+ '2.6.18-smp-220.1_rc1_fix',
+ '2.6.18-smp-220.1_rc1_patch',
+ '2.6.18-smp-220.1_rc9',
+ '2.6.18-smp-220.1_rc9_mm1',
+ '2.6.18-smp-220.1_rc9_mm2',
+ '2.6.18-smp-220.1_rc10',
+ '2.6.18-smp-220.1_rc98',
+ '2.6.18-smp-220.1',
+ '2.6.18-smp-220.1_patch',
+ '2.6.18-smp-220.9',
+ '2.6.18-smp-220.10',
+ '2.6.18-smp-220.99',
+ '2.6.18-smp-221',
+ 'UNKNOWN',
+ ]
+ self.increases(series2)
+ self.increases(['pathX'+k for k in series2])
- releases = ['2.6.1' , '2.6.18-smp-220.0' ]
- candidates = ['2.6.1-rc1' , '2.6.18-smp-220.0_rc1']
- experiments = ['2.6.1-patch', '2.6.1-rc1_patch',
- '2.6.18-smp-220.0_patch', 'UNKNOWN']
+ releases = ['2.6.1' , '2.6.18-smp-220.0' ]
+ candidates = ['2.6.1-rc1' , '2.6.18-smp-220.0_rc1']
+ experiments = ['2.6.1-patch', '2.6.1-rc1_patch',
+ '2.6.18-smp-220.0_patch', 'UNKNOWN']
- def test_is_released_kernel(self):
- for v in self.releases:
- self.assert_( is_released_kernel(v))
- for v in self.candidates + self.experiments:
- self.assert_(not is_released_kernel(v))
+ def test_is_released_kernel(self):
+ for v in self.releases:
+ self.assert_( is_released_kernel(v))
+ for v in self.candidates + self.experiments:
+ self.assert_(not is_released_kernel(v))
- def test_is_release_candidate(self):
- for v in self.releases + self.candidates:
- self.assert_( is_release_candidate(v))
- for v in self.experiments:
- self.assert_(not is_release_candidate(v))
+ def test_is_release_candidate(self):
+ for v in self.releases + self.candidates:
+ self.assert_( is_release_candidate(v))
+ for v in self.experiments:
+ self.assert_(not is_release_candidate(v))
if __name__ == "__main__":
- unittest.main()
+ unittest.main()
diff --git a/client/bin/kernelexpand-test.py b/client/bin/kernelexpand-test.py
index 3c34010d..24b4ce04 100755
--- a/client/bin/kernelexpand-test.py
+++ b/client/bin/kernelexpand-test.py
@@ -11,136 +11,136 @@ kml = 'http://www.example.com/mirror/kernel.org/'
akpml = 'http://www.example.com/mirror/akpm/'
mirrorA = [
- [ akpm, akpml ],
- [ km, kml ],
+ [ akpm, akpml ],
+ [ km, kml ],
]
class kernelexpandTest(unittest.TestCase):
- def test_decompose_simple(self):
- correct = [
- [ km + 'v2.6/linux-2.6.23.tar.bz2' ]
- ]
- sample = decompose_kernel('2.6.23')
- self.assertEqual(sample, correct)
-
-
- def test_decompose_fail(self):
- success = False
- try:
- sample = decompose_kernel('1.0.0.0.0')
- success = True
- except NameError:
- pass
- except Exception, e:
- self.fail('expected NameError, got something else')
-
- if success:
- self.fail('expected NameError, was successful')
-
-
- def test_decompose_rcN(self):
- correct = [
- [ km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
- km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2']
- ]
- sample = decompose_kernel('2.6.23-rc1')
- self.assertEqual(sample, correct)
-
-
- def test_decompose_mmN(self):
- correct = [
- [ km + 'v2.6/linux-2.6.23.tar.bz2' ],
- [ akpm + '2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2' ]
- ]
- sample = decompose_kernel('2.6.23-mm1')
- self.assertEqual(sample, correct)
-
-
- def test_decompose_gitN(self):
- correct = [
- [ km + 'v2.6/linux-2.6.23.tar.bz2' ],
- [ km + 'v2.6/snapshots/old/patch-2.6.23-git1.bz2',
- km + 'v2.6/snapshots/patch-2.6.23-git1.bz2']
- ]
- sample = decompose_kernel('2.6.23-git1')
- self.assertEqual(sample, correct)
-
-
- def test_decompose_rcN_mmN(self):
- correct = [
- [ km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
- km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2' ],
- [ akpm + '2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2']
- ]
- sample = decompose_kernel('2.6.23-rc1-mm1')
- self.assertEqual(sample, correct)
-
-
- def test_mirrorA_simple(self):
- correct = [
- [ kml + 'v2.6/linux-2.6.23.tar.bz2',
- km + 'v2.6/linux-2.6.23.tar.bz2' ]
- ]
- sample = decompose_kernel('2.6.23')
- sample = mirror_kernel_components(mirrorA, sample)
-
- self.assertEqual(sample, correct)
-
-
- def test_mirrorA_rcN(self):
- correct = [
- [ kml + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
- kml + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2',
- km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
- km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2' ]
- ]
- sample = decompose_kernel('2.6.23-rc1')
- sample = mirror_kernel_components(mirrorA, sample)
- self.assertEqual(sample, correct)
-
-
- def test_mirrorA_mmN(self):
- correct = [
- [ kml + 'v2.6/linux-2.6.23.tar.bz2',
- km + 'v2.6/linux-2.6.23.tar.bz2'],
- [ akpml + '2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2',
- kml + 'people/akpm/patches/2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2',
- akpm + '2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2' ]
- ]
-
- sample = decompose_kernel('2.6.23-mm1')
- sample = mirror_kernel_components(mirrorA, sample)
- self.assertEqual(sample, correct)
-
-
- def test_mirrorA_gitN(self):
- correct = [
- [ kml + 'v2.6/linux-2.6.23.tar.bz2',
- km + 'v2.6/linux-2.6.23.tar.bz2'],
- [ kml + 'v2.6/snapshots/old/patch-2.6.23-git1.bz2',
- kml + 'v2.6/snapshots/patch-2.6.23-git1.bz2',
- km + 'v2.6/snapshots/old/patch-2.6.23-git1.bz2',
- km + 'v2.6/snapshots/patch-2.6.23-git1.bz2' ]
- ]
- sample = decompose_kernel('2.6.23-git1')
- sample = mirror_kernel_components(mirrorA, sample)
- self.assertEqual(sample, correct)
-
-
- def test_mirrorA_rcN_mmN(self):
- correct = [
- [ kml + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
- kml + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2',
- km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
- km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2'],
- [ akpml + '2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2',
- kml + 'people/akpm/patches/2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2',
- akpm + '2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2' ]
- ]
- sample = decompose_kernel('2.6.23-rc1-mm1')
- sample = mirror_kernel_components(mirrorA, sample)
- self.assertEqual(sample, correct)
+ def test_decompose_simple(self):
+ correct = [
+ [ km + 'v2.6/linux-2.6.23.tar.bz2' ]
+ ]
+ sample = decompose_kernel('2.6.23')
+ self.assertEqual(sample, correct)
+
+
+ def test_decompose_fail(self):
+ success = False
+ try:
+ sample = decompose_kernel('1.0.0.0.0')
+ success = True
+ except NameError:
+ pass
+ except Exception, e:
+ self.fail('expected NameError, got something else')
+
+ if success:
+ self.fail('expected NameError, was successful')
+
+
+ def test_decompose_rcN(self):
+ correct = [
+ [ km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
+ km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2']
+ ]
+ sample = decompose_kernel('2.6.23-rc1')
+ self.assertEqual(sample, correct)
+
+
+ def test_decompose_mmN(self):
+ correct = [
+ [ km + 'v2.6/linux-2.6.23.tar.bz2' ],
+ [ akpm + '2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2' ]
+ ]
+ sample = decompose_kernel('2.6.23-mm1')
+ self.assertEqual(sample, correct)
+
+
+ def test_decompose_gitN(self):
+ correct = [
+ [ km + 'v2.6/linux-2.6.23.tar.bz2' ],
+ [ km + 'v2.6/snapshots/old/patch-2.6.23-git1.bz2',
+ km + 'v2.6/snapshots/patch-2.6.23-git1.bz2']
+ ]
+ sample = decompose_kernel('2.6.23-git1')
+ self.assertEqual(sample, correct)
+
+
+ def test_decompose_rcN_mmN(self):
+ correct = [
+ [ km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
+ km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2' ],
+ [ akpm + '2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2']
+ ]
+ sample = decompose_kernel('2.6.23-rc1-mm1')
+ self.assertEqual(sample, correct)
+
+
+ def test_mirrorA_simple(self):
+ correct = [
+ [ kml + 'v2.6/linux-2.6.23.tar.bz2',
+ km + 'v2.6/linux-2.6.23.tar.bz2' ]
+ ]
+ sample = decompose_kernel('2.6.23')
+ sample = mirror_kernel_components(mirrorA, sample)
+
+ self.assertEqual(sample, correct)
+
+
+ def test_mirrorA_rcN(self):
+ correct = [
+ [ kml + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
+ kml + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2',
+ km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
+ km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2' ]
+ ]
+ sample = decompose_kernel('2.6.23-rc1')
+ sample = mirror_kernel_components(mirrorA, sample)
+ self.assertEqual(sample, correct)
+
+
+ def test_mirrorA_mmN(self):
+ correct = [
+ [ kml + 'v2.6/linux-2.6.23.tar.bz2',
+ km + 'v2.6/linux-2.6.23.tar.bz2'],
+ [ akpml + '2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2',
+ kml + 'people/akpm/patches/2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2',
+ akpm + '2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2' ]
+ ]
+
+ sample = decompose_kernel('2.6.23-mm1')
+ sample = mirror_kernel_components(mirrorA, sample)
+ self.assertEqual(sample, correct)
+
+
+ def test_mirrorA_gitN(self):
+ correct = [
+ [ kml + 'v2.6/linux-2.6.23.tar.bz2',
+ km + 'v2.6/linux-2.6.23.tar.bz2'],
+ [ kml + 'v2.6/snapshots/old/patch-2.6.23-git1.bz2',
+ kml + 'v2.6/snapshots/patch-2.6.23-git1.bz2',
+ km + 'v2.6/snapshots/old/patch-2.6.23-git1.bz2',
+ km + 'v2.6/snapshots/patch-2.6.23-git1.bz2' ]
+ ]
+ sample = decompose_kernel('2.6.23-git1')
+ sample = mirror_kernel_components(mirrorA, sample)
+ self.assertEqual(sample, correct)
+
+
+ def test_mirrorA_rcN_mmN(self):
+ correct = [
+ [ kml + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
+ kml + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2',
+ km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
+ km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2'],
+ [ akpml + '2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2',
+ kml + 'people/akpm/patches/2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2',
+ akpm + '2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2' ]
+ ]
+ sample = decompose_kernel('2.6.23-rc1-mm1')
+ sample = mirror_kernel_components(mirrorA, sample)
+ self.assertEqual(sample, correct)
if __name__ == '__main__':
- unittest.main()
+ unittest.main()
diff --git a/client/bin/kernelexpand.py b/client/bin/kernelexpand.py
index 73028fab..e23b8653 100755
--- a/client/bin/kernelexpand.py
+++ b/client/bin/kernelexpand.py
@@ -12,180 +12,180 @@ import sys, re, os
kernel = 'http://www.kernel.org/pub/linux/kernel/'
mappings = [
- [ r'^\d+\.\d+\.\d+$', '', True, [
- kernel + 'v%(major)s/linux-%(full)s.tar.bz2'
- ]],
- [ r'^\d+\.\d+\.\d+\.\d+$', '', True, [
- kernel + 'v%(major)s/linux-%(full)s.tar.bz2'
- ]],
- [ r'-rc\d+$', '%(minor-prev)s', True, [
- kernel + 'v%(major)s/testing/v%(minor)s/linux-%(full)s.tar.bz2',
- kernel + 'v%(major)s/testing/linux-%(full)s.tar.bz2',
- ]],
- [ r'-(git|bk)\d+$', '%(base)s', False, [
- kernel + 'v%(major)s/snapshots/old/patch-%(full)s.bz2',
- kernel + 'v%(major)s/snapshots/patch-%(full)s.bz2',
- ]],
- [ r'-mm\d+$', '%(base)s', False, [
- kernel + 'people/akpm/patches/' +
- '%(major)s/%(base)s/%(full)s/%(full)s.bz2'
- ]],
- [ r'-mjb\d+$', '%(base)s', False, [
- kernel + 'people/mbligh/%(base)s/patch-%(full)s.bz2'
- ]]
+ [ r'^\d+\.\d+\.\d+$', '', True, [
+ kernel + 'v%(major)s/linux-%(full)s.tar.bz2'
+ ]],
+ [ r'^\d+\.\d+\.\d+\.\d+$', '', True, [
+ kernel + 'v%(major)s/linux-%(full)s.tar.bz2'
+ ]],
+ [ r'-rc\d+$', '%(minor-prev)s', True, [
+ kernel + 'v%(major)s/testing/v%(minor)s/linux-%(full)s.tar.bz2',
+ kernel + 'v%(major)s/testing/linux-%(full)s.tar.bz2',
+ ]],
+ [ r'-(git|bk)\d+$', '%(base)s', False, [
+ kernel + 'v%(major)s/snapshots/old/patch-%(full)s.bz2',
+ kernel + 'v%(major)s/snapshots/patch-%(full)s.bz2',
+ ]],
+ [ r'-mm\d+$', '%(base)s', False, [
+ kernel + 'people/akpm/patches/' +
+ '%(major)s/%(base)s/%(full)s/%(full)s.bz2'
+ ]],
+ [ r'-mjb\d+$', '%(base)s', False, [
+ kernel + 'people/mbligh/%(base)s/patch-%(full)s.bz2'
+ ]]
];
def decompose_kernel_once(kernel):
- ##print "S<" + kernel + ">"
- for mapping in mappings:
- (suffix, becomes, is_full, patch_templates) = mapping
-
- params = {}
-
- match = re.search(r'^(.*)' + suffix, kernel)
- if not match:
- continue
-
- # Generate the parameters for the patches:
- # full => full kernel name
- # base => all but the matches suffix
- # minor => 2.n.m
- # major => 2.n
- # minor-prev => 2.n.m-1
- params['full'] = kernel
- params['base'] = match.group(1)
-
- match = re.search(r'^((\d+\.\d+)\.(\d+))', kernel)
- if not match:
- raise "unable to determine major/minor version"
- params['minor'] = match.group(1)
- params['major'] = match.group(2)
- params['minor-prev'] = match.group(2) + \
- '.%d' % (int(match.group(3)) - 1)
-
- # Build the new kernel and patch list.
- new_kernel = becomes % params
- patch_list = []
- for template in patch_templates:
- patch_list.append(template % params)
-
- return (is_full, new_kernel, patch_list)
-
- return (True, kernel, None)
+ ##print "S<" + kernel + ">"
+ for mapping in mappings:
+ (suffix, becomes, is_full, patch_templates) = mapping
+
+ params = {}
+
+ match = re.search(r'^(.*)' + suffix, kernel)
+ if not match:
+ continue
+
+ # Generate the parameters for the patches:
+ # full => full kernel name
+        # base => all but the matched suffix
+ # minor => 2.n.m
+ # major => 2.n
+ # minor-prev => 2.n.m-1
+ params['full'] = kernel
+ params['base'] = match.group(1)
+
+ match = re.search(r'^((\d+\.\d+)\.(\d+))', kernel)
+ if not match:
+ raise "unable to determine major/minor version"
+ params['minor'] = match.group(1)
+ params['major'] = match.group(2)
+ params['minor-prev'] = match.group(2) + \
+ '.%d' % (int(match.group(3)) - 1)
+
+ # Build the new kernel and patch list.
+ new_kernel = becomes % params
+ patch_list = []
+ for template in patch_templates:
+ patch_list.append(template % params)
+
+ return (is_full, new_kernel, patch_list)
+
+ return (True, kernel, None)
def decompose_kernel(kernel):
- kernel_patches = []
+ kernel_patches = []
- done = False
- while not done:
- (done, kernel, patch_list) = decompose_kernel_once(kernel)
- if patch_list:
- kernel_patches.insert(0, patch_list)
- if not len(kernel_patches):
- raise NameError('kernelexpand: %s: unknown kernel' % (kernel))
+ done = False
+ while not done:
+ (done, kernel, patch_list) = decompose_kernel_once(kernel)
+ if patch_list:
+ kernel_patches.insert(0, patch_list)
+ if not len(kernel_patches):
+ raise NameError('kernelexpand: %s: unknown kernel' % (kernel))
- return kernel_patches
+ return kernel_patches
# Look for and add potential mirrors.
def mirror_kernel_components(mirrors, components):
- new_components = []
- for component in components:
- new_patches = []
- for mirror in mirrors:
- (prefix, local) = mirror
- for patch in component:
- if patch.startswith(prefix):
- new_patch = local + \
- patch[len(prefix):]
- new_patches.append(new_patch)
- for patch in component:
- new_patches.append(patch)
- new_components.append(new_patches)
-
- return new_components
+ new_components = []
+ for component in components:
+ new_patches = []
+ for mirror in mirrors:
+ (prefix, local) = mirror
+ for patch in component:
+ if patch.startswith(prefix):
+ new_patch = local + \
+ patch[len(prefix):]
+ new_patches.append(new_patch)
+ for patch in component:
+ new_patches.append(patch)
+ new_components.append(new_patches)
+
+ return new_components
def url_accessible(url):
- status = os.system("wget --spider -q '%s'" % (url))
- #print url + ": status=%d" % (status)
-
- return status == 0
+ status = os.system("wget --spider -q '%s'" % (url))
+ #print url + ": status=%d" % (status)
+
+ return status == 0
def select_kernel_components(components):
- new_components = []
- for component in components:
- new_patches = []
- for patch in component:
- if url_accessible(patch):
- new_patches.append(patch)
- break
- if not len(new_patches):
- new_patches.append(component[-1])
- new_components.append(new_patches)
- return new_components
+ new_components = []
+ for component in components:
+ new_patches = []
+ for patch in component:
+ if url_accessible(patch):
+ new_patches.append(patch)
+ break
+ if not len(new_patches):
+ new_patches.append(component[-1])
+ new_components.append(new_patches)
+ return new_components
def expand_classic(kernel, mirrors):
- components = decompose_kernel(kernel)
- if mirrors:
- components = mirror_kernel_components(mirrors, components)
- components = select_kernel_components(components)
+ components = decompose_kernel(kernel)
+ if mirrors:
+ components = mirror_kernel_components(mirrors, components)
+ components = select_kernel_components(components)
- patches = []
- for component in components:
- patches.append(component[0])
+ patches = []
+ for component in components:
+ patches.append(component[0])
- return patches
+ return patches
if __name__ == '__main__':
- from optparse import OptionParser
-
- parser = OptionParser()
-
- parser.add_option("-m", "--mirror",
- type="string", dest="mirror", action="append", nargs=2,
- help="mirror prefix")
- parser.add_option("-v", "--no-validate", dest="validate",
- action="store_false", default=True,
- help="prune invalid entries")
-
- def usage():
- parser.print_help()
- sys.exit(1)
-
- options, args = parser.parse_args()
-
- # Check for a kernel version
- if len(args) != 1:
- usage()
- kernel = args[0]
-
- #mirrors = [
- # [ 'http://www.kernel.org/pub/linux/kernel/v2.4',
- # 'http://kernel.beaverton.ibm.com/mirror/v2.4' ],
- # [ 'http://www.kernel.org/pub/linux/kernel/v2.6',
- # 'http://kernel.beaverton.ibm.com/mirror/v2.6' ],
- # [ 'http://www.kernel.org/pub/linux/kernel/people/akpm/patches',
- # 'http://kernel.beaverton.ibm.com/mirror/akpm' ],
- #]
- mirrors = options.mirror
-
- try:
- components = decompose_kernel(kernel)
- except NameError, e:
- sys.stderr.write(e.args[0] + "\n")
- sys.exit(1)
-
- if mirrors:
- components = mirror_kernel_components(mirrors, components)
-
- if options.validate:
- components = select_kernel_components(components)
-
- # Dump them out.
- for component in components:
- print " ".join(component)
+ from optparse import OptionParser
+
+ parser = OptionParser()
+
+ parser.add_option("-m", "--mirror",
+ type="string", dest="mirror", action="append", nargs=2,
+ help="mirror prefix")
+ parser.add_option("-v", "--no-validate", dest="validate",
+ action="store_false", default=True,
+ help="prune invalid entries")
+
+ def usage():
+ parser.print_help()
+ sys.exit(1)
+
+ options, args = parser.parse_args()
+
+ # Check for a kernel version
+ if len(args) != 1:
+ usage()
+ kernel = args[0]
+
+ #mirrors = [
+ # [ 'http://www.kernel.org/pub/linux/kernel/v2.4',
+ # 'http://kernel.beaverton.ibm.com/mirror/v2.4' ],
+ # [ 'http://www.kernel.org/pub/linux/kernel/v2.6',
+ # 'http://kernel.beaverton.ibm.com/mirror/v2.6' ],
+ # [ 'http://www.kernel.org/pub/linux/kernel/people/akpm/patches',
+ # 'http://kernel.beaverton.ibm.com/mirror/akpm' ],
+ #]
+ mirrors = options.mirror
+
+ try:
+ components = decompose_kernel(kernel)
+ except NameError, e:
+ sys.stderr.write(e.args[0] + "\n")
+ sys.exit(1)
+
+ if mirrors:
+ components = mirror_kernel_components(mirrors, components)
+
+ if options.validate:
+ components = select_kernel_components(components)
+
+ # Dump them out.
+ for component in components:
+ print " ".join(component)
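
For reference, a minimal usage sketch of the module above (the mirror URL is a
placeholder, the import path assumes the usual autotest_lib layout, and the
wget-based validation in select_kernel_components is skipped so the example
runs offline):

    from autotest_lib.client.bin.kernelexpand import decompose_kernel, \
         mirror_kernel_components

    # '2.6.23-rc1-mm1' decomposes into ordered groups of candidate URLs;
    # the mirror is tried before kernel.org within every group.
    components = decompose_kernel('2.6.23-rc1-mm1')
    mirror = [('http://www.kernel.org/pub/linux/kernel/',
               'http://mirror.example.com/kernel/')]
    components = mirror_kernel_components(mirror, components)
    for component in components:
        print ' '.join(component)
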
diff --git a/client/bin/os_dep.py b/client/bin/os_dep.py
index f61d46d2..90229134 100644
--- a/client/bin/os_dep.py
+++ b/client/bin/os_dep.py
@@ -8,30 +8,30 @@ the same. With added distro-independant pixie dust.
"""
def command(cmd):
- # this could use '/usr/bin/which', I suppose. But this seems simpler
- for dir in os.environ['PATH'].split(':'):
- file = os.path.join(dir, cmd)
- if os.path.exists(file):
- return file
- raise ValueError('Missing command: %s' % cmd)
+ # this could use '/usr/bin/which', I suppose. But this seems simpler
+ for dir in os.environ['PATH'].split(':'):
+ file = os.path.join(dir, cmd)
+ if os.path.exists(file):
+ return file
+ raise ValueError('Missing command: %s' % cmd)
def commands(*cmds):
- results = []
- for cmd in cmds:
- results.append(command(cmd))
+ results = []
+ for cmd in cmds:
+ results.append(command(cmd))
def library(lib):
- lddirs = [x.rstrip() for x in open('/etc/ld.so.conf', 'r').readlines()]
- for dir in ['/lib', '/usr/lib'] + lddirs:
- file = os.path.join(dir, lib)
- if os.path.exists(file):
- return file
- raise ValueError('Missing library: %s' % lib)
+ lddirs = [x.rstrip() for x in open('/etc/ld.so.conf', 'r').readlines()]
+ for dir in ['/lib', '/usr/lib'] + lddirs:
+ file = os.path.join(dir, lib)
+ if os.path.exists(file):
+ return file
+ raise ValueError('Missing library: %s' % lib)
def libraries(*libs):
- results = []
- for lib in libs:
- results.append(library(lib))
+ results = []
+ for lib in libs:
+ results.append(library(lib))
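
A short usage sketch for the helpers above (tool and library names are only
examples; the import assumes the autotest_lib layout):

    from autotest_lib.client.bin import os_dep

    # Resolve build prerequisites up front so a test fails fast with a
    # clear ValueError instead of halfway through a build.
    gcc = os_dep.command('gcc')            # absolute path, e.g. /usr/bin/gcc
    os_dep.commands('make', 'patch')       # raises on the first missing tool
    aio = os_dep.library('libaio.so.1')    # searched in /lib, /usr/lib, ld.so.conf dirs
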
diff --git a/client/bin/package.py b/client/bin/package.py
index c889a9ca..42a86049 100644
--- a/client/bin/package.py
+++ b/client/bin/package.py
@@ -1,5 +1,5 @@
"""
-Functions to handle software packages. The functions covered here aim to be
+Functions to handle software packages. The functions covered here aim to be
generic, with implementations that deal with different package managers, such
as dpkg and rpm.
"""
@@ -15,277 +15,277 @@ KNOWN_PACKAGE_MANAGERS = ['rpm', 'dpkg']
def __rpm_info(rpm_package):
- """\
- Private function that returns a dictionary with information about an
- RPM package file
- - type: Package management program that handles the file
- - system_support: If the package management program is installed on the
- system or not
- - source: If it is a source (True) our binary (False) package
- - version: The package version (or name), that is used to check against the
- package manager if the package is installed
- - arch: The architecture for which a binary package was built
- - installed: Whether the package is installed (True) on the system or not
- (False)
- """
- # We will make good use of what the file command has to tell us about the
- # package :)
- file_result = utils.system_output('file ' + rpm_package)
- package_info = {}
- package_info['type'] = 'rpm'
- try:
- os_dep.command('rpm')
- # Build the command strings that will be used to get package info
- # s_cmd - Command to determine if package is a source package
- # a_cmd - Command to determine package architecture
- # v_cmd - Command to determine package version
- # i_cmd - Command to determiine if package is installed
- s_cmd = 'rpm -qp --qf %{SOURCE} ' + rpm_package + ' 2>/dev/null'
- a_cmd = 'rpm -qp --qf %{ARCH} ' + rpm_package + ' 2>/dev/null'
- v_cmd = 'rpm -qp ' + rpm_package + ' 2>/dev/null'
- i_cmd = 'rpm -q ' + utils.system_output(v_cmd) + ' 2>&1 >/dev/null'
-
- package_info['system_support'] = True
- # Checking whether this is a source or src package
- source = utils.system_output(s_cmd)
- if source == '(none)':
- package_info['source'] = False
- else:
- package_info['source'] = True
- package_info['version'] = utils.system_output(v_cmd)
- package_info['arch'] = utils.system_output(a_cmd)
- # Checking if package is installed
- try:
- utils.system(i_cmd)
- package_info['installed'] = True
- except:
- package_info['installed'] = False
-
- except:
- package_info['system_support'] = False
- package_info['installed'] = False
- # File gives a wealth of information about rpm packages.
- # However, we can't trust all this info, as incorrectly
- # packaged rpms can report some wrong values.
- # It's better than nothing though :)
- if len(file_result.split(' ')) == 6:
- # Figure if package is a source package
- if file_result.split(' ')[3] == 'src':
- package_info['source'] = True
- elif file_result.split(' ')[3] == 'bin':
- package_info['source'] = False
- else:
- package_info['source'] = False
- # Get architecture
- package_info['arch'] = file_result.split(' ')[4]
- # Get version
- package_info['version'] = file_result.split(' ')[5]
- elif len(file_result.split(' ')) == 5:
- # Figure if package is a source package
- if file_result.split(' ')[3] == 'src':
- package_info['source'] = True
- elif file_result.split(' ')[3] == 'bin':
- package_info['source'] = False
- else:
- package_info['source'] = False
- # When the arch param is missing on file, we assume noarch
- package_info['arch'] = 'noarch'
- # Get version
- package_info['version'] = file_result.split(' ')[4]
- else:
- # If everything else fails...
- package_info['source'] = False
- package_info['arch'] = 'Not Available'
- package_info['version'] = 'Not Available'
- return package_info
+ """\
+ Private function that returns a dictionary with information about an
+ RPM package file
+ - type: Package management program that handles the file
+ - system_support: If the package management program is installed on the
+ system or not
+    - source: If it is a source (True) or binary (False) package
+ - version: The package version (or name), that is used to check against the
+ package manager if the package is installed
+ - arch: The architecture for which a binary package was built
+ - installed: Whether the package is installed (True) on the system or not
+ (False)
+ """
+ # We will make good use of what the file command has to tell us about the
+ # package :)
+ file_result = utils.system_output('file ' + rpm_package)
+ package_info = {}
+ package_info['type'] = 'rpm'
+ try:
+ os_dep.command('rpm')
+ # Build the command strings that will be used to get package info
+ # s_cmd - Command to determine if package is a source package
+ # a_cmd - Command to determine package architecture
+ # v_cmd - Command to determine package version
+        # i_cmd - Command to determine if package is installed
+ s_cmd = 'rpm -qp --qf %{SOURCE} ' + rpm_package + ' 2>/dev/null'
+ a_cmd = 'rpm -qp --qf %{ARCH} ' + rpm_package + ' 2>/dev/null'
+ v_cmd = 'rpm -qp ' + rpm_package + ' 2>/dev/null'
+ i_cmd = 'rpm -q ' + utils.system_output(v_cmd) + ' 2>&1 >/dev/null'
+
+ package_info['system_support'] = True
+ # Checking whether this is a source or src package
+ source = utils.system_output(s_cmd)
+ if source == '(none)':
+ package_info['source'] = False
+ else:
+ package_info['source'] = True
+ package_info['version'] = utils.system_output(v_cmd)
+ package_info['arch'] = utils.system_output(a_cmd)
+ # Checking if package is installed
+ try:
+ utils.system(i_cmd)
+ package_info['installed'] = True
+ except:
+ package_info['installed'] = False
+
+ except:
+ package_info['system_support'] = False
+ package_info['installed'] = False
+ # File gives a wealth of information about rpm packages.
+ # However, we can't trust all this info, as incorrectly
+ # packaged rpms can report some wrong values.
+ # It's better than nothing though :)
+ if len(file_result.split(' ')) == 6:
+ # Figure if package is a source package
+ if file_result.split(' ')[3] == 'src':
+ package_info['source'] = True
+ elif file_result.split(' ')[3] == 'bin':
+ package_info['source'] = False
+ else:
+ package_info['source'] = False
+ # Get architecture
+ package_info['arch'] = file_result.split(' ')[4]
+ # Get version
+ package_info['version'] = file_result.split(' ')[5]
+ elif len(file_result.split(' ')) == 5:
+ # Figure if package is a source package
+ if file_result.split(' ')[3] == 'src':
+ package_info['source'] = True
+ elif file_result.split(' ')[3] == 'bin':
+ package_info['source'] = False
+ else:
+ package_info['source'] = False
+ # When the arch param is missing on file, we assume noarch
+ package_info['arch'] = 'noarch'
+ # Get version
+ package_info['version'] = file_result.split(' ')[4]
+ else:
+ # If everything else fails...
+ package_info['source'] = False
+ package_info['arch'] = 'Not Available'
+ package_info['version'] = 'Not Available'
+ return package_info
def __dpkg_info(dpkg_package):
- """\
- Private function that returns a dictionary with information about a
- dpkg package file
- - type: Package management program that handles the file
- - system_support: If the package management program is installed on the
- system or not
- - source: If it is a source (True) our binary (False) package
- - version: The package version (or name), that is used to check against the
- package manager if the package is installed
- - arch: The architecture for which a binary package was built
- - installed: Whether the package is installed (True) on the system or not
- (False)
- """
- # We will make good use of what the file command has to tell us about the
- # package :)
- file_result = utils.system_output('file ' + dpkg_package)
- package_info = {}
- package_info['type'] = 'dpkg'
- # There's no single debian source package as is the case
- # with RPM
- package_info['source'] = False
- try:
- os_dep.command('dpkg')
- # Build the command strings that will be used to get package info
- # a_cmd - Command to determine package architecture
- # v_cmd - Command to determine package version
- # i_cmd - Command to determiine if package is installed
- a_cmd = 'dpkg -f ' + dpkg_package + ' Architecture 2>/dev/null'
- v_cmd = 'dpkg -f ' + dpkg_package + ' Package 2>/dev/null'
- i_cmd = 'dpkg -s ' + utils.system_output(v_cmd) + ' 2>/dev/null'
-
- package_info['system_support'] = True
- package_info['version'] = utils.system_output(v_cmd)
- package_info['arch'] = utils.system_output(a_cmd)
- # Checking if package is installed
- package_status = utils.system_output(i_cmd, ignore_status=True)
- not_inst_pattern = re.compile('not-installed', re.IGNORECASE)
- dpkg_not_installed = re.search(not_inst_pattern, package_status)
- if dpkg_not_installed:
- package_info['installed'] = False
- else:
- package_info['installed'] = True
-
- except:
- package_info['system_support'] = False
- package_info['installed'] = False
- # The output of file is not as generous for dpkg files as
- # it is with rpm files
- package_info['arch'] = 'Not Available'
- package_info['version'] = 'Not Available'
-
- return package_info
+ """\
+ Private function that returns a dictionary with information about a
+ dpkg package file
+ - type: Package management program that handles the file
+ - system_support: If the package management program is installed on the
+ system or not
+    - source: If it is a source (True) or binary (False) package
+ - version: The package version (or name), that is used to check against the
+ package manager if the package is installed
+ - arch: The architecture for which a binary package was built
+ - installed: Whether the package is installed (True) on the system or not
+ (False)
+ """
+ # We will make good use of what the file command has to tell us about the
+ # package :)
+ file_result = utils.system_output('file ' + dpkg_package)
+ package_info = {}
+ package_info['type'] = 'dpkg'
+ # There's no single debian source package as is the case
+ # with RPM
+ package_info['source'] = False
+ try:
+ os_dep.command('dpkg')
+ # Build the command strings that will be used to get package info
+ # a_cmd - Command to determine package architecture
+ # v_cmd - Command to determine package version
+        # i_cmd - Command to determine if package is installed
+ a_cmd = 'dpkg -f ' + dpkg_package + ' Architecture 2>/dev/null'
+ v_cmd = 'dpkg -f ' + dpkg_package + ' Package 2>/dev/null'
+ i_cmd = 'dpkg -s ' + utils.system_output(v_cmd) + ' 2>/dev/null'
+
+ package_info['system_support'] = True
+ package_info['version'] = utils.system_output(v_cmd)
+ package_info['arch'] = utils.system_output(a_cmd)
+ # Checking if package is installed
+ package_status = utils.system_output(i_cmd, ignore_status=True)
+ not_inst_pattern = re.compile('not-installed', re.IGNORECASE)
+ dpkg_not_installed = re.search(not_inst_pattern, package_status)
+ if dpkg_not_installed:
+ package_info['installed'] = False
+ else:
+ package_info['installed'] = True
+
+ except:
+ package_info['system_support'] = False
+ package_info['installed'] = False
+ # The output of file is not as generous for dpkg files as
+ # it is with rpm files
+ package_info['arch'] = 'Not Available'
+ package_info['version'] = 'Not Available'
+
+ return package_info
def info(package):
- """\
- Returns a dictionary with package information about a given package file:
- - type: Package management program that handles the file
- - system_support: If the package management program is installed on the
- system or not
- - source: If it is a source (True) our binary (False) package
- - version: The package version (or name), that is used to check against the
- package manager if the package is installed
- - arch: The architecture for which a binary package was built
- - installed: Whether the package is installed (True) on the system or not
- (False)
-
- Implemented package types:
- - 'dpkg' - dpkg (debian, ubuntu) package files
- - 'rpm' - rpm (red hat, suse) package files
- Raises an exception if the package type is not one of the implemented
- package types.
- """
- if not os.path.isfile(package):
- raise ValueError('invalid file %s to verify' % package)
- # Use file and libmagic to determine the actual package file type.
- file_result = utils.system_output('file ' + package)
- for package_manager in KNOWN_PACKAGE_MANAGERS:
- if package_manager == 'rpm':
- package_pattern = re.compile('RPM', re.IGNORECASE)
- elif package_manager == 'dpkg':
- package_pattern = re.compile('Debian', re.IGNORECASE)
-
- result = re.search(package_pattern, file_result)
-
- if result and package_manager == 'rpm':
- return __rpm_info(package)
- elif result and package_manager == 'dpkg':
- return __dpkg_info(package)
-
- # If it's not one of the implemented package manager methods, there's
- # not much that can be done, hence we throw an exception.
- raise error.PackageError('Unknown package type %s' % file_result)
+ """\
+ Returns a dictionary with package information about a given package file:
+ - type: Package management program that handles the file
+ - system_support: If the package management program is installed on the
+ system or not
+    - source: If it is a source (True) or binary (False) package
+ - version: The package version (or name), that is used to check against the
+ package manager if the package is installed
+ - arch: The architecture for which a binary package was built
+ - installed: Whether the package is installed (True) on the system or not
+ (False)
+
+ Implemented package types:
+ - 'dpkg' - dpkg (debian, ubuntu) package files
+ - 'rpm' - rpm (red hat, suse) package files
+ Raises an exception if the package type is not one of the implemented
+ package types.
+ """
+ if not os.path.isfile(package):
+ raise ValueError('invalid file %s to verify' % package)
+ # Use file and libmagic to determine the actual package file type.
+ file_result = utils.system_output('file ' + package)
+ for package_manager in KNOWN_PACKAGE_MANAGERS:
+ if package_manager == 'rpm':
+ package_pattern = re.compile('RPM', re.IGNORECASE)
+ elif package_manager == 'dpkg':
+ package_pattern = re.compile('Debian', re.IGNORECASE)
+
+ result = re.search(package_pattern, file_result)
+
+ if result and package_manager == 'rpm':
+ return __rpm_info(package)
+ elif result and package_manager == 'dpkg':
+ return __dpkg_info(package)
+
+ # If it's not one of the implemented package manager methods, there's
+ # not much that can be done, hence we throw an exception.
+ raise error.PackageError('Unknown package type %s' % file_result)
def install(package, nodeps = False):
- """\
- Tries to install a package file. If the package is already installed,
- it prints a message to the user and ends gracefully. If nodeps is set to
- true, it will ignore package dependencies.
- """
- my_package_info = info(package)
- type = my_package_info['type']
- system_support = my_package_info['system_support']
- source = my_package_info['source']
- installed = my_package_info['installed']
-
- if not system_support:
- e_msg = 'Client does not have package manager %s to handle %s install' \
- % (type, package)
- raise error.PackageError(e_msg)
-
- opt_args = ''
- if type == 'rpm':
- if nodeps:
- opt_args = opt_args + '--nodeps'
- install_command = 'rpm %s -U %s' % (opt_args, package)
- if type == 'dpkg':
- if nodeps:
- opt_args = opt_args + '--force-depends'
- install_command = 'dpkg %s -i %s' % (opt_args, package)
-
- # RPM source packages can be installed along with the binary versions
- # with this check
- if installed and not source:
- return 'Package %s is already installed' % package
-
- # At this point, the most likely thing to go wrong is that there are
- # unmet dependencies for the package. We won't cover this case, at
- # least for now.
- utils.system(install_command)
- return 'Package %s was installed successfuly' % package
+ """\
+ Tries to install a package file. If the package is already installed,
+ it prints a message to the user and ends gracefully. If nodeps is set to
+ true, it will ignore package dependencies.
+ """
+ my_package_info = info(package)
+ type = my_package_info['type']
+ system_support = my_package_info['system_support']
+ source = my_package_info['source']
+ installed = my_package_info['installed']
+
+ if not system_support:
+ e_msg = 'Client does not have package manager %s to handle %s install' \
+ % (type, package)
+ raise error.PackageError(e_msg)
+
+ opt_args = ''
+ if type == 'rpm':
+ if nodeps:
+ opt_args = opt_args + '--nodeps'
+ install_command = 'rpm %s -U %s' % (opt_args, package)
+ if type == 'dpkg':
+ if nodeps:
+ opt_args = opt_args + '--force-depends'
+ install_command = 'dpkg %s -i %s' % (opt_args, package)
+
+ # RPM source packages can be installed along with the binary versions
+ # with this check
+ if installed and not source:
+ return 'Package %s is already installed' % package
+
+ # At this point, the most likely thing to go wrong is that there are
+ # unmet dependencies for the package. We won't cover this case, at
+ # least for now.
+ utils.system(install_command)
+    return 'Package %s was installed successfully' % package
def convert(package, destination_format):
- """\
- Convert packages with the 'alien' utility. If alien is not installed, it
- throws a NotImplementedError exception.
- returns: filename of the package generated.
- """
- try:
- os_dep.command('alien')
- except:
- e_msg = 'Cannot convert to %s, alien not installed' % destination_format
- raise error.TestError(e_msg)
-
- # alien supports converting to many formats, but its interesting to map
- # convertions only for the implemented package types.
- if destination_format == 'dpkg':
- deb_pattern = re.compile('[A-Za-z0-9_.-]*[.][d][e][b]')
- conv_output = utils.system_output('alien --to-deb %s 2>/dev/null' % package)
- converted_package = re.findall(deb_pattern, conv_output)[0]
- elif destination_format == 'rpm':
- rpm_pattern = re.compile('[A-Za-z0-9_.-]*[.][r][p][m]')
- conv_output = utils.system_output('alien --to-rpm %s 2>/dev/null' % package)
- converted_package = re.findall(rpm_pattern, conv_output)[0]
- else:
- e_msg = 'Convertion to format %s not implemented' % destination_format
- raise NotImplementedError(e_msg)
-
- print 'Package %s successfuly converted to %s' % \
- (os.path.basename(package), os.path.basename(converted_package))
- return os.path.abspath(converted_package)
+ """\
+ Convert packages with the 'alien' utility. If alien is not installed, it
+ throws a NotImplementedError exception.
+ returns: filename of the package generated.
+ """
+ try:
+ os_dep.command('alien')
+ except:
+ e_msg = 'Cannot convert to %s, alien not installed' % destination_format
+ raise error.TestError(e_msg)
+
+    # alien supports converting to many formats, but we only map
+    # conversions for the implemented package types.
+ if destination_format == 'dpkg':
+ deb_pattern = re.compile('[A-Za-z0-9_.-]*[.][d][e][b]')
+ conv_output = utils.system_output('alien --to-deb %s 2>/dev/null' % package)
+ converted_package = re.findall(deb_pattern, conv_output)[0]
+ elif destination_format == 'rpm':
+ rpm_pattern = re.compile('[A-Za-z0-9_.-]*[.][r][p][m]')
+ conv_output = utils.system_output('alien --to-rpm %s 2>/dev/null' % package)
+ converted_package = re.findall(rpm_pattern, conv_output)[0]
+ else:
+        e_msg = 'Conversion to format %s not implemented' % destination_format
+ raise NotImplementedError(e_msg)
+
+    print 'Package %s successfully converted to %s' % \
+ (os.path.basename(package), os.path.basename(converted_package))
+ return os.path.abspath(converted_package)
def os_support():
- """\
- Returns a dictionary with host os package support info:
- - rpm: True if system supports rpm packages, False otherwise
- - dpkg: True if system supports dpkg packages, False otherwise
- - conversion: True if the system can convert packages (alien installed),
- or False otherwise
- """
- support_info = {}
- for package_manager in KNOWN_PACKAGE_MANAGERS:
- try:
- os_dep.command(package_manager)
- support_info[package_manager] = True
- except:
- support_info[package_manager] = False
-
- try:
- os_dep.command('alien')
- support_info['conversion'] = True
- except:
- support_info['conversion'] = False
-
- return support_info
+ """\
+ Returns a dictionary with host os package support info:
+ - rpm: True if system supports rpm packages, False otherwise
+ - dpkg: True if system supports dpkg packages, False otherwise
+ - conversion: True if the system can convert packages (alien installed),
+ or False otherwise
+ """
+ support_info = {}
+ for package_manager in KNOWN_PACKAGE_MANAGERS:
+ try:
+ os_dep.command(package_manager)
+ support_info[package_manager] = True
+ except:
+ support_info[package_manager] = False
+
+ try:
+ os_dep.command('alien')
+ support_info['conversion'] = True
+ except:
+ support_info['conversion'] = False
+
+ return support_info
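
A hedged sketch of the public helpers above in use (the .rpm path is
hypothetical):

    from autotest_lib.client.bin import package

    support = package.os_support()
    if support['rpm']:
        pkg = '/tmp/example-1.0-1.noarch.rpm'    # hypothetical package file
        details = package.info(pkg)
        if not details['installed']:
            print package.install(pkg, nodeps=True)
        if support['conversion']:
            print 'wrote', package.convert(pkg, 'dpkg')
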
diff --git a/client/bin/parallel.py b/client/bin/parallel.py
index a95b6439..13a8b51c 100644
--- a/client/bin/parallel.py
+++ b/client/bin/parallel.py
@@ -6,42 +6,42 @@ import sys, os, pickle
from autotest_lib.client.common_lib import error
def fork_start(tmp, l):
- sys.stdout.flush()
- sys.stderr.flush()
- pid = os.fork()
- if pid:
- # Parent
- return pid
+ sys.stdout.flush()
+ sys.stderr.flush()
+ pid = os.fork()
+ if pid:
+ # Parent
+ return pid
- try:
- try:
- l()
+ try:
+ try:
+ l()
- except error.AutotestError:
- raise
+ except error.AutotestError:
+ raise
- except:
- raise error.UnhandledError("test failed and threw:\n")
+ except:
+ raise error.UnhandledError("test failed and threw:\n")
- except Exception, detail:
- ename = tmp + "/debug/error-%d" % (os.getpid())
- pickle.dump(detail, open(ename, "w"))
+ except Exception, detail:
+ ename = tmp + "/debug/error-%d" % (os.getpid())
+ pickle.dump(detail, open(ename, "w"))
- sys.stdout.flush()
- sys.stderr.flush()
- os._exit(1)
+ sys.stdout.flush()
+ sys.stderr.flush()
+ os._exit(1)
- sys.stdout.flush()
- sys.stderr.flush()
- os._exit(0)
+ sys.stdout.flush()
+ sys.stderr.flush()
+ os._exit(0)
def fork_waitfor(tmp, pid):
- (pid, status) = os.waitpid(pid, 0)
+ (pid, status) = os.waitpid(pid, 0)
- ename = tmp + "/debug/error-%d" % pid
- if (os.path.exists(ename)):
- raise pickle.load(file(ename, 'r'))
+ ename = tmp + "/debug/error-%d" % pid
+ if (os.path.exists(ename)):
+ raise pickle.load(file(ename, 'r'))
- if (status != 0):
- raise error.TestError("test failed rc=%d" % (status))
+ if (status != 0):
+ raise error.TestError("test failed rc=%d" % (status))
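
Sketch of how fork_start/fork_waitfor pair up (the tmp path is illustrative
and must already contain a debug/ subdirectory, which is where a child's
pickled exception lands):

    from autotest_lib.client.bin import parallel

    def child_work():
        # any callable; non-Autotest exceptions are wrapped in
        # error.UnhandledError, pickled, and re-raised in the parent
        open('/tmp/child_was_here', 'w').close()

    tmp = '/tmp/autotest_job'                  # illustrative job tmp dir
    pid = parallel.fork_start(tmp, child_work)
    parallel.fork_waitfor(tmp, pid)            # blocks; re-raises child errors
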
diff --git a/client/bin/profiler.py b/client/bin/profiler.py
index b9195428..ff821f9f 100755
--- a/client/bin/profiler.py
+++ b/client/bin/profiler.py
@@ -1,25 +1,24 @@
class profiler:
- preserve_srcdir = False
+ preserve_srcdir = False
- def __init__(self, job):
- self.job = job
+ def __init__(self, job):
+ self.job = job
- def setup(self, *args):
- return
+ def setup(self, *args):
+ return
- def initialize(self, *args):
- return
+ def initialize(self, *args):
+ return
- def start(self, test):
- return
+ def start(self, test):
+ return
- def stop(self, test):
- return
+ def stop(self, test):
+ return
- def report(self, test):
- return
-
+ def report(self, test):
+ return
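
A minimal sketch of what a concrete profiler built on this base class looks
like, modelled loosely on the vmstat-style profilers under client/profilers
(the class name and subprocess handling here are illustrative, not the
shipped implementation):

    import os, signal, subprocess
    import profiler

    class vmstat_sketch(profiler.profiler):
        version = 1

        def initialize(self, interval=1):
            self.interval = interval

        def start(self, test):
            # log vmstat output into the test's profiling directory
            logfile = open(os.path.join(test.profdir, 'vmstat'), 'w')
            self.child = subprocess.Popen(['vmstat', str(self.interval)],
                                          stdout=logfile,
                                          stderr=subprocess.STDOUT)

        def stop(self, test):
            os.kill(self.child.pid, signal.SIGTERM)

        def report(self, test):
            return None
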
diff --git a/client/bin/profilers.py b/client/bin/profilers.py
index c0800219..298cba95 100755
--- a/client/bin/profilers.py
+++ b/client/bin/profilers.py
@@ -4,71 +4,71 @@ from autotest_lib.client.common_lib import error, utils
class profilers:
- def __init__(self, job):
- self.job = job
- self.list = []
- self.profdir = job.autodir + '/profilers'
- self.tmpdir = job.tmpdir
- self.profile_run_only = False
+ def __init__(self, job):
+ self.job = job
+ self.list = []
+ self.profdir = job.autodir + '/profilers'
+ self.tmpdir = job.tmpdir
+ self.profile_run_only = False
- # add a profiler
- def add(self, profiler, *args, **dargs):
- try:
- sys.path.insert(0, self.job.profdir + '/' + profiler)
- exec 'import ' + profiler
- exec 'newprofiler = %s.%s(self)' % (profiler, profiler)
- finally:
- sys.path.pop(0)
- newprofiler.name = profiler
- newprofiler.bindir = self.profdir + '/' + profiler
- newprofiler.srcdir = newprofiler.bindir + '/src'
- newprofiler.tmpdir = self.tmpdir + '/' + profiler
- utils.update_version(newprofiler.srcdir, newprofiler.preserve_srcdir,
- newprofiler.version, newprofiler.setup,
- *args, **dargs)
- newprofiler.initialize(*args, **dargs)
- self.list.append(newprofiler)
+ # add a profiler
+ def add(self, profiler, *args, **dargs):
+ try:
+ sys.path.insert(0, self.job.profdir + '/' + profiler)
+ exec 'import ' + profiler
+ exec 'newprofiler = %s.%s(self)' % (profiler, profiler)
+ finally:
+ sys.path.pop(0)
+ newprofiler.name = profiler
+ newprofiler.bindir = self.profdir + '/' + profiler
+ newprofiler.srcdir = newprofiler.bindir + '/src'
+ newprofiler.tmpdir = self.tmpdir + '/' + profiler
+ utils.update_version(newprofiler.srcdir, newprofiler.preserve_srcdir,
+ newprofiler.version, newprofiler.setup,
+ *args, **dargs)
+ newprofiler.initialize(*args, **dargs)
+ self.list.append(newprofiler)
- # remove a profiler
- def delete(self, profiler):
- nukeme = None
- for p in self.list:
- if (p.name == profiler):
- nukeme = p
- self.list.remove(p)
+ # remove a profiler
+ def delete(self, profiler):
+ nukeme = None
+ for p in self.list:
+ if (p.name == profiler):
+ nukeme = p
+ self.list.remove(p)
- # are any profilers enabled ?
- def present(self):
- if self.list:
- return 1
- else:
- return 0
+ # are any profilers enabled ?
+ def present(self):
+ if self.list:
+ return 1
+ else:
+ return 0
- # Returns True if job is supposed to be run only with profiling turned
- # on, False otherwise
- def only(self):
- return self.profile_run_only
+ # Returns True if job is supposed to be run only with profiling turned
+ # on, False otherwise
+ def only(self):
+ return self.profile_run_only
- # Changes the flag which determines whether or not the job is to be
- # run without profilers at all
- def set_only(self, value):
- self.profile_run_only = value
+ # Changes the flag which determines whether or not the job is to be
+ # run without profilers at all
+ def set_only(self, value):
+ self.profile_run_only = value
- # Start all enabled profilers
- def start(self, test):
- for p in self.list:
- p.start(test)
+ # Start all enabled profilers
+ def start(self, test):
+ for p in self.list:
+ p.start(test)
- # Stop all enabled profilers
- def stop(self, test):
- for p in self.list:
- p.stop(test)
+ # Stop all enabled profilers
+ def stop(self, test):
+ for p in self.list:
+ p.stop(test)
- # Report on all enabled profilers
- def report(self, test):
- for p in self.list:
- p.report(test)
+ # Report on all enabled profilers
+ def report(self, test):
+ for p in self.list:
+ p.report(test)
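
In a client control file, where the harness supplies the job object, the
class above is typically exercised like this (profiler and test names are
only examples):

    job.profilers.add('oprofile')
    job.run_test('kernbench', iterations=2)    # start/stop/report run around the test
    job.profilers.delete('oprofile')
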
diff --git a/client/bin/sysinfo.py b/client/bin/sysinfo.py
index 2f605ea9..107229ca 100755
--- a/client/bin/sysinfo.py
+++ b/client/bin/sysinfo.py
@@ -6,14 +6,14 @@ import os, shutil, re, glob
from autotest_lib.client.common_lib import utils
try:
- from autotest_lib.client.bin import site_sysinfo
- local = True
+ from autotest_lib.client.bin import site_sysinfo
+ local = True
except ImportError:
- local = False
+ local = False
# stuff to log per reboot
-files = ['/proc/pci', '/proc/meminfo', '/proc/slabinfo', '/proc/version',
- '/proc/cpuinfo', '/proc/cmdline', '/proc/modules']
+files = ['/proc/pci', '/proc/meminfo', '/proc/slabinfo', '/proc/version',
+ '/proc/cpuinfo', '/proc/cmdline', '/proc/modules']
# commands = ['lshw'] # this causes problems triggering CDROM drives
commands = ['uname -a', 'lspci -vvn', 'gcc --version', 'ld --version',
'mount', 'hostname']
@@ -21,84 +21,84 @@ path = ['/usr/bin', '/bin']
def run_command(command, output):
- parts = command.split(None, 1)
- cmd = parts[0]
- if len(parts) > 1:
- args = parts[1]
- else:
- args = ''
- for dir in path:
- pathname = dir + '/' + cmd
- if not os.path.exists(pathname):
- continue
- tmp_cmd = "%s %s > %s 2> /dev/null" % (pathname, args, output)
- utils.system(tmp_cmd)
+ parts = command.split(None, 1)
+ cmd = parts[0]
+ if len(parts) > 1:
+ args = parts[1]
+ else:
+ args = ''
+ for dir in path:
+ pathname = dir + '/' + cmd
+ if not os.path.exists(pathname):
+ continue
+ tmp_cmd = "%s %s > %s 2> /dev/null" % (pathname, args, output)
+ utils.system(tmp_cmd)
def reboot_count():
- if not glob.glob('*'):
- return -1 # No reboots, initial data not logged
- else:
- return len(glob.glob('reboot*'))
-
-
+ if not glob.glob('*'):
+ return -1 # No reboots, initial data not logged
+ else:
+ return len(glob.glob('reboot*'))
+
+
def boot_subdir(reboot_count):
- """subdir of job sysinfo"""
- if reboot_count == 0:
- return '.'
- else:
- return 'reboot%d' % reboot_count
+ """subdir of job sysinfo"""
+ if reboot_count == 0:
+ return '.'
+ else:
+ return 'reboot%d' % reboot_count
def log_per_reboot_data(sysinfo_dir):
- """we log this data when the job starts, and again after any reboot"""
- pwd = os.getcwd()
- try:
- os.chdir(sysinfo_dir)
- subdir = boot_subdir(reboot_count() + 1)
- if not os.path.exists(subdir):
- os.mkdir(subdir)
- os.chdir(os.path.join(sysinfo_dir, subdir))
- _log_per_reboot_data()
- finally:
- os.chdir(pwd)
+ """we log this data when the job starts, and again after any reboot"""
+ pwd = os.getcwd()
+ try:
+ os.chdir(sysinfo_dir)
+ subdir = boot_subdir(reboot_count() + 1)
+ if not os.path.exists(subdir):
+ os.mkdir(subdir)
+ os.chdir(os.path.join(sysinfo_dir, subdir))
+ _log_per_reboot_data()
+ finally:
+ os.chdir(pwd)
def _log_per_reboot_data():
- """system info to log before each step of the job"""
- for command in commands:
- run_command(command, re.sub(r'\s', '_', command))
+ """system info to log before each step of the job"""
+ for command in commands:
+ run_command(command, re.sub(r'\s', '_', command))
- for file in files:
- if (os.path.exists(file)):
- shutil.copyfile(file, os.path.basename(file))
+ for file in files:
+ if (os.path.exists(file)):
+ shutil.copyfile(file, os.path.basename(file))
- utils.system('dmesg -c > dmesg', ignore_status=True)
- utils.system('df -mP > df', ignore_status=True)
- if local:
- site_sysinfo.log_per_reboot_data()
+ utils.system('dmesg -c > dmesg', ignore_status=True)
+ utils.system('df -mP > df', ignore_status=True)
+ if local:
+ site_sysinfo.log_per_reboot_data()
def log_after_each_test(test_sysinfo_dir, job_sysinfo_dir):
- """log things that change after each test (called from test.py)"""
- pwd = os.getcwd()
- try:
- os.chdir(job_sysinfo_dir)
- reboot_subdir = boot_subdir(reboot_count())
- reboot_dir = os.path.join(job_sysinfo_dir, reboot_subdir)
- assert os.path.exists(reboot_dir)
-
- os.makedirs(test_sysinfo_dir)
- os.chdir(test_sysinfo_dir)
- utils.system('ln -s %s reboot_current' % reboot_dir)
-
- utils.system('dmesg -c > dmesg', ignore_status=True)
- utils.system('df -mP > df', ignore_status=True)
- if local:
- site_sysinfo.log_after_each_test()
- finally:
- os.chdir(pwd)
-
-
+ """log things that change after each test (called from test.py)"""
+ pwd = os.getcwd()
+ try:
+ os.chdir(job_sysinfo_dir)
+ reboot_subdir = boot_subdir(reboot_count())
+ reboot_dir = os.path.join(job_sysinfo_dir, reboot_subdir)
+ assert os.path.exists(reboot_dir)
+
+ os.makedirs(test_sysinfo_dir)
+ os.chdir(test_sysinfo_dir)
+ utils.system('ln -s %s reboot_current' % reboot_dir)
+
+ utils.system('dmesg -c > dmesg', ignore_status=True)
+ utils.system('df -mP > df', ignore_status=True)
+ if local:
+ site_sysinfo.log_after_each_test()
+ finally:
+ os.chdir(pwd)
+
+
if __name__ == '__main__':
- log_per_reboot_data()
+ log_per_reboot_data()
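
Illustrative call (the results directory is hypothetical); the job invokes the
same helper again after each reboot, so every boot ends up with its own
reboot<N>/ snapshot:

    from autotest_lib.client.bin import sysinfo

    sysinfo.log_per_reboot_data('/usr/local/autotest/results/default/sysinfo')
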
diff --git a/client/bin/test.py b/client/bin/test.py
index 78dbdd1a..5d5d4f94 100755
--- a/client/bin/test.py
+++ b/client/bin/test.py
@@ -3,20 +3,20 @@
# Shell class for a test, inherited by all individual tests
#
# Methods:
-# __init__ initialise
-# initialize run once for each job
-# setup run once for each new version of the test installed
-# run run the test (wrapped by job.run_test())
+# __init__ initialise
+# initialize run once for each job
+# setup run once for each new version of the test installed
+# run run the test (wrapped by job.run_test())
#
# Data:
-# job backreference to the job this test instance is part of
-# outputdir eg. results/<job>/<testname.tag>
-# resultsdir eg. results/<job>/<testname.tag>/results
-# profdir eg. results/<job>/<testname.tag>/profiling
-# debugdir eg. results/<job>/<testname.tag>/debug
-# bindir eg. tests/<test>
-# src eg. tests/<test>/src
-# tmpdir eg. tmp/<testname.tag>
+# job backreference to the job this test instance is part of
+# outputdir eg. results/<job>/<testname.tag>
+# resultsdir eg. results/<job>/<testname.tag>/results
+# profdir eg. results/<job>/<testname.tag>/profiling
+# debugdir eg. results/<job>/<testname.tag>/debug
+# bindir eg. tests/<test>
+# src eg. tests/<test>/src
+# tmpdir eg. tmp/<testname.tag>
import os, traceback
@@ -26,23 +26,23 @@ from autotest_lib.client.bin import sysinfo
class test(common_test.base_test):
- pass
+ pass
testname = common_test.testname
def _grab_sysinfo(mytest):
- try:
- sysinfo_dir = os.path.join(mytest.outputdir, 'sysinfo')
- sysinfo.log_after_each_test(sysinfo_dir, mytest.job.sysinfodir)
- if os.path.exists(mytest.tmpdir):
- utils.system('rm -rf ' + mytest.tmpdir)
- except:
- print 'after-test error:'
- traceback.print_exc(file=sys.stdout)
+ try:
+ sysinfo_dir = os.path.join(mytest.outputdir, 'sysinfo')
+ sysinfo.log_after_each_test(sysinfo_dir, mytest.job.sysinfodir)
+ if os.path.exists(mytest.tmpdir):
+ utils.system('rm -rf ' + mytest.tmpdir)
+ except:
+ print 'after-test error:'
+ traceback.print_exc(file=sys.stdout)
def runtest(job, url, tag, args, dargs):
- common_test.runtest(job, url, tag, args, dargs,
- locals(), globals(), _grab_sysinfo)
+ common_test.runtest(job, url, tag, args, dargs,
+ locals(), globals(), _grab_sysinfo)
diff --git a/client/bin/test_config.py b/client/bin/test_config.py
index e9268138..d8a53370 100644
--- a/client/bin/test_config.py
+++ b/client/bin/test_config.py
@@ -13,79 +13,79 @@ from autotest_lib.client.common_lib import utils
__all__ = ['config_loader']
class config_loader:
- """Base class of the configuration parser"""
- def __init__(self, cfg, tmpdir = '/tmp'):
- """\
- Instantiate ConfigParser and provide the file like object that we'll
- use to read configuration data from.
- Args:
- * cfg: Where we'll get configuration data. It can be either:
- * A URL containing the file
- * A valid file path inside the filesystem
- * A string containing configuration data
- * tmpdir: Where we'll dump the temporary conf files. The default
- is the /tmp directory.
- """
- # Base Parser
- self.parser = ConfigParser()
- # File is already a file like object
- if hasattr(cfg, 'read'):
- self.cfg = cfg
- self.parser.readfp(self.cfg)
- elif isinstance(cfg, types.StringTypes):
- # Config file is a URL. Download it to a temp dir
- if cfg.startswith('http') or cfg.startswith('ftp'):
- self.cfg = path.join(tmpdir, path.basename(cfg))
- utils.urlretrieve(cfg, self.cfg)
- self.parser.read(self.cfg)
- # Config is a valid filesystem path to a file.
- elif path.exists(path.abspath(cfg)):
- if path.isfile(cfg):
- self.cfg = path.abspath(cfg)
- self.parser.read(self.cfg)
- else:
- e_msg = 'Invalid config file path: %s' % cfg
- raise IOError(e_msg)
- # Config file is just a string, convert it to a python file like
- # object using StringIO
- else:
- self.cfg = StringIO(cfg)
- self.parser.readfp(self.cfg)
+ """Base class of the configuration parser"""
+ def __init__(self, cfg, tmpdir = '/tmp'):
+ """\
+ Instantiate ConfigParser and provide the file like object that we'll
+ use to read configuration data from.
+ Args:
+ * cfg: Where we'll get configuration data. It can be either:
+ * A URL containing the file
+ * A valid file path inside the filesystem
+ * A string containing configuration data
+ * tmpdir: Where we'll dump the temporary conf files. The default
+ is the /tmp directory.
+ """
+ # Base Parser
+ self.parser = ConfigParser()
+ # File is already a file like object
+ if hasattr(cfg, 'read'):
+ self.cfg = cfg
+ self.parser.readfp(self.cfg)
+ elif isinstance(cfg, types.StringTypes):
+ # Config file is a URL. Download it to a temp dir
+ if cfg.startswith('http') or cfg.startswith('ftp'):
+ self.cfg = path.join(tmpdir, path.basename(cfg))
+ utils.urlretrieve(cfg, self.cfg)
+ self.parser.read(self.cfg)
+ # Config is a valid filesystem path to a file.
+ elif path.exists(path.abspath(cfg)):
+ if path.isfile(cfg):
+ self.cfg = path.abspath(cfg)
+ self.parser.read(self.cfg)
+ else:
+ e_msg = 'Invalid config file path: %s' % cfg
+ raise IOError(e_msg)
+ # Config file is just a string, convert it to a python file like
+ # object using StringIO
+ else:
+ self.cfg = StringIO(cfg)
+ self.parser.readfp(self.cfg)
- def get(self, section, name, default=None):
- """Get the value of a option.
+ def get(self, section, name, default=None):
+        """Get the value of an option.
- Section of the config file and the option name.
- You can pass a default value if the option doesn't exist.
- """
- if not self.parser.has_option(section, name):
- return default
- return self.parser.get(section, name)
+ Section of the config file and the option name.
+ You can pass a default value if the option doesn't exist.
+ """
+ if not self.parser.has_option(section, name):
+ return default
+ return self.parser.get(section, name)
- def set(self, section, option, value):
- """Set an option.
+ def set(self, section, option, value):
+ """Set an option.
- This change is not persistent unless saved with 'save()'.
- """
- if not self.parser.has_section(section):
- self.parser.add_section(section)
- return self.parser.set(section, name, value)
+ This change is not persistent unless saved with 'save()'.
+ """
+ if not self.parser.has_section(section):
+ self.parser.add_section(section)
+        return self.parser.set(section, option, value)
- def remove(self, section, name):
- """Remove an option."""
- if self.parser.has_section(section):
- self.parser.remove_option(section, name)
+ def remove(self, section, name):
+ """Remove an option."""
+ if self.parser.has_section(section):
+ self.parser.remove_option(section, name)
- def save(self):
- """Save the configuration file with all modifications"""
- if not self.filename:
- return
- fileobj = file(self.filename, 'w')
- try:
- self.parser.write(fileobj)
- finally:
- fileobj.close()
+ def save(self):
+ """Save the configuration file with all modifications"""
+ if not self.filename:
+ return
+ fileobj = file(self.filename, 'w')
+ try:
+ self.parser.write(fileobj)
+ finally:
+ fileobj.close()
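
A minimal sketch of the loader in use (the section, option, and inline INI
string are made up):

    from autotest_lib.client.bin import test_config

    # cfg may be a URL, a filesystem path, or (as here) a literal INI string.
    cfg = test_config.config_loader('[dbench]\nnprocs = 4\n', tmpdir='/tmp')
    nprocs = cfg.get('dbench', 'nprocs', default='1')   # -> '4'
    missing = cfg.get('dbench', 'runtime')              # -> None, no exception
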
diff --git a/client/bin/xen.py b/client/bin/xen.py
index d69b2c50..a7924272 100644
--- a/client/bin/xen.py
+++ b/client/bin/xen.py
@@ -8,201 +8,201 @@ from autotest_lib.client.bin import autotest_utils
class xen(kernel.kernel):
- def log(self, msg):
- print msg
- self.logfile.write('%s\n' % msg)
+ def log(self, msg):
+ print msg
+ self.logfile.write('%s\n' % msg)
- def __init__(self, job, base_tree, results_dir, tmp_dir, build_dir, \
- leave = False, kjob = None):
- # call base-class
- kernel.kernel.__init__(self, job, base_tree, results_dir, \
- tmp_dir, build_dir, leave)
- self.kjob = kjob
+ def __init__(self, job, base_tree, results_dir, tmp_dir, build_dir, \
+ leave = False, kjob = None):
+ # call base-class
+ kernel.kernel.__init__(self, job, base_tree, results_dir, \
+ tmp_dir, build_dir, leave)
+ self.kjob = kjob
- def config(self, config_file, config_list = None):
- raise NotImplementedError('config() not implemented for xen')
+ def config(self, config_file, config_list = None):
+ raise NotImplementedError('config() not implemented for xen')
- def build(self, make_opts = '', logfile = '', extraversion='autotest'):
- """build xen
+ def build(self, make_opts = '', logfile = '', extraversion='autotest'):
+ """build xen
- make_opts
- additional options to make, if any
- """
- self.log('running build')
- os_dep.commands('gcc', 'make')
- # build xen with extraversion flag
- os.environ['XEN_EXTRAVERSION'] = '-unstable-%s'% extraversion
- if logfile == '':
- logfile = os.path.join(self.log_dir, 'xen_build')
- os.chdir(self.build_dir)
- self.log('log_dir: %s ' % os.path.join(self.log_dir, 'stdout'))
- self.job.stdout.tee_redirect(logfile + '.stdout')
- self.job.stderr.tee_redirect(logfile + '.stderr')
+ make_opts
+ additional options to make, if any
+ """
+ self.log('running build')
+ os_dep.commands('gcc', 'make')
+ # build xen with extraversion flag
+ os.environ['XEN_EXTRAVERSION'] = '-unstable-%s'% extraversion
+ if logfile == '':
+ logfile = os.path.join(self.log_dir, 'xen_build')
+ os.chdir(self.build_dir)
+ self.log('log_dir: %s ' % os.path.join(self.log_dir, 'stdout'))
+ self.job.stdout.tee_redirect(logfile + '.stdout')
+ self.job.stderr.tee_redirect(logfile + '.stderr')
- # build xen hypervisor and user-space tools
- targets = ['xen', 'tools']
- threads = 2 * autotest_utils.count_cpus()
- for t in targets:
- build_string = 'make -j %d %s %s' % (threads, make_opts, t)
- self.log('build_string: %s' % build_string)
- system(build_string)
+ # build xen hypervisor and user-space tools
+ targets = ['xen', 'tools']
+ threads = 2 * autotest_utils.count_cpus()
+ for t in targets:
+ build_string = 'make -j %d %s %s' % (threads, make_opts, t)
+ self.log('build_string: %s' % build_string)
+ system(build_string)
- # make a kernel job out of the kernel from the xen src if one isn't provided
- if self.kjob == None:
- # get xen kernel tree ready
- self.log("prep-ing xen'ified kernel source tree")
- system('make prep-kernels')
+ # make a kernel job out of the kernel from the xen src if one isn't provided
+ if self.kjob == None:
+ # get xen kernel tree ready
+ self.log("prep-ing xen'ified kernel source tree")
+ system('make prep-kernels')
- v = self.get_xen_kernel_build_ver()
- self.log('building xen kernel version: %s' % v)
+ v = self.get_xen_kernel_build_ver()
+ self.log('building xen kernel version: %s' % v)
- # build xen-ified kernel in xen tree
- kernel_base_tree = os.path.join(self.build_dir, \
- 'linux-%s' % self.get_xen_kernel_build_ver())
+ # build xen-ified kernel in xen tree
+ kernel_base_tree = os.path.join(self.build_dir, \
+ 'linux-%s' % self.get_xen_kernel_build_ver())
- self.log('kernel_base_tree = %s' % kernel_base_tree)
- # fix up XENGUEST value in EXTRAVERSION; we can't have
- # files with '$(XENGEUST)' in the name, =(
- self.fix_up_xen_kernel_makefile(kernel_base_tree)
+ self.log('kernel_base_tree = %s' % kernel_base_tree)
+ # fix up XENGUEST value in EXTRAVERSION; we can't have
+            # files with '$(XENGUEST)' in the name, =(
+ self.fix_up_xen_kernel_makefile(kernel_base_tree)
- # make the kernel job
- self.kjob = self.job.kernel(kernel_base_tree)
+ # make the kernel job
+ self.kjob = self.job.kernel(kernel_base_tree)
- # hardcoding dom0 config (no modules for testing, yay!)
- # FIXME: probe host to determine which config to pick
- c = self.build_dir + '/buildconfigs/linux-defconfig_xen0_x86_32'
- self.log('using kernel config: %s ' % c)
- self.kjob.config(c)
+ # hardcoding dom0 config (no modules for testing, yay!)
+ # FIXME: probe host to determine which config to pick
+ c = self.build_dir + '/buildconfigs/linux-defconfig_xen0_x86_32'
+ self.log('using kernel config: %s ' % c)
+ self.kjob.config(c)
- # Xen's kernel tree sucks; doesn't use bzImage, but vmlinux
- self.kjob.set_build_target('vmlinuz')
+ # Xen's kernel tree sucks; doesn't use bzImage, but vmlinux
+ self.kjob.set_build_target('vmlinuz')
- # also, the vmlinuz is not out in arch/*/boot, ARGH! more hackery
- self.kjob.set_build_image(self.job.tmpdir + '/build/linux/vmlinuz')
+ # also, the vmlinuz is not out in arch/*/boot, ARGH! more hackery
+ self.kjob.set_build_image(self.job.tmpdir + '/build/linux/vmlinuz')
- self.kjob.build()
+ self.kjob.build()
- self.job.stdout.restore()
- self.job.stderr.restore()
+ self.job.stdout.restore()
+ self.job.stderr.restore()
- xen_version = self.get_xen_build_ver()
- self.log('BUILD VERSION: Xen: %s Kernel:%s' % \
- (xen_version, self.kjob.get_kernel_build_ver()))
+ xen_version = self.get_xen_build_ver()
+ self.log('BUILD VERSION: Xen: %s Kernel:%s' % \
+ (xen_version, self.kjob.get_kernel_build_ver()))
- def build_timed(self, *args, **kwds):
- raise NotImplementedError('build_timed() not implemented')
+ def build_timed(self, *args, **kwds):
+ raise NotImplementedError('build_timed() not implemented')
- def install(self, tag='', prefix = '/', extraversion='autotest'):
- """make install in the kernel tree"""
- self.log('Installing ...')
+ def install(self, tag='', prefix = '/', extraversion='autotest'):
+ """make install in the kernel tree"""
+ self.log('Installing ...')
- os.chdir(self.build_dir)
+ os.chdir(self.build_dir)
- if not os.path.isdir(prefix):
- os.mkdir(prefix)
- self.boot_dir = os.path.join(prefix, 'boot')
- if not os.path.isdir(self.boot_dir):
- os.mkdir(self.boot_dir)
+ if not os.path.isdir(prefix):
+ os.mkdir(prefix)
+ self.boot_dir = os.path.join(prefix, 'boot')
+ if not os.path.isdir(self.boot_dir):
+ os.mkdir(self.boot_dir)
- # remember what we are going to install
- xen_version = '%s-%s' % (self.get_xen_build_ver(), extraversion)
- self.xen_image = self.boot_dir + '/xen-' + xen_version + '.gz'
- self.xen_syms = self.boot_dir + '/xen-syms-' + xen_version
+ # remember what we are going to install
+ xen_version = '%s-%s' % (self.get_xen_build_ver(), extraversion)
+ self.xen_image = self.boot_dir + '/xen-' + xen_version + '.gz'
+ self.xen_syms = self.boot_dir + '/xen-syms-' + xen_version
- self.log('Installing Xen ...')
- os.environ['XEN_EXTRAVERSION'] = '-unstable-%s'% extraversion
+ self.log('Installing Xen ...')
+ os.environ['XEN_EXTRAVERSION'] = '-unstable-%s'% extraversion
- # install xen
- system('make DESTDIR=%s -C xen install' % prefix)
+ # install xen
+ system('make DESTDIR=%s -C xen install' % prefix)
- # install tools
- system('make DESTDIR=%s -C tools install' % prefix)
+ # install tools
+ system('make DESTDIR=%s -C tools install' % prefix)
- # install kernel
- ktag = self.kjob.get_kernel_build_ver()
- kprefix = prefix
- self.kjob.install(tag=ktag, prefix=kprefix)
+ # install kernel
+ ktag = self.kjob.get_kernel_build_ver()
+ kprefix = prefix
+ self.kjob.install(tag=ktag, prefix=kprefix)
- def add_to_bootloader(self, tag='autotest', args=''):
- """ add this kernel to bootloader, taking an
- optional parameter of space separated parameters
- e.g.: kernel.add_to_bootloader('mykernel', 'ro acpi=off')
- """
+ def add_to_bootloader(self, tag='autotest', args=''):
+ """ add this kernel to bootloader, taking an
+ optional parameter of space separated parameters
+ e.g.: kernel.add_to_bootloader('mykernel', 'ro acpi=off')
+ """
- # turn on xen mode
- self.job.bootloader.enable_xen_mode()
+ # turn on xen mode
+ self.job.bootloader.enable_xen_mode()
- # remove existing entry if present
- self.job.bootloader.remove_kernel(tag)
+ # remove existing entry if present
+ self.job.bootloader.remove_kernel(tag)
- # add xen and xen kernel
- self.job.bootloader.add_kernel(self.kjob.image, tag, \
- self.kjob.initrd, self.xen_image)
+ # add xen and xen kernel
+ self.job.bootloader.add_kernel(self.kjob.image, tag, \
+ self.kjob.initrd, self.xen_image)
- # if no args passed, populate from /proc/cmdline
- if not args:
- args = open('/proc/cmdline', 'r').readline().strip()
+ # if no args passed, populate from /proc/cmdline
+ if not args:
+ args = open('/proc/cmdline', 'r').readline().strip()
- # add args to entry one at a time
- for a in args.split(' '):
- self.job.bootloader.add_args(tag, a)
+ # add args to entry one at a time
+ for a in args.split(' '):
+ self.job.bootloader.add_args(tag, a)
- # turn off xen mode
- self.job.bootloader.disable_xen_mode()
+ # turn off xen mode
+ self.job.bootloader.disable_xen_mode()
- def get_xen_kernel_build_ver(self):
- """Check xen buildconfig for current kernel version"""
- version = patchlevel = sublevel = ''
- extraversion = localversion = ''
+ def get_xen_kernel_build_ver(self):
+ """Check xen buildconfig for current kernel version"""
+ version = patchlevel = sublevel = ''
+ extraversion = localversion = ''
- version_file = self.build_dir + '/buildconfigs/mk.linux-2.6-xen'
+ version_file = self.build_dir + '/buildconfigs/mk.linux-2.6-xen'
- for line in open(version_file, 'r').readlines():
- if line.startswith('LINUX_VER'):
- start = line.index('=') + 1
- version = line[start:].strip() + "-xen"
- break
+ for line in open(version_file, 'r').readlines():
+ if line.startswith('LINUX_VER'):
+ start = line.index('=') + 1
+ version = line[start:].strip() + "-xen"
+ break
- return version
+ return version
- def fix_up_xen_kernel_makefile(self, kernel_dir):
- """Fix up broken EXTRAVERSION in xen-ified Linux kernel Makefile"""
- xenguest = ''
- makefile = kernel_dir + '/Makefile'
+ def fix_up_xen_kernel_makefile(self, kernel_dir):
+ """Fix up broken EXTRAVERSION in xen-ified Linux kernel Makefile"""
+ xenguest = ''
+ makefile = kernel_dir + '/Makefile'
- for line in open(makefile, 'r').readlines():
- if line.startswith('XENGUEST'):
- start = line.index('=') + 1
- xenguest = line[start:].strip()
- break;
+ for line in open(makefile, 'r').readlines():
+ if line.startswith('XENGUEST'):
+ start = line.index('=') + 1
+ xenguest = line[start:].strip()
+ break;
- # change out $XENGUEST in EXTRAVERSION line
- system('sed -i.old "s,\$(XENGUEST),%s," %s' % \
- (xenguest, makefile))
+ # change out $XENGUEST in EXTRAVERSION line
+ system('sed -i.old "s,\$(XENGUEST),%s," %s' % \
+ (xenguest, makefile))
- def get_xen_build_ver(self):
- """Check Makefile and .config to return kernel version"""
- version = patchlevel = sublevel = ''
- extraversion = localversion = ''
+ def get_xen_build_ver(self):
+ """Check Makefile and .config to return kernel version"""
+ version = patchlevel = sublevel = ''
+ extraversion = localversion = ''
- for line in open(self.build_dir + '/xen/Makefile', 'r').readlines():
- if line.startswith('export XEN_VERSION'):
- start = line.index('=') + 1
- version = line[start:].strip()
- if line.startswith('export XEN_SUBVERSION'):
- start = line.index('=') + 1
- sublevel = line[start:].strip()
- if line.startswith('export XEN_EXTRAVERSION'):
- start = line.index('=') + 1
- extraversion = line[start:].strip()
+ for line in open(self.build_dir + '/xen/Makefile', 'r').readlines():
+ if line.startswith('export XEN_VERSION'):
+ start = line.index('=') + 1
+ version = line[start:].strip()
+ if line.startswith('export XEN_SUBVERSION'):
+ start = line.index('=') + 1
+ sublevel = line[start:].strip()
+ if line.startswith('export XEN_EXTRAVERSION'):
+ start = line.index('=') + 1
+ extraversion = line[start:].strip()
- return "%s.%s%s" % (version, sublevel, extraversion)
+ return "%s.%s%s" % (version, sublevel, extraversion)
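A minimal standalone sketch of the version parsing that get_xen_build_ver() performs above, assuming a Xen-style xen/Makefile that carries export XEN_VERSION / XEN_SUBVERSION / XEN_EXTRAVERSION lines; the helper name and the sample values in the comments are illustrative, not part of the patch:

    def parse_xen_version(makefile_path):
        # Pull the three exported components out of xen/Makefile, then
        # compose them the same way get_xen_build_ver() does.
        version = sublevel = extraversion = ''
        for line in open(makefile_path, 'r').readlines():
            if line.startswith('export XEN_VERSION'):
                version = line.split('=', 1)[1].strip()
            elif line.startswith('export XEN_SUBVERSION'):
                sublevel = line.split('=', 1)[1].strip()
            elif line.startswith('export XEN_EXTRAVERSION'):
                extraversion = line.split('=', 1)[1].strip()
        # e.g. XEN_VERSION=3, XEN_SUBVERSION=1, XEN_EXTRAVERSION=-unstable
        # yields '3.1-unstable'
        return "%s.%s%s" % (version, sublevel, extraversion)

The install step then appends the 'autotest' extraversion to this string, which is why the installed hypervisor image ends up under boot_dir as xen-<version>-<extraversion>.gz.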
diff --git a/client/common_lib/barrier.py b/client/common_lib/barrier.py
index bc6bb615..d9da3302 100755
--- a/client/common_lib/barrier.py
+++ b/client/common_lib/barrier.py
@@ -6,462 +6,462 @@ import error
class BarrierError(error.JobError):
- pass
+ pass
class barrier:
- """ Multi-machine barrier support
-
- Provides multi-machine barrier mechanism. Execution
- stopping until all members arrive at the barrier.
-
- When a barrier is forming the master node (first in sort
- order) in the set accepts connections from each member
- of the set. As they arrive they indicate the barrier
- they are joining and their identifier (their hostname
- or IP address and optional tag). They are then asked
- to wait. When all members are present the master node
- then checks that each member is still responding via a
- ping/pong exchange. If this is successful then everyone
- has checked in at the barrier. We then tell everyone
- they may continue via a rlse message.
-
- Where the master is not the first to reach the barrier
- the client connects will fail. Client will retry until
- they either succeed in connecting to master or the overal
- timeout is exceeded.
-
- As an example here is the exchange for a three node
- barrier called 'TAG'
-
- MASTER CLIENT1 CLIENT2
- <-------------TAG C1-------------
- --------------wait-------------->
- [...]
- <-------------TAG C2-----------------------------
- --------------wait------------------------------>
- [...]
- --------------ping-------------->
- <-------------pong---------------
- --------------ping------------------------------>
- <-------------pong-------------------------------
- ----- BARRIER conditions MET -----
- --------------rlse-------------->
- --------------rlse------------------------------>
-
- Note that once the last client has responded to pong the
- barrier is implicitly deemed satisifed, they have all
- acknowledged their presence. If we fail to send any
- of the rlse messages the barrier is still a success,
- the failed host has effectively broken 'right at the
- beginning' of the post barrier execution window.
-
- In addition, there is another rendevous, that makes each slave a server
- and the master a client. The connection process and usage is still the
- same but allows barriers from machines that only have a one-way
- connection initiation. This is called rendevous_servers.
-
- For example:
- if ME == SERVER:
- server start
-
- b = job.barrier(ME, 'server-up', 120)
- b.rendevous(CLIENT, SERVER)
-
- if ME == CLIENT:
- client run
-
- b = job.barrier(ME, 'test-complete', 3600)
- b.rendevous(CLIENT, SERVER)
-
- if ME == SERVER:
- server stop
-
- Properties:
- hostid
- My hostname/IP address + optional tag
- tag
- Symbolic name of the barrier in progress
- port
- TCP port used for this barrier
- timeout
- Maximum time to wait for a the barrier to meet
- start
- Timestamp when we started waiting
- members
- All members we expect to find in the barrier
- seen
- Number of clients seen (should be the length of waiting)
- waiting
- Clients who have checked in and are waiting (master)
- masterid
- Hostname/IP address + optional tag of selected master
- """
-
- def __init__(self, hostid, tag, timeout, port=63000):
- self.hostid = hostid
- self.tag = tag
- self.port = port
- self.timeout = timeout
-
- self.report("tag=%s port=%d timeout=%d" \
- % (self.tag, self.port, self.timeout))
-
-
- def get_host_from_id(self, id):
- # Remove any trailing local identifier following a #.
- # This allows multiple members per host which is particularly
- # helpful in testing.
- return id.split('#')[0]
-
-
- def report(self, out):
- print "barrier:", self.hostid, out
- sys.stdout.flush()
-
-
- def update_timeout(self, timeout):
- self.timeout = (time() - self.start) + timeout
-
-
- def remaining(self):
- timeout = self.timeout - (time() - self.start)
- if (timeout <= 0):
- raise BarrierError("timeout waiting for barrier")
-
- self.report("remaining: %d" % (timeout))
- return timeout
-
-
- def master_welcome(self, connection):
- (client, addr) = connection
- name = None
-
- client.settimeout(5)
- try:
- # Get the clients name.
- intro = client.recv(1024)
- intro = intro.strip("\r\n")
-
- (tag, name) = intro.split(' ')
-
- self.report("new client tag=%s, name=%s" % (tag, name))
-
- # Ok, we know who is trying to attach. Confirm that
- # they are coming to the same meeting. Also, everyone
- # should be using a unique handle (their IP address).
- # If we see a duplicate, something _bad_ has happened
- # so drop them now.
- if self.tag != tag:
- self.report("client arriving for the " \
- "wrong barrier")
- client.settimeout(5)
- client.send("!tag")
- client.close()
- return
- elif name in self.waiting:
- self.report("duplicate client")
- client.settimeout(5)
- client.send("!dup")
- client.close()
- return
-
- # Acknowledge the client
- client.send("wait")
-
- except socket.timeout:
- # This is nominally an error, but as we do not know
- # who that was we cannot do anything sane other
- # than report it and let the normal timeout kill
- # us when thats appropriate.
- self.report("client handshake timeout: (%s:%d)" %\
- (addr[0], addr[1]))
- client.close()
- return
-
- self.report("client now waiting: %s (%s:%d)" % \
- (name, addr[0], addr[1]))
-
- # They seem to be valid record them.
- self.waiting[name] = connection
- self.seen += 1
-
-
- def slave_hello(self, connection):
- (client, addr) = connection
- name = None
-
- client.settimeout(5)
- try:
- client.send(self.tag + " " + self.hostid)
-
- reply = client.recv(4)
- reply = reply.strip("\r\n")
- self.report("master said: " + reply)
-
- # Confirm the master accepted the connection.
- if reply != "wait":
- self.report("Bad connection request to master")
- client.close()
- return
-
- except socket.timeout:
- # This is nominally an error, but as we do not know
- # who that was we cannot do anything sane other
- # than report it and let the normal timeout kill
- # us when thats appropriate.
- self.report("master handshake timeout: (%s:%d)" %\
- (addr[0], addr[1]))
- client.close()
- return
-
- self.report("slave now waiting: (%s:%d)" % \
- (addr[0], addr[1]))
-
- # They seem to be valid record them.
- self.waiting[self.hostid] = connection
- self.seen = 1
-
-
- def master_release(self):
- # Check everyone is still there, that they have not
- # crashed or disconnected in the meantime.
- allpresent = 1
- for name in self.waiting:
- (client, addr) = self.waiting[name]
-
- self.report("checking client present: " + name)
-
- client.settimeout(5)
- reply = 'none'
- try:
- client.send("ping")
- reply = client.recv(1024)
- except socket.timeout:
- self.report("ping/pong timeout: " + name)
- pass
-
- if reply != "pong":
- allpresent = 0
-
- if not allpresent:
- raise BarrierError("master lost client")
-
- # If every ones checks in then commit the release.
- for name in self.waiting:
- (client, addr) = self.waiting[name]
-
- self.report("releasing client: " + name)
-
- client.settimeout(5)
- try:
- client.send("rlse")
- except socket.timeout:
- self.report("release timeout: " + name)
- pass
-
-
- def waiting_close(self):
- # Either way, close out all the clients. If we have
- # not released them then they know to abort.
- for name in self.waiting:
- (client, addr) = self.waiting[name]
-
- self.report("closing client: " + name)
-
- try:
- client.close()
- except:
- pass
-
-
- def run_server(self, is_master):
- self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- self.server.setsockopt(socket.SOL_SOCKET,
- socket.SO_REUSEADDR, 1)
- self.server.bind(('', self.port))
- self.server.listen(10)
-
- failed = 0
- try:
- while 1:
- try:
- # Wait for callers welcoming each.
- self.server.settimeout(self.remaining())
- connection = self.server.accept()
- if is_master:
- self.master_welcome(connection)
- else:
- self.slave_hello(connection)
- except socket.timeout:
- self.report("timeout waiting for " +
- "remaining clients")
- pass
-
- if is_master:
- # Check if everyone is here.
- self.report("master seen %d of %d" % \
- (self.seen, len(self.members)))
- if self.seen == len(self.members):
- self.master_release()
- break
- else:
- # Check if master connected.
- if self.seen:
- self.report("slave connected " +
- "to master")
- self.slave_wait()
- break
-
- self.waiting_close()
- self.server.close()
- except:
- self.waiting_close()
- self.server.close()
- raise
-
-
- def run_client(self, is_master):
- while self.remaining() > 0:
- try:
- remote = socket.socket(socket.AF_INET,
- socket.SOCK_STREAM)
- remote.settimeout(30)
- if is_master:
- # Connect to all slaves.
- host = self.get_host_from_id(
- self.members[self.seen])
- self.report("calling slave: %s" % host)
- connection = (remote, (host, self.port))
- remote.connect(connection[1])
- self.master_welcome(connection)
- else:
- # Just connect to the master.
- host = self.get_host_from_id(
- self.masterid)
- self.report("calling master")
- connection = (remote, (host, self.port))
- remote.connect(connection[1])
- self.slave_hello(connection)
- except socket.timeout:
- self.report("timeout calling host, retry")
- sleep(10)
- pass
- except socket.error, err:
- (code, str) = err
- if (code != errno.ECONNREFUSED):
- raise
- sleep(10)
-
- if is_master:
- # Check if everyone is here.
- self.report("master seen %d of %d" % \
- (self.seen, len(self.members)))
- if self.seen == len(self.members):
- self.master_release()
- break
- else:
- # Check if master connected.
- if self.seen:
- self.report("slave connected " +
- "to master")
- self.slave_wait()
- break
-
- self.waiting_close()
-
-
- def slave_wait(self):
- remote = self.waiting[self.hostid][0]
- mode = "wait"
- while 1:
- # All control messages are the same size to allow
- # us to split individual messages easily.
- remote.settimeout(self.remaining())
- reply = remote.recv(4)
- if not reply:
- break
-
- reply = reply.strip("\r\n")
- self.report("master said: " + reply)
-
- mode = reply
- if reply == "ping":
- # Ensure we have sufficient time for the
- # ping/pong/rlse cyle to complete normally.
- self.update_timeout(10 + 10 * len(self.members))
-
- self.report("pong")
- remote.settimeout(self.remaining())
- remote.send("pong")
-
- elif reply == "rlse":
- # Ensure we have sufficient time for the
- # ping/pong/rlse cyle to complete normally.
- self.update_timeout(10 + 10 * len(self.members))
-
- self.report("was released, waiting for close")
-
- if mode == "rlse":
- pass
- elif mode == "wait":
- raise BarrierError("master abort -- barrier timeout")
- elif mode == "ping":
- raise BarrierError("master abort -- client lost")
- elif mode == "!tag":
- raise BarrierError("master abort -- incorrect tag")
- elif mode == "!dup":
- raise BarrierError("master abort -- duplicate client")
- else:
- raise BarrierError("master handshake failure: " + mode)
-
-
- def rendevous(self, *hosts):
- self.start = time()
- self.members = list(hosts)
- self.members.sort()
- self.masterid = self.members.pop(0)
-
- self.report("masterid: %s" % self.masterid)
- if not len(self.members):
- self.report("No other members listed.")
- return
- self.report("members: %s" % ",".join(self.members))
-
- self.seen = 0
- self.waiting = {}
-
- # Figure out who is the master in this barrier.
- if self.hostid == self.masterid:
- self.report("selected as master")
- self.run_server(is_master=True)
- else:
- self.report("selected as slave")
- self.run_client(is_master=False)
-
-
- def rendevous_servers(self, masterid, *hosts):
- self.start = time()
- self.members = list(hosts)
- self.members.sort()
- self.masterid = masterid
-
- self.report("masterid: %s" % self.masterid)
- if not len(self.members):
- self.report("No other members listed.")
- return
- self.report("members: %s" % ",".join(self.members))
-
- self.seen = 0
- self.waiting = {}
-
- # Figure out who is the master in this barrier.
- if self.hostid == self.masterid:
- self.report("selected as master")
- self.run_client(is_master=True)
- else:
- self.report("selected as slave")
- self.run_server(is_master=False)
+ """ Multi-machine barrier support
+
+ Provides a multi-machine barrier mechanism. Execution
+ stops until all members arrive at the barrier.
+
+ When a barrier is forming the master node (first in sort
+ order) in the set accepts connections from each member
+ of the set. As they arrive they indicate the barrier
+ they are joining and their identifier (their hostname
+ or IP address and optional tag). They are then asked
+ to wait. When all members are present the master node
+ then checks that each member is still responding via a
+ ping/pong exchange. If this is successful then everyone
+ has checked in at the barrier. We then tell everyone
+ they may continue via a rlse message.
+
+ Where the master is not the first to reach the barrier
+ the client connection attempts will fail. Clients will retry until
+ they either succeed in connecting to the master or the overall
+ timeout is exceeded.
+
+ As an example here is the exchange for a three node
+ barrier called 'TAG'
+
+ MASTER CLIENT1 CLIENT2
+ <-------------TAG C1-------------
+ --------------wait-------------->
+ [...]
+ <-------------TAG C2-----------------------------
+ --------------wait------------------------------>
+ [...]
+ --------------ping-------------->
+ <-------------pong---------------
+ --------------ping------------------------------>
+ <-------------pong-------------------------------
+ ----- BARRIER conditions MET -----
+ --------------rlse-------------->
+ --------------rlse------------------------------>
+
+ Note that once the last client has responded to pong the
+ barrier is implicitly deemed satisfied; they have all
+ acknowledged their presence. If we fail to send any
+ of the rlse messages the barrier is still a success,
+ the failed host has effectively broken 'right at the
+ beginning' of the post barrier execution window.
+
+ In addition, there is another rendevous, that makes each slave a server
+ and the master a client. The connection process and usage is still the
+ same but allows barriers from machines that only have a one-way
+ connection initiation. This is called rendevous_servers.
+
+ For example:
+ if ME == SERVER:
+ server start
+
+ b = job.barrier(ME, 'server-up', 120)
+ b.rendevous(CLIENT, SERVER)
+
+ if ME == CLIENT:
+ client run
+
+ b = job.barrier(ME, 'test-complete', 3600)
+ b.rendevous(CLIENT, SERVER)
+
+ if ME == SERVER:
+ server stop
+
+ Properties:
+ hostid
+ My hostname/IP address + optional tag
+ tag
+ Symbolic name of the barrier in progress
+ port
+ TCP port used for this barrier
+ timeout
+ Maximum time to wait for the barrier to meet
+ start
+ Timestamp when we started waiting
+ members
+ All members we expect to find in the barrier
+ seen
+ Number of clients seen (should be the length of waiting)
+ waiting
+ Clients who have checked in and are waiting (master)
+ masterid
+ Hostname/IP address + optional tag of selected master
+ """
+
+ def __init__(self, hostid, tag, timeout, port=63000):
+ self.hostid = hostid
+ self.tag = tag
+ self.port = port
+ self.timeout = timeout
+
+ self.report("tag=%s port=%d timeout=%d" \
+ % (self.tag, self.port, self.timeout))
+
+
+ def get_host_from_id(self, id):
+ # Remove any trailing local identifier following a #.
+ # This allows multiple members per host which is particularly
+ # helpful in testing.
+ return id.split('#')[0]
+
+
+ def report(self, out):
+ print "barrier:", self.hostid, out
+ sys.stdout.flush()
+
+
+ def update_timeout(self, timeout):
+ self.timeout = (time() - self.start) + timeout
+
+
+ def remaining(self):
+ timeout = self.timeout - (time() - self.start)
+ if (timeout <= 0):
+ raise BarrierError("timeout waiting for barrier")
+
+ self.report("remaining: %d" % (timeout))
+ return timeout
+
+
+ def master_welcome(self, connection):
+ (client, addr) = connection
+ name = None
+
+ client.settimeout(5)
+ try:
+ # Get the client's name.
+ intro = client.recv(1024)
+ intro = intro.strip("\r\n")
+
+ (tag, name) = intro.split(' ')
+
+ self.report("new client tag=%s, name=%s" % (tag, name))
+
+ # Ok, we know who is trying to attach. Confirm that
+ # they are coming to the same meeting. Also, everyone
+ # should be using a unique handle (their IP address).
+ # If we see a duplicate, something _bad_ has happened
+ # so drop them now.
+ if self.tag != tag:
+ self.report("client arriving for the " \
+ "wrong barrier")
+ client.settimeout(5)
+ client.send("!tag")
+ client.close()
+ return
+ elif name in self.waiting:
+ self.report("duplicate client")
+ client.settimeout(5)
+ client.send("!dup")
+ client.close()
+ return
+
+ # Acknowledge the client
+ client.send("wait")
+
+ except socket.timeout:
+ # This is nominally an error, but as we do not know
+ # who that was, we cannot do anything sane other
+ # than report it and let the normal timeout kill
+ # us when that's appropriate.
+ self.report("client handshake timeout: (%s:%d)" %\
+ (addr[0], addr[1]))
+ client.close()
+ return
+
+ self.report("client now waiting: %s (%s:%d)" % \
+ (name, addr[0], addr[1]))
+
+ # They seem to be valid; record them.
+ self.waiting[name] = connection
+ self.seen += 1
+
+
+ def slave_hello(self, connection):
+ (client, addr) = connection
+ name = None
+
+ client.settimeout(5)
+ try:
+ client.send(self.tag + " " + self.hostid)
+
+ reply = client.recv(4)
+ reply = reply.strip("\r\n")
+ self.report("master said: " + reply)
+
+ # Confirm the master accepted the connection.
+ if reply != "wait":
+ self.report("Bad connection request to master")
+ client.close()
+ return
+
+ except socket.timeout:
+ # This is nominally an error, but as we do not know
+ # who that was, we cannot do anything sane other
+ # than report it and let the normal timeout kill
+ # us when that's appropriate.
+ self.report("master handshake timeout: (%s:%d)" %\
+ (addr[0], addr[1]))
+ client.close()
+ return
+
+ self.report("slave now waiting: (%s:%d)" % \
+ (addr[0], addr[1]))
+
+ # They seem to be valid; record them.
+ self.waiting[self.hostid] = connection
+ self.seen = 1
+
+
+ def master_release(self):
+ # Check everyone is still there, that they have not
+ # crashed or disconnected in the meantime.
+ allpresent = 1
+ for name in self.waiting:
+ (client, addr) = self.waiting[name]
+
+ self.report("checking client present: " + name)
+
+ client.settimeout(5)
+ reply = 'none'
+ try:
+ client.send("ping")
+ reply = client.recv(1024)
+ except socket.timeout:
+ self.report("ping/pong timeout: " + name)
+ pass
+
+ if reply != "pong":
+ allpresent = 0
+
+ if not allpresent:
+ raise BarrierError("master lost client")
+
+ # If everyone checks in then commit the release.
+ for name in self.waiting:
+ (client, addr) = self.waiting[name]
+
+ self.report("releasing client: " + name)
+
+ client.settimeout(5)
+ try:
+ client.send("rlse")
+ except socket.timeout:
+ self.report("release timeout: " + name)
+ pass
+
+
+ def waiting_close(self):
+ # Either way, close out all the clients. If we have
+ # not released them then they know to abort.
+ for name in self.waiting:
+ (client, addr) = self.waiting[name]
+
+ self.report("closing client: " + name)
+
+ try:
+ client.close()
+ except:
+ pass
+
+
+ def run_server(self, is_master):
+ self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.server.setsockopt(socket.SOL_SOCKET,
+ socket.SO_REUSEADDR, 1)
+ self.server.bind(('', self.port))
+ self.server.listen(10)
+
+ failed = 0
+ try:
+ while 1:
+ try:
+ # Wait for callers welcoming each.
+ self.server.settimeout(self.remaining())
+ connection = self.server.accept()
+ if is_master:
+ self.master_welcome(connection)
+ else:
+ self.slave_hello(connection)
+ except socket.timeout:
+ self.report("timeout waiting for " +
+ "remaining clients")
+ pass
+
+ if is_master:
+ # Check if everyone is here.
+ self.report("master seen %d of %d" % \
+ (self.seen, len(self.members)))
+ if self.seen == len(self.members):
+ self.master_release()
+ break
+ else:
+ # Check if master connected.
+ if self.seen:
+ self.report("slave connected " +
+ "to master")
+ self.slave_wait()
+ break
+
+ self.waiting_close()
+ self.server.close()
+ except:
+ self.waiting_close()
+ self.server.close()
+ raise
+
+
+ def run_client(self, is_master):
+ while self.remaining() > 0:
+ try:
+ remote = socket.socket(socket.AF_INET,
+ socket.SOCK_STREAM)
+ remote.settimeout(30)
+ if is_master:
+ # Connect to all slaves.
+ host = self.get_host_from_id(
+ self.members[self.seen])
+ self.report("calling slave: %s" % host)
+ connection = (remote, (host, self.port))
+ remote.connect(connection[1])
+ self.master_welcome(connection)
+ else:
+ # Just connect to the master.
+ host = self.get_host_from_id(
+ self.masterid)
+ self.report("calling master")
+ connection = (remote, (host, self.port))
+ remote.connect(connection[1])
+ self.slave_hello(connection)
+ except socket.timeout:
+ self.report("timeout calling host, retry")
+ sleep(10)
+ pass
+ except socket.error, err:
+ (code, str) = err
+ if (code != errno.ECONNREFUSED):
+ raise
+ sleep(10)
+
+ if is_master:
+ # Check if everyone is here.
+ self.report("master seen %d of %d" % \
+ (self.seen, len(self.members)))
+ if self.seen == len(self.members):
+ self.master_release()
+ break
+ else:
+ # Check if master connected.
+ if self.seen:
+ self.report("slave connected " +
+ "to master")
+ self.slave_wait()
+ break
+
+ self.waiting_close()
+
+
+ def slave_wait(self):
+ remote = self.waiting[self.hostid][0]
+ mode = "wait"
+ while 1:
+ # All control messages are the same size to allow
+ # us to split individual messages easily.
+ remote.settimeout(self.remaining())
+ reply = remote.recv(4)
+ if not reply:
+ break
+
+ reply = reply.strip("\r\n")
+ self.report("master said: " + reply)
+
+ mode = reply
+ if reply == "ping":
+ # Ensure we have sufficient time for the
+ # ping/pong/rlse cycle to complete normally.
+ self.update_timeout(10 + 10 * len(self.members))
+
+ self.report("pong")
+ remote.settimeout(self.remaining())
+ remote.send("pong")
+
+ elif reply == "rlse":
+ # Ensure we have sufficient time for the
+ # ping/pong/rlse cycle to complete normally.
+ self.update_timeout(10 + 10 * len(self.members))
+
+ self.report("was released, waiting for close")
+
+ if mode == "rlse":
+ pass
+ elif mode == "wait":
+ raise BarrierError("master abort -- barrier timeout")
+ elif mode == "ping":
+ raise BarrierError("master abort -- client lost")
+ elif mode == "!tag":
+ raise BarrierError("master abort -- incorrect tag")
+ elif mode == "!dup":
+ raise BarrierError("master abort -- duplicate client")
+ else:
+ raise BarrierError("master handshake failure: " + mode)
+
+
+ def rendevous(self, *hosts):
+ self.start = time()
+ self.members = list(hosts)
+ self.members.sort()
+ self.masterid = self.members.pop(0)
+
+ self.report("masterid: %s" % self.masterid)
+ if not len(self.members):
+ self.report("No other members listed.")
+ return
+ self.report("members: %s" % ",".join(self.members))
+
+ self.seen = 0
+ self.waiting = {}
+
+ # Figure out who is the master in this barrier.
+ if self.hostid == self.masterid:
+ self.report("selected as master")
+ self.run_server(is_master=True)
+ else:
+ self.report("selected as slave")
+ self.run_client(is_master=False)
+
+
+ def rendevous_servers(self, masterid, *hosts):
+ self.start = time()
+ self.members = list(hosts)
+ self.members.sort()
+ self.masterid = masterid
+
+ self.report("masterid: %s" % self.masterid)
+ if not len(self.members):
+ self.report("No other members listed.")
+ return
+ self.report("members: %s" % ",".join(self.members))
+
+ self.seen = 0
+ self.waiting = {}
+
+ # Figure out who is the master in this barrier.
+ if self.hostid == self.masterid:
+ self.report("selected as master")
+ self.run_client(is_master=True)
+ else:
+ self.report("selected as slave")
+ self.run_server(is_master=False)
#
# TESTING -- direct test harness.
@@ -472,28 +472,28 @@ class barrier:
# python bin/barrier.py 3 meeting
#
if __name__ == "__main__":
- barrier = barrier('127.0.0.1#' + sys.argv[1], sys.argv[2], 60)
-
- try:
- all = [ '127.0.0.1#2', '127.0.0.1#1', '127.0.0.1#3' ]
- barrier.rendevous(*all)
- except BarrierError, err:
- print "barrier: 127.0.0.1#" + sys.argv[1] + \
- ": barrier failed:", err
- sys.exit(1)
- else:
- print "barrier: 127.0.0.1#" + sys.argv[1] + \
- ": all present and accounted for"
-
- try:
- all = [ '127.0.0.1#2', '127.0.0.1#1' ]
- if 1 <= int(sys.argv[1]) <= 2:
- barrier.rendevous_servers(*all)
- except BarrierError, err:
- print "barrier: 127.0.0.1#" + sys.argv[1] + \
- ": barrier failed:", err
- sys.exit(1)
- else:
- print "barrier: 127.0.0.1#" + sys.argv[1] + \
- ": all present and accounted for"
- sys.exit(0)
+ barrier = barrier('127.0.0.1#' + sys.argv[1], sys.argv[2], 60)
+
+ try:
+ all = [ '127.0.0.1#2', '127.0.0.1#1', '127.0.0.1#3' ]
+ barrier.rendevous(*all)
+ except BarrierError, err:
+ print "barrier: 127.0.0.1#" + sys.argv[1] + \
+ ": barrier failed:", err
+ sys.exit(1)
+ else:
+ print "barrier: 127.0.0.1#" + sys.argv[1] + \
+ ": all present and accounted for"
+
+ try:
+ all = [ '127.0.0.1#2', '127.0.0.1#1' ]
+ if 1 <= int(sys.argv[1]) <= 2:
+ barrier.rendevous_servers(*all)
+ except BarrierError, err:
+ print "barrier: 127.0.0.1#" + sys.argv[1] + \
+ ": barrier failed:", err
+ sys.exit(1)
+ else:
+ print "barrier: 127.0.0.1#" + sys.argv[1] + \
+ ": all present and accounted for"
+ sys.exit(0)
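A hedged usage sketch of the class above, in the style of the self-test harness: two processes on one machine meet at the same barrier, distinguished by the #N suffix that get_host_from_id() strips off. It assumes client/common_lib is importable and keeps the module's Python 2 idioms; run it with an argument of 1 in one shell and 2 in another.

    import sys
    from barrier import barrier, BarrierError

    me = '127.0.0.1#' + sys.argv[1]       # unique member id on this host
    b = barrier(me, 'demo-sync', 60)      # tag 'demo-sync', 60 second timeout
    try:
        # The lowest-sorting member ('...#1') becomes the master and listens;
        # the other member dials in, then both are released together.
        b.rendevous('127.0.0.1#1', '127.0.0.1#2')
    except BarrierError, err:
        print "barrier failed:", err
        sys.exit(1)
    print "both members arrived; continuing"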
diff --git a/client/common_lib/check_version.py b/client/common_lib/check_version.py
index 981a21c6..6971049a 100755
--- a/client/common_lib/check_version.py
+++ b/client/common_lib/check_version.py
@@ -2,41 +2,41 @@ import sys, string, os, glob, re
def extract_version(path):
- match = re.search(r'/python(\d+)\.(\d+)$', path)
- if match:
- return (int(match.group(1)), int(match.group(2)))
- else:
- return None
+ match = re.search(r'/python(\d+)\.(\d+)$', path)
+ if match:
+ return (int(match.group(1)), int(match.group(2)))
+ else:
+ return None
def find_newest_python():
- pythons = []
- pythons.extend(glob.glob('/usr/bin/python*'))
- pythons.extend(glob.glob('/usr/local/bin/python*'))
+ pythons = []
+ pythons.extend(glob.glob('/usr/bin/python*'))
+ pythons.extend(glob.glob('/usr/local/bin/python*'))
- best_python = (0, 0), ''
- for python in pythons:
- version = extract_version(python)
- if version > best_python[0] and version >= (2, 4):
- best_python = version, python
+ best_python = (0, 0), ''
+ for python in pythons:
+ version = extract_version(python)
+ if version > best_python[0] and version >= (2, 4):
+ best_python = version, python
+
+ if best_python[0] == (0, 0):
+ raise ValueError('Python 2.4 or newer is needed')
+ return best_python[1]
- if best_python[0] == (0, 0):
- raise ValueError('Python 2.4 or newer is needed')
- return best_python[1]
-
def restart():
- python = find_newest_python()
- sys.argv.insert(0, '-u')
- sys.argv.insert(0, python)
- os.execv(sys.argv[0], sys.argv)
+ python = find_newest_python()
+ sys.argv.insert(0, '-u')
+ sys.argv.insert(0, python)
+ os.execv(sys.argv[0], sys.argv)
def check_python_version():
- version = None
- try:
- version = sys.version_info[0:2]
- except AttributeError:
- pass # pre 2.0, no neat way to get the exact number
- if not version or version < (2, 4):
- restart()
+ version = None
+ try:
+ version = sys.version_info[0:2]
+ except AttributeError:
+ pass # pre 2.0, no neat way to get the exact number
+ if not version or version < (2, 4):
+ restart()
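As a quick illustration of the path matching above (the interpreter paths are examples only): extract_version() returns a (major, minor) tuple that find_newest_python() can compare directly, and an unversioned binary maps to None so it never wins the comparison.

    import re

    def extract_version(path):
        match = re.search(r'/python(\d+)\.(\d+)$', path)
        if match:
            return (int(match.group(1)), int(match.group(2)))
        return None

    print extract_version('/usr/bin/python2.4')        # -> (2, 4)
    print extract_version('/usr/local/bin/python2.6')  # -> (2, 6)
    print extract_version('/usr/bin/python')            # -> None (no version suffix)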
diff --git a/client/common_lib/error.py b/client/common_lib/error.py
index 6eeb3fed..1fe33700 100644
--- a/client/common_lib/error.py
+++ b/client/common_lib/error.py
@@ -6,137 +6,137 @@ import sys
from traceback import format_exception
def format_error():
- t, o, tb = sys.exc_info()
- trace = format_exception(t, o, tb)
- # Clear the backtrace to prevent a circular reference
- # in the heap -- as per tutorial
- tb = ''
+ t, o, tb = sys.exc_info()
+ trace = format_exception(t, o, tb)
+ # Clear the backtrace to prevent a circular reference
+ # in the heap -- as per tutorial
+ tb = ''
- return ''.join(trace)
+ return ''.join(trace)
class JobContinue(SystemExit):
- """Allow us to bail out requesting continuance."""
- pass
+ """Allow us to bail out requesting continuance."""
+ pass
class JobComplete(SystemExit):
- """Allow us to bail out indicating continuation not required."""
- pass
+ """Allow us to bail out indicating continuation not required."""
+ pass
class AutotestError(Exception):
- """The parent of all errors deliberatly thrown within the client code."""
- pass
+ """The parent of all errors deliberately thrown within the client code."""
+ pass
class JobError(AutotestError):
- """Indicates an error which terminates and fails the whole job."""
- pass
+ """Indicates an error which terminates and fails the whole job."""
+ pass
class TestError(AutotestError):
- """Indicates an error which terminates and fails the test."""
- pass
+ """Indicates an error which terminates and fails the test."""
+ pass
class TestNAError(AutotestError):
- """Indictates that the test is Not Applicable. Should be thrown
- when various conditions are such that the test is inappropriate"""
- pass
+ """Indicates that the test is Not Applicable. Should be thrown
+ when various conditions are such that the test is inappropriate"""
+ pass
class CmdError(TestError):
- """\
- Indicates that a command failed, is fatal to the test unless caught.
- """
- def __init__(self, command, result_obj, additional_text=None):
- TestError.__init__(self, command, result_obj, additional_text)
+ """\
+ Indicates that a command failed, is fatal to the test unless caught.
+ """
+ def __init__(self, command, result_obj, additional_text=None):
+ TestError.__init__(self, command, result_obj, additional_text)
- def __str__(self):
- msg = "Command <%s> failed, rc=%d" % (self.args[0],
- self.args[1].exit_status)
- if self.args[2]:
- msg += ", " + self.args[2]
- return msg
+ def __str__(self):
+ msg = "Command <%s> failed, rc=%d" % (self.args[0],
+ self.args[1].exit_status)
+ if self.args[2]:
+ msg += ", " + self.args[2]
+ return msg
class PackageError(TestError):
- """Indicates an error trying to perform a package operation."""
- pass
+ """Indicates an error trying to perform a package operation."""
+ pass
class UnhandledError(TestError):
- """Indicates an unhandled exception in a test."""
- def __init__(self, prefix):
- msg = prefix + format_error()
- TestError.__init__(self, msg)
+ """Indicates an unhandled exception in a test."""
+ def __init__(self, prefix):
+ msg = prefix + format_error()
+ TestError.__init__(self, msg)
class InstallError(JobError):
- """Indicates an installation error which Terminates and fails the job."""
- pass
+ """Indicates an installation error which terminates and fails the job."""
+ pass
class AutotestRunError(AutotestError):
- pass
+ pass
class AutotestTimeoutError(AutotestError):
- """This exception is raised when an autotest test exceeds the timeout
- parameter passed to run_timed_test and is killed.
- """
+ """This exception is raised when an autotest test exceeds the timeout
+ parameter passed to run_timed_test and is killed.
+ """
# server-specific errors
class AutoservError(Exception):
- pass
+ pass
class AutoservSSHTimeout(AutoservError):
- """SSH experienced a connection timeout"""
- pass
+ """SSH experienced a connection timeout"""
+ pass
class AutoservRunError(AutoservError):
- """\
- Errors raised by one of the run functions. Should always be
- constructed with a tuple of two args (error description (str),
- run result object).
- """
- def __init__(self, description, result_obj):
- AutoservError.__init__(self, description, result_obj)
+ """\
+ Errors raised by one of the run functions. Should always be
+ constructed with a tuple of two args (error description (str),
+ run result object).
+ """
+ def __init__(self, description, result_obj):
+ AutoservError.__init__(self, description, result_obj)
class AutoservVirtError(AutoservError):
- """Vitualization related error"""
- pass
+ """Virtualization-related error"""
+ pass
class AutoservUnsupportedError(AutoservError):
- """Error raised when you try to use an unsupported optional feature"""
- pass
+ """Error raised when you try to use an unsupported optional feature"""
+ pass
class AutoservHostError(AutoservError):
- """Error reaching a host"""
- pass
+ """Error reaching a host"""
+ pass
class AutoservRebootError(AutoservError):
- """Error occured while rebooting a machine"""
- pass
+ """Error occurred while rebooting a machine"""
+ pass
class AutoservSubcommandError(AutoservError):
- """Indicates an error while executing a (forked) subcommand"""
- def __init__(self, func, exit_code):
- AutoservError.__init__(self, func, exit_code)
- self.func = func
- self.exit_code = exit_code
-
- def __str__(self):
- return ("Subcommand %s failed with exit code %d" %
- (self.func, self.exit_code))
+ """Indicates an error while executing a (forked) subcommand"""
+ def __init__(self, func, exit_code):
+ AutoservError.__init__(self, func, exit_code)
+ self.func = func
+ self.exit_code = exit_code
+
+ def __str__(self):
+ return ("Subcommand %s failed with exit code %d" %
+ (self.func, self.exit_code))
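A short sketch of how client code is expected to use this hierarchy, with TestNAError for "not applicable on this machine" and TestError for a genuine test failure; the check_device() helper below is purely illustrative and assumes client/common_lib is importable.

    import error

    def check_device(device):
        if device is None:
            # Meant to be reported as not-applicable rather than a failure.
            raise error.TestNAError('no suitable block device found')
        if not device.startswith('/dev/'):
            # Fails this test only; a JobError would instead abort the whole job.
            raise error.TestError('unexpected device path: %s' % device)

    try:
        check_device('sda0')
    except error.TestError, e:
        print "test failed:", e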
diff --git a/client/common_lib/global_config.py b/client/common_lib/global_config.py
index 1036e3da..8db02e18 100644
--- a/client/common_lib/global_config.py
+++ b/client/common_lib/global_config.py
@@ -11,115 +11,115 @@ import ConfigParser
import error
dirname = os.path.dirname(sys.modules[__name__].__file__)
-DEFAULT_CONFIG_FILE = os.path.abspath(os.path.join(dirname,
- "../../global_config.ini"))
-DEFAULT_SHADOW_FILE = os.path.abspath(os.path.join(dirname,
- "../../shadow_config.ini"))
-
+DEFAULT_CONFIG_FILE = os.path.abspath(os.path.join(dirname,
+ "../../global_config.ini"))
+DEFAULT_SHADOW_FILE = os.path.abspath(os.path.join(dirname,
+ "../../shadow_config.ini"))
+
class ConfigError(error.AutotestError):
- pass
+ pass
class ConfigValueError(ConfigError):
- pass
+ pass
class global_config(object):
- config = None
- config_file = DEFAULT_CONFIG_FILE
- shadow_file = DEFAULT_SHADOW_FILE
-
-
- def set_config_files(self, config_file=DEFAULT_CONFIG_FILE,
- shadow_file=DEFAULT_SHADOW_FILE):
- self.config_file = config_file
- self.shadow_file = shadow_file
- self.config = None
-
-
- def get_config_value(self, section, key, type=str, default=None):
- if self.config == None:
- self.parse_config_file()
-
- try:
- val = self.config.get(section, key)
- except:
- if default == None:
- msg = ("Value '%s' not found in section '%s'" %
- (key, section))
- raise ConfigError(msg)
- else:
- return default
-
- return self.convert_value(key, section, val, type, default)
-
-
- def merge_configs(self, shadow_config):
- # overwrite whats in config with whats in shadow_config
- sections = shadow_config.sections()
- for section in sections:
- # add the section if need be
- if not self.config.has_section(section):
- self.config.add_section(section)
- # now run through all options and set them
- options = shadow_config.options(section)
- for option in options:
- val = shadow_config.get(section, option)
- self.config.set(section, option, val)
-
-
- def parse_config_file(self):
- if not os.path.exists(self.config_file):
- raise ConfigError('%s not found' % (self.config_file))
- self.config = ConfigParser.ConfigParser()
- self.config.read(self.config_file)
-
- # now also read the shadow file if there is one
- # this will overwrite anything that is found in the
- # other config
- if os.path.exists(self.shadow_file):
- shadow_config = ConfigParser.ConfigParser()
- shadow_config.read(self.shadow_file)
- # now we merge shadow into global
- self.merge_configs(shadow_config)
-
-
- # the values that are pulled from ini
- # are strings. But we should attempt to
- # convert them to other types if needed.
- def convert_value(self, key, section, value, type, default):
- # strip off leading and trailing white space
- sval = value.strip()
-
- # if length of string is zero then return None
- if len(sval) == 0:
- if type == str:
- return ""
- elif type == bool:
- return False
- elif type == int:
- return 0
- elif type == float:
- return 0.0
- else:
- return None
-
- if type == bool:
- if sval.lower() == "false":
- return False
- else:
- return True
-
- try:
- conv_val = type(sval)
- return conv_val
- except:
- msg = ("Could not covert %s in section %s" %
- (key, section))
- raise ConfigValueError(msg)
-
-
-# insure the class is a singleton. Now the symbol global_config
+ config = None
+ config_file = DEFAULT_CONFIG_FILE
+ shadow_file = DEFAULT_SHADOW_FILE
+
+
+ def set_config_files(self, config_file=DEFAULT_CONFIG_FILE,
+ shadow_file=DEFAULT_SHADOW_FILE):
+ self.config_file = config_file
+ self.shadow_file = shadow_file
+ self.config = None
+
+
+ def get_config_value(self, section, key, type=str, default=None):
+ if self.config == None:
+ self.parse_config_file()
+
+ try:
+ val = self.config.get(section, key)
+ except:
+ if default == None:
+ msg = ("Value '%s' not found in section '%s'" %
+ (key, section))
+ raise ConfigError(msg)
+ else:
+ return default
+
+ return self.convert_value(key, section, val, type, default)
+
+
+ def merge_configs(self, shadow_config):
+ # overwrite what's in config with what's in shadow_config
+ sections = shadow_config.sections()
+ for section in sections:
+ # add the section if need be
+ if not self.config.has_section(section):
+ self.config.add_section(section)
+ # now run through all options and set them
+ options = shadow_config.options(section)
+ for option in options:
+ val = shadow_config.get(section, option)
+ self.config.set(section, option, val)
+
+
+ def parse_config_file(self):
+ if not os.path.exists(self.config_file):
+ raise ConfigError('%s not found' % (self.config_file))
+ self.config = ConfigParser.ConfigParser()
+ self.config.read(self.config_file)
+
+ # now also read the shadow file if there is one
+ # this will overwrite anything that is found in the
+ # other config
+ if os.path.exists(self.shadow_file):
+ shadow_config = ConfigParser.ConfigParser()
+ shadow_config.read(self.shadow_file)
+ # now we merge shadow into global
+ self.merge_configs(shadow_config)
+
+
+ # the values that are pulled from ini
+ # are strings. But we should attempt to
+ # convert them to other types if needed.
+ def convert_value(self, key, section, value, type, default):
+ # strip off leading and trailing white space
+ sval = value.strip()
+
+ # if length of string is zero then return None
+ if len(sval) == 0:
+ if type == str:
+ return ""
+ elif type == bool:
+ return False
+ elif type == int:
+ return 0
+ elif type == float:
+ return 0.0
+ else:
+ return None
+
+ if type == bool:
+ if sval.lower() == "false":
+ return False
+ else:
+ return True
+
+ try:
+ conv_val = type(sval)
+ return conv_val
+ except:
+ msg = ("Could not convert %s in section %s" %
+ (key, section))
+ raise ConfigValueError(msg)
+
+
+# ensure the class is a singleton. Now the symbol global_config
# will point to the one and only one instance of the class
global_config = global_config()
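A minimal usage sketch of the singleton above, assuming a global_config.ini is present; the section and key names are invented for illustration, and any matching entry in shadow_config.ini silently overrides the value read from global_config.ini.

    import global_config

    conf = global_config.global_config    # the module-level singleton instance

    # A missing key with no default raises ConfigError; with a default,
    # the default is returned as-is.
    port = conf.get_config_value("DATABASE", "port", type=int, default=3306)

    # For bool, a non-empty value other than "false" (any case) reads as True.
    debug = conf.get_config_value("DATABASE", "debug", type=bool, default=False)

    print port, debug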
diff --git a/client/common_lib/global_config_unittest.py b/client/common_lib/global_config_unittest.py
index e41d7093..dab68257 100644
--- a/client/common_lib/global_config_unittest.py
+++ b/client/common_lib/global_config_unittest.py
@@ -30,90 +30,90 @@ value_1: somebody@remotehost
def create_config_files():
- (fp, global_file) = tempfile.mkstemp(".ini", text=True)
- os.write(fp, global_config_ini_contents)
- os.close(fp)
-
- (fp, shadow_file) = tempfile.mkstemp(".ini", text=True)
- os.write(fp, shadow_config_ini_contents)
- os.close(fp)
-
- return (global_file, shadow_file)
+ (fp, global_file) = tempfile.mkstemp(".ini", text=True)
+ os.write(fp, global_config_ini_contents)
+ os.close(fp)
+
+ (fp, shadow_file) = tempfile.mkstemp(".ini", text=True)
+ os.write(fp, shadow_config_ini_contents)
+ os.close(fp)
+
+ return (global_file, shadow_file)
class global_config_test(unittest.TestCase):
- # grab the singelton
- conf = global_config.global_config
-
- def setUp(self):
- # set the config files to our test files
- (self.global_file, self.shadow_file) = create_config_files()
- self.conf.set_config_files(self.global_file, self.shadow_file)
-
-
- def tearDown(self):
- os.remove(self.global_file)
- os.remove(self.shadow_file)
- self.conf.set_config_files(global_config.DEFAULT_CONFIG_FILE,
- global_config.DEFAULT_SHADOW_FILE)
-
-
- def testFloat(self):
- val = self.conf.get_config_value("SECTION_A", "value_1", float)
- self.assertEquals(type(val), types.FloatType)
- self.assertEquals(val, 6.0)
-
-
- def testInt(self):
- val = self.conf.get_config_value("SECTION_B", "value_1", int)
- self.assertEquals(type(val), types.IntType)
- self.assertTrue(val < 0)
- val = self.conf.get_config_value("SECTION_B", "value_3", int)
- self.assertEquals(val, 0)
- val = self.conf.get_config_value("SECTION_B", "value_4", int)
- self.assertTrue(val > 0)
-
-
- def testString(self):
- val = self.conf.get_config_value("SECTION_A", "value_2")
- self.assertEquals(type(val),types.StringType)
- self.assertEquals(val, "hello")
-
-
- def testOverride(self):
- val = self.conf.get_config_value("SECTION_C", "value_1")
- self.assertEquals(val, "somebody@remotehost")
-
-
- def testException(self):
- error = 0
- try:
- val = self.conf.get_config_value("SECTION_B",
- "value_2", int)
- except:
- error = 1
- self.assertEquals(error, 1)
-
-
- def testBoolean(self):
- val = self.conf.get_config_value("SECTION_A", "value_3", bool)
- self.assertEquals(val, True)
- val = self.conf.get_config_value("SECTION_A", "value_4", bool)
- self.assertEquals(val, False)
- val = self.conf.get_config_value("SECTION_A", "value_5", bool)
- self.assertEquals(val, True)
- val = self.conf.get_config_value("SECTION_A", "value_6", bool)
- self.assertEquals(val, False)
-
-
- def testDefaults(self):
- val = self.conf.get_config_value("MISSING", "foo", float, 3.6)
- self.assertEquals(val, 3.6)
- val = self.conf.get_config_value("SECTION_A", "novalue", str,
- "default")
- self.assertEquals(val, "default")
+ # grab the singleton
+ conf = global_config.global_config
+
+ def setUp(self):
+ # set the config files to our test files
+ (self.global_file, self.shadow_file) = create_config_files()
+ self.conf.set_config_files(self.global_file, self.shadow_file)
+
+
+ def tearDown(self):
+ os.remove(self.global_file)
+ os.remove(self.shadow_file)
+ self.conf.set_config_files(global_config.DEFAULT_CONFIG_FILE,
+ global_config.DEFAULT_SHADOW_FILE)
+
+
+ def testFloat(self):
+ val = self.conf.get_config_value("SECTION_A", "value_1", float)
+ self.assertEquals(type(val), types.FloatType)
+ self.assertEquals(val, 6.0)
+
+
+ def testInt(self):
+ val = self.conf.get_config_value("SECTION_B", "value_1", int)
+ self.assertEquals(type(val), types.IntType)
+ self.assertTrue(val < 0)
+ val = self.conf.get_config_value("SECTION_B", "value_3", int)
+ self.assertEquals(val, 0)
+ val = self.conf.get_config_value("SECTION_B", "value_4", int)
+ self.assertTrue(val > 0)
+
+
+ def testString(self):
+ val = self.conf.get_config_value("SECTION_A", "value_2")
+ self.assertEquals(type(val),types.StringType)
+ self.assertEquals(val, "hello")
+
+
+ def testOverride(self):
+ val = self.conf.get_config_value("SECTION_C", "value_1")
+ self.assertEquals(val, "somebody@remotehost")
+
+
+ def testException(self):
+ error = 0
+ try:
+ val = self.conf.get_config_value("SECTION_B",
+ "value_2", int)
+ except:
+ error = 1
+ self.assertEquals(error, 1)
+
+
+ def testBoolean(self):
+ val = self.conf.get_config_value("SECTION_A", "value_3", bool)
+ self.assertEquals(val, True)
+ val = self.conf.get_config_value("SECTION_A", "value_4", bool)
+ self.assertEquals(val, False)
+ val = self.conf.get_config_value("SECTION_A", "value_5", bool)
+ self.assertEquals(val, True)
+ val = self.conf.get_config_value("SECTION_A", "value_6", bool)
+ self.assertEquals(val, False)
+
+
+ def testDefaults(self):
+ val = self.conf.get_config_value("MISSING", "foo", float, 3.6)
+ self.assertEquals(val, 3.6)
+ val = self.conf.get_config_value("SECTION_A", "novalue", str,
+ "default")
+ self.assertEquals(val, "default")
# this is so the test can be run in standalone mode
if __name__ == '__main__':
- unittest.main()
+ unittest.main()
diff --git a/client/common_lib/logging.py b/client/common_lib/logging.py
index 004975a2..64b80860 100644
--- a/client/common_lib/logging.py
+++ b/client/common_lib/logging.py
@@ -8,48 +8,48 @@ __author__ = 'jadmanski@google.com (John Admanski)'
job_statuses = ["TEST_NA", "ABORT", "ERROR", "FAIL", "WARN", "GOOD", "ALERT",
- "NOSTATUS"]
+ "NOSTATUS"]
def is_valid_status(status):
- if not re.match(r'(START|(END )?(GOOD|WARN|FAIL|ABORT|TEST_NA))$',
- status):
- return False
- else:
- return True
+ if not re.match(r'(START|(END )?(GOOD|WARN|FAIL|ABORT|TEST_NA))$',
+ status):
+ return False
+ else:
+ return True
def record(fn):
- """
- Generic method decorator for logging calls under the
- assumption that return=GOOD, exception=FAIL. The method
- determines parameters as:
- subdir = self.subdir if it exists, or None
- operation = "class name"."method name"
- status = None on GOOD, str(exception) on FAIL
- The object using this method must have a job attribute
- for the logging to actually occur, otherwise the logging
- will silently fail.
-
- Logging can explicitly be disabled for a call by passing
- a logged=False parameter
- """
- def recorded_func(self, *args, **dargs):
- logged = dargs.pop('logged', True)
- job = getattr(self, 'job', None)
- # if logging is disabled/unavailable, just
- # call the method
- if not logged or job is None:
- return fn(self, *args, **dargs)
- # logging is available, so wrap the method call
- # in success/failure logging
- subdir = getattr(self, 'subdir', None)
- operation = '%s.%s' % (self.__class__.__name__,
- fn.__name__)
- try:
- result = fn(self, *args, **dargs)
- job.record('GOOD', subdir, operation)
- except Exception, detail:
- job.record('FAIL', subdir, operation, str(detail))
- raise
- return result
- return recorded_func
+ """
+ Generic method decorator for logging calls under the
+ assumption that return=GOOD, exception=FAIL. The method
+ determines parameters as:
+ subdir = self.subdir if it exists, or None
+ operation = "class name"."method name"
+ status = None on GOOD, str(exception) on FAIL
+ The object using this method must have a job attribute
+ for the logging to actually occur, otherwise the logging
+ will silently fail.
+
+ Logging can explicitly be disabled for a call by passing
+ a logged=False parameter
+ """
+ def recorded_func(self, *args, **dargs):
+ logged = dargs.pop('logged', True)
+ job = getattr(self, 'job', None)
+ # if logging is disabled/unavailable, just
+ # call the method
+ if not logged or job is None:
+ return fn(self, *args, **dargs)
+ # logging is available, so wrap the method call
+ # in success/failure logging
+ subdir = getattr(self, 'subdir', None)
+ operation = '%s.%s' % (self.__class__.__name__,
+ fn.__name__)
+ try:
+ result = fn(self, *args, **dargs)
+ job.record('GOOD', subdir, operation)
+ except Exception, detail:
+ job.record('FAIL', subdir, operation, str(detail))
+ raise
+ return result
+ return recorded_func
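A hedged sketch of how the record decorator above is meant to be applied: the instance provides a job attribute (and optionally subdir), each decorated call is logged as GOOD on return or FAIL on exception, and passing logged=False skips the logging for that call. The class below is illustrative and assumes this common_lib logging module, not the stdlib one, is the one imported.

    import logging    # client/common_lib/logging.py, as patched above

    class kernel_build(object):
        def __init__(self, job):
            self.job = job           # required, or the decorator just calls through
            self.subdir = 'build'    # optional; used as the status subdir

        @logging.record
        def configure(self, config_file):
            # A normal return records GOOD with subdir 'build' and operation
            # 'kernel_build.configure'; an exception records FAIL with
            # str(exception) and then re-raises.
            pass

    # kernel_build(my_job).configure('.config', logged=False)  # skip logging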
diff --git a/client/common_lib/mail.py b/client/common_lib/mail.py
index 697c209a..0c641dda 100644
--- a/client/common_lib/mail.py
+++ b/client/common_lib/mail.py
@@ -1,39 +1,39 @@
-import os, email.Message, smtplib
+import os, email.Message, smtplib
def send(from_address, to_addresses, cc_addresses, subject, message_body):
- """
- Send out a plain old text email. It uses sendmail by default, but
- if that fails then it falls back to using smtplib.
+ """
+ Send out a plain old text email. It uses sendmail by default, but
+ if that fails then it falls back to using smtplib.
- Args:
- from_address: the email address to put in the "From:" field
- to_addresses: either a single string or an iterable of
- strings to put in the "To:" field of the email
- cc_addresses: either a single string of an iterable of
- strings to put in the "Cc:" field of the email
- subject: the email subject
- message_body: the body of the email. there's no special
- handling of encoding here, so it's safest to
- stick to 7-bit ASCII text
- """
- # addresses can be a tuple or a single string, so make them tuples
- if isinstance(to_addresses, str):
- to_addresses = [to_addresses]
- else:
- to_addresses = list(to_addresses)
- if isinstance(cc_addresses, str):
- cc_addresses = [cc_addresses]
- else:
- cc_addresses = list(cc_addresses)
+ Args:
+ from_address: the email address to put in the "From:" field
+ to_addresses: either a single string or an iterable of
+ strings to put in the "To:" field of the email
+ cc_addresses: either a single string or an iterable of
+ strings to put in the "Cc:" field of the email
+ subject: the email subject
+ message_body: the body of the email. there's no special
+ handling of encoding here, so it's safest to
+ stick to 7-bit ASCII text
+ """
+ # addresses can be a tuple or a single string, so make them tuples
+ if isinstance(to_addresses, str):
+ to_addresses = [to_addresses]
+ else:
+ to_addresses = list(to_addresses)
+ if isinstance(cc_addresses, str):
+ cc_addresses = [cc_addresses]
+ else:
+ cc_addresses = list(cc_addresses)
- message = email.Message.Message()
- message["To"] = ", ".join(to_addresses)
- message["Cc"] = ", ".join(cc_addresses)
- message["From"] = from_address
- message["Subject"] = subject
- message.set_payload(message_body)
+ message = email.Message.Message()
+ message["To"] = ", ".join(to_addresses)
+ message["Cc"] = ", ".join(cc_addresses)
+ message["From"] = from_address
+ message["Subject"] = subject
+ message.set_payload(message_body)
- server = smtplib.SMTP("localhost")
- server.sendmail(from_address, to_addresses + cc_addresses, message.as_string())
- server.quit()
+ server = smtplib.SMTP("localhost")
+ server.sendmail(from_address, to_addresses + cc_addresses, message.as_string())
+ server.quit()
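For completeness, a one-call usage sketch (the addresses are placeholders and a local SMTP server on localhost is assumed); both the To: and Cc: arguments may be a single string or any iterable of strings, as the docstring above notes.

    import mail

    mail.send("autotest@example.com",                      # From:
              ["dev1@example.com", "dev2@example.com"],    # To: (iterable)
              "qa@example.com",                            # Cc: (single string)
              "nightly kernel results",
              "Build and boot completed without errors.")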
diff --git a/client/common_lib/mail_unittest.py b/client/common_lib/mail_unittest.py
index 95de7827..cac7ee6d 100644
--- a/client/common_lib/mail_unittest.py
+++ b/client/common_lib/mail_unittest.py
@@ -4,66 +4,66 @@ import unittest
import mail, email.Message
class test_data:
- mail_host = None
- mail_port = None
- mail_connect = False
- mail_from_address = None
- mail_to_address = None
- mail_message = None
+ mail_host = None
+ mail_port = None
+ mail_connect = False
+ mail_from_address = None
+ mail_to_address = None
+ mail_message = None
# we define our needed mock SMTP
class SMTP:
- def __init__(self, host=None, port=25):
- test_data.mail_host = host
- test_data.mail_port = port
-
- if test_data.mail_host:
- self.connect(test_data.mail_host, test_data.mail_port)
-
-
- def connect(self, host, port):
- test_data.mail_connect = True
+ def __init__(self, host=None, port=25):
+ test_data.mail_host = host
+ test_data.mail_port = port
+ if test_data.mail_host:
+ self.connect(test_data.mail_host, test_data.mail_port)
- def quit(self):
- test_data.mail_connect = False
+ def connect(self, host, port):
+ test_data.mail_connect = True
- def sendmail(self, from_address, to_address, message):
- test_data.mail_from_address = from_address
- test_data.mail_to_address = to_address
- test_data.mail_message = message
+
+ def quit(self):
+ test_data.mail_connect = False
+
+
+ def sendmail(self, from_address, to_address, message):
+ test_data.mail_from_address = from_address
+ test_data.mail_to_address = to_address
+ test_data.mail_message = message
class mail_test(unittest.TestCase):
- cached_SMTP = None
-
- def setUp(self):
- # now perform the slip
- self.cached_SMTP = mail.smtplib.SMTP
- mail.smtplib.SMTP = SMTP
-
-
- def tearDown(self):
- # now put things back
- mail.smtplib.SMTP = self.cached_SMTP
-
-
- def test_send_message(self):
- message = email.Message.Message()
- message["To"] = "you"
- message["Cc"] = "them"
- message["From"] = "me"
- message["Subject"] = "hello"
- message.set_payload("Hello everybody!")
-
- mail.send("me", "you", "them", "hello", "Hello everybody!")
- self.assertEquals("me", test_data.mail_from_address)
- self.assertEquals(["you","them"], test_data.mail_to_address)
- self.assertEquals(message.as_string(), test_data.mail_message)
-
+ cached_SMTP = None
+
+ def setUp(self):
+ # now perform the slip
+ self.cached_SMTP = mail.smtplib.SMTP
+ mail.smtplib.SMTP = SMTP
+
+
+ def tearDown(self):
+ # now put things back
+ mail.smtplib.SMTP = self.cached_SMTP
+
+
+ def test_send_message(self):
+ message = email.Message.Message()
+ message["To"] = "you"
+ message["Cc"] = "them"
+ message["From"] = "me"
+ message["Subject"] = "hello"
+ message.set_payload("Hello everybody!")
+
+ mail.send("me", "you", "them", "hello", "Hello everybody!")
+ self.assertEquals("me", test_data.mail_from_address)
+ self.assertEquals(["you","them"], test_data.mail_to_address)
+ self.assertEquals(message.as_string(), test_data.mail_message)
+
# this is so the test can be run in standalone mode
if __name__ == '__main__':
- unittest.main()
+ unittest.main()
diff --git a/client/common_lib/pexpect.py b/client/common_lib/pexpect.py
index 67c6389f..260c1d8b 100644
--- a/client/common_lib/pexpect.py
+++ b/client/common_lib/pexpect.py
@@ -336,12 +336,12 @@ class spawn (object):
the input from the child and output sent to the child. Sometimes you
don't want to see everything you write to the child. You only want to
log what the child sends back. For example::
-
+
child = pexpect.spawn('some_command')
child.logfile_read = sys.stdout
To separately log output sent to the child use logfile_send::
-
+
self.logfile_send = fout
The delaybeforesend helps overcome a weird behavior that many users
@@ -704,7 +704,7 @@ class spawn (object):
if timeout == -1:
timeout = self.timeout
if timeout is not None:
- end_time = time.time() + timeout
+ end_time = time.time() + timeout
while True:
if not self.getecho():
return True
@@ -1355,7 +1355,7 @@ class spawn (object):
if timeout == -1:
timeout = self.timeout
if timeout is not None:
- end_time = time.time() + timeout
+ end_time = time.time() + timeout
if searchwindowsize == -1:
searchwindowsize = self.searchwindowsize
@@ -1653,7 +1653,7 @@ class searcher_string (object):
# rescanning until we've read three more bytes.
#
# Sadly, I don't know enough about this interesting topic. /grahn
-
+
for index, s in self._strings:
if searchwindowsize is None:
# the match, if any, can only be in the fresh data,
@@ -1732,7 +1732,7 @@ class searcher_re (object):
'buffer' which have not been searched before.
See class spawn for the 'searchwindowsize' argument.
-
+
If there is a match this returns the index of that string, and sets
'start', 'end' and 'match'. Otherwise, returns -1."""
diff --git a/client/common_lib/pxssh.py b/client/common_lib/pxssh.py
index f8add8d2..1e5a6a49 100644
--- a/client/common_lib/pxssh.py
+++ b/client/common_lib/pxssh.py
@@ -30,10 +30,10 @@ class pxssh (spawn):
shells.
Example that runs a few commands on a remote server and prints the result::
-
+
import pxssh
import getpass
- try:
+ try:
s = pxssh.pxssh()
hostname = raw_input('hostname: ')
username = raw_input('username: ')
@@ -74,7 +74,7 @@ class pxssh (spawn):
spawn.__init__(self, None, timeout=timeout, maxread=maxread, searchwindowsize=searchwindowsize, logfile=logfile, cwd=cwd, env=env)
self.name = '<pxssh>'
-
+
#SUBTLE HACK ALERT! Note that the command to set the prompt uses a
#slightly different string than the regular expression to match it. This
#is because when you set the prompt the command will echo back, but we
@@ -98,7 +98,7 @@ class pxssh (spawn):
# Unsetting SSH_ASKPASS on the remote side doesn't disable it! Annoying!
#self.SSH_OPTS = "-x -o'RSAAuthentication=no' -o 'PubkeyAuthentication=no'"
self.force_password = False
- self.auto_prompt_reset = True
+ self.auto_prompt_reset = True
def levenshtein_distance(self, a,b):
@@ -173,7 +173,7 @@ class pxssh (spawn):
to guess when we have reached the prompt. Then we hope for the best and
blindly try to reset the prompt to something more unique. If that fails
then login() raises an ExceptionPxssh exception.
-
+
In some situations it is not possible or desirable to reset the
original prompt. In this case, set 'auto_prompt_reset' to False to
inhibit setting the prompt to the UNIQUE_PROMPT. Remember that pxssh
@@ -194,7 +194,7 @@ class pxssh (spawn):
i = self.expect(["(?i)are you sure you want to continue connecting", original_prompt, "(?i)(?:password)|(?:passphrase for key)", "(?i)permission denied", "(?i)terminal type", TIMEOUT, "(?i)connection closed by remote host"], timeout=login_timeout)
# First phase
- if i==0:
+ if i==0:
# New certificate -- always accept it.
# This is what you get if SSH does not have the remote host's
# public key stored in the 'known_hosts' cache.
@@ -212,14 +212,14 @@ class pxssh (spawn):
# This is weird. This should not happen twice in a row.
self.close()
raise ExceptionPxssh ('Weird error. Got "are you sure" prompt twice.')
- elif i==1: # can occur if you have a public key pair set to authenticate.
+ elif i==1: # can occur if you have a public key pair set to authenticate.
### TODO: May NOT be OK if expect() got tricked and matched a false prompt.
pass
elif i==2: # password prompt again
# For incorrect passwords, some ssh servers will
# ask for the password again, others return 'denied' right away.
# If we get the password prompt again then this means
- # we didn't get the password right the first time.
+ # we didn't get the password right the first time.
self.close()
raise ExceptionPxssh ('password refused')
elif i==3: # permission denied -- password was bad.
@@ -239,7 +239,7 @@ class pxssh (spawn):
elif i==6: # Connection closed by remote host
self.close()
raise ExceptionPxssh ('connection closed')
- else: # Unexpected
+ else: # Unexpected
self.close()
raise ExceptionPxssh ('unexpected login response')
if not self.synch_original_prompt():
@@ -278,7 +278,7 @@ class pxssh (spawn):
if i==1:
return False
return True
-
+
def set_unique_prompt (self):
"""This sets the remote prompt to something more unique than # or $.
diff --git a/client/common_lib/test.py b/client/common_lib/test.py
index 52aaf542..dc793711 100644
--- a/client/common_lib/test.py
+++ b/client/common_lib/test.py
@@ -1,20 +1,20 @@
# Shell class for a test, inherited by all individual tests
#
# Methods:
-# __init__ initialise
-# initialize run once for each job
-# setup run once for each new version of the test installed
-# run run the test (wrapped by job.run_test())
+# __init__ initialise
+# initialize run once for each job
+# setup run once for each new version of the test installed
+# run run the test (wrapped by job.run_test())
#
# Data:
-# job backreference to the job this test instance is part of
-# outputdir eg. results/<job>/<testname.tag>
-# resultsdir eg. results/<job>/<testname.tag>/results
-# profdir eg. results/<job>/<testname.tag>/profiling
-# debugdir eg. results/<job>/<testname.tag>/debug
-# bindir eg. tests/<test>
-# src eg. tests/<test>/src
-# tmpdir eg. tmp/<testname.tag>
+# job backreference to the job this test instance is part of
+# outputdir eg. results/<job>/<testname.tag>
+# resultsdir eg. results/<job>/<testname.tag>/results
+# profdir eg. results/<job>/<testname.tag>/profiling
+# debugdir eg. results/<job>/<testname.tag>/debug
+# bindir eg. tests/<test>
+# src eg. tests/<test>/src
+# tmpdir eg. tmp/<testname.tag>
import os, sys, re, fcntl, shutil, tarfile, warnings
@@ -22,243 +22,243 @@ from autotest_lib.client.common_lib import error, utils
class base_test:
- preserve_srcdir = False
+ preserve_srcdir = False
- def __init__(self, job, bindir, outputdir):
- self.job = job
- self.autodir = job.autodir
+ def __init__(self, job, bindir, outputdir):
+ self.job = job
+ self.autodir = job.autodir
- self.outputdir = outputdir
- tagged_testname = os.path.basename(self.outputdir)
- self.resultsdir = os.path.join(self.outputdir, 'results')
- os.mkdir(self.resultsdir)
- self.profdir = os.path.join(self.outputdir, 'profiling')
- os.mkdir(self.profdir)
- self.debugdir = os.path.join(self.outputdir, 'debug')
- os.mkdir(self.debugdir)
- self.bindir = bindir
- if hasattr(job, 'libdir'):
- self.libdir = job.libdir
- self.srcdir = os.path.join(self.bindir, 'src')
-
- self.tmpdir = os.path.join(job.tmpdir, tagged_testname)
-
- if os.path.exists(self.tmpdir):
- shutil.rmtree(self.tmpdir)
- os.mkdir(self.tmpdir)
-
- self.job.stdout.tee_redirect(
- os.path.join(self.debugdir, 'stdout'))
- self.job.stderr.tee_redirect(
- os.path.join(self.debugdir, 'stderr'))
- try:
- self.initialize()
- # compile and install the test, if needed.
- utils.update_version(self.srcdir, self.preserve_srcdir,
- self.version, self.setup)
- finally:
- self.job.stderr.restore()
- self.job.stdout.restore()
-
-
- def assert_(self, expr, msg='Assertion failed.'):
- if not expr:
- raise error.TestError(msg)
-
-
- def write_test_keyval(self, attr_dict):
- utils.write_keyval(self.outputdir, attr_dict)
-
-
- @staticmethod
- def _append_type_to_keys(dictionary, typename):
- new_dict = {}
- for key, value in dictionary.iteritems():
- new_key = "%s{%s}" % (key, typename)
- new_dict[new_key] = value
- return new_dict
-
-
- def write_iteration_keyval(self, attr_dict, perf_dict):
- attr_dict = self._append_type_to_keys(attr_dict, "attr")
- perf_dict = self._append_type_to_keys(perf_dict, "perf")
-
- utils.write_keyval(self.resultsdir, attr_dict,
- type_tag="attr")
- utils.write_keyval(self.resultsdir, perf_dict,
- type_tag="perf")
-
- keyval_path = os.path.join(self.resultsdir, "keyval")
- print >> open(keyval_path, "a"), ""
-
-
- # TODO: deprecate, remove from code in favour of
- # the write_*_keyval methods
- def write_keyval(self, dictionary):
- warnings.warn("test.write_keyval is deprecated, use "
- "test.write_test_keyval or "
- "test.write_iteration_keyval instead",
- DeprecationWarning)
- self.write_iteration_keyval({}, dictionary)
-
-
- def initialize(self):
- pass
-
-
- def setup(self):
- pass
-
-
- def cleanup(self):
- pass
-
-
- def _exec(self, args, dargs):
- try:
- self.job.stdout.tee_redirect(
- os.path.join(self.debugdir, 'stdout'))
- self.job.stderr.tee_redirect(
- os.path.join(self.debugdir, 'stderr'))
-
- try:
- os.chdir(self.outputdir)
- dargs = dargs.copy()
- keyvals = dargs.pop('test_attributes', dict())
- keyvals = keyvals.copy()
- keyvals['version'] = self.version
- self.write_test_keyval(keyvals)
-
- self.execute(*args, **dargs)
- finally:
- self.cleanup()
- self.job.stderr.restore()
- self.job.stdout.restore()
- except error.AutotestError:
- raise
- except Exception, e:
- msg = "Unhandled %s error occured during test\n"
- msg %= str(e.__class__.__name__)
- raise error.UnhandledError(msg)
+ self.outputdir = outputdir
+ tagged_testname = os.path.basename(self.outputdir)
+ self.resultsdir = os.path.join(self.outputdir, 'results')
+ os.mkdir(self.resultsdir)
+ self.profdir = os.path.join(self.outputdir, 'profiling')
+ os.mkdir(self.profdir)
+ self.debugdir = os.path.join(self.outputdir, 'debug')
+ os.mkdir(self.debugdir)
+ self.bindir = bindir
+ if hasattr(job, 'libdir'):
+ self.libdir = job.libdir
+ self.srcdir = os.path.join(self.bindir, 'src')
+
+ self.tmpdir = os.path.join(job.tmpdir, tagged_testname)
+
+ if os.path.exists(self.tmpdir):
+ shutil.rmtree(self.tmpdir)
+ os.mkdir(self.tmpdir)
+
+ self.job.stdout.tee_redirect(
+ os.path.join(self.debugdir, 'stdout'))
+ self.job.stderr.tee_redirect(
+ os.path.join(self.debugdir, 'stderr'))
+ try:
+ self.initialize()
+ # compile and install the test, if needed.
+ utils.update_version(self.srcdir, self.preserve_srcdir,
+ self.version, self.setup)
+ finally:
+ self.job.stderr.restore()
+ self.job.stdout.restore()
+
+
+ def assert_(self, expr, msg='Assertion failed.'):
+ if not expr:
+ raise error.TestError(msg)
+
+
+ def write_test_keyval(self, attr_dict):
+ utils.write_keyval(self.outputdir, attr_dict)
+
+
+ @staticmethod
+ def _append_type_to_keys(dictionary, typename):
+ new_dict = {}
+ for key, value in dictionary.iteritems():
+ new_key = "%s{%s}" % (key, typename)
+ new_dict[new_key] = value
+ return new_dict
+
+
+ def write_iteration_keyval(self, attr_dict, perf_dict):
+ attr_dict = self._append_type_to_keys(attr_dict, "attr")
+ perf_dict = self._append_type_to_keys(perf_dict, "perf")
+
+ utils.write_keyval(self.resultsdir, attr_dict,
+ type_tag="attr")
+ utils.write_keyval(self.resultsdir, perf_dict,
+ type_tag="perf")
+
+ keyval_path = os.path.join(self.resultsdir, "keyval")
+ print >> open(keyval_path, "a"), ""
+
+
+ # TODO: deprecate, remove from code in favour of
+ # the write_*_keyval methods
+ def write_keyval(self, dictionary):
+ warnings.warn("test.write_keyval is deprecated, use "
+ "test.write_test_keyval or "
+ "test.write_iteration_keyval instead",
+ DeprecationWarning)
+ self.write_iteration_keyval({}, dictionary)
+
+
+ def initialize(self):
+ pass
+
+
+ def setup(self):
+ pass
+
+
+ def cleanup(self):
+ pass
+
+
+ def _exec(self, args, dargs):
+ try:
+ self.job.stdout.tee_redirect(
+ os.path.join(self.debugdir, 'stdout'))
+ self.job.stderr.tee_redirect(
+ os.path.join(self.debugdir, 'stderr'))
+
+ try:
+ os.chdir(self.outputdir)
+ dargs = dargs.copy()
+ keyvals = dargs.pop('test_attributes', dict())
+ keyvals = keyvals.copy()
+ keyvals['version'] = self.version
+ self.write_test_keyval(keyvals)
+
+ self.execute(*args, **dargs)
+ finally:
+ self.cleanup()
+ self.job.stderr.restore()
+ self.job.stdout.restore()
+ except error.AutotestError:
+ raise
+ except Exception, e:
+            msg = "Unhandled %s error occurred during test\n"
+ msg %= str(e.__class__.__name__)
+ raise error.UnhandledError(msg)
def testname(url):
- # Extract the testname from the test url.
- match = re.match('[^:]+://(.*)/([^/]*)$', url)
- if not match:
- return ('', url)
- (group, filename) = match.groups()
+ # Extract the testname from the test url.
+ match = re.match('[^:]+://(.*)/([^/]*)$', url)
+ if not match:
+ return ('', url)
+ (group, filename) = match.groups()
- # Generate the group prefix.
- group = re.sub(r'\W', '_', group)
+ # Generate the group prefix.
+ group = re.sub(r'\W', '_', group)
- # Drop the extension to get the raw test name.
- testname = re.sub(r'\.tgz', '', filename)
+ # Drop the extension to get the raw test name.
+ testname = re.sub(r'\.tgz', '', filename)
- return (group, testname)
+ return (group, testname)
def _installtest(job, url):
- (group, name) = testname(url)
-
- # Bail if the test is already installed
- group_dir = os.path.join(job.testdir, "download", group)
- if os.path.exists(os.path.join(group_dir, name)):
- return (group, name)
-
- # If the group directory is missing create it and add
- # an empty __init__.py so that sub-directories are
- # considered for import.
- if not os.path.exists(group_dir):
- os.mkdir(group_dir)
- f = file(os.path.join(group_dir, '__init__.py'), 'w+')
- f.close()
-
- print name + ": installing test url=" + url
- get_file(url, os.path.join(group_dir, 'test.tgz'))
- old_wd = os.getcwd()
- os.chdir(group_dir)
- tar = tarfile.open('test.tgz')
- for member in tar.getmembers():
- tar.extract(member)
- tar.close()
- os.chdir(old_wd)
- os.remove(os.path.join(group_dir, 'test.tgz'))
-
- # For this 'sub-object' to be importable via the name
- # 'group.name' we need to provide an __init__.py,
- # so link the main entry point to this.
- os.symlink(name + '.py', os.path.join(group_dir, name,
- '__init__.py'))
-
- # The test is now installed.
- return (group, name)
+ (group, name) = testname(url)
+
+ # Bail if the test is already installed
+ group_dir = os.path.join(job.testdir, "download", group)
+ if os.path.exists(os.path.join(group_dir, name)):
+ return (group, name)
+
+ # If the group directory is missing create it and add
+ # an empty __init__.py so that sub-directories are
+ # considered for import.
+ if not os.path.exists(group_dir):
+ os.mkdir(group_dir)
+ f = file(os.path.join(group_dir, '__init__.py'), 'w+')
+ f.close()
+
+ print name + ": installing test url=" + url
+ get_file(url, os.path.join(group_dir, 'test.tgz'))
+ old_wd = os.getcwd()
+ os.chdir(group_dir)
+ tar = tarfile.open('test.tgz')
+ for member in tar.getmembers():
+ tar.extract(member)
+ tar.close()
+ os.chdir(old_wd)
+ os.remove(os.path.join(group_dir, 'test.tgz'))
+
+ # For this 'sub-object' to be importable via the name
+ # 'group.name' we need to provide an __init__.py,
+ # so link the main entry point to this.
+ os.symlink(name + '.py', os.path.join(group_dir, name,
+ '__init__.py'))
+
+ # The test is now installed.
+ return (group, name)
def runtest(job, url, tag, args, dargs,
- local_namespace={}, global_namespace={}, after_test_hook=None):
- local_namespace = local_namespace.copy()
- global_namespace = global_namespace.copy()
-
- # if this is not a plain test name then download and install the
- # specified test
- if utils.is_url(url):
- (group, testname) = _installtest(job, url)
- bindir = os.path.join(job.testdir, 'download', group, testname)
- site_bindir = None
- else:
- # if the test is local, it can be found in either testdir
- # or site_testdir. tests in site_testdir override tests
- # defined in testdir
- (group, testname) = ('', url)
- bindir = os.path.join(job.testdir, group, testname)
- if hasattr(job, 'site_testdir'):
- site_bindir = os.path.join(job.site_testdir,
- group, testname)
- else:
- site_bindir = None
-
- outputdir = os.path.join(job.resultdir, testname)
- if tag:
- outputdir += '.' + tag
-
- # if we can find the test in site_bindir, use this version
- if site_bindir and os.path.exists(site_bindir):
- bindir = site_bindir
- testdir = job.site_testdir
- elif os.path.exists(bindir):
- testdir = job.testdir
- elif not os.path.exists(bindir):
- raise error.TestError(testname + ': test does not exist')
-
- if group:
- sys.path.insert(0, os.path.join(testdir, 'download'))
- group += '.'
- else:
- sys.path.insert(0, os.path.join(testdir, testname))
-
- local_namespace['job'] = job
- local_namespace['bindir'] = bindir
- local_namespace['outputdir'] = outputdir
-
- lockfile = open(os.path.join(job.tmpdir, '.testlock'), 'w')
- try:
- fcntl.flock(lockfile, fcntl.LOCK_EX)
- exec ("import %s%s" % (group, testname),
- local_namespace, global_namespace)
- exec ("mytest = %s%s.%s(job, bindir, outputdir)" %
- (group, testname, testname),
- local_namespace, global_namespace)
- finally:
- fcntl.flock(lockfile, fcntl.LOCK_UN)
- lockfile.close()
- sys.path.pop(0)
-
- pwd = os.getcwd()
- os.chdir(outputdir)
- try:
- mytest = global_namespace['mytest']
- mytest._exec(args, dargs)
- finally:
- if after_test_hook:
- after_test_hook(mytest)
+ local_namespace={}, global_namespace={}, after_test_hook=None):
+ local_namespace = local_namespace.copy()
+ global_namespace = global_namespace.copy()
+
+ # if this is not a plain test name then download and install the
+ # specified test
+ if utils.is_url(url):
+ (group, testname) = _installtest(job, url)
+ bindir = os.path.join(job.testdir, 'download', group, testname)
+ site_bindir = None
+ else:
+ # if the test is local, it can be found in either testdir
+ # or site_testdir. tests in site_testdir override tests
+ # defined in testdir
+ (group, testname) = ('', url)
+ bindir = os.path.join(job.testdir, group, testname)
+ if hasattr(job, 'site_testdir'):
+ site_bindir = os.path.join(job.site_testdir,
+ group, testname)
+ else:
+ site_bindir = None
+
+ outputdir = os.path.join(job.resultdir, testname)
+ if tag:
+ outputdir += '.' + tag
+
+ # if we can find the test in site_bindir, use this version
+ if site_bindir and os.path.exists(site_bindir):
+ bindir = site_bindir
+ testdir = job.site_testdir
+ elif os.path.exists(bindir):
+ testdir = job.testdir
+ elif not os.path.exists(bindir):
+ raise error.TestError(testname + ': test does not exist')
+
+ if group:
+ sys.path.insert(0, os.path.join(testdir, 'download'))
+ group += '.'
+ else:
+ sys.path.insert(0, os.path.join(testdir, testname))
+
+ local_namespace['job'] = job
+ local_namespace['bindir'] = bindir
+ local_namespace['outputdir'] = outputdir
+
+ lockfile = open(os.path.join(job.tmpdir, '.testlock'), 'w')
+ try:
+ fcntl.flock(lockfile, fcntl.LOCK_EX)
+ exec ("import %s%s" % (group, testname),
+ local_namespace, global_namespace)
+ exec ("mytest = %s%s.%s(job, bindir, outputdir)" %
+ (group, testname, testname),
+ local_namespace, global_namespace)
+ finally:
+ fcntl.flock(lockfile, fcntl.LOCK_UN)
+ lockfile.close()
+ sys.path.pop(0)
+
+ pwd = os.getcwd()
+ os.chdir(outputdir)
+ try:
+ mytest = global_namespace['mytest']
+ mytest._exec(args, dargs)
+ finally:
+ if after_test_hook:
+ after_test_hook(mytest)
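
To make the URL handling in runtest() concrete, here is what testname() yields
for the two cases it distinguishes (the URL and test names are illustrative):

    # downloadable test: the group comes from the URL path (non-word characters
    # become underscores) and the name from the tarball basename
    testname('http://myhost/tests/kernbench.tgz')
    #   -> ('myhost_tests', 'kernbench')

    # a plain local name does not match the URL pattern and falls through
    testname('kernbench')
    #   -> ('', 'kernbench')

runtest() then imports the test as 'group.name' for downloaded tests, or
directly by name for local ones.
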
diff --git a/client/common_lib/test_utils/mock.py b/client/common_lib/test_utils/mock.py
index 556e1658..9906c944 100644
--- a/client/common_lib/test_utils/mock.py
+++ b/client/common_lib/test_utils/mock.py
@@ -5,426 +5,426 @@ import re, collections, StringIO, sys
class CheckPlaybackError(Exception):
- 'Raised when mock playback does not match recorded calls.'
+ 'Raised when mock playback does not match recorded calls.'
class ExitException(Exception):
- 'Raised when the mocked sys.exit() is called'
- pass
+ 'Raised when the mocked sys.exit() is called'
+ pass
class argument_comparator(object):
- def is_satisfied_by(self, parameter):
- raise NotImplementedError
+ def is_satisfied_by(self, parameter):
+ raise NotImplementedError
class equality_comparator(argument_comparator):
- def __init__(self, value):
- self.value = value
+ def __init__(self, value):
+ self.value = value
- def is_satisfied_by(self, parameter):
- return parameter == self.value
+ def is_satisfied_by(self, parameter):
+ return parameter == self.value
- def __str__(self):
- return repr(self.value)
+ def __str__(self):
+ return repr(self.value)
class regex_comparator(argument_comparator):
- def __init__(self, pattern, flags=0):
- self.regex = re.compile(pattern, flags)
+ def __init__(self, pattern, flags=0):
+ self.regex = re.compile(pattern, flags)
- def is_satisfied_by(self, parameter):
- return self.regex.search(parameter) is not None
+ def is_satisfied_by(self, parameter):
+ return self.regex.search(parameter) is not None
- def __str__(self):
- return self.regex.pattern
+ def __str__(self):
+ return self.regex.pattern
class is_string_comparator(argument_comparator):
- def is_satisfied_by(self, parameter):
- return isinstance(parameter, basestring)
+ def is_satisfied_by(self, parameter):
+ return isinstance(parameter, basestring)
- def __str__(self):
- return "a string"
+ def __str__(self):
+ return "a string"
class is_instance_comparator(argument_comparator):
- def __init__(self, cls):
- self.cls = cls
+ def __init__(self, cls):
+ self.cls = cls
- def is_satisfied_by(self, parameter):
- return isinstance(parameter, self.cls)
+ def is_satisfied_by(self, parameter):
+ return isinstance(parameter, self.cls)
- def __str__(self):
- return "is a %s" % self.cls
+ def __str__(self):
+ return "is a %s" % self.cls
class function_map(object):
- def __init__(self, symbol, return_val, *args, **dargs):
- self.return_val = return_val
- self.args = []
- self.symbol = symbol
- for arg in args:
- if isinstance(arg, argument_comparator):
- self.args.append(arg)
- else:
- self.args.append(equality_comparator(arg))
+ def __init__(self, symbol, return_val, *args, **dargs):
+ self.return_val = return_val
+ self.args = []
+ self.symbol = symbol
+ for arg in args:
+ if isinstance(arg, argument_comparator):
+ self.args.append(arg)
+ else:
+ self.args.append(equality_comparator(arg))
- self.dargs = dargs
- self.error = None
+ self.dargs = dargs
+ self.error = None
- def and_return(self, return_val):
- self.return_val = return_val
+ def and_return(self, return_val):
+ self.return_val = return_val
- def and_raises(self, error):
- self.error = error
+ def and_raises(self, error):
+ self.error = error
- def match(self, *args, **dargs):
- if len(args) != len(self.args) or len(dargs) != len(self.dargs):
- return False
+ def match(self, *args, **dargs):
+ if len(args) != len(self.args) or len(dargs) != len(self.dargs):
+ return False
- for i, expected_arg in enumerate(self.args):
- if not expected_arg.is_satisfied_by(args[i]):
- return False
+ for i, expected_arg in enumerate(self.args):
+ if not expected_arg.is_satisfied_by(args[i]):
+ return False
- if self.dargs != dargs:
- return False
+ if self.dargs != dargs:
+ return False
- return True
+ return True
- def __str__(self):
- return _dump_function_call(self.symbol, self.args, self.dargs)
+ def __str__(self):
+ return _dump_function_call(self.symbol, self.args, self.dargs)
class mock_function(object):
- def __init__(self, symbol, default_return_val=None,
- record=None, playback=None):
- self.default_return_val = default_return_val
- self.num_calls = 0
- self.args = []
- self.dargs = []
- self.symbol = symbol
- self.record = record
- self.playback = playback
- self.__name__ = symbol
+ def __init__(self, symbol, default_return_val=None,
+ record=None, playback=None):
+ self.default_return_val = default_return_val
+ self.num_calls = 0
+ self.args = []
+ self.dargs = []
+ self.symbol = symbol
+ self.record = record
+ self.playback = playback
+ self.__name__ = symbol
- def __call__(self, *args, **dargs):
- self.num_calls += 1
- self.args.append(args)
- self.dargs.append(dargs)
- if self.playback:
- return self.playback(self.symbol, *args, **dargs)
- else:
- return self.default_return_val
+ def __call__(self, *args, **dargs):
+ self.num_calls += 1
+ self.args.append(args)
+ self.dargs.append(dargs)
+ if self.playback:
+ return self.playback(self.symbol, *args, **dargs)
+ else:
+ return self.default_return_val
- def expect_call(self, *args, **dargs):
- mapping = function_map(self.symbol, None, *args, **dargs)
- if self.record:
- self.record(mapping)
+ def expect_call(self, *args, **dargs):
+ mapping = function_map(self.symbol, None, *args, **dargs)
+ if self.record:
+ self.record(mapping)
- return mapping
+ return mapping
class mask_function(mock_function):
- def __init__(self, symbol, original_function, default_return_val=None,
- record=None, playback=None):
- super(mask_function, self).__init__(symbol,
- default_return_val,
- record, playback)
- self.original_function = original_function
+ def __init__(self, symbol, original_function, default_return_val=None,
+ record=None, playback=None):
+ super(mask_function, self).__init__(symbol,
+ default_return_val,
+ record, playback)
+ self.original_function = original_function
- def run_original_function(self, *args, **dargs):
- return self.original_function(*args, **dargs)
+ def run_original_function(self, *args, **dargs):
+ return self.original_function(*args, **dargs)
class mock_class(object):
- def __init__(self, cls, name, default_ret_val=None,
- record=None, playback=None):
- self.errors = []
- self.name = name
- self.record = record
- self.playback = playback
-
- for symbol in dir(cls):
- if symbol.startswith("_"):
- continue
-
- orig_symbol = getattr(cls, symbol)
- if callable(orig_symbol):
- f_name = "%s.%s" % (self.name, symbol)
- func = mock_function(f_name, default_ret_val,
- self.record, self.playback)
- setattr(self, symbol, func)
- else:
- setattr(self, symbol, orig_symbol)
+ def __init__(self, cls, name, default_ret_val=None,
+ record=None, playback=None):
+ self.errors = []
+ self.name = name
+ self.record = record
+ self.playback = playback
+
+ for symbol in dir(cls):
+ if symbol.startswith("_"):
+ continue
+
+ orig_symbol = getattr(cls, symbol)
+ if callable(orig_symbol):
+ f_name = "%s.%s" % (self.name, symbol)
+ func = mock_function(f_name, default_ret_val,
+ self.record, self.playback)
+ setattr(self, symbol, func)
+ else:
+ setattr(self, symbol, orig_symbol)
class mock_god:
- NONEXISTENT_ATTRIBUTE = object()
-
- def __init__(self, debug=False):
- """
- With debug=True, all recorded method calls will be printed as
- they happen.
- """
- self.recording = collections.deque()
- self.errors = []
- self._stubs = []
- self._debug = debug
-
-
- def create_mock_class_obj(self, cls, name, default_ret_val=None):
- record = self.__record_call
- playback = self.__method_playback
- errors = self.errors
-
- class cls_sub(cls):
- cls_count = 0
- creations = collections.deque()
-
- # overwrite the initializer
- def __init__(self, *args, **dargs):
- pass
-
-
- @classmethod
- def expect_new(typ, *args, **dargs):
- obj = typ.make_new(*args, **dargs)
- typ.creations.append(obj)
- return obj
-
-
- def __new__(typ, *args, **dargs):
- if len(typ.creations) == 0:
- msg = ("not expecting call to %s "
- "constructor" % (name))
- errors.append(msg)
- return None
- else:
- return typ.creations.popleft()
-
-
- @classmethod
- def make_new(typ, *args, **dargs):
- obj = super(cls_sub, typ).__new__(typ, *args,
- **dargs)
-
- typ.cls_count += 1
- obj_name = "%s_%s" % (name, typ.cls_count)
- for symbol in dir(obj):
- if (symbol.startswith("__") and
- symbol.endswith("__")):
- continue
-
- orig_symbol = getattr(obj, symbol)
- if callable(orig_symbol):
- f_name = ("%s.%s" %
- (obj_name, symbol))
- func = mock_function(f_name,
- default_ret_val,
- record,
- playback)
- setattr(obj, symbol, func)
- else:
- setattr(obj, symbol,
- orig_symbol)
-
- return obj
-
-
-
- return cls_sub
-
-
- def create_mock_class(self, cls, name, default_ret_val=None):
- """
- Given something that defines a namespace cls (class, object,
- module), and a (hopefully unique) name, will create a
- mock_class object with that name and that possessess all
- the public attributes of cls. default_ret_val sets the
- default_ret_val on all methods of the cls mock.
- """
- return mock_class(cls, name, default_ret_val,
- self.__record_call, self.__method_playback)
-
-
- def create_mock_function(self, symbol, default_return_val=None):
- """
- create a mock_function with name symbol and default return
- value of default_ret_val.
- """
- return mock_function(symbol, default_return_val,
- self.__record_call, self.__method_playback)
-
-
- def mock_up(self, obj, name, default_ret_val=None):
- """
- Given an object (class instance or module) and a registration
- name, then replace all its methods with mock function objects
- (passing the orignal functions to the mock functions).
- """
- for symbol in dir(obj):
- if symbol.startswith("__"):
- continue
-
- orig_symbol = getattr(obj, symbol)
- if callable(orig_symbol):
- f_name = "%s.%s" % (name, symbol)
- func = mask_function(f_name, orig_symbol,
- default_ret_val,
- self.__record_call,
- self.__method_playback)
- setattr(obj, symbol, func)
-
-
- def stub_with(self, namespace, symbol, new_attribute):
- original_attribute = getattr(namespace, symbol,
- self.NONEXISTENT_ATTRIBUTE)
- self._stubs.append((namespace, symbol, original_attribute))
- setattr(namespace, symbol, new_attribute)
-
-
- def stub_function(self, namespace, symbol):
- mock_attribute = self.create_mock_function(symbol)
- self.stub_with(namespace, symbol, mock_attribute)
-
-
- def stub_class_method(self, cls, symbol):
- mock_attribute = self.create_mock_function(symbol)
- self.stub_with(cls, symbol, staticmethod(mock_attribute))
-
-
- def unstub_all(self):
- self._stubs.reverse()
- for namespace, symbol, original_attribute in self._stubs:
- if original_attribute == self.NONEXISTENT_ATTRIBUTE:
- delattr(namespace, symbol)
- else:
- setattr(namespace, symbol, original_attribute)
- self._stubs = []
-
-
- def __method_playback(self, symbol, *args, **dargs):
- if self._debug:
- print 'Mock call:', _dump_function_call(symbol,
- args, dargs)
- if len(self.recording) != 0:
- func_call = self.recording[0]
- if func_call.symbol != symbol:
- msg = ("Unexpected call: %s. Expected %s"
- % (_dump_function_call(symbol, args, dargs),
- func_call))
- self.errors.append(msg)
- return None
-
- if not func_call.match(*args, **dargs):
- msg = ("%s called. Expected %s"
- % (_dump_function_call(symbol, args, dargs),
- func_call))
- self.errors.append(msg)
- return None
+ NONEXISTENT_ATTRIBUTE = object()
+
+ def __init__(self, debug=False):
+ """
+ With debug=True, all recorded method calls will be printed as
+ they happen.
+ """
+ self.recording = collections.deque()
+ self.errors = []
+ self._stubs = []
+ self._debug = debug
+
+
+ def create_mock_class_obj(self, cls, name, default_ret_val=None):
+ record = self.__record_call
+ playback = self.__method_playback
+ errors = self.errors
+
+ class cls_sub(cls):
+ cls_count = 0
+ creations = collections.deque()
+
+ # overwrite the initializer
+ def __init__(self, *args, **dargs):
+ pass
+
+
+ @classmethod
+ def expect_new(typ, *args, **dargs):
+ obj = typ.make_new(*args, **dargs)
+ typ.creations.append(obj)
+ return obj
+
+
+ def __new__(typ, *args, **dargs):
+ if len(typ.creations) == 0:
+ msg = ("not expecting call to %s "
+ "constructor" % (name))
+ errors.append(msg)
+ return None
+ else:
+ return typ.creations.popleft()
+
+
+ @classmethod
+ def make_new(typ, *args, **dargs):
+ obj = super(cls_sub, typ).__new__(typ, *args,
+ **dargs)
+
+ typ.cls_count += 1
+ obj_name = "%s_%s" % (name, typ.cls_count)
+ for symbol in dir(obj):
+ if (symbol.startswith("__") and
+ symbol.endswith("__")):
+ continue
+
+ orig_symbol = getattr(obj, symbol)
+ if callable(orig_symbol):
+ f_name = ("%s.%s" %
+ (obj_name, symbol))
+ func = mock_function(f_name,
+ default_ret_val,
+ record,
+ playback)
+ setattr(obj, symbol, func)
+ else:
+ setattr(obj, symbol,
+ orig_symbol)
+
+ return obj
+
+
+
+ return cls_sub
+
+
+ def create_mock_class(self, cls, name, default_ret_val=None):
+ """
+ Given something that defines a namespace cls (class, object,
+ module), and a (hopefully unique) name, will create a
+        mock_class object with that name and that possesses all
+ the public attributes of cls. default_ret_val sets the
+ default_ret_val on all methods of the cls mock.
+ """
+ return mock_class(cls, name, default_ret_val,
+ self.__record_call, self.__method_playback)
+
+
+ def create_mock_function(self, symbol, default_return_val=None):
+ """
+ create a mock_function with name symbol and default return
+        value of default_return_val.
+ """
+ return mock_function(symbol, default_return_val,
+ self.__record_call, self.__method_playback)
+
+
+ def mock_up(self, obj, name, default_ret_val=None):
+ """
+ Given an object (class instance or module) and a registration
+        name, replace all its methods with mock function objects
+        (passing the original functions to the mock functions).
+ """
+ for symbol in dir(obj):
+ if symbol.startswith("__"):
+ continue
+
+ orig_symbol = getattr(obj, symbol)
+ if callable(orig_symbol):
+ f_name = "%s.%s" % (name, symbol)
+ func = mask_function(f_name, orig_symbol,
+ default_ret_val,
+ self.__record_call,
+ self.__method_playback)
+ setattr(obj, symbol, func)
+
+
+ def stub_with(self, namespace, symbol, new_attribute):
+ original_attribute = getattr(namespace, symbol,
+ self.NONEXISTENT_ATTRIBUTE)
+ self._stubs.append((namespace, symbol, original_attribute))
+ setattr(namespace, symbol, new_attribute)
+
+
+ def stub_function(self, namespace, symbol):
+ mock_attribute = self.create_mock_function(symbol)
+ self.stub_with(namespace, symbol, mock_attribute)
+
+
+ def stub_class_method(self, cls, symbol):
+ mock_attribute = self.create_mock_function(symbol)
+ self.stub_with(cls, symbol, staticmethod(mock_attribute))
+
+
+ def unstub_all(self):
+ self._stubs.reverse()
+ for namespace, symbol, original_attribute in self._stubs:
+ if original_attribute == self.NONEXISTENT_ATTRIBUTE:
+ delattr(namespace, symbol)
+ else:
+ setattr(namespace, symbol, original_attribute)
+ self._stubs = []
+
+
+ def __method_playback(self, symbol, *args, **dargs):
+ if self._debug:
+ print 'Mock call:', _dump_function_call(symbol,
+ args, dargs)
+ if len(self.recording) != 0:
+ func_call = self.recording[0]
+ if func_call.symbol != symbol:
+ msg = ("Unexpected call: %s. Expected %s"
+ % (_dump_function_call(symbol, args, dargs),
+ func_call))
+ self.errors.append(msg)
+ return None
+
+ if not func_call.match(*args, **dargs):
+ msg = ("%s called. Expected %s"
+ % (_dump_function_call(symbol, args, dargs),
+ func_call))
+ self.errors.append(msg)
+ return None
- # this is the expected call so pop it and return
- self.recording.popleft()
- if func_call.error:
- raise func_call.error
- else:
- return func_call.return_val
- else:
- msg = ("unexpected call: %s"
- % (_dump_function_call(symbol, args, dargs)))
- self.errors.append(msg)
- return None
-
-
- def __record_call(self, mapping):
- self.recording.append(mapping)
-
-
- def check_playback(self):
- """
- Report any errors that were encounterd during calls
- to __method_playback().
- """
- if len(self.errors) > 0:
- for error in self.errors:
- print error
- raise CheckPlaybackError
- elif len(self.recording) != 0:
- for func_call in self.recording:
- print "%s not called" % (func_call)
- raise CheckPlaybackError
+ # this is the expected call so pop it and return
+ self.recording.popleft()
+ if func_call.error:
+ raise func_call.error
+ else:
+ return func_call.return_val
+ else:
+ msg = ("unexpected call: %s"
+ % (_dump_function_call(symbol, args, dargs)))
+ self.errors.append(msg)
+ return None
+
+
+ def __record_call(self, mapping):
+ self.recording.append(mapping)
+
+
+ def check_playback(self):
+ """
+        Report any errors that were encountered during calls
+ to __method_playback().
+ """
+ if len(self.errors) > 0:
+ for error in self.errors:
+ print error
+ raise CheckPlaybackError
+ elif len(self.recording) != 0:
+ for func_call in self.recording:
+ print "%s not called" % (func_call)
+ raise CheckPlaybackError
- def mock_exit(self):
- def mock_exit_handler(self):
- raise ExitException
-
- self.saved_exit = sys.exit
- sys.exit = mock_exit_handler
+ def mock_exit(self):
+ def mock_exit_handler(self):
+ raise ExitException
+
+ self.saved_exit = sys.exit
+ sys.exit = mock_exit_handler
- def unmock_exit(self):
- sys.exit = self.saved_exit
- self.saved_exit = None
+ def unmock_exit(self):
+ sys.exit = self.saved_exit
+ self.saved_exit = None
- def mock_stdout_stderr(self):
- """Mocks and saves the stdout & stderr output"""
- self.mock_streams_stdout = StringIO.StringIO('')
- self.mock_streams_stderr = StringIO.StringIO('')
+ def mock_stdout_stderr(self):
+ """Mocks and saves the stdout & stderr output"""
+ self.mock_streams_stdout = StringIO.StringIO('')
+ self.mock_streams_stderr = StringIO.StringIO('')
- sys.stdout = self.mock_streams_stdout
- sys.stderr = self.mock_streams_stderr
+ sys.stdout = self.mock_streams_stdout
+ sys.stderr = self.mock_streams_stderr
- def unmock_stdout_stderr(self):
- """Restores the stdout & stderr, and returns both
- output strings"""
- sys.stdout = sys.__stdout__
- sys.stderr = sys.__stderr__
- values = (self.mock_streams_stdout.getvalue(),
- self.mock_streams_stderr.getvalue())
-
- self.mock_streams_stdout.close()
- self.mock_streams_stderr.close()
- return values
+ def unmock_stdout_stderr(self):
+ """Restores the stdout & stderr, and returns both
+ output strings"""
+ sys.stdout = sys.__stdout__
+ sys.stderr = sys.__stderr__
+ values = (self.mock_streams_stdout.getvalue(),
+ self.mock_streams_stderr.getvalue())
+
+ self.mock_streams_stdout.close()
+ self.mock_streams_stderr.close()
+ return values
- def mock_io_exit(self):
- self.mock_exit()
- self.mock_stdout_stderr()
-
-
- def unmock_io_exit(self):
- self.unmock_exit()
- return self.unmock_stdout_stderr()
+ def mock_io_exit(self):
+ self.mock_exit()
+ self.mock_stdout_stderr()
+
+
+ def unmock_io_exit(self):
+ self.unmock_exit()
+ return self.unmock_stdout_stderr()
def _arg_to_str(arg):
- if isinstance(arg, argument_comparator):
- return str(arg)
- return repr(arg)
+ if isinstance(arg, argument_comparator):
+ return str(arg)
+ return repr(arg)
def _dump_function_call(symbol, args, dargs):
- arg_vec = []
- for arg in args:
- arg_vec.append(_arg_to_str(arg))
- for key, val in dargs.iteritems():
- arg_vec.append("%s=%s" % (key, _arg_to_stv(val)))
- return "%s(%s)" % (symbol, ', '.join(arg_vec))
+ arg_vec = []
+ for arg in args:
+ arg_vec.append(_arg_to_str(arg))
+ for key, val in dargs.iteritems():
+        arg_vec.append("%s=%s" % (key, _arg_to_str(val)))
+ return "%s(%s)" % (symbol, ', '.join(arg_vec))
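
In compressed form, the record/playback cycle that the mock_god class above
provides (the fuller walk-through is mock_demo.py, next); the mocked function
name and values here are illustrative:

    import mock

    god = mock.mock_god()
    fetch = god.create_mock_function("fetch")

    # record: declare the expected call and its canned return value
    fetch.expect_call("http://example.com").and_return("<html/>")

    # playback: the code under test makes the call and gets the canned value
    assert fetch("http://example.com") == "<html/>"

    # raises CheckPlaybackError on argument mismatches, unexpected calls,
    # or expectations that were never exercised
    god.check_playback()
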
diff --git a/client/common_lib/test_utils/mock_demo.py b/client/common_lib/test_utils/mock_demo.py
index fec116b7..f01fe60b 100644
--- a/client/common_lib/test_utils/mock_demo.py
+++ b/client/common_lib/test_utils/mock_demo.py
@@ -5,140 +5,140 @@ __author__ = "raphtee@google.com (Travis Miller)"
import mock, mock_demo_MUT
class MyError(Exception):
- pass
+ pass
class A(object):
- var = 8
+ var = 8
- def __init__(self):
- self.x = 0
+ def __init__(self):
+ self.x = 0
- def method1(self):
- self.x += 1
- return self.x
+ def method1(self):
+ self.x += 1
+ return self.x
- def method2(self, y):
- return y * self.x
+ def method2(self, y):
+ return y * self.x
class B(A):
- def method3(self, z):
- return self.x + z
+ def method3(self, z):
+ return self.x + z
- def method4(self, z, w):
- return self.x * z + w
+ def method4(self, z, w):
+ return self.x * z + w
class C(B):
- def method5(self):
- self.method1()
- t = self.method2(4)
- u = self.method3(t)
- return u
+ def method5(self):
+ self.method1()
+ t = self.method2(4)
+ u = self.method3(t)
+ return u
class D(C):
- def method6(self, error):
- if error:
- raise MyError("woops")
- else:
- return 10
+ def method6(self, error):
+ if error:
+ raise MyError("woops")
+ else:
+ return 10
class E(D):
- def __init__(self, val):
- self.val = val
+ def __init__(self, val):
+ self.val = val
# say we want to test that do_stuff is doing what we think it is doing
def do_stuff(a, b, func):
- print b.method1()
- print b.method3(10)
- print func("how many")
- print a.method2(5)
- print b.method1()
- print b.method4(1, 4)
- print b.method2(3)
- print b.method2("hello")
+ print b.method1()
+ print b.method3(10)
+ print func("how many")
+ print a.method2(5)
+ print b.method1()
+ print b.method4(1, 4)
+ print b.method2(3)
+ print b.method2("hello")
def do_more_stuff(d):
- print d.method6(False)
- try:
- d.method6(True)
- except:
- print "caught error"
+ print d.method6(False)
+ try:
+ d.method6(True)
+ except:
+ print "caught error"
def main():
- god = mock.mock_god()
+ god = mock.mock_god()
- m1 = god.create_mock_class(A, "A")
- print m1.var
- m2 = god.create_mock_class(B, "B")
- f = god.create_mock_function("func")
+ m1 = god.create_mock_class(A, "A")
+ print m1.var
+ m2 = god.create_mock_class(B, "B")
+ f = god.create_mock_function("func")
- print dir(m1)
- print dir(m2)
+ print dir(m1)
+ print dir(m2)
- # sets up the "recording"
- m2.method1.expect_call().and_return(1)
- m2.method3.expect_call(10).and_return(10)
- f.expect_call("how many").and_return(42)
- m1.method2.expect_call(5).and_return(0)
- m2.method1.expect_call().and_return(2)
- m2.method4.expect_call(1, 4).and_return(6)
- m2.method2.expect_call(3).and_return(6)
- m2.method2.expect_call(mock.is_string_comparator()).and_return("foo")
+ # sets up the "recording"
+ m2.method1.expect_call().and_return(1)
+ m2.method3.expect_call(10).and_return(10)
+ f.expect_call("how many").and_return(42)
+ m1.method2.expect_call(5).and_return(0)
+ m2.method1.expect_call().and_return(2)
+ m2.method4.expect_call(1, 4).and_return(6)
+ m2.method2.expect_call(3).and_return(6)
+ m2.method2.expect_call(mock.is_string_comparator()).and_return("foo")
- # check the recording order
- for func_call in god.recording:
- print func_call
+ # check the recording order
+ for func_call in god.recording:
+ print func_call
- # once we start making calls into the methods we are in
- # playback mode
- do_stuff(m1, m2, f)
+ # once we start making calls into the methods we are in
+ # playback mode
+ do_stuff(m1, m2, f)
- # we can now check that playback succeeded
- god.check_playback()
+ # we can now check that playback succeeded
+ god.check_playback()
- # now test the ability to mock out all methods of an object
- # except those under test
- c = C()
- god.mock_up(c, "c")
+ # now test the ability to mock out all methods of an object
+ # except those under test
+ c = C()
+ god.mock_up(c, "c")
- # setup recording
- c.method1.expect_call()
- c.method2.expect_call(4).and_return(4)
- c.method3.expect_call(4).and_return(5)
+ # setup recording
+ c.method1.expect_call()
+ c.method2.expect_call(4).and_return(4)
+ c.method3.expect_call(4).and_return(5)
- # perform the test
- answer = c.method5.run_original_function()
+ # perform the test
+ answer = c.method5.run_original_function()
- # check playback
- print "answer = %s" % (answer)
- god.check_playback()
+ # check playback
+ print "answer = %s" % (answer)
+ god.check_playback()
- # check exception returns too
- m3 = god.create_mock_class(D, "D")
- m3.method6.expect_call(False).and_return(10)
- m3.method6.expect_call(True).and_raises(MyError("woops"))
+ # check exception returns too
+ m3 = god.create_mock_class(D, "D")
+ m3.method6.expect_call(False).and_return(10)
+ m3.method6.expect_call(True).and_raises(MyError("woops"))
- do_more_stuff(m3)
- god.check_playback()
+ do_more_stuff(m3)
+ god.check_playback()
- # now check we can mock out a whole class (rather than just an instance)
- mockE = god.create_mock_class_obj(E, "E")
- oldE = mock_demo_MUT.E
- mock_demo_MUT.E = mockE
+ # now check we can mock out a whole class (rather than just an instance)
+ mockE = god.create_mock_class_obj(E, "E")
+ oldE = mock_demo_MUT.E
+ mock_demo_MUT.E = mockE
- m4 = mockE.expect_new(val=7)
- m4.method1.expect_call().and_return(1)
+ m4 = mockE.expect_new(val=7)
+ m4.method1.expect_call().and_return(1)
- mock_demo_MUT.do_create_stuff()
- god.check_playback()
+ mock_demo_MUT.do_create_stuff()
+ god.check_playback()
- mock_demo_MUT.E = oldE
+ mock_demo_MUT.E = oldE
if __name__ == "__main__":
- main()
+ main()
diff --git a/client/common_lib/test_utils/mock_demo_MUT.py b/client/common_lib/test_utils/mock_demo_MUT.py
index c44e5a2a..b2fde777 100644
--- a/client/common_lib/test_utils/mock_demo_MUT.py
+++ b/client/common_lib/test_utils/mock_demo_MUT.py
@@ -1,5 +1,5 @@
from mock_demo import E
def do_create_stuff():
- obj = E(val=7)
- print obj.method1()
+ obj = E(val=7)
+ print obj.method1()
diff --git a/client/common_lib/utils.py b/client/common_lib/utils.py
index 92e53f73..f19ecc59 100644
--- a/client/common_lib/utils.py
+++ b/client/common_lib/utils.py
@@ -8,360 +8,360 @@ import error, barrier
def read_one_line(filename):
- return open(filename, 'r').readline().strip()
+ return open(filename, 'r').readline().strip()
def write_one_line(filename, str):
- open(filename, 'w').write(str.rstrip() + "\n")
+ open(filename, 'w').write(str.rstrip() + "\n")
def read_keyval(path):
- """
- Read a key-value pair format file into a dictionary, and return it.
- Takes either a filename or directory name as input. If it's a
- directory name, we assume you want the file to be called keyval.
- """
- if os.path.isdir(path):
- path = os.path.join(path, 'keyval')
- keyval = {}
- for line in open(path):
- line = re.sub('#.*', '', line.rstrip())
- if not re.search(r'^[-\w]+=', line):
- raise ValueError('Invalid format line: %s' % line)
- key, value = line.split('=', 1)
- if re.search('^\d+$', value):
- value = int(value)
- elif re.search('^(\d+\.)?\d+$', value):
- value = float(value)
- keyval[key] = value
- return keyval
+ """
+ Read a key-value pair format file into a dictionary, and return it.
+ Takes either a filename or directory name as input. If it's a
+ directory name, we assume you want the file to be called keyval.
+ """
+ if os.path.isdir(path):
+ path = os.path.join(path, 'keyval')
+ keyval = {}
+ for line in open(path):
+ line = re.sub('#.*', '', line.rstrip())
+ if not re.search(r'^[-\w]+=', line):
+ raise ValueError('Invalid format line: %s' % line)
+ key, value = line.split('=', 1)
+ if re.search('^\d+$', value):
+ value = int(value)
+ elif re.search('^(\d+\.)?\d+$', value):
+ value = float(value)
+ keyval[key] = value
+ return keyval
def write_keyval(path, dictionary, type_tag=None):
- """
- Write a key-value pair format file out to a file. This uses append
- mode to open the file, so existing text will not be overwritten or
- reparsed.
-
- If type_tag is None, then the key must be composed of alphanumeric
- characters (or dashes+underscores). However, if type-tag is not
- null then the keys must also have "{type_tag}" as a suffix. At
- the moment the only valid values of type_tag are "attr" and "perf".
- """
- if os.path.isdir(path):
- path = os.path.join(path, 'keyval')
- keyval = open(path, 'a')
-
- if type_tag is None:
- key_regex = re.compile(r'^[-\w]+$')
- else:
- if type_tag not in ('attr', 'perf'):
- raise ValueError('Invalid type tag: %s' % type_tag)
- escaped_tag = re.escape(type_tag)
- key_regex = re.compile(r'^[-\w]+\{%s\}$' % escaped_tag)
- try:
- for key, value in dictionary.iteritems():
- if not key_regex.search(key):
- raise ValueError('Invalid key: %s' % key)
- keyval.write('%s=%s\n' % (key, value))
- finally:
- keyval.close()
+ """
+ Write a key-value pair format file out to a file. This uses append
+ mode to open the file, so existing text will not be overwritten or
+ reparsed.
+
+ If type_tag is None, then the key must be composed of alphanumeric
+ characters (or dashes+underscores). However, if type-tag is not
+ null then the keys must also have "{type_tag}" as a suffix. At
+ the moment the only valid values of type_tag are "attr" and "perf".
+ """
+ if os.path.isdir(path):
+ path = os.path.join(path, 'keyval')
+ keyval = open(path, 'a')
+
+ if type_tag is None:
+ key_regex = re.compile(r'^[-\w]+$')
+ else:
+ if type_tag not in ('attr', 'perf'):
+ raise ValueError('Invalid type tag: %s' % type_tag)
+ escaped_tag = re.escape(type_tag)
+ key_regex = re.compile(r'^[-\w]+\{%s\}$' % escaped_tag)
+ try:
+ for key, value in dictionary.iteritems():
+ if not key_regex.search(key):
+ raise ValueError('Invalid key: %s' % key)
+ keyval.write('%s=%s\n' % (key, value))
+ finally:
+ keyval.close()
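
# A short illustration of the keyval convention the two helpers above share
# (the path and values are made up for the example):
#
#     write_keyval('/tmp/results', {'kernel-version': '2.6.18', 'cpus': 4})
#
# appends to /tmp/results/keyval:
#
#     kernel-version=2.6.18
#     cpus=4
#
# and read_keyval('/tmp/results') returns {'kernel-version': '2.6.18', 'cpus': 4},
# turning purely numeric values back into int or float.  When a type_tag of
# 'attr' or 'perf' is passed, the keys themselves must already carry the
# matching '{attr}'/'{perf}' suffix, as write_iteration_keyval in
# common_lib/test.py does.
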
def is_url(path):
- """Return true if path looks like a URL"""
- # for now, just handle http and ftp
- url_parts = urlparse.urlparse(path)
- return (url_parts[0] in ('http', 'ftp'))
+ """Return true if path looks like a URL"""
+ # for now, just handle http and ftp
+ url_parts = urlparse.urlparse(path)
+ return (url_parts[0] in ('http', 'ftp'))
def urlopen(url, data=None, proxies=None, timeout=300):
- """Wrapper to urllib.urlopen with timeout addition."""
+ """Wrapper to urllib.urlopen with timeout addition."""
- # Save old timeout
- old_timeout = socket.getdefaulttimeout()
- socket.setdefaulttimeout(timeout)
- try:
- return urllib.urlopen(url, data=data, proxies=proxies)
- finally:
- socket.setdefaulttimeout(old_timeout)
+ # Save old timeout
+ old_timeout = socket.getdefaulttimeout()
+ socket.setdefaulttimeout(timeout)
+ try:
+ return urllib.urlopen(url, data=data, proxies=proxies)
+ finally:
+ socket.setdefaulttimeout(old_timeout)
def urlretrieve(url, filename=None, reporthook=None, data=None, timeout=300):
- """Wrapper to urllib.urlretrieve with timeout addition."""
- old_timeout = socket.getdefaulttimeout()
- socket.setdefaulttimeout(timeout)
- try:
- return urllib.urlretrieve(url, filename=filename,
- reporthook=reporthook, data=data)
- finally:
- socket.setdefaulttimeout(old_timeout)
-
+ """Wrapper to urllib.urlretrieve with timeout addition."""
+ old_timeout = socket.getdefaulttimeout()
+ socket.setdefaulttimeout(timeout)
+ try:
+ return urllib.urlretrieve(url, filename=filename,
+ reporthook=reporthook, data=data)
+ finally:
+ socket.setdefaulttimeout(old_timeout)
+
def get_file(src, dest, permissions=None):
- """Get a file from src, which can be local or a remote URL"""
- if (src == dest):
- return
- if (is_url(src)):
- print 'PWD: ' + os.getcwd()
- print 'Fetching \n\t', src, '\n\t->', dest
- try:
- urllib.urlretrieve(src, dest)
- except IOError, e:
- raise error.AutotestError('Unable to retrieve %s (to %s)'
- % (src, dest), e)
- else:
- shutil.copyfile(src, dest)
- if permissions:
- os.chmod(dest, permissions)
- return dest
+ """Get a file from src, which can be local or a remote URL"""
+ if (src == dest):
+ return
+ if (is_url(src)):
+ print 'PWD: ' + os.getcwd()
+ print 'Fetching \n\t', src, '\n\t->', dest
+ try:
+ urllib.urlretrieve(src, dest)
+ except IOError, e:
+ raise error.AutotestError('Unable to retrieve %s (to %s)'
+ % (src, dest), e)
+ else:
+ shutil.copyfile(src, dest)
+ if permissions:
+ os.chmod(dest, permissions)
+ return dest
def unmap_url(srcdir, src, destdir='.'):
- """
- Receives either a path to a local file or a URL.
- returns either the path to the local file, or the fetched URL
-
- unmap_url('/usr/src', 'foo.tar', '/tmp')
- = '/usr/src/foo.tar'
- unmap_url('/usr/src', 'http://site/file', '/tmp')
- = '/tmp/file'
- (after retrieving it)
- """
- if is_url(src):
- url_parts = urlparse.urlparse(src)
- filename = os.path.basename(url_parts[2])
- dest = os.path.join(destdir, filename)
- return get_file(src, dest)
- else:
- return os.path.join(srcdir, src)
+ """
+ Receives either a path to a local file or a URL.
+ returns either the path to the local file, or the fetched URL
+
+ unmap_url('/usr/src', 'foo.tar', '/tmp')
+ = '/usr/src/foo.tar'
+ unmap_url('/usr/src', 'http://site/file', '/tmp')
+ = '/tmp/file'
+ (after retrieving it)
+ """
+ if is_url(src):
+ url_parts = urlparse.urlparse(src)
+ filename = os.path.basename(url_parts[2])
+ dest = os.path.join(destdir, filename)
+ return get_file(src, dest)
+ else:
+ return os.path.join(srcdir, src)
def update_version(srcdir, preserve_srcdir, new_version, install,
- *args, **dargs):
- """
- Make sure srcdir is version new_version
+ *args, **dargs):
+ """
+ Make sure srcdir is version new_version
- If not, delete it and install() the new version.
+ If not, delete it and install() the new version.
- In the preserve_srcdir case, we just check it's up to date,
- and if not, we rerun install, without removing srcdir
- """
- versionfile = os.path.join(srcdir, '.version')
- install_needed = True
+ In the preserve_srcdir case, we just check it's up to date,
+ and if not, we rerun install, without removing srcdir
+ """
+ versionfile = os.path.join(srcdir, '.version')
+ install_needed = True
- if os.path.exists(versionfile):
- old_version = pickle.load(open(versionfile))
- if old_version == new_version:
- install_needed = False
+ if os.path.exists(versionfile):
+ old_version = pickle.load(open(versionfile))
+ if old_version == new_version:
+ install_needed = False
- if install_needed:
- if not preserve_srcdir and os.path.exists(srcdir):
- shutil.rmtree(srcdir)
- install(*args, **dargs)
- if os.path.exists(srcdir):
- pickle.dump(new_version, open(versionfile, 'w'))
+ if install_needed:
+ if not preserve_srcdir and os.path.exists(srcdir):
+ shutil.rmtree(srcdir)
+ install(*args, **dargs)
+ if os.path.exists(srcdir):
+ pickle.dump(new_version, open(versionfile, 'w'))
def run(command, timeout=None, ignore_status=False,
- stdout_tee=None, stderr_tee=None):
- """
- Run a command on the host.
-
- Args:
- command: the command line string
- timeout: time limit in seconds before attempting to
- kill the running process. The run() function
- will take a few seconds longer than 'timeout'
- to complete if it has to kill the process.
- ignore_status: do not raise an exception, no matter what
- the exit code of the command is.
- stdout_tee: optional file-like object to which stdout data
- will be written as it is generated (data will still
- be stored in result.stdout)
- stderr_tee: likewise for stderr
-
- Returns:
- a CmdResult object
-
- Raises:
- CmdError: the exit code of the command
- execution was not 0
- """
- return join_bg_job(run_bg(command), command, timeout, ignore_status,
- stdout_tee, stderr_tee)
+ stdout_tee=None, stderr_tee=None):
+ """
+ Run a command on the host.
+
+ Args:
+ command: the command line string
+ timeout: time limit in seconds before attempting to
+ kill the running process. The run() function
+ will take a few seconds longer than 'timeout'
+ to complete if it has to kill the process.
+ ignore_status: do not raise an exception, no matter what
+ the exit code of the command is.
+ stdout_tee: optional file-like object to which stdout data
+ will be written as it is generated (data will still
+ be stored in result.stdout)
+ stderr_tee: likewise for stderr
+
+ Returns:
+ a CmdResult object
+
+ Raises:
+ CmdError: the exit code of the command
+ execution was not 0
+ """
+ return join_bg_job(run_bg(command), command, timeout, ignore_status,
+ stdout_tee, stderr_tee)
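
# A minimal sketch of the run() API described in the docstring above (the
# command is illustrative):
#
#     result = run("dmesg | tail -n 5", timeout=30, ignore_status=True)
#     print result.exit_status    # with ignore_status, nonzero exits do not raise
#     print result.stdout         # captured output (also teed if stdout_tee given)
#     print result.duration       # wall-clock seconds, filled in by join_bg_job()
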
def run_bg(command):
- """Run the command in a subprocess and return the subprocess."""
- result = CmdResult(command)
- sp = subprocess.Popen(command, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- shell=True, executable="/bin/bash")
- return sp, result
+ """Run the command in a subprocess and return the subprocess."""
+ result = CmdResult(command)
+ sp = subprocess.Popen(command, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=True, executable="/bin/bash")
+ return sp, result
def join_bg_job(bg_job, command, timeout=None, ignore_status=False,
- stdout_tee=None, stderr_tee=None):
- """Join the subprocess with the current thread. See run description."""
- sp, result = bg_job
- stdout_file = StringIO.StringIO()
- stderr_file = StringIO.StringIO()
- (ret, timeouterr) = (0, False)
-
- try:
- # We are holding ends to stdin, stdout pipes
- # hence we need to be sure to close those fds no mater what
- start_time = time.time()
- (ret, timeouterr) = _wait_for_command(sp, start_time,
- timeout, stdout_file, stderr_file,
- stdout_tee, stderr_tee)
- result.exit_status = ret
- result.duration = time.time() - start_time
- # don't use os.read now, so we get all the rest of the output
- _process_output(sp.stdout, stdout_file, stdout_tee,
- use_os_read=False)
- _process_output(sp.stderr, stderr_file, stderr_tee,
- use_os_read=False)
- finally:
- # close our ends of the pipes to the sp no matter what
- sp.stdout.close()
- sp.stderr.close()
-
- result.stdout = stdout_file.getvalue()
- result.stderr = stderr_file.getvalue()
-
- if result.exit_status != 0:
- if timeouterr:
- raise error.CmdError(command, result, "Command did not "
- "complete within %d seconds" % timeout)
- elif not ignore_status:
- raise error.CmdError(command, result,
- "Command returned non-zero exit status")
-
- return result
+ stdout_tee=None, stderr_tee=None):
+ """Join the subprocess with the current thread. See run description."""
+ sp, result = bg_job
+ stdout_file = StringIO.StringIO()
+ stderr_file = StringIO.StringIO()
+ (ret, timeouterr) = (0, False)
+
+ try:
+ # We are holding the ends of the stdout and stderr pipes,
+ # hence we need to be sure to close those fds no matter what
+ start_time = time.time()
+ (ret, timeouterr) = _wait_for_command(sp, start_time,
+ timeout, stdout_file, stderr_file,
+ stdout_tee, stderr_tee)
+ result.exit_status = ret
+ result.duration = time.time() - start_time
+ # don't use os.read now, so we get all the rest of the output
+ _process_output(sp.stdout, stdout_file, stdout_tee,
+ use_os_read=False)
+ _process_output(sp.stderr, stderr_file, stderr_tee,
+ use_os_read=False)
+ finally:
+ # close our ends of the pipes to the sp no matter what
+ sp.stdout.close()
+ sp.stderr.close()
+
+ result.stdout = stdout_file.getvalue()
+ result.stderr = stderr_file.getvalue()
+
+ if result.exit_status != 0:
+ if timeouterr:
+ raise error.CmdError(command, result, "Command did not "
+ "complete within %d seconds" % timeout)
+ elif not ignore_status:
+ raise error.CmdError(command, result,
+ "Command returned non-zero exit status")
+
+ return result
# this returns a tuple with the return code and a flag to specify if the error
# is due to the process not terminating within timeout
def _wait_for_command(subproc, start_time, timeout, stdout_file, stderr_file,
- stdout_tee, stderr_tee):
- if timeout:
- stop_time = start_time + timeout
- time_left = stop_time - time.time()
- else:
- time_left = None # so that select never times out
- while not timeout or time_left > 0:
- # select will return when stdout is ready (including when it is
- # EOF, that is the process has terminated).
- ready, _, _ = select.select([subproc.stdout, subproc.stderr],
- [], [], time_left)
- # os.read() has to be used instead of
- # subproc.stdout.read() which will otherwise block
- if subproc.stdout in ready:
- _process_output(subproc.stdout, stdout_file,
- stdout_tee)
- if subproc.stderr in ready:
- _process_output(subproc.stderr, stderr_file,
- stderr_tee)
-
- exit_status_indication = subproc.poll()
-
- if exit_status_indication is not None:
- return (exit_status_indication, False)
-
- if timeout:
- time_left = stop_time - time.time()
-
- # the process has not terminated within timeout,
- # kill it via an escalating series of signals.
- if exit_status_indication is None:
- exit_status_indication = nuke_subprocess(subproc)
-
- return (exit_status_indication, True)
+ stdout_tee, stderr_tee):
+ if timeout:
+ stop_time = start_time + timeout
+ time_left = stop_time - time.time()
+ else:
+ time_left = None # so that select never times out
+ while not timeout or time_left > 0:
+ # select will return when stdout is ready (including when it is
+ # EOF, that is the process has terminated).
+ ready, _, _ = select.select([subproc.stdout, subproc.stderr],
+ [], [], time_left)
+ # os.read() has to be used instead of
+ # subproc.stdout.read() which will otherwise block
+ if subproc.stdout in ready:
+ _process_output(subproc.stdout, stdout_file,
+ stdout_tee)
+ if subproc.stderr in ready:
+ _process_output(subproc.stderr, stderr_file,
+ stderr_tee)
+
+ exit_status_indication = subproc.poll()
+
+ if exit_status_indication is not None:
+ return (exit_status_indication, False)
+
+ if timeout:
+ time_left = stop_time - time.time()
+
+ # the process has not terminated within timeout,
+ # kill it via an escalating series of signals.
+ if exit_status_indication is None:
+ exit_status_indication = nuke_subprocess(subproc)
+
+ return (exit_status_indication, True)
def _process_output(pipe, fbuffer, teefile=None, use_os_read=True):
- if use_os_read:
- data = os.read(pipe.fileno(), 1024)
- else:
- data = pipe.read()
- fbuffer.write(data)
- if teefile:
- teefile.write(data)
- teefile.flush()
+ if use_os_read:
+ data = os.read(pipe.fileno(), 1024)
+ else:
+ data = pipe.read()
+ fbuffer.write(data)
+ if teefile:
+ teefile.write(data)
+ teefile.flush()
def nuke_subprocess(subproc):
- # the process has not terminated within timeout,
- # kill it via an escalating series of signals.
- signal_queue = [signal.SIGTERM, signal.SIGKILL]
- for sig in signal_queue:
- try:
- os.kill(subproc.pid, sig)
- # The process may have died before we could kill it.
- except OSError:
- pass
-
- for i in range(5):
- rc = subproc.poll()
- if rc != None:
- return rc
- time.sleep(1)
+ # the process has not terminated within timeout,
+ # kill it via an escalating series of signals.
+ signal_queue = [signal.SIGTERM, signal.SIGKILL]
+ for sig in signal_queue:
+ try:
+ os.kill(subproc.pid, sig)
+ # The process may have died before we could kill it.
+ except OSError:
+ pass
+
+ for i in range(5):
+ rc = subproc.poll()
+ if rc != None:
+ return rc
+ time.sleep(1)
def nuke_pid(pid):
- # the process has not terminated within timeout,
- # kill it via an escalating series of signals.
- signal_queue = [signal.SIGTERM, signal.SIGKILL]
- for sig in signal_queue:
- try:
- os.kill(pid, sig)
+ # the process has not terminated within timeout,
+ # kill it via an escalating series of signals.
+ signal_queue = [signal.SIGTERM, signal.SIGKILL]
+ for sig in signal_queue:
+ try:
+ os.kill(pid, sig)
- # The process may have died before we could kill it.
- except OSError:
- pass
+ # The process may have died before we could kill it.
+ except OSError:
+ pass
- try:
- for i in range(5):
- status = os.waitpid(pid, os.WNOHANG)[0]
- if status == pid:
- return
- time.sleep(1)
+ try:
+ for i in range(5):
+ status = os.waitpid(pid, os.WNOHANG)[0]
+ if status == pid:
+ return
+ time.sleep(1)
- if status != pid:
- raise error.AutoservRunError('Could not kill %d'
- % pid, None)
+ if status != pid:
+ raise error.AutoservRunError('Could not kill %d'
+ % pid, None)
- # the process died before we join it.
- except OSError:
- pass
+ # the process died before we could join it.
+ except OSError:
+ pass
def _process_output(pipe, fbuffer, teefile=None, use_os_read=True):
- if use_os_read:
- data = os.read(pipe.fileno(), 1024)
- else:
- data = pipe.read()
- fbuffer.write(data)
- if teefile:
- teefile.write(data)
- teefile.flush()
+ if use_os_read:
+ data = os.read(pipe.fileno(), 1024)
+ else:
+ data = pipe.read()
+ fbuffer.write(data)
+ if teefile:
+ teefile.write(data)
+ teefile.flush()
def system(command, timeout=None, ignore_status=False):
- return run(command, timeout, ignore_status,
- stdout_tee=sys.stdout, stderr_tee=sys.stderr).exit_status
+ return run(command, timeout, ignore_status,
+ stdout_tee=sys.stdout, stderr_tee=sys.stderr).exit_status
def system_output(command, timeout=None, ignore_status=False,
- retain_output=False):
- if retain_output:
- out = run(command, timeout, ignore_status,
- stdout_tee=sys.stdout, stderr_tee=sys.stderr).stdout
- else:
- out = run(command, timeout, ignore_status).stdout
- if out[-1:] == '\n': out = out[:-1]
- return out
+ retain_output=False):
+ if retain_output:
+ out = run(command, timeout, ignore_status,
+ stdout_tee=sys.stdout, stderr_tee=sys.stderr).stdout
+ else:
+ out = run(command, timeout, ignore_status).stdout
+ if out[-1:] == '\n': out = out[:-1]
+ return out
"""
This function is used when there is a need to run more than one
@@ -400,134 +400,134 @@ RETURN VALUE:
"""
def get_sync_control_file(control, host_name, host_num,
- instance, num_jobs, port_base=63100):
- sc_bar_port = port_base
- c_bar_port = port_base
- if host_num < 0:
- print "Please provide a non negative number for the host"
- return None
- s_bar_port = port_base + 1 + host_num # The set of s_bar_ports are
- # the same for a given machine
-
- sc_bar_timeout = 180
- s_bar_timeout = c_bar_timeout = 120
-
- # The barrier code snippet is prepended into the conrol file
- # dynamically before at.run() is called finally.
- control_new = []
-
- # jobid is the unique name used to identify the processes
- # trying to reach the barriers
- jobid = "%s#%d" % (host_name, instance)
-
- rendv = []
- # rendvstr is a temp holder for the rendezvous list of the processes
- for n in range(num_jobs):
- rendv.append("'%s#%d'" % (host_name, n))
- rendvstr = ",".join(rendv)
-
- if instance == 0:
- # Do the setup and wait at the server barrier
- # Clean up the tmp and the control dirs for the first instance
- control_new.append('if os.path.exists(job.tmpdir):')
- control_new.append("\t system('umount -f %s > /dev/null"
- "2> /dev/null' % job.tmpdir,"
- "ignore_status=True)")
- control_new.append("\t system('rm -rf ' + job.tmpdir)")
- control_new.append(
- 'b0 = job.barrier("%s", "sc_bar", %d, port=%d)'
- % (jobid, sc_bar_timeout, sc_bar_port))
- control_new.append(
- 'b0.rendevous_servers("PARALLEL_MASTER", "%s")'
- % jobid)
-
- elif instance == 1:
- # Wait at the server barrier to wait for instance=0
- # process to complete setup
- b0 = barrier.barrier("PARALLEL_MASTER", "sc_bar", sc_bar_timeout,
- port=sc_bar_port)
- b0.rendevous_servers("PARALLEL_MASTER", jobid)
-
- if(num_jobs > 2):
- b1 = barrier.barrier(jobid, "s_bar", s_bar_timeout,
- port=s_bar_port)
- b1.rendevous(rendvstr)
-
- else:
- # For the rest of the clients
- b2 = barrier.barrier(jobid, "s_bar", s_bar_timeout, port=s_bar_port)
- b2.rendevous(rendvstr)
-
- # Client side barrier for all the tests to start at the same time
- control_new.append('b1 = job.barrier("%s", "c_bar", %d, port=%d)'
- % (jobid, c_bar_timeout, c_bar_port))
- control_new.append("b1.rendevous(%s)" % rendvstr)
-
- # Stick in the rest of the control file
- control_new.append(control)
-
- return "\n".join(control_new)
+ instance, num_jobs, port_base=63100):
+ sc_bar_port = port_base
+ c_bar_port = port_base
+ if host_num < 0:
+ print "Please provide a non-negative number for the host"
+ return None
+ s_bar_port = port_base + 1 + host_num # The set of s_bar_ports is
+ # the same for a given machine
+
+ sc_bar_timeout = 180
+ s_bar_timeout = c_bar_timeout = 120
+
+ # The barrier code snippet is prepended to the control file
+ # dynamically before at.run() is finally called.
+ control_new = []
+
+ # jobid is the unique name used to identify the processes
+ # trying to reach the barriers
+ jobid = "%s#%d" % (host_name, instance)
+
+ rendv = []
+ # rendvstr is a temp holder for the rendezvous list of the processes
+ for n in range(num_jobs):
+ rendv.append("'%s#%d'" % (host_name, n))
+ rendvstr = ",".join(rendv)
+
+ if instance == 0:
+ # Do the setup and wait at the server barrier
+ # Clean up the tmp and the control dirs for the first instance
+ control_new.append('if os.path.exists(job.tmpdir):')
+ control_new.append("\t system('umount -f %s > /dev/null"
+ " 2> /dev/null' % job.tmpdir,"
+ "ignore_status=True)")
+ control_new.append("\t system('rm -rf ' + job.tmpdir)")
+ control_new.append(
+ 'b0 = job.barrier("%s", "sc_bar", %d, port=%d)'
+ % (jobid, sc_bar_timeout, sc_bar_port))
+ control_new.append(
+ 'b0.rendevous_servers("PARALLEL_MASTER", "%s")'
+ % jobid)
+
+ elif instance == 1:
+ # Wait at the server barrier for the instance=0
+ # process to complete its setup
+ b0 = barrier.barrier("PARALLEL_MASTER", "sc_bar", sc_bar_timeout,
+ port=sc_bar_port)
+ b0.rendevous_servers("PARALLEL_MASTER", jobid)
+
+ if(num_jobs > 2):
+ b1 = barrier.barrier(jobid, "s_bar", s_bar_timeout,
+ port=s_bar_port)
+ b1.rendevous(rendvstr)
+
+ else:
+ # For the rest of the clients
+ b2 = barrier.barrier(jobid, "s_bar", s_bar_timeout, port=s_bar_port)
+ b2.rendevous(rendvstr)
+
+ # Client side barrier for all the tests to start at the same time
+ control_new.append('b1 = job.barrier("%s", "c_bar", %d, port=%d)'
+ % (jobid, c_bar_timeout, c_bar_port))
+ control_new.append("b1.rendevous(%s)" % rendvstr)
+
+ # Stick in the rest of the control file
+ control_new.append(control)
+
+ return "\n".join(control_new)
class CmdResult(object):
- """
- Command execution result.
-
- command: String containing the command line itself
- exit_status: Integer exit code of the process
- stdout: String containing stdout of the process
- stderr: String containing stderr of the process
- duration: Elapsed wall clock time running the process
- """
-
-
- def __init__(self, command=None, stdout="", stderr="",
- exit_status=None, duration=0):
- self.command = command
- self.exit_status = exit_status
- self.stdout = stdout
- self.stderr = stderr
- self.duration = duration
-
-
- def __repr__(self):
- wrapper = textwrap.TextWrapper(width = 78,
- initial_indent="\n ",
- subsequent_indent=" ")
-
- stdout = self.stdout.rstrip()
- if stdout:
- stdout = "\nstdout:\n%s" % stdout
-
- stderr = self.stderr.rstrip()
- if stderr:
- stderr = "\nstderr:\n%s" % stderr
-
- return ("* Command: %s\n"
- "Exit status: %s\n"
- "Duration: %s\n"
- "%s"
- "%s"
- % (wrapper.fill(self.command), self.exit_status,
- self.duration, stdout, stderr))
+ """
+ Command execution result.
+
+ command: String containing the command line itself
+ exit_status: Integer exit code of the process
+ stdout: String containing stdout of the process
+ stderr: String containing stderr of the process
+ duration: Elapsed wall clock time running the process
+ """
+
+
+ def __init__(self, command=None, stdout="", stderr="",
+ exit_status=None, duration=0):
+ self.command = command
+ self.exit_status = exit_status
+ self.stdout = stdout
+ self.stderr = stderr
+ self.duration = duration
+
+
+ def __repr__(self):
+ wrapper = textwrap.TextWrapper(width = 78,
+ initial_indent="\n ",
+ subsequent_indent=" ")
+
+ stdout = self.stdout.rstrip()
+ if stdout:
+ stdout = "\nstdout:\n%s" % stdout
+
+ stderr = self.stderr.rstrip()
+ if stderr:
+ stderr = "\nstderr:\n%s" % stderr
+
+ return ("* Command: %s\n"
+ "Exit status: %s\n"
+ "Duration: %s\n"
+ "%s"
+ "%s"
+ % (wrapper.fill(self.command), self.exit_status,
+ self.duration, stdout, stderr))
class run_randomly:
- def __init__(self, run_sequentially=False):
- # Run sequentially is for debugging control files
- self.test_list = []
- self.run_sequentially = run_sequentially
+ def __init__(self, run_sequentially=False):
+ # run_sequentially is for debugging control files
+ self.test_list = []
+ self.run_sequentially = run_sequentially
- def add(self, *args, **dargs):
- test = (args, dargs)
- self.test_list.append(test)
+ def add(self, *args, **dargs):
+ test = (args, dargs)
+ self.test_list.append(test)
- def run(self, fn):
- while self.test_list:
- test_index = random.randint(0, len(self.test_list)-1)
- if self.run_sequentially:
- test_index = 0
- (args, dargs) = self.test_list.pop(test_index)
- fn(*args, **dargs)
+ def run(self, fn):
+ while self.test_list:
+ test_index = random.randint(0, len(self.test_list)-1)
+ if self.run_sequentially:
+ test_index = 0
+ (args, dargs) = self.test_list.pop(test_index)
+ fn(*args, **dargs)
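
For reference, a minimal sketch of how a test or control file might drive the helpers above (run, system, system_output and CmdResult). The command strings are illustrative only, and the import paths assume the autotest_lib layout used throughout this tree.

    from autotest_lib.client.common_lib import utils, error

    try:
        # run() returns a CmdResult; a non-zero exit raises CmdError
        # unless ignore_status=True is passed.
        result = utils.run('uname -r', timeout=30)
        print result.stdout.strip()
    except error.CmdError, e:
        print 'command failed: %s' % e

    # system() tees output to the console and returns the exit status;
    # system_output() returns stdout with the trailing newline stripped.
    status = utils.system('true', ignore_status=True)
    release = utils.system_output('uname -r')
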
diff --git a/client/deps/boottool/boottool.py b/client/deps/boottool/boottool.py
index e8013571..43e5cb22 100755
--- a/client/deps/boottool/boottool.py
+++ b/client/deps/boottool/boottool.py
@@ -7,21 +7,21 @@ from autotest_lib.client.common_lib import utils
from autotest_lib.client.bin import autotest_utils
# To use this, you have to set PERL5LIB to:
-# autodir+'deps/boottool/lib/perl'
+# autodir+'deps/boottool/lib/perl'
# or on Ubuntu we also need
-# autodir+'deps/boottool/share/perl'
+# autodir+'deps/boottool/share/perl'
# because it uses nonstandard locations
version = 1
-def setup(tarball, topdir):
- srcdir = os.path.join(topdir, 'src')
- autotest_utils.extract_tarball_to_dir(tarball, srcdir)
- os.chdir(srcdir)
- utils.system ('perl Makefile.PL PREFIX=' + topdir)
- utils.system ('make')
- utils.system ('make install')
- os.chdir(topdir)
+def setup(tarball, topdir):
+ srcdir = os.path.join(topdir, 'src')
+ autotest_utils.extract_tarball_to_dir(tarball, srcdir)
+ os.chdir(srcdir)
+ utils.system ('perl Makefile.PL PREFIX=' + topdir)
+ utils.system ('make')
+ utils.system ('make install')
+ os.chdir(topdir)
pwd = os.getcwd()
diff --git a/client/deps/libaio/libaio.py b/client/deps/libaio/libaio.py
index cfb037d3..e6736787 100755
--- a/client/deps/libaio/libaio.py
+++ b/client/deps/libaio/libaio.py
@@ -9,12 +9,12 @@ from autotest_lib.client.bin import autotest_utils
version = 1
def setup(tarball, topdir):
- srcdir = os.path.join(topdir, 'src')
- autotest_utils.extract_tarball_to_dir(tarball, srcdir)
- os.chdir(srcdir)
- utils.system ('make')
- utils.system ('make prefix=%s install' % topdir)
- os.chdir(topdir)
+ srcdir = os.path.join(topdir, 'src')
+ autotest_utils.extract_tarball_to_dir(tarball, srcdir)
+ os.chdir(srcdir)
+ utils.system ('make')
+ utils.system ('make prefix=%s install' % topdir)
+ os.chdir(topdir)
# old source was
diff --git a/client/deps/libnet/libnet.py b/client/deps/libnet/libnet.py
index eac66b25..90ab0238 100755
--- a/client/deps/libnet/libnet.py
+++ b/client/deps/libnet/libnet.py
@@ -8,19 +8,19 @@ from autotest_lib.client.bin import autotest_utils
version = 1
-def setup(tarball, topdir):
- srcdir = os.path.join(topdir, 'src')
- if not os.path.exists(tarball):
- utils.get_file('http://www.packetfactory.net/libnet/dist/libnet.tar.gz',
- tarball)
- autotest_utils.extract_tarball_to_dir(tarball, 'src')
- os.chdir(srcdir)
- utils.system ('./configure --prefix=%s/libnet' % topdir)
- utils.system('make')
- utils.system('make install')
+def setup(tarball, topdir):
+ srcdir = os.path.join(topdir, 'src')
+ if not os.path.exists(tarball):
+ utils.get_file('http://www.packetfactory.net/libnet/dist/libnet.tar.gz',
+ tarball)
+ autotest_utils.extract_tarball_to_dir(tarball, 'src')
+ os.chdir(srcdir)
+ utils.system ('./configure --prefix=%s/libnet' % topdir)
+ utils.system('make')
+ utils.system('make install')
+
+ os.chdir(topdir)
- os.chdir(topdir)
-
pwd = os.getcwd()
tarball = os.path.join(pwd, 'libnet.tar.gz')
utils.update_version(pwd+'/src', False, version, setup, tarball, pwd)
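
Each of these client/deps scripts follows the same convention: a module-level version, a setup(tarball, topdir) function that unpacks and builds into topdir, and a call to utils.update_version(), which (per the update_version() hunk at the top of this change) re-runs setup() only when the pickled version differs. A rough sketch of a hypothetical dep in the same shape; the "example" name, tarball and configure flags are placeholders, not a real dependency.

    import os
    from autotest_lib.client.common_lib import utils
    from autotest_lib.client.bin import autotest_utils

    version = 1

    def setup(tarball, topdir):
        # Unpack into <topdir>/src, build, and install under topdir.
        srcdir = os.path.join(topdir, 'src')
        autotest_utils.extract_tarball_to_dir(tarball, srcdir)
        os.chdir(srcdir)
        utils.system('./configure --prefix=%s/example' % topdir)
        utils.system('make')
        utils.system('make install')
        os.chdir(topdir)

    pwd = os.getcwd()
    tarball = os.path.join(pwd, 'example-1.0.tar.gz')
    # update_version() only re-runs setup() when 'version' changes.
    utils.update_version(pwd + '/src', False, version, setup, tarball, pwd)
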
diff --git a/client/deps/mysql/mysql.py b/client/deps/mysql/mysql.py
index 0fafe271..9a9eeb67 100755
--- a/client/deps/mysql/mysql.py
+++ b/client/deps/mysql/mysql.py
@@ -8,32 +8,32 @@ from autotest_lib.client.bin import autotest_utils
version = 3
-def setup(tarball, topdir):
- srcdir = os.path.join(topdir, 'src')
- if not os.path.exists(tarball):
- utils.get_file('http://mirror.x10.com/mirror/mysql/Downloads/MySQL-5.0/mysql-5.0.45.tar.gz', tarball)
- autotest_utils.extract_tarball_to_dir(tarball, 'src')
- os.chdir(srcdir)
- utils.system ('./configure --prefix=%s/mysql --enable-thread-safe-client' \
- % topdir)
- utils.system('make -j %d' % count_cpus())
- utils.system('make install')
+def setup(tarball, topdir):
+ srcdir = os.path.join(topdir, 'src')
+ if not os.path.exists(tarball):
+ utils.get_file('http://mirror.x10.com/mirror/mysql/Downloads/MySQL-5.0/mysql-5.0.45.tar.gz', tarball)
+ autotest_utils.extract_tarball_to_dir(tarball, 'src')
+ os.chdir(srcdir)
+ utils.system ('./configure --prefix=%s/mysql --enable-thread-safe-client' \
+ % topdir)
+ utils.system('make -j %d' % count_cpus())
+ utils.system('make install')
+
+ #
+ # MySQL doesn't create this directory on its own.
+ # This is where database logs and files are created.
+ #
+ try:
+ os.mkdir(topdir + '/mysql/var')
+ except:
+ pass
+ #
+ # Initialize the database.
+ #
+ utils.system('%s/mysql/bin/mysql_install_db' % topdir)
+
+ os.chdir(topdir)
- #
- # MySQL doesn't create this directory on it's own.
- # This is where database logs and files are created.
- #
- try:
- os.mkdir(topdir + '/mysql/var')
- except:
- pass
- #
- # Initialize the database.
- #
- utils.system('%s/mysql/bin/mysql_install_db' % topdir)
-
- os.chdir(topdir)
-
pwd = os.getcwd()
tarball = os.path.join(pwd, 'mysql-5.0.45.tar.gz')
utils.update_version(pwd+'/src', False, version, setup, tarball, pwd)
diff --git a/client/deps/pgpool/pgpool.py b/client/deps/pgpool/pgpool.py
index 3034b6af..24afc26f 100755
--- a/client/deps/pgpool/pgpool.py
+++ b/client/deps/pgpool/pgpool.py
@@ -8,23 +8,23 @@ from autotest_lib.client.bin import autotest_utils
version = 1
-def setup(tarball, topdir):
- # FIXME - Waiting to be able to specify dependency.
- #self.job.setup_dep(['pgsql'])
- srcdir = os.path.join(topdir, 'src')
- if not os.path.exists(tarball):
- utils.get_file('http://pgfoundry.org/frs/download.php/1083/pgpool-II-1.0.1.tar.gz', tarball)
- autotest_utils.extract_tarball_to_dir(tarball, 'src')
- os.chdir(srcdir)
- # FIXEME - Waiting to be able to use self.autodir instead of
- # os.environ['AUTODIR']
- utils.system('./configure --prefix=%s/pgpool --with-pgsql=%s/deps/pgsql/pgsql' \
- % (topdir, os.environ['AUTODIR']))
- utils.system('make -j %d' % count_cpus())
- utils.system('make install')
+def setup(tarball, topdir):
+ # FIXME - Waiting to be able to specify dependency.
+ #self.job.setup_dep(['pgsql'])
+ srcdir = os.path.join(topdir, 'src')
+ if not os.path.exists(tarball):
+ utils.get_file('http://pgfoundry.org/frs/download.php/1083/pgpool-II-1.0.1.tar.gz', tarball)
+ autotest_utils.extract_tarball_to_dir(tarball, 'src')
+ os.chdir(srcdir)
+ # FIXME - Waiting to be able to use self.autodir instead of
+ # os.environ['AUTODIR']
+ utils.system('./configure --prefix=%s/pgpool --with-pgsql=%s/deps/pgsql/pgsql' \
+ % (topdir, os.environ['AUTODIR']))
+ utils.system('make -j %d' % count_cpus())
+ utils.system('make install')
+
+ os.chdir(topdir)
- os.chdir(topdir)
-
pwd = os.getcwd()
tarball = os.path.join(pwd, 'pgpool-II-1.0.1.tar.gz')
utils.update_version(pwd+'/src', False, version, setup, tarball, pwd)
diff --git a/client/deps/pgsql/pgsql.py b/client/deps/pgsql/pgsql.py
index 1de96b8b..cd536534 100755
--- a/client/deps/pgsql/pgsql.py
+++ b/client/deps/pgsql/pgsql.py
@@ -8,18 +8,18 @@ from autotest_lib.client.bin import autotest_utils
version = 4
-def setup(tarball, topdir):
- srcdir = os.path.join(topdir, 'src')
- if not os.path.exists(tarball):
- utils.get_file('ftp://ftp.postgresql.org/pub/source/v8.3.1/postgresql-8.3.1.tar.bz2', tarball)
- autotest_utils.extract_tarball_to_dir(tarball, 'src')
- os.chdir(srcdir)
- utils.system ('./configure --without-readline --without-zlib --enable-debug --prefix=%s/pgsql' % topdir)
- utils.system('make -j %d' % count_cpus())
- utils.system('make install')
-
- os.chdir(topdir)
-
+def setup(tarball, topdir):
+ srcdir = os.path.join(topdir, 'src')
+ if not os.path.exists(tarball):
+ utils.get_file('ftp://ftp.postgresql.org/pub/source/v8.3.1/postgresql-8.3.1.tar.bz2', tarball)
+ autotest_utils.extract_tarball_to_dir(tarball, 'src')
+ os.chdir(srcdir)
+ utils.system ('./configure --without-readline --without-zlib --enable-debug --prefix=%s/pgsql' % topdir)
+ utils.system('make -j %d' % count_cpus())
+ utils.system('make install')
+
+ os.chdir(topdir)
+
pwd = os.getcwd()
tarball = os.path.join(pwd, 'postgresql-8.3.1.tar.bz2')
utils.update_version(pwd+'/src', False, version, setup, tarball, pwd)
diff --git a/client/profilers/catprofile/catprofile.py b/client/profilers/catprofile/catprofile.py
index 30167fc3..5ccc8350 100755
--- a/client/profilers/catprofile/catprofile.py
+++ b/client/profilers/catprofile/catprofile.py
@@ -3,41 +3,40 @@
import profiler,time,os
class catprofile(profiler.profiler):
- version = 1
-
- # filenames: list of filenames to cat
- def initialize(self, filenames = ['/proc/meminfo', '/proc/slabinfo'],
- outfile = 'monitor', interval = 1):
- self.filenames = filenames
- self.outfile = outfile
- self.interval = interval
-
-
- def start(self, test):
- self.child_pid = os.fork()
- if self.child_pid: # parent
- return None
- else: # child
- while 1:
- lines = []
- for filename in self.filenames:
- input = open(filename, 'r')
- lines += '\n----- %s -----\n' % filename
- lines += input.readlines()
- input.close
- outfile = test.profdir + '/' + self.outfile
- output = open(outfile, 'a')
- output.write(time.asctime() + '\n')
- output.writelines(lines)
- output.write('\n=========================\n')
- output.close()
- time.sleep(self.interval)
-
-
- def stop(self, test):
- os.kill(self.child_pid, 15)
-
-
- def report(self, test):
- return None
-
+ version = 1
+
+ # filenames: list of filenames to cat
+ def initialize(self, filenames = ['/proc/meminfo', '/proc/slabinfo'],
+ outfile = 'monitor', interval = 1):
+ self.filenames = filenames
+ self.outfile = outfile
+ self.interval = interval
+
+
+ def start(self, test):
+ self.child_pid = os.fork()
+ if self.child_pid: # parent
+ return None
+ else: # child
+ while 1:
+ lines = []
+ for filename in self.filenames:
+ input = open(filename, 'r')
+ lines += '\n----- %s -----\n' % filename
+ lines += input.readlines()
+ input.close()
+ outfile = test.profdir + '/' + self.outfile
+ output = open(outfile, 'a')
+ output.write(time.asctime() + '\n')
+ output.writelines(lines)
+ output.write('\n=========================\n')
+ output.close()
+ time.sleep(self.interval)
+
+
+ def stop(self, test):
+ os.kill(self.child_pid, 15)
+
+
+ def report(self, test):
+ return None
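
The profilers in this tree all implement the same small hook set against profiler.profiler: initialize() stores parameters, start() and stop() bracket the test, and report() post-processes results. A minimal sketch of a polling profiler in the same shape; the class name, sampled command and output filename are made up for illustration.

    import profiler, os, subprocess

    class uptime_sampler(profiler.profiler):
        version = 1

        def initialize(self, interval = 5):
            self.interval = interval

        def start(self, test):
            # Sample 'uptime' every few seconds into the test's profdir.
            cmd = "while :; do uptime; sleep %d; done" % self.interval
            logfile = open(os.path.join(test.profdir, "uptime"), 'w')
            p = subprocess.Popen(cmd, shell=True, stdout=logfile,
                                 stderr=subprocess.STDOUT)
            self.pid = p.pid

        def stop(self, test):
            os.kill(self.pid, 15)

        def report(self, test):
            return None
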
diff --git a/client/profilers/iostat/iostat.py b/client/profilers/iostat/iostat.py
index 11fb24e5..a258c1f3 100755
--- a/client/profilers/iostat/iostat.py
+++ b/client/profilers/iostat/iostat.py
@@ -3,24 +3,23 @@
import profiler,time,os,subprocess
class iostat(profiler.profiler):
- version = 1
+ version = 1
- def initialize(self, interval = 1):
- self.interval = interval
+ def initialize(self, interval = 1):
+ self.interval = interval
- def start(self, test):
- cmd = "/usr/bin/iostat %d" % self.interval
- logfile = open(os.path.join(test.profdir, "iostat"), 'w')
- p = subprocess.Popen(cmd, shell=True, stdout=logfile, \
- stderr=subprocess.STDOUT)
- self.pid = p.pid
+ def start(self, test):
+ cmd = "/usr/bin/iostat %d" % self.interval
+ logfile = open(os.path.join(test.profdir, "iostat"), 'w')
+ p = subprocess.Popen(cmd, shell=True, stdout=logfile, \
+ stderr=subprocess.STDOUT)
+ self.pid = p.pid
- def stop(self, test):
- os.kill(self.pid, 15)
+ def stop(self, test):
+ os.kill(self.pid, 15)
- def report(self, test):
- return None
-
+ def report(self, test):
+ return None
diff --git a/client/profilers/lockmeter/lockmeter.py b/client/profilers/lockmeter/lockmeter.py
index bea3cc64..8c3d0144 100755
--- a/client/profilers/lockmeter/lockmeter.py
+++ b/client/profilers/lockmeter/lockmeter.py
@@ -1,47 +1,47 @@
# NOTE: if you get compile errors from config.h, referring you to a FAQ,
-# you might need to do 'cat < /dev/null > /usr/include/linux/config.h'.
+# you might need to do 'cat < /dev/null > /usr/include/linux/config.h'.
# But read the FAQ first.
import profiler
from autotest_lib.client.common_lib import utils
from autotest_lib.client.bin import autotest_utils
class lockmeter(profiler.profiler):
- version = 1
+ version = 1
# ftp://oss.sgi.com/projects/lockmeter/download/lockstat-1.4.11.tar.gz
# patched with lockstat.diff
# ftp://oss.sgi.com/projects/lockmeter/download/v2.6/patch.2.6.14-lockmeter-1.gz
# is the kernel patch
- def setup(self, tarball = 'lockstat-1.4.11.tar.bz2'):
- self.tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
- autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
- os.chdir(self.srcdir)
+ def setup(self, tarball = 'lockstat-1.4.11.tar.bz2'):
+ self.tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
+ os.chdir(self.srcdir)
- utils.system('make')
- self.cmd = self.srcdir + '/lockstat'
+ utils.system('make')
+ self.cmd = self.srcdir + '/lockstat'
- def initialize(self):
- try:
- assert os.path.exists('/proc/lockmeter')
- except:
- print 'Lockmeter is not compiled into your kernel'
- print 'Please fix and try again'
- raise AssertionError
+ def initialize(self):
+ try:
+ assert os.path.exists('/proc/lockmeter')
+ except:
+ print 'Lockmeter is not compiled into your kernel'
+ print 'Please fix and try again'
+ raise AssertionError
- def start(self, test):
- utils.system(self.cmd + ' off')
- utils.system(self.cmd + ' reset')
- utils.system(self.cmd + ' on')
+ def start(self, test):
+ utils.system(self.cmd + ' off')
+ utils.system(self.cmd + ' reset')
+ utils.system(self.cmd + ' on')
- def stop(self, test):
- utils.system(self.cmd + ' off')
+ def stop(self, test):
+ utils.system(self.cmd + ' off')
- def report(self, test):
- args = ' -m ' + autotest_utils.get_systemmap()
- self.output = self.profdir + '/results/lockstat'
- utils.system(self.cmd + args + ' print > ' + self.output)
+ def report(self, test):
+ args = ' -m ' + autotest_utils.get_systemmap()
+ self.output = self.profdir + '/results/lockstat'
+ utils.system(self.cmd + args + ' print > ' + self.output)
diff --git a/client/profilers/mpstat/mpstat.py b/client/profilers/mpstat/mpstat.py
index 261bd25a..5870c9cf 100644
--- a/client/profilers/mpstat/mpstat.py
+++ b/client/profilers/mpstat/mpstat.py
@@ -2,24 +2,24 @@
import profiler,time,os,subprocess
class mpstat(profiler.profiler):
- version = 1
+ version = 1
- def initialize(self, interval = 1):
- self.interval = interval
+ def initialize(self, interval = 1):
+ self.interval = interval
- def start(self, test):
- cmd = "mpstat -P ALL %d" % self.interval
- logfile = open(os.path.join(test.profdir, "mpstat"), 'w')
- p = subprocess.Popen(cmd, shell=True, stdout=logfile,
- stderr=subprocess.STDOUT)
- self.pid = p.pid
+ def start(self, test):
+ cmd = "mpstat -P ALL %d" % self.interval
+ logfile = open(os.path.join(test.profdir, "mpstat"), 'w')
+ p = subprocess.Popen(cmd, shell=True, stdout=logfile,
+ stderr=subprocess.STDOUT)
+ self.pid = p.pid
- def stop(self, test):
- os.kill(self.pid, 15)
+ def stop(self, test):
+ os.kill(self.pid, 15)
- def report(self, test):
- return None
+ def report(self, test):
+ return None
diff --git a/client/profilers/oprofile/oprofile.py b/client/profilers/oprofile/oprofile.py
index a36989a9..8f557368 100755
--- a/client/profilers/oprofile/oprofile.py
+++ b/client/profilers/oprofile/oprofile.py
@@ -1,112 +1,112 @@
-# Will need some libaries to compile. Do 'apt-get build-dep oprofile'
+# Will need some libraries to compile. Do 'apt-get build-dep oprofile'
import profiler, shutil
from autotest_lib.client.common_lib import utils
from autotest_lib.client.bin import autotest_utils
class oprofile(profiler.profiler):
- version = 5
+ version = 5
# Notes on whether to use the local copy or the builtin from source:
# local = None
# Try to use source copy if it works, else use local
# local = False
-# Force use of the source copy
+# Force use of the source copy
# local = True
-# Force use of the local copy
+# Force use of the local copy
# http://prdownloads.sourceforge.net/oprofile/oprofile-0.9.3.tar.gz
- def setup(self, tarball = 'oprofile-0.9.3.tar.bz2', local = None,
- *args, **dargs):
- if local == True:
- return
-
- try:
- self.tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
- os.chdir(self.srcdir)
-
- patch = os.path.join(self.bindir,"oprofile-69455.patch")
- utils.system('patch -p1 < %s' % patch)
- utils.system('./configure --with-kernel-support --prefix=' + \
- self.srcdir)
- utils.system('make')
- utils.system('make install')
- except:
- # Build from source failed.
- # But maybe can still use the local copy
- local_opcontrol = os.path.exists('/usr/bin/opcontrol')
- local_opreport = os.path.exists('/usr/bin/opreport')
- if local == False or not local_opcontrol or not local_opreport:
- raise
-
-
- def initialize(self, vmlinux = None, events = [], others = None,
- local = None):
- if not vmlinux:
- self.vmlinux = get_vmlinux()
- else:
- self.vmlinux = vmlinux
- if not len(events):
- self.events = ['default']
- else:
- self.events = events
- self.others = others
-
- # If there is existing setup file, oprofile may fail to start with default parameters.
- if os.path.isfile('/root/.oprofile/daemonrc'):
- os.rename('/root/.oprofile/daemonrc', '/root/.oprofile/daemonrc.org')
-
- setup = ' --setup'
- if not self.vmlinux:
- setup += ' --no-vmlinux'
- else:
- setup += ' --vmlinux=%s' % self.vmlinux
- for e in self.events:
- setup += ' --event=%s' % e
- if self.others:
- setup += ' ' + self.others
-
- src_opreport = os.path.join(self.srcdir, '/bin/opreport')
- src_opcontrol = os.path.join(self.srcdir, '/bin/opcontrol')
- if local == False or (local == None and
- os.path.exists(src_opreport) and
- os.path.exists(src_opcontrol)):
- print "Using source-built copy of oprofile"
- self.opreport = src_opreport
- self.opcontrol = src_opcontrol
- else:
- print "Using machine local copy of oprofile"
- self.opreport = '/usr/bin/opreport'
- self.opcontrol = '/usr/bin/opcontrol'
-
- utils.system(self.opcontrol + setup)
-
-
- def start(self, test):
- utils.system(self.opcontrol + ' --shutdown')
- utils.system(self.opcontrol + ' --reset')
- utils.system(self.opcontrol + ' --start')
-
-
- def stop(self, test):
- utils.system(self.opcontrol + ' --stop')
- utils.system(self.opcontrol + ' --dump')
-
-
- def report(self, test):
- # Output kernel per-symbol profile report
- reportfile = test.profdir + '/oprofile.kernel'
- if self.vmlinux:
- report = self.opreport + ' -l ' + self.vmlinux
- if os.path.exists(get_modules_dir()):
- report += ' -p ' + get_modules_dir()
- utils.system(report + ' > ' + reportfile)
- else:
- utils.system("echo 'no vmlinux found.' > %s" %reportfile)
-
- # output profile summary report
- reportfile = test.profdir + '/oprofile.user'
- utils.system(self.opreport + ' --long-filenames ' + ' > ' + reportfile)
-
- utils.system(self.opcontrol + ' --shutdown')
+ def setup(self, tarball = 'oprofile-0.9.3.tar.bz2', local = None,
+ *args, **dargs):
+ if local == True:
+ return
+
+ try:
+ self.tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
+ os.chdir(self.srcdir)
+
+ patch = os.path.join(self.bindir,"oprofile-69455.patch")
+ utils.system('patch -p1 < %s' % patch)
+ utils.system('./configure --with-kernel-support --prefix=' + \
+ self.srcdir)
+ utils.system('make')
+ utils.system('make install')
+ except:
+ # Build from source failed.
+ # But maybe we can still use the local copy
+ local_opcontrol = os.path.exists('/usr/bin/opcontrol')
+ local_opreport = os.path.exists('/usr/bin/opreport')
+ if local == False or not local_opcontrol or not local_opreport:
+ raise
+
+
+ def initialize(self, vmlinux = None, events = [], others = None,
+ local = None):
+ if not vmlinux:
+ self.vmlinux = get_vmlinux()
+ else:
+ self.vmlinux = vmlinux
+ if not len(events):
+ self.events = ['default']
+ else:
+ self.events = events
+ self.others = others
+
+ # If there is an existing setup file, oprofile may fail to start with default parameters.
+ if os.path.isfile('/root/.oprofile/daemonrc'):
+ os.rename('/root/.oprofile/daemonrc', '/root/.oprofile/daemonrc.org')
+
+ setup = ' --setup'
+ if not self.vmlinux:
+ setup += ' --no-vmlinux'
+ else:
+ setup += ' --vmlinux=%s' % self.vmlinux
+ for e in self.events:
+ setup += ' --event=%s' % e
+ if self.others:
+ setup += ' ' + self.others
+
+ src_opreport = os.path.join(self.srcdir, 'bin/opreport')
+ src_opcontrol = os.path.join(self.srcdir, 'bin/opcontrol')
+ if local == False or (local == None and
+ os.path.exists(src_opreport) and
+ os.path.exists(src_opcontrol)):
+ print "Using source-built copy of oprofile"
+ self.opreport = src_opreport
+ self.opcontrol = src_opcontrol
+ else:
+ print "Using machine local copy of oprofile"
+ self.opreport = '/usr/bin/opreport'
+ self.opcontrol = '/usr/bin/opcontrol'
+
+ utils.system(self.opcontrol + setup)
+
+
+ def start(self, test):
+ utils.system(self.opcontrol + ' --shutdown')
+ utils.system(self.opcontrol + ' --reset')
+ utils.system(self.opcontrol + ' --start')
+
+
+ def stop(self, test):
+ utils.system(self.opcontrol + ' --stop')
+ utils.system(self.opcontrol + ' --dump')
+
+
+ def report(self, test):
+ # Output kernel per-symbol profile report
+ reportfile = test.profdir + '/oprofile.kernel'
+ if self.vmlinux:
+ report = self.opreport + ' -l ' + self.vmlinux
+ if os.path.exists(get_modules_dir()):
+ report += ' -p ' + get_modules_dir()
+ utils.system(report + ' > ' + reportfile)
+ else:
+ utils.system("echo 'no vmlinux found.' > %s" %reportfile)
+
+ # output profile summary report
+ reportfile = test.profdir + '/oprofile.user'
+ utils.system(self.opreport + ' --long-filenames ' + ' > ' + reportfile)
+
+ utils.system(self.opcontrol + ' --shutdown')
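
In a client control file, a profiler such as oprofile is normally attached by name through job.profilers before the test runs. A rough sketch, assuming the usual job.profilers.add()/delete() interface; the test name and the oprofile event string are only examples.

    # Collect oprofile data around a single test run.
    job.profilers.add('oprofile', events=['CPU_CLK_UNHALTED:100000'])
    job.run_test('kernbench', iterations=1)
    job.profilers.delete('oprofile')
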
diff --git a/client/profilers/readprofile/readprofile.py b/client/profilers/readprofile/readprofile.py
index 95b1c60d..8ac1c376 100755
--- a/client/profilers/readprofile/readprofile.py
+++ b/client/profilers/readprofile/readprofile.py
@@ -3,45 +3,45 @@ from autotest_lib.client.common_lib import utils
from autotest_lib.client.bin import autotest_utils
class readprofile(profiler.profiler):
- version = 1
+ version = 1
# http://www.kernel.org/pub/linux/utils/util-linux/util-linux-2.12r.tar.bz2
- def setup(self, tarball = 'util-linux-2.12r.tar.bz2'):
- self.tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
- autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
- os.chdir(self.srcdir)
+ def setup(self, tarball = 'util-linux-2.12r.tar.bz2'):
+ self.tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
+ os.chdir(self.srcdir)
- utils.system('./configure')
- os.chdir('sys-utils')
- utils.system('make readprofile')
+ utils.system('./configure')
+ os.chdir('sys-utils')
+ utils.system('make readprofile')
- def initialize(self):
- try:
- utils.system('grep -iq " profile=" /proc/cmdline')
- except CmdError:
- raise AutotestError('readprofile not enabled')
+ def initialize(self):
+ try:
+ utils.system('grep -iq " profile=" /proc/cmdline')
+ except CmdError:
+ raise AutotestError('readprofile not enabled')
- self.cmd = self.srcdir + '/sys-utils/readprofile'
+ self.cmd = self.srcdir + '/sys-utils/readprofile'
- def start(self, test):
- utils.system(self.cmd + ' -r')
+ def start(self, test):
+ utils.system(self.cmd + ' -r')
- def stop(self, test):
- # There's no real way to stop readprofile, so we stash the
- # raw data at this point instead. BAD EXAMPLE TO COPY! ;-)
- self.rawprofile = test.profdir + '/profile.raw'
- print "STOP"
- shutil.copyfile('/proc/profile', self.rawprofile)
+ def stop(self, test):
+ # There's no real way to stop readprofile, so we stash the
+ # raw data at this point instead. BAD EXAMPLE TO COPY! ;-)
+ self.rawprofile = test.profdir + '/profile.raw'
+ print "STOP"
+ shutil.copyfile('/proc/profile', self.rawprofile)
- def report(self, test):
- args = ' -n'
- args += ' -m ' + get_systemmap()
- args += ' -p ' + self.rawprofile
- cmd = self.cmd + ' ' + args
- txtprofile = test.profdir + '/profile.text'
- utils.system(cmd + ' | sort -nr > ' + txtprofile)
- utils.system('bzip2 ' + self.rawprofile)
+ def report(self, test):
+ args = ' -n'
+ args += ' -m ' + get_systemmap()
+ args += ' -p ' + self.rawprofile
+ cmd = self.cmd + ' ' + args
+ txtprofile = test.profdir + '/profile.text'
+ utils.system(cmd + ' | sort -nr > ' + txtprofile)
+ utils.system('bzip2 ' + self.rawprofile)
diff --git a/client/profilers/vmstat/vmstat.py b/client/profilers/vmstat/vmstat.py
index 632cc34b..b381649a 100755
--- a/client/profilers/vmstat/vmstat.py
+++ b/client/profilers/vmstat/vmstat.py
@@ -3,24 +3,23 @@
import profiler,time,os,subprocess
class vmstat(profiler.profiler):
- version = 1
+ version = 1
- def initialize(self, interval = 1):
- self.interval = interval
+ def initialize(self, interval = 1):
+ self.interval = interval
- def start(self, test):
- cmd = "/usr/bin/vmstat %d" % self.interval
- logfile = open(os.path.join(test.profdir, "vmstat"), 'w')
- p = subprocess.Popen(cmd, shell=True, stdout=logfile, \
- stderr=subprocess.STDOUT)
- self.pid = p.pid
+ def start(self, test):
+ cmd = "/usr/bin/vmstat %d" % self.interval
+ logfile = open(os.path.join(test.profdir, "vmstat"), 'w')
+ p = subprocess.Popen(cmd, shell=True, stdout=logfile, \
+ stderr=subprocess.STDOUT)
+ self.pid = p.pid
- def stop(self, test):
- os.kill(self.pid, 15)
+ def stop(self, test):
+ os.kill(self.pid, 15)
- def report(self, test):
- return None
-
+ def report(self, test):
+ return None
diff --git a/client/setup_modules.py b/client/setup_modules.py
index 4e2185ee..f2de48c6 100644
--- a/client/setup_modules.py
+++ b/client/setup_modules.py
@@ -4,95 +4,95 @@ import os, sys, new
def _create_module(name):
- """Create a single top-level module"""
- module = new.module(name)
- sys.modules[name] = module
- return module
+ """Create a single top-level module"""
+ module = new.module(name)
+ sys.modules[name] = module
+ return module
def _create_module_and_parents(name):
- """Create a module, and all the necessary parents"""
- parts = name.split(".")
- # first create the top-level module
- parent = _create_module(parts[0])
- created_parts = [parts[0]]
- parts.pop(0)
- # now, create any remaining child modules
- while parts:
- child_name = parts.pop(0)
- module = new.module(child_name)
- setattr(parent, child_name, module)
- created_parts.append(child_name)
- sys.modules[".".join(created_parts)] = module
- parent = module
+ """Create a module, and all the necessary parents"""
+ parts = name.split(".")
+ # first create the top-level module
+ parent = _create_module(parts[0])
+ created_parts = [parts[0]]
+ parts.pop(0)
+ # now, create any remaining child modules
+ while parts:
+ child_name = parts.pop(0)
+ module = new.module(child_name)
+ setattr(parent, child_name, module)
+ created_parts.append(child_name)
+ sys.modules[".".join(created_parts)] = module
+ parent = module
def _import_children_into_module(parent_module_name, path):
- """Import all the packages on a path into a parent module"""
- # find all the packages at 'path'
- names = []
- for filename in os.listdir(path):
- full_name = os.path.join(path, filename)
- if not os.path.isdir(full_name):
- continue # skip files
- if "__init__.py" in os.listdir(full_name):
- names.append(filename)
- # import all the packages and insert them into 'parent_module'
- sys.path.insert(0, path)
- for name in names:
- module = __import__(name)
- # add the package to the parent
- parent_module = sys.modules[parent_module_name]
- setattr(parent_module, name, module)
- full_name = parent_module_name + "." + name
- sys.modules[full_name] = module
- # restore the system path
- sys.path.pop(0)
+ """Import all the packages on a path into a parent module"""
+ # find all the packages at 'path'
+ names = []
+ for filename in os.listdir(path):
+ full_name = os.path.join(path, filename)
+ if not os.path.isdir(full_name):
+ continue # skip files
+ if "__init__.py" in os.listdir(full_name):
+ names.append(filename)
+ # import all the packages and insert them into 'parent_module'
+ sys.path.insert(0, path)
+ for name in names:
+ module = __import__(name)
+ # add the package to the parent
+ parent_module = sys.modules[parent_module_name]
+ setattr(parent_module, name, module)
+ full_name = parent_module_name + "." + name
+ sys.modules[full_name] = module
+ # restore the system path
+ sys.path.pop(0)
def _setup_common_library(root_module_name):
- """
- Setup aliases for all the common libraries, e.g.
- common -> autotest_lib.client.common_lib
- common.error -> autotest_lib.client.common_lib.error
- """
- # convert the root_module_name into a client module name
- parts = root_module_name.split(".")
- if parts[-1] == "client":
- client_name = root_module_name
- else:
- client_name = root_module_name + ".client"
- # import the top-level common library
- common_lib = __import__(client_name, globals(), locals(),
- ["common_lib"]).common_lib
- sys.modules["common"] = common_lib
- # patch up all the root_module_name.*.common libs
- for module_name in sys.modules.iterkeys():
- if (module_name.startswith(root_module_name + ".") and
- module_name.endswith(".common")):
- sys.modules[module_name] = common_lib
- # import the specific common libraries
- for library in common_lib.__all__:
- module = __import__(client_name + ".common_lib", globals(),
- locals(), [library])
- module = getattr(module, library)
- setattr(common_lib, library, module)
- sys.modules["common.%s" % library] = module
+ """
+ Setup aliases for all the common libraries, e.g.
+ common -> autotest_lib.client.common_lib
+ common.error -> autotest_lib.client.common_lib.error
+ """
+ # convert the root_module_name into a client module name
+ parts = root_module_name.split(".")
+ if parts[-1] == "client":
+ client_name = root_module_name
+ else:
+ client_name = root_module_name + ".client"
+ # import the top-level common library
+ common_lib = __import__(client_name, globals(), locals(),
+ ["common_lib"]).common_lib
+ sys.modules["common"] = common_lib
+ # patch up all the root_module_name.*.common libs
+ for module_name in sys.modules.iterkeys():
+ if (module_name.startswith(root_module_name + ".") and
+ module_name.endswith(".common")):
+ sys.modules[module_name] = common_lib
+ # import the specific common libraries
+ for library in common_lib.__all__:
+ module = __import__(client_name + ".common_lib", globals(),
+ locals(), [library])
+ module = getattr(module, library)
+ setattr(common_lib, library, module)
+ sys.modules["common.%s" % library] = module
def setup(base_path, root_module_name=""):
- """
- Perform all the necessary setup so that all the packages at
- 'base_path' can be imported via "import root_module_name.package".
- If root_module_name is empty, then all the packages at base_path
- are inserted as top-level packages.
+ """
+ Perform all the necessary setup so that all the packages at
+ 'base_path' can be imported via "import root_module_name.package".
+ If root_module_name is empty, then all the packages at base_path
+ are inserted as top-level packages.
- Also, setup all the common.* aliases for modules in the common
- library.
- """
- _create_module_and_parents(root_module_name)
- _import_children_into_module(root_module_name, base_path)
- _setup_common_library(root_module_name)
+ Also, setup all the common.* aliases for modules in the common
+ library.
+ """
+ _create_module_and_parents(root_module_name)
+ _import_children_into_module(root_module_name, base_path)
+ _setup_common_library(root_module_name)
# This must run on Python versions less than 2.4.
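
This is the machinery that lets client code import the same module either as autotest_lib.client.common_lib.<name> or through the short common.<name> alias. A rough sketch of the kind of bootstrap module that would call it; the directory layout and root module name are assumptions based on the imports used elsewhere in this tree.

    import os, sys

    # Make the packages under this directory importable as
    # autotest_lib.client.*, with common.* aliased onto common_lib.
    dirname = os.path.dirname(os.path.abspath(__file__))
    sys.path.insert(0, dirname)
    import setup_modules
    sys.path.pop(0)
    setup_modules.setup(base_path=dirname,
                        root_module_name="autotest_lib.client")

    from autotest_lib.client.common_lib import error   # full path
    import common                                      # same package, aliased
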
diff --git a/client/tests/aborttest/aborttest.py b/client/tests/aborttest/aborttest.py
index 44ceb178..6e8dadd2 100755
--- a/client/tests/aborttest/aborttest.py
+++ b/client/tests/aborttest/aborttest.py
@@ -2,7 +2,7 @@ from autotest_lib.client.common_lib import error
from autotest_lib.client.bin import test
class aborttest(test.test):
- version = 1
+ version = 1
- def execute(self):
- raise error.JobError('Arrrrrrrrggggh. You are DOOOMED')
+ def execute(self):
+ raise error.JobError('Arrrrrrrrggggh. You are DOOOMED')
diff --git a/client/tests/aio_dio_bugs/aio_dio_bugs.py b/client/tests/aio_dio_bugs/aio_dio_bugs.py
index a4563d85..05132db2 100644
--- a/client/tests/aio_dio_bugs/aio_dio_bugs.py
+++ b/client/tests/aio_dio_bugs/aio_dio_bugs.py
@@ -5,36 +5,36 @@ from autotest_lib.client.common_lib import utils
# tests is a simple array of "cmd" "arguments"
tests = [["aio-dio-invalidate-failure", "poo"],
- ["aio-dio-subblock-eof-read", "eoftest"],
- ["aio-free-ring-with-bogus-nr-pages", ""],
- ["aio-io-setup-with-nonwritable-context-pointer", ""],
- ["aio-dio-extend-stat", "file"],
- ]
+ ["aio-dio-subblock-eof-read", "eoftest"],
+ ["aio-free-ring-with-bogus-nr-pages", ""],
+ ["aio-io-setup-with-nonwritable-context-pointer", ""],
+ ["aio-dio-extend-stat", "file"],
+ ]
name = 0
arglist = 1
class aio_dio_bugs(test.test):
- version = 5
- preserve_srcdir = True
+ version = 5
+ preserve_srcdir = True
- def initialize(self):
- self.job.setup_dep(['libaio'])
- ldflags = '-L ' + self.autodir + '/deps/libaio/lib'
- cflags = '-I ' + self.autodir + '/deps/libaio/include'
- self.gcc_flags = ldflags + ' ' + cflags
+ def initialize(self):
+ self.job.setup_dep(['libaio'])
+ ldflags = '-L ' + self.autodir + '/deps/libaio/lib'
+ cflags = '-I ' + self.autodir + '/deps/libaio/include'
+ self.gcc_flags = ldflags + ' ' + cflags
- def setup(self):
- os.chdir(self.srcdir)
- utils.system('make ' + '"CFLAGS=' + self.gcc_flags + '"')
+ def setup(self):
+ os.chdir(self.srcdir)
+ utils.system('make ' + '"CFLAGS=' + self.gcc_flags + '"')
- def execute(self, args = ''):
- os.chdir(self.tmpdir)
- libs = self.autodir + '/deps/libaio/lib/'
- ld_path = autotest_utils.prepend_path(libs,
- autotest_utils.environ('LD_LIBRARY_PATH'))
- var_ld_path = 'LD_LIBRARY_PATH=' + ld_path
- for test in tests:
- cmd = self.srcdir + '/' + test[name] + ' ' \
- + args + ' ' + test[arglist]
- utils.system(var_ld_path + ' ' + cmd)
+ def execute(self, args = ''):
+ os.chdir(self.tmpdir)
+ libs = self.autodir + '/deps/libaio/lib/'
+ ld_path = autotest_utils.prepend_path(libs,
+ autotest_utils.environ('LD_LIBRARY_PATH'))
+ var_ld_path = 'LD_LIBRARY_PATH=' + ld_path
+ for test in tests:
+ cmd = self.srcdir + '/' + test[name] + ' ' \
+ + args + ' ' + test[arglist]
+ utils.system(var_ld_path + ' ' + cmd)
diff --git a/client/tests/aiostress/aiostress.py b/client/tests/aiostress/aiostress.py
index 14878f8a..6d39edc8 100755
--- a/client/tests/aiostress/aiostress.py
+++ b/client/tests/aiostress/aiostress.py
@@ -1,7 +1,7 @@
# This requires aio headers to build.
# Should work automagically out of deps now.
-# NOTE - this should also have the ability to mount a filesystem,
+# NOTE - this should also have the ability to mount a filesystem,
# run the tests, unmount it, then fsck the filesystem
import os
from autotest_lib.client.bin import test, autotest_utils
@@ -9,68 +9,68 @@ from autotest_lib.client.common_lib import utils
class aiostress(test.test):
- version = 2
-
- def initialize(self):
- self.job.setup_dep(['libaio'])
- ldflags = '-L ' + self.autodir + '/deps/libaio/lib'
- cflags = '-I ' + self.autodir + '/deps/libaio/include'
- self.gcc_flags = ldflags + ' ' + cflags
-
-
- # ftp://ftp.suse.com/pub/people/mason/utils/aio-stress.c
- def setup(self, tarball = None):
- print self.srcdir, self.bindir, self.tmpdir
- os.mkdir(self.srcdir)
- os.chdir(self.srcdir)
- utils.system('cp ' + self.bindir+'/aio-stress.c .')
- os.chdir(self.srcdir)
- self.gcc_flags += ' -Wall -lpthread -laio'
- cmd = 'gcc ' + self.gcc_flags + ' aio-stress.c -o aio-stress'
- utils.system(cmd)
-
-
- def execute(self, args = ''):
- os.chdir(self.tmpdir)
- libs = self.autodir+'/deps/libaio/lib/'
- ld_path = autotest_utils.prepend_path(libs,
- autotest_utils.environ('LD_LIBRARY_PATH'))
- var_ld_path = 'LD_LIBRARY_PATH=' + ld_path
- cmd = self.srcdir + '/aio-stress ' + args + ' poo'
- profilers = self.job.profilers
-
- if not profilers.only():
- utils.system(var_ld_path + ' ' + cmd)
- report = open(self.debugdir + '/stderr')
- keyval = open(self.resultsdir + '/keyval', 'w')
- _format_results(report, keyval)
-
- # Do a profiling run if necessary
- if profilers.present():
- profilers.start(self)
- utils.system(var_ld_path + ' ' + cmd)
- profilers.stop(self)
- profilers.report(self)
- if profilers.only():
- report = open(self.debugdir + '/stderr')
- keyval = open(self.resultsdir + '/keyval', 'w')
- _format_results(report, keyval)
+ version = 2
+
+ def initialize(self):
+ self.job.setup_dep(['libaio'])
+ ldflags = '-L ' + self.autodir + '/deps/libaio/lib'
+ cflags = '-I ' + self.autodir + '/deps/libaio/include'
+ self.gcc_flags = ldflags + ' ' + cflags
+
+
+ # ftp://ftp.suse.com/pub/people/mason/utils/aio-stress.c
+ def setup(self, tarball = None):
+ print self.srcdir, self.bindir, self.tmpdir
+ os.mkdir(self.srcdir)
+ os.chdir(self.srcdir)
+ utils.system('cp ' + self.bindir+'/aio-stress.c .')
+ os.chdir(self.srcdir)
+ self.gcc_flags += ' -Wall -lpthread -laio'
+ cmd = 'gcc ' + self.gcc_flags + ' aio-stress.c -o aio-stress'
+ utils.system(cmd)
+
+
+ def execute(self, args = ''):
+ os.chdir(self.tmpdir)
+ libs = self.autodir+'/deps/libaio/lib/'
+ ld_path = autotest_utils.prepend_path(libs,
+ autotest_utils.environ('LD_LIBRARY_PATH'))
+ var_ld_path = 'LD_LIBRARY_PATH=' + ld_path
+ cmd = self.srcdir + '/aio-stress ' + args + ' poo'
+ profilers = self.job.profilers
+
+ if not profilers.only():
+ utils.system(var_ld_path + ' ' + cmd)
+ report = open(self.debugdir + '/stderr')
+ keyval = open(self.resultsdir + '/keyval', 'w')
+ _format_results(report, keyval)
+
+ # Do a profiling run if necessary
+ if profilers.present():
+ profilers.start(self)
+ utils.system(var_ld_path + ' ' + cmd)
+ profilers.stop(self)
+ profilers.report(self)
+ if profilers.only():
+ report = open(self.debugdir + '/stderr')
+ keyval = open(self.resultsdir + '/keyval', 'w')
+ _format_results(report, keyval)
def _format_results(report, keyval):
- for line in report:
- if 'threads' in line:
- if 'files' in line:
- if 'contexts' in line:
- break
-
- for line in report:
- line = line.split(')')[0]
- key, value = line.split('(')
- key = key.strip().replace(' ', '_')
- value = value.split()[0]
- print >> keyval, '%s=%s' % (key, value)
+ for line in report:
+ if 'threads' in line:
+ if 'files' in line:
+ if 'contexts' in line:
+ break
+
+ for line in report:
+ line = line.split(')')[0]
+ key, value = line.split('(')
+ key = key.strip().replace(' ', '_')
+ value = value.split()[0]
+ print >> keyval, '%s=%s' % (key, value)
"""
diff --git a/client/tests/barriertest/barriertest.py b/client/tests/barriertest/barriertest.py
index cf890ad4..26bcab70 100644
--- a/client/tests/barriertest/barriertest.py
+++ b/client/tests/barriertest/barriertest.py
@@ -2,30 +2,30 @@ import time
from autotest_utils.client.bin import test
class barriertest(test.test):
- version = 1
+ version = 1
- def execute(self, timeout_sync, timeout_start, timeout_stop,
- hostid, masterid, all_ids):
- profilers = self.job.profilers
+ def execute(self, timeout_sync, timeout_start, timeout_stop,
+ hostid, masterid, all_ids):
+ profilers = self.job.profilers
- b0 = self.job.barrier(hostid, "sync_profilers",
- timeout_start, port=63100)
- b0.rendevous_servers(masterid, hostid)
+ b0 = self.job.barrier(hostid, "sync_profilers",
+ timeout_start, port=63100)
+ b0.rendevous_servers(masterid, hostid)
- b1 = self.job.barrier(hostid, "start_profilers",
- timeout_start, port=63100)
- b1.rendevous_servers(masterid, hostid)
+ b1 = self.job.barrier(hostid, "start_profilers",
+ timeout_start, port=63100)
+ b1.rendevous_servers(masterid, hostid)
- b2 = self.job.barrier(hostid, "local_sync_profilers",
- timeout_sync)
- b2.rendevous(*all_ids)
+ b2 = self.job.barrier(hostid, "local_sync_profilers",
+ timeout_sync)
+ b2.rendevous(*all_ids)
- profilers.start(self)
+ profilers.start(self)
- b3 = self.job.barrier(hostid, "stop_profilers",
- timeout_stop, port=63100)
- b3.rendevous_servers(masterid, hostid)
+ b3 = self.job.barrier(hostid, "stop_profilers",
+ timeout_stop, port=63100)
+ b3.rendevous_servers(masterid, hostid)
- profilers.stop(self)
- profilers.report(self)
+ profilers.stop(self)
+ profilers.report(self)
diff --git a/client/tests/bash_shared_mapping/bash_shared_mapping.py b/client/tests/bash_shared_mapping/bash_shared_mapping.py
index e3fb69df..df993c1a 100755
--- a/client/tests/bash_shared_mapping/bash_shared_mapping.py
+++ b/client/tests/bash_shared_mapping/bash_shared_mapping.py
@@ -3,39 +3,39 @@ from autotest_lib.client.bin import autotest_utils, test
from autotest_lib.client.common_lib import utils
class bash_shared_mapping(test.test):
- version = 3
-
- # http://www.zip.com.au/~akpm/linux/patches/stuff/ext3-tools.tar.gz
- def setup(self, tarball = 'ext3-tools.tar.gz'):
- self.tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
-
- os.chdir(self.srcdir)
- utils.system('make bash-shared-mapping usemem')
-
-
- def execute(self, testdir = None, iterations = 10000):
- if not testdir:
- testdir = self.tmpdir
- os.chdir(testdir)
- file = os.path.join(testdir, 'foo')
- # Want to use 3/4 of all memory for each of
- # bash-shared-mapping and usemem
- kilobytes = (3 * autotest_utils.memtotal()) / 4
-
- # Want two usemem -m megabytes in parallel in background.
- pid = [None, None]
- usemem = os.path.join(self.srcdir, 'usemem')
- args = ('usemem', '-N', '-m', '%d' % (kilobytes / 1024))
- # print_to_tty ('2 x ' + ' '.join(args))
- for i in (0,1):
- pid[i] = os.spawnv(os.P_NOWAIT, usemem, args)
-
- cmd = "%s/bash-shared-mapping %s %d -t %d -n %d" % \
- (self.srcdir, file, kilobytes,
- count_cpus(), iterations)
- os.system(cmd)
-
- for i in (0,1):
- os.kill(pid[i], signal.SIGKILL)
+ version = 3
+
+ # http://www.zip.com.au/~akpm/linux/patches/stuff/ext3-tools.tar.gz
+ def setup(self, tarball = 'ext3-tools.tar.gz'):
+ self.tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
+
+ os.chdir(self.srcdir)
+ utils.system('make bash-shared-mapping usemem')
+
+
+ def execute(self, testdir = None, iterations = 10000):
+ if not testdir:
+ testdir = self.tmpdir
+ os.chdir(testdir)
+ file = os.path.join(testdir, 'foo')
+ # Want to use 3/4 of all memory for each of
+ # bash-shared-mapping and usemem
+ kilobytes = (3 * autotest_utils.memtotal()) / 4
+
+ # Want two usemem -m megabytes in parallel in background.
+ pid = [None, None]
+ usemem = os.path.join(self.srcdir, 'usemem')
+ args = ('usemem', '-N', '-m', '%d' % (kilobytes / 1024))
+ # print_to_tty ('2 x ' + ' '.join(args))
+ for i in (0,1):
+ pid[i] = os.spawnv(os.P_NOWAIT, usemem, args)
+
+ cmd = "%s/bash-shared-mapping %s %d -t %d -n %d" % \
+ (self.srcdir, file, kilobytes,
+ count_cpus(), iterations)
+ os.system(cmd)
+
+ for i in (0,1):
+ os.kill(pid[i], signal.SIGKILL)
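
bash_shared_mapping keeps two usemem processes running in the background (via os.spawnv) while the foreground workload runs, then kills them with SIGKILL. A minimal version of that spawn/work/kill pattern, using subprocess and a placeholder sleep command instead of the real binaries:

import signal
import subprocess

# Two background "memory hog" placeholders; the real test launches usemem here.
background = [subprocess.Popen(['sleep', '60']) for _ in range(2)]
try:
    # Foreground workload; the real test runs bash-shared-mapping here.
    subprocess.call(['sleep', '1'])
finally:
    # The background jobs never exit on their own, so terminate them explicitly,
    # mirroring the os.kill(pid, signal.SIGKILL) loop in the test above.
    for proc in background:
        proc.send_signal(signal.SIGKILL)
        proc.wait()
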
diff --git a/client/tests/bonnie/bonnie.py b/client/tests/bonnie/bonnie.py
index e0252215..e6def250 100755
--- a/client/tests/bonnie/bonnie.py
+++ b/client/tests/bonnie/bonnie.py
@@ -4,73 +4,73 @@ from autotest_lib.client.common_lib import utils
def convert_size(values):
- values = values.split(':')
- size = values[0]
- if len(values) > 1:
- chunk = values[1]
+ values = values.split(':')
+ size = values[0]
+ if len(values) > 1:
+ chunk = values[1]
+ else:
+ chunk = 0
+ if size.endswith('G') or size.endswith('g'):
+ size = int(size[:-1]) * 2**30
+ else:
+ if size.endswith('M') or size.endswith('m'):
+ size = int(size[:-1])
+ size = int(size) * 2**20
+ if chunk:
+ if chunk.endswith('K') or chunk.endswith('k'):
+ chunk = int(chunk[:-1]) * 2**10
else:
- chunk = 0
- if size.endswith('G') or size.endswith('g'):
- size = int(size[:-1]) * 2**30
- else:
- if size.endswith('M') or size.endswith('m'):
- size = int(size[:-1])
- size = int(size) * 2**20
- if chunk:
- if chunk.endswith('K') or chunk.endswith('k'):
- chunk = int(chunk[:-1]) * 2**10
- else:
- chunk = int(chunk)
- return [size, chunk]
+ chunk = int(chunk)
+ return [size, chunk]
class bonnie(test.test):
- version = 1
+ version = 1
- # http://www.coker.com.au/bonnie++/bonnie++-1.03a.tgz
- def setup(self, tarball = 'bonnie++-1.03a.tgz'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- os.chdir(self.srcdir)
+ # http://www.coker.com.au/bonnie++/bonnie++-1.03a.tgz
+ def setup(self, tarball = 'bonnie++-1.03a.tgz'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ os.chdir(self.srcdir)
- os_dep.command('g++')
- utils.system('./configure')
- utils.system('make')
+ os_dep.command('g++')
+ utils.system('./configure')
+ utils.system('make')
- def execute(self, testdir = None, iterations = 1, extra_args = '', user = 'root'):
- if not testdir:
- testdir = self.tmpdir
+ def execute(self, testdir = None, iterations = 1, extra_args = '', user = 'root'):
+ if not testdir:
+ testdir = self.tmpdir
- args = '-d ' + testdir + ' -u ' + user + ' ' + extra_args
- cmd = self.srcdir + '/bonnie++ ' + args
- results = []
- profilers = self.job.profilers
- if not profilers.only():
- for i in range(iterations):
- results.append(utils.system_output(cmd,
- retain_output=True))
+ args = '-d ' + testdir + ' -u ' + user + ' ' + extra_args
+ cmd = self.srcdir + '/bonnie++ ' + args
+ results = []
+ profilers = self.job.profilers
+ if not profilers.only():
+ for i in range(iterations):
+ results.append(utils.system_output(cmd,
+ retain_output=True))
- # Do a profiling run if necessary
- if profilers.present():
- profilers.start(self)
- results.append(utils.system_output(cmd,
- retain_output=True))
- profilers.stop(self)
- profilers.report(self)
+ # Do a profiling run if necessary
+ if profilers.present():
+ profilers.start(self)
+ results.append(utils.system_output(cmd,
+ retain_output=True))
+ profilers.stop(self)
+ profilers.report(self)
- self.__format_results("\n".join(results))
+ self.__format_results("\n".join(results))
- def __format_results(self, results):
- strip_plus = lambda s: re.sub(r"^\++$", "0", s)
- out = open(self.resultsdir + '/keyval', 'w')
- for line in results.split('\n'):
- if len([c for c in line if c == ',']) != 26:
- continue
- fields = tuple(line.split(','))
- fields = [strip_plus(f) for f in fields]
- fields = tuple(convert_size(fields[1]) + fields[2:])
- print >> out, """size=%s
+ def __format_results(self, results):
+ strip_plus = lambda s: re.sub(r"^\++$", "0", s)
+ out = open(self.resultsdir + '/keyval', 'w')
+ for line in results.split('\n'):
+ if len([c for c in line if c == ',']) != 26:
+ continue
+ fields = tuple(line.split(','))
+ fields = [strip_plus(f) for f in fields]
+ fields = tuple(convert_size(fields[1]) + fields[2:])
+ print >> out, """size=%s
chnk=%s
seqout_perchr_ksec=%s
seqout_perchr_pctcp=%s
@@ -98,4 +98,4 @@ randcreate_read_pctcp=%s
randcreate_delete_ksec=%s
randcreate_delete_pctcp=%s
""" % fields
- out.close()
+ out.close()
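
The keyval output above relies on convert_size() to expand bonnie++ size fields such as '1G' or '512M:8k' into bytes. The standalone version below reproduces the same suffix rules (no G/g suffix means megabytes; the chunk after ':' is bytes unless suffixed with K/k), so the conversion can be checked in isolation.

def convert_size(values):
    """Parse bonnie++ size fields like '1G', '300', or '512M:8k'.

    Sizes without a G/g suffix are taken to be megabytes; the optional
    chunk after ':' defaults to bytes unless it carries a K/k suffix.
    Mirrors the convert_size() helper in the wrapper above.
    """
    size, _, chunk = values.partition(':')
    if size[-1:] in ('G', 'g'):
        size = int(size[:-1]) * 2**30
    else:
        if size[-1:] in ('M', 'm'):
            size = size[:-1]
        size = int(size) * 2**20
    if chunk:
        if chunk[-1:] in ('K', 'k'):
            chunk = int(chunk[:-1]) * 2**10
        else:
            chunk = int(chunk)
    else:
        chunk = 0
    return [size, chunk]

assert convert_size('1G') == [2**30, 0]
assert convert_size('300') == [300 * 2**20, 0]
assert convert_size('512M:8k') == [512 * 2**20, 8 * 2**10]
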
diff --git a/client/tests/btreplay/btreplay.py b/client/tests/btreplay/btreplay.py
index ba6e5cbc..5bfc2890 100644
--- a/client/tests/btreplay/btreplay.py
+++ b/client/tests/btreplay/btreplay.py
@@ -4,131 +4,130 @@ from autotest_lib.client.common_lib import error, utils
class btreplay(test.test):
- version = 1
-
- # http://brick.kernel.dk/snaps/blktrace-git-latest.tar.gz
- def setup(self, tarball = 'blktrace-git-latest.tar.gz'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-
- self.job.setup_dep(['libaio'])
- libs = '-L' + self.autodir + '/deps/libaio/lib -laio'
- cflags = '-I ' + self.autodir + '/deps/libaio/include'
- var_libs = 'LIBS="' + libs + '"'
- var_cflags = 'CFLAGS="' + cflags + '"'
- self.make_flags = var_libs + ' ' + var_cflags
-
- os.chdir(self.srcdir)
- utils.system('patch -p1 < ../Makefile.patch')
- utils.system(self.make_flags + ' make')
-
-
- def initialize(self):
- self.ldlib = 'LD_LIBRARY_PATH=%s/deps/libaio/lib'%(self.autodir)
-
-
- def _run_btreplay(self, dev, devices, tmpdir, extra_args):
- alldevs="-d /dev/"+dev
- alldnames = dev
- for d in devices.split():
- alldevs += " -d /dev/"+d
- alldnames += " "+d
-
- # convert the trace (assumed to be in this test's base
- # directory) into btreplay's required format
- utils.system("./btreplay/btrecord -d .. -D "+tmpdir+" "+dev)
-
- # time a replay that omits "thinktime" between requests
- # (by use of the -N flag)
- utils.system(self.ldlib+" /usr/bin/time ./btreplay/btreplay -d "+\
- tmpdir+" -N -W "+dev+" "+extra_args+" 2>&1")
-
- # trace a replay that reproduces inter-request delays, and
- # analyse the trace with btt to determine the average request
- # completion latency
- utils.system("./blktrace -D "+tmpdir+" "+alldevs+" >/dev/null &")
- utils.system(self.ldlib+" ./btreplay/btreplay -d "+tmpdir+" -W "+\
- dev+" "+extra_args)
- utils.system("killall -INT blktrace")
-
- # wait until blktrace is really done
- slept = 0.0
- while utils.system("ps -C blktrace > /dev/null",
- ignore_status=True) == 0:
- time.sleep(0.1)
- slept += 0.1
- if slept > 30.0:
- utils.system("killall -9 blktrace")
- raise error.TestError("blktrace failed to exit after 30 seconds")
- utils.system("./blkparse -q -D "+tmpdir+" -d "+tmpdir+\
- "/trace.bin -O "+alldnames+" >/dev/null")
- utils.system("./btt/btt -i "+tmpdir+"/trace.bin")
-
- def execute(self, iterations = 1, dev="", devices="",
- extra_args = '', tmpdir = None):
- # @dev: The device against which the trace will be replayed.
- # e.g. "sdb" or "md_d1"
- # @devices: A space-separated list of the underlying devices
- # which make up dev, e.g. "sdb sdc". You only need to set
- # devices if dev is an MD, LVM, or similar device;
- # otherwise leave it as an empty string.
-
- if not tmpdir:
- tmpdir = self.tmpdir
-
- os.chdir(self.srcdir)
-
- profilers = self.job.profilers
- if not profilers.only():
- for i in range(iterations):
- self._run_btreplay(dev, devices, tmpdir, extra_args)
-
- # Do a profiling run if necessary
- if profilers.present():
- profilers.start(self)
- self._run_btreplay(dev, devices, tmpdir, extra_args)
- profilers.stop(self)
- profilers.report(self)
-
- self.job.stdout.filehandle.flush()
- self.__format_results(open(self.debugdir + '/stdout').read())
-
-
- def __format_results(self, results):
- out = open(self.resultsdir + '/keyval', 'w')
- lines = results.split('\n')
-
- for n in range(len(lines)):
- if lines[n].strip() == "==================== All Devices ====================":
- words = lines[n-2].split()
- s = words[1].strip('sytem').split(':')
- e = words[2].strip('elapsd').split(':')
- break
-
- systime = 0.0
- for n in range(len(s)):
- i = (len(s)-1) - n
- systime += float(s[i])*(60**n)
- elapsed = 0.0
- for n in range(len(e)):
- i = (len(e)-1) - n
- elapsed += float(e[i])*(60**n)
-
- q2c = 0.0
- for line in lines:
- words = line.split()
- if len(words) < 3:
- continue
- if words[0] == 'Q2C':
- q2c = float(words[2])
- break
-
-
- print >> out, """\
+ version = 1
+
+ # http://brick.kernel.dk/snaps/blktrace-git-latest.tar.gz
+ def setup(self, tarball = 'blktrace-git-latest.tar.gz'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+
+ self.job.setup_dep(['libaio'])
+ libs = '-L' + self.autodir + '/deps/libaio/lib -laio'
+ cflags = '-I ' + self.autodir + '/deps/libaio/include'
+ var_libs = 'LIBS="' + libs + '"'
+ var_cflags = 'CFLAGS="' + cflags + '"'
+ self.make_flags = var_libs + ' ' + var_cflags
+
+ os.chdir(self.srcdir)
+ utils.system('patch -p1 < ../Makefile.patch')
+ utils.system(self.make_flags + ' make')
+
+
+ def initialize(self):
+ self.ldlib = 'LD_LIBRARY_PATH=%s/deps/libaio/lib'%(self.autodir)
+
+
+ def _run_btreplay(self, dev, devices, tmpdir, extra_args):
+ alldevs="-d /dev/"+dev
+ alldnames = dev
+ for d in devices.split():
+ alldevs += " -d /dev/"+d
+ alldnames += " "+d
+
+ # convert the trace (assumed to be in this test's base
+ # directory) into btreplay's required format
+ utils.system("./btreplay/btrecord -d .. -D "+tmpdir+" "+dev)
+
+ # time a replay that omits "thinktime" between requests
+ # (by use of the -N flag)
+ utils.system(self.ldlib+" /usr/bin/time ./btreplay/btreplay -d "+\
+ tmpdir+" -N -W "+dev+" "+extra_args+" 2>&1")
+
+ # trace a replay that reproduces inter-request delays, and
+ # analyse the trace with btt to determine the average request
+ # completion latency
+ utils.system("./blktrace -D "+tmpdir+" "+alldevs+" >/dev/null &")
+ utils.system(self.ldlib+" ./btreplay/btreplay -d "+tmpdir+" -W "+\
+ dev+" "+extra_args)
+ utils.system("killall -INT blktrace")
+
+ # wait until blktrace is really done
+ slept = 0.0
+ while utils.system("ps -C blktrace > /dev/null",
+ ignore_status=True) == 0:
+ time.sleep(0.1)
+ slept += 0.1
+ if slept > 30.0:
+ utils.system("killall -9 blktrace")
+ raise error.TestError("blktrace failed to exit after 30 seconds")
+ utils.system("./blkparse -q -D "+tmpdir+" -d "+tmpdir+\
+ "/trace.bin -O "+alldnames+" >/dev/null")
+ utils.system("./btt/btt -i "+tmpdir+"/trace.bin")
+
+ def execute(self, iterations = 1, dev="", devices="",
+ extra_args = '', tmpdir = None):
+ # @dev: The device against which the trace will be replayed.
+ # e.g. "sdb" or "md_d1"
+ # @devices: A space-separated list of the underlying devices
+ # which make up dev, e.g. "sdb sdc". You only need to set
+ # devices if dev is an MD, LVM, or similar device;
+ # otherwise leave it as an empty string.
+
+ if not tmpdir:
+ tmpdir = self.tmpdir
+
+ os.chdir(self.srcdir)
+
+ profilers = self.job.profilers
+ if not profilers.only():
+ for i in range(iterations):
+ self._run_btreplay(dev, devices, tmpdir, extra_args)
+
+ # Do a profiling run if necessary
+ if profilers.present():
+ profilers.start(self)
+ self._run_btreplay(dev, devices, tmpdir, extra_args)
+ profilers.stop(self)
+ profilers.report(self)
+
+ self.job.stdout.filehandle.flush()
+ self.__format_results(open(self.debugdir + '/stdout').read())
+
+
+ def __format_results(self, results):
+ out = open(self.resultsdir + '/keyval', 'w')
+ lines = results.split('\n')
+
+ for n in range(len(lines)):
+ if lines[n].strip() == "==================== All Devices ====================":
+ words = lines[n-2].split()
+ s = words[1].strip('sytem').split(':')
+ e = words[2].strip('elapsd').split(':')
+ break
+
+ systime = 0.0
+ for n in range(len(s)):
+ i = (len(s)-1) - n
+ systime += float(s[i])*(60**n)
+ elapsed = 0.0
+ for n in range(len(e)):
+ i = (len(e)-1) - n
+ elapsed += float(e[i])*(60**n)
+
+ q2c = 0.0
+ for line in lines:
+ words = line.split()
+ if len(words) < 3:
+ continue
+ if words[0] == 'Q2C':
+ q2c = float(words[2])
+ break
+
+
+ print >> out, """\
time=%f
systime=%f
avg_q2c_latency=%f
""" % (elapsed, systime, q2c)
- out.close()
-
+ out.close()
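
__format_results above folds colon-separated times such as '0:01:30' from the btt summary into seconds with a base-60 positional sum. The helper below is an equivalent, slightly more direct formulation of that conversion.

def clock_to_seconds(stamp):
    """Convert a colon-separated time like '1:30' or '0:01:30' to seconds.

    Each field is worth 60x the one to its right, matching the
    float(s[i]) * (60 ** n) accumulation in __format_results above.
    """
    total = 0.0
    for field in stamp.split(':'):
        total = total * 60 + float(field)
    return total

assert clock_to_seconds('1:30') == 90.0
assert clock_to_seconds('0:01:30') == 90.0
assert abs(clock_to_seconds('2:03.5') - 123.5) < 1e-9
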
diff --git a/client/tests/container_functional/container_functional.py b/client/tests/container_functional/container_functional.py
index 62bc3e8a..eec05322 100644
--- a/client/tests/container_functional/container_functional.py
+++ b/client/tests/container_functional/container_functional.py
@@ -7,66 +7,66 @@ from autotest_lib.client.common_lib import utils
class container_functional(test.test):
- version = 1
+ version = 1
- def execute(self, mbytes=None, cpus=None, root='', name=None):
- """Check that the container was setup.
- The arguments must be the same than
- job.new_container()"""
- if not name:
- raise error.TestError("Must have a container name")
+ def execute(self, mbytes=None, cpus=None, root='', name=None):
+ """Check that the container was setup.
+ The arguments must be the same than
+ job.new_container()"""
+ if not name:
+ raise error.TestError("Must have a container name")
- # Do container exists?
- for container in ['sys', name]:
- try:
- utils.system('ls %s > /dev/null' % \
- os.path.join('/dev/cpuset',
- container))
- except error.CmdError:
- raise error.TestError("Container %s not created." % \
- container)
+        # Do the containers exist?
+ for container in ['sys', name]:
+ try:
+ utils.system('ls %s > /dev/null' % \
+ os.path.join('/dev/cpuset',
+ container))
+ except error.CmdError:
+ raise error.TestError("Container %s not created." % \
+ container)
- # Did we get the CPUs?
- if cpus:
- actual_cpus = utils.system_output('cat %s' % \
- os.path.join('/dev/cpuset',
- name,
- 'cpus'))
- if cpus != cpuset.rangelist_to_list(actual_cpus):
- raise error.TestError(("CPUs = %s, "
- "expecting: %s") %
- (actual_cpus, cpus))
+ # Did we get the CPUs?
+ if cpus:
+ actual_cpus = utils.system_output('cat %s' % \
+ os.path.join('/dev/cpuset',
+ name,
+ 'cpus'))
+ if cpus != cpuset.rangelist_to_list(actual_cpus):
+ raise error.TestError(("CPUs = %s, "
+ "expecting: %s") %
+ (actual_cpus, cpus))
- # Are we in this container?
- actual_pid = utils.system_output('cat %s' % \
- os.path.join('/dev/cpuset',
- name,
- 'tasks'))
+ # Are we in this container?
+ actual_pid = utils.system_output('cat %s' % \
+ os.path.join('/dev/cpuset',
+ name,
+ 'tasks'))
- if str(os.getpid()) not in actual_pid:
- raise error.TestError("My pid %s is not in "
- "container task list: %s" % \
- (str(os.getpid()), actual_pid))
+ if str(os.getpid()) not in actual_pid:
+ raise error.TestError("My pid %s is not in "
+ "container task list: %s" % \
+ (str(os.getpid()), actual_pid))
- # Our memory nodes != sys memory nodes
- actual_mems = utils.system_output('cat %s' % \
- os.path.join('/dev/cpuset',
- name,
- 'mems'))
- sys_mems = utils.system_output('cat %s' % \
- os.path.join('/dev/cpuset',
- 'sys',
- 'mems'))
+ # Our memory nodes != sys memory nodes
+ actual_mems = utils.system_output('cat %s' % \
+ os.path.join('/dev/cpuset',
+ name,
+ 'mems'))
+ sys_mems = utils.system_output('cat %s' % \
+ os.path.join('/dev/cpuset',
+ 'sys',
+ 'mems'))
- actual_nodes = set(cpuset.rangelist_to_list(actual_mems))
- sys_nodes = set(cpuset.rangelist_to_list(sys_mems))
+ actual_nodes = set(cpuset.rangelist_to_list(actual_mems))
+ sys_nodes = set(cpuset.rangelist_to_list(sys_mems))
- if actual_nodes.intersection(sys_nodes):
- raise error.TestError("Sys nodes = %s\n"
- "My nodes = %s" % \
- (sys_nodes, actual_nodes))
+ if actual_nodes.intersection(sys_nodes):
+ raise error.TestError("Sys nodes = %s\n"
+ "My nodes = %s" % \
+ (sys_nodes, actual_nodes))
- # Should only have one node for 100MB
- if len(actual_nodes) != 1:
- raise error.TestError(("Got more than 1 node: %s" %
- actual_nodes))
+ # Should only have one node for 100MB
+ if len(actual_nodes) != 1:
+ raise error.TestError(("Got more than 1 node: %s" %
+ actual_nodes))
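
These checks depend on cpuset.rangelist_to_list() to expand strings like '0-3,8' read from /dev/cpuset/<name>/cpus and mems. The real helper lives in client/bin/cpuset.py; the sketch below only illustrates the expected expansion and is an assumption about its behaviour, not a copy of it.

def rangelist_to_list(rangelist):
    """Expand a cpuset range string such as '0-3,8,10-11' into a list of ints."""
    result = []
    if not rangelist.strip():
        return result
    for part in rangelist.split(','):
        if '-' in part:
            first, last = part.split('-')
            result.extend(range(int(first), int(last) + 1))
        else:
            result.append(int(part))
    return result

assert rangelist_to_list('0-3,8') == [0, 1, 2, 3, 8]
assert rangelist_to_list('5') == [5]
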
diff --git a/client/tests/cpu_hotplug/cpu_hotplug.py b/client/tests/cpu_hotplug/cpu_hotplug.py
index 9d016cc3..352145bf 100644
--- a/client/tests/cpu_hotplug/cpu_hotplug.py
+++ b/client/tests/cpu_hotplug/cpu_hotplug.py
@@ -3,44 +3,44 @@ from autotest_lib.client.bin import test, autotest_utils
from autotest_lib.client.common_lib import utils
class cpu_hotplug(test.test):
- version = 2
+ version = 2
- # http://developer.osdl.org/dev/hotplug/tests/lhcs_regression-1.6.tgz
- def setup(self, tarball = 'lhcs_regression-1.6.tgz'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-
- def execute(self):
- # Check if the kernel supports cpu hotplug
- if autotest_utils.running_config():
- autotest_utils.check_for_kernel_feature('HOTPLUG_CPU')
-
- # Check cpu nums, if equals 1, quit.
- if autotest_utils.count_cpus() == 1:
- print 'Just only single cpu online, quiting...'
- sys.exit()
-
- # Have a simple and quick check first, FIX me please.
- utils.system('dmesg -c > /dev/null')
- for cpu in autotest_utils.cpu_online_map():
- if os.path.isfile('/sys/devices/system/cpu/cpu%s/online' % cpu):
- utils.system('echo 0 > /sys/devices/system/cpu/cpu%s/online' % cpu, 1)
- utils.system('dmesg -c')
- time.sleep(3)
- utils.system('echo 1 > /sys/devices/system/cpu/cpu%s/online' % cpu, 1)
- utils.system('dmesg -c')
- time.sleep(3)
-
- # Begin this cpu hotplug test big guru.
- os.chdir(self.srcdir)
- profilers = self.job.profilers
- if not profilers.only():
- utils.system('./runtests.sh')
+ # http://developer.osdl.org/dev/hotplug/tests/lhcs_regression-1.6.tgz
+ def setup(self, tarball = 'lhcs_regression-1.6.tgz'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- # Do a profiling run if necessary
- if profilers.present():
- profilers.start(self)
- utils.system('./runtests.sh')
- profilers.stop(self)
- profilers.report(self)
+ def execute(self):
+ # Check if the kernel supports cpu hotplug
+ if autotest_utils.running_config():
+ autotest_utils.check_for_kernel_feature('HOTPLUG_CPU')
+
+ # Check cpu nums, if equals 1, quit.
+ if autotest_utils.count_cpus() == 1:
+            print 'Only a single cpu is online, quitting...'
+ sys.exit()
+
+ # Have a simple and quick check first, FIX me please.
+ utils.system('dmesg -c > /dev/null')
+ for cpu in autotest_utils.cpu_online_map():
+ if os.path.isfile('/sys/devices/system/cpu/cpu%s/online' % cpu):
+ utils.system('echo 0 > /sys/devices/system/cpu/cpu%s/online' % cpu, 1)
+ utils.system('dmesg -c')
+ time.sleep(3)
+ utils.system('echo 1 > /sys/devices/system/cpu/cpu%s/online' % cpu, 1)
+ utils.system('dmesg -c')
+ time.sleep(3)
+
+ # Begin this cpu hotplug test big guru.
+ os.chdir(self.srcdir)
+ profilers = self.job.profilers
+ if not profilers.only():
+ utils.system('./runtests.sh')
+
+ # Do a profiling run if necessary
+ if profilers.present():
+ profilers.start(self)
+ utils.system('./runtests.sh')
+ profilers.stop(self)
+ profilers.report(self)
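
The quick sanity loop above toggles each CPU through /sys/devices/system/cpu/cpuN/online using echo. The same toggle can be written directly in Python; this sketch assumes the standard sysfs layout and, like the test itself, needs root for the write to take effect.

import os
import time

def set_cpu_online(cpu, online):
    """Write '1' or '0' to the cpu's online file, if the kernel exposes one.

    cpu0 usually has no 'online' file because it cannot be offlined.
    Writing requires root; without it the attempt is reported as a failure.
    """
    path = '/sys/devices/system/cpu/cpu%d/online' % cpu
    if not os.path.isfile(path):
        return False
    try:
        with open(path, 'w') as f:
            f.write('1' if online else '0')
    except IOError:
        return False
    return True

# Example: bounce cpu1 offline and back online, as the dmesg-checking loop above does.
if set_cpu_online(1, False):
    time.sleep(3)
    set_cpu_online(1, True)
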
diff --git a/client/tests/cyclictest/cyclictest.py b/client/tests/cyclictest/cyclictest.py
index 5f0f98e4..5c9189d9 100755
--- a/client/tests/cyclictest/cyclictest.py
+++ b/client/tests/cyclictest/cyclictest.py
@@ -4,14 +4,14 @@ from autotest_lib.client.common_lib import utils
class cyclictest(test.test):
- version = 2
- preserve_srcdir = True
+ version = 2
+ preserve_srcdir = True
- # git://git.kernel.org/pub/scm/linux/kernel/git/tglx/rt-tests.git
+ # git://git.kernel.org/pub/scm/linux/kernel/git/tglx/rt-tests.git
- def setup(self):
- os.chdir(self.srcdir)
- utils.system('make')
+ def setup(self):
+ os.chdir(self.srcdir)
+ utils.system('make')
- def execute(self, args = '-t 10 -l 100000'):
- utils.system(self.srcdir + '/cyclictest ' + args)
+ def execute(self, args = '-t 10 -l 100000'):
+ utils.system(self.srcdir + '/cyclictest ' + args)
diff --git a/client/tests/dacapo/dacapo.py b/client/tests/dacapo/dacapo.py
index 38bb7118..e7f10c26 100644
--- a/client/tests/dacapo/dacapo.py
+++ b/client/tests/dacapo/dacapo.py
@@ -1,10 +1,10 @@
# Dacapo test suite wrapper
#
-# This benchmark suite is intended as a tool for Java benchmarking by the
+# This benchmark suite is intended as a tool for Java benchmarking by the
# programming language, memory management and computer architecture communities.
-# It consists of a set of open source, real world applications with non-trivial
-# memory loads. The suite is the culmination of over five years work at eight
-# institutions, as part of the DaCapo research project, which was funded by a
+# It consists of a set of open source, real world applications with non-trivial
+# memory loads. The suite is the culmination of over five years work at eight
+# institutions, as part of the DaCapo research project, which was funded by a
# National Science Foundation ITR Grant, CCR-0085792.
#
import os
@@ -14,101 +14,100 @@ from autotest_lib.client.common_lib import utils
class dacapo(test.test):
- version = 1
+ version = 1
- def set_java_environment(self, jvm, java_root):
- '''\
- Setup java environment variables (path and classpath in order to
- execute a specific jvm specified by the java_root variable.
- java_root - Base of the java vm installation
- '''
- # Sun has changed the directory layout for java 6
- # (now there's no jre directory). Let's work around this...
- if jvm == 'sun16':
- self.java_home = java_root
- else:
- self.java_home = os.path.join(java_root, 'jre')
- self.java_bin = os.path.join(self.java_home, 'bin')
- self.java_lib = os.path.join(self.java_home, 'lib')
- os.environ['JAVA_ROOT'] = java_root
- os.environ['JAVA_HOME'] = self.java_home
- os.environ['JRE_HOME'] = self.java_home
- os.environ['CLASSPATH'] = self.java_lib
- os.environ['JAVA_BINDIR'] = self.java_bin
- os.environ['PATH'] = self.java_bin + ':' + os.environ['PATH']
+ def set_java_environment(self, jvm, java_root):
+ '''\
+        Set up java environment variables (path and classpath) in order to
+ execute a specific jvm specified by the java_root variable.
+ java_root - Base of the java vm installation
+ '''
+ # Sun has changed the directory layout for java 6
+ # (now there's no jre directory). Let's work around this...
+ if jvm == 'sun16':
+ self.java_home = java_root
+ else:
+ self.java_home = os.path.join(java_root, 'jre')
+ self.java_bin = os.path.join(self.java_home, 'bin')
+ self.java_lib = os.path.join(self.java_home, 'lib')
+ os.environ['JAVA_ROOT'] = java_root
+ os.environ['JAVA_HOME'] = self.java_home
+ os.environ['JRE_HOME'] = self.java_home
+ os.environ['CLASSPATH'] = self.java_lib
+ os.environ['JAVA_BINDIR'] = self.java_bin
+ os.environ['PATH'] = self.java_bin + ':' + os.environ['PATH']
- def execute(self, test = 'antlr', config = './dacapo.cfg', jvm = 'ibm14-ppc64'):
- # Load the test configuration. If needed, use autotest tmpdir to write
- # files.
- my_config = config_loader(config, self.tmpdir)
- # Directory where we will cache the dacapo jar file
- # and the jvm package files
- self.cachedir = os.path.join(self.bindir, 'cache')
- if not os.path.isdir(self.cachedir):
- os.makedirs(self.cachedir)
+ def execute(self, test = 'antlr', config = './dacapo.cfg', jvm = 'ibm14-ppc64'):
+ # Load the test configuration. If needed, use autotest tmpdir to write
+ # files.
+ my_config = config_loader(config, self.tmpdir)
+ # Directory where we will cache the dacapo jar file
+ # and the jvm package files
+ self.cachedir = os.path.join(self.bindir, 'cache')
+ if not os.path.isdir(self.cachedir):
+ os.makedirs(self.cachedir)
- # Get dacapo jar URL
- # (It's possible to override the default URL that points to the
- # sourceforge repository)
- if my_config.get('dacapo', 'override_default_url') == 'no':
- self.dacapo_url = my_config.get('dacapo', 'tarball_url')
- else:
- self.dacapo_url = my_config.get('dacapo', 'tarball_url_alt')
- if not self.dacapo_url:
- raise error.TestError('Could not read dacapo URL from conf file')
- # We can cache the dacapo package file if we take some
- # precautions (checking md5 sum of the downloaded file)
- self.dacapo_md5 = my_config.get('dacapo', 'package_md5')
- if not self.dacapo_md5:
- e_msg = 'Could not read dacapo package md5sum from conf file'
- raise error.TestError(e_msg)
- self.dacapo_pkg = \
- autotest_utils.unmap_url_cache(self.cachedir, self.dacapo_url,
- self.dacapo_md5)
+ # Get dacapo jar URL
+ # (It's possible to override the default URL that points to the
+ # sourceforge repository)
+ if my_config.get('dacapo', 'override_default_url') == 'no':
+ self.dacapo_url = my_config.get('dacapo', 'tarball_url')
+ else:
+ self.dacapo_url = my_config.get('dacapo', 'tarball_url_alt')
+ if not self.dacapo_url:
+ raise error.TestError('Could not read dacapo URL from conf file')
+ # We can cache the dacapo package file if we take some
+ # precautions (checking md5 sum of the downloaded file)
+ self.dacapo_md5 = my_config.get('dacapo', 'package_md5')
+ if not self.dacapo_md5:
+ e_msg = 'Could not read dacapo package md5sum from conf file'
+ raise error.TestError(e_msg)
+ self.dacapo_pkg = \
+ autotest_utils.unmap_url_cache(self.cachedir, self.dacapo_url,
+ self.dacapo_md5)
- # Get jvm package URL
- self.jvm_pkg_url = my_config.get(jvm, 'jvm_pkg_url')
- if not self.jvm_pkg_url:
- raise error.TestError('Could not read java vm URL from conf file')
- # Let's cache the jvm package as well
- self.jvm_pkg_md5 = my_config.get(jvm, 'package_md5')
- if not self.jvm_pkg_md5:
- raise error.TestError('Could not read java package_md5 from conf file')
- self.jvm_pkg = \
- autotest_utils.unmap_url_cache(self.cachedir, self.jvm_pkg_url,
- self.jvm_pkg_md5)
+ # Get jvm package URL
+ self.jvm_pkg_url = my_config.get(jvm, 'jvm_pkg_url')
+ if not self.jvm_pkg_url:
+ raise error.TestError('Could not read java vm URL from conf file')
+ # Let's cache the jvm package as well
+ self.jvm_pkg_md5 = my_config.get(jvm, 'package_md5')
+ if not self.jvm_pkg_md5:
+ raise error.TestError('Could not read java package_md5 from conf file')
+ self.jvm_pkg = \
+ autotest_utils.unmap_url_cache(self.cachedir, self.jvm_pkg_url,
+ self.jvm_pkg_md5)
- # Install the jvm pakage
- package.install(self.jvm_pkg)
+        # Install the jvm package
+ package.install(self.jvm_pkg)
- # Basic Java environment variables setup
- self.java_root = my_config.get(jvm, 'java_root')
- if not self.java_root:
- raise error.TestError('Could not read java root dir from conf file')
- self.set_java_environment(jvm, self.java_root)
+ # Basic Java environment variables setup
+ self.java_root = my_config.get(jvm, 'java_root')
+ if not self.java_root:
+ raise error.TestError('Could not read java root dir from conf file')
+ self.set_java_environment(jvm, self.java_root)
- # If use_global is set to 'yes', then we want to use the global
- # setting instead of per test settings
- if my_config.get('global', 'use_global') == 'yes':
- self.iterations = my_config.get('global', 'iterations')
- self.workload = my_config.get('global', 'workload')
- else:
- self.iterations = my_config.get(test, 'iterations')
- self.workload = my_config.get(test, 'workload')
-
- self.verbose = '-v '
- self.workload = '-s %s ' % self.workload
- self.iterations = '-n %s ' % self.iterations
- self.scratch = '-scratch %s ' % os.path.join(self.resultsdir, test)
- # Compose the arguments string
- self.args = self.verbose + self.workload + self.scratch \
- + self.iterations + test
- # Execute the actual test
- try:
- utils.system('java -jar %s %s' % (self.dacapo_pkg, self.args))
- except:
- e_msg = \
- 'Test %s has failed, command line options "%s"' % (test, self.args)
- raise error.TestError(e_msg)
+ # If use_global is set to 'yes', then we want to use the global
+ # setting instead of per test settings
+ if my_config.get('global', 'use_global') == 'yes':
+ self.iterations = my_config.get('global', 'iterations')
+ self.workload = my_config.get('global', 'workload')
+ else:
+ self.iterations = my_config.get(test, 'iterations')
+ self.workload = my_config.get(test, 'workload')
+ self.verbose = '-v '
+ self.workload = '-s %s ' % self.workload
+ self.iterations = '-n %s ' % self.iterations
+ self.scratch = '-scratch %s ' % os.path.join(self.resultsdir, test)
+ # Compose the arguments string
+ self.args = self.verbose + self.workload + self.scratch \
+ + self.iterations + test
+ # Execute the actual test
+ try:
+ utils.system('java -jar %s %s' % (self.dacapo_pkg, self.args))
+ except:
+ e_msg = \
+ 'Test %s has failed, command line options "%s"' % (test, self.args)
+ raise error.TestError(e_msg)
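
dacapo caches both the benchmark jar and the JVM package locally and only reuses a cached copy whose md5sum matches the value from the config file (autotest_utils.unmap_url_cache). Below is a minimal standard-library sketch of that download-and-verify cache; the function name and error handling are illustrative, not the autotest API.

import hashlib
import os
try:
    from urllib.request import urlretrieve      # Python 3
except ImportError:
    from urllib import urlretrieve               # Python 2

def fetch_cached(url, expected_md5, cachedir):
    """Download url into cachedir unless a copy with the right md5 is already there."""
    dest = os.path.join(cachedir, os.path.basename(url))
    if os.path.exists(dest):
        with open(dest, 'rb') as f:
            if hashlib.md5(f.read()).hexdigest() == expected_md5:
                return dest                       # cached copy is good, reuse it
        os.remove(dest)                           # stale or corrupt, fetch again
    urlretrieve(url, dest)
    with open(dest, 'rb') as f:
        if hashlib.md5(f.read()).hexdigest() != expected_md5:
            raise ValueError('md5 mismatch for %s' % url)
    return dest
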
diff --git a/client/tests/dbench/dbench.py b/client/tests/dbench/dbench.py
index a75adc6b..085b4b85 100755
--- a/client/tests/dbench/dbench.py
+++ b/client/tests/dbench/dbench.py
@@ -3,47 +3,47 @@ from autotest_lib.client.bin import autotest_utils, test
from autotest_lib.client.common_lib import utils
class dbench(test.test):
- version = 1
-
- # http://samba.org/ftp/tridge/dbench/dbench-3.04.tar.gz
- def setup(self, tarball = 'dbench-3.04.tar.gz'):
- tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- os.chdir(self.srcdir)
-
- utils.system('./configure')
- utils.system('make')
-
-
- def execute(self, iterations = 1, dir = None, nprocs = None, args = ''):
- if not nprocs:
- nprocs = self.job.cpu_count()
- profilers = self.job.profilers
- args = args + ' -c '+self.srcdir+'/client.txt'
- if dir:
- args += ' -D ' + dir
- args += ' %s' % nprocs
- cmd = self.srcdir + '/dbench ' + args
- results = []
- if not profilers.only():
- for i in range(iterations):
- results.append(utils.system_output(cmd,
- retain_output=True))
-
- # Do a profiling run if necessary
- if profilers.present():
- profilers.start(self)
- results.append(utils.system_output(cmd,
- retain_output=True))
- profilers.stop(self)
- profilers.report(self)
-
- self.__format_results("\n".join(results))
-
-
- def __format_results(self, results):
- out = open(self.resultsdir + '/keyval', 'w')
- pattern = re.compile(r"Throughput (.*?) MB/sec (.*?) procs")
- for result in pattern.findall(results):
- print >> out, "throughput=%s\nprocs=%s\n" % result
- out.close()
+ version = 1
+
+ # http://samba.org/ftp/tridge/dbench/dbench-3.04.tar.gz
+ def setup(self, tarball = 'dbench-3.04.tar.gz'):
+ tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ os.chdir(self.srcdir)
+
+ utils.system('./configure')
+ utils.system('make')
+
+
+ def execute(self, iterations = 1, dir = None, nprocs = None, args = ''):
+ if not nprocs:
+ nprocs = self.job.cpu_count()
+ profilers = self.job.profilers
+ args = args + ' -c '+self.srcdir+'/client.txt'
+ if dir:
+ args += ' -D ' + dir
+ args += ' %s' % nprocs
+ cmd = self.srcdir + '/dbench ' + args
+ results = []
+ if not profilers.only():
+ for i in range(iterations):
+ results.append(utils.system_output(cmd,
+ retain_output=True))
+
+ # Do a profiling run if necessary
+ if profilers.present():
+ profilers.start(self)
+ results.append(utils.system_output(cmd,
+ retain_output=True))
+ profilers.stop(self)
+ profilers.report(self)
+
+ self.__format_results("\n".join(results))
+
+
+ def __format_results(self, results):
+ out = open(self.resultsdir + '/keyval', 'w')
+ pattern = re.compile(r"Throughput (.*?) MB/sec (.*?) procs")
+ for result in pattern.findall(results):
+ print >> out, "throughput=%s\nprocs=%s\n" % result
+ out.close()
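
__format_results above reduces the combined dbench output to keyvals with a single regular expression over the "Throughput X MB/sec Y procs" summary lines. The same extraction on a fabricated two-line sample:

import re

sample = """Throughput 198.65 MB/sec 4 procs
Throughput 201.32 MB/sec 4 procs"""

pattern = re.compile(r"Throughput (.*?) MB/sec (.*?) procs")
for throughput, procs in pattern.findall(sample):
    # Same keyval layout as the test writes into its results/keyval file.
    print("throughput=%s\nprocs=%s\n" % (throughput, procs))
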
diff --git a/client/tests/dbt2/dbt2.py b/client/tests/dbt2/dbt2.py
index d35018b0..5d6d6fca 100644
--- a/client/tests/dbt2/dbt2.py
+++ b/client/tests/dbt2/dbt2.py
@@ -3,72 +3,72 @@ from autotest_lib.client.bin import test, autotest_utils
from autotest_lib.client.common_lib import utils
-# Dbt-2 is a fair-use implementation of the TPC-C benchmark. The test is
+# Dbt-2 is a fair-use implementation of the TPC-C benchmark. The test is
# currently hardcoded to use PostgreSQL but the kit also supports MySQL.
class dbt2(test.test):
- version = 2
+ version = 2
- # http://osdn.dl.sourceforge.net/sourceforge/osdldbt/dbt2-0.39.tar.gz
- def setup(self, tarball = 'dbt2-0.39.tar.bz2'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- self.job.setup_dep(['pgsql', 'pgpool', 'mysql'])
+ # http://osdn.dl.sourceforge.net/sourceforge/osdldbt/dbt2-0.39.tar.gz
+ def setup(self, tarball = 'dbt2-0.39.tar.bz2'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ self.job.setup_dep(['pgsql', 'pgpool', 'mysql'])
- #
- # Extract one copy of the kit for MySQL.
- #
- utils.system('cp -pR ' + self.srcdir + ' ' + self.srcdir + '.mysql')
- os.chdir(self.srcdir + '.mysql')
- utils.system('./configure --with-mysql=%s/deps/mysql/mysql' \
- % self.autodir)
- utils.system('make')
+ #
+ # Extract one copy of the kit for MySQL.
+ #
+ utils.system('cp -pR ' + self.srcdir + ' ' + self.srcdir + '.mysql')
+ os.chdir(self.srcdir + '.mysql')
+ utils.system('./configure --with-mysql=%s/deps/mysql/mysql' \
+ % self.autodir)
+ utils.system('make')
- #
- # Extract one copy of the kit for PostgreSQL.
- #
- utils.system('cp -pR ' + self.srcdir + ' ' + self.srcdir + '.pgsql')
- os.chdir(self.srcdir + '.pgsql')
- utils.system('./configure --with-postgresql=%s/deps/pgsql/pgsql' \
- % self.autodir)
- utils.system('make')
+ #
+ # Extract one copy of the kit for PostgreSQL.
+ #
+ utils.system('cp -pR ' + self.srcdir + ' ' + self.srcdir + '.pgsql')
+ os.chdir(self.srcdir + '.pgsql')
+ utils.system('./configure --with-postgresql=%s/deps/pgsql/pgsql' \
+ % self.autodir)
+ utils.system('make')
- # Create symlinks to autotest's results directory from dbt-2's
- # preferred results directory to self.resultsdir
- utils.system('ln -s %s %s' % (self.resultsdir, \
- self.srcdir + '.mysql/scripts/output'))
- utils.system('ln -s %s %s' % (self.resultsdir, \
- self.srcdir + '.pgsql/scripts/output'))
+ # Create symlinks to autotest's results directory from dbt-2's
+ # preferred results directory to self.resultsdir
+ utils.system('ln -s %s %s' % (self.resultsdir, \
+ self.srcdir + '.mysql/scripts/output'))
+ utils.system('ln -s %s %s' % (self.resultsdir, \
+ self.srcdir + '.pgsql/scripts/output'))
- def execute(self, db_type, args = ''):
- logfile = self.resultsdir + '/dbt2.log'
+ def execute(self, db_type, args = ''):
+ logfile = self.resultsdir + '/dbt2.log'
- if (db_type == "mysql"):
- self.execute_mysql(args)
- elif (db_type == "pgpool"):
- self.execute_pgpool(args)
- elif (db_type == "pgsql"):
- self.execute_pgsql(args)
+ if (db_type == "mysql"):
+ self.execute_mysql(args)
+ elif (db_type == "pgpool"):
+ self.execute_pgpool(args)
+ elif (db_type == "pgsql"):
+ self.execute_pgsql(args)
- def execute_mysql(self, args = ''):
- args = args
- utils.system(self.srcdir + '.mysql/scripts/mysql/build_db.sh -g -w 1')
- utils.system(self.srcdir + '.mysql/scripts/run_workload.sh ' + args)
+ def execute_mysql(self, args = ''):
+ args = args
+ utils.system(self.srcdir + '.mysql/scripts/mysql/build_db.sh -g -w 1')
+ utils.system(self.srcdir + '.mysql/scripts/run_workload.sh ' + args)
- def execute_pgpool(self, args = ''):
- utils.system('%s/deps/pgpool/pgpool/bin/pgpool -f %s/../pgpool.conf' \
- % (self.autodir, self.srcdir))
- self.execute_pgsql(args)
- utils.system('%s/deps/pgpool/pgpool/bin/pgpool stop' % self.autodir)
+ def execute_pgpool(self, args = ''):
+ utils.system('%s/deps/pgpool/pgpool/bin/pgpool -f %s/../pgpool.conf' \
+ % (self.autodir, self.srcdir))
+ self.execute_pgsql(args)
+ utils.system('%s/deps/pgpool/pgpool/bin/pgpool stop' % self.autodir)
- def execute_pgsql(self, args = ''):
- utils.system(self.srcdir + '.pgsql/scripts/pgsql/build_db.sh -g -w 1')
- utils.system(self.srcdir + '.pgsql/scripts/run_workload.sh ' + args)
- #
- # Clean up by dropping the database after the test.
- #
- utils.system(self.srcdir + '.pgsql/scripts/pgsql/start_db.sh')
- utils.system(self.srcdir + '.pgsql/scripts/pgsql/drop_db.sh')
- utils.system(self.srcdir + '.pgsql/scripts/pgsql/stop_db.sh')
+ def execute_pgsql(self, args = ''):
+ utils.system(self.srcdir + '.pgsql/scripts/pgsql/build_db.sh -g -w 1')
+ utils.system(self.srcdir + '.pgsql/scripts/run_workload.sh ' + args)
+ #
+ # Clean up by dropping the database after the test.
+ #
+ utils.system(self.srcdir + '.pgsql/scripts/pgsql/start_db.sh')
+ utils.system(self.srcdir + '.pgsql/scripts/pgsql/drop_db.sh')
+ utils.system(self.srcdir + '.pgsql/scripts/pgsql/stop_db.sh')
diff --git a/client/tests/disktest/disktest.py b/client/tests/disktest/disktest.py
index e021cb8e..2019c005 100755
--- a/client/tests/disktest/disktest.py
+++ b/client/tests/disktest/disktest.py
@@ -4,58 +4,57 @@ from autotest_lib.client.common_lib import utils, error
class disktest(test.test):
- version = 1
-
- def setup(self):
- os.mkdir(self.srcdir)
- os.chdir(self.bindir)
- utils.system('cp disktest.c src/')
- os.chdir(self.srcdir)
- cflags = '-D_FILE_OFFSET_BITS=64 -D _GNU_SOURCE -static -Wall'
- utils.system('cc disktest.c ' + cflags + ' -o disktest')
-
-
- def test_one_disk_chunk(self, disk, chunk):
- print "testing %d MB files on %s in %d MB memory" % \
- (self.chunk_mb, disk, self.memory_mb)
- cmd = "%s/disktest -m %d -f %s/testfile.%d -i -S" % \
- (self.srcdir, self.chunk_mb, disk, chunk)
- p = subprocess.Popen(cmd, shell=True)
- return(p.pid)
-
-
- def execute(self, disks = None, gigabytes = None,
- chunk_mb = autotest_utils.memtotal() / 1024):
- os.chdir(self.srcdir)
-
- if not disks:
- disks = [self.tmpdir]
- if not gigabytes:
- free = 100 # cap it at 100GB by default
- for disk in disks:
- free = min(autotest_utils.freespace(disk) / 1024**3, free)
- gigabytes = free
- print "resizing to %s GB" % gigabytes
- sys.stdout.flush()
-
- self.chunk_mb = chunk_mb
- self.memory_mb = autotest_utils.memtotal()/1024
- if self.memory_mb > chunk_mb:
- e_msg = "Too much RAM (%dMB) for this test to work" % self.memory_mb
- raise error.TestError(e_msg)
-
- chunks = (1024 * gigabytes) / chunk_mb
-
- for i in range(chunks):
- pids = []
- for disk in disks:
- pid = self.test_one_disk_chunk(disk, i)
- pids.append(pid)
- errors = []
- for pid in pids:
- (junk, retval) = os.waitpid(pid, 0)
- if (retval != 0):
- errors.append(retval)
- if errors:
- raise error.TestError("Errors from children: %s" % errors)
-
+ version = 1
+
+ def setup(self):
+ os.mkdir(self.srcdir)
+ os.chdir(self.bindir)
+ utils.system('cp disktest.c src/')
+ os.chdir(self.srcdir)
+ cflags = '-D_FILE_OFFSET_BITS=64 -D _GNU_SOURCE -static -Wall'
+ utils.system('cc disktest.c ' + cflags + ' -o disktest')
+
+
+ def test_one_disk_chunk(self, disk, chunk):
+ print "testing %d MB files on %s in %d MB memory" % \
+ (self.chunk_mb, disk, self.memory_mb)
+ cmd = "%s/disktest -m %d -f %s/testfile.%d -i -S" % \
+ (self.srcdir, self.chunk_mb, disk, chunk)
+ p = subprocess.Popen(cmd, shell=True)
+ return(p.pid)
+
+
+ def execute(self, disks = None, gigabytes = None,
+ chunk_mb = autotest_utils.memtotal() / 1024):
+ os.chdir(self.srcdir)
+
+ if not disks:
+ disks = [self.tmpdir]
+ if not gigabytes:
+ free = 100 # cap it at 100GB by default
+ for disk in disks:
+ free = min(autotest_utils.freespace(disk) / 1024**3, free)
+ gigabytes = free
+ print "resizing to %s GB" % gigabytes
+ sys.stdout.flush()
+
+ self.chunk_mb = chunk_mb
+ self.memory_mb = autotest_utils.memtotal()/1024
+ if self.memory_mb > chunk_mb:
+ e_msg = "Too much RAM (%dMB) for this test to work" % self.memory_mb
+ raise error.TestError(e_msg)
+
+ chunks = (1024 * gigabytes) / chunk_mb
+
+ for i in range(chunks):
+ pids = []
+ for disk in disks:
+ pid = self.test_one_disk_chunk(disk, i)
+ pids.append(pid)
+ errors = []
+ for pid in pids:
+ (junk, retval) = os.waitpid(pid, 0)
+ if (retval != 0):
+ errors.append(retval)
+ if errors:
+ raise error.TestError("Errors from children: %s" % errors)
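
disktest starts one child per disk for every chunk, waits for all of them, and raises if any exited non-zero. The skeleton below shows that fan-out/collect step with subprocess; the disk list is a placeholder and 'true' stands in for the disktest binary.

import subprocess

disks = ['/tmp', '/var/tmp']              # placeholder "disks"

for chunk in range(3):
    # One child per disk, all running in parallel for this chunk.
    children = [subprocess.Popen(['true']) for _ in disks]
    statuses = [child.wait() for child in children]
    errors = [status for status in statuses if status != 0]
    if errors:
        raise RuntimeError("Errors from children: %s" % errors)
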
diff --git a/client/tests/fio/fio.py b/client/tests/fio/fio.py
index e1d97341..aad6bf7e 100644
--- a/client/tests/fio/fio.py
+++ b/client/tests/fio/fio.py
@@ -4,36 +4,36 @@ from autotest_lib.client.common_lib import utils
class fio(test.test):
- version = 2
+ version = 2
- # http://brick.kernel.dk/snaps/fio-1.16.5.tar.bz2
- def setup(self, tarball = 'fio-1.16.5.tar.bz2'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ # http://brick.kernel.dk/snaps/fio-1.16.5.tar.bz2
+ def setup(self, tarball = 'fio-1.16.5.tar.bz2'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- self.job.setup_dep(['libaio'])
- ldflags = '-L' + self.autodir + '/deps/libaio/lib'
- cflags = '-I' + self.autodir + '/deps/libaio/include'
- var_ldflags = 'LDFLAGS="' + ldflags + '"'
- var_cflags = 'CFLAGS="' + cflags + '"'
+ self.job.setup_dep(['libaio'])
+ ldflags = '-L' + self.autodir + '/deps/libaio/lib'
+ cflags = '-I' + self.autodir + '/deps/libaio/include'
+ var_ldflags = 'LDFLAGS="' + ldflags + '"'
+ var_cflags = 'CFLAGS="' + cflags + '"'
- os.chdir(self.srcdir)
- utils.system('patch -p1 < ../Makefile.patch')
- utils.system('%s %s make' % (var_ldflags, var_cflags))
+ os.chdir(self.srcdir)
+ utils.system('patch -p1 < ../Makefile.patch')
+ utils.system('%s %s make' % (var_ldflags, var_cflags))
- def execute(self, args = '', user = 'root'):
- os.chdir(self.srcdir)
- ##vars = 'TMPDIR=\"%s\" RESULTDIR=\"%s\"' % (self.tmpdir, self.resultsdir)
- vars = 'LD_LIBRARY_PATH="' + self.autodir + '/deps/libaio/lib"'
- ##args = '-m -o ' + self.resultsdir + '/fio-tio.log ' + self.srcdir + '/examples/tiobench-example';
- args = '--output ' + self.resultsdir + '/fio-mixed.log ' + self.bindir + '/fio-mixed.job';
- utils.system(vars + ' ./fio ' + args)
+ def execute(self, args = '', user = 'root'):
+ os.chdir(self.srcdir)
+ ##vars = 'TMPDIR=\"%s\" RESULTDIR=\"%s\"' % (self.tmpdir, self.resultsdir)
+ vars = 'LD_LIBRARY_PATH="' + self.autodir + '/deps/libaio/lib"'
+ ##args = '-m -o ' + self.resultsdir + '/fio-tio.log ' + self.srcdir + '/examples/tiobench-example';
+ args = '--output ' + self.resultsdir + '/fio-mixed.log ' + self.bindir + '/fio-mixed.job';
+ utils.system(vars + ' ./fio ' + args)
- # Do a profiling run if necessary
- profilers = self.job.profilers
- if profilers.present():
- profilers.start(self)
- utils.system(vars + ' ./fio ' + args)
- profilers.stop(self)
- profilers.report(self)
+ # Do a profiling run if necessary
+ profilers = self.job.profilers
+ if profilers.present():
+ profilers.start(self)
+ utils.system(vars + ' ./fio ' + args)
+ profilers.stop(self)
+ profilers.report(self)
diff --git a/client/tests/fs_mark/fs_mark.py b/client/tests/fs_mark/fs_mark.py
index f38f0118..6b52e197 100644
--- a/client/tests/fs_mark/fs_mark.py
+++ b/client/tests/fs_mark/fs_mark.py
@@ -4,30 +4,30 @@ from autotest_lib.client.common_lib import utils
class fs_mark(test.test):
- version = 1
+ version = 1
- # http://developer.osdl.org/dev/doubt/fs_mark/archive/fs_mark-3.2.tgz
- def setup(self, tarball = 'fs_mark-3.2.tgz'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- os.chdir(self.srcdir)
+ # http://developer.osdl.org/dev/doubt/fs_mark/archive/fs_mark-3.2.tgz
+ def setup(self, tarball = 'fs_mark-3.2.tgz'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ os.chdir(self.srcdir)
- utils.system('make')
-
- def execute(self, dir, iterations = 2, args = None):
- os.chdir(self.srcdir)
- if not args:
- # Just provide a sample run parameters
- args = '-s 10240 -n 1000'
- profilers = self.job.profilers
- if not profilers.only():
- for i in range(iterations):
- utils.system('./fs_mark -d %s %s' %(dir, args))
+ utils.system('make')
- # Do a profiling run if necessary
- if profilers.present():
- profilers.start(self)
- utils.system('./fs_mark -d %s %s' %(dir, args))
- profilers.stop(self)
- profilers.report(self)
+ def execute(self, dir, iterations = 2, args = None):
+ os.chdir(self.srcdir)
+ if not args:
+            # Just provide sample run parameters
+ args = '-s 10240 -n 1000'
+ profilers = self.job.profilers
+ if not profilers.only():
+ for i in range(iterations):
+ utils.system('./fs_mark -d %s %s' %(dir, args))
+
+ # Do a profiling run if necessary
+ if profilers.present():
+ profilers.start(self)
+ utils.system('./fs_mark -d %s %s' %(dir, args))
+ profilers.stop(self)
+ profilers.report(self)
diff --git a/client/tests/fsfuzzer/fsfuzzer.py b/client/tests/fsfuzzer/fsfuzzer.py
index 07d2c095..d8310e30 100755
--- a/client/tests/fsfuzzer/fsfuzzer.py
+++ b/client/tests/fsfuzzer/fsfuzzer.py
@@ -4,27 +4,27 @@ from autotest_lib.client.common_lib import utils
class fsfuzzer(test.test):
- version = 1
+ version = 1
- # http://people.redhat.com/sgrubb/files/fsfuzzer-0.6.tar.gz
- def setup(self, tarball = 'fsfuzzer-0.6.tar.gz'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- os.chdir(self.srcdir)
+ # http://people.redhat.com/sgrubb/files/fsfuzzer-0.6.tar.gz
+ def setup(self, tarball = 'fsfuzzer-0.6.tar.gz'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ os.chdir(self.srcdir)
- utils.system('make')
-
- def execute(self, iterations = 1, fstype = 'iso9660'):
- profilers = self.job.profilers
- args = fstype + ' 1'
- if not profilers.only():
- for i in range(iterations):
- utils.system(self.srcdir + '/run_test ' + args)
+ utils.system('make')
- # Do a profiling run if necessary
- if profilers.present():
- profilers.start(self)
- utils.system(self.srcdir + '/run_test ' + args)
- profilers.stop(self)
- profilers.report(self)
+ def execute(self, iterations = 1, fstype = 'iso9660'):
+ profilers = self.job.profilers
+ args = fstype + ' 1'
+ if not profilers.only():
+ for i in range(iterations):
+ utils.system(self.srcdir + '/run_test ' + args)
+
+ # Do a profiling run if necessary
+ if profilers.present():
+ profilers.start(self)
+ utils.system(self.srcdir + '/run_test ' + args)
+ profilers.stop(self)
+ profilers.report(self)
diff --git a/client/tests/fsstress/fsstress.py b/client/tests/fsstress/fsstress.py
index 9b2a2dba..6a6e20ff 100644
--- a/client/tests/fsstress/fsstress.py
+++ b/client/tests/fsstress/fsstress.py
@@ -5,33 +5,33 @@ from autotest_lib.client.common_lib import utils
class fsstress(test.test):
- version = 1
+ version = 1
- # http://www.zip.com.au/~akpm/linux/patches/stuff/ext3-tools.tar.gz
- def setup(self, tarball = 'ext3-tools.tar.gz'):
- self.tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
+ # http://www.zip.com.au/~akpm/linux/patches/stuff/ext3-tools.tar.gz
+ def setup(self, tarball = 'ext3-tools.tar.gz'):
+ self.tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
- os.chdir(self.srcdir)
- utils.system('patch -p1 < ../fsstress-ltp.patch')
- utils.system('make fsstress')
+ os.chdir(self.srcdir)
+ utils.system('patch -p1 < ../fsstress-ltp.patch')
+ utils.system('make fsstress')
- def execute(self, testdir = None, extra_args = '', nproc = '1000', nops = '1000'):
- if not testdir:
- testdir = self.tmpdir
+ def execute(self, testdir = None, extra_args = '', nproc = '1000', nops = '1000'):
+ if not testdir:
+ testdir = self.tmpdir
- args = '-d ' + testdir + ' -p ' + nproc + ' -n ' + nops + ' ' + extra_args
+ args = '-d ' + testdir + ' -p ' + nproc + ' -n ' + nops + ' ' + extra_args
- cmd = self.srcdir + '/fsstress ' + args
- profilers = self.job.profilers
- if not profilers.only():
- utils.system(cmd)
+ cmd = self.srcdir + '/fsstress ' + args
+ profilers = self.job.profilers
+ if not profilers.only():
+ utils.system(cmd)
- # Do a profiling run if necessary
- if profilers.present():
- profilers.start(self)
- utils.system(cmd)
- profilers.stop(self)
- profilers.report(self)
+ # Do a profiling run if necessary
+ if profilers.present():
+ profilers.start(self)
+ utils.system(cmd)
+ profilers.stop(self)
+ profilers.report(self)
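
fsstress follows the same control flow as most wrappers in this change: run the plain iterations unless profilers.only() says to skip them, then repeat the command once under the profilers when profilers.present() is true. That shared pattern is pulled out below into a hedged standalone helper, with a dummy profilers object so the snippet runs on its own.

class _NoProfilers(object):
    """Minimal stand-in for job.profilers when none are configured."""
    def only(self):
        return False
    def present(self):
        return False

def run_with_profilers(test, profilers, run_once, iterations=1):
    """Shared control flow used by the wrappers above (a sketch, not autotest code)."""
    if not profilers.only():
        for _ in range(iterations):
            run_once()
    # Do a profiling run if necessary.
    if profilers.present():
        profilers.start(test)
        run_once()
        profilers.stop(test)
        profilers.report(test)

# Example: three plain iterations, no profilers configured.
run_with_profilers(None, _NoProfilers(), lambda: None, iterations=3)
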
diff --git a/client/tests/fsx/fsx.py b/client/tests/fsx/fsx.py
index 5e77cf1c..08328be7 100755
--- a/client/tests/fsx/fsx.py
+++ b/client/tests/fsx/fsx.py
@@ -1,7 +1,7 @@
# This requires aio headers to build.
# Should work automagically out of deps now.
-# NOTE - this should also have the ability to mount a filesystem,
+# NOTE - this should also have the ability to mount a filesystem,
# run the tests, unmount it, then fsck the filesystem
import os
from autotest_lib.client.bin import test, autotest_utils
@@ -9,43 +9,43 @@ from autotest_lib.client.common_lib import utils
class fsx(test.test):
- version = 3
-
- # http://www.zip.com.au/~akpm/linux/patches/stuff/ext3-tools.tar.gz
- def setup(self, tarball = 'ext3-tools.tar.gz'):
- self.tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
-
- self.job.setup_dep(['libaio'])
- ldflags = '-L' + self.autodir + '/deps/libaio/lib'
- cflags = '-I' + self.autodir + '/deps/libaio/include'
- var_ldflags = 'LDFLAGS="' + ldflags + '"'
- var_cflags = 'CFLAGS="' + cflags + '"'
- self.make_flags = var_ldflags + ' ' + var_cflags
-
- os.chdir(self.srcdir)
- utils.system('patch -p1 < ../fsx-linux.diff')
- utils.system(self.make_flags + ' make fsx-linux')
-
-
- def execute(self, testdir = None, repeat = '100000'):
- args = '-N ' + repeat
- if not testdir:
- testdir = self.tmpdir
- os.chdir(testdir)
- libs = self.autodir+'/deps/libaio/lib/'
- ld_path = autotest_utils.prepend_path(libs,
- autotest_utils.environ('LD_LIBRARY_PATH'))
- var_ld_path = 'LD_LIBRARY_PATH=' + ld_path
- cmd = self.srcdir + '/fsx-linux ' + args + ' poo'
- profilers = self.job.profilers
- if not profilers.only():
- utils.system(var_ld_path + ' ' + cmd)
-
- # Do a profiling run if necessary
- if profilers.present():
- profilers.start(self)
- utils.system(var_ld_path + ' ' + cmd)
- profilers.stop(self)
- profilers.report(self)
+ version = 3
+
+ # http://www.zip.com.au/~akpm/linux/patches/stuff/ext3-tools.tar.gz
+ def setup(self, tarball = 'ext3-tools.tar.gz'):
+ self.tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
+
+ self.job.setup_dep(['libaio'])
+ ldflags = '-L' + self.autodir + '/deps/libaio/lib'
+ cflags = '-I' + self.autodir + '/deps/libaio/include'
+ var_ldflags = 'LDFLAGS="' + ldflags + '"'
+ var_cflags = 'CFLAGS="' + cflags + '"'
+ self.make_flags = var_ldflags + ' ' + var_cflags
+
+ os.chdir(self.srcdir)
+ utils.system('patch -p1 < ../fsx-linux.diff')
+ utils.system(self.make_flags + ' make fsx-linux')
+
+
+ def execute(self, testdir = None, repeat = '100000'):
+ args = '-N ' + repeat
+ if not testdir:
+ testdir = self.tmpdir
+ os.chdir(testdir)
+ libs = self.autodir+'/deps/libaio/lib/'
+ ld_path = autotest_utils.prepend_path(libs,
+ autotest_utils.environ('LD_LIBRARY_PATH'))
+ var_ld_path = 'LD_LIBRARY_PATH=' + ld_path
+ cmd = self.srcdir + '/fsx-linux ' + args + ' poo'
+ profilers = self.job.profilers
+ if not profilers.only():
+ utils.system(var_ld_path + ' ' + cmd)
+
+ # Do a profiling run if necessary
+ if profilers.present():
+ profilers.start(self)
+ utils.system(var_ld_path + ' ' + cmd)
+ profilers.stop(self)
+ profilers.report(self)
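
fsx prepends the libaio dependency directory to LD_LIBRARY_PATH (autotest_utils.prepend_path) before invoking the binary. The sketch below builds the same environment for a child process without clobbering any existing value; the library path and the 'true' command are placeholders.

import os
import subprocess

def prepend_path(entry, path):
    """Return 'entry:path', or just entry when path is empty (mirrors prepend_path)."""
    return "%s:%s" % (entry, path) if path else entry

libs = '/usr/local/autotest/deps/libaio/lib'     # assumed install location
env = dict(os.environ)
env['LD_LIBRARY_PATH'] = prepend_path(libs, env.get('LD_LIBRARY_PATH', ''))

# Run the workload with the adjusted loader path ('true' stands in for fsx-linux).
subprocess.call(['true'], env=env)
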
diff --git a/client/tests/interbench/interbench.py b/client/tests/interbench/interbench.py
index 0bbdec4f..aca7834d 100644
--- a/client/tests/interbench/interbench.py
+++ b/client/tests/interbench/interbench.py
@@ -4,31 +4,31 @@ from autotest_lib.client.common_lib import utils
class interbench(test.test):
- version = 1
+ version = 1
- # http://www.kernel.org/pub/linux/kernel/people/ck/apps/interbench/interbench-0.30.tar.bz2
- def setup(self, tarball = 'interbench-0.30.tar.bz2'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- os.chdir(self.srcdir)
+ # http://www.kernel.org/pub/linux/kernel/people/ck/apps/interbench/interbench-0.30.tar.bz2
+ def setup(self, tarball = 'interbench-0.30.tar.bz2'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ os.chdir(self.srcdir)
- utils.system('make')
-
- def execute(self, iterations = 1, args = ''):
- os.chdir(self.tmpdir)
- args += " -c"
+ utils.system('make')
- profilers = self.job.profilers
- if not profilers.only():
- for i in range(iterations):
- utils.system("%s/interbench -m 'run #%s' %s" % \
- (self.srcdir, i, args))
+ def execute(self, iterations = 1, args = ''):
+ os.chdir(self.tmpdir)
+ args += " -c"
- # Do a profiling run if necessary
- if profilers.present():
- profilers.start(self)
- utils.system("%s/interbench -m 'profile run' %s" % \
- (self.srcdir, args))
- profilers.stop(self)
- profilers.report(self)
+ profilers = self.job.profilers
+ if not profilers.only():
+ for i in range(iterations):
+ utils.system("%s/interbench -m 'run #%s' %s" % \
+ (self.srcdir, i, args))
+
+ # Do a profiling run if necessary
+ if profilers.present():
+ profilers.start(self)
+ utils.system("%s/interbench -m 'profile run' %s" % \
+ (self.srcdir, args))
+ profilers.stop(self)
+ profilers.report(self)
diff --git a/client/tests/iozone/iozone.py b/client/tests/iozone/iozone.py
index 26c23d68..8347a320 100644
--- a/client/tests/iozone/iozone.py
+++ b/client/tests/iozone/iozone.py
@@ -5,66 +5,66 @@ from autotest_lib.client.common_lib import utils
class iozone(test.test):
- version = 1
+ version = 1
- # http://www.iozone.org/src/current/iozone3_283.tar
- def setup(self, tarball = 'iozone3_283.tar'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- os.chdir(os.path.join(self.srcdir, 'src/current'))
+ # http://www.iozone.org/src/current/iozone3_283.tar
+ def setup(self, tarball = 'iozone3_283.tar'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ os.chdir(os.path.join(self.srcdir, 'src/current'))
- arch = autotest_utils.get_current_kernel_arch()
- if (arch == 'ppc'):
- utils.system('make linux-powerpc')
- elif (arch == 'ppc64'):
- utils.system('make linux-powerpc64')
- elif (arch == 'x86_64'):
- utils.system('make linux-AMD64')
- else:
- utils.system('make linux')
+ arch = autotest_utils.get_current_kernel_arch()
+ if (arch == 'ppc'):
+ utils.system('make linux-powerpc')
+ elif (arch == 'ppc64'):
+ utils.system('make linux-powerpc64')
+ elif (arch == 'x86_64'):
+ utils.system('make linux-AMD64')
+ else:
+ utils.system('make linux')
- def execute(self, dir = None, iterations=1, args = None):
- self.keyval = open(os.path.join(self.resultsdir, 'keyval'),
- 'w')
- if not dir:
- dir = self.tmpdir
- os.chdir(dir)
- if not args:
- args = '-a'
- profilers = self.job.profilers
- if not profilers.only():
- for i in range(iterations):
- output = utils.system_output('%s/src/current/iozone %s' %
- (self.srcdir, args))
- self.__format_results(output)
+ def execute(self, dir = None, iterations=1, args = None):
+ self.keyval = open(os.path.join(self.resultsdir, 'keyval'),
+ 'w')
+ if not dir:
+ dir = self.tmpdir
+ os.chdir(dir)
+ if not args:
+ args = '-a'
+ profilers = self.job.profilers
+ if not profilers.only():
+ for i in range(iterations):
+ output = utils.system_output('%s/src/current/iozone %s' %
+ (self.srcdir, args))
+ self.__format_results(output)
- # Do a profiling run if necessary
- if profilers.present():
- profilers.start(self)
- output = utils.system_output('%s/src/current/iozone %s' %
- (self.srcdir, args))
- self.__format_results(output)
- profilers.stop(self)
- profilers.report(self)
+ # Do a profiling run if necessary
+ if profilers.present():
+ profilers.start(self)
+ output = utils.system_output('%s/src/current/iozone %s' %
+ (self.srcdir, args))
+ self.__format_results(output)
+ profilers.stop(self)
+ profilers.report(self)
- self.keyval.close()
+ self.keyval.close()
- def __format_results(self, results):
- labels = ('write', 'rewrite', 'read', 'reread', 'randread',
- 'randwrite', 'bkwdread', 'recordrewrite',
- 'strideread', 'fwrite', 'frewrite',
- 'fread', 'freread')
- for line in results.splitlines():
- fields = line.split()
- if len(fields) != 15:
- continue
- try:
- fields = tuple([int(i) for i in fields])
- except ValueError:
- continue
- for l, v in zip(labels, fields[2:]):
- print >> self.keyval, "%d-%d-%s=%d" % (fields[0], fields[1], l, v)
- print >> self.keyval
+ def __format_results(self, results):
+ labels = ('write', 'rewrite', 'read', 'reread', 'randread',
+ 'randwrite', 'bkwdread', 'recordrewrite',
+ 'strideread', 'fwrite', 'frewrite',
+ 'fread', 'freread')
+ for line in results.splitlines():
+ fields = line.split()
+ if len(fields) != 15:
+ continue
+ try:
+ fields = tuple([int(i) for i in fields])
+ except ValueError:
+ continue
+ for l, v in zip(labels, fields[2:]):
+ print >> self.keyval, "%d-%d-%s=%d" % (fields[0], fields[1], l, v)
+ print >> self.keyval
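__format_results() above keeps only the 15-column data rows of iozone's auto-mode output and writes one keyval entry per operation, keyed by file size and record size. A rough standalone equivalent of the parsing (the keyval strings mirror the '%d-%d-%s=%d' format used above):

    IOZONE_LABELS = ('write', 'rewrite', 'read', 'reread', 'randread',
                     'randwrite', 'bkwdread', 'recordrewrite',
                     'strideread', 'fwrite', 'frewrite', 'fread', 'freread')

    def parse_iozone(text):
        keyvals = []
        for line in text.splitlines():
            fields = line.split()
            if len(fields) != 15:        # only complete auto-mode data rows
                continue
            try:
                fields = [int(f) for f in fields]
            except ValueError:           # headers and separators
                continue
            size, record = fields[0], fields[1]
            for label, value in zip(IOZONE_LABELS, fields[2:]):
                keyvals.append('%d-%d-%s=%d' % (size, record, label, value))
        return keyvals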
diff --git a/client/tests/isic/isic.py b/client/tests/isic/isic.py
index 93788307..df616bee 100644
--- a/client/tests/isic/isic.py
+++ b/client/tests/isic/isic.py
@@ -4,23 +4,23 @@ from autotest_lib.client.common_lib import utils
class isic(test.test):
- version = 2
+ version = 2
- # http://www.packetfactory.net/Projects/ISIC/isic-0.06.tgz
- # + http://www.stardust.webpages.pl/files/crap/isic-gcc41-fix.patch
+ # http://www.packetfactory.net/Projects/ISIC/isic-0.06.tgz
+ # + http://www.stardust.webpages.pl/files/crap/isic-gcc41-fix.patch
- def initialize(self):
- self.job.setup_dep(['libnet'])
+ def initialize(self):
+ self.job.setup_dep(['libnet'])
- def setup(self, tarball = 'isic-0.06.tar.bz2'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- os.chdir(self.srcdir)
+ def setup(self, tarball = 'isic-0.06.tar.bz2'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ os.chdir(self.srcdir)
- utils.system('patch -p1 < ../build-fixes.patch')
- utils.system('PREFIX=' + self.autodir + '/deps/libnet/libnet/ ./configure')
- utils.system('make')
+ utils.system('patch -p1 < ../build-fixes.patch')
+ utils.system('PREFIX=' + self.autodir + '/deps/libnet/libnet/ ./configure')
+ utils.system('make')
- def execute(self, args = '-s rand -d 127.0.0.1 -p 10000000'):
- utils.system(self.srcdir + '/isic ' + args)
+ def execute(self, args = '-s rand -d 127.0.0.1 -p 10000000'):
+ utils.system(self.srcdir + '/isic ' + args)
diff --git a/client/tests/kernbench/kernbench.py b/client/tests/kernbench/kernbench.py
index 8abf6a36..de7d8491 100755
--- a/client/tests/kernbench/kernbench.py
+++ b/client/tests/kernbench/kernbench.py
@@ -4,107 +4,107 @@ from autotest_lib.client.common_lib import utils
class kernbench(test.test):
- version = 2
-
- def setup(self, build_dir = None):
- if not build_dir:
- build_dir = self.srcdir
- os.mkdir(build_dir)
-
-
- def __init_tree(self, build_dir, version = None):
- #
- # If we have a local copy of the 2.6.14 tarball use that
- # else let the kernel object use the defined mirrors
- # to obtain it.
- #
- # http://kernel.org/pub/linux/kernel/v2.6/linux-2.6.14.tar.bz2
- #
- # On ia64, we default to 2.6.20, as it can't compile 2.6.14.
- if version:
- default_ver = version
- elif autotest_utils.get_current_kernel_arch() == 'ia64':
- default_ver = '2.6.20'
- else:
- default_ver = '2.6.14'
-
- kversionfile = os.path.join(build_dir, ".kversion")
- install_needed = True
- if os.path.exists(kversionfile):
- old_version = pickle.load(open(kversionfile, 'r'))
- if (old_version == default_ver):
- install_needed = False
-
- if not install_needed:
- return
-
- # Clear out the old version
- utils.system("echo rm -rf '" + build_dir + "/*'")
-
- pickle.dump(default_ver, open(kversionfile, 'w'))
-
- tarball = None
- for dir in (self.bindir, '/usr/local/src'):
- tar = 'linux-%s.tar.bz2' % default_ver
- path = os.path.join(dir, tar)
- if os.path.exists(path):
- tarball = path
- break
- if not tarball:
- tarball = default_ver
-
- # Do the extraction of the kernel tree
- kernel = self.job.kernel(tarball, self.tmpdir, build_dir)
- kernel.config(defconfig=True, logged=False)
-
-
- def execute(self, iterations = 1, threads = None, dir = None, version = None):
- if not threads:
- threads = self.job.cpu_count()*2
- if dir:
- build_dir = dir
- else:
- build_dir = os.path.join(self.tmpdir, "src")
- if not os.path.exists(build_dir):
- os.makedirs(build_dir)
-
- self.__init_tree(build_dir, version)
-
- kernel = self.job.kernel(build_dir, self.tmpdir, build_dir,
- leave = True)
- print "kernbench x %d: %d threads" % (iterations, threads)
-
- logfile = os.path.join(self.debugdir, 'build_log')
-
- print "Warmup run ..."
- kernel.build_timed(threads, output = logfile) # warmup run
-
- profilers = self.job.profilers
- if not profilers.only():
- for i in range(iterations):
- print "Performance run, iteration %d ..." % i
- timefile = os.path.join(self.resultsdir,
- 'time.%d' % i)
- kernel.build_timed(threads, timefile)
-
- # Do a profiling run if necessary
- if profilers.present():
- profilers.start(self)
- print "Profiling run ..."
- timefile = os.path.join(self.resultsdir, 'time.profile')
- kernel.build_timed(threads, timefile)
- profilers.stop(self)
- profilers.report(self)
-
- kernel.clean(logged=False) # Don't leave litter lying around
- os.chdir(self.resultsdir)
- utils.system("grep -h elapsed time.* > time")
-
- self.__format_results(open('time').read())
-
-
- def __format_results(self, results):
- out = open('keyval', 'w')
- for result in autotest_utils.extract_all_time_results(results):
- print >> out, "user=%s\nsystem=%s\nelapsed=%s\n" % result
- out.close()
+ version = 2
+
+ def setup(self, build_dir = None):
+ if not build_dir:
+ build_dir = self.srcdir
+ os.mkdir(build_dir)
+
+
+ def __init_tree(self, build_dir, version = None):
+ #
+ # If we have a local copy of the 2.6.14 tarball use that
+ # else let the kernel object use the defined mirrors
+ # to obtain it.
+ #
+ # http://kernel.org/pub/linux/kernel/v2.6/linux-2.6.14.tar.bz2
+ #
+ # On ia64, we default to 2.6.20, as it can't compile 2.6.14.
+ if version:
+ default_ver = version
+ elif autotest_utils.get_current_kernel_arch() == 'ia64':
+ default_ver = '2.6.20'
+ else:
+ default_ver = '2.6.14'
+
+ kversionfile = os.path.join(build_dir, ".kversion")
+ install_needed = True
+ if os.path.exists(kversionfile):
+ old_version = pickle.load(open(kversionfile, 'r'))
+ if (old_version == default_ver):
+ install_needed = False
+
+ if not install_needed:
+ return
+
+ # Clear out the old version
+ utils.system("echo rm -rf '" + build_dir + "/*'")
+
+ pickle.dump(default_ver, open(kversionfile, 'w'))
+
+ tarball = None
+ for dir in (self.bindir, '/usr/local/src'):
+ tar = 'linux-%s.tar.bz2' % default_ver
+ path = os.path.join(dir, tar)
+ if os.path.exists(path):
+ tarball = path
+ break
+ if not tarball:
+ tarball = default_ver
+
+ # Do the extraction of the kernel tree
+ kernel = self.job.kernel(tarball, self.tmpdir, build_dir)
+ kernel.config(defconfig=True, logged=False)
+
+
+ def execute(self, iterations = 1, threads = None, dir = None, version = None):
+ if not threads:
+ threads = self.job.cpu_count()*2
+ if dir:
+ build_dir = dir
+ else:
+ build_dir = os.path.join(self.tmpdir, "src")
+ if not os.path.exists(build_dir):
+ os.makedirs(build_dir)
+
+ self.__init_tree(build_dir, version)
+
+ kernel = self.job.kernel(build_dir, self.tmpdir, build_dir,
+ leave = True)
+ print "kernbench x %d: %d threads" % (iterations, threads)
+
+ logfile = os.path.join(self.debugdir, 'build_log')
+
+ print "Warmup run ..."
+ kernel.build_timed(threads, output = logfile) # warmup run
+
+ profilers = self.job.profilers
+ if not profilers.only():
+ for i in range(iterations):
+ print "Performance run, iteration %d ..." % i
+ timefile = os.path.join(self.resultsdir,
+ 'time.%d' % i)
+ kernel.build_timed(threads, timefile)
+
+ # Do a profiling run if necessary
+ if profilers.present():
+ profilers.start(self)
+ print "Profiling run ..."
+ timefile = os.path.join(self.resultsdir, 'time.profile')
+ kernel.build_timed(threads, timefile)
+ profilers.stop(self)
+ profilers.report(self)
+
+ kernel.clean(logged=False) # Don't leave litter lying around
+ os.chdir(self.resultsdir)
+ utils.system("grep -h elapsed time.* > time")
+
+ self.__format_results(open('time').read())
+
+
+ def __format_results(self, results):
+ out = open('keyval', 'w')
+ for result in autotest_utils.extract_all_time_results(results):
+ print >> out, "user=%s\nsystem=%s\nelapsed=%s\n" % result
+ out.close()
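__init_tree() above re-extracts the kernel tree only when the pickled .kversion marker disagrees with the requested version (note that the "Clear out the old version" step only echoes the rm -rf command rather than running it). A simplified sketch of the marker check, with made-up paths:

    import os, pickle

    def tree_is_current(build_dir, wanted_ver):
        # .kversion records which kernel version was last extracted here.
        marker = os.path.join(build_dir, '.kversion')
        if not os.path.exists(marker):
            return False
        with open(marker, 'rb') as f:
            return pickle.load(f) == wanted_ver

    def record_version(build_dir, wanted_ver):
        with open(os.path.join(build_dir, '.kversion'), 'wb') as f:
            pickle.dump(wanted_ver, f)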
diff --git a/client/tests/kernelbuild/kernelbuild.py b/client/tests/kernelbuild/kernelbuild.py
index 95b639a2..836a766d 100755
--- a/client/tests/kernelbuild/kernelbuild.py
+++ b/client/tests/kernelbuild/kernelbuild.py
@@ -2,12 +2,12 @@ from autotest_lib.client.bin import test, kernel
class kernelbuild(test.test):
- version = 1
+ version = 1
- def execute(self, base_tree, patches, config, config_list = None):
- kernel = self.job.kernel(base_tree, self.outputdir)
- if patches:
- kernel.patch(*patches)
- kernel.config(config, config_list)
+ def execute(self, base_tree, patches, config, config_list = None):
+ kernel = self.job.kernel(base_tree, self.outputdir)
+ if patches:
+ kernel.patch(*patches)
+ kernel.config(config, config_list)
- kernel.build()
+ kernel.build()
diff --git a/client/tests/kvmtest/control.with_modbuild b/client/tests/kvmtest/control.with_modbuild
index 09c39926..a527c682 100644
--- a/client/tests/kvmtest/control.with_modbuild
+++ b/client/tests/kvmtest/control.with_modbuild
@@ -1,6 +1,6 @@
# CHANGEME - point to kvm release tarball
-#
-#
+#
+#
# nightly kvm snapshot base URL
SNAPBASE = 'http://people.qumranet.com/avi/snapshots/'
@@ -19,44 +19,44 @@ from autotest_utils import *
def install_kvm_external_modules(tarball=None, base=SNAPBASE, daysold=DAYSOLD):
- dldir = os.environ['AUTODIR']+'/tmp'
- srcdir = os.environ['AUTODIR']+'/tmp/kvm'
- print "kvm dldir->%s"%(dldir)
- print "kvm srcdir->%s"%(srcdir)
-
- # ex: http://people.qumranet.com/avi/snapshots/kvm-snapshot-20071021.tar.gz
- if tarball == None:
- d = (date.today() - timedelta(days=daysold)).strftime('%Y%m%d')
- tarball = base+'kvm-snapshot-%s.tar.gz' %(d)
- sys.stderr.write("tarball url: %s\n" %(tarball))
+ dldir = os.environ['AUTODIR']+'/tmp'
+ srcdir = os.environ['AUTODIR']+'/tmp/kvm'
+ print "kvm dldir->%s"%(dldir)
+ print "kvm srcdir->%s"%(srcdir)
- tarball = unmap_url("/", tarball, dldir)
- extract_tarball_to_dir(tarball, srcdir)
- os.chdir(srcdir)
+ # ex: http://people.qumranet.com/avi/snapshots/kvm-snapshot-20071021.tar.gz
+ if tarball == None:
+ d = (date.today() - timedelta(days=daysold)).strftime('%Y%m%d')
+ tarball = base+'kvm-snapshot-%s.tar.gz' %(d)
+ sys.stderr.write("tarball url: %s\n" %(tarball))
- print "detecting cpu vendor..."
- vendor = "intel"
- if os.system("grep vmx /proc/cpuinfo 1>/dev/null") != 0:
- vendor = "amd"
- print "detected cpu vendor as '%s'" %(vendor)
+ tarball = unmap_url("/", tarball, dldir)
+ extract_tarball_to_dir(tarball, srcdir)
+ os.chdir(srcdir)
- print "building kvm..."
- system('./configure')
- system('make')
- system('make install')
- print "done building and installing kvm"
+ print "detecting cpu vendor..."
+ vendor = "intel"
+ if os.system("grep vmx /proc/cpuinfo 1>/dev/null") != 0:
+ vendor = "amd"
+ print "detected cpu vendor as '%s'" %(vendor)
- # remove existing in kernel kvm modules
- print "unloading loaded kvm modules (if present) ..."
- if system("grep kvm_%s /proc/modules 1>/dev/null" %(vendor), 1) == 0:
- system("rmmod -f kvm_%s" %(vendor))
- if system("grep kvm /proc/modules 1>/dev/null", 1) == 0:
- system("rmmod -f kvm")
+ print "building kvm..."
+ system('./configure')
+ system('make')
+ system('make install')
+ print "done building and installing kvm"
- # load new modules
- print "loading new kvm modules..."
- os.chdir(srcdir+'/kernel')
- system("insmod ./kvm.ko && sleep 1 && insmod ./kvm-%s.ko" %(vendor))
+ # remove existing in kernel kvm modules
+ print "unloading loaded kvm modules (if present) ..."
+ if system("grep kvm_%s /proc/modules 1>/dev/null" %(vendor), 1) == 0:
+ system("rmmod -f kvm_%s" %(vendor))
+ if system("grep kvm /proc/modules 1>/dev/null", 1) == 0:
+ system("rmmod -f kvm")
+
+ # load new modules
+ print "loading new kvm modules..."
+ os.chdir(srcdir+'/kernel')
+ system("insmod ./kvm.ko && sleep 1 && insmod ./kvm-%s.ko" %(vendor))
# build and install kvm external modules
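The vendor probe in the control file above simply greps /proc/cpuinfo for the vmx flag and otherwise assumes AMD; the svm flag is never checked explicitly. A shell-free sketch of the same decision:

    def kvm_cpu_vendor(cpuinfo_path='/proc/cpuinfo'):
        # Mirrors the control file: vmx present => intel, anything else => amd.
        with open(cpuinfo_path) as f:
            return 'intel' if 'vmx' in f.read() else 'amd'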
diff --git a/client/tests/kvmtest/kvmtest.py b/client/tests/kvmtest/kvmtest.py
index 96806c0a..f7760335 100644
--- a/client/tests/kvmtest/kvmtest.py
+++ b/client/tests/kvmtest/kvmtest.py
@@ -4,158 +4,158 @@ from autotest_lib.client.common_lib import utils, error
class kvmtest(test.test):
- version = 1
-
- def setup(self, tarball = 'kvm-test.tar.gz'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- os.chdir(self.srcdir)
- utils.system('python setup.py install')
-
-
- def execute(self, testdir = '', args = ''):
- dirs = []
- results = []
- passed = 0
- failed = 0
-
- # spawn vncserver if needed
- if not os.environ.has_key('DISPLAY'):
- print("No DISPLAY set in environment,"
- "spawning vncserver...")
- display = self.__create_vncserver(os.environ['HOME']+"/.vnc")
- print("Setting DISPLAY=%s"%(display))
- os.environ['DISPLAY'] = display
-
- # build a list of dirs with 'vm.log' files
- os.path.walk(testdir, self.__has_vmlog, dirs)
-
- for d in dirs:
- replaydir = os.path.join(self.resultsdir,
- os.path.basename(d))
- os.mkdir(replaydir)
- logfile = replaydir + "/%s.log" %(os.path.basename(d))
-
- os.chdir(d)
- rv = utils.system("kvm-test-replay > %s" %(logfile), 1)
-
- results.append((d, rv))
- if rv != 0:
- screenshot = self.__get_expected_file(logfile)
- expected = "expected-%03d.png" %(
- random.randint(0, 999))
- dest = os.path.join(replaydir,expected)
-
- # make a copy of the screen shot
- utils.system("cp %s %s" %(screenshot, dest), 1)
-
- # move the failure
- utils.system("mv failure-*.png %s" %(replaydir), 1)
-
- # generate html output
- self.__format_results(results)
-
- # produce pass/fail output
- for (x, y) in results:
- if y != 0:
- print("FAIL: '%s' with rv %s" %(x, y))
- failed = failed + 1
- else:
- print("pass: '%s' with rv %s" %(x, y))
- passed = passed + 1
-
- print("Summary: Passed %d Failed %d" %(passed, failed))
- # if we had any tests not passed, fail entire test
- if failed != 0:
- raise error.TestError('kvm-test-replay')
-
-
- def __get_expected_file(self, logfile):
- # pull out screeshot name from logfile
- return filter(lambda x: "Expected" in x,
- open(logfile, 'r').readlines())\
- [0].split('{')[1].split('}')[0]
-
-
- def __create_vncserver(self, dirname):
- """
- this test may run without an X connection in kvm/qemu needs
- a DISPLAY to push the vga buffer. If a DISPLAY is not set
- in the environment, then attempt to spawn a vncserver, and
- change env DISPLAY so that kvmtest can run
- """
- for pidfile in locate("*:*.pid", dirname):
- pid = open(pidfile, 'r').readline().strip()
- # if the server is still active, just use it for display
- if os.path.exists('/proc/%s/status' % pid):
- vncdisplay = os.path.basename(pidfile)\
- .split(":")[1].split(".")[0]
- print("Found vncserver on port %s, using it"%(
- vncdisplay))
- return ':%s.0' %(vncdisplay)
-
- # none of the vncserver were still alive, spawn our own and
- # return the display whack existing server first, then spawn it
- vncdisplay = "1"
- print("Spawning vncserver on port %s"%(vncdisplay))
- utils.system('vncserver :%s' %(vncdisplay))
- return ':%s.0' %(vncdisplay)
-
-
- def __has_vmlog(self, arg, dirname, names):
- if os.path.exists(os.path.join(dirname, 'vm.log')):
- arg.append(dirname)
-
-
- def __gen_fail_html(self, testdir):
- # generate a failure index.html to display the expected and failure
- # images
- fail_dir = os.path.join(self.resultsdir, os.path.basename(testdir))
- fail_index = os.path.join(fail_dir, "index.html")
-
- # lambda helpers for pulling out image files
- is_png = lambda x: x.endswith('.png')
- failure_filter = lambda x: x.startswith('failure') and is_png(x)
- expected_filter = lambda x: x.startswith('expected') and is_png(x)
-
- failure_img = filter(failure_filter, os.listdir(fail_dir))[0]
- expected_img = filter(expected_filter, os.listdir(fail_dir))[0]
- if not failure_img or not expected_img:
- raise "Failed to find images"
-
- fail_buff = "<html><table border=1><tr><th>Barrier Diff</th>\n" + \
- "<th>Expected Barrier</th><th>Failure</th></tr><tr><td></td>\n"
- for img in expected_img, failure_img:
- fail_buff = fail_buff + "<td><a href=\"%s\"><img width=320 " \
- "height=200 src=\"%s\"></a></td>\n" %(img, img)
-
- fail_buff = fail_buff + "</tr></table></html>\n"
-
- fh = open(fail_index, "w+")
- fh.write(fail_buff)
- fh.close()
-
- def __format_results(self, results):
- # generate kvmtest/index.html and an index.html for each fail
- test_index = os.path.join(self.outputdir, "index.html")
- test_buff = "<html><table border=1><tr><th>Test</th>\n"
-
- for (x,y) in results:
- test_buff = test_buff + "<th>%s</th>\n" %(os.path.basename(x))
-
- test_buff = test_buff + "</tr><tr><td></td>\n"
-
- for (x,y) in results:
- if y != 0:
- fail = "<td><a href=\"results/%s/\">FAIL</a></td>\n" %(os.path.basename(x))
- test_buff = test_buff + fail
- self.__gen_fail_html(x)
- else:
- test_buff = test_buff + "<td>GOOD</td>\n"
-
- test_buff = test_buff + "</tr></table></html>"
-
- fh = open(test_index, "w+")
- fh.write(test_buff)
- fh.close()
+ version = 1
+
+ def setup(self, tarball = 'kvm-test.tar.gz'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ os.chdir(self.srcdir)
+ utils.system('python setup.py install')
+
+
+ def execute(self, testdir = '', args = ''):
+ dirs = []
+ results = []
+ passed = 0
+ failed = 0
+
+ # spawn vncserver if needed
+ if not os.environ.has_key('DISPLAY'):
+ print("No DISPLAY set in environment,"
+ "spawning vncserver...")
+ display = self.__create_vncserver(os.environ['HOME']+"/.vnc")
+ print("Setting DISPLAY=%s"%(display))
+ os.environ['DISPLAY'] = display
+
+ # build a list of dirs with 'vm.log' files
+ os.path.walk(testdir, self.__has_vmlog, dirs)
+
+ for d in dirs:
+ replaydir = os.path.join(self.resultsdir,
+ os.path.basename(d))
+ os.mkdir(replaydir)
+ logfile = replaydir + "/%s.log" %(os.path.basename(d))
+
+ os.chdir(d)
+ rv = utils.system("kvm-test-replay > %s" %(logfile), 1)
+
+ results.append((d, rv))
+ if rv != 0:
+ screenshot = self.__get_expected_file(logfile)
+ expected = "expected-%03d.png" %(
+ random.randint(0, 999))
+ dest = os.path.join(replaydir,expected)
+
+ # make a copy of the screen shot
+ utils.system("cp %s %s" %(screenshot, dest), 1)
+
+ # move the failure
+ utils.system("mv failure-*.png %s" %(replaydir), 1)
+
+ # generate html output
+ self.__format_results(results)
+
+ # produce pass/fail output
+ for (x, y) in results:
+ if y != 0:
+ print("FAIL: '%s' with rv %s" %(x, y))
+ failed = failed + 1
+ else:
+ print("pass: '%s' with rv %s" %(x, y))
+ passed = passed + 1
+
+ print("Summary: Passed %d Failed %d" %(passed, failed))
+ # if we had any tests not passed, fail entire test
+ if failed != 0:
+ raise error.TestError('kvm-test-replay')
+
+
+ def __get_expected_file(self, logfile):
+        # pull out screenshot name from logfile
+ return filter(lambda x: "Expected" in x,
+ open(logfile, 'r').readlines())\
+ [0].split('{')[1].split('}')[0]
+
+
+ def __create_vncserver(self, dirname):
+ """
+        this test may run without an X connection, but kvm/qemu needs
+ a DISPLAY to push the vga buffer. If a DISPLAY is not set
+ in the environment, then attempt to spawn a vncserver, and
+ change env DISPLAY so that kvmtest can run
+ """
+ for pidfile in locate("*:*.pid", dirname):
+ pid = open(pidfile, 'r').readline().strip()
+ # if the server is still active, just use it for display
+ if os.path.exists('/proc/%s/status' % pid):
+ vncdisplay = os.path.basename(pidfile)\
+ .split(":")[1].split(".")[0]
+ print("Found vncserver on port %s, using it"%(
+ vncdisplay))
+ return ':%s.0' %(vncdisplay)
+
+        # none of the vncservers were still alive: spawn our own and return
+        # its display; whack any existing server on that display first, then spawn it
+ vncdisplay = "1"
+ print("Spawning vncserver on port %s"%(vncdisplay))
+ utils.system('vncserver :%s' %(vncdisplay))
+ return ':%s.0' %(vncdisplay)
+
+
+ def __has_vmlog(self, arg, dirname, names):
+ if os.path.exists(os.path.join(dirname, 'vm.log')):
+ arg.append(dirname)
+
+
+ def __gen_fail_html(self, testdir):
+ # generate a failure index.html to display the expected and failure
+ # images
+ fail_dir = os.path.join(self.resultsdir, os.path.basename(testdir))
+ fail_index = os.path.join(fail_dir, "index.html")
+
+ # lambda helpers for pulling out image files
+ is_png = lambda x: x.endswith('.png')
+ failure_filter = lambda x: x.startswith('failure') and is_png(x)
+ expected_filter = lambda x: x.startswith('expected') and is_png(x)
+
+ failure_img = filter(failure_filter, os.listdir(fail_dir))[0]
+ expected_img = filter(expected_filter, os.listdir(fail_dir))[0]
+ if not failure_img or not expected_img:
+            raise error.TestError("Failed to find images")
+
+ fail_buff = "<html><table border=1><tr><th>Barrier Diff</th>\n" + \
+ "<th>Expected Barrier</th><th>Failure</th></tr><tr><td></td>\n"
+ for img in expected_img, failure_img:
+ fail_buff = fail_buff + "<td><a href=\"%s\"><img width=320 " \
+ "height=200 src=\"%s\"></a></td>\n" %(img, img)
+
+ fail_buff = fail_buff + "</tr></table></html>\n"
+
+ fh = open(fail_index, "w+")
+ fh.write(fail_buff)
+ fh.close()
+
+ def __format_results(self, results):
+ # generate kvmtest/index.html and an index.html for each fail
+ test_index = os.path.join(self.outputdir, "index.html")
+ test_buff = "<html><table border=1><tr><th>Test</th>\n"
+
+ for (x,y) in results:
+ test_buff = test_buff + "<th>%s</th>\n" %(os.path.basename(x))
+
+ test_buff = test_buff + "</tr><tr><td></td>\n"
+
+ for (x,y) in results:
+ if y != 0:
+ fail = "<td><a href=\"results/%s/\">FAIL</a></td>\n" %(os.path.basename(x))
+ test_buff = test_buff + fail
+ self.__gen_fail_html(x)
+ else:
+ test_buff = test_buff + "<td>GOOD</td>\n"
+
+ test_buff = test_buff + "</tr></table></html>"
+
+ fh = open(test_index, "w+")
+ fh.write(test_buff)
+ fh.close()
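__create_vncserver() above reuses a live vncserver discovered through the host:N.pid files under ~/.vnc and only spawns a fresh one on display :1 when none respond. A compact sketch of the reuse check (glob replaces the locate() helper the test relies on):

    import glob, os

    def find_live_vnc_display(vnc_dir):
        # Each pid file is named host:N.pid and holds the server's pid.
        for pidfile in glob.glob(os.path.join(vnc_dir, '*:*.pid')):
            pid = open(pidfile).readline().strip()
            if os.path.exists('/proc/%s/status' % pid):
                display = os.path.basename(pidfile).split(':')[1].split('.')[0]
                return ':%s.0' % display
        return None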
diff --git a/client/tests/libhugetlbfs/libhugetlbfs.py b/client/tests/libhugetlbfs/libhugetlbfs.py
index 1f0aed17..aa018c51 100644
--- a/client/tests/libhugetlbfs/libhugetlbfs.py
+++ b/client/tests/libhugetlbfs/libhugetlbfs.py
@@ -3,58 +3,58 @@ from autotest_lib.client.bin import autotest_utils, test
from autotest_lib.client.common_lib import utils, error
class libhugetlbfs(test.test):
- version = 4
-
- # http://libhugetlbfs.ozlabs.org/releases/libhugetlbfs-1.3-pre1.tar.gz
- def setup(self, tarball = 'libhugetlbfs-1.3-pre1.tar.gz'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- os.chdir(self.srcdir)
-
- # make might fail if there are no proper headers for the 32 bit
- # version, in that case try only for the 64 bit version
- try:
- utils.system('make')
- except:
- utils.system('make OBJDIRS=obj64')
-
- def execute(self, dir = None, pages_requested = 20):
- autotest_utils.check_kernel_ver("2.6.16")
-
- # Check huge page number
- pages_available = 0
- if os.path.exists('/proc/sys/vm/nr_hugepages'):
- utils.write_one_line('/proc/sys/vm/nr_hugepages',
- str(pages_requested))
- pages_available = int(open('/proc/sys/vm/nr_hugepages', 'r').readline())
- else:
- raise error.TestNAError('Kernel does not support hugepages')
-
- if pages_available < pages_requested:
- raise error.TestError('%d huge pages available, < %d pages requested' % (pages_available, pages_requested))
-
- # Check if hugetlbfs has been mounted
- if not autotest_utils.file_contains_pattern('/proc/mounts', 'hugetlbfs'):
- if not dir:
- dir = os.path.join(self.tmpdir, 'hugetlbfs')
- os.makedirs(dir)
- utils.system('mount -t hugetlbfs none %s' % dir)
-
- os.chdir(self.srcdir)
-
- profilers = self.job.profilers
- if profilers.present():
- profilers.start(self)
- os.chdir(self.srcdir)
- # make check might fail for 32 bit if the 32 bit compile earlier
- # had failed. See if it passes for 64 bit in that case.
- try:
- utils.system('make check')
- except:
- utils.system('make check OBJDIRS=obj64')
- if profilers.present():
- profilers.stop(self)
- profilers.report(self)
-
- utils.system('umount %s' % dir)
+ version = 4
+
+ # http://libhugetlbfs.ozlabs.org/releases/libhugetlbfs-1.3-pre1.tar.gz
+ def setup(self, tarball = 'libhugetlbfs-1.3-pre1.tar.gz'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ os.chdir(self.srcdir)
+
+ # make might fail if there are no proper headers for the 32 bit
+ # version, in that case try only for the 64 bit version
+ try:
+ utils.system('make')
+ except:
+ utils.system('make OBJDIRS=obj64')
+
+ def execute(self, dir = None, pages_requested = 20):
+ autotest_utils.check_kernel_ver("2.6.16")
+
+ # Check huge page number
+ pages_available = 0
+ if os.path.exists('/proc/sys/vm/nr_hugepages'):
+ utils.write_one_line('/proc/sys/vm/nr_hugepages',
+ str(pages_requested))
+ pages_available = int(open('/proc/sys/vm/nr_hugepages', 'r').readline())
+ else:
+ raise error.TestNAError('Kernel does not support hugepages')
+
+ if pages_available < pages_requested:
+ raise error.TestError('%d huge pages available, < %d pages requested' % (pages_available, pages_requested))
+
+ # Check if hugetlbfs has been mounted
+ if not autotest_utils.file_contains_pattern('/proc/mounts', 'hugetlbfs'):
+ if not dir:
+ dir = os.path.join(self.tmpdir, 'hugetlbfs')
+ os.makedirs(dir)
+ utils.system('mount -t hugetlbfs none %s' % dir)
+
+ os.chdir(self.srcdir)
+
+ profilers = self.job.profilers
+ if profilers.present():
+ profilers.start(self)
+ os.chdir(self.srcdir)
+ # make check might fail for 32 bit if the 32 bit compile earlier
+ # had failed. See if it passes for 64 bit in that case.
+ try:
+ utils.system('make check')
+ except:
+ utils.system('make check OBJDIRS=obj64')
+ if profilers.present():
+ profilers.stop(self)
+ profilers.report(self)
+
+ utils.system('umount %s' % dir)
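execute() above asks the kernel for pages_requested huge pages through /proc/sys/vm/nr_hugepages and aborts if fewer come back, then mounts hugetlbfs if it is not already mounted. A standalone sketch of the reservation check (RuntimeError stands in for the autotest error classes):

    import os

    def reserve_hugepages(requested, proc='/proc/sys/vm/nr_hugepages'):
        if not os.path.exists(proc):
            raise RuntimeError('kernel does not support hugepages')
        with open(proc, 'w') as f:
            f.write('%d\n' % requested)
        with open(proc) as f:
            available = int(f.readline())
        if available < requested:
            raise RuntimeError('%d huge pages available, %d requested'
                               % (available, requested))
        return available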
diff --git a/client/tests/linus_stress/linus_stress.py b/client/tests/linus_stress/linus_stress.py
index 3a81134e..61603078 100755
--- a/client/tests/linus_stress/linus_stress.py
+++ b/client/tests/linus_stress/linus_stress.py
@@ -4,40 +4,40 @@ from autotest_lib.client.common_lib import utils
class linus_stress(test.test):
- version = 1
+ version = 1
- def setup(self):
- os.mkdir(self.srcdir)
- os.chdir(self.bindir)
- utils.system('cp linus_stress.c src/')
- os.chdir(self.srcdir)
- utils.system('cc linus_stress.c -D_POSIX_C_SOURCE=200112 -o linus_stress')
+ def setup(self):
+ os.mkdir(self.srcdir)
+ os.chdir(self.bindir)
+ utils.system('cp linus_stress.c src/')
+ os.chdir(self.srcdir)
+ utils.system('cc linus_stress.c -D_POSIX_C_SOURCE=200112 -o linus_stress')
- def run_the_test(self, iterations):
- utils.write_one_line('/proc/sys/vm/dirty_ratio', '4')
- utils.write_one_line('/proc/sys/vm/dirty_background_ratio', '2')
+ def run_the_test(self, iterations):
+ utils.write_one_line('/proc/sys/vm/dirty_ratio', '4')
+ utils.write_one_line('/proc/sys/vm/dirty_background_ratio', '2')
- cmd = os.path.join(self.srcdir, 'linus_stress')
- args = "%d" % (autotest_utils.memtotal() / 32)
+ cmd = os.path.join(self.srcdir, 'linus_stress')
+ args = "%d" % (autotest_utils.memtotal() / 32)
- profilers = self.job.profilers
- if profilers.present():
- profilers.start(self)
+ profilers = self.job.profilers
+ if profilers.present():
+ profilers.start(self)
- for i in range(iterations):
- utils.system(cmd + ' ' + args)
+ for i in range(iterations):
+ utils.system(cmd + ' ' + args)
- if profilers.present():
- profilers.stop(self)
- profilers.report(self)
+ if profilers.present():
+ profilers.stop(self)
+ profilers.report(self)
- def execute(self, iterations = 1):
- dirty_ratio = utils.read_one_line('/proc/sys/vm/dirty_ratio')
- dirty_background_ratio = utils.read_one_line('/proc/sys/vm/dirty_background_ratio')
- try:
- self.run_the_test(iterations)
- finally:
- utils.write_one_line('/proc/sys/vm/dirty_ratio', dirty_ratio)
- utils.write_one_line('/proc/sys/vm/dirty_background_ratio', dirty_background_ratio)
+ def execute(self, iterations = 1):
+ dirty_ratio = utils.read_one_line('/proc/sys/vm/dirty_ratio')
+ dirty_background_ratio = utils.read_one_line('/proc/sys/vm/dirty_background_ratio')
+ try:
+ self.run_the_test(iterations)
+ finally:
+ utils.write_one_line('/proc/sys/vm/dirty_ratio', dirty_ratio)
+ utils.write_one_line('/proc/sys/vm/dirty_background_ratio', dirty_background_ratio)
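execute() above is a small instance of the save/run/restore pattern for sysctls: read the current dirty ratios, tighten them for the stress run, and put them back in a finally block even if the run fails. Roughly, with trivial stand-ins for the utils helpers:

    def with_tight_dirty_ratios(run):
        def read_line(path):
            return open(path).readline().strip()
        def write_line(path, value):
            open(path, 'w').write(value + '\n')

        paths = ('/proc/sys/vm/dirty_ratio',
                 '/proc/sys/vm/dirty_background_ratio')
        saved = [(p, read_line(p)) for p in paths]
        try:
            write_line(paths[0], '4')
            write_line(paths[1], '2')
            run()
        finally:
            # Always restore the original values, even on failure.
            for path, value in saved:
                write_line(path, value)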
diff --git a/client/tests/lmbench/lmbench.py b/client/tests/lmbench/lmbench.py
index fc4807bd..a4bef457 100755
--- a/client/tests/lmbench/lmbench.py
+++ b/client/tests/lmbench/lmbench.py
@@ -5,44 +5,44 @@ from autotest_lib.client.common_lib import utils
class lmbench(test.test):
- version = 2
-
- def setup(self, tarball = 'lmbench3.tar.bz2'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- # http://www.bitmover.com/lm/lmbench/lmbench3.tar.gz
- # + lmbench3.diff
- # removes Makefile references to bitkeeper
- # default mail to no, fix job placement defaults (masouds)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- os.chdir(self.srcdir)
-
- utils.system('make')
-
-
- def execute(self, iterations = 1, mem = '', fastmem = 'NO',
- slowfs = 'NO', disks = '', disks_desc = '',
- mhz = '', remote = '', enough = '5000', sync_max = '1',
- fsdir = None, file = None):
- if not fsdir:
- fsdir = self.tmpdir
- if not file:
- file = self.tmpdir+'XXX'
-
- os.chdir(self.srcdir)
- cmd = "yes '' | make rerun"
- profilers = self.job.profilers
- if not profilers.only():
- for i in range(iterations):
- utils.system(cmd)
-
- # Do a profiling run if necessary
- if profilers.present():
- profilers.start(self)
- utils.system(cmd)
- profilers.stop(self)
- profilers.report(self)
- # Get the results:
- outputdir = self.srcdir + "/results"
- results = self.resultsdir + "/summary.txt"
- utils.system("make -C " + outputdir + " summary > " + results)
+ version = 2
+
+ def setup(self, tarball = 'lmbench3.tar.bz2'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ # http://www.bitmover.com/lm/lmbench/lmbench3.tar.gz
+ # + lmbench3.diff
+ # removes Makefile references to bitkeeper
+ # default mail to no, fix job placement defaults (masouds)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ os.chdir(self.srcdir)
+
+ utils.system('make')
+
+
+ def execute(self, iterations = 1, mem = '', fastmem = 'NO',
+ slowfs = 'NO', disks = '', disks_desc = '',
+ mhz = '', remote = '', enough = '5000', sync_max = '1',
+ fsdir = None, file = None):
+ if not fsdir:
+ fsdir = self.tmpdir
+ if not file:
+ file = self.tmpdir+'XXX'
+
+ os.chdir(self.srcdir)
+ cmd = "yes '' | make rerun"
+ profilers = self.job.profilers
+ if not profilers.only():
+ for i in range(iterations):
+ utils.system(cmd)
+
+ # Do a profiling run if necessary
+ if profilers.present():
+ profilers.start(self)
+ utils.system(cmd)
+ profilers.stop(self)
+ profilers.report(self)
+ # Get the results:
+ outputdir = self.srcdir + "/results"
+ results = self.resultsdir + "/summary.txt"
+ utils.system("make -C " + outputdir + " summary > " + results)
diff --git a/client/tests/lsb_dtk/lsb_dtk.py b/client/tests/lsb_dtk/lsb_dtk.py
index 0d1db506..f8d25df2 100644
--- a/client/tests/lsb_dtk/lsb_dtk.py
+++ b/client/tests/lsb_dtk/lsb_dtk.py
@@ -12,154 +12,154 @@ lucasmr@br.ibm.com (Lucas Meneghel Rodrigues)
'''
class lsb_dtk(test.test):
- version = 1
- def get_lsb_arch(self):
- self.arch = autotest_utils.get_current_kernel_arch()
- if self.arch in ['i386', 'i486', 'i586', 'i686', 'athlon']:
- return 'ia32'
- elif self.arch == 'ppc':
- return 'ppc32'
- elif self.arch in ['s390', 's390x', 'ia64', 'x86_64', 'ppc64']:
- return self.arch
- else:
- e_msg = 'Architecture %s not supported by LSB' % self.arch
- raise error.TestError(e_msg)
-
-
- def install_lsb_packages(self, srcdir, cachedir, my_config):
- # First, we download the LSB DTK manager package, worry about installing it later
- self.dtk_manager_arch = my_config.get('dtk-manager', 'arch-%s' % self.get_lsb_arch())
- self.dtk_manager_url = my_config.get('dtk-manager', 'tarball_url') % self.dtk_manager_arch
- if not self.dtk_manager_url:
- raise error.TestError('Could not get DTK manager URL from configuration file')
- self.dtk_md5 = my_config.get('dtk-manager', 'md5-%s' % self.get_lsb_arch())
- if self.dtk_md5:
- print 'Caching LSB DTK manager RPM'
- self.dtk_manager_pkg = autotest_utils.unmap_url_cache(cachedir, self.dtk_manager_url, self.dtk_md5)
- else:
- raise error.TestError('Could not find DTK manager package md5, cannot cache DTK manager tarball')
-
- # Get LSB tarball, cache it and uncompress under autotest srcdir
- if my_config.get('lsb', 'override_default_url') == 'no':
- self.lsb_url = my_config.get('lsb', 'tarball_url') % self.get_lsb_arch()
- else:
- self.lsb_url = my_config.get('lsb', 'tarball_url_alt') % self.get_lsb_arch()
- if not self.lsb_url:
- raise TestError('Could not get lsb URL from configuration file')
- self.md5_key = 'md5-%s' % self.get_lsb_arch()
- self.lsb_md5 = my_config.get('lsb', self.md5_key)
- if self.lsb_md5:
- print 'Caching LSB tarball'
- self.lsb_pkg = autotest_utils.unmap_url_cache(self.cachedir, self.lsb_url, self.lsb_md5)
- else:
- raise error.TestError('Could not find LSB package md5, cannot cache LSB tarball')
-
- autotest_utils.extract_tarball_to_dir(self.lsb_pkg, srcdir)
-
- # Lets load a file that contains the list of RPMs
- os.chdir(srcdir)
- if not os.path.isfile('inst-config'):
- raise IOError('Could not find file with package info, inst-config')
- self.rpm_file_list = open('inst-config', 'r')
- self.pkg_pattern = re.compile('[A-Za-z0-9_.-]*[.][r][p][m]')
- self.lsb_pkg_list = []
- for self.line in self.rpm_file_list.readlines():
- try:
- # We will install lsb-dtk-manager separately, so we can remove
- # it from the list of packages
- if not 'lsb-dtk-manager' in self.line:
- self.line = re.findall(self.pkg_pattern, self.line)[0]
- self.lsb_pkg_list.append(self.line)
- except:
- # If we don't get a match, no problem
- pass
-
- # Lets figure out the host distro
- distro_pkg_support = package.os_support()
- if os.path.isfile('/etc/debian_version') and distro_pkg_support['dpkg']:
- print 'Debian based distro detected'
- if distro_pkg_support['conversion']:
- print 'Package conversion supported'
- self.distro_type = 'debian-based'
- else:
- e_msg = 'Package conversion not supported. Cannot handle LSB package installation'
- raise EnvironmentError(e_msg)
- elif distro_pkg_support['rpm']:
- print 'Red Hat based distro detected'
- self.distro_type = 'redhat-based'
- else:
- print 'OS does not seem to be red hat or debian based'
- e_msg = 'Cannot handle LSB package installation'
- raise EnvironmentError(e_msg)
-
- # According to the host distro detection, we can install the packages
- # using the list previously assembled
- if self.distro_type == 'redhat-based':
- print 'Installing LSB RPM packages'
- package.install(self.dtk_manager_pkg)
- for self.lsb_rpm in self.lsb_pkg_list:
- package.install(self.lsb_rpm, nodeps = True)
- elif self.distro_type == 'debian-based':
- print 'Remember that you must have the following lsb compliance packages installed:'
- print 'lsb-core lsb-cxx lsb-graphics lsb-desktop lsb-qt4 lsb-languages lsb-multimedia lsb-printing'
- print 'Converting and installing LSB packages'
- self.dtk_manager_dpkg = package.convert(self.dtk_manager_pkg, 'dpkg')
- package.install(self.dtk_manager_dpkg)
- for self.lsb_rpm in self.lsb_pkg_list:
- self.lsb_dpkg = package.convert(self.lsb_rpm, 'dpkg')
- package.install(self.lsb_dpkg, nodeps = True)
-
- def link_lsb_libraries(self, config):
- print 'Linking LSB libraries'
- self.libdir_key = 'libdir-%s' % self.get_lsb_arch()
- self.os_libdir = config.get('lib', self.libdir_key)
- if not self.os_libdir:
- raise TypeError('Could not find OS lib dir from conf file')
- self.lib_key = 'lib-%s' % self.get_lsb_arch()
- self.lib_list_raw = config.get('lib', self.lib_key)
- if not self.lib_list_raw:
- raise TypeError('Could not find library list from conf file')
- self.lib_list = eval(self.lib_list_raw)
-
- # Remove any previous ld-lsb*.so symbolic links
- self.lsb_libs = glob.glob('%s/ld-lsb*.so*' % self.os_libdir)
- for self.lib in self.lsb_libs:
- os.remove(self.lib)
-
- # Get the base library that we'll use to recreate the symbolic links
- self.system_lib = glob.glob('%s/ld-2*.so*' % self.os_libdir)[0]
-
- # Now just link the system lib that we just found to each one of the
- # needed LSB libraries that we provided on the conf file
- for self.lsb_lib in self.lib_list:
- # Get the library absolute path
- self.lsb_lib = os.path.join(self.os_libdir, self.lsb_lib)
- # Link the library system_lib -> lsb_lib
- os.symlink(self.system_lib, self.lsb_lib)
-
-
- def execute(self, args = 'all', config = './lsb31.cfg'):
- # Load configuration. Use autotest tmpdir if needed
- my_config = config_loader(config, self.tmpdir)
- # Cache directory, that will store LSB tarball and DTK manager RPM
- self.cachedir = os.path.join(self.bindir, 'cache')
- if not os.path.isdir(self.cachedir):
- os.makedirs(self.cachedir)
-
- self.install_lsb_packages(self.srcdir, self.cachedir, my_config)
- self.link_lsb_libraries(my_config)
-
- self.main_script_path = my_config.get('lsb', 'main_script_path')
- logfile = os.path.join(self.resultsdir, 'lsb.log')
- args2 = '-r %s' % (logfile)
- args = args + ' ' + args2
- cmd = os.path.join(self.srcdir, self.main_script_path) + ' ' + args
-
- profilers = self.job.profilers
- if profilers.present():
- profilers.start(self)
- print 'Executing LSB main test script'
- utils.system(cmd)
- if profilers.present():
- profilers.stop(self)
- profilers.report(self)
+ version = 1
+ def get_lsb_arch(self):
+ self.arch = autotest_utils.get_current_kernel_arch()
+ if self.arch in ['i386', 'i486', 'i586', 'i686', 'athlon']:
+ return 'ia32'
+ elif self.arch == 'ppc':
+ return 'ppc32'
+ elif self.arch in ['s390', 's390x', 'ia64', 'x86_64', 'ppc64']:
+ return self.arch
+ else:
+ e_msg = 'Architecture %s not supported by LSB' % self.arch
+ raise error.TestError(e_msg)
+
+
+ def install_lsb_packages(self, srcdir, cachedir, my_config):
+ # First, we download the LSB DTK manager package, worry about installing it later
+ self.dtk_manager_arch = my_config.get('dtk-manager', 'arch-%s' % self.get_lsb_arch())
+ self.dtk_manager_url = my_config.get('dtk-manager', 'tarball_url') % self.dtk_manager_arch
+ if not self.dtk_manager_url:
+ raise error.TestError('Could not get DTK manager URL from configuration file')
+ self.dtk_md5 = my_config.get('dtk-manager', 'md5-%s' % self.get_lsb_arch())
+ if self.dtk_md5:
+ print 'Caching LSB DTK manager RPM'
+ self.dtk_manager_pkg = autotest_utils.unmap_url_cache(cachedir, self.dtk_manager_url, self.dtk_md5)
+ else:
+ raise error.TestError('Could not find DTK manager package md5, cannot cache DTK manager tarball')
+
+ # Get LSB tarball, cache it and uncompress under autotest srcdir
+ if my_config.get('lsb', 'override_default_url') == 'no':
+ self.lsb_url = my_config.get('lsb', 'tarball_url') % self.get_lsb_arch()
+ else:
+ self.lsb_url = my_config.get('lsb', 'tarball_url_alt') % self.get_lsb_arch()
+ if not self.lsb_url:
+            raise error.TestError('Could not get lsb URL from configuration file')
+ self.md5_key = 'md5-%s' % self.get_lsb_arch()
+ self.lsb_md5 = my_config.get('lsb', self.md5_key)
+ if self.lsb_md5:
+ print 'Caching LSB tarball'
+ self.lsb_pkg = autotest_utils.unmap_url_cache(self.cachedir, self.lsb_url, self.lsb_md5)
+ else:
+ raise error.TestError('Could not find LSB package md5, cannot cache LSB tarball')
+
+ autotest_utils.extract_tarball_to_dir(self.lsb_pkg, srcdir)
+
+        # Let's load a file that contains the list of RPMs
+ os.chdir(srcdir)
+ if not os.path.isfile('inst-config'):
+ raise IOError('Could not find file with package info, inst-config')
+ self.rpm_file_list = open('inst-config', 'r')
+ self.pkg_pattern = re.compile('[A-Za-z0-9_.-]*[.][r][p][m]')
+ self.lsb_pkg_list = []
+ for self.line in self.rpm_file_list.readlines():
+ try:
+ # We will install lsb-dtk-manager separately, so we can remove
+ # it from the list of packages
+ if not 'lsb-dtk-manager' in self.line:
+ self.line = re.findall(self.pkg_pattern, self.line)[0]
+ self.lsb_pkg_list.append(self.line)
+ except:
+ # If we don't get a match, no problem
+ pass
+
+        # Let's figure out the host distro
+ distro_pkg_support = package.os_support()
+ if os.path.isfile('/etc/debian_version') and distro_pkg_support['dpkg']:
+ print 'Debian based distro detected'
+ if distro_pkg_support['conversion']:
+ print 'Package conversion supported'
+ self.distro_type = 'debian-based'
+ else:
+ e_msg = 'Package conversion not supported. Cannot handle LSB package installation'
+ raise EnvironmentError(e_msg)
+ elif distro_pkg_support['rpm']:
+ print 'Red Hat based distro detected'
+ self.distro_type = 'redhat-based'
+ else:
+ print 'OS does not seem to be red hat or debian based'
+ e_msg = 'Cannot handle LSB package installation'
+ raise EnvironmentError(e_msg)
+
+ # According to the host distro detection, we can install the packages
+ # using the list previously assembled
+ if self.distro_type == 'redhat-based':
+ print 'Installing LSB RPM packages'
+ package.install(self.dtk_manager_pkg)
+ for self.lsb_rpm in self.lsb_pkg_list:
+ package.install(self.lsb_rpm, nodeps = True)
+ elif self.distro_type == 'debian-based':
+ print 'Remember that you must have the following lsb compliance packages installed:'
+ print 'lsb-core lsb-cxx lsb-graphics lsb-desktop lsb-qt4 lsb-languages lsb-multimedia lsb-printing'
+ print 'Converting and installing LSB packages'
+ self.dtk_manager_dpkg = package.convert(self.dtk_manager_pkg, 'dpkg')
+ package.install(self.dtk_manager_dpkg)
+ for self.lsb_rpm in self.lsb_pkg_list:
+ self.lsb_dpkg = package.convert(self.lsb_rpm, 'dpkg')
+ package.install(self.lsb_dpkg, nodeps = True)
+
+ def link_lsb_libraries(self, config):
+ print 'Linking LSB libraries'
+ self.libdir_key = 'libdir-%s' % self.get_lsb_arch()
+ self.os_libdir = config.get('lib', self.libdir_key)
+ if not self.os_libdir:
+ raise TypeError('Could not find OS lib dir from conf file')
+ self.lib_key = 'lib-%s' % self.get_lsb_arch()
+ self.lib_list_raw = config.get('lib', self.lib_key)
+ if not self.lib_list_raw:
+ raise TypeError('Could not find library list from conf file')
+ self.lib_list = eval(self.lib_list_raw)
+
+ # Remove any previous ld-lsb*.so symbolic links
+ self.lsb_libs = glob.glob('%s/ld-lsb*.so*' % self.os_libdir)
+ for self.lib in self.lsb_libs:
+ os.remove(self.lib)
+
+ # Get the base library that we'll use to recreate the symbolic links
+ self.system_lib = glob.glob('%s/ld-2*.so*' % self.os_libdir)[0]
+
+ # Now just link the system lib that we just found to each one of the
+ # needed LSB libraries that we provided on the conf file
+ for self.lsb_lib in self.lib_list:
+ # Get the library absolute path
+ self.lsb_lib = os.path.join(self.os_libdir, self.lsb_lib)
+ # Link the library system_lib -> lsb_lib
+ os.symlink(self.system_lib, self.lsb_lib)
+
+
+ def execute(self, args = 'all', config = './lsb31.cfg'):
+ # Load configuration. Use autotest tmpdir if needed
+ my_config = config_loader(config, self.tmpdir)
+ # Cache directory, that will store LSB tarball and DTK manager RPM
+ self.cachedir = os.path.join(self.bindir, 'cache')
+ if not os.path.isdir(self.cachedir):
+ os.makedirs(self.cachedir)
+
+ self.install_lsb_packages(self.srcdir, self.cachedir, my_config)
+ self.link_lsb_libraries(my_config)
+
+ self.main_script_path = my_config.get('lsb', 'main_script_path')
+ logfile = os.path.join(self.resultsdir, 'lsb.log')
+ args2 = '-r %s' % (logfile)
+ args = args + ' ' + args2
+ cmd = os.path.join(self.srcdir, self.main_script_path) + ' ' + args
+
+ profilers = self.job.profilers
+ if profilers.present():
+ profilers.start(self)
+ print 'Executing LSB main test script'
+ utils.system(cmd)
+ if profilers.present():
+ profilers.stop(self)
+ profilers.report(self)
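link_lsb_libraries() above removes any stale ld-lsb*.so* links and points every loader name required by the config file at the distro's real ld-2*.so. A distilled sketch (libdir and the name list are whatever the lsb31.cfg file supplies):

    import glob, os

    def relink_lsb_loader(libdir, lsb_names):
        # Drop stale LSB loader links first.
        for old in glob.glob(os.path.join(libdir, 'ld-lsb*.so*')):
            os.remove(old)
        # Recreate each requested name against the system loader.
        system_lib = glob.glob(os.path.join(libdir, 'ld-2*.so*'))[0]
        for name in lsb_names:
            os.symlink(system_lib, os.path.join(libdir, name))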
diff --git a/client/tests/ltp/ltp-diff.py b/client/tests/ltp/ltp-diff.py
index 12031a36..d0f0f9f5 100644
--- a/client/tests/ltp/ltp-diff.py
+++ b/client/tests/ltp/ltp-diff.py
@@ -4,106 +4,106 @@
# Description:
# Input: Two or more files containing results from different executions of
# the LTP. The input can either be file names or the url location
-# of the ltp.results file.
+# of the ltp.results file.
# Output: A report on the following:
-# - The total number of tests executed in each run
-# - The testname, sequence number, and output of each run
+# - The total number of tests executed in each run
+# - The testname, sequence number, and output of each run
# where the results of those runs differ
# Return:
-# 0 if all runs had identical results
-# Non-zero if results differ, or bad input
+# 0 if all runs had identical results
+# Non-zero if results differ, or bad input
import sys, string, re
from autotest_lib.client.common_lib import utils
def usage():
- print "\nUsage: \n\
- ltp-diff results1 results2 ... locationN \n\
- Note: location[1,2,N] may be local files or URLs of LTP results\n"
- sys.exit(1)
+ print "\nUsage: \n\
+ltp-diff results1 results2 ... locationN \n\
+Note: location[1,2,N] may be local files or URLs of LTP results\n"
+ sys.exit(1)
def get_results(results_files):
- """
- Download the results if needed.
- Return results of each run in a numerically-indexed dictionary
- of dictionaries keyed on testnames.
- Return dictionary keyed on unique testnames across all runs.
- """
- r = re.compile('(\S+\s+\S+)\s+(\S+)\s+:')
- i = 0
- runs = {}
- testnames = {}
- for file in results_files:
- runs[i] = {}
- try:
- fh = utils.urlopen(file)
- results = fh.readlines()
- fh.close()
- except:
- print "ERROR: reading results resource [%s]" % (file)
- usage()
- for line in results:
- try:
- s = r.match(line)
- testname = s.group(1)
- status = s.group(2)
- runs[i][testname] = status
- testnames[testname] = 1
- except:
- pass
- i += 1
- return (runs, testnames)
+ """
+ Download the results if needed.
+ Return results of each run in a numerically-indexed dictionary
+ of dictionaries keyed on testnames.
+ Return dictionary keyed on unique testnames across all runs.
+ """
+ r = re.compile('(\S+\s+\S+)\s+(\S+)\s+:')
+ i = 0
+ runs = {}
+ testnames = {}
+ for file in results_files:
+ runs[i] = {}
+ try:
+ fh = utils.urlopen(file)
+ results = fh.readlines()
+ fh.close()
+ except:
+ print "ERROR: reading results resource [%s]" % (file)
+ usage()
+ for line in results:
+ try:
+ s = r.match(line)
+ testname = s.group(1)
+ status = s.group(2)
+ runs[i][testname] = status
+ testnames[testname] = 1
+ except:
+ pass
+ i += 1
+ return (runs, testnames)
def compare_results(runs):
- """
- Loop through all testnames alpahbetically.
- Print any testnames with differing results across runs.
- Return 1 if any test results across runs differ.
- Return 0 if all test results match.
- """
- rc = 0
- print "LTP Test Results to Compare"
- for i in range(len(runs)):
- print " Run[%d]: %d" % (i, len(runs[i].keys()))
- print ""
- header = 0
- all_testnames = testnames.keys()
- all_testnames.sort()
- for testname in all_testnames:
- differ = 0
- for i in range(1,len(runs)):
- # Must handle testcases that executed in one run
- # but not another by setting status to "null"
- if not runs[i].has_key(testname):
- runs[i][testname] = "null"
- if not runs[i-1].has_key(testname):
- runs[i-1][testname] = "null"
- # Check for the results inconsistencies
- if runs[i][testname] != runs[i-1][testname]:
- differ = 1
- if differ:
- if header == 0:
- # Print the differences header only once
- print "Tests with Inconsistent Results across Runs"
- print " %-35s:\t%s" % ("Testname,Sequence", "Run Results")
- header = 1
+ """
+    Loop through all testnames alphabetically.
+ Print any testnames with differing results across runs.
+ Return 1 if any test results across runs differ.
+ Return 0 if all test results match.
+ """
+ rc = 0
+ print "LTP Test Results to Compare"
+ for i in range(len(runs)):
+ print " Run[%d]: %d" % (i, len(runs[i].keys()))
+ print ""
+ header = 0
+ all_testnames = testnames.keys()
+ all_testnames.sort()
+ for testname in all_testnames:
+ differ = 0
+ for i in range(1,len(runs)):
+ # Must handle testcases that executed in one run
+ # but not another by setting status to "null"
+ if not runs[i].has_key(testname):
+ runs[i][testname] = "null"
+ if not runs[i-1].has_key(testname):
+ runs[i-1][testname] = "null"
+ # Check for the results inconsistencies
+ if runs[i][testname] != runs[i-1][testname]:
+ differ = 1
+ if differ:
+ if header == 0:
+ # Print the differences header only once
+ print "Tests with Inconsistent Results across Runs"
+ print " %-35s:\t%s" % ("Testname,Sequence", "Run Results")
+ header = 1
- # Print info if results differ
- rc = 1
- testname_cleaned = re.sub('\s+', ',', testname)
- print " %-35s:\t" % (testname_cleaned),
- all_results = ""
- for i in range(len(runs)):
- all_results += runs[i][testname]
- if i+1<len(runs):
- all_results += "/"
- print all_results
- if rc == 0:
- print "All LTP results are identical"
- return rc
+ # Print info if results differ
+ rc = 1
+ testname_cleaned = re.sub('\s+', ',', testname)
+ print " %-35s:\t" % (testname_cleaned),
+ all_results = ""
+ for i in range(len(runs)):
+ all_results += runs[i][testname]
+ if i+1<len(runs):
+ all_results += "/"
+ print all_results
+ if rc == 0:
+ print "All LTP results are identical"
+ return rc
########
@@ -111,7 +111,7 @@ def compare_results(runs):
########
sys.argv.pop(0)
if (len(sys.argv) < 2):
- usage()
+ usage()
(runs, testnames) = get_results(sys.argv)
rc = compare_results(runs)
sys.exit(rc)
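compare_results() above walks the union of test names and reports any test whose status differs between runs, treating a test missing from a run as "null". The core comparison reduces to roughly this (runs as a list of {testname: status} dicts):

    def differing_tests(runs, testnames):
        diffs = {}
        for name in sorted(testnames):
            statuses = [run.get(name, 'null') for run in runs]
            if len(set(statuses)) > 1:      # results disagree across runs
                diffs[name] = statuses
        return diffs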
diff --git a/client/tests/ltp/ltp.py b/client/tests/ltp/ltp.py
index 043ac007..2417f46b 100755
--- a/client/tests/ltp/ltp.py
+++ b/client/tests/ltp/ltp.py
@@ -3,46 +3,46 @@ from autotest_lib.client.bin import autotest_utils, test
from autotest_lib.client.common_lib import utils, error
class ltp(test.test):
- version = 4
-
- # http://prdownloads.sourceforge.net/ltp/ltp-full-20080229.tgz
- def setup(self, tarball = 'ltp-full-20080229.tar.bz2'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- os.chdir(self.srcdir)
-
- utils.system('patch -p1 < ../ltp.patch')
-
- # comment the capability tests if we fail to load the capability module
- try:
- utils.system('modprobe capability')
- except error.CmdError, detail:
- utils.system('patch -p1 < ../ltp_capability.patch')
-
- utils.system('cp ../scan.c pan/') # saves having lex installed
- utils.system('make -j %d' % autotest_utils.count_cpus())
- utils.system('yes n | make install')
-
-
- # Note: to run a specific test, try '-f cmdfile -s test' in the
- # in the args (-f for test file and -s for the test case)
- # eg, job.run_test('ltp', '-f math -s float_bessel')
- def execute(self, args = '', script = 'runltp'):
-
- # In case the user wants to run another test script
- if script == 'runltp':
- logfile = os.path.join(self.resultsdir, 'ltp.log')
- failcmdfile = os.path.join(self.debugdir, 'failcmdfile')
- args2 = '-q -l %s -C %s -d %s' % (logfile, failcmdfile, self.tmpdir)
- args = args + ' ' + args2
-
- cmd = os.path.join(self.srcdir, script) + ' ' + args
-
- profilers = self.job.profilers
- if profilers.present():
- profilers.start(self)
- utils.system(cmd)
- if profilers.present():
- profilers.stop(self)
- profilers.report(self)
+ version = 4
+
+ # http://prdownloads.sourceforge.net/ltp/ltp-full-20080229.tgz
+ def setup(self, tarball = 'ltp-full-20080229.tar.bz2'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ os.chdir(self.srcdir)
+
+ utils.system('patch -p1 < ../ltp.patch')
+
+ # comment the capability tests if we fail to load the capability module
+ try:
+ utils.system('modprobe capability')
+ except error.CmdError, detail:
+ utils.system('patch -p1 < ../ltp_capability.patch')
+
+ utils.system('cp ../scan.c pan/') # saves having lex installed
+ utils.system('make -j %d' % autotest_utils.count_cpus())
+ utils.system('yes n | make install')
+
+
+ # Note: to run a specific test, try '-f cmdfile -s test' in the
+ # in the args (-f for test file and -s for the test case)
+ # eg, job.run_test('ltp', '-f math -s float_bessel')
+ def execute(self, args = '', script = 'runltp'):
+
+ # In case the user wants to run another test script
+ if script == 'runltp':
+ logfile = os.path.join(self.resultsdir, 'ltp.log')
+ failcmdfile = os.path.join(self.debugdir, 'failcmdfile')
+ args2 = '-q -l %s -C %s -d %s' % (logfile, failcmdfile, self.tmpdir)
+ args = args + ' ' + args2
+
+ cmd = os.path.join(self.srcdir, script) + ' ' + args
+
+ profilers = self.job.profilers
+ if profilers.present():
+ profilers.start(self)
+ utils.system(cmd)
+ if profilers.present():
+ profilers.stop(self)
+ profilers.report(self)
diff --git a/client/tests/netperf2/control.client b/client/tests/netperf2/control.client
index bfff4022..7c2ecaca 100644
--- a/client/tests/netperf2/control.client
+++ b/client/tests/netperf2/control.client
@@ -1,2 +1 @@
job.run_test('netperf2', '10.10.1.2', '10.10.1.6', 'client', tag='client')
-
diff --git a/client/tests/netperf2/control.parallel b/client/tests/netperf2/control.parallel
index c845879e..8414a3ad 100644
--- a/client/tests/netperf2/control.parallel
+++ b/client/tests/netperf2/control.parallel
@@ -1,7 +1,7 @@
def client():
- job.run_test('netperf2', '127.0.0.1', '127.0.0.1', 'client', tag='client')
+ job.run_test('netperf2', '127.0.0.1', '127.0.0.1', 'client', tag='client')
def server():
- job.run_test('netperf2', '127.0.0.1', '127.0.0.1', 'server', tag='server')
+ job.run_test('netperf2', '127.0.0.1', '127.0.0.1', 'server', tag='server')
job.parallel([server], [client])
diff --git a/client/tests/netperf2/netperf2.py b/client/tests/netperf2/netperf2.py
index 1f7fb663..e01f815a 100755
--- a/client/tests/netperf2/netperf2.py
+++ b/client/tests/netperf2/netperf2.py
@@ -4,75 +4,75 @@ from autotest_lib.client.common_lib import utils, error
class netperf2(test.test):
- version = 1
-
- # ftp://ftp.netperf.org/netperf/netperf-2.4.1.tar.gz
- def setup(self, tarball = 'netperf-2.4.1.tar.gz'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- os.chdir(self.srcdir)
-
- utils.system('./configure')
- utils.system('make')
-
-
- def initialize(self):
- # netserver doesn't detach properly from the console. When
- # it is run from ssh, this causes the ssh command not to
- # return even though netserver meant to be backgrounded.
- # This behavior is remedied by redirecting fd 0, 1 & 2
- self.server_path = ('%s &>/dev/null </dev/null'
- % os.path.join(self.srcdir, 'src/netserver'))
- self.client_path = os.path.join(self.srcdir, 'src/netperf')
-
-
- def execute(self, server_ip, client_ip, role,
- script='snapshot_script', args=''):
- server_tag = server_ip + '#netperf-server'
- client_tag = client_ip + '#netperf-client'
- all = [server_tag, client_tag]
- job = self.job
- if (role == 'server'):
- self.server_start()
- try:
- job.barrier(server_tag, 'start',
- 600).rendevous(*all)
- job.barrier(server_tag, 'stop',
- 3600).rendevous(*all)
- finally:
- self.server_stop()
- elif (role == 'client'):
- os.environ['NETPERF_CMD'] = self.client_path
- job.barrier(client_tag, 'start', 600).rendevous(*all)
- self.client(script, server_ip, args)
- job.barrier(client_tag, 'stop', 30).rendevous(*all)
- else:
- raise error.UnhandledError('invalid role specified')
-
-
- def server_start(self):
- # we should really record the pid we forked off, but there
- # was no obvious way to run the daemon in the foreground.
- # Hacked it for now
- system('killall netserver', ignore_status=True)
- system(self.server_path)
-
-
- def server_stop(self):
- # this should really just kill the pid I forked, but ...
- system('killall netserver')
-
-
- def client(self, script, server_ip, args = 'CPU'):
- # run some client stuff
- stdout_path = os.path.join(self.resultsdir, script + '.stdout')
- stderr_path = os.path.join(self.resultsdir, script + '.stderr')
- self.job.stdout.tee_redirect(stdout_path)
- self.job.stderr.tee_redirect(stderr_path)
-
- script_path = os.path.join(self.srcdir, 'doc/examples', script)
- system('%s %s %s' % (script_path, server_ip, args))
-
- self.job.stdout.restore()
- self.job.stderr.restore()
+ version = 1
+
+ # ftp://ftp.netperf.org/netperf/netperf-2.4.1.tar.gz
+ def setup(self, tarball = 'netperf-2.4.1.tar.gz'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ os.chdir(self.srcdir)
+
+ utils.system('./configure')
+ utils.system('make')
+
+
+ def initialize(self):
+ # netserver doesn't detach properly from the console. When
+ # it is run from ssh, this causes the ssh command not to
+ # return even though netserver is meant to be backgrounded.
+ # This behavior is remedied by redirecting fd 0, 1 & 2
+ self.server_path = ('%s &>/dev/null </dev/null'
+ % os.path.join(self.srcdir, 'src/netserver'))
+ self.client_path = os.path.join(self.srcdir, 'src/netperf')
+
+
+ def execute(self, server_ip, client_ip, role,
+ script='snapshot_script', args=''):
+ server_tag = server_ip + '#netperf-server'
+ client_tag = client_ip + '#netperf-client'
+ all = [server_tag, client_tag]
+ job = self.job
+ if (role == 'server'):
+ self.server_start()
+ try:
+ job.barrier(server_tag, 'start',
+ 600).rendevous(*all)
+ job.barrier(server_tag, 'stop',
+ 3600).rendevous(*all)
+ finally:
+ self.server_stop()
+ elif (role == 'client'):
+ os.environ['NETPERF_CMD'] = self.client_path
+ job.barrier(client_tag, 'start', 600).rendevous(*all)
+ self.client(script, server_ip, args)
+ job.barrier(client_tag, 'stop', 30).rendevous(*all)
+ else:
+ raise error.UnhandledError('invalid role specified')
+
+
+ def server_start(self):
+ # we should really record the pid we forked off, but there
+ # was no obvious way to run the daemon in the foreground.
+ # Hacked it for now
+ system('killall netserver', ignore_status=True)
+ system(self.server_path)
+
+
+ def server_stop(self):
+ # this should really just kill the pid I forked, but ...
+ system('killall netserver')
+
+
+ def client(self, script, server_ip, args = 'CPU'):
+ # run some client stuff
+ stdout_path = os.path.join(self.resultsdir, script + '.stdout')
+ stderr_path = os.path.join(self.resultsdir, script + '.stderr')
+ self.job.stdout.tee_redirect(stdout_path)
+ self.job.stderr.tee_redirect(stderr_path)
+
+ script_path = os.path.join(self.srcdir, 'doc/examples', script)
+ system('%s %s %s' % (script_path, server_ip, args))
+
+ self.job.stdout.restore()
+ self.job.stderr.restore()
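
The execute() method above keys the whole run off two job barriers: the server comes up, both roles rendezvous at a 'start' barrier, the client runs the netperf script, and both meet again at 'stop' before the server is torn down. A minimal single-host sketch of that handshake, using threading.Barrier as a stand-in for autotest's cross-machine job.barrier():

# Minimal sketch of the start/stop handshake in execute() above, assuming a
# single host with threads standing in for the two roles; the real test uses
# job.barrier(...).rendevous(...) to do the same handshake across machines.
import threading
import time

start = threading.Barrier(2)   # both sides arrive before the benchmark starts
stop = threading.Barrier(2)    # server stays up until the client is finished

def server():
    print('server: netserver would be started here')
    start.wait()
    stop.wait()
    print('server: netserver would be killed here')

def client():
    start.wait()
    print('client: netperf script would run here')
    time.sleep(0.1)
    stop.wait()

threads = [threading.Thread(target=server), threading.Thread(target=client)]
for t in threads:
    t.start()
for t in threads:
    t.join()
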
diff --git a/client/tests/parallel_dd/parallel_dd.py b/client/tests/parallel_dd/parallel_dd.py
index a524e6d1..5d92667f 100755
--- a/client/tests/parallel_dd/parallel_dd.py
+++ b/client/tests/parallel_dd/parallel_dd.py
@@ -4,119 +4,119 @@ from autotest_lib.client.common_lib import utils
class parallel_dd(test.test):
- version = 1
-
-
- def raw_write(self):
- print "Timing raw write of %d megabytes" % self.megabytes
- sys.stdout.flush()
- dd = 'dd if=/dev/zero of=%s bs=4K count=%d' % \
- (self.fs.device, self.blocks)
- print dd
- utils.system(dd + ' > /dev/null')
-
-
- def raw_read(self):
- print "Timing raw read of %d megabytes" % self.megabytes
- sys.stdout.flush()
- dd = 'dd if=%s of=/dev/null bs=4K count=%d' % \
- (self.fs.device, self.blocks)
- print dd
- utils.system(dd + ' > /dev/null')
-
-
- def fs_write(self):
- p = []
- # Write out 'streams' files in parallel background tasks
- for i in range(self.streams):
- file = 'poo%d' % (i+1)
- file = os.path.join(self.job.tmpdir, file)
- dd = 'dd if=/dev/zero of=%s bs=4K count=%d' % \
- (file, self.blocks_per_file)
- print dd
- p.append(subprocess.Popen(dd + ' > /dev/null',
- shell=True))
- print "Waiting for %d streams" % self.streams
- # Wait for everyone to complete
- for i in range(self.streams):
- print "Waiting for %d" % p[i].pid
- sys.stdout.flush()
- os.waitpid(p[i].pid, 0)
- sys.stdout.flush()
- sys.stderr.flush()
-
-
- def fs_read(self):
- for i in range(self.streams):
- file = os.path.join(self.job.tmpdir, 'poo%d' % (i+1))
- dd = 'dd if=%s of=/dev/null bs=4K count=%d' % \
- (file, self.blocks_per_file)
- utils.system(dd + ' > /dev/null')
-
-
- def test(self, tag):
- start = time.time()
- self.raw_write()
- self.raw_write_rate = self.megabytes / (time.time() - start)
-
- start = time.time()
- self.raw_read()
- self.raw_read_rate = self.megabytes / (time.time() - start)
-
- self.fs.mkfs(self.fstype)
- self.fs.mount()
- start = time.time()
- try:
- self.fs_write()
- except:
- try:
- self.fs.unmount()
- finally:
- raise
- self.fs.unmount()
- self.fs_write_rate = self.megabytes / (time.time() - start)
-
- self.fs.mount()
- start = time.time()
- try:
- self.fs_read()
- except:
- try:
- self.fs.unmount()
- finally:
- raise
- read_in()
- self.fs_read_rate = self.megabytes / (time.time() - start)
- self.fs.unmount()
-
-
- def execute(self, fs, fstype = 'ext2', iterations = 2, megabytes = 1000, streams = 2):
- self.megabytes = megabytes
- self.blocks = megabytes * 256
- self.blocks_per_file = self.blocks / streams
- self.fs = fs
- self.fstype = fstype
- self.streams = streams
-
- print "Dumping %d megabytes across %d streams, %d times" % \
- (megabytes, streams, iterations)
-
- keyval = open(os.path.join(self.resultsdir, 'keyval'), 'w')
- for i in range(iterations):
- self.test('%d' % i)
- t = "raw_write=%d\n" % self.raw_write_rate
- t += "raw_read=%d\n" % self.raw_read_rate
- t += "fs_write=%d\n" % self.fs_write_rate
- t += "fs_read=%d\n" % self.fs_read_rate
- t += "\n"
- print t
- keyval.write(t)
- keyval.close()
-
-
- profilers = self.job.profilers
- if profilers.present():
- profilers.start(self)
- self.test('profile')
- profilers.stop(self)
- profilers.report(self)
+ version = 1
+
+
+ def raw_write(self):
+ print "Timing raw write of %d megabytes" % self.megabytes
+ sys.stdout.flush()
+ dd = 'dd if=/dev/zero of=%s bs=4K count=%d' % \
+ (self.fs.device, self.blocks)
+ print dd
+ utils.system(dd + ' > /dev/null')
+
+
+ def raw_read(self):
+ print "Timing raw read of %d megabytes" % self.megabytes
+ sys.stdout.flush()
+ dd = 'dd if=%s of=/dev/null bs=4K count=%d' % \
+ (self.fs.device, self.blocks)
+ print dd
+ utils.system(dd + ' > /dev/null')
+
+
+ def fs_write(self):
+ p = []
+ # Write out 'streams' files in parallel background tasks
+ for i in range(self.streams):
+ file = 'poo%d' % (i+1)
+ file = os.path.join(self.job.tmpdir, file)
+ dd = 'dd if=/dev/zero of=%s bs=4K count=%d' % \
+ (file, self.blocks_per_file)
+ print dd
+ p.append(subprocess.Popen(dd + ' > /dev/null',
+ shell=True))
+ print "Waiting for %d streams" % self.streams
+ # Wait for everyone to complete
+ for i in range(self.streams):
+ print "Waiting for %d" % p[i].pid
+ sys.stdout.flush()
+ os.waitpid(p[i].pid, 0)
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+
+ def fs_read(self):
+ for i in range(self.streams):
+ file = os.path.join(self.job.tmpdir, 'poo%d' % (i+1))
+ dd = 'dd if=%s of=/dev/null bs=4K count=%d' % \
+ (file, self.blocks_per_file)
+ utils.system(dd + ' > /dev/null')
+
+
+ def test(self, tag):
+ start = time.time()
+ self.raw_write()
+ self.raw_write_rate = self.megabytes / (time.time() - start)
+
+ start = time.time()
+ self.raw_read()
+ self.raw_read_rate = self.megabytes / (time.time() - start)
+
+ self.fs.mkfs(self.fstype)
+ self.fs.mount()
+ start = time.time()
+ try:
+ self.fs_write()
+ except:
+ try:
+ self.fs.unmount()
+ finally:
+ raise
+ self.fs.unmount()
+ self.fs_write_rate = self.megabytes / (time.time() - start)
+
+ self.fs.mount()
+ start = time.time()
+ try:
+ self.fs_read()
+ except:
+ try:
+ self.fs.unmount()
+ finally:
+ raise
+ read_in()
+ self.fs_read_rate = self.megabytes / (time.time() - start)
+ self.fs.unmount()
+
+
+ def execute(self, fs, fstype = 'ext2', iterations = 2, megabytes = 1000, streams = 2):
+ self.megabytes = megabytes
+ self.blocks = megabytes * 256
+ self.blocks_per_file = self.blocks / streams
+ self.fs = fs
+ self.fstype = fstype
+ self.streams = streams
+
+ print "Dumping %d megabytes across %d streams, %d times" % \
+ (megabytes, streams, iterations)
+
+ keyval = open(os.path.join(self.resultsdir, 'keyval'), 'w')
+ for i in range(iterations):
+ self.test('%d' % i)
+ t = "raw_write=%d\n" % self.raw_write_rate
+ t += "raw_read=%d\n" % self.raw_read_rate
+ t += "fs_write=%d\n" % self.fs_write_rate
+ t += "fs_read=%d\n" % self.fs_read_rate
+ t += "\n"
+ print t
+ keyval.write(t)
+ keyval.close()
+
+
+ profilers = self.job.profilers
+ if profilers.present():
+ profilers.start(self)
+ self.test('profile')
+ profilers.stop(self)
+ profilers.report(self)
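
execute() above sizes the runs in 4 KiB blocks (megabytes * 256), splits them evenly across the dd streams, and records each phase as megabytes per elapsed second in the keyval file. A small worked example of that arithmetic and output format; the path, timing, and values here are purely illustrative:

# Back-of-the-envelope for the sizing and keyval logic in execute() above.
import os
import time

megabytes, streams = 1000, 2
blocks = megabytes * 256                 # 1000 MB -> 256000 blocks of 4 KiB
blocks_per_file = blocks // streams      # 128000 blocks per dd stream

start = time.time()
time.sleep(0.05)                         # stands in for the dd runs
elapsed = time.time() - start
raw_write_rate = megabytes / elapsed     # MB/s, as recorded above

with open(os.path.join('/tmp', 'keyval-example'), 'w') as keyval:
    keyval.write('raw_write=%d\n' % raw_write_rate)
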
diff --git a/client/tests/pi_tests/pi_tests.py b/client/tests/pi_tests/pi_tests.py
index 54872f45..7f3fcf27 100644
--- a/client/tests/pi_tests/pi_tests.py
+++ b/client/tests/pi_tests/pi_tests.py
@@ -4,19 +4,19 @@ from autotest_lib.client.common_lib import utils
class pi_tests(test.test):
- version = 1
+ version = 1
- # http://www.stardust.webpages.pl/files/patches/autotest/pi_tests.tar.bz2
+ # http://www.stardust.webpages.pl/files/patches/autotest/pi_tests.tar.bz2
- def setup(self, tarball = 'pi_tests.tar.bz2'):
- autotest_utils.check_glibc_ver('2.5')
- tarball = autotest_utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- os.chdir(self.srcdir)
+ def setup(self, tarball = 'pi_tests.tar.bz2'):
+ autotest_utils.check_glibc_ver('2.5')
+ tarball = autotest_utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ os.chdir(self.srcdir)
- utils.system('make')
+ utils.system('make')
- def execute(self, args = '1 300'):
- os.chdir(self.srcdir)
- utils.system('./start.sh ' + args)
+ def execute(self, args = '1 300'):
+ os.chdir(self.srcdir)
+ utils.system('./start.sh ' + args)
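
setup() above gates the build on check_glibc_ver('2.5'), presumably because the priority-inheritance tests need a sufficiently recent glibc. A hedged sketch of how such a version gate could look on a glibc-based Linux host; this is an assumption for illustration, not autotest's actual check_glibc_ver implementation:

# Sketch of a glibc version gate like the check_glibc_ver('2.5') call above.
import os

def glibc_at_least(wanted):
    # os.confstr returns e.g. 'glibc 2.31' on glibc-based systems
    ver = os.confstr('CS_GNU_LIBC_VERSION').split()[-1]
    have = tuple(int(x) for x in ver.split('.'))
    need = tuple(int(x) for x in wanted.split('.'))
    return have >= need

if __name__ == '__main__':
    print(glibc_at_least('2.5'))
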
diff --git a/client/tests/pktgen/pktgen.py b/client/tests/pktgen/pktgen.py
index c70cb434..aaef41d4 100755
--- a/client/tests/pktgen/pktgen.py
+++ b/client/tests/pktgen/pktgen.py
@@ -4,51 +4,50 @@ from autotest_lib.client.common_lib import utils, error
class pktgen(test.test):
- version = 1
+ version = 1
- def execute(self, eth='eth0', count=50000, clone_skb=1, \
- dst_ip='192.168.210.210', dst_mac='01:02:03:04:05:07'):
- if not os.path.exists('/proc/net/pktgen'):
- utils.system('/sbin/modprobe pktgen')
- if not os.path.exists('/proc/net/pktgen'):
- raise error.UnhandledError('pktgen not loaded')
+ def execute(self, eth='eth0', count=50000, clone_skb=1, \
+ dst_ip='192.168.210.210', dst_mac='01:02:03:04:05:07'):
+ if not os.path.exists('/proc/net/pktgen'):
+ utils.system('/sbin/modprobe pktgen')
+ if not os.path.exists('/proc/net/pktgen'):
+ raise error.UnhandledError('pktgen not loaded')
- print 'Adding devices to run'
- self.pgdev = '/proc/net/pktgen/kpktgend_0'
+ print 'Adding devices to run'
+ self.pgdev = '/proc/net/pktgen/kpktgend_0'
- self.pgset('rem_device_all')
- self.pgset('add_device ' + eth)
- self.pgset('max_before_softirq 10000')
+ self.pgset('rem_device_all')
+ self.pgset('add_device ' + eth)
+ self.pgset('max_before_softirq 10000')
- # Configure the individual devices
- print 'Configuring devices'
+ # Configure the individual devices
+ print 'Configuring devices'
- self.ethdev='/proc/net/pktgen/' + eth
- self.pgdev=self.ethdev
+ self.ethdev='/proc/net/pktgen/' + eth
+ self.pgdev=self.ethdev
- if clone_skb:
- self.pgset('clone_skb %d' % (count))
- self.pgset('min_pkt_size 60')
- self.pgset('max_pkt_size 60')
- self.pgset('dst ' + dst_ip)
- self.pgset('dst_mac ' + dst_mac)
- self.pgset('count %d' % (count))
+ if clone_skb:
+ self.pgset('clone_skb %d' % (count))
+ self.pgset('min_pkt_size 60')
+ self.pgset('max_pkt_size 60')
+ self.pgset('dst ' + dst_ip)
+ self.pgset('dst_mac ' + dst_mac)
+ self.pgset('count %d' % (count))
- # Time to run
- self.pgdev='/proc/net/pktgen/pgctrl'
- self.pgset('start')
+ # Time to run
+ self.pgdev='/proc/net/pktgen/pgctrl'
+ self.pgset('start')
- output = os.path.join(self.resultsdir, eth)
- utils.system ('cp %s %s' % (self.ethdev, output))
+ output = os.path.join(self.resultsdir, eth)
+ utils.system ('cp %s %s' % (self.ethdev, output))
- def pgset(self, command):
- file = open(self.pgdev, 'w')
- file.write(command + '\n');
- file.close
-
- if not autotest_utils.grep('Result: OK', self.pgdev):
- if not autotest_utils.grep('Result: NA', self.pgdev):
- utils.system('cat ' + self.pgdev)
- # raise UnhandledError('Result not OK')
+ def pgset(self, command):
+ file = open(self.pgdev, 'w')
+ file.write(command + '\n')
+ file.close()
+ if not autotest_utils.grep('Result: OK', self.pgdev):
+ if not autotest_utils.grep('Result: NA', self.pgdev):
+ utils.system('cat ' + self.pgdev)
+ # raise UnhandledError('Result not OK')
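
pgset() above drives pktgen entirely through its /proc/net/pktgen control files: write one command per line, then read the same file back and look for 'Result: OK' (or 'Result: NA'). A small sketch of that protocol with the handle closed explicitly; the path and command in the example are illustrative, and it needs root plus the pktgen module loaded:

# Sketch of the /proc/net/pktgen control protocol relied on by pgset() above.
def pgset(pgdev, command):
    with open(pgdev, 'w') as ctl:        # command goes in via a plain write
        ctl.write(command + '\n')
    with open(pgdev) as ctl:             # status comes back on read
        status = ctl.read()
    if 'Result: OK' not in status and 'Result: NA' not in status:
        raise RuntimeError('pktgen rejected %r:\n%s' % (command, status))

# Example (root only): pgset('/proc/net/pktgen/kpktgend_0', 'add_device eth0')
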
diff --git a/client/tests/posixtest/posixtest.py b/client/tests/posixtest/posixtest.py
index e11989a7..adec69cb 100755
--- a/client/tests/posixtest/posixtest.py
+++ b/client/tests/posixtest/posixtest.py
@@ -8,22 +8,21 @@ from autotest_lib.client.common_lib import utils
__author__ = '''mohd.omar@in.ibm.com (Mohammed Omar)'''
class posixtest(test.test):
- version = 1
- # http://ufpr.dl.sourceforge.net/sourceforge/posixtest/posixtestsuite-1.5.2.tar.gz
- def setup(self, tarball = 'posixtestsuite-1.5.2.tar.gz'):
- self.posix_tarball = utils.unmap_url(self.bindir,
- tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(self.posix_tarball,
- self.srcdir)
- os.chdir(self.srcdir)
- # Applying a small patch that introduces some linux specific
- # linking options
- utils.system('patch -p1 < ../posix-linux.patch')
- utils.system('make')
+ version = 1
+ # http://ufpr.dl.sourceforge.net/sourceforge/posixtest/posixtestsuite-1.5.2.tar.gz
+ def setup(self, tarball = 'posixtestsuite-1.5.2.tar.gz'):
+ self.posix_tarball = utils.unmap_url(self.bindir,
+ tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(self.posix_tarball,
+ self.srcdir)
+ os.chdir(self.srcdir)
+ # Applying a small patch that introduces some linux specific
+ # linking options
+ utils.system('patch -p1 < ../posix-linux.patch')
+ utils.system('make')
- def execute(self):
- os.chdir(self.srcdir)
- utils.system('./run_tests THR')
-
+ def execute(self):
+ os.chdir(self.srcdir)
+ utils.system('./run_tests THR')
diff --git a/client/tests/raisetest/raisetest.py b/client/tests/raisetest/raisetest.py
index 1c968b4b..e966585d 100755
--- a/client/tests/raisetest/raisetest.py
+++ b/client/tests/raisetest/raisetest.py
@@ -3,7 +3,7 @@ from autotest_lib.client.common_lib import error
class raisetest(test.test):
- version = 1
+ version = 1
- def execute(self):
- raise error.TestError('Arrrrrrrrggggh. You are DOOOMED')
+ def execute(self):
+ raise error.TestError('Arrrrrrrrggggh. You are DOOOMED')
diff --git a/client/tests/reaim/reaim.py b/client/tests/reaim/reaim.py
index 7e43723e..9a5e832a 100755
--- a/client/tests/reaim/reaim.py
+++ b/client/tests/reaim/reaim.py
@@ -5,87 +5,87 @@ from autotest_lib.client.common_lib import utils
class reaim(test.test):
- version = 1
-
- # http://prdownloads.sourceforge.net/re-aim-7/osdl-aim-7.0.1.13.tar.gz
- def setup(self, tarball = 'osdl-aim-7.0.1.13.tar.gz'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-
- self.job.setup_dep(['libaio'])
- libs = '-L' + self.autodir + '/deps/libaio/lib -laio'
- cflags = '-I ' + self.autodir + '/deps/libaio/include'
- var_libs = 'LIBS="' + libs + '"'
- var_cflags = 'CFLAGS="' + cflags + '"'
- self.make_flags = var_libs + ' ' + var_cflags
-
- os_dep.commands('autoconf', 'automake', 'libtoolize')
- os.chdir(self.srcdir)
- utils.system('./bootstrap')
- utils.system('./configure')
- # we can't use patch here, as the Makefile is autogenerated
- # so we can't tell exactly what it looks like.
- # Perform some foul in-place sed hackery instead.
- for file in ('Makefile', 'src/Makefile'):
- utils.system('sed -i "s/^CFLAGS =/CFLAGS +=/" ' + file)
- utils.system('sed -i "s/^LIBS =/LIBS +=/" ' + file)
- utils.system(self.make_flags + ' make')
- os.rename('src/reaim', 'reaim')
-
-
- def initialize(self):
- self.ldlib = 'LD_LIBRARY_PATH=%s/deps/libaio/lib'%(self.autodir)
-
-
- def execute(self, iterations = 1, workfile = 'workfile.short',
- start = 1, end = 10, increment = 2,
- extra_args = '', tmpdir = None):
- if not tmpdir:
- tmpdir = self.tmpdir
-
- # -f workfile
- # -s <number of users to start with>
- # -e <number of users to end with>
- # -i <number of users to increment>
- workfile = os.path.join('data', workfile)
- args = "-f %s -s %d -e %d -i %d" %(workfile,start,end,increment)
- config = os.path.join(self.srcdir, 'reaim.config')
- utils.system('cp -f %s/reaim.config %s' % (self.bindir, config))
- args += ' -c ./reaim.config'
- open(config, 'a+').write("DISKDIR %s\n" % (tmpdir))
- os.chdir(self.srcdir)
- print os.getcwd()
- cmd = self.ldlib + ' ./reaim ' + args + ' ' + extra_args
-
- results = []
-
- profilers = self.job.profilers
- if not profilers.only():
- for i in range(iterations):
- results.append(utils.system_output(cmd,
- retain_output=True))
-
- # Do a profiling run if necessary
- if profilers.present():
- profilers.start(self)
- resuls.append(utils.system_output(cmd,
- retain_output=True))
- profilers.stop(self)
- profilers.report(self)
-
- self.__format_results("\n".join(results))
-
-
- def __format_results(self, results):
- out = open(self.resultsdir + '/keyval', 'w')
- for line in results.split('\n'):
- m = re.match('Max Jobs per Minute (\d+)', line)
- if m:
- max_jobs_per_min = m.group(1)
- if re.match(r"^[0-9\. ]+$", line):
- fields = line.split()
- print >> out, """\
+ version = 1
+
+ # http://prdownloads.sourceforge.net/re-aim-7/osdl-aim-7.0.1.13.tar.gz
+ def setup(self, tarball = 'osdl-aim-7.0.1.13.tar.gz'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+
+ self.job.setup_dep(['libaio'])
+ libs = '-L' + self.autodir + '/deps/libaio/lib -laio'
+ cflags = '-I ' + self.autodir + '/deps/libaio/include'
+ var_libs = 'LIBS="' + libs + '"'
+ var_cflags = 'CFLAGS="' + cflags + '"'
+ self.make_flags = var_libs + ' ' + var_cflags
+
+ os_dep.commands('autoconf', 'automake', 'libtoolize')
+ os.chdir(self.srcdir)
+ utils.system('./bootstrap')
+ utils.system('./configure')
+ # we can't use patch here, as the Makefile is autogenerated
+ # so we can't tell exactly what it looks like.
+ # Perform some foul in-place sed hackery instead.
+ for file in ('Makefile', 'src/Makefile'):
+ utils.system('sed -i "s/^CFLAGS =/CFLAGS +=/" ' + file)
+ utils.system('sed -i "s/^LIBS =/LIBS +=/" ' + file)
+ utils.system(self.make_flags + ' make')
+ os.rename('src/reaim', 'reaim')
+
+
+ def initialize(self):
+ self.ldlib = 'LD_LIBRARY_PATH=%s/deps/libaio/lib'%(self.autodir)
+
+
+ def execute(self, iterations = 1, workfile = 'workfile.short',
+ start = 1, end = 10, increment = 2,
+ extra_args = '', tmpdir = None):
+ if not tmpdir:
+ tmpdir = self.tmpdir
+
+ # -f workfile
+ # -s <number of users to start with>
+ # -e <number of users to end with>
+ # -i <number of users to increment>
+ workfile = os.path.join('data', workfile)
+ args = "-f %s -s %d -e %d -i %d" %(workfile,start,end,increment)
+ config = os.path.join(self.srcdir, 'reaim.config')
+ utils.system('cp -f %s/reaim.config %s' % (self.bindir, config))
+ args += ' -c ./reaim.config'
+ open(config, 'a+').write("DISKDIR %s\n" % (tmpdir))
+ os.chdir(self.srcdir)
+ print os.getcwd()
+ cmd = self.ldlib + ' ./reaim ' + args + ' ' + extra_args
+
+ results = []
+
+ profilers = self.job.profilers
+ if not profilers.only():
+ for i in range(iterations):
+ results.append(utils.system_output(cmd,
+ retain_output=True))
+
+ # Do a profiling run if necessary
+ if profilers.present():
+ profilers.start(self)
+ results.append(utils.system_output(cmd,
+ retain_output=True))
+ profilers.stop(self)
+ profilers.report(self)
+
+ self.__format_results("\n".join(results))
+
+
+ def __format_results(self, results):
+ out = open(self.resultsdir + '/keyval', 'w')
+ for line in results.split('\n'):
+ m = re.match('Max Jobs per Minute (\d+)', line)
+ if m:
+ max_jobs_per_min = m.group(1)
+ if re.match(r"^[0-9\. ]+$", line):
+ fields = line.split()
+ print >> out, """\
max_jobs_per_min=%s
num_forked=%s
parent_time=%s
@@ -97,4 +97,4 @@ std_dev_time=%s
std_dev_pct=%s
jti=%s
""" % tuple([max_jobs_per_min] + fields)
- out.close()
+ out.close()
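
__format_results() above pulls two things out of the reaim report: the 'Max Jobs per Minute' line and the all-numeric result rows whose fields fill the keyval template. The same parsing applied to a made-up report fragment:

# Sketch of the parsing done by __format_results() above; the sample string
# is invented for illustration and only mirrors the shapes matched by the
# regexes in the test.
import re

sample = "Max Jobs per Minute 1234\n  5 1.0 2.0 3.0 4.0 5.0 6.0 7.0 8.0\n"

max_jobs_per_min = None
for line in sample.splitlines():
    m = re.match(r'Max Jobs per Minute (\d+)', line.strip())
    if m:
        max_jobs_per_min = m.group(1)
    elif re.match(r'^[0-9. ]+$', line.strip()):
        fields = line.split()            # one all-numeric result row
        print('max_jobs_per_min=%s fields=%s' % (max_jobs_per_min, fields))
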
diff --git a/client/tests/rmaptest/rmaptest.py b/client/tests/rmaptest/rmaptest.py
index f4cf3af1..9b655470 100644
--- a/client/tests/rmaptest/rmaptest.py
+++ b/client/tests/rmaptest/rmaptest.py
@@ -5,24 +5,24 @@ from autotest_lib.client.common_lib import utils
# tests is a simple array of "cmd" "arguments"
tests = [["rmaptest", "-h -i100 -n100 -s100 -t100 -V10 -v file1.dat"],
- ["rmaptest", "-l -i100 -n100 -s100 -t100 -V10 -v file2.dat"],
- ["rmaptest", "-r -i100 -n100 -s100 -t100 -V10 -v file3.dat"],
- ]
+ ["rmaptest", "-l -i100 -n100 -s100 -t100 -V10 -v file2.dat"],
+ ["rmaptest", "-r -i100 -n100 -s100 -t100 -V10 -v file3.dat"],
+ ]
name = 0
arglist = 1
class rmaptest(test.test):
- version = 1
- preserve_srcdir = True
+ version = 1
+ preserve_srcdir = True
- def setup(self):
- os.chdir(self.srcdir)
- utils.system('gcc -Wall -o rmaptest rmap-test.c')
+ def setup(self):
+ os.chdir(self.srcdir)
+ utils.system('gcc -Wall -o rmaptest rmap-test.c')
- def execute(self, args = ''):
- os.chdir(self.tmpdir)
- for test in tests:
- cmd = self.srcdir + '/' + test[name] + ' ' \
- + args + ' ' + test[arglist]
- utils.system(cmd)
+ def execute(self, args = ''):
+ os.chdir(self.tmpdir)
+ for test in tests:
+ cmd = self.srcdir + '/' + test[name] + ' ' \
+ + args + ' ' + test[arglist]
+ utils.system(cmd)
diff --git a/client/tests/rtlinuxtests/rtlinuxtests.py b/client/tests/rtlinuxtests/rtlinuxtests.py
index e6df6e31..8c27432d 100644
--- a/client/tests/rtlinuxtests/rtlinuxtests.py
+++ b/client/tests/rtlinuxtests/rtlinuxtests.py
@@ -5,27 +5,27 @@ from autotest_lib.client.common_lib import utils
# tests is a simple array of "cmd" "arguments"
tests = [["./run.sh", "tests=func"],
- ["./run.sh", "tests=pi-tests"],
- ]
+ ["./run.sh", "tests=pi-tests"],
+ ]
name = 0
arglist = 1
class rtlinuxtests(test.test):
- version = 1
- preserve_srcdir = True
+ version = 1
+ preserve_srcdir = True
- # http://www.kernel.org/pub/linux/kernel/people/dvhart/realtime/tests/tests.tar.bz2
+ # http://www.kernel.org/pub/linux/kernel/people/dvhart/realtime/tests/tests.tar.bz2
- def setup(self, tarball = 'tests.tar.bz2'):
- autotest_utils.check_glibc_ver('2.5')
- self.tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
- os.chdir(self.srcdir)
- utils.system('patch -p1 < ../path-fix.patch')
+ def setup(self, tarball = 'tests.tar.bz2'):
+ autotest_utils.check_glibc_ver('2.5')
+ self.tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
+ os.chdir(self.srcdir)
+ utils.system('patch -p1 < ../path-fix.patch')
- def execute(self, args = ''):
- os.chdir(self.srcdir)
- for test in tests:
- cmd = 'echo y | ' + test[name] + ' ' + args + ' ' + test[arglist]
- utils.system(cmd)
+ def execute(self, args = ''):
+ os.chdir(self.srcdir)
+ for test in tests:
+ cmd = 'echo y | ' + test[name] + ' ' + args + ' ' + test[arglist]
+ utils.system(cmd)
diff --git a/client/tests/rttester/rttester.py b/client/tests/rttester/rttester.py
index 97cea0a3..69a2e9ff 100644
--- a/client/tests/rttester/rttester.py
+++ b/client/tests/rttester/rttester.py
@@ -4,15 +4,15 @@ from autotest_lib.client.common_lib import utils
class rttester(test.test):
- version = 1
+ version = 1
- # http://www.stardust.webpages.pl/files/patches/autotest/rttester.tar.bz2
+ # http://www.stardust.webpages.pl/files/patches/autotest/rttester.tar.bz2
- def setup(self, tarball = 'rttester.tar.bz2'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ def setup(self, tarball = 'rttester.tar.bz2'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- def execute(self):
- os.chdir(self.srcdir)
- utils.system(self.srcdir + '/check-all.sh')
+ def execute(self):
+ os.chdir(self.srcdir)
+ utils.system(self.srcdir + '/check-all.sh')
diff --git a/client/tests/scrashme/scrashme.py b/client/tests/scrashme/scrashme.py
index 3d8b2371..30ace1c9 100644
--- a/client/tests/scrashme/scrashme.py
+++ b/client/tests/scrashme/scrashme.py
@@ -4,31 +4,31 @@ from autotest_lib.client.common_lib import utils
class scrashme(test.test):
- version = 1
+ version = 1
- # http://www.codemonkey.org.uk/projects/git-snapshots/scrashme/scrashme-2007-07-08.tar.gz
- def setup(self, tarball = 'scrashme-2007-07-08.tar.gz'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- os.chdir(self.srcdir)
+ # http://www.codemonkey.org.uk/projects/git-snapshots/scrashme/scrashme-2007-07-08.tar.gz
+ def setup(self, tarball = 'scrashme-2007-07-08.tar.gz'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ os.chdir(self.srcdir)
- utils.system('make')
-
- def execute(self, iterations = 1, args_list = ''):
- if len(args_list) != 0:
- args = '' + args_list
- else:
- args = '-c100 -z'
+ utils.system('make')
- profilers = self.job.profilers
- if not profilers.only():
- for i in range(iterations):
- utils.system(self.srcdir + '/scrashme ' + args)
+ def execute(self, iterations = 1, args_list = ''):
+ if len(args_list) != 0:
+ args = '' + args_list
+ else:
+ args = '-c100 -z'
- # Do a profiling run if necessary
- if profilers.present():
- profilers.start(self)
- utils.system(self.srcdir + '/scrashme ' + args)
- profilers.stop(self)
- profilers.report(self)
+ profilers = self.job.profilers
+ if not profilers.only():
+ for i in range(iterations):
+ utils.system(self.srcdir + '/scrashme ' + args)
+
+ # Do a profiling run if necessary
+ if profilers.present():
+ profilers.start(self)
+ utils.system(self.srcdir + '/scrashme ' + args)
+ profilers.stop(self)
+ profilers.report(self)
diff --git a/client/tests/selftest/selftest.py b/client/tests/selftest/selftest.py
index 1baecf9a..825b9cfb 100644
--- a/client/tests/selftest/selftest.py
+++ b/client/tests/selftest/selftest.py
@@ -4,49 +4,49 @@ from autotest_lib.client.common_lib import error
class selftest(test.test):
- version = 1
-
- def setup(self):
- name = self.job.resultdir + '/sequence'
- if (not os.path.exists(name)):
- fd = file(name, 'w')
- fd.write('0')
- fd.close()
-
- def __mark(self, checkpoint):
- name = self.job.resultdir + '/sequence'
- fd = file(name, 'r')
- current = int(fd.readline())
- fd.close()
-
- current += 1
- fd = file(name + '.new', 'w')
- fd.write('%d' % current)
- fd.close()
-
- os.rename(name + '.new', name)
-
- print "checkpoint %d %d" % (current, checkpoint)
-
- if (current != checkpoint):
- raise error.JobError("selftest: sequence was " +
- "%d when %d expected" % (current, checkpoint))
-
- def __throw(self):
- __does_not_exist = __does_not_exist_either
-
- def __print(self, msg):
- sys.stdout.write(msg)
-
- def __warn(self, msg):
- sys.stderr.write(msg)
-
- def execute(self, cmd, *args):
- if cmd == 'mark':
- self.__mark(*args)
- elif cmd == 'throw':
- self.__throw(*args)
- elif cmd == 'print':
- self.__print(*args)
- elif cmd == 'warn':
- self.__warn(*args)
+ version = 1
+
+ def setup(self):
+ name = self.job.resultdir + '/sequence'
+ if (not os.path.exists(name)):
+ fd = file(name, 'w')
+ fd.write('0')
+ fd.close()
+
+ def __mark(self, checkpoint):
+ name = self.job.resultdir + '/sequence'
+ fd = file(name, 'r')
+ current = int(fd.readline())
+ fd.close()
+
+ current += 1
+ fd = file(name + '.new', 'w')
+ fd.write('%d' % current)
+ fd.close()
+
+ os.rename(name + '.new', name)
+
+ print "checkpoint %d %d" % (current, checkpoint)
+
+ if (current != checkpoint):
+ raise error.JobError("selftest: sequence was " +
+ "%d when %d expected" % (current, checkpoint))
+
+ def __throw(self):
+ __does_not_exist = __does_not_exist_either
+
+ def __print(self, msg):
+ sys.stdout.write(msg)
+
+ def __warn(self, msg):
+ sys.stderr.write(msg)
+
+ def execute(self, cmd, *args):
+ if cmd == 'mark':
+ self.__mark(*args)
+ elif cmd == 'throw':
+ self.__throw(*args)
+ elif cmd == 'print':
+ self.__print(*args)
+ elif cmd == 'warn':
+ self.__warn(*args)
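
__mark() above bumps its sequence counter by writing the new value to '<name>.new' and then os.rename()ing it over the original, so an interrupted run never leaves a half-written sequence file. A standalone sketch of that write-then-rename update; the path is illustrative:

# Sketch of the atomic checkpoint bookkeeping used by __mark() above.
import os

def bump_sequence(path):
    try:
        with open(path) as f:
            current = int(f.readline())
    except FileNotFoundError:
        current = 0
    current += 1
    with open(path + '.new', 'w') as f:
        f.write('%d' % current)
    os.rename(path + '.new', path)       # atomic replace on POSIX filesystems
    return current

if __name__ == '__main__':
    print(bump_sequence('/tmp/sequence-example'))
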
diff --git a/client/tests/signaltest/signaltest.py b/client/tests/signaltest/signaltest.py
index 9c483c98..be4e095c 100644
--- a/client/tests/signaltest/signaltest.py
+++ b/client/tests/signaltest/signaltest.py
@@ -4,14 +4,14 @@ from autotest_lib.client.common_lib import utils
class signaltest(test.test):
- version = 1
- preserve_srcdir = True
+ version = 1
+ preserve_srcdir = True
- # git://git.kernel.org/pub/scm/linux/kernel/git/tglx/rt-tests.git
+ # git://git.kernel.org/pub/scm/linux/kernel/git/tglx/rt-tests.git
- def setup(self):
- os.chdir(self.srcdir)
- utils.system('make')
+ def setup(self):
+ os.chdir(self.srcdir)
+ utils.system('make')
- def execute(self, args = '-t 10 -l 100000'):
- utils.system(self.srcdir + '/signaltest ' + args)
+ def execute(self, args = '-t 10 -l 100000'):
+ utils.system(self.srcdir + '/signaltest ' + args)
diff --git a/client/tests/sleeptest/sleeptest.py b/client/tests/sleeptest/sleeptest.py
index 76c03831..35912f10 100755
--- a/client/tests/sleeptest/sleeptest.py
+++ b/client/tests/sleeptest/sleeptest.py
@@ -2,11 +2,11 @@ import time
from autotest_lib.client.bin import test
class sleeptest(test.test):
- version = 1
+ version = 1
- def execute(self, seconds = 1):
- profilers = self.job.profilers
- profilers.start(self)
- time.sleep(seconds)
- profilers.stop(self)
- profilers.report(self)
+ def execute(self, seconds = 1):
+ profilers = self.job.profilers
+ profilers.start(self)
+ time.sleep(seconds)
+ profilers.stop(self)
+ profilers.report(self)
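
sleeptest is the smallest instance of the profiler bracket that recurs in most tests above: start the profilers, run the workload, stop, report. A context-manager sketch of the same bracket; the Profilers class here is a stand-in for illustration, not autotest's job.profilers API:

# Sketch of the start/stop/report bracket around a workload.
import contextlib
import time

class Profilers:
    def start(self, test): print('profilers: start')
    def stop(self, test): print('profilers: stop')
    def report(self, test): print('profilers: report')

@contextlib.contextmanager
def profiled(profilers, test):
    profilers.start(test)
    try:
        yield
    finally:
        profilers.stop(test)
        profilers.report(test)

with profiled(Profilers(), test=None):
    time.sleep(1)                        # the workload being measured
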
diff --git a/client/tests/sparse/sparse.py b/client/tests/sparse/sparse.py
index 3a434cdd..f6ff11d5 100755
--- a/client/tests/sparse/sparse.py
+++ b/client/tests/sparse/sparse.py
@@ -4,25 +4,25 @@ from autotest_lib.client.common_lib import utils
class sparse(test.test):
- version = 1
+ version = 1
- # http://www.codemonkey.org.uk/projects/git-snapshots/sparse/sparse-2006-04-28.tar.gz
- def setup(self, tarball = 'sparse-2006-04-28.tar.gz'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- os.chdir(self.srcdir)
+ # http://www.codemonkey.org.uk/projects/git-snapshots/sparse/sparse-2006-04-28.tar.gz
+ def setup(self, tarball = 'sparse-2006-04-28.tar.gz'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ os.chdir(self.srcdir)
- utils.system('make')
- utils.system('ln check sparse')
-
- self.top_dir = self.job.tmpdir+'/sparse'
-
- def execute(self, base_tree, patches, config, config_list = None):
- kernel = self.job.kernel(base_tree, self.resultsdir)
- kernel.patch(patches)
- kernel.config(config, config_list)
+ utils.system('make')
+ utils.system('ln check sparse')
- os.environ['PATH'] = self.srcdir + ':' + os.environ['PATH']
- results = os.path.join (self.resultsdir, 'sparse')
- kernel.build(make_opts = 'C=1', logfile = results)
+ self.top_dir = self.job.tmpdir+'/sparse'
+
+ def execute(self, base_tree, patches, config, config_list = None):
+ kernel = self.job.kernel(base_tree, self.resultsdir)
+ kernel.patch(patches)
+ kernel.config(config, config_list)
+
+ os.environ['PATH'] = self.srcdir + ':' + os.environ['PATH']
+ results = os.path.join (self.resultsdir, 'sparse')
+ kernel.build(make_opts = 'C=1', logfile = results)
diff --git a/client/tests/spew/spew.py b/client/tests/spew/spew.py
index 7c04cc88..4c1cb557 100755
--- a/client/tests/spew/spew.py
+++ b/client/tests/spew/spew.py
@@ -4,41 +4,41 @@ from autotest_lib.client.common_lib import utils
class spew(test.test):
- version = 1
-
- # ftp://ftp.berlios.de/pub/spew/1.0.5/spew-1.0.5.tgz
- def setup(self, tarball = 'spew-1.0.5.tgz'):
- self.tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
-
- os.chdir(self.srcdir)
- utils.system('./configure')
- utils.system('make')
-
-
- def execute(self, testdir = None, iterations = 1, filesize='100M', type='write', pattern='random'):
- cmd = os.path.join(self.srcdir, 'src/spew')
- if not testdir:
- testdir = self.tmpdir
- tmpfile = os.path.join(testdir, 'spew-test.%d' % os.getpid())
- results = os.path.join(self.resultsdir, 'stdout')
- args = '--%s -i %d -p %s -b 2k -B 2M %s %s' % \
- (type, iterations, pattern, filesize, tmpfile)
- cmd += ' ' + args
-
- # Do a profiling run if necessary
- profilers = self.job.profilers
- if profilers.present():
- profilers.start(self)
-
- open(self.resultsdir + '/command', 'w').write(cmd + '\n')
- self.job.stdout.redirect(results)
- try:
- utils.system(cmd)
- finally:
- self.job.stdout.restore()
-
- if profilers.present():
- profilers.stop(self)
- profilers.report(self)
+ version = 1
+
+ # ftp://ftp.berlios.de/pub/spew/1.0.5/spew-1.0.5.tgz
+ def setup(self, tarball = 'spew-1.0.5.tgz'):
+ self.tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
+
+ os.chdir(self.srcdir)
+ utils.system('./configure')
+ utils.system('make')
+
+
+ def execute(self, testdir = None, iterations = 1, filesize='100M', type='write', pattern='random'):
+ cmd = os.path.join(self.srcdir, 'src/spew')
+ if not testdir:
+ testdir = self.tmpdir
+ tmpfile = os.path.join(testdir, 'spew-test.%d' % os.getpid())
+ results = os.path.join(self.resultsdir, 'stdout')
+ args = '--%s -i %d -p %s -b 2k -B 2M %s %s' % \
+ (type, iterations, pattern, filesize, tmpfile)
+ cmd += ' ' + args
+
+ # Do a profiling run if necessary
+ profilers = self.job.profilers
+ if profilers.present():
+ profilers.start(self)
+
+ open(self.resultsdir + '/command', 'w').write(cmd + '\n')
+ self.job.stdout.redirect(results)
+ try:
+ utils.system(cmd)
+ finally:
+ self.job.stdout.restore()
+
+ if profilers.present():
+ profilers.stop(self)
+ profilers.report(self)
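
execute() above saves the exact command line and redirects job stdout into the results directory for the duration of the run, restoring it afterwards. A sketch of that capture-and-restore using only the standard library; the output path is illustrative:

# Sketch of the stdout capture done around utils.system(cmd) above, with
# contextlib.redirect_stdout standing in for job.stdout.redirect()/restore().
import contextlib
import io

buf = io.StringIO()
with contextlib.redirect_stdout(buf):
    print('spew would write its per-run output here')

with open('/tmp/spew-stdout-example', 'w') as results:
    results.write(buf.getvalue())
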
diff --git a/client/tests/stress/stress.py b/client/tests/stress/stress.py
index 5714045b..d6688662 100644
--- a/client/tests/stress/stress.py
+++ b/client/tests/stress/stress.py
@@ -4,56 +4,56 @@ from autotest_lib.client.common_lib import utils
class stress(test.test):
- version = 1
-
- # http://weather.ou.edu/~apw/projects/stress/stress-0.18.8.tar.gz
- def setup(self, tarball = 'stress-0.18.8.tar.gz'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- os.chdir(self.srcdir)
-
- utils.system('./configure')
- utils.system('make')
-
-
- def execute(self, iterations = 1, args = ''):
- if not args:
- threads = 2*autotest_utils.count_cpus()
- args = '-c %d -i %d -m %d -d %d -t 60 -v' % \
- (threads, threads, threads, threads)
-
- profilers = self.job.profilers
- if not profilers.only():
- for i in range(iterations):
- utils.system(self.srcdir + '/src/stress ' + args)
-
- # Do a profiling run if necessary
- if profilers.present():
- profilers.start(self)
- utils.system(self.srcdir + '/src/stress ' + args)
- profilers.stop(self)
- profilers.report(self)
-
-# -v Turn up verbosity.
-# -q Turn down verbosity.
-# -n Show what would have been done (dry-run)
-# -t secs Time out after secs seconds.
-# --backoff usecs Wait for factor of usecs microseconds before starting
-# -c forks Spawn forks processes each spinning on sqrt().
-# -i forks Spawn forks processes each spinning on sync().
-# -m forks Spawn forks processes each spinning on malloc().
-# --vm-bytes bytes Allocate bytes number of bytes. The default is 1.
-# --vm-hang Instruct each vm hog process to go to sleep after
-# allocating memory. This contrasts with their normal
-# behavior, which is to free the memory and reallocate
-# ad infinitum. This is useful for simulating low memory
-# conditions on a machine. For example, the following
-# command allocates 256M of RAM and holds it until killed.
+ version = 1
+
+ # http://weather.ou.edu/~apw/projects/stress/stress-0.18.8.tar.gz
+ def setup(self, tarball = 'stress-0.18.8.tar.gz'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ os.chdir(self.srcdir)
+
+ utils.system('./configure')
+ utils.system('make')
+
+
+ def execute(self, iterations = 1, args = ''):
+ if not args:
+ threads = 2*autotest_utils.count_cpus()
+ args = '-c %d -i %d -m %d -d %d -t 60 -v' % \
+ (threads, threads, threads, threads)
+
+ profilers = self.job.profilers
+ if not profilers.only():
+ for i in range(iterations):
+ utils.system(self.srcdir + '/src/stress ' + args)
+
+ # Do a profiling run if necessary
+ if profilers.present():
+ profilers.start(self)
+ utils.system(self.srcdir + '/src/stress ' + args)
+ profilers.stop(self)
+ profilers.report(self)
+
+# -v Turn up verbosity.
+# -q Turn down verbosity.
+# -n Show what would have been done (dry-run)
+# -t secs Time out after secs seconds.
+# --backoff usecs Wait for factor of usecs microseconds before starting
+# -c forks Spawn forks processes each spinning on sqrt().
+# -i forks Spawn forks processes each spinning on sync().
+# -m forks Spawn forks processes each spinning on malloc().
+# --vm-bytes bytes Allocate bytes number of bytes. The default is 1.
+# --vm-hang Instruct each vm hog process to go to sleep after
+# allocating memory. This contrasts with their normal
+# behavior, which is to free the memory and reallocate
+# ad infinitum. This is useful for simulating low memory
+# conditions on a machine. For example, the following
+# command allocates 256M of RAM and holds it until killed.
#
-# % stress --vm 2 --vm-bytes 128M --vm-hang
-# -d forks Spawn forks processes each spinning on write().
-# --hdd-bytes bytes Write bytes number of bytes. The default is 1GB.
-# --hdd-noclean Do not unlink file(s) to which random data is written.
+# % stress --vm 2 --vm-bytes 128M --vm-hang
+# -d forks Spawn forks processes each spinning on write().
+# --hdd-bytes bytes Write bytes number of bytes. The default is 1GB.
+# --hdd-noclean Do not unlink file(s) to which random data is written.
#
-# Note: Suffixes may be s,m,h,d,y (time) or k,m,g (size).
+# Note: Suffixes may be s,m,h,d,y (time) or k,m,g (size).
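
When no args are given, execute() above sizes the load at twice the CPU count for each of the four worker types and caps the run at 60 seconds. The same default, sketched with os.cpu_count() standing in for autotest_utils.count_cpus():

# Sketch of the default argument construction in execute() above.
import os

threads = 2 * (os.cpu_count() or 1)
args = '-c %d -i %d -m %d -d %d -t 60 -v' % (threads, threads, threads, threads)
print('stress ' + args)   # e.g. "stress -c 8 -i 8 -m 8 -d 8 -t 60 -v" on 4 CPUs
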
diff --git a/client/tests/sysbench/sysbench.py b/client/tests/sysbench/sysbench.py
index 58b7d25f..571c37f6 100644
--- a/client/tests/sysbench/sysbench.py
+++ b/client/tests/sysbench/sysbench.py
@@ -4,188 +4,188 @@ from autotest_lib.client.common_lib import utils
class sysbench(test.test):
- version = 1
-
- # http://osdn.dl.sourceforge.net/sourceforge/sysbench/sysbench-0.4.8.tar.gz
- def setup(self, tarball = 'sysbench-0.4.8.tar.bz2'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- self.job.setup_dep(['pgsql', 'mysql'])
-
- os.chdir(self.srcdir)
-
- pgsql_dir = os.path.join(self.autodir, 'deps/pgsql/pgsql')
- mysql_dir = os.path.join(self.autodir, 'deps/mysql/mysql')
-
- # configure wants to get at pg_config, so add its path
- utils.system('PATH=%s/bin:$PATH ./configure --with-mysql=%s --with-pgsql' % (pgsql_dir, mysql_dir))
- utils.system('make -j %d' % autotest_utils.count_cpus())
-
-
- def execute(self, db_type = 'pgsql', build = 1, \
- num_threads = autotest_utils.count_cpus(), max_time = 60, \
- read_only = 0, args = ''):
- plib = os.path.join(self.autodir, 'deps/pgsql/pgsql/lib')
- mlib = os.path.join(self.autodir, 'deps/mysql/mysql/lib/mysql')
- ld_path = prepend_path(plib, environ('LD_LIBRARY_PATH'))
- ld_path = prepend_path(mlib, ld_path)
- os.environ['LD_LIBRARY_PATH'] = ld_path
-
- # The databases don't want to run as root so run them as nobody
- self.dbuser = 'nobody'
- self.dbuid = pwd.getpwnam(self.dbuser)[2]
- self.sudo = 'sudo -u ' + self.dbuser + ' '
-
- # Check for nobody user
- try:
- utils.system(self.sudo + '/bin/true')
- except:
- raise TestError('Unable to run as nobody')
-
- if (db_type == 'pgsql'):
- self.execute_pgsql(build, num_threads, max_time, \
- read_only, args)
- elif (db_type == 'mysql'):
- self.execute_mysql(build, num_threads, max_time, \
- read_only, args)
-
-
- def execute_pgsql(self, build, num_threads, max_time, read_only, args):
- bin = os.path.join(self.autodir, 'deps/pgsql/pgsql/bin')
- data = os.path.join(self.autodir, 'deps/pgsql/pgsql/data')
- log = os.path.join(self.debugdir, 'pgsql.log')
-
- if build == 1:
- utils.system('rm -rf ' + data)
- os.mkdir(data)
- os.chown(data, self.dbuid, 0)
- utils.system(self.sudo + bin + '/initdb -D ' + data)
-
- # Database must be able to write its output into debugdir
- os.chown(self.debugdir, self.dbuid, 0)
- utils.system(self.sudo + bin + '/pg_ctl -D ' + data + \
- ' -l ' + log + ' start')
-
- # Wait for database to start
- time.sleep(5)
-
- try:
- base_cmd = self.srcdir + '/sysbench/sysbench ' + \
- '--test=oltp --db-driver=pgsql ' + \
- '--pgsql-user=' + self.dbuser
-
- if build == 1:
- utils.system(self.sudo + bin + '/createdb sbtest')
- cmd = base_cmd +' prepare'
- utils.system(cmd)
-
- cmd = base_cmd + \
- ' --num-threads=' + str(num_threads) + \
- ' --max-time=' + str(max_time) + \
- ' --max-requests=0'
-
- if read_only:
- cmd = cmd + ' --oltp-read-only=on'
-
- results = []
-
- profilers = self.job.profilers
- if not profilers.only():
- results.append(utils.system_output(cmd + ' run',
- retain_output=True))
-
- # Do a profiling run if necessary
- if profilers.present():
- profilers.start(self)
- results.append("Profiling run ...")
- results.append(utils.system_output(cmd + ' run',
- retain_output=True))
- profilers.stop(self)
- profilers.report(self)
- except:
- utils.system(self.sudo + bin + '/pg_ctl -D ' + data + ' stop')
- raise
-
- utils.system(self.sudo + bin + '/pg_ctl -D ' + data + ' stop')
-
- self.__format_results("\n".join(results))
-
-
- def execute_mysql(self, build, num_threads, max_time, read_only, args):
- bin = os.path.join(self.autodir, 'deps/mysql/mysql/bin')
- data = os.path.join(self.autodir, 'deps/mysql/mysql/var')
- log = os.path.join(self.debugdir, 'mysql.log')
-
- if build == 1:
- utils.system('rm -rf ' + data)
- os.mkdir(data)
- os.chown(data, self.dbuid, 0)
- utils.system(bin + '/mysql_install_db --user=' + self.dbuser)
-
- utils.system(bin + '/mysqld_safe --log-error=' + log + \
- ' --user=' + self.dbuser + ' &')
-
- # Wait for database to start
- time.sleep(5)
-
- try:
- base_cmd = self.srcdir + '/sysbench/sysbench ' + \
- '--test=oltp --db-driver=mysql ' + \
- '--mysql-user=root'
-
- if build == 1:
- utils.system('echo "create database sbtest" | ' + \
- bin + '/mysql -u root')
- cmd = base_cmd +' prepare'
- utils.system(cmd)
-
- cmd = base_cmd + \
- ' --num-threads=' + str(num_threads) + \
- ' --max-time=' + str(max_time) + \
- ' --max-requests=0'
-
- if read_only:
- cmd = cmd + ' --oltp-read-only=on'
-
- results = []
-
- profilers = self.job.profilers
- if not profilers.only():
- results.append(utils.system_output(cmd + ' run',
- retain_output=True))
-
- # Do a profiling run if necessary
- if profilers.present():
- profilers.start(self)
- results.append("Profiling run ...")
- results.append(utils.system_output(cmd + ' run',
- retain_output=True))
- profilers.stop(self)
- profilers.report(self)
- except:
- utils.system(bin + '/mysqladmin shutdown')
- raise
-
- utils.system(bin + '/mysqladmin shutdown')
-
- self.__format_results("\n".join(results))
-
-
- def __format_results(self, results):
- threads = 0
- tps = 0
-
- out = open(self.resultsdir + '/keyval', 'w')
- for line in results.split('\n'):
- threads_re = re.search('Number of threads: (\d+)', line)
- if threads_re:
- threads = threads_re.group(1)
-
- tps_re = re.search('transactions:\s+\d+\s+\((\S+) per sec.\)', line)
- if tps_re:
- tps = tps_re.group(1)
- break
-
- print >> out, 'threads=%s\ntps=%s' % (threads, tps)
- out.close()
+ version = 1
+
+ # http://osdn.dl.sourceforge.net/sourceforge/sysbench/sysbench-0.4.8.tar.gz
+ def setup(self, tarball = 'sysbench-0.4.8.tar.bz2'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ self.job.setup_dep(['pgsql', 'mysql'])
+
+ os.chdir(self.srcdir)
+
+ pgsql_dir = os.path.join(self.autodir, 'deps/pgsql/pgsql')
+ mysql_dir = os.path.join(self.autodir, 'deps/mysql/mysql')
+
+ # configure wants to get at pg_config, so add its path
+ utils.system('PATH=%s/bin:$PATH ./configure --with-mysql=%s --with-pgsql' % (pgsql_dir, mysql_dir))
+ utils.system('make -j %d' % autotest_utils.count_cpus())
+
+
+ def execute(self, db_type = 'pgsql', build = 1, \
+ num_threads = autotest_utils.count_cpus(), max_time = 60, \
+ read_only = 0, args = ''):
+ plib = os.path.join(self.autodir, 'deps/pgsql/pgsql/lib')
+ mlib = os.path.join(self.autodir, 'deps/mysql/mysql/lib/mysql')
+ ld_path = prepend_path(plib, environ('LD_LIBRARY_PATH'))
+ ld_path = prepend_path(mlib, ld_path)
+ os.environ['LD_LIBRARY_PATH'] = ld_path
+
+ # The databases don't want to run as root so run them as nobody
+ self.dbuser = 'nobody'
+ self.dbuid = pwd.getpwnam(self.dbuser)[2]
+ self.sudo = 'sudo -u ' + self.dbuser + ' '
+
+ # Check for nobody user
+ try:
+ utils.system(self.sudo + '/bin/true')
+ except:
+ raise TestError('Unable to run as nobody')
+
+ if (db_type == 'pgsql'):
+ self.execute_pgsql(build, num_threads, max_time, \
+ read_only, args)
+ elif (db_type == 'mysql'):
+ self.execute_mysql(build, num_threads, max_time, \
+ read_only, args)
+
+
+ def execute_pgsql(self, build, num_threads, max_time, read_only, args):
+ bin = os.path.join(self.autodir, 'deps/pgsql/pgsql/bin')
+ data = os.path.join(self.autodir, 'deps/pgsql/pgsql/data')
+ log = os.path.join(self.debugdir, 'pgsql.log')
+
+ if build == 1:
+ utils.system('rm -rf ' + data)
+ os.mkdir(data)
+ os.chown(data, self.dbuid, 0)
+ utils.system(self.sudo + bin + '/initdb -D ' + data)
+
+ # Database must be able to write its output into debugdir
+ os.chown(self.debugdir, self.dbuid, 0)
+ utils.system(self.sudo + bin + '/pg_ctl -D ' + data + \
+ ' -l ' + log + ' start')
+
+ # Wait for database to start
+ time.sleep(5)
+
+ try:
+ base_cmd = self.srcdir + '/sysbench/sysbench ' + \
+ '--test=oltp --db-driver=pgsql ' + \
+ '--pgsql-user=' + self.dbuser
+
+ if build == 1:
+ utils.system(self.sudo + bin + '/createdb sbtest')
+ cmd = base_cmd +' prepare'
+ utils.system(cmd)
+
+ cmd = base_cmd + \
+ ' --num-threads=' + str(num_threads) + \
+ ' --max-time=' + str(max_time) + \
+ ' --max-requests=0'
+
+ if read_only:
+ cmd = cmd + ' --oltp-read-only=on'
+
+ results = []
+
+ profilers = self.job.profilers
+ if not profilers.only():
+ results.append(utils.system_output(cmd + ' run',
+ retain_output=True))
+
+ # Do a profiling run if necessary
+ if profilers.present():
+ profilers.start(self)
+ results.append("Profiling run ...")
+ results.append(utils.system_output(cmd + ' run',
+ retain_output=True))
+ profilers.stop(self)
+ profilers.report(self)
+ except:
+ utils.system(self.sudo + bin + '/pg_ctl -D ' + data + ' stop')
+ raise
+
+ utils.system(self.sudo + bin + '/pg_ctl -D ' + data + ' stop')
+
+ self.__format_results("\n".join(results))
+
+
+ def execute_mysql(self, build, num_threads, max_time, read_only, args):
+ bin = os.path.join(self.autodir, 'deps/mysql/mysql/bin')
+ data = os.path.join(self.autodir, 'deps/mysql/mysql/var')
+ log = os.path.join(self.debugdir, 'mysql.log')
+
+ if build == 1:
+ utils.system('rm -rf ' + data)
+ os.mkdir(data)
+ os.chown(data, self.dbuid, 0)
+ utils.system(bin + '/mysql_install_db --user=' + self.dbuser)
+
+ utils.system(bin + '/mysqld_safe --log-error=' + log + \
+ ' --user=' + self.dbuser + ' &')
+
+ # Wait for database to start
+ time.sleep(5)
+
+ try:
+ base_cmd = self.srcdir + '/sysbench/sysbench ' + \
+ '--test=oltp --db-driver=mysql ' + \
+ '--mysql-user=root'
+
+ if build == 1:
+ utils.system('echo "create database sbtest" | ' + \
+ bin + '/mysql -u root')
+ cmd = base_cmd +' prepare'
+ utils.system(cmd)
+
+ cmd = base_cmd + \
+ ' --num-threads=' + str(num_threads) + \
+ ' --max-time=' + str(max_time) + \
+ ' --max-requests=0'
+
+ if read_only:
+ cmd = cmd + ' --oltp-read-only=on'
+
+ results = []
+
+ profilers = self.job.profilers
+ if not profilers.only():
+ results.append(utils.system_output(cmd + ' run',
+ retain_output=True))
+
+ # Do a profiling run if necessary
+ if profilers.present():
+ profilers.start(self)
+ results.append("Profiling run ...")
+ results.append(utils.system_output(cmd + ' run',
+ retain_output=True))
+ profilers.stop(self)
+ profilers.report(self)
+ except:
+ utils.system(bin + '/mysqladmin shutdown')
+ raise
+
+ utils.system(bin + '/mysqladmin shutdown')
+
+ self.__format_results("\n".join(results))
+
+
+ def __format_results(self, results):
+ threads = 0
+ tps = 0
+
+ out = open(self.resultsdir + '/keyval', 'w')
+ for line in results.split('\n'):
+ threads_re = re.search('Number of threads: (\d+)', line)
+ if threads_re:
+ threads = threads_re.group(1)
+
+ tps_re = re.search('transactions:\s+\d+\s+\((\S+) per sec.\)', line)
+ if tps_re:
+ tps = tps_re.group(1)
+ break
+
+ print >> out, 'threads=%s\ntps=%s' % (threads, tps)
+ out.close()
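
__format_results() above reduces the sysbench OLTP output to two keyvals: the thread count and the transactions-per-second figure. The same regexes run against a made-up output fragment:

# Sketch of the keyval extraction in __format_results() above; the sample
# text is invented for illustration.
import re

sample = """Number of threads: 8
    transactions:                        11520  (191.87 per sec.)
"""

threads = tps = 0
for line in sample.splitlines():
    m = re.search(r'Number of threads: (\d+)', line)
    if m:
        threads = m.group(1)
    m = re.search(r'transactions:\s+\d+\s+\((\S+) per sec.\)', line)
    if m:
        tps = m.group(1)
        break

print('threads=%s\ntps=%s' % (threads, tps))   # threads=8, tps=191.87
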
diff --git a/client/tests/tbench/tbench.py b/client/tests/tbench/tbench.py
index 3bc06670..0aeb3cd4 100755
--- a/client/tests/tbench/tbench.py
+++ b/client/tests/tbench/tbench.py
@@ -4,58 +4,58 @@ from autotest_lib.client.common_lib import utils
class tbench(test.test):
- version = 2
-
- # http://samba.org/ftp/tridge/dbench/dbench-3.04.tar.gz
- def setup(self, tarball = 'dbench-3.04.tar.gz'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- os.chdir(self.srcdir)
-
- utils.system('./configure')
- utils.system('make')
-
- def execute(self, iterations = 1, nprocs = None, args = ''):
- # only supports combined server+client model at the moment
- # should support separate I suppose, but nobody uses it
- if not nprocs:
- nprocs = self.job.cpu_count()
- args += ' %s' % nprocs
- results = []
- profilers = self.job.profilers
- if not profilers.only():
- for i in range(iterations):
- results.append(self.run_tbench(args))
-
- # Do a profiling run if necessary
- if profilers.present():
- profilers.start(self)
- results.append(self.run_tbench(args))
- profilers.stop(self)
- profilers.report(self)
-
- self.__format_results("\n".join(results))
-
-
- def run_tbench(self, args):
- pid = os.fork()
- if pid: # parent
- time.sleep(1)
- client = self.srcdir + '/client.txt'
- args = '-c ' + client + ' ' + '%s' % args
- cmd = os.path.join(self.srcdir, "tbench") + " " + args
- results = utils.system_output(cmd, retain_output=True)
- os.kill(pid, signal.SIGTERM) # clean up the server
- else: # child
- server = self.srcdir + '/tbench_srv'
- os.execlp(server, server)
- return results
-
-
- def __format_results(self, results):
- out = open(self.resultsdir + '/keyval', 'w')
- pattern = re.compile(r"Throughput (.*?) MB/sec (.*?) procs")
- for result in pattern.findall(results):
- print >> out, "throughput=%s\nprocs=%s\n" % result
- out.close()
+ version = 2
+
+ # http://samba.org/ftp/tridge/dbench/dbench-3.04.tar.gz
+ def setup(self, tarball = 'dbench-3.04.tar.gz'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ os.chdir(self.srcdir)
+
+ utils.system('./configure')
+ utils.system('make')
+
+ def execute(self, iterations = 1, nprocs = None, args = ''):
+ # only supports combined server+client model at the moment
+ # should support separate I suppose, but nobody uses it
+ if not nprocs:
+ nprocs = self.job.cpu_count()
+ args += ' %s' % nprocs
+ results = []
+ profilers = self.job.profilers
+ if not profilers.only():
+ for i in range(iterations):
+ results.append(self.run_tbench(args))
+
+ # Do a profiling run if necessary
+ if profilers.present():
+ profilers.start(self)
+ results.append(self.run_tbench(args))
+ profilers.stop(self)
+ profilers.report(self)
+
+ self.__format_results("\n".join(results))
+
+
+ def run_tbench(self, args):
+ pid = os.fork()
+ if pid: # parent
+ time.sleep(1)
+ client = self.srcdir + '/client.txt'
+ args = '-c ' + client + ' ' + '%s' % args
+ cmd = os.path.join(self.srcdir, "tbench") + " " + args
+ results = utils.system_output(cmd, retain_output=True)
+ os.kill(pid, signal.SIGTERM) # clean up the server
+ else: # child
+ server = self.srcdir + '/tbench_srv'
+ os.execlp(server, server)
+ return results
+
+
+ def __format_results(self, results):
+ out = open(self.resultsdir + '/keyval', 'w')
+ pattern = re.compile(r"Throughput (.*?) MB/sec (.*?) procs")
+ for result in pattern.findall(results):
+ print >> out, "throughput=%s\nprocs=%s\n" % result
+ out.close()
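
run_tbench() above forks, execs tbench_srv in the child, gives it a second to come up, runs the client in the parent, and SIGTERMs the server when the client output is collected. A sketch of that lifecycle with subprocess instead of raw fork/exec; the commands are stand-ins for illustration, not the real tbench_srv and tbench binaries:

# Sketch of the server/client lifecycle in run_tbench() above.
import subprocess
import time

server = subprocess.Popen(['sleep', '30'])      # stands in for tbench_srv
try:
    time.sleep(1)                               # give the server time to start
    out = subprocess.run(['echo', 'throughput 123 MB/sec 4 procs'],
                         capture_output=True, text=True).stdout
finally:
    server.terminate()                          # SIGTERM, like os.kill() above
    server.wait()

print(out.strip())
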
diff --git a/client/tests/tiobench/tiobench.py b/client/tests/tiobench/tiobench.py
index 6c555675..8a4d5ba6 100644
--- a/client/tests/tiobench/tiobench.py
+++ b/client/tests/tiobench/tiobench.py
@@ -4,32 +4,31 @@ from autotest_lib.client.common_lib import utils
class tiobench(test.test):
- version = 1
+ version = 1
- # http://prdownloads.sourceforge.net/tiobench/tiobench-0.3.3.tar.gz
- def setup(self, tarball = 'tiobench-0.3.3.tar.bz2'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- os.chdir(self.srcdir)
+ # http://prdownloads.sourceforge.net/tiobench/tiobench-0.3.3.tar.gz
+ def setup(self, tarball = 'tiobench-0.3.3.tar.bz2'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ os.chdir(self.srcdir)
- utils.system('make')
-
- def execute(self, dir = None, iterations=1, args = None):
- if not dir:
- dir = self.tmpdir
- os.chdir(self.srcdir)
- if not args:
- args = '--block=4096 --block=8192 --threads=10 --size=1024 --numruns=2'
- profilers = self.job.profilers
- if not profilers.only():
- for i in range(iterations):
- utils.system('./tiobench.pl --dir %s %s' %(dir, args))
+ utils.system('make')
- # Do a profiling run if necessary
- if profilers.present():
- profilers.start(self)
- utils.system('./tiobench.pl --dir %s %s' %(dir, args))
- profilers.stop(self)
- profilers.report(self)
-
+ def execute(self, dir = None, iterations=1, args = None):
+ if not dir:
+ dir = self.tmpdir
+ os.chdir(self.srcdir)
+ if not args:
+ args = '--block=4096 --block=8192 --threads=10 --size=1024 --numruns=2'
+ profilers = self.job.profilers
+ if not profilers.only():
+ for i in range(iterations):
+ utils.system('./tiobench.pl --dir %s %s' %(dir, args))
+
+ # Do a profiling run if necessary
+ if profilers.present():
+ profilers.start(self)
+ utils.system('./tiobench.pl --dir %s %s' %(dir, args))
+ profilers.stop(self)
+ profilers.report(self)
diff --git a/client/tests/tsc/tsc.py b/client/tests/tsc/tsc.py
index 79b5c26f..54941f8b 100755
--- a/client/tests/tsc/tsc.py
+++ b/client/tests/tsc/tsc.py
@@ -3,16 +3,16 @@ from autotest_lib.client.bin import test, autotest_utils
from autotest_lib.client.common_lib import utils
class tsc(test.test):
- version = 1
+ version = 1
- def setup(self, tarball = 'checktsc.tar'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- os.chdir(self.srcdir)
- utils.system('make')
+ def setup(self, tarball = 'checktsc.tar'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ os.chdir(self.srcdir)
+ utils.system('make')
-
- def execute(self, iterations = 1, args = ''):
- for i in range(iterations):
- utils.system(self.srcdir + '/checktsc ' + args)
+
+ def execute(self, iterations = 1, args = ''):
+ for i in range(iterations):
+ utils.system(self.srcdir + '/checktsc ' + args)
diff --git a/client/tests/unixbench/unixbench.py b/client/tests/unixbench/unixbench.py
index 5685404c..001ce666 100755
--- a/client/tests/unixbench/unixbench.py
+++ b/client/tests/unixbench/unixbench.py
@@ -4,91 +4,91 @@ from autotest_lib.client.common_lib import utils, error
class unixbench(test.test):
- version = 2
-
- # http://www.tux.org/pub/tux/niemi/unixbench/unixbench-4.1.0.tgz
- def setup(self, tarball = 'unixbench-4.1.0.tar.bz2'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- os.chdir(self.srcdir)
-
- utils.system('patch -p1 < ../unixbench.patch')
- utils.system('make')
-
-
- def execute(self, iterations = 1, args = '', stepsecs=0):
- vars = ('TMPDIR=\"%s\" RESULTDIR=\"%s\"' %
- (self.tmpdir, self.resultsdir))
- profilers = self.job.profilers
- keyval = open(self.resultsdir + '/keyval', 'w')
- self.err = None
- if stepsecs:
- # change time per subtest from unixbench's defaults of
- # 10 secs for small tests, 30 secs for bigger tests
- vars += ' systime=%i looper=%i seconds=%i'\
- ' dhrytime=%i arithtime=%i' \
- % ((stepsecs,)*5)
- if not profilers.only():
- for i in range(iterations):
- os.chdir(self.srcdir)
- utils.system(vars + ' ./Run ' + args)
- report = open(self.resultsdir + '/report')
- self.format_results(report, keyval)
-
- # Do a profiling run if necessary
- if profilers.present():
- profilers.start(self)
- utils.system(vars + ' ./Run ' + args)
- profilers.stop(self)
- profilers.report(self)
-
- # check err string and possible throw
- if self.err != None:
- raise error.TestError(self.err)
-
-
- def check_for_error(self, words):
- l = len(words)
- if l >= 3 and words[-3:l] == ['no', 'measured', 'results']:
- # found a problem so record it in err string
- key = '_'.join(words[:-3])
- if self.err == None:
- self.err = key
- else:
- self.err = self.err + " " + key
- return True
- else:
- return False
-
-
- def format_results(self, report, keyval):
- for i in range(9):
- report.next()
- for line in report:
- if not line.strip():
- break
-
- words = line.split()
- # look for problems first
- if self.check_for_error(words):
- continue
-
- # we should make sure that there are at least
- # 6 guys before we start accessing the array
- if len(words) >= 6:
- key = '_'.join(words[:-6])
- value = words[-6]
- print >> keyval, '%s=%s' % (key, value)
- for line in report:
- if 'FINAL SCORE' in line:
- print >> keyval, 'score=%s\n' % line.split()[-1]
- break
+ version = 2
+
+ # http://www.tux.org/pub/tux/niemi/unixbench/unixbench-4.1.0.tgz
+ def setup(self, tarball = 'unixbench-4.1.0.tar.bz2'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ os.chdir(self.srcdir)
+
+ utils.system('patch -p1 < ../unixbench.patch')
+ utils.system('make')
+
+
+ def execute(self, iterations = 1, args = '', stepsecs=0):
+ vars = ('TMPDIR=\"%s\" RESULTDIR=\"%s\"' %
+ (self.tmpdir, self.resultsdir))
+ profilers = self.job.profilers
+ keyval = open(self.resultsdir + '/keyval', 'w')
+ self.err = None
+ if stepsecs:
+ # change time per subtest from unixbench's defaults of
+ # 10 secs for small tests, 30 secs for bigger tests
+ vars += ' systime=%i looper=%i seconds=%i'\
+ ' dhrytime=%i arithtime=%i' \
+ % ((stepsecs,)*5)
+ if not profilers.only():
+ for i in range(iterations):
+ os.chdir(self.srcdir)
+ utils.system(vars + ' ./Run ' + args)
+ report = open(self.resultsdir + '/report')
+ self.format_results(report, keyval)
+
+ # Do a profiling run if necessary
+ if profilers.present():
+ profilers.start(self)
+ utils.system(vars + ' ./Run ' + args)
+ profilers.stop(self)
+ profilers.report(self)
+
+ # check err string and possibly throw
+ if self.err != None:
+ raise error.TestError(self.err)
+
+
+ def check_for_error(self, words):
+ l = len(words)
+ if l >= 3 and words[-3:l] == ['no', 'measured', 'results']:
+ # found a problem so record it in err string
+ key = '_'.join(words[:-3])
+ if self.err == None:
+ self.err = key
+ else:
+ self.err = self.err + " " + key
+ return True
+ else:
+ return False
+
+
+ def format_results(self, report, keyval):
+ for i in range(9):
+ report.next()
+ for line in report:
+ if not line.strip():
+ break
+
+ words = line.split()
+ # look for problems first
+ if self.check_for_error(words):
+ continue
+
+ # make sure there are at least 6 fields
+ # before we start indexing into the list
+ if len(words) >= 6:
+ key = '_'.join(words[:-6])
+ value = words[-6]
+ print >> keyval, '%s=%s' % (key, value)
+ for line in report:
+ if 'FINAL SCORE' in line:
+ print >> keyval, 'score=%s\n' % line.split()[-1]
+ break
if __name__ == '__main__':
- import sys
- unixbench.format_results(sys.stdin, sys.stdout)
+ import sys
+ unixbench.format_results(sys.stdin, sys.stdout)
""" Here is a sample report file:
diff --git a/client/tests/xmtest/xmtest.py b/client/tests/xmtest/xmtest.py
index 32c649a7..e9216332 100644
--- a/client/tests/xmtest/xmtest.py
+++ b/client/tests/xmtest/xmtest.py
@@ -1,33 +1,33 @@
# (C) Copyright IBM Corp. 2006
# Author: Paul Larson <pl@us.ibm.com>
# Description:
-# Autotest script for running Xen xm-test
-# This should be run from a Xen domain0
+# Autotest script for running Xen xm-test
+# This should be run from a Xen domain0
import os
from autotest_lib.client.bin import test, autotest_utils
from autotest_lib.client.common_lib import utils
class xmtest(test.test):
- version = 1
+ version = 1
- # This test expects just the xm-test directory, as a tarball
- # from the Xen source tree
- # hg clone http://xenbits.xensource.com/xen-unstable.hg
- # or wget http://www.cl.cam.ac.uk/Research/SRG/netos/xen/downloads/xen-unstable-src.tgz
- # cd tools
- # tar -czf xm-test.tgz xm-test
- def setup(self, tarball = 'xm-test.tar.bz2'):
- tarball = utils.unmap_url(self.bindir, tarball,
- self.tmpdir)
- autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
- os.chdir(self.srcdir)
+ # This test expects just the xm-test directory, as a tarball
+ # from the Xen source tree
+ # hg clone http://xenbits.xensource.com/xen-unstable.hg
+ # or wget http://www.cl.cam.ac.uk/Research/SRG/netos/xen/downloads/xen-unstable-src.tgz
+ # cd tools
+ # tar -czf xm-test.tgz xm-test
+ def setup(self, tarball = 'xm-test.tar.bz2'):
+ tarball = utils.unmap_url(self.bindir, tarball,
+ self.tmpdir)
+ autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+ os.chdir(self.srcdir)
- utils.system('./autogen')
- utils.system('./configure')
- utils.system('make existing')
+ utils.system('./autogen')
+ utils.system('./configure')
+ utils.system('make existing')
- def execute(self, args = ''):
- os.chdir(self.srcdir)
- utils.system('./runtest.sh ' + args)
- utils.system('mv xmtest.* ' + self.resultsdir)
+ def execute(self, args = ''):
+ os.chdir(self.srcdir)
+ utils.system('./runtest.sh ' + args)
+ utils.system('mv xmtest.* ' + self.resultsdir)
diff --git a/client/tools/autotest b/client/tools/autotest
index 619db5a7..2b47b37c 100755
--- a/client/tools/autotest
+++ b/client/tools/autotest
@@ -3,21 +3,21 @@ import sys,os
autodir = None
try:
- autodir = os.path.dirname(os.path.realpath('/etc/autotest.conf'))
+ autodir = os.path.dirname(os.path.realpath('/etc/autotest.conf'))
except:
- pass
+ pass
if not autodir:
- for path in ['/usr/local/autotest', '/home/autotest']:
- if os.path.exists(os.path.join(path, '/bin/autotest')):
- autodir = path
+ for path in ['/usr/local/autotest', '/home/autotest']:
+ if os.path.exists(os.path.join(path, '/bin/autotest')):
+ autodir = path
autotest = os.path.join(autodir, 'bin/autotest')
control = os.path.join(autodir, 'control')
state = os.path.join(autodir, 'control.state')
if len(sys.argv) == 1 or sys.argv[1] == 'start':
- if os.path.exists(state):
- print "Restarting partially completed autotest job"
- os.system(autotest + ' --continue ' + control)
- else:
- print "No autotest jobs outstanding"
+ if os.path.exists(state):
+ print "Restarting partially completed autotest job"
+ os.system(autotest + ' --continue ' + control)
+ else:
+ print "No autotest jobs outstanding"
diff --git a/client/tools/avgtime b/client/tools/avgtime
index d78a750f..b5a1ed77 100755
--- a/client/tools/avgtime
+++ b/client/tools/avgtime
@@ -2,35 +2,34 @@
import sys, os, re
def avg_deviation(values):
- sum = 0
- count = 0
+ sum = 0
+ count = 0
- if not values:
- return (0, 0)
- for x in values:
- sum += x
- count += 1
- average = sum / count
- sum_sq_dev = 0
- for x in values:
- sum_sq_dev += (x - average) ** 2
- std_dev = (sum_sq_dev / count)**0.5
- return (average, 100 * std_dev / average)
+ if not values:
+ return (0, 0)
+ for x in values:
+ sum += x
+ count += 1
+ average = sum / count
+ sum_sq_dev = 0
+ for x in values:
+ sum_sq_dev += (x - average) ** 2
+ std_dev = (sum_sq_dev / count)**0.5
+ return (average, 100 * std_dev / average)
-list = []
+list = []
for line in sys.stdin.readlines():
- (user, system, elapsed, cpu) = line.split()[0:4]
- user = float(re.match(r'([\d\.]+)', user).group(0))
- system = float(re.match(r'([\d\.]+)', system).group(0))
- m = re.match(r'(\d+):([\d\.]+)', elapsed)
- elapsed = 60*int(m.group(1)) + float(m.group(2))
- cpu = int(re.match(r'(\d+)', cpu).group(0))
+ (user, system, elapsed, cpu) = line.split()[0:4]
+ user = float(re.match(r'([\d\.]+)', user).group(0))
+ system = float(re.match(r'([\d\.]+)', system).group(0))
+ m = re.match(r'(\d+):([\d\.]+)', elapsed)
+ elapsed = 60*int(m.group(1)) + float(m.group(2))
+ cpu = int(re.match(r'(\d+)', cpu).group(0))
- list.append((user, system, elapsed, cpu))
+ list.append((user, system, elapsed, cpu))
print " user: %0.2fs (%0.2f%%)" % avg_deviation([x[0] for x in list])
print " system: %0.2fs (%0.2f%%)" % avg_deviation([x[1] for x in list])
print "elapsed: %0.2fs (%0.2f%%)" % avg_deviation([x[2] for x in list])
print " cpu: %d%% (%0.2f%%)" % avg_deviation([x[3] for x in list])
-
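
avg_deviation above reports two numbers per column: the mean and the (population) standard deviation expressed as a percentage of that mean. A small self-contained worked example of the same statistic:

    def avg_deviation(values):
        # Mean, then standard deviation as a percentage of the mean.
        if not values:
            return (0, 0)
        average = sum(values) / float(len(values))
        sum_sq_dev = sum((x - average) ** 2 for x in values)
        std_dev = (sum_sq_dev / len(values)) ** 0.5
        return (average, 100 * std_dev / average)

    print "elapsed: %0.2fs (%0.2f%%)" % avg_deviation([10.0, 12.0, 14.0])
    # -> elapsed: 12.00s (13.61%)
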
diff --git a/client/tools/diffprofile b/client/tools/diffprofile
index 6bf2b6b4..da57eb09 100755
--- a/client/tools/diffprofile
+++ b/client/tools/diffprofile
@@ -7,20 +7,20 @@ import os, sys, re
results_per_sign = 10
def parse_lines(filename):
- results = []
- start_key = 1
- for line in open(filename).readlines():
- try:
- a = line.split()
- key = ' '.join(a[start_key:])
- count = int(a[0])
- results.append((key, count))
- except: # presumably a header line
- if re.match(r'samples\s*%\s*app name\s*symbol name', line):
- start_key = 2
- elif re.match(r'samples\s*%\s*image name\s*app name\s*symbol name', line):
- start_key = 3
- return results
+ results = []
+ start_key = 1
+ for line in open(filename).readlines():
+ try:
+ a = line.split()
+ key = ' '.join(a[start_key:])
+ count = int(a[0])
+ results.append((key, count))
+ except: # presumably a header line
+ if re.match(r'samples\s*%\s*app name\s*symbol name', line):
+ start_key = 2
+ elif re.match(r'samples\s*%\s*image name\s*app name\s*symbol name', line):
+ start_key = 3
+ return results
# Firstly, suck in both files.
@@ -29,43 +29,43 @@ new = {}
diff = {}
for (key, count) in parse_lines(sys.argv[1]):
- # Oprofile seems to be ... erm ... broken. Keys can appear > once ;-(
- if orig.has_key(key):
- orig[key] += count
- else:
- orig[key] = count
- if diff.has_key(key):
- diff[key] -= count
- else:
- diff[key] = -count
+ # Oprofile seems to be ... erm ... broken. Keys can appear > once ;-(
+ if orig.has_key(key):
+ orig[key] += count
+ else:
+ orig[key] = count
+ if diff.has_key(key):
+ diff[key] -= count
+ else:
+ diff[key] = -count
for (key, count) in parse_lines(sys.argv[2]):
- if new.has_key(key):
- new[key] += count
- else:
- new[key] = count
- if diff.has_key(key):
- diff[key] += count
- else:
- diff[key] = count
+ if new.has_key(key):
+ new[key] += count
+ else:
+ new[key] = count
+ if diff.has_key(key):
+ diff[key] += count
+ else:
+ diff[key] = count
if len(orig) < 2* results_per_sign or len(new) < 2 * results_per_sign:
- sys.exit(1) # one of the files was blank?
+ sys.exit(1) # one of the files was blank?
# Now sort and print the diffs.
def print_key(key):
- if orig.has_key(key) and orig[key] > 0:
- pct = (100 * diff[key]) / orig[key]
- else:
- pct = 0
- print "%10d %6.1f%% %s" % (diff[key], pct, key)
+ if orig.has_key(key) and orig[key] > 0:
+ pct = (100 * diff[key]) / orig[key]
+ else:
+ pct = 0
+ print "%10d %6.1f%% %s" % (diff[key], pct, key)
keys = sorted(diff.keys(), key=lambda x : diff[x], reverse = True)
for key in keys[:results_per_sign]:
- print_key(key)
+ print_key(key)
print "\n...\n"
for key in keys[len(keys)-results_per_sign:]:
- print_key(key)
+ print_key(key)
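
diffprofile reduces each oprofile report to a {symbol: sample_count} map, then prints the per-symbol change between the two runs, both absolute and as a percentage of the old count, keeping the ten biggest movers in each direction. A toy example of that arithmetic (the symbol names and counts are made up):

    orig = {'copy_user_generic': 1000, 'do_page_fault': 400, 'schedule': 50}
    new = {'copy_user_generic': 700, 'do_page_fault': 900, 'schedule': 50}

    diff = {}
    for key in set(orig) | set(new):
        diff[key] = new.get(key, 0) - orig.get(key, 0)

    for key in sorted(diff, key=lambda k: diff[k], reverse=True):
        pct = (100.0 * diff[key]) / orig[key] if orig.get(key) else 0
        print "%10d %6.1f%% %s" % (diff[key], pct, key)
    #        500  125.0% do_page_fault
    #          0    0.0% schedule
    #       -300  -30.0% copy_user_generic
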
diff --git a/client/tools/make_clean b/client/tools/make_clean
index 5f47476a..f6853d48 100755
--- a/client/tools/make_clean
+++ b/client/tools/make_clean
@@ -2,25 +2,24 @@
import os
def purge_src(top_dir):
- for dir in os.listdir(top_dir):
- if dir.startswith('.'):
- continue
- py = os.path.join (top_dir, dir, dir + '.py')
- if not os.path.exists(py):
- continue
- ret = os.system('grep -q "preserve_srcdir = " ' + py)
- src_path = os.path.abspath(os.path.join('tests', dir, 'src'))
- if not os.path.exists(src_path):
- continue
- if ret: # This should have a replaceable src dir
- cmd = 'rm -rf ' + src_path
- else:
- cmd = 'cd %s; make clean > /dev/null 2>&1 ' % src_path
+ for dir in os.listdir(top_dir):
+ if dir.startswith('.'):
+ continue
+ py = os.path.join (top_dir, dir, dir + '.py')
+ if not os.path.exists(py):
+ continue
+ ret = os.system('grep -q "preserve_srcdir = " ' + py)
+ src_path = os.path.abspath(os.path.join('tests', dir, 'src'))
+ if not os.path.exists(src_path):
+ continue
+ if ret: # This should have a replaceable src dir
+ cmd = 'rm -rf ' + src_path
+ else:
+ cmd = 'cd %s; make clean > /dev/null 2>&1 ' % src_path
- print cmd
- os.system(cmd)
+ print cmd
+ os.system(cmd)
for dir in ['tests', 'profilers', 'deps']:
- purge_src(dir)
-
+ purge_src(dir)
diff --git a/conmux/contrib/console_check.py b/conmux/contrib/console_check.py
index eb0f37d3..7e450e81 100644
--- a/conmux/contrib/console_check.py
+++ b/conmux/contrib/console_check.py
@@ -7,8 +7,8 @@ _author_ = 'Scott Zawalski (scottz@google.com)'
Checks if machines are not only connected to conmux but also
responding in an expected way
- Supports options to show all, good, bad, unknown and add them
- to autotest as well.
+ Supports options to show all, good, bad, unknown and add them
+ to autotest as well.
*In order for the power update option to work you have to have
access to the etc directory of the conmux server
@@ -19,285 +19,285 @@ from optparse import OptionParser
def main(argv):
- consoles = {}
- consoles['good'] = []
- consoles['bad'] = []
- consoles['unknown'] = []
- # 0, 1, 2 status
- STATUS = [ 'good', 'bad', 'unknown']
- parser = OptionParser()
- parser.add_option('--conmux-server', dest="conmux_server",
- default='localhost',
- help="Conmux server to connect to")
- parser.add_option('--conmux-dir', dest="conmux_dir",
- default='/usr/local/conmux',
- help="Conmux server to connect to")
- parser.add_option('--console-binary', dest="console_binary",
- default='/usr/local/conmux/bin/console',
- help="Conmux console binary location")
- parser.add_option('--autotest-cli-dir', dest="autotest_cli_dir",
- default='/usr/local/autotest/cli',
- help="Autotest CLI dir")
- parser.add_option('--add-hosts',
- action="store_true", dest="add_hosts",
- default=False,
- help="If host not on autotest server try to add it")
- parser.add_option('--power-label', dest="power_label",
- default='remote-power',
- help="Label to add to hosts that support hard reset")
- parser.add_option('--console-label', dest="console_label",
- default='console',
- help="Label to add to hosts that support console")
- parser.add_option('--update-console-label',
- action="store_true", dest="update_console_label",
- default=False,
- help="Update console label on autotest server")
- parser.add_option('--update-power-label',
- action="store_true", dest="update_power_label",
- default=False,
- help="Update power label on autotest server" +\
- "*Note this runs then exists no consoles are checked")
- parser.add_option('--verbose',
- action="store_true", dest="verbose",
- default=False,
- help="Verbose output")
- parser.add_option('--show-bad',
- action="store_true", dest="show_bad",
- default=False,
- help="Show consoles that are no longer functioning")
- parser.add_option('--show-good',
- action="store_true", dest="show_good",
- default=False,
- help="Show consoles that are functioning properly")
- parser.add_option('--show-unknown',
- action="store_true", dest="show_unknown",
- default=False,
- help="Show consoles that are in an unknown state")
- parser.add_option('--show-all',
- action="store_true", dest="show_all",
- default=False,
- help="Show status of all consoles")
- options, args = parser.parse_args()
- if len(argv) == 2 and options.verbose:
- parser.print_help()
- return 1
- elif len(argv) < 2:
- parser.print_help()
- return 1
-
- if options.update_power_label:
- remove_create_label(options.power_label,
- options.autotest_cli_dir)
- update_power_label(options.power_label, options.conmux_dir,
- options.autotest_cli_dir, options.add_hosts)
- return
- print options.console_binary
- if not os.path.exists(options.console_binary):
- print "Error %s does not exist, please specify another path" %\
- options.console_binary
- return 1
- hosts = get_console_hosts(options.console_binary, options.conmux_server)
- for host in hosts:
- rc = check_host(host, options.console_binary)
- if options.verbose is True:
- print "%s status: %s" % (host, STATUS[rc])
- consoles[STATUS[rc]].append(host)
-
- if options.show_all:
- for status in consoles:
- print "--- %s ---" % status
- for host in consoles[status]:
- print host
- if options.show_good:
- print "--- good ---"
- for host in consoles['good']:
- print host
- if options.show_bad:
- print "--- bad ---"
- for host in consoles['bad']:
- print host
- if options.show_unknown:
- print "--- unknown ---"
- for host in consoles['unknown']:
- print host
-
- if options.update_console_label:
- remove_create_label(options.console_label,
- options.autotest_cli_dir)
- update_console_label(options.console_label, consoles['good'],
- options.autotest_cli_dir, options.add_hosts)
-
+ consoles = {}
+ consoles['good'] = []
+ consoles['bad'] = []
+ consoles['unknown'] = []
+ # 0, 1, 2 status
+ STATUS = [ 'good', 'bad', 'unknown']
+ parser = OptionParser()
+ parser.add_option('--conmux-server', dest="conmux_server",
+ default='localhost',
+ help="Conmux server to connect to")
+ parser.add_option('--conmux-dir', dest="conmux_dir",
+ default='/usr/local/conmux',
+ help="Conmux server to connect to")
+ parser.add_option('--console-binary', dest="console_binary",
+ default='/usr/local/conmux/bin/console',
+ help="Conmux console binary location")
+ parser.add_option('--autotest-cli-dir', dest="autotest_cli_dir",
+ default='/usr/local/autotest/cli',
+ help="Autotest CLI dir")
+ parser.add_option('--add-hosts',
+ action="store_true", dest="add_hosts",
+ default=False,
+ help="If host not on autotest server try to add it")
+ parser.add_option('--power-label', dest="power_label",
+ default='remote-power',
+ help="Label to add to hosts that support hard reset")
+ parser.add_option('--console-label', dest="console_label",
+ default='console',
+ help="Label to add to hosts that support console")
+ parser.add_option('--update-console-label',
+ action="store_true", dest="update_console_label",
+ default=False,
+ help="Update console label on autotest server")
+ parser.add_option('--update-power-label',
+ action="store_true", dest="update_power_label",
+ default=False,
+ help="Update power label on autotest server" +\
+ "*Note this runs then exists no consoles are checked")
+ parser.add_option('--verbose',
+ action="store_true", dest="verbose",
+ default=False,
+ help="Verbose output")
+ parser.add_option('--show-bad',
+ action="store_true", dest="show_bad",
+ default=False,
+ help="Show consoles that are no longer functioning")
+ parser.add_option('--show-good',
+ action="store_true", dest="show_good",
+ default=False,
+ help="Show consoles that are functioning properly")
+ parser.add_option('--show-unknown',
+ action="store_true", dest="show_unknown",
+ default=False,
+ help="Show consoles that are in an unknown state")
+ parser.add_option('--show-all',
+ action="store_true", dest="show_all",
+ default=False,
+ help="Show status of all consoles")
+ options, args = parser.parse_args()
+ if len(argv) == 2 and options.verbose:
+ parser.print_help()
+ return 1
+ elif len(argv) < 2:
+ parser.print_help()
+ return 1
+
+ if options.update_power_label:
+ remove_create_label(options.power_label,
+ options.autotest_cli_dir)
+ update_power_label(options.power_label, options.conmux_dir,
+ options.autotest_cli_dir, options.add_hosts)
+ return
+ print options.console_binary
+ if not os.path.exists(options.console_binary):
+ print "Error %s does not exist, please specify another path" %\
+ options.console_binary
+ return 1
+ hosts = get_console_hosts(options.console_binary, options.conmux_server)
+ for host in hosts:
+ rc = check_host(host, options.console_binary)
+ if options.verbose is True:
+ print "%s status: %s" % (host, STATUS[rc])
+ consoles[STATUS[rc]].append(host)
+
+ if options.show_all:
+ for status in consoles:
+ print "--- %s ---" % status
+ for host in consoles[status]:
+ print host
+ if options.show_good:
+ print "--- good ---"
+ for host in consoles['good']:
+ print host
+ if options.show_bad:
+ print "--- bad ---"
+ for host in consoles['bad']:
+ print host
+ if options.show_unknown:
+ print "--- unknown ---"
+ for host in consoles['unknown']:
+ print host
+
+ if options.update_console_label:
+ remove_create_label(options.console_label,
+ options.autotest_cli_dir)
+ update_console_label(options.console_label, consoles['good'],
+ options.autotest_cli_dir, options.add_hosts)
+
def update_console_label(console_label, consoles, cli_dir, add_hosts=False):
- """Update CONSOLE_LABEL on your autotest server.
- This removes the label and recreates it, then populating the label
- with all the machines your conmux server knows about.
-
- *Note If the hosts do not exist they are created.
- Args:
- console_label:
- string, describes the autotest label to add to machines.
- consoles:
- list, all the consoles that have confirmed console support.
- """
- # TODO: Update to new CLI and change logic until then
- # this is the best way to ensure a machine is added i.e. one at a time
-
- for host in consoles:
- if not host_label_add(host, console_label, cli_dir):
- # Try to create host
- if add_hosts:
- if host_create(host, cli_dir):
- host_label_add(host, power_label,
- cli_dir)
- else:
- print "Unable to add host " + host
+ """Update CONSOLE_LABEL on your autotest server.
+ This removes the label and recreates it, then populates the label
+ with all the machines your conmux server knows about.
+
+ *Note If the hosts do not exist they are created.
+ Args:
+ console_label:
+ string, describes the autotest label to add to machines.
+ consoles:
+ list, all the consoles that have confirmed console support.
+ """
+ # TODO: Update to new CLI and change logic until then
+ # this is the best way to ensure a machine is added i.e. one at a time
+
+ for host in consoles:
+ if not host_label_add(host, console_label, cli_dir):
+ # Try to create host
+ if add_hosts:
+ if host_create(host, cli_dir):
+ host_label_add(host, console_label,
+ cli_dir)
+ else:
+ print "Unable to add host " + host
def update_power_label(power_label, conmux_dir, cli_dir, add_hosts=False):
- """Look in CONSOLE_DIR/etc and grab known power commands
- Then remove POWER_LABEL and add machines to that label
- """
- # remove label and add it
- for host in hard_reset_hosts(conmux_dir):
- rc = label_add_host(host, power_label, cli_dir)
- if not rc:
- # Try to create the host
- if add_hosts:
- if host_create(host, cli_dir):
- rc = label_add_host(host, power_label,
- cli_dir)
- else:
- print "Unable to add host " + host
-
-
-def hard_reset_hosts(conmux_dir):
- """Go through conmux dir and find hosts that have reset commands"""
- config_dir = os.path.join(conmux_dir, "etc")
- hosts = []
- for file in os.listdir(config_dir):
- if not file.endswith(".cf"):
- continue
- file_path = os.path.join(config_dir, file)
- try:
- try:
- f = open(file_path)
- for line in f:
- if "reset" in line:
- hosts.append(file.rstrip(".cf"))
- except IOError:
- pass
- finally:
- f.close()
- return hosts
+ """Look in CONSOLE_DIR/etc and grab known power commands
+ Then remove POWER_LABEL and add machines to that label
+ """
+ # remove label and add it
+ for host in hard_reset_hosts(conmux_dir):
+ rc = label_add_host(host, power_label, cli_dir)
+ if not rc:
+ # Try to create the host
+ if add_hosts:
+ if host_create(host, cli_dir):
+ rc = label_add_host(host, power_label,
+ cli_dir)
+ else:
+ print "Unable to add host " + host
+
+
+def hard_reset_hosts(conmux_dir):
+ """Go through conmux dir and find hosts that have reset commands"""
+ config_dir = os.path.join(conmux_dir, "etc")
+ hosts = []
+ for file in os.listdir(config_dir):
+ if not file.endswith(".cf"):
+ continue
+ file_path = os.path.join(config_dir, file)
+ try:
+ try:
+ f = open(file_path)
+ for line in f:
+ if "reset" in line:
+ hosts.append(file.rstrip(".cf"))
+ except IOError:
+ pass
+ finally:
+ f.close()
+ return hosts
def host_create(host, cli_dir):
- """Create a host
- Return:
- True, if successfuly false if failed
- """
- cmd = "%s/host-create %s" % (cli_dir, host)
- status, output = commands.getstatusoutput(cmd)
- return status == 0
+ """Create a host
+ Return:
+ True if successful, False if failed
+ """
+ cmd = "%s/host-create %s" % (cli_dir, host)
+ status, output = commands.getstatusoutput(cmd)
+ return status == 0
def label_add_host(host, label, cli_dir):
- """Add a host to a label"""
- host_cmd = "%s/label-add-hosts %s %s" % (cli_dir, label, host)
- (status, output) = commands.getstatusoutput(host_cmd)
- if status != 0:
- return False
+ """Add a host to a label"""
+ host_cmd = "%s/label-add-hosts %s %s" % (cli_dir, label, host)
+ (status, output) = commands.getstatusoutput(host_cmd)
+ if status != 0:
+ return False
- return True
+ return True
def remove_create_label(label, cli_dir):
- """Remove and recreate a given label"""
- cmd = "%s/label-rm %s" % (cli_dir, label)
- status, output = commands.getstatusoutput(cmd)
- if status != 0:
- raise Exception("Error deleting label: " + label)
+ """Remove and recreate a given label"""
+ cmd = "%s/label-rm %s" % (cli_dir, label)
+ status, output = commands.getstatusoutput(cmd)
+ if status != 0:
+ raise Exception("Error deleting label: " + label)
- cmd = "%s/label-create %s" % (cli_dir, label)
- status, output = commands.getstatusoutput(cmd)
- if status != 0:
- raise Exception("Error creating label: " + label + output)
+ cmd = "%s/label-create %s" % (cli_dir, label)
+ status, output = commands.getstatusoutput(cmd)
+ if status != 0:
+ raise Exception("Error creating label: " + label + output)
- return True
+ return True
def get_console_hosts(console_binary, conmux_server):
- """Use console to collect console hosts and return a list.
-
- Args:
- console_binary:
- string, location of the conmux console binary
- conmux_server:
- string, hostname of the conmux server
-
- Returns:
- A List of console conmux is currently running on.
- """
-
- hosts_list = []
- cmd = "%s --list %s" % (console_binary, conmux_server)
- for line in commands.getoutput(cmd).split('\n'):
- host = (line.split(' '))[0]
- hosts_list.append(host)
-
- return hosts_list
+ """Use console to collect console hosts and return a list.
+
+ Args:
+ console_binary:
+ string, location of the conmux console binary
+ conmux_server:
+ string, hostname of the conmux server
+
+ Returns:
+ A list of consoles conmux is currently running on.
+ """
+
+ hosts_list = []
+ cmd = "%s --list %s" % (console_binary, conmux_server)
+ for line in commands.getoutput(cmd).split('\n'):
+ host = (line.split(' '))[0]
+ hosts_list.append(host)
+
+ return hosts_list
def check_host(host, console_binary):
- """Check hosts for common errors and return the status.
-
- Args:
- host:
- string, the console host identifier
-
- console_binary:
- string, location of the conmux console binary
- Returns:
- int, 0: Machine state is good
- int, 1: Machine state is bad
- int, 2: Machine state is unknown
- """
- RESPONSES = [ host + ' login:',
- 'ENOENT entry not found',
- 'login:',
- 'Connection refused',
- '<<<NOT CONNECTED>>>',
- 'Authentication failure',
- 'Give root password for maintenance', ]
-
- cmd = '%s %s' % (console_binary, host)
- shell = pexpect.spawn(cmd)
-
- shell.send('\r\n')
- shell.send('\r\n')
- shell.send('\r\n')
- try:
- # May need to increase the timeout but good so far
- response = shell.expect(RESPONSES, 1)
- except pexpect.TIMEOUT:
- shell.sendline('~$')
- shell.expect('>')
- shell.sendline('quit')
- return 1
- except pexpect.EOF:
- # unknown error
- shell.sendline('~$')
- shell.expect('>')
- shell.sendline('quit')
- return 2
- # TODO: Change actions based on what server returned
- if response == 0:
- # OK response
- return 0
- else:
- return 1
+ """Check hosts for common errors and return the status.
+
+ Args:
+ host:
+ string, the console host identifier
+
+ console_binary:
+ string, location of the conmux console binary
+ Returns:
+ int, 0: Machine state is good
+ int, 1: Machine state is bad
+ int, 2: Machine state is unknown
+ """
+ RESPONSES = [ host + ' login:',
+ 'ENOENT entry not found',
+ 'login:',
+ 'Connection refused',
+ '<<<NOT CONNECTED>>>',
+ 'Authentication failure',
+ 'Give root password for maintenance', ]
+
+ cmd = '%s %s' % (console_binary, host)
+ shell = pexpect.spawn(cmd)
+
+ shell.send('\r\n')
+ shell.send('\r\n')
+ shell.send('\r\n')
+ try:
+ # May need to increase the timeout but good so far
+ response = shell.expect(RESPONSES, 1)
+ except pexpect.TIMEOUT:
+ shell.sendline('~$')
+ shell.expect('>')
+ shell.sendline('quit')
+ return 1
+ except pexpect.EOF:
+ # unknown error
+ shell.sendline('~$')
+ shell.expect('>')
+ shell.sendline('quit')
+ return 2
+ # TODO: Change actions based on what server returned
+ if response == 0:
+ # OK response
+ return 0
+ else:
+ return 1
if __name__ == '__main__':
- main(sys.argv)
+ main(sys.argv)
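
check_host above drives the conmux console binary through pexpect: it sends a few newlines, matches whatever comes back against a table of known banners, and maps a timeout to 'bad' and an EOF to 'unknown'. A trimmed-down sketch of the same probe, using only the pexpect calls that appear in the diff (the console binary path defaults to the one above, and the banner list is shortened for illustration):

    import pexpect

    def probe(host, console_binary='/usr/local/conmux/bin/console'):
        # Return 0 (good), 1 (bad) or 2 (unknown), mirroring STATUS above.
        responses = [host + ' login:', 'login:', 'Connection refused']
        shell = pexpect.spawn('%s %s' % (console_binary, host))
        shell.send('\r\n')
        try:
            index = shell.expect(responses, timeout=1)
        except pexpect.TIMEOUT:
            return 1
        except pexpect.EOF:
            return 2
        # Only the host-specific login prompt counts as a healthy console.
        if index == 0:
            return 0
        return 1
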
diff --git a/frontend/afe/control_file.py b/frontend/afe/control_file.py
index fd129265..9b8602b5 100644
--- a/frontend/afe/control_file.py
+++ b/frontend/afe/control_file.py
@@ -14,11 +14,11 @@ AUTOTEST_DIR = os.path.abspath(os.path.join(
CLIENT_KERNEL_TEMPLATE = """\
kernel = '%(kernel)s'
def step_init():
- job.next_step([step_test])
- testkernel = job.kernel('%(kernel)s')
- %(kernel_config_line)s
- testkernel.install()
- testkernel.boot(args='%(kernel_args)s')
+ job.next_step([step_test])
+ testkernel = job.kernel('%(kernel)s')
+ %(kernel_config_line)s
+ testkernel.install()
+ testkernel.boot(args='%(kernel_args)s')
def step_test():
"""
@@ -26,13 +26,13 @@ def step_test():
SERVER_KERNEL_TEMPLATE = """\
kernel = '%%(kernel)s'
kernel_install_control = \"""
-%s pass
+%s pass
\"""
at = autotest.Autotest()
def install_kernel(machine):
- host = hosts.SSHHost(machine)
- at.run(kernel_install_control, host=host)
+ host = hosts.SSHHost(machine)
+ at.run(kernel_install_control, host=host)
job.parallel_simple(install_kernel, machines)
""" % CLIENT_KERNEL_TEMPLATE
@@ -41,73 +41,73 @@ CLIENT_STEP_TEMPLATE = "\tjob.next_step('step%d')\n"
def kernel_config_line(kernel, platform):
- if (not kernel.endswith('.rpm') and platform and
- platform.kernel_config):
- return "testkernel.config('%s')" % platform.kernel_config
- return ''
+ if (not kernel.endswith('.rpm') and platform and
+ platform.kernel_config):
+ return "testkernel.config('%s')" % platform.kernel_config
+ return ''
def read_control_file(test):
- control_file = open(os.path.join(AUTOTEST_DIR, test.path))
- control_contents = control_file.read()
- control_file.close()
- return control_contents
+ control_file = open(os.path.join(AUTOTEST_DIR, test.path))
+ control_contents = control_file.read()
+ control_file.close()
+ return control_contents
def get_kernel_stanza(kernel, platform=None, kernel_args='', is_server=False):
- if is_server:
- template = SERVER_KERNEL_TEMPLATE
- else:
- template = CLIENT_KERNEL_TEMPLATE
+ if is_server:
+ template = SERVER_KERNEL_TEMPLATE
+ else:
+ template = CLIENT_KERNEL_TEMPLATE
- stanza = template % {
- 'kernel' : kernel,
- 'kernel_config_line' : kernel_config_line(kernel, platform),
- 'kernel_args' : kernel_args}
- return stanza
+ stanza = template % {
+ 'kernel' : kernel,
+ 'kernel_config_line' : kernel_config_line(kernel, platform),
+ 'kernel_args' : kernel_args}
+ return stanza
def add_boilerplate_to_nested_steps(lines):
- # Look for a line that begins with 'def step_init():' while
- # being flexible on spacing. If it's found, this will be
- # a nested set of steps, so add magic to make it work.
- # See client/bin/job.py's step_engine for more info.
- if re.search(r'^(.*\n)*def\s+step_init\s*\(\s*\)\s*:', lines):
- lines += '\nreturn locals() '
- lines += '# Boilerplate magic for nested sets of steps'
- return lines
+ # Look for a line that begins with 'def step_init():' while
+ # being flexible on spacing. If it's found, this will be
+ # a nested set of steps, so add magic to make it work.
+ # See client/bin/job.py's step_engine for more info.
+ if re.search(r'^(.*\n)*def\s+step_init\s*\(\s*\)\s*:', lines):
+ lines += '\nreturn locals() '
+ lines += '# Boilerplate magic for nested sets of steps'
+ return lines
def format_step(item, lines):
- lines = indent_text(lines, '\t')
- lines = 'def step%d():\n%s' % (item, lines)
- return lines
+ lines = indent_text(lines, '\t')
+ lines = 'def step%d():\n%s' % (item, lines)
+ return lines
+
-
def get_tests_stanza(tests, is_server, prepend=[], append=[]):
- raw_control_files = [read_control_file(test) for test in tests]
- if is_server:
- return '\n'.join(raw_control_files)
- raw_steps = prepend + [add_boilerplate_to_nested_steps(step)
- for step in raw_control_files] + append
- steps = [format_step(index, step)
- for index, step in enumerate(raw_steps)]
- header = ''.join(CLIENT_STEP_TEMPLATE % i for i in xrange(len(steps)))
- return header + '\n' + '\n\n'.join(steps)
+ raw_control_files = [read_control_file(test) for test in tests]
+ if is_server:
+ return '\n'.join(raw_control_files)
+ raw_steps = prepend + [add_boilerplate_to_nested_steps(step)
+ for step in raw_control_files] + append
+ steps = [format_step(index, step)
+ for index, step in enumerate(raw_steps)]
+ header = ''.join(CLIENT_STEP_TEMPLATE % i for i in xrange(len(steps)))
+ return header + '\n' + '\n\n'.join(steps)
def indent_text(text, indent):
- lines = [indent + line for line in text.splitlines()]
- return '\n'.join(lines)
+ lines = [indent + line for line in text.splitlines()]
+ return '\n'.join(lines)
def generate_control(tests, kernel=None, platform=None, is_server=False):
- control_file_text = ''
- if kernel:
- control_file_text = get_kernel_stanza(kernel, platform,
- is_server=is_server)
- elif not is_server:
- control_file_text = 'def step_init():\n'
-
- control_file_text += get_tests_stanza(tests, is_server)
- return control_file_text
+ control_file_text = ''
+ if kernel:
+ control_file_text = get_kernel_stanza(kernel, platform,
+ is_server=is_server)
+ elif not is_server:
+ control_file_text = 'def step_init():\n'
+
+ control_file_text += get_tests_stanza(tests, is_server)
+ return control_file_text
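
Putting the pieces of control_file.py together: for a client job, generate_control emits the kernel stanza first (when a kernel was requested), then a header of job.next_step() calls, then one numbered step function per test whose body is that test's control file indented one level. The result is itself a control file, roughly of the following shape; the kernel version, test names and one-line test bodies are invented for illustration (real bodies are whatever read_control_file returns), and the testkernel.config() line is omitted since it only appears when the platform carries a kernel config:

    kernel = '2.6.18.1'
    def step_init():
        job.next_step([step_test])
        testkernel = job.kernel('2.6.18.1')
        testkernel.install()
        testkernel.boot(args='')
    def step_test():
        job.next_step('step0')
        job.next_step('step1')

    def step0():
        job.run_test('sleeptest')

    def step1():
        job.run_test('dbench')
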
diff --git a/frontend/afe/enum.py b/frontend/afe/enum.py
index c26ae815..284e04b5 100644
--- a/frontend/afe/enum.py
+++ b/frontend/afe/enum.py
@@ -5,69 +5,69 @@ Generic enumeration support.
__author__ = 'showard@google.com (Steve Howard)'
class Enum(object):
- """\
- Utility class to implement Enum-like functionality.
+ """\
+ Utility class to implement Enum-like functionality.
- >>> e = Enum('String one', 'String two')
- >>> e.STRING_ONE
- 0
- >>> e.STRING_TWO
- 1
- >>> e.choices()
- [(0, 'String one'), (1, 'String two')]
- >>> e.get_value('String one')
- 0
- >>> e.get_string(0)
- 'String one'
+ >>> e = Enum('String one', 'String two')
+ >>> e.STRING_ONE
+ 0
+ >>> e.STRING_TWO
+ 1
+ >>> e.choices()
+ [(0, 'String one'), (1, 'String two')]
+ >>> e.get_value('String one')
+ 0
+ >>> e.get_string(0)
+ 'String one'
- >>> e = Enum('Hello', 'Goodbye', string_values=True)
- >>> e.HELLO, e.GOODBYE
- ('Hello', 'Goodbye')
+ >>> e = Enum('Hello', 'Goodbye', string_values=True)
+ >>> e.HELLO, e.GOODBYE
+ ('Hello', 'Goodbye')
- >>> e = Enum('One', 'Two', start_value=1)
- >>> e.ONE
- 1
- >>> e.TWO
- 2
- """
- def __init__(self, *names, **kwargs):
- self.string_values = kwargs.get('string_values')
- start_value = kwargs.get('start_value', 0)
- self.names = names
- self.values = []
- for i, name in enumerate(names):
- if self.string_values:
- value = name
- else:
- value = i + start_value
- self.values.append(value)
- setattr(self, self.get_attr_name(name), value)
+ >>> e = Enum('One', 'Two', start_value=1)
+ >>> e.ONE
+ 1
+ >>> e.TWO
+ 2
+ """
+ def __init__(self, *names, **kwargs):
+ self.string_values = kwargs.get('string_values')
+ start_value = kwargs.get('start_value', 0)
+ self.names = names
+ self.values = []
+ for i, name in enumerate(names):
+ if self.string_values:
+ value = name
+ else:
+ value = i + start_value
+ self.values.append(value)
+ setattr(self, self.get_attr_name(name), value)
- @staticmethod
- def get_attr_name(string):
- return string.upper().replace(' ', '_')
+ @staticmethod
+ def get_attr_name(string):
+ return string.upper().replace(' ', '_')
- def choices(self):
- 'Return choice list suitable for Django model choices.'
- return zip(self.values, self.names)
+ def choices(self):
+ 'Return choice list suitable for Django model choices.'
+ return zip(self.values, self.names)
- def get_value(self, name):
- """\
- Convert a string name to it's corresponding value. If a value
- is passed in, it is returned.
- """
- if isinstance(name, int) and not self.string_values:
- # name is already a value
- return name
- return getattr(self, self.get_attr_name(name))
+ def get_value(self, name):
+ """\
+ Convert a string name to its corresponding value. If a value
+ is passed in, it is returned.
+ """
+ if isinstance(name, int) and not self.string_values:
+ # name is already a value
+ return name
+ return getattr(self, self.get_attr_name(name))
- def get_string(self, value):
- ' Given a value, get the string name for it.'
- if value not in self.values:
- raise ValueError('Value %s not in this enum' % value)
- index = self.values.index(value)
- return self.names[index]
+ def get_string(self, value):
+ ' Given a value, get the string name for it.'
+ if value not in self.values:
+ raise ValueError('Value %s not in this enum' % value)
+ index = self.values.index(value)
+ return self.names[index]
diff --git a/frontend/afe/feeds/feed.py b/frontend/afe/feeds/feed.py
index 801cfe05..d34a0b93 100644
--- a/frontend/afe/feeds/feed.py
+++ b/frontend/afe/feeds/feed.py
@@ -11,57 +11,57 @@ from django.http import HttpResponse, Http404
# name changed from feed to feed_view
def feed_view(request, url, feed_dict=None):
- if not feed_dict:
- raise Http404, "No feeds are registered."
+ if not feed_dict:
+ raise Http404, "No feeds are registered."
- try:
- slug, param = url.split('/', 1)
- except ValueError:
- slug, param = url, ''
+ try:
+ slug, param = url.split('/', 1)
+ except ValueError:
+ slug, param = url, ''
- try:
- f = feed_dict[slug]
- except KeyError:
- raise Http404, "Slug %r isn't registered." % slug
+ try:
+ f = feed_dict[slug]
+ except KeyError:
+ raise Http404, "Slug %r isn't registered." % slug
- try:
- # this line is changed from the Django library version to pass
- # in request instead of request.path
- feedgen = f(slug, request).get_feed(param)
- except feeds.FeedDoesNotExist:
- raise Http404, "Invalid feed parameters. Slug %r is valid, but other parameters, or lack thereof, are not." % slug
+ try:
+ # this line is changed from the Django library version to pass
+ # in request instead of request.path
+ feedgen = f(slug, request).get_feed(param)
+ except feeds.FeedDoesNotExist:
+ raise Http404, "Invalid feed parameters. Slug %r is valid, but other parameters, or lack thereof, are not." % slug
- response = HttpResponse(mimetype=feedgen.mime_type)
- feedgen.write(response, 'utf-8')
- return response
+ response = HttpResponse(mimetype=feedgen.mime_type)
+ feedgen.write(response, 'utf-8')
+ return response
# end copied code
class JobFeed(feeds.Feed):
- """\
- Common feed functionality.
- """
- link = "/results"
- title_template = "feeds/job_feed_title.html"
- description_template = "feeds/job_feed_description.html"
+ """\
+ Common feed functionality.
+ """
+ link = "/results"
+ title_template = "feeds/job_feed_title.html"
+ description_template = "feeds/job_feed_description.html"
- NUM_ITEMS = 20
+ NUM_ITEMS = 20
- def __init__(self, slug, request):
- super(JobFeed, self).__init__(slug, request.path)
- server_hostname = django.http.get_host(request)
- self.full_link = 'http://' + server_hostname + self.link
+ def __init__(self, slug, request):
+ super(JobFeed, self).__init__(slug, request.path)
+ server_hostname = django.http.get_host(request)
+ self.full_link = 'http://' + server_hostname + self.link
- def title(self, obj):
- return "Automated Test Framework %s Jobs" % obj.capitalize()
+ def title(self, obj):
+ return "Automated Test Framework %s Jobs" % obj.capitalize()
- def get_object(self, bits):
- # bits[0] should be a job status
- return bits[0]
+ def get_object(self, bits):
+ # bits[0] should be a job status
+ return bits[0]
- def items(self, obj):
- item_list = models.HostQueueEntry.objects.filter(
- status__iexact=obj).select_related()
- return item_list.order_by('-id')[:self.NUM_ITEMS]
+ def items(self, obj):
+ item_list = models.HostQueueEntry.objects.filter(
+ status__iexact=obj).select_related()
+ return item_list.order_by('-id')[:self.NUM_ITEMS]
- def item_link(self, obj):
- return '%s/%s-%s' % (self.full_link, obj.job.id, obj.job.owner)
+ def item_link(self, obj):
+ return '%s/%s-%s' % (self.full_link, obj.job.id, obj.job.owner)
diff --git a/frontend/afe/json_rpc/proxy.py b/frontend/afe/json_rpc/proxy.py
index d33fc963..865ab3c0 100644
--- a/frontend/afe/json_rpc/proxy.py
+++ b/frontend/afe/json_rpc/proxy.py
@@ -39,20 +39,20 @@ class ServiceProxy(object):
return ServiceProxy(self.__serviceURL, name, self.__headers)
def __call__(self, *args, **kwargs):
- postdata = json_encoder.encode({"method": self.__serviceName,
- 'params': args + (kwargs,),
- 'id':'jsonrpc'})
- request = urllib2.Request(self.__serviceURL, data=postdata,
- headers=self.__headers)
- respdata = urllib2.urlopen(request).read()
- try:
- resp = json_decoder.decode(respdata)
- except ValueError:
- raise JSONRPCException('Error decoding JSON reponse:\n' + respdata)
- if resp['error'] != None:
- error_message = (resp['error']['name'] + ': ' +
- resp['error']['message'] + '\n' +
- resp['error']['traceback'])
- raise JSONRPCException(error_message)
- else:
- return resp['result']
+ postdata = json_encoder.encode({"method": self.__serviceName,
+ 'params': args + (kwargs,),
+ 'id':'jsonrpc'})
+ request = urllib2.Request(self.__serviceURL, data=postdata,
+ headers=self.__headers)
+ respdata = urllib2.urlopen(request).read()
+ try:
+ resp = json_decoder.decode(respdata)
+ except ValueError:
+ raise JSONRPCException('Error decoding JSON response:\n' + respdata)
+ if resp['error'] != None:
+ error_message = (resp['error']['name'] + ': ' +
+ resp['error']['message'] + '\n' +
+ resp['error']['traceback'])
+ raise JSONRPCException(error_message)
+ else:
+ return resp['result']
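
ServiceProxy turns attribute access into JSON-RPC calls: each call POSTs a {"method", "params", "id"} document to the service URL and either returns the "result" field or raises JSONRPCException built from the "error" field. A minimal usage sketch; the import path, server URL and RPC method name are assumptions made for illustration, and the constructor is assumed to default the name and headers arguments as the recursive ServiceProxy(...) call above suggests:

    from autotest_lib.frontend.afe.json_rpc import proxy

    rpc = proxy.ServiceProxy('http://autotest-server/afe/server/rpc/')
    try:
        # Serialized as {"method": "get_hosts", "params": [{}], "id": "jsonrpc"}
        # because keyword arguments ride along as a trailing dict.
        hosts = rpc.get_hosts()
    except proxy.JSONRPCException, details:
        print 'RPC failed:', details
    else:
        print hosts
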
diff --git a/frontend/afe/json_rpc/serviceHandler.py b/frontend/afe/json_rpc/serviceHandler.py
index afd0b040..630363bd 100644
--- a/frontend/afe/json_rpc/serviceHandler.py
+++ b/frontend/afe/json_rpc/serviceHandler.py
@@ -69,7 +69,7 @@ class ServiceHandler(object):
def __init__(self, service):
self.service=service
-
+
def handleRequest(self, json):
err=None
err_traceback = None
@@ -77,7 +77,7 @@ class ServiceHandler(object):
id_=''
#print 'Request:', json
-
+
try:
req = self.translateRequest(json)
except ServiceRequestNotTranslatable, e:
diff --git a/frontend/afe/management.py b/frontend/afe/management.py
index c8f83804..506a218a 100644
--- a/frontend/afe/management.py
+++ b/frontend/afe/management.py
@@ -8,32 +8,32 @@ from django.contrib import auth
BASIC_ADMIN = 'Basic admin'
def create_admin_group(app, created_models, verbosity, **kwargs):
- """\
- Create a basic admin group with permissions for managing basic autotest
- objects.
- """
- admin_group, created = auth.models.Group.objects.get_or_create(
- name=BASIC_ADMIN)
- admin_group.save() # must save before adding permissions
- PermissionModel = auth.models.Permission
- have_permissions = list(admin_group.permissions.all())
- for model_name in ('host', 'label', 'test', 'acl_group'):
- for permission_type in ('add', 'change', 'delete'):
- codename = permission_type + '_' + model_name
- permissions = list(PermissionModel.objects.filter(
- codename=codename))
- if len(permissions) == 0:
- print ' No permission ' + codename
- continue
- for permission in permissions:
- if permission not in have_permissions:
- print ' Adding permission ' + codename
- admin_group.permissions.add(permission)
- if created:
- print 'Created group "%s"' % BASIC_ADMIN
- else:
- print 'Group "%s" already exists' % BASIC_ADMIN
+ """\
+ Create a basic admin group with permissions for managing basic autotest
+ objects.
+ """
+ admin_group, created = auth.models.Group.objects.get_or_create(
+ name=BASIC_ADMIN)
+ admin_group.save() # must save before adding permissions
+ PermissionModel = auth.models.Permission
+ have_permissions = list(admin_group.permissions.all())
+ for model_name in ('host', 'label', 'test', 'acl_group'):
+ for permission_type in ('add', 'change', 'delete'):
+ codename = permission_type + '_' + model_name
+ permissions = list(PermissionModel.objects.filter(
+ codename=codename))
+ if len(permissions) == 0:
+ print ' No permission ' + codename
+ continue
+ for permission in permissions:
+ if permission not in have_permissions:
+ print ' Adding permission ' + codename
+ admin_group.permissions.add(permission)
+ if created:
+ print 'Created group "%s"' % BASIC_ADMIN
+ else:
+ print 'Group "%s" already exists' % BASIC_ADMIN
dispatcher.connect(create_admin_group, sender=frontend.afe.models,
- signal=signals.post_syncdb)
+ signal=signals.post_syncdb)
diff --git a/frontend/afe/model_logic.py b/frontend/afe/model_logic.py
index f1197103..9c248e17 100644
--- a/frontend/afe/model_logic.py
+++ b/frontend/afe/model_logic.py
@@ -7,575 +7,575 @@ from django.utils import datastructures
class ValidationError(Exception):
- """\
- Data validation error in adding or updating an object. The associated
- value is a dictionary mapping field names to error strings.
- """
+ """\
+ Data validation error in adding or updating an object. The associated
+ value is a dictionary mapping field names to error strings.
+ """
class ExtendedManager(dbmodels.Manager):
- """\
- Extended manager supporting subquery filtering.
- """
-
- class _RawSqlQ(dbmodels.Q):
- """\
- A Django "Q" object constructed with a raw SQL query.
- """
- def __init__(self, sql, params=[], joins={}):
- """
- sql: the SQL to go into the WHERE clause
-
- params: substitution params for the WHERE SQL
-
- joins: a dict mapping alias to (table, join_type,
- condition). This converts to the SQL:
- "join_type table AS alias ON condition"
- For example:
- alias='host_hqe',
- table='host_queue_entries',
- join_type='INNER JOIN',
- condition='host_hqe.host_id=hosts.id'
- """
- self._sql = sql
- self._params = params[:]
- self._joins = datastructures.SortedDict(joins)
-
-
- def get_sql(self, opts):
- return (self._joins,
- [self._sql],
- self._params)
-
-
- @staticmethod
- def _get_quoted_field(table, field):
- return (backend.quote_name(table) + '.' +
- backend.quote_name(field))
-
-
- @classmethod
- def _get_sql_string_for(cls, value):
- """
- >>> ExtendedManager._get_sql_string_for((1L, 2L))
- '(1,2)'
- >>> ExtendedManager._get_sql_string_for(['abc', 'def'])
- 'abc,def'
- """
- if isinstance(value, list):
- return ','.join(cls._get_sql_string_for(item)
- for item in value)
- if isinstance(value, tuple):
- return '(%s)' % cls._get_sql_string_for(list(value))
- if isinstance(value, long):
- return str(int(value))
- return str(value)
-
-
- @staticmethod
- def _get_sql_query_for(query_object, select_field):
- query_table = query_object.model._meta.db_table
- quoted_field = ExtendedManager._get_quoted_field(query_table,
- select_field)
- _, where, params = query_object._get_sql_clause()
- # where includes the FROM clause
- return '(SELECT DISTINCT ' + quoted_field + where + ')', params
-
-
- def _get_key_on_this_table(self, key_field=None):
- if key_field is None:
- # default to primary key
- key_field = self.model._meta.pk.column
- return self._get_quoted_field(self.model._meta.db_table,
- key_field)
-
-
- def _do_subquery_filter(self, subquery_key, subquery, subquery_alias,
- this_table_key=None, not_in=False):
- """
- This method constructs SQL queries to accomplish IN/NOT IN
- subquery filtering using explicit joins. It does this by
- LEFT JOINing onto the subquery and then checking to see if
- the joined column is NULL or not.
-
- We use explicit joins instead of the SQL IN operator because
- MySQL (at least some versions) considers all IN subqueries to be
- dependent, so using explicit joins can be MUCH faster.
-
- The query we're going for is:
- SELECT * FROM <this table>
- LEFT JOIN (<subquery>) AS <subquery_alias>
- ON <subquery_alias>.<subquery_key> =
- <this table>.<this_table_key>
- WHERE <subquery_alias>.<subquery_key> IS [NOT] NULL
- """
- subselect, params = self._get_sql_query_for(subquery,
- subquery_key)
-
- this_full_key = self._get_key_on_this_table(this_table_key)
- alias_full_key = self._get_quoted_field(subquery_alias,
- subquery_key)
- join_condition = alias_full_key + ' = ' + this_full_key
- joins = {subquery_alias : (subselect, # join table
- 'LEFT JOIN', # join type
- join_condition)} # join on
-
- if not_in:
- where_sql = alias_full_key + ' IS NULL'
- else:
- where_sql = alias_full_key + ' IS NOT NULL'
- filter_obj = self._RawSqlQ(where_sql, params, joins)
- return self.complex_filter(filter_obj)
-
-
- def filter_in_subquery(self, subquery_key, subquery, subquery_alias,
- this_table_key=None):
- """\
- Construct a filter to perform a subquery match, i.e.
- WHERE id IN (SELECT host_id FROM ... WHERE ...)
- -subquery_key - the field to select in the subquery (host_id
- above)
- -subquery - a query object for the subquery
- -subquery_alias - a logical name for the query, to be used in
- the SQL (i.e. 'valid_hosts')
- -this_table_key - the field to match (id above). Defaults to
- this table's primary key.
- """
- return self._do_subquery_filter(subquery_key, subquery,
- subquery_alias, this_table_key)
-
-
- def filter_not_in_subquery(self, subquery_key, subquery,
- subquery_alias, this_table_key=None):
- 'Like filter_in_subquery, but use NOT IN rather than IN.'
- return self._do_subquery_filter(subquery_key, subquery,
- subquery_alias, this_table_key,
- not_in=True)
-
-
- def create_in_bulk(self, fields, values):
- """
- Creates many objects with a single SQL query.
- field - list of field names (model attributes, not actual DB
- field names) for which values will be specified.
- values - list of tuples containing values. Each tuple contains
- the values for the specified fields for a single
- object.
- Example: Host.objects.create_in_bulk(['hostname', 'status'],
- [('host1', 'Ready'), ('host2', 'Running')])
- """
- if not values:
- return
- field_dict = self.model.get_field_dict()
- field_names = [field_dict[field].column for field in fields]
- sql = 'INSERT INTO %s %s' % (
- self.model._meta.db_table,
- self._get_sql_string_for(tuple(field_names)))
- sql += ' VALUES ' + self._get_sql_string_for(list(values))
- cursor = connection.cursor()
- cursor.execute(sql)
- connection._commit()
-
-
- def delete_in_bulk(self, ids):
- """
- Deletes many objects with a single SQL query. ids should be a
- list of object ids to delete. Nonexistent ids will be silently
- ignored.
- """
- if not ids:
- return
- sql = 'DELETE FROM %s WHERE id IN %s' % (
- self.model._meta.db_table,
- self._get_sql_string_for(tuple(ids)))
- cursor = connection.cursor()
- cursor.execute(sql)
- connection._commit()
+ """\
+ Extended manager supporting subquery filtering.
+ """
+
+ class _RawSqlQ(dbmodels.Q):
+ """\
+ A Django "Q" object constructed with a raw SQL query.
+ """
+ def __init__(self, sql, params=[], joins={}):
+ """
+ sql: the SQL to go into the WHERE clause
+
+ params: substitution params for the WHERE SQL
+
+ joins: a dict mapping alias to (table, join_type,
+ condition). This converts to the SQL:
+ "join_type table AS alias ON condition"
+ For example:
+ alias='host_hqe',
+ table='host_queue_entries',
+ join_type='INNER JOIN',
+ condition='host_hqe.host_id=hosts.id'
+ """
+ self._sql = sql
+ self._params = params[:]
+ self._joins = datastructures.SortedDict(joins)
+
+
+ def get_sql(self, opts):
+ return (self._joins,
+ [self._sql],
+ self._params)
+
+
+ @staticmethod
+ def _get_quoted_field(table, field):
+ return (backend.quote_name(table) + '.' +
+ backend.quote_name(field))
+
+
+ @classmethod
+ def _get_sql_string_for(cls, value):
+ """
+ >>> ExtendedManager._get_sql_string_for((1L, 2L))
+ '(1,2)'
+ >>> ExtendedManager._get_sql_string_for(['abc', 'def'])
+ 'abc,def'
+ """
+ if isinstance(value, list):
+ return ','.join(cls._get_sql_string_for(item)
+ for item in value)
+ if isinstance(value, tuple):
+ return '(%s)' % cls._get_sql_string_for(list(value))
+ if isinstance(value, long):
+ return str(int(value))
+ return str(value)
+
+
+ @staticmethod
+ def _get_sql_query_for(query_object, select_field):
+ query_table = query_object.model._meta.db_table
+ quoted_field = ExtendedManager._get_quoted_field(query_table,
+ select_field)
+ _, where, params = query_object._get_sql_clause()
+ # where includes the FROM clause
+ return '(SELECT DISTINCT ' + quoted_field + where + ')', params
+
+
+ def _get_key_on_this_table(self, key_field=None):
+ if key_field is None:
+ # default to primary key
+ key_field = self.model._meta.pk.column
+ return self._get_quoted_field(self.model._meta.db_table,
+ key_field)
+
+
+ def _do_subquery_filter(self, subquery_key, subquery, subquery_alias,
+ this_table_key=None, not_in=False):
+ """
+ This method constructs SQL queries to accomplish IN/NOT IN
+ subquery filtering using explicit joins. It does this by
+ LEFT JOINing onto the subquery and then checking to see if
+ the joined column is NULL or not.
+
+ We use explicit joins instead of the SQL IN operator because
+ MySQL (at least some versions) considers all IN subqueries to be
+ dependent, so using explicit joins can be MUCH faster.
+
+ The query we're going for is:
+ SELECT * FROM <this table>
+ LEFT JOIN (<subquery>) AS <subquery_alias>
+ ON <subquery_alias>.<subquery_key> =
+ <this table>.<this_table_key>
+ WHERE <subquery_alias>.<subquery_key> IS [NOT] NULL
+ """
+ subselect, params = self._get_sql_query_for(subquery,
+ subquery_key)
+
+ this_full_key = self._get_key_on_this_table(this_table_key)
+ alias_full_key = self._get_quoted_field(subquery_alias,
+ subquery_key)
+ join_condition = alias_full_key + ' = ' + this_full_key
+ joins = {subquery_alias : (subselect, # join table
+ 'LEFT JOIN', # join type
+ join_condition)} # join on
+
+ if not_in:
+ where_sql = alias_full_key + ' IS NULL'
+ else:
+ where_sql = alias_full_key + ' IS NOT NULL'
+ filter_obj = self._RawSqlQ(where_sql, params, joins)
+ return self.complex_filter(filter_obj)
+
+
+ def filter_in_subquery(self, subquery_key, subquery, subquery_alias,
+ this_table_key=None):
+ """\
+ Construct a filter to perform a subquery match, i.e.
+ WHERE id IN (SELECT host_id FROM ... WHERE ...)
+ -subquery_key - the field to select in the subquery (host_id
+ above)
+ -subquery - a query object for the subquery
+ -subquery_alias - a logical name for the query, to be used in
+ the SQL (i.e. 'valid_hosts')
+ -this_table_key - the field to match (id above). Defaults to
+ this table's primary key.
+ """
+ return self._do_subquery_filter(subquery_key, subquery,
+ subquery_alias, this_table_key)
+
+
+ def filter_not_in_subquery(self, subquery_key, subquery,
+ subquery_alias, this_table_key=None):
+ 'Like filter_in_subquery, but use NOT IN rather than IN.'
+ return self._do_subquery_filter(subquery_key, subquery,
+ subquery_alias, this_table_key,
+ not_in=True)
+
+
+ def create_in_bulk(self, fields, values):
+ """
+ Creates many objects with a single SQL query.
+ field - list of field names (model attributes, not actual DB
+ field names) for which values will be specified.
+ values - list of tuples containing values. Each tuple contains
+ the values for the specified fields for a single
+ object.
+ Example: Host.objects.create_in_bulk(['hostname', 'status'],
+ [('host1', 'Ready'), ('host2', 'Running')])
+ """
+ if not values:
+ return
+ field_dict = self.model.get_field_dict()
+ field_names = [field_dict[field].column for field in fields]
+ sql = 'INSERT INTO %s %s' % (
+ self.model._meta.db_table,
+ self._get_sql_string_for(tuple(field_names)))
+ sql += ' VALUES ' + self._get_sql_string_for(list(values))
+ cursor = connection.cursor()
+ cursor.execute(sql)
+ connection._commit()
+
+
+ def delete_in_bulk(self, ids):
+ """
+ Deletes many objects with a single SQL query. ids should be a
+ list of object ids to delete. Nonexistent ids will be silently
+ ignored.
+ """
+ if not ids:
+ return
+ sql = 'DELETE FROM %s WHERE id IN %s' % (
+ self.model._meta.db_table,
+ self._get_sql_string_for(tuple(ids)))
+ cursor = connection.cursor()
+ cursor.execute(sql)
+ connection._commit()
class ValidObjectsManager(ExtendedManager):
- """
- Manager returning only objects with invalid=False.
- """
- def get_query_set(self):
- queryset = super(ValidObjectsManager, self).get_query_set()
- return queryset.filter(invalid=False)
+ """
+ Manager returning only objects with invalid=False.
+ """
+ def get_query_set(self):
+ queryset = super(ValidObjectsManager, self).get_query_set()
+ return queryset.filter(invalid=False)
class ModelExtensions(object):
- """\
- Mixin with convenience functions for models, built on top of the
- default Django model functions.
- """
- # TODO: at least some of these functions really belong in a custom
- # Manager class
-
- field_dict = None
- # subclasses should override if they want to support smart_get() by name
- name_field = None
-
-
- @classmethod
- def get_field_dict(cls):
- if cls.field_dict is None:
- cls.field_dict = {}
- for field in cls._meta.fields:
- cls.field_dict[field.name] = field
- return cls.field_dict
-
-
- @classmethod
- def clean_foreign_keys(cls, data):
- """\
- -Convert foreign key fields in data from <field>_id to just
- <field>.
- -replace foreign key objects with their IDs
- This method modifies data in-place.
- """
- for field in cls._meta.fields:
- if not field.rel:
- continue
- if (field.attname != field.name and
- field.attname in data):
- data[field.name] = data[field.attname]
- del data[field.attname]
- value = data[field.name]
- if isinstance(value, dbmodels.Model):
- data[field.name] = value.id
-
-
- # TODO(showard) - is there a way to not have to do this?
- @classmethod
- def provide_default_values(cls, data):
- """\
- Provide default values for fields with default values which have
- nothing passed in.
-
- For CharField and TextField fields with "blank=True", if nothing
- is passed, we fill in an empty string value, even if there's no
- default set.
- """
- new_data = dict(data)
- field_dict = cls.get_field_dict()
- for name, obj in field_dict.iteritems():
- if data.get(name) is not None:
- continue
- if obj.default is not dbmodels.fields.NOT_PROVIDED:
- new_data[name] = obj.default
- elif (isinstance(obj, dbmodels.CharField) or
- isinstance(obj, dbmodels.TextField)):
- new_data[name] = ''
- return new_data
-
-
- @classmethod
- def convert_human_readable_values(cls, data, to_human_readable=False):
- """\
- Performs conversions on user-supplied field data, to make it
- easier for users to pass human-readable data.
-
- For all fields that have choice sets, convert their values
- from human-readable strings to enum values, if necessary. This
- allows users to pass strings instead of the corresponding
- integer values.
-
- For all foreign key fields, call smart_get with the supplied
- data. This allows the user to pass either an ID value or
- the name of the object as a string.
-
- If to_human_readable=True, perform the inverse - i.e. convert
- numeric values to human readable values.
-
- This method modifies data in-place.
- """
- field_dict = cls.get_field_dict()
- for field_name in data:
- if data[field_name] is None:
- continue
- field_obj = field_dict[field_name]
- # convert enum values
- if field_obj.choices:
- for choice_data in field_obj.choices:
- # choice_data is (value, name)
- if to_human_readable:
- from_val, to_val = choice_data
- else:
- to_val, from_val = choice_data
- if from_val == data[field_name]:
- data[field_name] = to_val
- break
- # convert foreign key values
- elif field_obj.rel:
- dest_obj = field_obj.rel.to.smart_get(
- data[field_name])
- if (to_human_readable and
- dest_obj.name_field is not None):
- data[field_name] = (
- getattr(dest_obj,
- dest_obj.name_field))
- else:
- data[field_name] = dest_obj.id
-
-
- @classmethod
- def validate_field_names(cls, data):
- 'Checks for extraneous fields in data.'
- errors = {}
- field_dict = cls.get_field_dict()
- for field_name in data:
- if field_name not in field_dict:
- errors[field_name] = 'No field of this name'
- return errors
-
-
- @classmethod
- def prepare_data_args(cls, data, kwargs):
- 'Common preparation for add_object and update_object'
- data = dict(data) # don't modify the default keyword arg
- data.update(kwargs)
- # must check for extraneous field names here, while we have the
- # data in a dict
- errors = cls.validate_field_names(data)
- if errors:
- raise ValidationError(errors)
- cls.convert_human_readable_values(data)
- return data
-
-
- def validate_unique(self):
- """\
- Validate that unique fields are unique. Django manipulators do
- this too, but they're a huge pain to use manually. Trust me.
- """
- errors = {}
- cls = type(self)
- field_dict = self.get_field_dict()
- manager = cls.get_valid_manager()
- for field_name, field_obj in field_dict.iteritems():
- if not field_obj.unique:
- continue
-
- value = getattr(self, field_name)
- existing_objs = manager.filter(**{field_name : value})
- num_existing = existing_objs.count()
-
- if num_existing == 0:
- continue
- if num_existing == 1 and existing_objs[0].id == self.id:
- continue
- errors[field_name] = (
- 'This value must be unique (%s)' % (value))
- return errors
-
-
- def do_validate(self):
- errors = self.validate()
- unique_errors = self.validate_unique()
- for field_name, error in unique_errors.iteritems():
- errors.setdefault(field_name, error)
- if errors:
- raise ValidationError(errors)
-
-
- # actually (externally) useful methods follow
-
- @classmethod
- def add_object(cls, data={}, **kwargs):
- """\
- Returns a new object created with the given data (a dictionary
- mapping field names to values). Merges any extra keyword args
- into data.
- """
- data = cls.prepare_data_args(data, kwargs)
- data = cls.provide_default_values(data)
- obj = cls(**data)
- obj.do_validate()
- obj.save()
- return obj
-
-
- def update_object(self, data={}, **kwargs):
- """\
- Updates the object with the given data (a dictionary mapping
- field names to values). Merges any extra keyword args into
- data.
- """
- data = self.prepare_data_args(data, kwargs)
- for field_name, value in data.iteritems():
- if value is not None:
- setattr(self, field_name, value)
- self.do_validate()
- self.save()
-
-
- @classmethod
- def query_objects(cls, filter_data, valid_only=True):
- """\
- Returns a QuerySet object for querying the given model_class
- with the given filter_data. Optional special arguments in
- filter_data include:
- -query_start: index of first return to return
- -query_limit: maximum number of results to return
- -sort_by: list of fields to sort on. prefixing a '-' onto a
- field name changes the sort to descending order.
- -extra_args: keyword args to pass to query.extra() (see Django
- DB layer documentation)
- -extra_where: extra WHERE clause to append
- """
- query_start = filter_data.pop('query_start', None)
- query_limit = filter_data.pop('query_limit', None)
- if query_start and not query_limit:
- raise ValueError('Cannot pass query_start without '
- 'query_limit')
- sort_by = filter_data.pop('sort_by', [])
- extra_args = filter_data.pop('extra_args', {})
- extra_where = filter_data.pop('extra_where', None)
- if extra_where:
- extra_args.setdefault('where', []).append(extra_where)
-
- # filters
- query_dict = {}
- for field, value in filter_data.iteritems():
- query_dict[field] = value
- if valid_only:
- manager = cls.get_valid_manager()
- else:
- manager = cls.objects
- query = manager.filter(**query_dict).distinct()
-
- # other arguments
- if extra_args:
- query = query.extra(**extra_args)
-
- # sorting + paging
- assert isinstance(sort_by, list) or isinstance(sort_by, tuple)
- query = query.order_by(*sort_by)
- if query_start is not None and query_limit is not None:
- query_limit += query_start
- return query[query_start:query_limit]
-
-
- @classmethod
- def query_count(cls, filter_data):
- """\
- Like query_objects, but retreive only the count of results.
- """
- filter_data.pop('query_start', None)
- filter_data.pop('query_limit', None)
- return cls.query_objects(filter_data).count()
-
-
- @classmethod
- def clean_object_dicts(cls, field_dicts):
- """\
- Take a list of dicts corresponding to object (as returned by
- query.values()) and clean the data to be more suitable for
- returning to the user.
- """
- for i in range(len(field_dicts)):
- cls.clean_foreign_keys(field_dicts[i])
- cls.convert_human_readable_values(
- field_dicts[i], to_human_readable=True)
-
-
- @classmethod
- def list_objects(cls, filter_data):
- """\
- Like query_objects, but return a list of dictionaries.
- """
- query = cls.query_objects(filter_data)
- field_dicts = list(query.values())
- cls.clean_object_dicts(field_dicts)
- return field_dicts
-
-
- @classmethod
- def smart_get(cls, *args, **kwargs):
- """\
- smart_get(integer) -> get object by ID
- smart_get(string) -> get object by name_field
- smart_get(keyword args) -> normal ModelClass.objects.get()
- """
- assert bool(args) ^ bool(kwargs)
- if args:
- assert len(args) == 1
- arg = args[0]
- if isinstance(arg, int) or isinstance(arg, long):
- return cls.objects.get(id=arg)
- if isinstance(arg, str) or isinstance(arg, unicode):
- return cls.objects.get(
- **{cls.name_field : arg})
- raise ValueError(
- 'Invalid positional argument: %s (%s)' % (
- str(arg), type(arg)))
- return cls.objects.get(**kwargs)
-
-
- def get_object_dict(self):
- """\
- Return a dictionary mapping fields to this object's values.
- """
- object_dict = dict((field_name, getattr(self, field_name))
- for field_name
- in self.get_field_dict().iterkeys())
- self.clean_object_dicts([object_dict])
- return object_dict
-
-
- @classmethod
- def get_valid_manager(cls):
- return cls.objects
+ """\
+ Mixin with convenience functions for models, built on top of the
+ default Django model functions.
+ """
+ # TODO: at least some of these functions really belong in a custom
+ # Manager class
+
+ field_dict = None
+ # subclasses should override if they want to support smart_get() by name
+ name_field = None
+
+
+ @classmethod
+ def get_field_dict(cls):
+ if cls.field_dict is None:
+ cls.field_dict = {}
+ for field in cls._meta.fields:
+ cls.field_dict[field.name] = field
+ return cls.field_dict
+
+
+ @classmethod
+ def clean_foreign_keys(cls, data):
+ """\
+ -Convert foreign key fields in data from <field>_id to just
+ <field>.
+ -replace foreign key objects with their IDs
+ This method modifies data in-place.
+ """
+ for field in cls._meta.fields:
+ if not field.rel:
+ continue
+ if (field.attname != field.name and
+ field.attname in data):
+ data[field.name] = data[field.attname]
+ del data[field.attname]
+ value = data[field.name]
+ if isinstance(value, dbmodels.Model):
+ data[field.name] = value.id
+
+
+ # TODO(showard) - is there a way to not have to do this?
+ @classmethod
+ def provide_default_values(cls, data):
+ """\
+        Provide default values for any fields that have defaults and were
+        not passed in.
+
+ For CharField and TextField fields with "blank=True", if nothing
+ is passed, we fill in an empty string value, even if there's no
+ default set.
+ """
+ new_data = dict(data)
+ field_dict = cls.get_field_dict()
+ for name, obj in field_dict.iteritems():
+ if data.get(name) is not None:
+ continue
+ if obj.default is not dbmodels.fields.NOT_PROVIDED:
+ new_data[name] = obj.default
+ elif (isinstance(obj, dbmodels.CharField) or
+ isinstance(obj, dbmodels.TextField)):
+ new_data[name] = ''
+ return new_data
+
+
+ @classmethod
+ def convert_human_readable_values(cls, data, to_human_readable=False):
+ """\
+ Performs conversions on user-supplied field data, to make it
+ easier for users to pass human-readable data.
+
+ For all fields that have choice sets, convert their values
+ from human-readable strings to enum values, if necessary. This
+ allows users to pass strings instead of the corresponding
+ integer values.
+
+ For all foreign key fields, call smart_get with the supplied
+ data. This allows the user to pass either an ID value or
+ the name of the object as a string.
+
+ If to_human_readable=True, perform the inverse - i.e. convert
+ numeric values to human readable values.
+
+ This method modifies data in-place.
+ """
+ field_dict = cls.get_field_dict()
+ for field_name in data:
+ if data[field_name] is None:
+ continue
+ field_obj = field_dict[field_name]
+ # convert enum values
+ if field_obj.choices:
+ for choice_data in field_obj.choices:
+ # choice_data is (value, name)
+ if to_human_readable:
+ from_val, to_val = choice_data
+ else:
+ to_val, from_val = choice_data
+ if from_val == data[field_name]:
+ data[field_name] = to_val
+ break
+ # convert foreign key values
+ elif field_obj.rel:
+ dest_obj = field_obj.rel.to.smart_get(
+ data[field_name])
+ if (to_human_readable and
+ dest_obj.name_field is not None):
+ data[field_name] = (
+ getattr(dest_obj,
+ dest_obj.name_field))
+ else:
+ data[field_name] = dest_obj.id
+
+
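As a hedged illustration against the Job model defined later in this change (its priority field has an integer choice set):

    data = {'priority': 'High'}
    Job.convert_human_readable_values(data)
    # data['priority'] now holds the integer enum value stored in the DB
    Job.convert_human_readable_values(data, to_human_readable=True)
    # data['priority'] is 'High' again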
+ @classmethod
+ def validate_field_names(cls, data):
+ 'Checks for extraneous fields in data.'
+ errors = {}
+ field_dict = cls.get_field_dict()
+ for field_name in data:
+ if field_name not in field_dict:
+ errors[field_name] = 'No field of this name'
+ return errors
+
+
+ @classmethod
+ def prepare_data_args(cls, data, kwargs):
+ 'Common preparation for add_object and update_object'
+ data = dict(data) # don't modify the default keyword arg
+ data.update(kwargs)
+ # must check for extraneous field names here, while we have the
+ # data in a dict
+ errors = cls.validate_field_names(data)
+ if errors:
+ raise ValidationError(errors)
+ cls.convert_human_readable_values(data)
+ return data
+
+
+ def validate_unique(self):
+ """\
+ Validate that unique fields are unique. Django manipulators do
+ this too, but they're a huge pain to use manually. Trust me.
+ """
+ errors = {}
+ cls = type(self)
+ field_dict = self.get_field_dict()
+ manager = cls.get_valid_manager()
+ for field_name, field_obj in field_dict.iteritems():
+ if not field_obj.unique:
+ continue
+
+ value = getattr(self, field_name)
+ existing_objs = manager.filter(**{field_name : value})
+ num_existing = existing_objs.count()
+
+ if num_existing == 0:
+ continue
+ if num_existing == 1 and existing_objs[0].id == self.id:
+ continue
+ errors[field_name] = (
+ 'This value must be unique (%s)' % (value))
+ return errors
+
+
+ def do_validate(self):
+ errors = self.validate()
+ unique_errors = self.validate_unique()
+ for field_name, error in unique_errors.iteritems():
+ errors.setdefault(field_name, error)
+ if errors:
+ raise ValidationError(errors)
+
+
+ # actually (externally) useful methods follow
+
+ @classmethod
+ def add_object(cls, data={}, **kwargs):
+ """\
+ Returns a new object created with the given data (a dictionary
+ mapping field names to values). Merges any extra keyword args
+ into data.
+ """
+ data = cls.prepare_data_args(data, kwargs)
+ data = cls.provide_default_values(data)
+ obj = cls(**data)
+ obj.do_validate()
+ obj.save()
+ return obj
+
+
+ def update_object(self, data={}, **kwargs):
+ """\
+ Updates the object with the given data (a dictionary mapping
+ field names to values). Merges any extra keyword args into
+ data.
+ """
+ data = self.prepare_data_args(data, kwargs)
+ for field_name, value in data.iteritems():
+ if value is not None:
+ setattr(self, field_name, value)
+ self.do_validate()
+ self.save()
+
+
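A minimal hedged sketch against the Label model from later in this change (the values are illustrative):

    label = Label.add_object(name='regression', platform=False)
    label.update_object(kernel_config='http://example.com/config')
    # extra keyword args are merged into the data dictionary:
    label.update_object({}, platform=True)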
+ @classmethod
+ def query_objects(cls, filter_data, valid_only=True):
+ """\
+        Returns a QuerySet object for querying this model class
+ with the given filter_data. Optional special arguments in
+ filter_data include:
+        -query_start: index of first result to return
+ -query_limit: maximum number of results to return
+ -sort_by: list of fields to sort on. prefixing a '-' onto a
+ field name changes the sort to descending order.
+ -extra_args: keyword args to pass to query.extra() (see Django
+ DB layer documentation)
+ -extra_where: extra WHERE clause to append
+ """
+ query_start = filter_data.pop('query_start', None)
+ query_limit = filter_data.pop('query_limit', None)
+ if query_start and not query_limit:
+ raise ValueError('Cannot pass query_start without '
+ 'query_limit')
+ sort_by = filter_data.pop('sort_by', [])
+ extra_args = filter_data.pop('extra_args', {})
+ extra_where = filter_data.pop('extra_where', None)
+ if extra_where:
+ extra_args.setdefault('where', []).append(extra_where)
+
+ # filters
+ query_dict = {}
+ for field, value in filter_data.iteritems():
+ query_dict[field] = value
+ if valid_only:
+ manager = cls.get_valid_manager()
+ else:
+ manager = cls.objects
+ query = manager.filter(**query_dict).distinct()
+
+ # other arguments
+ if extra_args:
+ query = query.extra(**extra_args)
+
+ # sorting + paging
+ assert isinstance(sort_by, list) or isinstance(sort_by, tuple)
+ query = query.order_by(*sort_by)
+ if query_start is not None and query_limit is not None:
+ query_limit += query_start
+ return query[query_start:query_limit]
+
+
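A hedged example of the special filter_data arguments, using the Host model from later in this change:

    # First 20 unlocked hosts, sorted by hostname in descending order.
    hosts = Host.query_objects({'locked': False,
                                'sort_by': ['-hostname'],
                                'query_start': 0,
                                'query_limit': 20})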
+ @classmethod
+ def query_count(cls, filter_data):
+ """\
+        Like query_objects, but retrieve only the count of results.
+ """
+ filter_data.pop('query_start', None)
+ filter_data.pop('query_limit', None)
+ return cls.query_objects(filter_data).count()
+
+
+ @classmethod
+ def clean_object_dicts(cls, field_dicts):
+ """\
+        Take a list of dicts corresponding to objects (as returned by
+ query.values()) and clean the data to be more suitable for
+ returning to the user.
+ """
+ for i in range(len(field_dicts)):
+ cls.clean_foreign_keys(field_dicts[i])
+ cls.convert_human_readable_values(
+ field_dicts[i], to_human_readable=True)
+
+
+ @classmethod
+ def list_objects(cls, filter_data):
+ """\
+ Like query_objects, but return a list of dictionaries.
+ """
+ query = cls.query_objects(filter_data)
+ field_dicts = list(query.values())
+ cls.clean_object_dicts(field_dicts)
+ return field_dicts
+
+
+ @classmethod
+ def smart_get(cls, *args, **kwargs):
+ """\
+ smart_get(integer) -> get object by ID
+ smart_get(string) -> get object by name_field
+ smart_get(keyword args) -> normal ModelClass.objects.get()
+ """
+ assert bool(args) ^ bool(kwargs)
+ if args:
+ assert len(args) == 1
+ arg = args[0]
+ if isinstance(arg, int) or isinstance(arg, long):
+ return cls.objects.get(id=arg)
+ if isinstance(arg, str) or isinstance(arg, unicode):
+ return cls.objects.get(
+ **{cls.name_field : arg})
+ raise ValueError(
+ 'Invalid positional argument: %s (%s)' % (
+ str(arg), type(arg)))
+ return cls.objects.get(**kwargs)
+
+
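All three call styles, sketched against the Host model (the hostname is hypothetical):

    Host.smart_get(10)                        # by primary key
    Host.smart_get('host1.example.com')       # by name_field ('hostname')
    Host.smart_get(hostname='host1.example.com', locked=False)  # plain get()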
+ def get_object_dict(self):
+ """\
+ Return a dictionary mapping fields to this object's values.
+ """
+ object_dict = dict((field_name, getattr(self, field_name))
+ for field_name
+ in self.get_field_dict().iterkeys())
+ self.clean_object_dicts([object_dict])
+ return object_dict
+
+
+ @classmethod
+ def get_valid_manager(cls):
+ return cls.objects
class ModelWithInvalid(ModelExtensions):
- """
- Overrides model methods save() and delete() to support invalidation in
- place of actual deletion. Subclasses must have a boolean "invalid"
- field.
- """
-
- def save(self):
- # see if this object was previously added and invalidated
- my_name = getattr(self, self.name_field)
- filters = {self.name_field : my_name, 'invalid' : True}
- try:
- old_object = self.__class__.objects.get(**filters)
- except self.DoesNotExist:
- # no existing object
- super(ModelWithInvalid, self).save()
- return
-
- self.id = old_object.id
- super(ModelWithInvalid, self).save()
-
-
- def clean_object(self):
- """
- This method is called when an object is marked invalid.
- Subclasses should override this to clean up relationships that
- should no longer exist if the object were deleted."""
- pass
-
-
- def delete(self):
- assert not self.invalid
- self.invalid = True
- self.save()
- self.clean_object()
-
-
- @classmethod
- def get_valid_manager(cls):
- return cls.valid_objects
-
-
- class Manipulator(object):
- """
- Force default manipulators to look only at valid objects -
- otherwise they will match against invalid objects when checking
- uniqueness.
- """
- @classmethod
- def _prepare(cls, model):
- super(ModelWithInvalid.Manipulator, cls)._prepare(model)
- cls.manager = model.valid_objects
+ """
+ Overrides model methods save() and delete() to support invalidation in
+ place of actual deletion. Subclasses must have a boolean "invalid"
+ field.
+ """
+
+ def save(self):
+ # see if this object was previously added and invalidated
+ my_name = getattr(self, self.name_field)
+ filters = {self.name_field : my_name, 'invalid' : True}
+ try:
+ old_object = self.__class__.objects.get(**filters)
+ except self.DoesNotExist:
+ # no existing object
+ super(ModelWithInvalid, self).save()
+ return
+
+ self.id = old_object.id
+ super(ModelWithInvalid, self).save()
+
+
+ def clean_object(self):
+ """
+ This method is called when an object is marked invalid.
+ Subclasses should override this to clean up relationships that
+ should no longer exist if the object were deleted."""
+ pass
+
+
+ def delete(self):
+ assert not self.invalid
+ self.invalid = True
+ self.save()
+ self.clean_object()
+
+
+ @classmethod
+ def get_valid_manager(cls):
+ return cls.valid_objects
+
+
+ class Manipulator(object):
+ """
+ Force default manipulators to look only at valid objects -
+ otherwise they will match against invalid objects when checking
+ uniqueness.
+ """
+ @classmethod
+ def _prepare(cls, model):
+ super(ModelWithInvalid.Manipulator, cls)._prepare(model)
+ cls.manager = model.valid_objects
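A hedged sketch of the resulting revive-on-save behaviour, using the Host model from the next file in this change (the hostname is hypothetical):

    host = Host.add_object(hostname='host1.example.com')
    host.delete()      # marks the row invalid=True instead of removing it
    # Adding the same hostname again passes the valid-only uniqueness check,
    # and save() above reuses the invalidated row's id.
    revived = Host.add_object(hostname='host1.example.com')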
diff --git a/frontend/afe/models.py b/frontend/afe/models.py
index 8e7882d5..74d19e25 100644
--- a/frontend/afe/models.py
+++ b/frontend/afe/models.py
@@ -4,292 +4,292 @@ from frontend import settings
class AclAccessViolation(Exception):
- """\
- Raised when an operation is attempted with proper permissions as
- dictated by ACLs.
- """
+ """\
+    Raised when an operation is attempted without proper permissions as
+ dictated by ACLs.
+ """
class Label(model_logic.ModelWithInvalid, dbmodels.Model):
- """\
- Required:
- name: label name
+ """\
+ Required:
+ name: label name
- Optional:
- kernel_config: url/path to kernel config to use for jobs run on this
- label
- platform: if True, this is a platform label (defaults to False)
- """
- name = dbmodels.CharField(maxlength=255, unique=True)
- kernel_config = dbmodels.CharField(maxlength=255, blank=True)
- platform = dbmodels.BooleanField(default=False)
- invalid = dbmodels.BooleanField(default=False,
- editable=settings.FULL_ADMIN)
+ Optional:
+ kernel_config: url/path to kernel config to use for jobs run on this
+ label
+ platform: if True, this is a platform label (defaults to False)
+ """
+ name = dbmodels.CharField(maxlength=255, unique=True)
+ kernel_config = dbmodels.CharField(maxlength=255, blank=True)
+ platform = dbmodels.BooleanField(default=False)
+ invalid = dbmodels.BooleanField(default=False,
+ editable=settings.FULL_ADMIN)
- name_field = 'name'
- objects = model_logic.ExtendedManager()
- valid_objects = model_logic.ValidObjectsManager()
+ name_field = 'name'
+ objects = model_logic.ExtendedManager()
+ valid_objects = model_logic.ValidObjectsManager()
- def clean_object(self):
- self.host_set.clear()
+ def clean_object(self):
+ self.host_set.clear()
- def enqueue_job(self, job):
- 'Enqueue a job on any host of this label.'
- queue_entry = HostQueueEntry(meta_host=self, job=job,
- status=Job.Status.QUEUED,
- priority=job.priority)
- queue_entry.save()
+ def enqueue_job(self, job):
+ 'Enqueue a job on any host of this label.'
+ queue_entry = HostQueueEntry(meta_host=self, job=job,
+ status=Job.Status.QUEUED,
+ priority=job.priority)
+ queue_entry.save()
- class Meta:
- db_table = 'labels'
+ class Meta:
+ db_table = 'labels'
- class Admin:
- list_display = ('name', 'kernel_config')
- # see Host.Admin
- manager = model_logic.ValidObjectsManager()
+ class Admin:
+ list_display = ('name', 'kernel_config')
+ # see Host.Admin
+ manager = model_logic.ValidObjectsManager()
- def __str__(self):
- return self.name
+ def __str__(self):
+ return self.name
class Host(model_logic.ModelWithInvalid, dbmodels.Model):
- """\
- Required:
- hostname
-
- optional:
- locked: host is locked and will not be queued
-
- Internal:
- synch_id: currently unused
- status: string describing status of host
- """
- Status = enum.Enum('Verifying', 'Running', 'Ready', 'Repairing',
- 'Repair Failed', 'Dead', 'Rebooting',
- string_values=True)
-
- hostname = dbmodels.CharField(maxlength=255, unique=True)
- labels = dbmodels.ManyToManyField(Label, blank=True,
- filter_interface=dbmodels.HORIZONTAL)
- locked = dbmodels.BooleanField(default=False)
- synch_id = dbmodels.IntegerField(blank=True, null=True,
- editable=settings.FULL_ADMIN)
- status = dbmodels.CharField(maxlength=255, default=Status.READY,
- choices=Status.choices(),
- editable=settings.FULL_ADMIN)
- invalid = dbmodels.BooleanField(default=False,
- editable=settings.FULL_ADMIN)
-
- name_field = 'hostname'
- objects = model_logic.ExtendedManager()
- valid_objects = model_logic.ValidObjectsManager()
-
-
- def clean_object(self):
- self.aclgroup_set.clear()
- self.labels.clear()
-
-
- def save(self):
- # extra spaces in the hostname can be a sneaky source of errors
- self.hostname = self.hostname.strip()
- # is this a new object being saved for the first time?
- first_time = (self.id is None)
- super(Host, self).save()
- if first_time:
- everyone = AclGroup.objects.get(name='Everyone')
- everyone.hosts.add(self)
-
-
- def enqueue_job(self, job):
- ' Enqueue a job on this host.'
- queue_entry = HostQueueEntry(host=self, job=job,
- status=Job.Status.QUEUED,
- priority=job.priority)
- # allow recovery of dead hosts from the frontend
- if not self.active_queue_entry() and self.is_dead():
- self.status = Host.Status.READY
- self.save()
- queue_entry.save()
-
-
- def platform(self):
- # TODO(showard): slighly hacky?
- platforms = self.labels.filter(platform=True)
- if len(platforms) == 0:
- return None
- return platforms[0]
- platform.short_description = 'Platform'
-
-
- def is_dead(self):
- return self.status == Host.Status.REPAIR_FAILED
-
-
- def active_queue_entry(self):
- active = list(self.hostqueueentry_set.filter(active=True))
- if not active:
- return None
- assert len(active) == 1, ('More than one active entry for '
- 'host ' + self.hostname)
- return active[0]
-
-
- class Meta:
- db_table = 'hosts'
-
- class Admin:
- # TODO(showard) - showing platform requires a SQL query for
- # each row (since labels are many-to-many) - should we remove
- # it?
- list_display = ('hostname', 'platform', 'locked', 'status')
- list_filter = ('labels', 'locked')
- search_fields = ('hostname', 'status')
- # undocumented Django feature - if you set manager here, the
- # admin code will use it, otherwise it'll use a default Manager
- manager = model_logic.ValidObjectsManager()
-
- def __str__(self):
- return self.hostname
+ """\
+ Required:
+ hostname
+
+ optional:
+ locked: host is locked and will not be queued
+
+ Internal:
+ synch_id: currently unused
+ status: string describing status of host
+ """
+ Status = enum.Enum('Verifying', 'Running', 'Ready', 'Repairing',
+ 'Repair Failed', 'Dead', 'Rebooting',
+ string_values=True)
+
+ hostname = dbmodels.CharField(maxlength=255, unique=True)
+ labels = dbmodels.ManyToManyField(Label, blank=True,
+ filter_interface=dbmodels.HORIZONTAL)
+ locked = dbmodels.BooleanField(default=False)
+ synch_id = dbmodels.IntegerField(blank=True, null=True,
+ editable=settings.FULL_ADMIN)
+ status = dbmodels.CharField(maxlength=255, default=Status.READY,
+ choices=Status.choices(),
+ editable=settings.FULL_ADMIN)
+ invalid = dbmodels.BooleanField(default=False,
+ editable=settings.FULL_ADMIN)
+
+ name_field = 'hostname'
+ objects = model_logic.ExtendedManager()
+ valid_objects = model_logic.ValidObjectsManager()
+
+
+ def clean_object(self):
+ self.aclgroup_set.clear()
+ self.labels.clear()
+
+
+ def save(self):
+ # extra spaces in the hostname can be a sneaky source of errors
+ self.hostname = self.hostname.strip()
+ # is this a new object being saved for the first time?
+ first_time = (self.id is None)
+ super(Host, self).save()
+ if first_time:
+ everyone = AclGroup.objects.get(name='Everyone')
+ everyone.hosts.add(self)
+
+
+ def enqueue_job(self, job):
+        'Enqueue a job on this host.'
+ queue_entry = HostQueueEntry(host=self, job=job,
+ status=Job.Status.QUEUED,
+ priority=job.priority)
+ # allow recovery of dead hosts from the frontend
+ if not self.active_queue_entry() and self.is_dead():
+ self.status = Host.Status.READY
+ self.save()
+ queue_entry.save()
+
+
+ def platform(self):
+        # TODO(showard): slightly hacky?
+ platforms = self.labels.filter(platform=True)
+ if len(platforms) == 0:
+ return None
+ return platforms[0]
+ platform.short_description = 'Platform'
+
+
+ def is_dead(self):
+ return self.status == Host.Status.REPAIR_FAILED
+
+
+ def active_queue_entry(self):
+ active = list(self.hostqueueentry_set.filter(active=True))
+ if not active:
+ return None
+ assert len(active) == 1, ('More than one active entry for '
+ 'host ' + self.hostname)
+ return active[0]
+
+
+ class Meta:
+ db_table = 'hosts'
+
+ class Admin:
+ # TODO(showard) - showing platform requires a SQL query for
+ # each row (since labels are many-to-many) - should we remove
+ # it?
+ list_display = ('hostname', 'platform', 'locked', 'status')
+ list_filter = ('labels', 'locked')
+ search_fields = ('hostname', 'status')
+ # undocumented Django feature - if you set manager here, the
+ # admin code will use it, otherwise it'll use a default Manager
+ manager = model_logic.ValidObjectsManager()
+
+ def __str__(self):
+ return self.hostname
class Test(dbmodels.Model, model_logic.ModelExtensions):
- """\
- Required:
- name: test name
- test_type: Client or Server
- path: path to pass to run_test()
- synch_type: whether the test should run synchronously or asynchronously
-
- Optional:
- test_class: used for categorization of tests
- description: arbirary text description
- """
- Classes = enum.Enum('Kernel', 'Hardware', 'Canned Test Sets',
- string_values=True)
- SynchType = enum.Enum('Asynchronous', 'Synchronous', start_value=1)
- # TODO(showard) - this should be merged with Job.ControlType (but right
- # now they use opposite values)
- Types = enum.Enum('Client', 'Server', start_value=1)
-
- name = dbmodels.CharField(maxlength=255, unique=True)
- test_class = dbmodels.CharField(maxlength=255,
- choices=Classes.choices())
- description = dbmodels.TextField(blank=True)
- test_type = dbmodels.SmallIntegerField(choices=Types.choices())
- synch_type = dbmodels.SmallIntegerField(choices=SynchType.choices(),
- default=SynchType.ASYNCHRONOUS)
- path = dbmodels.CharField(maxlength=255)
-
- name_field = 'name'
- objects = model_logic.ExtendedManager()
-
-
- class Meta:
- db_table = 'autotests'
-
- class Admin:
- fields = (
- (None, {'fields' :
- ('name', 'test_class', 'test_type', 'synch_type',
- 'path', 'description')}),
- )
- list_display = ('name', 'test_type', 'synch_type',
- 'description')
- search_fields = ('name',)
-
- def __str__(self):
- return self.name
+ """\
+ Required:
+ name: test name
+ test_type: Client or Server
+ path: path to pass to run_test()
+ synch_type: whether the test should run synchronously or asynchronously
+
+ Optional:
+ test_class: used for categorization of tests
+    description: arbitrary text description
+ """
+ Classes = enum.Enum('Kernel', 'Hardware', 'Canned Test Sets',
+ string_values=True)
+ SynchType = enum.Enum('Asynchronous', 'Synchronous', start_value=1)
+ # TODO(showard) - this should be merged with Job.ControlType (but right
+ # now they use opposite values)
+ Types = enum.Enum('Client', 'Server', start_value=1)
+
+ name = dbmodels.CharField(maxlength=255, unique=True)
+ test_class = dbmodels.CharField(maxlength=255,
+ choices=Classes.choices())
+ description = dbmodels.TextField(blank=True)
+ test_type = dbmodels.SmallIntegerField(choices=Types.choices())
+ synch_type = dbmodels.SmallIntegerField(choices=SynchType.choices(),
+ default=SynchType.ASYNCHRONOUS)
+ path = dbmodels.CharField(maxlength=255)
+
+ name_field = 'name'
+ objects = model_logic.ExtendedManager()
+
+
+ class Meta:
+ db_table = 'autotests'
+
+ class Admin:
+ fields = (
+ (None, {'fields' :
+ ('name', 'test_class', 'test_type', 'synch_type',
+ 'path', 'description')}),
+ )
+ list_display = ('name', 'test_type', 'synch_type',
+ 'description')
+ search_fields = ('name',)
+
+ def __str__(self):
+ return self.name
class User(dbmodels.Model, model_logic.ModelExtensions):
- """\
- Required:
- login :user login name
+ """\
+ Required:
+    login: user login name
- Optional:
- access_level: 0=User (default), 1=Admin, 100=Root
- """
- ACCESS_ROOT = 100
- ACCESS_ADMIN = 1
- ACCESS_USER = 0
+ Optional:
+ access_level: 0=User (default), 1=Admin, 100=Root
+ """
+ ACCESS_ROOT = 100
+ ACCESS_ADMIN = 1
+ ACCESS_USER = 0
- login = dbmodels.CharField(maxlength=255, unique=True)
- access_level = dbmodels.IntegerField(default=ACCESS_USER, blank=True)
+ login = dbmodels.CharField(maxlength=255, unique=True)
+ access_level = dbmodels.IntegerField(default=ACCESS_USER, blank=True)
- name_field = 'login'
- objects = model_logic.ExtendedManager()
+ name_field = 'login'
+ objects = model_logic.ExtendedManager()
- def save(self):
- # is this a new object being saved for the first time?
- first_time = (self.id is None)
- super(User, self).save()
- if first_time:
- everyone = AclGroup.objects.get(name='Everyone')
- everyone.users.add(self)
+ def save(self):
+ # is this a new object being saved for the first time?
+ first_time = (self.id is None)
+ super(User, self).save()
+ if first_time:
+ everyone = AclGroup.objects.get(name='Everyone')
+ everyone.users.add(self)
- def has_access(self, target):
- if self.access_level >= self.ACCESS_ROOT:
- return True
+ def has_access(self, target):
+ if self.access_level >= self.ACCESS_ROOT:
+ return True
- if isinstance(target, int):
- return self.access_level >= target
- if isinstance(target, Job):
- return (target.owner == self.login or
- self.access_level >= self.ACCESS_ADMIN)
- if isinstance(target, Host):
- acl_intersect = [group
- for group in self.aclgroup_set.all()
- if group in target.aclgroup_set.all()]
- return bool(acl_intersect)
- if isinstance(target, User):
- return self.access_level >= target.access_level
- raise ValueError('Invalid target type')
+ if isinstance(target, int):
+ return self.access_level >= target
+ if isinstance(target, Job):
+ return (target.owner == self.login or
+ self.access_level >= self.ACCESS_ADMIN)
+ if isinstance(target, Host):
+ acl_intersect = [group
+ for group in self.aclgroup_set.all()
+ if group in target.aclgroup_set.all()]
+ return bool(acl_intersect)
+ if isinstance(target, User):
+ return self.access_level >= target.access_level
+ raise ValueError('Invalid target type')
- class Meta:
- db_table = 'users'
+ class Meta:
+ db_table = 'users'
- class Admin:
- list_display = ('login', 'access_level')
- search_fields = ('login',)
+ class Admin:
+ list_display = ('login', 'access_level')
+ search_fields = ('login',)
- def __str__(self):
- return self.login
+ def __str__(self):
+ return self.login
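Hedged examples of the checks above; the login and looked-up objects are purely illustrative:

    user = User.objects.get(login='someone')
    user.has_access(User.ACCESS_ADMIN)                  # integer threshold
    user.has_access(Job.objects.get(id=1))              # job owner or admin
    user.has_access(Host.objects.get(hostname='h1'))    # shared ACL group
    user.has_access(User.objects.get(login='other'))    # compare access levels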
class AclGroup(dbmodels.Model, model_logic.ModelExtensions):
- """\
- Required:
- name: name of ACL group
+ """\
+ Required:
+ name: name of ACL group
- Optional:
- description: arbitrary description of group
- """
- name = dbmodels.CharField(maxlength=255, unique=True)
- description = dbmodels.CharField(maxlength=255, blank=True)
- users = dbmodels.ManyToManyField(User,
- filter_interface=dbmodels.HORIZONTAL)
- hosts = dbmodels.ManyToManyField(Host,
- filter_interface=dbmodels.HORIZONTAL)
+ Optional:
+ description: arbitrary description of group
+ """
+ name = dbmodels.CharField(maxlength=255, unique=True)
+ description = dbmodels.CharField(maxlength=255, blank=True)
+ users = dbmodels.ManyToManyField(User,
+ filter_interface=dbmodels.HORIZONTAL)
+ hosts = dbmodels.ManyToManyField(Host,
+ filter_interface=dbmodels.HORIZONTAL)
- name_field = 'name'
- objects = model_logic.ExtendedManager()
+ name_field = 'name'
+ objects = model_logic.ExtendedManager()
- class Meta:
- db_table = 'acl_groups'
+ class Meta:
+ db_table = 'acl_groups'
- class Admin:
- list_display = ('name', 'description')
- search_fields = ('name',)
+ class Admin:
+ list_display = ('name', 'description')
+ search_fields = ('name',)
- def __str__(self):
- return self.name
+ def __str__(self):
+ return self.name
# hack to make the column name in the many-to-many DB tables match the one
# generated by ruby
@@ -297,209 +297,209 @@ AclGroup._meta.object_name = 'acl_group'
class JobManager(model_logic.ExtendedManager):
- 'Custom manager to provide efficient status counts querying.'
- def get_status_counts(self, job_ids):
- """\
- Returns a dictionary mapping the given job IDs to their status
- count dictionaries.
- """
- if not job_ids:
- return {}
- id_list = '(%s)' % ','.join(str(job_id) for job_id in job_ids)
- cursor = connection.cursor()
- cursor.execute("""
- SELECT job_id, status, COUNT(*)
- FROM host_queue_entries
- WHERE job_id IN %s
- GROUP BY job_id, status
- """ % id_list)
- all_job_counts = {}
- for job_id in job_ids:
- all_job_counts[job_id] = {}
- for job_id, status, count in cursor.fetchall():
- all_job_counts[job_id][status] = count
- return all_job_counts
+ 'Custom manager to provide efficient status counts querying.'
+ def get_status_counts(self, job_ids):
+ """\
+ Returns a dictionary mapping the given job IDs to their status
+ count dictionaries.
+ """
+ if not job_ids:
+ return {}
+ id_list = '(%s)' % ','.join(str(job_id) for job_id in job_ids)
+ cursor = connection.cursor()
+ cursor.execute("""
+ SELECT job_id, status, COUNT(*)
+ FROM host_queue_entries
+ WHERE job_id IN %s
+ GROUP BY job_id, status
+ """ % id_list)
+ all_job_counts = {}
+ for job_id in job_ids:
+ all_job_counts[job_id] = {}
+ for job_id, status, count in cursor.fetchall():
+ all_job_counts[job_id][status] = count
+ return all_job_counts
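A hedged sketch of the result shape (job ids and counts are illustrative):

    counts = Job.objects.get_status_counts([1, 2])
    # e.g. {1: {'Completed': 3, 'Failed': 1}, 2: {'Queued': 5}}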
class Job(dbmodels.Model, model_logic.ModelExtensions):
- """\
- owner: username of job owner
- name: job name (does not have to be unique)
- priority: Low, Medium, High, Urgent (or 0-3)
- control_file: contents of control file
- control_type: Client or Server
- created_on: date of job creation
- submitted_on: date of job submission
- synch_type: Asynchronous or Synchronous (i.e. job must run on all hosts
- simultaneously; used for server-side control files)
- synch_count: ???
- synchronizing: for scheduler use
- """
- Priority = enum.Enum('Low', 'Medium', 'High', 'Urgent')
- ControlType = enum.Enum('Server', 'Client', start_value=1)
- Status = enum.Enum('Created', 'Queued', 'Pending', 'Running',
- 'Completed', 'Abort', 'Aborting', 'Aborted',
- 'Failed', string_values=True)
-
- owner = dbmodels.CharField(maxlength=255)
- name = dbmodels.CharField(maxlength=255)
- priority = dbmodels.SmallIntegerField(choices=Priority.choices(),
- blank=True, # to allow 0
- default=Priority.MEDIUM)
- control_file = dbmodels.TextField()
- control_type = dbmodels.SmallIntegerField(choices=ControlType.choices(),
- blank=True) # to allow 0
- created_on = dbmodels.DateTimeField(auto_now_add=True)
- synch_type = dbmodels.SmallIntegerField(
- blank=True, null=True, choices=Test.SynchType.choices())
- synch_count = dbmodels.IntegerField(blank=True, null=True)
- synchronizing = dbmodels.BooleanField(default=False)
-
-
- # custom manager
- objects = JobManager()
-
-
- def is_server_job(self):
- return self.control_type == self.ControlType.SERVER
-
-
- @classmethod
- def create(cls, owner, name, priority, control_file, control_type,
- hosts, synch_type):
- """\
- Creates a job by taking some information (the listed args)
- and filling in the rest of the necessary information.
- """
- job = cls.add_object(
- owner=owner, name=name, priority=priority,
- control_file=control_file, control_type=control_type,
- synch_type=synch_type)
-
- if job.synch_type == Test.SynchType.SYNCHRONOUS:
- job.synch_count = len(hosts)
- else:
- if len(hosts) == 0:
- errors = {'hosts':
- 'asynchronous jobs require at least'
- + ' one host to run on'}
- raise model_logic.ValidationError(errors)
- job.save()
- return job
-
-
- def queue(self, hosts):
- 'Enqueue a job on the given hosts.'
- for host in hosts:
- host.enqueue_job(self)
- self.recompute_blocks()
-
-
- def recompute_blocks(self):
- """\
- Clear out the blocks (ineligible_host_queues) for this job and
- recompute the set. The set of blocks is the union of:
- -all hosts already assigned to this job
- -all hosts not ACL accessible to this job's owner
- """
- job_entries = self.hostqueueentry_set.all()
- query = Host.objects.filter_in_subquery(
- 'host_id', job_entries, subquery_alias='job_entries')
-
- old_ids = [block.id for block in
- self.ineligiblehostqueue_set.all()]
- block_values = [(self.id, host.id) for host in query]
- IneligibleHostQueue.objects.create_in_bulk(('job', 'host'),
- block_values)
- IneligibleHostQueue.objects.delete_in_bulk(old_ids)
-
-
- @classmethod
- def recompute_all_blocks(cls):
- 'Recompute blocks for all queued and active jobs.'
- for job in cls.objects.filter(
- hostqueueentry__complete=False).distinct():
- job.recompute_blocks()
-
-
- def requeue(self, new_owner):
- 'Creates a new job identical to this one'
- hosts = [queue_entry.meta_host or queue_entry.host
- for queue_entry in self.hostqueueentry_set.all()]
- new_job = Job.create(
- owner=new_owner, name=self.name, priority=self.priority,
- control_file=self.control_file,
- control_type=self.control_type, hosts=hosts,
- synch_type=self.synch_type)
- new_job.queue(hosts)
- return new_job
-
-
- def abort(self):
- for queue_entry in self.hostqueueentry_set.all():
- if queue_entry.active:
- queue_entry.status = Job.Status.ABORT
- elif not queue_entry.complete:
- queue_entry.status = Job.Status.ABORTED
- queue_entry.active = False
- queue_entry.complete = True
- queue_entry.save()
-
-
- def user(self):
- try:
- return User.objects.get(login=self.owner)
- except self.DoesNotExist:
- return None
-
-
- class Meta:
- db_table = 'jobs'
-
- if settings.FULL_ADMIN:
- class Admin:
- list_display = ('id', 'owner', 'name', 'control_type')
-
- def __str__(self):
- return '%s (%s-%s)' % (self.name, self.id, self.owner)
+ """\
+ owner: username of job owner
+ name: job name (does not have to be unique)
+ priority: Low, Medium, High, Urgent (or 0-3)
+ control_file: contents of control file
+ control_type: Client or Server
+ created_on: date of job creation
+ submitted_on: date of job submission
+ synch_type: Asynchronous or Synchronous (i.e. job must run on all hosts
+ simultaneously; used for server-side control files)
+ synch_count: ???
+ synchronizing: for scheduler use
+ """
+ Priority = enum.Enum('Low', 'Medium', 'High', 'Urgent')
+ ControlType = enum.Enum('Server', 'Client', start_value=1)
+ Status = enum.Enum('Created', 'Queued', 'Pending', 'Running',
+ 'Completed', 'Abort', 'Aborting', 'Aborted',
+ 'Failed', string_values=True)
+
+ owner = dbmodels.CharField(maxlength=255)
+ name = dbmodels.CharField(maxlength=255)
+ priority = dbmodels.SmallIntegerField(choices=Priority.choices(),
+ blank=True, # to allow 0
+ default=Priority.MEDIUM)
+ control_file = dbmodels.TextField()
+ control_type = dbmodels.SmallIntegerField(choices=ControlType.choices(),
+ blank=True) # to allow 0
+ created_on = dbmodels.DateTimeField(auto_now_add=True)
+ synch_type = dbmodels.SmallIntegerField(
+ blank=True, null=True, choices=Test.SynchType.choices())
+ synch_count = dbmodels.IntegerField(blank=True, null=True)
+ synchronizing = dbmodels.BooleanField(default=False)
+
+
+ # custom manager
+ objects = JobManager()
+
+
+ def is_server_job(self):
+ return self.control_type == self.ControlType.SERVER
+
+
+ @classmethod
+ def create(cls, owner, name, priority, control_file, control_type,
+ hosts, synch_type):
+ """\
+ Creates a job by taking some information (the listed args)
+ and filling in the rest of the necessary information.
+ """
+ job = cls.add_object(
+ owner=owner, name=name, priority=priority,
+ control_file=control_file, control_type=control_type,
+ synch_type=synch_type)
+
+ if job.synch_type == Test.SynchType.SYNCHRONOUS:
+ job.synch_count = len(hosts)
+ else:
+ if len(hosts) == 0:
+ errors = {'hosts':
+ 'asynchronous jobs require at least'
+ + ' one host to run on'}
+ raise model_logic.ValidationError(errors)
+ job.save()
+ return job
+
+
+ def queue(self, hosts):
+ 'Enqueue a job on the given hosts.'
+ for host in hosts:
+ host.enqueue_job(self)
+ self.recompute_blocks()
+
+
+ def recompute_blocks(self):
+ """\
+ Clear out the blocks (ineligible_host_queues) for this job and
+ recompute the set. The set of blocks is the union of:
+ -all hosts already assigned to this job
+ -all hosts not ACL accessible to this job's owner
+ """
+ job_entries = self.hostqueueentry_set.all()
+ query = Host.objects.filter_in_subquery(
+ 'host_id', job_entries, subquery_alias='job_entries')
+
+ old_ids = [block.id for block in
+ self.ineligiblehostqueue_set.all()]
+ block_values = [(self.id, host.id) for host in query]
+ IneligibleHostQueue.objects.create_in_bulk(('job', 'host'),
+ block_values)
+ IneligibleHostQueue.objects.delete_in_bulk(old_ids)
+
+
+ @classmethod
+ def recompute_all_blocks(cls):
+ 'Recompute blocks for all queued and active jobs.'
+ for job in cls.objects.filter(
+ hostqueueentry__complete=False).distinct():
+ job.recompute_blocks()
+
+
+ def requeue(self, new_owner):
+ 'Creates a new job identical to this one'
+ hosts = [queue_entry.meta_host or queue_entry.host
+ for queue_entry in self.hostqueueentry_set.all()]
+ new_job = Job.create(
+ owner=new_owner, name=self.name, priority=self.priority,
+ control_file=self.control_file,
+ control_type=self.control_type, hosts=hosts,
+ synch_type=self.synch_type)
+ new_job.queue(hosts)
+ return new_job
+
+
+ def abort(self):
+ for queue_entry in self.hostqueueentry_set.all():
+ if queue_entry.active:
+ queue_entry.status = Job.Status.ABORT
+ elif not queue_entry.complete:
+ queue_entry.status = Job.Status.ABORTED
+ queue_entry.active = False
+ queue_entry.complete = True
+ queue_entry.save()
+
+
+ def user(self):
+ try:
+ return User.objects.get(login=self.owner)
+ except self.DoesNotExist:
+ return None
+
+
+ class Meta:
+ db_table = 'jobs'
+
+ if settings.FULL_ADMIN:
+ class Admin:
+ list_display = ('id', 'owner', 'name', 'control_type')
+
+ def __str__(self):
+ return '%s (%s-%s)' % (self.name, self.id, self.owner)
class IneligibleHostQueue(dbmodels.Model, model_logic.ModelExtensions):
- job = dbmodels.ForeignKey(Job)
- host = dbmodels.ForeignKey(Host)
+ job = dbmodels.ForeignKey(Job)
+ host = dbmodels.ForeignKey(Host)
- objects = model_logic.ExtendedManager()
+ objects = model_logic.ExtendedManager()
- class Meta:
- db_table = 'ineligible_host_queues'
+ class Meta:
+ db_table = 'ineligible_host_queues'
- if settings.FULL_ADMIN:
- class Admin:
- list_display = ('id', 'job', 'host')
+ if settings.FULL_ADMIN:
+ class Admin:
+ list_display = ('id', 'job', 'host')
class HostQueueEntry(dbmodels.Model, model_logic.ModelExtensions):
- job = dbmodels.ForeignKey(Job)
- host = dbmodels.ForeignKey(Host, blank=True, null=True)
- priority = dbmodels.SmallIntegerField()
- status = dbmodels.CharField(maxlength=255)
- meta_host = dbmodels.ForeignKey(Label, blank=True, null=True,
- db_column='meta_host')
- active = dbmodels.BooleanField(default=False)
- complete = dbmodels.BooleanField(default=False)
+ job = dbmodels.ForeignKey(Job)
+ host = dbmodels.ForeignKey(Host, blank=True, null=True)
+ priority = dbmodels.SmallIntegerField()
+ status = dbmodels.CharField(maxlength=255)
+ meta_host = dbmodels.ForeignKey(Label, blank=True, null=True,
+ db_column='meta_host')
+ active = dbmodels.BooleanField(default=False)
+ complete = dbmodels.BooleanField(default=False)
- objects = model_logic.ExtendedManager()
+ objects = model_logic.ExtendedManager()
- def is_meta_host_entry(self):
- 'True if this is a entry has a meta_host instead of a host.'
- return self.host is None and self.meta_host is not None
+ def is_meta_host_entry(self):
+        'True if this entry has a meta_host instead of a host.'
+ return self.host is None and self.meta_host is not None
- class Meta:
- db_table = 'host_queue_entries'
+ class Meta:
+ db_table = 'host_queue_entries'
- if settings.FULL_ADMIN:
- class Admin:
- list_display = ('id', 'job', 'host', 'status',
- 'meta_host')
+ if settings.FULL_ADMIN:
+ class Admin:
+ list_display = ('id', 'job', 'host', 'status',
+ 'meta_host')
diff --git a/frontend/afe/rpc_client_lib.py b/frontend/afe/rpc_client_lib.py
index 0d362b26..1707e044 100644
--- a/frontend/afe/rpc_client_lib.py
+++ b/frontend/afe/rpc_client_lib.py
@@ -8,4 +8,4 @@ __author__ = 'showard@google.com (Steve Howard)'
from json_rpc import proxy
def get_proxy(*args, **kwargs):
- return proxy.ServiceProxy(*args, **kwargs)
+ return proxy.ServiceProxy(*args, **kwargs)
diff --git a/frontend/afe/rpc_handler.py b/frontend/afe/rpc_handler.py
index f9ba2e2e..70ef6cdf 100644
--- a/frontend/afe/rpc_handler.py
+++ b/frontend/afe/rpc_handler.py
@@ -13,60 +13,60 @@ from frontend.afe import rpc_utils
class RpcMethodHolder(object):
- 'Dummy class to hold RPC interface methods as attributes.'
+ 'Dummy class to hold RPC interface methods as attributes.'
class RpcHandler(object):
- def __init__(self, rpc_interface_modules, document_module=None):
- self._rpc_methods = RpcMethodHolder()
- self._dispatcher = serviceHandler.ServiceHandler(
- self._rpc_methods)
-
- # store all methods from interface modules
- for module in rpc_interface_modules:
- self._grab_methods_from(module)
-
- # get documentation for rpc_interface we can send back to the
- # user
- if document_module is None:
- document_module = rpc_interface_modules[0]
- self.html_doc = pydoc.html.document(document_module)
-
-
- def handle_rpc_request(self, request):
- response = django.http.HttpResponse()
- if len(request.POST):
- response.write(self._dispatcher.handleRequest(
- request.raw_post_data))
- else:
- response.write(self.html_doc)
-
- response['Content-length'] = str(len(response.content))
- return response
-
-
- @staticmethod
- def _allow_keyword_args(f):
- """\
- Decorator to allow a function to take keyword args even though
- the RPC layer doesn't support that. The decorated function
- assumes its last argument is a dictionary of keyword args and
- passes them to the original function as keyword args.
- """
- def new_fn(*args):
- assert args
- keyword_args = args[-1]
- args = args[:-1]
- return f(*args, **keyword_args)
- new_fn.func_name = f.func_name
- return new_fn
-
-
- def _grab_methods_from(self, module):
- for name in dir(module):
- attribute = getattr(module, name)
- if not callable(attribute):
- continue
- decorated_function = (
- RpcHandler._allow_keyword_args(attribute))
- setattr(self._rpc_methods, name, decorated_function)
+ def __init__(self, rpc_interface_modules, document_module=None):
+ self._rpc_methods = RpcMethodHolder()
+ self._dispatcher = serviceHandler.ServiceHandler(
+ self._rpc_methods)
+
+ # store all methods from interface modules
+ for module in rpc_interface_modules:
+ self._grab_methods_from(module)
+
+ # get documentation for rpc_interface we can send back to the
+ # user
+ if document_module is None:
+ document_module = rpc_interface_modules[0]
+ self.html_doc = pydoc.html.document(document_module)
+
+
+ def handle_rpc_request(self, request):
+ response = django.http.HttpResponse()
+ if len(request.POST):
+ response.write(self._dispatcher.handleRequest(
+ request.raw_post_data))
+ else:
+ response.write(self.html_doc)
+
+ response['Content-length'] = str(len(response.content))
+ return response
+
+
+ @staticmethod
+ def _allow_keyword_args(f):
+ """\
+ Decorator to allow a function to take keyword args even though
+ the RPC layer doesn't support that. The decorated function
+ assumes its last argument is a dictionary of keyword args and
+ passes them to the original function as keyword args.
+ """
+ def new_fn(*args):
+ assert args
+ keyword_args = args[-1]
+ args = args[:-1]
+ return f(*args, **keyword_args)
+ new_fn.func_name = f.func_name
+ return new_fn
+
+
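A hedged sketch of the decorator in action, using modify_host as it appears later in rpc_interface.py:

    def modify_host(id, **data):
        models.Host.smart_get(id).update_object(data)

    wrapped = RpcHandler._allow_keyword_args(modify_host)
    wrapped(5, {'locked': True})    # calls modify_host(5, locked=True)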
+ def _grab_methods_from(self, module):
+ for name in dir(module):
+ attribute = getattr(module, name)
+ if not callable(attribute):
+ continue
+ decorated_function = (
+ RpcHandler._allow_keyword_args(attribute))
+ setattr(self._rpc_methods, name, decorated_function)
diff --git a/frontend/afe/rpc_interface.py b/frontend/afe/rpc_interface.py
index d82b53e9..80159126 100644
--- a/frontend/afe/rpc_interface.py
+++ b/frontend/afe/rpc_interface.py
@@ -34,352 +34,352 @@ import models, control_file, rpc_utils
# labels
def add_label(name, kernel_config=None, platform=None):
- return models.Label.add_object(name=name, kernel_config=kernel_config,
- platform=platform).id
+ return models.Label.add_object(name=name, kernel_config=kernel_config,
+ platform=platform).id
def modify_label(id, **data):
- models.Label.smart_get(id).update_object(data)
+ models.Label.smart_get(id).update_object(data)
def delete_label(id):
- models.Label.smart_get(id).delete()
+ models.Label.smart_get(id).delete()
def label_add_hosts(id, hosts):
- host_objs = [models.Host.smart_get(host) for host in hosts]
- models.Label.smart_get(id).host_set.add(*host_objs)
+ host_objs = [models.Host.smart_get(host) for host in hosts]
+ models.Label.smart_get(id).host_set.add(*host_objs)
def label_remove_hosts(id, hosts):
- host_objs = [models.Host.smart_get(host) for host in hosts]
- models.Label.smart_get(id).host_set.remove(*host_objs)
+ host_objs = [models.Host.smart_get(host) for host in hosts]
+ models.Label.smart_get(id).host_set.remove(*host_objs)
def get_labels(**filter_data):
- return rpc_utils.prepare_for_serialization(
- models.Label.list_objects(filter_data))
+ return rpc_utils.prepare_for_serialization(
+ models.Label.list_objects(filter_data))
# hosts
def add_host(hostname, status=None, locked=None):
- return models.Host.add_object(hostname=hostname, status=status,
- locked=locked).id
+ return models.Host.add_object(hostname=hostname, status=status,
+ locked=locked).id
def modify_host(id, **data):
- models.Host.smart_get(id).update_object(data)
+ models.Host.smart_get(id).update_object(data)
def host_add_labels(id, labels):
- labels = [models.Label.smart_get(label) for label in labels]
- models.Host.smart_get(id).labels.add(*labels)
+ labels = [models.Label.smart_get(label) for label in labels]
+ models.Host.smart_get(id).labels.add(*labels)
def host_remove_labels(id, labels):
- labels = [models.Label.smart_get(label) for label in labels]
- models.Host.smart_get(id).labels.remove(*labels)
+ labels = [models.Label.smart_get(label) for label in labels]
+ models.Host.smart_get(id).labels.remove(*labels)
def delete_host(id):
- models.Host.smart_get(id).delete()
+ models.Host.smart_get(id).delete()
def get_hosts(multiple_labels=[], **filter_data):
- """\
- multiple_labels: match hosts in all of the labels given. Should be a
- list of label names.
- """
- filter_data['extra_args'] = (
- rpc_utils.extra_host_filters(multiple_labels))
- hosts = models.Host.list_objects(filter_data)
- for host in hosts:
- host_obj = models.Host.objects.get(id=host['id'])
- host['labels'] = [label.name
- for label in host_obj.labels.all()]
- platform = host_obj.platform()
- host['platform'] = platform and platform.name or None
- return rpc_utils.prepare_for_serialization(hosts)
+ """\
+ multiple_labels: match hosts in all of the labels given. Should be a
+ list of label names.
+ """
+ filter_data['extra_args'] = (
+ rpc_utils.extra_host_filters(multiple_labels))
+ hosts = models.Host.list_objects(filter_data)
+ for host in hosts:
+ host_obj = models.Host.objects.get(id=host['id'])
+ host['labels'] = [label.name
+ for label in host_obj.labels.all()]
+ platform = host_obj.platform()
+ host['platform'] = platform and platform.name or None
+ return rpc_utils.prepare_for_serialization(hosts)
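A hedged example call; the label names are illustrative:

    get_hosts(multiple_labels=['x86_64', 'regression'], locked=False)
    # returns only unlocked hosts that carry both labels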
def get_num_hosts(multiple_labels=[], **filter_data):
- filter_data['extra_args'] = (
- rpc_utils.extra_host_filters(multiple_labels))
- return models.Host.query_count(filter_data)
+ filter_data['extra_args'] = (
+ rpc_utils.extra_host_filters(multiple_labels))
+ return models.Host.query_count(filter_data)
# tests
def add_test(name, test_type, path, test_class=None, description=None):
- return models.Test.add_object(name=name, test_type=test_type, path=path,
- test_class=test_class,
- description=description).id
+ return models.Test.add_object(name=name, test_type=test_type, path=path,
+ test_class=test_class,
+ description=description).id
def modify_test(id, **data):
- models.Test.smart_get(id).update_object(data)
+ models.Test.smart_get(id).update_object(data)
def delete_test(id):
- models.Test.smart_get(id).delete()
+ models.Test.smart_get(id).delete()
def get_tests(**filter_data):
- return rpc_utils.prepare_for_serialization(
- models.Test.list_objects(filter_data))
+ return rpc_utils.prepare_for_serialization(
+ models.Test.list_objects(filter_data))
# users
def add_user(login, access_level=None):
- return models.User.add_object(login=login, access_level=access_level).id
+ return models.User.add_object(login=login, access_level=access_level).id
def modify_user(id, **data):
- models.User.smart_get(id).update_object(data)
+ models.User.smart_get(id).update_object(data)
def delete_user(id):
- models.User.smart_get(id).delete()
+ models.User.smart_get(id).delete()
def get_users(**filter_data):
- return rpc_utils.prepare_for_serialization(
- models.User.list_objects(filter_data))
+ return rpc_utils.prepare_for_serialization(
+ models.User.list_objects(filter_data))
# acl groups
def add_acl_group(name, description=None):
- return models.AclGroup.add_object(name=name, description=description).id
+ return models.AclGroup.add_object(name=name, description=description).id
def modify_acl_group(id, **data):
- models.AclGroup.smart_get(id).update_object(data)
+ models.AclGroup.smart_get(id).update_object(data)
def acl_group_add_users(id, users):
- users = [models.User.smart_get(user) for user in users]
- group = models.AclGroup.smart_get(id)
- group.users.add(*users)
+ users = [models.User.smart_get(user) for user in users]
+ group = models.AclGroup.smart_get(id)
+ group.users.add(*users)
def acl_group_remove_users(id, users):
- users = [models.User.smart_get(user) for user in users]
- group = models.AclGroup.smart_get(id)
- group.users.remove(*users)
+ users = [models.User.smart_get(user) for user in users]
+ group = models.AclGroup.smart_get(id)
+ group.users.remove(*users)
def acl_group_add_hosts(id, hosts):
- hosts = [models.Host.smart_get(host) for host in hosts]
- group = models.AclGroup.smart_get(id)
- group.hosts.add(*hosts)
+ hosts = [models.Host.smart_get(host) for host in hosts]
+ group = models.AclGroup.smart_get(id)
+ group.hosts.add(*hosts)
def acl_group_remove_hosts(id, hosts):
- hosts = [models.Host.smart_get(host) for host in hosts]
- group = models.AclGroup.smart_get(id)
- group.hosts.remove(*hosts)
+ hosts = [models.Host.smart_get(host) for host in hosts]
+ group = models.AclGroup.smart_get(id)
+ group.hosts.remove(*hosts)
def delete_acl_group(id):
- models.AclGroup.smart_get(id).delete()
+ models.AclGroup.smart_get(id).delete()
def get_acl_groups(**filter_data):
- acl_groups = models.AclGroup.list_objects(filter_data)
- for acl_group in acl_groups:
- acl_group_obj = models.AclGroup.objects.get(id=acl_group['id'])
- acl_group['users'] = [user.login
- for user in acl_group_obj.users.all()]
- acl_group['hosts'] = [host.hostname
- for host in acl_group_obj.hosts.all()]
- return rpc_utils.prepare_for_serialization(acl_groups)
+ acl_groups = models.AclGroup.list_objects(filter_data)
+ for acl_group in acl_groups:
+ acl_group_obj = models.AclGroup.objects.get(id=acl_group['id'])
+ acl_group['users'] = [user.login
+ for user in acl_group_obj.users.all()]
+ acl_group['hosts'] = [host.hostname
+ for host in acl_group_obj.hosts.all()]
+ return rpc_utils.prepare_for_serialization(acl_groups)
# jobs
def generate_control_file(tests, kernel=None, label=None):
- """\
- Generates a client-side control file to load a kernel and run a set of
- tests. Returns a tuple (control_file, is_server, is_synchronous):
- control_file - the control file text
- is_server - is the control file a server-side control file?
- is_synchronous - should the control file be run synchronously?
-
- tests: list of tests to run
- kernel: kernel to install in generated control file
- label: name of label to grab kernel config from
- """
- if not tests:
- return '', False, False
-
- is_server, is_synchronous, test_objects, label = (
- rpc_utils.prepare_generate_control_file(tests, kernel, label))
- cf_text = control_file.generate_control(test_objects, kernel, label,
- is_server)
- return cf_text, is_server, is_synchronous
+ """\
+ Generates a client-side control file to load a kernel and run a set of
+ tests. Returns a tuple (control_file, is_server, is_synchronous):
+ control_file - the control file text
+ is_server - is the control file a server-side control file?
+ is_synchronous - should the control file be run synchronously?
+
+ tests: list of tests to run
+ kernel: kernel to install in generated control file
+ label: name of label to grab kernel config from
+ """
+ if not tests:
+ return '', False, False
+
+ is_server, is_synchronous, test_objects, label = (
+ rpc_utils.prepare_generate_control_file(tests, kernel, label))
+ cf_text = control_file.generate_control(test_objects, kernel, label,
+ is_server)
+ return cf_text, is_server, is_synchronous
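A minimal sketch of how a caller consumes the returned tuple (the test names and kernel version are placeholders):

    cf_text, is_server, is_synchronous = generate_control_file(
            tests=['sleeptest', 'dbench'], kernel='2.6.18')
    if not cf_text:
        raise Exception('no tests were requested')
    # cf_text is then handed to create_job() with the matching control_type.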
def create_job(name, priority, control_file, control_type, is_synchronous=None,
- hosts=None, meta_hosts=None):
- """\
- Create and enqueue a job.
-
- priority: Low, Medium, High, Urgent
- control_file: contents of control file
- control_type: type of control file, Client or Server
- is_synchronous: boolean indicating if a job is synchronous
- hosts: list of hosts to run job on
- meta_hosts: list where each entry is a label name, and for each entry
- one host will be chosen from that label to run the job
- on.
- """
- owner = rpc_utils.get_user().login
- # input validation
- if not hosts and not meta_hosts:
- raise models.ValidationError({
- 'arguments' : "You must pass at least one of 'hosts' or "
- "'meta_hosts'"
- })
-
- # convert hostnames & meta hosts to host/label objects
- host_objects = []
- for host in hosts or []:
- this_host = models.Host.smart_get(host)
- host_objects.append(this_host)
- for label in meta_hosts or []:
- this_label = models.Label.smart_get(label)
- host_objects.append(this_label)
-
- # default is_synchronous to some appropriate value
- ControlType = models.Job.ControlType
- control_type = ControlType.get_value(control_type)
- if is_synchronous is None:
- is_synchronous = (control_type == ControlType.SERVER)
- # convert the synch flag to an actual type
- if is_synchronous:
- synch_type = models.Test.SynchType.SYNCHRONOUS
- else:
- synch_type = models.Test.SynchType.ASYNCHRONOUS
-
- job = models.Job.create(owner=owner, name=name, priority=priority,
- control_file=control_file,
- control_type=control_type,
- synch_type=synch_type,
- hosts=host_objects)
- job.queue(host_objects)
- return job.id
+ hosts=None, meta_hosts=None):
+ """\
+ Create and enqueue a job.
+
+ priority: Low, Medium, High, Urgent
+ control_file: contents of control file
+ control_type: type of control file, Client or Server
+ is_synchronous: boolean indicating if a job is synchronous
+ hosts: list of hosts to run job on
+ meta_hosts: list where each entry is a label name, and for each entry
+ one host will be chosen from that label to run the job
+ on.
+ """
+ owner = rpc_utils.get_user().login
+ # input validation
+ if not hosts and not meta_hosts:
+ raise models.ValidationError({
+ 'arguments' : "You must pass at least one of 'hosts' or "
+ "'meta_hosts'"
+ })
+
+ # convert hostnames & meta hosts to host/label objects
+ host_objects = []
+ for host in hosts or []:
+ this_host = models.Host.smart_get(host)
+ host_objects.append(this_host)
+ for label in meta_hosts or []:
+ this_label = models.Label.smart_get(label)
+ host_objects.append(this_label)
+
+ # default is_synchronous to some appropriate value
+ ControlType = models.Job.ControlType
+ control_type = ControlType.get_value(control_type)
+ if is_synchronous is None:
+ is_synchronous = (control_type == ControlType.SERVER)
+ # convert the synch flag to an actual type
+ if is_synchronous:
+ synch_type = models.Test.SynchType.SYNCHRONOUS
+ else:
+ synch_type = models.Test.SynchType.ASYNCHRONOUS
+
+ job = models.Job.create(owner=owner, name=name, priority=priority,
+ control_file=control_file,
+ control_type=control_type,
+ synch_type=synch_type,
+ hosts=host_objects)
+ job.queue(host_objects)
+ return job.id
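A hedged sketch of a valid call (all names are placeholders); omitting both hosts and meta_hosts raises ValidationError before any Job row is created:

    job_id = create_job(name='smoke', priority='Medium',
                        control_file="job.run_test('sleeptest')",
                        control_type='Client',
                        hosts=['host1.example.com'],
                        meta_hosts=['regression-pool'])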
def requeue_job(id):
- """\
- Create and enqueue a copy of the given job.
- """
- job = models.Job.objects.get(id=id)
- new_job = job.requeue(rpc_utils.get_user().login)
- return new_job.id
+ """\
+ Create and enqueue a copy of the given job.
+ """
+ job = models.Job.objects.get(id=id)
+ new_job = job.requeue(rpc_utils.get_user().login)
+ return new_job.id
def abort_job(id):
- """\
- Abort the job with the given id number.
- """
- job = models.Job.objects.get(id=id)
- job.abort()
+ """\
+ Abort the job with the given id number.
+ """
+ job = models.Job.objects.get(id=id)
+ job.abort()
def get_jobs(not_yet_run=False, running=False, finished=False, **filter_data):
- """\
- Extra filter args for get_jobs:
- -not_yet_run: Include only jobs that have not yet started running.
- -running: Include only jobs that have start running but for which not
- all hosts have completed.
- -finished: Include only jobs for which all hosts have completed (or
- aborted).
- At most one of these three fields should be specified.
- """
- filter_data['extra_args'] = rpc_utils.extra_job_filters(not_yet_run,
- running,
- finished)
- return rpc_utils.prepare_for_serialization(
- models.Job.list_objects(filter_data))
+ """\
+ Extra filter args for get_jobs:
+ -not_yet_run: Include only jobs that have not yet started running.
+ -running: Include only jobs that have start running but for which not
+ all hosts have completed.
+ -finished: Include only jobs for which all hosts have completed (or
+ aborted).
+ At most one of these three fields should be specified.
+ """
+ filter_data['extra_args'] = rpc_utils.extra_job_filters(not_yet_run,
+ -running: Include only jobs that have started running but for which not
+ finished)
+ return rpc_utils.prepare_for_serialization(
+ models.Job.list_objects(filter_data))
def get_num_jobs(not_yet_run=False, running=False, finished=False,
- **filter_data):
- """\
- See get_jobs() for documentation of extra filter parameters.
- """
- filter_data['extra_args'] = rpc_utils.extra_job_filters(not_yet_run,
- running,
- finished)
- return models.Job.query_count(filter_data)
+ **filter_data):
+ """\
+ See get_jobs() for documentation of extra filter parameters.
+ """
+ filter_data['extra_args'] = rpc_utils.extra_job_filters(not_yet_run,
+ running,
+ finished)
+ return models.Job.query_count(filter_data)
def get_jobs_summary(**filter_data):
- """\
- Like get_jobs(), but adds a 'stauts_counts' field, which is a dictionary
- mapping status strings to the number of hosts currently with that
- status, i.e. {'Queued' : 4, 'Running' : 2}.
- """
- jobs = get_jobs(**filter_data)
- ids = [job['id'] for job in jobs]
- all_status_counts = models.Job.objects.get_status_counts(ids)
- for job in jobs:
- job['status_counts'] = all_status_counts[job['id']]
- return rpc_utils.prepare_for_serialization(jobs)
+ """\
+ Like get_jobs(), but adds a 'status_counts' field, which is a dictionary
+ mapping status strings to the number of hosts currently with that
+ status, e.g. {'Queued' : 4, 'Running' : 2}.
+ """
+ jobs = get_jobs(**filter_data)
+ ids = [job['id'] for job in jobs]
+ all_status_counts = models.Job.objects.get_status_counts(ids)
+ for job in jobs:
+ job['status_counts'] = all_status_counts[job['id']]
+ return rpc_utils.prepare_for_serialization(jobs)
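The extra field lets a caller summarise progress without further round trips; a sketch (the owner filter is illustrative):

    for job in get_jobs_summary(owner='debug_user'):
        counts = job['status_counts']          # e.g. {'Queued': 4, 'Running': 2}
        print('%s: %d entries %r' % (job['name'], sum(counts.values()), counts))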
# host queue entries
def get_host_queue_entries(**filter_data):
- """\
- TODO
- """
- query = models.HostQueueEntry.query_objects(filter_data)
- all_dicts = []
- for queue_entry in query.select_related():
- entry_dict = queue_entry.get_object_dict()
- if entry_dict['host'] is not None:
- entry_dict['host'] = queue_entry.host.get_object_dict()
- entry_dict['job'] = queue_entry.job.get_object_dict()
- all_dicts.append(entry_dict)
- return rpc_utils.prepare_for_serialization(all_dicts)
+ """\
+ Get host queue entries, with the 'host' and 'job' fields expanded into
+ nested object dicts.
+ """
+ query = models.HostQueueEntry.query_objects(filter_data)
+ all_dicts = []
+ for queue_entry in query.select_related():
+ entry_dict = queue_entry.get_object_dict()
+ if entry_dict['host'] is not None:
+ entry_dict['host'] = queue_entry.host.get_object_dict()
+ entry_dict['job'] = queue_entry.job.get_object_dict()
+ all_dicts.append(entry_dict)
+ return rpc_utils.prepare_for_serialization(all_dicts)
def get_num_host_queue_entries(**filter_data):
- """\
- Get the number of host queue entries associated with this job.
- """
- return models.HostQueueEntry.query_count(filter_data)
+ """\
+ Get the number of host queue entries associated with this job.
+ """
+ return models.HostQueueEntry.query_count(filter_data)
# other
def get_static_data():
- """\
- Returns a dictionary containing a bunch of data that shouldn't change
- often and is otherwise inaccessible. This includes:
- priorities: list of job priority choices
- default_priority: default priority value for new jobs
- users: sorted list of all users
- labels: sorted list of all labels
- tests: sorted list of all tests
- user_login: logged-in username
- host_statuses: sorted list of possible Host statuses
- job_statuses: sorted list of possible HostQueueEntry statuses
- """
- result = {}
- result['priorities'] = models.Job.Priority.choices()
- default_priority = models.Job.get_field_dict()['priority'].default
- default_string = models.Job.Priority.get_string(default_priority)
- result['default_priority'] = default_string
- result['users'] = get_users(sort_by=['login'])
- result['labels'] = get_labels(sort_by=['-platform', 'name'])
- result['tests'] = get_tests(sort_by=['name'])
- result['user_login'] = rpc_utils.get_user().login
- result['host_statuses'] = rpc_utils.sorted(models.Host.Status.names)
- result['job_statuses'] = rpc_utils.sorted(models.Job.Status.names)
- return result
+ """\
+ Returns a dictionary containing a bunch of data that shouldn't change
+ often and is otherwise inaccessible. This includes:
+ priorities: list of job priority choices
+ default_priority: default priority value for new jobs
+ users: sorted list of all users
+ labels: sorted list of all labels
+ tests: sorted list of all tests
+ user_login: logged-in username
+ host_statuses: sorted list of possible Host statuses
+ job_statuses: sorted list of possible HostQueueEntry statuses
+ """
+ result = {}
+ result['priorities'] = models.Job.Priority.choices()
+ default_priority = models.Job.get_field_dict()['priority'].default
+ default_string = models.Job.Priority.get_string(default_priority)
+ result['default_priority'] = default_string
+ result['users'] = get_users(sort_by=['login'])
+ result['labels'] = get_labels(sort_by=['-platform', 'name'])
+ result['tests'] = get_tests(sort_by=['name'])
+ result['user_login'] = rpc_utils.get_user().login
+ result['host_statuses'] = rpc_utils.sorted(models.Host.Status.names)
+ result['job_statuses'] = rpc_utils.sorted(models.Job.Status.names)
+ return result
diff --git a/frontend/afe/rpc_utils.py b/frontend/afe/rpc_utils.py
index 14fa9877..58b524e2 100644
--- a/frontend/afe/rpc_utils.py
+++ b/frontend/afe/rpc_utils.py
@@ -9,146 +9,146 @@ import datetime, xmlrpclib, threading
from frontend.afe import models
def prepare_for_serialization(objects):
- """
- Prepare Python objects to be returned via RPC.
- """
- if (isinstance(objects, list) and len(objects) and
- isinstance(objects[0], dict) and 'id' in objects[0]):
- objects = gather_unique_dicts(objects)
- return _prepare_data(objects)
+ """
+ Prepare Python objects to be returned via RPC.
+ """
+ if (isinstance(objects, list) and len(objects) and
+ isinstance(objects[0], dict) and 'id' in objects[0]):
+ objects = gather_unique_dicts(objects)
+ return _prepare_data(objects)
def _prepare_data(data):
- """
- Recursively process data structures, performing necessary type
- conversions to values in data to allow for RPC serialization:
- -convert datetimes to strings
- -convert tuples to lists
- """
- if isinstance(data, dict):
- new_data = {}
- for key, value in data.iteritems():
- new_data[key] = _prepare_data(value)
- return new_data
- elif isinstance(data, list) or isinstance(data, tuple):
- return [_prepare_data(item) for item in data]
- elif isinstance(data, datetime.datetime):
- return str(data)
- else:
- return data
+ """
+ Recursively process data structures, performing necessary type
+ conversions to values in data to allow for RPC serialization:
+ -convert datetimes to strings
+ -convert tuples to lists
+ """
+ if isinstance(data, dict):
+ new_data = {}
+ for key, value in data.iteritems():
+ new_data[key] = _prepare_data(value)
+ return new_data
+ elif isinstance(data, list) or isinstance(data, tuple):
+ return [_prepare_data(item) for item in data]
+ elif isinstance(data, datetime.datetime):
+ return str(data)
+ else:
+ return data
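A quick illustration of the conversions _prepare_data() performs (the values are arbitrary):

    import datetime
    raw = {'created': datetime.datetime(2008, 1, 1, 12, 0),
           'hosts': ('host1', 'host2')}
    prepared = _prepare_data(raw)
    # {'created': '2008-01-01 12:00:00', 'hosts': ['host1', 'host2']}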
def gather_unique_dicts(dict_iterable):
- """\
- Pick out unique objects (by ID) from an iterable of object dicts.
- """
- id_set = set()
- result = []
- for obj in dict_iterable:
- if obj['id'] not in id_set:
- id_set.add(obj['id'])
- result.append(obj)
- return result
+ """\
+ Pick out unique objects (by ID) from an iterable of object dicts.
+ """
+ id_set = set()
+ result = []
+ for obj in dict_iterable:
+ if obj['id'] not in id_set:
+ id_set.add(obj['id'])
+ result.append(obj)
+ return result
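For example, duplicate dicts produced by a JOIN collapse to the first occurrence:

    rows = [{'id': 1, 'hostname': 'a'},
            {'id': 2, 'hostname': 'b'},
            {'id': 1, 'hostname': 'a'}]
    assert gather_unique_dicts(rows) == rows[:2]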
def extra_job_filters(not_yet_run=False, running=False, finished=False):
- """\
- Generate a SQL WHERE clause for job status filtering, and return it in
- a dict of keyword args to pass to query.extra(). No more than one of
- the parameters should be passed as True.
- """
- assert not ((not_yet_run and running) or
- (not_yet_run and finished) or
- (running and finished)), ('Cannot specify more than one '
- 'filter to this function')
- if not_yet_run:
- where = ['id NOT IN (SELECT job_id FROM host_queue_entries '
- 'WHERE active OR complete)']
- elif running:
- where = ['(id IN (SELECT job_id FROM host_queue_entries '
- 'WHERE active OR complete)) AND '
- '(id IN (SELECT job_id FROM host_queue_entries '
- 'WHERE not complete OR active))']
- elif finished:
- where = ['id NOT IN (SELECT job_id FROM host_queue_entries '
- 'WHERE not complete OR active)']
- else:
- return None
- return {'where': where}
+ """\
+ Generate a SQL WHERE clause for job status filtering, and return it in
+ a dict of keyword args to pass to query.extra(). No more than one of
+ the parameters should be passed as True.
+ """
+ assert not ((not_yet_run and running) or
+ (not_yet_run and finished) or
+ (running and finished)), ('Cannot specify more than one '
+ 'filter to this function')
+ if not_yet_run:
+ where = ['id NOT IN (SELECT job_id FROM host_queue_entries '
+ 'WHERE active OR complete)']
+ elif running:
+ where = ['(id IN (SELECT job_id FROM host_queue_entries '
+ 'WHERE active OR complete)) AND '
+ '(id IN (SELECT job_id FROM host_queue_entries '
+ 'WHERE not complete OR active))']
+ elif finished:
+ where = ['id NOT IN (SELECT job_id FROM host_queue_entries '
+ 'WHERE not complete OR active)']
+ else:
+ return None
+ return {'where': where}
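The returned dict is intended to be splatted into Django's QuerySet.extra(); a minimal sketch:

    extra = extra_job_filters(not_yet_run=True)
    if extra is not None:                       # None means no extra filtering
        queued_jobs = models.Job.objects.extra(**extra)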
def extra_host_filters(multiple_labels=[]):
- """\
- Generate SQL WHERE clauses for matching hosts in an intersection of
- labels.
- """
- extra_args = {}
- where_str = ('hosts.id in (select host_id from hosts_labels '
- 'where label_id=%s)')
- extra_args['where'] = [where_str] * len(multiple_labels)
- extra_args['params'] = [models.Label.smart_get(label).id
- for label in multiple_labels]
- return extra_args
+ """\
+ Generate SQL WHERE clauses for matching hosts in an intersection of
+ labels.
+ """
+ extra_args = {}
+ where_str = ('hosts.id in (select host_id from hosts_labels '
+ 'where label_id=%s)')
+ extra_args['where'] = [where_str] * len(multiple_labels)
+ extra_args['params'] = [models.Label.smart_get(label).id
+ for label in multiple_labels]
+ return extra_args
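Likewise for hosts, each label adds one parameterized copy of the subquery; a sketch (label names are placeholders):

    extra = extra_host_filters(['x86_64', 'regression-pool'])
    matching = models.Host.objects.extra(where=extra['where'],
                                         params=extra['params'])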
local_vars = threading.local()
def set_user(user):
- """\
- Sets the current request's logged-in user. user should be a
- afe.models.User object.
- """
- local_vars.user = user
+ """\
+ Sets the current request's logged-in user. user should be an
+ afe.models.User object.
+ """
+ local_vars.user = user
def get_user():
- 'Get the currently logged-in user as a afe.models.User object.'
- return local_vars.user
+ 'Get the currently logged-in user as an afe.models.User object.'
+ return local_vars.user
class InconsistencyException(Exception):
- 'Raised when a list of objects does not have a consistent value'
+ 'Raised when a list of objects does not have a consistent value'
def get_consistent_value(objects, field):
- value = getattr(objects[0], field)
- for obj in objects:
- this_value = getattr(obj, field)
- if this_value != value:
- raise InconsistencyException(objects[0], obj)
- return value
+ value = getattr(objects[0], field)
+ for obj in objects:
+ this_value = getattr(obj, field)
+ if this_value != value:
+ raise InconsistencyException(objects[0], obj)
+ return value
def prepare_generate_control_file(tests, kernel, label):
- test_objects = [models.Test.smart_get(test) for test in tests]
- # ensure tests are all the same type
- try:
- test_type = get_consistent_value(test_objects, 'test_type')
- except InconsistencyException, exc:
- test1, test2 = exc.args
- raise models.ValidationError(
- {'tests' : 'You cannot run both server- and client-side '
- 'tests together (tests %s and %s differ' % (
- test1.name, test2.name)})
-
- try:
- synch_type = get_consistent_value(test_objects, 'synch_type')
- except InconsistencyException, exc:
- test1, test2 = exc.args
- raise models.ValidationError(
- {'tests' : 'You cannot run both synchronous and '
- 'asynchronous tests together (tests %s and %s differ)' % (
- test1.name, test2.name)})
-
- is_server = (test_type == models.Test.Types.SERVER)
- is_synchronous = (synch_type == models.Test.SynchType.SYNCHRONOUS)
- if label:
- label = models.Label.smart_get(label)
-
- return is_server, is_synchronous, test_objects, label
+ test_objects = [models.Test.smart_get(test) for test in tests]
+ # ensure tests are all the same type
+ try:
+ test_type = get_consistent_value(test_objects, 'test_type')
+ except InconsistencyException, exc:
+ test1, test2 = exc.args
+ raise models.ValidationError(
+ {'tests' : 'You cannot run both server- and client-side '
+ 'tests together (tests %s and %s differ)' % (
+ test1.name, test2.name)})
+
+ try:
+ synch_type = get_consistent_value(test_objects, 'synch_type')
+ except InconsistencyException, exc:
+ test1, test2 = exc.args
+ raise models.ValidationError(
+ {'tests' : 'You cannot run both synchronous and '
+ 'asynchronous tests together (tests %s and %s differ)' % (
+ test1.name, test2.name)})
+
+ is_server = (test_type == models.Test.Types.SERVER)
+ is_synchronous = (synch_type == models.Test.SynchType.SYNCHRONOUS)
+ if label:
+ label = models.Label.smart_get(label)
+
+ return is_server, is_synchronous, test_objects, label
def sorted(in_list):
- new_list = list(in_list)
- new_list.sort()
- return new_list
+ new_list = list(in_list)
+ new_list.sort()
+ return new_list
diff --git a/frontend/afe/simplejson/__init__.py b/frontend/afe/simplejson/__init__.py
index 181225b9..b903c7de 100644
--- a/frontend/afe/simplejson/__init__.py
+++ b/frontend/afe/simplejson/__init__.py
@@ -11,7 +11,7 @@ marshal and pickle modules.
BSD licensed.
Encoding basic Python object hierarchies::
-
+
>>> import simplejson
>>> simplejson.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
@@ -40,12 +40,12 @@ Pretty printing::
>>> import simplejson
>>> print simplejson.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
{
- "4": 5,
+ "4": 5,
"6": 7
}
Decoding JSON::
-
+
>>> import simplejson
>>> simplejson.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')
[u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
@@ -63,27 +63,27 @@ Specializing JSON object decoding::
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
- ...
+ ...
>>> simplejson.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
Extending JSONEncoder::
-
+
>>> import simplejson
>>> class ComplexEncoder(simplejson.JSONEncoder):
... def default(self, obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... return simplejson.JSONEncoder.default(self, obj)
- ...
+ ...
>>> dumps(2 + 1j, cls=ComplexEncoder)
'[2.0, 1.0]'
>>> ComplexEncoder().encode(2 + 1j)
'[2.0, 1.0]'
>>> list(ComplexEncoder().iterencode(2 + 1j))
['[', '2.0', ', ', '1.0', ']']
-
+
Note that the JSON produced by this module's default settings
is a subset of YAML, so it may be used as a serializer for that as well.
@@ -104,7 +104,7 @@ def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
``.write()``-supporting file-like object).
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
- (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
+ (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the some chunks written to ``fp``
@@ -146,7 +146,7 @@ def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
- (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
+ (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the return value will be a
@@ -199,7 +199,7 @@ def load(fp, encoding=None, cls=None, object_hook=None, **kw):
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
-
+
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
diff --git a/frontend/afe/simplejson/decoder.py b/frontend/afe/simplejson/decoder.py
index 63f70cbc..46a8fb05 100644
--- a/frontend/afe/simplejson/decoder.py
+++ b/frontend/afe/simplejson/decoder.py
@@ -156,7 +156,7 @@ def JSONObject(match, context, _w=WHITESPACE.match):
pairs = object_hook(pairs)
return pairs, end
pattern(r'{')(JSONObject)
-
+
def JSONArray(match, context, _w=WHITESPACE.match):
values = []
s = match.string
@@ -182,7 +182,7 @@ def JSONArray(match, context, _w=WHITESPACE.match):
end = _w(s, end).end()
return values, end
pattern(r'\[')(JSONArray)
-
+
ANYTHING = [
JSONObject,
JSONArray,
@@ -198,7 +198,7 @@ class JSONDecoder(object):
Simple JSON <http://json.org> decoder
Performs the following translations in decoding:
-
+
+---------------+-------------------+
| JSON | Python |
+===============+===================+
@@ -231,7 +231,7 @@ class JSONDecoder(object):
``encoding`` determines the encoding used to interpret any ``str``
objects decoded by this instance (utf-8 by default). It has no
effect when decoding ``unicode`` objects.
-
+
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as ``unicode``.
diff --git a/frontend/afe/simplejson/encoder.py b/frontend/afe/simplejson/encoder.py
index c83c6873..5271f6be 100644
--- a/frontend/afe/simplejson/encoder.py
+++ b/frontend/afe/simplejson/encoder.py
@@ -58,14 +58,14 @@ def encode_basestring_ascii(s):
except KeyError:
return '\\u%04x' % (ord(s),)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
-
+
class JSONEncoder(object):
"""
Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
-
+
+-------------------+---------------+
| Python | JSON |
+===================+===============+
@@ -287,7 +287,7 @@ class JSONEncoder(object):
For example, to support arbitrary iterators, you could
implement default like this::
-
+
def default(self, o):
try:
iterable = iter(o)
@@ -316,9 +316,9 @@ class JSONEncoder(object):
"""
Encode the given object and yield each string
representation as available.
-
+
For example::
-
+
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
diff --git a/frontend/afe/simplejson/scanner.py b/frontend/afe/simplejson/scanner.py
index 64f4999f..b21ce6c2 100644
--- a/frontend/afe/simplejson/scanner.py
+++ b/frontend/afe/simplejson/scanner.py
@@ -54,7 +54,7 @@ class Scanner(object):
match = self.scanner.scanner(string, matchend).match
yield rval, matchend
lastend = matchend
-
+
def pattern(pattern, flags=FLAGS):
def decorator(fn):
fn.pattern = pattern
diff --git a/frontend/afe/test.py b/frontend/afe/test.py
index 686eb91a..be4fe2aa 100644
--- a/frontend/afe/test.py
+++ b/frontend/afe/test.py
@@ -16,46 +16,46 @@ from frontend import settings, afe
app_name = 'afe'
doctest_dir = 'doctests'
doctest_paths = [os.path.join(doctest_dir, filename) for filename
- in os.listdir(os.path.join(app_name, doctest_dir))
- if not filename.startswith('.')]
+ in os.listdir(os.path.join(app_name, doctest_dir))
+ if not filename.startswith('.')]
doctest_paths.sort()
def get_modules():
- modules = []
- module_names = [os.path.basename(filename)[:-3]
- for filename in glob.glob(app_name + '/*.py')
- if '__init__' not in filename
- and 'test.py' not in filename]
- for module_name in module_names:
- __import__('frontend.afe', globals(), locals(), [module_name])
- modules.append(getattr(afe, module_name))
- return modules
+ modules = []
+ module_names = [os.path.basename(filename)[:-3]
+ for filename in glob.glob(app_name + '/*.py')
+ if '__init__' not in filename
+ and 'test.py' not in filename]
+ for module_name in module_names:
+ __import__('frontend.afe', globals(), locals(), [module_name])
+ modules.append(getattr(afe, module_name))
+ return modules
print_after = 'Ran %d tests from %s'
def run_tests(module_list, verbosity=1):
- modules = get_modules()
- total_errors = 0
- old_db = settings.DATABASE_NAME
- django.test.utils.setup_test_environment()
- django.test.utils.create_test_db(verbosity)
- try:
- for module in modules:
- failures, test_count = doctest.testmod(module)
- print print_after % (test_count, module.__name__)
- total_errors += failures
- for path in doctest_paths:
- failures, test_count = doctest.testfile(path)
- print print_after % (test_count, path)
- total_errors += failures
- finally:
- django.test.utils.destroy_test_db(old_db)
- django.test.utils.teardown_test_environment()
- print
- if total_errors == 0:
- print 'OK'
- else:
- print 'FAIL: %d errors' % total_errors
- return total_errors
+ modules = get_modules()
+ total_errors = 0
+ old_db = settings.DATABASE_NAME
+ django.test.utils.setup_test_environment()
+ django.test.utils.create_test_db(verbosity)
+ try:
+ for module in modules:
+ failures, test_count = doctest.testmod(module)
+ print print_after % (test_count, module.__name__)
+ total_errors += failures
+ for path in doctest_paths:
+ failures, test_count = doctest.testfile(path)
+ print print_after % (test_count, path)
+ total_errors += failures
+ finally:
+ django.test.utils.destroy_test_db(old_db)
+ django.test.utils.teardown_test_environment()
+ print
+ if total_errors == 0:
+ print 'OK'
+ else:
+ print 'FAIL: %d errors' % total_errors
+ return total_errors
diff --git a/frontend/afe/urls.py b/frontend/afe/urls.py
index fa8cdb6e..3a77311e 100644
--- a/frontend/afe/urls.py
+++ b/frontend/afe/urls.py
@@ -33,6 +33,6 @@ debug_pattern_list = [
]
if settings.DEBUG:
- pattern_list += debug_pattern_list
+ pattern_list += debug_pattern_list
urlpatterns = patterns('', *pattern_list)
diff --git a/frontend/afe/views.py b/frontend/afe/views.py
index d4482155..947c8084 100644
--- a/frontend/afe/views.py
+++ b/frontend/afe/views.py
@@ -7,38 +7,38 @@ from django.http import HttpResponse, HttpResponsePermanentRedirect
# since site_rpc_interface is later in the list, its methods will override those
# of rpc_interface
rpc_handler_obj = rpc_handler.RpcHandler((rpc_interface, site_rpc_interface),
- document_module=rpc_interface)
+ document_module=rpc_interface)
def handle_rpc(request):
- rpc_utils.set_user(request.afe_user)
- return rpc_handler_obj.handle_rpc_request(request)
+ rpc_utils.set_user(request.afe_user)
+ return rpc_handler_obj.handle_rpc_request(request)
def model_documentation(request):
- doc = '<h2>Models</h2>\n'
- for model_name in ('Label', 'Host', 'Test', 'User', 'AclGroup', 'Job'):
- model_class = getattr(models, model_name)
- doc += '<h3>%s</h3>\n' % model_name
- doc += '<pre>\n%s</pre>\n' % model_class.__doc__
- return HttpResponse(doc)
+ doc = '<h2>Models</h2>\n'
+ for model_name in ('Label', 'Host', 'Test', 'User', 'AclGroup', 'Job'):
+ model_class = getattr(models, model_name)
+ doc += '<h3>%s</h3>\n' % model_name
+ doc += '<pre>\n%s</pre>\n' % model_class.__doc__
+ return HttpResponse(doc)
def redirect_with_extra_data(request, url, **kwargs):
- kwargs['getdata'] = request.GET.urlencode()
- kwargs['server_name'] = request.META['SERVER_NAME']
- return HttpResponsePermanentRedirect(url % kwargs)
+ kwargs['getdata'] = request.GET.urlencode()
+ kwargs['server_name'] = request.META['SERVER_NAME']
+ return HttpResponsePermanentRedirect(url % kwargs)
GWT_SERVER = 'http://localhost:8888/'
def gwt_forward(request, forward_addr):
- if len(request.POST) == 0:
- data = None
- else:
- data = request.raw_post_data
- url_response = urllib2.urlopen(GWT_SERVER + forward_addr, data=data)
- http_response = HttpResponse(url_response.read())
- for header, value in url_response.info().items():
- if header not in ('connection',):
- http_response[header] = value
- return http_response
+ if len(request.POST) == 0:
+ data = None
+ else:
+ data = request.raw_post_data
+ url_response = urllib2.urlopen(GWT_SERVER + forward_addr, data=data)
+ http_response = HttpResponse(url_response.read())
+ for header, value in url_response.info().items():
+ if header not in ('connection',):
+ http_response[header] = value
+ return http_response
diff --git a/frontend/apache_auth.py b/frontend/apache_auth.py
index 552a6bf3..9ce0da27 100644
--- a/frontend/apache_auth.py
+++ b/frontend/apache_auth.py
@@ -7,59 +7,59 @@ from frontend.afe import models, management
DEBUG_USER = 'debug_user'
class SimpleAuthBackend:
- """
- Automatically allows any login. This backend is for use when Apache is
- doing the real authentication. Also ensures logged-in user exists in
- frontend.afe.models.User database.
- """
- def authenticate(self, username=None, password=None):
- try:
- user = User.objects.get(username=username)
- except User.DoesNotExist:
- # password is meaningless
- user = User(username=username,
- password='apache authentication')
- user.is_staff = True
- user.save() # need to save before adding groups
- user.groups.add(Group.objects.get(
- name=management.BASIC_ADMIN))
+ """
+ Automatically allows any login. This backend is for use when Apache is
+ doing the real authentication. Also ensures logged-in user exists in
+ frontend.afe.models.User database.
+ """
+ def authenticate(self, username=None, password=None):
+ try:
+ user = User.objects.get(username=username)
+ except User.DoesNotExist:
+ # password is meaningless
+ user = User(username=username,
+ password='apache authentication')
+ user.is_staff = True
+ user.save() # need to save before adding groups
+ user.groups.add(Group.objects.get(
+ name=management.BASIC_ADMIN))
- SimpleAuthBackend.check_afe_user(username)
- return user
+ SimpleAuthBackend.check_afe_user(username)
+ return user
- @staticmethod
- def check_afe_user(username):
- user, _ = models.User.objects.get_or_create(login=username)
- user.save()
+ @staticmethod
+ def check_afe_user(username):
+ user, _ = models.User.objects.get_or_create(login=username)
+ user.save()
- def get_user(self, user_id):
- try:
- return User.objects.get(pk=user_id)
- except User.DoesNotExist:
- return None
+ def get_user(self, user_id):
+ try:
+ return User.objects.get(pk=user_id)
+ except User.DoesNotExist:
+ return None
class ApacheAuthMiddleware(object):
- """
- Middleware for use when Apache is doing authentication. Looks for
- REQUEST_USER in requests and logs that user in. If no such header is
- found, looks for HTTP_AUTHORIZATION header with username to login (this
- allows CLI to authenticate).
- """
+ """
+ Middleware for use when Apache is doing authentication. Looks for
+ REMOTE_USER in requests and logs that user in. If no such header is
+ found, looks for an HTTP_AUTHORIZATION header with the username to log
+ in as (this allows the CLI to authenticate).
+ """
- def process_request(self, request):
- # look for a username from Apache
- user = request.META.get('REMOTE_USER')
- if user is None:
- # look for a user in headers. This is insecure but
- # it's our temporarily solution for CLI auth.
- user = request.META.get('HTTP_AUTHORIZATION')
- if user is None:
- # no user info - assume we're in development mode
- user = DEBUG_USER
- user_object = auth.authenticate(username=user,
- password='')
- auth.login(request, user_object)
- request.afe_user = models.User.objects.get(login=user)
- return None
+ def process_request(self, request):
+ # look for a username from Apache
+ user = request.META.get('REMOTE_USER')
+ if user is None:
+ # look for a user in headers. This is insecure but
+ # it's our temporary solution for CLI auth.
+ user = request.META.get('HTTP_AUTHORIZATION')
+ if user is None:
+ # no user info - assume we're in development mode
+ user = DEBUG_USER
+ user_object = auth.authenticate(username=user,
+ password='')
+ auth.login(request, user_object)
+ request.afe_user = models.User.objects.get(login=user)
+ return None
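From the client side, the CLI path amounts to sending the login in the Authorization header; a rough sketch (server URL and login are placeholders, and the request body is whatever the RPC client normally sends):

    import urllib2
    req = urllib2.Request('http://afe-server/afe/server/rpc/')
    req.add_header('AUTHORIZATION', 'debug_user')   # seen as HTTP_AUTHORIZATION
    response = urllib2.urlopen(req)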
diff --git a/frontend/frontend_unittest.py b/frontend/frontend_unittest.py
index 87f18d10..65f95732 100644
--- a/frontend/frontend_unittest.py
+++ b/frontend/frontend_unittest.py
@@ -10,11 +10,11 @@ import common
class FrontendTest(unittest.TestCase):
- def test_all(self):
- manage_dir = os.path.dirname(__file__)
- result = os.system("cd %s && ./manage.py test" % (manage_dir))
- self.assert_(result == 0)
+ def test_all(self):
+ manage_dir = os.path.dirname(__file__)
+ result = os.system("cd %s && ./manage.py test" % (manage_dir))
+ self.assert_(result == 0)
if __name__ == '__main__':
- unittest.main()
+ unittest.main()
diff --git a/frontend/manage.py b/frontend/manage.py
index e70d6d23..5e78ea97 100755
--- a/frontend/manage.py
+++ b/frontend/manage.py
@@ -1,11 +1,11 @@
#!/usr/bin/env python
from django.core.management import execute_manager
try:
- import settings # Assumed to be in the same directory.
+ import settings # Assumed to be in the same directory.
except ImportError:
- import sys
- sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
- sys.exit(1)
+ import sys
+ sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
+ sys.exit(1)
if __name__ == "__main__":
- execute_manager(settings)
+ execute_manager(settings)
diff --git a/frontend/migrations/001_initial_db.py b/frontend/migrations/001_initial_db.py
index f16e6ec7..036754cc 100644
--- a/frontend/migrations/001_initial_db.py
+++ b/frontend/migrations/001_initial_db.py
@@ -1,33 +1,33 @@
import os
required_tables = ('acl_groups', 'acl_groups_hosts', 'acl_groups_users',
- 'autotests', 'host_queue_entries', 'hosts', 'hosts_labels',
- 'ineligible_host_queues', 'jobs', 'labels', 'users')
+ 'autotests', 'host_queue_entries', 'hosts', 'hosts_labels',
+ 'ineligible_host_queues', 'jobs', 'labels', 'users')
def migrate_up(manager):
- manager.execute("SHOW TABLES")
- tables = [row[0] for row in manager.cursor.fetchall()]
- db_initialized = True
- for table in required_tables:
- if table not in tables:
- db_initialized = False
- break
- if not db_initialized:
- response = raw_input(
- 'Your autotest_web database does not appear to be '
- 'initialized. Do you want to recreate it (this will '
- 'result in loss of any existing data) (yes/No)? ')
- if response != 'yes':
- raise Exception('User has chosen to abort migration')
-
- manager.execute_script(CREATE_DB_SQL)
-
- manager.create_migrate_table()
+ manager.execute("SHOW TABLES")
+ tables = [row[0] for row in manager.cursor.fetchall()]
+ db_initialized = True
+ for table in required_tables:
+ if table not in tables:
+ db_initialized = False
+ break
+ if not db_initialized:
+ response = raw_input(
+ 'Your autotest_web database does not appear to be '
+ 'initialized. Do you want to recreate it (this will '
+ 'result in loss of any existing data) (yes/No)? ')
+ if response != 'yes':
+ raise Exception('User has chosen to abort migration')
+
+ manager.execute_script(CREATE_DB_SQL)
+
+ manager.create_migrate_table()
def migrate_down(manager):
- manager.execute_script(DROP_DB_SQL)
+ manager.execute_script(DROP_DB_SQL)
CREATE_DB_SQL = """\
diff --git a/frontend/migrations/002_cleanup_fields.py b/frontend/migrations/002_cleanup_fields.py
index a3656ca9..88abc78b 100644
--- a/frontend/migrations/002_cleanup_fields.py
+++ b/frontend/migrations/002_cleanup_fields.py
@@ -1,12 +1,12 @@
def migrate_up(manager):
- manager.execute('ALTER TABLE autotests DROP params')
- manager.execute('ALTER TABLE jobs DROP kernel_url, DROP status, '
- 'DROP submitted_on')
- manager.execute('ALTER TABLE host_queue_entries DROP created_on')
+ manager.execute('ALTER TABLE autotests DROP params')
+ manager.execute('ALTER TABLE jobs DROP kernel_url, DROP status, '
+ 'DROP submitted_on')
+ manager.execute('ALTER TABLE host_queue_entries DROP created_on')
def migrate_down(manager):
- manager.execute('ALTER TABLE autotests ADD params VARCHAR(255)')
- manager.execute('ALTER TABLE jobs ADD kernel_url VARCHAR(255), '
- 'ADD status VARCHAR(255), ADD submitted_on datetime')
- manager.execute('ALTER TABLE host_queue_entries ADD created_on '
- 'datetime')
+ manager.execute('ALTER TABLE autotests ADD params VARCHAR(255)')
+ manager.execute('ALTER TABLE jobs ADD kernel_url VARCHAR(255), '
+ 'ADD status VARCHAR(255), ADD submitted_on datetime')
+ manager.execute('ALTER TABLE host_queue_entries ADD created_on '
+ 'datetime')
diff --git a/frontend/migrations/003_test_synch_type.py b/frontend/migrations/003_test_synch_type.py
index b20c01d6..939d65f3 100644
--- a/frontend/migrations/003_test_synch_type.py
+++ b/frontend/migrations/003_test_synch_type.py
@@ -1,9 +1,9 @@
def migrate_up(manager):
- manager.execute('ALTER TABLE autotests ADD `synch_type` smallint '
- 'NOT NULL')
- # set all to asynchronous by default
- manager.execute('UPDATE autotests SET synch_type=1')
+ manager.execute('ALTER TABLE autotests ADD `synch_type` smallint '
+ 'NOT NULL')
+ # set all to asynchronous by default
+ manager.execute('UPDATE autotests SET synch_type=1')
def migrate_down(manager):
- manager.execute('ALTER TABLE autotests DROP `synch_type`')
+ manager.execute('ALTER TABLE autotests DROP `synch_type`')
diff --git a/frontend/migrations/004_add_indexes.py b/frontend/migrations/004_add_indexes.py
index 7219f236..40b496f7 100644
--- a/frontend/migrations/004_add_indexes.py
+++ b/frontend/migrations/004_add_indexes.py
@@ -8,16 +8,16 @@ INDEXES = (
)
def get_index_name(table, field):
- return table + '_' + field
+ return table + '_' + field
def migrate_up(manager):
- for table, field in INDEXES:
- manager.execute('CREATE INDEX %s ON %s (%s)' %
- (get_index_name(table, field), table, field))
+ for table, field in INDEXES:
+ manager.execute('CREATE INDEX %s ON %s (%s)' %
+ (get_index_name(table, field), table, field))
def migrate_down(manager):
- for table, field in INDEXES:
- manager.execute('DROP INDEX %s ON %s' %
- (get_index_name(table, field), table))
+ for table, field in INDEXES:
+ manager.execute('DROP INDEX %s ON %s' %
+ (get_index_name(table, field), table))
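For a single (table, field) pair the generated statements look like this (the pair is illustrative; the real ones live in INDEXES above):

    table, field = 'jobs', 'owner'
    index_name = table + '_' + field            # same rule as get_index_name()
    print('CREATE INDEX %s ON %s (%s)' % (index_name, table, field))
    # -> CREATE INDEX jobs_owner ON jobs (owner)
    print('DROP INDEX %s ON %s' % (index_name, table))
    # -> DROP INDEX jobs_owner ON jobs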
diff --git a/frontend/migrations/005_one_more_index.py b/frontend/migrations/005_one_more_index.py
index aa3a81dd..787c1c0b 100644
--- a/frontend/migrations/005_one_more_index.py
+++ b/frontend/migrations/005_one_more_index.py
@@ -1,7 +1,7 @@
def migrate_up(manger):
- manger.execute('CREATE INDEX hosts_labels_host_id ON hosts_labels '
- '(host_id)')
+ manger.execute('CREATE INDEX hosts_labels_host_id ON hosts_labels '
+ '(host_id)')
def migrate_down(manger):
- manger.execute('DROP INDEX hosts_labels_host_id ON hosts_labels')
+ manger.execute('DROP INDEX hosts_labels_host_id ON hosts_labels')
diff --git a/frontend/migrations/006_host_label_invalid.py b/frontend/migrations/006_host_label_invalid.py
index 9f6a6f18..20c5e4c9 100644
--- a/frontend/migrations/006_host_label_invalid.py
+++ b/frontend/migrations/006_host_label_invalid.py
@@ -1,8 +1,8 @@
def migrate_up(manager):
- manager.execute('ALTER TABLE hosts ADD `invalid` bool NOT NULL')
- manager.execute('ALTER TABLE labels ADD `invalid` bool NOT NULL')
+ manager.execute('ALTER TABLE hosts ADD `invalid` bool NOT NULL')
+ manager.execute('ALTER TABLE labels ADD `invalid` bool NOT NULL')
def migrate_down(manager):
- manager.execute('ALTER TABLE hosts DROP invalid')
- manager.execute('ALTER TABLE labels DROP invalid')
+ manager.execute('ALTER TABLE hosts DROP invalid')
+ manager.execute('ALTER TABLE labels DROP invalid')
diff --git a/frontend/migrations/007_indexes_on_acl_tables.py b/frontend/migrations/007_indexes_on_acl_tables.py
index 71f6a010..1dab8cf1 100644
--- a/frontend/migrations/007_indexes_on_acl_tables.py
+++ b/frontend/migrations/007_indexes_on_acl_tables.py
@@ -6,16 +6,16 @@ INDEXES = (
)
def get_index_name(table, field):
- return table + '_' + field
+ return table + '_' + field
def migrate_up(manager):
- for table, field in INDEXES:
- manager.execute('CREATE INDEX %s ON %s (%s)' %
- (get_index_name(table, field), table, field))
+ for table, field in INDEXES:
+ manager.execute('CREATE INDEX %s ON %s (%s)' %
+ (get_index_name(table, field), table, field))
def migrate_down(manager):
- for table, field in INDEXES:
- manager.execute('DROP INDEX %s ON %s' %
- (get_index_name(table, field), table))
+ for table, field in INDEXES:
+ manager.execute('DROP INDEX %s ON %s' %
+ (get_index_name(table, field), table))
diff --git a/frontend/settings.py b/frontend/settings.py
index 74eb5ecb..a47eeca5 100644
--- a/frontend/settings.py
+++ b/frontend/settings.py
@@ -14,19 +14,19 @@ ADMINS = (
MANAGERS = ADMINS
-DATABASE_ENGINE = 'mysql_old' # 'postgresql_psycopg2', 'postgresql',
+DATABASE_ENGINE = 'mysql_old' # 'postgresql_psycopg2', 'postgresql',
# 'mysql', 'sqlite3' or 'ado_mssql'.
-DATABASE_PORT = '' # Set to empty string for default.
+DATABASE_PORT = '' # Set to empty string for default.
# Not used with sqlite3.
c = global_config.global_config
-DATABASE_HOST = c.get_config_value("AUTOTEST_WEB", "host")
+DATABASE_HOST = c.get_config_value("AUTOTEST_WEB", "host")
# Or path to database file if using sqlite3.
DATABASE_NAME = c.get_config_value("AUTOTEST_WEB", "database")
# The following not used with sqlite3.
DATABASE_USER = c.get_config_value("AUTOTEST_WEB", "user")
DATABASE_PASSWORD = c.get_config_value("AUTOTEST_WEB", "password")
-
+
# prefix applied to all URLs - useful if requests are coming through apache,
# and you need this app to coexist with others
diff --git a/migrate/migrate.py b/migrate/migrate.py
index 8bbcb5f9..fbed9057 100755
--- a/migrate/migrate.py
+++ b/migrate/migrate.py
@@ -10,267 +10,267 @@ MIGRATE_TABLE = 'migrate_info'
DEFAULT_MIGRATIONS_DIR = 'migrations'
class Migration(object):
- def __init__(self, filename):
- self.version = int(filename[:3])
- self.name = filename[:-3]
- self.module = __import__(self.name, globals(), locals(), [])
- assert hasattr(self.module, 'migrate_up')
- assert hasattr(self.module, 'migrate_down')
+ def __init__(self, filename):
+ self.version = int(filename[:3])
+ self.name = filename[:-3]
+ self.module = __import__(self.name, globals(), locals(), [])
+ assert hasattr(self.module, 'migrate_up')
+ assert hasattr(self.module, 'migrate_down')
- def migrate_up(self, manager):
- self.module.migrate_up(manager)
+ def migrate_up(self, manager):
+ self.module.migrate_up(manager)
- def migrate_down(self, manager):
- self.module.migrate_down(manager)
+ def migrate_down(self, manager):
+ self.module.migrate_down(manager)
class MigrationManager(object):
- connection = None
- cursor = None
- migrations_dir = None
-
- def __init__(self, database, migrations_dir=None):
- self.database = database
- if migrations_dir is None:
- migrations_dir = os.path.abspath(DEFAULT_MIGRATIONS_DIR)
- self.migrations_dir = migrations_dir
- sys.path.append(migrations_dir)
- assert os.path.exists(migrations_dir)
-
- self.db_host = None
- self.db_name = None
- self.username = None
- self.password = None
-
-
- def read_db_info(self):
- # grab the config file and parse for info
- c = global_config.global_config
- self.db_host = c.get_config_value(self.database, "host")
- self.db_name = c.get_config_value(self.database, "database")
- self.username = c.get_config_value(self.database, "user")
- self.password = c.get_config_value(self.database, "password")
-
-
- def connect(self, host, db_name, username, password):
- return MySQLdb.connect(host=host, db=db_name, user=username,
- passwd=password)
-
-
- def open_connection(self):
- self.connection = self.connect(self.db_host, self.db_name,
- self.username, self.password)
- self.connection.autocommit(True)
- self.cursor = self.connection.cursor()
-
-
- def close_connection(self):
- self.connection.close()
-
-
- def execute(self, query, *parameters):
- #print 'SQL:', query % parameters
- return self.cursor.execute(query, parameters)
-
-
- def execute_script(self, script):
- sql_statements = [statement.strip() for statement
- in script.split(';')]
- for statement in sql_statements:
- if statement:
- self.execute(statement)
-
-
- def check_migrate_table_exists(self):
- try:
- self.execute("SELECT * FROM %s" % MIGRATE_TABLE)
- return True
- except MySQLdb.ProgrammingError, exc:
- error_code, _ = exc.args
- if error_code == MySQLdb.constants.ER.NO_SUCH_TABLE:
- return False
- raise
-
-
- def create_migrate_table(self):
- if not self.check_migrate_table_exists():
- self.execute("CREATE TABLE %s (`version` integer)" %
- MIGRATE_TABLE)
- else:
- self.execute("DELETE FROM %s" % MIGRATE_TABLE)
- self.execute("INSERT INTO %s VALUES (0)" % MIGRATE_TABLE)
- assert self.cursor.rowcount == 1
-
-
- def set_db_version(self, version):
- assert isinstance(version, int)
- self.execute("UPDATE %s SET version=%%s" % MIGRATE_TABLE,
- version)
- assert self.cursor.rowcount == 1
-
-
- def get_db_version(self):
- if not self.check_migrate_table_exists():
- return 0
- self.execute("SELECT * FROM %s" % MIGRATE_TABLE)
- rows = self.cursor.fetchall()
- if len(rows) == 0:
- return 0
- assert len(rows) == 1 and len(rows[0]) == 1
- return rows[0][0]
-
-
- def get_migrations(self, minimum_version=None, maximum_version=None):
- migrate_files = [filename for filename
- in os.listdir(self.migrations_dir)
- if re.match(r'^\d\d\d_.*\.py$', filename)]
- migrate_files.sort()
- migrations = [Migration(filename) for filename in migrate_files]
- if minimum_version is not None:
- migrations = [migration for migration in migrations
- if migration.version >= minimum_version]
- if maximum_version is not None:
- migrations = [migration for migration in migrations
- if migration.version <= maximum_version]
- return migrations
-
-
- def do_migration(self, migration, migrate_up=True):
- print 'Applying migration %s' % migration.name, # no newline
- if migrate_up:
- print 'up'
- assert self.get_db_version() == migration.version - 1
- migration.migrate_up(self)
- new_version = migration.version
- else:
- print 'down'
- assert self.get_db_version() == migration.version
- migration.migrate_down(self)
- new_version = migration.version - 1
- self.set_db_version(new_version)
-
-
- def migrate_to_version(self, version):
- current_version = self.get_db_version()
- if current_version < version:
- lower, upper = current_version, version
- migrate_up = True
- else:
- lower, upper = version, current_version
- migrate_up = False
-
- migrations = self.get_migrations(lower + 1, upper)
- if not migrate_up:
- migrations.reverse()
- for migration in migrations:
- self.do_migration(migration, migrate_up)
-
- assert self.get_db_version() == version
- print 'At version', version
-
-
- def get_latest_version(self):
- migrations = self.get_migrations()
- return migrations[-1].version
-
-
- def migrate_to_latest(self):
- latest_version = self.get_latest_version()
- self.migrate_to_version(latest_version)
-
-
- def initialize_test_db(self):
- self.read_db_info()
- test_db_name = 'test_' + self.db_name
- # first, connect to no DB so we can create a test DB
- self.db_name = ''
- self.open_connection()
- print 'Creating test DB', test_db_name
- self.execute('CREATE DATABASE ' + test_db_name)
- self.close_connection()
- # now connect to the test DB
- self.db_name = test_db_name
- self.open_connection()
-
-
- def remove_test_db(self):
- print 'Removing test DB'
- self.execute('DROP DATABASE ' + self.db_name)
-
-
- def get_mysql_args(self):
- return ('-u %(user)s -p%(password)s -h %(host)s %(db)s' % {
- 'user' : self.username,
- 'password' : self.password,
- 'host' : self.db_host,
- 'db' : self.db_name})
-
-
- def migrate_to_version_or_latest(self, version):
- if version is None:
- self.migrate_to_latest()
- else:
- self.migrate_to_version(version)
-
-
- def do_sync_db(self, version=None):
- self.read_db_info()
- self.open_connection()
- print 'Migration starting for database', self.db_name
- self.migrate_to_version_or_latest(version)
- print 'Migration complete'
-
-
- def test_sync_db(self, version=None):
- """\
- Create a fresh DB and run all migrations on it.
- """
- self.initialize_test_db()
- try:
- print 'Starting migration test on DB', self.db_name
- self.migrate_to_version_or_latest(version)
- # show schema to the user
- os.system('mysqldump %s --no-data=true '
- '--add-drop-table=false' %
- self.get_mysql_args())
- finally:
- self.remove_test_db()
- print 'Test finished successfully'
-
-
- def simulate_sync_db(self, version=None):
- """\
- Create a fresh DB, copy the existing DB to it, and then
- try to synchronize it.
- """
- self.read_db_info()
- self.open_connection()
- db_version = self.get_db_version()
- self.close_connection()
- # don't do anything if we're already at the latest version
- if db_version == self.get_latest_version():
- print 'Skipping simulation, already at latest version'
- return
- # get existing data
- self.read_db_info()
- print 'Dumping existing data'
- dump_fd, dump_file = tempfile.mkstemp('.migrate_dump')
- os.close(dump_fd)
- os.system('mysqldump %s >%s' %
- (self.get_mysql_args(), dump_file))
- # fill in test DB
- self.initialize_test_db()
- print 'Filling in test DB'
- os.system('mysql %s <%s' % (self.get_mysql_args(), dump_file))
- os.remove(dump_file)
- try:
- print 'Starting migration test on DB', self.db_name
- self.migrate_to_version_or_latest(version)
- finally:
- self.remove_test_db()
- print 'Test finished successfully'
+ connection = None
+ cursor = None
+ migrations_dir = None
+
+ def __init__(self, database, migrations_dir=None):
+ self.database = database
+ if migrations_dir is None:
+ migrations_dir = os.path.abspath(DEFAULT_MIGRATIONS_DIR)
+ self.migrations_dir = migrations_dir
+ sys.path.append(migrations_dir)
+ assert os.path.exists(migrations_dir)
+
+ self.db_host = None
+ self.db_name = None
+ self.username = None
+ self.password = None
+
+
+ def read_db_info(self):
+ # grab the config file and parse for info
+ c = global_config.global_config
+ self.db_host = c.get_config_value(self.database, "host")
+ self.db_name = c.get_config_value(self.database, "database")
+ self.username = c.get_config_value(self.database, "user")
+ self.password = c.get_config_value(self.database, "password")
+
+
+ def connect(self, host, db_name, username, password):
+ return MySQLdb.connect(host=host, db=db_name, user=username,
+ passwd=password)
+
+
+ def open_connection(self):
+ self.connection = self.connect(self.db_host, self.db_name,
+ self.username, self.password)
+ self.connection.autocommit(True)
+ self.cursor = self.connection.cursor()
+
+
+ def close_connection(self):
+ self.connection.close()
+
+
+ def execute(self, query, *parameters):
+ #print 'SQL:', query % parameters
+ return self.cursor.execute(query, parameters)
+
+
+ def execute_script(self, script):
+ sql_statements = [statement.strip() for statement
+ in script.split(';')]
+ for statement in sql_statements:
+ if statement:
+ self.execute(statement)
+
+
+ def check_migrate_table_exists(self):
+ try:
+ self.execute("SELECT * FROM %s" % MIGRATE_TABLE)
+ return True
+ except MySQLdb.ProgrammingError, exc:
+ error_code, _ = exc.args
+ if error_code == MySQLdb.constants.ER.NO_SUCH_TABLE:
+ return False
+ raise
+
+
+ def create_migrate_table(self):
+ if not self.check_migrate_table_exists():
+ self.execute("CREATE TABLE %s (`version` integer)" %
+ MIGRATE_TABLE)
+ else:
+ self.execute("DELETE FROM %s" % MIGRATE_TABLE)
+ self.execute("INSERT INTO %s VALUES (0)" % MIGRATE_TABLE)
+ assert self.cursor.rowcount == 1
+
+
+ def set_db_version(self, version):
+ assert isinstance(version, int)
+ self.execute("UPDATE %s SET version=%%s" % MIGRATE_TABLE,
+ version)
+ assert self.cursor.rowcount == 1
+
+
+ def get_db_version(self):
+ if not self.check_migrate_table_exists():
+ return 0
+ self.execute("SELECT * FROM %s" % MIGRATE_TABLE)
+ rows = self.cursor.fetchall()
+ if len(rows) == 0:
+ return 0
+ assert len(rows) == 1 and len(rows[0]) == 1
+ return rows[0][0]
+
+
+ def get_migrations(self, minimum_version=None, maximum_version=None):
+ migrate_files = [filename for filename
+ in os.listdir(self.migrations_dir)
+ if re.match(r'^\d\d\d_.*\.py$', filename)]
+ migrate_files.sort()
+ migrations = [Migration(filename) for filename in migrate_files]
+ if minimum_version is not None:
+ migrations = [migration for migration in migrations
+ if migration.version >= minimum_version]
+ if maximum_version is not None:
+ migrations = [migration for migration in migrations
+ if migration.version <= maximum_version]
+ return migrations
+
+
+ def do_migration(self, migration, migrate_up=True):
+ print 'Applying migration %s' % migration.name, # no newline
+ if migrate_up:
+ print 'up'
+ assert self.get_db_version() == migration.version - 1
+ migration.migrate_up(self)
+ new_version = migration.version
+ else:
+ print 'down'
+ assert self.get_db_version() == migration.version
+ migration.migrate_down(self)
+ new_version = migration.version - 1
+ self.set_db_version(new_version)
+
+
+ def migrate_to_version(self, version):
+ current_version = self.get_db_version()
+ if current_version < version:
+ lower, upper = current_version, version
+ migrate_up = True
+ else:
+ lower, upper = version, current_version
+ migrate_up = False
+
+ migrations = self.get_migrations(lower + 1, upper)
+ if not migrate_up:
+ migrations.reverse()
+ for migration in migrations:
+ self.do_migration(migration, migrate_up)
+
+ assert self.get_db_version() == version
+ print 'At version', version
+
+
+ def get_latest_version(self):
+ migrations = self.get_migrations()
+ return migrations[-1].version
+
+
+ def migrate_to_latest(self):
+ latest_version = self.get_latest_version()
+ self.migrate_to_version(latest_version)
+
+
+ def initialize_test_db(self):
+ self.read_db_info()
+ test_db_name = 'test_' + self.db_name
+ # first, connect to no DB so we can create a test DB
+ self.db_name = ''
+ self.open_connection()
+ print 'Creating test DB', test_db_name
+ self.execute('CREATE DATABASE ' + test_db_name)
+ self.close_connection()
+ # now connect to the test DB
+ self.db_name = test_db_name
+ self.open_connection()
+
+
+ def remove_test_db(self):
+ print 'Removing test DB'
+ self.execute('DROP DATABASE ' + self.db_name)
+
+
+ def get_mysql_args(self):
+ return ('-u %(user)s -p%(password)s -h %(host)s %(db)s' % {
+ 'user' : self.username,
+ 'password' : self.password,
+ 'host' : self.db_host,
+ 'db' : self.db_name})
+
+
+ def migrate_to_version_or_latest(self, version):
+ if version is None:
+ self.migrate_to_latest()
+ else:
+ self.migrate_to_version(version)
+
+
+ def do_sync_db(self, version=None):
+ self.read_db_info()
+ self.open_connection()
+ print 'Migration starting for database', self.db_name
+ self.migrate_to_version_or_latest(version)
+ print 'Migration complete'
+
+
+ def test_sync_db(self, version=None):
+ """\
+ Create a fresh DB and run all migrations on it.
+ """
+ self.initialize_test_db()
+ try:
+ print 'Starting migration test on DB', self.db_name
+ self.migrate_to_version_or_latest(version)
+ # show schema to the user
+ os.system('mysqldump %s --no-data=true '
+ '--add-drop-table=false' %
+ self.get_mysql_args())
+ finally:
+ self.remove_test_db()
+ print 'Test finished successfully'
+
+
+ def simulate_sync_db(self, version=None):
+ """\
+ Create a fresh DB, copy the existing DB to it, and then
+ try to synchronize it.
+ """
+ self.read_db_info()
+ self.open_connection()
+ db_version = self.get_db_version()
+ self.close_connection()
+ # don't do anything if we're already at the latest version
+ if db_version == self.get_latest_version():
+ print 'Skipping simulation, already at latest version'
+ return
+ # get existing data
+ self.read_db_info()
+ print 'Dumping existing data'
+ dump_fd, dump_file = tempfile.mkstemp('.migrate_dump')
+ os.close(dump_fd)
+ os.system('mysqldump %s >%s' %
+ (self.get_mysql_args(), dump_file))
+ # fill in test DB
+ self.initialize_test_db()
+ print 'Filling in test DB'
+ os.system('mysql %s <%s' % (self.get_mysql_args(), dump_file))
+ os.remove(dump_file)
+ try:
+ print 'Starting migration test on DB', self.db_name
+ self.migrate_to_version_or_latest(version)
+ finally:
+ self.remove_test_db()
+ print 'Test finished successfully'
USAGE = """\
@@ -282,37 +282,37 @@ Options:
def main():
- parser = OptionParser()
- parser.add_option("-d", "--database",
- help="which database to act on",
- dest="database")
- parser.add_option("-a", "--action", help="what action to perform",
- dest="action")
- (options, args) = parser.parse_args()
- manager = MigrationManager(options.database)
-
- if len(args) > 0:
- if len(args) > 1:
- version = int(args[1])
- else:
- version = None
- if args[0] == 'sync':
- manager.do_sync_db(version)
- elif args[0] == 'test':
- manager.test_sync_db(version)
- elif args[0] == 'simulate':
- manager.simulate_sync_db(version)
- elif args[0] == 'safesync':
- print 'Simluating migration'
- manager.simulate_sync_db(version)
- print 'Performing real migration'
- manager.do_sync_db(version)
- else:
- print USAGE
- return
-
- print USAGE
+ parser = OptionParser()
+ parser.add_option("-d", "--database",
+ help="which database to act on",
+ dest="database")
+ parser.add_option("-a", "--action", help="what action to perform",
+ dest="action")
+ (options, args) = parser.parse_args()
+ manager = MigrationManager(options.database)
+
+ if len(args) > 0:
+ if len(args) > 1:
+ version = int(args[1])
+ else:
+ version = None
+ if args[0] == 'sync':
+ manager.do_sync_db(version)
+ elif args[0] == 'test':
+ manager.test_sync_db(version)
+ elif args[0] == 'simulate':
+ manager.simulate_sync_db(version)
+ elif args[0] == 'safesync':
+            print 'Simulating migration'
+ manager.simulate_sync_db(version)
+ print 'Performing real migration'
+ manager.do_sync_db(version)
+ else:
+ print USAGE
+ return
+
+ print USAGE
if __name__ == '__main__':
- main()
+ main()
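For reference, a minimal sketch of driving MigrationManager programmatically instead of through main() above. It assumes migrate.py is importable as 'migrate' (the unit tests below import it the same way) and uses the same AUTOTEST_WEB config section those tests use; the version number is illustrative.

import migrate

manager = migrate.MigrationManager('AUTOTEST_WEB')
manager.do_sync_db()       # migrate the configured database to the latest version
manager.do_sync_db(3)      # or migrate up/down to a specific version
manager.test_sync_db()     # build a throwaway test_<name> database, migrate it, then drop it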
diff --git a/migrate/migrate_unittest.py b/migrate/migrate_unittest.py
index ec4ece1d..d33dccd9 100644
--- a/migrate/migrate_unittest.py
+++ b/migrate/migrate_unittest.py
@@ -13,140 +13,140 @@ CONFIG_DB = 'AUTOTEST_WEB'
NUM_MIGRATIONS = 3
class DummyMigration(object):
- """\
- Dummy migration class that records all migrations done in a class
- varaible.
- """
+ """\
+ Dummy migration class that records all migrations done in a class
+    variable.
+ """
- migrations_done = []
+ migrations_done = []
- def __init__(self, version):
- self.version = version
- self.name = '%03d_test' % version
+ def __init__(self, version):
+ self.version = version
+ self.name = '%03d_test' % version
- @classmethod
- def get_migrations_done(cls):
- return cls.migrations_done
+ @classmethod
+ def get_migrations_done(cls):
+ return cls.migrations_done
- @classmethod
- def clear_migrations_done(cls):
- cls.migrations_done = []
+ @classmethod
+ def clear_migrations_done(cls):
+ cls.migrations_done = []
- @classmethod
- def do_migration(cls, version, direction):
- cls.migrations_done.append((version, direction))
+ @classmethod
+ def do_migration(cls, version, direction):
+ cls.migrations_done.append((version, direction))
- def migrate_up(self, manager):
- self.do_migration(self.version, 'up')
- if self.version == 1:
- manager.create_migrate_table()
+ def migrate_up(self, manager):
+ self.do_migration(self.version, 'up')
+ if self.version == 1:
+ manager.create_migrate_table()
- def migrate_down(self, manager):
- self.do_migration(self.version, 'down')
+ def migrate_down(self, manager):
+ self.do_migration(self.version, 'down')
MIGRATIONS = [DummyMigration(n) for n in xrange(1, NUM_MIGRATIONS + 1)]
class TestableMigrationManager(migrate.MigrationManager):
- def __init__(self, database, migrations_dir=None):
- self.database = database
- self.migrations_dir = migrations_dir
- self.db_host = None
- self.db_name = None
- self.username = None
- self.password = None
+ def __init__(self, database, migrations_dir=None):
+ self.database = database
+ self.migrations_dir = migrations_dir
+ self.db_host = None
+ self.db_name = None
+ self.username = None
+ self.password = None
- def read_db_info(self):
- migrate.MigrationManager.read_db_info(self)
- self.db_name = 'test_' + self.db_name
+ def read_db_info(self):
+ migrate.MigrationManager.read_db_info(self)
+ self.db_name = 'test_' + self.db_name
- def get_migrations(self, minimum_version=None, maximum_version=None):
- minimum_version = minimum_version or 1
- maximum_version = maximum_version or len(MIGRATIONS)
- return MIGRATIONS[minimum_version-1:maximum_version]
+ def get_migrations(self, minimum_version=None, maximum_version=None):
+ minimum_version = minimum_version or 1
+ maximum_version = maximum_version or len(MIGRATIONS)
+ return MIGRATIONS[minimum_version-1:maximum_version]
class MigrateManagerTest(unittest.TestCase):
- config = global_config.global_config
- host = config.get_config_value(CONFIG_DB, 'host')
- db_name = 'test_' + config.get_config_value(CONFIG_DB, 'database')
- user = config.get_config_value(CONFIG_DB, 'user')
- password = config.get_config_value(CONFIG_DB, 'password')
-
- def do_sql(self, sql):
- self.con = MySQLdb.connect(host=self.host, user=self.user,
- passwd=self.password)
- self.con.autocommit(True)
- self.cur = self.con.cursor()
- try:
- self.cur.execute(sql)
- finally:
- self.con.close()
-
-
- def remove_db(self):
- self.do_sql('DROP DATABASE ' + self.db_name)
-
-
- def setUp(self):
- self.do_sql('CREATE DATABASE ' + self.db_name)
- try:
- self.manager = TestableMigrationManager(CONFIG_DB)
- except MySQLdb.OperationalError:
- self.remove_db()
- raise
- DummyMigration.clear_migrations_done()
-
-
- def tearDown(self):
- self.remove_db()
-
-
- def test_sync(self):
- self.manager.do_sync_db()
- self.assertEquals(self.manager.get_db_version(), NUM_MIGRATIONS)
- self.assertEquals(DummyMigration.get_migrations_done(),
- [(1, 'up'), (2, 'up'), (3, 'up')])
-
- DummyMigration.clear_migrations_done()
- self.manager.do_sync_db(0)
- self.assertEquals(self.manager.get_db_version(), 0)
- self.assertEquals(DummyMigration.get_migrations_done(),
- [(3, 'down'), (2, 'down'), (1, 'down')])
-
-
- def test_sync_one_by_one(self):
- for version in xrange(1, NUM_MIGRATIONS + 1):
- self.manager.do_sync_db(version)
- self.assertEquals(self.manager.get_db_version(),
- version)
- self.assertEquals(
- DummyMigration.get_migrations_done()[-1],
- (version, 'up'))
-
- for version in xrange(NUM_MIGRATIONS - 1, -1, -1):
- self.manager.do_sync_db(version)
- self.assertEquals(self.manager.get_db_version(),
- version)
- self.assertEquals(
- DummyMigration.get_migrations_done()[-1],
- (version + 1, 'down'))
-
-
- def test_null_sync(self):
- self.manager.do_sync_db()
- DummyMigration.clear_migrations_done()
- self.manager.do_sync_db()
- self.assertEquals(DummyMigration.get_migrations_done(), [])
+ config = global_config.global_config
+ host = config.get_config_value(CONFIG_DB, 'host')
+ db_name = 'test_' + config.get_config_value(CONFIG_DB, 'database')
+ user = config.get_config_value(CONFIG_DB, 'user')
+ password = config.get_config_value(CONFIG_DB, 'password')
+
+ def do_sql(self, sql):
+ self.con = MySQLdb.connect(host=self.host, user=self.user,
+ passwd=self.password)
+ self.con.autocommit(True)
+ self.cur = self.con.cursor()
+ try:
+ self.cur.execute(sql)
+ finally:
+ self.con.close()
+
+
+ def remove_db(self):
+ self.do_sql('DROP DATABASE ' + self.db_name)
+
+
+ def setUp(self):
+ self.do_sql('CREATE DATABASE ' + self.db_name)
+ try:
+ self.manager = TestableMigrationManager(CONFIG_DB)
+ except MySQLdb.OperationalError:
+ self.remove_db()
+ raise
+ DummyMigration.clear_migrations_done()
+
+
+ def tearDown(self):
+ self.remove_db()
+
+
+ def test_sync(self):
+ self.manager.do_sync_db()
+ self.assertEquals(self.manager.get_db_version(), NUM_MIGRATIONS)
+ self.assertEquals(DummyMigration.get_migrations_done(),
+ [(1, 'up'), (2, 'up'), (3, 'up')])
+
+ DummyMigration.clear_migrations_done()
+ self.manager.do_sync_db(0)
+ self.assertEquals(self.manager.get_db_version(), 0)
+ self.assertEquals(DummyMigration.get_migrations_done(),
+ [(3, 'down'), (2, 'down'), (1, 'down')])
+
+
+ def test_sync_one_by_one(self):
+ for version in xrange(1, NUM_MIGRATIONS + 1):
+ self.manager.do_sync_db(version)
+ self.assertEquals(self.manager.get_db_version(),
+ version)
+ self.assertEquals(
+ DummyMigration.get_migrations_done()[-1],
+ (version, 'up'))
+
+ for version in xrange(NUM_MIGRATIONS - 1, -1, -1):
+ self.manager.do_sync_db(version)
+ self.assertEquals(self.manager.get_db_version(),
+ version)
+ self.assertEquals(
+ DummyMigration.get_migrations_done()[-1],
+ (version + 1, 'down'))
+
+
+ def test_null_sync(self):
+ self.manager.do_sync_db()
+ DummyMigration.clear_migrations_done()
+ self.manager.do_sync_db()
+ self.assertEquals(DummyMigration.get_migrations_done(), [])
if __name__ == '__main__':
- unittest.main()
+ unittest.main()
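As a quick sketch of the flow test_sync above exercises (it assumes the test database created in setUp exists; the recorded tuples follow directly from DummyMigration and NUM_MIGRATIONS):

manager = TestableMigrationManager(CONFIG_DB)
manager.do_sync_db()                   # walks DummyMigration versions 1..3 upward
DummyMigration.get_migrations_done()   # -> [(1, 'up'), (2, 'up'), (3, 'up')]
manager.do_sync_db(0)                  # and back down again: versions 3, 2, 1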
diff --git a/mirror/mirror b/mirror/mirror
index 31be530f..16c77010 100755
--- a/mirror/mirror
+++ b/mirror/mirror
@@ -6,12 +6,12 @@ import rsync, trigger, common
excludes = ('2.6.0-test*/', 'broken-out/', '*.sign', '*.gz')
default_clients_path = os.path.expanduser(
- os.path.join('~','.autotest_mirror_clients'))
+ os.path.join('~','.autotest_mirror_clients'))
default_config_path = os.path.expanduser(
- os.path.join ('~', '.autotest_mirror_config'))
+ os.path.join ('~', '.autotest_mirror_config'))
options = optparse.Values(defaults={'email': None,
- 'clients': default_clients_path,
- 'config': default_config_path})
+ 'clients': default_clients_path,
+ 'config': default_config_path})
# This counter gets appended and incremented for all jobs created
# this ensures that no two jobs of the same name are created during
@@ -25,242 +25,242 @@ patches = %s
"""
AUTOTEST_WRAPPER = """def step_init():
- job.next_step([step_test])
- testkernel = job.kernel(kernel)
-
- if patches:
- testkernel.patch(*patches)
- if config:
- testkernel.config(config)
- else:
- testkernel.config('', None, True)
- testkernel.build()
- testkernel.boot()
+ job.next_step([step_test])
+ testkernel = job.kernel(kernel)
+
+ if patches:
+ testkernel.patch(*patches)
+ if config:
+ testkernel.config(config)
+ else:
+ testkernel.config('', None, True)
+ testkernel.build()
+ testkernel.boot()
def step_test():"""
def test_kernel(client, control, kernel, config, patches=None):
- """Creates a control file for testing
-
- Args:
- client: A str of the autoteset client hostname
- control: A str filename of the control file to wrap as a
- kernel test or an open file to the same
- kernel: A str of the kernel version (i.e. x.x.xx)
- config: A str filename to the kernel config on the client
- patches: A list of patches to apply during kernel build
-
- Returns:
- The path to the new control file which may not be needed
- since the control file will be created in the host's
- queue automatically
- """
-
- is_autoserv_ctl = control.endswith('.srv')
-
- # Open a temp file to create the generated control file in
- tmpfile = tempfile.mktemp()
- c = open(tmpfile, 'w')
-
- print >> c, PREAMBLE % tuple([repr(s) for s in (kernel, config, patches)])
-
- if not is_autoserv_ctl:
- print >> c, AUTOTEST_WRAPPER
-
- # Open the basis control file and pull its contents into this one
- control = open(os.path.expanduser(control), "r")
-
- # If is an AT file then we need to indent to match wrapper
- # function level indentation, srv files don't need this indentation
- indent = ('\t', '')[is_autoserv_ctl]
- for line in control:
- print >> c, "%s%s" % (indent, line.rstrip())
- c.close()
-
- # Create a name for the queued file
- # if we came from an .srv file create an .srv file
- extension = ('', '.srv')[is_autoserv_ctl]
- global counter
- output = os.path.join(options.queuedir, client, "%s-%d%.3d%s" % (kernel, \
- int(time.time()), counter, extension))
- counter = (counter + 1) % 1000
- shutil.move(tmpfile, output)
- return output
+ """Creates a control file for testing
+
+ Args:
+        client: A str of the autotest client hostname
+ control: A str filename of the control file to wrap as a
+ kernel test or an open file to the same
+        kernel: A str of the kernel version (e.g. x.x.xx)
+ config: A str filename to the kernel config on the client
+ patches: A list of patches to apply during kernel build
+
+ Returns:
+ The path to the new control file which may not be needed
+ since the control file will be created in the host's
+ queue automatically
+ """
+
+ is_autoserv_ctl = control.endswith('.srv')
+
+ # Open a temp file to create the generated control file in
+ tmpfile = tempfile.mktemp()
+ c = open(tmpfile, 'w')
+
+ print >> c, PREAMBLE % tuple([repr(s) for s in (kernel, config, patches)])
+
+ if not is_autoserv_ctl:
+ print >> c, AUTOTEST_WRAPPER
+
+ # Open the basis control file and pull its contents into this one
+ control = open(os.path.expanduser(control), "r")
+
+ # If is an AT file then we need to indent to match wrapper
+ # function level indentation, srv files don't need this indentation
+ indent = ('\t', '')[is_autoserv_ctl]
+ for line in control:
+ print >> c, "%s%s" % (indent, line.rstrip())
+ c.close()
+
+ # Create a name for the queued file
+ # if we came from an .srv file create an .srv file
+ extension = ('', '.srv')[is_autoserv_ctl]
+ global counter
+ output = os.path.join(options.queuedir, client, "%s-%d%.3d%s" % (kernel, \
+ int(time.time()), counter, extension))
+ counter = (counter + 1) % 1000
+ shutil.move(tmpfile, output)
+ return output
def load_conf(conf_path):
- """Loads a configuration file in the form
- hostname1.domain.tla /path/to/control ~/another_control
- hostname2.domain.tla ~/another_control
-
- Returns:
- A dictionary keyed on hostname storing the args
- Lines beginning with # are stripped from the conf file
- """
- machine_controls = {}
- if not os.path.exists(conf_path):
- return machine_controls
- conf_file = open(conf_path)
- for line in conf_file.readlines():
- if line.startswith('#'):
- continue
- elts = line.split()
- # Ignore lines that aren't string doubles or more
- if len(elts) < 2:
- continue
- control_file_paths = [os.path.expanduser(e) for e in elts[1:]]
- machine_controls[elts[0]] = control_file_paths
- return machine_controls
+ """Loads a configuration file in the form
+ hostname1.domain.tla /path/to/control ~/another_control
+ hostname2.domain.tla ~/another_control
+
+ Returns:
+ A dictionary keyed on hostname storing the args
+ Lines beginning with # are stripped from the conf file
+ """
+ machine_controls = {}
+ if not os.path.exists(conf_path):
+ return machine_controls
+ conf_file = open(conf_path)
+ for line in conf_file.readlines():
+ if line.startswith('#'):
+ continue
+ elts = line.split()
+        # Ignore lines with fewer than two fields (hostname plus at least one control path)
+ if len(elts) < 2:
+ continue
+ control_file_paths = [os.path.expanduser(e) for e in elts[1:]]
+ machine_controls[elts[0]] = control_file_paths
+ return machine_controls
def mail(from_address, to_addresses, subject, message_text):
- # if passed a string for the to_addresses convert it to a tuple
- if type(to_addresses) is str:
- to_addresses = (to_addresses,)
+ # if passed a string for the to_addresses convert it to a tuple
+ if type(to_addresses) is str:
+ to_addresses = (to_addresses,)
- message = email.Message.Message()
- message["To"] = ", ".join(to_addresses)
- message["From"] = from_address
- message["Subject"] = subject
- message.set_payload(message_text)
+ message = email.Message.Message()
+ message["To"] = ", ".join(to_addresses)
+ message["From"] = from_address
+ message["Subject"] = subject
+ message.set_payload(message_text)
- try:
- sendmail(message.as_string())
- except SendmailException, e:
- server = smtplib.SMTP("localhost")
- server.sendmail(from_address, to_addresses, message.as_string())
- server.quit()
+ try:
+ sendmail(message.as_string())
+ except SendmailException, e:
+ server = smtplib.SMTP("localhost")
+ server.sendmail(from_address, to_addresses, message.as_string())
+ server.quit()
MAIL = "sendmail"
class SendmailException(Exception):
- pass
+ pass
def sendmail(message):
- """Send an email using sendmail"""
- # open a pipe to the mail program and
- # write the data to the pipe
- p = os.popen("%s -t" % MAIL, 'w')
- p.write(message)
- exitcode = p.close()
- if exitcode:
- raise SendmailException("Exit code: %s" % exitcode)
+ """Send an email using sendmail"""
+ # open a pipe to the mail program and
+ # write the data to the pipe
+ p = os.popen("%s -t" % MAIL, 'w')
+ p.write(message)
+ exitcode = p.close()
+ if exitcode:
+ raise SendmailException("Exit code: %s" % exitcode)
def send_kernel_mail(kernel_list):
- if not options.email:
- return
- if len(kernel_list) < 1:
- return
- message = "\n".join(kernel_list)
- message = "Testing new kernel releases:\n%s" % message
+ if not options.email:
+ return
+ if len(kernel_list) < 1:
+ return
+ message = "\n".join(kernel_list)
+ message = "Testing new kernel releases:\n%s" % message
- mail("autotest-server@localhost", options.email, \
- "autotest new kernel notification", message)
+ mail("autotest-server@localhost", options.email, \
+ "autotest new kernel notification", message)
encode_sep = re.compile('(\D+)')
def kver_encode(version):
- if 'rc' not in version:
- version += '-rc99'
- if 'git' not in version:
- version += '-git99'
- bits = encode_sep.split(version)
- for n in range(0, len(bits), 2):
- if len(bits[n]) < 2:
- bits[n] = '0' + bits[n]
- return ''.join(bits)
+ if 'rc' not in version:
+ version += '-rc99'
+ if 'git' not in version:
+ version += '-git99'
+ bits = encode_sep.split(version)
+ for n in range(0, len(bits), 2):
+ if len(bits[n]) < 2:
+ bits[n] = '0' + bits[n]
+ return ''.join(bits)
def kver_cmp(a, b):
- a, b = kver_encode(a), kver_encode(b)
- return cmp(a, b)
+ a, b = kver_encode(a), kver_encode(b)
+ return cmp(a, b)
def closest_kver_leq(klist, kver):
- """Return the closest kernel ver in the list that is leq kver unless
- kver is the lowest, in which case return the lowest in klist"""
- if kver in klist:
- return kver
- l = list(klist)
- l.append(kver)
- l.sort(cmp=kver_cmp)
- i = l.index(kver)
- if i == 0:
- return l[1]
- return l[i - 1]
+ """Return the closest kernel ver in the list that is leq kver unless
+ kver is the lowest, in which case return the lowest in klist"""
+ if kver in klist:
+ return kver
+ l = list(klist)
+ l.append(kver)
+ l.sort(cmp=kver_cmp)
+ i = l.index(kver)
+ if i == 0:
+ return l[1]
+ return l[i - 1]
def perform_client_tests(kernel_list):
- machines = load_conf(options.clients)
- for kernel in kernel_list:
- # Get a list of all the machines available for testing
- # and the tests that each one is to execute
- for machine, controls in machines.items():
- config_paths = load_conf(os.path.join(options.config,
- machine))
- config = '/boot/config'
- if len(config_paths) > 0:
- kvers = config_paths.keys()
- close = closest_kver_leq(kvers,
- kernel)
- config = config_paths[close][0]
- for control in controls:
- test_kernel(machine,
- control,
- kernel,
- config)
+ machines = load_conf(options.clients)
+ for kernel in kernel_list:
+ # Get a list of all the machines available for testing
+ # and the tests that each one is to execute
+ for machine, controls in machines.items():
+ config_paths = load_conf(os.path.join(options.config,
+ machine))
+ config = '/boot/config'
+ if len(config_paths) > 0:
+ kvers = config_paths.keys()
+ close = closest_kver_leq(kvers,
+ kernel)
+ config = config_paths[close][0]
+ for control in controls:
+ test_kernel(machine,
+ control,
+ kernel,
+ config)
def main():
- """Performs an rsync of kernel.org, sends email on new releases
- and starts testing on them
- """
- source = 'rsync://rsync.kernel.org/pub/linux/kernel'
- mirror = rsync.rsync(source, options.target, excludes)
+ """Performs an rsync of kernel.org, sends email on new releases
+ and starts testing on them
+ """
+ source = 'rsync://rsync.kernel.org/pub/linux/kernel'
+ mirror = rsync.rsync(source, options.target, excludes)
- mirror.sync('v2.6/patch-2.6.*.bz2', 'kernel/v2.6')
- # for some reason 'linux-2.6.[0-9]*.tar.bz2' doesn't work
- mirror.sync('v2.6/linux-2.6.[0-9].tar.bz2', 'kernel/v2.6')
- mirror.sync('v2.6/linux-2.6.[0-9][0-9].tar.bz2', 'kernel/v2.6')
- mirror.sync('v2.6/testing/patch*.bz2', 'kernel/v2.6/testing')
- mirror.sync('v2.6/snapshots/*.bz2', 'kernel/v2.6/snapshots')
- mirror.sync('people/akpm/patches/2.6/*', 'akpm')
+ mirror.sync('v2.6/patch-2.6.*.bz2', 'kernel/v2.6')
+ # for some reason 'linux-2.6.[0-9]*.tar.bz2' doesn't work
+ mirror.sync('v2.6/linux-2.6.[0-9].tar.bz2', 'kernel/v2.6')
+ mirror.sync('v2.6/linux-2.6.[0-9][0-9].tar.bz2', 'kernel/v2.6')
+ mirror.sync('v2.6/testing/patch*.bz2', 'kernel/v2.6/testing')
+ mirror.sync('v2.6/snapshots/*.bz2', 'kernel/v2.6/snapshots')
+ mirror.sync('people/akpm/patches/2.6/*', 'akpm')
- trig = trigger.Trigger()
+ trig = trigger.Trigger()
- trig.add_action(send_kernel_mail)
- trig.add_action(perform_client_tests)
+ trig.add_action(send_kernel_mail)
+ trig.add_action(perform_client_tests)
- trig.scan(mirror.tmpfile)
+ trig.scan(mirror.tmpfile)
if __name__ == '__main__':
- usage = "mirror [options] <mirrordir> <queuedir>"
- parser = optparse.OptionParser(usage=usage)
- parser.add_option("-e", "--email", dest="email",
- help="Email address to alert for new kernels",
- metavar="EMAIL")
- parser.add_option("-c", "--clients", dest="clients",
- help="Location of the file that describes which \
- control file to run on which clients; defaults \
- to %s" % default_clients_path, \
- metavar="CLIENTS", \
- default=default_clients_path)
- parser.add_option("-f", "--config", dest="config",
- help="Location of the directory that describes which \
- config files to use on which clients; defaults \
- to %s" % default_config_path,
- metavar="CONFIG",
- default=default_config_path)
- (options, args) = parser.parse_args()
- if len(args) < 2:
- parser.print_help()
- parser.error("A mirrordir and a queuedir must be specified")
- options.target = args[0]
- options.queuedir = args[1]
-
- main()
+ usage = "mirror [options] <mirrordir> <queuedir>"
+ parser = optparse.OptionParser(usage=usage)
+ parser.add_option("-e", "--email", dest="email",
+ help="Email address to alert for new kernels",
+ metavar="EMAIL")
+ parser.add_option("-c", "--clients", dest="clients",
+ help="Location of the file that describes which \
+ control file to run on which clients; defaults \
+ to %s" % default_clients_path, \
+ metavar="CLIENTS", \
+ default=default_clients_path)
+ parser.add_option("-f", "--config", dest="config",
+ help="Location of the directory that describes which \
+ config files to use on which clients; defaults \
+ to %s" % default_config_path,
+ metavar="CONFIG",
+ default=default_config_path)
+ (options, args) = parser.parse_args()
+ if len(args) < 2:
+ parser.print_help()
+ parser.error("A mirrordir and a queuedir must be specified")
+ options.target = args[0]
+ options.queuedir = args[1]
+
+ main()
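For context, a hypothetical example of the clients file that load_conf() above parses; hostnames and paths are made up, and '~' is expanded via os.path.expanduser:

# ~/.autotest_mirror_clients
#   # comment lines are skipped
#   host1.example.com /path/to/control ~/extra_control
#   host2.example.com ~/another_control
#
# load_conf(default_clients_path) would then return roughly:
#   {'host1.example.com': ['/path/to/control', '/home/me/extra_control'],
#    'host2.example.com': ['/home/me/another_control']}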
diff --git a/mirror/rsync.py b/mirror/rsync.py
index 0133cb67..baa643e0 100755
--- a/mirror/rsync.py
+++ b/mirror/rsync.py
@@ -2,31 +2,31 @@
import os
class rsync:
- command = '/usr/bin/rsync -rltvz'
+ command = '/usr/bin/rsync -rltvz'
- def __init__(self, prefix, target, excludes = []):
- if not os.path.isdir(target):
- os.makedirs(target)
- self.prefix = prefix
- self.target = target
- # Have to use a tmpfile rather than a pipe, else we could
- # trigger from a file that's still only partially mirrored
- self.tmpfile = '/tmp/mirror.%d' % os.getpid()
- if os.path.exists(self.tmpfile):
- os.remove(self.tmpfile)
- self.exclude = ' '.join(['--exclude ' + x for x in excludes])
+ def __init__(self, prefix, target, excludes = []):
+ if not os.path.isdir(target):
+ os.makedirs(target)
+ self.prefix = prefix
+ self.target = target
+ # Have to use a tmpfile rather than a pipe, else we could
+ # trigger from a file that's still only partially mirrored
+ self.tmpfile = '/tmp/mirror.%d' % os.getpid()
+ if os.path.exists(self.tmpfile):
+ os.remove(self.tmpfile)
+ self.exclude = ' '.join(['--exclude ' + x for x in excludes])
- def __del__(self):
- os.remove(self.tmpfile)
+ def __del__(self):
+ os.remove(self.tmpfile)
- def sync(self, src, dest):
- os.chdir(self.target)
- if not os.path.isdir(dest):
- os.makedirs(dest)
- src = os.path.join(self.prefix, src)
- cmd = self.command + ' %s "%s" "%s"' % (self.exclude, src, dest)
- # print cmd + ' >> %s 2>&1' % self.tmpfile
- if os.system(cmd + ' >> %s 2>&1' % self.tmpfile):
- raise 'rsync command failed'
+ def sync(self, src, dest):
+ os.chdir(self.target)
+ if not os.path.isdir(dest):
+ os.makedirs(dest)
+ src = os.path.join(self.prefix, src)
+ cmd = self.command + ' %s "%s" "%s"' % (self.exclude, src, dest)
+ # print cmd + ' >> %s 2>&1' % self.tmpfile
+ if os.system(cmd + ' >> %s 2>&1' % self.tmpfile):
+            raise Exception('rsync command failed')
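A minimal usage sketch of this class, mirroring how mirror/mirror above drives it (only the local target directory is hypothetical):

mirror = rsync('rsync://rsync.kernel.org/pub/linux/kernel', '/srv/mirror',
               excludes=('*.sign', '*.gz'))
mirror.sync('v2.6/patch-2.6.*.bz2', 'kernel/v2.6')
# each sync() appends rsync's output to mirror.tmpfile, which
# trigger.Trigger.scan() later reads to detect freshly mirrored kernels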
diff --git a/mirror/trigger.py b/mirror/trigger.py
index cccef7c7..562ec052 100755
--- a/mirror/trigger.py
+++ b/mirror/trigger.py
@@ -5,55 +5,55 @@ import os, re
# legitimate releases. Return the simplified triggers for each of those files.
matches = (
- # The major tarballs
- r'linux-(2\.6\.\d+)\.tar\.bz2',
- # Stable releases
- r'patch-(2\.6\.\d+\.\d+)\.bz2',
- # -rc releases
- r'patch-(2\.6\.\d+-rc\d+)\.bz2',
- # -git releases
- r'patch-(2\.6\.\d+(-rc\d+)?-git\d+).bz2',
- # -mm tree
- r'(2\.6\.\d+(-rc\d+)?-mm\d+)\.bz2',
- )
+ # The major tarballs
+ r'linux-(2\.6\.\d+)\.tar\.bz2',
+ # Stable releases
+ r'patch-(2\.6\.\d+\.\d+)\.bz2',
+ # -rc releases
+ r'patch-(2\.6\.\d+-rc\d+)\.bz2',
+ # -git releases
+ r'patch-(2\.6\.\d+(-rc\d+)?-git\d+).bz2',
+ # -mm tree
+ r'(2\.6\.\d+(-rc\d+)?-mm\d+)\.bz2',
+ )
compiled_matches = [re.compile(r) for r in matches]
class Trigger(object):
- def __init__(self):
- self.__actions = []
-
- def __re_scan(self, pattern, line):
- """
- First check to see whether the pattern matches.
- (eg. Does it match "linux-2.6.\d.tar.bz2" ?)
- Then we strip out the actual trigger itself from that,
- and return it.
- (eg. return "2.6.\d")
- Note that the pattern uses match,
- so you need the whole filename
- """
- match = pattern.match(line)
- if match:
- return match.group(1)
- else:
- return None
-
-
- def scan(self, input_file):
- triggers = []
- for line in open(input_file, 'r').readlines():
- for pattern in compiled_matches:
- filename = os.path.basename(line)
- t = self.__re_scan(pattern, filename)
- if t:
- triggers.append(t)
-
- # Call each of the actions and pass in the kernel list
- for action in self.__actions:
- action(triggers)
-
-
- def add_action(self, func):
- self.__actions.append(func)
+ def __init__(self):
+ self.__actions = []
+
+ def __re_scan(self, pattern, line):
+ """
+ First check to see whether the pattern matches.
+ (eg. Does it match "linux-2.6.\d.tar.bz2" ?)
+ Then we strip out the actual trigger itself from that,
+ and return it.
+ (eg. return "2.6.\d")
+ Note that the pattern uses match,
+ so you need the whole filename
+ """
+ match = pattern.match(line)
+ if match:
+ return match.group(1)
+ else:
+ return None
+
+
+ def scan(self, input_file):
+ triggers = []
+ for line in open(input_file, 'r').readlines():
+ for pattern in compiled_matches:
+ filename = os.path.basename(line)
+ t = self.__re_scan(pattern, filename)
+ if t:
+ triggers.append(t)
+
+ # Call each of the actions and pass in the kernel list
+ for action in self.__actions:
+ action(triggers)
+
+
+ def add_action(self, func):
+ self.__actions.append(func)
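And, as a short sketch, how mirror/mirror above wires this class together:

trig = Trigger()
trig.add_action(send_kernel_mail)        # mail out the list of new kernel versions
trig.add_action(perform_client_tests)    # queue a control file per client for each new kernel
trig.scan(mirror.tmpfile)                # scan the rsync log; each action receives the trigger list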
diff --git a/scheduler/monitor_db.py b/scheduler/monitor_db.py
index 31dba1f5..fd3e3e9c 100644
--- a/scheduler/monitor_db.py
+++ b/scheduler/monitor_db.py
@@ -15,12 +15,12 @@ AUTOSERV_NICE_LEVEL = 10
AUTOTEST_PATH = os.path.join(os.path.dirname(__file__), '..')
if os.environ.has_key('AUTOTEST_DIR'):
- AUTOTEST_PATH = os.environ['AUTOTEST_DIR']
+ AUTOTEST_PATH = os.environ['AUTOTEST_DIR']
AUTOTEST_SERVER_DIR = os.path.join(AUTOTEST_PATH, 'server')
AUTOTEST_TKO_DIR = os.path.join(AUTOTEST_PATH, 'tko')
if AUTOTEST_SERVER_DIR not in sys.path:
- sys.path.insert(0, AUTOTEST_SERVER_DIR)
+ sys.path.insert(0, AUTOTEST_SERVER_DIR)
AUTOSERV_PID_FILE = '.autoserv_execute'
# how long to wait for autoserv to write a pidfile
@@ -35,1744 +35,1744 @@ _global_config_section = 'SCHEDULER'
def main():
- usage = 'usage: %prog [options] results_dir'
-
- parser = optparse.OptionParser(usage)
- parser.add_option('--recover-hosts', help='Try to recover dead hosts',
- action='store_true')
- parser.add_option('--logfile', help='Set a log file that all stdout ' +
- 'should be redirected to. Stderr will go to this ' +
- 'file + ".err"')
- parser.add_option('--test', help='Indicate that scheduler is under ' +
- 'test and should use dummy autoserv and no parsing',
- action='store_true')
- (options, args) = parser.parse_args()
- if len(args) != 1:
- parser.print_usage()
- return
-
- global RESULTS_DIR
- RESULTS_DIR = args[0]
-
- # read in notify_email from global_config
- c = global_config.global_config
- global _notify_email
- val = c.get_config_value(_global_config_section, "notify_email")
- if val != "":
- _notify_email = val
-
- if options.test:
- global _autoserv_path
- _autoserv_path = 'autoserv_dummy'
- global _testing_mode
- _testing_mode = True
-
- init(options.logfile)
- dispatcher = Dispatcher()
- dispatcher.do_initial_recovery(recover_hosts=options.recover_hosts)
-
- try:
- while not _shutdown:
- dispatcher.tick()
- time.sleep(20)
- except:
- log_stacktrace("Uncaught exception; terminating monitor_db")
-
- email_manager.send_queued_emails()
- _db.disconnect()
+ usage = 'usage: %prog [options] results_dir'
+
+ parser = optparse.OptionParser(usage)
+ parser.add_option('--recover-hosts', help='Try to recover dead hosts',
+ action='store_true')
+ parser.add_option('--logfile', help='Set a log file that all stdout ' +
+ 'should be redirected to. Stderr will go to this ' +
+ 'file + ".err"')
+ parser.add_option('--test', help='Indicate that scheduler is under ' +
+ 'test and should use dummy autoserv and no parsing',
+ action='store_true')
+ (options, args) = parser.parse_args()
+ if len(args) != 1:
+ parser.print_usage()
+ return
+
+ global RESULTS_DIR
+ RESULTS_DIR = args[0]
+
+ # read in notify_email from global_config
+ c = global_config.global_config
+ global _notify_email
+ val = c.get_config_value(_global_config_section, "notify_email")
+ if val != "":
+ _notify_email = val
+
+ if options.test:
+ global _autoserv_path
+ _autoserv_path = 'autoserv_dummy'
+ global _testing_mode
+ _testing_mode = True
+
+ init(options.logfile)
+ dispatcher = Dispatcher()
+ dispatcher.do_initial_recovery(recover_hosts=options.recover_hosts)
+
+ try:
+ while not _shutdown:
+ dispatcher.tick()
+ time.sleep(20)
+ except:
+ log_stacktrace("Uncaught exception; terminating monitor_db")
+
+ email_manager.send_queued_emails()
+ _db.disconnect()
def handle_sigint(signum, frame):
- global _shutdown
- _shutdown = True
- print "Shutdown request received."
+ global _shutdown
+ _shutdown = True
+ print "Shutdown request received."
def init(logfile):
- if logfile:
- enable_logging(logfile)
- print "%s> dispatcher starting" % time.strftime("%X %x")
- print "My PID is %d" % os.getpid()
+ if logfile:
+ enable_logging(logfile)
+ print "%s> dispatcher starting" % time.strftime("%X %x")
+ print "My PID is %d" % os.getpid()
- os.environ['PATH'] = AUTOTEST_SERVER_DIR + ':' + os.environ['PATH']
- global _db
- _db = DatabaseConn()
- _db.connect()
+ os.environ['PATH'] = AUTOTEST_SERVER_DIR + ':' + os.environ['PATH']
+ global _db
+ _db = DatabaseConn()
+ _db.connect()
- print "Setting signal handler"
- signal.signal(signal.SIGINT, handle_sigint)
-
- print "Connected! Running..."
+ print "Setting signal handler"
+ signal.signal(signal.SIGINT, handle_sigint)
+
+ print "Connected! Running..."
def enable_logging(logfile):
- out_file = logfile
- err_file = "%s.err" % logfile
- print "Enabling logging to %s (%s)" % (out_file, err_file)
- out_fd = open(out_file, "a", buffering=0)
- err_fd = open(err_file, "a", buffering=0)
+ out_file = logfile
+ err_file = "%s.err" % logfile
+ print "Enabling logging to %s (%s)" % (out_file, err_file)
+ out_fd = open(out_file, "a", buffering=0)
+ err_fd = open(err_file, "a", buffering=0)
- os.dup2(out_fd.fileno(), sys.stdout.fileno())
- os.dup2(err_fd.fileno(), sys.stderr.fileno())
+ os.dup2(out_fd.fileno(), sys.stdout.fileno())
+ os.dup2(err_fd.fileno(), sys.stderr.fileno())
- sys.stdout = out_fd
- sys.stderr = err_fd
+ sys.stdout = out_fd
+ sys.stderr = err_fd
def queue_entries_to_abort():
- rows = _db.execute("""
- SELECT * FROM host_queue_entries WHERE status='Abort';
- """)
- qe = [HostQueueEntry(row=i) for i in rows]
- return qe
+ rows = _db.execute("""
+ SELECT * FROM host_queue_entries WHERE status='Abort';
+ """)
+ qe = [HostQueueEntry(row=i) for i in rows]
+ return qe
def remove_file_or_dir(path):
- if stat.S_ISDIR(os.stat(path).st_mode):
- # directory
- shutil.rmtree(path)
- else:
- # file
- os.remove(path)
+ if stat.S_ISDIR(os.stat(path).st_mode):
+ # directory
+ shutil.rmtree(path)
+ else:
+ # file
+ os.remove(path)
class DatabaseConn:
- def __init__(self):
- self.reconnect_wait = 20
- self.conn = None
- self.cur = None
+ def __init__(self):
+ self.reconnect_wait = 20
+ self.conn = None
+ self.cur = None
- import MySQLdb.converters
- self.convert_dict = MySQLdb.converters.conversions
- self.convert_dict.setdefault(bool, self.convert_boolean)
+ import MySQLdb.converters
+ self.convert_dict = MySQLdb.converters.conversions
+ self.convert_dict.setdefault(bool, self.convert_boolean)
- @staticmethod
- def convert_boolean(boolean, conversion_dict):
- 'Convert booleans to integer strings'
- return str(int(boolean))
+ @staticmethod
+ def convert_boolean(boolean, conversion_dict):
+ 'Convert booleans to integer strings'
+ return str(int(boolean))
- def connect(self, db_name=None):
- self.disconnect()
+ def connect(self, db_name=None):
+ self.disconnect()
- # get global config and parse for info
- c = global_config.global_config
- dbase = "AUTOTEST_WEB"
- db_host = c.get_config_value(dbase, "host")
- if db_name is None:
- db_name = c.get_config_value(dbase, "database")
+ # get global config and parse for info
+ c = global_config.global_config
+ dbase = "AUTOTEST_WEB"
+ db_host = c.get_config_value(dbase, "host")
+ if db_name is None:
+ db_name = c.get_config_value(dbase, "database")
- if _testing_mode:
- db_name = 'stresstest_autotest_web'
+ if _testing_mode:
+ db_name = 'stresstest_autotest_web'
- db_user = c.get_config_value(dbase, "user")
- db_pass = c.get_config_value(dbase, "password")
+ db_user = c.get_config_value(dbase, "user")
+ db_pass = c.get_config_value(dbase, "password")
- while not self.conn:
- try:
- self.conn = MySQLdb.connect(
- host=db_host, user=db_user, passwd=db_pass,
- db=db_name, conv=self.convert_dict)
+ while not self.conn:
+ try:
+ self.conn = MySQLdb.connect(
+ host=db_host, user=db_user, passwd=db_pass,
+ db=db_name, conv=self.convert_dict)
- self.conn.autocommit(True)
- self.cur = self.conn.cursor()
- except MySQLdb.OperationalError:
- traceback.print_exc()
- print "Can't connect to MYSQL; reconnecting"
- time.sleep(self.reconnect_wait)
- self.disconnect()
+ self.conn.autocommit(True)
+ self.cur = self.conn.cursor()
+ except MySQLdb.OperationalError:
+ traceback.print_exc()
+ print "Can't connect to MYSQL; reconnecting"
+ time.sleep(self.reconnect_wait)
+ self.disconnect()
- def disconnect(self):
- if self.conn:
- self.conn.close()
- self.conn = None
- self.cur = None
+ def disconnect(self):
+ if self.conn:
+ self.conn.close()
+ self.conn = None
+ self.cur = None
- def execute(self, *args, **dargs):
- while (True):
- try:
- self.cur.execute(*args, **dargs)
- return self.cur.fetchall()
- except MySQLdb.OperationalError:
- traceback.print_exc()
- print "MYSQL connection died; reconnecting"
- time.sleep(self.reconnect_wait)
- self.connect()
+ def execute(self, *args, **dargs):
+ while (True):
+ try:
+ self.cur.execute(*args, **dargs)
+ return self.cur.fetchall()
+ except MySQLdb.OperationalError:
+ traceback.print_exc()
+ print "MYSQL connection died; reconnecting"
+ time.sleep(self.reconnect_wait)
+ self.connect()
def generate_parse_command(results_dir, flags=""):
- parse = os.path.abspath(os.path.join(AUTOTEST_TKO_DIR, 'parse'))
- output = os.path.abspath(os.path.join(results_dir, '.parse.log'))
- cmd = "%s %s -r -o %s > %s 2>&1 &"
- return cmd % (parse, flags, results_dir, output)
+ parse = os.path.abspath(os.path.join(AUTOTEST_TKO_DIR, 'parse'))
+ output = os.path.abspath(os.path.join(results_dir, '.parse.log'))
+ cmd = "%s %s -r -o %s > %s 2>&1 &"
+ return cmd % (parse, flags, results_dir, output)
def parse_results(results_dir, flags=""):
- if _testing_mode:
- return
- os.system(generate_parse_command(results_dir, flags))
+ if _testing_mode:
+ return
+ os.system(generate_parse_command(results_dir, flags))
def log_stacktrace(reason):
- (type, value, tb) = sys.exc_info()
- str = "EXCEPTION: %s\n" % reason
- str += ''.join(traceback.format_exception(type, value, tb))
+ (type, value, tb) = sys.exc_info()
+ str = "EXCEPTION: %s\n" % reason
+ str += ''.join(traceback.format_exception(type, value, tb))
- sys.stderr.write("\n%s\n" % str)
- email_manager.enqueue_notify_email("monitor_db exception", str)
+ sys.stderr.write("\n%s\n" % str)
+ email_manager.enqueue_notify_email("monitor_db exception", str)
def get_proc_poll_fn(pid):
- proc_path = os.path.join('/proc', str(pid))
- def poll_fn():
- if os.path.exists(proc_path):
- return None
- return 0 # we can't get a real exit code
- return poll_fn
+ proc_path = os.path.join('/proc', str(pid))
+ def poll_fn():
+ if os.path.exists(proc_path):
+ return None
+ return 0 # we can't get a real exit code
+ return poll_fn
def kill_autoserv(pid, poll_fn=None):
- print 'killing', pid
- if poll_fn is None:
- poll_fn = get_proc_poll_fn(pid)
- if poll_fn() == None:
- os.kill(pid, signal.SIGCONT)
- os.kill(pid, signal.SIGTERM)
+ print 'killing', pid
+ if poll_fn is None:
+ poll_fn = get_proc_poll_fn(pid)
+ if poll_fn() == None:
+ os.kill(pid, signal.SIGCONT)
+ os.kill(pid, signal.SIGTERM)
class EmailNotificationManager(object):
- def __init__(self):
- self._emails = []
- # see os.getlogin() online docs
- self._sender = pwd.getpwuid(os.getuid())[0]
+ def __init__(self):
+ self._emails = []
+ # see os.getlogin() online docs
+ self._sender = pwd.getpwuid(os.getuid())[0]
- def enqueue_notify_email(self, subject, message):
- if not _notify_email:
- return
+ def enqueue_notify_email(self, subject, message):
+ if not _notify_email:
+ return
- body = 'Subject: ' + subject + '\n'
- body += "%s / %s / %s\n%s" % (socket.gethostname(),
- os.getpid(),
- time.strftime("%X %x"), message)
- self._emails.append(body)
+ body = 'Subject: ' + subject + '\n'
+ body += "%s / %s / %s\n%s" % (socket.gethostname(),
+ os.getpid(),
+ time.strftime("%X %x"), message)
+ self._emails.append(body)
- def send_queued_emails(self):
- if not self._emails:
- return
- subject = 'Scheduler notifications from ' + socket.gethostname()
- separator = '\n' + '-' * 40 + '\n'
- body = separator.join(self._emails)
- msg = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (
- self._sender, _notify_email, subject, body)
+ def send_queued_emails(self):
+ if not self._emails:
+ return
+ subject = 'Scheduler notifications from ' + socket.gethostname()
+ separator = '\n' + '-' * 40 + '\n'
+ body = separator.join(self._emails)
+ msg = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (
+ self._sender, _notify_email, subject, body)
- mailer = smtplib.SMTP('localhost')
- mailer.sendmail(self._sender, _notify_email, msg)
- mailer.quit()
- self._emails = []
+ mailer = smtplib.SMTP('localhost')
+ mailer.sendmail(self._sender, _notify_email, msg)
+ mailer.quit()
+ self._emails = []
email_manager = EmailNotificationManager()
class Dispatcher:
- autoserv_procs_cache = None
- max_running_agents = global_config.global_config.get_config_value(
- _global_config_section, 'max_running_jobs', type=int)
- max_jobs_started_per_cycle = (
- global_config.global_config.get_config_value(
- _global_config_section, 'max_jobs_started_per_cycle', type=int))
-
- def __init__(self):
- self._agents = []
-
-
- def do_initial_recovery(self, recover_hosts=True):
- # always recover processes
- self._recover_processes()
-
- if recover_hosts:
- self._recover_hosts()
-
-
- def tick(self):
- Dispatcher.autoserv_procs_cache = None
- self._find_aborting()
- self._schedule_new_jobs()
- self._handle_agents()
- self._clear_inactive_blocks()
- email_manager.send_queued_emails()
-
-
- def add_agent(self, agent):
- self._agents.append(agent)
- agent.dispatcher = self
-
- # Find agent corresponding to the specified queue_entry
- def get_agents(self, queue_entry):
- res_agents = []
- for agent in self._agents:
- if queue_entry.id in agent.queue_entry_ids:
- res_agents.append(agent)
- return res_agents
-
-
- def remove_agent(self, agent):
- self._agents.remove(agent)
-
-
- def num_started_agents(self):
- return len([agent for agent in self._agents
- if agent.is_started()])
-
-
- @classmethod
- def find_autoservs(cls, orphans_only=False):
- """\
- Returns a dict mapping pids to command lines for root autoserv
- processes. If orphans_only=True, return only processes that
- have been orphaned (i.e. parent pid = 1).
- """
- if cls.autoserv_procs_cache is not None:
- return cls.autoserv_procs_cache
-
- proc = subprocess.Popen(
- ['/bin/ps', 'x', '-o', 'pid,pgid,ppid,comm,args'],
- stdout=subprocess.PIPE)
- # split each line into the four columns output by ps
- procs = [line.split(None, 4) for line in
- proc.communicate()[0].splitlines()]
- autoserv_procs = {}
- for proc in procs:
- # check ppid == 1 for orphans
- if orphans_only and proc[2] != 1:
- continue
- # only root autoserv processes have pgid == pid
- if (proc[3] == 'autoserv' and # comm
- proc[1] == proc[0]): # pgid == pid
- # map pid to args
- autoserv_procs[int(proc[0])] = proc[4]
- cls.autoserv_procs_cache = autoserv_procs
- return autoserv_procs
-
-
- def recover_queue_entry(self, queue_entry, run_monitor):
- job = queue_entry.job
- if job.is_synchronous():
- all_queue_entries = job.get_host_queue_entries()
- else:
- all_queue_entries = [queue_entry]
- all_queue_entry_ids = [queue_entry.id for queue_entry
- in all_queue_entries]
- queue_task = RecoveryQueueTask(
- job=queue_entry.job,
- queue_entries=all_queue_entries,
- run_monitor=run_monitor)
- self.add_agent(Agent(tasks=[queue_task],
- queue_entry_ids=all_queue_entry_ids))
-
-
- def _recover_processes(self):
- orphans = self.find_autoservs(orphans_only=True)
-
- # first, recover running queue entries
- rows = _db.execute("""SELECT * FROM host_queue_entries
- WHERE status = 'Running'""")
- queue_entries = [HostQueueEntry(row=i) for i in rows]
- requeue_entries = []
- recovered_entry_ids = set()
- for queue_entry in queue_entries:
- run_monitor = PidfileRunMonitor(
- queue_entry.results_dir())
- run_monitor.run()
- pid, exit_code = run_monitor.get_pidfile_info()
- if pid is None:
- # autoserv apparently never got run, so requeue
- requeue_entries.append(queue_entry)
- continue
- if queue_entry.id in recovered_entry_ids:
- # synchronous job we've already recovered
- continue
- print 'Recovering queue entry %d (pid %d)' % (
- queue_entry.id, pid)
- job = queue_entry.job
- if job.is_synchronous():
- for entry in job.get_host_queue_entries():
- assert entry.active
- recovered_entry_ids.add(entry.id)
- self.recover_queue_entry(queue_entry,
- run_monitor)
- orphans.pop(pid, None)
-
- # and requeue other active queue entries
- rows = _db.execute("""SELECT * FROM host_queue_entries
- WHERE active AND NOT complete
- AND status != 'Running'
- AND status != 'Pending'
- AND status != 'Abort'
- AND status != 'Aborting'""")
- queue_entries = [HostQueueEntry(row=i) for i in rows]
- for queue_entry in queue_entries + requeue_entries:
- print 'Requeuing running QE %d' % queue_entry.id
- queue_entry.clear_results_dir(dont_delete_files=True)
- queue_entry.requeue()
-
-
- # now kill any remaining autoserv processes
- for pid in orphans.keys():
- print 'Killing orphan %d (%s)' % (pid, orphans[pid])
- kill_autoserv(pid)
-
- # recover aborting tasks
- rebooting_host_ids = set()
- rows = _db.execute("""SELECT * FROM host_queue_entries
- WHERE status='Abort' or status='Aborting'""")
- queue_entries = [HostQueueEntry(row=i) for i in rows]
- for queue_entry in queue_entries:
- print 'Recovering aborting QE %d' % queue_entry.id
- queue_host = queue_entry.get_host()
- reboot_task = RebootTask(queue_host)
- verify_task = VerifyTask(host = queue_host)
- self.add_agent(Agent(tasks=[reboot_task,
- verify_task],
- queue_entry_ids=[queue_entry.id]))
- queue_entry.set_status('Aborted')
- # Secure the host from being picked up
- queue_host.set_status('Rebooting')
- rebooting_host_ids.add(queue_host.id)
-
- # reverify hosts that were in the middle of verify, repair or
- # reboot
- self._reverify_hosts_where("""(status = 'Repairing' OR
- status = 'Verifying' OR
- status = 'Rebooting')""",
- exclude_ids=rebooting_host_ids)
-
- # finally, recover "Running" hosts with no active queue entries,
- # although this should never happen
- message = ('Recovering running host %s - this probably '
- 'indicates a scheduler bug')
- self._reverify_hosts_where("""status = 'Running' AND
- id NOT IN (SELECT host_id
- FROM host_queue_entries
- WHERE active)""",
- print_message=message)
-
-
- def _reverify_hosts_where(self, where,
- print_message='Reverifying host %s',
- exclude_ids=set()):
- rows = _db.execute('SELECT * FROM hosts WHERE locked = 0 AND '
- 'invalid = 0 AND ' + where)
- hosts = [Host(row=i) for i in rows]
- for host in hosts:
- if host.id in exclude_ids:
- continue
- if print_message is not None:
- print print_message % host.hostname
- verify_task = VerifyTask(host = host)
- self.add_agent(Agent(tasks = [verify_task]))
-
-
- def _recover_hosts(self):
- # recover "Repair Failed" hosts
- message = 'Reverifying dead host %s'
- self._reverify_hosts_where("status = 'Repair Failed'",
- print_message=message)
-
-
- def _clear_inactive_blocks(self):
- """
- Clear out blocks for all completed jobs.
- """
- # this would be simpler using NOT IN (subquery), but MySQL
- # treats all IN subqueries as dependent, so this optimizes much
- # better
- _db.execute("""
- DELETE ihq FROM ineligible_host_queues ihq
- LEFT JOIN (SELECT job_id FROM host_queue_entries
- WHERE NOT complete) hqe
- USING (job_id) WHERE hqe.job_id IS NULL""")
-
-
- def _extract_host_and_queue_entry(self, row):
- # each row contains host columns followed by host queue entry
- # columns
- num_host_cols = Host.num_cols()
- assert len(row) == num_host_cols + HostQueueEntry.num_cols()
- host = Host(row=row[:num_host_cols])
- queue_entry = HostQueueEntry(row=row[num_host_cols:])
- return host, queue_entry
-
-
- def _get_runnable_entries(self, extra_join='', extra_where=''):
- query = (
- 'SELECT DISTINCT h.*, queued_hqe.* FROM hosts h '
- # join with running entries
- """
- LEFT JOIN host_queue_entries AS active_hqe
- ON (h.id = active_hqe.host_id AND active_hqe.active)
- """ +
- extra_join +
- # exclude hosts with a running entry
- 'WHERE active_hqe.host_id IS NULL '
- # exclude locked, invalid, and non-Ready hosts
- """
- AND h.locked=false AND h.invalid=false
- AND (h.status IS null OR h.status='Ready')
- """)
- if extra_where:
- query += 'AND ' + extra_where + '\n'
- # respect priority, then sort by ID (most recent first)
- query += 'ORDER BY queued_hqe.priority DESC, queued_hqe.id'
-
- rows = _db.execute(query)
- return [self._extract_host_and_queue_entry(row) for row in rows]
-
-
- def _get_runnable_nonmetahosts(self):
- # find queued HQEs scheduled directly against hosts
- queued_hqe_join = """
- INNER JOIN host_queue_entries AS queued_hqe
- ON (h.id = queued_hqe.host_id
- AND NOT queued_hqe.active AND NOT queued_hqe.complete)
- """
- return self._get_runnable_entries(queued_hqe_join)
-
-
- def _get_runnable_metahosts(self):
- # join with labels for metahost matching
- labels_join = 'INNER JOIN hosts_labels hl ON (hl.host_id=h.id)'
- # find queued HQEs scheduled for metahosts that match idle hosts
- queued_hqe_join = """
- INNER JOIN host_queue_entries AS queued_hqe
- ON (queued_hqe.meta_host = hl.label_id
- AND queued_hqe.host_id IS NULL
- AND NOT queued_hqe.active AND NOT queued_hqe.complete)
- """
- # need to exclude acl-inaccessible hosts
- acl_join = """
- INNER JOIN acl_groups_hosts ON h.id=acl_groups_hosts.host_id
- INNER JOIN acl_groups_users
- ON acl_groups_users.acl_group_id=acl_groups_hosts.acl_group_id
- INNER JOIN users ON acl_groups_users.user_id=users.id
- INNER JOIN jobs
- ON users.login=jobs.owner AND jobs.id=queued_hqe.job_id
- """
- # need to exclude blocked hosts
- block_join = """
- LEFT JOIN ineligible_host_queues AS ihq
- ON (ihq.job_id=queued_hqe.job_id AND ihq.host_id=h.id)
- """
- block_where = 'ihq.id IS NULL'
- extra_join = '\n'.join([labels_join, queued_hqe_join,
- acl_join, block_join])
- return self._get_runnable_entries(extra_join,
- extra_where=block_where)
-
-
- def _schedule_new_jobs(self):
- print "finding work"
-
- scheduled_hosts, scheduled_queue_entries = set(), set()
- runnable = (self._get_runnable_nonmetahosts() +
- self._get_runnable_metahosts())
- for host, queue_entry in runnable:
- # we may get duplicate entries for a host or a queue
- # entry. we need to schedule each host and each queue
- # entry only once.
- if (host.id in scheduled_hosts or
- queue_entry.id in scheduled_queue_entries):
- continue
- agent = queue_entry.run(assigned_host=host)
- self.add_agent(agent)
- scheduled_hosts.add(host.id)
- scheduled_queue_entries.add(queue_entry.id)
-
-
- def _find_aborting(self):
- num_aborted = 0
- # Find jobs that are aborting
- for entry in queue_entries_to_abort():
- agents_to_abort = self.get_agents(entry)
- entry_host = entry.get_host()
- reboot_task = RebootTask(entry_host)
- verify_task = VerifyTask(host = entry_host)
- tasks = [reboot_task, verify_task]
- if agents_to_abort:
- abort_task = AbortTask(entry, agents_to_abort)
- tasks.insert(0, abort_task)
- else:
- entry.set_status('Aborted')
- # just to make sure this host does not get
- # taken away
- entry_host.set_status('Rebooting')
- self.add_agent(Agent(tasks=tasks,
- queue_entry_ids = [entry.id]))
- num_aborted += 1
- if num_aborted >= 50:
- break
-
-
- def _handle_agents(self):
- still_running = []
- num_started = self.num_started_agents()
- start_new = (num_started < self.max_running_agents)
- num_started_this_cycle = 0
- for agent in self._agents:
- if not agent.is_started():
- if not start_new:
- still_running.append(agent)
- continue
- num_started += 1
- num_started_this_cycle += 1
- if (num_started >= self.max_running_agents or
- num_started_this_cycle >=
- self.max_jobs_started_per_cycle):
- start_new = False
- agent.tick()
- if not agent.is_done():
- still_running.append(agent)
- else:
- print "agent finished"
- self._agents = still_running
- print num_started, 'running agents'
+ autoserv_procs_cache = None
+ max_running_agents = global_config.global_config.get_config_value(
+ _global_config_section, 'max_running_jobs', type=int)
+ max_jobs_started_per_cycle = (
+ global_config.global_config.get_config_value(
+ _global_config_section, 'max_jobs_started_per_cycle', type=int))
+
+ def __init__(self):
+ self._agents = []
+
+
+ def do_initial_recovery(self, recover_hosts=True):
+ # always recover processes
+ self._recover_processes()
+
+ if recover_hosts:
+ self._recover_hosts()
+
+
+ def tick(self):
+ Dispatcher.autoserv_procs_cache = None
+ self._find_aborting()
+ self._schedule_new_jobs()
+ self._handle_agents()
+ self._clear_inactive_blocks()
+ email_manager.send_queued_emails()
+
+
+ def add_agent(self, agent):
+ self._agents.append(agent)
+ agent.dispatcher = self
+
+ # Find agent corresponding to the specified queue_entry
+ def get_agents(self, queue_entry):
+ res_agents = []
+ for agent in self._agents:
+ if queue_entry.id in agent.queue_entry_ids:
+ res_agents.append(agent)
+ return res_agents
+
+
+ def remove_agent(self, agent):
+ self._agents.remove(agent)
+
+
+ def num_started_agents(self):
+ return len([agent for agent in self._agents
+ if agent.is_started()])
+
+
+ @classmethod
+ def find_autoservs(cls, orphans_only=False):
+ """\
+ Returns a dict mapping pids to command lines for root autoserv
+ processes. If orphans_only=True, return only processes that
+ have been orphaned (i.e. parent pid = 1).
+ """
+ if cls.autoserv_procs_cache is not None:
+ return cls.autoserv_procs_cache
+
+ proc = subprocess.Popen(
+ ['/bin/ps', 'x', '-o', 'pid,pgid,ppid,comm,args'],
+ stdout=subprocess.PIPE)
+        # split each line into the five columns output by ps
+ procs = [line.split(None, 4) for line in
+ proc.communicate()[0].splitlines()]
+ autoserv_procs = {}
+ for proc in procs:
+ # check ppid == 1 for orphans (ps fields are strings, so compare to '1')
+ if orphans_only and proc[2] != '1':
+ continue
+ # only root autoserv processes have pgid == pid
+ if (proc[3] == 'autoserv' and # comm
+ proc[1] == proc[0]): # pgid == pid
+ # map pid to args
+ autoserv_procs[int(proc[0])] = proc[4]
+ cls.autoserv_procs_cache = autoserv_procs
+ return autoserv_procs
+
+
+ def recover_queue_entry(self, queue_entry, run_monitor):
+ job = queue_entry.job
+ if job.is_synchronous():
+ all_queue_entries = job.get_host_queue_entries()
+ else:
+ all_queue_entries = [queue_entry]
+ all_queue_entry_ids = [queue_entry.id for queue_entry
+ in all_queue_entries]
+ queue_task = RecoveryQueueTask(
+ job=queue_entry.job,
+ queue_entries=all_queue_entries,
+ run_monitor=run_monitor)
+ self.add_agent(Agent(tasks=[queue_task],
+ queue_entry_ids=all_queue_entry_ids))
+
+
+ def _recover_processes(self):
+ orphans = self.find_autoservs(orphans_only=True)
+
+ # first, recover running queue entries
+ rows = _db.execute("""SELECT * FROM host_queue_entries
+ WHERE status = 'Running'""")
+ queue_entries = [HostQueueEntry(row=i) for i in rows]
+ requeue_entries = []
+ recovered_entry_ids = set()
+ for queue_entry in queue_entries:
+ run_monitor = PidfileRunMonitor(
+ queue_entry.results_dir())
+ run_monitor.run()
+ pid, exit_code = run_monitor.get_pidfile_info()
+ if pid is None:
+ # autoserv apparently never got run, so requeue
+ requeue_entries.append(queue_entry)
+ continue
+ if queue_entry.id in recovered_entry_ids:
+ # synchronous job we've already recovered
+ continue
+ print 'Recovering queue entry %d (pid %d)' % (
+ queue_entry.id, pid)
+ job = queue_entry.job
+ if job.is_synchronous():
+ for entry in job.get_host_queue_entries():
+ assert entry.active
+ recovered_entry_ids.add(entry.id)
+ self.recover_queue_entry(queue_entry,
+ run_monitor)
+ orphans.pop(pid, None)
+
+ # and requeue other active queue entries
+ rows = _db.execute("""SELECT * FROM host_queue_entries
+ WHERE active AND NOT complete
+ AND status != 'Running'
+ AND status != 'Pending'
+ AND status != 'Abort'
+ AND status != 'Aborting'""")
+ queue_entries = [HostQueueEntry(row=i) for i in rows]
+ for queue_entry in queue_entries + requeue_entries:
+ print 'Requeuing running QE %d' % queue_entry.id
+ queue_entry.clear_results_dir(dont_delete_files=True)
+ queue_entry.requeue()
+
+
+ # now kill any remaining autoserv processes
+ for pid in orphans.keys():
+ print 'Killing orphan %d (%s)' % (pid, orphans[pid])
+ kill_autoserv(pid)
+
+ # recover aborting tasks
+ rebooting_host_ids = set()
+ rows = _db.execute("""SELECT * FROM host_queue_entries
+ WHERE status='Abort' or status='Aborting'""")
+ queue_entries = [HostQueueEntry(row=i) for i in rows]
+ for queue_entry in queue_entries:
+ print 'Recovering aborting QE %d' % queue_entry.id
+ queue_host = queue_entry.get_host()
+ reboot_task = RebootTask(queue_host)
+ verify_task = VerifyTask(host = queue_host)
+ self.add_agent(Agent(tasks=[reboot_task,
+ verify_task],
+ queue_entry_ids=[queue_entry.id]))
+ queue_entry.set_status('Aborted')
+ # Secure the host from being picked up
+ queue_host.set_status('Rebooting')
+ rebooting_host_ids.add(queue_host.id)
+
+ # reverify hosts that were in the middle of verify, repair or
+ # reboot
+ self._reverify_hosts_where("""(status = 'Repairing' OR
+ status = 'Verifying' OR
+ status = 'Rebooting')""",
+ exclude_ids=rebooting_host_ids)
+
+ # finally, recover "Running" hosts with no active queue entries,
+ # although this should never happen
+ message = ('Recovering running host %s - this probably '
+ 'indicates a scheduler bug')
+ self._reverify_hosts_where("""status = 'Running' AND
+ id NOT IN (SELECT host_id
+ FROM host_queue_entries
+ WHERE active)""",
+ print_message=message)
+
+
+ def _reverify_hosts_where(self, where,
+ print_message='Reverifying host %s',
+ exclude_ids=set()):
+ rows = _db.execute('SELECT * FROM hosts WHERE locked = 0 AND '
+ 'invalid = 0 AND ' + where)
+ hosts = [Host(row=i) for i in rows]
+ for host in hosts:
+ if host.id in exclude_ids:
+ continue
+ if print_message is not None:
+ print print_message % host.hostname
+ verify_task = VerifyTask(host = host)
+ self.add_agent(Agent(tasks = [verify_task]))
+
+
+ def _recover_hosts(self):
+ # recover "Repair Failed" hosts
+ message = 'Reverifying dead host %s'
+ self._reverify_hosts_where("status = 'Repair Failed'",
+ print_message=message)
+
+
+ def _clear_inactive_blocks(self):
+ """
+ Clear out blocks for all completed jobs.
+ """
+ # this would be simpler using NOT IN (subquery), but MySQL
+ # treats all IN subqueries as dependent, so this optimizes much
+ # better
+ _db.execute("""
+ DELETE ihq FROM ineligible_host_queues ihq
+ LEFT JOIN (SELECT job_id FROM host_queue_entries
+ WHERE NOT complete) hqe
+ USING (job_id) WHERE hqe.job_id IS NULL""")
+
+
+ def _extract_host_and_queue_entry(self, row):
+ # each row contains host columns followed by host queue entry
+ # columns
+ num_host_cols = Host.num_cols()
+ assert len(row) == num_host_cols + HostQueueEntry.num_cols()
+ host = Host(row=row[:num_host_cols])
+ queue_entry = HostQueueEntry(row=row[num_host_cols:])
+ return host, queue_entry
+
+
+ def _get_runnable_entries(self, extra_join='', extra_where=''):
+ query = (
+ 'SELECT DISTINCT h.*, queued_hqe.* FROM hosts h '
+ # join with running entries
+ """
+ LEFT JOIN host_queue_entries AS active_hqe
+ ON (h.id = active_hqe.host_id AND active_hqe.active)
+ """ +
+ extra_join +
+ # exclude hosts with a running entry
+ 'WHERE active_hqe.host_id IS NULL '
+ # exclude locked, invalid, and non-Ready hosts
+ """
+ AND h.locked=false AND h.invalid=false
+ AND (h.status IS null OR h.status='Ready')
+ """)
+ if extra_where:
+ query += 'AND ' + extra_where + '\n'
+ # respect priority, then schedule the oldest entries first (ascending ID)
+ query += 'ORDER BY queued_hqe.priority DESC, queued_hqe.id'
+
+ rows = _db.execute(query)
+ return [self._extract_host_and_queue_entry(row) for row in rows]
+
+
+ def _get_runnable_nonmetahosts(self):
+ # find queued HQEs scheduled directly against hosts
+ queued_hqe_join = """
+ INNER JOIN host_queue_entries AS queued_hqe
+ ON (h.id = queued_hqe.host_id
+ AND NOT queued_hqe.active AND NOT queued_hqe.complete)
+ """
+ return self._get_runnable_entries(queued_hqe_join)
+
+
+ def _get_runnable_metahosts(self):
+ # join with labels for metahost matching
+ labels_join = 'INNER JOIN hosts_labels hl ON (hl.host_id=h.id)'
+ # find queued HQEs scheduled for metahosts that match idle hosts
+ queued_hqe_join = """
+ INNER JOIN host_queue_entries AS queued_hqe
+ ON (queued_hqe.meta_host = hl.label_id
+ AND queued_hqe.host_id IS NULL
+ AND NOT queued_hqe.active AND NOT queued_hqe.complete)
+ """
+ # need to exclude acl-inaccessible hosts
+ acl_join = """
+ INNER JOIN acl_groups_hosts ON h.id=acl_groups_hosts.host_id
+ INNER JOIN acl_groups_users
+ ON acl_groups_users.acl_group_id=acl_groups_hosts.acl_group_id
+ INNER JOIN users ON acl_groups_users.user_id=users.id
+ INNER JOIN jobs
+ ON users.login=jobs.owner AND jobs.id=queued_hqe.job_id
+ """
+ # need to exclude blocked hosts
+ block_join = """
+ LEFT JOIN ineligible_host_queues AS ihq
+ ON (ihq.job_id=queued_hqe.job_id AND ihq.host_id=h.id)
+ """
+ block_where = 'ihq.id IS NULL'
+ extra_join = '\n'.join([labels_join, queued_hqe_join,
+ acl_join, block_join])
+ return self._get_runnable_entries(extra_join,
+ extra_where=block_where)
+
+
+ def _schedule_new_jobs(self):
+ print "finding work"
+
+ scheduled_hosts, scheduled_queue_entries = set(), set()
+ runnable = (self._get_runnable_nonmetahosts() +
+ self._get_runnable_metahosts())
+ for host, queue_entry in runnable:
+ # we may get duplicate entries for a host or a queue
+ # entry. we need to schedule each host and each queue
+ # entry only once.
+ if (host.id in scheduled_hosts or
+ queue_entry.id in scheduled_queue_entries):
+ continue
+ agent = queue_entry.run(assigned_host=host)
+ self.add_agent(agent)
+ scheduled_hosts.add(host.id)
+ scheduled_queue_entries.add(queue_entry.id)
+
+
+ def _find_aborting(self):
+ num_aborted = 0
+ # Find jobs that are aborting
+ for entry in queue_entries_to_abort():
+ agents_to_abort = self.get_agents(entry)
+ entry_host = entry.get_host()
+ reboot_task = RebootTask(entry_host)
+ verify_task = VerifyTask(host = entry_host)
+ tasks = [reboot_task, verify_task]
+ if agents_to_abort:
+ abort_task = AbortTask(entry, agents_to_abort)
+ tasks.insert(0, abort_task)
+ else:
+ entry.set_status('Aborted')
+ # just to make sure this host does not get
+ # taken away
+ entry_host.set_status('Rebooting')
+ self.add_agent(Agent(tasks=tasks,
+ queue_entry_ids = [entry.id]))
+ num_aborted += 1
+ if num_aborted >= 50:
+ break
+
+
+ def _handle_agents(self):
+ still_running = []
+ num_started = self.num_started_agents()
+ start_new = (num_started < self.max_running_agents)
+ num_started_this_cycle = 0
+ for agent in self._agents:
+ if not agent.is_started():
+ if not start_new:
+ still_running.append(agent)
+ continue
+ num_started += 1
+ num_started_this_cycle += 1
+ if (num_started >= self.max_running_agents or
+ num_started_this_cycle >=
+ self.max_jobs_started_per_cycle):
+ start_new = False
+ agent.tick()
+ if not agent.is_done():
+ still_running.append(agent)
+ else:
+ print "agent finished"
+ self._agents = still_running
+ print num_started, 'running agents'
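
The _handle_agents() loop above caps both the total number of running agents and the number started within a single tick. A minimal sketch of that gating logic follows, using an invented _SketchAgent stand-in and made-up limits rather than the scheduler's real Agent class and config values:

# Sketch only: _SketchAgent is a stand-in, not the scheduler's Agent class.
class _SketchAgent(object):
    def __init__(self):
        self.started = False
        self.done = False
    def is_started(self):
        return self.started
    def tick(self):
        self.started = True
    def is_done(self):
        return self.done


def handle_agents(agents, max_running, max_per_cycle):
    # same gating as Dispatcher._handle_agents: stop starting new agents once
    # either the running limit or the per-cycle start limit is reached
    still_running = []
    num_started = len([a for a in agents if a.is_started()])
    start_new = num_started < max_running
    started_this_cycle = 0
    for agent in agents:
        if not agent.is_started():
            if not start_new:
                still_running.append(agent)
                continue
            num_started += 1
            started_this_cycle += 1
            if (num_started >= max_running or
                    started_this_cycle >= max_per_cycle):
                start_new = False
        agent.tick()
        if not agent.is_done():
            still_running.append(agent)
    return still_running


# with 10 idle agents, a running limit of 3 and a per-cycle limit of 2,
# only two agents get started on the first pass
agents = [_SketchAgent() for _ in range(10)]
handle_agents(agents, max_running=3, max_per_cycle=2)
assert len([a for a in agents if a.is_started()]) == 2
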
class RunMonitor(object):
- def __init__(self, cmd, nice_level = None, log_file = None):
- self.nice_level = nice_level
- self.log_file = log_file
- self.cmd = cmd
-
- def run(self):
- if self.nice_level:
- nice_cmd = ['nice','-n', str(self.nice_level)]
- nice_cmd.extend(self.cmd)
- self.cmd = nice_cmd
-
- out_file = None
- if self.log_file:
- try:
- os.makedirs(os.path.dirname(self.log_file))
- except OSError, exc:
- if exc.errno != errno.EEXIST:
- log_stacktrace(
- 'Unexpected error creating logfile '
- 'directory for %s' % self.log_file)
- try:
- out_file = open(self.log_file, 'a')
- out_file.write("\n%s\n" % ('*'*80))
- out_file.write("%s> %s\n" %
- (time.strftime("%X %x"),
- self.cmd))
- out_file.write("%s\n" % ('*'*80))
- except (OSError, IOError):
- log_stacktrace('Error opening log file %s' %
- self.log_file)
-
- if not out_file:
- out_file = open('/dev/null', 'w')
-
- in_devnull = open('/dev/null', 'r')
- print "cmd = %s" % self.cmd
- print "path = %s" % os.getcwd()
-
- self.proc = subprocess.Popen(self.cmd, stdout=out_file,
- stderr=subprocess.STDOUT,
- stdin=in_devnull)
- out_file.close()
- in_devnull.close()
-
-
- def get_pid(self):
- return self.proc.pid
-
-
- def kill(self):
- kill_autoserv(self.get_pid(), self.exit_code)
-
-
- def exit_code(self):
- return self.proc.poll()
+ def __init__(self, cmd, nice_level = None, log_file = None):
+ self.nice_level = nice_level
+ self.log_file = log_file
+ self.cmd = cmd
+
+ def run(self):
+ if self.nice_level:
+ nice_cmd = ['nice','-n', str(self.nice_level)]
+ nice_cmd.extend(self.cmd)
+ self.cmd = nice_cmd
+
+ out_file = None
+ if self.log_file:
+ try:
+ os.makedirs(os.path.dirname(self.log_file))
+ except OSError, exc:
+ if exc.errno != errno.EEXIST:
+ log_stacktrace(
+ 'Unexpected error creating logfile '
+ 'directory for %s' % self.log_file)
+ try:
+ out_file = open(self.log_file, 'a')
+ out_file.write("\n%s\n" % ('*'*80))
+ out_file.write("%s> %s\n" %
+ (time.strftime("%X %x"),
+ self.cmd))
+ out_file.write("%s\n" % ('*'*80))
+ except (OSError, IOError):
+ log_stacktrace('Error opening log file %s' %
+ self.log_file)
+
+ if not out_file:
+ out_file = open('/dev/null', 'w')
+
+ in_devnull = open('/dev/null', 'r')
+ print "cmd = %s" % self.cmd
+ print "path = %s" % os.getcwd()
+
+ self.proc = subprocess.Popen(self.cmd, stdout=out_file,
+ stderr=subprocess.STDOUT,
+ stdin=in_devnull)
+ out_file.close()
+ in_devnull.close()
+
+
+ def get_pid(self):
+ return self.proc.pid
+
+
+ def kill(self):
+ kill_autoserv(self.get_pid(), self.exit_code)
+
+
+ def exit_code(self):
+ return self.proc.poll()
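
RunMonitor.run() above optionally wraps the command in nice and stamps a banner into the log file before handing stdout/stderr to the child process. A small sketch of just the nice wrapping, with an invented command and nice level:

# Sketch only: the command and nice level below are made-up examples.
def wrap_with_nice(cmd, nice_level=None):
    # prepend "nice -n <level>" when a nice level is requested
    if nice_level:
        return ['nice', '-n', str(nice_level)] + list(cmd)
    return list(cmd)

assert (wrap_with_nice(['autoserv', '-m', 'host1'], nice_level=10) ==
        ['nice', '-n', '10', 'autoserv', '-m', 'host1'])
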
class PidfileException(Exception):
- """\
- Raised when there's some unexpected behavior with the pid file.
- """
+ """\
+ Raised when there's some unexpected behavior with the pid file.
+ """
class PidfileRunMonitor(RunMonitor):
- def __init__(self, results_dir, cmd=None, nice_level=None,
- log_file=None):
- self.results_dir = os.path.abspath(results_dir)
- self.pid_file = os.path.join(results_dir, AUTOSERV_PID_FILE)
- self.lost_process = False
- self.start_time = time.time()
- if cmd is None:
- # we're reattaching to an existing pid, so don't call
- # the superconstructor (we don't want to kick off a new
- # process)
- pass
- else:
- super(PidfileRunMonitor, self).__init__(cmd,
- nice_level, log_file)
-
-
- def get_pid(self):
- pid, exit_status = self.get_pidfile_info()
- assert pid is not None
- return pid
-
-
- def _check_command_line(self, command_line, spacer=' ',
- print_error=False):
- results_dir_arg = spacer.join(('', '-r', self.results_dir, ''))
- match = results_dir_arg in command_line
- if print_error and not match:
- print '%s not found in %s' % (repr(results_dir_arg),
- repr(command_line))
- return match
-
-
- def _check_proc_fs(self, pid):
- cmdline_path = os.path.join('/proc', str(pid), 'cmdline')
- try:
- cmdline_file = open(cmdline_path, 'r')
- cmdline = cmdline_file.read().strip()
- cmdline_file.close()
- except IOError:
- return False
- # /proc/.../cmdline has \x00 separating args
- return self._check_command_line(cmdline, spacer='\x00',
- print_error=True)
-
-
- def read_pidfile(self):
- if not os.path.exists(self.pid_file):
- return None, None
- file_obj = open(self.pid_file, 'r')
- lines = file_obj.readlines()
- file_obj.close()
- assert 1 <= len(lines) <= 2
- try:
- pid = int(lines[0])
- exit_status = None
- if len(lines) == 2:
- exit_status = int(lines[1])
- except ValueError, exc:
- raise PidfileException('Corrupt pid file: ' +
- str(exc.args))
-
- return pid, exit_status
-
-
- def _find_autoserv_proc(self):
- autoserv_procs = Dispatcher.find_autoservs()
- for pid, args in autoserv_procs.iteritems():
- if self._check_command_line(args):
- return pid, args
- return None, None
-
-
- def get_pidfile_info(self):
- """\
- Returns:
- None, None if autoserv has not yet run
- pid, None if autoserv is running
- pid, exit_status if autoserv has completed
- """
- if self.lost_process:
- return self.pid, self.exit_status
-
- pid, exit_status = self.read_pidfile()
-
- if pid is None:
- return self._handle_no_pid()
-
- if exit_status is None:
- # double check whether or not autoserv is running
- proc_running = self._check_proc_fs(pid)
- if proc_running:
- return pid, exit_status
-
- # pid but no process - maybe process *just* exited
- pid, exit_status = self.read_pidfile()
- if exit_status is None:
- # autoserv exited without writing an exit code
- # to the pidfile
- error = ('autoserv died without writing exit '
- 'code')
- message = error + '\nPid: %s\nPidfile: %s' % (
- pid, self.pid_file)
- print message
- email_manager.enqueue_notify_email(error,
- message)
- self.on_lost_process(pid)
- return self.pid, self.exit_status
-
- return pid, exit_status
-
-
- def _handle_no_pid(self):
- """\
- Called when no pidfile is found or no pid is in the pidfile.
- """
- # is autoserv running?
- pid, args = self._find_autoserv_proc()
- if pid is None:
- # no autoserv process running
- message = 'No pid found at ' + self.pid_file
- else:
- message = ("Process %d (%s) hasn't written pidfile %s" %
- (pid, args, self.pid_file))
-
- print message
- if time.time() - self.start_time > PIDFILE_TIMEOUT:
- email_manager.enqueue_notify_email(
- 'Process has failed to write pidfile', message)
- if pid is not None:
- kill_autoserv(pid)
- else:
- pid = 0
- self.on_lost_process(pid)
- return self.pid, self.exit_status
-
- return None, None
-
-
- def on_lost_process(self, pid):
- """\
- Called when autoserv has exited without writing an exit status,
- or we've timed out waiting for autoserv to write a pid to the
- pidfile. In either case, we just return failure and the caller
- should signal some kind of warning.
-
- pid is unimportant here, as it shouldn't be used by anyone.
- """
- self.lost_process = True
- self.pid = pid
- self.exit_status = 1
-
-
- def exit_code(self):
- pid, exit_code = self.get_pidfile_info()
- return exit_code
+ def __init__(self, results_dir, cmd=None, nice_level=None,
+ log_file=None):
+ self.results_dir = os.path.abspath(results_dir)
+ self.pid_file = os.path.join(results_dir, AUTOSERV_PID_FILE)
+ self.lost_process = False
+ self.start_time = time.time()
+ if cmd is None:
+ # we're reattaching to an existing pid, so don't call
+ # the superconstructor (we don't want to kick off a new
+ # process)
+ pass
+ else:
+ super(PidfileRunMonitor, self).__init__(cmd,
+ nice_level, log_file)
+
+
+ def get_pid(self):
+ pid, exit_status = self.get_pidfile_info()
+ assert pid is not None
+ return pid
+
+
+ def _check_command_line(self, command_line, spacer=' ',
+ print_error=False):
+ results_dir_arg = spacer.join(('', '-r', self.results_dir, ''))
+ match = results_dir_arg in command_line
+ if print_error and not match:
+ print '%s not found in %s' % (repr(results_dir_arg),
+ repr(command_line))
+ return match
+
+
+ def _check_proc_fs(self, pid):
+ cmdline_path = os.path.join('/proc', str(pid), 'cmdline')
+ try:
+ cmdline_file = open(cmdline_path, 'r')
+ cmdline = cmdline_file.read().strip()
+ cmdline_file.close()
+ except IOError:
+ return False
+ # /proc/.../cmdline has \x00 separating args
+ return self._check_command_line(cmdline, spacer='\x00',
+ print_error=True)
+
+
+ def read_pidfile(self):
+ if not os.path.exists(self.pid_file):
+ return None, None
+ file_obj = open(self.pid_file, 'r')
+ lines = file_obj.readlines()
+ file_obj.close()
+ assert 1 <= len(lines) <= 2
+ try:
+ pid = int(lines[0])
+ exit_status = None
+ if len(lines) == 2:
+ exit_status = int(lines[1])
+ except ValueError, exc:
+ raise PidfileException('Corrupt pid file: ' +
+ str(exc.args))
+
+ return pid, exit_status
+
+
+ def _find_autoserv_proc(self):
+ autoserv_procs = Dispatcher.find_autoservs()
+ for pid, args in autoserv_procs.iteritems():
+ if self._check_command_line(args):
+ return pid, args
+ return None, None
+
+
+ def get_pidfile_info(self):
+ """\
+ Returns:
+ None, None if autoserv has not yet run
+ pid, None if autoserv is running
+ pid, exit_status if autoserv has completed
+ """
+ if self.lost_process:
+ return self.pid, self.exit_status
+
+ pid, exit_status = self.read_pidfile()
+
+ if pid is None:
+ return self._handle_no_pid()
+
+ if exit_status is None:
+ # double check whether or not autoserv is running
+ proc_running = self._check_proc_fs(pid)
+ if proc_running:
+ return pid, exit_status
+
+ # pid but no process - maybe process *just* exited
+ pid, exit_status = self.read_pidfile()
+ if exit_status is None:
+ # autoserv exited without writing an exit code
+ # to the pidfile
+ error = ('autoserv died without writing exit '
+ 'code')
+ message = error + '\nPid: %s\nPidfile: %s' % (
+ pid, self.pid_file)
+ print message
+ email_manager.enqueue_notify_email(error,
+ message)
+ self.on_lost_process(pid)
+ return self.pid, self.exit_status
+
+ return pid, exit_status
+
+
+ def _handle_no_pid(self):
+ """\
+ Called when no pidfile is found or no pid is in the pidfile.
+ """
+ # is autoserv running?
+ pid, args = self._find_autoserv_proc()
+ if pid is None:
+ # no autoserv process running
+ message = 'No pid found at ' + self.pid_file
+ else:
+ message = ("Process %d (%s) hasn't written pidfile %s" %
+ (pid, args, self.pid_file))
+
+ print message
+ if time.time() - self.start_time > PIDFILE_TIMEOUT:
+ email_manager.enqueue_notify_email(
+ 'Process has failed to write pidfile', message)
+ if pid is not None:
+ kill_autoserv(pid)
+ else:
+ pid = 0
+ self.on_lost_process(pid)
+ return self.pid, self.exit_status
+
+ return None, None
+
+
+ def on_lost_process(self, pid):
+ """\
+ Called when autoserv has exited without writing an exit status,
+ or we've timed out waiting for autoserv to write a pid to the
+ pidfile. In either case, we just return failure and the caller
+ should signal some kind of warning.
+
+ pid is unimportant here, as it shouldn't be used by anyone.
+ """
+ self.lost_process = True
+ self.pid = pid
+ self.exit_status = 1
+
+
+ def exit_code(self):
+ pid, exit_code = self.get_pidfile_info()
+ return exit_code
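
PidfileRunMonitor above relies on a two-line pidfile written by autoserv: the pid on the first line and, once the process exits, its exit status on a second line, which is what gives get_pidfile_info() its three possible return states. A standalone parser sketch of that convention (the missing-file case is simplified to an empty string here):

# Sketch only: a standalone illustration of the pidfile format, not the
# class method itself.
def parse_pidfile(contents):
    """Return (pid, exit_status); exit_status is None while still running."""
    lines = contents.splitlines()
    if not lines:
        return None, None
    pid = int(lines[0])
    exit_status = int(lines[1]) if len(lines) > 1 else None
    return pid, exit_status

assert parse_pidfile('') == (None, None)          # autoserv not yet run
assert parse_pidfile('12345\n') == (12345, None)  # still running
assert parse_pidfile('12345\n0\n') == (12345, 0)  # finished successfully
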
class Agent(object):
- def __init__(self, tasks, queue_entry_ids=[]):
- self.active_task = None
- self.queue = Queue.Queue(0)
- self.dispatcher = None
- self.queue_entry_ids = queue_entry_ids
-
- for task in tasks:
- self.add_task(task)
+ def __init__(self, tasks, queue_entry_ids=[]):
+ self.active_task = None
+ self.queue = Queue.Queue(0)
+ self.dispatcher = None
+ self.queue_entry_ids = queue_entry_ids
+
+ for task in tasks:
+ self.add_task(task)
- def add_task(self, task):
- self.queue.put_nowait(task)
- task.agent = self
+ def add_task(self, task):
+ self.queue.put_nowait(task)
+ task.agent = self
- def tick(self):
- print "agent tick"
- if self.active_task and not self.active_task.is_done():
- self.active_task.poll()
- else:
- self._next_task();
+ def tick(self):
+ print "agent tick"
+ if self.active_task and not self.active_task.is_done():
+ self.active_task.poll()
+ else:
+ self._next_task()
- def _next_task(self):
- print "agent picking task"
- if self.active_task:
- assert self.active_task.is_done()
+ def _next_task(self):
+ print "agent picking task"
+ if self.active_task:
+ assert self.active_task.is_done()
- if not self.active_task.success:
- self.on_task_failure()
+ if not self.active_task.success:
+ self.on_task_failure()
- self.active_task = None
- if not self.is_done():
- self.active_task = self.queue.get_nowait()
- if self.active_task:
- self.active_task.start()
+ self.active_task = None
+ if not self.is_done():
+ self.active_task = self.queue.get_nowait()
+ if self.active_task:
+ self.active_task.start()
- def on_task_failure(self):
- self.queue = Queue.Queue(0)
- for task in self.active_task.failure_tasks:
- self.add_task(task)
+ def on_task_failure(self):
+ self.queue = Queue.Queue(0)
+ for task in self.active_task.failure_tasks:
+ self.add_task(task)
- def is_started(self):
- return self.active_task is not None
+ def is_started(self):
+ return self.active_task is not None
- def is_done(self):
- return self.active_task == None and self.queue.empty()
+ def is_done(self):
+ return self.active_task is None and self.queue.empty()
- def start(self):
- assert self.dispatcher
+ def start(self):
+ assert self.dispatcher
+
+ self._next_task()
- self._next_task()
-
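
Agent.on_task_failure() above discards whatever is still queued and substitutes the failed task's failure_tasks, which is how a failed verify turns into a repair rather than a job run. A toy sketch of that decision with invented task names:

# Sketch only: plain lists and made-up task names instead of Queue.Queue
# and real AgentTask objects.
def next_tasks(remaining_queue, finished_task_success, failure_tasks):
    if finished_task_success:
        return list(remaining_queue)
    # on failure, the pending queue is dropped and the failure tasks run
    return list(failure_tasks)

assert next_tasks(['run_job'], True, ['repair']) == ['run_job']
assert next_tasks(['run_job'], False, ['repair']) == ['repair']
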
class AgentTask(object):
- def __init__(self, cmd, failure_tasks = []):
- self.done = False
- self.failure_tasks = failure_tasks
- self.started = False
- self.cmd = cmd
- self.task = None
- self.agent = None
- self.monitor = None
- self.success = None
+ def __init__(self, cmd, failure_tasks = []):
+ self.done = False
+ self.failure_tasks = failure_tasks
+ self.started = False
+ self.cmd = cmd
+ self.task = None
+ self.agent = None
+ self.monitor = None
+ self.success = None
+
+ def poll(self):
+ print "poll"
+ if self.monitor:
+ self.tick(self.monitor.exit_code())
+ else:
+ self.finished(False)
- def poll(self):
- print "poll"
- if self.monitor:
- self.tick(self.monitor.exit_code())
- else:
- self.finished(False)
+ def tick(self, exit_code):
+ if exit_code is None:
+ return
+# print "exit_code was %d" % exit_code
+ if exit_code == 0:
+ success = True
+ else:
+ success = False
- def tick(self, exit_code):
- if exit_code==None:
- return
-# print "exit_code was %d" % exit_code
- if exit_code == 0:
- success = True
- else:
- success = False
+ self.finished(success)
- self.finished(success)
+ def is_done(self):
+ return self.done
- def is_done(self):
- return self.done
+ def finished(self, success):
+ self.done = True
+ self.success = success
+ self.epilog()
- def finished(self, success):
- self.done = True
- self.success = success
- self.epilog()
+ def prolog(self):
+ pass
- def prolog(self):
- pass
-
- def create_temp_resultsdir(self, suffix=''):
- self.temp_results_dir = tempfile.mkdtemp(suffix=suffix)
+ def create_temp_resultsdir(self, suffix=''):
+ self.temp_results_dir = tempfile.mkdtemp(suffix=suffix)
- def cleanup(self):
- if (hasattr(self, 'temp_results_dir') and
- os.path.exists(self.temp_results_dir)):
- shutil.rmtree(self.temp_results_dir)
+ def cleanup(self):
+ if (hasattr(self, 'temp_results_dir') and
+ os.path.exists(self.temp_results_dir)):
+ shutil.rmtree(self.temp_results_dir)
- def epilog(self):
- self.cleanup()
+ def epilog(self):
+ self.cleanup()
- def start(self):
- assert self.agent
+ def start(self):
+ assert self.agent
- if not self.started:
- self.prolog()
- self.run()
+ if not self.started:
+ self.prolog()
+ self.run()
- self.started = True
+ self.started = True
-
- def abort(self):
- if self.monitor:
- self.monitor.kill()
- self.done = True
- self.cleanup()
+ def abort(self):
+ if self.monitor:
+ self.monitor.kill()
+ self.done = True
+ self.cleanup()
- def run(self):
- if self.cmd:
- print "agent starting monitor"
- log_file = None
- if hasattr(self, 'host'):
- log_file = os.path.join(RESULTS_DIR, 'hosts',
- self.host.hostname)
- self.monitor = RunMonitor(
- self.cmd, nice_level = AUTOSERV_NICE_LEVEL,
- log_file = log_file)
- self.monitor.run()
+
+ def run(self):
+ if self.cmd:
+ print "agent starting monitor"
+ log_file = None
+ if hasattr(self, 'host'):
+ log_file = os.path.join(RESULTS_DIR, 'hosts',
+ self.host.hostname)
+ self.monitor = RunMonitor(
+ self.cmd, nice_level = AUTOSERV_NICE_LEVEL,
+ log_file = log_file)
+ self.monitor.run()
class RepairTask(AgentTask):
- def __init__(self, host, fail_queue_entry=None):
- """\
- fail_queue_entry: queue entry to mark failed if this repair
- fails.
- """
- self.create_temp_resultsdir('.repair')
- cmd = [_autoserv_path , '-R', '-m', host.hostname,
- '-r', self.temp_results_dir]
- self.host = host
- self.fail_queue_entry = fail_queue_entry
- super(RepairTask, self).__init__(cmd)
-
-
- def prolog(self):
- print "repair_task starting"
- self.host.set_status('Repairing')
-
-
- def epilog(self):
- super(RepairTask, self).epilog()
- if self.success:
- self.host.set_status('Ready')
- else:
- self.host.set_status('Repair Failed')
- if self.fail_queue_entry:
- self.fail_queue_entry.handle_host_failure()
+ def __init__(self, host, fail_queue_entry=None):
+ """\
+ fail_queue_entry: queue entry to mark failed if this repair
+ fails.
+ """
+ self.create_temp_resultsdir('.repair')
+ cmd = [_autoserv_path , '-R', '-m', host.hostname,
+ '-r', self.temp_results_dir]
+ self.host = host
+ self.fail_queue_entry = fail_queue_entry
+ super(RepairTask, self).__init__(cmd)
+
+
+ def prolog(self):
+ print "repair_task starting"
+ self.host.set_status('Repairing')
+
+
+ def epilog(self):
+ super(RepairTask, self).epilog()
+ if self.success:
+ self.host.set_status('Ready')
+ else:
+ self.host.set_status('Repair Failed')
+ if self.fail_queue_entry:
+ self.fail_queue_entry.handle_host_failure()
class VerifyTask(AgentTask):
- def __init__(self, queue_entry=None, host=None):
- assert bool(queue_entry) != bool(host)
-
- self.host = host or queue_entry.host
- self.queue_entry = queue_entry
-
- self.create_temp_resultsdir('.verify')
- cmd = [_autoserv_path,'-v','-m',self.host.hostname,
- '-r', self.temp_results_dir]
-
- fail_queue_entry = None
- if queue_entry and not queue_entry.meta_host:
- fail_queue_entry = queue_entry
- failure_tasks = [RepairTask(self.host, fail_queue_entry)]
-
- super(VerifyTask, self).__init__(cmd,
- failure_tasks=failure_tasks)
-
-
- def prolog(self):
- print "starting verify on %s" % (self.host.hostname)
- if self.queue_entry:
- self.queue_entry.set_status('Verifying')
- self.queue_entry.clear_results_dir(
- self.queue_entry.verify_results_dir())
- self.host.set_status('Verifying')
-
-
- def cleanup(self):
- if not os.path.exists(self.temp_results_dir):
- return
- if self.queue_entry and (self.success or
- not self.queue_entry.meta_host):
- self.move_results()
- super(VerifyTask, self).cleanup()
-
-
- def epilog(self):
- super(VerifyTask, self).epilog()
-
- if self.success:
- self.host.set_status('Ready')
- elif self.queue_entry:
- self.queue_entry.requeue()
-
-
- def move_results(self):
- assert self.queue_entry is not None
- target_dir = self.queue_entry.verify_results_dir()
- if not os.path.exists(target_dir):
- os.makedirs(target_dir)
- files = os.listdir(self.temp_results_dir)
- for filename in files:
- if filename == AUTOSERV_PID_FILE:
- continue
- self.force_move(os.path.join(self.temp_results_dir,
- filename),
- os.path.join(target_dir, filename))
-
-
- @staticmethod
- def force_move(source, dest):
- """\
- Replacement for shutil.move() that will delete the destination
- if it exists, even if it's a directory.
- """
- if os.path.exists(dest):
- print ('Warning: removing existing destination file ' +
- dest)
- remove_file_or_dir(dest)
- shutil.move(source, dest)
+ def __init__(self, queue_entry=None, host=None):
+ assert bool(queue_entry) != bool(host)
+
+ self.host = host or queue_entry.host
+ self.queue_entry = queue_entry
+
+ self.create_temp_resultsdir('.verify')
+ cmd = [_autoserv_path,'-v','-m',self.host.hostname,
+ '-r', self.temp_results_dir]
+
+ fail_queue_entry = None
+ if queue_entry and not queue_entry.meta_host:
+ fail_queue_entry = queue_entry
+ failure_tasks = [RepairTask(self.host, fail_queue_entry)]
+
+ super(VerifyTask, self).__init__(cmd,
+ failure_tasks=failure_tasks)
+
+
+ def prolog(self):
+ print "starting verify on %s" % (self.host.hostname)
+ if self.queue_entry:
+ self.queue_entry.set_status('Verifying')
+ self.queue_entry.clear_results_dir(
+ self.queue_entry.verify_results_dir())
+ self.host.set_status('Verifying')
+
+
+ def cleanup(self):
+ if not os.path.exists(self.temp_results_dir):
+ return
+ if self.queue_entry and (self.success or
+ not self.queue_entry.meta_host):
+ self.move_results()
+ super(VerifyTask, self).cleanup()
+
+
+ def epilog(self):
+ super(VerifyTask, self).epilog()
+
+ if self.success:
+ self.host.set_status('Ready')
+ elif self.queue_entry:
+ self.queue_entry.requeue()
+
+
+ def move_results(self):
+ assert self.queue_entry is not None
+ target_dir = self.queue_entry.verify_results_dir()
+ if not os.path.exists(target_dir):
+ os.makedirs(target_dir)
+ files = os.listdir(self.temp_results_dir)
+ for filename in files:
+ if filename == AUTOSERV_PID_FILE:
+ continue
+ self.force_move(os.path.join(self.temp_results_dir,
+ filename),
+ os.path.join(target_dir, filename))
+
+
+ @staticmethod
+ def force_move(source, dest):
+ """\
+ Replacement for shutil.move() that will delete the destination
+ if it exists, even if it's a directory.
+ """
+ if os.path.exists(dest):
+ print ('Warning: removing existing destination file ' +
+ dest)
+ remove_file_or_dir(dest)
+ shutil.move(source, dest)
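
VerifyTask.force_move() above deliberately removes an existing destination before moving, so a rerun cannot trip over stale results; remove_file_or_dir() is not shown in this hunk, so the sketch below inlines an equivalent under that assumption:

# Sketch only: an equivalent of force_move with the (assumed) behaviour of
# remove_file_or_dir inlined.
import os
import shutil
import tempfile

def force_move(source, dest):
    # unlike shutil.move, clobber an existing destination first
    if os.path.exists(dest):
        if os.path.isdir(dest) and not os.path.islink(dest):
            shutil.rmtree(dest)
        else:
            os.remove(dest)
    shutil.move(source, dest)

src = tempfile.mkdtemp(suffix='.src')
dst = tempfile.mkdtemp(suffix='.dst')   # pre-existing destination
force_move(src, dst)                    # dst is replaced, not merged into
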
class VerifySynchronousTask(VerifyTask):
- def __init__(self, queue_entry):
- super(VerifySynchronousTask, self).__init__(
- queue_entry = queue_entry)
+ def __init__(self, queue_entry):
+ super(VerifySynchronousTask, self).__init__(
+ queue_entry = queue_entry)
- def epilog(self):
- super(VerifySynchronousTask, self).epilog()
- if self.success:
- if self.queue_entry.job.num_complete() > 0:
- # some other entry failed verify, and we've
- # already been marked as stopped
- return
+ def epilog(self):
+ super(VerifySynchronousTask, self).epilog()
+ if self.success:
+ if self.queue_entry.job.num_complete() > 0:
+ # some other entry failed verify, and we've
+ # already been marked as stopped
+ return
- self.queue_entry.set_status('Pending')
- job = self.queue_entry.job
- if job.is_ready():
- agent = job.run(self.queue_entry)
- self.agent.dispatcher.add_agent(agent)
+ self.queue_entry.set_status('Pending')
+ job = self.queue_entry.job
+ if job.is_ready():
+ agent = job.run(self.queue_entry)
+ self.agent.dispatcher.add_agent(agent)
class QueueTask(AgentTask):
- def __init__(self, job, queue_entries, cmd):
- super(QueueTask, self).__init__(cmd)
- self.job = job
- self.queue_entries = queue_entries
+ def __init__(self, job, queue_entries, cmd):
+ super(QueueTask, self).__init__(cmd)
+ self.job = job
+ self.queue_entries = queue_entries
- @staticmethod
- def _write_keyval(results_dir, field, value):
- key_path = os.path.join(results_dir, 'keyval')
- keyval_file = open(key_path, 'a')
- print >> keyval_file, '%s=%d' % (field, value)
- keyval_file.close()
+ @staticmethod
+ def _write_keyval(results_dir, field, value):
+ key_path = os.path.join(results_dir, 'keyval')
+ keyval_file = open(key_path, 'a')
+ print >> keyval_file, '%s=%d' % (field, value)
+ keyval_file.close()
- def results_dir(self):
- return self.queue_entries[0].results_dir()
+ def results_dir(self):
+ return self.queue_entries[0].results_dir()
- def run(self):
- """\
- Override AgentTask.run() so we can use a PidfileRunMonitor.
- """
- self.monitor = PidfileRunMonitor(self.results_dir(),
- cmd=self.cmd,
- nice_level=AUTOSERV_NICE_LEVEL)
- self.monitor.run()
+ def run(self):
+ """\
+ Override AgentTask.run() so we can use a PidfileRunMonitor.
+ """
+ self.monitor = PidfileRunMonitor(self.results_dir(),
+ cmd=self.cmd,
+ nice_level=AUTOSERV_NICE_LEVEL)
+ self.monitor.run()
- def prolog(self):
- # write some job timestamps into the job keyval file
- queued = time.mktime(self.job.created_on.timetuple())
- started = time.time()
- self._write_keyval(self.results_dir(), "job_queued", queued)
- self._write_keyval(self.results_dir(), "job_started", started)
- for queue_entry in self.queue_entries:
- print "starting queue_task on %s/%s" % (queue_entry.host.hostname, queue_entry.id)
- queue_entry.set_status('Running')
- queue_entry.host.set_status('Running')
- if (not self.job.is_synchronous() and
- self.job.num_machines() > 1):
- assert len(self.queue_entries) == 1
- self.job.write_to_machines_file(self.queue_entries[0])
+ def prolog(self):
+ # write some job timestamps into the job keyval file
+ queued = time.mktime(self.job.created_on.timetuple())
+ started = time.time()
+ self._write_keyval(self.results_dir(), "job_queued", queued)
+ self._write_keyval(self.results_dir(), "job_started", started)
+ for queue_entry in self.queue_entries:
+ print "starting queue_task on %s/%s" % (queue_entry.host.hostname, queue_entry.id)
+ queue_entry.set_status('Running')
+ queue_entry.host.set_status('Running')
+ if (not self.job.is_synchronous() and
+ self.job.num_machines() > 1):
+ assert len(self.queue_entries) == 1
+ self.job.write_to_machines_file(self.queue_entries[0])
- def _finish_task(self):
- # write out the finished time into the results keyval
- finished = time.time()
- self._write_keyval(self.results_dir(), "job_finished",
- finished)
+ def _finish_task(self):
+ # write out the finished time into the results keyval
+ finished = time.time()
+ self._write_keyval(self.results_dir(), "job_finished",
+ finished)
- # parse the results of the job
- if self.job.is_synchronous() or self.job.num_machines() == 1:
- parse_results(self.job.results_dir())
- else:
- for queue_entry in self.queue_entries:
- parse_results(queue_entry.results_dir(),
- flags="-l 2")
+ # parse the results of the job
+ if self.job.is_synchronous() or self.job.num_machines() == 1:
+ parse_results(self.job.results_dir())
+ else:
+ for queue_entry in self.queue_entries:
+ parse_results(queue_entry.results_dir(),
+ flags="-l 2")
- def abort(self):
- super(QueueTask, self).abort()
- self._finish_task()
+ def abort(self):
+ super(QueueTask, self).abort()
+ self._finish_task()
- def epilog(self):
- super(QueueTask, self).epilog()
- if self.success:
- status = 'Completed'
- else:
- status = 'Failed'
+ def epilog(self):
+ super(QueueTask, self).epilog()
+ if self.success:
+ status = 'Completed'
+ else:
+ status = 'Failed'
- for queue_entry in self.queue_entries:
- queue_entry.set_status(status)
- queue_entry.host.set_status('Ready')
+ for queue_entry in self.queue_entries:
+ queue_entry.set_status(status)
+ queue_entry.host.set_status('Ready')
- self._finish_task()
+ self._finish_task()
- print "queue_task finished with %s/%s" % (status, self.success)
+ print "queue_task finished with %s/%s" % (status, self.success)
class RecoveryQueueTask(QueueTask):
- def __init__(self, job, queue_entries, run_monitor):
- super(RecoveryQueueTask, self).__init__(job,
- queue_entries, cmd=None)
- self.run_monitor = run_monitor
+ def __init__(self, job, queue_entries, run_monitor):
+ super(RecoveryQueueTask, self).__init__(job,
+ queue_entries, cmd=None)
+ self.run_monitor = run_monitor
- def run(self):
- self.monitor = self.run_monitor
+ def run(self):
+ self.monitor = self.run_monitor
- def prolog(self):
- # recovering an existing process - don't do prolog
- pass
+ def prolog(self):
+ # recovering an existing process - don't do prolog
+ pass
class RebootTask(AgentTask):
- def __init__(self, host):
- global _autoserv_path
-
- # Current implementation of autoserv requires control file
- # to be passed on reboot action request. TODO: remove when no
- # longer appropriate.
- self.create_temp_resultsdir('.reboot')
- self.cmd = [_autoserv_path, '-b', '-m', host.hostname,
- '-r', self.temp_results_dir, '/dev/null']
- self.host = host
- super(RebootTask, self).__init__(self.cmd,
- failure_tasks=[RepairTask(host)])
+ def __init__(self, host):
+ global _autoserv_path
+
+ # Current implementation of autoserv requires control file
+ # to be passed on reboot action request. TODO: remove when no
+ # longer appropriate.
+ self.create_temp_resultsdir('.reboot')
+ self.cmd = [_autoserv_path, '-b', '-m', host.hostname,
+ '-r', self.temp_results_dir, '/dev/null']
+ self.host = host
+ super(RebootTask, self).__init__(self.cmd,
+ failure_tasks=[RepairTask(host)])
- def prolog(self):
- print "starting reboot task for host: %s" % self.host.hostname
- self.host.set_status("Rebooting")
+ def prolog(self):
+ print "starting reboot task for host: %s" % self.host.hostname
+ self.host.set_status("Rebooting")
class AbortTask(AgentTask):
- def __init__(self, queue_entry, agents_to_abort):
- self.queue_entry = queue_entry
- self.agents_to_abort = agents_to_abort
- for agent in agents_to_abort:
- agent.dispatcher.remove_agent(agent)
- super(AbortTask, self).__init__('')
+ def __init__(self, queue_entry, agents_to_abort):
+ self.queue_entry = queue_entry
+ self.agents_to_abort = agents_to_abort
+ for agent in agents_to_abort:
+ agent.dispatcher.remove_agent(agent)
+ super(AbortTask, self).__init__('')
+
+ def prolog(self):
+ print "starting abort on host %s, job %s" % (
+ self.queue_entry.host_id, self.queue_entry.job_id)
+ self.queue_entry.set_status('Aborting')
- def prolog(self):
- print "starting abort on host %s, job %s" % (
- self.queue_entry.host_id, self.queue_entry.job_id)
- self.queue_entry.set_status('Aborting')
-
- def epilog(self):
- super(AbortTask, self).epilog()
- self.queue_entry.set_status('Aborted')
- self.success = True
+ def epilog(self):
+ super(AbortTask, self).epilog()
+ self.queue_entry.set_status('Aborted')
+ self.success = True
- def run(self):
- for agent in self.agents_to_abort:
- if (agent.active_task):
- agent.active_task.abort()
+ def run(self):
+ for agent in self.agents_to_abort:
+ if (agent.active_task):
+ agent.active_task.abort()
class DBObject(object):
- def __init__(self, id=None, row=None, new_record=False):
- assert (bool(id) != bool(row))
+ def __init__(self, id=None, row=None, new_record=False):
+ assert (bool(id) != bool(row))
- self.__table = self._get_table()
- fields = self._fields()
+ self.__table = self._get_table()
+ fields = self._fields()
- self.__new_record = new_record
+ self.__new_record = new_record
- if row is None:
- sql = 'SELECT * FROM %s WHERE ID=%%s' % self.__table
- rows = _db.execute(sql, (id,))
- if len(rows) == 0:
- raise "row not found (table=%s, id=%s)" % \
- (self.__table, id)
- row = rows[0]
+ if row is None:
+ sql = 'SELECT * FROM %s WHERE ID=%%s' % self.__table
+ rows = _db.execute(sql, (id,))
+ if len(rows) == 0:
+ raise "row not found (table=%s, id=%s)" % \
+ (self.__table, id)
+ row = rows[0]
- assert len(row) == self.num_cols(), (
- "table = %s, row = %s/%d, fields = %s/%d" % (
- self.__table, row, len(row), fields, self.num_cols()))
+ assert len(row) == self.num_cols(), (
+ "table = %s, row = %s/%d, fields = %s/%d" % (
+ self.__table, row, len(row), fields, self.num_cols()))
- self.__valid_fields = {}
- for i,value in enumerate(row):
- self.__dict__[fields[i]] = value
- self.__valid_fields[fields[i]] = True
+ self.__valid_fields = {}
+ for i,value in enumerate(row):
+ self.__dict__[fields[i]] = value
+ self.__valid_fields[fields[i]] = True
- del self.__valid_fields['id']
+ del self.__valid_fields['id']
- @classmethod
- def _get_table(cls):
- raise NotImplementedError('Subclasses must override this')
+ @classmethod
+ def _get_table(cls):
+ raise NotImplementedError('Subclasses must override this')
- @classmethod
- def _fields(cls):
- raise NotImplementedError('Subclasses must override this')
+ @classmethod
+ def _fields(cls):
+ raise NotImplementedError('Subclasses must override this')
- @classmethod
- def num_cols(cls):
- return len(cls._fields())
+ @classmethod
+ def num_cols(cls):
+ return len(cls._fields())
- def count(self, where, table = None):
- if not table:
- table = self.__table
-
- rows = _db.execute("""
- SELECT count(*) FROM %s
- WHERE %s
- """ % (table, where))
+ def count(self, where, table = None):
+ if not table:
+ table = self.__table
- assert len(rows) == 1
+ rows = _db.execute("""
+ SELECT count(*) FROM %s
+ WHERE %s
+ """ % (table, where))
- return int(rows[0][0])
+ assert len(rows) == 1
+ return int(rows[0][0])
- def update_field(self, field, value):
- assert self.__valid_fields[field]
-
- if self.__dict__[field] == value:
- return
- query = "UPDATE %s SET %s = %%s WHERE id = %%s" % \
- (self.__table, field)
- _db.execute(query, (value, self.id))
+ def update_field(self, field, value):
+ assert self.__valid_fields[field]
- self.__dict__[field] = value
+ if self.__dict__[field] == value:
+ return
+ query = "UPDATE %s SET %s = %%s WHERE id = %%s" % \
+ (self.__table, field)
+ _db.execute(query, (value, self.id))
- def save(self):
- if self.__new_record:
- keys = self._fields()[1:] # avoid id
- columns = ','.join([str(key) for key in keys])
- values = ['"%s"' % self.__dict__[key] for key in keys]
- values = ','.join(values)
- query = """INSERT INTO %s (%s) VALUES (%s)""" % \
- (self.__table, columns, values)
- _db.execute(query)
+ self.__dict__[field] = value
- def delete(self):
- query = 'DELETE FROM %s WHERE id=%%s' % self.__table
- _db.execute(query, (self.id,))
+ def save(self):
+ if self.__new_record:
+ keys = self._fields()[1:] # avoid id
+ columns = ','.join([str(key) for key in keys])
+ values = ['"%s"' % self.__dict__[key] for key in keys]
+ values = ','.join(values)
+ query = """INSERT INTO %s (%s) VALUES (%s)""" % \
+ (self.__table, columns, values)
+ _db.execute(query)
- @classmethod
- def fetch(cls, where, params=()):
- rows = _db.execute(
- 'SELECT * FROM %s WHERE %s' % (cls._get_table(), where),
- params)
- for row in rows:
- yield cls(row=row)
+ def delete(self):
+ query = 'DELETE FROM %s WHERE id=%%s' % self.__table
+ _db.execute(query, (self.id,))
+
+
+ @classmethod
+ def fetch(cls, where, params=()):
+ rows = _db.execute(
+ 'SELECT * FROM %s WHERE %s' % (cls._get_table(), where),
+ params)
+ for row in rows:
+ yield cls(row=row)
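
DBObject above is a minimal row-to-object mapper: subclasses declare a table name and an ordered field list, and a fetched row is bound positionally onto attributes. A sketch of just that binding step, with an invented Label class and row:

# Sketch only: Label and its row are invented for the example; the real
# classes get their rows from _db.execute().
def bind_row(obj, fields, row):
    # mirror of the positional field -> attribute binding in DBObject.__init__
    assert len(row) == len(fields)
    for name, value in zip(fields, row):
        setattr(obj, name, value)

class Label(object):
    pass

label = Label()
bind_row(label, ['id', 'name', 'kernel_config'], [42, 'bigmem', None])
assert label.name == 'bigmem'
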
class IneligibleHostQueue(DBObject):
- def __init__(self, id=None, row=None, new_record=None):
- super(IneligibleHostQueue, self).__init__(id=id, row=row,
- new_record=new_record)
+ def __init__(self, id=None, row=None, new_record=None):
+ super(IneligibleHostQueue, self).__init__(id=id, row=row,
+ new_record=new_record)
- @classmethod
- def _get_table(cls):
- return 'ineligible_host_queues'
+ @classmethod
+ def _get_table(cls):
+ return 'ineligible_host_queues'
- @classmethod
- def _fields(cls):
- return ['id', 'job_id', 'host_id']
+ @classmethod
+ def _fields(cls):
+ return ['id', 'job_id', 'host_id']
class Host(DBObject):
- def __init__(self, id=None, row=None):
- super(Host, self).__init__(id=id, row=row)
+ def __init__(self, id=None, row=None):
+ super(Host, self).__init__(id=id, row=row)
+
+ @classmethod
+ def _get_table(cls):
+ return 'hosts'
- @classmethod
- def _get_table(cls):
- return 'hosts'
+ @classmethod
+ def _fields(cls):
+ return ['id', 'hostname', 'locked', 'synch_id','status',
+ 'invalid']
- @classmethod
- def _fields(cls):
- return ['id', 'hostname', 'locked', 'synch_id','status',
- 'invalid']
+ def current_task(self):
+ rows = _db.execute("""
+ SELECT * FROM host_queue_entries WHERE host_id=%s AND NOT complete AND active
+ """, (self.id,))
- def current_task(self):
- rows = _db.execute("""
- SELECT * FROM host_queue_entries WHERE host_id=%s AND NOT complete AND active
- """, (self.id,))
-
- if len(rows) == 0:
- return None
- else:
- assert len(rows) == 1
- results = rows[0];
-# print "current = %s" % results
- return HostQueueEntry(row=results)
+ if len(rows) == 0:
+ return None
+ else:
+ assert len(rows) == 1
+ results = rows[0]
+# print "current = %s" % results
+ return HostQueueEntry(row=results)
- def yield_work(self):
- print "%s yielding work" % self.hostname
- if self.current_task():
- self.current_task().requeue()
-
- def set_status(self,status):
- print '%s -> %s' % (self.hostname, status)
- self.update_field('status',status)
+ def yield_work(self):
+ print "%s yielding work" % self.hostname
+ if self.current_task():
+ self.current_task().requeue()
+
+ def set_status(self,status):
+ print '%s -> %s' % (self.hostname, status)
+ self.update_field('status',status)
class HostQueueEntry(DBObject):
- def __init__(self, id=None, row=None):
- assert id or row
- super(HostQueueEntry, self).__init__(id=id, row=row)
- self.job = Job(self.job_id)
+ def __init__(self, id=None, row=None):
+ assert id or row
+ super(HostQueueEntry, self).__init__(id=id, row=row)
+ self.job = Job(self.job_id)
- if self.host_id:
- self.host = Host(self.host_id)
- else:
- self.host = None
+ if self.host_id:
+ self.host = Host(self.host_id)
+ else:
+ self.host = None
- self.queue_log_path = os.path.join(self.job.results_dir(),
- 'queue.log.' + str(self.id))
+ self.queue_log_path = os.path.join(self.job.results_dir(),
+ 'queue.log.' + str(self.id))
- @classmethod
- def _get_table(cls):
- return 'host_queue_entries'
+ @classmethod
+ def _get_table(cls):
+ return 'host_queue_entries'
- @classmethod
- def _fields(cls):
- return ['id', 'job_id', 'host_id', 'priority', 'status',
- 'meta_host', 'active', 'complete']
+ @classmethod
+ def _fields(cls):
+ return ['id', 'job_id', 'host_id', 'priority', 'status',
+ 'meta_host', 'active', 'complete']
- def set_host(self, host):
- if host:
- self.queue_log_record('Assigning host ' + host.hostname)
- self.update_field('host_id', host.id)
- self.update_field('active', True)
- self.block_host(host.id)
- else:
- self.queue_log_record('Releasing host')
- self.unblock_host(self.host.id)
- self.update_field('host_id', None)
+ def set_host(self, host):
+ if host:
+ self.queue_log_record('Assigning host ' + host.hostname)
+ self.update_field('host_id', host.id)
+ self.update_field('active', True)
+ self.block_host(host.id)
+ else:
+ self.queue_log_record('Releasing host')
+ self.unblock_host(self.host.id)
+ self.update_field('host_id', None)
- self.host = host
+ self.host = host
- def get_host(self):
- return self.host
+ def get_host(self):
+ return self.host
- def queue_log_record(self, log_line):
- now = str(datetime.datetime.now())
- queue_log = open(self.queue_log_path, 'a', 0)
- queue_log.write(now + ' ' + log_line + '\n')
- queue_log.close()
-
-
- def block_host(self, host_id):
- print "creating block %s/%s" % (self.job.id, host_id)
- row = [0, self.job.id, host_id]
- block = IneligibleHostQueue(row=row, new_record=True)
- block.save()
+ def queue_log_record(self, log_line):
+ now = str(datetime.datetime.now())
+ queue_log = open(self.queue_log_path, 'a', 0)
+ queue_log.write(now + ' ' + log_line + '\n')
+ queue_log.close()
+
+
+ def block_host(self, host_id):
+ print "creating block %s/%s" % (self.job.id, host_id)
+ row = [0, self.job.id, host_id]
+ block = IneligibleHostQueue(row=row, new_record=True)
+ block.save()
- def unblock_host(self, host_id):
- print "removing block %s/%s" % (self.job.id, host_id)
- blocks = IneligibleHostQueue.fetch(
- 'job_id=%d and host_id=%d' % (self.job.id, host_id))
- for block in blocks:
- block.delete()
+ def unblock_host(self, host_id):
+ print "removing block %s/%s" % (self.job.id, host_id)
+ blocks = IneligibleHostQueue.fetch(
+ 'job_id=%d and host_id=%d' % (self.job.id, host_id))
+ for block in blocks:
+ block.delete()
- def results_dir(self):
- if self.job.is_synchronous() or self.job.num_machines() == 1:
- return self.job.job_dir
- else:
- assert self.host
- return os.path.join(self.job.job_dir,
- self.host.hostname)
+ def results_dir(self):
+ if self.job.is_synchronous() or self.job.num_machines() == 1:
+ return self.job.job_dir
+ else:
+ assert self.host
+ return os.path.join(self.job.job_dir,
+ self.host.hostname)
- def verify_results_dir(self):
- if self.job.is_synchronous() or self.job.num_machines() > 1:
- assert self.host
- return os.path.join(self.job.job_dir,
- self.host.hostname)
- else:
- return self.job.job_dir
+ def verify_results_dir(self):
+ if self.job.is_synchronous() or self.job.num_machines() > 1:
+ assert self.host
+ return os.path.join(self.job.job_dir,
+ self.host.hostname)
+ else:
+ return self.job.job_dir
- def set_status(self, status):
- self.update_field('status', status)
- if self.host:
- hostname = self.host.hostname
- else:
- hostname = 'no host'
- print "%s/%d status -> %s" % (hostname, self.id, self.status)
- if status in ['Queued']:
- self.update_field('complete', False)
- self.update_field('active', False)
+ def set_status(self, status):
+ self.update_field('status', status)
+ if self.host:
+ hostname = self.host.hostname
+ else:
+ hostname = 'no host'
+ print "%s/%d status -> %s" % (hostname, self.id, self.status)
+ if status in ['Queued']:
+ self.update_field('complete', False)
+ self.update_field('active', False)
- if status in ['Pending', 'Running', 'Verifying', 'Starting',
- 'Abort', 'Aborting']:
- self.update_field('complete', False)
- self.update_field('active', True)
+ if status in ['Pending', 'Running', 'Verifying', 'Starting',
+ 'Abort', 'Aborting']:
+ self.update_field('complete', False)
+ self.update_field('active', True)
- if status in ['Failed', 'Completed', 'Stopped', 'Aborted']:
- self.update_field('complete', True)
- self.update_field('active', False)
+ if status in ['Failed', 'Completed', 'Stopped', 'Aborted']:
+ self.update_field('complete', True)
+ self.update_field('active', False)
- def run(self,assigned_host=None):
- if self.meta_host:
- assert assigned_host
- # ensure results dir exists for the queue log
- self.job.create_results_dir()
- self.set_host(assigned_host)
+ def run(self,assigned_host=None):
+ if self.meta_host:
+ assert assigned_host
+ # ensure results dir exists for the queue log
+ self.job.create_results_dir()
+ self.set_host(assigned_host)
- print "%s/%s scheduled on %s, status=%s" % (self.job.name,
- self.meta_host, self.host.hostname, self.status)
+ print "%s/%s scheduled on %s, status=%s" % (self.job.name,
+ self.meta_host, self.host.hostname, self.status)
- return self.job.run(queue_entry=self)
-
- def requeue(self):
- self.set_status('Queued')
-
- if self.meta_host:
- self.set_host(None)
-
-
- def handle_host_failure(self):
- """\
- Called when this queue entry's host has failed verification and
- repair.
- """
- assert not self.meta_host
- self.set_status('Failed')
- if self.job.is_synchronous():
- self.job.stop_all_entries()
+ return self.job.run(queue_entry=self)
+
+ def requeue(self):
+ self.set_status('Queued')
+
+ if self.meta_host:
+ self.set_host(None)
+
+
+ def handle_host_failure(self):
+ """\
+ Called when this queue entry's host has failed verification and
+ repair.
+ """
+ assert not self.meta_host
+ self.set_status('Failed')
+ if self.job.is_synchronous():
+ self.job.stop_all_entries()
- def clear_results_dir(self, results_dir=None, dont_delete_files=False):
- results_dir = results_dir or self.results_dir()
- if not os.path.exists(results_dir):
- return
- if dont_delete_files:
- temp_dir = tempfile.mkdtemp(suffix='.clear_results')
- print 'Moving results from %s to %s' % (results_dir,
- temp_dir)
- for filename in os.listdir(results_dir):
- path = os.path.join(results_dir, filename)
- if dont_delete_files:
- shutil.move(path,
- os.path.join(temp_dir, filename))
- else:
- remove_file_or_dir(path)
+ def clear_results_dir(self, results_dir=None, dont_delete_files=False):
+ results_dir = results_dir or self.results_dir()
+ if not os.path.exists(results_dir):
+ return
+ if dont_delete_files:
+ temp_dir = tempfile.mkdtemp(suffix='.clear_results')
+ print 'Moving results from %s to %s' % (results_dir,
+ temp_dir)
+ for filename in os.listdir(results_dir):
+ path = os.path.join(results_dir, filename)
+ if dont_delete_files:
+ shutil.move(path,
+ os.path.join(temp_dir, filename))
+ else:
+ remove_file_or_dir(path)
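
HostQueueEntry.set_status() above keeps the active and complete flags in sync with the status string. The same mapping, written out as a lookup table for reference (a sketch for readability, not code from the scheduler):

# Sketch only: a reference table of the (active, complete) flags that
# set_status applies for each status string.
_STATUS_FLAGS = {
    'Queued':    (False, False),
    'Pending':   (True,  False),
    'Running':   (True,  False),
    'Verifying': (True,  False),
    'Starting':  (True,  False),
    'Abort':     (True,  False),
    'Aborting':  (True,  False),
    'Failed':    (False, True),
    'Completed': (False, True),
    'Stopped':   (False, True),
    'Aborted':   (False, True),
}

def flags_for_status(status):
    """Return (active, complete) for a status, or None if neither is touched."""
    return _STATUS_FLAGS.get(status)

assert flags_for_status('Running') == (True, False)
assert flags_for_status('Aborted') == (False, True)
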
class Job(DBObject):
- def __init__(self, id=None, row=None):
- assert id or row
- super(Job, self).__init__(id=id, row=row)
+ def __init__(self, id=None, row=None):
+ assert id or row
+ super(Job, self).__init__(id=id, row=row)
+
+ self.job_dir = os.path.join(RESULTS_DIR, "%s-%s" % (self.id,
+ self.owner))
+
- self.job_dir = os.path.join(RESULTS_DIR, "%s-%s" % (self.id,
- self.owner))
+ @classmethod
+ def _get_table(cls):
+ return 'jobs'
- @classmethod
- def _get_table(cls):
- return 'jobs'
+ @classmethod
+ def _fields(cls):
+ return ['id', 'owner', 'name', 'priority', 'control_file',
+ 'control_type', 'created_on', 'synch_type',
+ 'synch_count', 'synchronizing']
- @classmethod
- def _fields(cls):
- return ['id', 'owner', 'name', 'priority', 'control_file',
- 'control_type', 'created_on', 'synch_type',
- 'synch_count', 'synchronizing']
+ def is_server_job(self):
+ return self.control_type != 2
- def is_server_job(self):
- return self.control_type != 2
+ def get_host_queue_entries(self):
+ rows = _db.execute("""
+ SELECT * FROM host_queue_entries
+ WHERE job_id= %s
+ """, (self.id,))
+ entries = [HostQueueEntry(row=i) for i in rows]
+ assert len(entries)>0
- def get_host_queue_entries(self):
- rows = _db.execute("""
- SELECT * FROM host_queue_entries
- WHERE job_id= %s
- """, (self.id,))
- entries = [HostQueueEntry(row=i) for i in rows]
+ return entries
- assert len(entries)>0
- return entries
+ def set_status(self, status, update_queues=False):
+ self.update_field('status',status)
+ if update_queues:
+ for queue_entry in self.get_host_queue_entries():
+ queue_entry.set_status(status)
- def set_status(self, status, update_queues=False):
- self.update_field('status',status)
-
- if update_queues:
- for queue_entry in self.get_host_queue_entries():
- queue_entry.set_status(status)
+ def is_synchronous(self):
+ return self.synch_type == 2
- def is_synchronous(self):
- return self.synch_type == 2
+ def is_ready(self):
+ if not self.is_synchronous():
+ return True
+ sql = "job_id=%s AND status='Pending'" % self.id
+ count = self.count(sql, table='host_queue_entries')
+ return (count == self.synch_count)
- def is_ready(self):
- if not self.is_synchronous():
- return True
- sql = "job_id=%s AND status='Pending'" % self.id
- count = self.count(sql, table='host_queue_entries')
- return (count == self.synch_count)
+ def ready_to_synchronize(self):
+ # heuristic
+ queue_entries = self.get_host_queue_entries()
+ count = 0
+ for queue_entry in queue_entries:
+ if queue_entry.status == 'Pending':
+ count += 1
- def ready_to_synchronize(self):
- # heuristic
- queue_entries = self.get_host_queue_entries()
- count = 0
- for queue_entry in queue_entries:
- if queue_entry.status == 'Pending':
- count += 1
+ # use float division so the 50% heuristic works under Python 2
+ return (float(count)/self.synch_count >= 0.5)
- return (count/self.synch_count >= 0.5)
+ def start_synchronizing(self):
+ self.update_field('synchronizing', True)
- def start_synchronizing(self):
- self.update_field('synchronizing', True)
+ def results_dir(self):
+ return self.job_dir
- def results_dir(self):
- return self.job_dir
+ def num_machines(self, clause = None):
+ sql = "job_id=%s" % self.id
+ if clause:
+ sql += " AND (%s)" % clause
+ return self.count(sql, table='host_queue_entries')
- def num_machines(self, clause = None):
- sql = "job_id=%s" % self.id
- if clause:
- sql += " AND (%s)" % clause
- return self.count(sql, table='host_queue_entries')
+ def num_queued(self):
+ return self.num_machines('not complete')
- def num_queued(self):
- return self.num_machines('not complete')
+ def num_active(self):
+ return self.num_machines('active')
- def num_active(self):
- return self.num_machines('active')
+ def num_complete(self):
+ return self.num_machines('complete')
- def num_complete(self):
- return self.num_machines('complete')
+ def is_finished(self):
+ left = self.num_queued()
+ print "%s: %s machines left" % (self.name, left)
+ return left==0
- def is_finished(self):
- left = self.num_queued()
- print "%s: %s machines left" % (self.name, left)
- return left==0
+ def stop_synchronizing(self):
+ self.update_field('synchronizing', False)
+ self.set_status('Queued', update_queues = False)
- def stop_synchronizing(self):
- self.update_field('synchronizing', False)
- self.set_status('Queued', update_queues = False)
+ def stop_all_entries(self):
+ for child_entry in self.get_host_queue_entries():
+ if not child_entry.complete:
+ child_entry.set_status('Stopped')
- def stop_all_entries(self):
- for child_entry in self.get_host_queue_entries():
- if not child_entry.complete:
- child_entry.set_status('Stopped')
+ def write_to_machines_file(self, queue_entry):
+ hostname = queue_entry.get_host().hostname
+ print "writing %s to job %s machines file" % (hostname, self.id)
+ file_path = os.path.join(self.job_dir, '.machines')
+ mf = open(file_path, 'a')
+ mf.write("%s\n" % queue_entry.get_host().hostname)
+ mf.close()
- def write_to_machines_file(self, queue_entry):
- hostname = queue_entry.get_host().hostname
- print "writing %s to job %s machines file" % (hostname, self.id)
- file_path = os.path.join(self.job_dir, '.machines')
- mf = open(file_path, 'a')
- mf.write("%s\n" % queue_entry.get_host().hostname)
- mf.close()
+ def create_results_dir(self, queue_entry=None):
+ print "create: active: %s complete %s" % (self.num_active(),
+ self.num_complete())
- def create_results_dir(self, queue_entry=None):
- print "create: active: %s complete %s" % (self.num_active(),
- self.num_complete())
+ if not os.path.exists(self.job_dir):
+ os.makedirs(self.job_dir)
- if not os.path.exists(self.job_dir):
- os.makedirs(self.job_dir)
+ if queue_entry:
+ return queue_entry.results_dir()
+ return self.job_dir
- if queue_entry:
- return queue_entry.results_dir()
- return self.job_dir
+ def run(self, queue_entry):
+ results_dir = self.create_results_dir(queue_entry)
- def run(self, queue_entry):
- results_dir = self.create_results_dir(queue_entry)
+ if self.is_synchronous():
+ if not self.is_ready():
+ return Agent([VerifySynchronousTask(
+ queue_entry = queue_entry)],
+ [queue_entry.id])
- if self.is_synchronous():
- if not self.is_ready():
- return Agent([VerifySynchronousTask(
- queue_entry = queue_entry)],
- [queue_entry.id])
+ queue_entry.set_status('Starting')
- queue_entry.set_status('Starting')
+ ctrl = open(os.tmpnam(), 'w')
+ if self.control_file:
+ ctrl.write(self.control_file)
+ else:
+ ctrl.write("")
+ ctrl.flush()
- ctrl = open(os.tmpnam(), 'w')
- if self.control_file:
- ctrl.write(self.control_file)
- else:
- ctrl.write("")
- ctrl.flush()
+ if self.is_synchronous():
+ queue_entries = self.get_host_queue_entries()
+ else:
+ assert queue_entry
+ queue_entries = [queue_entry]
+ hostnames = ','.join([entry.get_host().hostname
+ for entry in queue_entries])
- if self.is_synchronous():
- queue_entries = self.get_host_queue_entries()
- else:
- assert queue_entry
- queue_entries = [queue_entry]
- hostnames = ','.join([entry.get_host().hostname
- for entry in queue_entries])
+ # determine the job tag
+ if self.is_synchronous() or self.num_machines() == 1:
+ job_name = "%s-%s" % (self.id, self.owner)
+ else:
+ job_name = "%s-%s/%s" % (self.id, self.owner,
+ hostnames)
- # determine the job tag
- if self.is_synchronous() or self.num_machines() == 1:
- job_name = "%s-%s" % (self.id, self.owner)
- else:
- job_name = "%s-%s/%s" % (self.id, self.owner,
- hostnames)
+ params = [_autoserv_path, '-P', job_name, '-p', '-n',
+ '-r', os.path.abspath(results_dir),
+ '-b', '-u', self.owner, '-l', self.name,
+ '-m', hostnames, ctrl.name]
- params = [_autoserv_path, '-P', job_name, '-p', '-n',
- '-r', os.path.abspath(results_dir),
- '-b', '-u', self.owner, '-l', self.name,
- '-m', hostnames, ctrl.name]
+ if not self.is_server_job():
+ params.append('-c')
- if not self.is_server_job():
- params.append('-c')
+ tasks = []
+ if not self.is_synchronous():
+ tasks.append(VerifyTask(queue_entry))
- tasks = []
- if not self.is_synchronous():
- tasks.append(VerifyTask(queue_entry))
+ tasks.append(QueueTask(job = self,
+ queue_entries = queue_entries,
+ cmd = params))
- tasks.append(QueueTask(job = self,
- queue_entries = queue_entries,
- cmd = params))
+ ids = []
+ for entry in queue_entries:
+ ids.append(entry.id)
- ids = []
- for entry in queue_entries:
- ids.append(entry.id)
-
- agent = Agent(tasks, ids)
+ agent = Agent(tasks, ids)
- return agent
+ return agent
if __name__ == '__main__':
- main()
+ main()
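The Job.run() hunk above writes the job's control file to a temporary file and assembles the autoserv command line from the job row and its host queue entries. Below is a minimal sketch of the resulting invocation, not part of the patch, using made-up id, owner, host and path values (the real ones come from the jobs and host_queue_entries tables, and _autoserv_path is resolved elsewhere in monitor_db.py):

    import os

    _autoserv_path = 'autoserv'                # assumed; defined elsewhere in monitor_db.py
    job_id, owner, name = 123, 'myuser', 'my-job-name'
    hostnames = 'myhost'                       # one asynchronous queue entry
    results_dir = '/results/123-myuser/myhost'
    control_path = '/tmp/filejx43Zi'           # stands in for the os.tmpnam() control file

    # Asynchronous jobs spanning several machines get a per-entry tag "id-owner/host";
    # synchronous or single-machine jobs use just "id-owner".
    job_tag = '%s-%s/%s' % (job_id, owner, hostnames)

    params = [_autoserv_path, '-P', job_tag, '-p', '-n',
              '-r', os.path.abspath(results_dir),
              '-b', '-u', owner, '-l', name,
              '-m', hostnames, control_path]
    params.append('-c')                        # appended only for client-side control files

The hard-coded args string in PidfileRunMonitorTest in the unittest hunk below shows the same shape, prefixed with 'nice -n 10'.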
diff --git a/scheduler/monitor_db_unittest.py b/scheduler/monitor_db_unittest.py
index 68d71d26..c3126369 100644
--- a/scheduler/monitor_db_unittest.py
+++ b/scheduler/monitor_db_unittest.py
@@ -31,610 +31,610 @@ INSERT INTO hosts_labels (host_id, label_id) VALUES
"""
class Dummy(object):
-	'Dummy object that can have attributes assigned to it'
+    'Dummy object that can have attributes assigned to it'
class DispatcherTest(unittest.TestCase):
- _jobs_scheduled = []
- _job_counter = 0
+ _jobs_scheduled = []
+ _job_counter = 0
- def _read_db_info(self):
- config = global_config.global_config
- section = 'AUTOTEST_WEB'
- self._host = config.get_config_value(section, "host")
- self._db_name = config.get_config_value(section, "database")
- self._user = config.get_config_value(section, "user")
- self._password = config.get_config_value(section, "password")
+ def _read_db_info(self):
+ config = global_config.global_config
+ section = 'AUTOTEST_WEB'
+ self._host = config.get_config_value(section, "host")
+ self._db_name = config.get_config_value(section, "database")
+ self._user = config.get_config_value(section, "user")
+ self._password = config.get_config_value(section, "password")
- def _connect_to_db(self, db_name=''):
- self._con = MySQLdb.connect(host=self._host, user=self._user,
- passwd=self._password, db=db_name)
- self._con.autocommit(True)
- self._cur = self._con.cursor()
+ def _connect_to_db(self, db_name=''):
+ self._con = MySQLdb.connect(host=self._host, user=self._user,
+ passwd=self._password, db=db_name)
+ self._con.autocommit(True)
+ self._cur = self._con.cursor()
- def _disconnect_from_db(self):
- self._con.close()
+ def _disconnect_from_db(self):
+ self._con.close()
-
- def _do_query(self, sql):
- if _DEBUG:
- print 'SQL:', sql
- self._cur.execute(sql)
+
+ def _do_query(self, sql):
+ if _DEBUG:
+ print 'SQL:', sql
+ self._cur.execute(sql)
- def _do_queries(self, sql_queries):
- for query in sql_queries.split(';'):
- query = query.strip()
- if query:
- self._do_query(query)
+ def _do_queries(self, sql_queries):
+ for query in sql_queries.split(';'):
+ query = query.strip()
+ if query:
+ self._do_query(query)
- def _get_db_schema(self):
- command = 'mysqldump --no-data -u %s -p%s -h %s %s' % (
- self._user, self._password, self._host, self._db_name)
- proc = subprocess.Popen(command, stdout=subprocess.PIPE,
- shell=True)
- return proc.communicate()[0]
+ def _get_db_schema(self):
+ command = 'mysqldump --no-data -u %s -p%s -h %s %s' % (
+ self._user, self._password, self._host, self._db_name)
+ proc = subprocess.Popen(command, stdout=subprocess.PIPE,
+ shell=True)
+ return proc.communicate()[0]
- def _open_test_db(self, schema):
- self._db_name = 'test_' + self._db_name
- self._connect_to_db()
- self._do_query('CREATE DATABASE ' + self._db_name)
- self._disconnect_from_db()
- self._connect_to_db(self._db_name)
- self._do_queries(schema)
+ def _open_test_db(self, schema):
+ self._db_name = 'test_' + self._db_name
+ self._connect_to_db()
+ self._do_query('CREATE DATABASE ' + self._db_name)
+ self._disconnect_from_db()
+ self._connect_to_db(self._db_name)
+ self._do_queries(schema)
- def _close_test_db(self):
- self._do_query('DROP DATABASE ' + self._db_name)
- self._disconnect_from_db()
+ def _close_test_db(self):
+ self._do_query('DROP DATABASE ' + self._db_name)
+ self._disconnect_from_db()
- def _fill_in_test_data(self):
- self._do_queries(_TEST_DATA)
+ def _fill_in_test_data(self):
+ self._do_queries(_TEST_DATA)
- def _set_monitor_stubs(self):
- monitor_db._db = monitor_db.DatabaseConn()
- monitor_db._db.connect(db_name=self._db_name)
- def run_stub(hqe_self, assigned_host=None):
- if hqe_self.meta_host:
- host = assigned_host
- else:
- host = hqe_self.host
- self._record_job_scheduled(hqe_self.job.id, host.id)
- return Dummy()
- monitor_db.HostQueueEntry.run = run_stub
-
-
- def _record_job_scheduled(self, job_id, host_id):
- record = (job_id, host_id)
- self.assert_(record not in self._jobs_scheduled,
- 'Job %d scheduled on host %d twice' %
- (job_id, host_id))
- self._jobs_scheduled.append(record)
-
-
- def _assert_job_scheduled_on(self, job_id, host_id):
- record = (job_id, host_id)
- self.assert_(record in self._jobs_scheduled,
- 'Job %d not scheduled on host %d as expected\n'
- 'Jobs scheduled: %s' %
- (job_id, host_id, self._jobs_scheduled))
- self._jobs_scheduled.remove(record)
-
-
- def _check_for_extra_schedulings(self):
- if len(self._jobs_scheduled) != 0:
- self.fail('Extra jobs scheduled: ' +
- str(self._jobs_scheduled))
-
-
- def _create_job(self, hosts=[], metahosts=[], priority=0, active=0):
- self._do_query('INSERT INTO jobs (name, owner, priority) '
- 'VALUES ("test", "my_user", %d)' % priority)
- self._job_counter += 1
- job_id = self._job_counter
- queue_entry_sql = (
- 'INSERT INTO host_queue_entries '
- '(job_id, priority, host_id, meta_host, active) '
- 'VALUES (%d, %d, %%s, %%s, %d)' %
- (job_id, priority, active))
- for host_id in hosts:
- self._do_query(queue_entry_sql % (host_id, 'NULL'))
- self._do_query('INSERT INTO ineligible_host_queues '
- '(job_id, host_id) VALUES (%d, %d)' %
- (job_id, host_id))
- for label_id in metahosts:
- self._do_query(queue_entry_sql % ('NULL', label_id))
+ def _set_monitor_stubs(self):
+ monitor_db._db = monitor_db.DatabaseConn()
+ monitor_db._db.connect(db_name=self._db_name)
+ def run_stub(hqe_self, assigned_host=None):
+ if hqe_self.meta_host:
+ host = assigned_host
+ else:
+ host = hqe_self.host
+ self._record_job_scheduled(hqe_self.job.id, host.id)
+ return Dummy()
+ monitor_db.HostQueueEntry.run = run_stub
+
+
+ def _record_job_scheduled(self, job_id, host_id):
+ record = (job_id, host_id)
+ self.assert_(record not in self._jobs_scheduled,
+ 'Job %d scheduled on host %d twice' %
+ (job_id, host_id))
+ self._jobs_scheduled.append(record)
+
+
+ def _assert_job_scheduled_on(self, job_id, host_id):
+ record = (job_id, host_id)
+ self.assert_(record in self._jobs_scheduled,
+ 'Job %d not scheduled on host %d as expected\n'
+ 'Jobs scheduled: %s' %
+ (job_id, host_id, self._jobs_scheduled))
+ self._jobs_scheduled.remove(record)
+
+
+ def _check_for_extra_schedulings(self):
+ if len(self._jobs_scheduled) != 0:
+ self.fail('Extra jobs scheduled: ' +
+ str(self._jobs_scheduled))
+
+
+ def _create_job(self, hosts=[], metahosts=[], priority=0, active=0):
+ self._do_query('INSERT INTO jobs (name, owner, priority) '
+ 'VALUES ("test", "my_user", %d)' % priority)
+ self._job_counter += 1
+ job_id = self._job_counter
+ queue_entry_sql = (
+ 'INSERT INTO host_queue_entries '
+ '(job_id, priority, host_id, meta_host, active) '
+ 'VALUES (%d, %d, %%s, %%s, %d)' %
+ (job_id, priority, active))
+ for host_id in hosts:
+ self._do_query(queue_entry_sql % (host_id, 'NULL'))
+ self._do_query('INSERT INTO ineligible_host_queues '
+ '(job_id, host_id) VALUES (%d, %d)' %
+ (job_id, host_id))
+ for label_id in metahosts:
+ self._do_query(queue_entry_sql % ('NULL', label_id))
- def _create_job_simple(self, hosts, use_metahost=False,
- priority=0, active=0):
- 'An alternative interface to _create_job'
- args = {'hosts' : [], 'metahosts' : []}
- if use_metahost:
- args['metahosts'] = hosts
- else:
- args['hosts'] = hosts
- self._create_job(priority=priority, active=active, **args)
+ def _create_job_simple(self, hosts, use_metahost=False,
+ priority=0, active=0):
+ 'An alternative interface to _create_job'
+ args = {'hosts' : [], 'metahosts' : []}
+ if use_metahost:
+ args['metahosts'] = hosts
+ else:
+ args['hosts'] = hosts
+ self._create_job(priority=priority, active=active, **args)
- def _convert_jobs_to_metahosts(self, *job_ids):
- sql_tuple = '(' + ','.join(str(i) for i in job_ids) + ')'
- self._do_query('UPDATE host_queue_entries SET '
- 'meta_host=host_id, host_id=NULL '
- 'WHERE job_id IN ' + sql_tuple)
+ def _convert_jobs_to_metahosts(self, *job_ids):
+ sql_tuple = '(' + ','.join(str(i) for i in job_ids) + ')'
+ self._do_query('UPDATE host_queue_entries SET '
+ 'meta_host=host_id, host_id=NULL '
+ 'WHERE job_id IN ' + sql_tuple)
- def _lock_host(self, host_id):
- self._do_query('UPDATE hosts SET locked=1 WHERE id=' +
- str(host_id))
+ def _lock_host(self, host_id):
+ self._do_query('UPDATE hosts SET locked=1 WHERE id=' +
+ str(host_id))
- def setUp(self):
- self._read_db_info()
- schema = self._get_db_schema()
- self._open_test_db(schema)
- self._fill_in_test_data()
- self._set_monitor_stubs()
- self._dispatcher = monitor_db.Dispatcher()
- self._jobs_scheduled = []
- self._job_counter = 0
+ def setUp(self):
+ self._read_db_info()
+ schema = self._get_db_schema()
+ self._open_test_db(schema)
+ self._fill_in_test_data()
+ self._set_monitor_stubs()
+ self._dispatcher = monitor_db.Dispatcher()
+ self._jobs_scheduled = []
+ self._job_counter = 0
- def tearDown(self):
- self._close_test_db()
+ def tearDown(self):
+ self._close_test_db()
- def _test_basic_scheduling_helper(self, use_metahosts):
- 'Basic nonmetahost scheduling'
- self._create_job_simple([1], use_metahosts)
- self._create_job_simple([2], use_metahosts)
- self._dispatcher._schedule_new_jobs()
- self._assert_job_scheduled_on(1, 1)
- self._assert_job_scheduled_on(2, 2)
- self._check_for_extra_schedulings()
-
+ def _test_basic_scheduling_helper(self, use_metahosts):
+ 'Basic nonmetahost scheduling'
+ self._create_job_simple([1], use_metahosts)
+ self._create_job_simple([2], use_metahosts)
+ self._dispatcher._schedule_new_jobs()
+ self._assert_job_scheduled_on(1, 1)
+ self._assert_job_scheduled_on(2, 2)
+ self._check_for_extra_schedulings()
+
- def _test_priorities_helper(self, use_metahosts):
- 'Test prioritization ordering'
- self._create_job_simple([1], use_metahosts)
- self._create_job_simple([2], use_metahosts)
- self._create_job_simple([1,2], use_metahosts)
- self._create_job_simple([1], use_metahosts, priority=1)
- self._dispatcher._schedule_new_jobs()
- self._assert_job_scheduled_on(4, 1) # higher priority
- self._assert_job_scheduled_on(2, 2) # earlier job over later
- self._check_for_extra_schedulings()
+ def _test_priorities_helper(self, use_metahosts):
+ 'Test prioritization ordering'
+ self._create_job_simple([1], use_metahosts)
+ self._create_job_simple([2], use_metahosts)
+ self._create_job_simple([1,2], use_metahosts)
+ self._create_job_simple([1], use_metahosts, priority=1)
+ self._dispatcher._schedule_new_jobs()
+ self._assert_job_scheduled_on(4, 1) # higher priority
+ self._assert_job_scheduled_on(2, 2) # earlier job over later
+ self._check_for_extra_schedulings()
- def _test_hosts_ready_helper(self, use_metahosts):
- """
- Only hosts that are status=Ready, unlocked and not invalid get
- scheduled.
- """
- self._create_job_simple([1], use_metahosts)
- self._do_query('UPDATE hosts SET status="Running" WHERE id=1')
- self._dispatcher._schedule_new_jobs()
- self._check_for_extra_schedulings()
+ def _test_hosts_ready_helper(self, use_metahosts):
+ """
+ Only hosts that are status=Ready, unlocked and not invalid get
+ scheduled.
+ """
+ self._create_job_simple([1], use_metahosts)
+ self._do_query('UPDATE hosts SET status="Running" WHERE id=1')
+ self._dispatcher._schedule_new_jobs()
+ self._check_for_extra_schedulings()
- self._do_query('UPDATE hosts SET status="Ready", locked=1 '
- 'WHERE id=1')
- self._dispatcher._schedule_new_jobs()
- self._check_for_extra_schedulings()
+ self._do_query('UPDATE hosts SET status="Ready", locked=1 '
+ 'WHERE id=1')
+ self._dispatcher._schedule_new_jobs()
+ self._check_for_extra_schedulings()
- self._do_query('UPDATE hosts SET locked=0, invalid=1 '
- 'WHERE id=1')
- self._dispatcher._schedule_new_jobs()
- self._check_for_extra_schedulings()
+ self._do_query('UPDATE hosts SET locked=0, invalid=1 '
+ 'WHERE id=1')
+ self._dispatcher._schedule_new_jobs()
+ self._check_for_extra_schedulings()
- def _test_hosts_idle_helper(self, use_metahosts):
- 'Only idle hosts get scheduled'
- self._create_job(hosts=[1], active=1)
- self._create_job_simple([1], use_metahosts)
- self._dispatcher._schedule_new_jobs()
- self._check_for_extra_schedulings()
+ def _test_hosts_idle_helper(self, use_metahosts):
+ 'Only idle hosts get scheduled'
+ self._create_job(hosts=[1], active=1)
+ self._create_job_simple([1], use_metahosts)
+ self._dispatcher._schedule_new_jobs()
+ self._check_for_extra_schedulings()
- def test_basic_scheduling(self):
- self._test_basic_scheduling_helper(False)
+ def test_basic_scheduling(self):
+ self._test_basic_scheduling_helper(False)
- def test_priorities(self):
- self._test_priorities_helper(False)
+ def test_priorities(self):
+ self._test_priorities_helper(False)
- def test_hosts_ready(self):
- self._test_hosts_ready_helper(False)
+ def test_hosts_ready(self):
+ self._test_hosts_ready_helper(False)
- def test_hosts_idle(self):
- self._test_hosts_idle_helper(False)
+ def test_hosts_idle(self):
+ self._test_hosts_idle_helper(False)
- def test_metahost_scheduling(self):
- 'Basic metahost scheduling'
- self._test_basic_scheduling_helper(True)
+ def test_metahost_scheduling(self):
+ 'Basic metahost scheduling'
+ self._test_basic_scheduling_helper(True)
- def test_metahost_priorities(self):
- self._test_priorities_helper(True)
+ def test_metahost_priorities(self):
+ self._test_priorities_helper(True)
- def test_metahost_hosts_ready(self):
- self._test_hosts_ready_helper(True)
+ def test_metahost_hosts_ready(self):
+ self._test_hosts_ready_helper(True)
- def test_metahost_hosts_idle(self):
- self._test_hosts_idle_helper(True)
+ def test_metahost_hosts_idle(self):
+ self._test_hosts_idle_helper(True)
- def test_nonmetahost_over_metahost(self):
- """
- Non-metahost entries should take priority over metahost entries
- for the same host
- """
- self._create_job(metahosts=[1])
- self._create_job(hosts=[1])
- self._dispatcher._schedule_new_jobs()
- self._assert_job_scheduled_on(2, 1)
- self._check_for_extra_schedulings()
+ def test_nonmetahost_over_metahost(self):
+ """
+ Non-metahost entries should take priority over metahost entries
+ for the same host
+ """
+ self._create_job(metahosts=[1])
+ self._create_job(hosts=[1])
+ self._dispatcher._schedule_new_jobs()
+ self._assert_job_scheduled_on(2, 1)
+ self._check_for_extra_schedulings()
- def test_metahosts_obey_blocks(self):
- """
- Metahosts can't get scheduled on hosts already scheduled for
- that job.
- """
- self._create_job(metahosts=[1], hosts=[1])
- # make the nonmetahost entry complete, so the metahost can try
- # to get scheduled
- self._do_query('UPDATE host_queue_entries SET complete = 1 '
- 'WHERE host_id=1')
- self._dispatcher._schedule_new_jobs()
- self._check_for_extra_schedulings()
+ def test_metahosts_obey_blocks(self):
+ """
+ Metahosts can't get scheduled on hosts already scheduled for
+ that job.
+ """
+ self._create_job(metahosts=[1], hosts=[1])
+ # make the nonmetahost entry complete, so the metahost can try
+ # to get scheduled
+ self._do_query('UPDATE host_queue_entries SET complete = 1 '
+ 'WHERE host_id=1')
+ self._dispatcher._schedule_new_jobs()
+ self._check_for_extra_schedulings()
- def test_metahosts_obey_ACLs(self):
- "ACL-inaccessible hosts can't get scheduled for metahosts"
- self._do_query('DELETE FROM acl_groups_hosts WHERE host_id=1')
- self._create_job(metahosts=[1])
- self._dispatcher._schedule_new_jobs()
- self._check_for_extra_schedulings()
+ def test_metahosts_obey_ACLs(self):
+ "ACL-inaccessible hosts can't get scheduled for metahosts"
+ self._do_query('DELETE FROM acl_groups_hosts WHERE host_id=1')
+ self._create_job(metahosts=[1])
+ self._dispatcher._schedule_new_jobs()
+ self._check_for_extra_schedulings()
class PidfileRunMonitorTest(unittest.TestCase):
- results_dir = '/test/path'
- pidfile_path = os.path.join(results_dir, monitor_db.AUTOSERV_PID_FILE)
- pid = 12345
- args = ('nice -n 10 autoserv -P 123-myuser/myhost -p -n '
- '-r ' + results_dir + ' -b -u myuser -l my-job-name '
- '-m myhost /tmp/filejx43Zi -c')
- bad_args = args.replace(results_dir, '/random/results/dir')
+ results_dir = '/test/path'
+ pidfile_path = os.path.join(results_dir, monitor_db.AUTOSERV_PID_FILE)
+ pid = 12345
+ args = ('nice -n 10 autoserv -P 123-myuser/myhost -p -n '
+ '-r ' + results_dir + ' -b -u myuser -l my-job-name '
+ '-m myhost /tmp/filejx43Zi -c')
+ bad_args = args.replace(results_dir, '/random/results/dir')
- def setUp(self):
- self.god = mock.mock_god()
- self.god.stub_function(monitor_db, 'open')
- self.god.stub_function(os.path, 'exists')
- self.god.stub_function(monitor_db.email_manager,
- 'enqueue_notify_email')
- self.monitor = monitor_db.PidfileRunMonitor(self.results_dir)
+ def setUp(self):
+ self.god = mock.mock_god()
+ self.god.stub_function(monitor_db, 'open')
+ self.god.stub_function(os.path, 'exists')
+ self.god.stub_function(monitor_db.email_manager,
+ 'enqueue_notify_email')
+ self.monitor = monitor_db.PidfileRunMonitor(self.results_dir)
- def tearDown(self):
- self.god.unstub_all()
+ def tearDown(self):
+ self.god.unstub_all()
- def set_not_yet_run(self):
- os.path.exists.expect_call(self.pidfile_path).and_return(False)
+ def set_not_yet_run(self):
+ os.path.exists.expect_call(self.pidfile_path).and_return(False)
- def setup_pidfile(self, pidfile_contents):
- os.path.exists.expect_call(self.pidfile_path).and_return(True)
- pidfile = StringIO.StringIO(pidfile_contents)
- monitor_db.open.expect_call(
- self.pidfile_path, 'r').and_return(pidfile)
+ def setup_pidfile(self, pidfile_contents):
+ os.path.exists.expect_call(self.pidfile_path).and_return(True)
+ pidfile = StringIO.StringIO(pidfile_contents)
+ monitor_db.open.expect_call(
+ self.pidfile_path, 'r').and_return(pidfile)
- def set_running(self):
- self.setup_pidfile(str(self.pid) + '\n')
+ def set_running(self):
+ self.setup_pidfile(str(self.pid) + '\n')
- def set_complete(self, error_code):
- self.setup_pidfile(str(self.pid) + '\n' +
- str(error_code) + '\n')
+ def set_complete(self, error_code):
+ self.setup_pidfile(str(self.pid) + '\n' +
+ str(error_code) + '\n')
- def _test_read_pidfile_helper(self, expected_pid, expected_exit_status):
- pid, exit_status = self.monitor.read_pidfile()
- self.assertEquals(pid, expected_pid)
- self.assertEquals(exit_status, expected_exit_status)
- self.god.check_playback()
+ def _test_read_pidfile_helper(self, expected_pid, expected_exit_status):
+ pid, exit_status = self.monitor.read_pidfile()
+ self.assertEquals(pid, expected_pid)
+ self.assertEquals(exit_status, expected_exit_status)
+ self.god.check_playback()
- def test_read_pidfile(self):
- self.set_not_yet_run()
- self._test_read_pidfile_helper(None, None)
+ def test_read_pidfile(self):
+ self.set_not_yet_run()
+ self._test_read_pidfile_helper(None, None)
- self.set_running()
- self._test_read_pidfile_helper(self.pid, None)
+ self.set_running()
+ self._test_read_pidfile_helper(self.pid, None)
- self.set_complete(123)
- self._test_read_pidfile_helper(self.pid, 123)
+ self.set_complete(123)
+ self._test_read_pidfile_helper(self.pid, 123)
- def test_read_pidfile_error(self):
- self.setup_pidfile('asdf')
- self.assertRaises(monitor_db.PidfileException,
- self.monitor.read_pidfile)
- self.god.check_playback()
+ def test_read_pidfile_error(self):
+ self.setup_pidfile('asdf')
+ self.assertRaises(monitor_db.PidfileException,
+ self.monitor.read_pidfile)
+ self.god.check_playback()
- def setup_proc_cmdline(self, args):
- proc_cmdline = args.replace(' ', '\x00')
- proc_file = StringIO.StringIO(proc_cmdline)
- monitor_db.open.expect_call(
- '/proc/%d/cmdline' % self.pid, 'r').and_return(proc_file)
+ def setup_proc_cmdline(self, args):
+ proc_cmdline = args.replace(' ', '\x00')
+ proc_file = StringIO.StringIO(proc_cmdline)
+ monitor_db.open.expect_call(
+ '/proc/%d/cmdline' % self.pid, 'r').and_return(proc_file)
- def setup_find_autoservs(self, process_dict):
- self.god.stub_class_method(monitor_db.Dispatcher,
- 'find_autoservs')
- monitor_db.Dispatcher.find_autoservs.expect_call().and_return(
- process_dict)
+ def setup_find_autoservs(self, process_dict):
+ self.god.stub_class_method(monitor_db.Dispatcher,
+ 'find_autoservs')
+ monitor_db.Dispatcher.find_autoservs.expect_call().and_return(
+ process_dict)
- def _test_get_pidfile_info_helper(self, expected_pid,
- expected_exit_status):
- pid, exit_status = self.monitor.get_pidfile_info()
- self.assertEquals(pid, expected_pid)
- self.assertEquals(exit_status, expected_exit_status)
- self.god.check_playback()
+ def _test_get_pidfile_info_helper(self, expected_pid,
+ expected_exit_status):
+ pid, exit_status = self.monitor.get_pidfile_info()
+ self.assertEquals(pid, expected_pid)
+ self.assertEquals(exit_status, expected_exit_status)
+ self.god.check_playback()
- def test_get_pidfile_info(self):
- 'normal cases for get_pidfile_info'
- # running
- self.set_running()
- self.setup_proc_cmdline(self.args)
- self._test_get_pidfile_info_helper(self.pid, None)
+ def test_get_pidfile_info(self):
+ 'normal cases for get_pidfile_info'
+ # running
+ self.set_running()
+ self.setup_proc_cmdline(self.args)
+ self._test_get_pidfile_info_helper(self.pid, None)
- # exited during check
- self.set_running()
- monitor_db.open.expect_call(
- '/proc/%d/cmdline' % self.pid, 'r').and_raises(IOError)
- self.set_complete(123) # pidfile gets read again
- self._test_get_pidfile_info_helper(self.pid, 123)
+ # exited during check
+ self.set_running()
+ monitor_db.open.expect_call(
+ '/proc/%d/cmdline' % self.pid, 'r').and_raises(IOError)
+ self.set_complete(123) # pidfile gets read again
+ self._test_get_pidfile_info_helper(self.pid, 123)
- # completed
- self.set_complete(123)
- self._test_get_pidfile_info_helper(self.pid, 123)
+ # completed
+ self.set_complete(123)
+ self._test_get_pidfile_info_helper(self.pid, 123)
- def test_get_pidfile_info_running_no_proc(self):
- 'pidfile shows process running, but no proc exists'
- # running but no proc
- self.set_running()
- monitor_db.open.expect_call(
- '/proc/%d/cmdline' % self.pid, 'r').and_raises(IOError)
- self.set_running()
- monitor_db.email_manager.enqueue_notify_email.expect_call(
- mock.is_string_comparator(), mock.is_string_comparator())
- self._test_get_pidfile_info_helper(self.pid, 1)
- self.assertTrue(self.monitor.lost_process)
+ def test_get_pidfile_info_running_no_proc(self):
+ 'pidfile shows process running, but no proc exists'
+ # running but no proc
+ self.set_running()
+ monitor_db.open.expect_call(
+ '/proc/%d/cmdline' % self.pid, 'r').and_raises(IOError)
+ self.set_running()
+ monitor_db.email_manager.enqueue_notify_email.expect_call(
+ mock.is_string_comparator(), mock.is_string_comparator())
+ self._test_get_pidfile_info_helper(self.pid, 1)
+ self.assertTrue(self.monitor.lost_process)
- def test_get_pidfile_info_not_yet_run(self):
- "pidfile hasn't been written yet"
- # process not running
- self.set_not_yet_run()
- self.setup_find_autoservs({})
- self._test_get_pidfile_info_helper(None, None)
+ def test_get_pidfile_info_not_yet_run(self):
+ "pidfile hasn't been written yet"
+ # process not running
+ self.set_not_yet_run()
+ self.setup_find_autoservs({})
+ self._test_get_pidfile_info_helper(None, None)
- # process running
- self.set_not_yet_run()
- self.setup_find_autoservs({self.pid : self.args})
- self._test_get_pidfile_info_helper(None, None)
+ # process running
+ self.set_not_yet_run()
+ self.setup_find_autoservs({self.pid : self.args})
+ self._test_get_pidfile_info_helper(None, None)
- # another process running under same pid
- self.set_not_yet_run()
- self.setup_find_autoservs({self.pid : self.bad_args})
- self._test_get_pidfile_info_helper(None, None)
+ # another process running under same pid
+ self.set_not_yet_run()
+ self.setup_find_autoservs({self.pid : self.bad_args})
+ self._test_get_pidfile_info_helper(None, None)
class AgentTest(unittest.TestCase):
- def setUp(self):
- self.god = mock.mock_god()
+ def setUp(self):
+ self.god = mock.mock_god()
- def tearDown(self):
- self.god.unstub_all()
+ def tearDown(self):
+ self.god.unstub_all()
- def test_agent(self):
- task1 = self.god.create_mock_class(monitor_db.AgentTask,
- 'task1')
- task2 = self.god.create_mock_class(monitor_db.AgentTask,
- 'task2')
- task3 = self.god.create_mock_class(monitor_db.AgentTask,
- 'task3')
+ def test_agent(self):
+ task1 = self.god.create_mock_class(monitor_db.AgentTask,
+ 'task1')
+ task2 = self.god.create_mock_class(monitor_db.AgentTask,
+ 'task2')
+ task3 = self.god.create_mock_class(monitor_db.AgentTask,
+ 'task3')
- task1.start.expect_call()
- task1.is_done.expect_call().and_return(False)
- task1.poll.expect_call()
- task1.is_done.expect_call().and_return(True)
- task1.is_done.expect_call().and_return(True)
- task1.success = True
+ task1.start.expect_call()
+ task1.is_done.expect_call().and_return(False)
+ task1.poll.expect_call()
+ task1.is_done.expect_call().and_return(True)
+ task1.is_done.expect_call().and_return(True)
+ task1.success = True
- task2.start.expect_call()
- task2.is_done.expect_call().and_return(True)
- task2.is_done.expect_call().and_return(True)
- task2.success = False
- task2.failure_tasks = [task3]
+ task2.start.expect_call()
+ task2.is_done.expect_call().and_return(True)
+ task2.is_done.expect_call().and_return(True)
+ task2.success = False
+ task2.failure_tasks = [task3]
- task3.start.expect_call()
- task3.is_done.expect_call().and_return(True)
- task3.is_done.expect_call().and_return(True)
- task3.success = True
+ task3.start.expect_call()
+ task3.is_done.expect_call().and_return(True)
+ task3.is_done.expect_call().and_return(True)
+ task3.success = True
- agent = monitor_db.Agent([task1, task2])
- agent.dispatcher = object()
- agent.start()
- while not agent.is_done():
- agent.tick()
- self.god.check_playback()
+ agent = monitor_db.Agent([task1, task2])
+ agent.dispatcher = object()
+ agent.start()
+ while not agent.is_done():
+ agent.tick()
+ self.god.check_playback()
class AgentTasksTest(unittest.TestCase):
- TEMP_DIR = '/temp/dir'
- HOSTNAME = 'myhost'
-
- def setUp(self):
- self.god = mock.mock_god()
- self.god.stub_with(tempfile, 'mkdtemp',
- mock.mock_function('mkdtemp', self.TEMP_DIR))
- self.god.stub_class_method(monitor_db.RunMonitor, 'run')
- self.god.stub_class_method(monitor_db.RunMonitor, 'exit_code')
- self.host = self.god.create_mock_class(monitor_db.Host, 'host')
- self.host.hostname = self.HOSTNAME
- self.queue_entry = self.god.create_mock_class(
- monitor_db.HostQueueEntry, 'queue_entry')
- self.queue_entry.host = self.host
- self.queue_entry.meta_host = None
-
-
- def tearDown(self):
- self.god.unstub_all()
-
-
- def run_task(self, task, success):
- """
-		Do essentially what an Agent would do, but protect against
- infinite looping from test errors.
- """
- if not getattr(task, 'agent', None):
- task.agent = object()
- task.start()
- count = 0
- while not task.is_done():
- count += 1
- if count > 10:
- print 'Task failed to finish'
- # in case the playback has clues to why it
- # failed
- self.god.check_playback()
- self.fail()
- task.poll()
- self.assertEquals(task.success, success)
-
-
- def setup_run_monitor(self, exit_status):
- monitor_db.RunMonitor.run.expect_call()
- monitor_db.RunMonitor.exit_code.expect_call()
- monitor_db.RunMonitor.exit_code.expect_call().and_return(
- exit_status)
-
-
- def _test_repair_task_helper(self, success):
- self.host.set_status.expect_call('Repairing')
- if success:
- self.setup_run_monitor(0)
- self.host.set_status.expect_call('Ready')
- else:
- self.setup_run_monitor(1)
- self.host.set_status.expect_call('Repair Failed')
-
- task = monitor_db.RepairTask(self.host)
- self.run_task(task, success)
- self.assertEquals(task.monitor.cmd,
- ['autoserv', '-R', '-m', self.HOSTNAME, '-r',
- self.TEMP_DIR])
- self.god.check_playback()
-
-
- def test_repair_task(self):
- self._test_repair_task_helper(True)
- self._test_repair_task_helper(False)
-
-
- def test_repair_task_with_queue_entry(self):
- queue_entry = self.god.create_mock_class(
- monitor_db.HostQueueEntry, 'queue_entry')
- self.host.set_status.expect_call('Repairing')
- self.setup_run_monitor(1)
- self.host.set_status.expect_call('Repair Failed')
- queue_entry.handle_host_failure.expect_call()
-
- task = monitor_db.RepairTask(self.host, queue_entry)
- self.run_task(task, False)
- self.god.check_playback()
-
-
- def setup_verify_expects(self, success, use_queue_entry):
- if use_queue_entry:
- self.queue_entry.set_status.expect_call('Verifying')
- self.queue_entry.verify_results_dir.expect_call(
- ).and_return('/verify/results/dir')
- self.queue_entry.clear_results_dir.expect_call(
- '/verify/results/dir')
- self.host.set_status.expect_call('Verifying')
- if success:
- self.setup_run_monitor(0)
- self.host.set_status.expect_call('Ready')
- else:
- self.setup_run_monitor(1)
- if use_queue_entry:
- self.queue_entry.requeue.expect_call()
-
-
- def _test_verify_task_with_host_helper(self, success, use_queue_entry):
- self.setup_verify_expects(success, use_queue_entry)
-
- if use_queue_entry:
- task = monitor_db.VerifyTask(
- queue_entry=self.queue_entry)
- else:
- task = monitor_db.VerifyTask(host=self.host)
- self.run_task(task, success)
- self.assertEquals(task.monitor.cmd,
- ['autoserv', '-v', '-m', self.HOSTNAME, '-r',
- self.TEMP_DIR])
- self.god.check_playback()
-
-
- def test_verify_task_with_host(self):
- self._test_verify_task_with_host_helper(True, False)
- self._test_verify_task_with_host_helper(False, False)
-
-
- def test_verify_task_with_queue_entry(self):
- self._test_verify_task_with_host_helper(True, True)
- self._test_verify_task_with_host_helper(False, True)
-
-
- def test_verify_synchronous_task(self):
- job = self.god.create_mock_class(monitor_db.Job, 'job')
-
- self.setup_verify_expects(True, True)
- job.num_complete.expect_call().and_return(0)
- self.queue_entry.set_status.expect_call('Pending')
- job.is_ready.expect_call().and_return(True)
- job.run.expect_call(self.queue_entry)
- self.queue_entry.job = job
-
- task = monitor_db.VerifySynchronousTask(self.queue_entry)
- task.agent = Dummy()
- task.agent.dispatcher = Dummy()
- self.god.stub_with(task.agent.dispatcher, 'add_agent',
- mock.mock_function('add_agent'))
- self.run_task(task, True)
- self.god.check_playback()
+ TEMP_DIR = '/temp/dir'
+ HOSTNAME = 'myhost'
+
+ def setUp(self):
+ self.god = mock.mock_god()
+ self.god.stub_with(tempfile, 'mkdtemp',
+ mock.mock_function('mkdtemp', self.TEMP_DIR))
+ self.god.stub_class_method(monitor_db.RunMonitor, 'run')
+ self.god.stub_class_method(monitor_db.RunMonitor, 'exit_code')
+ self.host = self.god.create_mock_class(monitor_db.Host, 'host')
+ self.host.hostname = self.HOSTNAME
+ self.queue_entry = self.god.create_mock_class(
+ monitor_db.HostQueueEntry, 'queue_entry')
+ self.queue_entry.host = self.host
+ self.queue_entry.meta_host = None
+
+
+ def tearDown(self):
+ self.god.unstub_all()
+
+
+ def run_task(self, task, success):
+ """
+        Do essentially what an Agent would do, but protect against
+ infinite looping from test errors.
+ """
+ if not getattr(task, 'agent', None):
+ task.agent = object()
+ task.start()
+ count = 0
+ while not task.is_done():
+ count += 1
+ if count > 10:
+ print 'Task failed to finish'
+ # in case the playback has clues to why it
+ # failed
+ self.god.check_playback()
+ self.fail()
+ task.poll()
+ self.assertEquals(task.success, success)
+
+
+ def setup_run_monitor(self, exit_status):
+ monitor_db.RunMonitor.run.expect_call()
+ monitor_db.RunMonitor.exit_code.expect_call()
+ monitor_db.RunMonitor.exit_code.expect_call().and_return(
+ exit_status)
+
+
+ def _test_repair_task_helper(self, success):
+ self.host.set_status.expect_call('Repairing')
+ if success:
+ self.setup_run_monitor(0)
+ self.host.set_status.expect_call('Ready')
+ else:
+ self.setup_run_monitor(1)
+ self.host.set_status.expect_call('Repair Failed')
+
+ task = monitor_db.RepairTask(self.host)
+ self.run_task(task, success)
+ self.assertEquals(task.monitor.cmd,
+ ['autoserv', '-R', '-m', self.HOSTNAME, '-r',
+ self.TEMP_DIR])
+ self.god.check_playback()
+
+
+ def test_repair_task(self):
+ self._test_repair_task_helper(True)
+ self._test_repair_task_helper(False)
+
+
+ def test_repair_task_with_queue_entry(self):
+ queue_entry = self.god.create_mock_class(
+ monitor_db.HostQueueEntry, 'queue_entry')
+ self.host.set_status.expect_call('Repairing')
+ self.setup_run_monitor(1)
+ self.host.set_status.expect_call('Repair Failed')
+ queue_entry.handle_host_failure.expect_call()
+
+ task = monitor_db.RepairTask(self.host, queue_entry)
+ self.run_task(task, False)
+ self.god.check_playback()
+
+
+ def setup_verify_expects(self, success, use_queue_entry):
+ if use_queue_entry:
+ self.queue_entry.set_status.expect_call('Verifying')
+ self.queue_entry.verify_results_dir.expect_call(
+ ).and_return('/verify/results/dir')
+ self.queue_entry.clear_results_dir.expect_call(
+ '/verify/results/dir')
+ self.host.set_status.expect_call('Verifying')
+ if success:
+ self.setup_run_monitor(0)
+ self.host.set_status.expect_call('Ready')
+ else:
+ self.setup_run_monitor(1)
+ if use_queue_entry:
+ self.queue_entry.requeue.expect_call()
+
+
+ def _test_verify_task_with_host_helper(self, success, use_queue_entry):
+ self.setup_verify_expects(success, use_queue_entry)
+
+ if use_queue_entry:
+ task = monitor_db.VerifyTask(
+ queue_entry=self.queue_entry)
+ else:
+ task = monitor_db.VerifyTask(host=self.host)
+ self.run_task(task, success)
+ self.assertEquals(task.monitor.cmd,
+ ['autoserv', '-v', '-m', self.HOSTNAME, '-r',
+ self.TEMP_DIR])
+ self.god.check_playback()
+
+
+ def test_verify_task_with_host(self):
+ self._test_verify_task_with_host_helper(True, False)
+ self._test_verify_task_with_host_helper(False, False)
+
+
+ def test_verify_task_with_queue_entry(self):
+ self._test_verify_task_with_host_helper(True, True)
+ self._test_verify_task_with_host_helper(False, True)
+
+
+ def test_verify_synchronous_task(self):
+ job = self.god.create_mock_class(monitor_db.Job, 'job')
+
+ self.setup_verify_expects(True, True)
+ job.num_complete.expect_call().and_return(0)
+ self.queue_entry.set_status.expect_call('Pending')
+ job.is_ready.expect_call().and_return(True)
+ job.run.expect_call(self.queue_entry)
+ self.queue_entry.job = job
+
+ task = monitor_db.VerifySynchronousTask(self.queue_entry)
+ task.agent = Dummy()
+ task.agent.dispatcher = Dummy()
+ self.god.stub_with(task.agent.dispatcher, 'add_agent',
+ mock.mock_function('add_agent'))
+ self.run_task(task, True)
+ self.god.check_playback()
if __name__ == '__main__':
- unittest.main()
+ unittest.main()
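The tests above follow a record/replay discipline: stub a dependency with mock_god, declare the calls the code under test is expected to make, exercise it, then verify with check_playback(). A minimal sketch, reduced to the first case of test_read_pidfile(); the import paths are assumptions, everything else uses only the mock_god calls that appear in the tests themselves:

    import os
    from autotest_lib.client.common_lib.test_utils import mock   # assumed import path
    from autotest_lib.scheduler import monitor_db                # assumed import path

    god = mock.mock_god()
    god.stub_function(os.path, 'exists')

    # record: the pidfile does not exist yet
    pidfile_path = os.path.join('/test/path', monitor_db.AUTOSERV_PID_FILE)
    os.path.exists.expect_call(pidfile_path).and_return(False)

    # replay: exercise the code under test
    monitor = monitor_db.PidfileRunMonitor('/test/path')
    pid, exit_status = monitor.read_pidfile()        # expected to be (None, None)

    # verify: fail unless the recorded expectations were matched exactly
    god.check_playback()
    god.unstub_all()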
diff --git a/scheduler/monitor_queue b/scheduler/monitor_queue
index df058b4f..74eaca15 100755
--- a/scheduler/monitor_queue
+++ b/scheduler/monitor_queue
@@ -5,35 +5,35 @@ from subprocess import *
import tempfile
if (len(sys.argv) < 3):
- print "Usage: monitor_queue <spool_directory> <resultsdir> [<conmux_server>]"
- sys.exit(1)
+ print "Usage: monitor_queue <spool_directory> <resultsdir> [<conmux_server>]"
+ sys.exit(1)
(spooldir, resultsdir) = [os.path.abspath(p) for p in sys.argv[1:3]]
queue_name = os.path.basename(spooldir)
dotmachines = os.path.join(spooldir, '.machines')
if os.path.exists(dotmachines):
- machines = [l.strip() for l in open(dotmachines).readlines() if len(l.strip())]
+ machines = [l.strip() for l in open(dotmachines).readlines() if len(l.strip())]
else:
- print "No .machines file in %s, assuming queue name is a machine"\
- % queue_name
- machines = [queue_name]
+ print "No .machines file in %s, assuming queue name is a machine"\
+ % queue_name
+ machines = [queue_name]
if len(sys.argv) == 5:
- console = sys.argv[4]
+ console = sys.argv[4]
else:
- console = None
+ console = None
if not os.path.exists(spooldir):
- print "spooldir %s does not exist" % spooldir
- sys.exit(1)
+ print "spooldir %s does not exist" % spooldir
+ sys.exit(1)
if not os.path.exists(resultsdir):
- print "resultsdir %s does not exist" % resultsdir
- sys.exit(1)
+ print "resultsdir %s does not exist" % resultsdir
+ sys.exit(1)
##### Control file templates #####
SERV_MULTI = """# monitor_queue generated autoserv file (SERV_MULTI template)
hosts = [hosts.SSHHost(hostname, server=%s)
- for hostname in machines]
+ for hostname in machines]
at = autotest.Autotest()
@@ -41,16 +41,16 @@ control_path = %s
results = %s
def install_run(host):
- at.install(host)
- host_results = os.path.join(results, host.hostname)
- at.run(control_path, host_results, host)
+ at.install(host)
+ host_results = os.path.join(results, host.hostname)
+ at.run(control_path, host_results, host)
parallel([subcommand(install_run, [host]) for host in hosts])"""
SERV_SINGLE = """# monitor_queue generated autoserv file (SERV_SINGLE template)
host = hosts.SSHHost(machines[0], server=%s)
-
+
at = autotest.Autotest()
control_path = %s
@@ -62,99 +62,99 @@ at.run(control_path, results, host)"""
##### End control file templates #####
def pick_job(jobs):
- """Pick the next job to run. Currently we just pick the oldest job
- However, this would be the place to put prioritizations."""
- if not jobs:
- return None
- return sorted(jobs, key=lambda x:os.stat(x).st_mtime, reverse=True)[0]
+ """Pick the next job to run. Currently we just pick the oldest job
+ However, this would be the place to put prioritizations."""
+ if not jobs:
+ return None
+ return sorted(jobs, key=lambda x:os.stat(x).st_mtime, reverse=True)[0]
def __create_autoserv_wrapper(template, control_path, results):
- """Create an autoserv file that runs an autotest file at
- control_path on clients and outputs the results in results."""
- # Create an autoserv control file to run this autotest control file
- tmpfd, tmpname = tempfile.mkstemp()
- tmp = os.fdopen(tmpfd, 'w')
-
- print >> tmp, template % tuple([repr(s) for s in (console,
- control_path,
- results)])
- return tmpname
-
+ """Create an autoserv file that runs an autotest file at
+ control_path on clients and outputs the results in results."""
+ # Create an autoserv control file to run this autotest control file
+ tmpfd, tmpname = tempfile.mkstemp()
+ tmp = os.fdopen(tmpfd, 'w')
+
+ print >> tmp, template % tuple([repr(s) for s in (console,
+ control_path,
+ results)])
+ return tmpname
+
def run_job(control, queuename, scheduler_dir):
- """Runs a control file from the spooldir.
- Args:
- control: A path to a control file. It is assumed to be an
- Autotest control file in which case it will automatically
- be wrapped with autoserv control commands and run with
- autoserv. If the file name ends with .srv the wrapping
- procedure will be skipped and the autoserv file will be
- run directly.
-
- Return:
- The return code from the autoserv process.
- """
-	# Make sure all the output directories are set up
- results = os.path.join(resultsdir, queuename + '-' + control)
- if os.path.exists(results):
- print "Resultsdir %s already present, " % results,
- results = "%s.%d" % (results, int(time.time()))
- print "changing to %s" % results
- os.mkdir(results)
- debug = os.path.join(results, 'debug')
- os.mkdir(debug)
-
- # If this is an autoserv file then don't create the wrapper control
- is_autoserv_ctl = control.endswith('.srv')
- control_path = os.path.abspath(os.path.join(spooldir, control))
- # Otherwise create a tmp autoserv file just to launch the AT ctl file
- if not is_autoserv_ctl:
- if len(machines) > 1:
- # Run autotest file on *all* machines in *parallel*
- template = SERV_MULTI
- else:
- # Run autotest file on *one* machine
- template = SERV_SINGLE
- control_path = __create_autoserv_wrapper(template,
- control_path,
- results)
-
- # Now run the job
- autoserv_exe = os.path.join(scheduler_dir, '..', 'server', 'autoserv')
- autoserv_exe = os.path.abspath(autoserv_exe)
-
- autoserv_cmd = ' '.join([autoserv_exe, '-m', ','.join(machines),
- control_path])
-
- print "Starting job: %s" % control
- print autoserv_cmd
-
- open(os.path.join(debug, 'autoserv.cmd'), 'w', 0).write(autoserv_cmd + '\n')
- autoserv_stdout = open(os.path.join(debug, 'autoserv.stdout'), 'w', 0)
- autoserv_stderr = open(os.path.join(debug, 'autoserv.stderr'), 'w', 0)
- p = Popen(autoserv_cmd, shell=True, stdout=autoserv_stdout,
- stderr=autoserv_stderr, cwd=results)
- (pid, ret) = os.waitpid(p.pid, 0)
- autoserv_stdout.close()
- autoserv_stderr.close()
-
- # If this was a tempfile then clean it up
- if not is_autoserv_ctl:
- os.unlink(control_path)
- print "Completed job: %s (%d) " % (control, ret)
-
- return ret
-
+ """Runs a control file from the spooldir.
+ Args:
+ control: A path to a control file. It is assumed to be an
+ Autotest control file in which case it will automatically
+ be wrapped with autoserv control commands and run with
+ autoserv. If the file name ends with .srv the wrapping
+ procedure will be skipped and the autoserv file will be
+ run directly.
+
+ Return:
+ The return code from the autoserv process.
+ """
+    # Make sure all the output directories are set up
+ results = os.path.join(resultsdir, queuename + '-' + control)
+ if os.path.exists(results):
+ print "Resultsdir %s already present, " % results,
+ results = "%s.%d" % (results, int(time.time()))
+ print "changing to %s" % results
+ os.mkdir(results)
+ debug = os.path.join(results, 'debug')
+ os.mkdir(debug)
+
+ # If this is an autoserv file then don't create the wrapper control
+ is_autoserv_ctl = control.endswith('.srv')
+ control_path = os.path.abspath(os.path.join(spooldir, control))
+ # Otherwise create a tmp autoserv file just to launch the AT ctl file
+ if not is_autoserv_ctl:
+ if len(machines) > 1:
+ # Run autotest file on *all* machines in *parallel*
+ template = SERV_MULTI
+ else:
+ # Run autotest file on *one* machine
+ template = SERV_SINGLE
+ control_path = __create_autoserv_wrapper(template,
+ control_path,
+ results)
+
+ # Now run the job
+ autoserv_exe = os.path.join(scheduler_dir, '..', 'server', 'autoserv')
+ autoserv_exe = os.path.abspath(autoserv_exe)
+
+ autoserv_cmd = ' '.join([autoserv_exe, '-m', ','.join(machines),
+ control_path])
+
+ print "Starting job: %s" % control
+ print autoserv_cmd
+
+ open(os.path.join(debug, 'autoserv.cmd'), 'w', 0).write(autoserv_cmd + '\n')
+ autoserv_stdout = open(os.path.join(debug, 'autoserv.stdout'), 'w', 0)
+ autoserv_stderr = open(os.path.join(debug, 'autoserv.stderr'), 'w', 0)
+ p = Popen(autoserv_cmd, shell=True, stdout=autoserv_stdout,
+ stderr=autoserv_stderr, cwd=results)
+ (pid, ret) = os.waitpid(p.pid, 0)
+ autoserv_stdout.close()
+ autoserv_stderr.close()
+
+ # If this was a tempfile then clean it up
+ if not is_autoserv_ctl:
+ os.unlink(control_path)
+ print "Completed job: %s (%d) " % (control, ret)
+
+ return ret
+
scheduler_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
os.chdir(spooldir)
print "monitoring spool directory: " + spooldir
while True:
- jobs = [j for j in os.listdir(spooldir) if not j.startswith('.')]
- next_job = pick_job(jobs)
- if not next_job:
- time.sleep(10)
- continue
- ret = run_job(next_job, os.path.basename(spooldir), scheduler_dir)
- os.remove(next_job)
+ jobs = [j for j in os.listdir(spooldir) if not j.startswith('.')]
+ next_job = pick_job(jobs)
+ if not next_job:
+ time.sleep(10)
+ continue
+ ret = run_job(next_job, os.path.basename(spooldir), scheduler_dir)
+ os.remove(next_job)
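__create_autoserv_wrapper() above turns a queued autotest control file into a throwaway autoserv control file by %-substituting repr()'d values into one of the two templates. A sketch of that substitution for a single-machine queue, with hypothetical paths:

    # Hypothetical values for one queued job on one machine.
    console = None                                   # no conmux server argument was given
    control_path = '/var/spool/autotest/mymachine/sleeptest'
    results = '/var/results/mymachine-sleeptest'

    wrapper_text = SERV_SINGLE % tuple([repr(s) for s in (console, control_path, results)])
    # Each %s in the template receives the repr() of its value, so the generated
    # wrapper contains literal lines such as:
    #   host = hosts.SSHHost(machines[0], server=None)
    #   control_path = '/var/spool/autotest/mymachine/sleeptest'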
diff --git a/scheduler/start_all_queues b/scheduler/start_all_queues
index ba1b219b..d3bbf34e 100755
--- a/scheduler/start_all_queues
+++ b/scheduler/start_all_queues
@@ -6,8 +6,8 @@ dir = os.path.abspath(os.path.dirname(sys.argv[0]))
monitor_queue = os.path.join(dir, 'monitor_queue')
if len(sys.argv) < 2:
- print "Usage: start_all_queues <top_level_dir>"
- sys.exit(1)
+ print "Usage: start_all_queues <top_level_dir>"
+ sys.exit(1)
os.system("killall monitor_queue")
@@ -16,20 +16,20 @@ queue_dir = os.path.join(top_dir, 'queue')
results_dir = os.path.join(top_dir, 'results')
for machine in os.listdir(queue_dir):
- queue = os.path.join(queue_dir, machine)
- if not os.path.isdir(queue):
- continue
-
- if not os.path.exists(results_dir):
- print "No results directory: %s" % results_dir
- sys.exit(1)
-
- console_path = "/usr/local/conmux/etc/%s.cf" % machine
- if os.path.exists(console_path):
- console = machine
- else:
- console = ''
-
- cmd = ' '.join([monitor_queue, queue, results_dir, console])
- print cmd
- os.system("nohup %s > %s/.log 2>&1 &" % (cmd,queue))
+ queue = os.path.join(queue_dir, machine)
+ if not os.path.isdir(queue):
+ continue
+
+ if not os.path.exists(results_dir):
+ print "No results directory: %s" % results_dir
+ sys.exit(1)
+
+ console_path = "/usr/local/conmux/etc/%s.cf" % machine
+ if os.path.exists(console_path):
+ console = machine
+ else:
+ console = ''
+
+ cmd = ' '.join([monitor_queue, queue, results_dir, console])
+ print cmd
+ os.system("nohup %s > %s/.log 2>&1 &" % (cmd,queue))
diff --git a/server/autoserv b/server/autoserv
index bf7dfa17..bab1a585 100755
--- a/server/autoserv
+++ b/server/autoserv
@@ -18,155 +18,155 @@ from autotest_lib.server import autoserv_parser
class PidFileManager(object):
- pid_file = None
+ pid_file = None
- def open_pid_file(self, results_dir):
- pid_file_path = os.path.join(results_dir, '.autoserv_execute')
- assert not os.path.exists(pid_file_path)
- self.pid_file = open(pid_file_path, 'w')
- self.pid_file.write(str(os.getpid()) + '\n')
- self.pid_file.flush()
+ def open_pid_file(self, results_dir):
+ pid_file_path = os.path.join(results_dir, '.autoserv_execute')
+ assert not os.path.exists(pid_file_path)
+ self.pid_file = open(pid_file_path, 'w')
+ self.pid_file.write(str(os.getpid()) + '\n')
+ self.pid_file.flush()
- def close_pid_file(self, exit_code, signal_code=0):
- if not self.pid_file:
- return
- real_exit_code = (exit_code << 8) | (signal_code & 0xFF)
- self.pid_file.write(str(real_exit_code) + '\n')
- self.pid_file.close()
- self.pid_file = None
+ def close_pid_file(self, exit_code, signal_code=0):
+ if not self.pid_file:
+ return
+ real_exit_code = (exit_code << 8) | (signal_code & 0xFF)
+ self.pid_file.write(str(real_exit_code) + '\n')
+ self.pid_file.close()
+ self.pid_file = None
def run_autoserv(pid_file_manager, results, parser):
- # send stdin to /dev/null
- dev_null = os.open(os.devnull, os.O_RDONLY)
- os.dup2(dev_null, sys.stdin.fileno())
- os.close(dev_null)
-
- # Create separate process group
- os.setpgrp()
-
- # Implement SIGTERM handler
- def handle_sigint(signum, frame):
- pid_file_manager.close_pid_file(1, signal.SIGTERM)
- os.killpg(os.getpgrp(), signal.SIGKILL)
-
- # Set signal handler
- signal.signal(signal.SIGTERM, handle_sigint)
-
- # Get a useful value for running 'USER'
- realuser = os.environ.get('USER')
- if not realuser:
- realuser = 'anonymous'
-
- machines = parser.options.machines.split(',')
- machines_file = parser.options.machines_file
- label = parser.options.label
- user = parser.options.user
- client = parser.options.client
- reboot = parser.options.reboot
- install_before = parser.options.install_before
- install_after = parser.options.install_after
- verify = parser.options.verify
- repair = parser.options.repair
- no_tee = parser.options.no_tee
- parse_job = parser.options.parse_job
- ssh_user = parser.options.ssh_user
- ssh_port = parser.options.ssh_port
- ssh_pass = parser.options.ssh_pass
-
- if len(parser.args) < 1 and not verify and not repair:
- print parser.parser.print_help()
- sys.exit(1)
-
- # We have a control file unless it's just a verify/repair job
- if len(parser.args) > 0:
- control = parser.args[0]
- else:
- control = None
-
- if machines_file:
- machines = []
- for m in open(machines_file, 'r').readlines():
- # remove comments, spaces
- m = re.sub('#.*', '', m).strip()
- if m:
- machines.append(m)
- print "Read list of machines from file: %s" % machines_file
- print ','.join(machines)
-
- if machines:
- for machine in machines:
- if not machine or re.search('\s', machine):
- print "Invalid machine %s" % str(machine)
- sys.exit(1)
- machines = list(set(machines))
- machines.sort()
-
- job = server_job.server_job(control, parser.args[1:], results, label,
- user, machines, client, parse_job,
- ssh_user, ssh_port, ssh_pass)
- debug_dir = os.path.join(results, 'debug')
- stdout = os.path.join(debug_dir, 'autoserv.stdout')
- stderr = os.path.join(debug_dir, 'autoserv.stderr')
- if no_tee:
- job.stdout.redirect(stdout)
- job.stderr.redirect(stderr)
- else:
- job.stdout.tee_redirect(stdout)
- job.stderr.tee_redirect(stderr)
-
- # run the job
- exit_code = 0
- try:
- if repair:
- job.repair()
- elif verify:
- job.verify()
- else:
- try:
- job.run(reboot, install_before, install_after)
- finally:
- job.cleanup_parser()
- except:
- job.aborted = True
- traceback.print_exc()
-
- if getattr(job, 'aborted', False):
- sys.exit(1)
+ # send stdin to /dev/null
+ dev_null = os.open(os.devnull, os.O_RDONLY)
+ os.dup2(dev_null, sys.stdin.fileno())
+ os.close(dev_null)
+
+ # Create separate process group
+ os.setpgrp()
+
+ # Implement SIGTERM handler
+ def handle_sigint(signum, frame):
+ pid_file_manager.close_pid_file(1, signal.SIGTERM)
+ os.killpg(os.getpgrp(), signal.SIGKILL)
+
+ # Set signal handler
+ signal.signal(signal.SIGTERM, handle_sigint)
+
+ # Get a useful value for running 'USER'
+ realuser = os.environ.get('USER')
+ if not realuser:
+ realuser = 'anonymous'
+
+ machines = parser.options.machines.split(',')
+ machines_file = parser.options.machines_file
+ label = parser.options.label
+ user = parser.options.user
+ client = parser.options.client
+ reboot = parser.options.reboot
+ install_before = parser.options.install_before
+ install_after = parser.options.install_after
+ verify = parser.options.verify
+ repair = parser.options.repair
+ no_tee = parser.options.no_tee
+ parse_job = parser.options.parse_job
+ ssh_user = parser.options.ssh_user
+ ssh_port = parser.options.ssh_port
+ ssh_pass = parser.options.ssh_pass
+
+ if len(parser.args) < 1 and not verify and not repair:
+ print parser.parser.print_help()
+ sys.exit(1)
+
+ # We have a control file unless it's just a verify/repair job
+ if len(parser.args) > 0:
+ control = parser.args[0]
+ else:
+ control = None
+
+ if machines_file:
+ machines = []
+ for m in open(machines_file, 'r').readlines():
+ # remove comments, spaces
+ m = re.sub('#.*', '', m).strip()
+ if m:
+ machines.append(m)
+ print "Read list of machines from file: %s" % machines_file
+ print ','.join(machines)
+
+ if machines:
+ for machine in machines:
+ if not machine or re.search('\s', machine):
+ print "Invalid machine %s" % str(machine)
+ sys.exit(1)
+ machines = list(set(machines))
+ machines.sort()
+
+ job = server_job.server_job(control, parser.args[1:], results, label,
+ user, machines, client, parse_job,
+ ssh_user, ssh_port, ssh_pass)
+ debug_dir = os.path.join(results, 'debug')
+ stdout = os.path.join(debug_dir, 'autoserv.stdout')
+ stderr = os.path.join(debug_dir, 'autoserv.stderr')
+ if no_tee:
+ job.stdout.redirect(stdout)
+ job.stderr.redirect(stderr)
+ else:
+ job.stdout.tee_redirect(stdout)
+ job.stderr.tee_redirect(stderr)
+
+ # run the job
+ exit_code = 0
+ try:
+ if repair:
+ job.repair()
+ elif verify:
+ job.verify()
+ else:
+ try:
+ job.run(reboot, install_before, install_after)
+ finally:
+ job.cleanup_parser()
+ except:
+ job.aborted = True
+ traceback.print_exc()
+
+ if getattr(job, 'aborted', False):
+ sys.exit(1)
def main():
- pid_file_manager = PidFileManager()
-
- # grab the parser
- parser = autoserv_parser.autoserv_parser
-
- if len(sys.argv) == 1:
- parser.parser.print_help()
- sys.exit(1)
-
- results = parser.options.results
- results = os.path.abspath(results)
- write_pidfile = parser.options.write_pidfile
- if write_pidfile:
- pid_file_manager.open_pid_file(results)
-
- exit_code = 0
- try:
- try:
- run_autoserv(pid_file_manager, results, parser)
- except SystemExit, e:
- exit_code = e.code
- except:
- traceback.print_exc()
- # If we don't know what happened, we'll classify it as
- # an 'abort' and return 1.
- exit_code = 1
- finally:
- pid_file_manager.close_pid_file(exit_code)
- sys.exit(exit_code)
+ pid_file_manager = PidFileManager()
+
+ # grab the parser
+ parser = autoserv_parser.autoserv_parser
+
+ if len(sys.argv) == 1:
+ parser.parser.print_help()
+ sys.exit(1)
+
+ results = parser.options.results
+ results = os.path.abspath(results)
+ write_pidfile = parser.options.write_pidfile
+ if write_pidfile:
+ pid_file_manager.open_pid_file(results)
+
+ exit_code = 0
+ try:
+ try:
+ run_autoserv(pid_file_manager, results, parser)
+ except SystemExit, e:
+ exit_code = e.code
+ except:
+ traceback.print_exc()
+ # If we don't know what happened, we'll classify it as
+ # an 'abort' and return 1.
+ exit_code = 1
+ finally:
+ pid_file_manager.close_pid_file(exit_code)
+ sys.exit(exit_code)
if __name__ == '__main__':
- main()
+ main()
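
The rewritten autoserv entry point above pairs a SIGTERM handler with a pid-file manager, so an externally killed run still records an abort status before the whole process group is taken down. The following standalone sketch shows the same shape; PidFileStub and its file layout are invented for illustration and are not autoserv's PidFileManager.

    import os
    import signal

    class PidFileStub(object):
        """Illustrative stand-in for autoserv's PidFileManager."""
        def open_pid_file(self, results_dir):
            self.path = os.path.join(results_dir, '.autoserv_execute')
            with open(self.path, 'w') as f:
                f.write(str(os.getpid()))

        def close_pid_file(self, exit_code, signalled_by=None):
            # Record how the run ended next to the pid file.
            with open(self.path + '.exit', 'w') as f:
                f.write(str(exit_code))

    def install_sigterm_handler(pid_file_manager):
        # On SIGTERM, mark the run as aborted (exit code 1) and kill the
        # whole process group so any child ssh sessions die with us.
        def handler(signum, frame):
            pid_file_manager.close_pid_file(1, signal.SIGTERM)
            os.killpg(os.getpgrp(), signal.SIGKILL)
        signal.signal(signal.SIGTERM, handler)
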
diff --git a/server/autoserv_parser.py b/server/autoserv_parser.py
index 43e859f0..b32e8892 100644
--- a/server/autoserv_parser.py
+++ b/server/autoserv_parser.py
@@ -5,97 +5,97 @@ import os, sys, getopt, optparse
class base_autoserv_parser(object):
- """Custom command-line options parser for autoserv.
-
- We can't use the general getopt methods here, as there will be unknown
- extra arguments that we pass down into the control file instead.
- Thus we process the arguments by hand, for which we are duly repentant.
- Making a single function here just makes it harder to read. Suck it up.
- """
- def __init__(self):
- self.args = sys.argv[1:]
- self.parser = optparse.OptionParser()
- self.setup_options()
- self.parse_args()
-
-
- def setup_options(self):
- self.parser.add_option("-m", action="store", type="string",
- dest="machines",
- help="list of machines")
- self.parser.add_option("-M", action="store", type="string",
- dest="machines_file",
- help="list of machines from file")
- self.parser.add_option("-c", action="store_true",
- dest="client", default=False,
- help="control file is client side")
- self.parser.add_option("-r", action="store", type="string",
- dest="results", default='.',
- help="specify results directory")
- self.parser.add_option("-l", action="store", type="string",
- dest="label", default='',
- help="label for the job")
- self.parser.add_option("-u", action="store", type="string",
- dest="user",
- default=os.environ.get('USER'),
- help="username for the job")
- self.parser.add_option("-P", action="store", type="string",
- dest="parse_job",
- default='',
- help="parse the results of the job")
- self.parser.add_option("-i", action="store_true",
- dest="install_before", default=False,
- help="reinstall machines before running the job")
- self.parser.add_option("-I", action="store_true",
- dest="install_after", default=False,
- help="reinstall machines after running the job")
- self.parser.add_option("-b", action="store_true",
- dest="reboot", default=False,
- help="reboot all machines after job")
- self.parser.add_option("-v", action="store_true",
- dest="verify", default=False,
- help="verify the machines only")
- self.parser.add_option("-R", action="store_true",
- dest="repair", default=False,
- help="repair the machines")
- self.parser.add_option("-n", action="store_true",
- dest="no_tee", default=False,
- help="no teeing the status to stdout/err")
- self.parser.add_option("-p", action="store_true",
- dest="write_pidfile", default=False,
- help="write pidfile (.autoserv_execute)")
- self.parser.add_option("--ssh-user", action="store",
- type="string", dest="ssh_user",
- default="root",
- help=("specify the user for ssh"
- "connections"))
- self.parser.add_option("--ssh-port", action="store",
- type="int", dest="ssh_port",
- default=22,
- help=("specify the port to use for "
- "ssh connections"))
- self.parser.add_option("--ssh-pass", action="store",
- type="string", dest="ssh_pass",
- default="",
- help=("specify the password to use "
- "for ssh connections"))
-
-
- def parse_args(self):
- (self.options, self.args) = self.parser.parse_args()
+ """Custom command-line options parser for autoserv.
+
+ We can't use the general getopt methods here, as there will be unknown
+ extra arguments that we pass down into the control file instead.
+ Thus we process the arguments by hand, for which we are duly repentant.
+ Making a single function here just makes it harder to read. Suck it up.
+ """
+ def __init__(self):
+ self.args = sys.argv[1:]
+ self.parser = optparse.OptionParser()
+ self.setup_options()
+ self.parse_args()
+
+
+ def setup_options(self):
+ self.parser.add_option("-m", action="store", type="string",
+ dest="machines",
+ help="list of machines")
+ self.parser.add_option("-M", action="store", type="string",
+ dest="machines_file",
+ help="list of machines from file")
+ self.parser.add_option("-c", action="store_true",
+ dest="client", default=False,
+ help="control file is client side")
+ self.parser.add_option("-r", action="store", type="string",
+ dest="results", default='.',
+ help="specify results directory")
+ self.parser.add_option("-l", action="store", type="string",
+ dest="label", default='',
+ help="label for the job")
+ self.parser.add_option("-u", action="store", type="string",
+ dest="user",
+ default=os.environ.get('USER'),
+ help="username for the job")
+ self.parser.add_option("-P", action="store", type="string",
+ dest="parse_job",
+ default='',
+ help="parse the results of the job")
+ self.parser.add_option("-i", action="store_true",
+ dest="install_before", default=False,
+ help="reinstall machines before running the job")
+ self.parser.add_option("-I", action="store_true",
+ dest="install_after", default=False,
+ help="reinstall machines after running the job")
+ self.parser.add_option("-b", action="store_true",
+ dest="reboot", default=False,
+ help="reboot all machines after job")
+ self.parser.add_option("-v", action="store_true",
+ dest="verify", default=False,
+ help="verify the machines only")
+ self.parser.add_option("-R", action="store_true",
+ dest="repair", default=False,
+ help="repair the machines")
+ self.parser.add_option("-n", action="store_true",
+ dest="no_tee", default=False,
+ help="no teeing the status to stdout/err")
+ self.parser.add_option("-p", action="store_true",
+ dest="write_pidfile", default=False,
+ help="write pidfile (.autoserv_execute)")
+ self.parser.add_option("--ssh-user", action="store",
+ type="string", dest="ssh_user",
+ default="root",
+ help=("specify the user for ssh"
+ "connections"))
+ self.parser.add_option("--ssh-port", action="store",
+ type="int", dest="ssh_port",
+ default=22,
+ help=("specify the port to use for "
+ "ssh connections"))
+ self.parser.add_option("--ssh-pass", action="store",
+ type="string", dest="ssh_pass",
+ default="",
+ help=("specify the password to use "
+ "for ssh connections"))
+
+
+ def parse_args(self):
+ (self.options, self.args) = self.parser.parse_args()
try:
- from autotest_lib.server.site_autoserv_parser \
- import site_autoserv_parser
+ from autotest_lib.server.site_autoserv_parser \
+ import site_autoserv_parser
except ImportError:
- class site_autoserv_parser(base_autoserv_parser):
- pass
+ class site_autoserv_parser(base_autoserv_parser):
+ pass
class autoserv_parser(site_autoserv_parser):
- pass
+ pass
# create the one and only one instance of autoserv_parser
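
base_autoserv_parser above wires every known flag into one optparse.OptionParser and keeps whatever positional arguments are left over in self.args, which later become the control file and its extra arguments. Below is a reduced, self-contained sketch of that split; only two of the real flags are reproduced.

    import optparse
    import sys

    def parse(argv):
        parser = optparse.OptionParser()
        parser.add_option("-m", dest="machines", help="list of machines")
        parser.add_option("-r", dest="results", default=".",
                          help="specify results directory")
        # Recognized flags land in options; leftover positional arguments
        # (the control file plus anything after it) stay in args.
        options, args = parser.parse_args(argv)
        return options, args

    if __name__ == "__main__":
        options, args = parse(sys.argv[1:])
        print("machines=%r results=%r control_args=%r"
              % (options.machines, options.results, args))
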
diff --git a/server/autotest.py b/server/autotest.py
index 57a80a11..21e519b1 100644
--- a/server/autotest.py
+++ b/server/autotest.py
@@ -5,7 +5,7 @@
"""
This module defines the Autotest class
- Autotest: software to run tests automatically
+ Autotest: software to run tests automatically
"""
__author__ = """
@@ -31,462 +31,462 @@ CRASH_RECOVERY_TIME = 9000
class BaseAutotest(installable_object.InstallableObject):
- """
- This class represents the Autotest program.
-
- Autotest is used to run tests automatically and collect the results.
- It also supports profilers.
-
- Implementation details:
- This is a leaf class in an abstract class hierarchy, it must
- implement the unimplemented methods in parent classes.
- """
- job = None
-
-
- def __init__(self, host = None):
- self.host = host
- self.got = False
- self.installed = False
- self.serverdir = utils.get_server_dir()
- super(BaseAutotest, self).__init__()
-
-
- @logging.record
- def install(self, host = None):
- """
- Install autotest. If get() was not called previously, an
- attempt will be made to install from the autotest svn
- repository.
-
- Args:
- host: a Host instance on which autotest will be
- installed
-
- Raises:
- AutoservError: if a tarball was not specified and
- the target host does not have svn installed in its path
-
- TODO(poirier): check dependencies
- autotest needs:
- bzcat
- liboptdev (oprofile)
- binutils-dev (oprofile)
- make
- psutils (netperf)
- """
- if not host:
- host = self.host
- if not self.got:
- self.get()
- host.wait_up(timeout=30)
- host.setup()
- print "Installing autotest on %s" % host.hostname
-
- # Let's try to figure out where autotest is installed. If we can't,
- # (autotest not installed) just assume '/usr/local/autotest' and
- # proceed.
- try:
- autodir = _get_autodir(host)
- except error.AutotestRunError:
- autodir = '/usr/local/autotest'
-
- host.run('mkdir -p "%s"' % utils.sh_escape(autodir))
-
- if getattr(host, 'site_install_autotest', None):
- if host.site_install_autotest():
- self.installed = True
- return
-
- # try to install from file or directory
- if self.source_material:
- if os.path.isdir(self.source_material):
- # Copy autotest recursively
- host.send_file(self.source_material, autodir)
- else:
- # Copy autotest via tarball
- e_msg = 'Installation method not yet implemented!'
- raise NotImplementedError(e_msg)
- print "Installation of autotest completed"
- self.installed = True
- return
-
- # if that fails try to install using svn
- if utils.run('which svn').exit_status:
- raise error.AutoservError('svn not found in path on \
- target machine: %s' % host.name)
- try:
- host.run('svn checkout %s %s' %
- (AUTOTEST_SVN, autodir))
- except error.AutoservRunError, e:
- host.run('svn checkout %s %s' %
- (AUTOTEST_HTTP, autodir))
- print "Installation of autotest completed"
- self.installed = True
-
-
- def get(self, location = None):
- if not location:
- location = os.path.join(self.serverdir, '../client')
- location = os.path.abspath(location)
- # If there's stuff run on our client directory already, it
- # can cause problems. Try giving it a quick clean first.
- cwd = os.getcwd()
- os.chdir(location)
- os.system('tools/make_clean')
- os.chdir(cwd)
- super(BaseAutotest, self).get(location)
- self.got = True
-
-
- def run(self, control_file, results_dir = '.', host = None,
- timeout=None, tag=None, parallel_flag=False):
- """
- Run an autotest job on the remote machine.
-
- Args:
- control_file: an open file-like-obj of the control file
- results_dir: a str path where the results should be stored
- on the local filesystem
- host: a Host instance on which the control file should
- be run
- tag: tag name for the client side instance of autotest
- parallel_flag: flag set when multiple jobs are run at the
- same time
- Raises:
- AutotestRunError: if there is a problem executing
- the control file
- """
- host = self._get_host_and_setup(host)
- results_dir = os.path.abspath(results_dir)
-
- if tag:
- results_dir = os.path.join(results_dir, tag)
-
- atrun = _Run(host, results_dir, tag, parallel_flag)
- self._do_run(control_file, results_dir, host, atrun, timeout)
-
-
- def _get_host_and_setup(self, host):
- if not host:
- host = self.host
- if not self.installed:
- self.install(host)
-
- host.wait_up(timeout=30)
- return host
-
-
- def prepare_for_copying_logs(self, src, dest, host):
- keyval_path = ''
- if not os.path.exists(os.path.join(dest, 'keyval')):
- # Client-side keyval file can be copied directly
- return keyval_path
- # Copy client-side keyval to temporary location
- try:
- try:
- # Create temp file
- fd, keyval_path = tempfile.mkstemp(
- '.keyval_%s' % host.hostname)
- host.get_file(os.path.join(src, 'keyval'),
- keyval_path)
- finally:
- # We will squirrel away the client side keyval
- # away and move it back when we are done
- self.temp_keyval_path = tempfile.mktemp()
- host.run('mv %s %s' %
- (os.path.join(src, 'keyval'),
- self.temp_keyval_path))
- except (error.AutoservRunError, error.AutoservSSHTimeout):
- print "Prepare for copying logs failed"
- return keyval_path
-
-
- def process_copied_logs(self, dest, host, keyval_path):
- if not os.path.exists(os.path.join(dest, 'keyval')):
- # Client-side keyval file was copied directly
- return
- # Append contents of keyval_<host> file to keyval file
- try:
- # Read in new and old keyval files
- new_keyval = utils.read_keyval(keyval_path)
- old_keyval = utils.read_keyval(dest)
- # 'Delete' from new keyval entries that are in both
- tmp_keyval = {}
- for key, val in new_keyval.iteritems():
- if key not in old_keyval:
- tmp_keyval[key] = val
- # Append new info to keyval file
- utils.write_keyval(dest, tmp_keyval)
- # Delete keyval_<host> file
- os.remove(keyval_path)
- except IOError:
- print "Process copied logs failed"
-
-
- def postprocess_copied_logs(self, src, host):
- # we can now put our keyval file back
- try:
- host.run('mv %s %s' % (self.temp_keyval_path,
- os.path.join(src, 'keyval')))
- except:
- pass
-
-
- def _do_run(self, control_file, results_dir, host, atrun, timeout):
- try:
- atrun.verify_machine()
- except:
- print "Verify machine failed on %s. Reinstalling" % \
- host.hostname
- self.install(host)
- atrun.verify_machine()
- debug = os.path.join(results_dir, 'debug')
- try:
- os.makedirs(debug)
- except:
- pass
-
- # Ready .... Aim ....
- for control in [atrun.remote_control_file,
- atrun.remote_control_file + '.state',
- atrun.manual_control_file,
- atrun.manual_control_file + '.state']:
- host.run('rm -f ' + control)
-
- # Copy control_file to remote_control_file on the host
- tmppath = utils.get(control_file)
- host.send_file(tmppath, atrun.remote_control_file)
- if os.path.abspath(tmppath) != os.path.abspath(control_file):
- os.remove(tmppath)
-
- try:
- atrun.execute_control(timeout=timeout)
- finally:
- # make an effort to wait for the machine to come up
- try:
- host.wait_up(timeout=30)
- except error.AutoservError:
- # don't worry about any errors, we'll try and
- # get the results anyway
- pass
-
- # get the results
- if not atrun.tag:
- results = os.path.join(atrun.autodir,
- 'results', 'default')
- else:
- results = os.path.join(atrun.autodir,
- 'results', atrun.tag)
-
- # Copy all dirs in default to results_dir
- keyval_path = self.prepare_for_copying_logs(results,
- results_dir, host)
- host.get_file(results + '/', results_dir)
- self.process_copied_logs(results_dir, host, keyval_path)
- self.postprocess_copied_logs(results, host)
-
-
- def run_timed_test(self, test_name, results_dir='.', host=None,
- timeout=None, tag=None, *args, **dargs):
- """
- Assemble a tiny little control file to just run one test,
- and run it as an autotest client-side test
- """
- if not host:
- host = self.host
- if not self.installed:
- self.install(host)
- opts = ["%s=%s" % (o[0], repr(o[1])) for o in dargs.items()]
- cmd = ", ".join([repr(test_name)] + map(repr, args) + opts)
- control = "job.run_test(%s)\n" % cmd
- self.run(control, results_dir, host, timeout=timeout, tag=tag)
-
-
- def run_test(self, test_name, results_dir='.', host=None,
- tag=None, *args, **dargs):
- self.run_timed_test(test_name, results_dir, host, timeout=None,
- tag=tag, *args, **dargs)
+ """
+ This class represents the Autotest program.
+
+ Autotest is used to run tests automatically and collect the results.
+ It also supports profilers.
+
+ Implementation details:
+ This is a leaf class in an abstract class hierarchy, it must
+ implement the unimplemented methods in parent classes.
+ """
+ job = None
+
+
+ def __init__(self, host = None):
+ self.host = host
+ self.got = False
+ self.installed = False
+ self.serverdir = utils.get_server_dir()
+ super(BaseAutotest, self).__init__()
+
+
+ @logging.record
+ def install(self, host = None):
+ """
+ Install autotest. If get() was not called previously, an
+ attempt will be made to install from the autotest svn
+ repository.
+
+ Args:
+ host: a Host instance on which autotest will be
+ installed
+
+ Raises:
+ AutoservError: if a tarball was not specified and
+ the target host does not have svn installed in its path
+
+ TODO(poirier): check dependencies
+ autotest needs:
+ bzcat
+ liboptdev (oprofile)
+ binutils-dev (oprofile)
+ make
+ psutils (netperf)
+ """
+ if not host:
+ host = self.host
+ if not self.got:
+ self.get()
+ host.wait_up(timeout=30)
+ host.setup()
+ print "Installing autotest on %s" % host.hostname
+
+ # Let's try to figure out where autotest is installed. If we can't,
+ # (autotest not installed) just assume '/usr/local/autotest' and
+ # proceed.
+ try:
+ autodir = _get_autodir(host)
+ except error.AutotestRunError:
+ autodir = '/usr/local/autotest'
+
+ host.run('mkdir -p "%s"' % utils.sh_escape(autodir))
+
+ if getattr(host, 'site_install_autotest', None):
+ if host.site_install_autotest():
+ self.installed = True
+ return
+
+ # try to install from file or directory
+ if self.source_material:
+ if os.path.isdir(self.source_material):
+ # Copy autotest recursively
+ host.send_file(self.source_material, autodir)
+ else:
+ # Copy autotest via tarball
+ e_msg = 'Installation method not yet implemented!'
+ raise NotImplementedError(e_msg)
+ print "Installation of autotest completed"
+ self.installed = True
+ return
+
+ # if that fails try to install using svn
+ if utils.run('which svn').exit_status:
+ raise error.AutoservError('svn not found in path on \
+ target machine: %s' % host.name)
+ try:
+ host.run('svn checkout %s %s' %
+ (AUTOTEST_SVN, autodir))
+ except error.AutoservRunError, e:
+ host.run('svn checkout %s %s' %
+ (AUTOTEST_HTTP, autodir))
+ print "Installation of autotest completed"
+ self.installed = True
+
+
+ def get(self, location = None):
+ if not location:
+ location = os.path.join(self.serverdir, '../client')
+ location = os.path.abspath(location)
+ # If there's stuff run on our client directory already, it
+ # can cause problems. Try giving it a quick clean first.
+ cwd = os.getcwd()
+ os.chdir(location)
+ os.system('tools/make_clean')
+ os.chdir(cwd)
+ super(BaseAutotest, self).get(location)
+ self.got = True
+
+
+ def run(self, control_file, results_dir = '.', host = None,
+ timeout=None, tag=None, parallel_flag=False):
+ """
+ Run an autotest job on the remote machine.
+
+ Args:
+ control_file: an open file-like-obj of the control file
+ results_dir: a str path where the results should be stored
+ on the local filesystem
+ host: a Host instance on which the control file should
+ be run
+ tag: tag name for the client side instance of autotest
+ parallel_flag: flag set when multiple jobs are run at the
+ same time
+ Raises:
+ AutotestRunError: if there is a problem executing
+ the control file
+ """
+ host = self._get_host_and_setup(host)
+ results_dir = os.path.abspath(results_dir)
+
+ if tag:
+ results_dir = os.path.join(results_dir, tag)
+
+ atrun = _Run(host, results_dir, tag, parallel_flag)
+ self._do_run(control_file, results_dir, host, atrun, timeout)
+
+
+ def _get_host_and_setup(self, host):
+ if not host:
+ host = self.host
+ if not self.installed:
+ self.install(host)
+
+ host.wait_up(timeout=30)
+ return host
+
+
+ def prepare_for_copying_logs(self, src, dest, host):
+ keyval_path = ''
+ if not os.path.exists(os.path.join(dest, 'keyval')):
+ # Client-side keyval file can be copied directly
+ return keyval_path
+ # Copy client-side keyval to temporary location
+ try:
+ try:
+ # Create temp file
+ fd, keyval_path = tempfile.mkstemp(
+ '.keyval_%s' % host.hostname)
+ host.get_file(os.path.join(src, 'keyval'),
+ keyval_path)
+ finally:
+ # We will squirrel away the client side keyval
+ # away and move it back when we are done
+ self.temp_keyval_path = tempfile.mktemp()
+ host.run('mv %s %s' %
+ (os.path.join(src, 'keyval'),
+ self.temp_keyval_path))
+ except (error.AutoservRunError, error.AutoservSSHTimeout):
+ print "Prepare for copying logs failed"
+ return keyval_path
+
+
+ def process_copied_logs(self, dest, host, keyval_path):
+ if not os.path.exists(os.path.join(dest, 'keyval')):
+ # Client-side keyval file was copied directly
+ return
+ # Append contents of keyval_<host> file to keyval file
+ try:
+ # Read in new and old keyval files
+ new_keyval = utils.read_keyval(keyval_path)
+ old_keyval = utils.read_keyval(dest)
+ # 'Delete' from new keyval entries that are in both
+ tmp_keyval = {}
+ for key, val in new_keyval.iteritems():
+ if key not in old_keyval:
+ tmp_keyval[key] = val
+ # Append new info to keyval file
+ utils.write_keyval(dest, tmp_keyval)
+ # Delete keyval_<host> file
+ os.remove(keyval_path)
+ except IOError:
+ print "Process copied logs failed"
+
+
+ def postprocess_copied_logs(self, src, host):
+ # we can now put our keyval file back
+ try:
+ host.run('mv %s %s' % (self.temp_keyval_path,
+ os.path.join(src, 'keyval')))
+ except:
+ pass
+
+
+ def _do_run(self, control_file, results_dir, host, atrun, timeout):
+ try:
+ atrun.verify_machine()
+ except:
+ print "Verify machine failed on %s. Reinstalling" % \
+ host.hostname
+ self.install(host)
+ atrun.verify_machine()
+ debug = os.path.join(results_dir, 'debug')
+ try:
+ os.makedirs(debug)
+ except:
+ pass
+
+ # Ready .... Aim ....
+ for control in [atrun.remote_control_file,
+ atrun.remote_control_file + '.state',
+ atrun.manual_control_file,
+ atrun.manual_control_file + '.state']:
+ host.run('rm -f ' + control)
+
+ # Copy control_file to remote_control_file on the host
+ tmppath = utils.get(control_file)
+ host.send_file(tmppath, atrun.remote_control_file)
+ if os.path.abspath(tmppath) != os.path.abspath(control_file):
+ os.remove(tmppath)
+
+ try:
+ atrun.execute_control(timeout=timeout)
+ finally:
+ # make an effort to wait for the machine to come up
+ try:
+ host.wait_up(timeout=30)
+ except error.AutoservError:
+ # don't worry about any errors, we'll try and
+ # get the results anyway
+ pass
+
+ # get the results
+ if not atrun.tag:
+ results = os.path.join(atrun.autodir,
+ 'results', 'default')
+ else:
+ results = os.path.join(atrun.autodir,
+ 'results', atrun.tag)
+
+ # Copy all dirs in default to results_dir
+ keyval_path = self.prepare_for_copying_logs(results,
+ results_dir, host)
+ host.get_file(results + '/', results_dir)
+ self.process_copied_logs(results_dir, host, keyval_path)
+ self.postprocess_copied_logs(results, host)
+
+
+ def run_timed_test(self, test_name, results_dir='.', host=None,
+ timeout=None, tag=None, *args, **dargs):
+ """
+ Assemble a tiny little control file to just run one test,
+ and run it as an autotest client-side test
+ """
+ if not host:
+ host = self.host
+ if not self.installed:
+ self.install(host)
+ opts = ["%s=%s" % (o[0], repr(o[1])) for o in dargs.items()]
+ cmd = ", ".join([repr(test_name)] + map(repr, args) + opts)
+ control = "job.run_test(%s)\n" % cmd
+ self.run(control, results_dir, host, timeout=timeout, tag=tag)
+
+
+ def run_test(self, test_name, results_dir='.', host=None,
+ tag=None, *args, **dargs):
+ self.run_timed_test(test_name, results_dir, host, timeout=None,
+ tag=tag, *args, **dargs)
class _Run(object):
- """
- Represents a run of autotest control file. This class maintains
- all the state necessary as an autotest control file is executed.
-
- It is not intended to be used directly, rather control files
- should be run using the run method in Autotest.
- """
- def __init__(self, host, results_dir, tag, parallel_flag):
- self.host = host
- self.results_dir = results_dir
- self.env = host.env
- self.tag = tag
- self.parallel_flag = parallel_flag
- self.autodir = _get_autodir(self.host)
- if tag:
- self.manual_control_file = os.path.join(self.autodir,
- 'control.%s' % tag)
- self.remote_control_file = os.path.join(self.autodir,
- 'control.%s.autoserv' % tag)
- else:
- self.manual_control_file = os.path.join(self.autodir,
- 'control')
- self.remote_control_file = os.path.join(self.autodir,
- 'control.autoserv')
-
-
- def verify_machine(self):
- binary = os.path.join(self.autodir, 'bin/autotest')
- try:
- self.host.run('ls %s > /dev/null 2>&1' % binary)
- except:
- raise "Autotest does not appear to be installed"
-
- if not self.parallel_flag:
- tmpdir = os.path.join(self.autodir, 'tmp')
- download = os.path.join(self.autodir, 'tests/download')
- self.host.run('umount %s' % tmpdir, ignore_status=True)
- self.host.run('umount %s' % download, ignore_status=True)
-
- def get_full_cmd(self, section):
- # build up the full command we want to run over the host
- cmd = [os.path.join(self.autodir, 'bin/autotest_client')]
- if section > 0:
- cmd.append('-c')
- if self.tag:
- cmd.append('-t %s' % self.tag)
- if self.host.job.use_external_logging():
- cmd.append('-l')
- cmd.append(self.remote_control_file)
- return ' '.join(cmd)
-
-
- def get_client_log(self, section):
- # open up the files we need for our logging
- client_log_file = os.path.join(self.results_dir, 'debug',
- 'client.log.%d' % section)
- return open(client_log_file, 'w', 0)
-
-
- def execute_section(self, section, timeout):
- print "Executing %s/bin/autotest %s/control phase %d" % \
- (self.autodir, self.autodir,
- section)
-
- full_cmd = self.get_full_cmd(section)
- client_log = self.get_client_log(section)
- redirector = server_job.client_logger(self.host.job)
-
- try:
- old_resultdir = self.host.job.resultdir
- self.host.job.resultdir = self.results_dir
- result = self.host.run(full_cmd, ignore_status=True,
- timeout=timeout,
- stdout_tee=client_log,
- stderr_tee=redirector)
- finally:
- redirector.close()
- self.host.job.resultdir = old_resultdir
-
- if result.exit_status == 1:
- self.host.job.aborted = True
- if not result.stderr:
- raise error.AutotestRunError(
- "execute_section: %s failed to return anything\n"
- "stdout:%s\n" % (full_cmd, result.stdout))
-
- return redirector.last_line
-
-
- def execute_control(self, timeout=None):
- section = 0
- time_left = None
- if timeout:
- end_time = time.time() + timeout
- time_left = end_time - time.time()
- while not timeout or time_left > 0:
- last = self.execute_section(section, time_left)
- if timeout:
- time_left = end_time - time.time()
- if time_left <= 0:
- break
- section += 1
- if re.match(r'^END .*\t----\t----\t.*$', last):
- print "Client complete"
- return
- elif re.match('^\t*GOOD\t----\treboot\.start.*$', last):
- print "Client is rebooting"
- print "Waiting for client to halt"
- if not self.host.wait_down(HALT_TIME):
- raise error.AutotestRunError("%s \
- failed to shutdown after %ds" %
- (self.host.hostname,
- HALT_TIME))
- print "Client down, waiting for restart"
- if not self.host.wait_up(BOOT_TIME):
- # since reboot failed
- # hardreset the machine once if possible
- # before failing this control file
- print "Hardresetting %s" % (
- self.host.hostname,)
- try:
- self.host.hardreset(wait=False)
- except error.AutoservUnsupportedError:
- print "Hardreset unsupported on %s" % (
- self.host.hostname,)
- raise error.AutotestRunError("%s failed"
- " to boot after %ds" % (
- self.host.hostname,
- BOOT_TIME,))
- self.host.reboot_followup()
- continue
- self.host.job.record("ABORT", None, None,
- "Autotest client terminated " +
- "unexpectedly")
- # give the client machine a chance to recover from
- # possible crash
- self.host.wait_up(CRASH_RECOVERY_TIME)
- raise error.AutotestRunError("Aborting - unexpected "
- "final status message "
- "from client: %s\n"
- % last)
-
- # should only get here if we timed out
- assert timeout
- raise error.AutotestTimeoutError()
+ """
+    Represents a run of an autotest control file. This class maintains
+ all the state necessary as an autotest control file is executed.
+
+ It is not intended to be used directly, rather control files
+ should be run using the run method in Autotest.
+ """
+ def __init__(self, host, results_dir, tag, parallel_flag):
+ self.host = host
+ self.results_dir = results_dir
+ self.env = host.env
+ self.tag = tag
+ self.parallel_flag = parallel_flag
+ self.autodir = _get_autodir(self.host)
+ if tag:
+ self.manual_control_file = os.path.join(self.autodir,
+ 'control.%s' % tag)
+ self.remote_control_file = os.path.join(self.autodir,
+ 'control.%s.autoserv' % tag)
+ else:
+ self.manual_control_file = os.path.join(self.autodir,
+ 'control')
+ self.remote_control_file = os.path.join(self.autodir,
+ 'control.autoserv')
+
+
+ def verify_machine(self):
+ binary = os.path.join(self.autodir, 'bin/autotest')
+ try:
+ self.host.run('ls %s > /dev/null 2>&1' % binary)
+ except:
+            raise error.AutotestRunError("Autotest does not appear to be installed")
+
+ if not self.parallel_flag:
+ tmpdir = os.path.join(self.autodir, 'tmp')
+ download = os.path.join(self.autodir, 'tests/download')
+ self.host.run('umount %s' % tmpdir, ignore_status=True)
+ self.host.run('umount %s' % download, ignore_status=True)
+
+ def get_full_cmd(self, section):
+ # build up the full command we want to run over the host
+ cmd = [os.path.join(self.autodir, 'bin/autotest_client')]
+ if section > 0:
+ cmd.append('-c')
+ if self.tag:
+ cmd.append('-t %s' % self.tag)
+ if self.host.job.use_external_logging():
+ cmd.append('-l')
+ cmd.append(self.remote_control_file)
+ return ' '.join(cmd)
+
+
+ def get_client_log(self, section):
+ # open up the files we need for our logging
+ client_log_file = os.path.join(self.results_dir, 'debug',
+ 'client.log.%d' % section)
+ return open(client_log_file, 'w', 0)
+
+
+ def execute_section(self, section, timeout):
+ print "Executing %s/bin/autotest %s/control phase %d" % \
+ (self.autodir, self.autodir,
+ section)
+
+ full_cmd = self.get_full_cmd(section)
+ client_log = self.get_client_log(section)
+ redirector = server_job.client_logger(self.host.job)
+
+ try:
+ old_resultdir = self.host.job.resultdir
+ self.host.job.resultdir = self.results_dir
+ result = self.host.run(full_cmd, ignore_status=True,
+ timeout=timeout,
+ stdout_tee=client_log,
+ stderr_tee=redirector)
+ finally:
+ redirector.close()
+ self.host.job.resultdir = old_resultdir
+
+ if result.exit_status == 1:
+ self.host.job.aborted = True
+ if not result.stderr:
+ raise error.AutotestRunError(
+ "execute_section: %s failed to return anything\n"
+ "stdout:%s\n" % (full_cmd, result.stdout))
+
+ return redirector.last_line
+
+
+ def execute_control(self, timeout=None):
+ section = 0
+ time_left = None
+ if timeout:
+ end_time = time.time() + timeout
+ time_left = end_time - time.time()
+ while not timeout or time_left > 0:
+ last = self.execute_section(section, time_left)
+ if timeout:
+ time_left = end_time - time.time()
+ if time_left <= 0:
+ break
+ section += 1
+ if re.match(r'^END .*\t----\t----\t.*$', last):
+ print "Client complete"
+ return
+ elif re.match('^\t*GOOD\t----\treboot\.start.*$', last):
+ print "Client is rebooting"
+ print "Waiting for client to halt"
+ if not self.host.wait_down(HALT_TIME):
+ raise error.AutotestRunError("%s \
+ failed to shutdown after %ds" %
+ (self.host.hostname,
+ HALT_TIME))
+ print "Client down, waiting for restart"
+ if not self.host.wait_up(BOOT_TIME):
+ # since reboot failed
+ # hardreset the machine once if possible
+ # before failing this control file
+ print "Hardresetting %s" % (
+ self.host.hostname,)
+ try:
+ self.host.hardreset(wait=False)
+ except error.AutoservUnsupportedError:
+ print "Hardreset unsupported on %s" % (
+ self.host.hostname,)
+ raise error.AutotestRunError("%s failed"
+ " to boot after %ds" % (
+ self.host.hostname,
+ BOOT_TIME,))
+ self.host.reboot_followup()
+ continue
+ self.host.job.record("ABORT", None, None,
+ "Autotest client terminated " +
+ "unexpectedly")
+ # give the client machine a chance to recover from
+ # possible crash
+ self.host.wait_up(CRASH_RECOVERY_TIME)
+ raise error.AutotestRunError("Aborting - unexpected "
+ "final status message "
+ "from client: %s\n"
+ % last)
+
+ # should only get here if we timed out
+ assert timeout
+ raise error.AutotestTimeoutError()
def _get_autodir(host):
- dir = host.get_autodir()
- if dir:
- return dir
- try:
- # There's no clean way to do this. readlink may not exist
- cmd = "python -c 'import os,sys; print os.readlink(sys.argv[1])' /etc/autotest.conf 2> /dev/null"
- dir = os.path.dirname(host.run(cmd).stdout)
- if dir:
- return dir
- except error.AutoservRunError:
- pass
- for path in ['/usr/local/autotest', '/home/autotest']:
- try:
- host.run('ls %s > /dev/null 2>&1' % \
- os.path.join(path, 'bin/autotest'))
- return path
- except error.AutoservRunError:
- pass
- raise error.AutotestRunError("Cannot figure out autotest directory")
+ dir = host.get_autodir()
+ if dir:
+ return dir
+ try:
+ # There's no clean way to do this. readlink may not exist
+ cmd = "python -c 'import os,sys; print os.readlink(sys.argv[1])' /etc/autotest.conf 2> /dev/null"
+ dir = os.path.dirname(host.run(cmd).stdout)
+ if dir:
+ return dir
+ except error.AutoservRunError:
+ pass
+ for path in ['/usr/local/autotest', '/home/autotest']:
+ try:
+ host.run('ls %s > /dev/null 2>&1' % \
+ os.path.join(path, 'bin/autotest'))
+ return path
+ except error.AutoservRunError:
+ pass
+ raise error.AutotestRunError("Cannot figure out autotest directory")
 # site_autotest.py may be non-existent or empty, make sure that an appropriate
# SiteAutotest class is created nevertheless
try:
- from site_autotest import SiteAutotest
+ from site_autotest import SiteAutotest
except ImportError:
- class SiteAutotest(BaseAutotest):
- pass
+ class SiteAutotest(BaseAutotest):
+ pass
class Autotest(SiteAutotest):
- pass
+ pass
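
run_timed_test() above generates its one-line control file by repr()-ing every argument, so the string shipped to the client is valid Python. The same assembly in isolation (build_run_test_control is an illustrative name, not a function in this module):

    def build_run_test_control(test_name, *args, **dargs):
        # repr() each value so strings keep their quotes in the generated file.
        opts = ["%s=%r" % (key, val) for key, val in dargs.items()]
        body = ", ".join([repr(test_name)] + [repr(a) for a in args] + opts)
        return "job.run_test(%s)\n" % body

    # build_run_test_control('sleeptest', seconds=5)
    # returns "job.run_test('sleeptest', seconds=5)\n"
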
diff --git a/server/autotest_unittest.py b/server/autotest_unittest.py
index f14ea21f..3df353cf 100644
--- a/server/autotest_unittest.py
+++ b/server/autotest_unittest.py
@@ -11,287 +11,287 @@ from autotest_lib.client.common_lib.test_utils import mock
class TestBaseAutotest(unittest.TestCase):
- def setUp(self):
- # create god
- self.god = mock.mock_god()
+ def setUp(self):
+ # create god
+ self.god = mock.mock_god()
- # stub out utils
- self.utils_obj = self.god.create_mock_class(utils, "utils")
- self.old_utils = autotest.utils
- autotest.utils = self.utils_obj
+ # stub out utils
+ self.utils_obj = self.god.create_mock_class(utils, "utils")
+ self.old_utils = autotest.utils
+ autotest.utils = self.utils_obj
- # stub out os
- self.old_os = autotest.os
- self.os_obj = self.god.create_mock_class(os, "os")
- autotest.os = self.os_obj
+ # stub out os
+ self.old_os = autotest.os
+ self.os_obj = self.god.create_mock_class(os, "os")
+ autotest.os = self.os_obj
- # stub out os.path
- self.path_obj = self.god.create_mock_class(os.path, "os.path")
- autotest.os.path = self.path_obj
+ # stub out os.path
+ self.path_obj = self.god.create_mock_class(os.path, "os.path")
+ autotest.os.path = self.path_obj
- # need to set return of one function in utils called in constr.
- self.server_dir = "autotest_lib.server"
- func_call = self.utils_obj.get_server_dir.expect_call()
- func_call.and_return(self.server_dir)
+ # need to set return of one function in utils called in constr.
+ self.server_dir = "autotest_lib.server"
+ func_call = self.utils_obj.get_server_dir.expect_call()
+ func_call.and_return(self.server_dir)
- # create our host mock (and give it a hostname)
- self.host = self.god.create_mock_class(ssh_host.SSHHost,
- "SSHHost")
- self.host.hostname = "foo"
+ # create our host mock (and give it a hostname)
+ self.host = self.god.create_mock_class(ssh_host.SSHHost,
+ "SSHHost")
+ self.host.hostname = "foo"
- # create the autotest object
- self.base_autotest = autotest.BaseAutotest(self.host)
-
-
- def tearDown(self):
- # put things back
- autotest.utils = self.old_utils
- autotest.os = self.old_os
+ # create the autotest object
+ self.base_autotest = autotest.BaseAutotest(self.host)
+
+
+ def tearDown(self):
+ # put things back
+ autotest.utils = self.old_utils
+ autotest.os = self.old_os
- def test_constructor(self):
- # we should check the calls
- self.god.check_playback()
+ def test_constructor(self):
+ # we should check the calls
+ self.god.check_playback()
- def common_install_test_setup(self, autodir, is_site_install_autotest):
- # mock other methods
- old_get_autodir = autotest._get_autodir
- get_autodir_obj = self.god.create_mock_function("_get_autodir")
- autotest._get_autodir = get_autodir_obj
-
- self.base_autotest.got = True
- self.source_material = None
+ def common_install_test_setup(self, autodir, is_site_install_autotest):
+ # mock other methods
+ old_get_autodir = autotest._get_autodir
+ get_autodir_obj = self.god.create_mock_function("_get_autodir")
+ autotest._get_autodir = get_autodir_obj
+
+ self.base_autotest.got = True
+ self.source_material = None
- # record calls
- self.host.wait_up.expect_call(timeout=30)
- self.host.setup.expect_call()
- get_autodir_obj.expect_call(self.host).and_return(autodir)
- rt = self.utils_obj.sh_escape.expect_call(autodir)
- rt.and_return(autodir)
- self.host.run.expect_call('mkdir -p "%s"' % (autodir))
- rt = self.host.site_install_autotest.expect_call()
- rt.and_return(is_site_install_autotest)
-
- return old_get_autodir
+ # record calls
+ self.host.wait_up.expect_call(timeout=30)
+ self.host.setup.expect_call()
+ get_autodir_obj.expect_call(self.host).and_return(autodir)
+ rt = self.utils_obj.sh_escape.expect_call(autodir)
+ rt.and_return(autodir)
+ self.host.run.expect_call('mkdir -p "%s"' % (autodir))
+ rt = self.host.site_install_autotest.expect_call()
+ rt.and_return(is_site_install_autotest)
+
+ return old_get_autodir
- def common_install_test_teardown(self, old_get_autodir):
- # put things back
- autotest._get_autodir = old_get_autodir
-
-
- def test_install1(self):
- # setup
- autodir = "autodir"
- old_get_autodir = self.common_install_test_setup(autodir, True)
-
- # run test
- self.base_autotest.install()
-
- # check
- self.assertTrue(self.base_autotest.installed)
- self.god.check_playback()
-
- # put back
- self.common_install_test_teardown(old_get_autodir)
-
-
- def test_install2(self):
- # setup
- autodir = "autodir"
- old_get_autodir = self.common_install_test_setup(autodir, False)
- cmd = 'which svn'
- cmdresult = client_utils.CmdResult(cmd)
- self.utils_obj.run.expect_call(cmd).and_return(cmdresult)
- cmd = 'svn checkout %s %s' % (autotest.AUTOTEST_SVN, autodir)
- self.host.run.expect_call(cmd)
-
- # run test
- self.base_autotest.install()
-
- # check
- self.assertTrue(self.base_autotest.installed)
- self.god.check_playback()
-
- # put back
- self.common_install_test_teardown(old_get_autodir)
-
-
- def test_get(self):
- # setup
- location = "autotest_lib.client"
- cwd = "current_dir"
- self.os_obj.getcwd.expect_call().and_return(cwd)
- self.os_obj.chdir.expect_call(location)
- self.os_obj.system.expect_call('tools/make_clean')
- self.os_obj.chdir.expect_call(cwd)
-
- # call method under test
- self.base_autotest.get(location)
-
- # do tests
- self.assertTrue(self.base_autotest.got)
- self.god.check_playback()
-
-
- def test_get_default(self):
- # setup the test
- location = "autotest_lib.client"
- self.path_obj.join.expect_call(self.base_autotest.serverdir,
- '../client').and_return(location)
- self.path_obj.abspath.expect_call(location).and_return(location)
- cwd = "current_dir"
- self.os_obj.getcwd.expect_call().and_return(cwd)
- self.os_obj.chdir.expect_call(location)
- self.os_obj.system.expect_call('tools/make_clean')
- self.os_obj.chdir.expect_call(cwd)
-
- # call method under test
- self.base_autotest.get()
-
- # do tests
- self.assertTrue(self.base_autotest.got)
- self.god.check_playback()
-
-
- def test_run_default(self):
- # need to stub out _get_host_and_setup
- old_func = self.base_autotest._get_host_and_setup
- name = "_get_host_and_setup"
- new_func = self.god.create_mock_function(name)
- self.base_autotest._get_host_and_setup = new_func
-
- # need to stub out _do_run
- old_do_run = self.base_autotest._do_run
- do_run = self.god.create_mock_function("_do_run")
- self.base_autotest._do_run = do_run
-
- # need a mock of _Run object
- run = self.god.create_mock_class(autotest._Run, "run")
-
- # need a mock for _Run constuctor
- oldRun = autotest._Run
- newRun = self.god.create_mock_function("_Run")
- autotest._Run = newRun
-
- new_func.expect_call(None).and_return(self.host)
- results_dir = "results_dir"
- self.path_obj.abspath.expect_call(".").and_return(results_dir)
- newRun.expect_call(self.host,
- results_dir, None, False).and_return(run)
- do_run.expect_call("control", results_dir, self.host, run, None)
-
- # call method
- self.base_autotest.run("control")
-
- # do test
- self.god.check_playback()
-
- # put things back
- self.base_autotest._get_host_and_setup = old_func
- self.base_autotest._do_run = old_do_run
- autotest._Run = oldRun
-
-
- def test_prepare_for_copying_logs1(self):
- src = "src"
- dest = "dest"
- keyval_path = ''
- dkeyval = "dest/keyval"
-
- # setup
- self.path_obj.join.expect_call(dest,
- 'keyval').and_return(dkeyval)
- self.path_obj.exists.expect_call(dkeyval).and_return(False)
-
- # run test
- self.base_autotest.prepare_for_copying_logs(src, dest,
- self.host)
-
- # check
- self.god.check_playback()
-
-
- def test_prepare_for_copying_logs2(self):
- src = "src"
- dest = "dest"
- keyval_path = ''
- dkeyval = "dest/keyval"
- skeyval = "src/keyval"
- file_path = (0, ".keyavl_host")
-
- # make stub for tempfile.mkstemp
- old_mkstemp = autotest.tempfile.mkstemp
- mkstemp_obj = self.god.create_mock_function("tempfile.mkstemp")
- autotest.tempfile.mkstemp = mkstemp_obj
-
- # setup
- self.path_obj.join.expect_call(dest,
- 'keyval').and_return(dkeyval)
- self.path_obj.exists.expect_call(dkeyval).and_return(True)
- mkstemp_obj.expect_call('.keyval_%s'
- % self.host.hostname).and_return(file_path)
- self.path_obj.join.expect_call(src,
- 'keyval').and_return(skeyval)
- self.host.get_file.expect_call(skeyval, file_path[1])
- self.path_obj.join.expect_call(src,
- 'keyval').and_return(skeyval)
- self.host.run.expect_call('rm -rf %s' % (skeyval))
-
- # run test
- self.base_autotest.prepare_for_copying_logs(src, dest,
- self.host)
-
- # check results
- self.god.check_playback()
+ def common_install_test_teardown(self, old_get_autodir):
+ # put things back
+ autotest._get_autodir = old_get_autodir
+
+
+ def test_install1(self):
+ # setup
+ autodir = "autodir"
+ old_get_autodir = self.common_install_test_setup(autodir, True)
+
+ # run test
+ self.base_autotest.install()
+
+ # check
+ self.assertTrue(self.base_autotest.installed)
+ self.god.check_playback()
+
+ # put back
+ self.common_install_test_teardown(old_get_autodir)
+
+
+ def test_install2(self):
+ # setup
+ autodir = "autodir"
+ old_get_autodir = self.common_install_test_setup(autodir, False)
+ cmd = 'which svn'
+ cmdresult = client_utils.CmdResult(cmd)
+ self.utils_obj.run.expect_call(cmd).and_return(cmdresult)
+ cmd = 'svn checkout %s %s' % (autotest.AUTOTEST_SVN, autodir)
+ self.host.run.expect_call(cmd)
+
+ # run test
+ self.base_autotest.install()
+
+ # check
+ self.assertTrue(self.base_autotest.installed)
+ self.god.check_playback()
+
+ # put back
+ self.common_install_test_teardown(old_get_autodir)
+
+
+ def test_get(self):
+ # setup
+ location = "autotest_lib.client"
+ cwd = "current_dir"
+ self.os_obj.getcwd.expect_call().and_return(cwd)
+ self.os_obj.chdir.expect_call(location)
+ self.os_obj.system.expect_call('tools/make_clean')
+ self.os_obj.chdir.expect_call(cwd)
+
+ # call method under test
+ self.base_autotest.get(location)
+
+ # do tests
+ self.assertTrue(self.base_autotest.got)
+ self.god.check_playback()
+
+
+ def test_get_default(self):
+ # setup the test
+ location = "autotest_lib.client"
+ self.path_obj.join.expect_call(self.base_autotest.serverdir,
+ '../client').and_return(location)
+ self.path_obj.abspath.expect_call(location).and_return(location)
+ cwd = "current_dir"
+ self.os_obj.getcwd.expect_call().and_return(cwd)
+ self.os_obj.chdir.expect_call(location)
+ self.os_obj.system.expect_call('tools/make_clean')
+ self.os_obj.chdir.expect_call(cwd)
+
+ # call method under test
+ self.base_autotest.get()
+
+ # do tests
+ self.assertTrue(self.base_autotest.got)
+ self.god.check_playback()
+
+
+ def test_run_default(self):
+ # need to stub out _get_host_and_setup
+ old_func = self.base_autotest._get_host_and_setup
+ name = "_get_host_and_setup"
+ new_func = self.god.create_mock_function(name)
+ self.base_autotest._get_host_and_setup = new_func
+
+ # need to stub out _do_run
+ old_do_run = self.base_autotest._do_run
+ do_run = self.god.create_mock_function("_do_run")
+ self.base_autotest._do_run = do_run
+
+ # need a mock of _Run object
+ run = self.god.create_mock_class(autotest._Run, "run")
+
+        # need a mock for _Run constructor
+ oldRun = autotest._Run
+ newRun = self.god.create_mock_function("_Run")
+ autotest._Run = newRun
+
+ new_func.expect_call(None).and_return(self.host)
+ results_dir = "results_dir"
+ self.path_obj.abspath.expect_call(".").and_return(results_dir)
+ newRun.expect_call(self.host,
+ results_dir, None, False).and_return(run)
+ do_run.expect_call("control", results_dir, self.host, run, None)
+
+ # call method
+ self.base_autotest.run("control")
+
+ # do test
+ self.god.check_playback()
+
+ # put things back
+ self.base_autotest._get_host_and_setup = old_func
+ self.base_autotest._do_run = old_do_run
+ autotest._Run = oldRun
+
+
+ def test_prepare_for_copying_logs1(self):
+ src = "src"
+ dest = "dest"
+ keyval_path = ''
+ dkeyval = "dest/keyval"
+
+ # setup
+ self.path_obj.join.expect_call(dest,
+ 'keyval').and_return(dkeyval)
+ self.path_obj.exists.expect_call(dkeyval).and_return(False)
+
+ # run test
+ self.base_autotest.prepare_for_copying_logs(src, dest,
+ self.host)
+
+ # check
+ self.god.check_playback()
+
+
+ def test_prepare_for_copying_logs2(self):
+ src = "src"
+ dest = "dest"
+ keyval_path = ''
+ dkeyval = "dest/keyval"
+ skeyval = "src/keyval"
+        file_path = (0, ".keyval_host")
+
+ # make stub for tempfile.mkstemp
+ old_mkstemp = autotest.tempfile.mkstemp
+ mkstemp_obj = self.god.create_mock_function("tempfile.mkstemp")
+ autotest.tempfile.mkstemp = mkstemp_obj
+
+ # setup
+ self.path_obj.join.expect_call(dest,
+ 'keyval').and_return(dkeyval)
+ self.path_obj.exists.expect_call(dkeyval).and_return(True)
+ mkstemp_obj.expect_call('.keyval_%s'
+ % self.host.hostname).and_return(file_path)
+ self.path_obj.join.expect_call(src,
+ 'keyval').and_return(skeyval)
+ self.host.get_file.expect_call(skeyval, file_path[1])
+ self.path_obj.join.expect_call(src,
+ 'keyval').and_return(skeyval)
+ self.host.run.expect_call('rm -rf %s' % (skeyval))
+
+ # run test
+ self.base_autotest.prepare_for_copying_logs(src, dest,
+ self.host)
+
+ # check results
+ self.god.check_playback()
- # set things back
- autotest.tempfile.mkstemp = old_mkstemp
+ # set things back
+ autotest.tempfile.mkstemp = old_mkstemp
- def test_process_copied_logs_no_dest_keyval(self):
- # setup test
- dest = "dest"
- path = "keyval_path"
- self.path_obj.join.expect_call(dest, 'keyval').and_return(path)
- self.path_obj.exists.expect_call(path).and_return(False)
-
- # run test
- self.base_autotest.process_copied_logs(dest, self.host, path)
-
- # run check
- self.god.check_playback()
+ def test_process_copied_logs_no_dest_keyval(self):
+ # setup test
+ dest = "dest"
+ path = "keyval_path"
+ self.path_obj.join.expect_call(dest, 'keyval').and_return(path)
+ self.path_obj.exists.expect_call(path).and_return(False)
+
+ # run test
+ self.base_autotest.process_copied_logs(dest, self.host, path)
+
+ # run check
+ self.god.check_playback()
- def test_process_copied_logs_with_dest_keyval(self):
- # setup test
- dest = "dest"
- kpath = "keyval_path"
- path = "path"
- self.path_obj.join.expect_call(dest, 'keyval').and_return(path)
- self.path_obj.exists.expect_call(path).and_return(True)
+ def test_process_copied_logs_with_dest_keyval(self):
+ # setup test
+ dest = "dest"
+ kpath = "keyval_path"
+ path = "path"
+ self.path_obj.join.expect_call(dest, 'keyval').and_return(path)
+ self.path_obj.exists.expect_call(path).and_return(True)
- vals = {'version': 1, 'author': "wonder woman"}
- kvals = {'version': 1}
- mvals = {'author': "wonder woman"}
+ vals = {'version': 1, 'author': "wonder woman"}
+ kvals = {'version': 1}
+ mvals = {'author': "wonder woman"}
- self.utils_obj.read_keyval.expect_call(path).and_return(vals)
- self.path_obj.join.expect_call(dest, 'keyval').and_return(kpath)
- self.utils_obj.read_keyval.expect_call(kpath).and_return(kvals)
- self.path_obj.join.expect_call(dest, 'keyval').and_return(dest)
- self.utils_obj.write_keyval.expect_call(dest, mvals)
- self.os_obj.remove.expect_call(path)
+ self.utils_obj.read_keyval.expect_call(path).and_return(vals)
+ self.path_obj.join.expect_call(dest, 'keyval').and_return(kpath)
+ self.utils_obj.read_keyval.expect_call(kpath).and_return(kvals)
+ self.path_obj.join.expect_call(dest, 'keyval').and_return(dest)
+ self.utils_obj.write_keyval.expect_call(dest, mvals)
+ self.os_obj.remove.expect_call(path)
- # call test
- self.base_autotest.process_copied_logs(dest, self.host, path)
+ # call test
+ self.base_autotest.process_copied_logs(dest, self.host, path)
- # run check
- self.god.check_playback()
+ # run check
+ self.god.check_playback()
- def test_run_timed_test(self):
- pass
+ def test_run_timed_test(self):
+ pass
if __name__ == "__main__":
- unittest.main()
+ unittest.main()
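
The unit tests above follow the record/playback style of autotest's mock_god: stub a collaborator, record the calls you expect with expect_call()/and_return(), exercise the code under test, then check_playback(). A minimal illustration of that flow with two made-up classes (Greeter and Backend are not part of autotest):

    import unittest
    from autotest_lib.client.common_lib.test_utils import mock

    class Backend(object):
        def lookup(self, name):
            raise NotImplementedError

    class Greeter(object):
        def __init__(self, backend):
            self.backend = backend
        def greet(self, name):
            return "Hello %s" % self.backend.lookup(name)

    class GreeterTest(unittest.TestCase):
        def test_greet(self):
            god = mock.mock_god()
            backend = god.create_mock_class(Backend, "Backend")
            # Record the single call we expect Greeter to make.
            backend.lookup.expect_call("world").and_return("WORLD")
            self.assertEqual(Greeter(backend).greet("world"), "Hello WORLD")
            # Fails the test if a recorded call was missed or mismatched.
            god.check_playback()

    if __name__ == "__main__":
        unittest.main()
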
diff --git a/server/deb_kernel.py b/server/deb_kernel.py
index 26523b97..d6513291 100644
--- a/server/deb_kernel.py
+++ b/server/deb_kernel.py
@@ -5,7 +5,7 @@
"""
This module defines the Kernel class
- Kernel: an os kernel
+ Kernel: an os kernel
"""
__author__ = """
@@ -22,132 +22,132 @@ from autotest_lib.server import kernel, utils
class DEBKernel(kernel.Kernel):
- """
- This class represents a .deb pre-built kernel.
-
- It is used to obtain a built kernel and install it on a Host.
-
- Implementation details:
- This is a leaf class in an abstract class hierarchy, it must
- implement the unimplemented methods in parent classes.
- """
- def __init__(self):
- super(DEBKernel, self).__init__()
-
-
- def install(self, host, **kwargs):
- """
- Install a kernel on the remote host.
-
- This will also invoke the guest's bootloader to set this
- kernel as the default kernel.
-
- Args:
- host: the host on which to install the kernel
- [kwargs]: remaining keyword arguments will be passed
- to Bootloader.add_kernel()
-
- Raises:
- AutoservError: no package has yet been obtained. Call
- DEBKernel.get() with a .deb package.
- """
- if self.source_material is None:
- raise error.AutoservError("A kernel must first be "
- "specified via get()")
-
- remote_tmpdir = host.get_tmp_dir()
- basename = os.path.basename(self.source_material)
- remote_filename = os.path.join(remote_tmpdir, basename)
- host.send_file(self.source_material, remote_filename)
- host.run('dpkg -i "%s"' % (utils.sh_escape(remote_filename),))
- host.run('mkinitramfs -o "%s" "%s"' % (
- utils.sh_escape(self.get_initrd_name()),
- utils.sh_escape(self.get_version()),))
-
- host.bootloader.add_kernel(self.get_image_name(),
- initrd=self.get_initrd_name(), **kwargs)
-
-
- def get_version(self):
- """Get the version of the kernel to be installed.
-
- Returns:
- The version string, as would be returned
- by 'make kernelrelease'.
-
- Raises:
- AutoservError: no package has yet been obtained. Call
- DEBKernel.get() with a .deb package.
- """
- if self.source_material is None:
- raise error.AutoservError("A kernel must first be "
- "specified via get()")
-
- retval= utils.run('dpkg-deb -f "%s" version' %
- utils.sh_escape(self.source_material),)
- return retval.stdout.strip()
-
-
- def get_image_name(self):
- """Get the name of the kernel image to be installed.
-
- Returns:
- The full path to the kernel image file as it will be
- installed on the host.
-
- Raises:
- AutoservError: no package has yet been obtained. Call
- DEBKernel.get() with a .deb package.
- """
- return "/boot/vmlinuz-%s" % (self.get_version(),)
-
-
- def get_initrd_name(self):
- """Get the name of the initrd file to be installed.
-
- Returns:
- The full path to the initrd file as it will be
- installed on the host. If the package includes no
- initrd file, None is returned
-
- Raises:
- AutoservError: no package has yet been obtained. Call
- DEBKernel.get() with a .deb package.
- """
- if self.source_material is None:
- raise error.AutoservError("A kernel must first be "
- "specified via get()")
-
- return "/boot/initrd.img-%s" % (self.get_version(),)
-
- def extract(self, host):
- """Extract the kernel package.
-
- This function is only useful to access the content of the
- package (for example the kernel image) without
- installing it. It is not necessary to run this function to
- install the kernel.
-
- Args:
- host: the host on which to extract the kernel package.
-
- Returns:
- The full path to the temporary directory on host where
- the package was extracted.
-
- Raises:
- AutoservError: no package has yet been obtained. Call
- DEBKernel.get() with a .deb package.
- """
- if self.source_material is None:
- raise error.AutoservError("A kernel must first be "
- "specified via get()")
-
- remote_tmpdir = host.get_tmp_dir()
- basename = os.path.basename(self.source_material)
- remote_filename = os.path.join(remote_tmpdir, basename)
- host.send_file(self.source_material, remote_filename)
- content_dir= os.path.join(remote_tmpdir, "contents")
- host.run('dpkg -x "%s" "%s"' % (utils.sh_escape(remote_filename), utils.sh_escape(content_dir),))
-
- return content_dir
+ """
+ This class represents a .deb pre-built kernel.
+
+ It is used to obtain a built kernel and install it on a Host.
+
+ Implementation details:
+ This is a leaf class in an abstract class hierarchy, it must
+ implement the unimplemented methods in parent classes.
+ """
+ def __init__(self):
+ super(DEBKernel, self).__init__()
+
+
+ def install(self, host, **kwargs):
+ """
+ Install a kernel on the remote host.
+
+ This will also invoke the guest's bootloader to set this
+ kernel as the default kernel.
+
+ Args:
+ host: the host on which to install the kernel
+ [kwargs]: remaining keyword arguments will be passed
+ to Bootloader.add_kernel()
+
+ Raises:
+ AutoservError: no package has yet been obtained. Call
+ DEBKernel.get() with a .deb package.
+ """
+ if self.source_material is None:
+ raise error.AutoservError("A kernel must first be "
+ "specified via get()")
+
+ remote_tmpdir = host.get_tmp_dir()
+ basename = os.path.basename(self.source_material)
+ remote_filename = os.path.join(remote_tmpdir, basename)
+ host.send_file(self.source_material, remote_filename)
+ host.run('dpkg -i "%s"' % (utils.sh_escape(remote_filename),))
+ host.run('mkinitramfs -o "%s" "%s"' % (
+ utils.sh_escape(self.get_initrd_name()),
+ utils.sh_escape(self.get_version()),))
+
+ host.bootloader.add_kernel(self.get_image_name(),
+ initrd=self.get_initrd_name(), **kwargs)
+
+
+ def get_version(self):
+ """Get the version of the kernel to be installed.
+
+ Returns:
+ The version string, as would be returned
+ by 'make kernelrelease'.
+
+ Raises:
+ AutoservError: no package has yet been obtained. Call
+ DEBKernel.get() with a .deb package.
+ """
+ if self.source_material is None:
+ raise error.AutoservError("A kernel must first be "
+ "specified via get()")
+
+ retval= utils.run('dpkg-deb -f "%s" version' %
+ utils.sh_escape(self.source_material),)
+ return retval.stdout.strip()
+
+
+ def get_image_name(self):
+ """Get the name of the kernel image to be installed.
+
+ Returns:
+ The full path to the kernel image file as it will be
+ installed on the host.
+
+ Raises:
+ AutoservError: no package has yet been obtained. Call
+ DEBKernel.get() with a .deb package.
+ """
+ return "/boot/vmlinuz-%s" % (self.get_version(),)
+
+
+ def get_initrd_name(self):
+ """Get the name of the initrd file to be installed.
+
+ Returns:
+ The full path to the initrd file as it will be
+ installed on the host. If the package includes no
+ initrd file, None is returned
+
+ Raises:
+ AutoservError: no package has yet been obtained. Call
+ DEBKernel.get() with a .deb package.
+ """
+ if self.source_material is None:
+ raise error.AutoservError("A kernel must first be "
+ "specified via get()")
+
+ return "/boot/initrd.img-%s" % (self.get_version(),)
+
+ def extract(self, host):
+ """Extract the kernel package.
+
+ This function is only useful to access the content of the
+ package (for example the kernel image) without
+ installing it. It is not necessary to run this function to
+ install the kernel.
+
+ Args:
+ host: the host on which to extract the kernel package.
+
+ Returns:
+ The full path to the temporary directory on host where
+ the package was extracted.
+
+ Raises:
+ AutoservError: no package has yet been obtained. Call
+ DEBKernel.get() with a .deb package.
+ """
+ if self.source_material is None:
+ raise error.AutoservError("A kernel must first be "
+ "specified via get()")
+
+ remote_tmpdir = host.get_tmp_dir()
+ basename = os.path.basename(self.source_material)
+ remote_filename = os.path.join(remote_tmpdir, basename)
+ host.send_file(self.source_material, remote_filename)
+ content_dir= os.path.join(remote_tmpdir, "contents")
+ host.run('dpkg -x "%s" "%s"' % (utils.sh_escape(remote_filename), utils.sh_escape(content_dir),))
+
+ return content_dir
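
As a usage sketch, the DEBKernel flow is: get() a local .deb, then install() it on a host, which runs dpkg -i, builds an initramfs and registers the image with the bootloader. The host construction and reboot call below come from the generic server host interface and are assumptions made only to keep the example complete.

    from autotest_lib.server import deb_kernel
    from autotest_lib.server.hosts import ssh_host   # assumed module path

    def install_deb_kernel(hostname, deb_path):
        host = ssh_host.SSHHost(hostname)    # assumption: hostname-only constructor
        kernel = deb_kernel.DEBKernel()
        kernel.get(deb_path)                 # record the local .deb package
        print("Installing kernel %s on %s" % (kernel.get_version(), hostname))
        kernel.install(host)                 # dpkg -i, mkinitramfs, add_kernel()
        host.reboot()                        # assumption: boot into the new default
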
diff --git a/server/git.py b/server/git.py
index 52812c45..72f5b81f 100644
--- a/server/git.py
+++ b/server/git.py
@@ -18,168 +18,168 @@ from autotest_lib.server import utils, installable_object
class GitRepo(installable_object.InstallableObject):
- """
- This class represents a git repo.
-
- It is used to pull down a local copy of a git repo, check if the local
- repo is up-to-date, if not update. It delegates the install to
- implementation classes.
-
- """
-
- def __init__(self, repodir, giturl, weburl):
- super(installable_object.InstallableObject, self).__init__()
- if repodir == None:
- e_msg = 'You must provide a directory to hold the git repository'
- raise ValueError(e_msg)
- self.repodir = sh_escape(repodir)
- if giturl == None:
- raise ValueError('You must provide a git URL to the repository')
- self.giturl = giturl
- if weburl == None:
- raise ValueError('You must provide a http URL to the repository')
- self.weburl = weburl
-
- # path to .git dir
- self.gitpath = utils.sh_escape(os.path.join(self.repodir,'.git'))
-
- # base git command , pointing to gitpath git dir
- self.gitcmdbase = 'git --git-dir=%s' % self.gitpath
-
- # default to same remote path as local
- self.__build = os.path.dirname(self.repodir)
-
-
- def run(self, command, timeout=None, ignore_status=False):
- return utils.run(r'%s' % (utils.sh_escape(command)),
- timeout, ignore_status)
-
-
- # base install method
- def install(self, host, builddir=None):
- # allow override of target remote dir
- if builddir:
- self.__build = builddir
-
- # push source to host for install
- print 'pushing %s to host:%s' %(self.source_material, self.__build)
- host.send_file(self.source_material, self.__build)
-
-
- def gitcmd(self, cmd, ignore_status=False):
- return self.run('%s %s'%(self.gitcmdbase, cmd),
- ignore_status=ignore_status)
-
-
- def get(self, **kwargs):
- """
- This method overrides baseclass get so we can do proper git
- clone/pulls, and check for updated versions. The result of
- this method will leave an up-to-date version of git repo at
- 'giturl' in 'repodir' directory to be used by build/install
- methods.
- """
-
- if not self.is_repo_initialized():
- # this is your first time ...
- print 'cloning repo...'
- cmd = 'clone %s %s ' %(self.giturl, self.repodir)
- rv = self.gitcmd(cmd, True)
- if rv.exit_status != 0:
- print rv.stderr
- raise error.CmdError('Failed to clone git url', rv)
- else:
- print rv.stdout
-
- else:
- # exiting repo, check if we're up-to-date
- if self.is_out_of_date():
- print 'updating repo...'
- rv = self.gitcmd('pull', True)
- if rv.exit_status != 0:
- print rv.stderr
- e_msg = 'Failed to pull git repo data'
- raise error.CmdError(e_msg, rv)
- else:
- print 'repo up-to-date'
-
-
- # remember where the source is
- self.source_material = self.repodir
-
-
- def get_local_head(self):
- cmd = 'log --max-count=1'
- gitlog = self.gitcmd(cmd).stdout
-
- # parsing the commit checksum out of git log 's first entry.
- # Output looks like:
- #
- # commit 1dccba29b4e5bf99fb98c324f952386dda5b097f
- # Merge: 031b69b... df6af41...
- # Author: Avi Kivity <avi@qumranet.com>
- # Date: Tue Oct 23 10:36:11 2007 +0200
- #
- # Merge home:/home/avi/kvm/linux-2.6
- return str(gitlog.split('\n')[0]).split()[1]
-
-
- def get_remote_head(self):
- def __needs_refresh(lines):
- tag = '<meta http-equiv="refresh" content="0"/>'
- if len(filter(lambda x: x.startswith(tag), lines)) > 0:
- return True
-
- return False
-
+ """
+ This class represents a git repo.
+
+ It is used to pull down a local copy of a git repo and check whether the
+ local repo is up-to-date, updating it if not. It delegates the install to
+ implementation classes.
+
+ """
+
+ def __init__(self, repodir, giturl, weburl):
+ super(installable_object.InstallableObject, self).__init__()
+ if repodir == None:
+ e_msg = 'You must provide a directory to hold the git repository'
+ raise ValueError(e_msg)
+ self.repodir = utils.sh_escape(repodir)
+ if giturl == None:
+ raise ValueError('You must provide a git URL to the repository')
+ self.giturl = giturl
+ if weburl == None:
+ raise ValueError('You must provide a http URL to the repository')
+ self.weburl = weburl
+
+ # path to .git dir
+ self.gitpath = utils.sh_escape(os.path.join(self.repodir,'.git'))
+
+ # base git command, pointing to gitpath git dir
+ self.gitcmdbase = 'git --git-dir=%s' % self.gitpath
+
+ # default to same remote path as local
+ self.__build = os.path.dirname(self.repodir)
+
+
+ def run(self, command, timeout=None, ignore_status=False):
+ return utils.run(r'%s' % (utils.sh_escape(command)),
+ timeout, ignore_status)
+
+
+ # base install method
+ def install(self, host, builddir=None):
+ # allow override of target remote dir
+ if builddir:
+ self.__build = builddir
+
+ # push source to host for install
+ print 'pushing %s to host:%s' %(self.source_material, self.__build)
+ host.send_file(self.source_material, self.__build)
+
+
+ def gitcmd(self, cmd, ignore_status=False):
+ return self.run('%s %s'%(self.gitcmdbase, cmd),
+ ignore_status=ignore_status)
+
+
+ def get(self, **kwargs):
+ """
+ This method overrides baseclass get so we can do proper git
+ clone/pulls, and check for updated versions. The result of
+ this method will leave an up-to-date version of git repo at
+ 'giturl' in 'repodir' directory to be used by build/install
+ methods.
+ """
+
+ if not self.is_repo_initialized():
+ # this is your first time ...
+ print 'cloning repo...'
+ cmd = 'clone %s %s ' %(self.giturl, self.repodir)
+ rv = self.gitcmd(cmd, True)
+ if rv.exit_status != 0:
+ print rv.stderr
+ raise error.CmdError('Failed to clone git url', rv)
+ else:
+ print rv.stdout
+
+ else:
+ # existing repo, check if we're up-to-date
+ if self.is_out_of_date():
+ print 'updating repo...'
+ rv = self.gitcmd('pull', True)
+ if rv.exit_status != 0:
+ print rv.stderr
+ e_msg = 'Failed to pull git repo data'
+ raise error.CmdError(e_msg, rv)
+ else:
+ print 'repo up-to-date'
+
+
+ # remember where the source is
+ self.source_material = self.repodir
+
+
+ def get_local_head(self):
+ cmd = 'log --max-count=1'
+ gitlog = self.gitcmd(cmd).stdout
+
+ # parsing the commit checksum out of git log's first entry.
+ # Output looks like:
+ #
+ # commit 1dccba29b4e5bf99fb98c324f952386dda5b097f
+ # Merge: 031b69b... df6af41...
+ # Author: Avi Kivity <avi@qumranet.com>
+ # Date: Tue Oct 23 10:36:11 2007 +0200
+ #
+ # Merge home:/home/avi/kvm/linux-2.6
+ return str(gitlog.split('\n')[0]).split()[1]
+
+
+ def get_remote_head(self):
+ def __needs_refresh(lines):
+ tag = '<meta http-equiv="refresh" content="0"/>'
+ if len(filter(lambda x: x.startswith(tag), lines)) > 0:
+ return True
+
+ return False
+
- # scan git web interface for revision HEAD's commit tag
- gitwebaction=';a=commit;h=HEAD'
- url = self.weburl+gitwebaction
- max_refresh = 4
- r = 0
+ # scan git web interface for revision HEAD's commit tag
+ gitwebaction=';a=commit;h=HEAD'
+ url = self.weburl+gitwebaction
+ max_refresh = 4
+ r = 0
- print 'checking %s for changes' %(url)
- u = utils.urlopen(url)
- lines = u.read().split('\n')
+ print 'checking %s for changes' %(url)
+ u = utils.urlopen(url)
+ lines = u.read().split('\n')
- while __needs_refresh(lines) and r < max_refresh:
- print 'refreshing url'
- r = r+1
- u = utils.urlopen(url)
- lines = u.read().split('\n')
+ while __needs_refresh(lines) and r < max_refresh:
+ print 'refreshing url'
+ r = r+1
+ u = utils.urlopen(url)
+ lines = u.read().split('\n')
- if r >= max_refresh:
- e_msg = 'Failed to get remote repo status, refreshed %s times' % r
- raise IndexError(e_msg)
+ if r >= max_refresh:
+ e_msg = 'Failed to get remote repo status, refreshed %s times' % r
+ raise IndexError(e_msg)
- # looking for a line like:
- # <tr><td>commit</td><td # class="sha1">aadea67210c8b9e7a57744a1c2845501d2cdbac7</td></tr>
- commit_filter = lambda x: x.startswith('<tr><td>commit</td>')
- commit_line = filter(commit_filter, lines)
-
- # extract the sha1 sum from the commit line
- return str(commit_line).split('>')[4].split('<')[0]
+ # looking for a line like:
+ # <tr><td>commit</td><td # class="sha1">aadea67210c8b9e7a57744a1c2845501d2cdbac7</td></tr>
+ commit_filter = lambda x: x.startswith('<tr><td>commit</td>')
+ commit_line = filter(commit_filter, lines)
+
+ # extract the sha1 sum from the commit line
+ return str(commit_line).split('>')[4].split('<')[0]
-
- def is_out_of_date(self):
- local_head = self.get_local_head()
- remote_head = self.get_remote_head()
-
- # local is out-of-date, pull
- if local_head != remote_head:
- return True
-
- return False
-
-
- def is_repo_initialized(self):
- # if we fail to get a rv of 0 out of the git log command
- # then the repo is bogus
-
- cmd = 'log --max-count=1'
- rv = self.gitcmd(cmd, True)
- if rv.exit_status == 0:
- return True
-
- return False
+
+ def is_out_of_date(self):
+ local_head = self.get_local_head()
+ remote_head = self.get_remote_head()
+
+ # local is out-of-date, pull
+ if local_head != remote_head:
+ return True
+
+ return False
+
+
+ def is_repo_initialized(self):
+ # if we fail to get a rv of 0 out of the git log command
+ # then the repo is bogus
+
+ cmd = 'log --max-count=1'
+ rv = self.gitcmd(cmd, True)
+ if rv.exit_status == 0:
+ return True
+
+ return False
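
A short sketch of how GitRepo is driven; the repository URLs, directories and target host below are placeholders, not values from this change:

    # Hypothetical sketch; URLs, paths and hostname are assumed.
    from autotest_lib.server import git, hosts

    host = hosts.SSHHost("buildbox.example.com")
    repo = git.GitRepo(repodir="/usr/local/src/linux-2.6",
                       giturl="git://git.example.org/linux-2.6.git",
                       weburl="http://git.example.org/?p=linux-2.6.git")
    repo.get()                                      # clone on first use, pull if the web HEAD moved
    repo.install(host, builddir="/usr/local/src")   # push source_material to the host
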
diff --git a/server/git_kernel.py b/server/git_kernel.py
index f9937322..1f4273c1 100644
--- a/server/git_kernel.py
+++ b/server/git_kernel.py
@@ -17,56 +17,56 @@ import git, source_kernel
class GitKernel(git.GitRepo):
- """
- This class represents a git kernel repo.
+ """
+ This class represents a git kernel repo.
- It is used to pull down a local copy of a git repo, check if the local repo
- is up-to-date, if not update and then build the kernel from the git repo.
+ It is used to pull down a local copy of a git repo, check whether the local
+ repo is up-to-date (updating it if not) and then build the kernel from the git repo.
- """
- def __init__(self, repodir, giturl, weburl):
- git.GitRepo.__init__(self, repodir, giturl, weburl)
- self.__patches = []
- self.__config = None
- self.__build = None
+ """
+ def __init__(self, repodir, giturl, weburl):
+ git.GitRepo.__init__(self, repodir, giturl, weburl)
+ self.__patches = []
+ self.__config = None
+ self.__build = None
- def configure(self, config):
- self.__config = config
+ def configure(self, config):
+ self.__config = config
- def patch(self, patch):
- self.__patches.append(patch)
+ def patch(self, patch):
+ self.__patches.append(patch)
- def install(self, host, build=True, builddir=None):
- # use tmpdir if no builddir specified
- # NB: pass a builddir to install() method if you
- # need to ensure the build remains after the completion
- # of a job
- if not builddir:
- self.__build = os.path.join(host.get_tmp_dir(),"build")
- print 'warning: builddir %s is not persistent' %(self.__build)
+ def install(self, host, build=True, builddir=None):
+ # use tmpdir if no builddir specified
+ # NB: pass a builddir to install() method if you
+ # need to ensure the build remains after the completion
+ # of a job
+ if not builddir:
+ self.__build = os.path.join(host.get_tmp_dir(),"build")
+ print 'warning: builddir %s is not persistent' %(self.__build)
- # push source to host for install
- print 'pushing %s to host' %(self.source_material)
- host.send_file(self.source_material, self.__build)
- remote_source_material= os.path.join(self.__build,
- os.path.basename(self.source_material))
+ # push source to host for install
+ print 'pushing %s to host' %(self.source_material)
+ host.send_file(self.source_material, self.__build)
+ remote_source_material= os.path.join(self.__build,
+ os.path.basename(self.source_material))
- # use a source_kernel to configure, patch, build and install.
- sk = source_kernel.SourceKernel(remote_source_material)
+ # use a source_kernel to configure, patch, build and install.
+ sk = source_kernel.SourceKernel(remote_source_material)
- if build:
- # apply patches
- for p in self.__patches:
- sk.patch(p)
+ if build:
+ # apply patches
+ for p in self.__patches:
+ sk.patch(p)
- # configure
- sk.configure(self.__config)
+ # configure
+ sk.configure(self.__config)
- # build
- sk.build(host)
-
- # install
- sk.install(host)
+ # build
+ sk.build(host)
+
+ # install
+ sk.install(host)
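
And the GitKernel specialization, which layers configure/patch/build on top of GitRepo; again, every path, URL and hostname below is a placeholder:

    # Hypothetical sketch; repository details, config and patch paths are assumed.
    from autotest_lib.server import git_kernel, hosts

    host = hosts.SSHHost("kvm-build.example.com")
    k = git_kernel.GitKernel("/usr/local/src/kvm",
                             "git://git.example.org/kvm.git",
                             "http://git.example.org/?p=kvm.git")
    k.get()                                 # clone or update the local repo
    k.configure("/path/to/kernel.config")   # stored now, applied at install time
    k.patch("/path/to/fix.patch")           # may be called once per patch
    k.install(host)                         # push, then patch/configure/build/install via SourceKernel
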
diff --git a/server/hosts/base_classes.py b/server/hosts/base_classes.py
index ddcfcdd5..0f637bdb 100644
--- a/server/hosts/base_classes.py
+++ b/server/hosts/base_classes.py
@@ -8,8 +8,8 @@ This module defines the base classes for the Host hierarchy.
Implementation details:
You should import the "hosts" package instead of importing each type of host.
- Host: a machine on which you can run programs
- RemoteHost: a remote machine on which you can run programs
+ Host: a machine on which you can run programs
+ RemoteHost: a remote machine on which you can run programs
"""
__author__ = """
@@ -26,91 +26,91 @@ import bootloader
class Host(object):
- """
- This class represents a machine on which you can run programs.
+ """
+ This class represents a machine on which you can run programs.
- It may be a local machine, the one autoserv is running on, a remote
- machine or a virtual machine.
+ It may be a local machine, the one autoserv is running on, a remote
+ machine or a virtual machine.
- Implementation details:
- This is an abstract class, leaf subclasses must implement the methods
- listed here. You must not instantiate this class but should
- instantiate one of those leaf subclasses.
- """
+ Implementation details:
+ This is an abstract class, leaf subclasses must implement the methods
+ listed here. You must not instantiate this class but should
+ instantiate one of those leaf subclasses.
+ """
- bootloader = None
+ bootloader = None
- def __init__(self):
- super(Host, self).__init__()
- self.serverdir = utils.get_server_dir()
- self.bootloader= bootloader.Bootloader(self)
- self.env = {}
+ def __init__(self):
+ super(Host, self).__init__()
+ self.serverdir = utils.get_server_dir()
+ self.bootloader= bootloader.Bootloader(self)
+ self.env = {}
- def run(self, command):
- pass
+ def run(self, command):
+ pass
- def reboot(self):
- pass
+ def reboot(self):
+ pass
- def reboot_setup(self):
- pass
+ def reboot_setup(self):
+ pass
- def reboot_followup(self):
- pass
+ def reboot_followup(self):
+ pass
- def get_file(self, source, dest):
- pass
+ def get_file(self, source, dest):
+ pass
- def send_file(self, source, dest):
- pass
+ def send_file(self, source, dest):
+ pass
- def get_tmp_dir(self):
- pass
+ def get_tmp_dir(self):
+ pass
- def is_up(self):
- pass
+ def is_up(self):
+ pass
- def get_wait_up_processes(self):
- """
- Gets the list of local processes to wait for in wait_up.
- """
- get_config = global_config.global_config.get_config_value
- proc_list = get_config("HOSTS", "wait_up_processes",
- default="").strip()
- processes = set(p.strip() for p in proc_list.split(","))
- processes.discard("")
- return processes
+ def get_wait_up_processes(self):
+ """
+ Gets the list of local processes to wait for in wait_up.
+ """
+ get_config = global_config.global_config.get_config_value
+ proc_list = get_config("HOSTS", "wait_up_processes",
+ default="").strip()
+ processes = set(p.strip() for p in proc_list.split(","))
+ processes.discard("")
+ return processes
- def wait_up(self, timeout):
- pass
+ def wait_up(self, timeout):
+ pass
- def wait_down(self, timeout):
- pass
+ def wait_down(self, timeout):
+ pass
- def get_num_cpu(self):
- pass
+ def get_num_cpu(self):
+ pass
- def machine_install(self):
- raise NotImplementedError('Machine install not implemented!')
+ def machine_install(self):
+ raise NotImplementedError('Machine install not implemented!')
- def install(self, installableObject):
- installableObject.install(self)
+ def install(self, installableObject):
+ installableObject.install(self)
- def get_crashdumps(self, test_start_time):
- pass
+ def get_crashdumps(self, test_start_time):
+ pass
- def get_autodir(self):
- return None
+ def get_autodir(self):
+ return None
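
get_wait_up_processes() pulls its list from the wait_up_processes entry in the HOSTS section of the global configuration; its parsing boils down to the following stand-alone snippet (the sample value is an assumption):

    # Stand-alone illustration of the parsing done in get_wait_up_processes().
    proc_list = "getty, sshd"    # as it might appear under [HOSTS] wait_up_processes
    processes = set(p.strip() for p in proc_list.split(","))
    processes.discard("")        # an unset or empty entry yields an empty set
    print processes              # set(['getty', 'sshd'])
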
diff --git a/server/hosts/bootloader.py b/server/hosts/bootloader.py
index 7994a8a5..4fb8d519 100644
--- a/server/hosts/bootloader.py
+++ b/server/hosts/bootloader.py
@@ -5,7 +5,7 @@
"""
This module defines the Bootloader class.
- Bootloader: a program to boot Kernels on a Host.
+ Bootloader: a program to boot Kernels on a Host.
"""
__author__ = """
@@ -26,172 +26,172 @@ BOOTTOOL_SRC = '../client/tools/boottool' # Get it from autotest client
class Bootloader(object):
- """
- This class represents a bootloader.
+ """
+ This class represents a bootloader.
- It can be used to add a kernel to the list of kernels that can be
- booted by a bootloader. It can also make sure that this kernel will
- be the one chosen at next reboot.
- """
+ It can be used to add a kernel to the list of kernels that can be
+ booted by a bootloader. It can also make sure that this kernel will
+ be the one chosen at next reboot.
+ """
- def __init__(self, host, xen_mode=False):
- super(Bootloader, self).__init__()
- self._host = weakref.ref(host)
- self._boottool_path = None
- self.xen_mode = xen_mode
+ def __init__(self, host, xen_mode=False):
+ super(Bootloader, self).__init__()
+ self._host = weakref.ref(host)
+ self._boottool_path = None
+ self.xen_mode = xen_mode
- def get_type(self):
- return self._run_boottool('--bootloader-probe').stdout.strip()
+ def get_type(self):
+ return self._run_boottool('--bootloader-probe').stdout.strip()
- def get_architecture(self):
- return self._run_boottool('--arch-probe').stdout.strip()
+ def get_architecture(self):
+ return self._run_boottool('--arch-probe').stdout.strip()
- def get_titles(self):
- return self._run_boottool('--info all | grep title | '
- 'cut -d " " -f2-').stdout.strip().split('\n')
-
-
- def get_default(self):
- return self._run_boottool('--default').stdout.strip()
-
-
- def _get_info(self, info_id):
- retval = self._run_boottool('--info=%s' % info_id).stdout
-
- results = []
- info = {}
- for line in retval.splitlines():
- if not line.strip():
- if info:
- results.append(info)
- info = {}
- else:
- key, val = line.split(":", 1)
- info[key.strip()] = val.strip()
- if info:
- results.append(info)
-
- return results
-
-
- def get_info(self, index):
- results = self._get_info(index)
- if results:
- return results[0]
- else:
- return {}
-
+ def get_titles(self):
+ return self._run_boottool('--info all | grep title | '
+ 'cut -d " " -f2-').stdout.strip().split('\n')
- def get_all_info(self):
- return self._get_info('all')
+ def get_default(self):
+ return self._run_boottool('--default').stdout.strip()
- def set_default(self, index):
- self._run_boottool('--set-default=%s' % index)
+ def _get_info(self, info_id):
+ retval = self._run_boottool('--info=%s' % info_id).stdout
- # 'kernel' can be a position number or a title
- def add_args(self, kernel, args):
- parameters = '--update-kernel=%s --args="%s"' % (kernel, args)
-
- #add parameter if this is a Xen entry
- if self.xen_mode:
- parameters += ' --xen'
-
- self._run_boottool(parameters)
+ results = []
+ info = {}
+ for line in retval.splitlines():
+ if not line.strip():
+ if info:
+ results.append(info)
+ info = {}
+ else:
+ key, val = line.split(":", 1)
+ info[key.strip()] = val.strip()
+ if info:
+ results.append(info)
+ return results
- def add_xen_hypervisor_args(self, kernel, args):
- self._run_boottool('--xen --update-xenhyper=%s --xha="%s"' \
- % (kernel, args))
+ def get_info(self, index):
+ results = self._get_info(index)
+ if results:
+ return results[0]
+ else:
+ return {}
- def remove_args(self, kernel, args):
- params = '--update-kernel=%s --remove-args="%s"' % (kernel, args)
-
- #add parameter if this is a Xen entry
- if self.xen_mode:
- params += ' --xen'
-
- self._run_boottool(params)
+ def get_all_info(self):
+ return self._get_info('all')
- def remove_xen_hypervisor_args(self, kernel, args):
- self._run_boottool('--xen --update-xenhyper=%s '
- '--remove-args="%s"') % (kernel, args)
+ def set_default(self, index):
+ self._run_boottool('--set-default=%s' % index)
- def add_kernel(self, path, title='autoserv', root=None, args=None,
- initrd=None, xen_hypervisor=None, default=True):
- """
- If an entry with the same title is already present, it will be
- replaced.
- """
- if title in self.get_titles():
- self._run_boottool('--remove-kernel "%s"' % (
- utils.sh_escape(title),))
-
- parameters = '--add-kernel "%s" --title "%s"' % (
- utils.sh_escape(path), utils.sh_escape(title),)
-
- if root:
- parameters += ' --root "%s"' % (utils.sh_escape(root),)
-
- if args:
- parameters += ' --args "%s"' % (utils.sh_escape(args),)
-
- # add an initrd now or forever hold your peace
- if initrd:
- parameters += ' --initrd "%s"' % (
- utils.sh_escape(initrd),)
-
- if default:
- parameters += ' --make-default'
-
- # add parameter if this is a Xen entry
- if self.xen_mode:
- parameters += ' --xen'
- if xen_hypervisor:
- parameters += ' --xenhyper "%s"' % (
- utils.sh_escape(xen_hypervisor),)
-
- self._run_boottool(parameters)
-
-
- def remove_kernel(self, kernel):
- self._run_boottool('--remove-kernel=%s' % kernel)
-
-
- def boot_once(self, title):
- self._run_boottool('--boot-once --title=%s' % title)
-
-
- def install_boottool(self):
- if self._host() is None:
- raise error.AutoservError(
- "Host does not exist anymore")
- tmpdir = self._host().get_tmp_dir()
- self._host().send_file(os.path.abspath(os.path.join(
- utils.get_server_dir(), BOOTTOOL_SRC)), tmpdir)
- self._boottool_path= os.path.join(tmpdir,
- os.path.basename(BOOTTOOL_SRC))
-
-
- def _get_boottool_path(self):
- if not self._boottool_path:
- self.install_boottool()
- return self._boottool_path
-
-
- def _set_boottool_path(self, path):
- self._boottool_path = path
-
-
- boottool_path = property(_get_boottool_path, _set_boottool_path)
-
-
- def _run_boottool(self, cmd):
- return self._host().run(self.boottool_path + ' ' + cmd)
+
+ # 'kernel' can be a position number or a title
+ def add_args(self, kernel, args):
+ parameters = '--update-kernel=%s --args="%s"' % (kernel, args)
+
+ #add parameter if this is a Xen entry
+ if self.xen_mode:
+ parameters += ' --xen'
+
+ self._run_boottool(parameters)
+
+
+ def add_xen_hypervisor_args(self, kernel, args):
+ self._run_boottool('--xen --update-xenhyper=%s --xha="%s"' \
+ % (kernel, args))
+
+
+ def remove_args(self, kernel, args):
+ params = '--update-kernel=%s --remove-args="%s"' % (kernel, args)
+
+ #add parameter if this is a Xen entry
+ if self.xen_mode:
+ params += ' --xen'
+
+ self._run_boottool(params)
+
+
+ def remove_xen_hypervisor_args(self, kernel, args):
+ self._run_boottool('--xen --update-xenhyper=%s '
+ '--remove-args="%s"') % (kernel, args)
+
+
+ def add_kernel(self, path, title='autoserv', root=None, args=None,
+ initrd=None, xen_hypervisor=None, default=True):
+ """
+ If an entry with the same title is already present, it will be
+ replaced.
+ """
+ if title in self.get_titles():
+ self._run_boottool('--remove-kernel "%s"' % (
+ utils.sh_escape(title),))
+
+ parameters = '--add-kernel "%s" --title "%s"' % (
+ utils.sh_escape(path), utils.sh_escape(title),)
+
+ if root:
+ parameters += ' --root "%s"' % (utils.sh_escape(root),)
+
+ if args:
+ parameters += ' --args "%s"' % (utils.sh_escape(args),)
+
+ # add an initrd now or forever hold your peace
+ if initrd:
+ parameters += ' --initrd "%s"' % (
+ utils.sh_escape(initrd),)
+
+ if default:
+ parameters += ' --make-default'
+
+ # add parameter if this is a Xen entry
+ if self.xen_mode:
+ parameters += ' --xen'
+ if xen_hypervisor:
+ parameters += ' --xenhyper "%s"' % (
+ utils.sh_escape(xen_hypervisor),)
+
+ self._run_boottool(parameters)
+
+
+ def remove_kernel(self, kernel):
+ self._run_boottool('--remove-kernel=%s' % kernel)
+
+
+ def boot_once(self, title):
+ self._run_boottool('--boot-once --title=%s' % title)
+
+
+ def install_boottool(self):
+ if self._host() is None:
+ raise error.AutoservError(
+ "Host does not exist anymore")
+ tmpdir = self._host().get_tmp_dir()
+ self._host().send_file(os.path.abspath(os.path.join(
+ utils.get_server_dir(), BOOTTOOL_SRC)), tmpdir)
+ self._boottool_path= os.path.join(tmpdir,
+ os.path.basename(BOOTTOOL_SRC))
+
+
+ def _get_boottool_path(self):
+ if not self._boottool_path:
+ self.install_boottool()
+ return self._boottool_path
+
+
+ def _set_boottool_path(self, path):
+ self._boottool_path = path
+
+
+ boottool_path = property(_get_boottool_path, _set_boottool_path)
+
+
+ def _run_boottool(self, cmd):
+ return self._host().run(self.boottool_path + ' ' + cmd)
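
A sketch of driving the Bootloader through a host; the hostname, kernel, initrd and argument strings are placeholders:

    # Hypothetical sketch; hostname and boot entry details are assumed.
    from autotest_lib.server import hosts

    host = hosts.SSHHost("testbox.example.com")
    b = host.bootloader                    # created in the Host constructor
    print b.get_type()                     # e.g. "grub" or "lilo"
    b.add_kernel("/boot/vmlinuz-2.6.18-autotest", title="autotest",
                 initrd="/boot/initrd.img-2.6.18-autotest",
                 args="console=ttyS0,115200")   # replaces any existing "autotest" entry
    b.boot_once("autotest")                # use this entry for the next boot only
    host.reboot()
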
diff --git a/server/hosts/bootloader_unittest.py b/server/hosts/bootloader_unittest.py
index 9161a3ec..a6f67561 100644
--- a/server/hosts/bootloader_unittest.py
+++ b/server/hosts/bootloader_unittest.py
@@ -10,347 +10,347 @@ from autotest_lib.server.hosts import bootloader, ssh_host
class test_bootloader_install(unittest.TestCase):
- def setUp(self):
- self.god = mock.mock_god()
-
- # mock out get_server_dir
- self.god.stub_function(utils, "get_server_dir")
-
-
- def tearDown(self):
- self.god.unstub_all()
-
-
- def create_mock_sshhost(self):
- # useful for building disposable SSHHost mocks
- return self.god.create_mock_class(ssh_host.SSHHost, "SSHHost")
-
-
- def create_install_boottool_mock(self, loader, dst_dir):
- mock_install_boottool = \
- self.god.create_mock_function("install_boottool")
- def install_boottool():
- loader._boottool_path = dst_dir
- mock_install_boottool()
- loader.install_boottool = install_boottool
- return mock_install_boottool
-
-
- def test_install_fails_without_host(self):
- host = self.create_mock_sshhost()
- loader = bootloader.Bootloader(host)
- del host
- self.assertRaises(error.AutoservError, loader.install_boottool)
-
-
- def test_installs_to_tmpdir(self):
- TMPDIR = "/unittest/tmp"
- SERVERDIR = "/unittest/server"
- BOOTTOOL_SRC = os.path.join(SERVERDIR, bootloader.BOOTTOOL_SRC)
- BOOTTOOL_SRC = os.path.abspath(BOOTTOOL_SRC)
- BOOTTOOL_DST = os.path.join(TMPDIR, "boottool")
- # set up the recording
- host = self.create_mock_sshhost()
- host.get_tmp_dir.expect_call().and_return(TMPDIR)
- utils.get_server_dir.expect_call().and_return(SERVERDIR)
- host.send_file.expect_call(BOOTTOOL_SRC, TMPDIR)
- # run the test
- loader = bootloader.Bootloader(host)
- loader.install_boottool()
- # assert the playback is correct
- self.god.check_playback()
- # assert the final dest is correct
- self.assertEquals(loader.boottool_path, BOOTTOOL_DST)
-
-
- def test_get_path_automatically_installs(self):
- BOOTTOOL_DST = "/unittest/tmp/boottool"
- host = self.create_mock_sshhost()
- loader = bootloader.Bootloader(host)
- # mock out loader.install_boottool
- mock_install = \
- self.create_install_boottool_mock(loader, BOOTTOOL_DST)
- # set up the recording
- mock_install.expect_call()
- # run the test
- self.assertEquals(loader.boottool_path, BOOTTOOL_DST)
- self.god.check_playback()
-
-
- def test_install_is_only_called_once(self):
- BOOTTOOL_DST = "/unittest/tmp/boottool"
- host = self.create_mock_sshhost()
- loader = bootloader.Bootloader(host)
- # mock out loader.install_boottool
- mock_install = \
- self.create_install_boottool_mock(loader, BOOTTOOL_DST)
- # set up the recording
- mock_install.expect_call()
- # run the test
- self.assertEquals(loader.boottool_path, BOOTTOOL_DST)
- self.god.check_playback()
- self.assertEquals(loader.boottool_path, BOOTTOOL_DST)
- self.god.check_playback()
+ def setUp(self):
+ self.god = mock.mock_god()
+
+ # mock out get_server_dir
+ self.god.stub_function(utils, "get_server_dir")
+
+
+ def tearDown(self):
+ self.god.unstub_all()
+
+
+ def create_mock_sshhost(self):
+ # useful for building disposable SSHHost mocks
+ return self.god.create_mock_class(ssh_host.SSHHost, "SSHHost")
+
+
+ def create_install_boottool_mock(self, loader, dst_dir):
+ mock_install_boottool = \
+ self.god.create_mock_function("install_boottool")
+ def install_boottool():
+ loader._boottool_path = dst_dir
+ mock_install_boottool()
+ loader.install_boottool = install_boottool
+ return mock_install_boottool
+
+
+ def test_install_fails_without_host(self):
+ host = self.create_mock_sshhost()
+ loader = bootloader.Bootloader(host)
+ del host
+ self.assertRaises(error.AutoservError, loader.install_boottool)
+
+
+ def test_installs_to_tmpdir(self):
+ TMPDIR = "/unittest/tmp"
+ SERVERDIR = "/unittest/server"
+ BOOTTOOL_SRC = os.path.join(SERVERDIR, bootloader.BOOTTOOL_SRC)
+ BOOTTOOL_SRC = os.path.abspath(BOOTTOOL_SRC)
+ BOOTTOOL_DST = os.path.join(TMPDIR, "boottool")
+ # set up the recording
+ host = self.create_mock_sshhost()
+ host.get_tmp_dir.expect_call().and_return(TMPDIR)
+ utils.get_server_dir.expect_call().and_return(SERVERDIR)
+ host.send_file.expect_call(BOOTTOOL_SRC, TMPDIR)
+ # run the test
+ loader = bootloader.Bootloader(host)
+ loader.install_boottool()
+ # assert the playback is correct
+ self.god.check_playback()
+ # assert the final dest is correct
+ self.assertEquals(loader.boottool_path, BOOTTOOL_DST)
+
+
+ def test_get_path_automatically_installs(self):
+ BOOTTOOL_DST = "/unittest/tmp/boottool"
+ host = self.create_mock_sshhost()
+ loader = bootloader.Bootloader(host)
+ # mock out loader.install_boottool
+ mock_install = \
+ self.create_install_boottool_mock(loader, BOOTTOOL_DST)
+ # set up the recording
+ mock_install.expect_call()
+ # run the test
+ self.assertEquals(loader.boottool_path, BOOTTOOL_DST)
+ self.god.check_playback()
+
+
+ def test_install_is_only_called_once(self):
+ BOOTTOOL_DST = "/unittest/tmp/boottool"
+ host = self.create_mock_sshhost()
+ loader = bootloader.Bootloader(host)
+ # mock out loader.install_boottool
+ mock_install = \
+ self.create_install_boottool_mock(loader, BOOTTOOL_DST)
+ # set up the recording
+ mock_install.expect_call()
+ # run the test
+ self.assertEquals(loader.boottool_path, BOOTTOOL_DST)
+ self.god.check_playback()
+ self.assertEquals(loader.boottool_path, BOOTTOOL_DST)
+ self.god.check_playback()
class test_bootloader_methods(unittest.TestCase):
- def setUp(self):
- self.god = mock.mock_god()
- self.host = self.god.create_mock_class(ssh_host.SSHHost,
- "SSHHost")
- # creates a bootloader with _run_boottool mocked out
- self.loader = bootloader.Bootloader(self.host)
- self.god.stub_function(self.loader, "_run_boottool")
-
-
- def tearDown(self):
- self.god.unstub_all()
-
-
- def expect_run_boottool(self, arg, result):
- result = common_utils.CmdResult(stdout=result, exit_status=0)
- self.loader._run_boottool.expect_call(arg).and_return(result)
-
-
- def test_get_type(self):
- # set up the recording
- self.expect_run_boottool("--bootloader-probe", "lilo\n")
- # run the test
- self.assertEquals(self.loader.get_type(), "lilo")
- self.god.check_playback()
-
-
- def test_get_arch(self):
- # set up the recording
- self.expect_run_boottool("--arch-probe", "x86_64\n")
- # run the test
- self.assertEquals(self.loader.get_architecture(), "x86_64")
- self.god.check_playback()
-
-
- def test_get_default(self):
- # set up the recording
- self.expect_run_boottool("--default", "0\n")
- # run the test
- self.assertEquals(self.loader.get_default(), "0")
- self.god.check_playback()
-
-
- def test_get_titles(self):
- # set up the recording
- self.expect_run_boottool(mock.regex_comparator(
- r"^--info all \|"), "title #1\ntitle #2\n")
- # run the test
- self.assertEquals(self.loader.get_titles(),
- ["title #1", "title #2"])
- self.god.check_playback()
-
-
- def test_get_info_single_result(self):
- RESULT = (
- "index\t: 5\n"
- "args\t: ro single\n"
- "boot\t: (hd0,0)\n"
- "initrd\t: /boot/initrd.img-2.6.15-23-386\n"
- "kernel\t: /boot/vmlinuz-2.6.15-23-386\n"
- "root\t: UUID=07D7-0714\n"
- "savedefault\t: \n"
- "title\t: Distro, kernel 2.6.15-23-386\n"
- )
- # set up the recording
- self.expect_run_boottool("--info=5", RESULT)
- # run the test
- info = self.loader.get_info(5)
- self.god.check_playback()
- expected_info = {"index": "5", "args": "ro single",
- "boot": "(hd0,0)",
- "initrd": "/boot/initrd.img-2.6.15-23-386",
- "kernel": "/boot/vmlinuz-2.6.15-23-386",
- "root": "UUID=07D7-0714", "savedefault": "",
- "title": "Distro, kernel 2.6.15-23-386"}
- self.assertEquals(expected_info, info)
-
-
- def test_get_info_missing_result(self):
- # set up the recording
- self.expect_run_boottool("--info=4", "")
- # run the test
- info = self.loader.get_info(4)
- self.god.check_playback()
- self.assertEquals({}, info)
-
-
- def test_get_info_multiple_results(self):
- RESULT = (
- "index\t: 5\n"
- "args\t: ro single\n"
- "boot\t: (hd0,0)\n"
- "initrd\t: /boot/initrd.img-2.6.15-23-386\n"
- "kernel\t: /boot/vmlinuz-2.6.15-23-386\n"
- "root\t: UUID=07D7-0714\n"
- "savedefault\t: \n"
- "title\t: Distro, kernel 2.6.15-23-386\n"
- "\n"
- "index\t: 7\n"
- "args\t: ro single\n"
- "boot\t: (hd0,0)\n"
- "initrd\t: /boot/initrd.img-2.6.15-23-686\n"
- "kernel\t: /boot/vmlinuz-2.6.15-23-686\n"
- "root\t: UUID=07D7-0714\n"
- "savedefault\t: \n"
- "title\t: Distro, kernel 2.6.15-23-686\n"
- )
- # set up the recording
- self.expect_run_boottool("--info=all", RESULT)
- # run the test
- info = self.loader.get_all_info()
- self.god.check_playback()
- expected_info = [{"index": "5", "args": "ro single",
- "boot": "(hd0,0)",
- "initrd": "/boot/initrd.img-2.6.15-23-386",
- "kernel": "/boot/vmlinuz-2.6.15-23-386",
- "root": "UUID=07D7-0714", "savedefault": "",
- "title": "Distro, kernel 2.6.15-23-386"},
- {"index": "7", "args": "ro single",
- "boot": "(hd0,0)",
- "initrd": "/boot/initrd.img-2.6.15-23-686",
- "kernel": "/boot/vmlinuz-2.6.15-23-686",
- "root": "UUID=07D7-0714", "savedefault": "",
- "title": "Distro, kernel 2.6.15-23-686"}]
- self.assertEquals(expected_info, info)
-
-
- def test_set_default(self):
- # set up the recording
- self.loader._run_boottool.expect_call("--set-default=41")
- # run the test
- self.loader.set_default(41)
- self.god.check_playback()
-
-
- def test_add_args(self):
- # set up the recording
- self.loader._run_boottool.expect_call(
- "--update-kernel=10 --args=\"some kernel args\"")
- # run the test
- self.loader.add_args(10, "some kernel args")
- self.god.check_playback()
-
-
- def test_remove_args(self):
- # set up the recording
- self.loader._run_boottool.expect_call(
- "--update-kernel=12 --remove-args=\"some kernel args\"")
- # run the test
- self.loader.remove_args(12, "some kernel args")
- self.god.check_playback()
-
-
- def test_add_kernel_basic(self):
- self.loader.get_titles = self.god.create_mock_function(
- "get_titles")
- # set up the recording
- self.loader.get_titles.expect_call().and_return(["notmylabel"])
- self.loader._run_boottool.expect_call(
- "--add-kernel \"/unittest/kernels/vmlinuz\" "
- "--title \"mylabel\" --make-default")
- # run the test
- self.loader.add_kernel("/unittest/kernels/vmlinuz",
- "mylabel")
- self.god.check_playback()
-
-
- def test_add_kernel_adds_root(self):
- self.loader.get_titles = self.god.create_mock_function(
- "get_titles")
- # set up the recording
- self.loader.get_titles.expect_call().and_return(["notmylabel"])
- self.loader._run_boottool.expect_call(
- "--add-kernel \"/unittest/kernels/vmlinuz\" "
- "--title \"mylabel\" --root \"/unittest/root\" "
- "--make-default")
- # run the test
- self.loader.add_kernel("/unittest/kernels/vmlinuz",
- "mylabel", root="/unittest/root")
- self.god.check_playback()
-
-
- def test_add_kernel_adds_args(self):
- self.loader.get_titles = self.god.create_mock_function(
- "get_titles")
- # set up the recording
- self.loader.get_titles.expect_call().and_return(["notmylabel"])
- self.loader._run_boottool.expect_call(
- "--add-kernel \"/unittest/kernels/vmlinuz\" "
- "--title \"mylabel\" --args \"my kernel args\" "
- "--make-default")
- # run the test
- self.loader.add_kernel("/unittest/kernels/vmlinuz",
- "mylabel", args="my kernel args")
- self.god.check_playback()
-
-
- def test_add_kernel_adds_initrd(self):
- self.loader.get_titles = self.god.create_mock_function(
- "get_titles")
- # set up the recording
- self.loader.get_titles.expect_call().and_return(["notmylabel"])
- self.loader._run_boottool.expect_call(
- "--add-kernel \"/unittest/kernels/vmlinuz\" "
- "--title \"mylabel\" --initrd \"/unittest/initrd\" "
- "--make-default")
- # run the test
- self.loader.add_kernel("/unittest/kernels/vmlinuz",
- "mylabel", initrd="/unittest/initrd")
- self.god.check_playback()
-
-
- def test_add_kernel_disables_make_default(self):
- self.loader.get_titles = self.god.create_mock_function(
- "get_titles")
- # set up the recording
- self.loader.get_titles.expect_call().and_return(["notmylabel"])
- self.loader._run_boottool.expect_call(
- "--add-kernel \"/unittest/kernels/vmlinuz\" "
- "--title \"mylabel\"")
- # run the test
- self.loader.add_kernel("/unittest/kernels/vmlinuz",
- "mylabel", default=False)
- self.god.check_playback()
-
-
- def test_add_kernel_removes_old(self):
- self.loader.get_titles = self.god.create_mock_function(
- "get_titles")
- # set up the recording
- self.loader.get_titles.expect_call().and_return(["mylabel"])
- self.loader._run_boottool.expect_call(
- "--remove-kernel \"mylabel\"")
- self.loader._run_boottool.expect_call(
- "--add-kernel \"/unittest/kernels/vmlinuz\" "
- "--title \"mylabel\" --make-default")
- # run the test
- self.loader.add_kernel("/unittest/kernels/vmlinuz",
- "mylabel")
- self.god.check_playback()
-
-
- def test_remove_kernel(self):
- # set up the recording
- self.loader._run_boottool.expect_call("--remove-kernel=14")
- # run the test
- self.loader.remove_kernel(14)
- self.god.check_playback()
-
-
- def test_boot_once(self):
- # set up the recording
- self.loader._run_boottool.expect_call(
- "--boot-once --title=autotest")
- # run the test
- self.loader.boot_once("autotest")
- self.god.check_playback()
+ def setUp(self):
+ self.god = mock.mock_god()
+ self.host = self.god.create_mock_class(ssh_host.SSHHost,
+ "SSHHost")
+ # creates a bootloader with _run_boottool mocked out
+ self.loader = bootloader.Bootloader(self.host)
+ self.god.stub_function(self.loader, "_run_boottool")
+
+
+ def tearDown(self):
+ self.god.unstub_all()
+
+
+ def expect_run_boottool(self, arg, result):
+ result = common_utils.CmdResult(stdout=result, exit_status=0)
+ self.loader._run_boottool.expect_call(arg).and_return(result)
+
+
+ def test_get_type(self):
+ # set up the recording
+ self.expect_run_boottool("--bootloader-probe", "lilo\n")
+ # run the test
+ self.assertEquals(self.loader.get_type(), "lilo")
+ self.god.check_playback()
+
+
+ def test_get_arch(self):
+ # set up the recording
+ self.expect_run_boottool("--arch-probe", "x86_64\n")
+ # run the test
+ self.assertEquals(self.loader.get_architecture(), "x86_64")
+ self.god.check_playback()
+
+
+ def test_get_default(self):
+ # set up the recording
+ self.expect_run_boottool("--default", "0\n")
+ # run the test
+ self.assertEquals(self.loader.get_default(), "0")
+ self.god.check_playback()
+
+
+ def test_get_titles(self):
+ # set up the recording
+ self.expect_run_boottool(mock.regex_comparator(
+ r"^--info all \|"), "title #1\ntitle #2\n")
+ # run the test
+ self.assertEquals(self.loader.get_titles(),
+ ["title #1", "title #2"])
+ self.god.check_playback()
+
+
+ def test_get_info_single_result(self):
+ RESULT = (
+ "index\t: 5\n"
+ "args\t: ro single\n"
+ "boot\t: (hd0,0)\n"
+ "initrd\t: /boot/initrd.img-2.6.15-23-386\n"
+ "kernel\t: /boot/vmlinuz-2.6.15-23-386\n"
+ "root\t: UUID=07D7-0714\n"
+ "savedefault\t: \n"
+ "title\t: Distro, kernel 2.6.15-23-386\n"
+ )
+ # set up the recording
+ self.expect_run_boottool("--info=5", RESULT)
+ # run the test
+ info = self.loader.get_info(5)
+ self.god.check_playback()
+ expected_info = {"index": "5", "args": "ro single",
+ "boot": "(hd0,0)",
+ "initrd": "/boot/initrd.img-2.6.15-23-386",
+ "kernel": "/boot/vmlinuz-2.6.15-23-386",
+ "root": "UUID=07D7-0714", "savedefault": "",
+ "title": "Distro, kernel 2.6.15-23-386"}
+ self.assertEquals(expected_info, info)
+
+
+ def test_get_info_missing_result(self):
+ # set up the recording
+ self.expect_run_boottool("--info=4", "")
+ # run the test
+ info = self.loader.get_info(4)
+ self.god.check_playback()
+ self.assertEquals({}, info)
+
+
+ def test_get_info_multiple_results(self):
+ RESULT = (
+ "index\t: 5\n"
+ "args\t: ro single\n"
+ "boot\t: (hd0,0)\n"
+ "initrd\t: /boot/initrd.img-2.6.15-23-386\n"
+ "kernel\t: /boot/vmlinuz-2.6.15-23-386\n"
+ "root\t: UUID=07D7-0714\n"
+ "savedefault\t: \n"
+ "title\t: Distro, kernel 2.6.15-23-386\n"
+ "\n"
+ "index\t: 7\n"
+ "args\t: ro single\n"
+ "boot\t: (hd0,0)\n"
+ "initrd\t: /boot/initrd.img-2.6.15-23-686\n"
+ "kernel\t: /boot/vmlinuz-2.6.15-23-686\n"
+ "root\t: UUID=07D7-0714\n"
+ "savedefault\t: \n"
+ "title\t: Distro, kernel 2.6.15-23-686\n"
+ )
+ # set up the recording
+ self.expect_run_boottool("--info=all", RESULT)
+ # run the test
+ info = self.loader.get_all_info()
+ self.god.check_playback()
+ expected_info = [{"index": "5", "args": "ro single",
+ "boot": "(hd0,0)",
+ "initrd": "/boot/initrd.img-2.6.15-23-386",
+ "kernel": "/boot/vmlinuz-2.6.15-23-386",
+ "root": "UUID=07D7-0714", "savedefault": "",
+ "title": "Distro, kernel 2.6.15-23-386"},
+ {"index": "7", "args": "ro single",
+ "boot": "(hd0,0)",
+ "initrd": "/boot/initrd.img-2.6.15-23-686",
+ "kernel": "/boot/vmlinuz-2.6.15-23-686",
+ "root": "UUID=07D7-0714", "savedefault": "",
+ "title": "Distro, kernel 2.6.15-23-686"}]
+ self.assertEquals(expected_info, info)
+
+
+ def test_set_default(self):
+ # set up the recording
+ self.loader._run_boottool.expect_call("--set-default=41")
+ # run the test
+ self.loader.set_default(41)
+ self.god.check_playback()
+
+
+ def test_add_args(self):
+ # set up the recording
+ self.loader._run_boottool.expect_call(
+ "--update-kernel=10 --args=\"some kernel args\"")
+ # run the test
+ self.loader.add_args(10, "some kernel args")
+ self.god.check_playback()
+
+
+ def test_remove_args(self):
+ # set up the recording
+ self.loader._run_boottool.expect_call(
+ "--update-kernel=12 --remove-args=\"some kernel args\"")
+ # run the test
+ self.loader.remove_args(12, "some kernel args")
+ self.god.check_playback()
+
+
+ def test_add_kernel_basic(self):
+ self.loader.get_titles = self.god.create_mock_function(
+ "get_titles")
+ # set up the recording
+ self.loader.get_titles.expect_call().and_return(["notmylabel"])
+ self.loader._run_boottool.expect_call(
+ "--add-kernel \"/unittest/kernels/vmlinuz\" "
+ "--title \"mylabel\" --make-default")
+ # run the test
+ self.loader.add_kernel("/unittest/kernels/vmlinuz",
+ "mylabel")
+ self.god.check_playback()
+
+
+ def test_add_kernel_adds_root(self):
+ self.loader.get_titles = self.god.create_mock_function(
+ "get_titles")
+ # set up the recording
+ self.loader.get_titles.expect_call().and_return(["notmylabel"])
+ self.loader._run_boottool.expect_call(
+ "--add-kernel \"/unittest/kernels/vmlinuz\" "
+ "--title \"mylabel\" --root \"/unittest/root\" "
+ "--make-default")
+ # run the test
+ self.loader.add_kernel("/unittest/kernels/vmlinuz",
+ "mylabel", root="/unittest/root")
+ self.god.check_playback()
+
+
+ def test_add_kernel_adds_args(self):
+ self.loader.get_titles = self.god.create_mock_function(
+ "get_titles")
+ # set up the recording
+ self.loader.get_titles.expect_call().and_return(["notmylabel"])
+ self.loader._run_boottool.expect_call(
+ "--add-kernel \"/unittest/kernels/vmlinuz\" "
+ "--title \"mylabel\" --args \"my kernel args\" "
+ "--make-default")
+ # run the test
+ self.loader.add_kernel("/unittest/kernels/vmlinuz",
+ "mylabel", args="my kernel args")
+ self.god.check_playback()
+
+
+ def test_add_kernel_adds_initrd(self):
+ self.loader.get_titles = self.god.create_mock_function(
+ "get_titles")
+ # set up the recording
+ self.loader.get_titles.expect_call().and_return(["notmylabel"])
+ self.loader._run_boottool.expect_call(
+ "--add-kernel \"/unittest/kernels/vmlinuz\" "
+ "--title \"mylabel\" --initrd \"/unittest/initrd\" "
+ "--make-default")
+ # run the test
+ self.loader.add_kernel("/unittest/kernels/vmlinuz",
+ "mylabel", initrd="/unittest/initrd")
+ self.god.check_playback()
+
+
+ def test_add_kernel_disables_make_default(self):
+ self.loader.get_titles = self.god.create_mock_function(
+ "get_titles")
+ # set up the recording
+ self.loader.get_titles.expect_call().and_return(["notmylabel"])
+ self.loader._run_boottool.expect_call(
+ "--add-kernel \"/unittest/kernels/vmlinuz\" "
+ "--title \"mylabel\"")
+ # run the test
+ self.loader.add_kernel("/unittest/kernels/vmlinuz",
+ "mylabel", default=False)
+ self.god.check_playback()
+
+
+ def test_add_kernel_removes_old(self):
+ self.loader.get_titles = self.god.create_mock_function(
+ "get_titles")
+ # set up the recording
+ self.loader.get_titles.expect_call().and_return(["mylabel"])
+ self.loader._run_boottool.expect_call(
+ "--remove-kernel \"mylabel\"")
+ self.loader._run_boottool.expect_call(
+ "--add-kernel \"/unittest/kernels/vmlinuz\" "
+ "--title \"mylabel\" --make-default")
+ # run the test
+ self.loader.add_kernel("/unittest/kernels/vmlinuz",
+ "mylabel")
+ self.god.check_playback()
+
+
+ def test_remove_kernel(self):
+ # set up the recording
+ self.loader._run_boottool.expect_call("--remove-kernel=14")
+ # run the test
+ self.loader.remove_kernel(14)
+ self.god.check_playback()
+
+
+ def test_boot_once(self):
+ # set up the recording
+ self.loader._run_boottool.expect_call(
+ "--boot-once --title=autotest")
+ # run the test
+ self.loader.boot_once("autotest")
+ self.god.check_playback()
if __name__ == "__main__":
- unittest.main()
+ unittest.main()
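
All of the tests above use the same record/playback idiom from the mock test utilities; stripped to its core it looks like this (the expected return value is a placeholder):

    # Hypothetical minimal example of the record/playback pattern used above.
    from autotest_lib.client.common_lib.test_utils import mock
    from autotest_lib.server.hosts import ssh_host

    god = mock.mock_god()
    host = god.create_mock_class(ssh_host.SSHHost, "SSHHost")

    # record: declare the calls we expect and what they return
    host.get_tmp_dir.expect_call().and_return("/unittest/tmp")

    # playback: exercise the code under test (here, just the call itself)
    assert host.get_tmp_dir() == "/unittest/tmp"

    # verify the recorded calls all happened, in order, then clean up
    god.check_playback()
    god.unstub_all()
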
diff --git a/server/hosts/guest.py b/server/hosts/guest.py
index 0d8270c7..8fe6bc2b 100644
--- a/server/hosts/guest.py
+++ b/server/hosts/guest.py
@@ -8,7 +8,7 @@ This module defines the Guest class in the Host hierarchy.
Implementation details:
You should import the "hosts" package instead of importing each type of host.
- Guest: a virtual machine on which you can run programs
+ Guest: a virtual machine on which you can run programs
"""
__author__ = """
@@ -22,49 +22,49 @@ import ssh_host
class Guest(ssh_host.SSHHost):
- """
- This class represents a virtual machine on which you can run
- programs.
-
- It is not the machine autoserv is running on.
-
- Implementation details:
- This is an abstract class, leaf subclasses must implement the methods
- listed here and in parent classes which have no implementation. They
- may reimplement methods which already have an implementation. You
- must not instantiate this class but should instantiate one of those
- leaf subclasses.
- """
-
- controlling_hypervisor = None
-
-
- def __init__(self, controlling_hypervisor):
- """
- Construct a Guest object
-
- Args:
- controlling_hypervisor: Hypervisor object that is
- responsible for the creation and management of
- this guest
- """
- hostname= controlling_hypervisor.new_guest()
- super(Guest, self).__init__(hostname)
- self.controlling_hypervisor= controlling_hypervisor
-
-
- def __del__(self):
- """
- Destroy a Guest object
- """
- self.controlling_hypervisor.delete_guest(self.hostname)
-
-
- def hardreset(self, timeout=600, wait=True):
- """
- Perform a "hardreset" of the guest.
-
- It is restarted through the hypervisor. That will restart it
- even if the guest otherwise innaccessible through ssh.
- """
- return self.controlling_hypervisor.reset_guest(self.hostname)
+ """
+ This class represents a virtual machine on which you can run
+ programs.
+
+ It is not the machine autoserv is running on.
+
+ Implementation details:
+ This is an abstract class, leaf subclasses must implement the methods
+ listed here and in parent classes which have no implementation. They
+ may reimplement methods which already have an implementation. You
+ must not instantiate this class but should instantiate one of those
+ leaf subclasses.
+ """
+
+ controlling_hypervisor = None
+
+
+ def __init__(self, controlling_hypervisor):
+ """
+ Construct a Guest object
+
+ Args:
+ controlling_hypervisor: Hypervisor object that is
+ responsible for the creation and management of
+ this guest
+ """
+ hostname= controlling_hypervisor.new_guest()
+ super(Guest, self).__init__(hostname)
+ self.controlling_hypervisor= controlling_hypervisor
+
+
+ def __del__(self):
+ """
+ Destroy a Guest object
+ """
+ self.controlling_hypervisor.delete_guest(self.hostname)
+
+
+ def hardreset(self, timeout=600, wait=True):
+ """
+ Perform a "hardreset" of the guest.
+
+ It is restarted through the hypervisor. That will restart it
+ even if the guest is otherwise inaccessible through ssh.
+ """
+ return self.controlling_hypervisor.reset_guest(self.hostname)
diff --git a/server/hosts/kvm_guest.py b/server/hosts/kvm_guest.py
index a676cbae..ae0acc86 100644
--- a/server/hosts/kvm_guest.py
+++ b/server/hosts/kvm_guest.py
@@ -8,7 +8,7 @@ This module defines the Host class.
Implementation details:
You should import the "hosts" package instead of importing each type of host.
- KVMGuest: a KVM virtual machine on which you can run programs
+ KVMGuest: a KVM virtual machine on which you can run programs
"""
__author__ = """
@@ -22,26 +22,26 @@ import guest
class KVMGuest(guest.Guest):
- """This class represents a KVM virtual machine on which you can run
- programs.
-
- Implementation details:
- This is a leaf class in an abstract class hierarchy, it must
- implement the unimplemented methods in parent classes.
- """
-
- def __init__(self, controlling_hypervisor, qemu_options):
- """
- Construct a KVMGuest object
-
- Args:
- controlling_hypervisor: hypervisor object that is
- responsible for the creation and management of
- this guest
- qemu_options: options to pass to qemu, these should be
- appropriately shell escaped, if need be.
- """
- hostname= controlling_hypervisor.new_guest(qemu_options)
- # bypass Guest's __init__
- super(guest.Guest, self).__init__(hostname)
- self.controlling_hypervisor= controlling_hypervisor
+ """This class represents a KVM virtual machine on which you can run
+ programs.
+
+ Implementation details:
+ This is a leaf class in an abstract class hierarchy, it must
+ implement the unimplemented methods in parent classes.
+ """
+
+ def __init__(self, controlling_hypervisor, qemu_options):
+ """
+ Construct a KVMGuest object
+
+ Args:
+ controlling_hypervisor: hypervisor object that is
+ responsible for the creation and management of
+ this guest
+ qemu_options: options to pass to qemu; these should be
+ appropriately shell-escaped, if need be.
+ """
+ hostname= controlling_hypervisor.new_guest(qemu_options)
+ # bypass Guest's __init__
+ super(guest.Guest, self).__init__(hostname)
+ self.controlling_hypervisor= controlling_hypervisor
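
A sketch of bringing up a KVMGuest; the hypervisor object is assumed to be constructed elsewhere and to provide the new_guest()/delete_guest()/reset_guest() calls that Guest and KVMGuest rely on:

    # Hypothetical sketch; 'hypervisor' is an assumed, already-built hypervisor object.
    from autotest_lib.server.hosts import kvm_guest

    g = kvm_guest.KVMGuest(hypervisor, '-m 512 -smp 2')   # qemu options, already shell-escaped
    g.wait_up(timeout=600)     # inherited SSHHost behaviour once the guest is reachable
    g.run('uname -a')
    g.hardreset()              # reset through the hypervisor, not through ssh
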
diff --git a/server/hosts/remote.py b/server/hosts/remote.py
index 9f5e9550..84bb4001 100644
--- a/server/hosts/remote.py
+++ b/server/hosts/remote.py
@@ -1,32 +1,32 @@
"""This class defines the Remote host class, mixing in the SiteHost class
if it is available."""
-# site_host.py may be non-existant or empty, make sure that an appropriate
+# site_host.py may be non-existent or empty, make sure that an appropriate
# SiteHost class is created nevertheless
try:
- from site_host import SiteHost
+ from site_host import SiteHost
except ImportError:
- import base_classes
- class SiteHost(base_classes.Host):
- def __init__(self):
- super(SiteHost, self).__init__()
+ import base_classes
+ class SiteHost(base_classes.Host):
+ def __init__(self):
+ super(SiteHost, self).__init__()
class RemoteHost(SiteHost):
- """This class represents a remote machine on which you can run
- programs.
+ """This class represents a remote machine on which you can run
+ programs.
- It may be accessed through a network, a serial line, ...
- It is not the machine autoserv is running on.
+ It may be accessed through a network, a serial line, ...
+ It is not the machine autoserv is running on.
- Implementation details:
- This is an abstract class, leaf subclasses must implement the methods
- listed here and in parent classes which have no implementation. They
- may reimplement methods which already have an implementation. You
- must not instantiate this class but should instantiate one of those
- leaf subclasses."""
+ Implementation details:
+ This is an abstract class, leaf subclasses must implement the methods
+ listed here and in parent classes which have no implementation. They
+ may reimplement methods which already have an implementation. You
+ must not instantiate this class but should instantiate one of those
+ leaf subclasses."""
- hostname= None
+ hostname= None
- def __init__(self):
- super(RemoteHost, self).__init__()
+ def __init__(self):
+ super(RemoteHost, self).__init__()
diff --git a/server/hosts/site_host.py b/server/hosts/site_host.py
index 4be6a4d5..c0b6693b 100644
--- a/server/hosts/site_host.py
+++ b/server/hosts/site_host.py
@@ -11,7 +11,7 @@ base_classes.py contains logic to provision for this.
Implementation details:
You should import the "hosts" package instead of importing each type of host.
- SiteHost: Host containing site-specific customizations.
+ SiteHost: Host containing site-specific customizations.
"""
__author__ = """
@@ -24,14 +24,14 @@ stutsman@google.com (Ryan Stutsman)
import base_classes, utils
class SiteHost(base_classes.Host):
- """
- Custom host to containing site-specific methods or attributes.
- """
-
- def __init__(self):
- super(SiteHost, self).__init__()
- self.serverdir = utils.get_server_dir()
-
-
- def setup(self):
- return
+ """
+ Custom host containing site-specific methods or attributes.
+ """
+
+ def __init__(self):
+ super(SiteHost, self).__init__()
+ self.serverdir = utils.get_server_dir()
+
+
+ def setup(self):
+ return
diff --git a/server/hosts/ssh_host.py b/server/hosts/ssh_host.py
index 378d048e..c4ce4abd 100644
--- a/server/hosts/ssh_host.py
+++ b/server/hosts/ssh_host.py
@@ -8,7 +8,7 @@ This module defines the SSHHost class.
Implementation details:
You should import the "hosts" package instead of importing each type of host.
- SSHHost: a remote machine with a ssh access
+ SSHHost: a remote machine with a ssh access
"""
__author__ = """
@@ -26,869 +26,869 @@ import remote, bootloader
class PermissionDeniedError(error.AutoservRunError):
- pass
+ pass
class SSHHost(remote.RemoteHost):
- """
- This class represents a remote machine controlled through an ssh
- session on which you can run programs.
-
- It is not the machine autoserv is running on. The machine must be
- configured for password-less login, for example through public key
- authentication.
-
- It includes support for controlling the machine through a serial
- console on which you can run programs. If such a serial console is
- set up on the machine then capabilities such as hard reset and
- boot strap monitoring are available. If the machine does not have a
- serial console available then ordinary SSH-based commands will
- still be available, but attempts to use extensions such as
- console logging or hard reset will fail silently.
-
- Implementation details:
- This is a leaf class in an abstract class hierarchy, it must
- implement the unimplemented methods in parent classes.
- """
-
- DEFAULT_REBOOT_TIMEOUT = 1800
- job = None
-
- def __init__(self, hostname, user="root", port=22, initialize=True,
- conmux_log="console.log",
- conmux_server=None, conmux_attach=None,
- netconsole_log=None, netconsole_port=6666, autodir=None,
- password=''):
- """
- Construct a SSHHost object
-
- Args:
- hostname: network hostname or address of remote machine
- user: user to log in as on the remote machine
- port: port the ssh daemon is listening on on the remote
- machine
- """
- self.hostname= hostname
- self.user= user
- self.port= port
- self.tmp_dirs= []
- self.initialize = initialize
- self.autodir = autodir
- self.password = password
-
- super(SSHHost, self).__init__()
-
- self.conmux_server = conmux_server
- if conmux_attach:
- self.conmux_attach = conmux_attach
- else:
- self.conmux_attach = os.path.abspath(os.path.join(
- self.serverdir, '..',
- 'conmux', 'conmux-attach'))
- self.logger_popen = None
- self.warning_stream = None
- self.__start_console_log(conmux_log)
-
- self.bootloader = bootloader.Bootloader(self)
-
- self.__netconsole_param = ""
- self.netlogger_popen = None
- if netconsole_log:
- self.__init_netconsole_params(netconsole_port)
- self.__start_netconsole_log(netconsole_log, netconsole_port)
- self.__load_netconsole_module()
-
-
- @staticmethod
- def __kill(popen):
- return_code = popen.poll()
- if return_code is None:
- try:
- os.kill(popen.pid, signal.SIGTERM)
- except OSError:
- pass
-
-
- def __del__(self):
- """
- Destroy a SSHHost object
- """
- for dir in self.tmp_dirs:
- try:
- self.run('rm -rf "%s"' % (utils.sh_escape(dir)))
- except error.AutoservRunError:
- pass
- # kill the console logger
- if getattr(self, 'logger_popen', None):
- self.__kill(self.logger_popen)
- if self.job:
- self.job.warning_loggers.discard(
- self.warning_stream)
- self.warning_stream.close()
- # kill the netconsole logger
- if getattr(self, 'netlogger_popen', None):
- self.__unload_netconsole_module()
- self.__kill(self.netlogger_popen)
-
-
- def __init_netconsole_params(self, port):
- """
- Connect to the remote machine and determine the values to use for the
- required netconsole parameters.
- """
- # PROBLEM: on machines with multiple IPs this may not make any sense
- # It also doesn't work with IPv6
- remote_ip = socket.gethostbyname(self.hostname)
- local_ip = socket.gethostbyname(socket.gethostname())
- # Get the gateway of the remote machine
- try:
- traceroute = self.run('traceroute -n %s' % local_ip)
- except error.AutoservRunError:
- return
- first_node = traceroute.stdout.split("\n")[0]
- match = re.search(r'\s+((\d+\.){3}\d+)\s+', first_node)
- if match:
- router_ip = match.group(1)
- else:
- return
- # Look up the MAC address of the gateway
- try:
- self.run('ping -c 1 %s' % router_ip)
- arp = self.run('arp -n -a %s' % router_ip)
- except error.AutoservRunError:
- return
- match = re.search(r'\s+(([0-9A-F]{2}:){5}[0-9A-F]{2})\s+', arp.stdout)
- if match:
- gateway_mac = match.group(1)
- else:
- return
- self.__netconsole_param = 'netconsole=@%s/,%s@%s/%s' % (remote_ip,
- port,
- local_ip,
- gateway_mac)
-
-
- def __start_netconsole_log(self, logfilename, port):
- """
- Log the output of netconsole to a specified file
- """
- if logfilename == None:
- return
- cmd = ['nc', '-u', '-l', '-p', str(port)]
- logfile = open(logfilename, 'a', 0)
- self.netlogger_popen = subprocess.Popen(cmd, stdout=logfile)
-
-
- def __load_netconsole_module(self):
- """
- Make a best effort to load the netconsole module.
-
- Note that loading the module can fail even when the remote machine is
- working correctly if netconsole is already compiled into the kernel
- and started.
- """
- if not self.__netconsole_param:
- return
- try:
- self.run('modprobe netconsole %s' % self.__netconsole_param)
- except error.AutoservRunError:
- # if it fails there isn't much we can do, just keep going
- pass
-
-
- def __unload_netconsole_module(self):
- try:
- self.run('modprobe -r netconsole')
- except error.AutoservRunError:
- pass
-
-
- def wait_for_restart(self, timeout=DEFAULT_REBOOT_TIMEOUT):
- if not self.wait_down(300): # Make sure he's dead, Jim
- self.__record("ABORT", None, "reboot.verify", "shutdown failed")
- raise error.AutoservRebootError(
- "Host did not shut down")
- self.wait_up(timeout)
- time.sleep(2) # this is needed for complete reliability
- if self.wait_up(timeout):
- self.__record("GOOD", None, "reboot.verify")
- else:
- self.__record("ABORT", None, "reboot.verify", "Host did not return from reboot")
- raise error.AutoservRebootError(
- "Host did not return from reboot")
- print "Reboot complete"
-
-
- def hardreset(self, timeout=DEFAULT_REBOOT_TIMEOUT, wait=True,
- conmux_command='hardreset'):
- """
- Reach out and slap the box in the power switch.
- Args:
- conmux_command: The command to run via the conmux interface
- timeout: timelimit in seconds before the machine is considered unreachable
- wait: Whether or not to wait for the machine to reboot
-
- """
- conmux_command = r"'~$%s'" % conmux_command
- if not self.__console_run(conmux_command):
- self.__record("ABORT", None, "reboot.start", "hard reset unavailable")
- raise error.AutoservUnsupportedError(
- 'Hard reset unavailable')
-
- if wait:
- self.wait_for_restart(timeout)
- self.__record("GOOD", None, "reboot.start", "hard reset")
-
-
- def __conmux_hostname(self):
- if self.conmux_server:
- return '%s/%s' % (self.conmux_server, self.hostname)
- else:
- return self.hostname
-
-
- def __start_console_log(self, logfilename):
- """
- Log the output of the console session to a specified file
- """
- if logfilename == None:
- return
- if not self.conmux_attach or not os.path.exists(self.conmux_attach):
- return
-
- r, w = os.pipe()
- script_path = os.path.join(self.serverdir,
- 'warning_monitor.py')
- cmd = [self.conmux_attach, self.__conmux_hostname(),
- '%s %s %s %d' % (sys.executable, script_path,
- logfilename, w)]
- dev_null = open(os.devnull, 'w')
-
- self.warning_stream = os.fdopen(r, 'r', 0)
- if self.job:
- self.job.warning_loggers.add(self.warning_stream)
- self.logger_popen = subprocess.Popen(cmd, stderr=dev_null)
- os.close(w)
-
-
- def __console_run(self, cmd):
- """
- Send a command to the conmux session
- """
- if not self.conmux_attach or not os.path.exists(self.conmux_attach):
- return False
- cmd = '%s %s echo %s 2> /dev/null' % (self.conmux_attach,
- self.__conmux_hostname(),
- cmd)
- result = utils.system(cmd, ignore_status=True)
- return result == 0
-
-
- def __run_reboot_group(self, reboot_func):
- if self.job:
- self.job.run_reboot(reboot_func, self.get_kernel_ver)
- else:
- reboot_func()
-
-
- def __record(self, status_code, subdir, operation, status = ''):
- if self.job:
- self.job.record(status_code, subdir, operation, status)
- else:
- if not subdir:
- subdir = "----"
- msg = "%s\t%s\t%s\t%s" % (status_code, subdir, operation, status)
- sys.stderr.write(msg + "\n")
-
-
- def ssh_base_command(self, connect_timeout=30):
- SSH_BASE_COMMAND = '/usr/bin/ssh -a -x -o ' + \
- 'BatchMode=yes -o ConnectTimeout=%d ' + \
- '-o ServerAliveInterval=300'
- assert isinstance(connect_timeout, (int, long))
- assert connect_timeout > 0 # can't disable the timeout
- return SSH_BASE_COMMAND % connect_timeout
-
-
- def ssh_command(self, connect_timeout=30, options=''):
- """Construct an ssh command with proper args for this host."""
- ssh = self.ssh_base_command(connect_timeout)
- return r'%s %s -l %s -p %d %s' % (ssh,
- options,
- self.user,
- self.port,
- self.hostname)
-
-
- def _run(self, command, timeout, ignore_status, stdout, stderr,
- connect_timeout, env, options):
- """Helper function for run()."""
-
- ssh_cmd = self.ssh_command(connect_timeout, options)
- echo_cmd = 'echo Connected. >&2'
- full_cmd = '%s "%s;%s %s"' % (ssh_cmd, echo_cmd, env,
- utils.sh_escape(command))
- result = utils.run(full_cmd, timeout, True, stdout, stderr)
-
- # The error messages will show up in band (indistinguishable
- # from stuff sent through the SSH connection), so we have the
- # remote computer echo the message "Connected." before running
- # any command. Since the following 2 errors have to do with
- # connecting, it's safe to do these checks.
- if result.exit_status == 255:
- if re.search(r'^ssh: connect to host .* port .*: '
- r'Connection timed out\r$', result.stderr):
- raise error.AutoservSSHTimeout("ssh timed out",
- result)
- if result.stderr == "Permission denied.\r\n":
- msg = "ssh permission denied"
- raise PermissionDeniedError(msg, result)
-
- if not ignore_status and result.exit_status > 0:
- raise error.AutoservRunError("command execution error",
- result)
-
- return result
-
-
- def run(self, command, timeout=3600, ignore_status=False,
- stdout_tee=None, stderr_tee=None, connect_timeout=30):
- """
- Run a command on the remote host.
-
- Args:
- command: the command line string
- timeout: time limit in seconds before attempting to
- kill the running process. The run() function
- will take a few seconds longer than 'timeout'
- to complete if it has to kill the process.
- ignore_status: do not raise an exception, no matter
- what the exit code of the command is.
-
- Returns:
- a hosts.base_classes.CmdResult object
-
- Raises:
- AutoservRunError: the exit code of the command
- execution was not 0
- AutoservSSHTimeout: ssh connection has timed out
- """
- stdout = stdout_tee or sys.stdout
- stderr = stderr_tee or sys.stdout
- print "ssh: %s" % command
- env = " ".join("=".join(pair) for pair in self.env.iteritems())
- try:
- try:
- return self._run(command, timeout,
- ignore_status, stdout,
- stderr, connect_timeout,
- env, '')
- except PermissionDeniedError:
- print("Permission denied to ssh; re-running"
- "with increased logging:")
- return self._run(command, timeout,
- ignore_status, stdout,
- stderr, connect_timeout,
- env, '-v -v -v')
- except error.CmdError, cmderr:
- # We get a CmdError here only if there is timeout of
- # that command. Catch that and stuff it into
- # AutoservRunError and raise it.
- raise error.AutoservRunError(cmderr.args[0],
- cmderr.args[1])
-
-
- def run_short(self, command, **kwargs):
- """
- Calls the run() command with a short default timeout.
-
- Args:
- Takes the same arguments as does run(),
- with the exception of the timeout argument which
- here is fixed at 60 seconds.
- It returns the result of run.
- """
- return self.run(command, timeout=60, **kwargs)
-
-
- def run_grep(self, command, timeout=30, ignore_status=False,
- stdout_ok_regexp=None, stdout_err_regexp=None,
- stderr_ok_regexp=None, stderr_err_regexp=None,
- connect_timeout=30):
- """
- Run a command on the remote host and look for regexp
- in stdout or stderr to determine if the command was
- successul or not.
-
- Args:
- command: the command line string
- timeout: time limit in seconds before attempting to
- kill the running process. The run() function
- will take a few seconds longer than 'timeout'
- to complete if it has to kill the process.
- ignore_status: do not raise an exception, no matter
- what the exit code of the command is.
- stdout_ok_regexp: regexp that should be in stdout
- if the command was successul.
- stdout_err_regexp: regexp that should be in stdout
- if the command failed.
- stderr_ok_regexp: regexp that should be in stderr
- if the command was successul.
- stderr_err_regexp: regexp that should be in stderr
- if the command failed.
-
- Returns:
- if the command was successul, raises an exception
- otherwise.
-
- Raises:
- AutoservRunError:
- - the exit code of the command execution was not 0.
- - If stderr_err_regexp is found in stderr,
- - If stdout_err_regexp is found in stdout,
- - If stderr_ok_regexp is not found in stderr.
- - If stdout_ok_regexp is not found in stdout,
- """
-
- # We ignore the status, because we will handle it at the end.
- result = self.run(command, timeout, ignore_status=True,
- connect_timeout=connect_timeout)
-
- # Look for the patterns, in order
- for (regexp, stream) in ((stderr_err_regexp, result.stderr),
- (stdout_err_regexp, result.stdout)):
- if regexp and stream:
- err_re = re.compile (regexp)
- if err_re.search(stream):
- raise error.AutoservRunError(
- '%s failed, found error pattern: '
- '"%s"' % (command, regexp), result)
-
- for (regexp, stream) in ((stderr_ok_regexp, result.stderr),
- (stdout_ok_regexp, result.stdout)):
- if regexp and stream:
- ok_re = re.compile (regexp)
- if ok_re.search(stream):
- if ok_re.search(stream):
- return
-
- if not ignore_status and result.exit_status > 0:
- raise error.AutoservRunError("command execution error",
- result)
-
-
- def reboot(self, timeout=DEFAULT_REBOOT_TIMEOUT, label=None,
- kernel_args=None, wait=True):
- """
- Reboot the remote host.
-
- Args:
- timeout
- """
- self.reboot_setup()
-
- # forcibly include the "netconsole" kernel arg
- if self.__netconsole_param:
- if kernel_args is None:
- kernel_args = self.__netconsole_param
- else:
- kernel_args += " " + self.__netconsole_param
- # unload the (possibly loaded) module to avoid shutdown issues
- self.__unload_netconsole_module()
- if label or kernel_args:
- self.bootloader.install_boottool()
- if label:
- self.bootloader.set_default(label)
- if kernel_args:
- if not label:
- default = int(self.bootloader.get_default())
- label = self.bootloader.get_titles()[default]
- self.bootloader.add_args(label, kernel_args)
-
- # define a function for the reboot and run it in a group
- print "Reboot: initiating reboot"
- def reboot():
- self.__record("GOOD", None, "reboot.start")
- try:
- self.run('(sleep 5; reboot) '
- '</dev/null >/dev/null 2>&1 &')
- except error.AutoservRunError:
- self.__record("ABORT", None, "reboot.start",
- "reboot command failed")
- raise
- if wait:
- self.wait_for_restart(timeout)
- self.reboot_followup()
- self.__run_reboot_group(reboot)
-
-
- def reboot_followup(self):
- super(SSHHost, self).reboot_followup()
- self.__load_netconsole_module() # if the builtin fails
-
-
- def __copy_files(self, sources, dest):
- """
- Copy files from one machine to another.
-
- This is for internal use by other methods that intend to move
- files between machines. It expects a list of source files and
- a destination (a filename if the source is a single file, a
- destination otherwise). The names must already be
- pre-processed into the appropriate rsync/scp friendly
- format (%s@%s:%s).
- """
- # wait until there are only a small number of copies running
- # before starting this one
- get_config = global_config.global_config.get_config_value
- max_simultaneous = get_config("HOSTS",
- "max_simultaneous_file_copies",
- type=int)
- while True:
- copy_count = 0
- procs = utils.system_output('ps -ef')
- for line in procs.splitlines():
- if 'rsync ' in line or 'scp ' in line:
- copy_count += 1
- if copy_count < max_simultaneous:
- break
- time.sleep(60)
-
- print '__copy_files: copying %s to %s' % (sources, dest)
- try:
- utils.run('rsync --rsh="%s" -az %s %s' % (
- self.ssh_base_command(), ' '.join(sources), dest))
- except Exception:
- try:
- utils.run('scp -rpq -P %d %s "%s"' % (
- self.port, ' '.join(sources), dest))
- except error.CmdError, cmderr:
- raise error.AutoservRunError(cmderr.args[0],
- cmderr.args[1])
-
- def get_file(self, source, dest):
- """
- Copy files from the remote host to a local path.
-
- Directories will be copied recursively.
- If a source component is a directory with a trailing slash,
- the content of the directory will be copied, otherwise, the
- directory itself and its content will be copied. This
- behavior is similar to that of the program 'rsync'.
-
- Args:
- source: either
- 1) a single file or directory, as a string
- 2) a list of one or more (possibly mixed)
- files or directories
- dest: a file or a directory (if source contains a
- directory or more than one element, you must
- supply a directory dest)
-
- Raises:
- AutoservRunError: the scp command failed
- """
- if isinstance(source, types.StringTypes):
- source= [source]
-
- processed_source= []
- for entry in source:
- if entry.endswith('/'):
- format_string= '%s@%s:"%s*"'
- else:
- format_string= '%s@%s:"%s"'
- entry= format_string % (self.user, self.hostname,
- utils.scp_remote_escape(entry))
- processed_source.append(entry)
-
- processed_dest= os.path.abspath(dest)
- if os.path.isdir(dest):
- processed_dest= "%s/" % (utils.sh_escape(processed_dest),)
- else:
- processed_dest= utils.sh_escape(processed_dest)
-
- self.__copy_files(processed_source, processed_dest)
-
-
- def send_file(self, source, dest):
- """
- Copy files from a local path to the remote host.
-
- Directories will be copied recursively.
- If a source component is a directory with a trailing slash,
- the content of the directory will be copied, otherwise, the
- directory itself and its content will be copied. This
- behavior is similar to that of the program 'rsync'.
-
- Args:
- source: either
- 1) a single file or directory, as a string
- 2) a list of one or more (possibly mixed)
- files or directories
- dest: a file or a directory (if source contains a
- directory or more than one element, you must
- supply a directory dest)
-
- Raises:
- AutoservRunError: the scp command failed
- """
- if isinstance(source, types.StringTypes):
- source= [source]
-
- processed_source= []
- for entry in source:
- if entry.endswith('/'):
- format_string= '"%s/"*'
- else:
- format_string= '"%s"'
- entry= format_string % (utils.sh_escape(os.path.abspath(entry)),)
- processed_source.append(entry)
-
- remote_dest = '%s@%s:"%s"' % (
- self.user, self.hostname,
- utils.scp_remote_escape(dest))
-
- self.__copy_files(processed_source, remote_dest)
- self.run('find "%s" -type d | xargs -i -r chmod o+rx "{}"' % dest)
- self.run('find "%s" -type f | xargs -i -r chmod o+r "{}"' % dest)
-
- def get_tmp_dir(self):
- """
- Return the pathname of a directory on the host suitable
- for temporary file storage.
-
- The directory and its content will be deleted automatically
- on the destruction of the Host object that was used to obtain
- it.
- """
- dir_name= self.run("mktemp -d /tmp/autoserv-XXXXXX").stdout.rstrip(" \n")
- self.tmp_dirs.append(dir_name)
- return dir_name
-
-
- def is_up(self):
- """
- Check if the remote host is up.
-
- Returns:
- True if the remote host is up, False otherwise
- """
- try:
- self.ssh_ping()
- except:
- return False
- return True
-
-
- def _is_wait_up_process_up(self):
- """
- Checks if any SSHHOST waitup processes are running yet on the
- remote host.
-
- Returns True if any the waitup processes are running, False
- otherwise.
- """
- processes = self.get_wait_up_processes()
- if len(processes) == 0:
- return True # wait up processes aren't being used
- for procname in processes:
- exit_status = self.run("ps -e | grep '%s'" % procname,
- ignore_status=True).exit_status
- if exit_status == 0:
- return True
- return False
-
-
- def wait_up(self, timeout=None):
- """
- Wait until the remote host is up or the timeout expires.
-
- In fact, it will wait until an ssh connection to the remote
- host can be established, and getty is running.
-
- Args:
- timeout: time limit in seconds before returning even
- if the host is not up.
-
- Returns:
- True if the host was found to be up, False otherwise
- """
- if timeout:
- end_time= time.time() + timeout
-
- while not timeout or time.time() < end_time:
- try:
- self.ssh_ping()
- except (error.AutoservRunError,
- error.AutoservSSHTimeout):
- pass
- else:
- try:
- if self._is_wait_up_process_up():
- return True
- except (error.AutoservRunError,
- error.AutoservSSHTimeout):
- pass
- time.sleep(1)
-
- return False
-
-
- def wait_down(self, timeout=None):
- """
- Wait until the remote host is down or the timeout expires.
-
- In fact, it will wait until an ssh connection to the remote
- host fails.
-
- Args:
- timeout: time limit in seconds before returning even
- if the host is not up.
-
- Returns:
- True if the host was found to be down, False otherwise
- """
- if timeout:
- end_time= time.time() + timeout
-
- while not timeout or time.time() < end_time:
- try:
- self.ssh_ping()
- except:
- return True
- time.sleep(1)
-
- return False
-
-
- def ensure_up(self):
- """
- Ensure the host is up if it is not then do not proceed;
- this prevents cacading failures of tests
- """
- print 'Ensuring that %s is up before continuing' % self.hostname
- if hasattr(self, 'hardreset') and not self.wait_up(300):
- print "Performing a hardreset on %s" % self.hostname
- try:
- self.hardreset()
- except error.AutoservUnsupportedError:
- print "Hardreset is unsupported on %s" % self.hostname
- if not self.wait_up(60 * 30):
- # 30 minutes should be more than enough
- raise error.AutoservHostError
- print 'Host up, continuing'
-
-
- def get_num_cpu(self):
- """
- Get the number of CPUs in the host according to
- /proc/cpuinfo.
-
- Returns:
- The number of CPUs
- """
-
- proc_cpuinfo = self.run("cat /proc/cpuinfo",
- stdout_tee=open('/dev/null', 'w')).stdout
- cpus = 0
- for line in proc_cpuinfo.splitlines():
- if line.startswith('processor'):
- cpus += 1
- return cpus
-
-
- def check_uptime(self):
- """
- Check that uptime is available and monotonically increasing.
- """
- if not self.ping():
- raise error.AutoservHostError('Client is not pingable')
- result = self.run("/bin/cat /proc/uptime", 30)
- return result.stdout.strip().split()[0]
-
-
- def get_arch(self):
- """
- Get the hardware architecture of the remote machine
- """
- arch = self.run('/bin/uname -m').stdout.rstrip()
- if re.match(r'i\d86$', arch):
- arch = 'i386'
- return arch
-
-
- def get_kernel_ver(self):
- """
- Get the kernel version of the remote machine
- """
- return self.run('/bin/uname -r').stdout.rstrip()
-
-
- def get_cmdline(self):
- """
- Get the kernel command line of the remote machine
- """
- return self.run('cat /proc/cmdline').stdout.rstrip()
-
-
- def ping(self):
- """
- Ping the remote system, and return whether it's available
- """
- fpingcmd = "%s -q %s" % ('/usr/bin/fping', self.hostname)
- rc = utils.system(fpingcmd, ignore_status = 1)
- return (rc == 0)
-
-
- def ssh_ping(self, timeout = 60):
- try:
- self.run('true', timeout = timeout, connect_timeout = timeout)
- except error.AutoservSSHTimeout:
- msg = "ssh ping timed out. timeout = %s" % timeout
- raise error.AutoservSSHTimeout(msg)
- except error.AutoservRunError, exc:
- msg = "command true failed in ssh ping"
- raise error.AutoservRunError(msg, exc.args[1])
-
-
- def get_autodir(self):
- return self.autodir
-
-
- def ssh_setup_key(self):
- try:
- print 'Performing ssh key setup on %s:%d as %s' % \
- (self.hostname, self.port, self.user)
-
- host = pxssh.pxssh()
- host.login(self.hostname, self.user, self.password,
- port=self.port)
-
- try:
- public_key = utils.get_public_key()
-
- host.sendline('mkdir -p ~/.ssh')
- host.prompt()
- host.sendline('chmod 700 ~/.ssh')
- host.prompt()
- host.sendline("echo '%s' >> ~/.ssh/authorized_keys; " %
- (public_key))
- host.prompt()
- host.sendline('chmod 600 ~/.ssh/authorized_keys')
- host.prompt()
-
- print 'SSH key setup complete'
-
- finally:
- host.logout()
-
- except:
- pass
-
-
- def setup(self):
- if not self.password == '':
- try:
- self.ssh_ping()
- except error.AutoservRunError:
- self.ssh_setup_key()
+ """
+ This class represents a remote machine controlled through an ssh
+ session on which you can run programs.
+
+ It is not the machine autoserv is running on. The machine must be
+ configured for password-less login, for example through public key
+ authentication.
+
+ It includes support for controlling the machine through a serial
+ console on which you can run programs. If such a serial console is
+    set up on the machine, then capabilities such as hard reset and
+    bootstrap monitoring are available. If the machine does not have a
+ serial console available then ordinary SSH-based commands will
+ still be available, but attempts to use extensions such as
+ console logging or hard reset will fail silently.
+
+ Implementation details:
+        This is a leaf class in an abstract class hierarchy; it must
+ implement the unimplemented methods in parent classes.
+ """
+
+ DEFAULT_REBOOT_TIMEOUT = 1800
+ job = None
+
+ def __init__(self, hostname, user="root", port=22, initialize=True,
+ conmux_log="console.log",
+ conmux_server=None, conmux_attach=None,
+ netconsole_log=None, netconsole_port=6666, autodir=None,
+ password=''):
+ """
+ Construct a SSHHost object
+
+ Args:
+ hostname: network hostname or address of remote machine
+ user: user to log in as on the remote machine
+ port: port the ssh daemon is listening on on the remote
+ machine
+ """
+ self.hostname= hostname
+ self.user= user
+ self.port= port
+ self.tmp_dirs= []
+ self.initialize = initialize
+ self.autodir = autodir
+ self.password = password
+
+ super(SSHHost, self).__init__()
+
+ self.conmux_server = conmux_server
+ if conmux_attach:
+ self.conmux_attach = conmux_attach
+ else:
+ self.conmux_attach = os.path.abspath(os.path.join(
+ self.serverdir, '..',
+ 'conmux', 'conmux-attach'))
+ self.logger_popen = None
+ self.warning_stream = None
+ self.__start_console_log(conmux_log)
+
+ self.bootloader = bootloader.Bootloader(self)
+
+ self.__netconsole_param = ""
+ self.netlogger_popen = None
+ if netconsole_log:
+ self.__init_netconsole_params(netconsole_port)
+ self.__start_netconsole_log(netconsole_log, netconsole_port)
+ self.__load_netconsole_module()
+
+
+ @staticmethod
+ def __kill(popen):
+ return_code = popen.poll()
+ if return_code is None:
+ try:
+ os.kill(popen.pid, signal.SIGTERM)
+ except OSError:
+ pass
+
+
+ def __del__(self):
+ """
+ Destroy a SSHHost object
+ """
+ for dir in self.tmp_dirs:
+ try:
+ self.run('rm -rf "%s"' % (utils.sh_escape(dir)))
+ except error.AutoservRunError:
+ pass
+ # kill the console logger
+ if getattr(self, 'logger_popen', None):
+ self.__kill(self.logger_popen)
+ if self.job:
+ self.job.warning_loggers.discard(
+ self.warning_stream)
+ self.warning_stream.close()
+ # kill the netconsole logger
+ if getattr(self, 'netlogger_popen', None):
+ self.__unload_netconsole_module()
+ self.__kill(self.netlogger_popen)
+
+
+ def __init_netconsole_params(self, port):
+ """
+ Connect to the remote machine and determine the values to use for the
+ required netconsole parameters.
+ """
+ # PROBLEM: on machines with multiple IPs this may not make any sense
+ # It also doesn't work with IPv6
+ remote_ip = socket.gethostbyname(self.hostname)
+ local_ip = socket.gethostbyname(socket.gethostname())
+ # Get the gateway of the remote machine
+ try:
+ traceroute = self.run('traceroute -n %s' % local_ip)
+ except error.AutoservRunError:
+ return
+ first_node = traceroute.stdout.split("\n")[0]
+ match = re.search(r'\s+((\d+\.){3}\d+)\s+', first_node)
+ if match:
+ router_ip = match.group(1)
+ else:
+ return
+ # Look up the MAC address of the gateway
+ try:
+ self.run('ping -c 1 %s' % router_ip)
+ arp = self.run('arp -n -a %s' % router_ip)
+ except error.AutoservRunError:
+ return
+ match = re.search(r'\s+(([0-9A-F]{2}:){5}[0-9A-F]{2})\s+', arp.stdout)
+ if match:
+ gateway_mac = match.group(1)
+ else:
+ return
+ self.__netconsole_param = 'netconsole=@%s/,%s@%s/%s' % (remote_ip,
+ port,
+ local_ip,
+ gateway_mac)
+
+
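The parameter assembled above follows the kernel's netconsole= syntax. A small sketch with made-up addresses (none of the values below come from a real run):

    remote_ip = '10.0.0.20'             # machine under test (illustrative)
    local_ip = '10.0.0.10'              # autoserv machine (illustrative)
    gateway_mac = '00:16:3e:00:00:01'   # MAC of the remote machine's gateway (illustrative)
    port = 6666                         # the default netconsole_port
    print 'netconsole=@%s/,%s@%s/%s' % (remote_ip, port, local_ip, gateway_mac)
    # -> netconsole=@10.0.0.20/,6666@10.0.0.10/00:16:3e:00:00:01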
+ def __start_netconsole_log(self, logfilename, port):
+ """
+ Log the output of netconsole to a specified file
+ """
+ if logfilename == None:
+ return
+ cmd = ['nc', '-u', '-l', '-p', str(port)]
+ logfile = open(logfilename, 'a', 0)
+ self.netlogger_popen = subprocess.Popen(cmd, stdout=logfile)
+
+
+ def __load_netconsole_module(self):
+ """
+ Make a best effort to load the netconsole module.
+
+ Note that loading the module can fail even when the remote machine is
+ working correctly if netconsole is already compiled into the kernel
+ and started.
+ """
+ if not self.__netconsole_param:
+ return
+ try:
+ self.run('modprobe netconsole %s' % self.__netconsole_param)
+ except error.AutoservRunError:
+ # if it fails there isn't much we can do, just keep going
+ pass
+
+
+ def __unload_netconsole_module(self):
+ try:
+ self.run('modprobe -r netconsole')
+ except error.AutoservRunError:
+ pass
+
+
+ def wait_for_restart(self, timeout=DEFAULT_REBOOT_TIMEOUT):
+ if not self.wait_down(300): # Make sure he's dead, Jim
+ self.__record("ABORT", None, "reboot.verify", "shutdown failed")
+ raise error.AutoservRebootError(
+ "Host did not shut down")
+ self.wait_up(timeout)
+ time.sleep(2) # this is needed for complete reliability
+ if self.wait_up(timeout):
+ self.__record("GOOD", None, "reboot.verify")
+ else:
+ self.__record("ABORT", None, "reboot.verify", "Host did not return from reboot")
+ raise error.AutoservRebootError(
+ "Host did not return from reboot")
+ print "Reboot complete"
+
+
+ def hardreset(self, timeout=DEFAULT_REBOOT_TIMEOUT, wait=True,
+ conmux_command='hardreset'):
+ """
+ Reach out and slap the box in the power switch.
+ Args:
+ conmux_command: The command to run via the conmux interface
+            timeout: time limit in seconds before the machine is considered unreachable
+ wait: Whether or not to wait for the machine to reboot
+
+ """
+ conmux_command = r"'~$%s'" % conmux_command
+ if not self.__console_run(conmux_command):
+ self.__record("ABORT", None, "reboot.start", "hard reset unavailable")
+ raise error.AutoservUnsupportedError(
+ 'Hard reset unavailable')
+
+ if wait:
+ self.wait_for_restart(timeout)
+ self.__record("GOOD", None, "reboot.start", "hard reset")
+
+
+ def __conmux_hostname(self):
+ if self.conmux_server:
+ return '%s/%s' % (self.conmux_server, self.hostname)
+ else:
+ return self.hostname
+
+
+ def __start_console_log(self, logfilename):
+ """
+ Log the output of the console session to a specified file
+ """
+ if logfilename == None:
+ return
+ if not self.conmux_attach or not os.path.exists(self.conmux_attach):
+ return
+
+ r, w = os.pipe()
+ script_path = os.path.join(self.serverdir,
+ 'warning_monitor.py')
+ cmd = [self.conmux_attach, self.__conmux_hostname(),
+ '%s %s %s %d' % (sys.executable, script_path,
+ logfilename, w)]
+ dev_null = open(os.devnull, 'w')
+
+ self.warning_stream = os.fdopen(r, 'r', 0)
+ if self.job:
+ self.job.warning_loggers.add(self.warning_stream)
+ self.logger_popen = subprocess.Popen(cmd, stderr=dev_null)
+ os.close(w)
+
+
+ def __console_run(self, cmd):
+ """
+ Send a command to the conmux session
+ """
+ if not self.conmux_attach or not os.path.exists(self.conmux_attach):
+ return False
+ cmd = '%s %s echo %s 2> /dev/null' % (self.conmux_attach,
+ self.__conmux_hostname(),
+ cmd)
+ result = utils.system(cmd, ignore_status=True)
+ return result == 0
+
+
+ def __run_reboot_group(self, reboot_func):
+ if self.job:
+ self.job.run_reboot(reboot_func, self.get_kernel_ver)
+ else:
+ reboot_func()
+
+
+ def __record(self, status_code, subdir, operation, status = ''):
+ if self.job:
+ self.job.record(status_code, subdir, operation, status)
+ else:
+ if not subdir:
+ subdir = "----"
+ msg = "%s\t%s\t%s\t%s" % (status_code, subdir, operation, status)
+ sys.stderr.write(msg + "\n")
+
+
+ def ssh_base_command(self, connect_timeout=30):
+ SSH_BASE_COMMAND = '/usr/bin/ssh -a -x -o ' + \
+ 'BatchMode=yes -o ConnectTimeout=%d ' + \
+ '-o ServerAliveInterval=300'
+ assert isinstance(connect_timeout, (int, long))
+ assert connect_timeout > 0 # can't disable the timeout
+ return SSH_BASE_COMMAND % connect_timeout
+
+
+ def ssh_command(self, connect_timeout=30, options=''):
+ """Construct an ssh command with proper args for this host."""
+ ssh = self.ssh_base_command(connect_timeout)
+ return r'%s %s -l %s -p %d %s' % (ssh,
+ options,
+ self.user,
+ self.port,
+ self.hostname)
+
+
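For illustration, with the defaults above and a hypothetical host named "mytestbox", ssh_command() evaluates to roughly the string shown in this comment sketch:

    # host = hosts.SSHHost('mytestbox')     # hypothetical instance
    # host.ssh_command()  ->
    #   '/usr/bin/ssh -a -x -o BatchMode=yes -o ConnectTimeout=30 '
    #   '-o ServerAliveInterval=300  -l root -p 22 mytestbox'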
+ def _run(self, command, timeout, ignore_status, stdout, stderr,
+ connect_timeout, env, options):
+ """Helper function for run()."""
+
+ ssh_cmd = self.ssh_command(connect_timeout, options)
+ echo_cmd = 'echo Connected. >&2'
+ full_cmd = '%s "%s;%s %s"' % (ssh_cmd, echo_cmd, env,
+ utils.sh_escape(command))
+ result = utils.run(full_cmd, timeout, True, stdout, stderr)
+
+ # The error messages will show up in band (indistinguishable
+ # from stuff sent through the SSH connection), so we have the
+ # remote computer echo the message "Connected." before running
+ # any command. Since the following 2 errors have to do with
+ # connecting, it's safe to do these checks.
+ if result.exit_status == 255:
+ if re.search(r'^ssh: connect to host .* port .*: '
+ r'Connection timed out\r$', result.stderr):
+ raise error.AutoservSSHTimeout("ssh timed out",
+ result)
+ if result.stderr == "Permission denied.\r\n":
+ msg = "ssh permission denied"
+ raise PermissionDeniedError(msg, result)
+
+ if not ignore_status and result.exit_status > 0:
+ raise error.AutoservRunError("command execution error",
+ result)
+
+ return result
+
+
+ def run(self, command, timeout=3600, ignore_status=False,
+ stdout_tee=None, stderr_tee=None, connect_timeout=30):
+ """
+ Run a command on the remote host.
+
+ Args:
+ command: the command line string
+ timeout: time limit in seconds before attempting to
+ kill the running process. The run() function
+ will take a few seconds longer than 'timeout'
+ to complete if it has to kill the process.
+ ignore_status: do not raise an exception, no matter
+ what the exit code of the command is.
+
+ Returns:
+ a hosts.base_classes.CmdResult object
+
+ Raises:
+ AutoservRunError: the exit code of the command
+ execution was not 0
+ AutoservSSHTimeout: ssh connection has timed out
+ """
+ stdout = stdout_tee or sys.stdout
+ stderr = stderr_tee or sys.stdout
+ print "ssh: %s" % command
+ env = " ".join("=".join(pair) for pair in self.env.iteritems())
+ try:
+ try:
+ return self._run(command, timeout,
+ ignore_status, stdout,
+ stderr, connect_timeout,
+ env, '')
+ except PermissionDeniedError:
+                print("Permission denied to ssh; re-running "
+                    "with increased logging:")
+ return self._run(command, timeout,
+ ignore_status, stdout,
+ stderr, connect_timeout,
+ env, '-v -v -v')
+ except error.CmdError, cmderr:
+            # We get a CmdError here only if the command timed out.
+            # Catch that, wrap it in an AutoservRunError and re-raise.
+ raise error.AutoservRunError(cmderr.args[0],
+ cmderr.args[1])
+
+
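A minimal usage sketch of run(), assuming the autotest server directory is on sys.path and that "mytestbox.example.com" (a hypothetical hostname) allows password-less root ssh:

    import hosts                                    # the package recommended by the module docstring
    host = hosts.SSHHost('mytestbox.example.com')   # hypothetical machine
    result = host.run('uname -r')                   # raises AutoservRunError on failure
    print result.stdout.strip()                     # kernel version of the remote box
    result = host.run('false', ignore_status=True)  # no exception, just a result
    print result.exit_status                        # non-zero exit code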
+ def run_short(self, command, **kwargs):
+ """
+ Calls the run() command with a short default timeout.
+
+ Args:
+ Takes the same arguments as does run(),
+ with the exception of the timeout argument which
+ here is fixed at 60 seconds.
+ It returns the result of run.
+ """
+ return self.run(command, timeout=60, **kwargs)
+
+
+ def run_grep(self, command, timeout=30, ignore_status=False,
+ stdout_ok_regexp=None, stdout_err_regexp=None,
+ stderr_ok_regexp=None, stderr_err_regexp=None,
+ connect_timeout=30):
+ """
+ Run a command on the remote host and look for regexp
+ in stdout or stderr to determine if the command was
+        successful or not.
+
+ Args:
+ command: the command line string
+ timeout: time limit in seconds before attempting to
+ kill the running process. The run() function
+ will take a few seconds longer than 'timeout'
+ to complete if it has to kill the process.
+ ignore_status: do not raise an exception, no matter
+ what the exit code of the command is.
+ stdout_ok_regexp: regexp that should be in stdout
+                if the command was successful.
+ stdout_err_regexp: regexp that should be in stdout
+ if the command failed.
+ stderr_ok_regexp: regexp that should be in stderr
+                if the command was successful.
+ stderr_err_regexp: regexp that should be in stderr
+ if the command failed.
+
+ Returns:
+            None if the command was successful; raises an exception
+            otherwise.
+
+ Raises:
+ AutoservRunError:
+            - the exit code of the command execution was not 0
+            - stderr_err_regexp is found in stderr
+            - stdout_err_regexp is found in stdout
+            - stderr_ok_regexp is not found in stderr
+            - stdout_ok_regexp is not found in stdout
+ """
+
+ # We ignore the status, because we will handle it at the end.
+ result = self.run(command, timeout, ignore_status=True,
+ connect_timeout=connect_timeout)
+
+ # Look for the patterns, in order
+ for (regexp, stream) in ((stderr_err_regexp, result.stderr),
+ (stdout_err_regexp, result.stdout)):
+ if regexp and stream:
+ err_re = re.compile (regexp)
+ if err_re.search(stream):
+ raise error.AutoservRunError(
+ '%s failed, found error pattern: '
+ '"%s"' % (command, regexp), result)
+
+ for (regexp, stream) in ((stderr_ok_regexp, result.stderr),
+ (stdout_ok_regexp, result.stdout)):
+ if regexp and stream:
+                ok_re = re.compile(regexp)
+                if ok_re.search(stream):
+                    return
+
+ if not ignore_status and result.exit_status > 0:
+ raise error.AutoservRunError("command execution error",
+ result)
+
+
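A sketch of run_grep(), reusing the hypothetical host object from the run() example; the command and regexps are illustrative:

    # Returns quietly once the ok pattern is found in stdout; raises
    # AutoservRunError if the err pattern shows up instead.
    host.run_grep('cat /proc/filesystems',
                  stdout_ok_regexp='ext3',
                  stdout_err_regexp='No such file')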
+ def reboot(self, timeout=DEFAULT_REBOOT_TIMEOUT, label=None,
+ kernel_args=None, wait=True):
+ """
+ Reboot the remote host.
+
+        Args:
+            timeout: time limit in seconds for the host to come back up
+            label: bootloader entry to boot into, if given
+            kernel_args: extra kernel command line arguments, if given
+            wait: whether to wait for the machine to come back up
+ """
+ self.reboot_setup()
+
+ # forcibly include the "netconsole" kernel arg
+ if self.__netconsole_param:
+ if kernel_args is None:
+ kernel_args = self.__netconsole_param
+ else:
+ kernel_args += " " + self.__netconsole_param
+ # unload the (possibly loaded) module to avoid shutdown issues
+ self.__unload_netconsole_module()
+ if label or kernel_args:
+ self.bootloader.install_boottool()
+ if label:
+ self.bootloader.set_default(label)
+ if kernel_args:
+ if not label:
+ default = int(self.bootloader.get_default())
+ label = self.bootloader.get_titles()[default]
+ self.bootloader.add_args(label, kernel_args)
+
+ # define a function for the reboot and run it in a group
+ print "Reboot: initiating reboot"
+ def reboot():
+ self.__record("GOOD", None, "reboot.start")
+ try:
+ self.run('(sleep 5; reboot) '
+ '</dev/null >/dev/null 2>&1 &')
+ except error.AutoservRunError:
+ self.__record("ABORT", None, "reboot.start",
+ "reboot command failed")
+ raise
+ if wait:
+ self.wait_for_restart(timeout)
+ self.reboot_followup()
+ self.__run_reboot_group(reboot)
+
+
+ def reboot_followup(self):
+ super(SSHHost, self).reboot_followup()
+ self.__load_netconsole_module() # if the builtin fails
+
+
+ def __copy_files(self, sources, dest):
+ """
+ Copy files from one machine to another.
+
+ This is for internal use by other methods that intend to move
+ files between machines. It expects a list of source files and
+ a destination (a filename if the source is a single file, a
+ destination otherwise). The names must already be
+ pre-processed into the appropriate rsync/scp friendly
+ format (%s@%s:%s).
+ """
+ # wait until there are only a small number of copies running
+ # before starting this one
+ get_config = global_config.global_config.get_config_value
+ max_simultaneous = get_config("HOSTS",
+ "max_simultaneous_file_copies",
+ type=int)
+ while True:
+ copy_count = 0
+ procs = utils.system_output('ps -ef')
+ for line in procs.splitlines():
+ if 'rsync ' in line or 'scp ' in line:
+ copy_count += 1
+ if copy_count < max_simultaneous:
+ break
+ time.sleep(60)
+
+ print '__copy_files: copying %s to %s' % (sources, dest)
+ try:
+ utils.run('rsync --rsh="%s" -az %s %s' % (
+ self.ssh_base_command(), ' '.join(sources), dest))
+ except Exception:
+ try:
+ utils.run('scp -rpq -P %d %s "%s"' % (
+ self.port, ' '.join(sources), dest))
+ except error.CmdError, cmderr:
+ raise error.AutoservRunError(cmderr.args[0],
+ cmderr.args[1])
+
+ def get_file(self, source, dest):
+ """
+ Copy files from the remote host to a local path.
+
+ Directories will be copied recursively.
+ If a source component is a directory with a trailing slash,
+ the content of the directory will be copied, otherwise, the
+ directory itself and its content will be copied. This
+ behavior is similar to that of the program 'rsync'.
+
+ Args:
+ source: either
+ 1) a single file or directory, as a string
+ 2) a list of one or more (possibly mixed)
+ files or directories
+ dest: a file or a directory (if source contains a
+ directory or more than one element, you must
+ supply a directory dest)
+
+ Raises:
+ AutoservRunError: the scp command failed
+ """
+ if isinstance(source, types.StringTypes):
+ source= [source]
+
+ processed_source= []
+ for entry in source:
+ if entry.endswith('/'):
+ format_string= '%s@%s:"%s*"'
+ else:
+ format_string= '%s@%s:"%s"'
+ entry= format_string % (self.user, self.hostname,
+ utils.scp_remote_escape(entry))
+ processed_source.append(entry)
+
+ processed_dest= os.path.abspath(dest)
+ if os.path.isdir(dest):
+ processed_dest= "%s/" % (utils.sh_escape(processed_dest),)
+ else:
+ processed_dest= utils.sh_escape(processed_dest)
+
+ self.__copy_files(processed_source, processed_dest)
+
+
+ def send_file(self, source, dest):
+ """
+ Copy files from a local path to the remote host.
+
+ Directories will be copied recursively.
+ If a source component is a directory with a trailing slash,
+ the content of the directory will be copied, otherwise, the
+ directory itself and its content will be copied. This
+ behavior is similar to that of the program 'rsync'.
+
+ Args:
+ source: either
+ 1) a single file or directory, as a string
+ 2) a list of one or more (possibly mixed)
+ files or directories
+ dest: a file or a directory (if source contains a
+ directory or more than one element, you must
+ supply a directory dest)
+
+ Raises:
+ AutoservRunError: the scp command failed
+ """
+ if isinstance(source, types.StringTypes):
+ source= [source]
+
+ processed_source= []
+ for entry in source:
+ if entry.endswith('/'):
+ format_string= '"%s/"*'
+ else:
+ format_string= '"%s"'
+ entry= format_string % (utils.sh_escape(os.path.abspath(entry)),)
+ processed_source.append(entry)
+
+ remote_dest = '%s@%s:"%s"' % (
+ self.user, self.hostname,
+ utils.scp_remote_escape(dest))
+
+ self.__copy_files(processed_source, remote_dest)
+ self.run('find "%s" -type d | xargs -i -r chmod o+rx "{}"' % dest)
+ self.run('find "%s" -type f | xargs -i -r chmod o+r "{}"' % dest)
+
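A sketch of the two copy helpers with made-up paths, reusing the hypothetical host object; note the rsync-style trailing-slash rule described in the docstrings:

    host.send_file('/usr/local/src/mytest/', '/tmp/mytest')  # copy contents only
    host.send_file('/usr/local/src/mytest', '/tmp')          # copy the directory itself
    host.get_file(['/var/log/messages', '/var/log/dmesg'],
                  '/tmp/remote_logs/')                        # local dest dir must already exist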
+ def get_tmp_dir(self):
+ """
+ Return the pathname of a directory on the host suitable
+ for temporary file storage.
+
+ The directory and its content will be deleted automatically
+ on the destruction of the Host object that was used to obtain
+ it.
+ """
+ dir_name= self.run("mktemp -d /tmp/autoserv-XXXXXX").stdout.rstrip(" \n")
+ self.tmp_dirs.append(dir_name)
+ return dir_name
+
+
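A sketch of get_tmp_dir() used as remote scratch space (the tarball path is illustrative); the directory is cleaned up when the host object is destroyed:

    scratch = host.get_tmp_dir()        # e.g. /tmp/autoserv-Ab12Cd
    host.run('tar -C "%s" -xzf /tmp/tests.tar.gz' % scratch)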
+ def is_up(self):
+ """
+ Check if the remote host is up.
+
+ Returns:
+ True if the remote host is up, False otherwise
+ """
+ try:
+ self.ssh_ping()
+ except:
+ return False
+ return True
+
+
+ def _is_wait_up_process_up(self):
+ """
+        Checks if any SSHHost waitup processes are running yet on the
+ remote host.
+
+        Returns True if any of the waitup processes are running, False
+ otherwise.
+ """
+ processes = self.get_wait_up_processes()
+ if len(processes) == 0:
+ return True # wait up processes aren't being used
+ for procname in processes:
+ exit_status = self.run("ps -e | grep '%s'" % procname,
+ ignore_status=True).exit_status
+ if exit_status == 0:
+ return True
+ return False
+
+
+ def wait_up(self, timeout=None):
+ """
+ Wait until the remote host is up or the timeout expires.
+
+ In fact, it will wait until an ssh connection to the remote
+ host can be established, and getty is running.
+
+ Args:
+ timeout: time limit in seconds before returning even
+ if the host is not up.
+
+ Returns:
+ True if the host was found to be up, False otherwise
+ """
+ if timeout:
+ end_time= time.time() + timeout
+
+ while not timeout or time.time() < end_time:
+ try:
+ self.ssh_ping()
+ except (error.AutoservRunError,
+ error.AutoservSSHTimeout):
+ pass
+ else:
+ try:
+ if self._is_wait_up_process_up():
+ return True
+ except (error.AutoservRunError,
+ error.AutoservSSHTimeout):
+ pass
+ time.sleep(1)
+
+ return False
+
+
+ def wait_down(self, timeout=None):
+ """
+ Wait until the remote host is down or the timeout expires.
+
+ In fact, it will wait until an ssh connection to the remote
+ host fails.
+
+ Args:
+ timeout: time limit in seconds before returning even
+ if the host is not up.
+
+ Returns:
+ True if the host was found to be down, False otherwise
+ """
+ if timeout:
+ end_time= time.time() + timeout
+
+ while not timeout or time.time() < end_time:
+ try:
+ self.ssh_ping()
+ except:
+ return True
+ time.sleep(1)
+
+ return False
+
+
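A sketch of how wait_down() and wait_up() pair up around a manually triggered reboot; wait_for_restart() above wraps the same pattern with status records:

    host.run('(sleep 5; reboot) </dev/null >/dev/null 2>&1 &')
    if not host.wait_down(timeout=300):
        raise Exception('host never went down')       # illustrative handling
    if not host.wait_up(timeout=1800):                # DEFAULT_REBOOT_TIMEOUT
        raise Exception('host never came back up')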
+ def ensure_up(self):
+ """
+        Ensure the host is up; if it is not, do not proceed.
+        This prevents cascading failures of tests.
+ """
+ print 'Ensuring that %s is up before continuing' % self.hostname
+ if hasattr(self, 'hardreset') and not self.wait_up(300):
+ print "Performing a hardreset on %s" % self.hostname
+ try:
+ self.hardreset()
+ except error.AutoservUnsupportedError:
+ print "Hardreset is unsupported on %s" % self.hostname
+ if not self.wait_up(60 * 30):
+ # 30 minutes should be more than enough
+ raise error.AutoservHostError
+ print 'Host up, continuing'
+
+
+ def get_num_cpu(self):
+ """
+ Get the number of CPUs in the host according to
+ /proc/cpuinfo.
+
+ Returns:
+ The number of CPUs
+ """
+
+ proc_cpuinfo = self.run("cat /proc/cpuinfo",
+ stdout_tee=open('/dev/null', 'w')).stdout
+ cpus = 0
+ for line in proc_cpuinfo.splitlines():
+ if line.startswith('processor'):
+ cpus += 1
+ return cpus
+
+
+ def check_uptime(self):
+ """
+ Check that uptime is available and monotonically increasing.
+ """
+ if not self.ping():
+ raise error.AutoservHostError('Client is not pingable')
+ result = self.run("/bin/cat /proc/uptime", 30)
+ return result.stdout.strip().split()[0]
+
+
+ def get_arch(self):
+ """
+ Get the hardware architecture of the remote machine
+ """
+ arch = self.run('/bin/uname -m').stdout.rstrip()
+ if re.match(r'i\d86$', arch):
+ arch = 'i386'
+ return arch
+
+
+ def get_kernel_ver(self):
+ """
+ Get the kernel version of the remote machine
+ """
+ return self.run('/bin/uname -r').stdout.rstrip()
+
+
+ def get_cmdline(self):
+ """
+ Get the kernel command line of the remote machine
+ """
+ return self.run('cat /proc/cmdline').stdout.rstrip()
+
+
+ def ping(self):
+ """
+ Ping the remote system, and return whether it's available
+ """
+ fpingcmd = "%s -q %s" % ('/usr/bin/fping', self.hostname)
+ rc = utils.system(fpingcmd, ignore_status = 1)
+ return (rc == 0)
+
+
+ def ssh_ping(self, timeout = 60):
+ try:
+ self.run('true', timeout = timeout, connect_timeout = timeout)
+ except error.AutoservSSHTimeout:
+ msg = "ssh ping timed out. timeout = %s" % timeout
+ raise error.AutoservSSHTimeout(msg)
+ except error.AutoservRunError, exc:
+ msg = "command true failed in ssh ping"
+ raise error.AutoservRunError(msg, exc.args[1])
+
+
+ def get_autodir(self):
+ return self.autodir
+
+
+ def ssh_setup_key(self):
+ try:
+ print 'Performing ssh key setup on %s:%d as %s' % \
+ (self.hostname, self.port, self.user)
+
+ host = pxssh.pxssh()
+ host.login(self.hostname, self.user, self.password,
+ port=self.port)
+
+ try:
+ public_key = utils.get_public_key()
+
+ host.sendline('mkdir -p ~/.ssh')
+ host.prompt()
+ host.sendline('chmod 700 ~/.ssh')
+ host.prompt()
+ host.sendline("echo '%s' >> ~/.ssh/authorized_keys; " %
+ (public_key))
+ host.prompt()
+ host.sendline('chmod 600 ~/.ssh/authorized_keys')
+ host.prompt()
+
+ print 'SSH key setup complete'
+
+ finally:
+ host.logout()
+
+ except:
+ pass
+
+
+ def setup(self):
+ if not self.password == '':
+ try:
+ self.ssh_ping()
+ except error.AutoservRunError:
+ self.ssh_setup_key()
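Putting the pieces together, a minimal end-to-end sketch; the hostname, boot label and kernel arguments are all illustrative, and console logging is left disabled:

    import hosts
    host = hosts.SSHHost('mytestbox.example.com', conmux_log=None)
    print host.get_kernel_ver(), 'on', host.get_arch()
    host.reboot(label='autotest', kernel_args='console=ttyS0,115200')
    host.ensure_up()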
diff --git a/server/hypervisor.py b/server/hypervisor.py
index ac213cc4..75ad3215 100644
--- a/server/hypervisor.py
+++ b/server/hypervisor.py
@@ -5,7 +5,7 @@
"""
This module defines the Hypervisor class
- Hypervisor: a virtual machine monitor
+ Hypervisor: a virtual machine monitor
"""
__author__ = """
@@ -19,32 +19,32 @@ import installable_object
class Hypervisor(installable_object.InstallableObject):
- """
- This class represents a virtual machine monitor.
+ """
+ This class represents a virtual machine monitor.
- Implementation details:
- This is an abstract class, leaf subclasses must implement the methods
- listed here and in parent classes which have no implementation. They
- may reimplement methods which already have an implementation. You
- must not instantiate this class but should instantiate one of those
- leaf subclasses.
- """
+ Implementation details:
+    This is an abstract class; leaf subclasses must implement the methods
+ listed here and in parent classes which have no implementation. They
+ may reimplement methods which already have an implementation. You
+ must not instantiate this class but should instantiate one of those
+ leaf subclasses.
+ """
- host = None
- guests = None
+ host = None
+ guests = None
- def __init__(self, host):
- super(Hypervisor, self).__init__()
- self.host= host
+ def __init__(self, host):
+ super(Hypervisor, self).__init__()
+ self.host= host
- def new_guest(self):
- pass
+ def new_guest(self):
+ pass
- def delete_guest(self, guest_hostname):
- pass
+ def delete_guest(self, guest_hostname):
+ pass
- def reset_guest(self, guest_hostname):
- pass
+ def reset_guest(self, guest_hostname):
+ pass
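Since Hypervisor is abstract, a concrete monitor is expected to fill in the three guest operations; a bare sketch only (KVM, further below, is the real leaf class):

    class FakeHypervisor(hypervisor.Hypervisor):
        """Illustrative only; a real implementation manages actual guests."""
        def new_guest(self):
            return '10.0.0.21'                  # would boot a VM and return its address
        def delete_guest(self, guest_hostname):
            pass                                # would terminate the named VM
        def reset_guest(self, guest_hostname):
            pass                                # would hard-reset the named VM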
diff --git a/server/installable_object.py b/server/installable_object.py
index 04925956..3c61e05c 100644
--- a/server/installable_object.py
+++ b/server/installable_object.py
@@ -5,7 +5,7 @@
"""
This module defines the InstallableObject class
- InstallableObject: a software package that can be installed on a Host
+ InstallableObject: a software package that can be installed on a Host
"""
__author__ = """
@@ -19,37 +19,37 @@ import utils
class InstallableObject(object):
- """
- This class represents a software package that can be installed on
- a Host.
-
- Implementation details:
- This is an abstract class, leaf subclasses must implement the methods
- listed here. You must not instantiate this class but should
- instantiate one of those leaf subclasses.
- """
-
- source_material= None
-
- def __init__(self):
- super(InstallableObject, self).__init__()
-
-
- def get(self, location):
- """
- Get the source material required to install the object.
-
- Through the utils.get() function, the argument passed will be
- saved in a temporary location on the LocalHost. That location
- is saved in the source_material attribute.
-
- Args:
- location: the path to the source material. This path
- may be of any type that the utils.get()
- function will accept.
- """
- self.source_material= utils.get(location)
-
-
- def install(self, host):
- pass
+ """
+ This class represents a software package that can be installed on
+ a Host.
+
+ Implementation details:
+    This is an abstract class; leaf subclasses must implement the methods
+ listed here. You must not instantiate this class but should
+ instantiate one of those leaf subclasses.
+ """
+
+ source_material= None
+
+ def __init__(self):
+ super(InstallableObject, self).__init__()
+
+
+ def get(self, location):
+ """
+ Get the source material required to install the object.
+
+ Through the utils.get() function, the argument passed will be
+ saved in a temporary location on the LocalHost. That location
+ is saved in the source_material attribute.
+
+ Args:
+ location: the path to the source material. This path
+ may be of any type that the utils.get()
+ function will accept.
+ """
+ self.source_material= utils.get(location)
+
+
+ def install(self, host):
+ pass
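A sketch of the intended usage pattern: get() stages the source material locally and install() pushes it onto a Host; the class name and paths below are illustrative:

    class MyTool(installable_object.InstallableObject):
        def install(self, host):
            # source_material was filled in by get(); push it to the host
            remote_dir = host.get_tmp_dir()
            host.send_file(self.source_material, remote_dir)

    tool = MyTool()
    tool.get('/usr/local/src/mytool.tar.gz')    # any path utils.get() accepts
    # tool.install(some_ssh_host)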
diff --git a/server/kernel.py b/server/kernel.py
index 63743cd3..5fcaba7d 100644
--- a/server/kernel.py
+++ b/server/kernel.py
@@ -5,7 +5,7 @@
"""
This module defines the Kernel class
- Kernel: an os kernel
+    Kernel: an OS kernel
"""
__author__ = """
@@ -19,27 +19,27 @@ import installable_object
class Kernel(installable_object.InstallableObject):
- """
- This class represents a kernel.
-
- It is used to obtain a built kernel or create one from source and
- install it on a Host.
-
- Implementation details:
- This is an abstract class, leaf subclasses must implement the methods
- listed here and in parent classes which have no implementation. They
- may reimplement methods which already have an implementation. You
- must not instantiate this class but should instantiate one of those
- leaf subclasses.
- """
-
- def get_version():
- pass
-
-
- def get_image_name():
- pass
-
-
- def get_initrd_name():
- pass
+ """
+ This class represents a kernel.
+
+ It is used to obtain a built kernel or create one from source and
+ install it on a Host.
+
+ Implementation details:
+    This is an abstract class; leaf subclasses must implement the methods
+ listed here and in parent classes which have no implementation. They
+ may reimplement methods which already have an implementation. You
+ must not instantiate this class but should instantiate one of those
+ leaf subclasses.
+ """
+
+ def get_version():
+ pass
+
+
+ def get_image_name():
+ pass
+
+
+ def get_initrd_name():
+ pass
diff --git a/server/kvm.py b/server/kvm.py
index 72e3e7d7..0e577317 100644
--- a/server/kvm.py
+++ b/server/kvm.py
@@ -5,7 +5,7 @@
"""
This module defines the KVM class
- KVM: a KVM virtual machine monitor
+ KVM: a KVM virtual machine monitor
"""
__author__ = """
@@ -31,15 +31,15 @@ brctl addif br0 $1
_check_process_script= """\
if [ -f "%(pid_file_name)s" ]
then
- pid=$(cat "%(pid_file_name)s")
- if [ -L /proc/$pid/exe ] && stat /proc/$pid/exe |
- grep -q -- "-> \`%(qemu_binary)s\'\$"
- then
- echo "process present"
- else
- rm -f "%(pid_file_name)s"
- rm -f "%(monitor_file_name)s"
- fi
+ pid=$(cat "%(pid_file_name)s")
+ if [ -L /proc/$pid/exe ] && stat /proc/$pid/exe |
+ grep -q -- "-> \`%(qemu_binary)s\'\$"
+ then
+ echo "process present"
+ else
+ rm -f "%(pid_file_name)s"
+ rm -f "%(monitor_file_name)s"
+ fi
fi
"""
@@ -54,430 +54,430 @@ monitor_socket.send("system_reset\\n")\n')
_remove_modules_script= """\
if $(grep -q "^kvm_intel [[:digit:]]\+ 0" /proc/modules)
then
- rmmod kvm-intel
+ rmmod kvm-intel
fi
if $(grep -q "^kvm_amd [[:digit:]]\+ 0" /proc/modules)
then
- rmmod kvm-amd
+ rmmod kvm-amd
fi
if $(grep -q "^kvm [[:digit:]]\+ 0" /proc/modules)
then
- rmmod kvm
+ rmmod kvm
fi
"""
class KVM(hypervisor.Hypervisor):
- """
- This class represents a KVM virtual machine monitor.
-
- Implementation details:
- This is a leaf class in an abstract class hierarchy, it must
- implement the unimplemented methods in parent classes.
- """
-
- build_dir= None
- pid_dir= None
- support_dir= None
- addresses= []
- insert_modules= True
- modules= {}
-
-
- def __del__(self):
- """
- Destroy a KVM object.
-
- Guests managed by this hypervisor that are still running will
- be killed.
- """
- self.deinitialize()
-
-
- def _insert_modules(self):
- """
- Insert the kvm modules into the kernel.
-
- The modules inserted are the ones from the build directory, NOT
- the ones from the kernel.
-
- This function should only be called after install(). It will
- check that the modules are not already loaded before attempting
- to insert them.
- """
- cpu_flags= self.host.run('cat /proc/cpuinfo | '
- 'grep -e "^flags" | head -1 | cut -d " " -f 2-'
- ).stdout.strip()
-
- if cpu_flags.find('vmx') != -1:
- module_type= "intel"
- elif cpu_flags.find('svm') != -1:
- module_type= "amd"
- else:
- raise error.AutoservVirtError("No harware "
- "virtualization extensions found, "
- "KVM cannot run")
-
- self.host.run('if ! $(grep -q "^kvm " /proc/modules); '
- 'then insmod "%s"; fi' % (utils.sh_escape(
- os.path.join(self.build_dir, "kernel/kvm.ko")),))
- if module_type == "intel":
- self.host.run('if ! $(grep -q "^kvm_intel " '
- '/proc/modules); then insmod "%s"; fi' %
- (utils.sh_escape(os.path.join(self.build_dir,
- "kernel/kvm-intel.ko")),))
- elif module_type == "amd":
- self.host.run('if ! $(grep -q "^kvm_amd " '
- '/proc/modules); then insmod "%s"; fi' %
- (utils.sh_escape(os.path.join(self.build_dir,
- "kernel/kvm-amd.ko")),))
-
-
- def _remove_modules(self):
- """
- Remove the kvm modules from the kernel.
-
- This function checks that they're not in use before trying to
- remove them.
- """
- self.host.run(_remove_modules_script)
-
-
- def install(self, addresses, build=True, insert_modules=True, syncdir=None):
- """
- Compile the kvm software on the host that the object was
- initialized with.
-
- The kvm kernel modules are compiled, for this, the kernel
- sources must be available. A custom qemu is also compiled.
- Note that 'make install' is not run, the kernel modules and
- qemu are run from where they were built, therefore not
- conflicting with what might already be installed.
-
- Args:
- addresses: a list of dict entries of the form
- {"mac" : "xx:xx:xx:xx:xx:xx",
- "ip" : "yyy.yyy.yyy.yyy"} where x and y
- are replaced with sensible values. The ip
- address may be a hostname or an IPv6 instead.
-
- When a new virtual machine is created, the
- first available entry in that list will be
- used. The network card in the virtual machine
- will be assigned the specified mac address and
- autoserv will use the specified ip address to
- connect to the virtual host via ssh. The virtual
- machine os must therefore be configured to
- configure its network with the ip corresponding
- to the mac.
- build: build kvm from the source material, if False,
- it is assumed that the package contains the
- source tree after a 'make'.
- insert_modules: build kvm modules from the source
- material and insert them. Otherwise, the
- running kernel is assumed to already have
- kvm support and nothing will be done concerning
- the modules.
-
- TODO(poirier): check dependencies before building
- kvm needs:
- libasound2-dev
- libsdl1.2-dev (or configure qemu with --disable-gfx-check, how?)
- bridge-utils
- """
- self.addresses= [
- {"mac" : address["mac"],
- "ip" : address["ip"],
- "is_used" : False} for address in addresses]
-
- self.build_dir = self.host.get_tmp_dir()
- self.support_dir= self.host.get_tmp_dir()
-
- self.host.run('echo "%s" > "%s"' % (
- utils.sh_escape(_qemu_ifup_script),
- utils.sh_escape(os.path.join(self.support_dir,
- "qemu-ifup.sh")),))
- self.host.run('chmod a+x "%s"' % (
- utils.sh_escape(os.path.join(self.support_dir,
- "qemu-ifup.sh")),))
-
- self.host.send_file(self.source_material, self.build_dir)
- remote_source_material= os.path.join(self.build_dir,
- os.path.basename(self.source_material))
-
- self.build_dir= utils.unarchive(self.host,
- remote_source_material)
-
- if insert_modules:
- configure_modules= ""
- self.insert_modules= True
- else:
- configure_modules= "--with-patched-kernel "
- self.insert_modules= False
-
- # build
- if build:
- try:
- self.host.run('make -C "%s" clean' % (
- utils.sh_escape(self.build_dir),),
- timeout=600)
- except error.AutoservRunError:
- # directory was already clean and contained
- # no makefile
- pass
- self.host.run('cd "%s" && ./configure %s' % (
- utils.sh_escape(self.build_dir),
- configure_modules,), timeout=600)
- if syncdir:
- cmd = 'cd "%s/kernel" && make sync LINUX=%s' % (
- utils.sh_escape(self.build_dir),
- utils.sh_escape(syncdir))
- self.host.run(cmd)
- self.host.run('make -j%d -C "%s"' % (
- self.host.get_num_cpu() * 2,
- utils.sh_escape(self.build_dir),), timeout=3600)
- # remember path to modules
- self.modules['kvm'] = "%s" %(
- utils.sh_escape(os.path.join(self.build_dir,
- "kernel/kvm.ko")))
- self.modules['kvm-intel'] = "%s" %(
- utils.sh_escape(os.path.join(self.build_dir,
- "kernel/kvm-intel.ko")))
- self.modules['kvm-amd'] = "%s" %(
- utils.sh_escape(os.path.join(self.build_dir,
- "kernel/kvm-amd.ko")))
- print self.modules
-
- self.initialize()
-
-
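A sketch of the addresses argument described in the docstring above; the MAC/IP pairs are made up:

    addresses = [
        {'mac': '02:00:00:00:00:01', 'ip': '10.0.0.21'},
        {'mac': '02:00:00:00:00:02', 'ip': '10.0.0.22'},
    ]
    # kvm_object.install(addresses, build=True, insert_modules=True)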
- def initialize(self):
- """
- Initialize the hypervisor.
-
- Loads needed kernel modules and creates temporary directories.
- The logic is that you could compile once and
- initialize - deinitialize many times. But why you would do that
- has yet to be figured out.
-
- Raises:
- AutoservVirtError: cpuid doesn't report virtualization
- extensions (vmx for intel or svm for amd), in
- this case, kvm cannot run.
- """
- self.pid_dir= self.host.get_tmp_dir()
-
- if self.insert_modules:
- self._remove_modules()
- self._insert_modules()
-
-
- def deinitialize(self):
- """
- Terminate the hypervisor.
-
- Kill all the virtual machines that are still running and
- unload the kernel modules.
- """
- self.refresh_guests()
- for address in self.addresses:
- if address["is_used"]:
- self.delete_guest(address["ip"])
- self.pid_dir= None
-
- if self.insert_modules:
- self._remove_modules()
-
-
- def new_guest(self, qemu_options):
- """
- Start a new guest ("virtual machine").
-
- Returns:
- The ip that was picked from the list supplied to
- install() and assigned to this guest.
-
- Raises:
- AutoservVirtError: no more addresses are available.
- """
- for address in self.addresses:
- if not address["is_used"]:
- break
- else:
- raise error.AutoservVirtError(
- "No more addresses available")
-
- retval= self.host.run(
- '%s'
- # this is the line of options that can be modified
- ' %s '
- '-pidfile "%s" -daemonize -nographic '
- #~ '-serial telnet::4444,server '
- '-monitor unix:"%s",server,nowait '
- '-net nic,macaddr="%s" -net tap,script="%s" -L "%s"' % (
- utils.sh_escape(os.path.join(
- self.build_dir,
- "qemu/x86_64-softmmu/qemu-system-x86_64")),
- qemu_options,
- utils.sh_escape(os.path.join(
- self.pid_dir,
- "vhost%s_pid" % (address["ip"],))),
- utils.sh_escape(os.path.join(
- self.pid_dir,
- "vhost%s_monitor" % (address["ip"],))),
- utils.sh_escape(address["mac"]),
- utils.sh_escape(os.path.join(
- self.support_dir,
- "qemu-ifup.sh")),
- utils.sh_escape(os.path.join(
- self.build_dir,
- "qemu/pc-bios")),))
-
- address["is_used"]= True
- return address["ip"]
-
-
- def refresh_guests(self):
- """
- Refresh the list of guest addresses.
-
- The is_used status will be updated according to the presence
- of the process specified in the pid file that was written when
- the virtual machine was started.
-
- TODO(poirier): there are a lot of race conditions in this code
- because the process might terminate on its own anywhere in
- between
- """
- for address in self.addresses:
- if address["is_used"]:
- pid_file_name= utils.sh_escape(os.path.join(
- self.pid_dir,
- "vhost%s_pid" % (address["ip"],)))
- monitor_file_name= utils.sh_escape(os.path.join(
- self.pid_dir,
- "vhost%s_monitor" % (address["ip"],)))
- retval= self.host.run(
- _check_process_script % {
- "pid_file_name" : pid_file_name,
- "monitor_file_name" : monitor_file_name,
- "qemu_binary" : utils.sh_escape(
- os.path.join(self.build_dir,
- "qemu/x86_64-softmmu/"
- "qemu-system-x86_64")),})
- if (retval.stdout.strip() !=
- "process present"):
- address["is_used"]= False
-
-
- def delete_guest(self, guest_hostname):
- """
- Terminate a virtual machine.
-
- Args:
- guest_hostname: the ip (as it was specified in the
- address list given to install()) of the guest
- to terminate.
-
- Raises:
- AutoservVirtError: the guest_hostname argument is
- invalid
-
- TODO(poirier): is there a difference in qemu between
- sending SIGTERM or quitting from the monitor?
- TODO(poirier): there are a lot of race conditions in this code
- because the process might terminate on its own anywhere in
- between
- """
- for address in self.addresses:
- if address["ip"] == guest_hostname:
- if address["is_used"]:
- break
- else:
- # Will happen if deinitialize() is
- # called while guest objects still
- exist and these are del'ed after.
- # In that situation, nothing is to
- # be done here, don't throw an error
- # either because it will print an
- # ugly message during garbage
- # collection. The solution would be to
- # delete the guest objects before
- # calling deinitialize(), this can't be
- # done by the KVM class, it has no
- # reference to those objects and it
- # cannot have any either. The Guest
- # objects already need to have a
- # reference to their managing
- # hypervisor. If the hypervisor had a
- # reference to the Guest objects it
- # manages, it would create a circular
- # reference and those objects would
- # not be elligible for garbage
- # collection. In turn, this means that
- # the KVM object would not be
- # automatically del'ed at the end of
- # the program and guests that are still
- # running would be left unattended.
- # Note that this circular reference
- # problem could be avoided by using
- # weakref's in class KVM but the
- # control file will most likely also
- # have references to the guests.
- return
- else:
- raise error.AutoservVirtError("Unknown guest hostname")
-
- pid_file_name= utils.sh_escape(os.path.join(self.pid_dir,
- "vhost%s_pid" % (address["ip"],)))
- monitor_file_name= utils.sh_escape(os.path.join(self.pid_dir,
- "vhost%s_monitor" % (address["ip"],)))
-
- retval= self.host.run(
- _check_process_script % {
- "pid_file_name" : pid_file_name,
- "monitor_file_name" : monitor_file_name,
- "qemu_binary" : utils.sh_escape(os.path.join(
- self.build_dir,
- "qemu/x86_64-softmmu/qemu-system-x86_64")),})
- if retval.stdout.strip() == "process present":
- self.host.run('kill $(cat "%s")' %(
- pid_file_name,))
- self.host.run('rm -f "%s"' %(
- pid_file_name,))
- self.host.run('rm -f "%s"' %(
- monitor_file_name,))
- address["is_used"]= False
-
-
- def reset_guest(self, guest_hostname):
- """
- Perform a hard reset on a virtual machine.
-
- Args:
- guest_hostname: the ip (as it was specified in the
- address list given to install()) of the guest
- to reset.
-
- Raises:
- AutoservVirtError: the guest_hostname argument is
- invalid
- """
- for address in self.addresses:
- if address["ip"] is guest_hostname:
- if address["is_used"]:
- break
- else:
- raise error.AutoservVirtError("guest "
- "hostname not in use")
- else:
- raise error.AutoservVirtError("Unknown guest hostname")
-
- monitor_file_name= utils.sh_escape(os.path.join(self.pid_dir,
- "vhost%s_monitor" % (address["ip"],)))
-
- self.host.run('python -c "%s"' % (utils.sh_escape(
- _hard_reset_script % {
- "monitor_file_name" : monitor_file_name,}),))
+ """
+ This class represents a KVM virtual machine monitor.
+
+ Implementation details:
+ This is a leaf class in an abstract class hierarchy, it must
+ implement the unimplemented methods in parent classes.
+ """
+
+ build_dir= None
+ pid_dir= None
+ support_dir= None
+ addresses= []
+ insert_modules= True
+ modules= {}
+
+
+ def __del__(self):
+ """
+ Destroy a KVM object.
+
+ Guests managed by this hypervisor that are still running will
+ be killed.
+ """
+ self.deinitialize()
+
+
+ def _insert_modules(self):
+ """
+ Insert the kvm modules into the kernel.
+
+ The modules inserted are the ones from the build directory, NOT
+ the ones from the kernel.
+
+ This function should only be called after install(). It will
+ check that the modules are not already loaded before attempting
+ to insert them.
+ """
+ cpu_flags= self.host.run('cat /proc/cpuinfo | '
+ 'grep -e "^flags" | head -1 | cut -d " " -f 2-'
+ ).stdout.strip()
+
+ if cpu_flags.find('vmx') != -1:
+ module_type= "intel"
+ elif cpu_flags.find('svm') != -1:
+ module_type= "amd"
+ else:
+ raise error.AutoservVirtError("No harware "
+ "virtualization extensions found, "
+ "KVM cannot run")
+
+ self.host.run('if ! $(grep -q "^kvm " /proc/modules); '
+ 'then insmod "%s"; fi' % (utils.sh_escape(
+ os.path.join(self.build_dir, "kernel/kvm.ko")),))
+ if module_type == "intel":
+ self.host.run('if ! $(grep -q "^kvm_intel " '
+ '/proc/modules); then insmod "%s"; fi' %
+ (utils.sh_escape(os.path.join(self.build_dir,
+ "kernel/kvm-intel.ko")),))
+ elif module_type == "amd":
+ self.host.run('if ! $(grep -q "^kvm_amd " '
+ '/proc/modules); then insmod "%s"; fi' %
+ (utils.sh_escape(os.path.join(self.build_dir,
+ "kernel/kvm-amd.ko")),))
+
+
+ def _remove_modules(self):
+ """
+ Remove the kvm modules from the kernel.
+
+ This function checks that they're not in use before trying to
+ remove them.
+ """
+ self.host.run(_remove_modules_script)
+
+
+ def install(self, addresses, build=True, insert_modules=True, syncdir=None):
+ """
+ Compile the kvm software on the host that the object was
+ initialized with.
+
+ The kvm kernel modules are compiled; for this, the kernel
+ sources must be available. A custom qemu is also compiled.
+ Note that 'make install' is not run, the kernel modules and
+ qemu are run from where they were built, therefore not
+ conflicting with what might already be installed.
+
+ Args:
+ addresses: a list of dict entries of the form
+ {"mac" : "xx:xx:xx:xx:xx:xx",
+ "ip" : "yyy.yyy.yyy.yyy"} where x and y
+ are replaced with sensible values. The ip
+ address may be a hostname or an IPv6 address instead.
+
+ When a new virtual machine is created, the
+ first available entry in that list will be
+ used. The network card in the virtual machine
+ will be assigned the specified mac address and
+ autoserv will use the specified ip address to
+ connect to the virtual host via ssh. The virtual
+ machine os must therefore be configured to
+ configure its network with the ip corresponding
+ to the mac.
+ build: build kvm from the source material, if False,
+ it is assumed that the package contains the
+ source tree after a 'make'.
+ insert_modules: build kvm modules from the source
+ material and insert them. Otherwise, the
+ running kernel is assumed to already have
+ kvm support and nothing will be done concerning
+ the modules.
+
+ TODO(poirier): check dependencies before building
+ kvm needs:
+ libasound2-dev
+ libsdl1.2-dev (or configure qemu with --disable-gfx-check, how?)
+ bridge-utils
+ """
+ self.addresses= [
+ {"mac" : address["mac"],
+ "ip" : address["ip"],
+ "is_used" : False} for address in addresses]
+
+ self.build_dir = self.host.get_tmp_dir()
+ self.support_dir= self.host.get_tmp_dir()
+
+ self.host.run('echo "%s" > "%s"' % (
+ utils.sh_escape(_qemu_ifup_script),
+ utils.sh_escape(os.path.join(self.support_dir,
+ "qemu-ifup.sh")),))
+ self.host.run('chmod a+x "%s"' % (
+ utils.sh_escape(os.path.join(self.support_dir,
+ "qemu-ifup.sh")),))
+
+ self.host.send_file(self.source_material, self.build_dir)
+ remote_source_material= os.path.join(self.build_dir,
+ os.path.basename(self.source_material))
+
+ self.build_dir= utils.unarchive(self.host,
+ remote_source_material)
+
+ if insert_modules:
+ configure_modules= ""
+ self.insert_modules= True
+ else:
+ configure_modules= "--with-patched-kernel "
+ self.insert_modules= False
+
+ # build
+ if build:
+ try:
+ self.host.run('make -C "%s" clean' % (
+ utils.sh_escape(self.build_dir),),
+ timeout=600)
+ except error.AutoservRunError:
+ # directory was already clean and contained
+ # no makefile
+ pass
+ self.host.run('cd "%s" && ./configure %s' % (
+ utils.sh_escape(self.build_dir),
+ configure_modules,), timeout=600)
+ if syncdir:
+ cmd = 'cd "%s/kernel" && make sync LINUX=%s' % (
+ utils.sh_escape(self.build_dir),
+ utils.sh_escape(syncdir))
+ self.host.run(cmd)
+ self.host.run('make -j%d -C "%s"' % (
+ self.host.get_num_cpu() * 2,
+ utils.sh_escape(self.build_dir),), timeout=3600)
+ # remember path to modules
+ self.modules['kvm'] = "%s" %(
+ utils.sh_escape(os.path.join(self.build_dir,
+ "kernel/kvm.ko")))
+ self.modules['kvm-intel'] = "%s" %(
+ utils.sh_escape(os.path.join(self.build_dir,
+ "kernel/kvm-intel.ko")))
+ self.modules['kvm-amd'] = "%s" %(
+ utils.sh_escape(os.path.join(self.build_dir,
+ "kernel/kvm-amd.ko")))
+ print self.modules
+
+ self.initialize()
+
+
+ def initialize(self):
+ """
+ Initialize the hypervisor.
+
+ Loads needed kernel modules and creates temporary directories.
+ The logic is that you could compile once and
+ initialize - deinitialize many times. But why you would do that
+ has yet to be figured out.
+
+ Raises:
+ AutoservVirtError: cpuid doesn't report virtualization
+ extensions (vmx for intel or svm for amd), in
+ this case, kvm cannot run.
+ """
+ self.pid_dir= self.host.get_tmp_dir()
+
+ if self.insert_modules:
+ self._remove_modules()
+ self._insert_modules()
+
+
+ def deinitialize(self):
+ """
+ Terminate the hypervisor.
+
+ Kill all the virtual machines that are still running and
+ unload the kernel modules.
+ """
+ self.refresh_guests()
+ for address in self.addresses:
+ if address["is_used"]:
+ self.delete_guest(address["ip"])
+ self.pid_dir= None
+
+ if self.insert_modules:
+ self._remove_modules()
+
+
+ def new_guest(self, qemu_options):
+ """
+ Start a new guest ("virtual machine").
+
+ Returns:
+ The ip that was picked from the list supplied to
+ install() and assigned to this guest.
+
+ Raises:
+ AutoservVirtError: no more addresses are available.
+ """
+ for address in self.addresses:
+ if not address["is_used"]:
+ break
+ else:
+ raise error.AutoservVirtError(
+ "No more addresses available")
+
+ retval= self.host.run(
+ '%s'
+ # this is the line of options that can be modified
+ ' %s '
+ '-pidfile "%s" -daemonize -nographic '
+ #~ '-serial telnet::4444,server '
+ '-monitor unix:"%s",server,nowait '
+ '-net nic,macaddr="%s" -net tap,script="%s" -L "%s"' % (
+ utils.sh_escape(os.path.join(
+ self.build_dir,
+ "qemu/x86_64-softmmu/qemu-system-x86_64")),
+ qemu_options,
+ utils.sh_escape(os.path.join(
+ self.pid_dir,
+ "vhost%s_pid" % (address["ip"],))),
+ utils.sh_escape(os.path.join(
+ self.pid_dir,
+ "vhost%s_monitor" % (address["ip"],))),
+ utils.sh_escape(address["mac"]),
+ utils.sh_escape(os.path.join(
+ self.support_dir,
+ "qemu-ifup.sh")),
+ utils.sh_escape(os.path.join(
+ self.build_dir,
+ "qemu/pc-bios")),))
+
+ address["is_used"]= True
+ return address["ip"]
+
+
+ def refresh_guests(self):
+ """
+ Refresh the list of guest addresses.
+
+ The is_used status will be updated according to the presence
+ of the process specified in the pid file that was written when
+ the virtual machine was started.
+
+ TODO(poirier): there are a lot of race conditions in this code
+ because the process might terminate on its own anywhere in
+ between
+ """
+ for address in self.addresses:
+ if address["is_used"]:
+ pid_file_name= utils.sh_escape(os.path.join(
+ self.pid_dir,
+ "vhost%s_pid" % (address["ip"],)))
+ monitor_file_name= utils.sh_escape(os.path.join(
+ self.pid_dir,
+ "vhost%s_monitor" % (address["ip"],)))
+ retval= self.host.run(
+ _check_process_script % {
+ "pid_file_name" : pid_file_name,
+ "monitor_file_name" : monitor_file_name,
+ "qemu_binary" : utils.sh_escape(
+ os.path.join(self.build_dir,
+ "qemu/x86_64-softmmu/"
+ "qemu-system-x86_64")),})
+ if (retval.stdout.strip() !=
+ "process present"):
+ address["is_used"]= False
+
+
+ def delete_guest(self, guest_hostname):
+ """
+ Terminate a virtual machine.
+
+ Args:
+ guest_hostname: the ip (as it was specified in the
+ address list given to install()) of the guest
+ to terminate.
+
+ Raises:
+ AutoservVirtError: the guest_hostname argument is
+ invalid
+
+ TODO(poirier): is there a difference in qemu between
+ sending SIGTERM or quitting from the monitor?
+ TODO(poirier): there are a lot of race conditions in this code
+ because the process might terminate on its own anywhere in
+ between
+ """
+ for address in self.addresses:
+ if address["ip"] == guest_hostname:
+ if address["is_used"]:
+ break
+ else:
+ # Will happen if deinitialize() is
+ # called while guest objects still
+ exist and these are del'ed after.
+ # In that situation, nothing is to
+ # be done here, don't throw an error
+ # either because it will print an
+ # ugly message during garbage
+ # collection. The solution would be to
+ # delete the guest objects before
+ # calling deinitialize(), this can't be
+ # done by the KVM class, it has no
+ # reference to those objects and it
+ # cannot have any either. The Guest
+ # objects already need to have a
+ # reference to their managing
+ # hypervisor. If the hypervisor had a
+ # reference to the Guest objects it
+ # manages, it would create a circular
+ # reference and those objects would
+ # not be elligible for garbage
+ # collection. In turn, this means that
+ # the KVM object would not be
+ # automatically del'ed at the end of
+ # the program and guests that are still
+ # running would be left unattended.
+ # Note that this circular reference
+ # problem could be avoided by using
+ # weakref's in class KVM but the
+ # control file will most likely also
+ # have references to the guests.
+ return
+ else:
+ raise error.AutoservVirtError("Unknown guest hostname")
+
+ pid_file_name= utils.sh_escape(os.path.join(self.pid_dir,
+ "vhost%s_pid" % (address["ip"],)))
+ monitor_file_name= utils.sh_escape(os.path.join(self.pid_dir,
+ "vhost%s_monitor" % (address["ip"],)))
+
+ retval= self.host.run(
+ _check_process_script % {
+ "pid_file_name" : pid_file_name,
+ "monitor_file_name" : monitor_file_name,
+ "qemu_binary" : utils.sh_escape(os.path.join(
+ self.build_dir,
+ "qemu/x86_64-softmmu/qemu-system-x86_64")),})
+ if retval.stdout.strip() == "process present":
+ self.host.run('kill $(cat "%s")' %(
+ pid_file_name,))
+ self.host.run('rm -f "%s"' %(
+ pid_file_name,))
+ self.host.run('rm -f "%s"' %(
+ monitor_file_name,))
+ address["is_used"]= False
+
+
+ def reset_guest(self, guest_hostname):
+ """
+ Perform a hard reset on a virtual machine.
+
+ Args:
+ guest_hostname: the ip (as it was specified in the
+ address list given to install()) of the guest
+ to reset.
+
+ Raises:
+ AutoservVirtError: the guest_hostname argument is
+ invalid
+ """
+ for address in self.addresses:
+ if address["ip"] is guest_hostname:
+ if address["is_used"]:
+ break
+ else:
+ raise error.AutoservVirtError("guest "
+ "hostname not in use")
+ else:
+ raise error.AutoservVirtError("Unknown guest hostname")
+
+ monitor_file_name= utils.sh_escape(os.path.join(self.pid_dir,
+ "vhost%s_monitor" % (address["ip"],)))
+
+ self.host.run('python -c "%s"' % (utils.sh_escape(
+ _hard_reset_script % {
+ "monitor_file_name" : monitor_file_name,}),))
diff --git a/server/rpm_kernel.py b/server/rpm_kernel.py
index adaf1806..5b8c8c0c 100644
--- a/server/rpm_kernel.py
+++ b/server/rpm_kernel.py
@@ -5,7 +5,7 @@
"""
This module defines the Kernel class
- Kernel: an os kernel
+ Kernel: an os kernel
"""
__author__ = """
@@ -20,147 +20,147 @@ from autotest_lib.server import kernel, utils
class RPMKernel(kernel.Kernel):
- """
- This class represents a .rpm pre-built kernel.
-
- It is used to obtain a built kernel and install it on a Host.
-
- Implementation details:
- This is a leaf class in an abstract class hierarchy, it must
- implement the unimplemented methods in parent classes.
- """
- def __init__(self):
- super(RPMKernel, self).__init__()
-
- def install(self, host, label='autoserv',
- default=False, kernel_args = '', install_vmlinux=False):
- """
- Install a kernel on the remote host.
-
- This will also invoke the guest's bootloader to set this
- kernel as the default kernel if default=True.
-
- Args:
- host: the host on which to install the kernel
- [kwargs]: remaining keyword arguments will be passed
- to Bootloader.add_kernel()
-
- Raises:
- AutoservError: no package has yet been obtained. Call
- RPMKernel.get() with a .rpm package.
- """
- if len(label) > 15:
- raise error.AutoservError("label for kernel is too long \
- (> 15 chars): %s" % label)
- if self.source_material is None:
- raise error.AutoservError("A kernel must first be \
- specified via get()")
- rpm = self.source_material
-
- remote_tmpdir = host.get_tmp_dir()
- remote_rpm = os.path.join(remote_tmpdir, os.path.basename(rpm))
- rpm_package = utils.run('/usr/bin/rpm -q -p %s' % rpm).stdout
- vmlinuz = self.get_image_name()
- host.send_file(rpm, remote_rpm)
- host.run('rpm -e ' + rpm_package, ignore_status = True)
- host.run('rpm --force -i ' + remote_rpm)
-
- # Copy over the uncompressed image if there is one
- if install_vmlinux:
- vmlinux = self.get_vmlinux_name()
- host.run('cd /;rpm2cpio %s | cpio -imuv .%s'
- % (remote_rpm, vmlinux))
- host.run('ls ' + vmlinux) # Verify
-
- host.bootloader.remove_kernel(label)
- host.bootloader.add_kernel(vmlinuz, label,
- args=kernel_args, default=default)
- if kernel_args:
- host.bootloader.add_args(label, kernel_args)
- if not default:
- host.bootloader.boot_once(label)
-
-
- def get_version(self):
- """Get the version of the kernel to be installed.
-
- Returns:
- The version string, as would be returned
- by 'make kernelrelease'.
-
- Raises:
- AutoservError: no package has yet been obtained. Call
- RPMKernel.get() with a .rpm package.
- """
- if self.source_material is None:
- raise error.AutoservError("A kernel must first be \
- specified via get()")
-
- retval = utils.run('rpm -qpi %s | grep Version | \
- awk \'{print($3);}\'' % utils.sh_escape(self.source_material))
- return retval.stdout.strip()
-
-
- def get_image_name(self):
- """Get the name of the kernel image to be installed.
-
- Returns:
- The full path to the kernel image file as it will be
- installed on the host.
-
- Raises:
- AutoservError: no package has yet been obtained. Call
- RPMKernel.get() with a .rpm package.
- """
- if self.source_material is None:
- raise error.AutoservError("A kernel must first be \
- specified via get()")
-
- vmlinuz = utils.run('rpm -q -l -p %s \
- | grep /boot/vmlinuz' % self.source_material).stdout.strip()
- return vmlinuz
-
-
- def get_vmlinux_name(self):
- """Get the name of the kernel image to be installed.
-
- Returns:
- The full path to the kernel image file as it will be
- installed on the host. It is the uncompressed and
- unstripped version of the kernel that can be used with
- oprofile.
-
- Raises:
- AutoservError: no package has yet been obtained. Call
- RPMKernel.get() with a .rpm package.
- """
- if self.source_material is None:
- raise error.AutoservError("A kernel must first be \
- specified via get()")
-
- vmlinux = utils.run('rpm -q -l -p %s \
- | grep /boot/vmlinux' % self.source_material).stdout.strip()
- return vmlinux
-
-
- def get_initrd_name(self):
- """Get the name of the initrd file to be installed.
-
- Returns:
- The full path to the initrd file as it will be
- installed on the host. If the package includes no
- initrd file, None is returned
-
- Raises:
- AutoservError: no package has yet been obtained. Call
- RPMKernel.get() with a .rpm package.
- """
- if self.source_material is None:
- raise error.AutoservError("A kernel must first be \
- specified via get()")
-
- res = utils.run('rpm -q -l -p %s \
- | grep /boot/initrd' % self.source_material, ignore_status=True)
- if res.exit_status:
- return None
- return res.stdout.strip()
+ """
+ This class represents a .rpm pre-built kernel.
+
+ It is used to obtain a built kernel and install it on a Host.
+
+ Implementation details:
+ This is a leaf class in an abstract class hierarchy, it must
+ implement the unimplemented methods in parent classes.
+ """
+ def __init__(self):
+ super(RPMKernel, self).__init__()
+
+ def install(self, host, label='autoserv',
+ default=False, kernel_args = '', install_vmlinux=False):
+ """
+ Install a kernel on the remote host.
+
+ This will also invoke the guest's bootloader to set this
+ kernel as the default kernel if default=True.
+
+ Args:
+ host: the host on which to install the kernel
+ [kwargs]: remaining keyword arguments will be passed
+ to Bootloader.add_kernel()
+
+ Raises:
+ AutoservError: no package has yet been obtained. Call
+ RPMKernel.get() with a .rpm package.
+ """
+ if len(label) > 15:
+ raise error.AutoservError("label for kernel is too long \
+ (> 15 chars): %s" % label)
+ if self.source_material is None:
+ raise error.AutoservError("A kernel must first be \
+ specified via get()")
+ rpm = self.source_material
+
+ remote_tmpdir = host.get_tmp_dir()
+ remote_rpm = os.path.join(remote_tmpdir, os.path.basename(rpm))
+ rpm_package = utils.run('/usr/bin/rpm -q -p %s' % rpm).stdout
+ vmlinuz = self.get_image_name()
+ host.send_file(rpm, remote_rpm)
+ host.run('rpm -e ' + rpm_package, ignore_status = True)
+ host.run('rpm --force -i ' + remote_rpm)
+
+ # Copy over the uncompressed image if there is one
+ if install_vmlinux:
+ vmlinux = self.get_vmlinux_name()
+ host.run('cd /;rpm2cpio %s | cpio -imuv .%s'
+ % (remote_rpm, vmlinux))
+ host.run('ls ' + vmlinux) # Verify
+
+ host.bootloader.remove_kernel(label)
+ host.bootloader.add_kernel(vmlinuz, label,
+ args=kernel_args, default=default)
+ if kernel_args:
+ host.bootloader.add_args(label, kernel_args)
+ if not default:
+ host.bootloader.boot_once(label)
+
+
+ def get_version(self):
+ """Get the version of the kernel to be installed.
+
+ Returns:
+ The version string, as would be returned
+ by 'make kernelrelease'.
+
+ Raises:
+ AutoservError: no package has yet been obtained. Call
+ RPMKernel.get() with a .rpm package.
+ """
+ if self.source_material is None:
+ raise error.AutoservError("A kernel must first be \
+ specified via get()")
+
+ retval = utils.run('rpm -qpi %s | grep Version | \
+ awk \'{print($3);}\'' % utils.sh_escape(self.source_material))
+ return retval.stdout.strip()
+
+
+ def get_image_name(self):
+ """Get the name of the kernel image to be installed.
+
+ Returns:
+ The full path to the kernel image file as it will be
+ installed on the host.
+
+ Raises:
+ AutoservError: no package has yet been obtained. Call
+ RPMKernel.get() with a .rpm package.
+ """
+ if self.source_material is None:
+ raise error.AutoservError("A kernel must first be \
+ specified via get()")
+
+ vmlinuz = utils.run('rpm -q -l -p %s \
+ | grep /boot/vmlinuz' % self.source_material).stdout.strip()
+ return vmlinuz
+
+
+ def get_vmlinux_name(self):
+ """Get the name of the kernel image to be installed.
+
+ Returns:
+ The full path to the kernel image file as it will be
+ installed on the host. It is the uncompressed and
+ unstripped version of the kernel that can be used with
+ oprofile.
+
+ Raises:
+ AutoservError: no package has yet been obtained. Call
+ RPMKernel.get() with a .rpm package.
+ """
+ if self.source_material is None:
+ raise error.AutoservError("A kernel must first be \
+ specified via get()")
+
+ vmlinux = utils.run('rpm -q -l -p %s \
+ | grep /boot/vmlinux' % self.source_material).stdout.strip()
+ return vmlinux
+
+
+ def get_initrd_name(self):
+ """Get the name of the initrd file to be installed.
+
+ Returns:
+ The full path to the initrd file as it will be
+ installed on the host. If the package includes no
+ initrd file, None is returned
+
+ Raises:
+ AutoservError: no package has yet been obtained. Call
+ RPMKernel.get() with a .rpm package.
+ """
+ if self.source_material is None:
+ raise error.AutoservError("A kernel must first be \
+ specified via get()")
+
+ res = utils.run('rpm -q -l -p %s \
+ | grep /boot/initrd' % self.source_material, ignore_status=True)
+ if res.exit_status:
+ return None
+ return res.stdout.strip()
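A minimal usage sketch for RPMKernel, assuming a control-file environment where hosts is available and rpm_kernel can be imported from autotest_lib.server (an assumption); the machine name and .rpm path are hypothetical:

    from autotest_lib.server import rpm_kernel

    host = hosts.SSHHost("target.example.com")            # hypothetical machine
    k = rpm_kernel.RPMKernel()
    k.get("/packages/kernel-2.6.18-1.x86_64.rpm")         # hypothetical package
    print k.get_version(), k.get_image_name()
    k.install(host, label="autoserv", default=False)      # default=False arms boot_once(label)
    host.reboot()
    host.wait_up()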
diff --git a/server/samples/autoserv_console.srv b/server/samples/autoserv_console.srv
index 50bb62a0..b0c69e61 100755
--- a/server/samples/autoserv_console.srv
+++ b/server/samples/autoserv_console.srv
@@ -9,12 +9,12 @@
# -Steve Howard (showard@google.com)
if machines:
- host = hosts.SSHHost(machines[0])
+ host = hosts.SSHHost(machines[0])
try:
- import IPython
- ipshell = IPython.Shell.IPShellEmbed(argv=[])
- ipshell()
+ import IPython
+ ipshell = IPython.Shell.IPShellEmbed(argv=[])
+ ipshell()
except ImportError:
- import code
- code.interact("Autoserv console", raw_input, locals())
+ import code
+ code.interact("Autoserv console", raw_input, locals())
diff --git a/server/samples/continuous_reboot.srv b/server/samples/continuous_reboot.srv
index 900273b7..adbfc435 100644
--- a/server/samples/continuous_reboot.srv
+++ b/server/samples/continuous_reboot.srv
@@ -1,6 +1,6 @@
def run(machine):
- host = hosts.SSHHost(machine)
- while True:
- host.reboot()
+ host = hosts.SSHHost(machine)
+ while True:
+ host.reboot()
job.parallel_simple(run, machines)
diff --git a/server/samples/failtest.srv b/server/samples/failtest.srv
index 975ab832..a793e592 100644
--- a/server/samples/failtest.srv
+++ b/server/samples/failtest.srv
@@ -1,6 +1,6 @@
def run(machine):
- host = hosts.SSHHost(machine)
- at = autotest.Autotest(host)
- at.run_test('failtest')
+ host = hosts.SSHHost(machine)
+ at = autotest.Autotest(host)
+ at.run_test('failtest')
job.parallel_simple(run, machines)
diff --git a/server/samples/info.srv b/server/samples/info.srv
index 0dd7f88b..a5d23502 100644
--- a/server/samples/info.srv
+++ b/server/samples/info.srv
@@ -1,8 +1,8 @@
def run(machine):
- host = hosts.SSHHost(machine, initialize = False)
- print 'Uptime: ' + host.check_uptime()
- print 'Arch: ' + host.get_arch()
- print 'Kernel ver: ' + host.get_kernel_ver()
- print 'Kernel cmdline: ' + host.get_cmdline()
+ host = hosts.SSHHost(machine, initialize = False)
+ print 'Uptime: ' + host.check_uptime()
+ print 'Arch: ' + host.get_arch()
+ print 'Kernel ver: ' + host.get_kernel_ver()
+ print 'Kernel cmdline: ' + host.get_cmdline()
job.parallel_simple(run, machines)
diff --git a/server/samples/kernbench.srv b/server/samples/kernbench.srv
index 174e6e75..a8d84e3c 100644
--- a/server/samples/kernbench.srv
+++ b/server/samples/kernbench.srv
@@ -1,6 +1,6 @@
def run(machine):
- host = hosts.SSHHost(machine)
- at = autotest.Autotest(host)
- at.run_test('kernbench', iterations=2, threads=5)
+ host = hosts.SSHHost(machine)
+ at = autotest.Autotest(host)
+ at.run_test('kernbench', iterations=2, threads=5)
job.parallel_simple(run, machines)
diff --git a/server/samples/netperf-guest-to-host-far.srv b/server/samples/netperf-guest-to-host-far.srv
index 57b67ae7..10ac8c73 100644
--- a/server/samples/netperf-guest-to-host-far.srv
+++ b/server/samples/netperf-guest-to-host-far.srv
@@ -2,19 +2,19 @@ print "XXXX start of control file"
def check_kernel(host, version, package):
- if host.run("uname -r").stdout.strip() != version:
- print "XXXX installing kernel on %s" % (host.hostname,)
- package.install(host)
-
- host.reboot()
- host.wait_up()
+ if host.run("uname -r").stdout.strip() != version:
+ print "XXXX installing kernel on %s" % (host.hostname,)
+ package.install(host)
+
+ host.reboot()
+ host.wait_up()
def install_kvm(kvm_on_host_var_name, host, source, addresses):
- exec ("global %(var_name)s\n"
- "%(var_name)s= kvm.KVM(host)\n"
- "%(var_name)s.get(source)\n"
- "%(var_name)s.install(addresses)\n" % {"var_name": kvm_on_host_var_name})
+ exec ("global %(var_name)s\n"
+ "%(var_name)s= kvm.KVM(host)\n"
+ "%(var_name)s.get(source)\n"
+ "%(var_name)s.install(addresses)\n" % {"var_name": kvm_on_host_var_name})
print "XXXX creating SSHHost objects"
diff --git a/server/samples/parallel.srv b/server/samples/parallel.srv
index 2fdfc573..24dc1f07 100644
--- a/server/samples/parallel.srv
+++ b/server/samples/parallel.srv
@@ -1,7 +1,7 @@
at = autotest.Autotest()
def run(machine):
- host = hosts.SSHHost(machine)
- at.run_test('sleeptest', host = host)
+ host = hosts.SSHHost(machine)
+ at.run_test('sleeptest', host = host)
job.parallel_simple(run, machines)
diff --git a/server/samples/parallel_kvm.srv b/server/samples/parallel_kvm.srv
index 682c020a..101b51da 100644
--- a/server/samples/parallel_kvm.srv
+++ b/server/samples/parallel_kvm.srv
@@ -21,9 +21,9 @@ qemu_options= "-m 256 -hda /var/local/vdisk.img -snapshot"
num_guests= 5
g= []
for i in range(num_guests):
- g.append(hosts.KVMGuest(kvm_on_remote_host, qemu_options))
+ g.append(hosts.KVMGuest(kvm_on_remote_host, qemu_options))
for i in range(num_guests):
- g[i].wait_up()
+ g[i].wait_up()
print "XXXX running transfers"
@@ -35,10 +35,10 @@ print g[0].run('sha1sum "%s"' % (big_file,)).stdout.strip()
args= range(1, num_guests)
def f(i):
- print "This is %s" % i
- tmp_dir= g[i].get_tmp_dir()
- g[i].run('scp "%s":"%s" "%s"' % (g[0].hostname, big_file, tmp_dir,))
- print g[i].run('sha1sum "%s"' % (os.path.join(tmp_dir, "big_file"),)).stdout.strip()
+ print "This is %s" % i
+ tmp_dir= g[i].get_tmp_dir()
+ g[i].run('scp "%s":"%s" "%s"' % (g[0].hostname, big_file, tmp_dir,))
+ print g[i].run('sha1sum "%s"' % (os.path.join(tmp_dir, "big_file"),)).stdout.strip()
job.parallel_simple(f, args)
diff --git a/server/samples/parallel_sleeptest.srv b/server/samples/parallel_sleeptest.srv
index 49bb0d1d..c46353ee 100644
--- a/server/samples/parallel_sleeptest.srv
+++ b/server/samples/parallel_sleeptest.srv
@@ -22,34 +22,34 @@ from autotest_lib.client.common_lib import utils
# Specify the path to the client control files and the tag names
# for the respective jobs here.
tests = [("client/tests/sleeptest/control", "sleeptag0"),
- ("client/tests/sleeptest/control", "sleeptag1"),
- ]
+ ("client/tests/sleeptest/control", "sleeptag1"),
+ ]
def run_client(at, machine_name, machine_num, instance):
- control = open(os.path.join(job.autodir,tests[instance][0])).read()
- '''
- The get_sync_control_file method basically does the setup of the barriers
- required to start these multiple tests at the same time and returns the
- modified control file (that contains the barrier code added to it)
- Check client/common_lib/utils.py for detailed documentation of how this
- method sets up the barriers.
- '''
- control_new = utils.get_sync_control_file(control, machine_name,
- machine_num, instance, len(tests))
- '''
- This control file is now simply passed in to the run method along with
- a tag name of the test and a 'parallel_flag' that identifies this scenario
- of running multiple tests on the same machine at the same time.
- '''
- at.run(control_new, tag='%s' % tests[instance][1], parallel_flag=True)
+ control = open(os.path.join(job.autodir,tests[instance][0])).read()
+ '''
+ The get_sync_control_file method basically does the setup of the barriers
+ required to start these multiple tests at the same time and returns the
+ modified control file (that contains the barrier code added to it)
+ Check client/common_lib/utils.py for detailed documentation of how this
+ method sets up the barriers.
+ '''
+ control_new = utils.get_sync_control_file(control, machine_name,
+ machine_num, instance, len(tests))
+ '''
+ This control file is now simply passed in to the run method along with
+ a tag name of the test and a 'parallel_flag' that identifies this scenario
+ of running multiple tests on the same machine at the same time.
+ '''
+ at.run(control_new, tag='%s' % tests[instance][1], parallel_flag=True)
def main(machine_name, machine_num):
- host = hosts.SSHHost(machine_name)
- at = autotest.Autotest(host)
- at.install()
+ host = hosts.SSHHost(machine_name)
+ at = autotest.Autotest(host)
+ at.install()
- parallel([subcommand(run_client, [at, machine_name, machine_num, i])
- for i in range(len(tests))])
+ parallel([subcommand(run_client, [at, machine_name, machine_num, i])
+ for i in range(len(tests))])
parallel([subcommand(main, [machines[i], i], machines[i])
- for i in range(len(machines))])
+ for i in range(len(machines))])
diff --git a/server/samples/profilertest.srv b/server/samples/profilertest.srv
index 2743b760..42f32f22 100644
--- a/server/samples/profilertest.srv
+++ b/server/samples/profilertest.srv
@@ -5,63 +5,63 @@ at_hosts = [autotest.Autotest(h) for h in ssh_hosts]
def add_profilers(at, profilers, timeout_sync, timeout_start, timeout_stop,
- machines, name):
- control_file = []
- for profiler in profilers:
- control_file.append("job.profilers.add(%s)"
- % str(profiler)[1:-1])
+ machines, name):
+ control_file = []
+ for profiler in profilers:
+ control_file.append("job.profilers.add(%s)"
+ % str(profiler)[1:-1])
- control_file.append("job.run_test('barriertest',%d,%d,%d,'%s','%s',%s)"
- % (timeout_sync, timeout_start, timeout_stop,
- at.host.hostname, "PROF_MASTER", str(machines)))
+ control_file.append("job.run_test('barriertest',%d,%d,%d,'%s','%s',%s)"
+ % (timeout_sync, timeout_start, timeout_stop,
+ at.host.hostname, "PROF_MASTER", str(machines)))
- for profiler in profilers:
- control_file.append("job.profilers.delete('%s')" % profiler[0])
+ for profiler in profilers:
+ control_file.append("job.profilers.delete('%s')" % profiler[0])
- params = ["\n".join(control_file), "profile-" + profiler[0], at.host]
- return subcommand(at.run, params, name)
+ params = ["\n".join(control_file), "profile-" + profiler[0], at.host]
+ return subcommand(at.run, params, name)
def wait_for_profilers(machines, timeout = 180):
- # wait until the profilers have started
- sync_bar = barrier("PROF_MASTER", "sync_profilers",
- timeout, port=63100)
- sync_bar.rendevous_servers("PROF_MASTER", *machines)
+ # wait until the profilers have started
+ sync_bar = barrier("PROF_MASTER", "sync_profilers",
+ timeout, port=63100)
+ sync_bar.rendevous_servers("PROF_MASTER", *machines)
def start_profilers(machines, timeout = 180):
- # wait until the profilers have started
- start_bar = barrier("PROF_MASTER", "start_profilers",
- timeout, port=63100)
- start_bar.rendevous_servers("PROF_MASTER", *machines)
+ # wait until the profilers have started
+ start_bar = barrier("PROF_MASTER", "start_profilers",
+ timeout, port=63100)
+ start_bar.rendevous_servers("PROF_MASTER", *machines)
def stop_profilers(machines, timeout = 120):
- stop_bar = barrier("PROF_MASTER", "stop_profilers", timeout, port=63100)
- stop_bar.rendevous_servers("PROF_MASTER", *machines)
+ stop_bar = barrier("PROF_MASTER", "stop_profilers", timeout, port=63100)
+ stop_bar.rendevous_servers("PROF_MASTER", *machines)
def server_sleep_test(seconds):
- wait_for_profilers(machines)
- start_profilers(machines)
- for i in range(seconds):
- print "%d of %d" % (i, seconds)
- time.sleep(1)
- stop_profilers(machines)
+ wait_for_profilers(machines)
+ start_profilers(machines)
+ for i in range(seconds):
+ print "%d of %d" % (i, seconds)
+ time.sleep(1)
+ stop_profilers(machines)
def main():
- timeout_sync = 180
- timeout_start = 60
- timeout_stop = 60
- profilers = [["vmstat"], ["iostat"]]
-
- tests = [subcommand(server_sleep_test, [20], "server_sleep_test")]
- for at in at_hosts:
- name = "profiled-%s" % at.host.hostname
- tests.append(add_profilers(at, profilers, timeout_sync,
- timeout_start, timeout_stop, machines, name))
- parallel(tests)
+ timeout_sync = 180
+ timeout_start = 60
+ timeout_stop = 60
+ profilers = [["vmstat"], ["iostat"]]
+
+ tests = [subcommand(server_sleep_test, [20], "server_sleep_test")]
+ for at in at_hosts:
+ name = "profiled-%s" % at.host.hostname
+ tests.append(add_profilers(at, profilers, timeout_sync,
+ timeout_start, timeout_stop, machines, name))
+ parallel(tests)
main()
diff --git a/server/samples/reboot.srv b/server/samples/reboot.srv
index 670bf5cf..58375703 100644
--- a/server/samples/reboot.srv
+++ b/server/samples/reboot.srv
@@ -1,6 +1,6 @@
def run(machine):
- host = hosts.SSHHost(machine)
- print host.is_up()
- host.reboot()
+ host = hosts.SSHHost(machine)
+ print host.is_up()
+ host.reboot()
job.parallel_simple(run, machines)
diff --git a/server/samples/run_test.srv b/server/samples/run_test.srv
index 12204458..e8b1f36b 100644
--- a/server/samples/run_test.srv
+++ b/server/samples/run_test.srv
@@ -5,38 +5,38 @@ logdir = None
def usage():
- print "usage: -t <test name> -m <machines> -l <log dir>"
+ print "usage: -t <test name> -m <machines> -l <log dir>"
def run(client):
- m = hosts.SSHHost(client)
- at = autotest.Autotest()
+ m = hosts.SSHHost(client)
+ at = autotest.Autotest()
- results_dir = os.path.join(logdir, client)
- at.run_test(test, results_dir, m)
+ results_dir = os.path.join(logdir, client)
+ at.run_test(test, results_dir, m)
def main():
- global test, logdir, args
-
- try:
- opts, args = getopt.getopt(args, 't:l:', [])
- except getopt.GetoptError, e:
- usage()
- print e
- sys.exit(1)
-
- for flag, value in opts:
- if flag == '-t':
- test = value
- elif flag == '-l':
- logdir = value
-
- if test == None or logdir == None:
- usage()
- sys.exit(1)
-
- print "Going to launch %s on %r with log dir of %s." % (test, machines, logdir)
- parallel_simple(run, machines)
+ global test, logdir, args
+
+ try:
+ opts, args = getopt.getopt(args, 't:l:', [])
+ except getopt.GetoptError, e:
+ usage()
+ print e
+ sys.exit(1)
+
+ for flag, value in opts:
+ if flag == '-t':
+ test = value
+ elif flag == '-l':
+ logdir = value
+
+ if test == None or logdir == None:
+ usage()
+ sys.exit(1)
+
+ print "Going to launch %s on %r with log dir of %s." % (test, machines, logdir)
+ parallel_simple(run, machines)
main()
diff --git a/server/samples/sleeptest.srv b/server/samples/sleeptest.srv
index 805ac097..85cbf083 100644
--- a/server/samples/sleeptest.srv
+++ b/server/samples/sleeptest.srv
@@ -1,6 +1,6 @@
def run(machine):
- host = hosts.SSHHost(machine)
- at = autotest.Autotest(host)
- at.run_test('sleeptest')
+ host = hosts.SSHHost(machine)
+ at = autotest.Autotest(host)
+ at.run_test('sleeptest')
job.parallel_simple(run, machines)
diff --git a/server/samples/uname.srv b/server/samples/uname.srv
index b141d5c5..f67d6260 100644
--- a/server/samples/uname.srv
+++ b/server/samples/uname.srv
@@ -1,5 +1,5 @@
hosts = [hosts.SSHHost(h, initialize=False) for h in machines]
for host in hosts:
- print host.hostname
- print host.run('uname -a').stdout.rstrip()
+ print host.hostname
+ print host.run('uname -a').stdout.rstrip()
diff --git a/server/self-test/alltests_suite.py b/server/self-test/alltests_suite.py
index 14ffc1d5..1d9a8b04 100644
--- a/server/self-test/alltests_suite.py
+++ b/server/self-test/alltests_suite.py
@@ -12,7 +12,7 @@ import os, sys
# Adjust the path so Python can find the autoserv modules
src = os.path.abspath("%s/.." % (os.path.dirname(sys.argv[0]),))
if src not in sys.path:
- sys.path.insert(1, src)
+ sys.path.insert(1, src)
import unittest
@@ -22,9 +22,9 @@ import utils_test
def suite():
- return unittest.TestSuite([autotest_test.suite(),
- utils_test.suite()])
+ return unittest.TestSuite([autotest_test.suite(),
+ utils_test.suite()])
if __name__ == '__main__':
- unittest.TextTestRunner(verbosity=2).run(suite())
+ unittest.TextTestRunner(verbosity=2).run(suite())
diff --git a/server/self-test/autotest_test.py b/server/self-test/autotest_test.py
index 033b0912..08dc4824 100644
--- a/server/self-test/autotest_test.py
+++ b/server/self-test/autotest_test.py
@@ -14,7 +14,7 @@ import unittest
# Adjust the path so Python can find the autoserv modules
src = os.path.abspath("%s/.." % (os.path.dirname(sys.argv[0]),))
if src not in sys.path:
- sys.path.insert(1, src)
+ sys.path.insert(1, src)
import utils
import autotest
@@ -22,119 +22,119 @@ import hosts
class AutotestTestCase(unittest.TestCase):
- def setUp(self):
- self.autotest = autotest.Autotest()
-
- def tearDown(self):
- pass
-
-
- def testGetAutoDir(self):
- class MockInstallHost:
- def __init__(self):
- self.commands = []
- self.result = "autodir='/stuff/autotest'\n"
-
- def run(self, command):
- if command == "grep autodir= /etc/autotest.conf":
- result = hosts.CmdResult()
- result.stdout = self.result
- return result
- else:
- self.commands.append(command)
-
- host = MockInstallHost()
- self.assertEqual('/stuff/autotest',
- autotest._get_autodir(host))
- host.result = "autodir=/stuff/autotest\n"
- self.assertEqual('/stuff/autotest',
- autotest._get_autodir(host))
- host.result = 'autodir="/stuff/auto test"\n'
- self.assertEqual('/stuff/auto test',
- autotest._get_autodir(host))
-
-
- def testInstallFromDir(self):
- class MockInstallHost:
- def __init__(self):
- self.commands = []
-
- def run(self, command):
- if command == "grep autodir= /etc/autotest.conf":
- result= hosts.CmdResult()
- result.stdout = "autodir=/usr/local/autotest\n"
- return result
- else:
- self.commands.append(command)
-
- def send_file(self, src, dst):
- self.commands.append("send_file: %s %s" % (src,
- dst))
-
- host = MockInstallHost()
- tmpdir = utils.get_tmp_dir()
- self.autotest.get(tmpdir)
- self.autotest.install(host)
- self.assertEqual(host.commands[0],
- 'mkdir -p /usr/local/autotest')
- self.assertTrue(host.commands[1].startswith('send_file: /tmp/'))
- self.assertTrue(host.commands[1].endswith(
- '/ /usr/local/autotest'))
-
-
-
-
- def testInstallFromSVN(self):
- class MockInstallHost:
- def __init__(self):
- self.commands = []
-
- def run(self, command):
- if command == "grep autodir= /etc/autotest.conf":
- result= hosts.CmdResult()
- result.stdout = "autodir=/usr/local/autotest\n"
- return result
- else:
- self.commands.append(command)
-
- host = MockInstallHost()
- self.autotest.install(host)
- self.assertEqual(host.commands,
- ['svn checkout '
- + autotest.AUTOTEST_SVN + ' '
- + "/usr/local/autotest"])
-
-
- def testFirstInstallFromSVNFails(self):
- class MockFirstInstallFailsHost:
- def __init__(self):
- self.commands = []
-
- def run(self, command):
- if command == "grep autodir= /etc/autotest.conf":
- result= hosts.CmdResult()
- result.stdout = "autodir=/usr/local/autotest\n"
- return result
- else:
- self.commands.append(command)
- first = ('svn checkout ' +
- autotest.AUTOTEST_SVN + ' ' +
- "/usr/local/autotest")
- if (command == first):
- raise autotest.AutoservRunError(
- "svn not found")
-
- host = MockFirstInstallFailsHost()
- self.autotest.install(host)
- self.assertEqual(host.commands,
- ['svn checkout ' + autotest.AUTOTEST_SVN +
- ' ' + "/usr/local/autotest",
- 'svn checkout ' + autotest.AUTOTEST_HTTP +
- ' ' + "/usr/local/autotest"])
+ def setUp(self):
+ self.autotest = autotest.Autotest()
+
+ def tearDown(self):
+ pass
+
+
+ def testGetAutoDir(self):
+ class MockInstallHost:
+ def __init__(self):
+ self.commands = []
+ self.result = "autodir='/stuff/autotest'\n"
+
+ def run(self, command):
+ if command == "grep autodir= /etc/autotest.conf":
+ result = hosts.CmdResult()
+ result.stdout = self.result
+ return result
+ else:
+ self.commands.append(command)
+
+ host = MockInstallHost()
+ self.assertEqual('/stuff/autotest',
+ autotest._get_autodir(host))
+ host.result = "autodir=/stuff/autotest\n"
+ self.assertEqual('/stuff/autotest',
+ autotest._get_autodir(host))
+ host.result = 'autodir="/stuff/auto test"\n'
+ self.assertEqual('/stuff/auto test',
+ autotest._get_autodir(host))
+
+
+ def testInstallFromDir(self):
+ class MockInstallHost:
+ def __init__(self):
+ self.commands = []
+
+ def run(self, command):
+ if command == "grep autodir= /etc/autotest.conf":
+ result= hosts.CmdResult()
+ result.stdout = "autodir=/usr/local/autotest\n"
+ return result
+ else:
+ self.commands.append(command)
+
+ def send_file(self, src, dst):
+ self.commands.append("send_file: %s %s" % (src,
+ dst))
+
+ host = MockInstallHost()
+ tmpdir = utils.get_tmp_dir()
+ self.autotest.get(tmpdir)
+ self.autotest.install(host)
+ self.assertEqual(host.commands[0],
+ 'mkdir -p /usr/local/autotest')
+ self.assertTrue(host.commands[1].startswith('send_file: /tmp/'))
+ self.assertTrue(host.commands[1].endswith(
+ '/ /usr/local/autotest'))
+
+
+
+
+ def testInstallFromSVN(self):
+ class MockInstallHost:
+ def __init__(self):
+ self.commands = []
+
+ def run(self, command):
+ if command == "grep autodir= /etc/autotest.conf":
+ result= hosts.CmdResult()
+ result.stdout = "autodir=/usr/local/autotest\n"
+ return result
+ else:
+ self.commands.append(command)
+
+ host = MockInstallHost()
+ self.autotest.install(host)
+ self.assertEqual(host.commands,
+ ['svn checkout '
+ + autotest.AUTOTEST_SVN + ' '
+ + "/usr/local/autotest"])
+
+
+ def testFirstInstallFromSVNFails(self):
+ class MockFirstInstallFailsHost:
+ def __init__(self):
+ self.commands = []
+
+ def run(self, command):
+ if command == "grep autodir= /etc/autotest.conf":
+ result= hosts.CmdResult()
+ result.stdout = "autodir=/usr/local/autotest\n"
+ return result
+ else:
+ self.commands.append(command)
+ first = ('svn checkout ' +
+ autotest.AUTOTEST_SVN + ' ' +
+ "/usr/local/autotest")
+ if (command == first):
+ raise autotest.AutoservRunError(
+ "svn not found")
+
+ host = MockFirstInstallFailsHost()
+ self.autotest.install(host)
+ self.assertEqual(host.commands,
+ ['svn checkout ' + autotest.AUTOTEST_SVN +
+ ' ' + "/usr/local/autotest",
+ 'svn checkout ' + autotest.AUTOTEST_HTTP +
+ ' ' + "/usr/local/autotest"])
def suite():
- return unittest.TestLoader().loadTestsFromTestCase(AutotestTestCase)
+ return unittest.TestLoader().loadTestsFromTestCase(AutotestTestCase)
if __name__ == '__main__':
- unittest.TextTestRunner(verbosity=2).run(suite())
+ unittest.TextTestRunner(verbosity=2).run(suite())
diff --git a/server/self-test/utils_test.py b/server/self-test/utils_test.py
index 0978752b..fffbf9a4 100644
--- a/server/self-test/utils_test.py
+++ b/server/self-test/utils_test.py
@@ -15,62 +15,62 @@ import unittest
# Adjust the path so Python can find the autoserv modules
src = os.path.abspath("%s/.." % (os.path.dirname(sys.argv[0]),))
if src not in sys.path:
- sys.path.insert(1, src)
+ sys.path.insert(1, src)
import utils
class UtilsTestCase(unittest.TestCase):
- def setUp(self):
- pass
+ def setUp(self):
+ pass
- def tearDown(self):
- pass
+ def tearDown(self):
+ pass
- def testGetWithOpenFile(self):
- tmpdir = utils.get_tmp_dir()
- tmppath = os.path.join(tmpdir, 'testfile')
- tmpfile = file(tmppath, 'w')
- print >> tmpfile, 'Test string'
- tmpfile.close()
- tmpfile = file(tmppath)
- newtmppath = utils.get(tmpfile)
- self.assertEqual(file(newtmppath).read(), 'Test string\n')
+ def testGetWithOpenFile(self):
+ tmpdir = utils.get_tmp_dir()
+ tmppath = os.path.join(tmpdir, 'testfile')
+ tmpfile = file(tmppath, 'w')
+ print >> tmpfile, 'Test string'
+ tmpfile.close()
+ tmpfile = file(tmppath)
+ newtmppath = utils.get(tmpfile)
+ self.assertEqual(file(newtmppath).read(), 'Test string\n')
- def testGetWithHTTP(self):
- # Yeah, this test is a bad idea, oh well
- url = 'http://www.kernel.org/pub/linux/kernel/README'
- tmppath = utils.get(url)
- f = file(tmppath)
- f.readline()
- self.assertTrue('Linux' in f.readline().split())
+ def testGetWithHTTP(self):
+ # Yeah, this test is a bad idea, oh well
+ url = 'http://www.kernel.org/pub/linux/kernel/README'
+ tmppath = utils.get(url)
+ f = file(tmppath)
+ f.readline()
+ self.assertTrue('Linux' in f.readline().split())
- def testGetWithPath(self):
- path = utils.get('/proc/cpuinfo')
- self.assertTrue(file(path).readline().startswith('processor'))
+ def testGetWithPath(self):
+ path = utils.get('/proc/cpuinfo')
+ self.assertTrue(file(path).readline().startswith('processor'))
- def testGetWithString(self):
- path = utils.get('/tmp loves rabbits!')
- self.assertTrue(file(path).readline().startswith('/tmp loves'))
+ def testGetWithString(self):
+ path = utils.get('/tmp loves rabbits!')
+ self.assertTrue(file(path).readline().startswith('/tmp loves'))
- def testGetWithDir(self):
- tmpdir = utils.get_tmp_dir()
- origpath = os.path.join(tmpdir, 'testGetWithDir')
- os.mkdir(origpath)
- dstpath = utils.get(origpath)
- self.assertTrue(dstpath.endswith('/'))
- self.assertTrue(os.path.isdir(dstpath))
+ def testGetWithDir(self):
+ tmpdir = utils.get_tmp_dir()
+ origpath = os.path.join(tmpdir, 'testGetWithDir')
+ os.mkdir(origpath)
+ dstpath = utils.get(origpath)
+ self.assertTrue(dstpath.endswith('/'))
+ self.assertTrue(os.path.isdir(dstpath))
def suite():
- return unittest.TestLoader().loadTestsFromTestCase(UtilsTestCase)
+ return unittest.TestLoader().loadTestsFromTestCase(UtilsTestCase)
if __name__ == '__main__':
- unittest.TextTestRunner(verbosity=2).run(suite())
+ unittest.TextTestRunner(verbosity=2).run(suite())
diff --git a/server/server_job.py b/server/server_job.py
index ed91713a..8b6c2c66 100755
--- a/server/server_job.py
+++ b/server/server_job.py
@@ -23,12 +23,12 @@ from autotest_lib.client.common_lib import utils
# load up a control segment
# these are all stored in <server_dir>/control_segments
def load_control_segment(name):
- server_dir = os.path.dirname(os.path.abspath(__file__))
- script_file = os.path.join(server_dir, "control_segments", name)
- if os.path.exists(script_file):
- return file(script_file).read()
- else:
- return ""
+ server_dir = os.path.dirname(os.path.abspath(__file__))
+ script_file = os.path.join(server_dir, "control_segments", name)
+ if os.path.exists(script_file):
+ return file(script_file).read()
+ else:
+ return ""
preamble = """\
@@ -47,54 +47,54 @@ autotest.Autotest.job = job
hosts.SSHHost.job = job
barrier = barrier.barrier
if len(machines) > 1:
- open('.machines', 'w').write('\\n'.join(machines) + '\\n')
+ open('.machines', 'w').write('\\n'.join(machines) + '\\n')
"""
client_wrapper = """
at = autotest.Autotest()
def run_client(machine):
- hostname, user, password, port = parse_machine(machine,
- ssh_user, ssh_port, ssh_pass)
+ hostname, user, password, port = parse_machine(machine,
+ ssh_user, ssh_port, ssh_pass)
- host = hosts.SSHHost(hostname, user, port, password=password)
- at.run(control, host=host)
+ host = hosts.SSHHost(hostname, user, port, password=password)
+ at.run(control, host=host)
job.parallel_simple(run_client, machines)
"""
crashdumps = """
def crashdumps(machine):
- hostname, user, password, port = parse_machine(machine,
- ssh_user, ssh_port, ssh_pass)
+ hostname, user, password, port = parse_machine(machine,
+ ssh_user, ssh_port, ssh_pass)
- host = hosts.SSHHost(hostname, user, port, initialize=False, \
- password=password)
- host.get_crashdumps(test_start_time)
+ host = hosts.SSHHost(hostname, user, port, initialize=False, \
+ password=password)
+ host.get_crashdumps(test_start_time)
job.parallel_simple(crashdumps, machines, log=False)
"""
reboot_segment="""\
def reboot(machine):
- hostname, user, password, port = parse_machine(machine,
- ssh_user, ssh_port, ssh_pass)
+ hostname, user, password, port = parse_machine(machine,
+ ssh_user, ssh_port, ssh_pass)
- host = hosts.SSHHost(hostname, user, port, initialize=False, \
- password=password)
- host.reboot()
+ host = hosts.SSHHost(hostname, user, port, initialize=False, \
+ password=password)
+ host.reboot()
job.parallel_simple(reboot, machines, log=False)
"""
install="""\
def install(machine):
- hostname, user, password, port = parse_machine(machine,
- ssh_user, ssh_port, ssh_pass)
+ hostname, user, password, port = parse_machine(machine,
+ ssh_user, ssh_port, ssh_pass)
- host = hosts.SSHHost(hostname, user, port, initialize=False, \
- password=password)
- host.machine_install()
+ host = hosts.SSHHost(hostname, user, port, initialize=False, \
+ password=password)
+ host.machine_install()
job.parallel_simple(install, machines, log=False)
"""
@@ -110,701 +110,701 @@ repair += load_control_segment("repair")
# load up site-specific code for generating site-specific job data
try:
- import site_job
- get_site_job_data = site_job.get_site_job_data
- del site_job
+ import site_job
+ get_site_job_data = site_job.get_site_job_data
+ del site_job
except ImportError:
- # by default provide a stub that generates no site data
- def get_site_job_data(job):
- return {}
+ # by default provide a stub that generates no site data
+ def get_site_job_data(job):
+ return {}
class base_server_job:
- """The actual job against which we do everything.
-
- Properties:
- autodir
- The top level autotest directory (/usr/local/autotest).
- serverdir
- <autodir>/server/
- clientdir
- <autodir>/client/
- conmuxdir
- <autodir>/conmux/
- testdir
- <autodir>/server/tests/
- control
- the control file for this job
- """
-
- STATUS_VERSION = 1
-
-
- def __init__(self, control, args, resultdir, label, user, machines,
- client=False, parse_job="",
- ssh_user='root', ssh_port=22, ssh_pass=''):
- """
- control
- The control file (pathname of)
- args
- args to pass to the control file
- resultdir
- where to throw the results
- label
- label for the job
- user
- Username for the job (email address)
- client
- True if a client-side control file
- """
- path = os.path.dirname(__file__)
- self.autodir = os.path.abspath(os.path.join(path, '..'))
- self.serverdir = os.path.join(self.autodir, 'server')
- self.testdir = os.path.join(self.serverdir, 'tests')
- self.tmpdir = os.path.join(self.serverdir, 'tmp')
- self.conmuxdir = os.path.join(self.autodir, 'conmux')
- self.clientdir = os.path.join(self.autodir, 'client')
- self.toolsdir = os.path.join(self.autodir, 'client/tools')
- if control:
- self.control = open(control, 'r').read()
- self.control = re.sub('\r', '', self.control)
- else:
- self.control = None
- self.resultdir = resultdir
- if not os.path.exists(resultdir):
- os.mkdir(resultdir)
- self.debugdir = os.path.join(resultdir, 'debug')
- if not os.path.exists(self.debugdir):
- os.mkdir(self.debugdir)
- self.status = os.path.join(resultdir, 'status')
- self.label = label
- self.user = user
- self.args = args
- self.machines = machines
- self.client = client
- self.record_prefix = ''
- self.warning_loggers = set()
- self.ssh_user = ssh_user
- self.ssh_port = ssh_port
- self.ssh_pass = ssh_pass
-
- self.stdout = fd_stack.fd_stack(1, sys.stdout)
- self.stderr = fd_stack.fd_stack(2, sys.stderr)
-
- if os.path.exists(self.status):
- os.unlink(self.status)
- job_data = {'label' : label, 'user' : user,
- 'hostname' : ','.join(machines),
- 'status_version' : str(self.STATUS_VERSION)}
- job_data.update(get_site_job_data(self))
- utils.write_keyval(self.resultdir, job_data)
-
- self.parse_job = parse_job
- if self.parse_job and len(machines) == 1:
- self.using_parser = True
- self.init_parser(resultdir)
- else:
- self.using_parser = False
-
-
- def init_parser(self, resultdir):
- """Start the continuous parsing of resultdir. This sets up
- the database connection and inserts the basic job object into
- the database if necessary."""
- # redirect parser debugging to .parse.log
- parse_log = os.path.join(resultdir, '.parse.log')
- parse_log = open(parse_log, 'w', 0)
- tko_utils.redirect_parser_debugging(parse_log)
- # create a job model object and set up the db
- self.results_db = tko_db.db(autocommit=True)
- self.parser = status_lib.parser(self.STATUS_VERSION)
- self.job_model = self.parser.make_job(resultdir)
- self.parser.start(self.job_model)
- # check if a job already exists in the db and insert it if
- # it does not
- job_idx = self.results_db.find_job(self.parse_job)
- if job_idx is None:
- self.results_db.insert_job(self.parse_job,
- self.job_model)
- else:
- machine_idx = self.results_db.lookup_machine(
- self.job_model.machine)
- self.job_model.index = job_idx
- self.job_model.machine_idx = machine_idx
-
-
- def cleanup_parser(self):
- """This should be called after the server job is finished
- to carry out any remaining cleanup (e.g. flushing any
- remaining test results to the results db)"""
- if not self.using_parser:
- return
- final_tests = self.parser.end()
- for test in final_tests:
- self.__insert_test(test)
- self.using_parser = False
-
-
- def verify(self):
- if not self.machines:
- raise error.AutoservError(
- 'No machines specified to verify')
- try:
- namespace = {'machines' : self.machines, 'job' : self, \
- 'ssh_user' : self.ssh_user, \
- 'ssh_port' : self.ssh_port, \
- 'ssh_pass' : self.ssh_pass}
- exec(preamble + verify, namespace, namespace)
- except Exception, e:
- msg = ('Verify failed\n' + str(e) + '\n'
- + traceback.format_exc())
- self.record('ABORT', None, None, msg)
- raise
-
-
- def repair(self):
- if not self.machines:
- raise error.AutoservError(
- 'No machines specified to repair')
- namespace = {'machines' : self.machines, 'job' : self, \
- 'ssh_user' : self.ssh_user, \
- 'ssh_port' : self.ssh_port, \
- 'ssh_pass' : self.ssh_pass}
- # no matter what happens during repair, go on to try to reverify
- try:
- exec(preamble + repair, namespace, namespace)
- except Exception, exc:
- print 'Exception occured during repair'
- traceback.print_exc()
- self.verify()
-
-
- def enable_external_logging(self):
- """Start or restart external logging mechanism.
- """
- pass
-
-
- def disable_external_logging(self):
- """ Pause or stop external logging mechanism.
- """
- pass
-
-
- def use_external_logging(self):
- """Return True if external logging should be used.
- """
- return False
-
-
- def parallel_simple(self, function, machines, log=True, timeout=None):
- """Run 'function' using parallel_simple, with an extra
- wrapper to handle the necessary setup for continuous parsing,
- if possible. If continuous parsing is already properly
- initialized then this should just work."""
- is_forking = not (len(machines) == 1 and
- self.machines == machines)
- if self.parse_job and is_forking:
- def wrapper(machine):
- self.parse_job += "/" + machine
- self.using_parser = True
- self.machines = [machine]
- self.resultdir = os.path.join(self.resultdir,
- machine)
- self.init_parser(self.resultdir)
- result = function(machine)
- self.cleanup_parser()
- return result
- else:
- wrapper = function
- subcommand.parallel_simple(wrapper, machines, log, timeout)
-
-
- def run(self, reboot = False, install_before = False,
- install_after = False, collect_crashdumps = True,
- namespace = {}):
- # use a copy so changes don't affect the original dictionary
- namespace = namespace.copy()
- machines = self.machines
-
- self.aborted = False
- namespace['machines'] = machines
- namespace['args'] = self.args
- namespace['job'] = self
- namespace['ssh_user'] = self.ssh_user
- namespace['ssh_port'] = self.ssh_port
- namespace['ssh_pass'] = self.ssh_pass
- test_start_time = int(time.time())
-
- os.chdir(self.resultdir)
-
- self.enable_external_logging()
- status_log = os.path.join(self.resultdir, 'status.log')
- try:
- if install_before and machines:
- exec(preamble + install, namespace, namespace)
- if self.client:
- namespace['control'] = self.control
- open('control', 'w').write(self.control)
- open('control.srv', 'w').write(client_wrapper)
- server_control = client_wrapper
- else:
- open('control.srv', 'w').write(self.control)
- server_control = self.control
- exec(preamble + server_control, namespace, namespace)
-
- finally:
- if machines and collect_crashdumps:
- namespace['test_start_time'] = test_start_time
- exec(preamble + crashdumps,
- namespace, namespace)
- self.disable_external_logging()
- if reboot and machines:
- exec(preamble + reboot_segment,
- namespace, namespace)
- if install_after and machines:
- exec(preamble + install, namespace, namespace)
-
-
- def run_test(self, url, *args, **dargs):
- """Summon a test object and run it.
-
- tag
- tag to add to testname
- url
- url of the test to run
- """
-
- (group, testname) = test.testname(url)
- tag = None
- subdir = testname
-
- if dargs.has_key('tag'):
- tag = dargs['tag']
- del dargs['tag']
- if tag:
- subdir += '.' + tag
-
- outputdir = os.path.join(self.resultdir, subdir)
- if os.path.exists(outputdir):
- msg = ("%s already exists, test <%s> may have"
- " already run with tag <%s>"
- % (outputdir, testname, tag) )
- raise error.TestError(msg)
- os.mkdir(outputdir)
-
- try:
- test.runtest(self, url, tag, args, dargs)
- self.record('GOOD', subdir, testname, 'completed successfully')
- except error.TestNAError, detail:
- self.record('TEST_NA', subdir, testname, str(detail))
- except Exception, detail:
- info = str(detail) + "\n" + traceback.format_exc()
- self.record('FAIL', subdir, testname, info)
-
-
- def run_group(self, function, *args, **dargs):
- """\
- function:
- subroutine to run
- *args:
- arguments for the function
- """
-
- result = None
- name = function.__name__
-
- # Allow the tag for the group to be specified.
- if dargs.has_key('tag'):
- tag = dargs['tag']
- del dargs['tag']
- if tag:
- name = tag
-
- old_record_prefix = self.record_prefix
- try:
- try:
- self.record('START', None, name)
- self.record_prefix += '\t'
- result = function(*args, **dargs)
- except Exception, e:
- self.record_prefix = old_record_prefix
- err_msg = str(e) + '\n'
- err_msg += traceback.format_exc()
- self.record('END FAIL', None, name, err_msg)
- else:
- self.record_prefix = old_record_prefix
- self.record('END GOOD', None, name)
-
- # We don't want to raise up an error higher if it's just
- # a TestError - we want to carry on to other tests. Hence
- # this outer try/except block.
- except error.TestError:
- pass
- except:
- raise error.TestError(name + ' failed\n' +
- traceback.format_exc())
-
- return result
-
-
- def run_reboot(self, reboot_func, get_kernel_func):
- """\
- A specialization of run_group meant specifically for handling
- a reboot. Includes support for capturing the kernel version
- after the reboot.
-
- reboot_func: a function that carries out the reboot
-
- get_kernel_func: a function that returns a string
- representing the kernel version.
- """
-
- old_record_prefix = self.record_prefix
- try:
- self.record('START', None, 'reboot')
- self.record_prefix += '\t'
- reboot_func()
- except Exception, e:
- self.record_prefix = old_record_prefix
- err_msg = str(e) + '\n' + traceback.format_exc()
- self.record('END FAIL', None, 'reboot', err_msg)
- else:
- kernel = get_kernel_func()
- self.record_prefix = old_record_prefix
- self.record('END GOOD', None, 'reboot',
- optional_fields={"kernel": kernel})
-
-
- def record(self, status_code, subdir, operation, status='',
- optional_fields=None):
- """
- Record job-level status
-
- The intent is to make this file both machine parseable and
- human readable. That involves a little more complexity, but
- really isn't all that bad ;-)
-
- Format is <status code>\t<subdir>\t<operation>\t<status>
-
- status code: see common_lib.logging.is_valid_status()
- for valid status definition
-
- subdir: MUST be a relevant subdirectory in the results,
- or None, which will be represented as '----'
-
- operation: description of what you ran (e.g. "dbench", or
- "mkfs -t foobar /dev/sda9")
-
- status: error message or "completed sucessfully"
-
- ------------------------------------------------------------
-
- Initial tabs indicate indent levels for grouping, and is
- governed by self.record_prefix
-
- multiline messages have secondary lines prefaced by a double
- space (' ')
-
- Executing this method will trigger the logging of all new
- warnings to date from the various console loggers.
- """
- # poll all our warning loggers for new warnings
- warnings = self._read_warnings()
- for timestamp, msg in warnings:
- self.__record("WARN", None, None, msg, timestamp)
-
- # write out the actual status log line
- self.__record(status_code, subdir, operation, status,
- optional_fields=optional_fields)
-
-
- def _read_warnings(self):
- warnings = []
- while True:
- # pull in a line of output from every logger that has
- # output ready to be read
- loggers, _, _ = select.select(self.warning_loggers,
- [], [], 0)
- closed_loggers = set()
- for logger in loggers:
- line = logger.readline()
- # record any broken pipes (aka line == empty)
- if len(line) == 0:
- closed_loggers.add(logger)
- continue
- timestamp, msg = line.split('\t', 1)
- warnings.append((int(timestamp), msg.strip()))
-
- # stop listening to loggers that are closed
- self.warning_loggers -= closed_loggers
-
- # stop if none of the loggers have any output left
- if not loggers:
- break
-
- # sort into timestamp order
- warnings.sort()
- return warnings
-
-
- def _render_record(self, status_code, subdir, operation, status='',
- epoch_time=None, record_prefix=None,
- optional_fields=None):
- """
- Internal Function to generate a record to be written into a
- status log. For use by server_job.* classes only.
- """
- if subdir:
- if re.match(r'[\n\t]', subdir):
- raise ValueError(
- 'Invalid character in subdir string')
- substr = subdir
- else:
- substr = '----'
-
- if not logging.is_valid_status(status_code):
- raise ValueError('Invalid status code supplied: %s' %
- status_code)
- if not operation:
- operation = '----'
- if re.match(r'[\n\t]', operation):
- raise ValueError(
- 'Invalid character in operation string')
- operation = operation.rstrip()
- status = status.rstrip()
- status = re.sub(r"\t", " ", status)
- # Ensure any continuation lines are marked so we can
- # detect them in the status file to ensure it is parsable.
- status = re.sub(r"\n", "\n" + self.record_prefix + " ", status)
-
- if not optional_fields:
- optional_fields = {}
-
- # Generate timestamps for inclusion in the logs
- if epoch_time is None:
- epoch_time = int(time.time())
- local_time = time.localtime(epoch_time)
- optional_fields["timestamp"] = str(epoch_time)
- optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
- local_time)
-
- fields = [status_code, substr, operation]
- fields += ["%s=%s" % x for x in optional_fields.iteritems()]
- fields.append(status)
-
- if record_prefix is None:
- record_prefix = self.record_prefix
-
- msg = '\t'.join(str(x) for x in fields)
-
- return record_prefix + msg + '\n'
-
-
- def _record_prerendered(self, msg):
- """
- Record a pre-rendered msg into the status logs. The only
- change this makes to the message is to add on the local
- indentation. Should not be called outside of server_job.*
- classes. Unlike __record, this does not write the message
- to standard output.
- """
- lines = []
- status_file = os.path.join(self.resultdir, 'status.log')
- status_log = open(status_file, 'a')
- for line in msg.splitlines():
- line = self.record_prefix + line + '\n'
- lines.append(line)
- status_log.write(line)
- status_log.close()
- self.__parse_status(lines)
-
-
- def __record(self, status_code, subdir, operation, status='',
- epoch_time=None, optional_fields=None):
- """
- Actual function for recording a single line into the status
- logs. Should never be called directly, only by job.record as
- this would bypass the console monitor logging.
- """
-
- msg = self._render_record(status_code, subdir, operation,
- status, epoch_time,
- optional_fields=optional_fields)
-
-
- status_file = os.path.join(self.resultdir, 'status.log')
- sys.stdout.write(msg)
- open(status_file, "a").write(msg)
- if subdir:
- test_dir = os.path.join(self.resultdir, subdir)
- status_file = os.path.join(test_dir, 'status')
- open(status_file, "a").write(msg)
- self.__parse_status(msg.splitlines())
-
-
- def __parse_status(self, new_lines):
- if not self.using_parser:
- return
- new_tests = self.parser.process_lines(new_lines)
- for test in new_tests:
- self.__insert_test(test)
-
-
- def __insert_test(self, test):
- """ An internal method to insert a new test result into the
- database. This method will not raise an exception, even if an
- error occurs during the insert, to avoid failing a test
- simply because of unexpected database issues."""
- try:
- self.results_db.insert_test(self.job_model, test)
- except Exception:
- msg = ("WARNING: An unexpected error occured while "
- "inserting test results into the database. "
- "Ignoring error.\n" + traceback.format_exc())
- print >> sys.stderr, msg
+ """The actual job against which we do everything.
+
+ Properties:
+ autodir
+ The top level autotest directory (/usr/local/autotest).
+ serverdir
+ <autodir>/server/
+ clientdir
+ <autodir>/client/
+ conmuxdir
+ <autodir>/conmux/
+ testdir
+ <autodir>/server/tests/
+ control
+ the control file for this job
+ """
+
+ STATUS_VERSION = 1
+
+
+ def __init__(self, control, args, resultdir, label, user, machines,
+ client=False, parse_job="",
+ ssh_user='root', ssh_port=22, ssh_pass=''):
+ """
+ control
+ The control file (pathname of)
+ args
+ args to pass to the control file
+ resultdir
+ where to throw the results
+ label
+ label for the job
+ user
+ Username for the job (email address)
+ client
+ True if a client-side control file
+ """
+ path = os.path.dirname(__file__)
+ self.autodir = os.path.abspath(os.path.join(path, '..'))
+ self.serverdir = os.path.join(self.autodir, 'server')
+ self.testdir = os.path.join(self.serverdir, 'tests')
+ self.tmpdir = os.path.join(self.serverdir, 'tmp')
+ self.conmuxdir = os.path.join(self.autodir, 'conmux')
+ self.clientdir = os.path.join(self.autodir, 'client')
+ self.toolsdir = os.path.join(self.autodir, 'client/tools')
+ if control:
+ self.control = open(control, 'r').read()
+ self.control = re.sub('\r', '', self.control)
+ else:
+ self.control = None
+ self.resultdir = resultdir
+ if not os.path.exists(resultdir):
+ os.mkdir(resultdir)
+ self.debugdir = os.path.join(resultdir, 'debug')
+ if not os.path.exists(self.debugdir):
+ os.mkdir(self.debugdir)
+ self.status = os.path.join(resultdir, 'status')
+ self.label = label
+ self.user = user
+ self.args = args
+ self.machines = machines
+ self.client = client
+ self.record_prefix = ''
+ self.warning_loggers = set()
+ self.ssh_user = ssh_user
+ self.ssh_port = ssh_port
+ self.ssh_pass = ssh_pass
+
+ self.stdout = fd_stack.fd_stack(1, sys.stdout)
+ self.stderr = fd_stack.fd_stack(2, sys.stderr)
+
+ if os.path.exists(self.status):
+ os.unlink(self.status)
+ job_data = {'label' : label, 'user' : user,
+ 'hostname' : ','.join(machines),
+ 'status_version' : str(self.STATUS_VERSION)}
+ job_data.update(get_site_job_data(self))
+ utils.write_keyval(self.resultdir, job_data)
+
+ self.parse_job = parse_job
+ if self.parse_job and len(machines) == 1:
+ self.using_parser = True
+ self.init_parser(resultdir)
+ else:
+ self.using_parser = False
+
+
+ def init_parser(self, resultdir):
+ """Start the continuous parsing of resultdir. This sets up
+ the database connection and inserts the basic job object into
+ the database if necessary."""
+ # redirect parser debugging to .parse.log
+ parse_log = os.path.join(resultdir, '.parse.log')
+ parse_log = open(parse_log, 'w', 0)
+ tko_utils.redirect_parser_debugging(parse_log)
+ # create a job model object and set up the db
+ self.results_db = tko_db.db(autocommit=True)
+ self.parser = status_lib.parser(self.STATUS_VERSION)
+ self.job_model = self.parser.make_job(resultdir)
+ self.parser.start(self.job_model)
+ # check if a job already exists in the db and insert it if
+ # it does not
+ job_idx = self.results_db.find_job(self.parse_job)
+ if job_idx is None:
+ self.results_db.insert_job(self.parse_job,
+ self.job_model)
+ else:
+ machine_idx = self.results_db.lookup_machine(
+ self.job_model.machine)
+ self.job_model.index = job_idx
+ self.job_model.machine_idx = machine_idx
+
+
+ def cleanup_parser(self):
+ """This should be called after the server job is finished
+ to carry out any remaining cleanup (e.g. flushing any
+ remaining test results to the results db)"""
+ if not self.using_parser:
+ return
+ final_tests = self.parser.end()
+ for test in final_tests:
+ self.__insert_test(test)
+ self.using_parser = False
+
+
+ def verify(self):
+ if not self.machines:
+ raise error.AutoservError(
+ 'No machines specified to verify')
+ try:
+ namespace = {'machines' : self.machines, 'job' : self, \
+ 'ssh_user' : self.ssh_user, \
+ 'ssh_port' : self.ssh_port, \
+ 'ssh_pass' : self.ssh_pass}
+ exec(preamble + verify, namespace, namespace)
+ except Exception, e:
+ msg = ('Verify failed\n' + str(e) + '\n'
+ + traceback.format_exc())
+ self.record('ABORT', None, None, msg)
+ raise
+
+
+ def repair(self):
+ if not self.machines:
+ raise error.AutoservError(
+ 'No machines specified to repair')
+ namespace = {'machines' : self.machines, 'job' : self, \
+ 'ssh_user' : self.ssh_user, \
+ 'ssh_port' : self.ssh_port, \
+ 'ssh_pass' : self.ssh_pass}
+ # no matter what happens during repair, go on to try to reverify
+ try:
+ exec(preamble + repair, namespace, namespace)
+ except Exception, exc:
+            print 'Exception occurred during repair'
+ traceback.print_exc()
+ self.verify()
+
+
+ def enable_external_logging(self):
+ """Start or restart external logging mechanism.
+ """
+ pass
+
+
+ def disable_external_logging(self):
+ """ Pause or stop external logging mechanism.
+ """
+ pass
+
+
+ def use_external_logging(self):
+ """Return True if external logging should be used.
+ """
+ return False
+
+
+ def parallel_simple(self, function, machines, log=True, timeout=None):
+ """Run 'function' using parallel_simple, with an extra
+ wrapper to handle the necessary setup for continuous parsing,
+ if possible. If continuous parsing is already properly
+ initialized then this should just work."""
+ is_forking = not (len(machines) == 1 and
+ self.machines == machines)
+ if self.parse_job and is_forking:
+ def wrapper(machine):
+ self.parse_job += "/" + machine
+ self.using_parser = True
+ self.machines = [machine]
+ self.resultdir = os.path.join(self.resultdir,
+ machine)
+ self.init_parser(self.resultdir)
+ result = function(machine)
+ self.cleanup_parser()
+ return result
+ else:
+ wrapper = function
+ subcommand.parallel_simple(wrapper, machines, log, timeout)
+
+
+ def run(self, reboot = False, install_before = False,
+ install_after = False, collect_crashdumps = True,
+ namespace = {}):
+ # use a copy so changes don't affect the original dictionary
+ namespace = namespace.copy()
+ machines = self.machines
+
+ self.aborted = False
+ namespace['machines'] = machines
+ namespace['args'] = self.args
+ namespace['job'] = self
+ namespace['ssh_user'] = self.ssh_user
+ namespace['ssh_port'] = self.ssh_port
+ namespace['ssh_pass'] = self.ssh_pass
+ test_start_time = int(time.time())
+
+ os.chdir(self.resultdir)
+
+ self.enable_external_logging()
+ status_log = os.path.join(self.resultdir, 'status.log')
+ try:
+ if install_before and machines:
+ exec(preamble + install, namespace, namespace)
+ if self.client:
+ namespace['control'] = self.control
+ open('control', 'w').write(self.control)
+ open('control.srv', 'w').write(client_wrapper)
+ server_control = client_wrapper
+ else:
+ open('control.srv', 'w').write(self.control)
+ server_control = self.control
+ exec(preamble + server_control, namespace, namespace)
+
+ finally:
+ if machines and collect_crashdumps:
+ namespace['test_start_time'] = test_start_time
+ exec(preamble + crashdumps,
+ namespace, namespace)
+ self.disable_external_logging()
+ if reboot and machines:
+ exec(preamble + reboot_segment,
+ namespace, namespace)
+ if install_after and machines:
+ exec(preamble + install, namespace, namespace)
+
+
+ def run_test(self, url, *args, **dargs):
+ """Summon a test object and run it.
+
+ tag
+ tag to add to testname
+ url
+ url of the test to run
+ """
+
+ (group, testname) = test.testname(url)
+ tag = None
+ subdir = testname
+
+ if dargs.has_key('tag'):
+ tag = dargs['tag']
+ del dargs['tag']
+ if tag:
+ subdir += '.' + tag
+
+ outputdir = os.path.join(self.resultdir, subdir)
+ if os.path.exists(outputdir):
+ msg = ("%s already exists, test <%s> may have"
+ " already run with tag <%s>"
+ % (outputdir, testname, tag) )
+ raise error.TestError(msg)
+ os.mkdir(outputdir)
+
+ try:
+ test.runtest(self, url, tag, args, dargs)
+ self.record('GOOD', subdir, testname, 'completed successfully')
+ except error.TestNAError, detail:
+ self.record('TEST_NA', subdir, testname, str(detail))
+ except Exception, detail:
+ info = str(detail) + "\n" + traceback.format_exc()
+ self.record('FAIL', subdir, testname, info)
+
+
+ def run_group(self, function, *args, **dargs):
+ """\
+ function:
+ subroutine to run
+ *args:
+ arguments for the function
+ """
+
+ result = None
+ name = function.__name__
+
+ # Allow the tag for the group to be specified.
+ if dargs.has_key('tag'):
+ tag = dargs['tag']
+ del dargs['tag']
+ if tag:
+ name = tag
+
+ old_record_prefix = self.record_prefix
+ try:
+ try:
+ self.record('START', None, name)
+ self.record_prefix += '\t'
+ result = function(*args, **dargs)
+ except Exception, e:
+ self.record_prefix = old_record_prefix
+ err_msg = str(e) + '\n'
+ err_msg += traceback.format_exc()
+ self.record('END FAIL', None, name, err_msg)
+ else:
+ self.record_prefix = old_record_prefix
+ self.record('END GOOD', None, name)
+
+ # We don't want to raise up an error higher if it's just
+ # a TestError - we want to carry on to other tests. Hence
+ # this outer try/except block.
+ except error.TestError:
+ pass
+ except:
+ raise error.TestError(name + ' failed\n' +
+ traceback.format_exc())
+
+ return result
+
+
+ def run_reboot(self, reboot_func, get_kernel_func):
+ """\
+ A specialization of run_group meant specifically for handling
+ a reboot. Includes support for capturing the kernel version
+ after the reboot.
+
+ reboot_func: a function that carries out the reboot
+
+ get_kernel_func: a function that returns a string
+ representing the kernel version.
+ """
+
+ old_record_prefix = self.record_prefix
+ try:
+ self.record('START', None, 'reboot')
+ self.record_prefix += '\t'
+ reboot_func()
+ except Exception, e:
+ self.record_prefix = old_record_prefix
+ err_msg = str(e) + '\n' + traceback.format_exc()
+ self.record('END FAIL', None, 'reboot', err_msg)
+ else:
+ kernel = get_kernel_func()
+ self.record_prefix = old_record_prefix
+ self.record('END GOOD', None, 'reboot',
+ optional_fields={"kernel": kernel})
+
+
+ def record(self, status_code, subdir, operation, status='',
+ optional_fields=None):
+ """
+ Record job-level status
+
+ The intent is to make this file both machine parseable and
+ human readable. That involves a little more complexity, but
+ really isn't all that bad ;-)
+
+ Format is <status code>\t<subdir>\t<operation>\t<status>
+
+ status code: see common_lib.logging.is_valid_status()
+ for valid status definition
+
+ subdir: MUST be a relevant subdirectory in the results,
+ or None, which will be represented as '----'
+
+ operation: description of what you ran (e.g. "dbench", or
+ "mkfs -t foobar /dev/sda9")
+
+        status: error message or "completed successfully"
+
+ ------------------------------------------------------------
+
+        Initial tabs indicate indent levels for grouping, and are
+        governed by self.record_prefix
+
+ multiline messages have secondary lines prefaced by a double
+ space (' ')
+
+ Executing this method will trigger the logging of all new
+ warnings to date from the various console loggers.
+ """
+ # poll all our warning loggers for new warnings
+ warnings = self._read_warnings()
+ for timestamp, msg in warnings:
+ self.__record("WARN", None, None, msg, timestamp)
+
+ # write out the actual status log line
+ self.__record(status_code, subdir, operation, status,
+ optional_fields=optional_fields)
+
+
+ def _read_warnings(self):
+ warnings = []
+ while True:
+ # pull in a line of output from every logger that has
+ # output ready to be read
+ loggers, _, _ = select.select(self.warning_loggers,
+ [], [], 0)
+ closed_loggers = set()
+ for logger in loggers:
+ line = logger.readline()
+ # record any broken pipes (aka line == empty)
+ if len(line) == 0:
+ closed_loggers.add(logger)
+ continue
+ timestamp, msg = line.split('\t', 1)
+ warnings.append((int(timestamp), msg.strip()))
+
+ # stop listening to loggers that are closed
+ self.warning_loggers -= closed_loggers
+
+ # stop if none of the loggers have any output left
+ if not loggers:
+ break
+
+ # sort into timestamp order
+ warnings.sort()
+ return warnings
+
+
+ def _render_record(self, status_code, subdir, operation, status='',
+ epoch_time=None, record_prefix=None,
+ optional_fields=None):
+ """
+ Internal Function to generate a record to be written into a
+ status log. For use by server_job.* classes only.
+ """
+ if subdir:
+ if re.match(r'[\n\t]', subdir):
+ raise ValueError(
+ 'Invalid character in subdir string')
+ substr = subdir
+ else:
+ substr = '----'
+
+ if not logging.is_valid_status(status_code):
+ raise ValueError('Invalid status code supplied: %s' %
+ status_code)
+ if not operation:
+ operation = '----'
+ if re.match(r'[\n\t]', operation):
+ raise ValueError(
+ 'Invalid character in operation string')
+ operation = operation.rstrip()
+ status = status.rstrip()
+ status = re.sub(r"\t", " ", status)
+ # Ensure any continuation lines are marked so we can
+ # detect them in the status file to ensure it is parsable.
+ status = re.sub(r"\n", "\n" + self.record_prefix + " ", status)
+
+ if not optional_fields:
+ optional_fields = {}
+
+ # Generate timestamps for inclusion in the logs
+ if epoch_time is None:
+ epoch_time = int(time.time())
+ local_time = time.localtime(epoch_time)
+ optional_fields["timestamp"] = str(epoch_time)
+ optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
+ local_time)
+
+ fields = [status_code, substr, operation]
+ fields += ["%s=%s" % x for x in optional_fields.iteritems()]
+ fields.append(status)
+
+ if record_prefix is None:
+ record_prefix = self.record_prefix
+
+ msg = '\t'.join(str(x) for x in fields)
+
+ return record_prefix + msg + '\n'
+
+
+ def _record_prerendered(self, msg):
+ """
+ Record a pre-rendered msg into the status logs. The only
+ change this makes to the message is to add on the local
+ indentation. Should not be called outside of server_job.*
+ classes. Unlike __record, this does not write the message
+ to standard output.
+ """
+ lines = []
+ status_file = os.path.join(self.resultdir, 'status.log')
+ status_log = open(status_file, 'a')
+ for line in msg.splitlines():
+ line = self.record_prefix + line + '\n'
+ lines.append(line)
+ status_log.write(line)
+ status_log.close()
+ self.__parse_status(lines)
+
+
+ def __record(self, status_code, subdir, operation, status='',
+ epoch_time=None, optional_fields=None):
+ """
+ Actual function for recording a single line into the status
+ logs. Should never be called directly, only by job.record as
+ this would bypass the console monitor logging.
+ """
+
+ msg = self._render_record(status_code, subdir, operation,
+ status, epoch_time,
+ optional_fields=optional_fields)
+
+
+ status_file = os.path.join(self.resultdir, 'status.log')
+ sys.stdout.write(msg)
+ open(status_file, "a").write(msg)
+ if subdir:
+ test_dir = os.path.join(self.resultdir, subdir)
+ status_file = os.path.join(test_dir, 'status')
+ open(status_file, "a").write(msg)
+ self.__parse_status(msg.splitlines())
+
+
+ def __parse_status(self, new_lines):
+ if not self.using_parser:
+ return
+ new_tests = self.parser.process_lines(new_lines)
+ for test in new_tests:
+ self.__insert_test(test)
+
+
+ def __insert_test(self, test):
+ """ An internal method to insert a new test result into the
+ database. This method will not raise an exception, even if an
+ error occurs during the insert, to avoid failing a test
+ simply because of unexpected database issues."""
+ try:
+ self.results_db.insert_test(self.job_model, test)
+ except Exception:
+            msg = ("WARNING: An unexpected error occurred while "
+ "inserting test results into the database. "
+ "Ignoring error.\n" + traceback.format_exc())
+ print >> sys.stderr, msg
# a file-like object for catching stderr from an autotest client and
# extracting status logs from it
class client_logger(object):
- """Partial file object to write to both stdout and
- the status log file. We only implement those methods
- utils.run() actually calls.
- """
- parser = re.compile(r"^AUTOTEST_STATUS:([^:]*):(.*)$")
- extract_indent = re.compile(r"^(\t*).*$")
-
- def __init__(self, job):
- self.job = job
- self.leftover = ""
- self.last_line = ""
- self.logs = {}
-
-
- def _process_log_dict(self, log_dict):
- log_list = log_dict.pop("logs", [])
- for key in sorted(log_dict.iterkeys()):
- log_list += self._process_log_dict(log_dict.pop(key))
- return log_list
-
-
- def _process_logs(self):
- """Go through the accumulated logs in self.log and print them
- out to stdout and the status log. Note that this processes
- logs in an ordering where:
-
- 1) logs to different tags are never interleaved
- 2) logs to x.y come before logs to x.y.z for all z
- 3) logs to x.y come before x.z whenever y < z
-
- Note that this will in general not be the same as the
- chronological ordering of the logs. However, if a chronological
- ordering is desired that one can be reconstructed from the
- status log by looking at timestamp lines."""
- log_list = self._process_log_dict(self.logs)
- for line in log_list:
- self.job._record_prerendered(line + '\n')
- if log_list:
- self.last_line = log_list[-1]
-
-
- def _process_quoted_line(self, tag, line):
- """Process a line quoted with an AUTOTEST_STATUS flag. If the
- tag is blank then we want to push out all the data we've been
- building up in self.logs, and then the newest line. If the
- tag is not blank, then push the line into the logs for handling
- later."""
- print line
- if tag == "":
- self._process_logs()
- self.job._record_prerendered(line + '\n')
- self.last_line = line
- else:
- tag_parts = [int(x) for x in tag.split(".")]
- log_dict = self.logs
- for part in tag_parts:
- log_dict = log_dict.setdefault(part, {})
- log_list = log_dict.setdefault("logs", [])
- log_list.append(line)
-
-
- def _process_line(self, line):
- """Write out a line of data to the appropriate stream. Status
- lines sent by autotest will be prepended with
- "AUTOTEST_STATUS", and all other lines are ssh error
- messages."""
- match = self.parser.search(line)
- if match:
- tag, line = match.groups()
- self._process_quoted_line(tag, line)
- else:
- print line
-
-
- def _format_warnings(self, last_line, warnings):
- # use the indentation of whatever the last log line was
- indent = self.extract_indent.match(last_line).group(1)
- # if the last line starts a new group, add an extra indent
- if last_line.lstrip('\t').startswith("START\t"):
- indent += '\t'
- return [self.job._render_record("WARN", None, None, msg,
- timestamp, indent).rstrip('\n')
- for timestamp, msg in warnings]
-
-
- def _process_warnings(self, last_line, log_dict, warnings):
- if log_dict.keys() in ([], ["logs"]):
- # there are no sub-jobs, just append the warnings here
- warnings = self._format_warnings(last_line, warnings)
- log_list = log_dict.setdefault("logs", [])
- log_list += warnings
- for warning in warnings:
- sys.stdout.write(warning + '\n')
- else:
- # there are sub-jobs, so put the warnings in there
- log_list = log_dict.get("logs", [])
- if log_list:
- last_line = log_list[-1]
- for key in sorted(log_dict.iterkeys()):
- if key != "logs":
- self._process_warnings(last_line,
- log_dict[key],
- warnings)
-
-
- def write(self, data):
- # first check for any new console warnings
- warnings = self.job._read_warnings()
- self._process_warnings(self.last_line, self.logs, warnings)
- # now process the newest data written out
- data = self.leftover + data
- lines = data.split("\n")
- # process every line but the last one
- for line in lines[:-1]:
- self._process_line(line)
- # save the last line for later processing
- # since we may not have the whole line yet
- self.leftover = lines[-1]
-
-
- def flush(self):
- sys.stdout.flush()
-
-
- def close(self):
- if self.leftover:
- self._process_line(self.leftover)
- self._process_logs()
- self.flush()
+ """Partial file object to write to both stdout and
+ the status log file. We only implement those methods
+ utils.run() actually calls.
+ """
+ parser = re.compile(r"^AUTOTEST_STATUS:([^:]*):(.*)$")
+ extract_indent = re.compile(r"^(\t*).*$")
+
+ def __init__(self, job):
+ self.job = job
+ self.leftover = ""
+ self.last_line = ""
+ self.logs = {}
+
+
+ def _process_log_dict(self, log_dict):
+ log_list = log_dict.pop("logs", [])
+ for key in sorted(log_dict.iterkeys()):
+ log_list += self._process_log_dict(log_dict.pop(key))
+ return log_list
+
+
+ def _process_logs(self):
+ """Go through the accumulated logs in self.log and print them
+ out to stdout and the status log. Note that this processes
+ logs in an ordering where:
+
+ 1) logs to different tags are never interleaved
+ 2) logs to x.y come before logs to x.y.z for all z
+ 3) logs to x.y come before x.z whenever y < z
+
+ Note that this will in general not be the same as the
+ chronological ordering of the logs. However, if a chronological
+        ordering is desired, it can be reconstructed from the
+        status log by looking at timestamp lines."""
+ log_list = self._process_log_dict(self.logs)
+ for line in log_list:
+ self.job._record_prerendered(line + '\n')
+ if log_list:
+ self.last_line = log_list[-1]
+
+
+ def _process_quoted_line(self, tag, line):
+ """Process a line quoted with an AUTOTEST_STATUS flag. If the
+ tag is blank then we want to push out all the data we've been
+ building up in self.logs, and then the newest line. If the
+ tag is not blank, then push the line into the logs for handling
+ later."""
+ print line
+ if tag == "":
+ self._process_logs()
+ self.job._record_prerendered(line + '\n')
+ self.last_line = line
+ else:
+ tag_parts = [int(x) for x in tag.split(".")]
+ log_dict = self.logs
+ for part in tag_parts:
+ log_dict = log_dict.setdefault(part, {})
+ log_list = log_dict.setdefault("logs", [])
+ log_list.append(line)
+
+
+ def _process_line(self, line):
+ """Write out a line of data to the appropriate stream. Status
+ lines sent by autotest will be prepended with
+ "AUTOTEST_STATUS", and all other lines are ssh error
+ messages."""
+ match = self.parser.search(line)
+ if match:
+ tag, line = match.groups()
+ self._process_quoted_line(tag, line)
+ else:
+ print line
+
+
+ def _format_warnings(self, last_line, warnings):
+ # use the indentation of whatever the last log line was
+ indent = self.extract_indent.match(last_line).group(1)
+ # if the last line starts a new group, add an extra indent
+ if last_line.lstrip('\t').startswith("START\t"):
+ indent += '\t'
+ return [self.job._render_record("WARN", None, None, msg,
+ timestamp, indent).rstrip('\n')
+ for timestamp, msg in warnings]
+
+
+ def _process_warnings(self, last_line, log_dict, warnings):
+ if log_dict.keys() in ([], ["logs"]):
+ # there are no sub-jobs, just append the warnings here
+ warnings = self._format_warnings(last_line, warnings)
+ log_list = log_dict.setdefault("logs", [])
+ log_list += warnings
+ for warning in warnings:
+ sys.stdout.write(warning + '\n')
+ else:
+ # there are sub-jobs, so put the warnings in there
+ log_list = log_dict.get("logs", [])
+ if log_list:
+ last_line = log_list[-1]
+ for key in sorted(log_dict.iterkeys()):
+ if key != "logs":
+ self._process_warnings(last_line,
+ log_dict[key],
+ warnings)
+
+
+ def write(self, data):
+ # first check for any new console warnings
+ warnings = self.job._read_warnings()
+ self._process_warnings(self.last_line, self.logs, warnings)
+ # now process the newest data written out
+ data = self.leftover + data
+ lines = data.split("\n")
+ # process every line but the last one
+ for line in lines[:-1]:
+ self._process_line(line)
+ # save the last line for later processing
+ # since we may not have the whole line yet
+ self.leftover = lines[-1]
+
+
+ def flush(self):
+ sys.stdout.flush()
+
+
+ def close(self):
+ if self.leftover:
+ self._process_line(self.leftover)
+ self._process_logs()
+ self.flush()
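client_logger above filters the client's output stream: lines of the form AUTOTEST_STATUS:<tag>:<rendered line> are status records destined for the status log (buffered in self.logs under their dotted tag), while everything else is assumed to be ssh noise and simply printed. A small sketch of that match, using the same regular expression; the sample line itself is made up:

import re

parser = re.compile(r"^AUTOTEST_STATUS:([^:]*):(.*)$")

# hypothetical line as the client might emit it over the wire
sample = "AUTOTEST_STATUS:2.1:GOOD\tsleeptest\tsleeptest\tcompleted successfully"
match = parser.search(sample)
if match:
    tag, line = match.groups()
    print repr(tag)     # '2.1' -> buffered under self.logs[2][1]['logs']
    print repr(line)    # pre-rendered status line, recorded when the group flushes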
# site_server_job.py may be non-existent or empty; make sure that an
# appropriate site_server_job class is created nevertheless
try:
- from autotest_lib.server.site_server_job import site_server_job
+ from autotest_lib.server.site_server_job import site_server_job
except ImportError:
- class site_server_job(base_server_job):
- pass
-
+ class site_server_job(base_server_job):
+ pass
+
class server_job(site_server_job):
- pass
+ pass
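For reference, record() and _render_record() above emit one tab-separated line per event: the record_prefix indentation, then status code, subdir (or ----), operation, any key=value optional fields (always including timestamp and localtime), and finally the status message. A rough, self-contained approximation of a single rendered line; the test name and message are made up:

import time

record_prefix = '\t'    # one level of START/END grouping
epoch = int(time.time())
fields = ['GOOD', 'dbench', 'dbench',
          'timestamp=%d' % epoch,
          'localtime=' + time.strftime('%b %d %H:%M:%S', time.localtime(epoch)),
          'completed successfully']
print record_prefix + '\t'.join(fields)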
diff --git a/server/site_autoserv_parser.py b/server/site_autoserv_parser.py
index f8703f7c..03b8c4e5 100644
--- a/server/site_autoserv_parser.py
+++ b/server/site_autoserv_parser.py
@@ -9,6 +9,6 @@ add_usage = """\
class site_autoserv_parser(base_autoserv_parser):
- def get_usage(self):
- usage = super(site_autoserv_parser, self).get_usage()
- return usage+add_usage
+ def get_usage(self):
+ usage = super(site_autoserv_parser, self).get_usage()
+ return usage+add_usage
diff --git a/server/source_kernel.py b/server/source_kernel.py
index 8d27d112..ae7f032d 100644
--- a/server/source_kernel.py
+++ b/server/source_kernel.py
@@ -5,7 +5,7 @@
"""
This module defines the SourceKernel class
- SourceKernel: an linux kernel built from source
+        SourceKernel: a linux kernel built from source
"""
__author__ = """
@@ -21,60 +21,60 @@ import autotest
class SourceKernel(kernel.Kernel):
- """
- This class represents a linux kernel built from source.
-
- It is used to obtain a built kernel or create one from source and
- install it on a Host.
-
- Implementation details:
- This is a leaf class in an abstract class hierarchy, it must
- implement the unimplemented methods in parent classes.
- """
- def __init__(self, k):
- super(kernel.Kernel, self).__init__()
- self.__kernel = k
- self.__patch_list = []
- self.__config_file = None
- self.__autotest = autotest.Autotest()
+ """
+ This class represents a linux kernel built from source.
+ It is used to obtain a built kernel or create one from source and
+ install it on a Host.
- def configure(self, configFile):
- self.__config_file = configFile
+ Implementation details:
+ This is a leaf class in an abstract class hierarchy, it must
+ implement the unimplemented methods in parent classes.
+ """
+ def __init__(self, k):
+ super(kernel.Kernel, self).__init__()
+ self.__kernel = k
+ self.__patch_list = []
+ self.__config_file = None
+ self.__autotest = autotest.Autotest()
- def patch(self, patchFile):
- self.__patch_list.append(patchFile)
+ def configure(self, configFile):
+ self.__config_file = configFile
- def build(self, host):
- ctlfile = self.__control_file(self.__kernel, self.__patch_list,
- self.__config_file)
- self.__autotest.run(ctlfile, host.get_tmp_dir(), host)
+ def patch(self, patchFile):
+ self.__patch_list.append(patchFile)
- def install(self, host):
- self.__autotest.install(host)
- ctlfile = ("testkernel = job.kernel('%s')\n"
- "testkernel.install()\n"
- "testkernel.add_to_bootloader()\n" %(self.__kernel))
- self.__autotest.run(ctlfile, host.get_tmp_dir(), host)
-
+ def build(self, host):
+ ctlfile = self.__control_file(self.__kernel, self.__patch_list,
+ self.__config_file)
+ self.__autotest.run(ctlfile, host.get_tmp_dir(), host)
- def __control_file(self, kernel, patch_list, config):
- ctl = ("testkernel = job.kernel('%s')\n" % kernel)
- if len(patch_list):
- patches = ', '.join(["'%s'" % x for x in patch_list])
- ctl += "testkernel.patch(%s)\n" % patches
+ def install(self, host):
+ self.__autotest.install(host)
+ ctlfile = ("testkernel = job.kernel('%s')\n"
+ "testkernel.install()\n"
+ "testkernel.add_to_bootloader()\n" %(self.__kernel))
+ self.__autotest.run(ctlfile, host.get_tmp_dir(), host)
- if config:
- ctl += "testkernel.config('%s')\n" % config
- else:
- ctl += "testkernel.config('', None, True)\n"
- ctl += "testkernel.build()\n"
+ def __control_file(self, kernel, patch_list, config):
+ ctl = ("testkernel = job.kernel('%s')\n" % kernel)
- # copy back to server
+ if len(patch_list):
+ patches = ', '.join(["'%s'" % x for x in patch_list])
+ ctl += "testkernel.patch(%s)\n" % patches
- return ctl
+ if config:
+ ctl += "testkernel.config('%s')\n" % config
+ else:
+ ctl += "testkernel.config('', None, True)\n"
+
+ ctl += "testkernel.build()\n"
+
+ # copy back to server
+
+ return ctl
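__control_file() above simply concatenates a small client-side control script. Assuming one patch and an explicit config (all file names below are hypothetical), the returned text would look roughly like this:

# hypothetical inputs: kernel tarball, one patch, explicit config
expected = (
    "testkernel = job.kernel('linux-2.6.22.tar.bz2')\n"
    "testkernel.patch('fix-foo.patch')\n"
    "testkernel.config('kernel.config')\n"
    "testkernel.build()\n"
)
print expected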
diff --git a/server/standalone_profiler.py b/server/standalone_profiler.py
index 81bf7698..f681ff40 100644
--- a/server/standalone_profiler.py
+++ b/server/standalone_profiler.py
@@ -15,35 +15,35 @@ from autotest_lib.client.common_lib import barrier
def generate_test(machines, hostname, profilers, timeout_start, timeout_stop,
- timeout_sync=180):
- control_file = []
- for profiler in profilers:
- control_file.append("job.profilers.add(%s)"
- % str(profiler)[1:-1]) # Remove parens
+ timeout_sync=180):
+ control_file = []
+ for profiler in profilers:
+ control_file.append("job.profilers.add(%s)"
+ % str(profiler)[1:-1]) # Remove parens
- control_file.append("job.run_test('barriertest',%d,%d,%d,'%s','%s',%s)"
- % (timeout_sync, timeout_start, timeout_stop,
- hostname, "PROF_MASTER", str(machines)))
+ control_file.append("job.run_test('barriertest',%d,%d,%d,'%s','%s',%s)"
+ % (timeout_sync, timeout_start, timeout_stop,
+ hostname, "PROF_MASTER", str(machines)))
- for profiler in profilers:
- control_file.append("job.profilers.delete('%s')" % profiler[0])
+ for profiler in profilers:
+ control_file.append("job.profilers.delete('%s')" % profiler[0])
- return "\n".join(control_file)
+ return "\n".join(control_file)
def wait_for_profilers(machines, timeout = 300):
- sb = barrier.barrier("PROF_MASTER", "sync_profilers",
- timeout, port=63100)
- sb.rendevous_servers("PROF_MASTER", *machines)
+ sb = barrier.barrier("PROF_MASTER", "sync_profilers",
+ timeout, port=63100)
+ sb.rendevous_servers("PROF_MASTER", *machines)
def start_profilers(machines, timeout = 120):
- sb = barrier.barrier("PROF_MASTER", "start_profilers",
- timeout, port=63100)
- sb.rendevous_servers("PROF_MASTER", *machines)
+ sb = barrier.barrier("PROF_MASTER", "start_profilers",
+ timeout, port=63100)
+ sb.rendevous_servers("PROF_MASTER", *machines)
def stop_profilers(machines, timeout = 120):
- sb = barrier.barrier("PROF_MASTER", "stop_profilers",
- timeout, port=63100)
- sb.rendevous_servers("PROF_MASTER", *machines)
+ sb = barrier.barrier("PROF_MASTER", "stop_profilers",
+ timeout, port=63100)
+ sb.rendevous_servers("PROF_MASTER", *machines)
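generate_test() above builds a client control file that registers each profiler (str(profiler)[1:-1] strips the parentheses from the tuple repr, so the tuple contents become the argument list) and then runs barriertest so all machines start and stop profiling together. A self-contained sketch of the text it builds, for one hypothetical profiler and two hypothetical hosts:

machines = ['hostA', 'hostB']
profilers = [('oprofile',)]
timeout_sync, timeout_start, timeout_stop = 180, 60, 120

lines = []
for profiler in profilers:
    lines.append("job.profilers.add(%s)" % str(profiler)[1:-1])
lines.append("job.run_test('barriertest',%d,%d,%d,'%s','%s',%s)"
             % (timeout_sync, timeout_start, timeout_stop,
                'hostA', "PROF_MASTER", str(machines)))
for profiler in profilers:
    lines.append("job.profilers.delete('%s')" % profiler[0])
print "\n".join(lines)
# job.profilers.add('oprofile',)
# job.run_test('barriertest',180,60,120,'hostA','PROF_MASTER',['hostA', 'hostB'])
# job.profilers.delete('oprofile')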
diff --git a/server/status.py b/server/status.py
index 382626af..b0f44083 100644
--- a/server/status.py
+++ b/server/status.py
@@ -3,194 +3,194 @@ import sys, re, os, itertools
class Machine:
- """
- Represents the current state of a machine. Possible values are:
- TESTING currently running a test
- REBOOTING currently rebooting
- BROKEN busted somehow (e.g. reboot timed out)
- OTHER none of the above
-
- The implementation is basically that of a state machine. From an
- external point of view the only relevant attributes are:
- details text description of the current status
- test_count number of tests run
- """
- def __init__(self):
- self.state = "OTHER"
- self.details = "Running"
- self.test_name = ""
- self.test_count = 0
-
-
- def process_line(self, line):
- self.handlers[self.state](self, line)
-
-
- def _OTHER_handler(self, line):
- match = self.job_start.match(line)
- if match and match.group(2) != "----":
- self.state = "TESTING"
- self.tab_level = len(match.group(1))
- self.test_name = match.group(2)
- self.test_status = "GOOD"
- self.details = "Running %s" % self.test_name
- return
-
- match = self.reboot_start.match(line)
- if match:
- self.boot_status = match.group(1)
- if self.worse_status("GOOD", self.boot_status) == "GOOD":
- self.state = "REBOOTING"
- self.details = "Rebooting"
- else:
- self.state = "BROKEN"
- self.details = "Reboot failed - machine broken"
- return
-
-
- def _TESTING_handler(self, line):
- match = self.job_status.match(line)
- if match:
- if len(match.group(1)) != self.tab_level + 1:
- return # we don't care about subgroups
- if self.test_name != match.group(3):
- return # we don't care about other tests
- self.test_status = self.worse_status(self.test_status,
- match.group(2))
- self.details = "Running %s: %s" % (self.test_name,
- match.group(4))
- return
-
- match = self.job_end.match(line)
- if match:
- if len(match.group(1)) != self.tab_level:
- return # we don't care about subgroups
- if self.test_name != match.group(3):
- raise ValueError('Group START and END name mismatch')
- self.state = "OTHER"
- self.test_status = self.worse_status(self.test_status,
- match.group(2))
- self.test_name = ""
- del self.test_status
- self.details = "Running"
- self.test_count += 1
- return
-
-
- def _REBOOTING_handler(self, line):
- match = self.reboot_done.match(line)
- if match:
- status = self.worse_status(self.boot_status,
- match.group(1))
- del self.boot_status
- if status == "GOOD":
- self.state = "OTHER"
- self.details = "Running"
- else:
- self.state = "BROKEN"
- self.details = "Reboot failed - machine broken"
- return
-
-
- def _BROKEN_handler(self, line):
- pass # just do nothing - we're broken and staying broken
-
-
- handlers = {"OTHER": _OTHER_handler,
- "TESTING": _TESTING_handler,
- "REBOOTING": _REBOOTING_handler,
- "BROKEN": _BROKEN_handler}
-
-
- status_list = ["GOOD", "WARN", "FAIL", "ABORT", "ERROR"]
- order_dict = {None: -1}
- order_dict.update((status, i)
- for i, status in enumerate(status_list))
-
-
- job_start = re.compile(r"^(\t*)START\t----\t([^\t]+).*$")
- job_status = re.compile(r"^(\t*)(%s)\t([^\t]+)\t(?:[^\t]+).*\t([^\t]+)$" %
- "|".join(status_list))
- job_end = re.compile(r"^(\t*)END (%s)\t----\t([^\t]+).*$" %
- "|".join(status_list))
- reboot_start = re.compile(r"^\t?(%s)\t[^\t]+\treboot\.start.*$" %
- "|".join(status_list))
- reboot_done = re.compile(r"^\t?(%s)\t[^\t]+\treboot\.verify.*$" %
- "|".join(status_list))
-
- @classmethod
- def worse_status(cls, old_status, new_status):
- if cls.order_dict[new_status] > cls.order_dict[old_status]:
- return new_status
- else:
- return old_status
+ """
+ Represents the current state of a machine. Possible values are:
+ TESTING currently running a test
+ REBOOTING currently rebooting
+ BROKEN busted somehow (e.g. reboot timed out)
+ OTHER none of the above
+
+ The implementation is basically that of a state machine. From an
+ external point of view the only relevant attributes are:
+ details text description of the current status
+ test_count number of tests run
+ """
+ def __init__(self):
+ self.state = "OTHER"
+ self.details = "Running"
+ self.test_name = ""
+ self.test_count = 0
+
+
+ def process_line(self, line):
+ self.handlers[self.state](self, line)
+
+
+ def _OTHER_handler(self, line):
+ match = self.job_start.match(line)
+ if match and match.group(2) != "----":
+ self.state = "TESTING"
+ self.tab_level = len(match.group(1))
+ self.test_name = match.group(2)
+ self.test_status = "GOOD"
+ self.details = "Running %s" % self.test_name
+ return
+
+ match = self.reboot_start.match(line)
+ if match:
+ self.boot_status = match.group(1)
+ if self.worse_status("GOOD", self.boot_status) == "GOOD":
+ self.state = "REBOOTING"
+ self.details = "Rebooting"
+ else:
+ self.state = "BROKEN"
+ self.details = "Reboot failed - machine broken"
+ return
+
+
+ def _TESTING_handler(self, line):
+ match = self.job_status.match(line)
+ if match:
+ if len(match.group(1)) != self.tab_level + 1:
+ return # we don't care about subgroups
+ if self.test_name != match.group(3):
+ return # we don't care about other tests
+ self.test_status = self.worse_status(self.test_status,
+ match.group(2))
+ self.details = "Running %s: %s" % (self.test_name,
+ match.group(4))
+ return
+
+ match = self.job_end.match(line)
+ if match:
+ if len(match.group(1)) != self.tab_level:
+ return # we don't care about subgroups
+ if self.test_name != match.group(3):
+ raise ValueError('Group START and END name mismatch')
+ self.state = "OTHER"
+ self.test_status = self.worse_status(self.test_status,
+ match.group(2))
+ self.test_name = ""
+ del self.test_status
+ self.details = "Running"
+ self.test_count += 1
+ return
+
+
+ def _REBOOTING_handler(self, line):
+ match = self.reboot_done.match(line)
+ if match:
+ status = self.worse_status(self.boot_status,
+ match.group(1))
+ del self.boot_status
+ if status == "GOOD":
+ self.state = "OTHER"
+ self.details = "Running"
+ else:
+ self.state = "BROKEN"
+ self.details = "Reboot failed - machine broken"
+ return
+
+
+ def _BROKEN_handler(self, line):
+ pass # just do nothing - we're broken and staying broken
+
+
+ handlers = {"OTHER": _OTHER_handler,
+ "TESTING": _TESTING_handler,
+ "REBOOTING": _REBOOTING_handler,
+ "BROKEN": _BROKEN_handler}
+
+
+ status_list = ["GOOD", "WARN", "FAIL", "ABORT", "ERROR"]
+ order_dict = {None: -1}
+ order_dict.update((status, i)
+ for i, status in enumerate(status_list))
+
+
+ job_start = re.compile(r"^(\t*)START\t----\t([^\t]+).*$")
+ job_status = re.compile(r"^(\t*)(%s)\t([^\t]+)\t(?:[^\t]+).*\t([^\t]+)$" %
+ "|".join(status_list))
+ job_end = re.compile(r"^(\t*)END (%s)\t----\t([^\t]+).*$" %
+ "|".join(status_list))
+ reboot_start = re.compile(r"^\t?(%s)\t[^\t]+\treboot\.start.*$" %
+ "|".join(status_list))
+ reboot_done = re.compile(r"^\t?(%s)\t[^\t]+\treboot\.verify.*$" %
+ "|".join(status_list))
+
+ @classmethod
+ def worse_status(cls, old_status, new_status):
+ if cls.order_dict[new_status] > cls.order_dict[old_status]:
+ return new_status
+ else:
+ return old_status
def parse_status(status_log):
- """\
- Parse the status from a single status log.
- Do not use with status logs from multi-machine tests.
- """
- parser = Machine()
- for line in file(status_log):
- parser.process_line(line)
- result = {
- "status": parser.details,
- "test_on": parser.test_name,
- "test_num_complete": parser.test_count
- }
- return result
+ """\
+ Parse the status from a single status log.
+ Do not use with status logs from multi-machine tests.
+ """
+ parser = Machine()
+ for line in file(status_log):
+ parser.process_line(line)
+ result = {
+ "status": parser.details,
+ "test_on": parser.test_name,
+ "test_num_complete": parser.test_count
+ }
+ return result
def _file_iterator(filename):
- """\
- Return an iterator over file(filename), or an empty iterator
- if the file does not exist.
- """
- if os.path.exists(filename):
- return iter(file(filename))
- else:
- return ()
+ """\
+ Return an iterator over file(filename), or an empty iterator
+ if the file does not exist.
+ """
+ if os.path.exists(filename):
+ return iter(file(filename))
+ else:
+ return ()
def parse_machine_status(root_path, name):
- """Parse the status for one machine (of a multi-machine test)"""
- general_log = _file_iterator(os.path.join(root_path, "status.log"))
- machine_log = _file_iterator(os.path.join(root_path, name, "status.log"))
- timestamp_regex = re.compile("\ttimestamp=(\d+)")
- # collect all the lines from both the root & machine-specific log
- lines = []
- timestamp = 0
- for line in itertools.chain(general_log, machine_log):
- timestamp_match = timestamp_regex.search(line)
- # if the log line has a timestamp, use it
- # otherwise, just use the timestamp from the previous line
- if timestamp_match:
- timestamp = int(timestamp_match.group(1))
- lines.append((timestamp, line))
- lines.sort() # this will sort the lines by timestamp
- # now actually run the lines through the parser
- parser = Machine()
- for timestamp, line in lines:
- parser.process_line(line)
- return {
- "status": parser.details,
- "test_on": parser.test_name,
- "test_num_complete": parser.test_count
- }
+ """Parse the status for one machine (of a multi-machine test)"""
+ general_log = _file_iterator(os.path.join(root_path, "status.log"))
+ machine_log = _file_iterator(os.path.join(root_path, name, "status.log"))
+ timestamp_regex = re.compile("\ttimestamp=(\d+)")
+ # collect all the lines from both the root & machine-specific log
+ lines = []
+ timestamp = 0
+ for line in itertools.chain(general_log, machine_log):
+ timestamp_match = timestamp_regex.search(line)
+ # if the log line has a timestamp, use it
+ # otherwise, just use the timestamp from the previous line
+ if timestamp_match:
+ timestamp = int(timestamp_match.group(1))
+ lines.append((timestamp, line))
+ lines.sort() # this will sort the lines by timestamp
+ # now actually run the lines through the parser
+ parser = Machine()
+ for timestamp, line in lines:
+ parser.process_line(line)
+ return {
+ "status": parser.details,
+ "test_on": parser.test_name,
+ "test_num_complete": parser.test_count
+ }
def parse_multimachine_status(root_path, machine_names):
- """Parse the status for a set of machines."""
- results = {}
- for name in machine_names:
- results[name] = parse_machine_status(root_path, name)
- return results
+ """Parse the status for a set of machines."""
+ results = {}
+ for name in machine_names:
+ results[name] = parse_machine_status(root_path, name)
+ return results
if __name__ == "__main__":
- args = sys.argv[1:]
- if len(args) != 1:
- print "USAGE: status.py status_log"
- sys.exit(1)
- print parse_status(args[0])
+ args = sys.argv[1:]
+ if len(args) != 1:
+ print "USAGE: status.py status_log"
+ sys.exit(1)
+ print parse_status(args[0])
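Machine.worse_status() above decides which status wins when several lines apply to the same group: later entries in status_list take priority, and None loses to everything. A tiny self-contained illustration of that ordering:

status_list = ["GOOD", "WARN", "FAIL", "ABORT", "ERROR"]
order_dict = {None: -1}
order_dict.update((status, i) for i, status in enumerate(status_list))

def worse_status(old_status, new_status):
    # later (more severe) entries in status_list win
    if order_dict[new_status] > order_dict[old_status]:
        return new_status
    return old_status

print worse_status("GOOD", "WARN")   # WARN
print worse_status("FAIL", "WARN")   # FAIL
print worse_status(None, "GOOD")     # GOOD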
diff --git a/server/subcommand.py b/server/subcommand.py
index 85898a10..8b29b827 100644
--- a/server/subcommand.py
+++ b/server/subcommand.py
@@ -6,196 +6,196 @@ from autotest_lib.client.common_lib import error, utils
def parallel(tasklist, timeout=None):
- """Run an set of predefined subcommands in parallel"""
- pids = []
- run_error = False
- for task in tasklist:
- task.fork_start()
-
- remaining_timeout = None
- if timeout:
- endtime = time.time() + timeout
-
- for task in tasklist:
- if timeout:
- remaining_timeout = max(endtime - time.time(), 1)
- try:
- status = task.fork_waitfor(remaining_timeout)
- except error.AutoservSubcommandError:
- run_error = True
- else:
- if status != 0:
- run_error = True
-
- if run_error:
- raise error.AutoservError('One or more subcommands failed')
+    """Run a set of predefined subcommands in parallel"""
+ pids = []
+ run_error = False
+ for task in tasklist:
+ task.fork_start()
+
+ remaining_timeout = None
+ if timeout:
+ endtime = time.time() + timeout
+
+ for task in tasklist:
+ if timeout:
+ remaining_timeout = max(endtime - time.time(), 1)
+ try:
+ status = task.fork_waitfor(remaining_timeout)
+ except error.AutoservSubcommandError:
+ run_error = True
+ else:
+ if status != 0:
+ run_error = True
+
+ if run_error:
+ raise error.AutoservError('One or more subcommands failed')
def parallel_simple(function, arglist, log=True, timeout=None):
- """Each element in the arglist used to create a subcommand object,
- where that arg is used both as a subdir name, and a single argument
- to pass to "function".
- We create a subcommand object for each element in the list,
- then execute those subcommand objects in parallel."""
-
- # Bypass the multithreading if only one machine.
- if len (arglist) == 1:
- function(arglist[0])
- return
-
- subcommands = []
- for arg in arglist:
- args = [arg]
- if log:
- subdir = str(arg)
- else:
- subdir = None
- subcommands.append(subcommand(function, args, subdir))
- parallel(subcommands, timeout)
+    """Each element in the arglist is used to create a subcommand object,
+    where that arg is used both as a subdir name and as a single argument
+    to pass to "function".
+ We create a subcommand object for each element in the list,
+ then execute those subcommand objects in parallel."""
+
+ # Bypass the multithreading if only one machine.
+ if len (arglist) == 1:
+ function(arglist[0])
+ return
+
+ subcommands = []
+ for arg in arglist:
+ args = [arg]
+ if log:
+ subdir = str(arg)
+ else:
+ subdir = None
+ subcommands.append(subcommand(function, args, subdir))
+ parallel(subcommands, timeout)
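A minimal usage sketch of parallel_simple() as described in the docstring above; the machine names and the reboot function below are hypothetical, not part of this patch:

def reboot(machine):
    # placeholder payload; in practice this would drive an autoserv job
    print 'rebooting %s' % machine

# each machine name becomes both the results subdir and the single argument
# passed to reboot(); with a single machine the fork is skipped entirely
parallel_simple(reboot, ['host1.example.com', 'host2.example.com'],
                log=True, timeout=600)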
def _where_art_thy_filehandles():
- os.system("ls -l /proc/%d/fd >> /dev/tty" % os.getpid())
+ os.system("ls -l /proc/%d/fd >> /dev/tty" % os.getpid())
def _print_to_tty(string):
- open('/dev/tty', 'w').write(string + '\n')
+ open('/dev/tty', 'w').write(string + '\n')
def _redirect_stream(fd, output):
- newfd = os.open(output, os.O_WRONLY | os.O_CREAT)
- os.dup2(newfd, fd)
- os.close(newfd)
- if fd == 1:
- sys.stdout = os.fdopen(fd, 'w')
- if fd == 2:
- sys.stderr = os.fdopen(fd, 'w')
+ newfd = os.open(output, os.O_WRONLY | os.O_CREAT)
+ os.dup2(newfd, fd)
+ os.close(newfd)
+ if fd == 1:
+ sys.stdout = os.fdopen(fd, 'w')
+ if fd == 2:
+ sys.stderr = os.fdopen(fd, 'w')
def _redirect_stream_tee(fd, output, tag):
- """Use the low-level fork & pipe operations here to get a fd,
- not a filehandle. This ensures that we get both the
- filehandle and fd for stdout/stderr redirected correctly."""
- r, w = os.pipe()
- pid = os.fork()
- if pid: # Parent
- os.dup2(w, fd)
- os.close(r)
- os.close(w)
- if fd == 1:
- sys.stdout = os.fdopen(fd, 'w', 1)
- if fd == 2:
- sys.stderr = os.fdopen(fd, 'w', 1)
- return
- else: # Child
- os.close(w)
- log = open(output, 'w')
- f = os.fdopen(r, 'r')
- for line in iter(f.readline, ''):
- # Tee straight to file
- log.write(line)
- log.flush()
- # Prepend stdout with the tag
- print tag + ' : ' + line,
- sys.stdout.flush()
- log.close()
- os._exit(0)
+ """Use the low-level fork & pipe operations here to get a fd,
+ not a filehandle. This ensures that we get both the
+ filehandle and fd for stdout/stderr redirected correctly."""
+ r, w = os.pipe()
+ pid = os.fork()
+ if pid: # Parent
+ os.dup2(w, fd)
+ os.close(r)
+ os.close(w)
+ if fd == 1:
+ sys.stdout = os.fdopen(fd, 'w', 1)
+ if fd == 2:
+ sys.stderr = os.fdopen(fd, 'w', 1)
+ return
+ else: # Child
+ os.close(w)
+ log = open(output, 'w')
+ f = os.fdopen(r, 'r')
+ for line in iter(f.readline, ''):
+ # Tee straight to file
+ log.write(line)
+ log.flush()
+ # Prepend stdout with the tag
+ print tag + ' : ' + line,
+ sys.stdout.flush()
+ log.close()
+ os._exit(0)
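A sketch of the tee behaviour implemented above, assuming the caller owns fd 1; the tag and log path are illustrative only:

# After this call the forked child copies everything written to stdout into
# 'debug/stdout' and also echoes it to the terminal prefixed with 'host1 : '.
_redirect_stream_tee(1, 'debug/stdout', 'host1')
print 'kernel build started'   # shows up on the tty as "host1 : kernel build started"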
class subcommand:
- def __init__(self, func, args, subdir = None, stdprint = True):
- # func(args) - the subcommand to run
- # subdir - the subdirectory to log results in
- # stdprint - whether to print results to stdout/stderr
- if subdir:
- self.subdir = os.path.abspath(subdir)
- if not os.path.exists(self.subdir):
- os.mkdir(self.subdir)
- self.debug = os.path.join(self.subdir, 'debug')
- if not os.path.exists(self.debug):
- os.mkdir(self.debug)
- self.stdout = os.path.join(self.debug, 'stdout')
- self.stderr = os.path.join(self.debug, 'stderr')
- else:
- self.subdir = None
- self.debug = '/dev/null'
- self.stdout = '/dev/null'
- self.stderr = '/dev/null'
-
- self.func = func
- self.args = args
- self.lambda_function = lambda: func(*args)
- self.pid = None
- self.stdprint = stdprint
-
-
- def redirect_output(self):
- if self.stdprint:
- if self.subdir:
- tag = os.path.basename(self.subdir)
- _redirect_stream_tee(1, self.stdout, tag)
- _redirect_stream_tee(2, self.stderr, tag)
- else:
- _redirect_stream(1, self.stdout)
- _redirect_stream(2, self.stderr)
-
-
- def fork_start(self):
- sys.stdout.flush()
- sys.stderr.flush()
- self.pid = os.fork()
-
- if self.pid: # I am the parent
- return
-
- # We are the child from this point on. Never return.
- signal.signal(signal.SIGTERM, signal.SIG_DFL) # clear handler
- if self.subdir:
- os.chdir(self.subdir)
- self.redirect_output()
-
- try:
- self.lambda_function()
-
- except:
- traceback.print_exc()
- sys.stdout.flush()
- sys.stderr.flush()
- os._exit(1)
-
- sys.stdout.flush()
- sys.stderr.flush()
- os._exit(0)
-
-
- def fork_waitfor(self, timeout=None):
- if not timeout:
- (pid, status) = os.waitpid(self.pid, 0)
- else:
- pid = None
- start_time = time.time()
- while time.time() <= start_time + timeout:
- (pid, status) = os.waitpid(self.pid, os.WNOHANG)
- if pid:
- break
- time.sleep(1)
-
- if not pid:
- utils.nuke_pid(self.pid)
- print "subcommand failed pid %d" % self.pid
- print "%s" % (self.func,)
- print "timeout after %ds" % timeout
- print
- return None
-
- if status != 0:
- print "subcommand failed pid %d" % pid
- print "%s" % (self.func,)
- print "rc=%d" % status
- print
- if os.path.exists(self.stderr):
- for line in open(self.stderr).readlines():
- print line,
- print "\n--------------------------------------------\n"
- raise error.AutoservSubcommandError(self.func, status)
- return status
+ def __init__(self, func, args, subdir = None, stdprint = True):
+ # func(args) - the subcommand to run
+ # subdir - the subdirectory to log results in
+ # stdprint - whether to print results to stdout/stderr
+ if subdir:
+ self.subdir = os.path.abspath(subdir)
+ if not os.path.exists(self.subdir):
+ os.mkdir(self.subdir)
+ self.debug = os.path.join(self.subdir, 'debug')
+ if not os.path.exists(self.debug):
+ os.mkdir(self.debug)
+ self.stdout = os.path.join(self.debug, 'stdout')
+ self.stderr = os.path.join(self.debug, 'stderr')
+ else:
+ self.subdir = None
+ self.debug = '/dev/null'
+ self.stdout = '/dev/null'
+ self.stderr = '/dev/null'
+
+ self.func = func
+ self.args = args
+ self.lambda_function = lambda: func(*args)
+ self.pid = None
+ self.stdprint = stdprint
+
+
+ def redirect_output(self):
+ if self.stdprint:
+ if self.subdir:
+ tag = os.path.basename(self.subdir)
+ _redirect_stream_tee(1, self.stdout, tag)
+ _redirect_stream_tee(2, self.stderr, tag)
+ else:
+ _redirect_stream(1, self.stdout)
+ _redirect_stream(2, self.stderr)
+
+
+ def fork_start(self):
+ sys.stdout.flush()
+ sys.stderr.flush()
+ self.pid = os.fork()
+
+ if self.pid: # I am the parent
+ return
+
+ # We are the child from this point on. Never return.
+ signal.signal(signal.SIGTERM, signal.SIG_DFL) # clear handler
+ if self.subdir:
+ os.chdir(self.subdir)
+ self.redirect_output()
+
+ try:
+ self.lambda_function()
+
+ except:
+ traceback.print_exc()
+ sys.stdout.flush()
+ sys.stderr.flush()
+ os._exit(1)
+
+ sys.stdout.flush()
+ sys.stderr.flush()
+ os._exit(0)
+
+
+ def fork_waitfor(self, timeout=None):
+ if not timeout:
+ (pid, status) = os.waitpid(self.pid, 0)
+ else:
+ pid = None
+ start_time = time.time()
+ while time.time() <= start_time + timeout:
+ (pid, status) = os.waitpid(self.pid, os.WNOHANG)
+ if pid:
+ break
+ time.sleep(1)
+
+ if not pid:
+ utils.nuke_pid(self.pid)
+ print "subcommand failed pid %d" % self.pid
+ print "%s" % (self.func,)
+ print "timeout after %ds" % timeout
+ print
+ return None
+
+ if status != 0:
+ print "subcommand failed pid %d" % pid
+ print "%s" % (self.func,)
+ print "rc=%d" % status
+ print
+ if os.path.exists(self.stderr):
+ for line in open(self.stderr).readlines():
+ print line,
+ print "\n--------------------------------------------\n"
+ raise error.AutoservSubcommandError(self.func, status)
+ return status
diff --git a/server/test.py b/server/test.py
index 34de6a6a..31768579 100755
--- a/server/test.py
+++ b/server/test.py
@@ -9,12 +9,12 @@ from autotest_lib.client.common_lib import test as common_test
class test(common_test.base_test):
- pass
+ pass
testname = common_test.testname
def runtest(job, url, tag, args, dargs):
- common_test.runtest(job, url, tag, args, dargs,
- locals(), globals())
+ common_test.runtest(job, url, tag, args, dargs,
+ locals(), globals())
diff --git a/server/tests/sleeptest/sleeptest.py b/server/tests/sleeptest/sleeptest.py
index 7b9d12c3..1ae3013e 100755
--- a/server/tests/sleeptest/sleeptest.py
+++ b/server/tests/sleeptest/sleeptest.py
@@ -1,7 +1,7 @@
import test, time
class sleeptest(test.test):
- version = 1
+ version = 1
- def execute(self, seconds = 1):
- time.sleep(seconds)
+ def execute(self, seconds = 1):
+ time.sleep(seconds)
diff --git a/server/utils.py b/server/utils.py
index 56a102ee..e5e0dd69 100644
--- a/server/utils.py
+++ b/server/utils.py
@@ -22,368 +22,368 @@ __tmp_dirs = {}
############# we need pass throughs for the methods in client/common_lib/utils
-def run(command, timeout=None, ignore_status=False,
- stdout_tee=None, stderr_tee=None):
- return utils.run(command, timeout, ignore_status,
- stdout_tee, stderr_tee)
+def run(command, timeout=None, ignore_status=False,
+ stdout_tee=None, stderr_tee=None):
+ return utils.run(command, timeout, ignore_status,
+ stdout_tee, stderr_tee)
def system(command, timeout=None, ignore_status=False):
- return utils.system(command, timeout, ignore_status)
+ return utils.system(command, timeout, ignore_status)
def system_output(command, timeout=None, ignore_status=False,
- retain_output=False):
- return utils.system_output(command, timeout, ignore_status,
- retain_output)
+ retain_output=False):
+ return utils.system_output(command, timeout, ignore_status,
+ retain_output)
def urlopen(url, data=None, proxies=None, timeout=300):
- return utils.urlopen(url, data=data, proxies=proxies, timeout=timeout)
+ return utils.urlopen(url, data=data, proxies=proxies, timeout=timeout)
def urlretrieve(url, filename=None, reporthook=None, data=None, timeout=300):
- return utils.urlretrieve(url, filename=filename, reporthook=reporthook,
- data=data, timeout=timeout)
+ return utils.urlretrieve(url, filename=filename, reporthook=reporthook,
+ data=data, timeout=timeout)
def read_keyval(path):
- return utils.read_keyval(path)
+ return utils.read_keyval(path)
def write_keyval(path, dictionary):
- return utils.write_keyval(path, dictionary)
+ return utils.write_keyval(path, dictionary)
####################################################################
def sh_escape(command):
- """
- Escape special characters from a command so that it can be passed
- as a double quoted (" ") string in a (ba)sh command.
+ """
+ Escape special characters from a command so that it can be passed
+ as a double quoted (" ") string in a (ba)sh command.
- Args:
- command: the command string to escape.
+ Args:
+ command: the command string to escape.
- Returns:
- The escaped command string. The required englobing double
- quotes are NOT added and so should be added at some point by
- the caller.
+ Returns:
+ The escaped command string. The required enclosing double
+ quotes are NOT added, so they should be added at some point by
+ the caller.
- See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
- """
- command = command.replace("\\", "\\\\")
- command = command.replace("$", r'\$')
- command = command.replace('"', r'\"')
- command = command.replace('`', r'\`')
- return command
+ See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
+ """
+ command = command.replace("\\", "\\\\")
+ command = command.replace("$", r'\$')
+ command = command.replace('"', r'\"')
+ command = command.replace('`', r'\`')
+ return command
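For illustration (the command below is hypothetical), sh_escape() only escapes the characters that stay special inside double quotes; the caller still supplies the surrounding quotes:

cmd = 'echo "cost is $5" > `hostname`.log'
escaped = sh_escape(cmd)
# escaped is: echo \"cost is \$5\" > \`hostname\`.log
host.run('bash -c "%s"' % escaped)   # host is a hypothetical Host object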
def scp_remote_escape(filename):
- """
- Escape special characters from a filename so that it can be passed
- to scp (within double quotes) as a remote file.
+ """
+ Escape special characters from a filename so that it can be passed
+ to scp (within double quotes) as a remote file.
- Bis-quoting has to be used with scp for remote files, "bis-quoting"
- as in quoting x 2
- scp does not support a newline in the filename
+ Bis-quoting (i.e. quoting twice) has to be used with scp for
+ remote files.
+ scp does not support a newline in the filename.
- Args:
- filename: the filename string to escape.
+ Args:
+ filename: the filename string to escape.
- Returns:
- The escaped filename string. The required englobing double
- quotes are NOT added and so should be added at some point by
- the caller.
- """
- escape_chars= r' !"$&' "'" r'()*,:;<=>?[\]^`{|}'
+ Returns:
+ The escaped filename string. The required enclosing double
+ quotes are NOT added, so they should be added at some point by
+ the caller.
+ """
+ escape_chars= r' !"$&' "'" r'()*,:;<=>?[\]^`{|}'
- new_name= []
- for char in filename:
- if char in escape_chars:
- new_name.append("\\%s" % (char,))
- else:
- new_name.append(char)
+ new_name= []
+ for char in filename:
+ if char in escape_chars:
+ new_name.append("\\%s" % (char,))
+ else:
+ new_name.append(char)
- return sh_escape("".join(new_name))
+ return sh_escape("".join(new_name))
def get(location, local_copy = False):
- """Get a file or directory to a local temporary directory.
-
- Args:
- location: the source of the material to get. This source may
- be one of:
- * a local file or directory
- * a URL (http or ftp)
- * a python file-like object
-
- Returns:
- The location of the file or directory where the requested
- content was saved. This will be contained in a temporary
- directory on the local host. If the material to get was a
- directory, the location will contain a trailing '/'
- """
- tmpdir = get_tmp_dir()
-
- # location is a file-like object
- if hasattr(location, "read"):
- tmpfile = os.path.join(tmpdir, "file")
- tmpfileobj = file(tmpfile, 'w')
- shutil.copyfileobj(location, tmpfileobj)
- tmpfileobj.close()
- return tmpfile
-
- if isinstance(location, types.StringTypes):
- # location is a URL
- if location.startswith('http') or location.startswith('ftp'):
- tmpfile = os.path.join(tmpdir, os.path.basename(location))
- utils.urlretrieve(location, tmpfile)
- return tmpfile
- # location is a local path
- elif os.path.exists(os.path.abspath(location)):
- if not local_copy:
- if os.path.isdir(location):
- return location.rstrip('/') + '/'
- else:
- return location
- tmpfile = os.path.join(tmpdir, os.path.basename(location))
- if os.path.isdir(location):
- tmpfile += '/'
- shutil.copytree(location, tmpfile, symlinks=True)
- return tmpfile
- shutil.copyfile(location, tmpfile)
- return tmpfile
- # location is just a string, dump it to a file
- else:
- tmpfd, tmpfile = tempfile.mkstemp(dir=tmpdir)
- tmpfileobj = os.fdopen(tmpfd, 'w')
- tmpfileobj.write(location)
- tmpfileobj.close()
- return tmpfile
+ """Get a file or directory to a local temporary directory.
+
+ Args:
+ location: the source of the material to get. This source may
+ be one of:
+ * a local file or directory
+ * a URL (http or ftp)
+ * a python file-like object
+
+ Returns:
+ The location of the file or directory where the requested
+ content was saved. This will be contained in a temporary
+ directory on the local host. If the material to get was a
+ directory, the location will contain a trailing '/'
+ """
+ tmpdir = get_tmp_dir()
+
+ # location is a file-like object
+ if hasattr(location, "read"):
+ tmpfile = os.path.join(tmpdir, "file")
+ tmpfileobj = file(tmpfile, 'w')
+ shutil.copyfileobj(location, tmpfileobj)
+ tmpfileobj.close()
+ return tmpfile
+
+ if isinstance(location, types.StringTypes):
+ # location is a URL
+ if location.startswith('http') or location.startswith('ftp'):
+ tmpfile = os.path.join(tmpdir, os.path.basename(location))
+ utils.urlretrieve(location, tmpfile)
+ return tmpfile
+ # location is a local path
+ elif os.path.exists(os.path.abspath(location)):
+ if not local_copy:
+ if os.path.isdir(location):
+ return location.rstrip('/') + '/'
+ else:
+ return location
+ tmpfile = os.path.join(tmpdir, os.path.basename(location))
+ if os.path.isdir(location):
+ tmpfile += '/'
+ shutil.copytree(location, tmpfile, symlinks=True)
+ return tmpfile
+ shutil.copyfile(location, tmpfile)
+ return tmpfile
+ # location is just a string, dump it to a file
+ else:
+ tmpfd, tmpfile = tempfile.mkstemp(dir=tmpdir)
+ tmpfileobj = os.fdopen(tmpfd, 'w')
+ tmpfileobj.write(location)
+ tmpfileobj.close()
+ return tmpfile
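A short sketch of the three input forms get() accepts, per the docstring above; URLs and paths are illustrative only:

path1 = get('http://example.com/linux-2.6.18.tar.bz2')   # URL, downloaded into a temp dir
path2 = get('/etc/fstab')                                 # existing local path, returned as-is
path3 = get(open('/etc/fstab'))                           # file-like object, copied to a temp file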
def get_tmp_dir():
- """Return the pathname of a directory on the host suitable
- for temporary file storage.
+ """Return the pathname of a directory on the host suitable
+ for temporary file storage.
- The directory and its content will be deleted automatically
- at the end of the program execution if they are still present.
- """
- global __tmp_dirs
+ The directory and its content will be deleted automatically
+ at the end of the program execution if they are still present.
+ """
+ global __tmp_dirs
- dir_name= tempfile.mkdtemp(prefix="autoserv-")
- pid = os.getpid()
- if not pid in __tmp_dirs:
- __tmp_dirs[pid] = []
- __tmp_dirs[pid].append(dir_name)
- return dir_name
+ dir_name= tempfile.mkdtemp(prefix="autoserv-")
+ pid = os.getpid()
+ if not pid in __tmp_dirs:
+ __tmp_dirs[pid] = []
+ __tmp_dirs[pid].append(dir_name)
+ return dir_name
@atexit.register
def __clean_tmp_dirs():
- """Erase temporary directories that were created by the get_tmp_dir()
- function and that are still present.
- """
- global __tmp_dirs
-
- pid = os.getpid()
- if pid not in __tmp_dirs:
- return
- for dir in __tmp_dirs[pid]:
- try:
- shutil.rmtree(dir)
- except OSError, e:
- if e.errno == 2:
- pass
- __tmp_dirs[pid] = []
+ """Erase temporary directories that were created by the get_tmp_dir()
+ function and that are still present.
+ """
+ global __tmp_dirs
+
+ pid = os.getpid()
+ if pid not in __tmp_dirs:
+ return
+ for dir in __tmp_dirs[pid]:
+ try:
+ shutil.rmtree(dir)
+ except OSError, e:
+ if e.errno == 2:
+ pass
+ __tmp_dirs[pid] = []
def unarchive(host, source_material):
- """Uncompress and untar an archive on a host.
-
- If the "source_material" is compresses (according to the file
- extension) it will be uncompressed. Supported compression formats
- are gzip and bzip2. Afterwards, if the source_material is a tar
- archive, it will be untarred.
-
- Args:
- host: the host object on which the archive is located
- source_material: the path of the archive on the host
-
- Returns:
- The file or directory name of the unarchived source material.
- If the material is a tar archive, it will be extracted in the
- directory where it is and the path returned will be the first
- entry in the archive, assuming it is the topmost directory.
- If the material is not an archive, nothing will be done so this
- function is "harmless" when it is "useless".
- """
- # uncompress
- if (source_material.endswith(".gz") or
- source_material.endswith(".gzip")):
- host.run('gunzip "%s"' % (sh_escape(source_material)))
- source_material= ".".join(source_material.split(".")[:-1])
- elif source_material.endswith("bz2"):
- host.run('bunzip2 "%s"' % (sh_escape(source_material)))
- source_material= ".".join(source_material.split(".")[:-1])
-
- # untar
- if source_material.endswith(".tar"):
- retval= host.run('tar -C "%s" -xvf "%s"' % (
- sh_escape(os.path.dirname(source_material)),
- sh_escape(source_material),))
- source_material= os.path.join(os.path.dirname(source_material),
- retval.stdout.split()[0])
-
- return source_material
+ """Uncompress and untar an archive on a host.
+
+ If the "source_material" is compresses (according to the file
+ extension) it will be uncompressed. Supported compression formats
+ are gzip and bzip2. Afterwards, if the source_material is a tar
+ archive, it will be untarred.
+
+ Args:
+ host: the host object on which the archive is located
+ source_material: the path of the archive on the host
+
+ Returns:
+ The file or directory name of the unarchived source material.
+ If the material is a tar archive, it will be extracted in the
+ directory where it is and the path returned will be the first
+ entry in the archive, assuming it is the topmost directory.
+ If the material is not an archive, nothing will be done so this
+ function is "harmless" when it is "useless".
+ """
+ # uncompress
+ if (source_material.endswith(".gz") or
+ source_material.endswith(".gzip")):
+ host.run('gunzip "%s"' % (sh_escape(source_material)))
+ source_material= ".".join(source_material.split(".")[:-1])
+ elif source_material.endswith("bz2"):
+ host.run('bunzip2 "%s"' % (sh_escape(source_material)))
+ source_material= ".".join(source_material.split(".")[:-1])
+
+ # untar
+ if source_material.endswith(".tar"):
+ retval= host.run('tar -C "%s" -xvf "%s"' % (
+ sh_escape(os.path.dirname(source_material)),
+ sh_escape(source_material),))
+ source_material= os.path.join(os.path.dirname(source_material),
+ retval.stdout.split()[0])
+
+ return source_material
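For example (the host object and path are hypothetical), a gzipped tarball is gunzipped and then untarred next to itself, and the extracted top-level entry is returned:

src = unarchive(host, '/tmp/linux-2.6.18.tar.gz')
# src would now point at something like '/tmp/linux-2.6.18'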
def get_server_dir():
- path = os.path.dirname(sys.modules['autotest_lib.server.utils'].__file__)
- return os.path.abspath(path)
+ path = os.path.dirname(sys.modules['autotest_lib.server.utils'].__file__)
+ return os.path.abspath(path)
def find_pid(command):
- for line in utils.system_output('ps -eo pid,cmd').rstrip().split('\n'):
- (pid, cmd) = line.split(None, 1)
- if re.search(command, cmd):
- return int(pid)
- return None
+ for line in utils.system_output('ps -eo pid,cmd').rstrip().split('\n'):
+ (pid, cmd) = line.split(None, 1)
+ if re.search(command, cmd):
+ return int(pid)
+ return None
def nohup(command, stdout='/dev/null', stderr='/dev/null', background=True,
- env = {}):
- cmd = ' '.join(key+'='+val for key, val in env.iteritems())
- cmd += ' nohup ' + command
- cmd += ' > %s' % stdout
- if stdout == stderr:
- cmd += ' 2>&1'
- else:
- cmd += ' 2> %s' % stderr
- if background:
- cmd += ' &'
- utils.system(cmd)
+ env = {}):
+ cmd = ' '.join(key+'='+val for key, val in env.iteritems())
+ cmd += ' nohup ' + command
+ cmd += ' > %s' % stdout
+ if stdout == stderr:
+ cmd += ' 2>&1'
+ else:
+ cmd += ' 2> %s' % stderr
+ if background:
+ cmd += ' &'
+ utils.system(cmd)
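As a sketch, the shell command assembled by nohup() for a hypothetical call looks like this:

nohup('tcpdump -i eth0', stdout='/tmp/dump.log', stderr='/tmp/dump.log',
      env={'LANG': 'C'})
# runs: LANG=C nohup tcpdump -i eth0 > /tmp/dump.log 2>&1 &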
def default_mappings(machines):
- """
- Returns a simple mapping in which all machines are assigned to the
- same key. Provides the default behavior for
- form_ntuples_from_machines. """
- mappings = {}
- failures = []
-
- mach = machines[0]
- mappings['ident'] = [mach]
- if len(machines) > 1:
- machines = machines[1:]
- for machine in machines:
- mappings['ident'].append(machine)
-
- return (mappings, failures)
+ """
+ Returns a simple mapping in which all machines are assigned to the
+ same key. Provides the default behavior for
+ form_ntuples_from_machines. """
+ mappings = {}
+ failures = []
+
+ mach = machines[0]
+ mappings['ident'] = [mach]
+ if len(machines) > 1:
+ machines = machines[1:]
+ for machine in machines:
+ mappings['ident'].append(machine)
+
+ return (mappings, failures)
def form_ntuples_from_machines(machines, n=2, mapping_func=default_mappings):
- """Returns a set of ntuples from machines where the machines in an
- ntuple are in the same mapping, and a set of failures which are
- (machine name, reason) tuples."""
- ntuples = []
- (mappings, failures) = mapping_func(machines)
-
- # now run through the mappings and create n-tuples.
- # throw out the odd guys out
- for key in mappings:
- key_machines = mappings[key]
- total_machines = len(key_machines)
+ """Returns a set of ntuples from machines where the machines in an
+ ntuple are in the same mapping, and a set of failures which are
+ (machine name, reason) tuples."""
+ ntuples = []
+ (mappings, failures) = mapping_func(machines)
+
+ # now run through the mappings and create n-tuples.
+ # throw the odd ones out
+ for key in mappings:
+ key_machines = mappings[key]
+ total_machines = len(key_machines)
- # form n-tuples
- while len(key_machines) >= n:
- ntuples.append(key_machines[0:n])
- key_machines = key_machines[n:]
+ # form n-tuples
+ while len(key_machines) >= n:
+ ntuples.append(key_machines[0:n])
+ key_machines = key_machines[n:]
- for mach in key_machines:
- failures.append((mach, "machine can not be tupled"))
+ for mach in key_machines:
+ failures.append((mach, "machine can not be tupled"))
- return (ntuples, failures)
+ return (ntuples, failures)
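To illustrate the pairing behaviour with the default mapping (machine names are hypothetical):

machines = ['m1', 'm2', 'm3', 'm4', 'm5']
ntuples, failures = form_ntuples_from_machines(machines, n=2)
# ntuples  == [['m1', 'm2'], ['m3', 'm4']]
# failures == [('m5', 'machine can not be tupled')]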
def parse_machine(machine, user = 'root', port = 22, password = ''):
- """
- Parse the machine string user:pass@host:port and return it separately,
- if the machine string is not complete, use the default parameters
- when appropriate.
- """
+ """
+ Parse the machine string user:pass@host:port and return it separately,
+ if the machine string is not complete, use the default parameters
+ when appropriate.
+ """
- user = user
- port = port
- password = password
+ user = user
+ port = port
+ password = password
- if re.search('@', machine):
- machine = machine.split('@')
+ if re.search('@', machine):
+ machine = machine.split('@')
- if re.search(':', machine[0]):
- machine[0] = machine[0].split(':')
- user = machine[0][0]
- password = machine[0][1]
+ if re.search(':', machine[0]):
+ machine[0] = machine[0].split(':')
+ user = machine[0][0]
+ password = machine[0][1]
- else:
- user = machine[0]
+ else:
+ user = machine[0]
- if re.search(':', machine[1]):
- machine[1] = machine[1].split(':')
- hostname = machine[1][0]
- port = int(machine[1][1])
+ if re.search(':', machine[1]):
+ machine[1] = machine[1].split(':')
+ hostname = machine[1][0]
+ port = int(machine[1][1])
- else:
- hostname = machine[1]
+ else:
+ hostname = machine[1]
- elif re.search(':', machine):
- machine = machine.split(':')
- hostname = machine[0]
- port = int(machine[1])
+ elif re.search(':', machine):
+ machine = machine.split(':')
+ hostname = machine[0]
+ port = int(machine[1])
- else:
- hostname = machine
+ else:
+ hostname = machine
- return hostname, user, password, port
+ return hostname, user, password, port
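A quick sketch of the string forms parse_machine() understands; all values are hypothetical:

parse_machine('host1')                   # -> ('host1', 'root', '', 22)
parse_machine('user:pass@host1:2222')    # -> ('host1', 'user', 'pass', 2222)
parse_machine('host1:2222')              # -> ('host1', 'root', '', 2222)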
def get_public_key():
- """
- Return a valid string ssh public key for the user executing autoserv or
- autotest. If there's no DSA or RSA public key, create a DSA keypair with
- ssh-keygen and return it.
- """
+ """
+ Return a valid string ssh public key for the user executing autoserv or
+ autotest. If there's no DSA or RSA public key, create a DSA keypair with
+ ssh-keygen and return it.
+ """
- ssh_conf_path = os.path.join(os.environ['HOME'], '.ssh')
+ ssh_conf_path = os.path.join(os.environ['HOME'], '.ssh')
- dsa_public_key_path = os.path.join(ssh_conf_path, 'id_dsa.pub')
- dsa_private_key_path = os.path.join(ssh_conf_path, 'id_dsa')
+ dsa_public_key_path = os.path.join(ssh_conf_path, 'id_dsa.pub')
+ dsa_private_key_path = os.path.join(ssh_conf_path, 'id_dsa')
- rsa_public_key_path = os.path.join(ssh_conf_path, 'id_rsa.pub')
- rsa_private_key_path = os.path.join(ssh_conf_path, 'id_rsa')
+ rsa_public_key_path = os.path.join(ssh_conf_path, 'id_rsa.pub')
+ rsa_private_key_path = os.path.join(ssh_conf_path, 'id_rsa')
- has_dsa_keypair = os.path.isfile(dsa_public_key_path) and \
- os.path.isfile(dsa_private_key_path)
- has_rsa_keypair = os.path.isfile(rsa_public_key_path) and \
- os.path.isfile(rsa_private_key_path)
+ has_dsa_keypair = os.path.isfile(dsa_public_key_path) and \
+ os.path.isfile(dsa_private_key_path)
+ has_rsa_keypair = os.path.isfile(rsa_public_key_path) and \
+ os.path.isfile(rsa_private_key_path)
- if has_dsa_keypair:
- print 'DSA keypair found, using it'
- public_key_path = dsa_public_key_path
+ if has_dsa_keypair:
+ print 'DSA keypair found, using it'
+ public_key_path = dsa_public_key_path
- elif has_rsa_keypair:
- print 'RSA keypair found, using it'
- public_key_path = rsa_public_key_path
+ elif has_rsa_keypair:
+ print 'RSA keypair found, using it'
+ public_key_path = rsa_public_key_path
- else:
- print 'Neither RSA nor DSA keypair found, creating DSA ssh key pair'
- system('ssh-keygen -t dsa -q -N "" -f %s' % dsa_private_key_path)
- public_key_path = dsa_public_key_path
+ else:
+ print 'Neither RSA nor DSA keypair found, creating DSA ssh key pair'
+ system('ssh-keygen -t dsa -q -N "" -f %s' % dsa_private_key_path)
+ public_key_path = dsa_public_key_path
- public_key = open(public_key_path, 'r')
- public_key_str = public_key.read()
- public_key.close()
+ public_key = open(public_key_path, 'r')
+ public_key_str = public_key.read()
+ public_key.close()
- return public_key_str
+ return public_key_str
diff --git a/server/utils_unittest.py b/server/utils_unittest.py
index d28ec161..002164ee 100644
--- a/server/utils_unittest.py
+++ b/server/utils_unittest.py
@@ -8,23 +8,23 @@ from autotest_lib.server import utils
class UtilsTest(unittest.TestCase):
-
- def setUp(self):
- # define out machines here
- self.machines = ['mach1', 'mach2', 'mach3', 'mach4', 'mach5',
- 'mach6', 'mach7']
-
- self.ntuples = [['mach1', 'mach2'], ['mach3', 'mach4'],
- ['mach5', 'mach6']]
- self.failures = []
- self.failures.append(('mach7', "machine can not be tupled"))
+ def setUp(self):
+ # define our machines here
+ self.machines = ['mach1', 'mach2', 'mach3', 'mach4', 'mach5',
+ 'mach6', 'mach7']
- def test_form_cell_mappings(self):
- (ntuples, failures) = utils.form_ntuples_from_machines(self.machines)
- self.assertEquals(self.ntuples, ntuples)
- self.assertEquals(self.failures, failures)
+ self.ntuples = [['mach1', 'mach2'], ['mach3', 'mach4'],
+ ['mach5', 'mach6']]
+ self.failures = []
+ self.failures.append(('mach7', "machine can not be tupled"))
+
+
+ def test_form_cell_mappings(self):
+ (ntuples, failures) = utils.form_ntuples_from_machines(self.machines)
+ self.assertEquals(self.ntuples, ntuples)
+ self.assertEquals(self.failures, failures)
if __name__ == "__main__":
- unittest.main()
+ unittest.main()
diff --git a/server/warning_monitor.py b/server/warning_monitor.py
index 6cd15c76..3b216b95 100644
--- a/server/warning_monitor.py
+++ b/server/warning_monitor.py
@@ -8,11 +8,11 @@ warnfile = os.fdopen(int(sys.argv[2]), 'w', 0)
# the format for a warning used here is:
# <timestamp (integer)> <tab> <status (string)> <newline>
def make_alert(msg):
- def alert(*params):
- formatted_msg = msg % params
- timestamped_msg = "%d\t%s" % (time.time(), formatted_msg)
- print >> warnfile, timestamped_msg
- return alert
+ def alert(*params):
+ formatted_msg = msg % params
+ timestamped_msg = "%d\t%s" % (time.time(), formatted_msg)
+ print >> warnfile, timestamped_msg
+ return alert
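A small sketch of how make_alert() is used below; the pattern text here is hypothetical:

alert = make_alert('machine panicked: %s')
alert('Oops at c0123456')
# writes "<epoch seconds>\tmachine panicked: Oops at c0123456" to warnfile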
pattern_file = os.path.join(os.path.dirname(__file__), 'warning_patterns')
@@ -28,15 +28,15 @@ patterns = zip(pattern_lines[0::3], pattern_lines[1::3])
# assert that the patterns are separated by empty lines
if sum(len(line.strip()) for line in pattern_lines[2::3]) > 0:
- raise ValueError('warning patterns are not separated by blank lines')
+ raise ValueError('warning patterns are not separated by blank lines')
hooks = [(re.compile(regex.rstrip('\n')), make_alert(alert.rstrip('\n')))
- for regex, alert in patterns]
+ for regex, alert in patterns]
while True:
- line = sys.stdin.readline()
- logfile.write(line)
- for regex, callback in hooks:
- match = re.match(regex, line.strip())
- if match:
- callback(*match.groups())
+ line = sys.stdin.readline()
+ logfile.write(line)
+ for regex, callback in hooks:
+ match = re.match(regex, line.strip())
+ if match:
+ callback(*match.groups())
diff --git a/tko/db.py b/tko/db.py
index 235b485d..175869bc 100644
--- a/tko/db.py
+++ b/tko/db.py
@@ -5,488 +5,488 @@ from autotest_lib.client.common_lib import global_config
class MySQLTooManyRows(Exception):
- pass
+ pass
class db_sql:
- def __init__(self, debug=False, autocommit=True, host=None,
- database=None, user=None, password=None):
- self.debug = debug
- self.autocommit = autocommit
- self._load_config(host, database, user, password)
-
- self.con = None
- self._init_db()
-
- # if not present, insert statuses
- self.status_idx = {}
- self.status_word = {}
- status_rows = self.select('status_idx, word', 'status', None)
- for s in status_rows:
- self.status_idx[s[1]] = s[0]
- self.status_word[s[0]] = s[1]
-
- machine_map = os.path.join(os.path.dirname(__file__),
- 'machines')
- if os.path.exists(machine_map):
- self.machine_map = machine_map
- else:
- self.machine_map = None
- self.machine_group = {}
-
-
- def _load_config(self, host, database, user, password):
- # grab the global config
- get_value = global_config.global_config.get_config_value
-
- # grab the host, database
- if host:
- self.host = host
- else:
- self.host = get_value("TKO", "host")
- if database:
- self.database = database
- else:
- self.database = get_value("TKO", "database")
-
- # grab the user and password
- if user:
- self.user = user
- else:
- self.user = get_value("TKO", "user")
- if password:
- self.password = password
- else:
- self.password = get_value("TKO", "password")
-
- # grab the timeout configuration
- self.query_timeout = get_value("TKO", "query_timeout",
- type=int, default=3600)
- self.min_delay = get_value("TKO", "min_retry_delay", type=int,
- default=20)
- self.max_delay = get_value("TKO", "max_retry_delay", type=int,
- default=60)
-
-
- def _init_db(self):
- # make sure we clean up any existing connection
- if self.con:
- self.con.close()
- self.con = None
-
- # create the db connection and cursor
- self.con = self.connect(self.host, self.database,
- self.user, self.password)
- self.cur = self.con.cursor()
-
-
- def _random_delay(self):
- delay = random.randint(self.min_delay, self.max_delay)
- time.sleep(delay)
-
-
- def run_with_retry(self, function, *args, **dargs):
- """Call function(*args, **dargs) until either it passes
- without an operational error, or a timeout is reached.
- This will re-connect to the database, so it is NOT safe
- to use this inside of a database transaction.
-
- It can be safely used with transactions, but the
- transaction start & end must be completely contained
- within the call to 'function'."""
- OperationalError = _get_error_class("OperationalError")
-
- success = False
- start_time = time.time()
- while not success:
- try:
- result = function(*args, **dargs)
- except OperationalError, e:
- self._log_operational_error(e)
- stop_time = time.time()
- elapsed_time = stop_time - start_time
- if elapsed_time > self.query_timeout:
- raise
- else:
- try:
- self._random_delay()
- self._init_db()
- except OperationalError, e:
- self._log_operational_error(e)
- else:
- success = True
- return result
-
-
- def _log_operational_error(self, e):
- msg = ("An operational error occured during a database "
- "operation: %s" % str(e))
- print >> sys.stderr, msg
- sys.stderr.flush() # we want these msgs to show up immediately
-
-
- def dprint(self, value):
- if self.debug:
- sys.stdout.write('SQL: ' + str(value) + '\n')
-
-
- def commit(self):
- self.con.commit()
-
-
- def get_last_autonumber_value(self):
- self.cur.execute('SELECT LAST_INSERT_ID()', [])
- return self.cur.fetchall()[0][0]
-
-
- def select(self, fields, table, where, wherein={},
- distinct = False, group_by = None, max_rows = None):
- """\
- This selects all the fields requested from a
- specific table with a particular where clause.
- The where clause can either be a dictionary of
- field=value pairs, a string, or a tuple of (string,
- a list of values). The last option is what you
- should use when accepting user input as it'll
- protect you against sql injection attacks (if
- all user data is placed in the array rather than
- the raw SQL).
-
- For example:
- where = ("a = %s AND b = %s", ['val', 'val'])
- is better than
- where = "a = 'val' AND b = 'val'"
- """
- cmd = ['select']
- if distinct:
- cmd.append('distinct')
- cmd += [fields, 'from', table]
-
- values = []
- if where and isinstance(where, types.DictionaryType):
- # key/value pairs (which should be equal)
- keys = [field + '=%s' for field in where.keys()]
- values = [where[field] for field in where.keys()]
-
- cmd.append(' where ' + ' and '.join(keys))
- elif where and isinstance(where, types.StringTypes):
- # the exact string
- cmd.append(' where ' + where)
- elif where and isinstance(where, types.TupleType):
- # preformatted where clause + values
- (sql, vals) = where
- values = vals
- cmd.append(' where (%s) ' % sql)
-
- # TODO: this assumes there's a where clause...bad
- if wherein and isinstance(wherein, types.DictionaryType):
- keys_in = ["%s in (%s) " % (field, ','.join(where))
- for field, where in wherein.iteritems()]
- cmd.append(' and '+' and '.join(keys_in))
-
- if group_by:
- cmd.append(' GROUP BY ' + group_by)
-
- self.dprint('%s %s' % (' '.join(cmd), values))
-
- # create a re-runable function for executing the query
- def exec_sql():
- sql = ' '.join(cmd)
- numRec = self.cur.execute(sql, values)
- if max_rows != None and numRec > max_rows:
- msg = 'Exceeded allowed number of records'
- raise MySQLTooManyRows(msg)
- return self.cur.fetchall()
-
- # run the query, re-trying after operational errors
- if self.autocommit:
- return self.run_with_retry(exec_sql)
- else:
- return exec_sql()
-
-
- def select_sql(self, fields, table, sql, values):
- """\
- select fields from table "sql"
- """
- cmd = 'select %s from %s %s' % (fields, table, sql)
- self.dprint(cmd)
-
- # create a -re-runable function for executing the query
- def exec_sql():
- self.cur.execute(cmd, values)
- return self.cur.fetchall()
-
- # run the query, re-trying after operational errors
- if self.autocommit:
- return self.run_with_retry(exec_sql)
- else:
- return exec_sql()
-
-
- def _exec_sql_with_commit(self, sql, values, commit):
- if self.autocommit:
- # re-run the query until it succeeds
- def exec_sql():
- self.cur.execute(sql, values)
- self.con.commit()
- self.run_with_retry(exec_sql)
- else:
- # take one shot at running the query
- self.cur.execute(sql, values)
- if commit:
- self.con.commit()
-
-
- def insert(self, table, data, commit = None):
- """\
- 'insert into table (keys) values (%s ... %s)', values
-
- data:
- dictionary of fields and data
- """
- fields = data.keys()
- refs = ['%s' for field in fields]
- values = [data[field] for field in fields]
- cmd = 'insert into %s (%s) values (%s)' % \
- (table, ','.join(fields), ','.join(refs))
- self.dprint('%s %s' % (cmd, values))
-
- self._exec_sql_with_commit(cmd, values, commit)
-
-
- def delete(self, table, where, commit = None):
- cmd = ['delete from', table]
- if commit == None:
- commit = self.autocommit
- if where and isinstance(where, types.DictionaryType):
- keys = [field + '=%s' for field in where.keys()]
- values = [where[field] for field in where.keys()]
- cmd += ['where', ' and '.join(keys)]
- sql = ' '.join(cmd)
- self.dprint('%s %s' % (sql, values))
-
- self._exec_sql_with_commit(sql, values, commit)
-
-
- def update(self, table, data, where, commit = None):
- """\
- 'update table set data values (%s ... %s) where ...'
-
- data:
- dictionary of fields and data
- """
- if commit == None:
- commit = self.autocommit
- cmd = 'update %s ' % table
- fields = data.keys()
- data_refs = [field + '=%s' for field in fields]
- data_values = [data[field] for field in fields]
- cmd += ' set ' + ' and '.join(data_refs)
-
- where_keys = [field + '=%s' for field in where.keys()]
- where_values = [where[field] for field in where.keys()]
- cmd += ' where ' + ' and '.join(where_keys)
-
- values = data_values + where_values
- print '%s %s' % (cmd, values)
-
- self._exec_sql_with_commit(cmd, values, commit)
-
-
- def delete_job(self, tag, commit = None):
- job_idx = self.find_job(tag)
- for test_idx in self.find_tests(job_idx):
- where = {'test_idx' : test_idx}
- self.delete('iteration_result', where)
- self.delete('test_attributes', where)
- where = {'job_idx' : job_idx}
- self.delete('tests', where)
- self.delete('jobs', where)
-
-
- def insert_job(self, tag, job, commit = None):
- job.machine_idx = self.lookup_machine(job.machine)
- if not job.machine_idx:
- job.machine_idx = self.insert_machine(job,
- commit=commit)
- self.insert('jobs', {'tag':tag,
- 'label': job.label,
- 'username': job.user,
- 'machine_idx': job.machine_idx,
- 'queued_time': job.queued_time,
- 'started_time': job.started_time,
- 'finished_time': job.finished_time},
- commit=commit)
- job.index = self.get_last_autonumber_value()
- for test in job.tests:
- self.insert_test(job, test, commit=commit)
-
-
- def insert_test(self, job, test, commit = None):
- kver = self.insert_kernel(test.kernel, commit=commit)
- data = {'job_idx':job.index, 'test':test.testname,
- 'subdir':test.subdir, 'kernel_idx':kver,
- 'status':self.status_idx[test.status],
- 'reason':test.reason, 'machine_idx':job.machine_idx,
- 'started_time': test.started_time,
- 'finished_time':test.finished_time}
- self.insert('tests', data, commit=commit)
- test_idx = self.get_last_autonumber_value()
- data = { 'test_idx':test_idx }
-
- for i in test.iterations:
- data['iteration'] = i.index
- for key, value in i.attr_keyval.iteritems():
- data['attribute'] = key
- data['value'] = value
- self.insert('iteration_attributes', data,
- commit=commit)
- for key, value in i.perf_keyval.iteritems():
- data['attribute'] = key
- data['value'] = value
- self.insert('iteration_result', data,
- commit=commit)
-
- for key, value in test.attributes.iteritems():
- data = {'test_idx': test_idx, 'attribute': key,
- 'value': value}
- self.insert('test_attributes', data, commit=commit)
-
-
- def read_machine_map(self):
- self.machine_group = {}
- for line in open(self.machine_map, 'r').readlines():
- (machine, group) = line.split()
- self.machine_group[machine] = group
-
-
- def insert_machine(self, job, group = None, commit = None):
- hostname = job.machine
- if self.machine_map and not self.machine_group:
- self.read_machine_map()
-
- if not group:
- group = self.machine_group.get(hostname, hostname)
- if group == hostname and job.machine_owner:
- group = job.machine_owner + '/' + hostname
-
- self.insert('machines',
- { 'hostname' : hostname ,
- 'machine_group' : group ,
- 'owner' : job.machine_owner },
- commit=commit)
- return self.get_last_autonumber_value()
-
-
- def lookup_machine(self, hostname):
- where = { 'hostname' : hostname }
- rows = self.select('machine_idx', 'machines', where)
- if rows:
- return rows[0][0]
- else:
- return None
-
-
- def lookup_kernel(self, kernel):
- rows = self.select('kernel_idx', 'kernels',
- {'kernel_hash':kernel.kernel_hash})
- if rows:
- return rows[0][0]
- else:
- return None
-
-
- def insert_kernel(self, kernel, commit = None):
- kver = self.lookup_kernel(kernel)
- if kver:
- return kver
-
- # If this kernel has any significant patches, append their hash
- # as diferentiator.
- printable = kernel.base
- patch_count = 0
- for patch in kernel.patches:
- match = re.match(r'.*(-mm[0-9]+|-git[0-9]+)\.(bz2|gz)$',
- patch.reference)
- if not match:
- patch_count += 1
-
- self.insert('kernels',
- {'base':kernel.base,
- 'kernel_hash':kernel.kernel_hash,
- 'printable':printable},
- commit=commit)
- kver = self.get_last_autonumber_value()
-
- if patch_count > 0:
- printable += ' p%d' % (kver)
- self.update('kernels',
- {'printable':printable},
- {'kernel_idx':kver})
-
- for patch in kernel.patches:
- self.insert_patch(kver, patch, commit=commit)
- return kver
-
-
- def insert_patch(self, kver, patch, commit = None):
- print patch.reference
- name = os.path.basename(patch.reference)[:80]
- self.insert('patches',
- {'kernel_idx': kver,
- 'name':name,
- 'url':patch.reference,
- 'hash':patch.hash},
+ def __init__(self, debug=False, autocommit=True, host=None,
+ database=None, user=None, password=None):
+ self.debug = debug
+ self.autocommit = autocommit
+ self._load_config(host, database, user, password)
+
+ self.con = None
+ self._init_db()
+
+ # if not present, insert statuses
+ self.status_idx = {}
+ self.status_word = {}
+ status_rows = self.select('status_idx, word', 'status', None)
+ for s in status_rows:
+ self.status_idx[s[1]] = s[0]
+ self.status_word[s[0]] = s[1]
+
+ machine_map = os.path.join(os.path.dirname(__file__),
+ 'machines')
+ if os.path.exists(machine_map):
+ self.machine_map = machine_map
+ else:
+ self.machine_map = None
+ self.machine_group = {}
+
+
+ def _load_config(self, host, database, user, password):
+ # grab the global config
+ get_value = global_config.global_config.get_config_value
+
+ # grab the host, database
+ if host:
+ self.host = host
+ else:
+ self.host = get_value("TKO", "host")
+ if database:
+ self.database = database
+ else:
+ self.database = get_value("TKO", "database")
+
+ # grab the user and password
+ if user:
+ self.user = user
+ else:
+ self.user = get_value("TKO", "user")
+ if password:
+ self.password = password
+ else:
+ self.password = get_value("TKO", "password")
+
+ # grab the timeout configuration
+ self.query_timeout = get_value("TKO", "query_timeout",
+ type=int, default=3600)
+ self.min_delay = get_value("TKO", "min_retry_delay", type=int,
+ default=20)
+ self.max_delay = get_value("TKO", "max_retry_delay", type=int,
+ default=60)
+
+
+ def _init_db(self):
+ # make sure we clean up any existing connection
+ if self.con:
+ self.con.close()
+ self.con = None
+
+ # create the db connection and cursor
+ self.con = self.connect(self.host, self.database,
+ self.user, self.password)
+ self.cur = self.con.cursor()
+
+
+ def _random_delay(self):
+ delay = random.randint(self.min_delay, self.max_delay)
+ time.sleep(delay)
+
+
+ def run_with_retry(self, function, *args, **dargs):
+ """Call function(*args, **dargs) until either it passes
+ without an operational error, or a timeout is reached.
+ This will re-connect to the database, so it is NOT safe
+ to use this inside of a database transaction.
+
+ It can be safely used with transactions, but the
+ transaction start & end must be completely contained
+ within the call to 'function'."""
+ OperationalError = _get_error_class("OperationalError")
+
+ success = False
+ start_time = time.time()
+ while not success:
+ try:
+ result = function(*args, **dargs)
+ except OperationalError, e:
+ self._log_operational_error(e)
+ stop_time = time.time()
+ elapsed_time = stop_time - start_time
+ if elapsed_time > self.query_timeout:
+ raise
+ else:
+ try:
+ self._random_delay()
+ self._init_db()
+ except OperationalError, e:
+ self._log_operational_error(e)
+ else:
+ success = True
+ return result
+
+
+ def _log_operational_error(self, e):
+ msg = ("An operational error occured during a database "
+ "operation: %s" % str(e))
+ print >> sys.stderr, msg
+ sys.stderr.flush() # we want these msgs to show up immediately
+
+
+ def dprint(self, value):
+ if self.debug:
+ sys.stdout.write('SQL: ' + str(value) + '\n')
+
+
+ def commit(self):
+ self.con.commit()
+
+
+ def get_last_autonumber_value(self):
+ self.cur.execute('SELECT LAST_INSERT_ID()', [])
+ return self.cur.fetchall()[0][0]
+
+
+ def select(self, fields, table, where, wherein={},
+ distinct = False, group_by = None, max_rows = None):
+ """\
+ This selects all the fields requested from a
+ specific table with a particular where clause.
+ The where clause can either be a dictionary of
+ field=value pairs, a string, or a tuple of (string,
+ a list of values). The last option is what you
+ should use when accepting user input as it'll
+ protect you against sql injection attacks (if
+ all user data is placed in the array rather than
+ the raw SQL).
+
+ For example:
+ where = ("a = %s AND b = %s", ['val', 'val'])
+ is better than
+ where = "a = 'val' AND b = 'val'"
+ """
+ cmd = ['select']
+ if distinct:
+ cmd.append('distinct')
+ cmd += [fields, 'from', table]
+
+ values = []
+ if where and isinstance(where, types.DictionaryType):
+ # key/value pairs (which should be equal)
+ keys = [field + '=%s' for field in where.keys()]
+ values = [where[field] for field in where.keys()]
+
+ cmd.append(' where ' + ' and '.join(keys))
+ elif where and isinstance(where, types.StringTypes):
+ # the exact string
+ cmd.append(' where ' + where)
+ elif where and isinstance(where, types.TupleType):
+ # preformatted where clause + values
+ (sql, vals) = where
+ values = vals
+ cmd.append(' where (%s) ' % sql)
+
+ # TODO: this assumes there's a where clause...bad
+ if wherein and isinstance(wherein, types.DictionaryType):
+ keys_in = ["%s in (%s) " % (field, ','.join(where))
+ for field, where in wherein.iteritems()]
+ cmd.append(' and '+' and '.join(keys_in))
+
+ if group_by:
+ cmd.append(' GROUP BY ' + group_by)
+
+ self.dprint('%s %s' % (' '.join(cmd), values))
+
+ # create a re-runnable function for executing the query
+ def exec_sql():
+ sql = ' '.join(cmd)
+ numRec = self.cur.execute(sql, values)
+ if max_rows != None and numRec > max_rows:
+ msg = 'Exceeded allowed number of records'
+ raise MySQLTooManyRows(msg)
+ return self.cur.fetchall()
+
+ # run the query, re-trying after operational errors
+ if self.autocommit:
+ return self.run_with_retry(exec_sql)
+ else:
+ return exec_sql()
+
+
+ def select_sql(self, fields, table, sql, values):
+ """\
+ select fields from table "sql"
+ """
+ cmd = 'select %s from %s %s' % (fields, table, sql)
+ self.dprint(cmd)
+
+ # create a re-runnable function for executing the query
+ def exec_sql():
+ self.cur.execute(cmd, values)
+ return self.cur.fetchall()
+
+ # run the query, re-trying after operational errors
+ if self.autocommit:
+ return self.run_with_retry(exec_sql)
+ else:
+ return exec_sql()
+
+
+ def _exec_sql_with_commit(self, sql, values, commit):
+ if self.autocommit:
+ # re-run the query until it succeeds
+ def exec_sql():
+ self.cur.execute(sql, values)
+ self.con.commit()
+ self.run_with_retry(exec_sql)
+ else:
+ # take one shot at running the query
+ self.cur.execute(sql, values)
+ if commit:
+ self.con.commit()
+
+
+ def insert(self, table, data, commit = None):
+ """\
+ 'insert into table (keys) values (%s ... %s)', values
+
+ data:
+ dictionary of fields and data
+ """
+ fields = data.keys()
+ refs = ['%s' for field in fields]
+ values = [data[field] for field in fields]
+ cmd = 'insert into %s (%s) values (%s)' % \
+ (table, ','.join(fields), ','.join(refs))
+ self.dprint('%s %s' % (cmd, values))
+
+ self._exec_sql_with_commit(cmd, values, commit)
+
+
+ def delete(self, table, where, commit = None):
+ cmd = ['delete from', table]
+ if commit == None:
+ commit = self.autocommit
+ if where and isinstance(where, types.DictionaryType):
+ keys = [field + '=%s' for field in where.keys()]
+ values = [where[field] for field in where.keys()]
+ cmd += ['where', ' and '.join(keys)]
+ sql = ' '.join(cmd)
+ self.dprint('%s %s' % (sql, values))
+
+ self._exec_sql_with_commit(sql, values, commit)
+
+
+ def update(self, table, data, where, commit = None):
+ """\
+ 'update table set data values (%s ... %s) where ...'
+
+ data:
+ dictionary of fields and data
+ """
+ if commit == None:
+ commit = self.autocommit
+ cmd = 'update %s ' % table
+ fields = data.keys()
+ data_refs = [field + '=%s' for field in fields]
+ data_values = [data[field] for field in fields]
+ cmd += ' set ' + ' and '.join(data_refs)
+
+ where_keys = [field + '=%s' for field in where.keys()]
+ where_values = [where[field] for field in where.keys()]
+ cmd += ' where ' + ' and '.join(where_keys)
+
+ values = data_values + where_values
+ print '%s %s' % (cmd, values)
+
+ self._exec_sql_with_commit(cmd, values, commit)
+
+
+ def delete_job(self, tag, commit = None):
+ job_idx = self.find_job(tag)
+ for test_idx in self.find_tests(job_idx):
+ where = {'test_idx' : test_idx}
+ self.delete('iteration_result', where)
+ self.delete('test_attributes', where)
+ where = {'job_idx' : job_idx}
+ self.delete('tests', where)
+ self.delete('jobs', where)
+
+
+ def insert_job(self, tag, job, commit = None):
+ job.machine_idx = self.lookup_machine(job.machine)
+ if not job.machine_idx:
+ job.machine_idx = self.insert_machine(job,
+ commit=commit)
+ self.insert('jobs', {'tag':tag,
+ 'label': job.label,
+ 'username': job.user,
+ 'machine_idx': job.machine_idx,
+ 'queued_time': job.queued_time,
+ 'started_time': job.started_time,
+ 'finished_time': job.finished_time},
+ commit=commit)
+ job.index = self.get_last_autonumber_value()
+ for test in job.tests:
+ self.insert_test(job, test, commit=commit)
+
+
+ def insert_test(self, job, test, commit = None):
+ kver = self.insert_kernel(test.kernel, commit=commit)
+ data = {'job_idx':job.index, 'test':test.testname,
+ 'subdir':test.subdir, 'kernel_idx':kver,
+ 'status':self.status_idx[test.status],
+ 'reason':test.reason, 'machine_idx':job.machine_idx,
+ 'started_time': test.started_time,
+ 'finished_time':test.finished_time}
+ self.insert('tests', data, commit=commit)
+ test_idx = self.get_last_autonumber_value()
+ data = { 'test_idx':test_idx }
+
+ for i in test.iterations:
+ data['iteration'] = i.index
+ for key, value in i.attr_keyval.iteritems():
+ data['attribute'] = key
+ data['value'] = value
+ self.insert('iteration_attributes', data,
+ commit=commit)
+ for key, value in i.perf_keyval.iteritems():
+ data['attribute'] = key
+ data['value'] = value
+ self.insert('iteration_result', data,
commit=commit)
-
- def find_test(self, job_idx, subdir):
- where = { 'job_idx':job_idx , 'subdir':subdir }
- rows = self.select('test_idx', 'tests', where)
- if rows:
- return rows[0][0]
- else:
- return None
-
-
- def find_tests(self, job_idx):
- where = { 'job_idx':job_idx }
- rows = self.select('test_idx', 'tests', where)
- if rows:
- return [row[0] for row in rows]
- else:
- return []
-
-
- def find_job(self, tag):
- rows = self.select('job_idx', 'jobs', {'tag': tag})
- if rows:
- return rows[0][0]
- else:
- return None
+ for key, value in test.attributes.iteritems():
+ data = {'test_idx': test_idx, 'attribute': key,
+ 'value': value}
+ self.insert('test_attributes', data, commit=commit)
+
+
+ def read_machine_map(self):
+ self.machine_group = {}
+ for line in open(self.machine_map, 'r').readlines():
+ (machine, group) = line.split()
+ self.machine_group[machine] = group
+
+
+ def insert_machine(self, job, group = None, commit = None):
+ hostname = job.machine
+ if self.machine_map and not self.machine_group:
+ self.read_machine_map()
+
+ if not group:
+ group = self.machine_group.get(hostname, hostname)
+ if group == hostname and job.machine_owner:
+ group = job.machine_owner + '/' + hostname
+
+ self.insert('machines',
+ { 'hostname' : hostname ,
+ 'machine_group' : group ,
+ 'owner' : job.machine_owner },
+ commit=commit)
+ return self.get_last_autonumber_value()
+
+
+ def lookup_machine(self, hostname):
+ where = { 'hostname' : hostname }
+ rows = self.select('machine_idx', 'machines', where)
+ if rows:
+ return rows[0][0]
+ else:
+ return None
+
+
+ def lookup_kernel(self, kernel):
+ rows = self.select('kernel_idx', 'kernels',
+ {'kernel_hash':kernel.kernel_hash})
+ if rows:
+ return rows[0][0]
+ else:
+ return None
+
+
+ def insert_kernel(self, kernel, commit = None):
+ kver = self.lookup_kernel(kernel)
+ if kver:
+ return kver
+
+ # If this kernel has any significant patches, append their hash
+ # as a differentiator.
+ printable = kernel.base
+ patch_count = 0
+ for patch in kernel.patches:
+ match = re.match(r'.*(-mm[0-9]+|-git[0-9]+)\.(bz2|gz)$',
+ patch.reference)
+ if not match:
+ patch_count += 1
+
+ self.insert('kernels',
+ {'base':kernel.base,
+ 'kernel_hash':kernel.kernel_hash,
+ 'printable':printable},
+ commit=commit)
+ kver = self.get_last_autonumber_value()
+
+ if patch_count > 0:
+ printable += ' p%d' % (kver)
+ self.update('kernels',
+ {'printable':printable},
+ {'kernel_idx':kver})
+
+ for patch in kernel.patches:
+ self.insert_patch(kver, patch, commit=commit)
+ return kver
+
+
+ def insert_patch(self, kver, patch, commit = None):
+ print patch.reference
+ name = os.path.basename(patch.reference)[:80]
+ self.insert('patches',
+ {'kernel_idx': kver,
+ 'name':name,
+ 'url':patch.reference,
+ 'hash':patch.hash},
+ commit=commit)
+
+
+ def find_test(self, job_idx, subdir):
+ where = { 'job_idx':job_idx , 'subdir':subdir }
+ rows = self.select('test_idx', 'tests', where)
+ if rows:
+ return rows[0][0]
+ else:
+ return None
+
+
+ def find_tests(self, job_idx):
+ where = { 'job_idx':job_idx }
+ rows = self.select('test_idx', 'tests', where)
+ if rows:
+ return [row[0] for row in rows]
+ else:
+ return []
+
+
+ def find_job(self, tag):
+ rows = self.select('job_idx', 'jobs', {'tag': tag})
+ if rows:
+ return rows[0][0]
+ else:
+ return None
def _get_db_type():
- """Get the database type name to use from the global config."""
- get_value = global_config.global_config.get_config_value
- return "db_" + get_value("TKO", "db_type", default="mysql")
+ """Get the database type name to use from the global config."""
+ get_value = global_config.global_config.get_config_value
+ return "db_" + get_value("TKO", "db_type", default="mysql")
def _get_error_class(class_name):
- """Retrieves the appropriate error class by name from the database
- module."""
- db_module = __import__("autotest_lib.tko." + _get_db_type(),
- globals(), locals(), ["driver"])
- return getattr(db_module.driver, class_name)
+ """Retrieves the appropriate error class by name from the database
+ module."""
+ db_module = __import__("autotest_lib.tko." + _get_db_type(),
+ globals(), locals(), ["driver"])
+ return getattr(db_module.driver, class_name)
def db(*args, **dargs):
- """Creates an instance of the database class with the arguments
- provided in args and dargs, using the database type specified by
- the global configuration (defaulting to mysql)."""
- db_type = _get_db_type()
- db_module = __import__("autotest_lib.tko." + db_type, globals(),
- locals(), [db_type])
- db = getattr(db_module, db_type)(*args, **dargs)
- return db
+ """Creates an instance of the database class with the arguments
+ provided in args and dargs, using the database type specified by
+ the global configuration (defaulting to mysql)."""
+ db_type = _get_db_type()
+ db_module = __import__("autotest_lib.tko." + db_type, globals(),
+ locals(), [db_type])
+ db = getattr(db_module, db_type)(*args, **dargs)
+ return db
diff --git a/tko/db_mysql.py b/tko/db_mysql.py
index da59ecd9..13c20371 100644
--- a/tko/db_mysql.py
+++ b/tko/db_mysql.py
@@ -2,6 +2,6 @@ import MySQLdb as driver
import db
class db_mysql(db.db_sql):
- def connect(self, host, database, user, password):
- return driver.connect(host=host, user=user,
- passwd=password, db=database)
+ def connect(self, host, database, user, password):
+ return driver.connect(host=host, user=user,
+ passwd=password, db=database)
diff --git a/tko/db_postgres.py b/tko/db_postgres.py
index 166ee026..31834fa0 100644
--- a/tko/db_postgres.py
+++ b/tko/db_postgres.py
@@ -2,6 +2,6 @@ import psycopg2.psycopg1 as driver
import db
class db_postgres(db.db_sql):
- def connect(self, host, database, user, password):
- return driver.connect("dbname=%s user=%s password=%s" % \
- (database, user, password))
+ def connect(self, host, database, user, password):
+ return driver.connect("dbname=%s user=%s password=%s" % \
+ (database, user, password))
diff --git a/tko/delete_job_results b/tko/delete_job_results
index 49779887..f747510e 100644
--- a/tko/delete_job_results
+++ b/tko/delete_job_results
@@ -7,15 +7,14 @@ import db
usage = "usage: delete_job_results <job tag>"
if len(sys.argv) < 2:
- print usage
- sys.exit(2)
+ print usage
+ sys.exit(2)
tag = sys.argv[1]
resultsdir = os.path.abspath(os.path.join(thisdir, '../results', tag))
db = db.db()
if not db.find_job(tag):
- raise "Job tag %s does not exist in database" % tag
+ raise "Job tag %s does not exist in database" % tag
db.delete_job(tag)
shutil.rmtree(resultsdir)
-
diff --git a/tko/display.py b/tko/display.py
index ccf19238..f233fb8b 100755
--- a/tko/display.py
+++ b/tko/display.py
@@ -2,23 +2,23 @@ import os, re, string, sys
import frontend, reason_qualifier
color_map = {
- 'header' : '#e5e5c0', # greyish yellow
- 'blank' : '#ffffff', # white
- 'plain_text' : '#e5e5c0', # greyish yellow
- 'borders' : '#bbbbbb', # grey
- 'white' : '#ffffff', # white
- 'green' : '#66ff66', # green
- 'yellow' : '#fffc00', # yellow
- 'red' : '#ff6666', # red
-
- #### additional keys for shaded color of a box
- #### depending on stats of GOOD/FAIL
- '100pct' : '#32CD32', # green, 94% to 100% of success
- '95pct' : '#c0ff80', # step twrds yellow, 88% to 94% of success
- '90pct' : '#ffff00', # yellow, 82% to 88%
- '85pct' : '#ffc040', # 76% to 82%
- '75pct' : '#ff4040', # red, 1% to 76%
- '0pct' : '#d080d0', # violet, <1% of success
+ 'header' : '#e5e5c0', # greyish yellow
+ 'blank' : '#ffffff', # white
+ 'plain_text' : '#e5e5c0', # greyish yellow
+ 'borders' : '#bbbbbb', # grey
+ 'white' : '#ffffff', # white
+ 'green' : '#66ff66', # green
+ 'yellow' : '#fffc00', # yellow
+ 'red' : '#ff6666', # red
+
+ #### additional keys for shaded color of a box
+ #### depending on stats of GOOD/FAIL
+ '100pct' : '#32CD32', # green, 94% to 100% of success
+ '95pct' : '#c0ff80', # step towards yellow, 88% to 94% of success
+ '90pct' : '#ffff00', # yellow, 82% to 88%
+ '85pct' : '#ffc040', # 76% to 82%
+ '75pct' : '#ff4040', # red, 1% to 76%
+ '0pct' : '#d080d0', # violet, <1% of success
}
@@ -26,316 +26,315 @@ _brief_mode = False
def set_brief_mode():
- global _brief_mode
- _brief_mode = True
+ global _brief_mode
+ _brief_mode = True
def is_brief_mode():
- return _brief_mode
+ return _brief_mode
def color_keys_row():
- """ Returns one row table with samples of 'NNpct' colors
- defined in the color_map
- and numbers of corresponding %%
- """
- ### This function does not require maintenance in case of
- ### color_map augmenting - as long as
- ### color keys for box shading have names that end with 'pct'
- keys = filter(lambda key: key.endswith('pct'), color_map.keys())
- def num_pct(key):
- return int(key.replace('pct',''))
- keys.sort(key=num_pct)
- html = ''
- for key in keys:
- html+= "\t\t\t<td bgcolor =%s>&nbsp;&nbsp;&nbsp;</td>\n"\
- % color_map[key]
- hint = key.replace('pct',' %')
- if hint[0]<>'0': ## anything but 0 %
- hint = 'to ' + hint
- html+= "\t\t\t<td> %s </td>\n" % hint
-
- html = """
+ """ Returns one row table with samples of 'NNpct' colors
+ defined in the color_map
+ and numbers of corresponding %%
+ """
+ ### This function does not require maintenance when color_map
+ ### is augmented - as long as
+ ### color keys for box shading have names that end with 'pct'
+ keys = filter(lambda key: key.endswith('pct'), color_map.keys())
+ def num_pct(key):
+ return int(key.replace('pct',''))
+ keys.sort(key=num_pct)
+ html = ''
+ for key in keys:
+ html+= "\t\t\t<td bgcolor =%s>&nbsp;&nbsp;&nbsp;</td>\n"\
+ % color_map[key]
+ hint = key.replace('pct',' %')
+ if hint[0]<>'0': ## anything but 0 %
+ hint = 'to ' + hint
+ html+= "\t\t\t<td> %s </td>\n" % hint
+
+ html = """
<table width = "500" border="0" cellpadding="2" cellspacing="2">\n
- <tbody>\n
- <tr>\n
+ <tbody>\n
+ <tr>\n
%s
- </tr>\n
- </tbody>
+ </tr>\n
+ </tbody>
</table><br>
""" % html
- return html
+ return html
def calculate_html(link, data, tooltip=None, row_label=None, column_label=None):
- if not is_brief_mode():
- hover_text = '%s:%s' % (row_label, column_label)
- if data: ## cell is not empty
- hover_text += '<br>%s' % tooltip
- else:
- ## avoid "None" printed in empty cells
- data = '&nbsp;'
- html = ('<center><a class="info" href="%s">'
- '%s<span>%s</span></a></center>' %
- (link, data, hover_text))
- return html
- # no hover if embedded into AFE but links shall redirect to new window
- if data: ## cell is non empty
- html = '<a href="%s" target=NEW>%s</a>' % (link, data)
- return html
- else: ## cell is empty
- return '&nbsp;'
+ if not is_brief_mode():
+ hover_text = '%s:%s' % (row_label, column_label)
+ if data: ## cell is not empty
+ hover_text += '<br>%s' % tooltip
+ else:
+ ## avoid "None" printed in empty cells
+ data = '&nbsp;'
+ html = ('<center><a class="info" href="%s">'
+ '%s<span>%s</span></a></center>' %
+ (link, data, hover_text))
+ return html
+ # no hover if embedded into AFE but links shall redirect to new window
+ if data: ## cell is non empty
+ html = '<a href="%s" target=NEW>%s</a>' % (link, data)
+ return html
+ else: ## cell is empty
+ return '&nbsp;'
class box:
- def __init__(self, data, color_key = None, header = False, link = None,
- tooltip = None, row_label = None, column_label = None):
-
- ## in brief mode we display grid table only and nothing more
- ## - mouse hovering feature is stubbed in brief mode
- ## - any link opens new window or tab
-
- redirect = ""
- if is_brief_mode():
- ## we are acting under AFE
- ## any link shall open new window
- redirect = " target=NEW"
-
- if data:
- data = "<tt>%s</tt>" % data
-
- if link and not tooltip:
- ## FlipAxis corner, column and row headers
- self.data = ('<a href="%s"%s>%s</a>' %
- (link, redirect, data))
- else:
- self.data = calculate_html(link, data, tooltip,
- row_label, column_label)
-
- if color_map.has_key(color_key):
- self.color = color_map[color_key]
- elif header:
- self.color = color_map['header']
- elif data:
- self.color = color_map['plain_text']
- else:
- self.color = color_map['blank']
- self.header = header
-
-
- def html(self):
- if self.data:
- data = self.data
- else:
- data = '&nbsp'
-
- if self.header:
- box_html = 'th'
- else:
- box_html = 'td'
-
- return "<%s bgcolor=%s>%s</%s>" % \
- (box_html, self.color, data, box_html)
+ def __init__(self, data, color_key = None, header = False, link = None,
+ tooltip = None, row_label = None, column_label = None):
+
+ ## in brief mode we display grid table only and nothing more
+ ## - mouse hovering feature is stubbed in brief mode
+ ## - any link opens new window or tab
+
+ redirect = ""
+ if is_brief_mode():
+ ## we are acting under AFE
+ ## any link shall open new window
+ redirect = " target=NEW"
+
+ if data:
+ data = "<tt>%s</tt>" % data
+
+ if link and not tooltip:
+ ## FlipAxis corner, column and row headers
+ self.data = ('<a href="%s"%s>%s</a>' %
+ (link, redirect, data))
+ else:
+ self.data = calculate_html(link, data, tooltip,
+ row_label, column_label)
+
+ if color_map.has_key(color_key):
+ self.color = color_map[color_key]
+ elif header:
+ self.color = color_map['header']
+ elif data:
+ self.color = color_map['plain_text']
+ else:
+ self.color = color_map['blank']
+ self.header = header
+
+
+ def html(self):
+ if self.data:
+ data = self.data
+ else:
+ data = '&nbsp'
+
+ if self.header:
+ box_html = 'th'
+ else:
+ box_html = 'td'
+
+ return "<%s bgcolor=%s>%s</%s>" % \
+ (box_html, self.color, data, box_html)
def grade_from_status(status):
- # % of goodness
- # GOOD (6) -> 1
- # TEST_NA (8) is not counted
- # ## If the test doesn't PASS, it FAILS
- # else -> 0
+ # % of goodness
+ # GOOD (6) -> 1
+ # TEST_NA (8) is not counted
+ # ## If the test doesn't PASS, it FAILS
+ # else -> 0
- if status == 6:
- return 1.0
- else:
- return 0.0
+ if status == 6:
+ return 1.0
+ else:
+ return 0.0
def average_grade_from_status_count(status_count):
- average_grade = 0
- total_count = 0
- for key in status_count.keys():
- if key != 8: # TEST_NA status
- average_grade += (grade_from_status(key)
- * status_count[key])
- total_count += status_count[key]
- if total_count != 0:
- average_grade = average_grade / total_count
- else:
- average_grade = 0.0
- return average_grade
+ average_grade = 0
+ total_count = 0
+ for key in status_count.keys():
+ if key != 8: # TEST_NA status
+ average_grade += (grade_from_status(key)
+ * status_count[key])
+ total_count += status_count[key]
+ if total_count != 0:
+ average_grade = average_grade / total_count
+ else:
+ average_grade = 0.0
+ return average_grade
def shade_from_status_count(status_count):
- if not status_count:
- return None
-
- ## average_grade defines a shade of the box
- ## 0 -> violet
- ## 0.76 -> red
- ## 0.88-> yellow
- ## 1.0 -> green
- average_grade = average_grade_from_status_count(status_count)
-
- ## find appropiate keyword from color_map
- if average_grade<0.01:
- shade = '0pct'
- elif average_grade<0.75:
- shade = '75pct'
- elif average_grade<0.85:
- shade = '85pct'
- elif average_grade<0.90:
- shade = '90pct'
- elif average_grade<0.95:
- shade = '95pct'
- else:
- shade = '100pct'
-
- return shade
+ if not status_count:
+ return None
+
+ ## average_grade defines a shade of the box
+ ## 0 -> violet
+ ## 0.76 -> red
+ ## 0.88 -> yellow
+ ## 1.0 -> green
+ average_grade = average_grade_from_status_count(status_count)
+
+ ## find appropriate keyword from color_map
+ if average_grade<0.01:
+ shade = '0pct'
+ elif average_grade<0.75:
+ shade = '75pct'
+ elif average_grade<0.85:
+ shade = '85pct'
+ elif average_grade<0.90:
+ shade = '90pct'
+ elif average_grade<0.95:
+ shade = '95pct'
+ else:
+ shade = '100pct'
+
+ return shade
def status_html(db, box_data, shade):
- """
- status_count: dict mapping from status (integer key) to count
- eg. { 'GOOD' : 4, 'FAIL' : 1 }
- """
- status_count = box_data.status_count
- if 6 in status_count.keys():
- html = "%d&nbsp;/&nbsp;%d " \
- %(status_count[6],sum(status_count.values()))
- else:
- html = "%d&nbsp;/&nbsp;%d " % \
- (0, sum(status_count.values()))
-
- if box_data.reasons_list:
- reasons_list = box_data.reasons_list
- aggregated_reasons_list = \
- reason_qualifier.aggregate_reason_fields(reasons_list)
- for reason in aggregated_reasons_list:
- ## a bit of more postprocessing
- ## to look nicer in a cell
- ## in future: to do subtable within the cell
- reason = reason.replace('<br>','\n')
- reason = reason.replace('<','[').replace('>',']')
- reason = reason.replace('|','\n').replace('&',' AND ')
- reason = reason.replace('\n','<br>')
- html += '<br>' + reason
-
- tooltip = ""
- for status in sorted(status_count.keys(), reverse = True):
- status_word = db.status_word[status]
- tooltip += "%d %s " % (status_count[status], status_word)
- return (html,tooltip)
+ """
+ status_count: dict mapping from status (integer key) to count
+ e.g. { 'GOOD' : 4, 'FAIL' : 1 }
+ """
+ status_count = box_data.status_count
+ if 6 in status_count.keys():
+ html = "%d&nbsp;/&nbsp;%d " \
+ %(status_count[6],sum(status_count.values()))
+ else:
+ html = "%d&nbsp;/&nbsp;%d " % \
+ (0, sum(status_count.values()))
+
+ if box_data.reasons_list:
+ reasons_list = box_data.reasons_list
+ aggregated_reasons_list = \
+ reason_qualifier.aggregate_reason_fields(reasons_list)
+ for reason in aggregated_reasons_list:
+ ## a bit more postprocessing
+ ## to look nicer in a cell
+ ## in future: build a subtable within the cell
+ reason = reason.replace('<br>','\n')
+ reason = reason.replace('<','[').replace('>',']')
+ reason = reason.replace('|','\n').replace('&',' AND ')
+ reason = reason.replace('\n','<br>')
+ html += '<br>' + reason
+
+ tooltip = ""
+ for status in sorted(status_count.keys(), reverse = True):
+ status_word = db.status_word[status]
+ tooltip += "%d %s " % (status_count[status], status_word)
+ return (html,tooltip)
def status_count_box(db, tests, link = None):
- """
- Display a ratio of total number of GOOD tests
- to total number of all tests in the group of tests.
- More info (e.g. 10 GOOD, 2 WARN, 3 FAIL) is in tooltips
- """
- if not tests:
- return box(None, None)
+ """
+ Display a ratio of total number of GOOD tests
+ to total number of all tests in the group of tests.
+ More info (e.g. 10 GOOD, 2 WARN, 3 FAIL) is in tooltips
+ """
+ if not tests:
+ return box(None, None)
- status_count = {}
- for test in tests:
- count = status_count.get(test.status_num, 0)
- status_count[test.status_num] = count + 1
- return status_precounted_box(db, status_count, link)
+ status_count = {}
+ for test in tests:
+ count = status_count.get(test.status_num, 0)
+ status_count[test.status_num] = count + 1
+ return status_precounted_box(db, status_count, link)
def status_precounted_box(db, box_data, link = None,
- x_label = None, y_label = None):
- """
- Display a ratio of total number of GOOD tests
- to total number of all tests in the group of tests.
- More info (e.g. 10 GOOD, 2 WARN, 3 FAIL) is in tooltips
- """
- status_count = box_data.status_count
- if not status_count:
- return box(None, None)
-
- shade = shade_from_status_count(status_count)
- html,tooltip = status_html(db, box_data, shade)
- precounted_box = box(html, shade, False, link, tooltip,
- x_label, y_label)
- return precounted_box
+ x_label = None, y_label = None):
+ """
+ Display a ratio of total number of GOOD tests
+ to total number of all tests in the group of tests.
+ More info (e.g. 10 GOOD, 2 WARN, 3 FAIL) is in tooltips
+ """
+ status_count = box_data.status_count
+ if not status_count:
+ return box(None, None)
+
+ shade = shade_from_status_count(status_count)
+ html,tooltip = status_html(db, box_data, shade)
+ precounted_box = box(html, shade, False, link, tooltip,
+ x_label, y_label)
+ return precounted_box
def print_table(matrix):
- """
- matrix: list of lists of boxes, giving a matrix of data
- Each of the inner lists is a row, not a column.
+ """
+ matrix: list of lists of boxes, giving a matrix of data
+ Each of the inner lists is a row, not a column.
- Display the given matrix of data as a table.
- """
+ Display the given matrix of data as a table.
+ """
- print ('<table bgcolor="%s" cellspacing="1" cellpadding="5" '
- 'style="margin-right: 200px;">') % (
- color_map['borders'])
- for row in matrix:
- print '<tr>'
- for element in row:
- print element.html()
- print '</tr>'
- print '</table>'
+ print ('<table bgcolor="%s" cellspacing="1" cellpadding="5" '
+ 'style="margin-right: 200px;">') % (
+ color_map['borders'])
+ for row in matrix:
+ print '<tr>'
+ for element in row:
+ print element.html()
+ print '</tr>'
+ print '</table>'
def sort_tests(tests):
- kernel_order = ['patch', 'config', 'build', 'mkinitrd', 'install']
+ kernel_order = ['patch', 'config', 'build', 'mkinitrd', 'install']
- results = []
- for kernel_op in kernel_order:
- test = 'kernel.' + kernel_op
- if tests.count(test):
- results.append(test)
- tests.remove(test)
- if tests.count('boot'):
- results.append('boot')
- tests.remove('boot')
- return results + sorted(tests)
+ results = []
+ for kernel_op in kernel_order:
+ test = 'kernel.' + kernel_op
+ if tests.count(test):
+ results.append(test)
+ tests.remove(test)
+ if tests.count('boot'):
+ results.append('boot')
+ tests.remove('boot')
+ return results + sorted(tests)
def print_main_header():
- hover_css="""\
+ hover_css="""\
a.info{
- position:relative; /*this is the key*/
- z-index:1
- color:#000;
- text-decoration:none}
+position:relative; /*this is the key*/
+z-index:1
+color:#000;
+text-decoration:none}
a.info:hover{z-index:25;}
a.info span{display: none}
a.info:hover span{ /*the span will display just on :hover state*/
- display:block;
- position:absolute;
- top:1em; left:1em;
- min-width: 100px;
- overflow: visible;
- border:1px solid #036;
- background-color:#fff; color:#000;
- text-align: left
+display:block;
+position:absolute;
+top:1em; left:1em;
+min-width: 100px;
+overflow: visible;
+border:1px solid #036;
+background-color:#fff; color:#000;
+text-align: left
}
"""
- print '<head><style type="text/css">'
- print 'a { text-decoration: none }'
- print hover_css
- print '</style></head>'
- print '<h2>'
- print '<a href="compose_query.cgi">Functional</a>'
- print '&nbsp&nbsp&nbsp'
- print '<a href="machine_benchmark.cgi">Performance</a>'
- print '&nbsp&nbsp&nbsp'
- print '<a href="http://test.kernel.org/autotest">[about Autotest]</a>'
- print '</h2><p>'
+ print '<head><style type="text/css">'
+ print 'a { text-decoration: none }'
+ print hover_css
+ print '</style></head>'
+ print '<h2>'
+ print '<a href="compose_query.cgi">Functional</a>'
+ print '&nbsp&nbsp&nbsp'
+ print '<a href="machine_benchmark.cgi">Performance</a>'
+ print '&nbsp&nbsp&nbsp'
+ print '<a href="http://test.kernel.org/autotest">[about Autotest]</a>'
+ print '</h2><p>'
def group_name(group):
- name = re.sub('_', '<br>', group.name)
- if re.search('/', name):
- (owner, machine) = name.split('/', 1)
- name = owner + '<br>' + machine
- return name
-
+ name = re.sub('_', '<br>', group.name)
+ if re.search('/', name):
+ (owner, machine) = name.split('/', 1)
+ name = owner + '<br>' + machine
+ return name
diff --git a/tko/frontend.py b/tko/frontend.py
index 00e7fed5..45de298f 100755
--- a/tko/frontend.py
+++ b/tko/frontend.py
@@ -10,299 +10,299 @@ import kernel_versions
root_url_file = os.path.join(tko, '.root_url')
if os.path.exists(root_url_file):
- html_root = open(root_url_file, 'r').readline().rstrip()
+ html_root = open(root_url_file, 'r').readline().rstrip()
else:
- html_root = '/results/'
+ html_root = '/results/'
class status_cell:
- # One cell in the matrix of status data.
- def __init__(self):
- # Count is a dictionary: status -> count of tests with status
- self.status_count = {}
- self.reasons_list = []
- self.job_tag = None
- self.job_tag_count = 0
-
-
- def add(self, status, count, job_tags, reasons = None):
- assert count > 0
-
- self.job_tag = job_tags
- self.job_tag_count += count
- if self.job_tag_count > 1:
- self.job_tag = None
-
- self.status_count[status] = count
- ### status == 6 means 'GOOD'
- if status != 6:
- ## None implies sorting problems and extra CRs in a cell
- if reasons:
- self.reasons_list.append(reasons)
+ # One cell in the matrix of status data.
+ def __init__(self):
+ # Count is a dictionary: status -> count of tests with status
+ self.status_count = {}
+ self.reasons_list = []
+ self.job_tag = None
+ self.job_tag_count = 0
+
+
+ def add(self, status, count, job_tags, reasons = None):
+ assert count > 0
+
+ self.job_tag = job_tags
+ self.job_tag_count += count
+ if self.job_tag_count > 1:
+ self.job_tag = None
+
+ self.status_count[status] = count
+ ### status == 6 means 'GOOD'
+ if status != 6:
+ ## None implies sorting problems and extra CRs in a cell
+ if reasons:
+ self.reasons_list.append(reasons)
class status_data:
- def __init__(self, sql_rows, x_field, y_field, query_reasons = False):
- data = {}
- y_values = set()
-
- # Walk through the query, filing all results by x, y info
- for row in sql_rows:
- if query_reasons:
- (x,y, status, count, job_tags, reasons) = row
- else:
- (x,y, status, count, job_tags) = row
- reasons = None
- if not data.has_key(x):
- data[x] = {}
- if not data[x].has_key(y):
- y_values.add(y)
- data[x][y] = status_cell()
- data[x][y].add(status, count, job_tags, reasons)
-
- # 2-d hash of data - [x-value][y-value]
- self.data = data
- # List of possible columns (x-values)
- self.x_values = smart_sort(data.keys(), x_field)
- # List of rows columns (y-values)
- self.y_values = smart_sort(list(y_values), y_field)
- nCells = len(self.y_values)*len(self.x_values)
- if nCells > MAX_CELLS:
- msg = 'Exceeded allowed number of cells in a table'
- raise db.MySQLTooManyRows(msg)
-
+ def __init__(self, sql_rows, x_field, y_field, query_reasons = False):
+ data = {}
+ y_values = set()
+
+ # Walk through the query, filing all results by x, y info
+ for row in sql_rows:
+ if query_reasons:
+ (x,y, status, count, job_tags, reasons) = row
+ else:
+ (x,y, status, count, job_tags) = row
+ reasons = None
+ if not data.has_key(x):
+ data[x] = {}
+ if not data[x].has_key(y):
+ y_values.add(y)
+ data[x][y] = status_cell()
+ data[x][y].add(status, count, job_tags, reasons)
+
+ # 2-d hash of data - [x-value][y-value]
+ self.data = data
+ # List of possible columns (x-values)
+ self.x_values = smart_sort(data.keys(), x_field)
+ # List of rows columns (y-values)
+ self.y_values = smart_sort(list(y_values), y_field)
+ nCells = len(self.y_values)*len(self.x_values)
+ if nCells > MAX_CELLS:
+ msg = 'Exceeded allowed number of cells in a table'
+ raise db.MySQLTooManyRows(msg)
+
def get_matrix_data(db_obj, x_axis, y_axis, where = None,
- query_reasons = False):
- # Searches on the test_view table - x_axis and y_axis must both be
- # column names in that table.
- x_field = test_view_field_dict[x_axis]
- y_field = test_view_field_dict[y_axis]
- query_fields_list = [x_field, y_field, 'status','COUNT(status)']
- query_fields_list.append("LEFT(GROUP_CONCAT(job_tag),100)")
- if query_reasons:
- query_fields_list.append(
- "LEFT(GROUP_CONCAT(DISTINCT reason SEPARATOR '|'),500)"
- )
- fields = ','.join(query_fields_list)
-
- group_by = '%s, %s, status' % (x_field, y_field)
- rows = db_obj.select(fields, 'test_view',
- where=where, group_by=group_by, max_rows = MAX_RECORDS)
- return status_data(rows, x_field, y_field, query_reasons)
+ query_reasons = False):
+ # Searches on the test_view table - x_axis and y_axis must both be
+ # column names in that table.
+ x_field = test_view_field_dict[x_axis]
+ y_field = test_view_field_dict[y_axis]
+ query_fields_list = [x_field, y_field, 'status','COUNT(status)']
+ query_fields_list.append("LEFT(GROUP_CONCAT(job_tag),100)")
+ if query_reasons:
+ query_fields_list.append(
+ "LEFT(GROUP_CONCAT(DISTINCT reason SEPARATOR '|'),500)"
+ )
+ fields = ','.join(query_fields_list)
+
+ group_by = '%s, %s, status' % (x_field, y_field)
+ rows = db_obj.select(fields, 'test_view',
+ where=where, group_by=group_by, max_rows = MAX_RECORDS)
+ return status_data(rows, x_field, y_field, query_reasons)
# Dictionary used simply for fast lookups from short reference names for users
# to fieldnames in test_view
test_view_field_dict = {
- 'kernel' : 'kernel_printable',
- 'hostname' : 'machine_hostname',
- 'test' : 'test',
- 'label' : 'job_label',
- 'machine_group' : 'machine_group',
- 'reason' : 'reason',
- 'tag' : 'job_tag',
- 'user' : 'job_username',
- 'status' : 'status_word',
- 'time' : 'test_finished_time',
- 'time_daily' : 'DATE(test_finished_time)'
+ 'kernel' : 'kernel_printable',
+ 'hostname' : 'machine_hostname',
+ 'test' : 'test',
+ 'label' : 'job_label',
+ 'machine_group' : 'machine_group',
+ 'reason' : 'reason',
+ 'tag' : 'job_tag',
+ 'user' : 'job_username',
+ 'status' : 'status_word',
+ 'time' : 'test_finished_time',
+ 'time_daily' : 'DATE(test_finished_time)'
}
def smart_sort(list, field):
- if field == 'kernel_printable':
- def kernel_encode(kernel):
- return kernel_versions.version_encode(kernel)
- list.sort(key = kernel_encode, reverse = True)
- return list
- ## old records may contain time=None
- ## make None comparable with timestamp datetime or date
- elif field == 'test_finished_time':
- def convert_None_to_datetime(date_time):
- if not date_time:
- return datetime.datetime(1970, 1, 1, 0, 0, 0)
- else:
- return date_time
- list = map(convert_None_to_datetime, list)
- elif field == 'DATE(test_finished_time)':
- def convert_None_to_date(date):
- if not date:
- return datetime.date(1970, 1, 1)
- else:
- return date
- list = map(convert_None_to_date, list)
- list.sort()
- return list
+ if field == 'kernel_printable':
+ def kernel_encode(kernel):
+ return kernel_versions.version_encode(kernel)
+ list.sort(key = kernel_encode, reverse = True)
+ return list
+ ## old records may contain time=None
+ ## make None comparable with timestamp datetime or date
+ elif field == 'test_finished_time':
+ def convert_None_to_datetime(date_time):
+ if not date_time:
+ return datetime.datetime(1970, 1, 1, 0, 0, 0)
+ else:
+ return date_time
+ list = map(convert_None_to_datetime, list)
+ elif field == 'DATE(test_finished_time)':
+ def convert_None_to_date(date):
+ if not date:
+ return datetime.date(1970, 1, 1)
+ else:
+ return date
+ list = map(convert_None_to_date, list)
+ list.sort()
+ return list
class group:
- @classmethod
- def select(klass, db):
- """Return all possible machine groups"""
- rows = db.select('distinct machine_group', 'machines',
- 'machine_group is not null')
- groupnames = sorted([row[0] for row in rows])
- return [klass(db, groupname) for groupname in groupnames]
+ @classmethod
+ def select(klass, db):
+ """Return all possible machine groups"""
+ rows = db.select('distinct machine_group', 'machines',
+ 'machine_group is not null')
+ groupnames = sorted([row[0] for row in rows])
+ return [klass(db, groupname) for groupname in groupnames]
- def __init__(self, db, name):
- self.name = name
- self.db = db
+ def __init__(self, db, name):
+ self.name = name
+ self.db = db
- def machines(self):
- return machine.select(self.db, { 'machine_group' : self.name })
+ def machines(self):
+ return machine.select(self.db, { 'machine_group' : self.name })
- def tests(self, where = {}):
- values = [self.name]
- sql = 't inner join machines m on m.machine_idx=t.machine_idx'
- sql += ' where m.machine_group=%s'
- for key in where.keys():
- sql += ' and %s=%%s' % key
- values.append(where[key])
- return test.select_sql(self.db, sql, values)
+ def tests(self, where = {}):
+ values = [self.name]
+ sql = 't inner join machines m on m.machine_idx=t.machine_idx'
+ sql += ' where m.machine_group=%s'
+ for key in where.keys():
+ sql += ' and %s=%%s' % key
+ values.append(where[key])
+ return test.select_sql(self.db, sql, values)
class machine:
- @classmethod
- def select(klass, db, where = {}):
- fields = ['machine_idx', 'hostname', 'machine_group', 'owner']
- machines = []
- for row in db.select(','.join(fields), 'machines', where):
- machines.append(klass(db, *row))
- return machines
+ @classmethod
+ def select(klass, db, where = {}):
+ fields = ['machine_idx', 'hostname', 'machine_group', 'owner']
+ machines = []
+ for row in db.select(','.join(fields), 'machines', where):
+ machines.append(klass(db, *row))
+ return machines
- def __init__(self, db, idx, hostname, group, owner):
- self.db = db
- self.idx = idx
- self.hostname = hostname
- self.group = group
- self.owner = owner
+ def __init__(self, db, idx, hostname, group, owner):
+ self.db = db
+ self.idx = idx
+ self.hostname = hostname
+ self.group = group
+ self.owner = owner
class kernel:
- @classmethod
- def select(klass, db, where = {}):
- fields = ['kernel_idx', 'kernel_hash', 'base', 'printable']
- rows = db.select(','.join(fields), 'kernels', where)
- return [klass(db, *row) for row in rows]
+ @classmethod
+ def select(klass, db, where = {}):
+ fields = ['kernel_idx', 'kernel_hash', 'base', 'printable']
+ rows = db.select(','.join(fields), 'kernels', where)
+ return [klass(db, *row) for row in rows]
- def __init__(self, db, idx, hash, base, printable):
- self.db = db
- self.idx = idx
- self.hash = hash
- self.base = base
- self.printable = printable
- self.patches = [] # THIS SHOULD PULL IN PATCHES!
+ def __init__(self, db, idx, hash, base, printable):
+ self.db = db
+ self.idx = idx
+ self.hash = hash
+ self.base = base
+ self.printable = printable
+ self.patches = [] # THIS SHOULD PULL IN PATCHES!
class test:
- @classmethod
- def select(klass, db, where = {}, wherein = {}, distinct = False):
- fields = ['test_idx', 'job_idx', 'test', 'subdir',
- 'kernel_idx', 'status', 'reason', 'machine_idx']
- tests = []
- for row in db.select(','.join(fields), 'tests', where,
- wherein,distinct):
- tests.append(klass(db, *row))
- return tests
-
-
- @classmethod
- def select_sql(klass, db, sql, values):
- fields = ['test_idx', 'job_idx', 'test', 'subdir',
- 'kernel_idx', 'status', 'reason', 'machine_idx']
- fields = ['t.'+field for field in fields]
- rows = db.select_sql(','.join(fields), 'tests', sql, values)
- return [klass(db, *row) for row in rows]
-
-
- def __init__(self, db, test_idx, job_idx, testname, subdir, kernel_idx,
- status_num, reason, machine_idx):
- self.idx = test_idx
- self.job = job(db, job_idx)
- self.testname = testname
- self.subdir = subdir
- self.kernel_idx = kernel_idx
- self.__kernel = None
- self.__iterations = None
- self.machine_idx = machine_idx
- self.__machine = None
- self.status_num = status_num
- self.status_word = db.status_word[status_num]
- self.reason = reason
- self.db = db
- if self.subdir:
- self.url = html_root + self.job.tag + '/' + self.subdir
- else:
- self.url = None
-
-
- def iterations(self):
- """
- Caching function for iterations
- """
- if not self.__iterations:
- self.__iterations = {}
- # A dictionary - dict{key} = [value1, value2, ....]
- where = {'test_idx' : self.idx}
- for i in iteration.select(self.db, where):
- if self.__iterations.has_key(i.key):
- self.__iterations[i.key].append(i.value)
- else:
- self.__iterations[i.key] = [i.value]
- return self.__iterations
-
-
- def kernel(self):
- """
- Caching function for kernels
- """
- if not self.__kernel:
- where = {'kernel_idx' : self.kernel_idx}
- self.__kernel = kernel.select(self.db, where)[0]
- return self.__kernel
-
-
- def machine(self):
- """
- Caching function for kernels
- """
- if not self.__machine:
- where = {'machine_idx' : self.machine_idx}
- self.__machine = machine.select(self.db, where)[0]
- return self.__machine
+ @classmethod
+ def select(klass, db, where = {}, wherein = {}, distinct = False):
+ fields = ['test_idx', 'job_idx', 'test', 'subdir',
+ 'kernel_idx', 'status', 'reason', 'machine_idx']
+ tests = []
+ for row in db.select(','.join(fields), 'tests', where,
+ wherein,distinct):
+ tests.append(klass(db, *row))
+ return tests
+
+
+ @classmethod
+ def select_sql(klass, db, sql, values):
+ fields = ['test_idx', 'job_idx', 'test', 'subdir',
+ 'kernel_idx', 'status', 'reason', 'machine_idx']
+ fields = ['t.'+field for field in fields]
+ rows = db.select_sql(','.join(fields), 'tests', sql, values)
+ return [klass(db, *row) for row in rows]
+
+
+ def __init__(self, db, test_idx, job_idx, testname, subdir, kernel_idx,
+ status_num, reason, machine_idx):
+ self.idx = test_idx
+ self.job = job(db, job_idx)
+ self.testname = testname
+ self.subdir = subdir
+ self.kernel_idx = kernel_idx
+ self.__kernel = None
+ self.__iterations = None
+ self.machine_idx = machine_idx
+ self.__machine = None
+ self.status_num = status_num
+ self.status_word = db.status_word[status_num]
+ self.reason = reason
+ self.db = db
+ if self.subdir:
+ self.url = html_root + self.job.tag + '/' + self.subdir
+ else:
+ self.url = None
+
+
+ def iterations(self):
+ """
+ Caching function for iterations
+ """
+ if not self.__iterations:
+ self.__iterations = {}
+ # A dictionary - dict{key} = [value1, value2, ....]
+ where = {'test_idx' : self.idx}
+ for i in iteration.select(self.db, where):
+ if self.__iterations.has_key(i.key):
+ self.__iterations[i.key].append(i.value)
+ else:
+ self.__iterations[i.key] = [i.value]
+ return self.__iterations
+
+
+ def kernel(self):
+ """
+ Caching function for kernels
+ """
+ if not self.__kernel:
+ where = {'kernel_idx' : self.kernel_idx}
+ self.__kernel = kernel.select(self.db, where)[0]
+ return self.__kernel
+
+
+ def machine(self):
+ """
+ Caching function for machines
+ """
+ if not self.__machine:
+ where = {'machine_idx' : self.machine_idx}
+ self.__machine = machine.select(self.db, where)[0]
+ return self.__machine
class job:
- def __init__(self, db, job_idx):
- where = {'job_idx' : job_idx}
- rows = db.select('tag, machine_idx', 'jobs', where)
- if not rows:
- return None
- (self.tag, self.machine_idx) = rows[0]
- self.job_idx = job_idx
-
-
+ def __init__(self, db, job_idx):
+ where = {'job_idx' : job_idx}
+ rows = db.select('tag, machine_idx', 'jobs', where)
+ if not rows:
+ return None
+ (self.tag, self.machine_idx) = rows[0]
+ self.job_idx = job_idx
+
+
class iteration:
- @classmethod
- def select(klass, db, where):
- fields = ['iteration', 'attribute', 'value']
- iterations = []
- rows = db.select(','.join(fields), 'iteration_result', where)
- for row in rows:
- iterations.append(klass(*row))
- return iterations
+ @classmethod
+ def select(klass, db, where):
+ fields = ['iteration', 'attribute', 'value']
+ iterations = []
+ rows = db.select(','.join(fields), 'iteration_result', where)
+ for row in rows:
+ iterations.append(klass(*row))
+ return iterations
- def __init__(self, iteration, key, value):
- self.iteration = iteration
- self.key = key
- self.value = value
+ def __init__(self, iteration, key, value):
+ self.iteration = iteration
+ self.key = key
+ self.value = value
# class patch:
-# def __init__(self):
-# self.spec = None
+# def __init__(self):
+# self.spec = None
diff --git a/tko/machine_load b/tko/machine_load
index 98b0fa9d..b8c04721 100755
--- a/tko/machine_load
+++ b/tko/machine_load
@@ -4,8 +4,8 @@ import db
db = db.db()
for line in open('machines', 'r').readlines():
- (machine, group) = line.rstrip().split()
- print 'X %s Y %s' % (machine, group)
- set = { 'machine_group' : group }
- where = { 'hostname' : machine }
- db.update('machines', set, where)
+ (machine, group) = line.rstrip().split()
+ print 'X %s Y %s' % (machine, group)
+ set = { 'machine_group' : group }
+ where = { 'hostname' : machine }
+ db.update('machines', set, where)
diff --git a/tko/migrations/001_initial_db.py b/tko/migrations/001_initial_db.py
index 2c19abc1..065e5c5c 100755
--- a/tko/migrations/001_initial_db.py
+++ b/tko/migrations/001_initial_db.py
@@ -1,31 +1,31 @@
import os
required_tables = ('machines', 'jobs', 'patches', 'tests', 'test_attributes',
- 'iteration_result')
+ 'iteration_result')
def migrate_up(manager):
- manager.execute("SHOW TABLES")
- tables = [row[0] for row in manager.cursor.fetchall()]
- db_initialized = True
- for table in required_tables:
- if table not in tables:
- db_initialized = False
- break
- if not db_initialized:
- response = raw_input(
- 'Your tko database does not appear to be initialized. Do '
- 'you want to recreate it (this will result in loss of any '
- 'existing data) (yes/No)? ')
- if response != 'yes':
- raise Exception('User has chosen to abort migration')
-
- manager.execute_script(CREATE_DB_SQL)
-
- manager.create_migrate_table()
+ manager.execute("SHOW TABLES")
+ tables = [row[0] for row in manager.cursor.fetchall()]
+ db_initialized = True
+ for table in required_tables:
+ if table not in tables:
+ db_initialized = False
+ break
+ if not db_initialized:
+ response = raw_input(
+ 'Your tko database does not appear to be initialized. Do '
+ 'you want to recreate it (this will result in loss of any '
+ 'existing data) (yes/No)? ')
+ if response != 'yes':
+ raise Exception('User has chosen to abort migration')
+
+ manager.execute_script(CREATE_DB_SQL)
+
+ manager.create_migrate_table()
def migrate_down(manager):
- manager.execute_script(DROP_DB_SQL)
+ manager.execute_script(DROP_DB_SQL)
DROP_DB_SQL = """\
@@ -48,44 +48,44 @@ DROP TABLE IF EXISTS status;
CREATE_DB_SQL = DROP_DB_SQL + """\
-- status key
CREATE TABLE status (
-status_idx int(10) unsigned NOT NULL auto_increment PRIMARY KEY , -- numerical status
-word VARCHAR(10) -- status word
+status_idx int(10) unsigned NOT NULL auto_increment PRIMARY KEY , -- numerical status
+word VARCHAR(10) -- status word
) TYPE=InnoDB;
-- kernel versions
CREATE TABLE kernels (
kernel_idx int(10) unsigned NOT NULL auto_increment PRIMARY KEY,
-kernel_hash VARCHAR(35), -- Hash of base + all patches
-base VARCHAR(30), -- Base version without patches
-printable VARCHAR(100) -- Full version with patches
+kernel_hash VARCHAR(35), -- Hash of base + all patches
+base VARCHAR(30), -- Base version without patches
+printable VARCHAR(100) -- Full version with patches
) TYPE=InnoDB;
-- machines/hosts table
CREATE TABLE machines (
machine_idx int(10) unsigned NOT NULL auto_increment PRIMARY KEY,
-hostname VARCHAR(100) unique KEY, -- hostname
-machine_group VARCHAR(80), -- group name
-owner VARCHAR(80) -- owner name
+hostname VARCHAR(100) unique KEY, -- hostname
+machine_group VARCHAR(80), -- group name
+owner VARCHAR(80) -- owner name
) TYPE=InnoDB;
-- main jobs table
CREATE TABLE jobs (
-job_idx int(10) unsigned NOT NULL auto_increment PRIMARY KEY, -- index number
-tag VARCHAR(100) unique KEY, -- job key
+job_idx int(10) unsigned NOT NULL auto_increment PRIMARY KEY, -- index number
+tag VARCHAR(100) unique KEY, -- job key
label VARCHAR(100), -- job label assigned by user
KEY (label),
-username VARCHAR(80), -- user name
+username VARCHAR(80), -- user name
KEY (username),
-machine_idx INT(10) unsigned NOT NULL, -- reference to machine table
+machine_idx INT(10) unsigned NOT NULL, -- reference to machine table
KEY (machine_idx),
FOREIGN KEY (machine_idx) REFERENCES machines(machine_idx) ON DELETE CASCADE
) TYPE=InnoDB;
-- One entry per patch used, anywhere
CREATE TABLE patches (
-kernel_idx INT(10) unsigned NOT NULL, -- index number
-name VARCHAR(80), -- short name
-url VARCHAR(300), -- full URL
+kernel_idx INT(10) unsigned NOT NULL, -- index number
+name VARCHAR(80), -- short name
+url VARCHAR(300), -- full URL
hash VARCHAR(35),
KEY (kernel_idx),
FOREIGN KEY (kernel_idx) REFERENCES kernels(kernel_idx) ON DELETE CASCADE
@@ -93,18 +93,18 @@ FOREIGN KEY (kernel_idx) REFERENCES kernels(kernel_idx) ON DELETE CASCADE
-- test functional results
CREATE TABLE tests (
-test_idx int(10) unsigned NOT NULL auto_increment PRIMARY KEY, -- index number
-job_idx INTEGER, -- ref to job table
-test VARCHAR(30), -- name of test
-subdir VARCHAR(60), -- subdirectory name
-kernel_idx INT(10) unsigned NOT NULL, -- kernel test was AGAINST
+test_idx int(10) unsigned NOT NULL auto_increment PRIMARY KEY, -- index number
+job_idx INTEGER, -- ref to job table
+test VARCHAR(30), -- name of test
+subdir VARCHAR(60), -- subdirectory name
+kernel_idx INT(10) unsigned NOT NULL, -- kernel test was AGAINST
KEY (kernel_idx),
FOREIGN KEY (kernel_idx) REFERENCES kernels(kernel_idx) ON DELETE CASCADE,
-status int(10) unsigned NOT NULL, -- test status
+status int(10) unsigned NOT NULL, -- test status
KEY (status),
FOREIGN KEY (status) REFERENCES status(status_idx) ON DELETE CASCADE,
-reason VARCHAR(100), -- reason for test status
-machine_idx INT(10) unsigned NOT NULL, -- reference to machine table
+reason VARCHAR(100), -- reason for test status
+machine_idx INT(10) unsigned NOT NULL, -- reference to machine table
KEY (machine_idx),
FOREIGN KEY (machine_idx) REFERENCES machines(machine_idx) ON DELETE CASCADE,
invalid BOOL NOT NULL
@@ -112,49 +112,49 @@ invalid BOOL NOT NULL
-- test attributes (key value pairs at a test level)
CREATE TABLE test_attributes (
-test_idx int(10) unsigned NOT NULL, -- ref to test table
+test_idx int(10) unsigned NOT NULL, -- ref to test table
FOREIGN KEY (test_idx) REFERENCES tests(test_idx) ON DELETE CASCADE,
-attribute VARCHAR(30), -- attribute name (e.g. 'version')
-value VARCHAR(100), -- attribute value
+attribute VARCHAR(30), -- attribute name (e.g. 'version')
+value VARCHAR(100), -- attribute value
KEY `test_idx` (`test_idx`)
) TYPE=InnoDB;
-- test performance results
CREATE TABLE iteration_result(
-test_idx int(10) unsigned NOT NULL, -- ref to test table
+test_idx int(10) unsigned NOT NULL, -- ref to test table
FOREIGN KEY (test_idx) REFERENCES tests(test_idx) ON DELETE CASCADE,
-iteration INTEGER, -- integer
-attribute VARCHAR(30), -- attribute name (e.g. 'throughput')
-value FLOAT, -- attribute value (eg 700.1)
+iteration INTEGER, -- integer
+attribute VARCHAR(30), -- attribute name (e.g. 'throughput')
+value FLOAT, -- attribute value (eg 700.1)
KEY `test_idx` (`test_idx`)
) TYPE=InnoDB;
-- BRRD syncronization
CREATE TABLE brrd_sync (
-test_idx int(10) unsigned NOT NULL, -- ref to test table
+test_idx int(10) unsigned NOT NULL, -- ref to test table
FOREIGN KEY (test_idx) REFERENCES tests(test_idx) ON DELETE CASCADE
) TYPE=InnoDB;
-- test_view (to make life easier for people trying to mine data)
CREATE VIEW test_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
@@ -163,27 +163,27 @@ INNER JOIN status ON status.status_idx = tests.status;
-- perf_view (to make life easier for people trying to mine performance data)
CREATE VIEW perf_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word,
- iteration_result.iteration,
- iteration_result.attribute AS iteration_key,
- iteration_result.value AS iteration_value
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word,
+ iteration_result.iteration,
+ iteration_result.attribute AS iteration_key,
+ iteration_result.value AS iteration_value
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
diff --git a/tko/migrations/002_add_job_timestamps.py b/tko/migrations/002_add_job_timestamps.py
index 8bbb83a3..7cd50043 100644
--- a/tko/migrations/002_add_job_timestamps.py
+++ b/tko/migrations/002_add_job_timestamps.py
@@ -1,11 +1,11 @@
def migrate_up(manager):
- manager.execute_script(ADD_COLUMNS_SQL)
- manager.execute_script(ALTER_VIEWS_UP_SQL)
+ manager.execute_script(ADD_COLUMNS_SQL)
+ manager.execute_script(ALTER_VIEWS_UP_SQL)
def migrate_down(manager):
- manager.execute_script(DROP_COLUMNS_SQL)
- manager.execute_script(ALTER_VIEWS_DOWN_SQL)
+ manager.execute_script(DROP_COLUMNS_SQL)
+ manager.execute_script(ALTER_VIEWS_DOWN_SQL)
ADD_COLUMNS_SQL = """\
@@ -22,27 +22,27 @@ ALTER TABLE jobs DROP queued_time, DROP started_time, DROP finished_time;
ALTER_VIEWS_UP_SQL = """\
ALTER VIEW test_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- jobs.queued_time AS job_queued_time,
- jobs.started_time AS job_started_time,
- jobs.finished_time AS job_finished_time,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ jobs.queued_time AS job_queued_time,
+ jobs.started_time AS job_started_time,
+ jobs.finished_time AS job_finished_time,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
@@ -51,30 +51,30 @@ INNER JOIN status ON status.status_idx = tests.status;
-- perf_view (to make life easier for people trying to mine performance data)
ALTER VIEW perf_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- jobs.queued_time AS job_queued_time,
- jobs.started_time AS job_started_time,
- jobs.finished_time AS job_finished_time,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word,
- iteration_result.iteration,
- iteration_result.attribute AS iteration_key,
- iteration_result.value AS iteration_value
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ jobs.queued_time AS job_queued_time,
+ jobs.started_time AS job_started_time,
+ jobs.finished_time AS job_finished_time,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word,
+ iteration_result.iteration,
+ iteration_result.attribute AS iteration_key,
+ iteration_result.value AS iteration_value
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
@@ -86,24 +86,24 @@ INNER JOIN iteration_result ON iteration_result.test_idx = tests.kernel_idx;
ALTER_VIEWS_DOWN_SQL = """\
ALTER VIEW test_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
@@ -112,27 +112,27 @@ INNER JOIN status ON status.status_idx = tests.status;
-- perf_view (to make life easier for people trying to mine performance data)
ALTER VIEW perf_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word,
- iteration_result.iteration,
- iteration_result.attribute AS iteration_key,
- iteration_result.value AS iteration_value
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word,
+ iteration_result.iteration,
+ iteration_result.attribute AS iteration_key,
+ iteration_result.value AS iteration_value
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
diff --git a/tko/migrations/003_add_test_timestamps.py b/tko/migrations/003_add_test_timestamps.py
index e9148cb4..fee4cabc 100644
--- a/tko/migrations/003_add_test_timestamps.py
+++ b/tko/migrations/003_add_test_timestamps.py
@@ -1,11 +1,11 @@
def migrate_up(manager):
- manager.execute_script(ADD_COLUMN_SQL)
- manager.execute_script(ALTER_VIEWS_UP_SQL)
+ manager.execute_script(ADD_COLUMN_SQL)
+ manager.execute_script(ALTER_VIEWS_UP_SQL)
def migrate_down(manager):
- manager.execute_script(DROP_COLUMN_SQL)
- manager.execute_script(ALTER_VIEWS_DOWN_SQL)
+ manager.execute_script(DROP_COLUMN_SQL)
+ manager.execute_script(ALTER_VIEWS_DOWN_SQL)
ADD_COLUMN_SQL = """\
@@ -18,28 +18,28 @@ ALTER TABLE tests DROP finished_time;
ALTER_VIEWS_UP_SQL = """\
ALTER VIEW test_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- tests.finished_time AS test_finished_time,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- jobs.queued_time AS job_queued_time,
- jobs.started_time AS job_started_time,
- jobs.finished_time AS job_finished_time,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ tests.finished_time AS test_finished_time,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ jobs.queued_time AS job_queued_time,
+ jobs.started_time AS job_started_time,
+ jobs.finished_time AS job_finished_time,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
@@ -48,31 +48,31 @@ INNER JOIN status ON status.status_idx = tests.status;
-- perf_view (to make life easier for people trying to mine performance data)
ALTER VIEW perf_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- tests.finished_time AS test_finished_time,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- jobs.queued_time AS job_queued_time,
- jobs.started_time AS job_started_time,
- jobs.finished_time AS job_finished_time,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word,
- iteration_result.iteration,
- iteration_result.attribute AS iteration_key,
- iteration_result.value AS iteration_value
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ tests.finished_time AS test_finished_time,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ jobs.queued_time AS job_queued_time,
+ jobs.started_time AS job_started_time,
+ jobs.finished_time AS job_finished_time,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word,
+ iteration_result.iteration,
+ iteration_result.attribute AS iteration_key,
+ iteration_result.value AS iteration_value
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
@@ -84,27 +84,27 @@ INNER JOIN iteration_result ON iteration_result.test_idx = tests.kernel_idx;
ALTER_VIEWS_DOWN_SQL = """\
ALTER VIEW test_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- jobs.queued_time AS job_queued_time,
- jobs.started_time AS job_started_time,
- jobs.finished_time AS job_finished_time,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ jobs.queued_time AS job_queued_time,
+ jobs.started_time AS job_started_time,
+ jobs.finished_time AS job_finished_time,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
@@ -113,30 +113,30 @@ INNER JOIN status ON status.status_idx = tests.status;
-- perf_view (to make life easier for people trying to mine performance data)
ALTER VIEW perf_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- jobs.queued_time AS job_queued_time,
- jobs.started_time AS job_started_time,
- jobs.finished_time AS job_finished_time,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word,
- iteration_result.iteration,
- iteration_result.attribute AS iteration_key,
- iteration_result.value AS iteration_value
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ jobs.queued_time AS job_queued_time,
+ jobs.started_time AS job_started_time,
+ jobs.finished_time AS job_finished_time,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word,
+ iteration_result.iteration,
+ iteration_result.attribute AS iteration_key,
+ iteration_result.value AS iteration_value
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
diff --git a/tko/migrations/004_add_test_started.py b/tko/migrations/004_add_test_started.py
index f3d9f3d9..cf910644 100644
--- a/tko/migrations/004_add_test_started.py
+++ b/tko/migrations/004_add_test_started.py
@@ -1,11 +1,11 @@
def migrate_up(manager):
- manager.execute_script(ADD_COLUMN_SQL)
- manager.execute_script(ALTER_VIEWS_UP_SQL)
+ manager.execute_script(ADD_COLUMN_SQL)
+ manager.execute_script(ALTER_VIEWS_UP_SQL)
def migrate_down(manager):
- manager.execute_script(DROP_COLUMN_SQL)
- manager.execute_script(ALTER_VIEWS_DOWN_SQL)
+ manager.execute_script(DROP_COLUMN_SQL)
+ manager.execute_script(ALTER_VIEWS_DOWN_SQL)
ADD_COLUMN_SQL = """\
@@ -18,29 +18,29 @@ ALTER TABLE tests DROP started_time;
ALTER_VIEWS_UP_SQL = """\
ALTER VIEW test_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
tests.started_time AS test_started_time,
- tests.finished_time AS test_finished_time,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- jobs.queued_time AS job_queued_time,
- jobs.started_time AS job_started_time,
- jobs.finished_time AS job_finished_time,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word
+ tests.finished_time AS test_finished_time,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ jobs.queued_time AS job_queued_time,
+ jobs.started_time AS job_started_time,
+ jobs.finished_time AS job_finished_time,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
@@ -49,32 +49,32 @@ INNER JOIN status ON status.status_idx = tests.status;
-- perf_view (to make life easier for people trying to mine performance data)
ALTER VIEW perf_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
tests.started_time AS test_started_time,
- tests.finished_time AS test_finished_time,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- jobs.queued_time AS job_queued_time,
- jobs.started_time AS job_started_time,
- jobs.finished_time AS job_finished_time,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word,
- iteration_result.iteration,
- iteration_result.attribute AS iteration_key,
- iteration_result.value AS iteration_value
+ tests.finished_time AS test_finished_time,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ jobs.queued_time AS job_queued_time,
+ jobs.started_time AS job_started_time,
+ jobs.finished_time AS job_finished_time,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word,
+ iteration_result.iteration,
+ iteration_result.attribute AS iteration_key,
+ iteration_result.value AS iteration_value
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
@@ -85,28 +85,28 @@ INNER JOIN iteration_result ON iteration_result.test_idx = tests.kernel_idx;
ALTER_VIEWS_DOWN_SQL = """\
ALTER VIEW test_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- tests.finished_time AS test_finished_time,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- jobs.queued_time AS job_queued_time,
- jobs.started_time AS job_started_time,
- jobs.finished_time AS job_finished_time,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ tests.finished_time AS test_finished_time,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ jobs.queued_time AS job_queued_time,
+ jobs.started_time AS job_started_time,
+ jobs.finished_time AS job_finished_time,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
@@ -115,31 +115,31 @@ INNER JOIN status ON status.status_idx = tests.status;
-- perf_view (to make life easier for people trying to mine performance data)
ALTER VIEW perf_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- tests.finished_time AS test_finished_time,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- jobs.queued_time AS job_queued_time,
- jobs.started_time AS job_started_time,
- jobs.finished_time AS job_finished_time,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word,
- iteration_result.iteration,
- iteration_result.attribute AS iteration_key,
- iteration_result.value AS iteration_value
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ tests.finished_time AS test_finished_time,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ jobs.queued_time AS job_queued_time,
+ jobs.started_time AS job_started_time,
+ jobs.finished_time AS job_finished_time,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word,
+ iteration_result.iteration,
+ iteration_result.attribute AS iteration_key,
+ iteration_result.value AS iteration_value
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
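
migrate_up() above adds tests.started_time and redefines both views; migrate_down() drops the column and restores the previous definitions. The scripts only assume an object exposing execute() and execute_script(); a hypothetical stand-in sketching that contract (the real manager is supplied by the tko migration framework, so this is purely illustrative):

def _statements(script):
    # Drop "--" comment lines, then split what is left on ';'.
    lines = [l for l in script.splitlines()
             if not l.lstrip().startswith('--')]
    for chunk in '\n'.join(lines).split(';'):
        chunk = chunk.strip()
        if chunk:
            yield chunk

class FakeManager(object):
    """Hypothetical stand-in for the migration manager these scripts are
    written against; it just runs each statement on a DB-API cursor."""
    def __init__(self, cursor):
        self.cursor = cursor

    def execute(self, statement):
        self.cursor.execute(statement)

    def execute_script(self, script):
        for statement in _statements(script):
            self.cursor.execute(statement)

# e.g. migrate_up(FakeManager(cursor)) would add tests.started_time and
# redefine test_view/perf_view; migrate_down(FakeManager(cursor)) undoes it.
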
diff --git a/tko/migrations/005_add_testna_status.py b/tko/migrations/005_add_testna_status.py
index e22abe5d..4e4e4807 100644
--- a/tko/migrations/005_add_testna_status.py
+++ b/tko/migrations/005_add_testna_status.py
@@ -1,6 +1,6 @@
def migrate_up(monger):
- monger.execute("INSERT INTO status (word) values ('TEST_NA')")
+ monger.execute("INSERT INTO status (word) values ('TEST_NA')")
def migrate_down(monger):
- monger.execute("DELETE FROM status where word = 'TEST_NA'")
+ monger.execute("DELETE FROM status where word = 'TEST_NA'")
diff --git a/tko/migrations/006_add_table_query_history.py b/tko/migrations/006_add_table_query_history.py
index 41c5e29d..891836fe 100644
--- a/tko/migrations/006_add_table_query_history.py
+++ b/tko/migrations/006_add_table_query_history.py
@@ -1,9 +1,9 @@
def migrate_up(manager):
- manager.execute_script(ADD_TABLE_QUERY_HISTORY)
+ manager.execute_script(ADD_TABLE_QUERY_HISTORY)
def migrate_down(manager):
- manager.execute_script(DROP_TABLE_QUERY_HISTORY)
+ manager.execute_script(DROP_TABLE_QUERY_HISTORY)
ADD_TABLE_QUERY_HISTORY = """
@@ -15,4 +15,3 @@ url VARCHAR(1000));
DROP_TABLE_QUERY_HISTORY = """
DROP TABLE query_history;
"""
-
diff --git a/tko/migrations/007_widen_reason_field.py b/tko/migrations/007_widen_reason_field.py
index 7df6bc87..aae2a44e 100644
--- a/tko/migrations/007_widen_reason_field.py
+++ b/tko/migrations/007_widen_reason_field.py
@@ -1,5 +1,5 @@
def migrate_up(mgr):
- mgr.execute("alter table tests modify column reason varchar(1024);")
+ mgr.execute("alter table tests modify column reason varchar(1024);")
def migrate_down(mgr):
- mgr.execute("alter table tests modify column reason varchar(100);")
+ mgr.execute("alter table tests modify column reason varchar(100);")
diff --git a/tko/migrations/008_add_iteration_attributes.py b/tko/migrations/008_add_iteration_attributes.py
index 4ef176e4..87cd56bf 100644
--- a/tko/migrations/008_add_iteration_attributes.py
+++ b/tko/migrations/008_add_iteration_attributes.py
@@ -1,18 +1,18 @@
def migrate_up(manager):
- manager.execute_script(CREATE_TABLE_SQL)
+ manager.execute_script(CREATE_TABLE_SQL)
def migrate_down(manager):
- manager.execute_script(DROP_TABLE_SQL)
+ manager.execute_script(DROP_TABLE_SQL)
CREATE_TABLE_SQL = """
-- test iteration attributes (key value pairs at an iteration level)
CREATE TABLE iteration_attributes (
-test_idx int(10) unsigned NOT NULL, -- ref to test table
+test_idx int(10) unsigned NOT NULL, -- ref to test table
FOREIGN KEY (test_idx) REFERENCES tests(test_idx) ON DELETE CASCADE,
-iteration INTEGER, -- integer
-attribute VARCHAR(30), -- attribute name (e.g. 'run_id')
-value VARCHAR(100), -- attribute value
+iteration INTEGER, -- integer
+attribute VARCHAR(30), -- attribute name (e.g. 'run_id')
+value VARCHAR(100), -- attribute value
KEY `test_idx` (`test_idx`)
) TYPE=InnoDB;
"""
diff --git a/tko/models.py b/tko/models.py
index b1651f33..a5e230b6 100644
--- a/tko/models.py
+++ b/tko/models.py
@@ -5,136 +5,136 @@ from autotest_lib.tko import utils as tko_utils
class job(object):
- def __init__(self, dir, user, label, machine, queued_time,
- started_time, finished_time, machine_owner):
- self.dir = dir
- self.tests = []
- self.user = user
- self.label = label
- self.machine = machine
- self.queued_time = queued_time
- self.started_time = started_time
- self.finished_time = finished_time
- self.machine_owner = machine_owner
+ def __init__(self, dir, user, label, machine, queued_time,
+ started_time, finished_time, machine_owner):
+ self.dir = dir
+ self.tests = []
+ self.user = user
+ self.label = label
+ self.machine = machine
+ self.queued_time = queued_time
+ self.started_time = started_time
+ self.finished_time = finished_time
+ self.machine_owner = machine_owner
class kernel(object):
- def __init__(self, base, patches, kernel_hash):
- self.base = base
- self.patches = patches
- self.kernel_hash = kernel_hash
+ def __init__(self, base, patches, kernel_hash):
+ self.base = base
+ self.patches = patches
+ self.kernel_hash = kernel_hash
- @staticmethod
- def compute_hash(base, hashes):
- key_string = ','.join([base] + hashes)
- return md5.new(key_string).hexdigest()
+ @staticmethod
+ def compute_hash(base, hashes):
+ key_string = ','.join([base] + hashes)
+ return md5.new(key_string).hexdigest()
class test(object):
- def __init__(self, subdir, testname, status, reason, test_kernel,
- machine, started_time, finished_time, iterations,
- attributes):
- self.subdir = subdir
- self.testname = testname
- self.status = status
- self.reason = reason
- self.kernel = test_kernel
- self.machine = machine
- self.started_time = started_time
- self.finished_time = finished_time
- self.iterations = iterations
- self.attributes = attributes
-
-
- @staticmethod
- def load_iterations(keyval_path):
- """Abstract method to load a list of iterations from a keyval
- file."""
- raise NotImplementedError
-
-
- @classmethod
- def parse_test(cls, job, subdir, testname, status, reason, test_kernel,
- started_time, finished_time):
- """Given a job and the basic metadata about the test that
- can be extracted from the status logs, parse the test
- keyval files and use it to construct a complete test
- instance."""
- tko_utils.dprint("parsing test %s %s" % (subdir, testname))
-
- if subdir:
- # grab iterations from the results keyval
- iteration_keyval = os.path.join(job.dir, subdir,
- "results", "keyval")
- iterations = cls.load_iterations(iteration_keyval)
-
- # grab test attributes from the subdir keyval
- test_keyval = os.path.join(job.dir, subdir, "keyval")
- attributes = test.load_attributes(test_keyval)
- else:
- iterations = []
- attributes = {}
-
- return cls(subdir, testname, status, reason, test_kernel,
- job.machine, started_time, finished_time,
- iterations, attributes)
-
-
- @staticmethod
- def load_attributes(keyval_path):
- """Load the test attributes into a dictionary from a test
- keyval path. Does not assume that the path actually exists."""
- if not os.path.exists(keyval_path):
- return {}
- return utils.read_keyval(keyval_path)
+ def __init__(self, subdir, testname, status, reason, test_kernel,
+ machine, started_time, finished_time, iterations,
+ attributes):
+ self.subdir = subdir
+ self.testname = testname
+ self.status = status
+ self.reason = reason
+ self.kernel = test_kernel
+ self.machine = machine
+ self.started_time = started_time
+ self.finished_time = finished_time
+ self.iterations = iterations
+ self.attributes = attributes
+
+
+ @staticmethod
+ def load_iterations(keyval_path):
+ """Abstract method to load a list of iterations from a keyval
+ file."""
+ raise NotImplementedError
+
+
+ @classmethod
+ def parse_test(cls, job, subdir, testname, status, reason, test_kernel,
+ started_time, finished_time):
+ """Given a job and the basic metadata about the test that
+ can be extracted from the status logs, parse the test
+ keyval files and use it to construct a complete test
+ instance."""
+ tko_utils.dprint("parsing test %s %s" % (subdir, testname))
+
+ if subdir:
+ # grab iterations from the results keyval
+ iteration_keyval = os.path.join(job.dir, subdir,
+ "results", "keyval")
+ iterations = cls.load_iterations(iteration_keyval)
+
+ # grab test attributes from the subdir keyval
+ test_keyval = os.path.join(job.dir, subdir, "keyval")
+ attributes = test.load_attributes(test_keyval)
+ else:
+ iterations = []
+ attributes = {}
+
+ return cls(subdir, testname, status, reason, test_kernel,
+ job.machine, started_time, finished_time,
+ iterations, attributes)
+
+
+ @staticmethod
+ def load_attributes(keyval_path):
+ """Load the test attributes into a dictionary from a test
+ keyval path. Does not assume that the path actually exists."""
+ if not os.path.exists(keyval_path):
+ return {}
+ return utils.read_keyval(keyval_path)
class patch(object):
- def __init__(self, spec, reference, hash):
- self.spec = spec
- self.reference = reference
- self.hash = hash
+ def __init__(self, spec, reference, hash):
+ self.spec = spec
+ self.reference = reference
+ self.hash = hash
class iteration(object):
- def __init__(self, index, attr_keyval, perf_keyval):
- self.index = index
- self.attr_keyval = attr_keyval
- self.perf_keyval = perf_keyval
-
-
-
- @staticmethod
- def parse_line_into_dicts(line, attr_dict, perf_dict):
- """Abstract method to parse a keyval line and insert it into
- the appropriate dictionary.
- attr_dict: generic iteration attributes
- perf_dict: iteration performance results
- """
- raise NotImplementedError
-
-
- @classmethod
- def load_from_keyval(cls, keyval_path):
- """Load a list of iterations from an iteration keyval file.
- Keyval data from separate iterations is separated by blank
- lines. Makes use of the parse_line_into_dicts method to
- actually parse the individual lines."""
- if not os.path.exists(keyval_path):
- return []
-
- iterations = []
- index = 1
- attr, perf = {}, {}
- for line in file(keyval_path):
- line = line.strip()
- if line:
- cls.parse_line_into_dicts(line, attr, perf)
- else:
- iterations.append(cls(index, attr, perf))
- index += 1
- attr, perf = {}, {}
- if attr or perf:
- iterations.append(cls(index, attr, perf))
- return iterations
+ def __init__(self, index, attr_keyval, perf_keyval):
+ self.index = index
+ self.attr_keyval = attr_keyval
+ self.perf_keyval = perf_keyval
+
+
+
+ @staticmethod
+ def parse_line_into_dicts(line, attr_dict, perf_dict):
+ """Abstract method to parse a keyval line and insert it into
+ the appropriate dictionary.
+ attr_dict: generic iteration attributes
+ perf_dict: iteration performance results
+ """
+ raise NotImplementedError
+
+
+ @classmethod
+ def load_from_keyval(cls, keyval_path):
+ """Load a list of iterations from an iteration keyval file.
+ Keyval data from separate iterations is separated by blank
+ lines. Makes use of the parse_line_into_dicts method to
+ actually parse the individual lines."""
+ if not os.path.exists(keyval_path):
+ return []
+
+ iterations = []
+ index = 1
+ attr, perf = {}, {}
+ for line in file(keyval_path):
+ line = line.strip()
+ if line:
+ cls.parse_line_into_dicts(line, attr, perf)
+ else:
+ iterations.append(cls(index, attr, perf))
+ index += 1
+ attr, perf = {}, {}
+ if attr or perf:
+ iterations.append(cls(index, attr, perf))
+ return iterations
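
iteration.load_from_keyval above expects keyval files in which each iteration is a block of key=value lines and a blank line marks the iteration boundary; subclasses decide, via parse_line_into_dicts, whether a pair is an attribute or a performance result. A small standalone illustration of that blank-line splitting, using made-up throughput/latency keys; it is not the tko code itself:

def split_iterations(text):
    """Split keyval text into one dict per iteration, treating a blank
    line as the iteration boundary (the same shape load_from_keyval
    consumes)."""
    blocks, current = [], {}
    for line in text.splitlines():
        line = line.strip()
        if line:
            key, value = line.split("=", 1)
            current[key] = value
        elif current:
            blocks.append(current)
            current = {}
    if current:
        blocks.append(current)
    return blocks

sample = "throughput=123.4\nlatency=0.8\n\nthroughput=120.9\nlatency=0.9\n"
# split_iterations(sample) -> [{'throughput': '123.4', 'latency': '0.8'},
#                              {'throughput': '120.9', 'latency': '0.9'}]
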
diff --git a/tko/parse.py b/tko/parse.py
index 6d88d833..bdccdda4 100755
--- a/tko/parse.py
+++ b/tko/parse.py
@@ -8,194 +8,194 @@ from autotest_lib.tko import db as tko_db, utils as tko_utils, status_lib
def parse_args():
- # build up our options parser and parse sys.argv
- parser = optparse.OptionParser()
- parser.add_option("-m", help="Send mail for FAILED tests",
- dest="mailit", action="store_true")
- parser.add_option("-r", help="Reparse the results of a job",
- dest="reparse", action="store_true")
- parser.add_option("-o", help="Parse a single results directory",
- dest="singledir", action="store_true")
- parser.add_option("-l", help=("Levels of subdirectories to include "
- "in the job name"),
- type="int", dest="level", default=1)
- parser.add_option("-n", help="No blocking on an existing parse",
- dest="noblock", action="store_true")
- parser.add_option("-s", help="Database server hostname",
- dest="db_host", action="store")
- parser.add_option("-u", help="Database username", dest="db_user",
- action="store")
- parser.add_option("-p", help="Database password", dest="db_pass",
- action="store")
- parser.add_option("-d", help="Database name", dest="db_name",
- action="store")
- options, args = parser.parse_args()
-
- # we need a results directory
- if len(args) == 0:
- tko_utils.dprint("ERROR: at least one results directory must "
- "be provided")
- parser.print_help()
- sys.exit(1)
-
- # pass the options back
- return options, args
+ # build up our options parser and parse sys.argv
+ parser = optparse.OptionParser()
+ parser.add_option("-m", help="Send mail for FAILED tests",
+ dest="mailit", action="store_true")
+ parser.add_option("-r", help="Reparse the results of a job",
+ dest="reparse", action="store_true")
+ parser.add_option("-o", help="Parse a single results directory",
+ dest="singledir", action="store_true")
+ parser.add_option("-l", help=("Levels of subdirectories to include "
+ "in the job name"),
+ type="int", dest="level", default=1)
+ parser.add_option("-n", help="No blocking on an existing parse",
+ dest="noblock", action="store_true")
+ parser.add_option("-s", help="Database server hostname",
+ dest="db_host", action="store")
+ parser.add_option("-u", help="Database username", dest="db_user",
+ action="store")
+ parser.add_option("-p", help="Database password", dest="db_pass",
+ action="store")
+ parser.add_option("-d", help="Database name", dest="db_name",
+ action="store")
+ options, args = parser.parse_args()
+
+ # we need a results directory
+ if len(args) == 0:
+ tko_utils.dprint("ERROR: at least one results directory must "
+ "be provided")
+ parser.print_help()
+ sys.exit(1)
+
+ # pass the options back
+ return options, args
def format_failure_message(jobname, kernel, testname, status, reason):
- format_string = "%-12s %-20s %-12s %-10s %s"
- return format_string % (jobname, kernel, testname, status, reason)
+ format_string = "%-12s %-20s %-12s %-10s %s"
+ return format_string % (jobname, kernel, testname, status, reason)
def mailfailure(jobname, job, message):
- message_lines = [""]
- message_lines.append("The following tests FAILED for this job")
- message_lines.append("http://%s/results/%s" %
- (socket.gethostname(), jobname))
- message_lines.append("")
- message_lines.append(format_failure_message("Job name", "Kernel",
- "Test name", "FAIL/WARN",
- "Failure reason"))
- message_lines.append(format_failure_message("=" * 8, "=" * 6, "=" * 8,
- "=" * 8, "=" * 14))
- message_header = "\n".join(message_lines)
-
- subject = "AUTOTEST: FAILED tests from job %s" % jobname
- mail.send("", job.user, "", subject, message_header + message)
+ message_lines = [""]
+ message_lines.append("The following tests FAILED for this job")
+ message_lines.append("http://%s/results/%s" %
+ (socket.gethostname(), jobname))
+ message_lines.append("")
+ message_lines.append(format_failure_message("Job name", "Kernel",
+ "Test name", "FAIL/WARN",
+ "Failure reason"))
+ message_lines.append(format_failure_message("=" * 8, "=" * 6, "=" * 8,
+ "=" * 8, "=" * 14))
+ message_header = "\n".join(message_lines)
+
+ subject = "AUTOTEST: FAILED tests from job %s" % jobname
+ mail.send("", job.user, "", subject, message_header + message)
def parse_one(db, jobname, path, reparse, mail_on_failure):
- """
- Parse a single job. Optionally send email on failure.
- """
- tko_utils.dprint("\nScanning %s (%s)" % (jobname, path))
- if reparse and db.find_job(jobname):
- tko_utils.dprint("! Deleting old copy of job results to "
- "reparse it")
- db.delete_job(jobname)
- if db.find_job(jobname):
- tko_utils.dprint("! Job is already parsed, done")
- return
-
- # look up the status version
- try:
- job_keyval = utils.read_keyval(path)
- except IOError, e:
- if e.errno == errno.ENOENT:
- status_version = 0
- else:
- raise
- else:
- status_version = job_keyval.get("status_version", 0)
-
- # parse out the job
- parser = status_lib.parser(status_version)
- job = parser.make_job(path)
- status_log = os.path.join(path, "status.log")
- if not os.path.exists(status_log):
- status_log = os.path.join(path, "status")
- if not os.path.exists(status_log):
- tko_utils.dprint("! Unable to parse job, no status file")
- return
-
- # parse the status logs
- tko_utils.dprint("+ Parsing dir=%s, jobname=%s" % (path, jobname))
- status_lines = open(status_log).readlines()
- parser.start(job)
- tests = parser.end(status_lines)
- job.tests = tests
-
- # check for failures
- message_lines = [""]
- for test in job.tests:
- if not test.subdir:
- continue
- tko_utils.dprint("* testname, status, reason: %s %s %s"
- % (test.subdir, test.status, test.reason))
- if test.status in ("FAIL", "WARN"):
- message_lines.append(format_failure_message(
- jobname, test.kernel.base, test.subdir,
- test.status, test.reason))
- message = "\n".join(message_lines)
-
- # send out a email report of failure
- if len(message) > 2 and mail_on_failure:
- tko_utils.dprint("Sending email report of failure on %s to %s"
- % (jobname, job.user))
- mailfailure(jobname, job, message)
-
- # write the job into the database
- db.insert_job(jobname, job)
- db.commit()
+ """
+ Parse a single job. Optionally send email on failure.
+ """
+ tko_utils.dprint("\nScanning %s (%s)" % (jobname, path))
+ if reparse and db.find_job(jobname):
+ tko_utils.dprint("! Deleting old copy of job results to "
+ "reparse it")
+ db.delete_job(jobname)
+ if db.find_job(jobname):
+ tko_utils.dprint("! Job is already parsed, done")
+ return
+
+ # look up the status version
+ try:
+ job_keyval = utils.read_keyval(path)
+ except IOError, e:
+ if e.errno == errno.ENOENT:
+ status_version = 0
+ else:
+ raise
+ else:
+ status_version = job_keyval.get("status_version", 0)
+
+ # parse out the job
+ parser = status_lib.parser(status_version)
+ job = parser.make_job(path)
+ status_log = os.path.join(path, "status.log")
+ if not os.path.exists(status_log):
+ status_log = os.path.join(path, "status")
+ if not os.path.exists(status_log):
+ tko_utils.dprint("! Unable to parse job, no status file")
+ return
+
+ # parse the status logs
+ tko_utils.dprint("+ Parsing dir=%s, jobname=%s" % (path, jobname))
+ status_lines = open(status_log).readlines()
+ parser.start(job)
+ tests = parser.end(status_lines)
+ job.tests = tests
+
+ # check for failures
+ message_lines = [""]
+ for test in job.tests:
+ if not test.subdir:
+ continue
+ tko_utils.dprint("* testname, status, reason: %s %s %s"
+ % (test.subdir, test.status, test.reason))
+ if test.status in ("FAIL", "WARN"):
+ message_lines.append(format_failure_message(
+ jobname, test.kernel.base, test.subdir,
+ test.status, test.reason))
+ message = "\n".join(message_lines)
+
+ # send out a email report of failure
+ if len(message) > 2 and mail_on_failure:
+ tko_utils.dprint("Sending email report of failure on %s to %s"
+ % (jobname, job.user))
+ mailfailure(jobname, job, message)
+
+ # write the job into the database
+ db.insert_job(jobname, job)
+ db.commit()
def parse_path(db, path, level, reparse, mail_on_failure):
- machine_list = os.path.join(path, ".machines")
- if os.path.exists(machine_list):
- # multi-machine job
- for m in file(machine_list):
- machine = m.rstrip()
- if not machine:
- continue
- jobpath = os.path.join(path, machine)
- jobname = "%s/%s" % (os.path.basename(path), machine)
- try:
- db.run_with_retry(parse_one, db, jobname,
- path, reparse,
- mail_on_failure)
- except Exception:
- traceback.print_exc()
- continue
- else:
- # single machine job
- job_elements = path.split("/")[-level:]
- jobname = "/".join(job_elements)
- try:
- db.run_with_retry(parse_one, db, jobname, path,
- reparse, mail_on_failure)
- except Exception:
- traceback.print_exc()
+ machine_list = os.path.join(path, ".machines")
+ if os.path.exists(machine_list):
+ # multi-machine job
+ for m in file(machine_list):
+ machine = m.rstrip()
+ if not machine:
+ continue
+ jobpath = os.path.join(path, machine)
+ jobname = "%s/%s" % (os.path.basename(path), machine)
+ try:
+ db.run_with_retry(parse_one, db, jobname,
+ path, reparse,
+ mail_on_failure)
+ except Exception:
+ traceback.print_exc()
+ continue
+ else:
+ # single machine job
+ job_elements = path.split("/")[-level:]
+ jobname = "/".join(job_elements)
+ try:
+ db.run_with_retry(parse_one, db, jobname, path,
+ reparse, mail_on_failure)
+ except Exception:
+ traceback.print_exc()
def main():
- options, args = parse_args()
- results_dir = os.path.abspath(args[0])
- assert os.path.exists(results_dir)
-
- # build up the list of job dirs to parse
- if options.singledir:
- jobs_list = [results_dir]
- else:
- jobs_list = [os.path.join(results_dir, subdir)
- for subdir in os.listdir(results_dir)]
-
- # build up the database
- db = tko_db.db(autocommit=False, host=options.db_host,
- user=options.db_user, password=options.db_pass,
- database=options.db_name)
-
- # parse all the jobs
- for path in jobs_list:
- lockfile = open(os.path.join(path, ".parse.lock"), "w")
- flags = fcntl.LOCK_EX
- if options.noblock:
- flags != fcntl.LOCK_NB
- try:
- fcntl.flock(lockfile, flags)
- except IOError, e:
- # was this because the lock is unavailable?
- if e.errno == errno.EWOULDBLOCK:
- lockfile.close()
- continue
- else:
- raise # something unexpected happened
- try:
- parse_path(db, path, options.level, options.reparse,
- options.mailit)
- finally:
- fcntl.flock(lockfile, fcntl.LOCK_UN)
- lockfile.close()
+ options, args = parse_args()
+ results_dir = os.path.abspath(args[0])
+ assert os.path.exists(results_dir)
+
+ # build up the list of job dirs to parse
+ if options.singledir:
+ jobs_list = [results_dir]
+ else:
+ jobs_list = [os.path.join(results_dir, subdir)
+ for subdir in os.listdir(results_dir)]
+
+ # build up the database
+ db = tko_db.db(autocommit=False, host=options.db_host,
+ user=options.db_user, password=options.db_pass,
+ database=options.db_name)
+
+ # parse all the jobs
+ for path in jobs_list:
+ lockfile = open(os.path.join(path, ".parse.lock"), "w")
+ flags = fcntl.LOCK_EX
+ if options.noblock:
+ flags != fcntl.LOCK_NB
+ try:
+ fcntl.flock(lockfile, flags)
+ except IOError, e:
+ # was this because the lock is unavailable?
+ if e.errno == errno.EWOULDBLOCK:
+ lockfile.close()
+ continue
+ else:
+ raise # something unexpected happened
+ try:
+ parse_path(db, path, options.level, options.reparse,
+ options.mailit)
+ finally:
+ fcntl.flock(lockfile, fcntl.LOCK_UN)
+ lockfile.close()
if __name__ == "__main__":
- main()
+ main()
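
main() above serializes parsers with a per-job .parse.lock file and an exclusive flock; with -n the parse is meant to skip jobs whose lock is already held instead of blocking. A minimal, self-contained sketch of that non-blocking acquisition (the helper name is illustrative); it ORs LOCK_NB into the flags so flock() can fail immediately with EWOULDBLOCK:

import errno
import fcntl
import os

def try_parse_lock(results_dir, nonblocking):
    """Take an exclusive flock on <results_dir>/.parse.lock.  Returns the
    open lock file on success, or None when nonblocking and another
    parser already holds the lock."""
    lockfile = open(os.path.join(results_dir, ".parse.lock"), "w")
    flags = fcntl.LOCK_EX
    if nonblocking:
        flags |= fcntl.LOCK_NB  # OR the flag into the mask; a bare comparison would leave flags unchanged
    try:
        fcntl.flock(lockfile, flags)
    except IOError as e:
        if e.errno == errno.EWOULDBLOCK:
            lockfile.close()
            return None
        raise
    return lockfile

A caller skips the directory when None comes back, and otherwise releases with fcntl.flock(lockfile, fcntl.LOCK_UN) in a finally block, as the loop above does.
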
diff --git a/tko/parsers/base.py b/tko/parsers/base.py
index c0517a8b..37429b63 100644
--- a/tko/parsers/base.py
+++ b/tko/parsers/base.py
@@ -4,67 +4,67 @@ from autotest_lib.tko import status_lib, utils as tko_utils
class parser(object):
- """
- Abstract parser base class. Provides a generic implementation of the
- standard parser interfaction functions. The derived classes must
- implement a state_iterator method for this class to be useful.
- """
- def start(self, job):
- """ Initialize the parser for processing the results of
- 'job'."""
- # initialize all the basic parser parameters
- self.job = job
- self.finished = False
- self.line_buffer = status_lib.line_buffer()
- # create and prime the parser state machine
- self.state = self.state_iterator(self.line_buffer)
- self.state.next()
+ """
+ Abstract parser base class. Provides a generic implementation of the
+ standard parser interfaction functions. The derived classes must
+ implement a state_iterator method for this class to be useful.
+ """
+ def start(self, job):
+ """ Initialize the parser for processing the results of
+ 'job'."""
+ # initialize all the basic parser parameters
+ self.job = job
+ self.finished = False
+ self.line_buffer = status_lib.line_buffer()
+ # create and prime the parser state machine
+ self.state = self.state_iterator(self.line_buffer)
+ self.state.next()
- def process_lines(self, lines):
- """ Feed 'lines' into the parser state machine, and return
- a list of all the new test results produced."""
- self.line_buffer.put_multiple(lines)
- try:
- return self.state.next()
- except StopIteration:
- msg = ("WARNING: parser was called to process status "
- "lines after it was end()ed\n"
- "Current traceback:\n" +
- traceback.format_exc() +
- "\nCurrent stack:\n" +
- "".join(traceback.format_stack()))
- tko_utils.dprint(msg)
- return []
+ def process_lines(self, lines):
+ """ Feed 'lines' into the parser state machine, and return
+ a list of all the new test results produced."""
+ self.line_buffer.put_multiple(lines)
+ try:
+ return self.state.next()
+ except StopIteration:
+ msg = ("WARNING: parser was called to process status "
+ "lines after it was end()ed\n"
+ "Current traceback:\n" +
+ traceback.format_exc() +
+ "\nCurrent stack:\n" +
+ "".join(traceback.format_stack()))
+ tko_utils.dprint(msg)
+ return []
- def end(self, lines=[]):
- """ Feed 'lines' into the parser state machine, signal to the
- state machine that no more lines are forthcoming, and then
- return a list of all the new test results produced."""
- self.line_buffer.put_multiple(lines)
- # run the state machine to clear out the buffer
- self.finished = True
- try:
- return self.state.next()
- except StopIteration:
- msg = ("WARNING: parser was end()ed multiple times\n"
- "Current traceback:\n" +
- traceback.format_exc() +
- "\nCurrent stack:\n" +
- "".join(traceback.format_stack()))
- tko_utils.dprint(msg)
- return []
+ def end(self, lines=[]):
+ """ Feed 'lines' into the parser state machine, signal to the
+ state machine that no more lines are forthcoming, and then
+ return a list of all the new test results produced."""
+ self.line_buffer.put_multiple(lines)
+ # run the state machine to clear out the buffer
+ self.finished = True
+ try:
+ return self.state.next()
+ except StopIteration:
+ msg = ("WARNING: parser was end()ed multiple times\n"
+ "Current traceback:\n" +
+ traceback.format_exc() +
+ "\nCurrent stack:\n" +
+ "".join(traceback.format_stack()))
+ tko_utils.dprint(msg)
+ return []
- @staticmethod
- def make_job(dir):
- """ Create a new instance of the job model used by the
- parser, given a results directory."""
- raise NotImplemented
+ @staticmethod
+ def make_job(dir):
+ """ Create a new instance of the job model used by the
+ parser, given a results directory."""
+ raise NotImplemented
- def state_iterator(self, buffer):
- """ A generator method that implements the actual parser
- state machine. """
- raise NotImplemented
+ def state_iterator(self, buffer):
+ """ A generator method that implements the actual parser
+ state machine. """
+ raise NotImplemented
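
The base parser primes its state_iterator generator in start() and then resumes it from process_lines()/end(), expecting each resume to return a list of newly-completed results. A toy, self-contained illustration of that prime-then-feed pattern; ListBuffer and toy_state_iterator are stand-ins, not the real status_lib.line_buffer or a real parser subclass:

class ListBuffer(object):
    """Stand-in for status_lib.line_buffer: a FIFO of status lines."""
    def __init__(self):
        self._lines = []
    def put_multiple(self, lines):
        self._lines.extend(lines)
    def get(self):
        return self._lines.pop(0)
    def size(self):
        return len(self._lines)

def toy_state_iterator(buf):
    """Mirrors the control flow of state_iterator: drain the buffer,
    collect results, and yield them whenever the buffer runs empty."""
    new_results = []
    while True:
        if buf.size() == 0:
            yield new_results
            new_results = []
            continue
        line = buf.get()
        if line.strip():
            new_results.append(line.upper())

# Priming and feeding, as parser.start() and parser.process_lines() do:
buf = ListBuffer()
state = toy_state_iterator(buf)
next(state)                       # prime: runs up to the first yield
buf.put_multiple(["good line", ""])
assert next(state) == ["GOOD LINE"]  # results produced by the new lines
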
diff --git a/tko/parsers/version_0.py b/tko/parsers/version_0.py
index 13d38593..91d0ad72 100644
--- a/tko/parsers/version_0.py
+++ b/tko/parsers/version_0.py
@@ -6,392 +6,392 @@ from autotest_lib.tko.parsers import base
class job(models.job):
- def __init__(self, dir):
- job_dict = job.load_from_dir(dir)
- super(job, self).__init__(dir, **job_dict)
-
- @staticmethod
- def load_from_dir(dir):
- try:
- keyval = common_utils.read_keyval(dir)
- tko_utils.dprint(str(keyval))
- except Exception:
- keyval = {}
-
- user = keyval.get("user", None)
- label = keyval.get("label", None)
- machine = keyval.get("hostname", None)
- if machine:
- assert "," not in machine
- queued_time = tko_utils.get_timestamp(keyval, "job_queued")
- started_time = tko_utils.get_timestamp(keyval, "job_started")
- finished_time = tko_utils.get_timestamp(keyval, "job_finished")
- machine_owner = keyval.get("owner", None)
-
- if not machine:
- machine = job.find_hostname(dir)
- tko_utils.dprint("MACHINE NAME: %s" % machine)
-
- return {"user": user, "label": label, "machine": machine,
- "queued_time": queued_time,
- "started_time": started_time,
- "finished_time": finished_time,
- "machine_owner": machine_owner}
-
-
- @staticmethod
- def find_hostname(path):
- hostname = os.path.join(path, "sysinfo", "hostname")
- try:
- machine = open(hostname).readline().rstrip()
- return machine
- except Exception:
- tko_utils.dprint("Could not read a hostname from "
- "sysinfo/hostname")
-
- uname = os.path.join(path, "sysinfo", "uname_-a")
- try:
- machine = open(uname).readline().split()[1]
- return
- except Exception:
- tko_utils.dprint("Could not read a hostname from "
- "sysinfo/uname_-a")
-
- raise Exception("Unable to find a machine name")
+ def __init__(self, dir):
+ job_dict = job.load_from_dir(dir)
+ super(job, self).__init__(dir, **job_dict)
+
+ @staticmethod
+ def load_from_dir(dir):
+ try:
+ keyval = common_utils.read_keyval(dir)
+ tko_utils.dprint(str(keyval))
+ except Exception:
+ keyval = {}
+
+ user = keyval.get("user", None)
+ label = keyval.get("label", None)
+ machine = keyval.get("hostname", None)
+ if machine:
+ assert "," not in machine
+ queued_time = tko_utils.get_timestamp(keyval, "job_queued")
+ started_time = tko_utils.get_timestamp(keyval, "job_started")
+ finished_time = tko_utils.get_timestamp(keyval, "job_finished")
+ machine_owner = keyval.get("owner", None)
+
+ if not machine:
+ machine = job.find_hostname(dir)
+ tko_utils.dprint("MACHINE NAME: %s" % machine)
+
+ return {"user": user, "label": label, "machine": machine,
+ "queued_time": queued_time,
+ "started_time": started_time,
+ "finished_time": finished_time,
+ "machine_owner": machine_owner}
+
+
+ @staticmethod
+ def find_hostname(path):
+ hostname = os.path.join(path, "sysinfo", "hostname")
+ try:
+ machine = open(hostname).readline().rstrip()
+ return machine
+ except Exception:
+ tko_utils.dprint("Could not read a hostname from "
+ "sysinfo/hostname")
+
+ uname = os.path.join(path, "sysinfo", "uname_-a")
+ try:
+ machine = open(uname).readline().split()[1]
+ return
+ except Exception:
+ tko_utils.dprint("Could not read a hostname from "
+ "sysinfo/uname_-a")
+
+ raise Exception("Unable to find a machine name")
class kernel(models.kernel):
- def __init__(self, job, verify_ident=None):
- kernel_dict = kernel.load_from_dir(job.dir, verify_ident)
- super(kernel, self).__init__(**kernel_dict)
-
-
- @staticmethod
- def load_from_dir(dir, verify_ident=None):
- # try and load the booted kernel version
- build_log = os.path.join(dir, "build", "debug", "build_log")
- attributes = kernel.load_from_build_log(build_log)
- if not attributes:
- if verify_ident:
- base = verify_ident
- else:
- base = kernel.load_from_sysinfo(dir)
- patches = []
- hashes = []
- else:
- base, patches, hashes = attributes
- tko_utils.dprint("kernel.__init__() found kernel version %s"
- % base)
-
- # compute the kernel hash
- if base == "UNKNOWN":
- kernel_hash = "UNKNOWN"
- else:
- kernel_hash = kernel.compute_hash(base, hashes)
-
- return {"base": base, "patches": patches,
- "kernel_hash": kernel_hash}
-
-
- @staticmethod
- def load_from_sysinfo(path):
- for subdir in ("reboot1", ""):
- uname_path = os.path.join(path, "sysinfo", subdir,
- "uname_-a")
- if not os.path.exists(uname_path):
- continue
- uname = open(uname_path).readline().split()
- return re.sub("-autotest$", "", uname[2])
- return "UNKNOWN"
-
-
- @staticmethod
- def load_from_build_log(path):
- if not os.path.exists(path):
- return None
-
- base, patches, hashes = "UNKNOWN", [], []
- for line in file(path):
- head, rest = line.split(": ", 1)
- rest = rest.split()
- if head == "BASE":
- base = rest[0]
- elif head == "PATCH":
- patches.append(patch(*rest))
- hashes.append(rest[2])
- return base, patches, hashes
+ def __init__(self, job, verify_ident=None):
+ kernel_dict = kernel.load_from_dir(job.dir, verify_ident)
+ super(kernel, self).__init__(**kernel_dict)
+
+
+ @staticmethod
+ def load_from_dir(dir, verify_ident=None):
+ # try and load the booted kernel version
+ build_log = os.path.join(dir, "build", "debug", "build_log")
+ attributes = kernel.load_from_build_log(build_log)
+ if not attributes:
+ if verify_ident:
+ base = verify_ident
+ else:
+ base = kernel.load_from_sysinfo(dir)
+ patches = []
+ hashes = []
+ else:
+ base, patches, hashes = attributes
+ tko_utils.dprint("kernel.__init__() found kernel version %s"
+ % base)
+
+ # compute the kernel hash
+ if base == "UNKNOWN":
+ kernel_hash = "UNKNOWN"
+ else:
+ kernel_hash = kernel.compute_hash(base, hashes)
+
+ return {"base": base, "patches": patches,
+ "kernel_hash": kernel_hash}
+
+
+ @staticmethod
+ def load_from_sysinfo(path):
+ for subdir in ("reboot1", ""):
+ uname_path = os.path.join(path, "sysinfo", subdir,
+ "uname_-a")
+ if not os.path.exists(uname_path):
+ continue
+ uname = open(uname_path).readline().split()
+ return re.sub("-autotest$", "", uname[2])
+ return "UNKNOWN"
+
+
+ @staticmethod
+ def load_from_build_log(path):
+ if not os.path.exists(path):
+ return None
+
+ base, patches, hashes = "UNKNOWN", [], []
+ for line in file(path):
+ head, rest = line.split(": ", 1)
+ rest = rest.split()
+ if head == "BASE":
+ base = rest[0]
+ elif head == "PATCH":
+ patches.append(patch(*rest))
+ hashes.append(rest[2])
+ return base, patches, hashes
class test(models.test):
- def __init__(self, subdir, testname, status, reason, test_kernel,
- machine, started_time, finished_time, iterations,
- attributes):
- # for backwards compatibility with the original parser
- # implementation, if there is no test version we need a NULL
- # value to be used; also, if there is a version it should
- # be terminated by a newline
- if "version" in attributes:
- attributes["version"] = str(attributes["version"])
- else:
- attributes["version"] = None
+ def __init__(self, subdir, testname, status, reason, test_kernel,
+ machine, started_time, finished_time, iterations,
+ attributes):
+ # for backwards compatibility with the original parser
+ # implementation, if there is no test version we need a NULL
+ # value to be used; also, if there is a version it should
+ # be terminated by a newline
+ if "version" in attributes:
+ attributes["version"] = str(attributes["version"])
+ else:
+ attributes["version"] = None
- super(test, self).__init__(subdir, testname, status, reason,
- test_kernel, machine, started_time,
- finished_time, iterations,
- attributes)
+ super(test, self).__init__(subdir, testname, status, reason,
+ test_kernel, machine, started_time,
+ finished_time, iterations,
+ attributes)
- @staticmethod
- def load_iterations(keyval_path):
- return iteration.load_from_keyval(keyval_path)
+ @staticmethod
+ def load_iterations(keyval_path):
+ return iteration.load_from_keyval(keyval_path)
class patch(models.patch):
- def __init__(self, spec, reference, hash):
- tko_utils.dprint("PATCH::%s %s %s" % (spec, reference, hash))
- super(patch, self).__init__(spec, reference, hash)
- self.spec = spec
- self.reference = reference
- self.hash = hash
+ def __init__(self, spec, reference, hash):
+ tko_utils.dprint("PATCH::%s %s %s" % (spec, reference, hash))
+ super(patch, self).__init__(spec, reference, hash)
+ self.spec = spec
+ self.reference = reference
+ self.hash = hash
class iteration(models.iteration):
- @staticmethod
- def parse_line_into_dicts(line, attr_dict, perf_dict):
- key, value = line.split("=", 1)
- perf_dict[key] = value
+ @staticmethod
+ def parse_line_into_dicts(line, attr_dict, perf_dict):
+ key, value = line.split("=", 1)
+ perf_dict[key] = value
class status_line(object):
- def __init__(self, indent, status, subdir, testname, reason,
- optional_fields):
- # pull out the type & status of the line
- if status == "START":
- self.type = "START"
- self.status = None
- elif status.startswith("END "):
- self.type = "END"
- self.status = status[4:]
- else:
- self.type = "STATUS"
- self.status = status
- assert (self.status is None or
- self.status in status_lib.status_stack.statuses)
-
- # save all the other parameters
- self.indent = indent
- self.subdir = self.parse_name(subdir)
- self.testname = self.parse_name(testname)
- self.reason = reason
- self.optional_fields = optional_fields
-
-
- @staticmethod
- def parse_name(name):
- if name == "----":
- return None
- return name
-
-
- @staticmethod
- def is_status_line(line):
- return re.search(r"^\t*\S", line) is not None
-
-
- @classmethod
- def parse_line(cls, line):
- if not status_line.is_status_line(line):
- return None
- indent, line = re.search(r"^(\t*)(.*)$", line).groups()
- indent = len(indent)
-
- # split the line into the fixed and optional fields
- parts = line.split("\t")
- status, subdir, testname = parts[0:3]
- reason = parts[-1]
- optional_parts = parts[3:-1]
-
- # all the optional parts should be of the form "key=value"
- assert sum('=' not in part for part in optional_parts) == 0
- optional_fields = dict(part.split("=", 1)
- for part in optional_parts)
-
- # build up a new status_line and return it
- return cls(indent, status, subdir, testname, reason,
- optional_fields)
+ def __init__(self, indent, status, subdir, testname, reason,
+ optional_fields):
+ # pull out the type & status of the line
+ if status == "START":
+ self.type = "START"
+ self.status = None
+ elif status.startswith("END "):
+ self.type = "END"
+ self.status = status[4:]
+ else:
+ self.type = "STATUS"
+ self.status = status
+ assert (self.status is None or
+ self.status in status_lib.status_stack.statuses)
+
+ # save all the other parameters
+ self.indent = indent
+ self.subdir = self.parse_name(subdir)
+ self.testname = self.parse_name(testname)
+ self.reason = reason
+ self.optional_fields = optional_fields
+
+
+ @staticmethod
+ def parse_name(name):
+ if name == "----":
+ return None
+ return name
+
+
+ @staticmethod
+ def is_status_line(line):
+ return re.search(r"^\t*\S", line) is not None
+
+
+ @classmethod
+ def parse_line(cls, line):
+ if not status_line.is_status_line(line):
+ return None
+ indent, line = re.search(r"^(\t*)(.*)$", line).groups()
+ indent = len(indent)
+
+ # split the line into the fixed and optional fields
+ parts = line.split("\t")
+ status, subdir, testname = parts[0:3]
+ reason = parts[-1]
+ optional_parts = parts[3:-1]
+
+ # all the optional parts should be of the form "key=value"
+ assert sum('=' not in part for part in optional_parts) == 0
+ optional_fields = dict(part.split("=", 1)
+ for part in optional_parts)
+
+ # build up a new status_line and return it
+ return cls(indent, status, subdir, testname, reason,
+ optional_fields)
class parser(base.parser):
- @staticmethod
- def make_job(dir):
- return job(dir)
-
-
- def state_iterator(self, buffer):
- new_tests = []
- boot_count = 0
- group_subdir = None
- sought_level = 0
- stack = status_lib.status_stack()
- current_kernel = kernel(self.job)
- boot_in_progress = False
- alert_pending = None
- started_time = None
-
- while not self.finished or buffer.size():
- # stop processing once the buffer is empty
- if buffer.size() == 0:
- yield new_tests
- new_tests = []
- continue
-
- # parse the next line
- line = buffer.get()
- tko_utils.dprint('\nSTATUS: ' + line.strip())
- line = status_line.parse_line(line)
- if line is None:
- tko_utils.dprint('non-status line, ignoring')
- continue # ignore non-status lines
-
- # have we hit the job start line?
- if (line.type == "START" and not line.subdir and
- not line.testname):
- sought_level = 1
- tko_utils.dprint("found job level start "
- "marker, looking for level "
- "1 groups now")
- continue
-
- # have we hit the job end line?
- if (line.type == "END" and not line.subdir and
- not line.testname):
- tko_utils.dprint("found job level end "
- "marker, looking for level "
- "0 lines now")
- sought_level = 0
-
- # START line, just push another layer on to the stack
- # and grab the start time if this is at the job level
- # we're currently seeking
- if line.type == "START":
- group_subdir = None
- stack.start()
- if line.indent == sought_level:
- started_time = \
- tko_utils.get_timestamp(
- line.optional_fields, "timestamp")
- tko_utils.dprint("start line, ignoring")
- continue
- # otherwise, update the status on the stack
- else:
- tko_utils.dprint("GROPE_STATUS: %s" %
- [stack.current_status(),
- line.status, line.subdir,
- line.testname, line.reason])
- stack.update(line.status)
-
- if line.status == "ALERT":
- tko_utils.dprint("job level alert, recording")
- alert_pending = line.reason
- continue
-
- # ignore Autotest.install => GOOD lines
- if (line.testname == "Autotest.install" and
- line.status == "GOOD"):
- tko_utils.dprint("Successful Autotest "
- "install, ignoring")
- continue
-
- # ignore END lines for a reboot group
- if (line.testname == "reboot" and line.type == "END"):
- tko_utils.dprint("reboot group, ignoring")
- continue
-
- # convert job-level ABORTs into a 'JOB' test, and
- # ignore other job-level events
- if line.testname is None:
- if (line.status == "ABORT" and
- line.type != "END"):
- line.testname = "JOB"
- else:
- tko_utils.dprint("job level event, "
- "ignoring")
- continue
-
- # use the group subdir for END lines
- if line.type == "END":
- line.subdir = group_subdir
-
- # are we inside a block group?
- if (line.indent != sought_level and
- line.status != "ABORT" and
- not line.testname.startswith('reboot.')):
- if line.subdir:
- tko_utils.dprint("set group_subdir: "
- + line.subdir)
- group_subdir = line.subdir
- tko_utils.dprint("ignoring incorrect indent "
- "level %d != %d," %
- (line.indent, sought_level))
- continue
-
- # use the subdir as the testname, except for
- # boot.* and kernel.* tests
- if (line.testname is None or
- not re.search(r"^(boot(\.\d+)?$|kernel\.)",
- line.testname)):
- if line.subdir and '.' in line.subdir:
- line.testname = line.subdir
-
- # has a reboot started?
- if line.testname == "reboot.start":
- started_time = tko_utils.get_timestamp(
- line.optional_fields, "timestamp")
- tko_utils.dprint("reboot start event, "
- "ignoring")
- boot_in_progress = True
- continue
-
- # has a reboot finished?
- if line.testname == "reboot.verify":
- line.testname = "boot.%d" % boot_count
- tko_utils.dprint("reboot verified")
- boot_in_progress = False
- verify_ident = line.reason.strip()
- current_kernel = kernel(self.job, verify_ident)
- boot_count += 1
-
- if alert_pending:
- line.status = "ALERT"
- line.reason = alert_pending
- alert_pending = None
-
- # create the actual test object
- finished_time = tko_utils.get_timestamp(
- line.optional_fields, "timestamp")
- final_status = stack.end()
- tko_utils.dprint("Adding: "
- "%s\nSubdir:%s\nTestname:%s\n%s" %
- (final_status, line.subdir,
- line.testname, line.reason))
- new_test = test.parse_test(self.job, line.subdir,
- line.testname,
- final_status, line.reason,
- current_kernel,
- started_time,
- finished_time)
- started_time = None
- new_tests.append(new_test)
-
- # the job is finished, but we never came back from reboot
- if boot_in_progress:
- testname = "boot.%d" % boot_count
- reason = "machine did not return from reboot"
- tko_utils.dprint(("Adding: ABORT\nSubdir:----\n"
- "Testname:%s\n%s")
- % (testname, reason))
- new_test = test.parse_test(self.job, None, testname,
- "ABORT", reason,
- current_kernel, None, None)
- new_tests.append(new_test)
- yield new_tests
+ @staticmethod
+ def make_job(dir):
+ return job(dir)
+
+
+ def state_iterator(self, buffer):
+ new_tests = []
+ boot_count = 0
+ group_subdir = None
+ sought_level = 0
+ stack = status_lib.status_stack()
+ current_kernel = kernel(self.job)
+ boot_in_progress = False
+ alert_pending = None
+ started_time = None
+
+ while not self.finished or buffer.size():
+ # stop processing once the buffer is empty
+ if buffer.size() == 0:
+ yield new_tests
+ new_tests = []
+ continue
+
+ # parse the next line
+ line = buffer.get()
+ tko_utils.dprint('\nSTATUS: ' + line.strip())
+ line = status_line.parse_line(line)
+ if line is None:
+ tko_utils.dprint('non-status line, ignoring')
+ continue # ignore non-status lines
+
+ # have we hit the job start line?
+ if (line.type == "START" and not line.subdir and
+ not line.testname):
+ sought_level = 1
+ tko_utils.dprint("found job level start "
+ "marker, looking for level "
+ "1 groups now")
+ continue
+
+ # have we hit the job end line?
+ if (line.type == "END" and not line.subdir and
+ not line.testname):
+ tko_utils.dprint("found job level end "
+ "marker, looking for level "
+ "0 lines now")
+ sought_level = 0
+
+ # START line, just push another layer on to the stack
+ # and grab the start time if this is at the job level
+ # we're currently seeking
+ if line.type == "START":
+ group_subdir = None
+ stack.start()
+ if line.indent == sought_level:
+ started_time = \
+ tko_utils.get_timestamp(
+ line.optional_fields, "timestamp")
+ tko_utils.dprint("start line, ignoring")
+ continue
+ # otherwise, update the status on the stack
+ else:
+ tko_utils.dprint("GROPE_STATUS: %s" %
+ [stack.current_status(),
+ line.status, line.subdir,
+ line.testname, line.reason])
+ stack.update(line.status)
+
+ if line.status == "ALERT":
+ tko_utils.dprint("job level alert, recording")
+ alert_pending = line.reason
+ continue
+
+ # ignore Autotest.install => GOOD lines
+ if (line.testname == "Autotest.install" and
+ line.status == "GOOD"):
+ tko_utils.dprint("Successful Autotest "
+ "install, ignoring")
+ continue
+
+ # ignore END lines for a reboot group
+ if (line.testname == "reboot" and line.type == "END"):
+ tko_utils.dprint("reboot group, ignoring")
+ continue
+
+ # convert job-level ABORTs into a 'JOB' test, and
+ # ignore other job-level events
+ if line.testname is None:
+ if (line.status == "ABORT" and
+ line.type != "END"):
+ line.testname = "JOB"
+ else:
+ tko_utils.dprint("job level event, "
+ "ignoring")
+ continue
+
+ # use the group subdir for END lines
+ if line.type == "END":
+ line.subdir = group_subdir
+
+ # are we inside a block group?
+ if (line.indent != sought_level and
+ line.status != "ABORT" and
+ not line.testname.startswith('reboot.')):
+ if line.subdir:
+ tko_utils.dprint("set group_subdir: "
+ + line.subdir)
+ group_subdir = line.subdir
+ tko_utils.dprint("ignoring incorrect indent "
+ "level %d != %d," %
+ (line.indent, sought_level))
+ continue
+
+ # use the subdir as the testname, except for
+ # boot.* and kernel.* tests
+ if (line.testname is None or
+ not re.search(r"^(boot(\.\d+)?$|kernel\.)",
+ line.testname)):
+ if line.subdir and '.' in line.subdir:
+ line.testname = line.subdir
+
+ # has a reboot started?
+ if line.testname == "reboot.start":
+ started_time = tko_utils.get_timestamp(
+ line.optional_fields, "timestamp")
+ tko_utils.dprint("reboot start event, "
+ "ignoring")
+ boot_in_progress = True
+ continue
+
+ # has a reboot finished?
+ if line.testname == "reboot.verify":
+ line.testname = "boot.%d" % boot_count
+ tko_utils.dprint("reboot verified")
+ boot_in_progress = False
+ verify_ident = line.reason.strip()
+ current_kernel = kernel(self.job, verify_ident)
+ boot_count += 1
+
+ if alert_pending:
+ line.status = "ALERT"
+ line.reason = alert_pending
+ alert_pending = None
+
+ # create the actual test object
+ finished_time = tko_utils.get_timestamp(
+ line.optional_fields, "timestamp")
+ final_status = stack.end()
+ tko_utils.dprint("Adding: "
+ "%s\nSubdir:%s\nTestname:%s\n%s" %
+ (final_status, line.subdir,
+ line.testname, line.reason))
+ new_test = test.parse_test(self.job, line.subdir,
+ line.testname,
+ final_status, line.reason,
+ current_kernel,
+ started_time,
+ finished_time)
+ started_time = None
+ new_tests.append(new_test)
+
+ # the job is finished, but we never came back from reboot
+ if boot_in_progress:
+ testname = "boot.%d" % boot_count
+ reason = "machine did not return from reboot"
+ tko_utils.dprint(("Adding: ABORT\nSubdir:----\n"
+ "Testname:%s\n%s")
+ % (testname, reason))
+ new_test = test.parse_test(self.job, None, testname,
+ "ABORT", reason,
+ current_kernel, None, None)
+ new_tests.append(new_test)
+ yield new_tests
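
status_line.parse_line above expects lines whose leading tabs encode nesting depth, followed by tab-separated status, subdir and testname fields, any number of key=value optional fields, and the reason last. A standalone splitter sketch showing that layout on a sample line; it is illustrative only, not the real class (which additionally maps the '----' placeholder to None via parse_name):

import re

def split_status_line(line):
    """Split one version-0 status line into its raw pieces: indent depth,
    status, subdir, testname, optional key=value fields, and reason."""
    indent, rest = re.search(r"^(\t*)(.*)$", line.rstrip("\n")).groups()
    parts = rest.split("\t")
    status, subdir, testname = parts[0:3]
    reason = parts[-1]
    optional = dict(part.split("=", 1) for part in parts[3:-1])
    return len(indent), status, subdir, testname, optional, reason

sample = "\t\t\tGOOD\t----\t----\tfield1=val1\tfield2=val2\tTest Passed"
# split_status_line(sample) ->
#   (3, 'GOOD', '----', '----', {'field1': 'val1', 'field2': 'val2'},
#    'Test Passed')
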
diff --git a/tko/parsers/version_0_unittest.py b/tko/parsers/version_0_unittest.py
index 947199b1..3b879ae7 100644
--- a/tko/parsers/version_0_unittest.py
+++ b/tko/parsers/version_0_unittest.py
@@ -7,118 +7,118 @@ from autotest_lib.tko.parsers import version_0
class test_status_line(unittest.TestCase):
- statuses = ["GOOD", "WARN", "FAIL", "ABORT"]
-
-
- def test_handles_start(self):
- line = version_0.status_line(0, "START", "----", "test",
- "", {})
- self.assertEquals(line.type, "START")
- self.assertEquals(line.status, None)
-
-
- def test_handles_status(self):
- for stat in self.statuses:
- line = version_0.status_line(0, stat, "----", "test",
- "", {})
- self.assertEquals(line.type, "STATUS")
- self.assertEquals(line.status, stat)
-
-
- def test_handles_endstatus(self):
- for stat in self.statuses:
- line = version_0.status_line(0, "END " + stat, "----",
- "test", "", {})
- self.assertEquals(line.type, "END")
- self.assertEquals(line.status, stat)
-
-
- def test_fails_on_bad_status(self):
- for stat in self.statuses:
- self.assertRaises(AssertionError,
- version_0.status_line, 0,
- "BAD " + stat, "----", "test",
- "", {})
-
-
- def test_saves_all_fields(self):
- line = version_0.status_line(5, "GOOD", "subdir_name",
- "test_name", "my reason here",
- {"key1": "value",
- "key2": "another value",
- "key3": "value3"})
- self.assertEquals(line.indent, 5)
- self.assertEquals(line.status, "GOOD")
- self.assertEquals(line.subdir, "subdir_name")
- self.assertEquals(line.testname, "test_name")
- self.assertEquals(line.reason, "my reason here")
- self.assertEquals(line.optional_fields,
- {"key1": "value", "key2": "another value",
- "key3": "value3"})
-
-
- def test_parses_blank_subdir(self):
- line = version_0.status_line(0, "GOOD", "----", "test",
- "", {})
- self.assertEquals(line.subdir, None)
-
-
- def test_parses_blank_testname(self):
- line = version_0.status_line(0, "GOOD", "subdir", "----",
- "", {})
- self.assertEquals(line.testname, None)
-
-
- def test_parse_line_smoketest(self):
- input_data = ("\t\t\tGOOD\t----\t----\t"
- "field1=val1\tfield2=val2\tTest Passed")
- line = version_0.status_line.parse_line(input_data)
- self.assertEquals(line.indent, 3)
- self.assertEquals(line.type, "STATUS")
- self.assertEquals(line.status, "GOOD")
- self.assertEquals(line.subdir, None)
- self.assertEquals(line.testname, None)
- self.assertEquals(line.reason, "Test Passed")
- self.assertEquals(line.optional_fields,
- {"field1": "val1", "field2": "val2"})
-
- def test_parse_line_handles_newline(self):
- input_data = ("\t\tGOOD\t----\t----\t"
- "field1=val1\tfield2=val2\tNo newline here!")
- for suffix in ("", "\n"):
- line = version_0.status_line.parse_line(input_data +
- suffix)
- self.assertEquals(line.indent, 2)
- self.assertEquals(line.type, "STATUS")
- self.assertEquals(line.status, "GOOD")
- self.assertEquals(line.subdir, None)
- self.assertEquals(line.testname, None)
- self.assertEquals(line.reason, "No newline here!")
- self.assertEquals(line.optional_fields,
- {"field1": "val1",
- "field2": "val2"})
-
-
- def test_parse_line_fails_on_untabbed_lines(self):
- input_data = " GOOD\trandom\tfields\tof text"
- line = version_0.status_line.parse_line(input_data)
- self.assertEquals(line, None)
- line = version_0.status_line.parse_line(input_data.lstrip())
- self.assertEquals(line.indent, 0)
- self.assertEquals(line.type, "STATUS")
- self.assertEquals(line.status, "GOOD")
- self.assertEquals(line.subdir, "random")
- self.assertEquals(line.testname, "fields")
- self.assertEquals(line.reason, "of text")
- self.assertEquals(line.optional_fields, {})
-
-
- def test_parse_line_fails_on_bad_optional_fields(self):
- input_data = "GOOD\tfield1\tfield2\tfield3\tfield4"
- self.assertRaises(AssertionError,
- version_0.status_line.parse_line,
- input_data)
+ statuses = ["GOOD", "WARN", "FAIL", "ABORT"]
+
+
+ def test_handles_start(self):
+ line = version_0.status_line(0, "START", "----", "test",
+ "", {})
+ self.assertEquals(line.type, "START")
+ self.assertEquals(line.status, None)
+
+
+ def test_handles_status(self):
+ for stat in self.statuses:
+ line = version_0.status_line(0, stat, "----", "test",
+ "", {})
+ self.assertEquals(line.type, "STATUS")
+ self.assertEquals(line.status, stat)
+
+
+ def test_handles_endstatus(self):
+ for stat in self.statuses:
+ line = version_0.status_line(0, "END " + stat, "----",
+ "test", "", {})
+ self.assertEquals(line.type, "END")
+ self.assertEquals(line.status, stat)
+
+
+ def test_fails_on_bad_status(self):
+ for stat in self.statuses:
+ self.assertRaises(AssertionError,
+ version_0.status_line, 0,
+ "BAD " + stat, "----", "test",
+ "", {})
+
+
+ def test_saves_all_fields(self):
+ line = version_0.status_line(5, "GOOD", "subdir_name",
+ "test_name", "my reason here",
+ {"key1": "value",
+ "key2": "another value",
+ "key3": "value3"})
+ self.assertEquals(line.indent, 5)
+ self.assertEquals(line.status, "GOOD")
+ self.assertEquals(line.subdir, "subdir_name")
+ self.assertEquals(line.testname, "test_name")
+ self.assertEquals(line.reason, "my reason here")
+ self.assertEquals(line.optional_fields,
+ {"key1": "value", "key2": "another value",
+ "key3": "value3"})
+
+
+ def test_parses_blank_subdir(self):
+ line = version_0.status_line(0, "GOOD", "----", "test",
+ "", {})
+ self.assertEquals(line.subdir, None)
+
+
+ def test_parses_blank_testname(self):
+ line = version_0.status_line(0, "GOOD", "subdir", "----",
+ "", {})
+ self.assertEquals(line.testname, None)
+
+
+ def test_parse_line_smoketest(self):
+ input_data = ("\t\t\tGOOD\t----\t----\t"
+ "field1=val1\tfield2=val2\tTest Passed")
+ line = version_0.status_line.parse_line(input_data)
+ self.assertEquals(line.indent, 3)
+ self.assertEquals(line.type, "STATUS")
+ self.assertEquals(line.status, "GOOD")
+ self.assertEquals(line.subdir, None)
+ self.assertEquals(line.testname, None)
+ self.assertEquals(line.reason, "Test Passed")
+ self.assertEquals(line.optional_fields,
+ {"field1": "val1", "field2": "val2"})
+
+ def test_parse_line_handles_newline(self):
+ input_data = ("\t\tGOOD\t----\t----\t"
+ "field1=val1\tfield2=val2\tNo newline here!")
+ for suffix in ("", "\n"):
+ line = version_0.status_line.parse_line(input_data +
+ suffix)
+ self.assertEquals(line.indent, 2)
+ self.assertEquals(line.type, "STATUS")
+ self.assertEquals(line.status, "GOOD")
+ self.assertEquals(line.subdir, None)
+ self.assertEquals(line.testname, None)
+ self.assertEquals(line.reason, "No newline here!")
+ self.assertEquals(line.optional_fields,
+ {"field1": "val1",
+ "field2": "val2"})
+
+
+ def test_parse_line_fails_on_untabbed_lines(self):
+ input_data = " GOOD\trandom\tfields\tof text"
+ line = version_0.status_line.parse_line(input_data)
+ self.assertEquals(line, None)
+ line = version_0.status_line.parse_line(input_data.lstrip())
+ self.assertEquals(line.indent, 0)
+ self.assertEquals(line.type, "STATUS")
+ self.assertEquals(line.status, "GOOD")
+ self.assertEquals(line.subdir, "random")
+ self.assertEquals(line.testname, "fields")
+ self.assertEquals(line.reason, "of text")
+ self.assertEquals(line.optional_fields, {})
+
+
+ def test_parse_line_fails_on_bad_optional_fields(self):
+ input_data = "GOOD\tfield1\tfield2\tfield3\tfield4"
+ self.assertRaises(AssertionError,
+ version_0.status_line.parse_line,
+ input_data)
if __name__ == "__main__":
- unittest.main()
+ unittest.main()
diff --git a/tko/parsers/version_1.py b/tko/parsers/version_1.py
index b13e272e..77c60c22 100644
--- a/tko/parsers/version_1.py
+++ b/tko/parsers/version_1.py
@@ -5,85 +5,85 @@ from autotest_lib.tko.parsers import base, version_0
class kernel(models.kernel):
- def __init__(self, base, patches):
- if base:
- patches = [patch(*p.split()) for p in patches]
- hashes = [p.hash for p in patches]
- kernel_hash = self.compute_hash(base, hashes)
- else:
- base = "UNKNOWN"
- patches = []
- kernel_hash = "UNKNOWN"
- super(kernel, self).__init__(base, patches, kernel_hash)
+ def __init__(self, base, patches):
+ if base:
+ patches = [patch(*p.split()) for p in patches]
+ hashes = [p.hash for p in patches]
+ kernel_hash = self.compute_hash(base, hashes)
+ else:
+ base = "UNKNOWN"
+ patches = []
+ kernel_hash = "UNKNOWN"
+ super(kernel, self).__init__(base, patches, kernel_hash)
class test(models.test):
- @staticmethod
- def load_iterations(keyval_path):
- return iteration.load_from_keyval(keyval_path)
+ @staticmethod
+ def load_iterations(keyval_path):
+ return iteration.load_from_keyval(keyval_path)
class iteration(models.iteration):
- @staticmethod
- def parse_line_into_dicts(line, attr_dict, perf_dict):
- typed_match = re.search("^([^=]*)\{(\w*)\}=(.*)$", line)
- if typed_match:
- key, val_type, value = typed_match.groups()
- if val_type == "attr":
- attr_dict[key] = value
- elif val_type == "perf":
- perf_dict[key] = value
- else:
- msg = ("WARNING: line '%s' found in test "
- "iteration keyval could not be parsed")
- msg %= line
- tko_utils.dprint(msg)
- return # skip the line
- else:
- # old-fashioned untyped match, assume perf
- untyped_match = re.search("^([^=]*)=(.*)$", line)
- if not untyped_match:
- msg = ("WARNING: line '%s' found in test "
- "iteration keyval could not be parsed")
- msg %= line
- tko_utils.dprint(msg)
- return # skip this line
- key, value = untyped_match.groups()
- perf_dict[key] = value
+ @staticmethod
+ def parse_line_into_dicts(line, attr_dict, perf_dict):
+ typed_match = re.search("^([^=]*)\{(\w*)\}=(.*)$", line)
+ if typed_match:
+ key, val_type, value = typed_match.groups()
+ if val_type == "attr":
+ attr_dict[key] = value
+ elif val_type == "perf":
+ perf_dict[key] = value
+ else:
+ msg = ("WARNING: line '%s' found in test "
+ "iteration keyval could not be parsed")
+ msg %= line
+ tko_utils.dprint(msg)
+ return # skip the line
+ else:
+ # old-fashioned untyped match, assume perf
+ untyped_match = re.search("^([^=]*)=(.*)$", line)
+ if not untyped_match:
+ msg = ("WARNING: line '%s' found in test "
+ "iteration keyval could not be parsed")
+ msg %= line
+ tko_utils.dprint(msg)
+ return # skip this line
+ key, value = untyped_match.groups()
+ perf_dict[key] = value
class status_line(version_0.status_line):
- def is_successful_reboot(self, current_status):
- # make sure this is a reboot line
- if self.testname != "reboot":
- return False
+ def is_successful_reboot(self, current_status):
+ # make sure this is a reboot line
+ if self.testname != "reboot":
+ return False
- # make sure this was not a failure
- get_index = status_lib.status_stack.statuses.index
- if get_index(current_status) <= get_index("FAIL"):
- return False
+ # make sure this was not a failure
+ get_index = status_lib.status_stack.statuses.index
+ if get_index(current_status) <= get_index("FAIL"):
+ return False
- # it must have been a successful reboot
- return True
+ # it must have been a successful reboot
+ return True
- def get_kernel(self):
- # get the base kernel version
- fields = self.optional_fields
- base = fields.get("kernel", "")
- # get a list of patches
- patches = []
- patch_index = 0
- while ("patch%d" % patch_index) in fields:
- patches.append(fields["patch%d" % patch_index])
- patch_index += 1
- # create a new kernel instance
- return kernel(base, patches)
+ def get_kernel(self):
+ # get the base kernel version
+ fields = self.optional_fields
+ base = fields.get("kernel", "")
+ # get a list of patches
+ patches = []
+ patch_index = 0
+ while ("patch%d" % patch_index) in fields:
+ patches.append(fields["patch%d" % patch_index])
+ patch_index += 1
+ # create a new kernel instance
+ return kernel(base, patches)
- def get_timestamp(self):
- return tko_utils.get_timestamp(self.optional_fields,
- "timestamp")
+ def get_timestamp(self):
+ return tko_utils.get_timestamp(self.optional_fields,
+ "timestamp")
# the default implementations from version 0 will do for now
@@ -92,127 +92,127 @@ patch = version_0.patch
class parser(base.parser):
- @staticmethod
- def make_job(dir):
- return job(dir)
-
-
- @staticmethod
- def make_dummy_abort(indent):
- indent = "\t" * indent
- return indent + "END ABORT\t----\t----\tUnexpected ABORT"
-
-
- def state_iterator(self, buffer):
- new_tests = []
- boot_count = 0
- min_stack_size = 0
- stack = status_lib.status_stack()
- current_kernel = kernel("", []) # UNKNOWN
- started_time_stack = [None]
- subdir_stack = [None]
-
- while True:
- # are we finished with parsing?
- if buffer.size() == 0 and self.finished:
- if stack.size() == 0:
- break
- # we have status lines left on the stack,
- # we need to implicitly abort them first
- for i in reversed(xrange(stack.size())):
- buffer.put(self.make_dummy_abort(i))
-
- # stop processing once the buffer is empty
- if buffer.size() == 0:
- yield new_tests
- new_tests = []
- continue
-
- # reinitialize the per-iteration state
- started_time = None
- finished_time = None
-
- # get the next line
- raw_line = buffer.get()
- tko_utils.dprint('\nSTATUS: ' + raw_line.strip())
- line = status_line.parse_line(raw_line)
- if line is None:
- tko_utils.dprint('non-status line, ignoring')
- continue
-
- # initial line processing
- if line.type == "START":
- stack.start()
- if (line.testname, line.subdir) == (None,) * 2:
- min_stack_size = stack.size()
- started_time_stack.append(line.get_timestamp())
- subdir_stack.append(line.subdir)
- continue
- elif line.type == "STATUS":
- stack.update(line.status)
- indent = line.indent
- started_time = None
- finished_time = line.get_timestamp()
- if line.subdir:
- subdir_stack[-1] = line.subdir
- elif line.type == "END":
- if (line.testname, line.subdir) == (None,) * 2:
- min_stack_size = stack.size() - 1
- subdir_stack.pop()
- else:
- line.subdir = subdir_stack.pop()
- stack.update(line.status)
- indent = line.indent + 1
- started_time = started_time_stack.pop()
- finished_time = line.get_timestamp()
- else:
- assert False
-
- # have we unexpectedly exited a group?
- if indent < stack.size():
- # yes, implicitly ABORT
- buffer.put_back(raw_line)
- abort = self.make_dummy_abort(stack.size() - 1)
- buffer.put_back(abort)
- continue
- else:
- # no, just update the group status
- current_status = line.status
- stack.update(current_status)
-
- # do we need to pop the stack?
- if line.type == "END":
- current_status = stack.end()
- stack.update(current_status)
- if line.is_successful_reboot(current_status):
- current_kernel = line.get_kernel()
- # rename the reboot testname
- if line.testname == "reboot":
- line.testname = "boot.%d" % boot_count
- boot_count += 1
-
- # have we just finished a test?
- if stack.size() <= min_stack_size:
- # if there was no testname, just use the subdir
- if line.testname is None:
- line.testname = line.subdir
- # if there was no testname or subdir, use 'JOB'
- if line.testname is None:
- line.testname = "JOB"
-
- new_test = test.parse_test(self.job,
- line.subdir,
- line.testname,
- current_status,
- line.reason,
- current_kernel,
- started_time,
- finished_time)
- msg = "ADD: %s\nSubdir: %s\nTestname: %s\n%s"
- msg %= (new_test.status, new_test.subdir,
- new_test.testname, new_test.reason)
- tko_utils.dprint(msg)
- new_tests.append(new_test)
-
- # the job is finished, nothing to do here but exit
- yield new_tests
+ @staticmethod
+ def make_job(dir):
+ return job(dir)
+
+
+ @staticmethod
+ def make_dummy_abort(indent):
+ indent = "\t" * indent
+ return indent + "END ABORT\t----\t----\tUnexpected ABORT"
+
+
+ def state_iterator(self, buffer):
+ new_tests = []
+ boot_count = 0
+ min_stack_size = 0
+ stack = status_lib.status_stack()
+ current_kernel = kernel("", []) # UNKNOWN
+ started_time_stack = [None]
+ subdir_stack = [None]
+
+ while True:
+ # are we finished with parsing?
+ if buffer.size() == 0 and self.finished:
+ if stack.size() == 0:
+ break
+ # we have status lines left on the stack,
+ # we need to implicitly abort them first
+ for i in reversed(xrange(stack.size())):
+ buffer.put(self.make_dummy_abort(i))
+
+ # stop processing once the buffer is empty
+ if buffer.size() == 0:
+ yield new_tests
+ new_tests = []
+ continue
+
+ # reinitialize the per-iteration state
+ started_time = None
+ finished_time = None
+
+ # get the next line
+ raw_line = buffer.get()
+ tko_utils.dprint('\nSTATUS: ' + raw_line.strip())
+ line = status_line.parse_line(raw_line)
+ if line is None:
+ tko_utils.dprint('non-status line, ignoring')
+ continue
+
+ # initial line processing
+ if line.type == "START":
+ stack.start()
+ if (line.testname, line.subdir) == (None,) * 2:
+ min_stack_size = stack.size()
+ started_time_stack.append(line.get_timestamp())
+ subdir_stack.append(line.subdir)
+ continue
+ elif line.type == "STATUS":
+ stack.update(line.status)
+ indent = line.indent
+ started_time = None
+ finished_time = line.get_timestamp()
+ if line.subdir:
+ subdir_stack[-1] = line.subdir
+ elif line.type == "END":
+ if (line.testname, line.subdir) == (None,) * 2:
+ min_stack_size = stack.size() - 1
+ subdir_stack.pop()
+ else:
+ line.subdir = subdir_stack.pop()
+ stack.update(line.status)
+ indent = line.indent + 1
+ started_time = started_time_stack.pop()
+ finished_time = line.get_timestamp()
+ else:
+ assert False
+
+ # have we unexpectedly exited a group?
+ if indent < stack.size():
+ # yes, implicitly ABORT
+ buffer.put_back(raw_line)
+ abort = self.make_dummy_abort(stack.size() - 1)
+ buffer.put_back(abort)
+ continue
+ else:
+ # no, just update the group status
+ current_status = line.status
+ stack.update(current_status)
+
+ # do we need to pop the stack?
+ if line.type == "END":
+ current_status = stack.end()
+ stack.update(current_status)
+ if line.is_successful_reboot(current_status):
+ current_kernel = line.get_kernel()
+ # rename the reboot testname
+ if line.testname == "reboot":
+ line.testname = "boot.%d" % boot_count
+ boot_count += 1
+
+ # have we just finished a test?
+ if stack.size() <= min_stack_size:
+ # if there was no testname, just use the subdir
+ if line.testname is None:
+ line.testname = line.subdir
+ # if there was no testname or subdir, use 'JOB'
+ if line.testname is None:
+ line.testname = "JOB"
+
+ new_test = test.parse_test(self.job,
+ line.subdir,
+ line.testname,
+ current_status,
+ line.reason,
+ current_kernel,
+ started_time,
+ finished_time)
+ msg = "ADD: %s\nSubdir: %s\nTestname: %s\n%s"
+ msg %= (new_test.status, new_test.subdir,
+ new_test.testname, new_test.reason)
+ tko_utils.dprint(msg)
+ new_tests.append(new_test)
+
+ # the job is finished, nothing to do here but exit
+ yield new_tests
diff --git a/tko/parsers/version_1_unittest.py b/tko/parsers/version_1_unittest.py
index eb3ad37a..33077c65 100644
--- a/tko/parsers/version_1_unittest.py
+++ b/tko/parsers/version_1_unittest.py
@@ -7,187 +7,187 @@ from autotest_lib.tko.parsers import version_1
class test_status_line(unittest.TestCase):
- statuses = ["GOOD", "WARN", "FAIL", "ABORT"]
-
-
- def test_handles_start(self):
- line = version_1.status_line(0, "START", "----", "test",
- "", {})
- self.assertEquals(line.type, "START")
- self.assertEquals(line.status, None)
-
-
- def test_handles_status(self):
- for stat in self.statuses:
- line = version_1.status_line(0, stat, "----", "test",
- "", {})
- self.assertEquals(line.type, "STATUS")
- self.assertEquals(line.status, stat)
-
-
- def test_handles_endstatus(self):
- for stat in self.statuses:
- line = version_1.status_line(0, "END " + stat, "----",
- "test", "", {})
- self.assertEquals(line.type, "END")
- self.assertEquals(line.status, stat)
-
-
- def test_fails_on_bad_status(self):
- for stat in self.statuses:
- self.assertRaises(AssertionError,
- version_1.status_line, 0,
- "BAD " + stat, "----", "test",
- "", {})
-
-
- def test_saves_all_fields(self):
- line = version_1.status_line(5, "GOOD", "subdir_name",
- "test_name", "my reason here",
- {"key1": "value",
- "key2": "another value",
- "key3": "value3"})
- self.assertEquals(line.indent, 5)
- self.assertEquals(line.status, "GOOD")
- self.assertEquals(line.subdir, "subdir_name")
- self.assertEquals(line.testname, "test_name")
- self.assertEquals(line.reason, "my reason here")
- self.assertEquals(line.optional_fields,
- {"key1": "value", "key2": "another value",
- "key3": "value3"})
-
-
- def test_parses_blank_subdir(self):
- line = version_1.status_line(0, "GOOD", "----", "test",
- "", {})
- self.assertEquals(line.subdir, None)
-
-
- def test_parses_blank_testname(self):
- line = version_1.status_line(0, "GOOD", "subdir", "----",
- "", {})
- self.assertEquals(line.testname, None)
-
-
- def test_parse_line_smoketest(self):
- input_data = ("\t\t\tGOOD\t----\t----\t"
- "field1=val1\tfield2=val2\tTest Passed")
- line = version_1.status_line.parse_line(input_data)
- self.assertEquals(line.indent, 3)
- self.assertEquals(line.type, "STATUS")
- self.assertEquals(line.status, "GOOD")
- self.assertEquals(line.subdir, None)
- self.assertEquals(line.testname, None)
- self.assertEquals(line.reason, "Test Passed")
- self.assertEquals(line.optional_fields,
- {"field1": "val1", "field2": "val2"})
-
- def test_parse_line_handles_newline(self):
- input_data = ("\t\tGOOD\t----\t----\t"
- "field1=val1\tfield2=val2\tNo newline here!")
- for suffix in ("", "\n"):
- line = version_1.status_line.parse_line(input_data +
- suffix)
- self.assertEquals(line.indent, 2)
- self.assertEquals(line.type, "STATUS")
- self.assertEquals(line.status, "GOOD")
- self.assertEquals(line.subdir, None)
- self.assertEquals(line.testname, None)
- self.assertEquals(line.reason, "No newline here!")
- self.assertEquals(line.optional_fields,
- {"field1": "val1",
- "field2": "val2"})
-
-
- def test_parse_line_fails_on_untabbed_lines(self):
- input_data = " GOOD\trandom\tfields\tof text"
- line = version_1.status_line.parse_line(input_data)
- self.assertEquals(line, None)
- line = version_1.status_line.parse_line(input_data.lstrip())
- self.assertEquals(line.indent, 0)
- self.assertEquals(line.type, "STATUS")
- self.assertEquals(line.status, "GOOD")
- self.assertEquals(line.subdir, "random")
- self.assertEquals(line.testname, "fields")
- self.assertEquals(line.reason, "of text")
- self.assertEquals(line.optional_fields, {})
-
-
- def test_parse_line_fails_on_bad_optional_fields(self):
- input_data = "GOOD\tfield1\tfield2\tfield3\tfield4"
- self.assertRaises(AssertionError,
- version_1.status_line.parse_line,
- input_data)
-
-
- def test_good_reboot_passes_success_test(self):
- line = version_1.status_line(0, "NOSTATUS", None, "reboot",
- "reboot success", {})
- self.assertEquals(line.is_successful_reboot("GOOD"), True)
- self.assertEquals(line.is_successful_reboot("WARN"), True)
-
-
- def test_bad_reboot_passes_success_test(self):
- line = version_1.status_line(0, "NOSTATUS", None, "reboot",
- "reboot success", {})
- self.assertEquals(line.is_successful_reboot("FAIL"), False)
- self.assertEquals(line.is_successful_reboot("ABORT"), False)
-
-
- def test_get_kernel_returns_kernel_plus_patches(self):
- line = version_1.status_line(0, "GOOD", "subdir", "testname",
- "reason text",
- {"kernel": "2.6.24-rc40",
- "patch0": "first_patch 0 0",
- "patch1": "another_patch 0 0"})
- kern = line.get_kernel()
- kernel_hash = md5.new("2.6.24-rc40,0,0").hexdigest()
- self.assertEquals(kern.base, "2.6.24-rc40")
- self.assertEquals(kern.patches[0].spec, "first_patch")
- self.assertEquals(kern.patches[1].spec, "another_patch")
- self.assertEquals(len(kern.patches), 2)
- self.assertEquals(kern.kernel_hash, kernel_hash)
-
-
- def test_get_kernel_ignores_out_of_sequence_patches(self):
- line = version_1.status_line(0, "GOOD", "subdir", "testname",
- "reason text",
- {"kernel": "2.6.24-rc40",
- "patch0": "first_patch 0 0",
- "patch2": "another_patch 0 0"})
- kern = line.get_kernel()
- kernel_hash = md5.new("2.6.24-rc40,0").hexdigest()
- self.assertEquals(kern.base, "2.6.24-rc40")
- self.assertEquals(kern.patches[0].spec, "first_patch")
- self.assertEquals(len(kern.patches), 1)
- self.assertEquals(kern.kernel_hash, kernel_hash)
-
-
- def test_get_kernel_returns_unknown_with_no_kernel(self):
- line = version_1.status_line(0, "GOOD", "subdir", "testname",
- "reason text",
- {"patch0": "first_patch 0 0",
- "patch2": "another_patch 0 0"})
- kern = line.get_kernel()
- self.assertEquals(kern.base, "UNKNOWN")
- self.assertEquals(kern.patches, [])
- self.assertEquals(kern.kernel_hash, "UNKNOWN")
-
-
- def test_get_timestamp_returns_timestamp_field(self):
- timestamp = datetime.datetime(1970, 1, 1, 4, 30)
- timestamp -= datetime.timedelta(seconds=time.timezone)
- line = version_1.status_line(0, "GOOD", "subdir", "testname",
- "reason text",
- {"timestamp": "16200"})
- self.assertEquals(timestamp, line.get_timestamp())
-
-
- def test_get_timestamp_returns_none_on_missing_field(self):
- line = version_1.status_line(0, "GOOD", "subdir", "testname",
- "reason text", {})
- self.assertEquals(None, line.get_timestamp())
+ statuses = ["GOOD", "WARN", "FAIL", "ABORT"]
+
+
+ def test_handles_start(self):
+ line = version_1.status_line(0, "START", "----", "test",
+ "", {})
+ self.assertEquals(line.type, "START")
+ self.assertEquals(line.status, None)
+
+
+ def test_handles_status(self):
+ for stat in self.statuses:
+ line = version_1.status_line(0, stat, "----", "test",
+ "", {})
+ self.assertEquals(line.type, "STATUS")
+ self.assertEquals(line.status, stat)
+
+
+ def test_handles_endstatus(self):
+ for stat in self.statuses:
+ line = version_1.status_line(0, "END " + stat, "----",
+ "test", "", {})
+ self.assertEquals(line.type, "END")
+ self.assertEquals(line.status, stat)
+
+
+ def test_fails_on_bad_status(self):
+ for stat in self.statuses:
+ self.assertRaises(AssertionError,
+ version_1.status_line, 0,
+ "BAD " + stat, "----", "test",
+ "", {})
+
+
+ def test_saves_all_fields(self):
+ line = version_1.status_line(5, "GOOD", "subdir_name",
+ "test_name", "my reason here",
+ {"key1": "value",
+ "key2": "another value",
+ "key3": "value3"})
+ self.assertEquals(line.indent, 5)
+ self.assertEquals(line.status, "GOOD")
+ self.assertEquals(line.subdir, "subdir_name")
+ self.assertEquals(line.testname, "test_name")
+ self.assertEquals(line.reason, "my reason here")
+ self.assertEquals(line.optional_fields,
+ {"key1": "value", "key2": "another value",
+ "key3": "value3"})
+
+
+ def test_parses_blank_subdir(self):
+ line = version_1.status_line(0, "GOOD", "----", "test",
+ "", {})
+ self.assertEquals(line.subdir, None)
+
+
+ def test_parses_blank_testname(self):
+ line = version_1.status_line(0, "GOOD", "subdir", "----",
+ "", {})
+ self.assertEquals(line.testname, None)
+
+
+ def test_parse_line_smoketest(self):
+ input_data = ("\t\t\tGOOD\t----\t----\t"
+ "field1=val1\tfield2=val2\tTest Passed")
+ line = version_1.status_line.parse_line(input_data)
+ self.assertEquals(line.indent, 3)
+ self.assertEquals(line.type, "STATUS")
+ self.assertEquals(line.status, "GOOD")
+ self.assertEquals(line.subdir, None)
+ self.assertEquals(line.testname, None)
+ self.assertEquals(line.reason, "Test Passed")
+ self.assertEquals(line.optional_fields,
+ {"field1": "val1", "field2": "val2"})
+
+ def test_parse_line_handles_newline(self):
+ input_data = ("\t\tGOOD\t----\t----\t"
+ "field1=val1\tfield2=val2\tNo newline here!")
+ for suffix in ("", "\n"):
+ line = version_1.status_line.parse_line(input_data +
+ suffix)
+ self.assertEquals(line.indent, 2)
+ self.assertEquals(line.type, "STATUS")
+ self.assertEquals(line.status, "GOOD")
+ self.assertEquals(line.subdir, None)
+ self.assertEquals(line.testname, None)
+ self.assertEquals(line.reason, "No newline here!")
+ self.assertEquals(line.optional_fields,
+ {"field1": "val1",
+ "field2": "val2"})
+
+
+ def test_parse_line_fails_on_untabbed_lines(self):
+ input_data = " GOOD\trandom\tfields\tof text"
+ line = version_1.status_line.parse_line(input_data)
+ self.assertEquals(line, None)
+ line = version_1.status_line.parse_line(input_data.lstrip())
+ self.assertEquals(line.indent, 0)
+ self.assertEquals(line.type, "STATUS")
+ self.assertEquals(line.status, "GOOD")
+ self.assertEquals(line.subdir, "random")
+ self.assertEquals(line.testname, "fields")
+ self.assertEquals(line.reason, "of text")
+ self.assertEquals(line.optional_fields, {})
+
+
+ def test_parse_line_fails_on_bad_optional_fields(self):
+ input_data = "GOOD\tfield1\tfield2\tfield3\tfield4"
+ self.assertRaises(AssertionError,
+ version_1.status_line.parse_line,
+ input_data)
+
+
+ def test_good_reboot_passes_success_test(self):
+ line = version_1.status_line(0, "NOSTATUS", None, "reboot",
+ "reboot success", {})
+ self.assertEquals(line.is_successful_reboot("GOOD"), True)
+ self.assertEquals(line.is_successful_reboot("WARN"), True)
+
+
+ def test_bad_reboot_passes_success_test(self):
+ line = version_1.status_line(0, "NOSTATUS", None, "reboot",
+ "reboot success", {})
+ self.assertEquals(line.is_successful_reboot("FAIL"), False)
+ self.assertEquals(line.is_successful_reboot("ABORT"), False)
+
+
+ def test_get_kernel_returns_kernel_plus_patches(self):
+ line = version_1.status_line(0, "GOOD", "subdir", "testname",
+ "reason text",
+ {"kernel": "2.6.24-rc40",
+ "patch0": "first_patch 0 0",
+ "patch1": "another_patch 0 0"})
+ kern = line.get_kernel()
+ kernel_hash = md5.new("2.6.24-rc40,0,0").hexdigest()
+ self.assertEquals(kern.base, "2.6.24-rc40")
+ self.assertEquals(kern.patches[0].spec, "first_patch")
+ self.assertEquals(kern.patches[1].spec, "another_patch")
+ self.assertEquals(len(kern.patches), 2)
+ self.assertEquals(kern.kernel_hash, kernel_hash)
+
+
+ def test_get_kernel_ignores_out_of_sequence_patches(self):
+ line = version_1.status_line(0, "GOOD", "subdir", "testname",
+ "reason text",
+ {"kernel": "2.6.24-rc40",
+ "patch0": "first_patch 0 0",
+ "patch2": "another_patch 0 0"})
+ kern = line.get_kernel()
+ kernel_hash = md5.new("2.6.24-rc40,0").hexdigest()
+ self.assertEquals(kern.base, "2.6.24-rc40")
+ self.assertEquals(kern.patches[0].spec, "first_patch")
+ self.assertEquals(len(kern.patches), 1)
+ self.assertEquals(kern.kernel_hash, kernel_hash)
+
+
+ def test_get_kernel_returns_unknown_with_no_kernel(self):
+ line = version_1.status_line(0, "GOOD", "subdir", "testname",
+ "reason text",
+ {"patch0": "first_patch 0 0",
+ "patch2": "another_patch 0 0"})
+ kern = line.get_kernel()
+ self.assertEquals(kern.base, "UNKNOWN")
+ self.assertEquals(kern.patches, [])
+ self.assertEquals(kern.kernel_hash, "UNKNOWN")
+
+
+ def test_get_timestamp_returns_timestamp_field(self):
+ timestamp = datetime.datetime(1970, 1, 1, 4, 30)
+ timestamp -= datetime.timedelta(seconds=time.timezone)
+ line = version_1.status_line(0, "GOOD", "subdir", "testname",
+ "reason text",
+ {"timestamp": "16200"})
+ self.assertEquals(timestamp, line.get_timestamp())
+
+
+ def test_get_timestamp_returns_none_on_missing_field(self):
+ line = version_1.status_line(0, "GOOD", "subdir", "testname",
+ "reason text", {})
+ self.assertEquals(None, line.get_timestamp())
if __name__ == "__main__":
- unittest.main()
+ unittest.main()
diff --git a/tko/plotgraph.py b/tko/plotgraph.py
index 8b12cbe6..c7af2a30 100755
--- a/tko/plotgraph.py
+++ b/tko/plotgraph.py
@@ -11,97 +11,96 @@ from math import sqrt
Popen = subprocess.Popen
def avg_dev(values):
- if len(values) == 0:
- return (0,0)
- average = float(sum(values)) / len(values)
- sum_sq_dev = sum( [(x - average) ** 2 for x in values] )
- std_dev = sqrt(sum_sq_dev / float(len(values)));
- return (average, std_dev);
+ if len(values) == 0:
+ return (0,0)
+ average = float(sum(values)) / len(values)
+ sum_sq_dev = sum([(x - average) ** 2 for x in values])
+ std_dev = sqrt(sum_sq_dev / float(len(values)))
+ return (average, std_dev)
class gnuplot:
- def __init__(self, title, xlabel, ylabel, xsort = sorted, size = "1180,900", keytitle = None):
- self.title = title
- self.xlabel = xlabel
- self.ylabel = ylabel
- self.data_titles = []
- self.datasets = []
- self.xsort = xsort
- self.xvalues = set([])
- self.size = size
- self.keytitle = keytitle
-
- def xtics(self):
- count = 1
- tics = []
- for label in self.xsort(self.xlabels):
- tics.append('"%s" %d' % (label, count))
- count += 1
- return tics
-
-
- def add_dataset(self, title, labeled_values):
- """
- Add a data line
-
- title: title of the dataset
- labeled_values: dictionary of lists
- { label : [value1, value2, ... ] , ... }
- """
- if not labeled_values:
- raise "plotgraph:add_dataset - dataset was empty! %s" %\
- title
- self.data_titles.append(title)
- data_points = {}
- for label in labeled_values:
- point = "%s %s" % avg_dev(labeled_values[label])
- data_points[label] = point
- self.xvalues.add(label)
- self.datasets.append(data_points)
-
-
- def plot(self, cgi_header = False, output = None, test = None):
- if cgi_header:
- print "Content-type: image/png\n"
- sys.stdout.flush()
- if test:
- g = open(test, 'w')
- else:
- p = Popen("/usr/bin/gnuplot", stdin = subprocess.PIPE)
- g = p.stdin
- g.write('set terminal png size %s\n' % self.size)
- if self.keytitle:
- g.write('set key title "%s"\n' % self.keytitle)
- g.write('set key outside\n') # outside right
- else:
- g.write('set key below\n')
- g.write('set title "%s"\n' % self.title)
- g.write('set xlabel "%s"\n' % self.xlabel)
- g.write('set ylabel "%s"\n' % self.ylabel)
- if output:
- g.write('set output "%s"\n' % output)
- g.write('set style data yerrorlines\n')
- g.write('set grid\n')
-
- self.xlabels = self.xsort(list(self.xvalues))
-
- g.write('set xrange [0.5:%f]\n' % (len(self.xvalues)+0.5))
- g.write('set xtics rotate (%s)\n' % ','.join(self.xtics()))
-
- plot_lines = ['"-" title "%s"' % t for t in self.data_titles]
- g.write('plot ' + ', '.join(plot_lines) + '\n')
-
- for dataset in self.datasets:
- count = 1
- for label in self.xlabels:
- if label in dataset:
- data = dataset[label]
- g.write("%d %s\n" % (count, str(data)))
- count += 1
- sys.stdout.flush()
- g.write('e\n')
-
- g.close()
- if not test:
- sts = os.waitpid(p.pid, 0)
-
+ def __init__(self, title, xlabel, ylabel, xsort = sorted, size = "1180,900", keytitle = None):
+ self.title = title
+ self.xlabel = xlabel
+ self.ylabel = ylabel
+ self.data_titles = []
+ self.datasets = []
+ self.xsort = xsort
+ self.xvalues = set([])
+ self.size = size
+ self.keytitle = keytitle
+
+ def xtics(self):
+ count = 1
+ tics = []
+ for label in self.xsort(self.xlabels):
+ tics.append('"%s" %d' % (label, count))
+ count += 1
+ return tics
+
+
+ def add_dataset(self, title, labeled_values):
+ """
+ Add a dataset (one plotted data line)
+
+ title: title of the dataset
+ labeled_values: dictionary of lists
+ { label : [value1, value2, ... ] , ... }
+ """
+ if not labeled_values:
+ raise "plotgraph:add_dataset - dataset was empty! %s" %\
+ title
+ self.data_titles.append(title)
+ data_points = {}
+ for label in labeled_values:
+ point = "%s %s" % avg_dev(labeled_values[label])
+ data_points[label] = point
+ self.xvalues.add(label)
+ self.datasets.append(data_points)
+
+
+ def plot(self, cgi_header = False, output = None, test = None):
+ if cgi_header:
+ print "Content-type: image/png\n"
+ sys.stdout.flush()
+ if test:
+ g = open(test, 'w')
+ else:
+ p = Popen("/usr/bin/gnuplot", stdin = subprocess.PIPE)
+ g = p.stdin
+ g.write('set terminal png size %s\n' % self.size)
+ if self.keytitle:
+ g.write('set key title "%s"\n' % self.keytitle)
+ g.write('set key outside\n') # outside right
+ else:
+ g.write('set key below\n')
+ g.write('set title "%s"\n' % self.title)
+ g.write('set xlabel "%s"\n' % self.xlabel)
+ g.write('set ylabel "%s"\n' % self.ylabel)
+ if output:
+ g.write('set output "%s"\n' % output)
+ g.write('set style data yerrorlines\n')
+ g.write('set grid\n')
+
+ self.xlabels = self.xsort(list(self.xvalues))
+
+ g.write('set xrange [0.5:%f]\n' % (len(self.xvalues)+0.5))
+ g.write('set xtics rotate (%s)\n' % ','.join(self.xtics()))
+
+ plot_lines = ['"-" title "%s"' % t for t in self.data_titles]
+ g.write('plot ' + ', '.join(plot_lines) + '\n')
+
+ for dataset in self.datasets:
+ count = 1
+ for label in self.xlabels:
+ if label in dataset:
+ data = dataset[label]
+ g.write("%d %s\n" % (count, str(data)))
+ count += 1
+ sys.stdout.flush()
+ g.write('e\n')
+
+ g.close()
+ if not test:
+ sts = os.waitpid(p.pid, 0)
diff --git a/tko/query_lib.py b/tko/query_lib.py
index 09b76352..35e72808 100644
--- a/tko/query_lib.py
+++ b/tko/query_lib.py
@@ -15,76 +15,75 @@ import display, frontend, db
db = db.db()
def dprint(str):
- pass
- #print "! %s<br>" % str
+ pass
+ #print "! %s<br>" % str
def parse_scrub_and_gen_condition(condition, valid_field_dict):
- me = parse_scrub_and_gen_condition # shorten the name
- compare_ops = {'=':'=', '<>':'<>', '==':'=', '!=':'<>', '>':'>',
- '<':'<', '>=':'>=', '<=':'<=', '~':'LIKE', '#':'REGEXP'}
+ me = parse_scrub_and_gen_condition # shorten the name
+ compare_ops = {'=':'=', '<>':'<>', '==':'=', '!=':'<>', '>':'>',
+ '<':'<', '>=':'>=', '<=':'<=', '~':'LIKE', '#':'REGEXP'}
- # strip white space
- condition = condition.strip()
+ # strip white space
+ condition = condition.strip()
- # ()'s
- #match = re.match(r'^[(](.+)[)]$', condition)
- #if match:
- # dprint("Matched () on %s" % condition)
- # depth = 0
- # for c in match.group(1):
- # if c == '(': depth += 1
- # if c == ')': depth -= 1
- # if depth < 0: break
- # dprint("Depth is %d" % depth)
- # if depth == 0:
- # dprint("Match...stripping ()'s")
- # return me(match.group(1), valid_field_dict)
+ # ()'s
+ #match = re.match(r'^[(](.+)[)]$', condition)
+ #if match:
+ # dprint("Matched () on %s" % condition)
+ # depth = 0
+ # for c in match.group(1):
+ # if c == '(': depth += 1
+ # if c == ')': depth -= 1
+ # if depth < 0: break
+ # dprint("Depth is %d" % depth)
+ # if depth == 0:
+ # dprint("Match...stripping ()'s")
+ # return me(match.group(1), valid_field_dict)
- # OR
- match = re.match(r'^(.+)[|](.+)$', condition)
- if match:
- dprint("Matched | on %s" % condition)
- (a_sql, a_values) = me(match.group(1), valid_field_dict)
- (b_sql, b_values) = me(match.group(2), valid_field_dict)
- return (" (%s) OR (%s) " % (a_sql, b_sql),
- a_values + b_values)
+ # OR
+ match = re.match(r'^(.+)[|](.+)$', condition)
+ if match:
+ dprint("Matched | on %s" % condition)
+ (a_sql, a_values) = me(match.group(1), valid_field_dict)
+ (b_sql, b_values) = me(match.group(2), valid_field_dict)
+ return (" (%s) OR (%s) " % (a_sql, b_sql),
+ a_values + b_values)
- # AND
- match = re.match(r'^(.+)[&](.+)$', condition)
- if match:
- dprint("Matched & on %s" % condition)
- (a_sql, a_values) = me(match.group(1), valid_field_dict)
- (b_sql, b_values) = me(match.group(2), valid_field_dict)
- return (" (%s) AND (%s) " % (a_sql, b_sql),
- a_values + b_values)
+ # AND
+ match = re.match(r'^(.+)[&](.+)$', condition)
+ if match:
+ dprint("Matched & on %s" % condition)
+ (a_sql, a_values) = me(match.group(1), valid_field_dict)
+ (b_sql, b_values) = me(match.group(2), valid_field_dict)
+ return (" (%s) AND (%s) " % (a_sql, b_sql),
+ a_values + b_values)
- # NOT
- #match = re.match(r'^[!](.+)$', condition)
- #if match:
- # dprint("Matched ! on %s" % condition)
- # (sql, values) = me(match.group(1), valid_field_dict)
- # return (" NOT (%s) " % (sql,), values)
+ # NOT
+ #match = re.match(r'^[!](.+)$', condition)
+ #if match:
+ # dprint("Matched ! on %s" % condition)
+ # (sql, values) = me(match.group(1), valid_field_dict)
+ # return (" NOT (%s) " % (sql,), values)
- # '<field> <op> <value>' where value can be quoted
- # double quotes are escaped....i.e. '''' is the same as "'"
- regex = r'^(%s)[ \t]*(%s)[ \t]*' + \
- r'(\'((\'\'|[^\'])*)\'|"((""|[^"])*)"|([^\'"].*))$'
- regex = regex % ('|'.join(valid_field_dict.keys()),
- '|'.join(compare_ops.keys()))
- match = re.match(regex, condition)
- if match:
- field = valid_field_dict[match.group(1)]
- op = compare_ops[match.group(2)]
- if match.group(5):
- val = match.group(4).replace("''", "'")
- elif match.group(7):
- val = match.group(6).replace('""', '"')
- elif match.group(8):
- val = match.group(8)
- else:
- raise "Internal error"
- return ("%s %s %%s" % (field, op), [val])
-
+ # '<field> <op> <value>' where value can be quoted
+ # double quotes are escaped....i.e. '''' is the same as "'"
+ regex = r'^(%s)[ \t]*(%s)[ \t]*' + \
+ r'(\'((\'\'|[^\'])*)\'|"((""|[^"])*)"|([^\'"].*))$'
+ regex = regex % ('|'.join(valid_field_dict.keys()),
+ '|'.join(compare_ops.keys()))
+ match = re.match(regex, condition)
+ if match:
+ field = valid_field_dict[match.group(1)]
+ op = compare_ops[match.group(2)]
+ if match.group(5):
+ val = match.group(4).replace("''", "'")
+ elif match.group(7):
+ val = match.group(6).replace('""', '"')
+ elif match.group(8):
+ val = match.group(8)
+ else:
+ raise "Internal error"
+ return ("%s %s %%s" % (field, op), [val])
- raise "Could not parse '%s' (%s)" % (condition, regex)
+ raise "Could not parse '%s' (%s)" % (condition, regex)
diff --git a/tko/reason_qualifier.py b/tko/reason_qualifier.py
index 29e50d75..6347a4f4 100755
--- a/tko/reason_qualifier.py
+++ b/tko/reason_qualifier.py
@@ -2,63 +2,60 @@ import re,string
class reason_counter:
- def __init__(self, wording):
- self.wording = wording
- self.num = 1
-
- def update(self, new_wording):
- self.num += 1
- self.wording = new_wording
+ def __init__(self, wording):
+ self.wording = wording
+ self.num = 1
- def html(self):
- if self.num == 1:
- return self.wording
- else:
- return "%s (%d+)" % (self.wording, self.num)
+ def update(self, new_wording):
+ self.num += 1
+ self.wording = new_wording
+
+ def html(self):
+ if self.num == 1:
+ return self.wording
+ else:
+ return "%s (%d+)" % (self.wording, self.num)
def numbers_are_irrelevant(txt):
- ## ? when do we replace numbers with NN ?
- ## By default is always, but
- ## if/when some categories of reasons choose to keep their numbers,
- ## then the function shall return False for such categories
- return True
+ ## When do we replace numbers with NN?
+ ## By default we always do, but
+ ## if/when some categories of reasons choose to keep their numbers,
+ ## then this function shall return False for those categories
+ return True
def aggregate_reason_fields(reasons_list):
- # each reason in the list may be a combination
- # of | - separated reasons.
- # expand into list
- reasons_txt = '|'.join(reasons_list)
- reasons = reasons_txt.split('|')
- reason_htable = {}
- for reason in reasons:
- reason_reduced = reason.strip()
- ## reduce whitespaces
- reason_reduced = re.sub(r"\s+"," ", reason_reduced)
-
- if reason_reduced == '':
- continue # ignore empty reasons
-
- if numbers_are_irrelevant(reason_reduced):
- # reduce numbers included into reason descriptor
- # by replacing them with generic NN
- reason_reduced = re.sub(r"\d+","NN", reason_reduced)
-
- if not reason_reduced in reason_htable:
- reason_htable[reason_reduced] = reason_counter(reason)
- else:
- ## reason_counter keeps original ( non reduced )
- ## reason if it occured once
- ## if reason occured more then once, reason_counter
- ## will keep it in reduced/generalized form
- reason_htable[reason_reduced].update(reason_reduced)
-
- generic_reasons = reason_htable.keys()
- generic_reasons.sort(key = (lambda k: reason_htable[k].num),
- reverse = True)
- return map(lambda generic_reason: reason_htable[generic_reason].html(),
- generic_reasons)
-
-
-
+ # each reason in the list may be a combination
+ # of | - separated reasons.
+ # expand into list
+ reasons_txt = '|'.join(reasons_list)
+ reasons = reasons_txt.split('|')
+ reason_htable = {}
+ for reason in reasons:
+ reason_reduced = reason.strip()
+ ## collapse repeated whitespace into a single space
+ reason_reduced = re.sub(r"\s+"," ", reason_reduced)
+
+ if reason_reduced == '':
+ continue # ignore empty reasons
+
+ if numbers_are_irrelevant(reason_reduced):
+ # reduce numbers included into reason descriptor
+ # by replacing them with generic NN
+ reason_reduced = re.sub(r"\d+","NN", reason_reduced)
+
+ if not reason_reduced in reason_htable:
+ reason_htable[reason_reduced] = reason_counter(reason)
+ else:
+ ## reason_counter keeps the original (non-reduced)
+ ## reason if it occurred only once;
+ ## if a reason occurred more than once, reason_counter
+ ## will keep it in reduced/generalized form
+ reason_htable[reason_reduced].update(reason_reduced)
+
+ generic_reasons = reason_htable.keys()
+ generic_reasons.sort(key = (lambda k: reason_htable[k].num),
+ reverse = True)
+ return map(lambda generic_reason: reason_htable[generic_reason].html(),
+ generic_reasons)
diff --git a/tko/retrieve_jobs b/tko/retrieve_jobs
index 196a9d07..58985c6d 100755
--- a/tko/retrieve_jobs
+++ b/tko/retrieve_jobs
@@ -1,11 +1,11 @@
#!/usr/bin/python
import sys, db
-try:
- arg = sys.argv[1]
+try:
+ arg = sys.argv[1]
except:
- arg = ''
+ arg = ''
db = db.db()
for record in db.select('* from jobs ' + arg):
- print record
+ print record
diff --git a/tko/status_lib.py b/tko/status_lib.py
index 4ddfa5aa..ab5c6440 100644
--- a/tko/status_lib.py
+++ b/tko/status_lib.py
@@ -4,67 +4,67 @@ from autotest_lib.client.common_lib import logging
class status_stack(object):
- statuses = logging.job_statuses
+ statuses = logging.job_statuses
- def __init__(self):
- self.status_stack = [self.statuses[-1]]
+ def __init__(self):
+ self.status_stack = [self.statuses[-1]]
- def current_status(self):
- return self.status_stack[-1]
+ def current_status(self):
+ return self.status_stack[-1]
- def update(self, new_status):
- if new_status not in self.statuses:
- return
- old = self.statuses.index(self.current_status())
- new = self.statuses.index(new_status)
- if new < old:
- self.status_stack[-1] = new_status
+ def update(self, new_status):
+ if new_status not in self.statuses:
+ return
+ old = self.statuses.index(self.current_status())
+ new = self.statuses.index(new_status)
+ if new < old:
+ self.status_stack[-1] = new_status
- def start(self):
- self.status_stack.append(self.statuses[-1])
+ def start(self):
+ self.status_stack.append(self.statuses[-1])
- def end(self):
- result = self.status_stack.pop()
- if len(self.status_stack) == 0:
- self.status_stack.append(self.statuses[-1])
- return result
+ def end(self):
+ result = self.status_stack.pop()
+ if len(self.status_stack) == 0:
+ self.status_stack.append(self.statuses[-1])
+ return result
- def size(self):
- return len(self.status_stack) - 1
+ def size(self):
+ return len(self.status_stack) - 1
class line_buffer(object):
- def __init__(self):
- self.buffer = collections.deque()
+ def __init__(self):
+ self.buffer = collections.deque()
- def get(self):
- return self.buffer.pop()
+ def get(self):
+ return self.buffer.pop()
- def put(self, line):
- self.buffer.appendleft(line)
+ def put(self, line):
+ self.buffer.appendleft(line)
- def put_multiple(self, lines):
- self.buffer.extendleft(lines)
+ def put_multiple(self, lines):
+ self.buffer.extendleft(lines)
- def put_back(self, line):
- self.buffer.append(line)
+ def put_back(self, line):
+ self.buffer.append(line)
- def size(self):
- return len(self.buffer)
+ def size(self):
+ return len(self.buffer)
def parser(version):
- library = "autotest_lib.tko.parsers.version_%d" % version
- module = __import__(library, globals(), locals(), ["parser"])
- return module.parser()
+ library = "autotest_lib.tko.parsers.version_%d" % version
+ module = __import__(library, globals(), locals(), ["parser"])
+ return module.parser()
diff --git a/tko/status_lib_unittest.py b/tko/status_lib_unittest.py
index e8c15f2a..2378f97d 100644
--- a/tko/status_lib_unittest.py
+++ b/tko/status_lib_unittest.py
@@ -7,168 +7,168 @@ from autotest_lib.client.common_lib import logging
class line_buffer_test(unittest.TestCase):
- def test_get_empty(self):
- buf = status_lib.line_buffer()
- self.assertRaises(IndexError, buf.get)
-
-
- def test_get_single(self):
- buf = status_lib.line_buffer()
- buf.put("single line")
- self.assertEquals(buf.get(), "single line")
- self.assertRaises(IndexError, buf.get)
-
-
- def test_is_fifo(self):
- buf = status_lib.line_buffer()
- lines = ["line #%d" for x in xrange(10)]
- for line in lines:
- buf.put(line)
- results = []
- while buf.size():
- results.append(buf.get())
- self.assertEquals(lines, results)
-
-
- def test_put_multiple_same_as_multiple_puts(self):
- buf_put, buf_multi = [status_lib.line_buffer()
- for x in xrange(2)]
- lines = ["line #%d" % x for x in xrange(10)]
- for line in lines:
- buf_put.put(line)
- buf_multi.put_multiple(lines)
- counter = 0
- while buf_put.size():
- self.assertEquals(buf_put.size(), buf_multi.size())
- line = "line #%d" % counter
- self.assertEquals(buf_put.get(), line)
- self.assertEquals(buf_multi.get(), line)
- counter += 1
-
-
- def test_put_back_is_lifo(self):
- buf = status_lib.line_buffer()
- lines = ["1", "2", "3"]
- for line in lines:
- buf.put(line)
- results = []
- results.append(buf.get())
- buf.put_back("1")
- buf.put_back("0")
- while buf.size():
- results.append(buf.get())
- self.assertEquals(results, ["1", "0", "1", "2", "3"])
-
-
- def test_size_increased_by_put(self):
- buf = status_lib.line_buffer()
- self.assertEquals(buf.size(), 0)
- buf.put("1")
- buf.put("2")
- self.assertEquals(buf.size(), 2)
- buf.put("3")
- self.assertEquals(buf.size(), 3)
-
-
- def test_size_increased_by_put(self):
- buf = status_lib.line_buffer()
- self.assertEquals(buf.size(), 0)
- buf.put("1")
- buf.put("2")
- self.assertEquals(buf.size(), 2)
- buf.put("3")
- self.assertEquals(buf.size(), 3)
-
-
- def test_size_decreased_by_get(self):
- buf = status_lib.line_buffer()
- buf.put("1")
- buf.put("2")
- buf.put("3")
- self.assertEquals(buf.size(), 3)
- buf.get()
- self.assertEquals(buf.size(), 2)
- buf.get()
- buf.get()
- self.assertEquals(buf.size(), 0)
+ def test_get_empty(self):
+ buf = status_lib.line_buffer()
+ self.assertRaises(IndexError, buf.get)
+
+
+ def test_get_single(self):
+ buf = status_lib.line_buffer()
+ buf.put("single line")
+ self.assertEquals(buf.get(), "single line")
+ self.assertRaises(IndexError, buf.get)
+
+
+ def test_is_fifo(self):
+ buf = status_lib.line_buffer()
+ lines = ["line #%d" % x for x in xrange(10)]
+ for line in lines:
+ buf.put(line)
+ results = []
+ while buf.size():
+ results.append(buf.get())
+ self.assertEquals(lines, results)
+
+
+ def test_put_multiple_same_as_multiple_puts(self):
+ buf_put, buf_multi = [status_lib.line_buffer()
+ for x in xrange(2)]
+ lines = ["line #%d" % x for x in xrange(10)]
+ for line in lines:
+ buf_put.put(line)
+ buf_multi.put_multiple(lines)
+ counter = 0
+ while buf_put.size():
+ self.assertEquals(buf_put.size(), buf_multi.size())
+ line = "line #%d" % counter
+ self.assertEquals(buf_put.get(), line)
+ self.assertEquals(buf_multi.get(), line)
+ counter += 1
+
+
+ def test_put_back_is_lifo(self):
+ buf = status_lib.line_buffer()
+ lines = ["1", "2", "3"]
+ for line in lines:
+ buf.put(line)
+ results = []
+ results.append(buf.get())
+ buf.put_back("1")
+ buf.put_back("0")
+ while buf.size():
+ results.append(buf.get())
+ self.assertEquals(results, ["1", "0", "1", "2", "3"])
+
+
+ def test_size_increased_by_put(self):
+ buf = status_lib.line_buffer()
+ self.assertEquals(buf.size(), 0)
+ buf.put("1")
+ buf.put("2")
+ self.assertEquals(buf.size(), 2)
+ buf.put("3")
+ self.assertEquals(buf.size(), 3)
+
+
+ def test_size_increased_by_put(self):
+ buf = status_lib.line_buffer()
+ self.assertEquals(buf.size(), 0)
+ buf.put("1")
+ buf.put("2")
+ self.assertEquals(buf.size(), 2)
+ buf.put("3")
+ self.assertEquals(buf.size(), 3)
+
+
+ def test_size_decreased_by_get(self):
+ buf = status_lib.line_buffer()
+ buf.put("1")
+ buf.put("2")
+ buf.put("3")
+ self.assertEquals(buf.size(), 3)
+ buf.get()
+ self.assertEquals(buf.size(), 2)
+ buf.get()
+ buf.get()
+ self.assertEquals(buf.size(), 0)
class status_stack_test(unittest.TestCase):
- statuses = logging.job_statuses
-
- def test_default_to_nostatus(self):
- stack = status_lib.status_stack()
- self.assertEquals(stack.current_status(), "NOSTATUS")
-
-
- def test_default_on_start_to_nostatus(self):
- stack = status_lib.status_stack()
- stack.update("FAIL")
- stack.start()
- self.assertEquals(stack.current_status(), "NOSTATUS")
-
-
- def test_size_always_at_least_zero(self):
- stack = status_lib.status_stack()
- self.assertEquals(stack.size(), 0)
- stack.start()
- stack.end()
- self.assertEquals(stack.size(), 0)
- stack.end()
- self.assertEquals(stack.size(), 0)
-
-
- def test_anything_overrides_nostatus(self):
- for status in self.statuses:
- stack = status_lib.status_stack()
- stack.update(status)
- self.assertEquals(stack.current_status(), status)
-
-
- def test_worse_overrides_better(self):
- for i in xrange(len(self.statuses)):
- worse_status = self.statuses[i]
- for j in xrange(i + 1, len(self.statuses)):
- stack = status_lib.status_stack()
- better_status = self.statuses[j]
- stack.update(better_status)
- stack.update(worse_status)
- self.assertEquals(stack.current_status(),
- worse_status)
-
-
- def test_better_never_overrides_better(self):
- for i in xrange(len(self.statuses)):
- better_status = self.statuses[i]
- for j in xrange(i):
- stack = status_lib.status_stack()
- worse_status = self.statuses[j]
- stack.update(worse_status)
- stack.update(better_status)
- self.assertEquals(stack.current_status(),
- worse_status)
-
-
- def test_stack_is_lifo(self):
- stack = status_lib.status_stack()
- stack.update("GOOD")
- stack.start()
- stack.update("FAIL")
- stack.start()
- stack.update("WARN")
- self.assertEquals(stack.end(), "WARN")
- self.assertEquals(stack.end(), "FAIL")
- self.assertEquals(stack.end(), "GOOD")
- self.assertEquals(stack.end(), "NOSTATUS")
+ statuses = logging.job_statuses
+
+ def test_default_to_nostatus(self):
+ stack = status_lib.status_stack()
+ self.assertEquals(stack.current_status(), "NOSTATUS")
+
+
+ def test_default_on_start_to_nostatus(self):
+ stack = status_lib.status_stack()
+ stack.update("FAIL")
+ stack.start()
+ self.assertEquals(stack.current_status(), "NOSTATUS")
+
+
+ def test_size_always_at_least_zero(self):
+ stack = status_lib.status_stack()
+ self.assertEquals(stack.size(), 0)
+ stack.start()
+ stack.end()
+ self.assertEquals(stack.size(), 0)
+ stack.end()
+ self.assertEquals(stack.size(), 0)
+
+
+ def test_anything_overrides_nostatus(self):
+ for status in self.statuses:
+ stack = status_lib.status_stack()
+ stack.update(status)
+ self.assertEquals(stack.current_status(), status)
+
+
+ def test_worse_overrides_better(self):
+ for i in xrange(len(self.statuses)):
+ worse_status = self.statuses[i]
+ for j in xrange(i + 1, len(self.statuses)):
+ stack = status_lib.status_stack()
+ better_status = self.statuses[j]
+ stack.update(better_status)
+ stack.update(worse_status)
+ self.assertEquals(stack.current_status(),
+ worse_status)
+
+
+ def test_better_never_overrides_better(self):
+ for i in xrange(len(self.statuses)):
+ better_status = self.statuses[i]
+ for j in xrange(i):
+ stack = status_lib.status_stack()
+ worse_status = self.statuses[j]
+ stack.update(worse_status)
+ stack.update(better_status)
+ self.assertEquals(stack.current_status(),
+ worse_status)
+
+
+ def test_stack_is_lifo(self):
+ stack = status_lib.status_stack()
+ stack.update("GOOD")
+ stack.start()
+ stack.update("FAIL")
+ stack.start()
+ stack.update("WARN")
+ self.assertEquals(stack.end(), "WARN")
+ self.assertEquals(stack.end(), "FAIL")
+ self.assertEquals(stack.end(), "GOOD")
+ self.assertEquals(stack.end(), "NOSTATUS")
class parser_test(unittest.TestCase):
- available_versions = [0, 1]
- def test_can_import_available_versions(self):
- for version in self.available_versions:
- p = status_lib.parser(0)
- self.assertNotEqual(p, None)
+ available_versions = [0, 1]
+ def test_can_import_available_versions(self):
+ for version in self.available_versions:
+ p = status_lib.parser(version)
+ self.assertNotEqual(p, None)
if __name__ == "__main__":
- unittest.main()
+ unittest.main()
diff --git a/tko/unique_cookie.py b/tko/unique_cookie.py
index 6aed57d8..292d12ae 100644
--- a/tko/unique_cookie.py
+++ b/tko/unique_cookie.py
@@ -4,32 +4,29 @@ import os, random
def unique_id(cookie_key):
- """
- Find out if remote caller has cookie set on the key.
- If not, set cookie on client side: evaluate this key by a random string.
- ( unique user identifier )
- In both scenarios return value of the cookie, be it old or newly set one
- """
- uid = ''
- ## try to retrieve uid from Cookie
- if 'HTTP_COOKIE' in os.environ:
- ## parse os.environ['HTTP_COOKIE']
- cookies = os.environ['HTTP_COOKIE'].split(';')
- key = '%s=' % cookie_key
- uid_cookies = [c for c in cookies if c.strip().startswith(key)]
-
- if uid_cookies:
- assert(len(uid_cookies) == 1)
- uid_cookie = uid_cookies[0]
- uid = uid_cookie.replace(key, '')
-
- if not uid:
- uid = str(random.random())[2:16] # random string of 14 digits
- set_cookie_statement = 'Set-Cookie:%s=%s;' % (cookie_key, uid)
- set_cookie_statement += 'expires=Thu, 26-Dec-2013 22:03:25 GMT;'
- print set_cookie_statement
-
- return uid
-
-
-
+ """
+ Find out whether the remote caller has a cookie set for the key.
+ If not, set a cookie on the client side, assigning the key a random
+ string (a unique user identifier).
+ In both scenarios return the value of the cookie, whether old or newly set.
+ """
+ uid = ''
+ ## try to retrieve uid from Cookie
+ if 'HTTP_COOKIE' in os.environ:
+ ## parse os.environ['HTTP_COOKIE']
+ cookies = os.environ['HTTP_COOKIE'].split(';')
+ key = '%s=' % cookie_key
+ uid_cookies = [c for c in cookies if c.strip().startswith(key)]
+
+ if uid_cookies:
+ assert(len(uid_cookies) == 1)
+ uid_cookie = uid_cookies[0]
+ uid = uid_cookie.replace(key, '')
+
+ if not uid:
+ uid = str(random.random())[2:16] # random string of 14 digits
+ set_cookie_statement = 'Set-Cookie:%s=%s;' % (cookie_key, uid)
+ set_cookie_statement += 'expires=Thu, 26-Dec-2013 22:03:25 GMT;'
+ print set_cookie_statement
+
+ return uid
diff --git a/tko/utils.py b/tko/utils.py
index 42815b9a..82d92166 100644
--- a/tko/utils.py
+++ b/tko/utils.py
@@ -3,16 +3,16 @@ import sys, datetime
_debug_logger = sys.stderr
def dprint(msg):
- print >> _debug_logger, msg
+ print >> _debug_logger, msg
def redirect_parser_debugging(ostream):
- global _debug_logger
- _debug_logger = ostream
+ global _debug_logger
+ _debug_logger = ostream
def get_timestamp(mapping, field):
- val = mapping.get(field, None)
- if val is not None:
- val = datetime.datetime.fromtimestamp(int(val))
- return val
+ val = mapping.get(field, None)
+ if val is not None:
+ val = datetime.datetime.fromtimestamp(int(val))
+ return val
diff --git a/tko/utils_unittest.py b/tko/utils_unittest.py
index 9368f959..8e9d8e6f 100644
--- a/tko/utils_unittest.py
+++ b/tko/utils_unittest.py
@@ -7,38 +7,38 @@ from autotest_lib.tko import utils
class get_timestamp_test(unittest.TestCase):
- def testZeroTime(self):
- date = utils.get_timestamp({"key": "0"}, "key")
- timezone = datetime.timedelta(seconds=time.timezone)
- utc_date = date + timezone
- # should be equal to epoch, i.e. Jan 1, 1970
- self.assertEquals(utc_date.year, 1970)
- self.assertEquals(utc_date.month, 1)
- self.assertEquals(utc_date.day, 1)
- self.assertEquals(utc_date.hour, 0)
- self.assertEquals(utc_date.minute, 0)
- self.assertEquals(utc_date.second, 0)
- self.assertEquals(utc_date.microsecond, 0)
+ def testZeroTime(self):
+ date = utils.get_timestamp({"key": "0"}, "key")
+ timezone = datetime.timedelta(seconds=time.timezone)
+ utc_date = date + timezone
+ # should be equal to epoch, i.e. Jan 1, 1970
+ self.assertEquals(utc_date.year, 1970)
+ self.assertEquals(utc_date.month, 1)
+ self.assertEquals(utc_date.day, 1)
+ self.assertEquals(utc_date.hour, 0)
+ self.assertEquals(utc_date.minute, 0)
+ self.assertEquals(utc_date.second, 0)
+ self.assertEquals(utc_date.microsecond, 0)
- def test_returns_none_on_missing_value(self):
- date = utils.get_timestamp({}, "missing_key")
- self.assertEquals(date, None)
+ def test_returns_none_on_missing_value(self):
+ date = utils.get_timestamp({}, "missing_key")
+ self.assertEquals(date, None)
- def test_fails_on_non_integer_values(self):
- self.assertRaises(ValueError, utils.get_timestamp,
- {"key": "zero"}, "key")
+ def test_fails_on_non_integer_values(self):
+ self.assertRaises(ValueError, utils.get_timestamp,
+ {"key": "zero"}, "key")
- def test_date_can_be_string_or_integer(self):
- int_times = [1, 12, 123, 1234, 12345, 123456]
- str_times = [str(t) for t in int_times]
- for int_t, str_t in itertools.izip(int_times, str_times):
- date_int = utils.get_timestamp({"key": int_t}, "key")
- date_str = utils.get_timestamp({"key": str_t}, "key")
- self.assertEquals(date_int, date_str)
+ def test_date_can_be_string_or_integer(self):
+ int_times = [1, 12, 123, 1234, 12345, 123456]
+ str_times = [str(t) for t in int_times]
+ for int_t, str_t in itertools.izip(int_times, str_times):
+ date_int = utils.get_timestamp({"key": int_t}, "key")
+ date_str = utils.get_timestamp({"key": str_t}, "key")
+ self.assertEquals(date_int, date_str)
if __name__ == "__main__":
- unittest.main()
+ unittest.main()
diff --git a/tko/vertical_text.py b/tko/vertical_text.py
index 4173411d..371de1cc 100755
--- a/tko/vertical_text.py
+++ b/tko/vertical_text.py
@@ -39,4 +39,3 @@ def simple():
f.close()
simple()
-
diff --git a/ui/dialog.py b/ui/dialog.py
index f5aa4ad7..27131c72 100644
--- a/ui/dialog.py
+++ b/ui/dialog.py
@@ -134,7 +134,7 @@ operation" (e.g., a system call) that should work in "normal" situations.
"""
ExceptionShortDescription = "System error"
-
+
class PythonDialogIOError(PythonDialogSystemError):
"""Exception raised when pythondialog catches an IOError exception that \
should be passed to the calling program."""
@@ -244,7 +244,7 @@ _common_args_syntax = {
"title": lambda title: ("--title", title),
"trim": lambda enable: _simple_option("--trim", enable),
"version": lambda enable: _simple_option("--version", enable)}
-
+
def _simple_option(option, enable):
"""Turn on or off the simplest dialog Common Options."""
@@ -298,7 +298,7 @@ def _path_to_executable(f):
ExecutableNotFound
PythonDialogOSError
-
+
"""
try:
if '/' in f:
@@ -570,7 +570,7 @@ class Dialog:
ExecutableNotFound
PythonDialogOSError
- """
+ """
# DIALOGRC differs from the other DIALOG* variables in that:
# 1. It should be a string if not None
# 2. We may very well want it to be unset
@@ -612,18 +612,18 @@ class Dialog:
This method is obsolete. Please remove calls to it from your
programs.
- """
- self.add_persistent_args(("--backtitle", text))
+ """
+ self.add_persistent_args(("--backtitle", text))
def _call_program(self, redirect_child_stdin, cmdargs, **kwargs):
- """Do the actual work of invoking the dialog-like program.
+ """Do the actual work of invoking the dialog-like program.
Communication with the dialog-like program is performed
through one or two pipes, depending on
`redirect_child_stdin'. There is always one pipe that is
created to allow the parent process to read what dialog
writes on its standard error stream.
-
+
If `redirect_child_stdin' is True, an additional pipe is
created whose reading end is connected to dialog's standard
input. This is used by the gauge widget to feed data to
@@ -771,7 +771,7 @@ class Dialog:
"be the exit status of the dialog-like program, for some "
"unknown reason (-> probably a bug in the dialog-like "
"program); otherwise, we have probably found a python bug")
-
+
# We might want to check here whether exit_code is really one of
# DIALOG_OK, DIALOG_CANCEL, etc. However, I prefer not doing it
# because it would break pythondialog for no strong reason when new
@@ -806,7 +806,7 @@ class Dialog:
return (exit_code, child_output)
def _perform(self, cmdargs, **kwargs):
- """Perform a complete dialog-like program invocation.
+ """Perform a complete dialog-like program invocation.
This function invokes the dialog-like program, waits for its
termination and returns its exit status and whatever it wrote
@@ -823,7 +823,7 @@ class Dialog:
(exit_code, output) = \
self._wait_for_program_termination(child_pid,
child_rfd)
- return (exit_code, output)
+ return (exit_code, output)
def _strip_xdialog_newline(self, output):
"""Remove trailing newline (if any), if using Xdialog"""
@@ -833,18 +833,18 @@ class Dialog:
# This is for compatibility with the old dialog.py
def _perform_no_options(self, cmd):
- """Call dialog without passing any more options."""
- return os.system(self._dialog_prg + ' ' + cmd)
+ """Call dialog without passing any more options."""
+ return os.system(self._dialog_prg + ' ' + cmd)
# For compatibility with the old dialog.py
def clear(self):
- """Clear the screen. Equivalent to the dialog --clear option.
+ """Clear the screen. Equivalent to the dialog --clear option.
This method is obsolete. Please remove calls to it from your
programs.
- """
- self._perform_no_options('--clear')
+ """
+ self._perform_no_options('--clear')
def calendar(self, text, height=6, width=0, day=0, month=0, year=0,
**kwargs):
@@ -857,7 +857,7 @@ class Dialog:
month -- initial month displayed
year -- initial year selected (0 causes the current date
to be used as the initial date)
-
+
A calendar box displays month, day and year in separately
adjustable windows. If the values for day, month or year are
missing or negative, the current date's corresponding values
@@ -878,8 +878,8 @@ class Dialog:
- UnexpectedDialogOutput
- PythonDialogReModuleError
- """
- (code, output) = self._perform(
+ """
+ (code, output) = self._perform(
*(["--calendar", text, str(height), str(width), str(day),
str(month), str(year)],),
**kwargs)
@@ -888,7 +888,7 @@ class Dialog:
mo = _calendar_date_rec.match(output)
except re.error, v:
raise PythonDialogReModuleError(v)
-
+
if mo is None:
raise UnexpectedDialogOutput(
"the dialog-like program returned the following "
@@ -900,7 +900,7 @@ class Dialog:
def checklist(self, text, height=15, width=54, list_height=7,
choices=[], **kwargs):
- """Display a checklist box.
+ """Display a checklist box.
text -- text to display in the box
height -- height of the box
@@ -937,7 +937,7 @@ class Dialog:
# double-quote).
kwargs["separate_output"] = True
- (code, output) = self._perform(*(cmd,), **kwargs)
+ (code, output) = self._perform(*(cmd,), **kwargs)
# Since we used --separate-output, the tags are separated by a newline
# in the output. There is also a final newline after the last tag.
@@ -952,7 +952,7 @@ class Dialog:
filepath -- initial file path
height -- height of the box
width -- width of the box
-
+
The file-selection dialog displays a text-entry window in
which you can type a filename (or directory), and above that
two windows with directory names and filenames.
@@ -979,22 +979,22 @@ class Dialog:
exit status (an integer) of the dialog-like program and
`path' is the path chosen by the user (whose last element may
be a directory or a file).
-
+
Notable exceptions:
any exception raised by self._perform()
- """
+ """
(code, output) = self._perform(
*(["--fselect", filepath, str(height), str(width)],),
**kwargs)
output = self._strip_xdialog_newline(output)
-
- return (code, output)
-
+
+ return (code, output)
+
def gauge_start(self, text="", height=8, width=54, percent=0, **kwargs):
- """Display gauge box.
+ """Display gauge box.
text -- text to display in the box
height -- height of the box
@@ -1015,20 +1015,20 @@ class Dialog:
-------------------
Gauge typical usage (assuming that `d' is an instance of the
- Dialog class) looks like this:
- d.gauge_start()
- # do something
- d.gauge_update(10) # 10% of the whole task is done
- # ...
- d.gauge_update(100, "any text here") # work is done
- exit_code = d.gauge_stop() # cleanup actions
+ Dialog class) looks like this:
+ d.gauge_start()
+ # do something
+ d.gauge_update(10) # 10% of the whole task is done
+ # ...
+ d.gauge_update(100, "any text here") # work is done
+ exit_code = d.gauge_stop() # cleanup actions
Notable exceptions:
- any exception raised by self._call_program()
- PythonDialogOSError
- """
+ """
(child_pid, child_rfd, child_stdin_wfd) = self._call_program(
True,
*(["--gauge", text, str(height), str(width), str(percent)],),
@@ -1041,10 +1041,10 @@ class Dialog:
}
except os.error, v:
raise PythonDialogOSError(v.strerror)
-
+
def gauge_update(self, percent, text="", update_text=0):
- """Update a running gauge box.
-
+ """Update a running gauge box.
+
percent -- new percentage to show in the gauge meter
text -- new text to optionally display in the box
update-text -- boolean indicating whether to update the
@@ -1055,8 +1055,8 @@ class Dialog:
called previously). If update_text is true (for instance, 1),
the text displayed in the box is also updated.
- See the `gauge_start' function's documentation for
- information about how to use a gauge.
+ See the `gauge_start' function's documentation for
+ information about how to use a gauge.
Return value: undefined.
@@ -1064,28 +1064,28 @@ class Dialog:
is an I/O error while writing to the pipe
used to talk to the dialog-like program.
- """
- if update_text:
- gauge_data = "%d\nXXX\n%s\nXXX\n" % (percent, text)
- else:
- gauge_data = "%d\n" % percent
- try:
+ """
+ if update_text:
+ gauge_data = "%d\nXXX\n%s\nXXX\n" % (percent, text)
+ else:
+ gauge_data = "%d\n" % percent
+ try:
self._gauge_process["stdin"].write(gauge_data)
self._gauge_process["stdin"].flush()
except IOError, v:
raise PythonDialogIOError(v)
-
+
# For "compatibility" with the old dialog.py...
gauge_iterate = gauge_update
def gauge_stop(self):
- """Terminate a running gauge.
+ """Terminate a running gauge.
This function performs the appropriate cleanup actions to
terminate a running gauge (started with `gauge_start').
-
- See the `gauge_start' function's documentation for
- information about how to use a gauge.
+
+ See the `gauge_start' function's documentation for
+ information about how to use a gauge.
Return value: undefined.
@@ -1095,7 +1095,7 @@ class Dialog:
- PythonDialogIOError can be raised if closing the pipe
used to talk to the dialog-like program fails.
- """
+ """
p = self._gauge_process
# Close the pipe that we are using to feed dialog's stdin
try:
@@ -1129,8 +1129,8 @@ class Dialog:
any exception raised by self._perform()
- """
- return self._perform(
+ """
+ return self._perform(
*(["--infobox", text, str(height), str(width)],),
**kwargs)[0]
@@ -1157,14 +1157,14 @@ class Dialog:
any exception raised by self._perform()
- """
+ """
(code, tag) = self._perform(
*(["--inputbox", text, str(height), str(width), init],),
**kwargs)
tag = self._strip_xdialog_newline(tag)
-
- return (code, tag)
+
+ return (code, tag)
def menu(self, text, height=15, width=54, menu_height=7, choices=[],
**kwargs):
@@ -1246,14 +1246,14 @@ class Dialog:
any exception raised by self._perform()
- """
+ """
cmd = ["--menu", text, str(height), str(width), str(menu_height)]
for t in choices:
cmd.extend(t)
- (code, output) = self._perform(*(cmd,), **kwargs)
+ (code, output) = self._perform(*(cmd,), **kwargs)
output = self._strip_xdialog_newline(output)
-
+
if "help_button" in kwargs.keys() and output.startswith("HELP "):
return ("help", output[5:])
else:
@@ -1281,8 +1281,8 @@ class Dialog:
any exception raised by self._perform()
- """
- return self._perform(
+ """
+ return self._perform(
*(["--msgbox", text, str(height), str(width)],),
**kwargs)[0]
@@ -1311,8 +1311,8 @@ class Dialog:
any exception raised by self._perform()
- """
- (code, password) = self._perform(
+ """
+ (code, password) = self._perform(
*(["--passwordbox", text, str(height), str(width), init],),
**kwargs)
@@ -1322,7 +1322,7 @@ class Dialog:
def radiolist(self, text, height=15, width=54, list_height=7,
choices=[], **kwargs):
- """Display a radiolist box.
+ """Display a radiolist box.
text -- text to display in the box
height -- height of the box
@@ -1352,7 +1352,7 @@ class Dialog:
any exception raised by self._perform() or _to_onoff()
- """
+ """
cmd = ["--radiolist", text, str(height), str(width), str(list_height)]
for t in choices:
cmd.extend(((t[0], t[1], _to_onoff(t[2]))))
@@ -1360,11 +1360,11 @@ class Dialog:
(code, tag) = self._perform(*(cmd,), **kwargs)
tag = self._strip_xdialog_newline(tag)
-
- return (code, tag)
+
+ return (code, tag)
def scrollbox(self, text, height=20, width=78, **kwargs):
- """Display a string in a scrollable box.
+ """Display a string in a scrollable box.
text -- text to display in the box
height -- height of the box
@@ -1386,7 +1386,7 @@ class Dialog:
unfortunately not mentioned in its documentation, at
least in Python 2.3.3...)
- """
+ """
# In Python < 2.3, the standard library does not have
# tempfile.mkstemp(), and unfortunately, tempfile.mktemp() is
# insecure. So, I create a non-world-writable temporary directory and
@@ -1450,8 +1450,8 @@ class Dialog:
any exception raised by self._perform()
- """
- return self._perform(
+ """
+ return self._perform(
*(["--tailbox", filename, str(height), str(width)],),
**kwargs)[0]
# No tailboxbg widget, at least for now.
@@ -1480,12 +1480,12 @@ class Dialog:
any exception raised by self._perform()
- """
+ """
# This is for backward compatibility... not that it is
# stupid, but I prefer explicit programming.
if not "title" in kwargs.keys():
- kwargs["title"] = filename
- return self._perform(
+ kwargs["title"] = filename
+ return self._perform(
*(["--textbox", filename, str(height), str(width)],),
**kwargs)[0]
@@ -1499,7 +1499,7 @@ class Dialog:
hour -- initial hour selected
minute -- initial minute selected
second -- initial second selected
-
+
A dialog is displayed which allows you to select hour, minute
and second. If the values for hour, minute or second are
negative (or not explicitly provided, as they default to
@@ -1520,8 +1520,8 @@ class Dialog:
- PythonDialogReModuleError
- UnexpectedDialogOutput
- """
- (code, output) = self._perform(
+ """
+ (code, output) = self._perform(
*(["--timebox", text, str(height), str(width),
str(hour), str(minute), str(second)],),
**kwargs)
@@ -1565,7 +1565,7 @@ class Dialog:
any exception raised by self._perform()
- """
- return self._perform(
+ """
+ return self._perform(
*(["--yesno", text, str(height), str(width)],),
**kwargs)[0]
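
The Dialog wrappers above (menu, msgbox, infobox, gauge, ...) are typically driven the way ui/menu does further down; a short illustrative sketch (the widget text and choices here are made up):

import dialog

d = dialog.Dialog()
d.add_persistent_args(("--backtitle", "Autotest Control Center"))

(code, tag) = d.menu("Main menu",
                     choices=[("1", "Tests (local)"), ("2", "Profilers (local)")])
if code in (d.DIALOG_CANCEL, d.DIALOG_ESC):
    d.infobox("Cancelled")
else:
    d.msgbox("You picked option %s" % tag)
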
diff --git a/ui/menu b/ui/menu
index 01c4cede..d9a995b4 100755
--- a/ui/menu
+++ b/ui/menu
@@ -11,383 +11,383 @@ import os, sys, dircache, string, dialog, time
from menu_lib import *
def handle_exit_code(d, code):
- if code in (d.DIALOG_CANCEL, d.DIALOG_ESC):
- if d.yesno("Do you want to exit Autotest Control Center?") == d.DIALOG_OK:
- sys.exit(0)
- return 0
- else:
- return 1
+ if code in (d.DIALOG_CANCEL, d.DIALOG_ESC):
+ if d.yesno("Do you want to exit Autotest Control Center?") == d.DIALOG_OK:
+ sys.exit(0)
+ return 0
+ else:
+ return 1
def atcc_t_menu(test_type, t):
- u = atcc_list_control_files(test_type, at_dir)
+ u = atcc_list_control_files(test_type, at_dir)
- (code, tag) = d.checklist(text = test_type + ":", choices = u, title = t)
+ (code, tag) = d.checklist(text = test_type + ":", choices = u, title = t)
- return tag
+ return tag
def atcc_t_run(res, test_type):
- if len(res) == 0:
- return -1
+ if len(res) == 0:
+ return -1
- atcc_setup_tmp_dirs_files(menu_dir)
+ atcc_setup_tmp_dirs_files(menu_dir)
- for i in res:
- os.system(at_dir + '/bin/autotest ' + at_dir + '/' + test_type + '/' + i)
- atcc_save_results1(i, at_dir, menu_dir)
+ for i in res:
+ os.system(at_dir + '/bin/autotest ' + at_dir + '/' + test_type + '/' + i)
+ atcc_save_results1(i, at_dir, menu_dir)
def atcc_t_p_run(res, test_type):
- if len(res) == 0:
- return -1
-
- atcc_setup_tmp_dirs_files(menu_dir)
-
- file = (menu_dir + '/tmp/parallel')
- f = open(file, 'w')
-
- for i in res:
- z = i.split('/')
- line = ("def " + z[0] + "():\n")
- z = str(line)
- f.write(z)
-
- file = (at_dir + '/' + test_type + '/' + i)
- f2 = open(file, 'r')
- k = f2.readlines()
-
- for i in k:
- x = ("\t" + i + "\n")
- z = str(x)
- f.write(z)
-
- f2.close()
-
- f.write('job.parallel(')
-
- for i in range(len(res)):
- z = res[i].split('/')
- z = ('[' + z[0] + '],')
- z = str(z)
- f.write(z)
-
- f.write(')')
-
- f.close()
-
- os.system(at_dir + '/bin/autotest ' + menu_dir + '/tmp/parallel')
- atcc_save_results1("", at_dir, menu_dir)
+ if len(res) == 0:
+ return -1
+
+ atcc_setup_tmp_dirs_files(menu_dir)
+
+ file = (menu_dir + '/tmp/parallel')
+ f = open(file, 'w')
+
+ for i in res:
+ z = i.split('/')
+ line = ("def " + z[0] + "():\n")
+ z = str(line)
+ f.write(z)
+
+ file = (at_dir + '/' + test_type + '/' + i)
+ f2 = open(file, 'r')
+ k = f2.readlines()
+
+ for i in k:
+ x = ("\t" + i + "\n")
+ z = str(x)
+ f.write(z)
+
+ f2.close()
+
+ f.write('job.parallel(')
+
+ for i in range(len(res)):
+ z = res[i].split('/')
+ z = ('[' + z[0] + '],')
+ z = str(z)
+ f.write(z)
+
+ f.write(')')
+
+ f.close()
+
+ os.system(at_dir + '/bin/autotest ' + menu_dir + '/tmp/parallel')
+ atcc_save_results1("", at_dir, menu_dir)
def atcc_profilers_run(res1, res2):
- if len(res1) == 0 or len(res2) == 0:
- return -1
+ if len(res1) == 0 or len(res2) == 0:
+ return -1
- atcc_setup_tmp_dirs_files(menu_dir)
+ atcc_setup_tmp_dirs_files(menu_dir)
- file = (menu_dir + '/tmp/profilers')
- f = open(file, 'w')
+ file = (menu_dir + '/tmp/profilers')
+ f = open(file, 'w')
- f.write('for profiler in (')
+ f.write('for profiler in (')
- for i in range(len(res1)):
- z = res1[i].split('/')
- z = ('\'' + z[0] + '\', ')
- z = str(z)
- f.write(z)
+ for i in range(len(res1)):
+ z = res1[i].split('/')
+ z = ('\'' + z[0] + '\', ')
+ z = str(z)
+ f.write(z)
- f.write('):\n')
+ f.write('):\n')
- f.write('\ttry:\n')
- f.write('\t\tprint "Testing profiler %s ..." % profiler\n')
- f.write('\t\tjob.profilers.add(profiler)\n')
+ f.write('\ttry:\n')
+ f.write('\t\tprint "Testing profiler %s ..." % profiler\n')
+ f.write('\t\tjob.profilers.add(profiler)\n')
- for i in range(len(res2)):
- z = atcc_control_file_read(res2[i], 'tests', at_dir)
- n = res2[i].split('/')
- k = ('\t\tjob.run_test(\'' + n[0] + '\',' + z + ')\n')
- k = str(k)
- f.write(k)
+ for i in range(len(res2)):
+ z = atcc_control_file_read(res2[i], 'tests', at_dir)
+ n = res2[i].split('/')
+ k = ('\t\tjob.run_test(\'' + n[0] + '\',' + z + ')\n')
+ k = str(k)
+ f.write(k)
- f.write('\t\tjob.profilers.delete(profiler)\n')
- f.write('\texcept:\n')
- f.write('\t\tprint "Test of profiler %s failed" % profiler\n')
- f.write('\t\traise\n')
+ f.write('\t\tjob.profilers.delete(profiler)\n')
+ f.write('\texcept:\n')
+ f.write('\t\tprint "Test of profiler %s failed" % profiler\n')
+ f.write('\t\traise\n')
- f.close()
+ f.close()
- os.system(at_dir + '/bin/autotest ' + menu_dir + '/tmp/profilers')
- atcc_save_results2(res1, res2, at_dir, menu_dir)
+ os.system(at_dir + '/bin/autotest ' + menu_dir + '/tmp/profilers')
+ atcc_save_results2(res1, res2, at_dir, menu_dir)
def atcc_remote_get_ip():
- while 1:
- (code, answer) = d.inputbox("IP address of remote host", init = "192.168.0.1")
+ while 1:
+ (code, answer) = d.inputbox("IP address of remote host", init = "192.168.0.1")
- if code == d.DIALOG_CANCEL or code == d.DIALOG_ESC:
- break
- return answer
+ if code == d.DIALOG_CANCEL or code == d.DIALOG_ESC:
+ break
+ return answer
def atcc_t_remote_run(ip, res, test_type):
- if len(res) == 0:
- return -1
+ if len(res) == 0:
+ return -1
- atcc_setup_tmp_dirs_files(menu_dir)
+ atcc_setup_tmp_dirs_files(menu_dir)
- file = (menu_dir + '/tmp/remote_test')
- f = open(file, 'w')
+ file = (menu_dir + '/tmp/remote_test')
+ f = open(file, 'w')
- st = str("remote_host = hosts.SSHHost(\"" + ip +"\")")
- f.write(st + "\n")
- st = str("at = autotest.Autotest()")
- f.write(st + "\n")
+ st = str("remote_host = hosts.SSHHost(\"" + ip +"\")")
+ f.write(st + "\n")
+ st = str("at = autotest.Autotest()")
+ f.write(st + "\n")
- file2 = (menu_dir + '/tmp/remote_control')
- f2 = open(file2, 'w')
+ file2 = (menu_dir + '/tmp/remote_control')
+ f2 = open(file2, 'w')
- for i in res:
- print i
- file3 = (at_dir + '/' + test_type + '/' + i)
- f3 = open(file3, 'r')
- k = f3.readlines()
- for i in k:
- x = (i + "\n")
- z = str(x)
- f2.write(z)
- f3.close()
+ for i in res:
+ print i
+ file3 = (at_dir + '/' + test_type + '/' + i)
+ f3 = open(file3, 'r')
+ k = f3.readlines()
+ for i in k:
+ x = (i + "\n")
+ z = str(x)
+ f2.write(z)
+ f3.close()
- f2.close()
- st = str("at.run(\"" + menu_dir + "/tmp/remote_control\",\"results\", remote_host)")
- f.write(st + "\n")
- f.close()
+ f2.close()
+ st = str("at.run(\"" + menu_dir + "/tmp/remote_control\",\"results\", remote_host)")
+ f.write(st + "\n")
+ f.close()
- os.system(srv_dir + '/autoserv ' + menu_dir + '/tmp/remote_test')
+ os.system(srv_dir + '/autoserv ' + menu_dir + '/tmp/remote_test')
def atcc_t_p_remote_run(ip, res, test_type):
- if len(res) == 0:
- return -1
+ if len(res) == 0:
+ return -1
+
+ atcc_setup_tmp_dirs_files(menu_dir)
+
+ file = (menu_dir + '/tmp/remote_parallel_control')
+ f = open(file, 'w')
- atcc_setup_tmp_dirs_files(menu_dir)
+ for i in res:
+ z = i.split('/')
+ line = ("def " + z[0] + "():\n")
+ z = str(line)
+ f.write(z)
- file = (menu_dir + '/tmp/remote_parallel_control')
- f = open(file, 'w')
-
- for i in res:
- z = i.split('/')
- line = ("def " + z[0] + "():\n")
- z = str(line)
- f.write(z)
-
- file = (at_dir + '/' + test_type + '/' + i)
- f2 = open(file, 'r')
- k = f2.readlines()
+ file = (at_dir + '/' + test_type + '/' + i)
+ f2 = open(file, 'r')
+ k = f2.readlines()
- for i in k:
- x = ("\t" + i + "\n")
- z = str(x)
- f.write(z)
+ for i in k:
+ x = ("\t" + i + "\n")
+ z = str(x)
+ f.write(z)
- f2.close()
+ f2.close()
- f.write('job.parallel(')
+ f.write('job.parallel(')
- for i in range(len(res)):
- z = res[i].split('/')
- z = ('[' + z[0] + '],')
- z = str(z)
- f.write(z)
+ for i in range(len(res)):
+ z = res[i].split('/')
+ z = ('[' + z[0] + '],')
+ z = str(z)
+ f.write(z)
- f.write(')')
+ f.write(')')
- f.close()
+ f.close()
- file = (menu_dir + '/tmp/remote_parallel_test')
- f = open(file, 'w')
+ file = (menu_dir + '/tmp/remote_parallel_test')
+ f = open(file, 'w')
- st = str("remote_host = hosts.SSHHost(\"" + ip +"\")")
- f.write(st + "\n")
- st = str("at = autotest.Autotest()")
- f.write(st + "\n")
+ st = str("remote_host = hosts.SSHHost(\"" + ip +"\")")
+ f.write(st + "\n")
+ st = str("at = autotest.Autotest()")
+ f.write(st + "\n")
- st = str("at.run(\"" + menu_dir + "/tmp/remote_parallel_control\",\"results\", remote_host)")
- f.write(st + "\n")
- f.close()
+ st = str("at.run(\"" + menu_dir + "/tmp/remote_parallel_control\",\"results\", remote_host)")
+ f.write(st + "\n")
+ f.close()
- os.system(srv_dir + '/autoserv ' + menu_dir + '/tmp/remote_parallel_test')
+ os.system(srv_dir + '/autoserv ' + menu_dir + '/tmp/remote_parallel_test')
def atcc_profilers_remote_run(ip, res1, res2):
- if len(res1) == 0 or len(res2) == 0:
- return -1
+ if len(res1) == 0 or len(res2) == 0:
+ return -1
- atcc_setup_tmp_dirs_files(menu_dir)
+ atcc_setup_tmp_dirs_files(menu_dir)
- file = (menu_dir + '/tmp/remote_profilers_control')
- f = open(file, 'w')
+ file = (menu_dir + '/tmp/remote_profilers_control')
+ f = open(file, 'w')
- f.write('for profiler in (')
+ f.write('for profiler in (')
- for i in range(len(res1)):
- z = res1[i].split('/')
- z = ('\'' + z[0] + '\', ')
- z = str(z)
- f.write(z)
+ for i in range(len(res1)):
+ z = res1[i].split('/')
+ z = ('\'' + z[0] + '\', ')
+ z = str(z)
+ f.write(z)
- f.write('):\n')
+ f.write('):\n')
- f.write('\ttry:\n')
- f.write('\t\tprint "Testing profiler %s ..." % profiler\n')
- f.write('\t\tjob.profilers.add(profiler)\n')
+ f.write('\ttry:\n')
+ f.write('\t\tprint "Testing profiler %s ..." % profiler\n')
+ f.write('\t\tjob.profilers.add(profiler)\n')
- for i in range(len(res2)):
- z = atcc_control_file_read(res2[i], 'tests', at_dir)
- n = res2[i].split('/')
- k = ('\t\tjob.run_test(\'' + n[0] + '\',' + z + ')\n')
- k = str(k)
- f.write(k)
+ for i in range(len(res2)):
+ z = atcc_control_file_read(res2[i], 'tests', at_dir)
+ n = res2[i].split('/')
+ k = ('\t\tjob.run_test(\'' + n[0] + '\',' + z + ')\n')
+ k = str(k)
+ f.write(k)
- f.write('\t\tjob.profilers.delete(profiler)\n')
- f.write('\texcept:\n')
- f.write('\t\tprint "Test of profiler %s failed" % profiler\n')
- f.write('\t\traise\n')
+ f.write('\t\tjob.profilers.delete(profiler)\n')
+ f.write('\texcept:\n')
+ f.write('\t\tprint "Test of profiler %s failed" % profiler\n')
+ f.write('\t\traise\n')
- f.close()
+ f.close()
- file = (menu_dir + '/tmp/remote_profilers')
- f = open(file, 'w')
+ file = (menu_dir + '/tmp/remote_profilers')
+ f = open(file, 'w')
- st = str("remote_host = hosts.SSHHost(\"" + ip +"\")")
- f.write(st + "\n")
- st = str("at = autotest.Autotest()")
- f.write(st + "\n")
+ st = str("remote_host = hosts.SSHHost(\"" + ip +"\")")
+ f.write(st + "\n")
+ st = str("at = autotest.Autotest()")
+ f.write(st + "\n")
- st = str("at.run(\"" + menu_dir + "/tmp/remote_profilers_control\",\"results\", remote_host)")
- f.write(st + "\n")
- f.close()
+ st = str("at.run(\"" + menu_dir + "/tmp/remote_profilers_control\",\"results\", remote_host)")
+ f.write(st + "\n")
+ f.close()
- os.system(srv_dir + '/autoserv ' + menu_dir + '/tmp/remote_profilers')
+ os.system(srv_dir + '/autoserv ' + menu_dir + '/tmp/remote_profilers')
def atcc_tests_results(t):
- if os.path.exists(menu_dir + "/tmp/"):
- dir_ls = dircache.listdir(menu_dir + "/tmp/")
- else:
- d.infobox(menu_dir + "/tmp/ doesn't exist")
- time.sleep(5)
- return -1
+ if os.path.exists(menu_dir + "/tmp/"):
+ dir_ls = dircache.listdir(menu_dir + "/tmp/")
+ else:
+ d.infobox(menu_dir + "/tmp/ doesn't exist")
+ time.sleep(5)
+ return -1
- if len(dir_ls) == 0:
- return -1
+ if len(dir_ls) == 0:
+ return -1
- u = []
+ u = []
- for i in dir_ls:
- k = i, ""
- u.append(k)
+ for i in dir_ls:
+ k = i, ""
+ u.append(k)
- while 1:
- (code, tag) = d.menu("Results:", choices = u, title = t)
+ while 1:
+ (code, tag) = d.menu("Results:", choices = u, title = t)
- if code == d.DIALOG_CANCEL or code == d.DIALOG_ESC:
- break
- else:
- d.textbox(menu_dir + '/tmp/' + tag, width = -1)
+ if code == d.DIALOG_CANCEL or code == d.DIALOG_ESC:
+ break
+ else:
+ d.textbox(menu_dir + '/tmp/' + tag, width = -1)
def atcc_config_show_help(tag, test_type):
- tag = tag.split('/')
- tag = tag[0]
- if os.path.exists(at_dir + '/' + test_type + '/' + tag + '/help'):
- d.textbox(at_dir + '/' + test_type + '/' + tag + '/help', width = -1)
- else:
- d.infobox(at_dir + '/' + test_type + '/' + tag + '/help' " doesn't exist")
- time.sleep(5)
+ tag = tag.split('/')
+ tag = tag[0]
+ if os.path.exists(at_dir + '/' + test_type + '/' + tag + '/help'):
+ d.textbox(at_dir + '/' + test_type + '/' + tag + '/help', width = -1)
+ else:
+ d.infobox(at_dir + '/' + test_type + '/' + tag + '/help' " doesn't exist")
+ time.sleep(5)
def atcc_config_edit(tag, test_type, ed):
- os.system(ed + " " + at_dir + '/' + test_type + '/' + tag)
+ os.system(ed + " " + at_dir + '/' + test_type + '/' + tag)
def atcc_config_help_or_edit(tag, test_type):
- ed = "vim"
- while 1:
- (code, answer) = d.inputbox("Type 'help' to see documentation, or name of favourite text editor", init = ed)
-
- if code == d.DIALOG_CANCEL or code == d.DIALOG_ESC:
- break
- elif answer == "help":
- atcc_config_show_help(tag, test_type)
- continue
- else:
- ed = answer
- atcc_config_edit(tag, test_type, ed)
- break
+ ed = "vim"
+ while 1:
+ (code, answer) = d.inputbox("Type 'help' to see documentation, or name of favourite text editor", init = ed)
+
+ if code == d.DIALOG_CANCEL or code == d.DIALOG_ESC:
+ break
+ elif answer == "help":
+ atcc_config_show_help(tag, test_type)
+ continue
+ else:
+ ed = answer
+ atcc_config_edit(tag, test_type, ed)
+ break
def atcc_config_choose_control_file(test_type, t):
- u_tmp = atcc_list_control_files(test_type, at_dir)
- u = []
- for i in u_tmp:
- k = i[0], ""
- u.append(k)
+ u_tmp = atcc_list_control_files(test_type, at_dir)
+ u = []
+ for i in u_tmp:
+ k = i[0], ""
+ u.append(k)
- while 1:
- (code, tag) = d.menu(test_type + ":", choices = u, title = t)
+ while 1:
+ (code, tag) = d.menu(test_type + ":", choices = u, title = t)
- if code == d.DIALOG_CANCEL or code == d.DIALOG_ESC:
- break
- else:
- atcc_config_help_or_edit(tag, test_type)
+ if code == d.DIALOG_CANCEL or code == d.DIALOG_ESC:
+ break
+ else:
+ atcc_config_help_or_edit(tag, test_type)
def atcc_upgrade():
- os.system("svn checkout svn://test.kernel.org/autotest/trunk " + at_dir + "/../")
+ os.system("svn checkout svn://test.kernel.org/autotest/trunk " + at_dir + "/../")
def atcc_rsync_mirror():
- os.system(at_dir + "/../mirror/mirror")
+ os.system(at_dir + "/../mirror/mirror")
def atcc_main_menu():
- while 1:
- (code, tag) = d.menu("Main menu",
- choices = [("1", "Tests (local)"),
- ("2", "Parallel tests (local)"),
- ("3", "Profilers (local)"),
- ("4", "Tests (remote)"),
- ("5", "Parallel tests (remote)"),
- ("6", "Profilers (remote)"),
- ("7", "Tests' results"),
- ("8", "Configure tests"),
- ("9", "Upgrade Autotest"),
- ("10", "Sync local kernel.org mirror")])
- if handle_exit_code(d, code):
- break
- return tag
+ while 1:
+ (code, tag) = d.menu("Main menu",
+ choices = [("1", "Tests (local)"),
+ ("2", "Parallel tests (local)"),
+ ("3", "Profilers (local)"),
+ ("4", "Tests (remote)"),
+ ("5", "Parallel tests (remote)"),
+ ("6", "Profilers (remote)"),
+ ("7", "Tests' results"),
+ ("8", "Configure tests"),
+ ("9", "Upgrade Autotest"),
+ ("10", "Sync local kernel.org mirror")])
+ if handle_exit_code(d, code):
+ break
+ return tag
def main():
- while 1:
- res = int(atcc_main_menu())
- if res == 1:
- res = atcc_t_menu(test_type = 'tests', t = 'Tests selection menu')
- atcc_t_run(res, test_type = 'tests')
- elif res == 2:
- res = atcc_t_menu(test_type = 'tests', t = 'Parallel tests selection menu')
- atcc_t_p_run(res, test_type = 'tests')
- elif res == 3:
- res1 = atcc_t_menu(test_type = 'profilers', t = 'Profilers selection menu')
- res2 = atcc_t_menu(test_type = 'tests', t = 'Tests selection menu')
- atcc_profilers_run(res1, res2)
- elif res == 4:
- ip = atcc_remote_get_ip()
- res = atcc_t_menu(test_type = 'tests', t = 'Tests selection menu')
- atcc_t_remote_run(ip, res, test_type = 'tests')
- elif res == 5:
- ip = atcc_remote_get_ip()
- res = atcc_t_menu(test_type = 'tests', t = 'Parallel tests selection menu')
- atcc_t_p_remote_run(ip, res, test_type = 'tests')
- elif res == 6:
- ip = atcc_remote_get_ip()
- res1 = atcc_t_menu(test_type = 'profilers', t = 'Profilers selection menu')
- res2 = atcc_t_menu(test_type = 'tests', t = 'Tests selection menu')
- atcc_profilers_remote_run(ip, res1, res2)
- elif res == 7:
- atcc_tests_results(t = 'Tests\' results menu')
- elif res == 8:
- atcc_config_choose_control_file(test_type = 'tests', t = 'Tests configuration menu')
- elif res == 9:
- atcc_upgrade()
- elif res == 10:
- atcc_rsync_mirror()
- elif res == 0:
- sys.exit(1)
+ while 1:
+ res = int(atcc_main_menu())
+ if res == 1:
+ res = atcc_t_menu(test_type = 'tests', t = 'Tests selection menu')
+ atcc_t_run(res, test_type = 'tests')
+ elif res == 2:
+ res = atcc_t_menu(test_type = 'tests', t = 'Parallel tests selection menu')
+ atcc_t_p_run(res, test_type = 'tests')
+ elif res == 3:
+ res1 = atcc_t_menu(test_type = 'profilers', t = 'Profilers selection menu')
+ res2 = atcc_t_menu(test_type = 'tests', t = 'Tests selection menu')
+ atcc_profilers_run(res1, res2)
+ elif res == 4:
+ ip = atcc_remote_get_ip()
+ res = atcc_t_menu(test_type = 'tests', t = 'Tests selection menu')
+ atcc_t_remote_run(ip, res, test_type = 'tests')
+ elif res == 5:
+ ip = atcc_remote_get_ip()
+ res = atcc_t_menu(test_type = 'tests', t = 'Parallel tests selection menu')
+ atcc_t_p_remote_run(ip, res, test_type = 'tests')
+ elif res == 6:
+ ip = atcc_remote_get_ip()
+ res1 = atcc_t_menu(test_type = 'profilers', t = 'Profilers selection menu')
+ res2 = atcc_t_menu(test_type = 'tests', t = 'Tests selection menu')
+ atcc_profilers_remote_run(ip, res1, res2)
+ elif res == 7:
+ atcc_tests_results(t = 'Tests\' results menu')
+ elif res == 8:
+ atcc_config_choose_control_file(test_type = 'tests', t = 'Tests configuration menu')
+ elif res == 9:
+ atcc_upgrade()
+ elif res == 10:
+ atcc_rsync_mirror()
+ elif res == 0:
+ sys.exit(1)
check_python_version()
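
The parallel helpers above do not run tests directly: atcc_t_p_run() generates a temporary control file and hands it to bin/autotest, and atcc_profilers_run() builds a similar file that wraps job.profilers.add()/delete() around the selected tests. A sketch of the kind of file atcc_t_p_run() writes for two hypothetical selections (the job object is provided by autotest when the control file executes):

def testa():
    job.run_test('testa')

def testb():
    job.run_test('testb')

job.parallel([testa],[testb],)
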
diff --git a/ui/menu_lib.py b/ui/menu_lib.py
index 6435d558..e2f8c3cb 100644
--- a/ui/menu_lib.py
+++ b/ui/menu_lib.py
@@ -1,87 +1,87 @@
import os, sys, dircache, string, re
def check_python_version():
- version = sys.version_info[0:2]
- if version < (2, 4):
- print "Python 2.4 or newer is needed"
- sys.exit(1)
+ version = sys.version_info[0:2]
+ if version < (2, 4):
+ print "Python 2.4 or newer is needed"
+ sys.exit(1)
def atcc_list_control_files(test_type, at_dir):
- dir_ls = dircache.listdir(at_dir + '/' + test_type)
- u = []
- for i in dir_ls:
- if i != ".svn":
- dir_ls2 = dircache.listdir(at_dir + '/' + test_type + '/' + i)
- for j in dir_ls2:
- result = re.match("^control", j)
- if result != None:
- z = str(i + "/" + j)
- k = z, "", 0
- u.append(k)
+ dir_ls = dircache.listdir(at_dir + '/' + test_type)
+ u = []
+ for i in dir_ls:
+ if i != ".svn":
+ dir_ls2 = dircache.listdir(at_dir + '/' + test_type + '/' + i)
+ for j in dir_ls2:
+ result = re.match("^control", j)
+ if result != None:
+ z = str(i + "/" + j)
+ k = z, "", 0
+ u.append(k)
- return u
+ return u
def atcc_control_file_read(tag, test_type, at_dir):
- file = (at_dir + '/' + test_type + '/' + tag)
- f = open(file, 'r')
+ file = (at_dir + '/' + test_type + '/' + tag)
+ f = open(file, 'r')
- z = f.readline()
- z = z.lstrip("job.run_test(")
- z = z.rstrip('\n')
- z = z.rstrip(')')
- z = z.split(',')
+ z = f.readline()
+ z = z.lstrip("job.run_test(")
+ z = z.rstrip('\n')
+ z = z.rstrip(')')
+ z = z.split(',')
- x = len(z)
+ x = len(z)
- if x == 1:
- z = ""
- elif x > 1:
- z = z[1:]
- m = ""
- for i in z:
- m += (',' + i)
+ if x == 1:
+ z = ""
+ elif x > 1:
+ z = z[1:]
+ m = ""
+ for i in z:
+ m += (',' + i)
- m = m.lstrip(',')
- m = m.strip()
- z = str(m)
+ m = m.lstrip(',')
+ m = m.strip()
+ z = str(m)
- f.close()
+ f.close()
- return z
+ return z
def atcc_setup_tmp_dirs_files(menu_dir):
- if not os.path.isdir(menu_dir + '/tmp/'):
- os.mkdir(menu_dir + '/tmp/')
- if os.path.isfile(menu_dir + '/tmp/Tests results'):
- os.remove(menu_dir + '/tmp/Tests results')
- if os.path.isfile(menu_dir + '/tmp/Possible kernel memory leaks'):
- os.remove(menu_dir + '/tmp/Possible kernel memory leaks')
+ if not os.path.isdir(menu_dir + '/tmp/'):
+ os.mkdir(menu_dir + '/tmp/')
+ if os.path.isfile(menu_dir + '/tmp/Tests results'):
+ os.remove(menu_dir + '/tmp/Tests results')
+ if os.path.isfile(menu_dir + '/tmp/Possible kernel memory leaks'):
+ os.remove(menu_dir + '/tmp/Possible kernel memory leaks')
def atcc_save_results1(i, at_dir, menu_dir):
- if i != "":
- if os.path.isfile(at_dir + '/results/default/' + i + '/debug/stderr'):
- os.system('cp ' + at_dir + '/results/default/' + i + '/debug/stderr ' + menu_dir + '/tmp/' + i + '.stderr')
- if os.path.isfile(at_dir + '/results/default/' + i + '/debug/stdout'):
- os.system('cp ' + at_dir + '/results/default/' + i + '/debug/stdout ' + menu_dir + '/tmp/' + i + '.stdout')
- if os.path.isfile(at_dir + '/results/default/status'):
- os.system('cat ' + at_dir + '/results/default/status >> ' + menu_dir + '/tmp/Tests\ results')
- if os.path.isfile('/sys/kernel/debug/memleak'):
- print "Saving possible kernel memory leaks"
- os.system('echo "' + i + '" >> ' + menu_dir + '/tmp/Possible kernel memory leaks')
- os.system('cat /sys/kernel/debug/memleak >> ' + menu_dir + '/tmp/Possible kernel memory leaks')
+ if i != "":
+ if os.path.isfile(at_dir + '/results/default/' + i + '/debug/stderr'):
+ os.system('cp ' + at_dir + '/results/default/' + i + '/debug/stderr ' + menu_dir + '/tmp/' + i + '.stderr')
+ if os.path.isfile(at_dir + '/results/default/' + i + '/debug/stdout'):
+ os.system('cp ' + at_dir + '/results/default/' + i + '/debug/stdout ' + menu_dir + '/tmp/' + i + '.stdout')
+ if os.path.isfile(at_dir + '/results/default/status'):
+ os.system('cat ' + at_dir + '/results/default/status >> ' + menu_dir + '/tmp/Tests\ results')
+ if os.path.isfile('/sys/kernel/debug/memleak'):
+ print "Saving possible kernel memory leaks"
+ os.system('echo "' + i + '" >> ' + menu_dir + '/tmp/Possible kernel memory leaks')
+ os.system('cat /sys/kernel/debug/memleak >> ' + menu_dir + '/tmp/Possible kernel memory leaks')
def atcc_save_profilers_results(i, j, at_dir, menu_dir):
- if os.path.isfile(at_dir + '/results/default/' + j + '.' + i + '/profiling/monitor'):
- os.system('cp ' + at_dir + '/results/default/' + j + '.' + i + '/profiling/monitor ' + menu_dir + '/tmp/' + j + '.monitor')
- if os.path.isfile(at_dir + '/results/default/' + j + '.' + i + '/profiling/oprofile.kernel'):
- os.system('cp ' + at_dir + '/results/default/' + j + '.' + i + '/profiling/oprofile.kernel ' + menu_dir + '/tmp/' + j + '.oprofile.kernel')
- if os.path.isfile(at_dir + '/results/default/' + j + '.' + i + '/profiling/oprofile.user'):
- os.system('cp ' + at_dir + '/results/default/' + j + '.' + i + '/profiling/oprofile.user ' + menu_dir + '/tmp/' + j + '.oprofile.user')
+ if os.path.isfile(at_dir + '/results/default/' + j + '.' + i + '/profiling/monitor'):
+ os.system('cp ' + at_dir + '/results/default/' + j + '.' + i + '/profiling/monitor ' + menu_dir + '/tmp/' + j + '.monitor')
+ if os.path.isfile(at_dir + '/results/default/' + j + '.' + i + '/profiling/oprofile.kernel'):
+ os.system('cp ' + at_dir + '/results/default/' + j + '.' + i + '/profiling/oprofile.kernel ' + menu_dir + '/tmp/' + j + '.oprofile.kernel')
+ if os.path.isfile(at_dir + '/results/default/' + j + '.' + i + '/profiling/oprofile.user'):
+ os.system('cp ' + at_dir + '/results/default/' + j + '.' + i + '/profiling/oprofile.user ' + menu_dir + '/tmp/' + j + '.oprofile.user')
def atcc_save_results2(res1, res2, at_dir, menu_dir):
- if os.path.isfile(at_dir + '/results/default/status'):
- os.system('cat ' + at_dir + '/results/default/status >> ' + menu_dir + '/tmp/Tests\ results')
+ if os.path.isfile(at_dir + '/results/default/status'):
+ os.system('cat ' + at_dir + '/results/default/status >> ' + menu_dir + '/tmp/Tests\ results')
- for i in res1:
- for j in res2:
- atcc_save_profilers_results(i, j, at_dir, menu_dir)
+ for i in res1:
+ for j in res2:
+ atcc_save_profilers_results(i, j, at_dir, menu_dir)
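
atcc_control_file_read() above pulls the argument list out of a one-line control file; a rough stand-alone equivalent on a made-up line (note that str.lstrip strips a set of characters, not a literal prefix, which happens to be safe here because the next character is a quote):

line = "job.run_test('mytest', iterations=2, dir='/mnt')\n"
parts = line.lstrip("job.run_test(").rstrip('\n').rstrip(')').split(',')
args = ','.join(parts[1:]).strip()    # everything after the quoted test name
print args                            # -> iterations=2, dir='/mnt'
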
diff --git a/unittest_suite.py b/unittest_suite.py
index 4aab623c..b8949b46 100644
--- a/unittest_suite.py
+++ b/unittest_suite.py
@@ -11,18 +11,18 @@ setup_modules.setup(base_path=root, root_module_name="autotest_lib")
suites = []
def lister(dummy, dirname, files):
- loader = unittest.TestLoader()
- for f in files:
- if f.endswith('_unittest.py'):
- temp = os.path.join(dirname, f).strip('.py')
- mod = ('autotest_lib'
- + temp[len(root):].replace('/', '.'))
- suite = loader.loadTestsFromName(mod)
- suites.append(suite)
+ loader = unittest.TestLoader()
+ for f in files:
+ if f.endswith('_unittest.py'):
+ temp = os.path.join(dirname, f).strip('.py')
+ mod = ('autotest_lib'
+ + temp[len(root):].replace('/', '.'))
+ suite = loader.loadTestsFromName(mod)
+ suites.append(suite)
if __name__ == "__main__":
- os.path.walk(root, lister, None)
- alltests = unittest.TestSuite(suites)
- runner = unittest.TextTestRunner(verbosity=2)
- runner.run(alltests)
+ os.path.walk(root, lister, None)
+ alltests = unittest.TestSuite(suites)
+ runner = unittest.TextTestRunner(verbosity=2)
+ runner.run(alltests)
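
One caveat worth noting in lister() above: str.strip('.py') removes any leading or trailing '.', 'p' or 'y' characters rather than the literal extension; it happens to be safe here because the joined path is absolute and every matched file ends in '_unittest.py'. A splitext-based spelling of the same module-name derivation, under an assumed checkout root:

import os

root = '/usr/local/autotest'                    # hypothetical checkout root
path = os.path.join(root, 'tko', 'utils_unittest.py')
mod = 'autotest_lib' + os.path.splitext(path)[0][len(root):].replace('/', '.')
print mod                                       # -> autotest_lib.tko.utils_unittest
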