author     lmr <lmr@592f7852-d20e-0410-864c-8624ca9c26a4>  2011-06-16 16:09:15 +0000
committer  lmr <lmr@592f7852-d20e-0410-864c-8624ca9c26a4>  2011-06-16 16:09:15 +0000
commit     7f0ce7f9e79e07494d20be6c2781d2edd515b5bf (patch)
tree       337eeac70f54fa29e637cc6ffec73c619bf6534b
parent     7955e643ab1911c53b967dad3ae8024c75456250 (diff)
Planner: remove backend application
This removes the planner backend (django) application.

Signed-off-by: Cleber Rosa <crosa@redhat.com>

git-svn-id: svn://test.kernel.org/autotest/trunk@5427 592f7852-d20e-0410-864c-8624ca9c26a4
-rw-r--r--   frontend/planner/__init__.py                      0
-rw-r--r--   frontend/planner/common.py                        8
-rw-r--r--   frontend/planner/control_file.py                 98
-rwxr-xr-x   frontend/planner/control_file_unittest.py        73
-rw-r--r--   frontend/planner/execution_engine.py            268
-rw-r--r--   frontend/planner/execution_engine_control.srv     7
-rwxr-xr-x   frontend/planner/execution_engine_unittest.py   293
-rw-r--r--   frontend/planner/failure_actions.py              17
-rw-r--r--   frontend/planner/model_attributes.py             27
-rw-r--r--   frontend/planner/models.py                      486
-rwxr-xr-x   frontend/planner/models_test.py                 168
-rw-r--r--   frontend/planner/planner_test_utils.py           78
-rw-r--r--   frontend/planner/rpc_interface.py               623
-rwxr-xr-x   frontend/planner/rpc_interface_unittest.py      331
-rw-r--r--   frontend/planner/rpc_utils.py                   365
-rwxr-xr-x   frontend/planner/rpc_utils_unittest.py          385
-rw-r--r--   frontend/planner/set_atomic_group_control.srv    17
-rw-r--r--   frontend/planner/support.py                      84
-rw-r--r--   frontend/planner/urls.py                         10
-rw-r--r--   frontend/planner/views.py                        24
20 files changed, 0 insertions, 3362 deletions
diff --git a/frontend/planner/__init__.py b/frontend/planner/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/frontend/planner/__init__.py
+++ /dev/null
diff --git a/frontend/planner/common.py b/frontend/planner/common.py
deleted file mode 100644
index 1edf302f..00000000
--- a/frontend/planner/common.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import os, sys
-dirname = os.path.dirname(sys.modules[__name__].__file__)
-autotest_dir = os.path.abspath(os.path.join(dirname, '..', '..'))
-client_dir = os.path.join(autotest_dir, "client")
-sys.path.insert(0, client_dir)
-import setup_modules
-sys.path.pop(0)
-setup_modules.setup(base_path=autotest_dir, root_module_name="autotest_lib")
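The common.py removed above was the standard autotest import shim: it splices the client directory onto sys.path just long enough to import setup_modules, which then publishes the source tree as the importable autotest_lib root package. A minimal standalone sketch of the underlying technique, with a stand-in for setup_modules (the helper and path below are illustrative, not the real autotest helper):

import sys
import types

def install_root_alias(base_path, root_module_name):
    # Publish a synthetic package whose __path__ points at the source tree,
    # so imports like "from autotest_lib.frontend.planner import models"
    # resolve against base_path.
    root = types.ModuleType(root_module_name)
    root.__path__ = [base_path]
    sys.modules[root_module_name] = root
    return root

# Usage (hypothetical install path):
# install_root_alias('/usr/local/autotest', 'autotest_lib')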
diff --git a/frontend/planner/control_file.py b/frontend/planner/control_file.py
deleted file mode 100644
index 3b370023..00000000
--- a/frontend/planner/control_file.py
+++ /dev/null
@@ -1,98 +0,0 @@
-import base64
-from autotest_lib.client.common_lib import utils
-
-
-VERIFY_TEST_SEGMENT = """\
-######################################################
-### Run the verify test
-######################################################
-
-def run(machine):
- host = hosts.create_host(machine, initialize=False)
- host.log_kernel()
- ret = job.run_test('verify_test', host=host, %(verify_args)s)
- if not ret:
- raise JobError("Verify test failed; aborting job")
-
-job.parallel_simple(run, machines)
-
-"""
-
-CLIENT_SEGMENT = """\
-######################################################
-### Run the client-side control file
-######################################################
-
-# The following is encoded in base64 in the variable control, below:
-#
-%(control_comment)s
-#
-import base64
-control = base64.decodestring(%(control_base64)r)
-
-def run(machine):
- host = hosts.create_host(machine)
- at = autotest.Autotest()
- at.run(control, host=host)
-
-job.parallel_simple(run, machines)
-"""
-
-
-SERVER_SEGMENT = """\
-######################################################
-### Run the server side control file
-######################################################
-
-%(control_raw)s
-"""
-
-def _generate_additional_segments_dummy(**kwargs):
- return ''
-
-
-def wrap_control_file(control_file, is_server, skip_verify,
- verify_params=None, **kwargs):
- """
- Wraps a control file for use with Test Planner
- """
- wrapped = ''
-
- if not skip_verify:
- prepared_args = prepare_args(verify_params)
- wrapped += apply_string_arguments(VERIFY_TEST_SEGMENT,
- verify_args=prepared_args)
-
- site_generate_additional_segments = utils.import_site_function(
- __file__, 'autotest_lib.frontend.planner.site_control_file',
- 'generate_additional_segments', _generate_additional_segments_dummy)
- wrapped += site_generate_additional_segments(**kwargs)
-
- if is_server:
- wrapped += apply_string_arguments(SERVER_SEGMENT,
- control_raw=control_file)
- else:
- control_base64 = base64.encodestring(control_file)
- control_comment = '\n'.join('# ' + l for l in control_file.split('\n'))
- wrapped += apply_string_arguments(CLIENT_SEGMENT,
- control_base64=control_base64,
- control_comment=control_comment)
-
- return wrapped
-
-
-def prepare_args(args_dict):
- if not args_dict:
- return ''
-
- args = []
- for k, v in args_dict.iteritems():
- args.append("%s=%s" % (k, v))
- return ', '.join(args)
-
-
-def apply_string_arguments(source, **kwargs):
- """
- Separate method to facilitate unit testing
- """
- return source % kwargs
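The removed wrap_control_file() stitches together up to three segments: an optional verify test, site-specific segments, and either the raw server control file or a base64-embedded client control file. A reduced sketch of the client path, which embeds the control file twice, once base64-encoded for execution and once commented for human readers (this uses base64.b64encode rather than the Python 2 encodestring above; values are illustrative):

import base64

client_control = "job.run_test('sleeptest')\n"

# Base64 sidesteps quoting problems when the client control file is inlined
# into the generated server-side wrapper.
encoded = base64.b64encode(client_control.encode()).decode()
comment = '\n'.join('# ' + line for line in client_control.split('\n'))

wrapper = (
    '# The following is encoded in base64 in the variable control, below:\n'
    '#\n%s\n#\n'
    'import base64\n'
    'control = base64.b64decode(%r).decode()\n' % (comment, encoded))
print(wrapper)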
diff --git a/frontend/planner/control_file_unittest.py b/frontend/planner/control_file_unittest.py
deleted file mode 100755
index 9f3263db..00000000
--- a/frontend/planner/control_file_unittest.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/python
-
-import unittest, base64
-import common
-from autotest_lib.frontend.planner import control_file
-from autotest_lib.client.common_lib.test_utils import mock
-
-
-class ControlFileUnittest(unittest.TestCase):
- def setUp(self):
- self.god = mock.mock_god()
-
-
- def tearDown(self):
- self.god.unstub_all()
-
-
- def _test_wrap_control_file_helper(self):
- self.verify_params = object()
- self.control = 'control'
- self.verify_segment = '|verify_segment|'
- prepared_verify_args = 'prepared_verify_args'
-
- self.god.stub_function(control_file, 'prepare_args')
- self.god.stub_function(control_file, 'apply_string_arguments')
- control_file.prepare_args.expect_call(
- self.verify_params).and_return(prepared_verify_args)
- control_file.apply_string_arguments.expect_call(
- control_file.VERIFY_TEST_SEGMENT,
- verify_args=prepared_verify_args).and_return(
- self.verify_segment)
-
-
- def test_wrap_control_file_client(self):
- self._test_wrap_control_file_helper()
- control_base64 = 'control_base64'
- control_segment = '|control_segment|'
-
- self.god.stub_function(base64, 'encodestring')
- base64.encodestring.expect_call(self.control).and_return(control_base64)
- control_file.apply_string_arguments.expect_call(
- control_file.CLIENT_SEGMENT, control_base64=control_base64,
- control_comment=mock.is_string_comparator()).and_return(
- control_segment)
-
- result = control_file.wrap_control_file(control_file=self.control,
- is_server=False,
- skip_verify=False,
- verify_params=self.verify_params)
-
- self.assertEqual(result, self.verify_segment + control_segment)
- self.god.check_playback()
-
-
- def test_wrap_control_file_server(self):
- self._test_wrap_control_file_helper()
- control_segment = '|control_segment|'
-
- control_file.apply_string_arguments.expect_call(
- control_file.SERVER_SEGMENT,
- control_raw=self.control).and_return(control_segment)
-
- result = control_file.wrap_control_file(control_file=self.control,
- is_server=True,
- skip_verify=False,
- verify_params=self.verify_params)
-
- self.assertEqual(result, self.verify_segment + control_segment)
- self.god.check_playback()
-
-
-if __name__ == '__main__':
- unittest.main()
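These tests use autotest's record/replay mock library: stub the collaborator, declare the expected call and its canned return with expect_call()/and_return(), run the code under test, then check_playback() verifies the recorded sequence. A rough stdlib analogue of the client-side case, using unittest.mock in place of mock_god (the assertions approximate, rather than replicate, playback verification):

import base64
from unittest import mock

# create=True lets the attribute be patched even on Pythons where
# base64.encodestring no longer exists.
with mock.patch.object(base64, 'encodestring', create=True,
                       return_value='control_base64') as stub:
    result = base64.encodestring('control')

assert result == 'control_base64'
stub.assert_called_once_with('control')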
diff --git a/frontend/planner/execution_engine.py b/frontend/planner/execution_engine.py
deleted file mode 100644
index e1b0dad1..00000000
--- a/frontend/planner/execution_engine.py
+++ /dev/null
@@ -1,268 +0,0 @@
-import time, logging
-from autotest_lib.frontend.afe import model_attributes as afe_model_attributes
-from autotest_lib.frontend.shared import rest_client
-from autotest_lib.frontend.planner import model_attributes, support
-from autotest_lib.server import frontend
-
-
-TICK_INTERVAL_SECS = 10
-PAUSE_BEFORE_RESTARTING_SECS = 60
-
-class ExecutionEngine(object):
- """
- Provides the Test Planner execution engine
- """
-
- _planner_rpc = frontend.Planner()
- _tko_rpc = frontend.TKO()
-
- def __init__(self, plan_id, server, label_name, owner):
- self._plan_id = plan_id
- self._server = server
- self._label_name = label_name
- self._owner = owner
- self._afe_rest = rest_client.Resource.load(
- 'http://%s/afe/server/resources' % server)
-
-
- def start(self):
- """
- Starts the execution engine.
-
- Thread remains in this method until the execution engine is complete.
- """
- while True:
- try:
- self._initialize_plan()
-
- while not self._tick():
- time.sleep(TICK_INTERVAL_SECS)
-
- self._cleanup()
- break
- except Exception, e:
- logging.error('Execution engine caught exception, restarting:'
- '\n%s', e)
- time.sleep(PAUSE_BEFORE_RESTARTING_SECS)
-
-
- def _initialize_plan(self):
- """
- Performs actions necessary to start a test plan.
-
- Adds the hosts into the proper atomic group, and waits for the plan to
- be ready to start before returning
- """
- plan = self._planner_rpc.run('get_plan', id=self._plan_id)
- name = plan['name'] + '_set_atomic_group'
- if not self._afe_rest.jobs.get(name=name).total_results:
- self._launch_set_atomic_group_job(name)
-
- self._wait_for_initialization()
-
-
- def _launch_set_atomic_group_job(self, name):
- """
-    Launch the job to set the hosts' atomic group, and initiate the plan
-
- If the hosts are already part of an atomic group, wait for a tick and
- try again. Return when successful
- """
- while True:
- hosts = self._planner_rpc.run('get_hosts', plan_id=self._plan_id)
- control = (self._planner_rpc.run('get_atomic_group_control_file') %
- dict(server=self._server, label_name=self._label_name,
- plan_id=self._plan_id))
-
- info = self._afe_rest.execution_info.get().execution_info
- info['control_file'] = control
- info['cleanup_before_job'] = afe_model_attributes.RebootBefore.NEVER
- info['cleanup_after_job'] = afe_model_attributes.RebootAfter.NEVER
- info['run_verify'] = False
- info['machines_per_execution'] = len(hosts)
-
- entries = self._afe_rest.queue_entries_request.get(
- hosts=hosts).queue_entries
-
- job_req = {'name' : name,
- 'owner': self._owner,
- 'execution_info' : info,
- 'queue_entries' : entries}
-
- try:
- self._afe_rest.jobs.post(job_req)
- logging.info('created job to set atomic group')
- break
- except rest_client.ClientError, e:
- logging.info('hosts already in atomic group')
- logging.info('(error was %s)' % e.message)
- logging.info('waiting...')
- time.sleep(TICK_INTERVAL_SECS)
-
-
- def _wait_for_initialization(self):
- while True:
- plan = self._planner_rpc.run('get_plan', id=self._plan_id)
- if plan['initialized']:
- break
- logging.info('waiting for initialization...')
- time.sleep(TICK_INTERVAL_SECS)
-
-
- def _cleanup(self):
- self._afe_rest.labels.get(name=self._label_name).members[0].delete()
-
-
- def _tick(self):
- """
- Processes one tick of the execution engine.
-
- Returns True if the engine has completed the plan.
- """
- logging.info('tick')
- self._process_finished_runs()
- self._check_tko_jobs()
- return self._schedule_new_runs()
-
-
- def _process_finished_runs(self):
- """
- Finalize the test runs that have finished.
-
- Look for runs that are in PASSED or FAILED, perform any additional
- processing required, and set the entry to 'finalized'.
- """
- Status = model_attributes.TestRunStatus
- runs = self._planner_rpc.run('get_test_runs', plan__id=self._plan_id,
- status__in=(Status.PASSED, Status.FAILED),
- finalized=False)
- for run in runs:
- logging.info('finalizing test run %s', run)
-
- controller = support.TestPlanController(
- machine=run['host']['host'],
- test_alias=run['test_job']['test_config']['alias'])
- self._run_execute_after(controller, tko_test_id=run['tko_test'],
- success=(run['status'] == Status.PASSED))
-
- if controller._fail:
-            raise NotImplementedError('TODO: implement forced failure')
-
- failed = (run['status'] == Status.FAILED or controller._fail)
- if failed and not controller._unblock:
- self._planner_rpc.run('modify_host', id=run['host']['id'],
- blocked=True)
- self._planner_rpc.run('modify_test_run', id=run['id'],
- finalized=True)
-
-
- def _check_tko_jobs(self):
- """
- Instructs the server to update the Planner test runs table
-
- Sends an RPC to have the server pull the proper TKO tests and add them
- to the Planner tables. Logs information about what was added.
- """
- test_runs_updated = self._planner_rpc.run('update_test_runs',
- plan_id=self._plan_id)
- for update in test_runs_updated:
- logging.info('added %s test run for tko test id %s (%s)',
- update['status'], update['tko_test_idx'],
- update['hostname'])
-
-
- def _schedule_new_runs(self):
- next_configs = self._planner_rpc.run('get_next_test_configs',
- plan_id=self._plan_id)
- if next_configs['complete']:
- return True
-
- for config in next_configs['next_configs']:
- config_id = config['next_test_config_id']
- controller = support.TestPlanController(
- machine=config['host'],
- test_alias=config['next_test_config_alias'])
- self._run_execute_before(controller)
- if controller._skip:
- self._planner_rpc.run('skip_test', test_config_id=config_id,
- hostname=config['host'])
- continue
-
- self._run_job(hostname=config['host'],
- test_config_id=config_id,
- cleanup_before_job=controller._reboot_before,
- cleanup_after_job=controller._reboot_after,
- run_verify=controller._run_verify)
-
- return False
-
-
- def _run_job(self, hostname, test_config_id, cleanup_before_job,
- cleanup_after_job, run_verify):
- if run_verify is None:
- run_verify = True
-
- test_config = self._planner_rpc.run('get_wrapped_test_config',
- id=test_config_id,
- hostname=hostname,
- run_verify=run_verify)
-
- info = self._afe_rest.execution_info.get().execution_info
- info['control_file'] = test_config['wrapped_control_file']
- info['is_server'] = True
- info['cleanup_before_job'] = cleanup_before_job
- info['cleanup_after_job'] = cleanup_after_job
- info['run_verify'] = False
-
- atomic_group_class = self._afe_rest.labels.get(
- name=self._label_name).members[0].get().atomic_group_class.href
-
- request = self._afe_rest.queue_entries_request.get(
- hosts=(hostname,), atomic_group_class=atomic_group_class)
- entries = request.queue_entries
-
- plan = self._planner_rpc.run('get_plan', id=self._plan_id)
- prefix = plan['label_override']
- if prefix is None:
- prefix = plan['name']
- job_req = {'name' : '%s_%s_%s' % (prefix, test_config['alias'],
- hostname),
- 'owner': self._owner,
- 'execution_info' : info,
- 'queue_entries' : entries}
-
- logging.info('starting test alias %s for host %s',
- test_config['alias'], hostname)
- job = self._afe_rest.jobs.post(job_req)
- self._planner_rpc.run('add_job',
- plan_id=self._plan_id,
- test_config_id=test_config_id,
- afe_job_id=job.get().id)
-
-
- def _run_execute_before(self, controller):
- """
- Execute the global support's execute_before() for the plan
- """
- self._run_global_support(controller, 'execute_before')
-
-
- def _run_execute_after(self, controller, tko_test_id, success):
- """
- Execute the global support's execute_after() for the plan
- """
- self._run_global_support(controller, 'execute_after',
- tko_test_id=tko_test_id, success=success)
-
-
- def _run_global_support(self, controller, function_name, **kwargs):
- plan = self._planner_rpc.run('get_plan', id=self._plan_id)
- if plan['support']:
- context = {'model_attributes': afe_model_attributes}
- exec plan['support'] in context
- function = context.get(function_name)
- if function:
- if not callable(function):
- raise Exception('Global support defines %s, but it is not '
- 'callable' % function_name)
-                function(controller, **kwargs)
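_run_global_support() above is a small plugin hook: the plan row stores a Python script, the engine exec()s it into a scratch namespace, then looks up and calls the named function if the script defined one. A self-contained sketch of the same pattern (the hook body and controller below are illustrative):

support_script = """
def execute_after(controller, tko_test_id, success):
    if not success:
        controller.block_host()
"""

class Controller(object):
    def block_host(self):
        print('host blocked')

def run_hook(script, function_name, *args, **kwargs):
    context = {}
    exec(script, context)  # Python 3 spelling of "exec script in context"
    hook = context.get(function_name)
    if hook is None:
        return
    if not callable(hook):
        raise Exception('support defines %s, but it is not callable'
                        % function_name)
    hook(*args, **kwargs)

run_hook(support_script, 'execute_after', Controller(),
         tko_test_id=1234, success=False)  # prints 'host blocked'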
diff --git a/frontend/planner/execution_engine_control.srv b/frontend/planner/execution_engine_control.srv
deleted file mode 100644
index 28fd33d7..00000000
--- a/frontend/planner/execution_engine_control.srv
+++ /dev/null
@@ -1,7 +0,0 @@
-from autotest_lib.frontend.planner import execution_engine
-
-engine = execution_engine.ExecutionEngine(plan_id=%(plan_id)d,
- server='%(server)s',
- label_name='%(label_name)s',
- owner='%(owner)s')
-engine.start()
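This .srv file is a %-style template rather than directly runnable code: the frontend interpolates the plan parameters and submits the result as a server-side control file (the analogous substitution for the atomic-group template appears in _launch_set_atomic_group_job() above). A sketch of the interpolation step with made-up values:

template = (
    "engine = execution_engine.ExecutionEngine(plan_id=%(plan_id)d,\n"
    "                                          server='%(server)s',\n"
    "                                          label_name='%(label_name)s',\n"
    "                                          owner='%(owner)s')\n"
    "engine.start()\n")

control = template % dict(plan_id=42, server='afe.example.com',
                          label_name='plan_42_group', owner='debug_user')
print(control)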
diff --git a/frontend/planner/execution_engine_unittest.py b/frontend/planner/execution_engine_unittest.py
deleted file mode 100755
index a65978f2..00000000
--- a/frontend/planner/execution_engine_unittest.py
+++ /dev/null
@@ -1,293 +0,0 @@
-#!/usr/bin/python
-
-import unittest
-import common
-from autotest_lib.frontend import setup_django_environment
-from autotest_lib.client.common_lib.test_utils import mock
-from autotest_lib.frontend.afe import frontend_test_utils, models as afe_models
-from autotest_lib.frontend.afe import model_attributes as afe_model_attributes
-from autotest_lib.frontend.shared import rest_client
-from autotest_lib.frontend.planner import models, execution_engine, support
-from autotest_lib.frontend.planner import model_attributes
-
-
-class MockObject(object):
- """
- Empty mock object class, so that setattr() works on all names
- """
- pass
-
-
-class MockAfeRest(object):
- jobs = MockObject()
- execution_info = MockObject()
- queue_entries_request = MockObject()
-
-
-class MockRestJobs(object):
- def __init__(self, total_results):
- self.total_results = total_results
-
-
-class MockExecutionInfo(object):
- execution_info = {}
-
-
-class MockQueueEntriesRequest(object):
- queue_entries = object()
-
-
-class MockExecutionEngine(execution_engine.ExecutionEngine):
- _planner_rpc = MockObject()
- _tko_rpc = object()
- _plan_id = object()
- _server = object()
- _afe_rest = MockAfeRest()
- _label_name = object()
- _owner = object()
-
-
- def __init__(self, *args, **kwargs):
- pass
-
-
-class MockTestPlanController(support.TestPlanController):
- def __init__(self, *args, **kwargs):
- super(MockTestPlanController, self).__init__(machine=None,
- test_alias=None)
-
-
-class ExecutionEngineTest(unittest.TestCase,
- frontend_test_utils.FrontendTestMixin):
- def setUp(self):
- self._frontend_common_setup()
- self.engine = MockExecutionEngine()
-
-
- def tearDown(self):
- self._frontend_common_teardown()
-
-
- def _setup_test_initialize_plan(self):
- self.god.stub_function(self.engine._planner_rpc, 'run')
- self.god.stub_function(self.engine._afe_rest.jobs, 'get')
- self.god.stub_function(self.engine, '_wait_for_initialization')
-
-
- def test_initialize_plan_new_plan(self):
- self._setup_test_initialize_plan()
- self.god.stub_function(self.engine, '_launch_set_atomic_group_job')
-
- self.engine._planner_rpc.run.expect_call(
- 'get_plan', id=self.engine._plan_id).and_return(
- {'name': 'plan'})
- self.engine._afe_rest.jobs.get.expect_call(
- name='plan_set_atomic_group').and_return(MockRestJobs(None))
- self.engine._launch_set_atomic_group_job.expect_call(
- 'plan_set_atomic_group')
- self.engine._wait_for_initialization.expect_call()
-
- self.engine._initialize_plan()
-        self.god.check_playback()
-
-
- def test_initialize_plan_existing(self):
- self._setup_test_initialize_plan()
-
- self.engine._planner_rpc.run.expect_call(
- 'get_plan', id=self.engine._plan_id).and_return(
- {'name': 'plan'})
- self.engine._afe_rest.jobs.get.expect_call(
- name='plan_set_atomic_group').and_return(MockRestJobs(object()))
- self.engine._wait_for_initialization.expect_call()
-
- self.engine._initialize_plan()
-        self.god.check_playback()
-
-
- def _setup_test_launch_atomic_group_job(self, name):
- DUMMY_CONTROL = '%(server)r %(label_name)r %(plan_id)r'
- DUMMY_EXECUTION_INFO = MockExecutionInfo()
- DUMMY_QUEUE_ENTRIES_REQUEST = MockQueueEntriesRequest()
-
- self.god.stub_function(self.engine._planner_rpc, 'run')
- self.god.stub_function(self.engine._afe_rest.execution_info, 'get')
- self.god.stub_function(
- self.engine._afe_rest.queue_entries_request, 'get')
-
- self.engine._planner_rpc.run.expect_call(
- 'get_hosts', plan_id=self.engine._plan_id).and_return(
- self.hosts)
- self.engine._planner_rpc.run.expect_call(
- 'get_atomic_group_control_file').and_return(DUMMY_CONTROL)
- self.engine._afe_rest.execution_info.get.expect_call().and_return(
- DUMMY_EXECUTION_INFO)
- self.engine._afe_rest.queue_entries_request.get.expect_call(
- hosts=self.hosts).and_return(DUMMY_QUEUE_ENTRIES_REQUEST)
-
- control_file = DUMMY_CONTROL % dict(server=self.engine._server,
- label_name=self.engine._label_name,
- plan_id=self.engine._plan_id)
- DUMMY_EXECUTION_INFO.execution_info = {
- 'control_file': control_file,
- 'cleanup_before_job': afe_model_attributes.RebootBefore.NEVER,
- 'cleanup_after_job': afe_model_attributes.RebootAfter.NEVER,
- 'run_verify': False,
- 'machines_per_execution': len(self.hosts)}
-
- job_req = {'name': name,
- 'owner': self.engine._owner,
- 'execution_info': DUMMY_EXECUTION_INFO.execution_info,
- 'queue_entries': DUMMY_QUEUE_ENTRIES_REQUEST.queue_entries}
-
- return job_req
-
-
- def test_launch_atomic_group_job(self):
- job_req = self._setup_test_launch_atomic_group_job('atomic_group_job')
- self.god.stub_function(self.engine._afe_rest.jobs, 'post')
-
- self.engine._afe_rest.jobs.post.expect_call(job_req)
-
- self.engine._launch_set_atomic_group_job('atomic_group_job')
- self.god.check_playback()
-
-
- def _setup_mock_controller(self, controller_options):
- mock_controller = MockTestPlanController()
- for key, value in controller_options.iteritems():
- setattr(mock_controller, key, value)
- self.god.stub_with(support, 'TestPlanController',
- lambda *args, **kwargs : mock_controller)
- return mock_controller
-
-
- def _test_process_finished_runs_helper(self, status, should_block=False,
- controller_options={}):
- Status = model_attributes.TestRunStatus
- TEST_RUN_ID = object()
- TKO_TEST_ID = object()
- HOST_ID = object()
-
- mock_controller = self._setup_mock_controller(controller_options)
-
- self.god.stub_function(self.engine._planner_rpc, 'run')
- self.god.stub_function(self.engine, '_run_execute_after')
-
- test_run = {'id': TEST_RUN_ID,
- 'host': {'host': self.hosts[0].hostname,
- 'id': HOST_ID},
- 'test_job': {'test_config': {'alias': 'test_alias'}},
- 'tko_test': TKO_TEST_ID,
- 'status': status}
-
- self.engine._planner_rpc.run.expect_call(
- 'get_test_runs',
- plan__id=self.engine._plan_id,
- status__in=(Status.PASSED, Status.FAILED),
- finalized=False).and_return([test_run])
- self.engine._run_execute_after.expect_call(
- mock_controller, tko_test_id=TKO_TEST_ID,
- success=(status == Status.PASSED))
- if should_block:
- self.engine._planner_rpc.run.expect_call('modify_host', id=HOST_ID,
- blocked=True)
- self.engine._planner_rpc.run.expect_call('modify_test_run',
- id=TEST_RUN_ID, finalized=True)
-
- self.engine._process_finished_runs()
-
- self.god.check_playback()
-
-
- def test_process_finished_runs_pass(self):
- self._test_process_finished_runs_helper(
- model_attributes.TestRunStatus.PASSED)
-
-
- def test_process_finished_runs_fail(self):
- self._test_process_finished_runs_helper(
- model_attributes.TestRunStatus.FAILED, should_block=True)
-
-
- def test_process_finished_runs_fail_unblock(self):
- self._test_process_finished_runs_helper(
- model_attributes.TestRunStatus.FAILED, should_block=False,
- controller_options={'_unblock': True})
-
-
- def _test_schedule_new_runs_helper(self, complete=False, should_skip=False,
- controller_options={}):
- TEST_CONFIG_ID = object()
-
- self.god.stub_function(self.engine._planner_rpc, 'run')
- self.god.stub_function(self.engine, '_run_execute_before')
-
- result = {'complete': complete,
- 'next_configs': [{'next_test_config_id': TEST_CONFIG_ID,
- 'host': self.hosts[0].hostname,
- 'next_test_config_alias': object()}]}
-
- mock_controller = self._setup_mock_controller(controller_options)
-
- self.engine._planner_rpc.run.expect_call(
- 'get_next_test_configs',
- plan_id=self.engine._plan_id).and_return(result)
-
- if not complete:
- self.engine._run_execute_before.expect_call(mock_controller)
-
- if should_skip:
- self.engine._planner_rpc.run.expect_call(
- 'skip_test', test_config_id=TEST_CONFIG_ID,
- hostname=self.hosts[0].hostname)
- else:
- self.god.stub_function(self.engine, '_run_job')
- self.engine._run_job.expect_call(
- hostname=self.hosts[0].hostname,
- test_config_id=TEST_CONFIG_ID,
- cleanup_before_job=mock_controller._reboot_before,
- cleanup_after_job=mock_controller._reboot_after,
- run_verify=mock_controller._run_verify)
-
- self.engine._schedule_new_runs()
-
- self.god.check_playback()
-
-
- def test_schedule_new_runs(self):
- self._test_schedule_new_runs_helper()
-
-
- def test_schedule_new_runs_complete(self):
- self._test_schedule_new_runs_helper(complete=True)
-
-
- def test_schedule_new_runs_skip(self):
- self._test_schedule_new_runs_helper(should_skip=True,
- controller_options={'_skip': True})
-
-
- def test_run_global_support(self):
- self._ran_global_support = False
- support = """
-def test_global_support(controller):
- controller._ran_global_support = True
-"""
-
- DUMMY_PLAN = {'support': support}
-
- self.god.stub_function(self.engine._planner_rpc, 'run')
-
- self.engine._planner_rpc.run.expect_call(
- 'get_plan', id=self.engine._plan_id).and_return(DUMMY_PLAN)
-
- self.engine._run_global_support(controller=self,
- function_name='test_global_support')
-
- self.assertTrue(self._ran_global_support)
- self.god.check_playback()
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/frontend/planner/failure_actions.py b/frontend/planner/failure_actions.py
deleted file mode 100644
index 2dd37f5b..00000000
--- a/frontend/planner/failure_actions.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import common
-from autotest_lib.client.common_lib import enum, utils
-
-
-def _site_host_actions_dummy():
- return []
-
-_site_host_actions = utils.import_site_function(
- __file__, 'autotest_lib.frontend.planner.site_failure_actions',
- 'site_host_actions', _site_host_actions_dummy)
-
-HostAction = enum.Enum(
- string_values=True,
- *(_site_host_actions() + ['Block', 'Unblock', 'Reinstall']))
-
-
-TestAction = enum.Enum('Skip', 'Rerun', string_values=True)
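Both enums here are built with autotest's enum.Enum(string_values=True, ...), which makes each member's value the name string itself, and HostAction is site-extensible: a site_failure_actions module, when present, contributes extra actions ahead of the stock Block/Unblock/Reinstall. A stand-in sketch of that composition (load_site_hook below approximates utils.import_site_function):

def _site_host_actions_dummy():
    return []

def load_site_hook(default):
    # Return the site-provided hook when the optional site module exists,
    # otherwise fall back to the dummy default.
    try:
        from site_failure_actions import site_host_actions  # hypothetical
        return site_host_actions
    except ImportError:
        return default

_site_host_actions = load_site_hook(_site_host_actions_dummy)

HOST_ACTIONS = tuple(_site_host_actions() + ['Block', 'Unblock', 'Reinstall'])
print(HOST_ACTIONS)  # ('Block', 'Unblock', 'Reinstall') with no site module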
diff --git a/frontend/planner/model_attributes.py b/frontend/planner/model_attributes.py
deleted file mode 100644
index 87fadea6..00000000
--- a/frontend/planner/model_attributes.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import common
-from autotest_lib.client.common_lib import enum, utils
-
-
-# common enums for Host attributes
-HostStatus = enum.Enum('Finished', 'Running', 'Blocked', string_values=True)
-
-
-# common enums for TestRun attributes
-TestRunStatus = enum.Enum('Active', 'Passed', 'Failed', string_values=True)
-
-
-# common enums for SavedObject attributes
-SavedObjectType = enum.Enum('support', 'triage', 'autoprocess', 'custom_query',
- string_values=True)
-
-
-# common enums for AdditionalParameter attributes
-def _site_additional_parameter_types_dummy():
- return []
-_site_additional_parameter_types = utils.import_site_function(
- __file__, 'autotest_lib.frontend.planner.site_model_attributes',
- 'site_additional_parameter_types',
- _site_additional_parameter_types_dummy)
-AdditionalParameterType = enum.Enum(
- string_values=True,
- *(_site_additional_parameter_types() + ['Verify']))
diff --git a/frontend/planner/models.py b/frontend/planner/models.py
deleted file mode 100644
index 76aa4a26..00000000
--- a/frontend/planner/models.py
+++ /dev/null
@@ -1,486 +0,0 @@
-import re
-from django.db import models as dbmodels
-import common
-from autotest_lib.frontend.afe import models as afe_models
-from autotest_lib.frontend.afe import model_logic, rpc_utils
-from autotest_lib.frontend.tko import models as tko_models
-from autotest_lib.frontend.planner import model_attributes
-from autotest_lib.client.common_lib import utils, host_queue_entry_states
-
-
-class Plan(dbmodels.Model, model_logic.ModelExtensions):
- """A test plan
-
- Required:
- name: Plan name, unique
- complete: True if the plan is completed
- dirty: True if the plan has been changed since the execution engine has
- last seen it
- initialized: True if the plan has started
-
- Optional:
- label_override: A label to apply to each Autotest job.
- support: The global support script to apply to this plan
- """
- name = dbmodels.CharField(max_length=255, unique=True)
- label_override = dbmodels.CharField(max_length=255, null=True, blank=True)
- support = dbmodels.TextField(blank=True)
- complete = dbmodels.BooleanField(default=False)
- dirty = dbmodels.BooleanField(default=False)
- initialized = dbmodels.BooleanField(default=False)
-
- owners = dbmodels.ManyToManyField(afe_models.User,
- db_table='planner_plan_owners')
- hosts = dbmodels.ManyToManyField(afe_models.Host, through='Host')
- host_labels = dbmodels.ManyToManyField(afe_models.Label,
- db_table='planner_plan_host_labels')
-
- name_field = 'name'
-
- class Meta:
- db_table = 'planner_plans'
-
-
- def __unicode__(self):
- return unicode(self.name)
-
-
-class ModelWithPlan(dbmodels.Model):
- """Superclass for models that have a plan_id
-
- Required:
- plan: The associated test plan
- """
- plan = dbmodels.ForeignKey(Plan)
-
- class Meta:
- abstract = True
-
-
- def __unicode__(self):
- return u'%s (%s)' % (self._get_details_unicode(), self.plan.name)
-
-
- def _get_details_unicode(self):
- """Gets the first part of the unicode string
-
- subclasses must override this method
- """
- raise NotImplementedError(
- 'Subclasses must override _get_details_unicode()')
-
-
-class Host(ModelWithPlan, model_logic.ModelExtensions):
- """A plan host
-
- Required:
- host: The AFE host
- complete: True if and only if this host is finished in the test plan
- blocked: True if and only if the host is blocked (not executing tests)
- added_by_label: True if and only if the host was added because of a host
- label (as opposed to being explicitly added)
- """
- host = dbmodels.ForeignKey(afe_models.Host)
- complete = dbmodels.BooleanField(default=False)
- blocked = dbmodels.BooleanField(default=False)
- added_by_label = dbmodels.BooleanField(default=False)
-
- class Meta:
- db_table = 'planner_hosts'
-
-
- def status(self):
- if self.complete:
- return model_attributes.HostStatus.FINISHED
- if self.blocked:
- return model_attributes.HostStatus.BLOCKED
- return model_attributes.HostStatus.RUNNING
-
-
- def _get_details_unicode(self):
- return 'Host: %s' % self.host.hostname
-
-
-class ControlFile(model_logic.ModelWithHash,
- model_logic.ModelExtensions):
- """A control file. Immutable once added to the table
-
- Required:
- contents: The text of the control file
-
- Others:
- the_hash: The SHA1 hash of the control file, for duplicate detection
- and fast search
- """
- contents = dbmodels.TextField()
-
- class Meta:
- db_table = 'planner_test_control_files'
-
-
- @classmethod
- def _compute_hash(cls, **kwargs):
- return utils.hash('sha1', kwargs['contents']).hexdigest()
-
-
- def __unicode__(self):
-        return u'Control file id %s (SHA1: %s)' % (self.id, self.the_hash)
-
-
-class TestConfig(ModelWithPlan, model_logic.ModelExtensions):
- """A configuration for a planned test
-
- Required:
- alias: The name to give this test within the plan. Unique with plan id
- control_file: The control file to run
- is_server: True if this control file is a server-side test
- execution_order: An integer describing when this test should be run in
- the test plan
- estimated_runtime: Time in hours that the test is expected to run. Will
- be automatically generated (on the frontend) for
- tests in Autotest.
- skipped_hosts: Hosts that are going to skip this test.
- """
- alias = dbmodels.CharField(max_length=255)
- control_file = dbmodels.ForeignKey(ControlFile)
- is_server = dbmodels.BooleanField(default=True)
- execution_order = dbmodels.IntegerField(blank=True)
- estimated_runtime = dbmodels.IntegerField()
- skipped_hosts = dbmodels.ManyToManyField(
- afe_models.Host, db_table='planner_test_configs_skipped_hosts')
-
- class Meta:
- db_table = 'planner_test_configs'
- ordering = ('execution_order',)
- unique_together = (('plan', 'alias'),)
-
-
- def _get_details_unicode(self):
- return 'Planned test config - Control file id %s' % self.control_file.id
-
-
-class Job(ModelWithPlan, model_logic.ModelExtensions):
- """Represents an Autotest job initiated for a test plan
-
- Required:
-        test_config: The TestConfig associated with this Job
- afe_job: The Autotest job
- """
- test_config = dbmodels.ForeignKey(TestConfig)
- afe_job = dbmodels.ForeignKey(afe_models.Job)
- requires_rerun = dbmodels.BooleanField(default=False)
-
- class Meta:
- db_table = 'planner_test_jobs'
-
-
- def active(self):
- for hqe in self.afe_job.hostqueueentry_set.all():
- if not hqe.complete:
- return True
- return False
-
-
- def all_tests_passed(self):
- if self.active():
- return False
-
- Status = host_queue_entry_states.Status
- if self.afe_job.hostqueueentry_set.exclude(status=Status.COMPLETED):
- return False
-
- tko_tests = tko_models.Test.objects.filter(
- job__afe_job_id=self.afe_job.id)
- for tko_test in tko_tests:
- if tko_test.status.word != 'GOOD':
- return False
- return True
-
-
- def _get_details_unicode(self):
- return 'AFE job %s' % self.afe_job.id
-
-
-class Bug(dbmodels.Model):
- """Represents a bug ID
-
- Required:
- external_uid: External unique ID for the bug
- """
- external_uid = dbmodels.CharField(max_length=255, unique=True)
-
- class Meta:
- db_table = 'planner_bugs'
-
-
- def __unicode__(self):
- return u'Bug external ID %s' % self.external_uid
-
-
-class TestRun(ModelWithPlan, model_logic.ModelExtensions):
- """An individual test run from an Autotest job for the test plan.
-
- Each Job object may have multiple TestRun objects associated with it.
-
- Required:
- test_job: The Job object associated with this TestRun
- tko_test: The TKO Test associated with this TestRun
- status: One of 'Active', 'Passed', 'Failed'
- finalized: True if and only if the TestRun is ready to be shown in
- triage
- invalidated: True if and only if a user has decided to invalidate this
- TestRun's results
- seen: True if and only if a user has marked this TestRun as "seen"
- triaged: True if and only if the TestRun no longer requires any user
- intervention
-
- Optional:
-        bugs: Bugs filed that are relevant to this run
- """
- test_job = dbmodels.ForeignKey(Job)
- tko_test = dbmodels.ForeignKey(tko_models.Test)
- host = dbmodels.ForeignKey(Host)
- status = dbmodels.CharField(
- max_length=16,
- choices=model_attributes.TestRunStatus.choices(),
- default=model_attributes.TestRunStatus.ACTIVE)
- finalized = dbmodels.BooleanField(default=False)
- seen = dbmodels.BooleanField(default=False)
- triaged = dbmodels.BooleanField(default=False)
- invalidated = dbmodels.BooleanField(default=False)
-
- bugs = dbmodels.ManyToManyField(Bug, null=True,
- db_table='planner_test_run_bugs')
-
- class Meta:
- db_table = 'planner_test_runs'
- unique_together = (('plan', 'test_job', 'tko_test', 'host'),)
-
-
- def _get_details_unicode(self):
- return 'Test Run: %s' % self.id
-
-
-class DataType(dbmodels.Model):
- """Encodes the data model types
-
- For use in the history table, to identify the type of object that was
- changed.
-
- Required:
- name: The name of the data type
- db_table: The name of the database table that stores this type
- """
- name = dbmodels.CharField(max_length=255)
- db_table = dbmodels.CharField(max_length=255)
-
- class Meta:
- db_table = 'planner_data_types'
-
-
- def __unicode__(self):
- return u'Data type %s (stored in table %s)' % (self.name, self.db_table)
-
-
-class History(ModelWithPlan):
- """Represents a history action
-
- Required:
- action_id: An arbitrary ID that uniquely identifies the user action
- related to the history entry. One user action may result in
- multiple history entries
- user: The user who initiated the change
- data_type: The type of object that was changed
- object_id: Value of the primary key field for the changed object
- old_object_repr: A string representation of the object before the change
- new_object_repr: A string representation of the object after the change
-
- Others:
- time: A timestamp. Automatically generated.
- """
- action_id = dbmodels.IntegerField()
- user = dbmodels.ForeignKey(afe_models.User)
- data_type = dbmodels.ForeignKey(DataType)
- object_id = dbmodels.IntegerField()
- old_object_repr = dbmodels.TextField(blank=True)
- new_object_repr = dbmodels.TextField(blank=True)
-
- time = dbmodels.DateTimeField(auto_now_add=True)
-
- class Meta:
- db_table = 'planner_history'
-
-
- def _get_details_unicode(self):
- return 'History entry: %s => %s' % (self.old_object_repr,
- self.new_object_repr)
-
-
-class SavedObject(dbmodels.Model):
- """A saved object that can be recalled at certain points in the UI
-
- Required:
- user: The creator of the object
- object_type: One of 'support', 'triage', 'autoprocess', 'custom_query'
- name: The name given to the object
- encoded_object: The actual object
- """
- user = dbmodels.ForeignKey(afe_models.User)
- object_type = dbmodels.CharField(
- max_length=16,
- choices=model_attributes.SavedObjectType.choices(),
- db_column='type')
- name = dbmodels.CharField(max_length=255)
- encoded_object = dbmodels.TextField()
-
- class Meta:
- db_table = 'planner_saved_objects'
- unique_together = ('user', 'object_type', 'name')
-
-
- def __unicode__(self):
- return u'Saved %s object: %s, by %s' % (self.object_type, self.name,
- self.user.login)
-
-
-class CustomQuery(ModelWithPlan):
- """A custom SQL query for the triage page
-
- Required:
- query: the SQL WHERE clause to attach to the main query
- """
- query = dbmodels.TextField()
-
- class Meta:
- db_table = 'planner_custom_queries'
-
-
- def _get_details_unicode(self):
- return 'Custom Query: %s' % self.query
-
-
-class KeyVal(model_logic.ModelWithHash):
- """Represents a keyval. Immutable once added to the table.
-
- Required:
- key: The key
- value: The value
-
- Others:
- the_hash: The result of SHA1(SHA1(key) ++ value), for duplicate
- detection and fast search.
- """
- key = dbmodels.CharField(max_length=1024)
- value = dbmodels.CharField(max_length=1024)
-
- class Meta:
- db_table = 'planner_keyvals'
-
-
- @classmethod
- def _compute_hash(cls, **kwargs):
- round1 = utils.hash('sha1', kwargs['key']).hexdigest()
- return utils.hash('sha1', round1 + kwargs['value']).hexdigest()
-
-
- def __unicode__(self):
- return u'Keyval: %s = %s' % (self.key, self.value)
-
-
-class AutoProcess(ModelWithPlan):
- """An autoprocessing directive to perform on test runs that enter triage
-
- Required:
- condition: A SQL WHERE clause. The autoprocessing will be applied if the
- test run matches this condition
- enabled: If this is False, this autoprocessing entry will not be applied
-
- Optional:
- labels: Labels to apply to the TKO test
- keyvals: Keyval overrides to apply to the TKO test
-        bugs: Bugs filed that are relevant to this run
- reason_override: Override for the AFE reason
- """
- condition = dbmodels.TextField()
- enabled = dbmodels.BooleanField(default=False)
-
- labels = dbmodels.ManyToManyField(tko_models.TestLabel, null=True,
- db_table='planner_autoprocess_labels')
- keyvals = dbmodels.ManyToManyField(KeyVal, null=True,
- db_table='planner_autoprocess_keyvals')
- bugs = dbmodels.ManyToManyField(Bug, null=True,
- db_table='planner_autoprocess_bugs')
- reason_override = dbmodels.CharField(max_length=255, null=True, blank=True)
-
- class Meta:
- db_table = 'planner_autoprocess'
-
-
- def _get_details_unicode(self):
- return 'Autoprocessing condition: %s' % self.condition
-
-
-class AdditionalParameter(ModelWithPlan):
- """
- Allows parameters to be passed to the execution engine for test configs
-
- If this object matches a hostname by regex, it will apply the associated
- parameters at their applicable locations.
-
- Required:
- hostname_regex: A regular expression, for matching on the hostname
- param_type: Currently only 'Verify' (and site-specific values) allowed
- application_order: The order in which to apply this parameter.
- Parameters are attempted in the order specified here,
- and stop when the first match is found
- """
- hostname_regex = dbmodels.CharField(max_length=255)
- param_type = dbmodels.CharField(
- max_length=32,
- choices=model_attributes.AdditionalParameterType.choices())
- application_order = dbmodels.IntegerField(blank=True)
-
- class Meta:
- db_table = 'planner_additional_parameters'
- unique_together = ('plan', 'hostname_regex', 'param_type')
-
-
- @classmethod
- def find_applicable_additional_parameter(cls, plan, hostname, param_type):
- """
- Finds the first AdditionalParameter that matches the given arguments
- """
- params = cls.objects.filter(
- plan=plan, param_type=param_type).order_by('application_order')
- for param in params:
- if re.match(param.hostname_regex, hostname):
- return param
- return None
-
-
- def _get_details_unicode(self):
- return 'Additional %s parameters, regex: %s' % (self.param_type,
- self.hostname_regex)
-
-
-class AdditionalParameterValue(dbmodels.Model):
- """
- The actual values for the additional parameters
-
- Required:
- additional_parameter: The associated AdditionalParameter
- key: The name of the parameter
- value: The value of the parameter
- """
- additional_parameter = dbmodels.ForeignKey(AdditionalParameter)
- key = dbmodels.CharField(max_length=255)
- value = dbmodels.CharField(max_length=255)
-
- class Meta:
- db_table = 'planner_additional_parameter_values'
- unique_together = ('additional_parameter', 'key')
-
-
- def __unicode__(self):
- return u'Value for parameter %d: %s=%s' % (self.additional_parameter.id,
- self.key, self.value)
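The most involved lookup in models.py is AdditionalParameter.find_applicable_additional_parameter(): candidates are ordered by application_order and the first hostname_regex that re.match()es the hostname wins. The same selection reduced to plain data for illustration:

import re

def find_applicable(params, hostname):
    # params: (application_order, hostname_regex) pairs; the first match in
    # ascending application_order wins, mirroring the queryset above.
    for order, regex in sorted(params):
        if re.match(regex, hostname):
            return regex
    return None

params = [(1, r'.*'), (0, r'host[0-9]+')]
print(find_applicable(params, 'host7'))  # host[0-9]+ (order 0 tried first)
print(find_applicable(params, 'other'))  # .* (the order 1 fallback)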
diff --git a/frontend/planner/models_test.py b/frontend/planner/models_test.py
deleted file mode 100755
index 21219921..00000000
--- a/frontend/planner/models_test.py
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/usr/bin/python
-
-import unittest
-import common
-from autotest_lib.frontend import setup_django_environment
-from autotest_lib.frontend.afe import frontend_test_utils, rpc_utils
-from autotest_lib.frontend.tko import models as tko_models
-from autotest_lib.frontend.planner import models, model_attributes
-from autotest_lib.frontend.planner import planner_test_utils
-
-
-class ModelWithHashTestBase(frontend_test_utils.FrontendTestMixin):
- def setUp(self):
- self._frontend_common_setup(fill_data=False)
-
-
- def tearDown(self):
- self._frontend_common_teardown()
-
-
- def _model_class(self):
- raise NotImplementedError('Subclasses must override _model_class()')
-
-
- def _test_data(self):
- raise NotImplementedError('Subclasses must override _test_data()')
-
-
- def test_disallowed_operations(self):
- def _call_create():
- self._model_class().objects.create(**self._test_data())
- self.assertRaises(Exception, _call_create)
-
- model = self._model_class().objects.get_or_create(
- **self._test_data())[0]
- self.assertRaises(Exception, model.save)
-
-
- def test_hash_field(self):
- model = self._model_class().objects.get_or_create(
- **self._test_data())[0]
- self.assertNotEqual(model.id, None)
- self.assertEqual(self._model_class()._compute_hash(**self._test_data()),
- model.the_hash)
-
-
-class ControlFileTest(ModelWithHashTestBase, unittest.TestCase):
- def _model_class(self):
- return models.ControlFile
-
-
- def _test_data(self):
- return {'contents' : 'test_control'}
-
-
-class KeyValTest(ModelWithHashTestBase, unittest.TestCase):
- def _model_class(self):
- return models.KeyVal
-
-
- def _test_data(self):
- return {'key' : 'test_key',
- 'value' : 'test_value'}
-
-
-class AdditionalParameterTest(frontend_test_utils.FrontendTestMixin,
- unittest.TestCase):
- def setUp(self):
- self._frontend_common_setup()
- self.plan = models.Plan.objects.create(name='plan')
- self.param_type = model_attributes.AdditionalParameterType.VERIFY
-
- def tearDown(self):
- self._frontend_common_teardown()
-
-
- def test_find_applicable_control_parameter_match(self):
- parameter = models.AdditionalParameter.objects.create(
- plan=self.plan, hostname_regex='host.*',
- param_type=self.param_type, application_order=0)
- found = models.AdditionalParameter.find_applicable_additional_parameter(
- plan=self.plan, hostname='host1', param_type=self.param_type)
-
- self.assertEqual(parameter, found)
-
-
- def test_find_applicable_additional_parameter_ordered(self):
- additional1 = models.AdditionalParameter.objects.create(
- plan=self.plan, hostname_regex='host.*',
- param_type=self.param_type, application_order=0)
- additional2 = models.AdditionalParameter.objects.create(
- plan=self.plan, hostname_regex='.*',
- param_type=self.param_type, application_order=1)
-
- found1 = (
- models.AdditionalParameter.find_applicable_additional_parameter(
- plan=self.plan, hostname='host1',
- param_type=self.param_type))
- found2 = (
- models.AdditionalParameter.find_applicable_additional_parameter(
- plan=self.plan, hostname='other',
- param_type=self.param_type))
-
- self.assertEqual(additional1, found1)
- self.assertEqual(additional2, found2)
-
-
- def test_find_applicable_additional_parameter_no_match(self):
- models.AdditionalParameter.objects.create(
- plan=self.plan, hostname_regex='host.*',
- param_type=self.param_type, application_order=0)
- found = models.AdditionalParameter.find_applicable_additional_parameter(
- plan=self.plan, hostname='other', param_type=self.param_type)
-
- self.assertEqual(None, found)
-
-
-class JobTest(planner_test_utils.PlannerTestMixin,
- unittest.TestCase):
- def setUp(self):
- self._planner_common_setup()
- self._setup_active_plan()
-
-
- def tearDown(self):
- self._planner_common_teardown()
-
-
- def test_active(self):
- self.assertEqual(True, self._planner_job.active())
- self._afe_job.hostqueueentry_set.update(complete=True)
- self.assertEqual(False, self._planner_job.active())
-
-
- def test_all_tests_passed_active(self):
- self.assertEqual(True, self._planner_job.active())
- self.assertEqual(False, self._planner_job.all_tests_passed())
-
-
- def test_all_tests_passed_failed_queue_entry(self):
- self._afe_job.hostqueueentry_set.update(complete=True, status='Failed')
- self.assertEqual(False, self._planner_job.active())
-
- self.assertEqual(False, self._planner_job.all_tests_passed())
-
-
- def _setup_test_all_tests_passed(self, status):
- self._afe_job.hostqueueentry_set.update(complete=True,
- status='Completed')
- tko_test = tko_models.Test.objects.create(job=self._tko_job,
- status=status,
- kernel=self._tko_kernel,
- machine=self._tko_machine)
- self.assertEqual(False, self._planner_job.active())
-
-
- def test_all_tests_passed_success(self):
- self._setup_test_all_tests_passed(self._good_status)
- self.assertEqual(True, self._planner_job.all_tests_passed())
-
-
- def test_all_tests_passed_failure(self):
- self._setup_test_all_tests_passed(self._fail_status)
- self.assertEqual(False, self._planner_job.all_tests_passed())
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/frontend/planner/planner_test_utils.py b/frontend/planner/planner_test_utils.py
deleted file mode 100644
index 1ce52d7a..00000000
--- a/frontend/planner/planner_test_utils.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import common
-from autotest_lib.frontend.afe import frontend_test_utils
-from autotest_lib.frontend.afe import models as afe_models
-from autotest_lib.frontend.tko import models as tko_models
-from autotest_lib.frontend.planner import models
-from autotest_lib.client.common_lib import utils
-
-class PlannerTestMixin(frontend_test_utils.FrontendTestMixin):
- _PLAN_NAME = 'plan'
- GOOD_STATUS_WORD = 'GOOD'
- RUNNING_STATUS_WORD = 'RUNNING'
- FAIL_STATUS_WORD = 'FAIL'
-
- def _planner_common_setup(self):
- self._frontend_common_setup()
-
- plan = models.Plan.objects.create(name=self._PLAN_NAME)
- models.Host.objects.create(
- plan=plan, host=afe_models.Host.objects.get(hostname='host1'))
- models.Host.objects.create(
- plan=plan, host=afe_models.Host.objects.get(hostname='host2'))
- plan.host_labels.add(afe_models.Label.objects.get(name='label1'))
- plan.save()
-
- self._plan = plan
-
-
- def _planner_common_teardown(self):
- self._plan.delete()
- self._frontend_common_teardown()
-
-
- def _setup_active_plan(self):
- """
- Create an active test plan
-
-        Sets up all the infrastructure for an active test plan. Stores the
- following in self:
-
- _hostname: hostname of the machine under test
- _control: the models.ControlFile object
- _test_config: the models.TestConfig object
- _afe_job: the AFE job started by the plan
- _planner_host: the models.Host object
- _planner_job: the models.Job object
- _tko_machine: the TKO machine (as a tko_models.Machine object) for the
- results
- _tko_job: the TKO job (as a tko_models.Job object) for the results
- _tko_kernel: the TKO kernel (as a tko_models.Kernel object) associated
- with the TKO machine
- _running_status: the TKO status (as a tko_models.Status object) that
- indicates a running TKO test
- _good_status: the TKO status (as a tko_models.Status object) that
- indicates a completed and passed TKO test
- """
- self._hostname = self.hosts[0].hostname
- self._control, _ = models.ControlFile.objects.get_or_create(
- contents='test_control')
- self._test_config = models.TestConfig.objects.create(
- plan=self._plan, alias='config', control_file=self._control,
- execution_order=1, estimated_runtime=1)
- self._afe_job = self._create_job(hosts=(1,))
- self._planner_host = self._plan.host_set.get(host=self.hosts[0])
- self._planner_job = models.Job.objects.create(
- plan=self._plan, test_config=self._test_config,
- afe_job=self._afe_job)
- self._tko_machine = tko_models.Machine.objects.create(
- hostname=self._hostname)
- self._tko_job = tko_models.Job.objects.create(
- tag='job', machine=self._tko_machine,
- afe_job_id=self._afe_job.id)
- self._tko_kernel = tko_models.Kernel.objects.create()
- self._running_status = tko_models.Status.objects.create(
- word=self.RUNNING_STATUS_WORD)
- self._good_status = tko_models.Status.objects.create(
- word=self.GOOD_STATUS_WORD)
- self._fail_status = tko_models.Status.objects.create(
- word=self.FAIL_STATUS_WORD)
diff --git a/frontend/planner/rpc_interface.py b/frontend/planner/rpc_interface.py
deleted file mode 100644
index b97c3d23..00000000
--- a/frontend/planner/rpc_interface.py
+++ /dev/null
@@ -1,623 +0,0 @@
-"""\
-Functions to expose over the RPC interface.
-"""
-
-__author__ = 'jamesren@google.com (James Ren)'
-
-
-import os, re
-import common
-from django.db import models as django_models
-from autotest_lib.frontend import thread_local
-from autotest_lib.frontend.afe import model_logic, models as afe_models
-from autotest_lib.frontend.afe import rpc_utils as afe_rpc_utils
-from autotest_lib.frontend.afe import rpc_interface as afe_rpc_interface
-from autotest_lib.frontend.tko import models as tko_models
-from autotest_lib.frontend.planner import models, rpc_utils, model_attributes
-from autotest_lib.frontend.planner import failure_actions
-from autotest_lib.client.common_lib import utils
-
-# basic getter/setter calls
-# TODO: deprecate the basic calls and reimplement them in the REST framework
-
-def get_plan(id):
- return afe_rpc_utils.prepare_for_serialization(
- models.Plan.smart_get(id).get_object_dict())
-
-
-def modify_plan(id, **data):
- models.Plan.smart_get(id).update_object(data)
-
-
-def modify_test_run(id, **data):
- models.TestRun.objects.get(id=id).update_object(data)
-
-
-def modify_host(id, **data):
- models.Host.objects.get(id=id).update_object(data)
-
-
-def add_job(plan_id, test_config_id, afe_job_id):
- models.Job.objects.create(
- plan=models.Plan.objects.get(id=plan_id),
- test_config=models.TestConfig.objects.get(id=test_config_id),
- afe_job=afe_models.Job.objects.get(id=afe_job_id))
-
-
-# more advanced calls
-
-def submit_plan(name, hosts, host_labels, tests, support=None,
- label_override=None, additional_parameters=None):
- """
- Submits a plan to the Test Planner
-
- @param name: the name of the plan
- @param hosts: a list of hostnames
- @param host_labels: a list of host labels. The hosts under test will update
- to reflect changes in the label
- @param tests: an ordered list of dictionaries:
- alias: an alias for the test
- control_file: the test control file
- is_server: True if is a server-side control file
- estimated_runtime: estimated number of hours this test
- will run
- @param support: the global support script
- @param label_override: label to prepend to all AFE jobs for this test plan.
- Defaults to the plan name.
- @param additional_parameters: A mapping of AdditionalParameters to apply to
- this test plan, as an ordered list. Each item
- of the list is a dictionary:
- hostname_regex: A regular expression; the
- additional parameter in the
- value will be applied if the
- hostname matches this regex
- param_type: The type of additional parameter
- param_values: A dictionary of key=value pairs
- for this parameter
- example:
- [{'hostname_regex': 'host[0-9]',
- 'param_type': 'Verify',
- 'param_values': {'key1': 'value1',
- 'key2': 'value2'}},
- {'hostname_regex': '.*',
- 'param_type': 'Verify',
- 'param_values': {'key': 'value'}}]
-
- Currently, the only (non-site-specific)
- param_type available is 'Verify'. Setting
- these parameters allows the user to specify
- arguments to the
- job.run_test('verify_test', ...) line at the
- beginning of the wrapped control file for each
- test
- """
- host_objects = []
- label_objects = []
-
- for host in hosts or []:
- try:
- host_objects.append(
- afe_models.Host.valid_objects.get(hostname=host))
- except afe_models.Host.DoesNotExist:
- raise model_logic.ValidationError(
- {'hosts': 'host %s does not exist' % host})
-
- for label in host_labels or []:
- try:
- label_objects.append(afe_models.Label.valid_objects.get(name=label))
- except afe_models.Label.DoesNotExist:
- raise model_logic.ValidationError(
- {'host_labels': 'host label %s does not exist' % label})
-
- aliases_seen = set()
- test_required_fields = (
- 'alias', 'control_file', 'is_server', 'estimated_runtime')
- for test in tests:
- for field in test_required_fields:
- if field not in test:
- raise model_logic.ValidationError(
- {'tests': 'field %s is required' % field})
-
- alias = test['alias']
- if alias in aliases_seen:
-            raise model_logic.ValidationError(
- {'tests': 'alias %s occurs more than once' % alias})
- aliases_seen.add(alias)
-
- plan, created = models.Plan.objects.get_or_create(name=name)
- if not created:
- raise model_logic.ValidationError(
- {'name': 'Plan name %s already exists' % name})
-
- try:
- rpc_utils.set_additional_parameters(plan, additional_parameters)
- label = rpc_utils.create_plan_label(plan)
- try:
- for i, test in enumerate(tests):
- control, _ = models.ControlFile.objects.get_or_create(
- contents=test['control_file'])
- models.TestConfig.objects.create(
- plan=plan, alias=test['alias'], control_file=control,
- is_server=test['is_server'], execution_order=i,
- estimated_runtime=test['estimated_runtime'])
-
- plan.label_override = label_override
- plan.support = support or ''
- plan.save()
-
- plan.owners.add(afe_models.User.current_user())
-
- for host in host_objects:
- planner_host = models.Host.objects.create(plan=plan, host=host)
-
- plan.host_labels.add(*label_objects)
-
- rpc_utils.start_plan(plan, label)
-
- return plan.id
- except:
- label.delete()
- raise
- except:
- plan.delete()
- raise
-
-
-def get_hosts(plan_id):
- """
- Gets the hostnames of all the hosts in this test plan.
-
- Resolves host labels in the plan.
- """
- plan = models.Plan.smart_get(plan_id)
-
- hosts = set(plan.hosts.all().values_list('hostname', flat=True))
- for label in plan.host_labels.all():
- hosts.update(label.host_set.all().values_list('hostname', flat=True))
-
- return afe_rpc_utils.prepare_for_serialization(hosts)
-
-
-def get_atomic_group_control_file():
- """
- Gets the control file to apply the atomic group for a set of machines
- """
- return rpc_utils.lazy_load(os.path.join(os.path.dirname(__file__),
- 'set_atomic_group_control.srv'))
-
-
-def get_next_test_configs(plan_id):
- """
- Gets information about the next planner test configs that need to be run
-
- @param plan_id: the ID or name of the test plan
- @return a dictionary:
- complete: True or False, shows test plan completion
- next_configs: a list of dictionaries:
- host: ID of the host
- next_test_config_id: ID of the next Planner test to run
- """
- plan = models.Plan.smart_get(plan_id)
-
- result = {'next_configs': []}
-
- rpc_utils.update_hosts_table(plan)
- for host in models.Host.objects.filter(plan=plan):
- next_test_config = rpc_utils.compute_next_test_config(plan, host)
- if next_test_config:
- config = {'next_test_config_id': next_test_config.id,
- 'next_test_config_alias': next_test_config.alias,
- 'host': host.host.hostname}
- result['next_configs'].append(config)
-
- rpc_utils.check_for_completion(plan)
- result['complete'] = plan.complete
-
- return result
-
-
-def update_test_runs(plan_id):
- """
- Add all applicable TKO jobs to the Planner DB tables
-
-    Looks for tests in the TKO tables that were started as a part of the test
-    plan, and adds them to the Planner tables.
-
-    Also updates the status of the test run if the underlying TKO test moves
-    from an active status to a completed status.
-
- @return a list of dictionaries:
- status: the status of the new (or updated) test run
- tko_test_idx: the ID of the TKO test added
- hostname: the host added
- """
- plan = models.Plan.smart_get(plan_id)
- updated = []
-
- for planner_job in plan.job_set.all():
- known_statuses = dict((test_run.tko_test.test_idx, test_run.status)
- for test_run in planner_job.testrun_set.all())
- tko_tests_for_job = tko_models.Test.objects.filter(
- job__afe_job_id=planner_job.afe_job.id)
-
- for tko_test in tko_tests_for_job:
- status = rpc_utils.compute_test_run_status(tko_test.status.word)
- needs_update = (tko_test.test_idx not in known_statuses or
- status != known_statuses[tko_test.test_idx])
- if needs_update:
- hostnames = tko_test.machine.hostname.split(',')
- for hostname in hostnames:
- rpc_utils.add_test_run(
- plan, planner_job, tko_test, hostname, status)
- updated.append({'status': status,
- 'tko_test_idx': tko_test.test_idx,
- 'hostname': hostname})
-
- return updated
-
-
-def get_failures(plan_id):
- """
- Gets a list of the untriaged failures associated with this plan
-
- @return a dictionary keyed by failure group. The group is normally the
- same as the failure reason, but can differ for custom queries.
- Each value is a list of dictionaries:
- id: the failure ID, for passing back to triage the failure
- machine: the failed machine
- blocked: True if the failure caused the machine to block
- test_name: Concatenation of the Planner alias and the TKO test
- name for the failed test
- reason: test failure reason
- seen: True if the failure is marked as "seen"
- """
- plan = models.Plan.smart_get(plan_id)
- result = {}
-
- failures = plan.testrun_set.filter(
- finalized=True, triaged=False,
- status=model_attributes.TestRunStatus.FAILED)
- failures = failures.order_by('seen').select_related('test_job__test',
- 'host__host',
- 'tko_test')
- for failure in failures:
- test_name = '%s: %s' % (
- failure.test_job.test_config.alias, failure.tko_test.test)
-
- group_failures = result.setdefault(failure.tko_test.reason, [])
- failure_dict = {'id': failure.id,
- 'machine': failure.host.host.hostname,
- 'blocked': bool(failure.host.blocked),
- 'test_name': test_name,
- 'reason': failure.tko_test.reason,
- 'seen': bool(failure.seen)}
- group_failures.append(failure_dict)
-
- return result
-
-
-def get_test_runs(**filter_data):
- """
- Gets a list of test runs that match the filter data.
-
- Returns a list of expanded TestRun object dictionaries. Specifically, the
- "host" and "test_job" fields are expanded. Additionally, the "test_config"
- field of the "test_job" expansion is also expanded.
- """
- result = []
- for test_run in models.TestRun.objects.filter(**filter_data):
- test_run_dict = test_run.get_object_dict()
- test_run_dict['host'] = test_run.host.get_object_dict()
- test_run_dict['test_job'] = test_run.test_job.get_object_dict()
- test_run_dict['test_job']['test_config'] = (
- test_run.test_job.test_config.get_object_dict())
- result.append(test_run_dict)
- return result
-
-
-def skip_test(test_config_id, hostname):
- """
- Marks a test config as "skipped" for a given host
- """
- config = models.TestConfig.objects.get(id=test_config_id)
- config.skipped_hosts.add(afe_models.Host.objects.get(hostname=hostname))
-
-
-def mark_failures_as_seen(failure_ids):
- """
- Marks a set of failures as 'seen'
-
- @param failure_ids: A list of failure IDs, as returned by get_failures(), to
- mark as seen
- """
- models.TestRun.objects.filter(id__in=failure_ids).update(seen=True)
-
-
-def process_failures(failure_ids, host_action, test_action, labels=(),
- keyvals=None, bugs=(), reason=None, invalidate=False):
- """
- Triage a failure
-
- @param failure_id: The failure ID, as returned by get_failures()
- @param host_action: One of 'Block', 'Unblock', 'Reinstall'
- @param test_action: One of 'Skip', 'Rerun'
-
- @param labels: Test labels to apply, by name
- @param keyvals: Dictionary of job keyvals to add (or replace)
- @param bugs: List of bug IDs to associate with this failure
- @param reason: An override for the test failure reason
- @param invalidate: True if failure should be invalidated for the purposes of
- reporting. Defaults to False.
- """
- host_choices = failure_actions.HostAction.values
- test_choices = failure_actions.TestAction.values
- if host_action not in host_choices:
- raise model_logic.ValidationError(
- {'host_action': ('host action %s not valid; must be one of %s'
- % (host_action, ', '.join(host_choices)))})
- if test_action not in test_choices:
- raise model_logic.ValidationError(
- {'test_action': ('test action %s not valid; must be one of %s'
- % (test_action, ', '.join(test_choices)))})
-
- for failure_id in failure_ids:
- rpc_utils.process_failure(
- failure_id=failure_id, host_action=host_action,
- test_action=test_action, labels=labels, keyvals=keyvals,
- bugs=bugs, reason=reason, invalidate=invalidate)
-
-
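-# Editor's sketch: triaging a batch of failures returned by get_failures().
-# The failure IDs, label name, keyval, and bug ID are placeholders.
-def _example_triage(failure_ids):
-    process_failures(failure_ids,
-                     host_action=failure_actions.HostAction.UNBLOCK,
-                     test_action=failure_actions.TestAction.RERUN,
-                     labels=('flaky',), keyvals={'triaged_by': 'example'},
-                     bugs=('12345',), reason='known flake', invalidate=True)
-
-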
-def get_machine_view_data(plan_id):
- """
- Gets the data required for the web frontend Machine View.
-
- @param plan_id: The ID of the test plan
- @return An array. Each element is a dictionary:
- machine: The name of the machine
- status: The machine's status (one of
- model_attributes.HostStatus)
- bug_ids: List of the IDs for the bugs filed
- tests_run: An array of dictionaries:
- test_name: The TKO name of the test
- status: The TKO status word of the test
- """
- plan = models.Plan.smart_get(plan_id)
- result = []
- for host in plan.host_set.all():
- tests_run = []
-
- machine = host.host.hostname
- host_status = host.status()
- bug_ids = set()
-
- testruns = plan.testrun_set.filter(host=host, invalidated=False,
- finalized=True)
- for testrun in testruns:
- test_name = testrun.tko_test.test
- test_status = testrun.tko_test.status.word
- testrun_bug_ids = testrun.bugs.all().values_list(
- 'external_uid', flat=True)
-
- tests_run.append({'test_name': test_name,
- 'status': test_status})
- bug_ids.update(testrun_bug_ids)
-
- result.append({'machine': machine,
- 'status': host_status,
- 'tests_run': tests_run,
- 'bug_ids': list(bug_ids)})
- return result
-
-
-def generate_test_config(alias, afe_test_name=None,
- estimated_runtime=0, **kwargs):
- """
- Creates and returns a test config suitable for passing into submit_plan()
-
- Also accepts optional parameters to pass directly in to the AFE RPC
- interface's generate_control_file() method.
-
- @param alias: The alias for the test
- @param afe_test_name: The name of the test, as shown on AFE
- @param estimated_runtime: Estimated number of hours this test is expected to
- run. For reporting purposes.
- """
- if afe_test_name is None:
- afe_test_name = alias
- alias = alias.replace(' ', '_')
-
- control = afe_rpc_interface.generate_control_file(tests=[afe_test_name],
- **kwargs)
-
- return {'alias': alias,
- 'control_file': control['control_file'],
- 'is_server': control['is_server'],
- 'estimated_runtime': estimated_runtime}
-
-
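-# Editor's sketch: generating a config for an AFE test. The extra keyword
-# argument ('profilers', assumed here to be accepted by the AFE RPC's
-# generate_control_file()) is passed straight through.
-def _example_test_config():
-    return generate_test_config('kernel build', afe_test_name='kernbench',
-                                estimated_runtime=4,
-                                profilers=('oprofile',))
-
-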
-def get_wrapped_test_config(id, hostname, run_verify):
- """
- Gets the TestConfig object identified by the ID
-
- Returns the object dict of the TestConfig, plus an additional
- 'wrapped_control_file' value, which includes the pre-processing that the
- ControlParameters specify.
-
-
- @param id: The ID of the TestConfig to fetch
- @param run_verify: Set to True or False to override the default behavior
- (which is to run the verify test unless the skip_verify
- ControlParameter is set)
- """
- test_config = models.TestConfig.objects.get(id=id)
- object_dict = test_config.get_object_dict()
- object_dict['control_file'] = test_config.control_file.get_object_dict()
- object_dict['wrapped_control_file'] = rpc_utils.wrap_control_file(
- plan=test_config.plan, hostname=hostname,
- run_verify=run_verify, test_config=test_config)
-
- return object_dict
-
-
-def generate_additional_parameters(hostname_regex, param_type, param_values):
- """
- Generates an AdditionalParameter dictionary, for passing in to submit_plan()
-
- Returns a dictionary. To use in submit_plan(), put this dictionary into a
- list (possibly with other additional_parameters dictionaries)
-
- @param hostname_regex: The hostname regular expression to match
- @param param_type: One of get_static_data()['additional_parameter_types']
- @param param_values: Dictionary of key=value pairs for this parameter
- """
- try:
- re.compile(hostname_regex)
- except Exception:
- raise model_logic.ValidationError(
- {'hostname_regex': '%s is not a valid regex' % hostname_regex})
-
- if param_type not in model_attributes.AdditionalParameterType.values:
- raise model_logic.ValidationError(
- {'param_type': '%s is not a valid parameter type' % param_type})
-
- if type(param_values) is not dict:
- raise model_logic.ValidationError(
- {'param_values': '%s is not a dictionary' % repr(param_values)})
-
- return {'hostname_regex': hostname_regex,
- 'param_type': param_type,
- 'param_values': param_values}
-
-
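-# Editor's sketch: building the additional_parameters list for submit_plan().
-# The regex and values are placeholders; VERIFY parameters end up as extra
-# arguments to the verify test (see rpc_utils.wrap_control_file).
-def _example_additional_parameters():
-    param = generate_additional_parameters(
-            hostname_regex='host[0-9]+',
-            param_type=model_attributes.AdditionalParameterType.VERIFY,
-            param_values={'timeout': 120})
-    return [param]
-
-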
-def get_overview_data(plan_ids):
- """
- Gets the data for the Overview tab
-
- @param plan_ids: A list of the plans, by id or name
- @return A dictionary - keys are plan names, values are dictionaries of data:
- machines: A list of dictionaries:
- hostname: The machine's hostname
- status: The host's status
- passed: True if the machine passed the test plan. A 'pass' means
- that, for every test configuration in the plan, the
- machine had at least one AFE job with no failed tests.
- 'passed' could also be None, meaning that this host is
- still running tests.
- bugs: A list of the bugs filed
- test_configs: A list of dictionaries, each representing a test
- config:
- complete: Number of hosts that have completed this test
- config
- estimated_runtime: Number of hours this test config is
- expected to run on each host
- """
- plans = models.Plan.smart_get_bulk(plan_ids)
- result = {}
-
- for plan in plans:
- machines = []
- for host in plan.host_set.all():
- pass_status = rpc_utils.compute_test_config_status(host)
- if pass_status == rpc_utils.ComputeTestConfigStatusResult.PASS:
- passed = True
- elif pass_status == rpc_utils.ComputeTestConfigStatusResult.FAIL:
- passed = False
- else:
- passed = None
- machines.append({'hostname': host.host.hostname,
- 'status': host.status(),
- 'passed': passed})
-
- bugs = set()
- for testrun in plan.testrun_set.all():
- bugs.update(testrun.bugs.values_list('external_uid', flat=True))
-
- test_configs = []
- for test_config in plan.testconfig_set.all():
- complete_jobs = test_config.job_set.filter(
- afe_job__hostqueueentry__complete=True)
- complete_afe_jobs = afe_models.Job.objects.filter(
- id__in=complete_jobs.values_list('afe_job', flat=True))
-
- complete_hosts = afe_models.Host.objects.filter(
- hostqueueentry__job__in=complete_afe_jobs)
- complete_hosts |= test_config.skipped_hosts.all()
-
- test_configs.append(
- {'complete': complete_hosts.distinct().count(),
- 'estimated_runtime': test_config.estimated_runtime})
-
- plan_data = {'machines': machines,
- 'bugs': list(bugs),
- 'test_configs': test_configs}
- result[plan.name] = plan_data
-
- return result
-
-
-def get_test_view_data(plan_id):
- """
- Gets the data for the Test View tab
-
- @param plan_id: The name or ID of the test plan
- @return A dictionary - Keys are test config aliases, values are dictionaries
- of data:
- total_machines: Total number of machines scheduled for this test
- config. Excludes machines that are set to skip
- this config.
- machine_status: A dictionary:
- key: The hostname
- value: The status of the machine: one of 'Scheduled',
- 'Running', 'Pass', or 'Fail'
- total_runs: Total number of runs of this test config. Includes
- repeated runs (from triage re-run)
- total_passes: Number of runs that resulted in a 'pass', meaning
- that none of the tests in the test config had any
- status other than GOOD.
- bugs: List of bugs that were filed under this test config
- """
- plan = models.Plan.smart_get(plan_id)
- result = {}
- for test_config in plan.testconfig_set.all():
- skipped_host_ids = test_config.skipped_hosts.values_list('id',
- flat=True)
- hosts = plan.host_set.exclude(host__id__in=skipped_host_ids)
- total_machines = hosts.count()
-
- machine_status = {}
- for host in hosts:
- machine_status[host.host.hostname] = (
- rpc_utils.compute_test_config_status(host, test_config))
-
- planner_jobs = test_config.job_set.all()
- total_runs = planner_jobs.count()
- total_passes = 0
- for planner_job in planner_jobs:
- if planner_job.all_tests_passed():
- total_passes += 1
-
- test_runs = plan.testrun_set.filter(
- test_job__in=test_config.job_set.all())
- bugs = set()
- for test_run in test_runs:
- bugs.update(test_run.bugs.values_list('external_uid', flat=True))
-
- result[test_config.alias] = {'total_machines': total_machines,
- 'machine_status': machine_status,
- 'total_runs': total_runs,
- 'total_passes': total_passes,
- 'bugs': list(bugs)}
- return result
-
-
-def get_motd():
- return afe_rpc_utils.get_motd()
-
-
-def get_static_data():
- result = {'motd': get_motd(),
- 'host_actions': sorted(failure_actions.HostAction.values),
- 'test_actions': sorted(failure_actions.TestAction.values),
- 'additional_parameter_types':
- sorted(model_attributes.AdditionalParameterType.values),
- 'host_statuses': sorted(model_attributes.HostStatus.values)}
- return result
diff --git a/frontend/planner/rpc_interface_unittest.py b/frontend/planner/rpc_interface_unittest.py
deleted file mode 100755
index 30366bf2..00000000
--- a/frontend/planner/rpc_interface_unittest.py
+++ /dev/null
@@ -1,331 +0,0 @@
-#!/usr/bin/python
-
-import unittest
-import common
-from autotest_lib.frontend import setup_django_environment
-from autotest_lib.frontend import setup_test_environment
-from autotest_lib.frontend.planner import planner_test_utils, model_attributes
-from autotest_lib.frontend.planner import rpc_interface, models, rpc_utils
-from autotest_lib.frontend.planner import failure_actions
-from autotest_lib.frontend.afe import model_logic, models as afe_models
-from autotest_lib.frontend.afe import rpc_interface as afe_rpc_interface
-from autotest_lib.frontend.tko import models as tko_models
-
-
-class DummyTestConfig(object):
- def __init__(self):
- self.id = object()
- self.alias = object()
-
-
-class RpcInterfaceTest(unittest.TestCase,
- planner_test_utils.PlannerTestMixin):
- def setUp(self):
- self._planner_common_setup()
- self.god.stub_function(rpc_utils, 'start_plan')
-
-
- def tearDown(self):
- self._planner_common_teardown()
-
-
- def test_submit_plan_success(self):
- hosts = ('host1', 'host2')
- plan_name = self._PLAN_NAME + '2'
-
- rpc_utils.start_plan.expect_any_call()
- rpc_interface.submit_plan(plan_name, hosts, ('label1',), ())
-
- plan = models.Plan.objects.get(name=plan_name)
- self.assertEqual(
- set(afe_models.Host.objects.filter(hostname__in=hosts)),
- set(plan.hosts.all()))
-
- self.assertEqual(1, plan.host_labels.all().count())
- self.assertEqual(afe_models.Label.objects.get(name='label1'),
- plan.host_labels.all()[0])
- self.god.check_playback()
-
-
- def test_submit_plan_duplicate(self):
- self.assertRaises(
- model_logic.ValidationError, rpc_interface.submit_plan,
- self._PLAN_NAME, (), (), ())
-
-
- def test_submit_plan_bad_host(self):
- self.assertRaises(
- model_logic.ValidationError, rpc_interface.submit_plan,
- self._PLAN_NAME + '2', ('fakehost',), (), ())
-
-
- def test_submit_plan_bad_label(self):
- self.assertRaises(
- model_logic.ValidationError, rpc_interface.submit_plan,
- self._PLAN_NAME + '2', (), ('fakelabel',), ())
-
-
- def test_get_hosts(self):
- hosts = rpc_interface.get_hosts(self._PLAN_NAME)
- self.assertEqual(set(('host1', 'host2')), set(hosts))
-
- afe_models.Host.objects.get(hostname='host3').labels.add(
- afe_models.Label.objects.get(name='label1'))
-
- hosts = rpc_interface.get_hosts(self._PLAN_NAME)
- self.assertEqual(set(('host1', 'host2', 'host3')), set(hosts))
-
- afe_models.Host.objects.get(hostname='host3').labels.clear()
-
- hosts = rpc_interface.get_hosts(self._PLAN_NAME)
- self.assertEqual(set(('host1', 'host2')), set(hosts))
-
-
- def test_get_next_test_configs(self):
- DUMMY_CONFIGS = {'host1': DummyTestConfig(),
- 'host2': DummyTestConfig()}
- DUMMY_COMPLETE = object()
- self.god.stub_function(rpc_utils, 'compute_next_test_config')
-
- for host in models.Host.objects.filter(plan=self._plan):
- rpc_utils.compute_next_test_config.expect_call(
- self._plan, host).and_return(
- DUMMY_CONFIGS[host.host.hostname])
-
- def _dummy_check_for_completion(plan):
- plan.complete = DUMMY_COMPLETE
- rpc_utils.check_for_completion = _dummy_check_for_completion
-
- result = rpc_interface.get_next_test_configs(self._plan.id)
-
- self.god.check_playback()
- self.assertEqual(result['complete'], DUMMY_COMPLETE)
- for config in result['next_configs']:
- self.assertTrue(config['host'] in DUMMY_CONFIGS)
- self.assertEqual(config['next_test_config_id'],
- DUMMY_CONFIGS[config['host']].id)
- self.assertEqual(config['next_test_config_alias'],
- DUMMY_CONFIGS[config['host']].alias)
-
-
- def test_update_test_runs(self):
- self._setup_active_plan()
-
- self.god.stub_function(rpc_utils, 'compute_test_run_status')
- self.god.stub_function(rpc_utils, 'add_test_run')
-
- # No TKO tests
- self.assertEqual([], rpc_interface.update_test_runs(self._plan.id))
- self.god.check_playback()
-
- # active TKO test
- tko_test = tko_models.Test.objects.create(job=self._tko_job,
- machine=self._tko_machine,
- kernel=self._tko_kernel,
- status=self._running_status)
-
- rpc_utils.compute_test_run_status.expect_call(
- self.RUNNING_STATUS_WORD).and_return(
- model_attributes.TestRunStatus.ACTIVE)
- rpc_utils.add_test_run.expect_call(
- self._plan, self._planner_job, tko_test, self._hostname,
- model_attributes.TestRunStatus.ACTIVE)
- self.assertEqual(rpc_interface.update_test_runs(self._plan.id),
- [{'status': model_attributes.TestRunStatus.ACTIVE,
- 'tko_test_idx': tko_test.test_idx,
- 'hostname': self._hostname}])
- self.god.check_playback()
- test_run = models.TestRun.objects.create(
- plan=self._plan, test_job=self._planner_job,
- tko_test=tko_test, host=self._planner_host,
- status=model_attributes.TestRunStatus.ACTIVE)
-
- # no change to TKO test
- rpc_utils.compute_test_run_status.expect_call(
- self.RUNNING_STATUS_WORD).and_return(
- model_attributes.TestRunStatus.ACTIVE)
- self.assertEqual([], rpc_interface.update_test_runs(self._plan.id))
- self.god.check_playback()
-
- # TKO test is now complete, passed
- tko_test.status = self._good_status
- tko_test.save()
-
- rpc_utils.compute_test_run_status.expect_call(
- self.GOOD_STATUS_WORD).and_return(
- model_attributes.TestRunStatus.PASSED)
- rpc_utils.add_test_run.expect_call(
- self._plan, self._planner_job, tko_test, self._hostname,
- model_attributes.TestRunStatus.PASSED)
- self.assertEqual(rpc_interface.update_test_runs(self._plan.id),
- [{'status': model_attributes.TestRunStatus.PASSED,
- 'tko_test_idx': tko_test.test_idx,
- 'hostname': self._hostname}])
- self.god.check_playback()
-
-
- def test_get_machine_view_data(self):
- self._setup_active_plan()
-
- host1_expected = {'machine': 'host1',
- 'status': 'Running',
- 'tests_run': [],
- 'bug_ids': []}
- host2_expected = {'machine': 'host2',
- 'status': 'Running',
- 'tests_run': [],
- 'bug_ids': []}
-
- expected = (host1_expected, host2_expected)
- actual = rpc_interface.get_machine_view_data(plan_id=self._plan.id)
- self.assertEqual(sorted(actual), sorted(expected))
-
- # active TKO test
- tko_test = tko_models.Test.objects.create(job=self._tko_job,
- test='test',
- machine=self._tko_machine,
- kernel=self._tko_kernel,
- status=self._running_status)
- testrun = models.TestRun.objects.create(plan=self._plan,
- test_job=self._planner_job,
- host=self._planner_host,
- tko_test=tko_test,
- finalized=True)
-
- host1_expected['tests_run'] = [{'test_name': 'test',
- 'status': self._running_status.word}]
- actual = rpc_interface.get_machine_view_data(plan_id=self._plan.id)
- self.assertEqual(sorted(actual), sorted(expected))
-
- # TKO test complete, passed, with bug filed
- tko_test.status = self._good_status
- tko_test.save()
- bug = models.Bug.objects.create(external_uid='bug')
- testrun.bugs.add(bug)
-
- host1_expected['tests_run'] = [{'test_name': 'test',
- 'status': self._good_status.word}]
- host1_expected['bug_ids'] = ['bug']
- actual = rpc_interface.get_machine_view_data(plan_id=self._plan.id)
- self.assertEqual(sorted(actual), sorted(expected))
-
-
- def test_generate_test_config(self):
- control = {'control_file': object(),
- 'is_server': object()}
- test = 'test'
- alias = 'test alias'
- estimated_runtime = object()
-
- self.god.stub_function(afe_rpc_interface, 'generate_control_file')
- afe_rpc_interface.generate_control_file.expect_call(
- tests=[test]).and_return(control)
-
- result = rpc_interface.generate_test_config(
- alias=alias, afe_test_name=test,
- estimated_runtime=estimated_runtime)
-
- self.assertEqual(result['alias'], 'test_alias')
- self.assertEqual(result['control_file'], control['control_file'])
- self.assertEqual(result['is_server'], control['is_server'])
- self.assertEqual(result['estimated_runtime'], estimated_runtime)
- self.god.check_playback()
-
-
- def _test_get_overview_data_helper(self, stage):
- self._setup_active_plan()
- self.god.stub_function(rpc_utils, 'compute_test_config_status')
- rpc_utils.compute_test_config_status.expect_call(
- self._plan.host_set.get(host=self.hosts[0])).and_return(None)
- rpc_utils.compute_test_config_status.expect_call(
- self._plan.host_set.get(host=self.hosts[1])).and_return(None)
-
- data = {'test_configs': [{'complete': 0, 'estimated_runtime': 1}],
- 'bugs': [],
- 'machines': [{'hostname': self.hosts[0].hostname,
- 'status': 'Running',
- 'passed': None},
- {'hostname': self.hosts[1].hostname,
- 'status': 'Running',
- 'passed': None}]}
- if stage < 1:
- return {self._plan.name: data}
-
- tko_test = self._tko_job.test_set.create(kernel=self._tko_kernel,
- machine=self._tko_machine,
- status=self._fail_status)
- test_run = self._plan.testrun_set.create(test_job=self._planner_job,
- tko_test=tko_test,
- host=self._planner_host)
- self._afe_job.hostqueueentry_set.update(complete=True)
- self._planner_host.complete = True
- self._planner_host.save()
- test_run.bugs.create(external_uid='bug')
- data['bugs'] = ['bug']
- data['test_configs'][0]['complete'] = 1
- data['machines'][0]['status'] = 'Finished'
- return {self._plan.name: data}
-
-
- def test_get_overview_data_no_progress(self):
- self.assertEqual(self._test_get_overview_data_helper(0),
- rpc_interface.get_overview_data([self._plan.id]))
- self.god.check_playback()
-
-
- def test_get_overview_data_one_finished_with_bug(self):
- self.assertEqual(self._test_get_overview_data_helper(1),
- rpc_interface.get_overview_data([self._plan.id]))
- self.god.check_playback()
-
-
- def _test_get_test_view_data_helper(self, stage):
- self._setup_active_plan()
- self.god.stub_function(rpc_utils, 'compute_test_config_status')
- hosts = self._plan.host_set.filter(host__in=self.hosts[0:2])
- rpc_utils.compute_test_config_status.expect_call(
- hosts[0], self._test_config).and_return(None)
-
- data = {'total_machines': 2,
- 'machine_status': {'host1': None,
- 'host2': None},
- 'total_runs': 1,
- 'total_passes': 0,
- 'bugs': []}
- if stage < 1:
- rpc_utils.compute_test_config_status.expect_call(
- hosts[1], self._test_config).and_return(None)
- return {self._test_config.alias: data}
-
- fail_status = rpc_utils.ComputeTestConfigStatusResult.FAIL
- rpc_utils.compute_test_config_status.expect_call(
- hosts[1], self._test_config).and_return(fail_status)
- tko_test = self._tko_job.test_set.create(kernel=self._tko_kernel,
- machine=self._tko_machine,
- status=self._fail_status)
- test_run = self._plan.testrun_set.create(test_job=self._planner_job,
- tko_test=tko_test,
- host=self._planner_host)
- self._afe_job.hostqueueentry_set.update(complete=True)
-
- test_run.bugs.create(external_uid='bug')
-
- data['machine_status']['host2'] = fail_status
- data['bugs'] = ['bug']
- return {self._test_config.alias: data}
-
-
- def test_get_test_view_data_no_progress(self):
- self.assertEqual(self._test_get_test_view_data_helper(0),
- rpc_interface.get_test_view_data(self._plan.id))
- self.god.check_playback()
-
-
- def test_get_test_view_data_one_failed_with_bug(self):
- self.assertEqual(self._test_get_test_view_data_helper(1),
- rpc_interface.get_test_view_data(self._plan.id))
- self.god.check_playback()
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/frontend/planner/rpc_utils.py b/frontend/planner/rpc_utils.py
deleted file mode 100644
index f314fa76..00000000
--- a/frontend/planner/rpc_utils.py
+++ /dev/null
@@ -1,365 +0,0 @@
-import common
-import os
-from autotest_lib.frontend.afe import models as afe_models, model_logic
-from autotest_lib.frontend.planner import models, model_attributes
-from autotest_lib.frontend.planner import failure_actions, control_file
-from autotest_lib.frontend.tko import models as tko_models
-from autotest_lib.client.common_lib import global_config, utils
-from autotest_lib.client.common_lib import enum
-
-
-PLANNER_LABEL_PREFIX = 'planner_'
-PLANNER_ATOMIC_GROUP_NAME = 'planner_global_atomic_group'
-SERVER = global_config.global_config.get_config_value('SERVER', 'hostname')
-LAZY_LOADED_FILES = {}
-
-
-def create_plan_label(plan):
- """
- Creates the host label to apply on the plan hosts
- """
- group, _ = afe_models.AtomicGroup.objects.get_or_create(
- name=PLANNER_ATOMIC_GROUP_NAME)
- if group.invalid:
- group.invalid = False
- group.save()
-
- name = PLANNER_LABEL_PREFIX + plan.name
- if bool(afe_models.Label.valid_objects.filter(name=name)):
- raise model_logic.ValidationError('Label %s already exists, '
- 'cannot start plan' % name)
- label = afe_models.Label(name=name, atomic_group=group)
- label.save()
-
- return label
-
-
-def start_plan(plan, label):
- """
- Takes the necessary steps to start a test plan in Autotest
- """
- timeout = global_config.global_config.get_config_value(
- 'PLANNER', 'execution_engine_timeout')
- control = _get_execution_engine_control(
- server=SERVER,
- plan_id=plan.id,
- label_name=label.name,
- owner=afe_models.User.current_user().login)
- options = {'name': plan.name + '_execution_engine',
- 'priority': afe_models.Job.Priority.MEDIUM,
- 'control_file': control,
- 'control_type': afe_models.Job.ControlType.SERVER,
- 'synch_count': None,
- 'timeout': timeout,
- 'max_runtime_hrs': timeout,
- 'run_verify': False,
- 'reboot_before': False,
- 'reboot_after': False,
- 'dependencies': ()}
- job = afe_models.Job.create(owner=afe_models.User.current_user().login,
- options=options, hosts=())
- job.queue(hosts=())
-
-
-def _get_execution_engine_control(server, plan_id, label_name, owner):
- """
- Gets the control file to run the execution engine
- """
- control = lazy_load(os.path.join(os.path.dirname(__file__),
- 'execution_engine_control.srv'))
- return control % dict(server=server, plan_id=plan_id,
- label_name=label_name, owner=owner)
-
-
-def lazy_load(path):
- """
- Lazily loads the file indicated by the path given, and caches the result
- """
- if path not in LAZY_LOADED_FILES:
- LAZY_LOADED_FILES[path] = utils.read_file(path)
-
- return LAZY_LOADED_FILES[path]
-
-
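-# Editor's sketch: the cache in action. Reading the same path twice only
-# touches the filesystem once; the second call is served straight from
-# LAZY_LOADED_FILES. A temporary file keeps the sketch self-contained.
-def _lazy_load_demo():
-    import tempfile
-    handle = tempfile.NamedTemporaryFile()
-    handle.write('control file body')
-    handle.flush()
-    first = lazy_load(handle.name)
-    second = lazy_load(handle.name)  # cache hit; utils.read_file not called
-    assert first is second
-    handle.close()
-
-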
-def update_hosts_table(plan):
- """
- Resolves the host labels into host objects
-
- Adds or removes hosts from the planner Hosts model based on changes to the
- host label
- """
- label_hosts = set()
-
- for label in plan.host_labels.all():
- for afe_host in label.host_set.all():
- host, created = models.Host.objects.get_or_create(plan=plan,
- host=afe_host)
- if created:
- host.added_by_label = True
- host.save()
-
- label_hosts.add(host.host.id)
-
- deleted_hosts = models.Host.objects.filter(
- plan=plan, added_by_label=True).exclude(host__id__in=label_hosts)
- deleted_hosts.delete()
-
-
-def compute_next_test_config(plan, host):
- """
- Gets the next test config that should be run for this plan and host
-
- Returns None if the host is already running a job. Also sets the host's
- complete bit if the host is finished running tests.
- """
- if host.blocked:
- return None
-
- test_configs = plan.testconfig_set.exclude(
- skipped_hosts=host.host).order_by('execution_order')
- result = None
-
- for test_config in test_configs:
- planner_jobs = test_config.job_set.filter(
- afe_job__hostqueueentry__host=host.host)
- for planner_job in planner_jobs:
- if planner_job.active():
- # There is a job active; do not start another one
- return None
- try:
- planner_job = planner_jobs.get(requires_rerun=False)
- except models.Job.DoesNotExist:
- if not result:
- result = test_config
-
- if result:
- return result
-
- # All jobs related to this host are complete
- host.complete = True
- host.save()
- return None
-
-
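-# Editor's note: the selection logic above, reduced to plain data. For each
-# config in execution order, an active job anywhere means "wait"; the
-# result is the first config with no finished non-rerun job, and None once
-# every config is satisfied.
-def _next_config_sketch(configs):
-    # configs: list of (has_active_job, has_satisfying_job) booleans
-    result = None
-    for index, (active, satisfied) in enumerate(configs):
-        if active:
-            return None  # a job is still running; do not start another
-        if not satisfied and result is None:
-            result = index  # first config that still needs a run
-    return result  # None here means the host has finished the plan
-
-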
-def check_for_completion(plan):
- """
- Checks if a plan is actually complete. Sets complete=True if so
- """
- if not models.Host.objects.filter(plan=plan, complete=False):
- plan.complete = True
- plan.save()
-
-
-def compute_test_run_status(status):
- """
- Converts a TKO test status to a Planner test run status
- """
- Status = model_attributes.TestRunStatus
- if status == 'GOOD':
- return Status.PASSED
- if status == 'RUNNING':
- return Status.ACTIVE
- return Status.FAILED
-
-
-def add_test_run(plan, planner_job, tko_test, hostname, status):
- """
- Adds a TKO test to the Planner Test Run tables
- """
- host = afe_models.Host.objects.get(hostname=hostname)
-
- planner_host = models.Host.objects.get(plan=plan, host=host)
- test_run, _ = models.TestRun.objects.get_or_create(plan=plan,
- test_job=planner_job,
- tko_test=tko_test,
- host=planner_host)
- test_run.status = status
- test_run.save()
-
-
-def process_failure(failure_id, host_action, test_action, labels, keyvals,
- bugs, reason, invalidate):
- if keyvals is None:
- keyvals = {}
-
- failure = models.TestRun.objects.get(id=failure_id)
-
- _process_host_action(failure.host, host_action)
- _process_test_action(failure.test_job, test_action)
-
- # Add the test labels
- for label in labels:
- tko_test_label, _ = (
- tko_models.TestLabel.objects.get_or_create(name=label))
- failure.tko_test.testlabel_set.add(tko_test_label)
-
- # Set the job keyvals
- for key, value in keyvals.iteritems():
- keyval, created = tko_models.JobKeyval.objects.get_or_create(
- job=failure.tko_test.job, key=key)
- if not created:
- tko_models.JobKeyval.objects.create(job=failure.tko_test.job,
- key='original_' + key,
- value=keyval.value)
- keyval.value = value
- keyval.save()
-
- # Add the bugs
- for bug_id in bugs:
- bug, _ = models.Bug.objects.get_or_create(external_uid=bug_id)
- failure.bugs.add(bug)
-
- # Set the failure reason
- if reason is not None:
- tko_models.TestAttribute.objects.create(test=failure.tko_test,
- attribute='original_reason',
- value=failure.tko_test.reason)
- failure.tko_test.reason = reason
- failure.tko_test.save()
-
- # Set 'invalidated', 'seen', and 'triaged'
- failure.invalidated = invalidate
- failure.seen = True
- failure.triaged = True
- failure.save()
-
-
-def _site_process_host_action_dummy(host, action):
- return False
-
-
-def _process_host_action(host, action):
- """
- Takes the specified action on the host
- """
- HostAction = failure_actions.HostAction
- if action not in HostAction.values:
- raise ValueError('Unexpected host action %s' % action)
-
- site_process = utils.import_site_function(
- __file__, 'autotest_lib.frontend.planner.site_rpc_utils',
- 'site_process_host_action', _site_process_host_action_dummy)
-
- if not site_process(host, action):
- # site_process_host_action returns True if and only if it matched a
- # site-specific processing option
- if action == HostAction.BLOCK:
- host.blocked = True
- elif action == HostAction.UNBLOCK:
- host.blocked = False
- else:
- assert action == HostAction.REINSTALL
- raise NotImplementedError('TODO: implement reinstall')
-
- host.save()
-
-
-def _process_test_action(planner_job, action):
- """
- Takes the specified action for this planner job
- """
- TestAction = failure_actions.TestAction
- if action not in TestAction.values:
- raise ValueError('Unexpected test action %s' % action)
-
- if action == TestAction.SKIP:
- # Do nothing
- pass
- else:
- assert action == TestAction.RERUN
- planner_job.requires_rerun = True
- planner_job.save()
-
-
-def set_additional_parameters(plan, additional_parameters):
- if not additional_parameters:
- return
-
- for index, additional_parameter in enumerate(additional_parameters):
- hostname_regex = additional_parameter['hostname_regex']
- param_type = additional_parameter['param_type']
- param_values = additional_parameter['param_values']
-
- additional_param = models.AdditionalParameter.objects.create(
- plan=plan, hostname_regex=hostname_regex,
- param_type=param_type, application_order=index)
-
- for key, value in param_values.iteritems():
- models.AdditionalParameterValue.objects.create(
- additional_parameter=additional_param,
- key=key, value=repr(value))
-
-
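-# Editor's note: parameter values are stored repr()ed, and
-# get_wrap_arguments() below hands them back still in repr() form (see the
-# rpc_utils unit tests). A consumer wanting the original Python values back
-# would have to undo that; this decoder is an assumption, not something
-# this module provides.
-def _decode_param_values(param_values):
-    import ast
-    return dict((key, ast.literal_eval(value))
-                for key, value in param_values.iteritems())
-
-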
-def _additional_wrap_arguments_dummy(plan, hostname):
- return {}
-
-
-def get_wrap_arguments(plan, hostname, param_type):
- additional_param = (
- models.AdditionalParameter.find_applicable_additional_parameter(
- plan=plan, hostname=hostname, param_type=param_type))
- if not additional_param:
- return {}
-
- param_values = additional_param.additionalparametervalue_set.values_list(
- 'key', 'value')
- return dict(param_values)
-
-
-def wrap_control_file(plan, hostname, run_verify, test_config):
- """
- Wraps a control file using the ControlParameters for the plan
- """
- site_additional_wrap_arguments = utils.import_site_function(
- __file__, 'autotest_lib.frontend.planner.site_rpc_utils',
- 'additional_wrap_arguments', _additional_wrap_arguments_dummy)
- additional_wrap_arguments = site_additional_wrap_arguments(plan, hostname)
-
- verify_params = get_wrap_arguments(
- plan, hostname, model_attributes.AdditionalParameterType.VERIFY)
-
- return control_file.wrap_control_file(
- control_file=test_config.control_file.contents,
- is_server=test_config.is_server,
- skip_verify=(not run_verify),
- verify_params=verify_params,
- **additional_wrap_arguments)
-
-
-ComputeTestConfigStatusResult = enum.Enum('Pass', 'Fail', 'Scheduled',
- 'Running', string_values=True)
-def compute_test_config_status(host, test_config=None):
- """
- Returns a value of ComputeTestConfigStatusResult:
- Pass: This host passed the test config
- Fail: This host failed the test config
- Scheduled: This host has not yet run this test config
- Running: This host is currently running this test config
-
- A Pass means that the host had, for every test config checked, at least
- one AFE job that required no rerun and had no failed tests.
-
- @param test_config: A test config to check. None to check all test configs
- in the plan
- """
- if test_config:
- test_configs = [test_config]
- else:
- test_configs = host.plan.testconfig_set.exclude(skipped_hosts=host.host)
-
- for test_config in test_configs:
- try:
- planner_job = test_config.job_set.get(
- afe_job__hostqueueentry__host=host.host,
- requires_rerun=False)
- except models.Job.DoesNotExist:
- return ComputeTestConfigStatusResult.SCHEDULED
-
- if planner_job.active():
- return ComputeTestConfigStatusResult.RUNNING
-
- if planner_job.testrun_set.exclude(tko_test__status__word='GOOD'):
- return ComputeTestConfigStatusResult.FAIL
-
- return ComputeTestConfigStatusResult.PASS
diff --git a/frontend/planner/rpc_utils_unittest.py b/frontend/planner/rpc_utils_unittest.py
deleted file mode 100755
index 6d97dd19..00000000
--- a/frontend/planner/rpc_utils_unittest.py
+++ /dev/null
@@ -1,385 +0,0 @@
-#!/usr/bin/python
-
-import unittest
-import common
-from autotest_lib.frontend import setup_django_environment
-from autotest_lib.frontend import setup_test_environment
-from autotest_lib.frontend.afe import model_logic, models as afe_models
-from autotest_lib.frontend.planner import planner_test_utils, model_attributes
-from autotest_lib.frontend.planner import models, rpc_utils, failure_actions
-from autotest_lib.frontend.tko import models as tko_models
-from autotest_lib.client.common_lib import utils, host_queue_entry_states
-
-
-class RpcUtilsTest(unittest.TestCase,
- planner_test_utils.PlannerTestMixin):
- def setUp(self):
- self._planner_common_setup()
-
-
- def tearDown(self):
- self._planner_common_teardown()
-
-
- def test_create_plan_label(self):
- label, group = self._create_label_helper()
-
- label.delete()
- group.invalid = True
- group.save()
-
- label, group = self._create_label_helper()
-
- self.assertRaises(model_logic.ValidationError,
- rpc_utils.create_plan_label, self._plan)
-
-
- def _create_label_helper(self):
- label = rpc_utils.create_plan_label(self._plan)
- group = afe_models.AtomicGroup.objects.get(
- name=rpc_utils.PLANNER_ATOMIC_GROUP_NAME)
- self.assertFalse(group.invalid)
- self.assertEqual(label.atomic_group, group)
-
- return (label, group)
-
-
- def test_lazy_load(self):
- self.god.stub_function(utils, 'read_file')
-
- DUMMY_PATH_1 = object()
- DUMMY_PATH_2 = object()
- DUMMY_FILE_1 = object()
- DUMMY_FILE_2 = object()
-
- utils.read_file.expect_call(DUMMY_PATH_1).and_return(DUMMY_FILE_1)
- self.assertEqual(DUMMY_FILE_1, rpc_utils.lazy_load(DUMMY_PATH_1))
- self.god.check_playback()
-
- # read_file should not be called again for this path
- self.assertEqual(DUMMY_FILE_1, rpc_utils.lazy_load(DUMMY_PATH_1))
- self.god.check_playback()
-
- # new file; read_file must be called again
- utils.read_file.expect_call(DUMMY_PATH_2).and_return(DUMMY_FILE_2)
- self.assertEqual(DUMMY_FILE_2, rpc_utils.lazy_load(DUMMY_PATH_2))
- self.god.check_playback()
-
-
- def test_update_hosts_table(self):
- label = self.labels[3]
- default_hosts = set(self._plan.hosts.all())
-
- rpc_utils.update_hosts_table(self._plan)
- self.assertEqual(default_hosts, set(self._plan.hosts.all()))
- self.assertEqual(set(), self._get_added_by_label_hosts())
-
- self._plan.host_labels.add(label)
- rpc_utils.update_hosts_table(self._plan)
- self.assertEqual(default_hosts.union(label.host_set.all()),
- set(self._plan.hosts.all()))
- self.assertEqual(set(label.host_set.all()),
- self._get_added_by_label_hosts())
-
- self._plan.host_labels.remove(label)
- rpc_utils.update_hosts_table(self._plan)
- self.assertEqual(default_hosts, set(self._plan.hosts.all()))
- self.assertEqual(set(), self._get_added_by_label_hosts())
-
-
- def _get_added_by_label_hosts(self):
- return set(host.host for host in models.Host.objects.filter(
- plan=self._plan, added_by_label=True))
-
-
- def test_compute_next_test_config(self):
- self._setup_active_plan()
- test_config = models.TestConfig.objects.create(
- plan=self._plan, alias='config2', control_file=self._control,
- execution_order=2, estimated_runtime=1)
-
- self.assertEqual(1, self._afe_job.hostqueueentry_set.count())
- self.assertEqual(
- None, rpc_utils.compute_next_test_config(self._plan,
- self._planner_host))
- self.assertFalse(self._planner_host.complete)
-
- hqe = self._afe_job.hostqueueentry_set.all()[0]
- hqe.status = host_queue_entry_states.Status.COMPLETED
- hqe.save()
-
- self.assertEqual(
- test_config,
- rpc_utils.compute_next_test_config(self._plan,
- self._planner_host))
- self.assertFalse(self._planner_host.complete)
-
- afe_job = self._create_job(hosts=(1,))
- planner_job = models.Job.objects.create(plan=self._plan,
- test_config=test_config,
- afe_job=afe_job)
-
- self.assertEqual(
- None, rpc_utils.compute_next_test_config(self._plan,
- self._planner_host))
- self.assertFalse(self._planner_host.complete)
-
- hqe = afe_job.hostqueueentry_set.all()[0]
- hqe.status = host_queue_entry_states.Status.COMPLETED
- hqe.save()
-
- self.assertEqual(
- None, rpc_utils.compute_next_test_config(self._plan,
- self._planner_host))
- self.assertTrue(self._planner_host.complete)
-
-
- def test_process_failure(self):
- self._setup_active_plan()
- tko_test = tko_models.Test.objects.create(job=self._tko_job,
- machine=self._tko_machine,
- kernel=self._tko_kernel,
- status=self._running_status)
- failure = models.TestRun.objects.create(
- plan=self._plan,
- test_job=self._planner_job,
- tko_test=tko_test,
- host=self._planner_host,
- status=model_attributes.TestRunStatus.FAILED,
- finalized=True, seen=False, triaged=False)
- host_action = failure_actions.HostAction.UNBLOCK
- test_action = failure_actions.TestAction.SKIP
- labels = ['label1', 'label2']
- keyvals = {'key1': 'value1',
- 'key2': 'value2'}
- bugs = ['bug1', 'bug2']
- reason = 'overridden reason'
- invalidate = True
-
- self.god.stub_function(rpc_utils, '_process_host_action')
- self.god.stub_function(rpc_utils, '_process_test_action')
-
- rpc_utils._process_host_action.expect_call(self._planner_host,
- host_action)
- rpc_utils._process_test_action.expect_call(self._planner_job,
- test_action)
-
- rpc_utils.process_failure(
- failure_id=failure.id, host_action=host_action,
- test_action=test_action, labels=labels, keyvals=keyvals,
- bugs=bugs, reason=reason, invalidate=invalidate)
- failure = models.TestRun.objects.get(id=failure.id)
-
- self.assertEqual(
- set(failure.tko_test.testlabel_set.all()),
- set(tko_models.TestLabel.objects.filter(name__in=labels)))
- self.assertEqual(
- set(failure.tko_test.job.jobkeyval_set.all()),
- set(tko_models.JobKeyval.objects.filter(
- key__in=keyvals.iterkeys())))
- self.assertEqual(set(failure.bugs.all()),
- set(models.Bug.objects.filter(external_uid__in=bugs)))
- self.assertEqual(failure.tko_test.reason, reason)
- self.assertEqual(failure.invalidated, invalidate)
- self.assertTrue(failure.seen)
- self.assertTrue(failure.triaged)
- self.god.check_playback()
-
-
- def _replace_site_process_host_action(self, replacement):
- self.god.stub_function(utils, 'import_site_function')
- utils.import_site_function.expect_any_call().and_return(replacement)
-
-
- def _remove_site_process_host_action(self):
- def _site_process_host_action_dummy(host, action):
- return False
- self._replace_site_process_host_action(_site_process_host_action_dummy)
-
-
- def test_process_host_action_block(self):
- self._remove_site_process_host_action()
- host = models.Host.objects.create(plan=self._plan, host=self.hosts[0],
- blocked=False)
- assert not host.blocked
-
- rpc_utils._process_host_action(host, failure_actions.HostAction.BLOCK)
- host = models.Host.objects.get(id=host.id)
-
- self.assertTrue(host.blocked)
- self.god.check_playback()
-
-
- def test_process_host_action_unblock(self):
- self._remove_site_process_host_action()
- host = models.Host.objects.create(plan=self._plan, host=self.hosts[0],
- blocked=True)
- assert host.blocked
-
- rpc_utils._process_host_action(host, failure_actions.HostAction.UNBLOCK)
- host = models.Host.objects.get(id=host.id)
-
- self.assertFalse(host.blocked)
- self.god.check_playback()
-
-
- def test_process_host_action_site(self):
- self._remove_site_process_host_action()
- action = object()
- failure_actions.HostAction.values.append(action)
- host = models.Host.objects.create(plan=self._plan, host=self.hosts[0])
-
- self.assertRaises(AssertionError, rpc_utils._process_host_action,
- host, action)
- self.god.check_playback()
-
- self._called = False
- def _site_process_host_action(host, action):
- self._called = True
- return True
- self._replace_site_process_host_action(_site_process_host_action)
-
- rpc_utils._process_host_action(host, action)
-
- self.assertTrue(self._called)
- self.god.check_playback()
-
-
- def test_process_test_action_skip(self):
- self._setup_active_plan()
- planner_job = self._planner_job
- assert not planner_job.requires_rerun
-
- rpc_utils._process_test_action(planner_job,
- failure_actions.TestAction.SKIP)
- planner_job = models.Job.objects.get(id=planner_job.id)
-
- self.assertFalse(planner_job.requires_rerun)
-
-
- def test_process_test_action_rerun(self):
- self._setup_active_plan()
- planner_job = self._planner_job
- assert not planner_job.requires_rerun
-
- rpc_utils._process_test_action(planner_job,
- failure_actions.TestAction.RERUN)
- planner_job = models.Job.objects.get(id=planner_job.id)
-
- self.assertTrue(planner_job.requires_rerun)
-
-
- def test_set_additional_parameters(self):
- hostname_regex = 'host[0-9]'
- param_type = model_attributes.AdditionalParameterType.VERIFY
- param_values = {'key1': 'value1',
- 'key2': []}
-
- additional_parameters = {'hostname_regex': hostname_regex,
- 'param_type': param_type,
- 'param_values': param_values}
-
- rpc_utils.set_additional_parameters(self._plan, [additional_parameters])
-
- additional_parameters_query = (
- models.AdditionalParameter.objects.filter(plan=self._plan))
- self.assertEqual(additional_parameters_query.count(), 1)
-
- additional_parameter = additional_parameters_query[0]
- self.assertEqual(additional_parameter.hostname_regex, hostname_regex)
- self.assertEqual(additional_parameter.param_type, param_type)
- self.assertEqual(additional_parameter.application_order, 0)
-
- values_query = additional_parameter.additionalparametervalue_set.all()
- self.assertEqual(values_query.count(), 2)
-
- value_query1 = values_query.filter(key='key1')
- value_query2 = values_query.filter(key='key2')
- self.assertEqual(value_query1.count(), 1)
- self.assertEqual(value_query2.count(), 1)
-
- self.assertEqual(value_query1[0].value, repr('value1'))
- self.assertEqual(value_query2[0].value, repr([]))
-
-
- def test_get_wrap_arguments(self):
- hostname_regex = '.*'
- param_type = model_attributes.AdditionalParameterType.VERIFY
-
- additional_param = models.AdditionalParameter.objects.create(
- plan=self._plan, hostname_regex=hostname_regex,
- param_type=param_type, application_order=0)
- models.AdditionalParameterValue.objects.create(
- additional_parameter=additional_param,
- key='key1', value=repr('value1'))
- models.AdditionalParameterValue.objects.create(
- additional_parameter=additional_param,
- key='key2', value=repr([]))
-
- actual = rpc_utils.get_wrap_arguments(self._plan, 'host', param_type)
- expected = {'key1': repr('value1'),
- 'key2': repr([])}
-
- self.assertEqual(actual, expected)
-
-
- def test_compute_test_config_status_scheduled(self):
- self._setup_active_plan()
- self._planner_job.delete()
-
- self.assertEqual(
- rpc_utils.compute_test_config_status(self._planner_host),
- rpc_utils.ComputeTestConfigStatusResult.SCHEDULED)
-
-
- def test_compute_test_config_status_running(self):
- self._setup_active_plan()
- self.god.stub_function(models.Job, 'active')
- models.Job.active.expect_call().and_return(True)
-
- self.assertEqual(
- rpc_utils.compute_test_config_status(self._planner_host),
- rpc_utils.ComputeTestConfigStatusResult.RUNNING)
- self.god.check_playback()
-
-
- def test_compute_test_config_status_good(self):
- self._setup_active_plan()
- tko_test = self._tko_job.test_set.create(kernel=self._tko_kernel,
- status=self._good_status,
- machine=self._tko_machine)
- self._plan.testrun_set.create(test_job=self._planner_job,
- tko_test=tko_test,
- host=self._planner_host)
- self._planner_host.complete = True
- self._planner_host.save()
- self.god.stub_function(models.Job, 'active')
- models.Job.active.expect_call().and_return(False)
-
- self.assertEqual(
- rpc_utils.compute_test_config_status(self._planner_host),
- rpc_utils.ComputeTestConfigStatusResult.PASS)
- self.god.check_playback()
-
-
- def test_compute_test_config_status_bad(self):
- self._setup_active_plan()
- tko_test = self._tko_job.test_set.create(kernel=self._tko_kernel,
- status=self._fail_status,
- machine=self._tko_machine)
- self._plan.testrun_set.create(test_job=self._planner_job,
- tko_test=tko_test,
- host=self._planner_host)
- self._planner_host.complete = True
- self._planner_host.save()
- self.god.stub_function(models.Job, 'active')
- models.Job.active.expect_call().and_return(False)
-
- self.assertEqual(
- rpc_utils.compute_test_config_status(self._planner_host),
- rpc_utils.ComputeTestConfigStatusResult.FAIL)
- self.god.check_playback()
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/frontend/planner/set_atomic_group_control.srv b/frontend/planner/set_atomic_group_control.srv
deleted file mode 100644
index 54d0b022..00000000
--- a/frontend/planner/set_atomic_group_control.srv
+++ /dev/null
@@ -1,17 +0,0 @@
-from autotest_lib.frontend.shared import rest_client
-from autotest_lib.server import frontend
-
-planner_rpc = frontend.Planner()
-afe_rest = rest_client.Resource.load('http://%(server)s/afe/server/resources')
-
-
-label = afe_rest.labels.get(name='%(label_name)s').members[0].get()
-
-def run(machine):
- hostname = hosts.create_host(machine).hostname
- host = afe_rest.hosts.get(hostname=hostname).members[0]
- label.hosts.post({'host': host})
-
-job.parallel_simple(run, machines)
-
-planner_rpc.run('modify_plan', id=%(plan_id)d, initialized=True)
diff --git a/frontend/planner/support.py b/frontend/planner/support.py
deleted file mode 100644
index b7b9ab24..00000000
--- a/frontend/planner/support.py
+++ /dev/null
@@ -1,84 +0,0 @@
-import common
-from autotest_lib.frontend.afe import model_attributes as afe_model_attributes
-
-class TestPlanController(object):
- """
- Allows a TestPlanSupport to manage the test plan.
-
- Contains the variables that the TestPlanSupport methods can manipulate, as
- well as methods for controlling the flow of the test plan.
- """
- def __init__(self, machine, test_alias, *args, **kwargs):
- super(TestPlanController, self).__init__(*args, **kwargs)
- self.machine = machine
- self.test_alias = test_alias
-
- self._skip = False
- self._fail = None
- self._unblock = False
-
- self._reboot_before = afe_model_attributes.RebootBefore.IF_DIRTY
- self._reboot_after = afe_model_attributes.RebootAfter.ALWAYS
- self._run_verify = None
-
-
- def skip_test(self):
- """
- Call this in execute_before() to skip the current test.
- """
- self._skip = True
-
-
- def fail_test(self, reason, attributes=None):
- """
- Fails the test with the reason and optional attributes provided.
-
- Call this in execute_before() to force the test to fail, setting the
- reason to the provided reason. You may optionally specify some test
- attributes to set as well, as a dictionary.
- """
- self._fail = (reason, attributes or {})
-
-
- def unblock(self):
- """
- Call this in execute_after() to keep the host unblocked.
-
- Hosts will block by default if a test fails. If this has been called,
- the host will be unblocked and will continue in the plan.
-
- You do not need to call this method for the test plan to continue if the
- test succeeded. Calling this method from a successful run has no effect.
- """
- self._unblock = True
-
-
- def set_reboot_before(self, reboot_before):
- """
- Sets the upcoming job's "Reboot Before" option.
-
- Must be a value from the RebootBefore frontend model attributes.
- Defaults to IF_DIRTY.
- """
- assert reboot_before in afe_model_attributes.RebootBefore.values
- self._reboot_before = reboot_before
-
-
- def set_reboot_after(self, reboot_after):
- """
- Sets the upcoming job's "Reboot After" option.
-
- Must be a value from the RebootAfter frontend model attributes.
- Defaults to ALWAYS.
- """
- assert reboot_after in afe_model_attributes.RebootAfter.values
- self._reboot_after = reboot_after
-
-
- def set_run_verify(self, run_verify):
- """
- Sets whether or not the job should run the verify_test.
-
- Defaults to True.
- """
- self._run_verify = run_verify
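-
-
-# Editor's sketch of hooks a site TestPlanSupport might implement using
-# this controller. TestPlanSupport is not defined in this file, so the
-# hook names here are hypothetical; only the controller calls are real.
-def example_execute_before(controller):
-    if controller.test_alias == 'known_bad_test':
-        controller.fail_test('known failure on this platform',
-                             attributes={'triaged': 'yes'})
-    controller.set_run_verify(False)
-
-
-def example_execute_after(controller):
-    # keep the host in the plan even if this test failed
-    controller.unblock()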
diff --git a/frontend/planner/urls.py b/frontend/planner/urls.py
deleted file mode 100644
index c636bc67..00000000
--- a/frontend/planner/urls.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from django.conf.urls import defaults
-import common
-from autotest_lib.frontend import settings, urls_common
-
-urlpatterns, debug_patterns = (
- urls_common.generate_patterns('frontend.planner',
- 'TestPlannerClient'))
-
-if settings.DEBUG:
- urlpatterns += debug_patterns
diff --git a/frontend/planner/views.py b/frontend/planner/views.py
deleted file mode 100644
index d2d4837e..00000000
--- a/frontend/planner/views.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import common
-from autotest_lib.frontend import views_common
-from autotest_lib.frontend.afe import rpc_handler
-from autotest_lib.frontend.planner import models, rpc_interface
-
-rpc_handler_obj = rpc_handler.RpcHandler((rpc_interface,),
- document_module=rpc_interface)
-
-
-def handle_rpc(request):
- return rpc_handler_obj.handle_rpc_request(request)
-
-
-def rpc_documentation(request):
- return rpc_handler_obj.get_rpc_documentation()
-
-
-def model_documentation(request):
- model_names = ('Plan', 'Host', 'ControlFile', 'TestConfig', 'Job', 'Bug',
- 'TestRun', 'DataType', 'History', 'SavedObject', 'KeyVal',
- 'AutoProcess')
- return views_common.model_documentation(models, model_names)