author    showard <showard@592f7852-d20e-0410-864c-8624ca9c26a4>  2009-05-26 19:35:29 +0000
committer showard <showard@592f7852-d20e-0410-864c-8624ca9c26a4>  2009-05-26 19:35:29 +0000
commit    f9fdb3f55ec7cd27ba2507c12e66c9ef4ebf4828 (patch)
tree      9dfba11740711d49c447dd5966c263745ff02abe /scheduler
parent    4e5339dae3147839f708eb2078cbede68fb592dc (diff)
Fix JobManager.get_status_counts, which was returning incorrect counts in some cases when jobs were aborted. The problem was that a complete entry can have its aborted flag either set or unset while still having the same full status, which violated an assumption of the method.
To test it, instead of adding to the doctests (which would be messy in this particular case, since we need to reach in and manipulate HQE statuses), I started a new rpc_interface_unittest, which seems to be the way of the future. Since it shared a lot of logic with the scheduler unit test (which also depends on setting up a fake AFE database), I extracted the common logic into frontend/afe/frontend_test_utils.py. I also fixed up some of the logic extracted from monitor_db_unittest for reusing an initial DB between tests.
Signed-off-by: Steve Howard <showard@google.com>
git-svn-id: svn://test.kernel.org/autotest/trunk@3177 592f7852-d20e-0410-864c-8624ca9c26a4
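As a rough illustration of the counting pitfall the message describes (a hypothetical sketch with illustrative names, not the actual JobManager code): grouping complete entries only by their rendered full status merges entries that differ only in their aborted flag, while keying the counts on the underlying fields keeps them distinct.

# Hypothetical sketch only; entry fields and the helper name are assumptions.
def count_entry_statuses(host_queue_entries):
    """Count queue entries by (status, aborted, complete) rather than by the
    rendered full status, so two complete entries that differ only in their
    aborted flag land in separate buckets."""
    counts = {}
    for entry in host_queue_entries:
        key = (entry['status'], bool(entry['aborted']), bool(entry['complete']))
        counts[key] = counts.get(key, 0) + 1
    return counts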
Diffstat (limited to 'scheduler')
-rw-r--r--  scheduler/monitor_db_unittest.py | 147
1 file changed, 9 insertions, 138 deletions
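The diff below folds BaseSchedulerTest onto the shared FrontendTestMixin. As a hedged sketch of how another test case (for example, the new rpc_interface_unittest mentioned above) could reuse the same fixture; the class name and structure here are illustrative only, not the actual file:

# Sketch assuming the mixin helpers shown in the diff below
# (_frontend_common_setup/_frontend_common_teardown).
import unittest
import common
from autotest_lib.frontend import setup_django_environment
from autotest_lib.frontend.afe import frontend_test_utils

class RpcInterfaceTest(unittest.TestCase,
                       frontend_test_utils.FrontendTestMixin):
    def setUp(self):
        # Shared helper from the mixin: sets up the fake AFE database and
        # populates it with hosts, labels and a dummy user.
        self._frontend_common_setup()

    def tearDown(self):
        self._frontend_common_teardown()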
diff --git a/scheduler/monitor_db_unittest.py b/scheduler/monitor_db_unittest.py
index 773af7b7..3bec0b3c 100644
--- a/scheduler/monitor_db_unittest.py
+++ b/scheduler/monitor_db_unittest.py
@@ -4,7 +4,7 @@ import unittest, time, subprocess, os, StringIO, tempfile, datetime, shutil
import common
import MySQLdb
from autotest_lib.frontend import setup_django_environment
-from autotest_lib.frontend import setup_test_environment
+from autotest_lib.frontend.afe import frontend_test_utils
from autotest_lib.client.common_lib import global_config, host_protections
from autotest_lib.client.common_lib.test_utils import mock
from autotest_lib.database import database_connection, migrate
@@ -73,167 +73,38 @@ def _set_host_and_qe_ids(agent_or_task, id_list=None):
agent_or_task.host_ids = agent_or_task.queue_entry_ids = id_list
-class BaseSchedulerTest(unittest.TestCase):
+class BaseSchedulerTest(unittest.TestCase,
+ frontend_test_utils.FrontendTestMixin):
_config_section = 'AUTOTEST_WEB'
- _test_db_initialized = False
def _do_query(self, sql):
self._database.execute(sql)
- @classmethod
- def _initialize_test_db(cls):
- if cls._test_db_initialized:
- return
- temp_fd, cls._test_db_file = tempfile.mkstemp(suffix='.monitor_test')
- os.close(temp_fd)
- setup_test_environment.set_test_database(cls._test_db_file)
- setup_test_environment.run_syncdb()
- cls._test_db_backup = setup_test_environment.backup_test_database()
- cls._test_db_initialized = True
-
+ def _set_monitor_stubs(self):
+ # Clear the instance cache as this is a brand new database.
+ monitor_db.DBObject._clear_instance_cache()
- def _open_test_db(self):
- self._initialize_test_db()
- setup_test_environment.restore_test_database(self._test_db_backup)
self._database = (
database_connection.DatabaseConnection.get_test_database(
self._test_db_file))
self._database.connect()
self._database.debug = _DEBUG
-
- def _close_test_db(self):
- self._database.disconnect()
-
-
- def _set_monitor_stubs(self):
- # Clear the instance cache as this is a brand new database.
- monitor_db.DBObject._clear_instance_cache()
monitor_db._db = self._database
monitor_db._drone_manager._results_dir = '/test/path'
monitor_db._drone_manager._temporary_directory = '/test/path/tmp'
- def _fill_in_test_data(self):
- """Populate the test database with some hosts and labels."""
- user = models.User.objects.create(login='my_user')
- acl_group = models.AclGroup.objects.create(name='my_acl')
- acl_group.users.add(user)
-
- hosts = [models.Host.objects.create(hostname=hostname) for hostname in
- ('host1', 'host2', 'host3', 'host4', 'host5', 'host6',
- 'host7', 'host8', 'host9')]
-
- acl_group.hosts = hosts
- models.AclGroup.smart_get('Everyone').hosts = []
-
- labels = [models.Label.objects.create(name=name) for name in
- ('label1', 'label2', 'label3', 'label4', 'label5', 'label6',
- 'label7')]
-
- atomic_group1 = models.AtomicGroup.objects.create(
- name='atomic1', max_number_of_machines=2)
- atomic_group2 = models.AtomicGroup.objects.create(
- name='atomic2', max_number_of_machines=2)
-
- self.label3 = labels[2]
- self.label3.only_if_needed = True
- self.label3.save()
- self.label4 = labels[3]
- self.label4.atomic_group = atomic_group1
- self.label4.save()
- self.label5 = labels[4]
- self.label5.atomic_group = atomic_group1
- self.label5.save()
- hosts[0].labels.add(labels[0]) # label1
- hosts[1].labels.add(labels[1]) # label2
- self.label6 = labels[5]
- self.label7 = labels[6]
- for hostnum in xrange(4,7): # host5..host7
- hosts[hostnum].labels.add(self.label4) # an atomic group lavel
- hosts[hostnum].labels.add(self.label6) # a normal label
- hosts[6].labels.add(self.label7)
- for hostnum in xrange(7,9): # host8..host9
- hosts[hostnum].labels.add(self.label5) # an atomic group lavel
- hosts[hostnum].labels.add(self.label6) # a normal label
- hosts[hostnum].labels.add(self.label7)
-
-
- def _setup_dummy_user(self):
- user = models.User.objects.create(login='dummy', access_level=100)
- thread_local.set_user(user)
-
-
def setUp(self):
- self.god = mock.mock_god()
- self._open_test_db()
- self._fill_in_test_data()
+ self._frontend_common_setup()
self._set_monitor_stubs()
self._dispatcher = monitor_db.Dispatcher()
- self._setup_dummy_user()
def tearDown(self):
- self._close_test_db()
- self.god.unstub_all()
-
-
- def _create_job(self, hosts=[], metahosts=[], priority=0, active=False,
- synchronous=False, atomic_group=None):
- """
- Create a job row in the test database.
-
- @param hosts - A list of explicit host ids for this job to be
- scheduled on.
- @param metahosts - A list of label ids for each host that this job
- should be scheduled on (meta host scheduling).
- @param priority - The job priority (integer).
- @param active - bool, mark this job as running or not in the database?
- @param synchronous - bool, if True use synch_count=2 otherwise use
- synch_count=1.
- @param atomic_group - An atomic group id for this job to schedule on
- or None if atomic scheduling is not required. Each metahost
- becomes a request to schedule an entire atomic group.
- This does not support creating an active atomic group job.
- """
- assert not (atomic_group and active) # TODO(gps): support this
- synch_count = synchronous and 2 or 1
- created_on = datetime.datetime(2008, 1, 1)
- status = models.HostQueueEntry.Status.QUEUED
- if active:
- status = models.HostQueueEntry.Status.RUNNING
- job = models.Job.objects.create(
- name='test', owner='my_user', priority=priority,
- synch_count=synch_count, created_on=created_on,
- reboot_before=models.RebootBefore.NEVER)
- for host_id in hosts:
- models.HostQueueEntry.objects.create(job=job, host_id=host_id,
- status=status,
- atomic_group_id=atomic_group)
- models.IneligibleHostQueue.objects.create(job=job, host_id=host_id)
- for label_id in metahosts:
- models.HostQueueEntry.objects.create(job=job, meta_host_id=label_id,
- status=status,
- atomic_group_id=atomic_group)
- if atomic_group and not (metahosts or hosts):
- # Create a single HQE to request the atomic group of hosts even if
- # no metahosts or hosts are supplied.
- models.HostQueueEntry.objects.create(job=job,
- status=status,
- atomic_group_id=atomic_group)
- return job
-
-
- def _create_job_simple(self, hosts, use_metahost=False,
- priority=0, active=False):
- """An alternative interface to _create_job"""
- args = {'hosts' : [], 'metahosts' : []}
- if use_metahost:
- args['metahosts'] = hosts
- else:
- args['hosts'] = hosts
- return self._create_job(priority=priority, active=active, **args)
+ self._database.disconnect()
+ self._frontend_common_teardown()
def _update_hqe(self, set, where=''):