author | mbligh <mbligh@592f7852-d20e-0410-864c-8624ca9c26a4> | 2009-03-26 21:10:16 +0000 |
---|---|---|
committer | mbligh <mbligh@592f7852-d20e-0410-864c-8624ca9c26a4> | 2009-03-26 21:10:16 +0000 |
commit | 68a0996a75d6d8a190afba0c667bdfd727a4810b (patch) |
tree | 6462dab1f8cf654f8b74bdb2cc845182a3d503c8 /server | |
parent | 91ea482e1af2cb42052155bd99c6a19fd7d25cc0 (diff) |
New version of the patch: I moved the logging config code to right before
all the results directory checking, and also made autoserv handle the -N
flag appropriately. While I was there, I found some bugs in the recently
introduced log collection feature that are exposed when autoserv is run
with -N, so I think it's better to have everything fixed in one patch.
From: lmr
git-svn-id: svn://test.kernel.org/autotest/trunk@2939 592f7852-d20e-0410-864c-8624ca9c26a4
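The -N behaviour described in the commit message comes down to choosing between two logging setups: when a results directory is created, logging is configured from debug_server.ini via logging.config.fileConfig(); when -N suppresses the results directory, a formatter is applied to the root logger in code. The sketch below illustrates that split in isolation and is not autoserv's actual code: configure_logging, results_dir and ini_path are hypothetical names, the branch is taken on whether a results path was given at all rather than on whether the directory already exists, and it attaches its own StreamHandler instead of reusing root_logger.handlers[0] as the patch does.

```python
import logging
import logging.config
import os


def configure_logging(results_dir, ini_path):
    """Illustrative sketch of the two logging paths; not autoserv's code."""
    if results_dir:
        # Results requested: create the directory first so file handlers
        # declared in the .ini config can open their log files inside it.
        if not os.path.isdir(results_dir):
            os.makedirs(results_dir)
        os.environ['AUTOSERV_RESULTS'] = results_dir
        logging.config.fileConfig(ini_path)
        logging.info("Results placed in %s", results_dir)
    else:
        # -N style run: no results directory, so configure the root logger
        # in code with a console handler and a timestamped format.
        stamp = '[%(asctime)s - %(levelname)-8s] %(message)s'
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(stamp, datefmt='%H:%M:%S'))
        root_logger = logging.getLogger()
        root_logger.addHandler(handler)
        root_logger.setLevel(logging.DEBUG)
```

Attaching a dedicated handler keeps the sketch self-contained; the patch instead reformats the root logger's existing first handler, which assumes one is already installed at that point.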
Diffstat (limited to 'server')
-rwxr-xr-x | server/autoserv | 26 |
-rwxr-xr-x | server/server_job.py | 14 |
2 files changed, 28 insertions, 12 deletions
diff --git a/server/autoserv b/server/autoserv
index 00fd5f44..2f68dd74 100755
--- a/server/autoserv
+++ b/server/autoserv
@@ -13,12 +13,6 @@ from autotest_lib.server import server_job, utils, autoserv_parser, autotest
 from autotest_lib.client.common_lib import pidfile
 
 def run_autoserv(pid_file_manager, results, parser):
-    os.environ['AUTOSERV_RESULTS'] = results
-    path = os.path.dirname(__file__)
-    autodir = os.path.abspath(os.path.join(path, '..'))
-    serverdir = os.path.join(autodir, 'server')
-    logging.config.fileConfig('%s/debug_server.ini' % serverdir)
-
     # send stdin to /dev/null
     dev_null = os.open(os.devnull, os.O_RDONLY)
     os.dup2(dev_null, sys.stdin.fileno())
@@ -160,7 +154,25 @@ def main():
         error = "Error: results directory already exists: %s\n" % results
         sys.stderr.write(error)
         sys.exit(1)
-    print "Results placed in %s" % results
+
+    # Now that we certified that there's no leftover results dir from
+    # previous jobs, lets create the result dir since the logging system
+    # needs to create the log file in there.
+    if not os.path.isdir(results):
+        os.makedirs(results)
+        os.environ['AUTOSERV_RESULTS'] = results
+        serverdir = os.path.dirname(__file__)
+        logging.config.fileConfig('%s/debug_server.ini' % serverdir)
+        logging.info("Results placed in %s" % results)
+    else:
+        # If we supply -N, no results dir will be generated, so
+        # we'll configure the logging system on code.
+        stamp = '[%(asctime)s - %(levelname)-8s] %(message)s'
+        root_logger = logging.getLogger()
+        formatter = logging.Formatter(stamp, datefmt='%H:%M:%S')
+        root_logger.handlers[0].setFormatter(formatter)
+        root_logger.setLevel(logging.DEBUG)
+
     if parser.options.write_pidfile:
         pid_file_manager = pidfile.PidFileManager("autoserv", results)
diff --git a/server/server_job.py b/server/server_job.py
index 05bd19fb..1d8c86f2 100755
--- a/server/server_job.py
+++ b/server/server_job.py
@@ -99,9 +99,11 @@ class base_server_job(object):
         else:
             self.control = ''
         self.resultdir = resultdir
-        self.uncollected_log_file = os.path.join(resultdir, "uncollected_logs")
-        self.debugdir = os.path.join(resultdir, 'debug')
+        self.uncollected_log_file = None
         if resultdir:
+            self.uncollected_log_file = os.path.join(resultdir, "uncollected_logs")
+            self.debugdir = os.path.join(resultdir, 'debug')
+
             if not os.path.exists(resultdir):
                 os.mkdir(resultdir)
             if not os.path.exists(self.uncollected_log_file):
@@ -432,7 +434,8 @@ class base_server_job(object):
                 self._execute_code(CRASHINFO_CONTROL_FILE, namespace)
             else:
                 self._execute_code(CRASHDUMPS_CONTROL_FILE, namespace)
-            os.remove(self.uncollected_log_file)
+            if self.uncollected_log_file:
+                os.remove(self.uncollected_log_file)
             self.disable_external_logging()
         if cleanup and machines:
             self._execute_code(CLEANUP_CONTROL_FILE, namespace)
@@ -702,8 +705,9 @@ class base_server_job(object):
         @param update_func - a function that updates the list of uncollected
                logs. Should take one parameter, the list to be updated.
         """
-        log_file = open(self.uncollected_log_file, "r+")
-        fcntl.flock(log_file, fcntl.LOCK_EX)
+        if self.uncollected_log_file:
+            log_file = open(self.uncollected_log_file, "r+")
+            fcntl.flock(log_file, fcntl.LOCK_EX)
         try:
             uncollected_logs = pickle.load(log_file)
             update_func(uncollected_logs)
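On the server_job.py side, uncollected_log_file becomes None whenever no results directory is given, and the code paths that touch it are guarded. The stand-alone helper below is a hedged sketch of that guarded flock-plus-pickle update: the function name update_uncollected_logs is hypothetical, the write-back at the end is an assumption (that part of the hunk is not visible above), and the file is opened in binary mode for Python 3 compatibility, unlike the Python 2 original.

```python
import fcntl
import pickle


def update_uncollected_logs(log_path, update_func):
    """Hypothetical stand-alone version of the guarded log-list update.

    log_path may be None (the -N case with no results directory); the
    update is then skipped, mirroring the `if self.uncollected_log_file:`
    guard added by the patch.
    """
    if not log_path:
        return
    # Binary mode for Python 3 pickle; the Python 2 original uses "r+".
    log_file = open(log_path, "rb+")
    fcntl.flock(log_file, fcntl.LOCK_EX)   # serialize concurrent updates
    try:
        uncollected_logs = pickle.load(log_file)
        update_func(uncollected_logs)      # caller mutates the list in place
        # Assumed write-back step: rewrite the updated list to the file.
        log_file.seek(0)
        log_file.truncate()
        pickle.dump(uncollected_logs, log_file)
    finally:
        fcntl.flock(log_file, fcntl.LOCK_UN)
        log_file.close()
```

A caller would pass something like `lambda logs: logs.append(entry)` as update_func to record another log that still needs to be collected; the exact shape of each entry is an implementation detail of server_job.py not shown in this diff.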