Selaa lähdekoodia

Partial rework of regression scripts for branch independent testing.

[SVN r39963]
Rene Rivera 18 vuotta sitten
vanhempi
sitoutus
c9b8202539

+ 21 - 0
regression/run.py

@@ -0,0 +1,21 @@
+#!/usr/bin/python
+
+# Copyright Redshift Software, Inc. 2007
+#
+# Distributed under the Boost Software License, Version 1.0.
+# (See accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt)
+
+import os.path
+import sys
+
+#~ The directory this file is in.
+root = os.path.abspath(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
+print '--- %s' % root
+
+#~ Bootstrap
+sys.path.insert(0,root)
+
+#~ Launch runner
+from src.regression import runner
+runner(root)

+ 5 - 0
regression/src/__init__.py

@@ -0,0 +1,5 @@
+# Copyright Redshift Software, Inc. 2007
+#
+# Distributed under the Boost Software License, Version 1.0.
+# (See accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt)

+ 500 - 0
regression/src/collect_and_upload_logs.py

@@ -0,0 +1,500 @@
+
+# Copyright (c) MetaCommunications, Inc. 2003-2007
+#
+# Distributed under the Boost Software License, Version 1.0. 
+# (See accompanying file LICENSE_1_0.txt or copy at 
+# http://www.boost.org/LICENSE_1_0.txt)
+
+import xml.sax.saxutils
+import zipfile
+import ftplib
+import time
+import stat
+import xml.dom.minidom
+import xmlrpclib
+import httplib
+
+import os.path
+import string
+import sys
+
+
def process_xml_file( input_file, output_file ):
    """Append one test log to *output_file*, scrubbing every line
    through utils.char_translation_table on the way."""
    utils.log( 'Processing test log "%s"' % input_file )

    source = open( input_file, 'r' )
    lines = source.readlines()
    source.close()

    scrubbed = [ string.translate( line, utils.char_translation_table )
                 for line in lines ]
    output_file.writelines( scrubbed )
+
+
def process_test_log_files( output_file, dir, names ):
    """os.path.walk visitor: feed every 'test_log.xml' found in *dir*
    into process_xml_file, appending to *output_file*."""
    logs = [ name for name in names
             if os.path.basename( name ) == 'test_log.xml' ]
    for name in logs:
        process_xml_file( os.path.join( dir, name ), output_file )
+
+
def collect_test_logs( input_dirs, test_results_writer ):
    """Recursively gather all test_log.xml files under *input_dirs*
    into *test_results_writer*."""
    # NOTE(review): utils.log appears to consult the caller's __log__
    # binding (presumably via frame inspection) — keep it even though
    # it looks unused.
    __log__ = 1
    utils.log( 'Collecting test logs ...' )
    for directory in input_dirs:
        utils.log( 'Walking directory "%s" ...' % directory )
        os.path.walk( directory, process_test_log_files, test_results_writer )
+
# Map test_log result attributes onto Dart status strings.
dart_status_from_result = {
    'succeed': 'passed',
    'fail': 'failed',
    'note': 'passed',
    '': 'notrun'
    }

# Map result tags onto Dart project names.
dart_project = {
    'trunk': 'Boost_HEAD',
    '': 'Boost_HEAD'
    }

# Map run types onto Dart submission tracks.
dart_track = {
    'full': 'Nightly',
    'incremental': 'Continuous',
    '': 'Experimental'
    }

# 256-entry str.translate table: printable ASCII plus CR/LF pass
# through, every other byte becomes '?'.
def _make_ascii_only_table():
    cells = []
    for code in range(256):
        ch = chr(code)
        if ch == '\n' or ch == '\r':
            cells.append(ch)
        elif code < 32 or code >= 0x80:
            cells.append('?')
        else:
            cells.append(ch)
    return ''.join(cells)

ascii_only_table = _make_ascii_only_table()
+
class xmlrpcProxyTransport(xmlrpclib.Transport):
    """xmlrpclib transport that tunnels XML-RPC requests through an
    HTTP proxy server."""
    def __init__(self, proxy):
        # proxy: address (host[:port]) of the HTTP proxy to use.
        # NOTE(review): xmlrpclib.Transport.__init__ is not called here;
        # presumably harmless for this Python 2 vintage — confirm.
        self.proxy = proxy
    def make_connection(self, host):
        # Remember the real target host, but open the connection to the
        # proxy instead.
        self.realhost = host
        return httplib.HTTP(self.proxy)
    def send_request(self, connection, handler, request_body):
        # Proxies require the absolute-URI request form.
        connection.putrequest('POST','http://%s%s' % (self.realhost,handler))
    def send_host(self, connection, host):
        # Host header must name the real target, not the proxy.
        connection.putheader('Host',self.realhost)
    
+    
+
def publish_test_logs(
    input_dirs,
    runner_id, tag, platform, comment_file, timestamp, user, source, run_type,
    dart_server = None,
    http_proxy = None,
    **unused
    ):
    """Walk *input_dirs* for 'test_log.xml' files and submit their
    results to *dart_server* via XML-RPC (optionally through
    *http_proxy*). Does nothing when *dart_server* is not given.
    """
    # NOTE(review): utils.log appears to consult the caller's __log__
    # binding — keep it even though it looks unused.
    __log__ = 1
    utils.log( 'Publishing test logs ...' )
    dart_rpc = None
    # One DartSubmission DOM per toolset, keyed by toolset name.
    dart_dom = {}
    
    def _publish_test_log_files_ ( unused, dir, names ):
        # os.path.walk visitor: accumulate each test_log.xml into the
        # per-toolset submission document in dart_dom.
        for file in names:
            if os.path.basename( file ) == 'test_log.xml':
                utils.log( 'Publishing test log "%s"' % os.path.join(dir,file) )
                if dart_server:
                    # Scrub non-ASCII bytes before XML parsing.
                    log_xml = open(os.path.join(dir,file)).read().translate(ascii_only_table)
                    #~ utils.log( '--- XML:\n%s' % log_xml)
                    #~ It seems possible to get an empty XML result file :-(
                    if log_xml == "": continue
                    log_dom = xml.dom.minidom.parseString(log_xml)
                    test = {
                        'library': log_dom.documentElement.getAttribute('library'),
                        'test-name': log_dom.documentElement.getAttribute('test-name'),
                        'toolset': log_dom.documentElement.getAttribute('toolset')
                        }
                    if not test['test-name'] or test['test-name'] == '':
                        test['test-name'] = 'unknown'
                    if not test['toolset'] or test['toolset'] == '':
                        test['toolset'] = 'unknown'
                    # First log for this toolset: start a new submission
                    # document carrying the run metadata.
                    if not dart_dom.has_key(test['toolset']):
                        dart_dom[test['toolset']] = xml.dom.minidom.parseString(
'''<?xml version="1.0" encoding="UTF-8"?>
<DartSubmission version="2.0" createdby="collect_and_upload_logs.py">
    <Site>%(site)s</Site>
    <BuildName>%(buildname)s</BuildName>
    <Track>%(track)s</Track>
    <DateTimeStamp>%(datetimestamp)s</DateTimeStamp>
</DartSubmission>
'''                         % {
                                'site': runner_id,
                                'buildname': "%s -- %s (%s)" % (platform,test['toolset'],run_type),
                                'track': dart_track[run_type],
                                'datetimestamp' : timestamp
                            } )
                    submission_dom = dart_dom[test['toolset']]
                    # Each child element of the log becomes one <Test>
                    # entry in the submission.
                    for node in log_dom.documentElement.childNodes:
                        if node.nodeType == xml.dom.Node.ELEMENT_NODE:
                            if node.firstChild:
                                log_data = xml.sax.saxutils.escape(node.firstChild.data)
                            else:
                                log_data = ''
                            test_dom = xml.dom.minidom.parseString('''<?xml version="1.0" encoding="UTF-8"?>
<Test>
    <Name>.Test.Boost.%(tag)s.%(library)s.%(test-name)s.%(type)s</Name>
    <Status>%(result)s</Status>
    <Measurement name="Toolset" type="text/string">%(toolset)s</Measurement>
    <Measurement name="Timestamp" type="text/string">%(timestamp)s</Measurement>
    <Measurement name="Log" type="text/text">%(log)s</Measurement>
</Test>
    '''                         % {
                                    'tag': tag,
                                    'library': test['library'],
                                    'test-name': test['test-name'],
                                    'toolset': test['toolset'],
                                    'type': node.nodeName,
                                    'result': dart_status_from_result[node.getAttribute('result')],
                                    'timestamp': node.getAttribute('timestamp'),
                                    'log': log_data
                                })
                            submission_dom.documentElement.appendChild(
                                test_dom.documentElement.cloneNode(1) )
    
    for input_dir in input_dirs:
        utils.log( 'Walking directory "%s" ...' % input_dir )
        os.path.walk( input_dir, _publish_test_log_files_, None )
    if dart_server:
        try:
            rpc_transport = None
            if http_proxy:
                rpc_transport = xmlrpcProxyTransport(http_proxy)
            dart_rpc = xmlrpclib.ServerProxy(
                'http://%s/%s/Command/' % (dart_server,dart_project[tag]),
                rpc_transport )
            # Send one submission per toolset.
            for dom in dart_dom.values():
                #~ utils.log('Dart XML: %s' % dom.toxml('utf-8'))
                dart_rpc.Submit.put(xmlrpclib.Binary(dom.toxml('utf-8')))
        except Exception, e:
            # Best-effort: publishing failures must not abort the run.
            utils.log('Dart server error: %s' % e)
+
+
def upload_to_ftp( tag, results_file, ftp_proxy, debug_level ):
    """Upload *results_file* into the boost-regression FTP area under
    directory *tag*, creating the tag path if necessary; connects via
    *ftp_proxy* when given."""
    ftp_site = 'fx.meta-comm.com'
    site_path = '/boost-regression'
    utils.log( 'Uploading log archive "%s" to ftp://%s%s/%s' % ( results_file, ftp_site, site_path, tag ) )

    if ftp_proxy:
        utils.log( '    Connecting through FTP proxy server "%s"' % ftp_proxy )
        ftp = ftplib.FTP( ftp_proxy )
        ftp.set_debuglevel( debug_level )
        ftp.set_pasv (0) # turn off PASV mode
        # Classic proxy convention: user@realhost.
        ftp.login( 'anonymous@%s' % ftp_site, 'anonymous@' )
    else:
        ftp = ftplib.FTP( ftp_site )
        ftp.set_debuglevel( debug_level )
        ftp.login()

    ftp.cwd( site_path )
    try:
        ftp.cwd( tag )
    except ftplib.error_perm:
        # Tag directory missing: create and enter each component.
        for component in tag.split( '/' ):
            ftp.mkd( component )
            ftp.cwd( component )

    archive = open( results_file, 'rb' )
    ftp.storbinary( 'STOR %s' % os.path.basename( results_file ), archive )
    ftp.quit()
+
+
def copy_comments( results_xml, comment_file ):
    """Emit a <comment> element into *results_xml* holding the contents
    of *comment_file* (empty, with a warning, when the file is absent)."""
    results_xml.startElement( 'comment', {} )

    if not os.path.exists( comment_file ):
        utils.log( 'Warning: comment file "%s" is not found.' % comment_file )
    else:
        utils.log( 'Reading comments file "%s"...' % comment_file )
        f = open( comment_file, 'r' )
        try:
            results_xml.characters( f.read() )
        finally:
            f.close()

    results_xml.endElement( 'comment' )
+
+
+def compress_file( file_path, archive_path ):
+    utils.log( 'Compressing "%s"...' % file_path )
+
+    try:
+        z = zipfile.ZipFile( archive_path, 'w', zipfile.ZIP_DEFLATED )
+        z.write( file_path, os.path.basename( file_path ) )
+        z.close()
+        utils.log( 'Done writing "%s".'% archive_path )
+    except Exception, msg:
+        utils.log( 'Warning: Compressing falied (%s)' % msg )
+        utils.log( '         Trying to compress using a platform-specific tool...' )
+        try: import zip_cmd
+        except ImportError:
+            script_dir = os.path.dirname( os.path.abspath( sys.argv[0] ) )
+            utils.log( 'Could not find \'zip_cmd\' module in the script directory (%s).' % script_dir )
+            raise Exception( 'Compressing failed!' )
+        else:
+            if os.path.exists( archive_path ):
+                os.unlink( archive_path )
+                utils.log( 'Removing stale "%s".' % archive_path )
+                
+            zip_cmd.main( file_path, archive_path )
+            utils.log( 'Done compressing "%s".' % archive_path )
+
+
def read_timestamp( file ):
    """Return the run timestamp as a UTC time.struct_time: the mtime of
    *file*, or the current time (with a warning) when it is missing."""
    if os.path.exists( file ):
        return time.gmtime( os.stat( file ).st_mtime )

    fallback = time.gmtime()
    utils.log( 'Warning: timestamp file "%s" does not exist'% file )
    utils.log( 'Using current UTC time (%s)' % fallback )
    return fallback
+
+
def collect_logs( 
          results_dir
        , runner_id
        , tag
        , platform
        , comment_file
        , timestamp_file
        , user
        , source
        , run_type
        , dart_server = None
        , http_proxy = None
        , revision = ''
        , **unused
        ):
    """Merge all test_log.xml files under *results_dir* into one
    '<runner_id>.xml' test-run document, then compress it into
    '<runner_id>.zip'. When *dart_server* is set, the individual logs
    are first published to Dart.
    """
    # Run timestamp, derived from the timestamp file's mtime (UTC).
    timestamp = time.strftime( '%Y-%m-%dT%H:%M:%SZ', read_timestamp( timestamp_file ) )
    
    if dart_server:
        publish_test_logs( [ results_dir ],
            runner_id, tag, platform, comment_file, timestamp, user, source, run_type,
            dart_server = dart_server,
            http_proxy = http_proxy )
    
    results_file = os.path.join( results_dir, '%s.xml' % runner_id )
    results_writer = open( results_file, 'w' )
    utils.log( 'Collecting test logs into "%s"...' % results_file )
        
    # Wrap all logs in a single <test-run> root element carrying the
    # run metadata as attributes.
    results_xml = xml.sax.saxutils.XMLGenerator( results_writer )
    results_xml.startDocument()
    results_xml.startElement( 
          'test-run'
        , { 
              'tag':        tag
            , 'platform':   platform
            , 'runner':     runner_id
            , 'timestamp':  timestamp
            , 'source':     source
            , 'run-type':   run_type
            , 'revision':   revision
            }
        )
    
    # Comment first, then the raw per-test logs.
    copy_comments( results_xml, comment_file )
    collect_test_logs( [ results_dir ], results_writer )

    results_xml.endElement( "test-run" )
    results_xml.endDocument()
    results_writer.close()
    utils.log( 'Done writing "%s".' % results_file )

    compress_file(
          results_file
        , os.path.join( results_dir,'%s.zip' % runner_id )
        )
+
+
def upload_logs(
          results_dir
        , runner_id
        , tag
        , user
        , ftp_proxy
        , debug_level
        , send_bjam_log = False
        , timestamp_file = None
        , dart_server = None
        , **unused
        ):
    """Upload the collected '<runner_id>.zip' results archive (and,
    when *send_bjam_log* is set, a timestamped archive of the full
    bjam log) to the FTP results area for *tag*."""
    archive = os.path.join( results_dir, '%s.zip' % runner_id )
    upload_to_ftp( tag, archive, ftp_proxy, debug_level )

    if not send_bjam_log:
        return

    bjam_log_path = os.path.join( results_dir, 'bjam.log' )
    if not timestamp_file:
        timestamp_file = bjam_log_path

    run_stamp = time.strftime( '%Y-%m-%d-%H-%M-%S', read_timestamp( timestamp_file ) )
    archive = os.path.join( results_dir, '%s.%s.log.zip' % ( runner_id, run_stamp ) )
    compress_file( bjam_log_path, archive )
    upload_to_ftp( '%s/logs' % tag, archive, ftp_proxy, debug_level )
+
+
def collect_and_upload_logs( 
          results_dir
        , runner_id
        , tag
        , platform
        , comment_file
        , timestamp_file
        , user
        , source
        , run_type
        , revision = None
        , ftp_proxy = None
        , debug_level = 0
        , send_bjam_log = False
        , dart_server = None
        , http_proxy = None
        , **unused
        ):
    """Convenience command: collect the test logs under *results_dir*
    and immediately upload the resulting archive."""
    collect_logs(
        results_dir, runner_id, tag, platform, comment_file,
        timestamp_file, user, source, run_type,
        revision = revision,
        dart_server = dart_server,
        http_proxy = http_proxy )

    upload_logs(
        results_dir, runner_id, tag, user, ftp_proxy, debug_level,
        send_bjam_log, timestamp_file,
        dart_server = dart_server )
+
+
def accept_args( args ):
    """Parse command-line *args* into the keyword-argument dictionary
    consumed by the command functions (collect_logs, upload_logs, ...).
    """
    args_spec = [ 
          'locate-root='
        , 'runner='
        , 'tag='
        , 'platform='
        , 'comment='
        , 'timestamp='
        , 'source='
        , 'run-type='
        , 'user='
        , 'ftp-proxy='
        , 'proxy='
        , 'debug-level='
        , 'send-bjam-log'
        , 'help'
        , 'dart-server='
        , 'revision='
        ]
    
    # Defaults; utils.accept_args fills in / overrides from *args*.
    options = {
          '--tag'           : 'trunk'
        , '--platform'      : sys.platform
        , '--comment'       : 'comment.html'
        , '--timestamp'     : 'timestamp'
        , '--user'          : None
        , '--source'        : 'SVN'
        , '--run-type'      : 'full'
        , '--ftp-proxy'     : None
        , '--proxy'         : None
        , '--debug-level'   : 0
        , '--dart-server'   : 'beta.boost.org:8081'
        , '--revision'      : None
        }
    
    utils.accept_args( args_spec, args, options, usage )
        
    return {
          'results_dir'     : options[ '--locate-root' ]
        , 'runner_id'       : options[ '--runner' ]
        , 'tag'             : options[ '--tag' ]
        , 'platform'        : options[ '--platform']
        , 'comment_file'    : options[ '--comment' ]
        , 'timestamp_file'  : options[ '--timestamp' ]
        , 'user'            : options[ '--user' ]
        , 'source'          : options[ '--source' ]
        , 'run_type'        : options[ '--run-type' ]
        , 'ftp_proxy'       : options[ '--ftp-proxy' ]
        , 'http_proxy'      : options[ '--proxy' ]
        , 'debug_level'     : int(options[ '--debug-level' ])
        , 'send_bjam_log'   : '--send-bjam-log' in options
        # BUG FIX: this key was misspelled 'revision   ' (trailing
        # spaces), so the --revision value landed in **unused and never
        # reached collect_logs.
        , 'revision'        : options[ '--revision' ]
        }
+
+
# Dispatch table: command-line command name -> implementing function.
commands = {
      'collect-and-upload'  : collect_and_upload_logs
    , 'collect-logs'        : collect_logs
    , 'upload-logs'         : upload_logs
    }
+
+def usage():
+    print 'Usage: %s [command] [options]' % os.path.basename( sys.argv[0] )
+    print    '''
+Commands:
+\t%s
+
+Options:
+\t--locate-root   directory to to scan for "test_log.xml" files
+\t--runner        runner ID (e.g. "Metacomm")
+\t--timestamp     path to a file which modification time will be used 
+\t                as a timestamp of the run ("timestamp" by default)
+\t--comment       an HTML comment file to be inserted in the reports
+\t                ("comment.html" by default)
+\t--tag           the tag for the results ("trunk" by default)
+\t--user          SourceForge user name for a shell account (optional)
+\t--source        where Boost sources came from ("SVN" or "tarball";
+\t                "SVN" by default)
+\t--run-type      "incremental" or "full" ("full" by default)
+\t--send-bjam-log in addition to regular XML results, send in full bjam
+\t                log of the regression run
+\t--proxy         HTTP proxy server address and port (e.g.
+\t                'http://www.someproxy.com:3128', optional)
+\t--ftp-proxy     FTP proxy server (e.g. 'ftpproxy', optional)
+\t--debug-level   debugging level; controls the amount of debugging 
+\t                output printed; 0 by default (no debug output)
+\t--dart-server   The dart server to send results to.
+''' % '\n\t'.join( commands.keys() )
+
+    
def main():
    """Entry point: the first CLI argument selects the command;
    anything else defaults to 'collect-and-upload'."""
    argv = sys.argv
    if len(argv) > 1 and argv[1] in commands:
        command, args = argv[1], argv[ 2: ]
    else:
        command, args = 'collect-and-upload', argv[ 1: ]

    commands[ command ]( **accept_args( args ) )
+
+
# Imported as a module: just bind the shared 'utils' helpers.
# Run as a script: find the enclosing 'xsl_reports' directory, put it
# on sys.path so 'utils' resolves, then dispatch to main().
if __name__ != '__main__':  import utils
else:
    # in absence of relative import...
    xsl_path = os.path.abspath( os.path.dirname( sys.argv[ 0 ] ) )
    while os.path.basename( xsl_path ) != 'xsl_reports': xsl_path = os.path.dirname( xsl_path )
    sys.path.append( xsl_path )

    import utils
    main()

+ 990 - 0
regression/src/regression.py

@@ -0,0 +1,990 @@
+#!/usr/bin/python
+
+# Copyright (c) MetaCommunications, Inc. 2003-2007
+#
+# Distributed under the Boost Software License, Version 1.0.
+# (See accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt)
+
import optparse
import os
import os.path
import platform
import shutil
import sys
+
class runner:
    """Command-line driver for branch-independent Boost regression runs.

    Options are parsed directly onto the instance (opt.parse_args with
    values=self); the positional arguments name the 'command_*' methods
    to execute, in order.
    """
    
    def __init__(self,root):
        # Build the help text listing every available command, derived
        # from this class's 'command_*' methods ('_' -> '-').
        commands = "commands: %s" % ', '.join(
            map(
                lambda m: m[8:].replace('_','-'),
                filter(
                    lambda m: m.startswith('command_'),
                    runner.__dict__.keys())
                )
            )
        
        opt = optparse.OptionParser(
            usage="%prog options [commands]",
            description=commands)
            
        #~ Base Options:
        opt.add_option( '--runner',
            help="runner ID (e.g. 'Metacomm')" )
        opt.add_option( '--comment',
            help="an HTML comment file to be inserted in the reports",
            default='comment.html' )
        opt.add_option( '--tag',
            help="the tag for the results",
            default='trunk' )
        opt.add_option( '--toolsets',
            help="comma-separated list of toolsets to test with" )
        opt.add_option( '--incremental',
            help="do incremental run (do not remove previous binaries)",
            action='store_true',
            default=False )
        opt.add_option( '--timeout',
            help="specifies the timeout, in minutes, for a single test run/compilation",
            default=5, type='int' )
        opt.add_option( '--bjam-options',
            help="options to pass to the regression test" )
        opt.add_option( '--bjam-toolset',
            help="bootstrap toolset for 'bjam' executable" )
        opt.add_option( '--pjl-toolset',
            help="bootstrap toolset for 'process_jam_log' executable" )
        opt.add_option( '--platform',
            default=self.platform_name() )

        #~ Source Options:
        opt.add_option( '--user',
            help="Boost SVN user ID" )
        opt.add_option( '--local',
            help="the name of the boost tarball" )
        opt.add_option( '--force-update',
            help="do an SVN update (if applicable) instead of a clean checkout, even when performing a full run" )
        opt.add_option( '--have-source',
            help="do neither a tarball download nor an SVN update; used primarily for testing script changes" )

        #~ Connection Options:
        opt.add_option( '--proxy',
            help="HTTP proxy server address and port (e.g.'http://www.someproxy.com:3128')" )
        opt.add_option( '--ftp-proxy',
            help="FTP proxy server (e.g. 'ftpproxy')" )
        opt.add_option( '--dart-server',
            help="the dart server to send results to" )

        #~ Debug Options:
        opt.add_option( '--debug-level',
            help="debugging level; controls the amount of debugging output printed",
            default=0, type='int' )
        opt.add_option( '--send-bjam-log',
            help="send full bjam log of the regression run",
            action='store_true',
            default=False )
        opt.add_option( '--mail',
            help="email address to send run notification to" )
        opt.add_option( '--smtp-login',
            help="STMP server address/login information, in the following form: <user>:<password>@<host>[:<port>]" )
        opt.add_option( '--skip-tests',
            help="do not run bjam; used for testing script changes" )
        
        # Option values become attributes of self; leftovers are the
        # commands to run.
        ( _opt_, self.actions ) = opt.parse_args(None,self)
        
        # Directory layout shared by all commands.
        self.tools_root = os.path.abspath(
            os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) )
        self.regression_root = root
        self.boost_root = os.path.join( self.regression_root, 'boost' )
        self.regression_results = os.path.join( self.regression_root, 'results' )
        self.regression_log = os.path.join( self.regression_results, 'bjam.log' )
        self.xsl_reports_dir = os.path.join( self.tools_root, 'regression', 'xsl_reports' )
        self.timestamp_path = os.path.join( self.regression_root, 'timestamp' )
        self.main()
    
    def command_cleanup(self,*args):
        # BUG FIX: *args is a tuple, so the original "args == []" test
        # could never succeed; test for emptiness instead.
        if not args: args = [ 'source', 'bin' ]

        if 'source' in args:
            self.log( 'Cleaning up "%s" directory ...' % self.boost_root )
            self.rmtree( self.boost_root )

        if 'bin' in args:
            # BUG FIX: the original referenced self.boost_bin_dir, which
            # is never assigned anywhere (AttributeError); use the local
            # path computed here.
            boost_bin_dir = os.path.join( self.boost_root, 'bin' )
            self.log( 'Cleaning up "%s" directory ...' % boost_bin_dir )
            self.rmtree( boost_bin_dir )

            boost_binv2_dir = os.path.join( self.boost_root, 'bin.v2' )
            self.log( 'Cleaning up "%s" directory ...' % boost_binv2_dir )
            self.rmtree( boost_binv2_dir )

            self.log( 'Cleaning up "%s" directory ...' % self.regression_results )
            self.rmtree( self.regression_results )
    
    def command_get_source(self):
        # Placeholder: not yet implemented.
        pass
    
    def command_update_source(self):
        # Placeholder: not yet implemented.
        pass
    
    def command_setup(self):
        # Placeholder: not yet implemented.
        pass
    
    def command_install(self):
        # Placeholder: not yet implemented.
        pass
    
    def command_test(self):
        # Placeholder: not yet implemented.
        pass
    
    def command_collect_logs(self):
        # Placeholder: not yet implemented.
        pass
    
    def command_upload_logs(self):
        # Placeholder: not yet implemented. Renamed from the original
        # misspelled 'command_upoad_logs'.
        pass

    # Backward-compatible alias so the old misspelled command name
    # ('upoad-logs') keeps working.
    command_upoad_logs = command_upload_logs
    
    def command_regression(self):
        # Placeholder: not yet implemented.
        pass

    def command_show_revision(self):
        # SVN keyword strings, expanded by the repository on checkout.
        modified = '$Date$'
        revision = '$Revision$'

        import re
        re_keyword_value = re.compile( r'^\$\w+:\s+(.*)\s+\$$' )
        # Parenthesized print behaves identically under Python 2 and 3.
        print( '\n\tRevision: %s' % re_keyword_value.match( revision ).group( 1 ) )
        print( '\tLast modified on: %s\n' % re_keyword_value.match( modified ).group( 1 ) )
    
    def main(self):
        # Execute each requested command, silently skipping unknown ones.
        for action in self.actions:
            action_m = "command_"+action.replace('-','_')
            if hasattr(self,action_m):
                getattr(self,action_m)()

    def platform_name(self):
        # See http://article.gmane.org/gmane.comp.lib.boost.testing/933
        if sys.platform == 'win32':
            return 'Windows'
        elif sys.platform == 'cygwin':
            return 'Windows/Cygwin'
        return platform.system()

    def log(self,message):
        # Flush both streams first so the message lands in order
        # relative to any child-process output.
        sys.stdout.flush()
        sys.stderr.flush()
        sys.stderr.write( '# %s\n' % message )
        sys.stderr.flush()

    def rmtree(self,path):
        # Requires 'import shutil' at module scope (the original module
        # used shutil.rmtree without importing it).
        if os.path.exists( path ):
            if sys.platform == 'win32':
                # 'del' first to clear read-only files, then remove the
                # directory tree itself.
                os.system( 'del /f /s /q "%s" >nul 2>&1' % path )
                shutil.rmtree( unicode( path ) )
            else:
                os.system( 'rm -f -r "%s"' % path )
+    
+'''
+import urllib
+import tarfile
+import socket
+import time
+import getopt
+import glob
+import shutil
+import stat
+import os.path
+import os
+import platform
+import traceback
+import string
+import sys
+
+
+svn_anonymous_command_line = 'svn %(command)s'
+svn_command_line           = 'svn --non-interactive --username=%(user)s %(command)s'
+
+
+bjam = {}
+process_jam_log = {}
+
+
+if sys.platform == 'win32':
+    bjam[ 'name' ] = 'bjam.exe'
+    bjam[ 'build_cmd' ] = lambda toolset, v2: bjam_build_script_cmd( 'build.bat %s' % toolset )
+    bjam[ 'is_supported_toolset' ] = lambda x: x in [ \
+        'borland', 'como', 'gcc', 'gcc-nocygwin', 'intel-win32', 'metrowerks', 'mingw', \
+        'msvc', 'vc7', 'vc8' \
+        ]
+    process_jam_log[ 'name' ] = 'process_jam_log.exe'
+
+    def default_toolset(v2):
+        if v2:
+            return 'msvc'
+        else:
+            return 'vc-7_1'
+
+    process_jam_log[ 'default_toolset' ] = default_toolset
+    patch_boost_name = 'patch_boost.bat'
+else:
+    bjam[ 'name' ] = 'bjam'
+    bjam[ 'build_cmd' ] = lambda toolset, v2: bjam_build_script_cmd( './build.sh %s' % toolset )
+    bjam[ 'is_supported_toolset' ] = lambda x: x in [ \
+        'acc', 'como', 'darwin', 'gcc', 'intel-linux', 'kcc', 'kylix', 'mipspro', \
+        'pathscale', 'pgi', 'qcc', 'sun', 'sunpro', 'tru64cxx', 'vacpp' \
+        ]
+    process_jam_log[ 'name' ] = 'process_jam_log'
+    process_jam_log[ 'default_toolset' ] = lambda x: 'gcc'
+    patch_boost_name = 'patch_boost'
+
+bjam[ 'default_toolset' ] = lambda x: ''
+bjam[ 'path' ] = os.path.join( regression_root, bjam[ 'name' ] )
+bjam[ 'source_dir' ] = os.path.join( boost_root, 'tools', 'jam', 'src' )
+bjam[ 'build_path_root' ] = lambda unused: bjam[ 'source_dir' ]
+
+process_jam_log[ 'path' ] = os.path.join( regression_root, process_jam_log[ 'name' ] )
+process_jam_log[ 'source_dir' ] = os.path.join( boost_root, 'tools', 'regression', 'build' )
+
+
+def process_jam_build_root(v2):
+    if v2:
+        return os.path.join(boost_root, 'dist', 'bin')
+    else:
+        return os.path.join(
+            boost_root, 'bin', 'boost', 'tools', 'regression', 'build'
+            , process_jam_log[ 'name' ])
+
+
+process_jam_log[ 'build_path_root' ] = process_jam_build_root
+
+process_jam_log[ 'build_cmd' ] = lambda toolset, v2: bjam_command( toolset, v2 )
+process_jam_log[ 'is_supported_toolset' ] = lambda x : True
+
+build_monitor_url = 'http://engineering.meta-comm.com/resources/build_monitor.zip'
+pskill_url = 'http://www.sysinternals.com/files/pskill.zip'
+
+utils = None
+
+
+
+def retry( f, args, max_attempts=5, sleep_secs=10 ):
+    for attempts in range( max_attempts, -1, -1 ):
+        try:
+            return f( *args )
+        except Exception, msg:
+            log( '%s failed with message "%s"' % ( f.__name__, msg ) )
+            if attempts == 0:
+                log( 'Giving up.' )
+                raise
+
+            log( 'Retrying (%d more attempts).' % attempts )
+            time.sleep( sleep_secs )
+
+
+
+
+def http_get( source_url, destination, proxy ):
+    if proxy is None: proxies = None
+    else:             proxies = { 'http' : proxy }
+
+    src = urllib.urlopen( source_url, proxies = proxies )
+
+    f = open( destination, 'wb' )
+    while True:
+        data = src.read( 16*1024 )
+        if len( data ) == 0: break
+        f.write( data )
+
+    f.close()
+    src.close()
+
+
+def tarball_name_for_tag( tag, timestamp = False ):
+    tag = tag.split( '/' )[-1]
+    if not timestamp: return 'boost-%s.tar.bz2' % tag
+    else:             return 'boost-%s.timestamp' % tag
+
+
+def download_boost_tarball( destination, tag, proxy, timestamp_only = False ):
+    tarball_name = tarball_name_for_tag( tag, timestamp_only )
+    tarball_path = os.path.join( destination, tarball_name )
+    tarball_url = 'http://beta.boost.org/development/snapshot.php/%s' % tag
+
+    log( 'Downloading "%s" to "%s"...'  % ( tarball_url, os.path.dirname( tarball_path ) ) )
+    if os.path.exists( tarball_path ):
+        os.unlink( tarball_path )
+
+    http_get(
+          tarball_url
+        , tarball_path
+        , proxy
+        )
+
+    return tarball_path
+
+
+def find_boost_dirs( path ):
+    return [ x for x in glob.glob( os.path.join( path, 'boost[-_]*' ) )
+                        if os.path.isdir( x ) ]
+
+
+def unpack_tarball( tarball_path, destination  ):
+    log( 'Looking for old unpacked archives...' )
+    old_boost_dirs = find_boost_dirs( destination )
+
+    for old_boost_dir in old_boost_dirs:
+        if old_boost_dir != tarball_path:
+            log( 'Deleting old directory %s.' % old_boost_dir )
+            rmtree( old_boost_dir )
+
+    log( 'Unpacking boost tarball ("%s")...' % tarball_path )
+
+    tarball_name = os.path.basename( tarball_path )
+    extension = tarball_name[ tarball_name.find( '.' ) : ]
+
+    if extension in ( ".tar.gz", ".tar.bz2" ):
+        mode = os.path.splitext( extension )[1][1:]
+        tar = tarfile.open( tarball_path, 'r:%s' % mode )
+        for tarinfo in tar:
+            tar.extract( tarinfo, destination )
+            if sys.platform == 'win32' and not tarinfo.isdir():
+                # workaround what appears to be a Win32-specific bug in 'tarfile'
+                # (modification times for extracted files are not set properly)
+                f = os.path.join( destination, tarinfo.name )
+                os.chmod( f, stat.S_IWRITE )
+                os.utime( f, ( tarinfo.mtime, tarinfo.mtime ) )
+        tar.close()
+    elif extension in ( ".zip" ):
+        import zipfile
+
+        z = zipfile.ZipFile( tarball_path, 'r', zipfile.ZIP_DEFLATED )
+        for f in z.infolist():
+            destination_file_path = os.path.join( destination, f.filename )
+            if destination_file_path[-1] == "/": # directory
+                if not os.path.exists( destination_file_path  ):
+                    os.makedirs( destination_file_path  )
+            else: # file
+                result = open( destination_file_path, 'wb' )
+                result.write( z.read( f.filename ) )
+                result.close()
+        z.close()
+    else:
+        raise 'Do not know how to unpack archives with extension \"%s\"' % extension
+
+    boost_dir = find_boost_dirs( destination )[0]
+    log( '    Unpacked into directory "%s"' % boost_dir )
+
+    if os.path.exists( boost_root ):
+        log( 'Deleting "%s" directory...' % boost_root )
+        rmtree( boost_root )
+
+    log( 'Renaming "%s" into "%s"' % ( boost_dir, boost_root ) )
+    os.rename( boost_dir, boost_root )
+
+
+def svn_command( user, command ):
+    # Run an svn command line via os.system.  Chooses the anonymous command
+    # template for no user / 'anonymous', otherwise the authenticated one.
+    # Raises Exception on a non-zero exit code.
+    if user is None or user == 'anonymous':
+        cmd = svn_anonymous_command_line % { 'command': command }
+    else:
+        cmd = svn_command_line % { 'user': user, 'command': command }
+
+    log( 'Executing SVN command "%s"' % cmd )
+    rc = os.system( cmd )
+    if rc != 0:
+        raise Exception( 'SVN command "%s" failed with code %d' % ( cmd, rc ) )
+
+
+def svn_repository_url( user, tag ):
+    # Return the Boost SVN URL for 'tag': https for authenticated users
+    # (commit access), plain http for anonymous checkouts.
+    if user != 'anonymous': return 'https://svn.boost.org/svn/boost/%s' % tag
+    else:                   return 'http://svn.boost.org/svn/boost/%s' % tag
+
+
+def svn_checkout( user, tag, args ):
+    # Fresh checkout of the tagged tree into <regression_root>/boost.
+    # 'args' is accepted for signature compatibility with retry() callers
+    # but is not used here.
+    command = 'co %s boost' % svn_repository_url( user, tag )
+    os.chdir( regression_root )
+    svn_command( user, command )
+
+
+def svn_update( user, tag, args ):
+    # Incremental 'svn update' of an existing working copy in boost_root.
+    # 'tag' and 'args' are unused; kept for a uniform retry() signature.
+    os.chdir( boost_root )
+    svn_command( user, 'update' )
+
+
+def format_time( t ):
+    # Format a time tuple in RFC-2822 style for notification mails.
+    # NOTE(review): the offset is hard-coded to '+0000' while callers pass
+    # time.localtime() results — the label may not match the actual zone;
+    # confirm intended behavior.
+    return time.strftime(
+          '%a, %d %b %Y %H:%M:%S +0000'
+        , t
+        )
+
+
+def refresh_timestamp():
+    # Recreate the run-timestamp marker file so its mtime records "now";
+    # timestamp() below reads this file's mtime.
+    if os.path.exists( timestamp_path ):
+       os. unlink( timestamp_path )
+
+    open( timestamp_path, 'w' ).close()
+
+
+def timestamp():
+    # Return the run timestamp (mtime of the marker file written by
+    # refresh_timestamp) as an ISO-8601 UTC string.
+    return time.strftime(
+          '%Y-%m-%dT%H:%M:%SZ'
+        , time.gmtime( os.stat( timestamp_path ).st_mtime )
+        )
+
+
+def get_tarball( tag, proxy, args, **unused ):
+    # Fetch and/or unpack the Boost tarball for 'tag' into regression_root.
+    # 'args' selects the phases ('download', 'unpack'); both by default.
+    if args == []: args = [ 'download', 'unpack' ]
+
+    tarball_path = None
+
+    if 'download' in args:
+        tarball_path = download_boost_tarball( regression_root, tag, proxy )
+
+    if 'unpack' in args:
+        # When only unpacking, assume a previously downloaded tarball with
+        # the conventional name for this tag.
+        if not tarball_path:
+            tarball_path = os.path.join( regression_root, tarball_name_for_tag( tag ) )
+        unpack_tarball( tarball_path, regression_root )
+
+
+def get_source( user, tag, proxy, args, **unused ):
+    # Obtain a fresh copy of the Boost sources: SVN checkout when a user
+    # is given, otherwise tarball download.  Both paths go through retry().
+    refresh_timestamp()
+    log( 'Getting sources (%s)...' % timestamp() )
+
+    if user is not None:
+        retry(
+              svn_checkout
+            , ( user, tag, args )
+            )
+    else:
+        retry(
+              get_tarball
+            , ( tag, proxy, args )
+            )
+
+
+def update_source( user, tag, proxy, args, **unused ):
+    # Incrementally update the sources.  SVN update when a user is given or
+    # an existing working copy (boost_root/.svn) is detected; otherwise fall
+    # back to a full get_source().
+    if user is not None or os.path.exists( os.path.join( boost_root, '.svn' ) ):
+        # Touch the timestamp marker so this run gets a fresh timestamp.
+        open( timestamp_path, 'w' ).close()
+        log( 'Updating sources from SVN (%s)...' % timestamp() )
+        retry(
+              svn_update
+            , ( user, tag, args )
+            )
+    else:
+        get_source( user, tag, proxy, args )
+
+
+def tool_path( name_or_spec, v2=None ):
+    # Resolve the filesystem path of a helper tool.
+    #
+    # 'name_or_spec' is either a plain file name (resolved relative to
+    # regression_root) or a tool spec dict with 'path', optional
+    # 'build_path', and a 'build_path_root' callable used to search for a
+    # freshly built binary.  Raises Exception when nothing is found.
+    if isinstance( name_or_spec, basestring ):
+        return os.path.join( regression_root, name_or_spec )
+
+    if os.path.exists( name_or_spec[ 'path' ] ):
+        return name_or_spec[ 'path' ]
+
+    if name_or_spec.has_key( 'build_path' ):
+        return name_or_spec[ 'build_path' ]
+
+    build_path_root = name_or_spec[ 'build_path_root' ]( v2 )
+    log( 'Searching for "%s" in "%s"...' % ( name_or_spec[ 'name' ], build_path_root ) )
+    for root, dirs, files in os.walk( build_path_root ):
+        if name_or_spec[ 'name' ] in files:
+            return os.path.join( root, name_or_spec[ 'name' ] )
+
+    raise Exception( 'Cannot find "%s" in any of the following locations:\n%s' % (
+          name_or_spec[ 'name' ]
+        , '\n'.join( [ name_or_spec[ 'path' ], build_path_root ] )
+        ) )
+
+
+def build_if_needed( tool, toolset, toolsets, v2 ):
+    if os.path.exists( tool[ 'path' ] ):
+        log( 'Found preinstalled "%s"; will use it.' % tool[ 'path' ] )
+        return
+
+    log( 'Preinstalled "%s" is not found; building one...' % tool[ 'path' ] )
+
+    if toolset is None:
+        if toolsets is not None:
+            toolset = string.split( toolsets, ',' )[0]
+            if not tool[ 'is_supported_toolset' ]( toolset ):
+                log( 'Warning: Specified toolset (%s) cannot be used to bootstrap "%s".'\
+                     % ( toolset, tool[ 'name' ] ) )
+
+                toolset = tool[ 'default_toolset' ](v2)
+                log( '         Using default toolset for the platform (%s).' % toolset )
+        else:
+            toolset = tool[ 'default_toolset' ](v2)
+            log( 'Warning: No bootstrap toolset for "%s" was specified.' % tool[ 'name' ] )
+            log( '         Using default toolset for the platform (%s).' % toolset )
+
+    if os.path.exists( tool[ 'source_dir' ] ):
+        log( 'Found "%s" source directory "%s"' % ( tool[ 'name' ], tool[ 'source_dir' ] ) )
+        build_cmd = tool[ 'build_cmd' ]( toolset, v2 )
+        log( 'Building "%s" (%s)...' % ( tool[ 'name'], build_cmd ) )
+        utils.system( [
+              'cd "%s"' % tool[ 'source_dir' ]
+            , build_cmd
+            ] )
+    else:
+        raise 'Could not find "%s" source directory "%s"' % ( tool[ 'name' ], tool[ 'source_dir' ] )
+
+    if not tool.has_key( 'build_path' ):
+        tool[ 'build_path' ] = tool_path( tool, v2 )
+
+    if not os.path.exists( tool[ 'build_path' ] ):
+        raise 'Failed to find "%s" after build.' % tool[ 'build_path' ]
+
+    log( '%s succesfully built in "%s" location' % ( tool[ 'name' ], tool[ 'build_path' ] ) )
+
+
+def import_utils():
+    # Lazily import the shared xsl_reports 'utils' module into the global
+    # 'utils' name; no-op if already imported.
+    global utils
+    if utils is None:
+        sys.path.append( xsl_reports_dir )
+        import utils as utils_module
+        utils = utils_module
+
+
+def download_if_needed( tool_name, tool_url, proxy ):
+    # Fetch a helper executable over HTTP (as a .zip next to the target
+    # path), unzip it in place and delete the archive.  No-op when the
+    # tool is already present.
+    path = tool_path( tool_name )
+    if not os.path.exists( path ):
+        log( 'Preinstalled "%s" is not found.' % path )
+        log( '  Downloading from %s...' % tool_url )
+
+        zip_path = '%s.zip' % os.path.splitext( path )[0]
+        http_get( tool_url, zip_path, proxy )
+
+        log( '  Unzipping %s...' % path )
+        utils.unzip( zip_path, os.path.dirname( path ) )
+
+        log( '  Removing %s...' % zip_path )
+        os.unlink( zip_path )
+        log( 'Done.' )
+
+
+def setup(
+          comment
+        , toolsets
+        , book
+        , bjam_toolset
+        , pjl_toolset
+        , monitored
+        , proxy
+        , v2
+        , args
+        , **unused
+        ):
+    # Prepare the regression environment: run an optional local patch
+    # script, bootstrap bjam and process_jam_log, and (on Win32, when
+    # monitoring is requested) fetch the build-monitor helpers.
+    import_utils()
+
+    patch_boost_path = os.path.join( regression_root, patch_boost_name )
+    if os.path.exists( patch_boost_path ):
+        log( 'Found patch file "%s". Executing it.' % patch_boost_path )
+        os.chdir( regression_root )
+        utils.system( [ patch_boost_path ] )
+
+    build_if_needed( bjam, bjam_toolset, toolsets, v2 )
+    build_if_needed( process_jam_log, pjl_toolset, toolsets, v2 )
+
+    if monitored:
+        if sys.platform == 'win32':
+            download_if_needed( 'build_monitor.exe', build_monitor_url, proxy )
+            download_if_needed( 'pskill.exe', pskill_url, proxy )
+        else:
+            log( 'Warning: Test monitoring is not supported on this platform (yet).'     )
+            log( '         Please consider contributing this piece!' )
+
+
+def bjam_build_script_cmd( cmd ):
+    # Prefix 'cmd' with the user-provided environment setup command from
+    # BJAM_ENVIRONMENT_SETUP (joined with '&'), when that variable is set.
+    env_setup_key = 'BJAM_ENVIRONMENT_SETUP'
+    if os.environ.has_key( env_setup_key ):
+        return '%s & %s' % ( os.environ[env_setup_key], cmd )
+
+    return cmd
+
+
+def bjam_command( toolsets, v2 ):
+    # Assemble the base bjam command line: tool path, optional --v2 flag,
+    # BOOST_BUILD_PATH/BOOST_ROOT settings and the toolset selection
+    # (positional toolsets for v2, -sTOOLS=... for v1).
+    build_path = regression_root
+    # A trailing backslash would escape the closing quote; double it.
+    if build_path[-1] == '\\': build_path += '\\'
+
+    v2_option = ""
+    if v2:
+        v2_option = "--v2"
+
+    result = '"%s" %s "-sBOOST_BUILD_PATH=%s" "-sBOOST_ROOT=%s"'\
+        % (
+            tool_path( bjam, v2 )
+          , v2_option
+          , build_path
+          , boost_root
+          )
+
+    if toolsets:
+        if v2:
+            result += ' ' + string.join(string.split( toolsets, ',' ), ' ' )
+        else:
+            result += ' "-sTOOLS=%s"' % string.join( string.split( toolsets, ',' ), ' ' )
+
+    return result
+
+
+def install( toolsets, v2, **unused ):
+    # Run 'bjam install' from boost_root, appending its output to
+    # install_log and creating the regression_results directory first.
+    import_utils()
+    os.chdir( os.path.join( boost_root ) )
+
+    log( 'Making "%s" directory...' % regression_results )
+    utils.makedirs( regression_results )
+
+    install_cmd = '%s -d2 install >>%s 2>&1' % ( bjam_command( toolsets, v2 ), install_log )
+    log( 'Installing libraries (%s)...' % install_cmd )
+    utils.system( [ install_cmd ] )
+
+
+def start_build_monitor( timeout ):
+    # Win32 only: launch build_monitor.exe (below-normal priority) to watch
+    # bjam.exe with the given timeout in minutes (converted to seconds).
+    if sys.platform == 'win32':
+        build_monitor_path = tool_path( 'build_monitor.exe' )
+        if os.path.exists( build_monitor_path ):
+            utils.system( [ 'start /belownormal "" "%s" bjam.exe %d' % ( build_monitor_path, timeout*60 ) ] )
+        else:
+            log( 'Warning: Build monitor is not found at "%s"' % build_monitor_path )
+
+
+def stop_build_monitor():
+    # Win32 only: kill the build_monitor process via pskill.exe.
+    if sys.platform == 'win32':
+        build_monitor_path = tool_path( 'build_monitor.exe' )
+        if os.path.exists( build_monitor_path ):
+            utils.system( [ '"%s" build_monitor' %  tool_path( 'pskill.exe' ) ] )
+
+
+def run_process_jam_log(v2):
+    # Feed the bjam regression log into process_jam_log to extract per-test
+    # results into regression_results.  'v2' selects the Boost.Build v2
+    # flavor of the tool and its --v2 flag.
+    log( 'Getting test case results out of "%s"...' % regression_log )
+
+    # Reuse 'v2' as the literal command-line flag string.
+    if v2:
+        v2 = "--v2"
+    else:
+        v2 = ""
+
+    utils.checked_system( [
+        '"%s" %s "%s" <"%s"' % (
+              tool_path( process_jam_log, v2 )
+            , v2
+            , regression_results
+            , regression_log
+            )
+        ] )
+
+
+def test(
+          toolsets
+        , bjam_options
+        , monitored
+        , timeout
+        , v2
+        , args
+        , **unused
+        ):
+    # Run the regression tests from boost_root/status and process the
+    # resulting log.  'args' selects phases: 'clean' (wipe previous
+    # results), 'test' (run bjam) and 'process' (run process_jam_log);
+    # default is test+process.  The build monitor is started/stopped
+    # around the run when 'monitored' is set.
+    if args == []:
+        args = [ "test", "process" ]
+
+    import_utils()
+
+    try:
+        if monitored:
+            start_build_monitor( timeout )
+
+        cd = os.getcwd()
+        os.chdir( os.path.join( boost_root, 'status' ) )
+
+        log( 'Making "%s" directory...' % regression_results )
+        utils.makedirs( regression_results )
+
+        results_libs = os.path.join( regression_results, 'libs' )
+        results_status = os.path.join( regression_results, 'status' )
+
+        if "clean" in args:
+            rmtree( results_libs )
+            rmtree( results_status )
+
+        # v1 and v2 Boost.Build use different options for the build dir.
+        build_dir_option = "-sALL_LOCATE_TARGET"
+        if v2:
+            build_dir_option = "--build-dir"
+
+        if "test" in args:
+            test_cmd = '%s -d2 --dump-tests %s "%s=%s" >>"%s" 2>&1' % (
+                  bjam_command( toolsets, v2 )
+                , bjam_options
+                , build_dir_option
+                , regression_results
+                , regression_log
+                )
+
+            log( 'Starting tests (%s)...' % test_cmd )
+            utils.system( [ test_cmd ] )
+
+        if "process" in args:
+            run_process_jam_log(v2)
+
+        os.chdir( cd )
+    finally:
+        if monitored:
+            stop_build_monitor()
+
+
+def build_book( **kargs ):
+    # Build the BoostBook HTML documentation with bjam --v2 from
+    # boost_root/doc, logging into boostbook_log (truncated first).
+    # To do
+    # 1. PDF generation
+    # 2. Do we need to cleanup before the build?
+    # 3. Incremental builds
+    if not os.path.exists( regression_results ):
+        os.makedirs( regression_results )
+    import_utils()
+    cwd = os.getcwd()
+    try:
+        os.chdir( os.path.join( boost_root, 'doc' ) )
+        if os.path.exists( boostbook_log ):
+            os.unlink( boostbook_log )
+        utils.system( [ '%s --v2 html >>%s 2>&1' % ( tool_path( bjam, v2=True ), boostbook_log ) ] )
+        # utils.system( [ '%s --v2 pdf >>%s 2>&1' % ( tool_path( bjam, v2=True ), boostbook_log ) ] )
+    finally:
+        os.chdir( cwd )
+
+def collect_logs(
+          tag
+        , runner
+        , platform
+        , user
+        , comment
+        , incremental
+        , dart_server
+        , ftp_proxy
+        , args
+        , **unused
+        ):
+    import_utils()
+
+    if comment is None:
+        comment = 'comment.html'
+
+    comment_path = os.path.join( regression_root, comment )
+    if not os.path.exists( comment_path ):
+        log( 'Comment file "%s" not found; creating default comment.' % comment_path )
+        f = open( comment_path, 'w' )
+        f.write( '<p>Tests are run on %s platform.</p>' % platform_name() )
+        f.close()
+
+    run_type = ''
+    if incremental: run_type = 'incremental'
+    else:           run_type = 'full'
+
+    source = 'tarball'
+    revision = ''
+    svn_root_file = os.path.join( boost_root, '.svn' )
+    svn_info_file = os.path.join( boost_root, 'svn_info.txt' )
+    if os.path.exists( svn_root_file ):
+        source = 'SVN'
+        svn_command( 'user', 'info ' + boost_root + ' >' + svn_info_file )
+
+    if os.path.exists( svn_info_file ):
+        f = open( svn_info_file, 'r' )
+        svn_info = f.read()
+        f.close()
+        i = svn_info.find( 'Revision:' )
+        if i >= 0:
+            i += 10
+            while svn_info[i] >= '0' and svn_info[i] <= '9':
+              revision += svn_info[i]
+              i += 1
+      
+      
+    from runner import collect_logs
+    collect_logs(
+          regression_results
+        , runner
+        , tag
+        , platform
+        , comment_path
+        , timestamp_path
+        , user
+        , source
+        , run_type
+        , dart_server
+        , ftp_proxy
+        , revision
+        )
+
+
+def collect_book( **unused ):
+    # Zip up the generated BoostBook HTML tree (plus the build log and a
+    # timestamp entry) into boostbook_archive_name for upload.
+    log( 'Collecting files for BoostBook into "%s"...' % boostbook_archive_name )
+    import zipfile
+    boostbook_archive = zipfile.ZipFile( boostbook_archive_name, 'w', zipfile.ZIP_DEFLATED )
+    html_root = os.path.join( boost_root, 'doc/html' )
+
+    boostbook_archive.writestr( 'timestamp', timestamp())
+    boostbook_archive.write( boostbook_log, os.path.basename( boostbook_log ) )
+    
+    def add_files( arg, dirname, names ):
+        # os.path.walk callback: archive each file relative to html_root.
+        for name in names:
+            path = os.path.join( dirname, name )
+            if not os.path.isdir( path ):
+                boostbook_archive.write( path, path[ len( html_root ) + 1: ] )
+
+    os.path.walk( html_root, add_files, None ) 
+    
+
+def upload_logs(
+          tag
+        , runner
+        , user
+        , ftp_proxy
+        , debug_level
+        , send_bjam_log
+        , dart_server
+        , **unused
+        ):
+    # Upload collected regression logs via runner.upload_logs, retrying
+    # on failure.
+    import_utils()
+    from runner import upload_logs
+    retry(
+          upload_logs
+        , ( regression_results, runner, tag, user, ftp_proxy, debug_level,
+            send_bjam_log, timestamp_path, dart_server )
+        )
+
+
+def upload_book( tag, runner, ftp_proxy, debug_level, **unused ):
+    # Upload the BoostBook archive to the FTP area for this tag.
+    # NOTE(review): 'runner' is accepted but not passed on — confirm.
+    import_utils()
+    from runner import upload_to_ftp
+    upload_to_ftp( tag, boostbook_archive_name, ftp_proxy, debug_level )
+
+
+def update_itself( tag, **unused ):
+    # Replace this driver script with the copy from the freshly obtained
+    # sources, keeping a '~' backup of the current one.
+    source = os.path.join( xsl_reports_dir, 'runner', os.path.basename( sys.argv[0] ) )
+    self = os.path.join( regression_root, os.path.basename( sys.argv[0] ) )
+    
+    # Through revision 38985, the update copy was not done if
+    # os.stat(self).st_mtime > os.stat(source).st_mtime. This was not
+    # reliable on all systems, so the copy is now done unconditionally.
+    log( '    Saving a backup copy of the current script...' )
+    os.chmod( self, stat.S_IWRITE ) # Win32 workaround
+    shutil.move( self, '%s~' % self )
+    log( 'Updating %s from %s...' % ( self, source )  )
+    shutil.copy2( source, self )
+
+
+def send_mail( smtp_login, mail, subject, msg = '', debug_level = 0 ):
+    # Send a notification mail to (and from) 'mail'.  'smtp_login' has the
+    # form 'user:password@server'; when absent, an unauthenticated server
+    # is guessed as mail.<recipient-domain>.
+    import smtplib
+    if not smtp_login:
+        server_name = 'mail.%s' % mail.split( '@' )[-1]
+        user_name = None
+        password = None
+    else:
+        server_name = smtp_login.split( '@' )[-1]
+        ( user_name, password ) = string.split( smtp_login.split( '@' )[0], ':' )
+
+    log( '    Sending mail through "%s"...' % server_name )
+    smtp_server = smtplib.SMTP( server_name )
+    smtp_server.set_debuglevel( debug_level )
+    if user_name:
+        smtp_server.login( user_name, password )
+
+    smtp_server.sendmail(
+          mail
+        , [ mail ]
+        , 'Subject: %s\nTo: %s\n\n%s' % ( subject, mail, msg )
+        )
+
+
+def regression(
+          tag
+        , local
+        , runner
+        , platform
+        , user
+        , comment
+        , toolsets
+        , book
+        , bjam_options
+        , bjam_toolset
+        , pjl_toolset
+        , incremental
+        , send_bjam_log
+        , force_update
+        , have_source
+        , skip_tests
+        , monitored
+        , timeout
+        , mail = None
+        , smtp_login = None
+        , proxy = None
+        , ftp_proxy = None
+        , debug_level = 0
+        , v2 = 1
+        , dart_server = None
+        , args = []
+        ):
+    # Top-level driver for a complete regression run: obtain sources
+    # (local tarball / SVN / download), set up tools, run and process
+    # tests, collect and upload logs, optionally build and upload the
+    # BoostBook docs, then self-update.  Start/success/failure mails are
+    # sent when 'mail' is given; any exception is reported and re-raised.
+
+    try:
+        mail_subject = 'Boost regression for %s on %s' % ( tag, string.split(socket.gethostname(), '.')[0] )
+        start_time = time.localtime()
+        if mail:
+            log( 'Sending start notification to "%s"' % mail )
+            send_mail(
+                  smtp_login
+                , mail
+                , '%s started at %s.' % ( mail_subject, format_time( start_time ) )
+                , debug_level = debug_level
+                )
+
+        if local is not None:
+            # Use a pre-downloaded tarball; the tag is derived from its
+            # base name up to the first '.'.
+            log( 'Using local file "%s"' % local )
+
+            b = os.path.basename( local )
+            tag = b[ 0: b.find( '.' ) ]
+            log( 'Tag: "%s"' % tag  )
+
+            unpack_tarball( local, regression_root )
+        elif have_source:
+            if not incremental: cleanup( [ 'bin' ] )
+        else:
+            if incremental or force_update:
+                if not incremental: cleanup( [ 'bin' ] )
+                update_source( user, tag, proxy, [] )
+            else:
+                cleanup( [] )
+                get_source( user, tag, proxy, [] )
+
+        setup( comment, toolsets, book, bjam_toolset, pjl_toolset, monitored, proxy,
+               v2, [] )
+
+        # Not specifying --toolset in command line is not enough
+        # that would mean to use Boost.Build default ones
+        # We can skip test only we were explictly 
+        # told to have no toolsets in command line "--toolset="
+        if toolsets != '': # --toolset=,
+            if not skip_tests: test( toolsets, bjam_options, monitored, timeout, v2, [] )
+            # NOTE(review): 'proxy' (HTTP proxy) is passed where
+            # collect_logs expects its ftp_proxy parameter — confirm
+            # whether ftp_proxy was intended here.
+            collect_logs( tag, runner, platform, user, comment, incremental, dart_server, proxy, [] )
+            upload_logs( tag, runner, user, ftp_proxy, debug_level, send_bjam_log, dart_server )
+
+        if book:
+            build_book()
+            collect_book()
+            upload_book( tag, runner, ftp_proxy, debug_level )
+
+        update_itself( tag )
+
+        if mail:
+            log( 'Sending report to "%s"' % mail )
+            end_time = time.localtime()
+            send_mail(
+                  smtp_login
+                , mail
+                , '%s completed successfully at %s.' % ( mail_subject, format_time( end_time ) )
+                , debug_level = debug_level
+                )
+    except:
+        # Report the failure (with traceback) by mail, then re-raise.
+        if mail:
+            log( 'Sending report to "%s"' % mail )
+            traceback_ = '\n'.join( apply( traceback.format_exception, sys.exc_info() ) )
+            end_time = time.localtime()
+            send_mail(
+                  smtp_login
+                , mail
+                , '%s failed at %s.' % ( mail_subject, format_time( end_time ) )
+                , traceback_
+                , debug_level
+                )
+        raise
+'''
