Просмотр исходного кода

Improved processing times for Boost-wide reports

[SVN r27571]
Aleksey Gurtovoy 21 год назад
Родитель
Commit
4b82743907

+ 640 - 0
tools/regression/xsl_reports/boost_wide_report.py

@@ -0,0 +1,640 @@
+
+# Copyright (c) MetaCommunications, Inc. 2003-2004
+#
+# Distributed under the Boost Software License, Version 1.0. 
+# (See accompanying file LICENSE_1_0.txt or copy at 
+# http://www.boost.org/LICENSE_1_0.txt)
+
import ftplib
import glob
import os
import os.path
import shutil
import string
import sys
import time
import xml.sax.handler
import xml.sax.saxutils

import utils
import runner
+
+
# Report type codes understood by make_result_pages():
#   us/ds - user/developer summary, ud/dd - user/developer detailed,
#   l - links, p - patches, i - issues, n - runner comment files,
#   ddr/dsr - developer detailed/summary "release" variants.
report_types = [ 'us', 'ds', 'ud', 'dd', 'l', 'p', 'i', 'n', 'ddr', 'dsr' ]

# Directory containing this script, resolved both when run as a script
# and when imported as a module; used to locate bundled resources
# (XSL stylesheets etc.) independently of the current working directory.
if __name__ == '__main__':
    run_dir = os.path.abspath( os.path.dirname( sys.argv[ 0 ] ) )
else:
    run_dir = os.path.abspath( os.path.dirname( sys.modules[ __name__ ].__file__ ) )
+
+
def map_path( path ):
    """Resolve `path` relative to the directory this script lives in."""
    return os.path.join( run_dir, path )
+
+
def xsl_path( xsl_file_name ):
    """Return the full path of an XSL stylesheet under the xsl/v2 tree."""
    relative = os.path.join( 'xsl/v2', xsl_file_name )
    return map_path( relative )
+
class file_info:
    """A file's name, size and modification date, as discovered either
    on the FTP site or in a local directory (size may be None)."""

    def __init__( self, file_name, file_size, file_date ):
        self.name, self.size, self.date = file_name, file_size, file_date

    def __repr__( self ):
        return "name: %s, size: %s, date %s" % ( self.name, self.size, self.date )
+
#
# Find the mod time from unix format directory listing line
#

def get_date( words ):
    """Extract a time tuple from a unix-style `ls -l` listing line that
    has been split into (at most 9) words.

    Fields 5..7 hold the month name, the day, and either HH:MM (for
    files dated in the current year) or a year number.  Returns a
    9-element time tuple with zero seconds and zero wday/yday/isdst.
    """
    date = words[ 5: -1 ]

    month_names = [ "Jan", "Feb", "Mar", "Apr", "May", "Jun",
                    "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" ]

    year = time.localtime()[0] # if the year is not specified, it is the current year
    month = month_names.index( date[0] ) + 1
    day = int( date[1] )
    hours = 0
    minutes = 0

    if date[2].find( ":" ) != -1:
        ( hours, minutes ) = [ int(x) for x in date[2].split( ":" ) ]
    else:
        # listings show HH:MM only for current-year files; for older
        # files the field is the year and the time of day is unknown
        year = int( date[2] )

    return ( year, month, day, hours, minutes, 0, 0, 0, 0 )
+
def list_ftp( f ):
    """Return a list of file_info for the files in the current directory
    of ftp object `f`, parsed from a unix-format LIST response.

    Sizes are not tracked (None); only name and date matter for syncing.
    """
    utils.log( "listing source content" )
    lines = []

    # 1. collect the raw LIST response lines
    f.dir( lambda x: lines.append( x ) )

    # 2. split each line into at most 9 fields so a file name
    #    containing spaces stays in one piece (the last field)
    word_lines = [ x.split( None, 8 ) for x in lines ]

    result = [ file_info( l[-1], None, get_date( l ) ) for l in word_lines ]
    # loop variable renamed from 'f' so it no longer shadows the ftp object
    for fi in result:
        utils.log( "    %s" % fi )
    return result
+
def list_dir( dir ):
    """Return a list of file_info for the regular files in local
    directory `dir`, with modification times normalized (wday, yday
    and isdst zeroed) so they compare cleanly against FTP dates.
    Sizes are not tracked (None)."""
    utils.log( "listing destination content %s" % dir )
    result = []
    for file_name in os.listdir( dir ):
        file_path = os.path.join( dir, file_name )
        if not os.path.isfile( file_path ):
            continue
        full_time = time.localtime( os.path.getmtime( file_path ) )
        # keep year..seconds, drop wday/yday/isdst
        normalized = full_time[:6] + ( 0, 0, 0 )
        result.append( file_info( file_name, None, normalized ) )
    for fi in result:
        utils.log( "    %s" % fi )
    return result
+
def find_by_name( d, name ):
    """Return the first element of `d` whose .name equals `name`,
    or None when there is no such element."""
    matches = [ x for x in d if x.name == name ]
    if matches:
        return matches[ 0 ]
    return None
+
def diff( s, d ):
    """Return the names of entries in source listing `s` that are
    missing from destination listing `d` or whose dates differ --
    i.e. the files that need to be copied."""
    utils.log( "Finding files to copy" )
    result = []
    for source in s:
        match = find_by_name( d, source.name )
        if match is None or match.date != source.date:
            result.append( source.name )
    for f in result:
        utils.log( "    %s" % f )
    return result
+        
+        
def ftp_task( site, site_path , destination ):
    """Mirror new/changed files from ftp://site/site_path into the local
    `destination` directory, preserving remote modification times.

    Only files missing locally, or whose remote date differs from the
    local one, are downloaded (see diff()).
    """
    utils.log( 'Executing ftp from "ftp://%s/%s" -> %s' % ( site, site_path, destination ) )

    # anonymous login
    utils.log( '   logging on ftp site %s' % site )
    f = ftplib.FTP( site )
    f.login()
    utils.log( '   cwd to "%s"' % site_path )
    f.cwd( site_path )

    source_content = list_ftp( f )
    destination_content = list_dir( destination )

    # names of files that are new or changed on the FTP site
    d = diff( source_content, destination_content )

    utils.log( "Copying update files" )
    for source in d:
        utils.log( 'Copying "%s"' % source )
        result = open( os.path.join( destination, source ), 'wb' )
        f.retrbinary( 'RETR %s' % source, result.write )
        result.close()
        # stamp the local copy with the remote modification time so the
        # next diff() sees it as up to date
        m = time.mktime( find_by_name( source_content, source ).date )
        os.utime( os.path.join( destination, source ), ( m, m ) )
+        
+    
+
+
def sync_dirs( file_mask, source_dir, destination_dir, timestamp, do_sync ):
    """For every file in `source_dir` matching `file_mask`, call
    `do_sync( src )` when the corresponding file in `destination_dir`
    is missing or older according to the `timestamp` callable."""
    utils.makedirs( destination_dir )
    for src in glob.glob( os.path.join( source_dir, file_mask ) ):
        dst = os.path.join( destination_dir, os.path.basename( src ) )
        utils.log( '    "%s" <-> "%s"' % ( src, dst ) )
        needs_sync = not os.path.exists( dst ) or timestamp( src ) > timestamp( dst )
        if needs_sync:
            do_sync( src )
+
+
def sync_archives_task( source_dir, processed_dir, unzip_func ):
    """Unpack zip archives in `source_dir` that are new or newer than
    their copy in `processed_dir`, then record each one as processed by
    copying it there (freshness comparison done by sync_dirs)."""

    def _modtime_timestamp( file ):
        # archives are compared by filesystem modification time
        return os.stat( file ).st_mtime

    def _unzip( zip_file ):
        try:
            utils.log( '  Unzipping "%s" ...' % zip_file )
            unzip_func( zip_file, source_dir )
            utils.log( '  Copying "%s" into "%s"' % ( zip_file, processed_dir ) )
            # copy2 preserves the modification time, so the next run's
            # sync_dirs pass sees this archive as already processed
            shutil.copy2( zip_file, processed_dir )
        except Exception, msg:
            # best effort: a broken archive is skipped, not fatal
            utils.log( '  Skipping "%s" due to errors (%s)' % ( zip_file, msg ) )

    utils.log( 'Unpacking updated archives in "%s"...' % source_dir )
    sync_dirs(
          '*.zip'
        , source_dir
        , processed_dir
        , _modtime_timestamp
        , _unzip
        )
+
+
+
def sync_xmls_task( source_dir, processed_dir, merged_dir, expected_results_file, failures_markup_file ):    
    """For each new/updated test-run XML in `source_dir`, merge it with
    the expected results and failures markup (add_expected_results.xsl)
    into `merged_dir`, and record it as processed by copying it into
    `processed_dir`.  Freshness is decided by the XML's own embedded
    timestamp attribute, not the file's mtime (see sync_dirs)."""

    def _xml_timestamp( xml_path ):
        # Return the 'timestamp' attribute of the document's <test-run>
        # root element.

        class timestamp_reader( xml.sax.handler.ContentHandler ):
            def startElement( self, name, attrs ):
                if name == 'test-run':
                    self.timestamp = attrs.getValue( 'timestamp' )
                    # abort the parse early by raising the handler
                    # instance itself; caught just below (Python 2 idiom)
                    raise self

        try:
            xml.sax.parse( xml_path, timestamp_reader() )
            # reached only if no <test-run> element was found
            # (Python 2 string exception)
            raise 'Cannot extract timestamp from "%s". Invalid XML file format?' % xml_path
        except timestamp_reader, x:
            return x.timestamp

        
    def _process_updated_xml( xml ):
        # NOTE: the parameter 'xml' shadows the module-level xml package
        # inside this function
        utils.log( '  Merging "%s" with expected results...' % xml )
        utils.libxslt( 
            utils.log
            , xml
            , xsl_path( 'add_expected_results.xsl' )
            , os.path.join( merged_dir, os.path.basename( xml ) )
            , {
              "expected_results_file" : expected_results_file
              , "failures_markup_file": failures_markup_file
              }
            )
        
        utils.log( '  Copying "%s" into "%s"' % ( xml, processed_dir ) )
        shutil.copy2( xml, processed_dir )


    utils.log( 'Processing updated XMLs in "%s"...' % source_dir )
    sync_dirs(
          '*.xml'
        , source_dir
        , processed_dir
        , _xml_timestamp
        , _process_updated_xml
        )
+
class xmlgen( xml.sax.saxutils.XMLGenerator ):
    """XMLGenerator that emits the XML declaration only once, so the
    SAX events of several parsed documents can be concatenated into a
    single well-formed output stream."""
    # class-level default; the first startDocument() sets an instance
    # attribute that shadows it
    document_started = 0
        
    def startDocument(self):
        if not self.document_started:
            xml.sax.saxutils.XMLGenerator.startDocument( self )
            self.document_started = 1
+
+
def merge_processed_test_runs( test_runs_dir, tag, writer ):
    """Concatenate every per-runner XML in `test_runs_dir` into one
    <all-test-runs> document written through `writer` (a file-like
    object).  `tag` is currently unused here."""
    all_runs_xml = xmlgen( writer )
    all_runs_xml.startDocument()
    all_runs_xml.startElement( 'all-test-runs', {} )

    utils.log( 'Merging processed test runs into a single XML... %s' % test_runs_dir )
    files = glob.glob( os.path.join( test_runs_dir, '*.xml' ) )
    for test_run in files:
        try:
            utils.log( '  Writing "%s" into the resulting XML...' % test_run )
            # replaying SAX events through xmlgen appends the document
            # (minus a duplicate XML declaration) to the output stream
            xml.sax.parse( test_run, all_runs_xml  )
        except Exception, msg:
            # a malformed test run is skipped rather than aborting the merge
            utils.log( '  Skipping "%s" due to errors (%s)' % ( test_run, msg ) )

    all_runs_xml.endElement( 'all-test-runs' )
    all_runs_xml.endDocument()
+
def execute_tasks(
          tag
        , user
        , run_date
        , comment_file
        , results_dir
        , output_dir
        , reports
        , extended_test_results
        , dont_collect_logs
        , expected_results_file
        , failures_markup_file
        ):
    """Run the whole report pipeline for `tag`: optionally download new
    logs over FTP into incoming/<tag>, unpack archives, merge each
    test-run XML with expected results, build the links pages, merge
    everything into extended_test_results.xml, and produce the HTML
    report pages."""

    
    # results_xml_path = os.path.join( results_dir, results_xml )
    # utils.log( 'Merging test runs into "%s"...' % results_xml_path )


    # incoming/<tag>/processed/merged working-directory hierarchy
    incoming_dir = os.path.join( results_dir, 'incoming', tag )
    processed_dir = os.path.join( incoming_dir, 'processed' )
    merged_dir = os.path.join( processed_dir, 'merged' )
    if not os.path.exists( incoming_dir ):
        os.makedirs( incoming_dir )
    if not os.path.exists( processed_dir ):
        os.makedirs( processed_dir )
    if not os.path.exists( merged_dir ):
        os.makedirs( merged_dir )
    
    if not dont_collect_logs:
        # pull new/changed logs from the central collection FTP site
        ftp_site = 'fx.meta-comm.com'
        site_path = '/boost-regression/%s' % tag

        ftp_task( ftp_site, site_path, incoming_dir )


    sync_archives_task( incoming_dir, processed_dir, utils.unzip )
    sync_xmls_task( incoming_dir, processed_dir, merged_dir, expected_results_file, failures_markup_file )
    
    make_links_task( merged_dir
                     , output_dir
                     , tag
                     , run_date
                     , comment_file
                     , extended_test_results
                     , failures_markup_file )


    # NOTE(review): `extended_test_results` passed in by the caller is
    # expected to name this same file -- confirm the paths always agree
    results_xml_path = os.path.join( output_dir, "extended_test_results.xml" )
    writer = open( results_xml_path, 'w' )
    merge_processed_test_runs( merged_dir, tag, writer )
    writer.close()

    
    make_result_pages(
          extended_test_results
        , expected_results_file
        , failures_markup_file
        , tag
        , run_date
        , comment_file
        , output_dir
        , reports
        )
+
def stamp( stamp_path, stamp_date_origin ):
    """Create `stamp_path` if it does not exist and copy onto it the
    modification time of `stamp_date_origin`."""
    if not os.path.exists( stamp_path ):
        handle = open( stamp_path, "w" )
        handle.close()

    mtime = os.path.getmtime( stamp_date_origin )
    os.utime( stamp_path, ( mtime, mtime ) )
+
def timestamps_different( f1, f2 ):
    """True (1) when either file is missing, or when their modification
    times differ at one-second granularity."""
    if not ( os.path.exists( f1 ) and os.path.exists( f2 ) ):
        return 1
    t1 = time.localtime( os.path.getmtime( f1 ) )
    t2 = time.localtime( os.path.getmtime( f2 ) )
    return t1 != t2
+
def make_links_task( input_dir, output_dir, tag, run_date, comment_file, extended_test_results, failures_markup_file ):
    """Generate links.html (per-test output pages) from each updated
    test-run XML in `input_dir`.  A "<xml>.links" stamp file carrying
    the XML's modification time marks already-processed inputs.
    Returns the path of the links file.
    NOTE(review): `extended_test_results` is accepted but never used in
    this function -- confirm whether it is still needed.
    """

    input_files = glob.glob( os.path.join( input_dir, "*.xml" ) )
    links = os.path.join( output_dir, 'links.html' )
    for input_file in input_files:
        stamp_file = input_file + ".links"

        # regenerate only when the XML changed since the last stamp
        if timestamps_different( input_file, stamp_file ):
            utils.makedirs( os.path.join( output_dir, 'output' ) )
            utils.log( '    Making test output files...' )
            utils.libxslt( 
                  utils.log
                , input_file
                , xsl_path( 'links_page.xsl' )
                , links
                , {
                      'source':                 tag
                    , 'run_date':               run_date 
                    , 'comment_file':           comment_file
                    , 'explicit_markup_file':   failures_markup_file
                    }
                )
        # record that this XML was processed at its current mtime
        stamp( stamp_file, input_file )

         
    return links
+
+
def make_result_pages(
          extended_test_results
        , expected_results_file
        , failures_markup_file
        , tag
        , run_date
        , comment_file
        , output_dir
        , reports
        ):
    """Produce the HTML report pages selected by `reports` (see the
    report_types list and usage()) by running XSL transformations over
    `extended_test_results`.
    NOTE(review): this function checks for an 'e' report code, while
    usage() documents 'x' for the extended results file -- confirm
    which code is intended.
    """

    utils.log( 'Producing the reports...' )
    __log__ = 1
    
    if comment_file != '':
        comment_file = os.path.abspath( comment_file )
        
    links = os.path.join( output_dir, 'links.html' )
    
    # per-mode output directories for the generated pages
    utils.makedirs( os.path.join( output_dir, 'output' ) )
    for mode in ( 'developer', 'user' ):
        utils.makedirs( os.path.join( output_dir, mode , 'output' ) )
        
    # 'i': developer issues list
    issues = os.path.join( output_dir, 'developer', 'issues.html'  )
    if 'i' in reports:
        utils.log( '    Making issues list...' )
        utils.libxslt( 
              utils.log
            , extended_test_results
            , xsl_path( 'issues_page.xsl' )
            , issues
            , {
                  'source':                 tag
                , 'run_date':               run_date
                , 'comment_file':           comment_file
                , 'explicit_markup_file':   failures_markup_file
                }
            )

    # 'dd'/'ud': detailed per-mode result pages
    for mode in ( 'developer', 'user' ):
        if mode[0] + 'd' in reports:
            utils.log( '    Making detailed %s  report...' % mode )
            utils.libxslt( 
                  utils.log
                , extended_test_results
                , xsl_path( 'result_page.xsl' )
                , os.path.join( output_dir, mode, 'index.html' )
                , { 
                      'links_file':             'links.html'
                    , 'mode':                   mode
                    , 'source':                 tag
                    , 'run_date':               run_date 
                    , 'comment_file':           comment_file
                    , 'expected_results_file':  expected_results_file
                    , 'explicit_markup_file' :  failures_markup_file
                    }
                )
    
    # 'ds'/'us': per-mode summary pages
    for mode in ( 'developer', 'user' ):
        if mode[0] + 's' in reports:
            utils.log( '    Making summary %s  report...' % mode )
            utils.libxslt(
                  utils.log
                , extended_test_results
                , xsl_path( 'summary_page.xsl' )
                , os.path.join( output_dir, mode, 'summary.html' )
                , { 
                      'mode' :                  mode 
                    , 'source':                 tag
                    , 'run_date':               run_date 
                    , 'comment_file':           comment_file
                    , 'explicit_markup_file' :  failures_markup_file
                    }
                )

    # 'ddr': detailed developer report restricted to the release subset
    if "ddr" in reports:
        mode = 'developer'
        utils.log( '    Making detailed %s release report...' % mode )
        utils.libxslt( 
                  utils.log
                , extended_test_results
                , xsl_path( 'result_page.xsl' )
                , os.path.join( output_dir, "developer", 'index_release.html' )
                , { 
                      'links_file':             'links.html'
                    , 'mode':                   mode
                    , 'source':                 tag
                    , 'run_date':               run_date 
                    , 'comment_file':           comment_file
                    , 'expected_results_file':  expected_results_file
                    , 'explicit_markup_file' :  failures_markup_file
                    , 'release':                "yes"
                    }
                )

    # 'dsr': developer summary restricted to the release subset
    if "dsr" in reports:
        mode = 'developer'
        utils.log( '    Making summary %s release report...' % mode )
        utils.libxslt(
                  utils.log
                , extended_test_results
                , xsl_path( 'summary_page.xsl' )
                , os.path.join( output_dir, "developer", 'summary_release.html' )
                , { 
                      'mode' :                  mode
                    , 'source':                 tag
                    , 'run_date':               run_date 
                    , 'comment_file':           comment_file
                    , 'explicit_markup_file' :  failures_markup_file
                    , 'release':                'yes'
                    }
                )
        
    # 'e': regenerate the expected-results file from this run
    if 'e' in reports:
        utils.log( '    Generating expected_results ...' )
        utils.libxslt(
              utils.log
            , extended_test_results
            , xsl_path( 'produce_expected_results.xsl' )
            , os.path.join( output_dir, 'expected_results.xml' )
            )

    # 'n': per-runner comment pages
    if  'n' in reports:
        utils.log( '    Making runner comment files...' )
        utils.libxslt(
              utils.log
            , extended_test_results
            , xsl_path( 'runners.xsl' )
            , os.path.join( output_dir, 'runners.html' )
            )

    # stylesheet shared by all generated pages
    shutil.copyfile(
          xsl_path( 'html/master.css' )
        , os.path.join( output_dir, 'master.css' )
        )
+
+
def build_xsl_reports( 
          locate_root_dir
        , tag
        , expected_results_file
        , failures_markup_file
        , comment_file
        , results_dir
        , result_file_prefix
        , dont_collect_logs = 0
        , reports = report_types
        , user = None
        , upload = False
        ):
    """Top-level driver: prepare directories and inputs, run the report
    pipeline (execute_tasks), and optionally upload the packaged result
    directory to SourceForge."""

    ( run_date ) = time.strftime('%a, %d %b %Y %H:%M:%S +0000', time.gmtime() )
        
    # NOTE(review): bin_boost_dir is computed but never used below --
    # confirm whether it can be removed
    bin_boost_dir = os.path.join( locate_root_dir, 'bin', 'boost' )
    
    output_dir = os.path.join( results_dir, result_file_prefix )
    utils.makedirs( output_dir )
    
    # fall back to an empty expected-results document when none is given
    if expected_results_file != '':
        expected_results_file = os.path.abspath( expected_results_file )
    else:
        expected_results_file = os.path.abspath( map_path( 'empty_expected_results.xml' ) )


    extended_test_results = os.path.join( output_dir, 'extended_test_results.xml' )
        
    execute_tasks(
          tag
        , user
        , run_date
        , comment_file
        , results_dir
        , output_dir
        , reports
        , extended_test_results
        , dont_collect_logs
        , expected_results_file
        , failures_markup_file
        )

    if upload:
        upload_dir = 'regression-logs/'
        utils.log( 'Uploading  results into "%s" [connecting as %s]...' % ( upload_dir, user ) )
        
        # archive the whole report directory, upload it, then untar it
        # remotely in the background
        archive_name = '%s.tar.gz' % result_file_prefix
        utils.tar( 
              os.path.join( results_dir, result_file_prefix )
            , archive_name
            )
        
        utils.sourceforge.upload( os.path.join( results_dir, archive_name ), upload_dir, user )
        utils.sourceforge.untar( os.path.join( upload_dir, archive_name ), user, background = True )
+
+
def accept_args( args ):
    """Parse command-line `args` into the positional tuple expected by
    build_xsl_reports().  Flag options ('--dont-collect-logs',
    '--upload') are reported as booleans via their presence in the
    parsed options."""
    args_spec = [ 
          'locate-root='
        , 'tag='
        , 'expected-results='
        , 'failures-markup='
        , 'comment='
        , 'results-dir='
        , 'results-prefix='
        , 'dont-collect-logs'
        , 'reports='
        , 'user='
        , 'upload'
        , 'help'
        ]
        
    # Defaults for value options only.  Flag options must not be given
    # defaults here: their *presence* in `options` is what the
    # has_key() tests below detect.  (A stray "'upload': False" entry
    # previously sat here under the wrong key name.)
    options = { 
          '--comment': ''
        , '--expected-results': ''
        , '--failures-markup': ''
        , '--reports': ','.join( report_types )
        , '--tag': None
        , '--user': None
        }
    
    utils.accept_args( args_spec, args, options, usage )
    if not options.has_key( '--results-dir' ):
         options[ '--results-dir' ] = options[ '--locate-root' ]

    if not options.has_key( '--results-prefix' ):
        options[ '--results-prefix' ] = 'all'
    
    return ( 
          options[ '--locate-root' ]
        , options[ '--tag' ]
        , options[ '--expected-results' ]
        , options[ '--failures-markup' ]
        , options[ '--comment' ]
        , options[ '--results-dir' ]
        , options[ '--results-prefix' ]
        , options.has_key( '--dont-collect-logs' )
        , options[ '--reports' ].split( ',' )
        , options[ '--user' ]
        , options.has_key( '--upload' )
        )
+
+
# Print command-line help (invoked for --help or on a bad option by
# utils.accept_args).
# NOTE(review): the help text lists 'x - extended results file', but
# make_result_pages() tests for the code 'e' -- confirm which is right.
def usage():
    print 'Usage: %s [options]' % os.path.basename( sys.argv[0] )
    print    '''
\t--locate-root         the same as --locate-root in compiler_status
\t--tag                 the tag for the results (i.e. 'CVS-HEAD')
\t--expected-results    the file with the results to be compared with
\t                      the current run
\t--failures-markup     the file with the failures markup
\t--comment             an html comment file (will be inserted in the reports)
\t--results-dir         the directory containing -links.html, -fail.html
\t                      files produced by compiler_status (by default the
\t                      same as specified in --locate-root)
\t--results-prefix      the prefix of -links.html, -fail.html
\t                      files produced by compiler_status
\t--user                SourceForge user name for a shell account
\t--upload              upload reports to SourceForge 

The following options are useful in debugging:

\t--dont-collect-logs dont collect the test logs
\t--reports           produce only the specified reports
\t                        us - user summary
\t                        ds - developer summary
\t                        ud - user detailed
\t                        dd - developer detailed
\t                        l  - links
\t                        p  - patches
\t                        x  - extended results file
\t                        i  - issues
'''
+
def main():
    """Command-line entry point: parse sys.argv and build the reports."""
    parsed = accept_args( sys.argv[ 1 : ] )
    build_xsl_reports( *parsed )

if __name__ == '__main__':
    main()

+ 0 - 2
tools/regression/xsl_reports/merger/__init__.py

@@ -1,2 +0,0 @@
-
-from merge_logs import *

+ 0 - 202
tools/regression/xsl_reports/merger/merge_logs.py

@@ -1,202 +0,0 @@
-
-# Copyright (c) MetaCommunications, Inc. 2003-2004
-#
-# Distributed under the Boost Software License, Version 1.0. 
-# (See accompanying file LICENSE_1_0.txt or copy at 
-# http://www.boost.org/LICENSE_1_0.txt)
-
-import xml.sax.saxutils
-import xml.dom.minidom
-import ftplib
-import glob
-import os.path
-import sys
-
-
-def download_from_ftp( destination_dir, tag ):
-    ftp_site = 'fx.meta-comm.com'
-    site_path = '/boost-regression/%s' % tag
-    utils.log( 'Downloading test runs from  ftp://%s%s' % ( ftp_site, site_path ) )
-
-    ftp = ftplib.FTP( ftp_site )
-    ftp.login()
-    ftp.cwd( site_path )
-    
-    files = ftp.nlst()
-    for f in files:
-        utils.log( '  Downloading %s into "%s" directory...' % ( f, destination_dir ) )
-        result = open( os.path.join( destination_dir, f ), 'wb' )
-        ftp.retrbinary( 'RETR %s' % f, result.write )
-        result.close()
-
-    ftp.quit()
-
-
-def download_test_runs( incoming_dir, tag, user ):
-    utils.log( 'Downloading test runs for tag "%s"...' % tag )
-
-    destination_dir = os.path.join( incoming_dir, tag )
-    utils.makedirs( destination_dir )
-    
-    if user is not None:
-        utils.log( 'Downloading test runs from SourceForge [connecting as %s]...' % user )
-        utils.sourceforge.download( 
-              'regression-logs/incoming/%s/' % tag
-            , destination_dir
-            , user
-            )
-    
-    download_from_ftp( destination_dir, tag )
-
-
-def unzip_and_remove( zip_path, dir, unzip_func ):
-    try:
-        utils.log( '  Unzipping "%s" ...' % zip_path  )
-        unzip_func( zip_path, dir )
-        utils.log( '  Removing "%s" ...' % zip_path )
-        os.unlink( zip_path )
-    except Exception, msg:
-        utils.log( '  Skipping "%s" due to errors (%s)' % ( zip_path, msg ) )
-
-
-def unzip_test_runs( dir ):
-    files_mask = os.path.join( dir, '*.zip' )
-
-    files = glob.glob( files_mask )
-    for f in files:
-        unzip_and_remove( f, dir, utils.unzip )
-        
-    files = glob.glob( files_mask )
-    if len( files ):
-        utils.log( 'Warning: Some files could not be unzipped using the built-in \'zipfile\' module.' )
-        utils.log( '         Trying to decompress them using a platform-specific tool...' )
-        try: import unzip_cmd
-        except ImportError:
-            utils.log( '  Could not find \'unzip_cmd\' module in the script directory.' )
-        else:
-            for f in files:
-                unzip_and_remove( f, dir, unzip_cmd.main )
-        
-        files = glob.glob( files_mask )
-        if len( files ):
-            utils.log( 'Warning: The following files have not been decompressed:' )
-            for f in files:
-                utils.log( '\t%s' % f )
-                zip_path = '%s.xml' % os.path.splitext( f )[0]
-                if os.path.exists( zip_path ):
-                    os.unlink( zip_path )
-
-
-class xmlgen( xml.sax.saxutils.XMLGenerator ):
-    document_started = 0
-        
-    def startDocument(self):
-        if not self.document_started:
-            xml.sax.saxutils.XMLGenerator.startDocument( self )
-            self.document_started = 1
-
-
-def merge_test_runs( incoming_dir, tag, writer, dont_collect_logs ):
-    test_runs_dir = os.path.join( incoming_dir, tag )
-    
-    if not dont_collect_logs:
-        utils.log( 'Removing stale XMLs in "%s"...' % test_runs_dir )
-        files = glob.glob( os.path.join( test_runs_dir, '*.xml' ) )
-        for f in files:  
-            utils.log( '  Removing "%s" ...' % f )
-            os.unlink( f )
-
-        utils.log( 'Unzipping new test runs...' )
-        unzip_test_runs( test_runs_dir )
-    
-    all_runs_xml = xmlgen( writer )
-    all_runs_xml.startDocument()
-    all_runs_xml.startElement( 'all-test-runs', {} )
-
-    utils.log( 'Processing test runs...' )
-    files = glob.glob( os.path.join( test_runs_dir, '*.xml' ) )
-    for test_run in files:
-        try:
-            utils.log( '  Writing "%s" into the resulting XML...' % test_run )
-            xml.sax.parse( test_run, all_runs_xml  )
-        except Exception, msg:
-            utils.log( '  Skipping "%s" due to errors (%s)' % ( test_run, msg ) )
-
-    all_runs_xml.endElement( 'all-test-runs' )
-    all_runs_xml.endDocument()
-    writer.close()
-
-
-def merge_logs(
-          tag
-        , user
-        , results_dir
-        , results_xml
-        , dont_collect_logs
-        ):
-    
-    results_xml_path = os.path.join( results_dir, results_xml )
-    utils.log( 'Merging test runs into "%s"...' % results_xml_path )
-    
-    incoming_dir = os.path.join( results_dir, 'incoming/' )
-
-    utils.log( '  dont_collect_logs: %s' % dont_collect_logs )
-    if not dont_collect_logs:
-        download_test_runs( incoming_dir, tag, user )
-    
-    writer = open( results_xml_path, 'w' )
-    merge_test_runs( incoming_dir, tag, writer, dont_collect_logs )
-
-    utils.log( 'Done writing "%s"' % results_xml_path )
-
-
-def accept_args( args ):
-    args_spec = [ 
-          'tag='
-        , 'user='
-        , 'results-dir='
-        , 'results-xml='
-        , 'dont-collect-logs'
-        , 'help'
-        ]
-
-    options = { 
-          '--results-xml' : 'all-runs.xml'
-        , '--user' :        None
-        , '--tag' :         'CVS-HEAD'
-        }
-    utils.accept_args( args_spec, args, options, usage )
-
-    return (
-          options[ '--tag' ]
-        , options[ '--user' ]
-        , options[ '--results-dir' ]
-        , options[ '--results-xml' ]
-        , options.has_key( '--dont-collect-logs' )
-        )
-
-
-def usage():
-    print 'Usage: %s [options]' % os.path.basename( sys.argv[0] )
-    print    '''
-\t--results-dir         directory for the resulting XML/collected logs
-\t--results-xml         name of the resulting XML document (default 'all-runs.xml')
-\t--tag                 the tag for the results ('CVS-HEAD' by default)
-\t--user                SourceForge user name for a shell account (optional,
-\t                      if not provided, logs are collected from FTP only)
-\t--dont-collect-logs   don't collect logs from SourceForge/FTP
-'''
-    
-def main():
-    merge_logs( *accept_args( sys.argv[ 1 : ] ) )
-    
-
-if __name__ != '__main__':  import utils
-else:
-    # in absense of relative import...
-    xsl_path = os.path.abspath( os.path.dirname( sys.argv[ 0 ] ) )
-    while os.path.basename( xsl_path ) != 'xsl_reports': xsl_path = os.path.dirname( xsl_path )
-    sys.path.append( xsl_path )
-
-    import utils
-    main()

+ 1 - 0
tools/regression/xsl_reports/utils/__init__.py

@@ -3,6 +3,7 @@ from accept_args import *
 from char_translation_table import *
 from check_existance import *
 from checked_system import *
+from ftp import *
 from libxslt import *
 from log import *
 from makedirs import *

+ 5 - 2
tools/regression/xsl_reports/utils/libxslt.py

@@ -1,12 +1,15 @@
 
+import utils.makedirs
 import os.path
 import os
 import sys
 
 def libxslt( log, xml_file, xsl_file, output_file, parameters = None ):
 
-    ## if sys.platform == 'win32':
-    ##    os.chdir( os.path.dirname( xsl_file ) )
+    utils.makedirs( os.path.dirname( output_file ) )
+    
+    if sys.platform == 'win32':
+        os.chdir( os.path.dirname( xsl_file ) )
 
     transform_command = 'xsltproc'
     transform_command = transform_command + ' -o ' + '%s' % output_file.replace( '\\', '/' ).replace( ' ', '%20' )

+ 162 - 146
tools/regression/xsl_reports/xsl/v2/add_expected_results.xsl

@@ -22,166 +22,182 @@ http://www.boost.org/LICENSE_1_0.txt)
     <xsl:param name="expected_results_file"/>
     <xsl:param name="failures_markup_file"/>
     <xsl:variable name="expected_results" select="document( $expected_results_file )" />
-    <xsl:variable name="failures_markup" select="document( $failures_markup_file )" />
 
+    <xsl:key name = "trk" match = "test-result" use = "concat( ../../@name, '-', ../@name, '-', @test-name )" />
+
+    <xsl:variable name="failures_markup" select="document( $failures_markup_file )" />
     <xsl:template match="/">
         <xsl:apply-templates/>
     </xsl:template>
       
     <xsl:template match="test-log">
+        <xsl:variable name="test_log" select="."/>
         <xsl:variable name="library" select="@library"/>
         <xsl:variable name="test-name" select="@test-name"/>
         <xsl:variable name="toolset" select="@toolset"/>
 
         <xsl:element name="{local-name()}">
-        <xsl:apply-templates select="@*"/>
+            <xsl:apply-templates select="@*"/>
 
-        <xsl:variable name="actual_result">
-            <xsl:choose>
-            <!-- Hack: needs to be researched (and removed). See M.Wille's incident. -->
-            <xsl:when test="run/@result='succeed' and lib/@result='fail'">
-                <xsl:text>success</xsl:text>
-            </xsl:when>
-            <xsl:when test="./*/@result = 'fail'" >
-                <xsl:text>fail</xsl:text>
-            </xsl:when>
-            <xsl:otherwise>
-                <xsl:text>success</xsl:text>
-            </xsl:otherwise>
-            </xsl:choose>                     
-        </xsl:variable>
-
-        <xsl:variable name="expected_results_test_case" select="$expected_results//*/test-result[ @library=$library and ( @test-name=$test-name or @test-name='*' ) and @toolset = $toolset]"/>
-        <xsl:variable name="test_failures_markup" select="$failures_markup//library[@name=$library]/test[ meta:re_match( @name, $test-name ) ]/mark-failure/toolset[ meta:re_match( @name, $toolset ) ]/.."/>
-        <xsl:variable name="test_failures_markup2" select="$failures_markup//library[@name=$library]/mark-expected-failures/test[ meta:re_match( @name, $test-name ) ]/../toolset[ meta:re_match( @name, $toolset ) ]/.."/>
-
-        <xsl:variable name="is_new">
-            <xsl:choose>
-                <xsl:when test="$expected_results_test_case">
-                <xsl:text>no</xsl:text>
-                </xsl:when>
-                <xsl:otherwise>yes</xsl:otherwise>
-            </xsl:choose>
-        </xsl:variable>
-
-        <xsl:variable name="has_explicit_markup" select="count( $test_failures_markup ) > 0 or count( $test_failures_markup2 ) > 0"/>
-
-        <xsl:variable name="expected_result">
-            <xsl:choose>
-            <xsl:when test="$has_explicit_markup">
-                <xsl:text>fail</xsl:text>
-            </xsl:when>
-              
-            <xsl:otherwise>
-                <xsl:choose>
-                <xsl:when test="$expected_results_test_case and $expected_results_test_case/@result = 'fail'">
-                    <xsl:text>fail</xsl:text>
-                </xsl:when>
-                  
-                <xsl:otherwise>success</xsl:otherwise>
-                </xsl:choose>
-            </xsl:otherwise>
-            </xsl:choose>
-        </xsl:variable>
-
-        <xsl:variable name="status">
-            <xsl:choose>
-            <xsl:when test="$expected_result = $actual_result">expected</xsl:when>
-            <xsl:otherwise>unexpected</xsl:otherwise>
-            </xsl:choose>
-        </xsl:variable>
-
-        <xsl:variable name="unexpected_success" select="$status = 'unexpected' and $actual_result = 'success'"/>
-
-        <xsl:variable name="expected_reason">
-            <xsl:choose>
-                <xsl:when test="$test_failures_markup/@reason">
-                    <xsl:value-of select="$test_failures_markup/@reason"/>
-                </xsl:when>
-                <xsl:otherwise>
-                    <xsl:value-of select="$test_failures_markup2/@reason"/>
-                </xsl:otherwise>
-            </xsl:choose>
-        </xsl:variable>
-
-        <xsl:variable name="notes">
-
-            <xsl:if test="$unexpected_success and $has_explicit_markup">
-                <note>
-                    <span class="auto-note">
-                    This test case was explicitly marked up in 
-                    <a href="http://cvs.sourceforge.net/viewcvs.py/boost/boost/status/explicit-failures-markup.xml">
-                    status/explicit-failures-markup.xml</a> file in the Boost CVS as "expected to fail",
-                    but is passing. Please consult the notes/output below for more details.
-                    </span>
-                </note>
-            </xsl:if>
-
-            <xsl:if test="$has_explicit_markup and count( $test_failures_markup2/note ) = 0 and count( $test_failures_markup/note ) = 0">
-                <xsl:choose>
-                <xsl:when test="$unexpected_success">
-                    <note>
-                        <span class="auto-note">
-                        No explanation was provided for this markup. Please contact the library 
-                        author(s)/maintainer(s) for more details.
-                        </span>
-                    </note>
-                </xsl:when>
-                <xsl:otherwise>
-                    <note>
-                        <span class="auto-note">
-                        This failure was explicitly marked as expected in 
-                        <a href="http://cvs.sourceforge.net/viewcvs.py/boost/boost/status/explicit-failures-markup.xml">
-                        status/explicit-failures-markup.xml</a> file in the Boost CVS. 
-                        Please contact the library author(s)/maintainer(s) for the explanation of this markup.
-                        </span>
-                    </note>
-                </xsl:otherwise>
-                </xsl:choose>
-            </xsl:if>
-
-            <xsl:if test="count( $test_failures_markup ) > 0">
-                <xsl:for-each select="$test_failures_markup/note">
-                    <xsl:copy-of select="."/>
-                </xsl:for-each>
-            </xsl:if>
-
-            <xsl:if test="count( $test_failures_markup2 ) > 0">
-                <xsl:for-each select="$test_failures_markup2/note">
-                    <xsl:copy-of select="."/>
-                </xsl:for-each>
-            </xsl:if>
-
-
-            <xsl:if test="$expected_results_test_case and $expected_results_test_case/@result = 'fail'">
+
+            <xsl:variable name="actual_result">
                 <xsl:choose>
-                <xsl:when test="$unexpected_success">
-                    <note>
-                        <span class="auto-note">
-                        This test case used to fail in the reference ("last-known-good") release.
-                        </span>
-                    </note>
-                </xsl:when>
-                <xsl:otherwise>
-                    <note>
-                        <span class="auto-note">
-                        This failure was present in the reference ("last-known-good") release.
-                        </span>
-                    </note>
-                </xsl:otherwise>
-                </xsl:choose>
-            </xsl:if>
+                    <!-- Hack: needs to be researched (and removed). See M.Wille's incident. -->
+                    <xsl:when test="run/@result='succeed' and lib/@result='fail'">
+                        <xsl:text>success</xsl:text>
+                    </xsl:when>
+                    <xsl:when test="./*/@result = 'fail'" >
+                        <xsl:text>fail</xsl:text>
+                    </xsl:when>
+                    <xsl:otherwise>
+                        <xsl:text>success</xsl:text>
+                    </xsl:otherwise>
+                </xsl:choose>                     
+            </xsl:variable>
             
-        </xsl:variable>
-
-        <xsl:attribute name="result"><xsl:value-of select="$actual_result"/></xsl:attribute>
-        <xsl:attribute name="expected-result"><xsl:value-of select="$expected_result"/></xsl:attribute>
-        <xsl:attribute name="expected-reason"><xsl:value-of select="$expected_reason"/></xsl:attribute>
-        <xsl:attribute name="status"><xsl:value-of select="$status"/></xsl:attribute>
-        <xsl:attribute name="is-new"><xsl:value-of select="$is_new"/></xsl:attribute>
-        <xsl:element name="notes"><xsl:copy-of select="$notes"/></xsl:element>
-
-        <xsl:apply-templates select="node()" />
+            <!-- 
+                 Select expected_results context 
+                 See http://clover.slavic.pitt.edu/~repertorium/plectogram/keys/keys.html for a good explanation.
+                 
+                 Briefly, for-each doesn't iterate through expected_results, it just selects expected result
+                 as current context to make "key" function work.
+                 -->
+
+            <xsl:for-each select="$expected_results">
+                
+                <xsl:variable name="expected_results_test_case" select="key( 'trk', concat( $toolset, '-', $library, '-', $test-name ) )"/>
+                <xsl:variable name="test_failures_markup" select="$failures_markup//library[@name=$library]/test[ meta:re_match( @name, $test-name ) ]/mark-failure/toolset[ meta:re_match( @name, $toolset ) ]/.."/>
+                <xsl:variable name="test_failures_markup2" select="$failures_markup//library[@name=$library]/mark-expected-failures/test[ meta:re_match( @name, $test-name ) ]/../toolset[ meta:re_match( @name, $toolset ) ]/.."/>
+
+                <xsl:variable name="is_new">
+                    <xsl:choose>
+                        <xsl:when test="$expected_results_test_case">
+                            <xsl:text>no</xsl:text>
+                        </xsl:when>
+                        <xsl:otherwise>yes</xsl:otherwise>
+                    </xsl:choose>
+                </xsl:variable>
+                
+
+                <xsl:variable name="has_explicit_markup" select="count( $test_failures_markup ) > 0 or count( $test_failures_markup2 ) > 0"/>
+                    
+                <xsl:variable name="expected_result">
+                    <xsl:choose>
+                        <xsl:when test="$has_explicit_markup">
+                            <xsl:text>fail</xsl:text>
+                        </xsl:when>
+                        
+                        <xsl:otherwise>
+                            <xsl:choose>
+                                <xsl:when test="$expected_results_test_case and $expected_results_test_case/@result = 'fail'">
+                                    <xsl:text>fail</xsl:text>
+                                </xsl:when>
+                                <xsl:otherwise>success</xsl:otherwise>
+                            </xsl:choose>
+                        </xsl:otherwise>
+                    </xsl:choose>
+                </xsl:variable>
+                
+                <xsl:variable name="status">
+                    <xsl:choose>
+                        <xsl:when test="$expected_result = $actual_result">expected</xsl:when>
+                        <xsl:otherwise>unexpected</xsl:otherwise>
+                    </xsl:choose>
+                </xsl:variable>
+
+                <xsl:variable name="unexpected_success" select="$status = 'unexpected' and $actual_result = 'success'"/>
+            
+                <xsl:variable name="expected_reason">
+                    <xsl:choose>
+                        <xsl:when test="$test_failures_markup/@reason">
+                            <xsl:value-of select="$test_failures_markup/@reason"/>
+                        </xsl:when>
+                        <xsl:otherwise>
+                            <xsl:value-of select="$test_failures_markup2/@reason"/>
+                        </xsl:otherwise>
+                    </xsl:choose>
+                </xsl:variable>
+
+
+                <xsl:variable name="notes">
+                    
+                    <xsl:if test="$unexpected_success and $has_explicit_markup">
+                        <note>
+                            <span class="auto-note">
+                                This test case was explicitly marked up in 
+                                <a href="http://cvs.sourceforge.net/viewcvs.py/boost/boost/status/explicit-failures-markup.xml">
+                                    status/explicit-failures-markup.xml</a> file in the Boost CVS as "expected to fail",
+                                but is passing. Please consult the notes/output below for more details.
+                            </span>
+                        </note>
+                    </xsl:if>
+                    
+                    <xsl:if test="$has_explicit_markup and count( $test_failures_markup2/note ) = 0 and count( $test_failures_markup/note ) = 0">
+                        <xsl:choose>
+                            <xsl:when test="$unexpected_success">
+                                <note>
+                                    <span class="auto-note">
+                                        No explanation was provided for this markup. Please contact the library 
+                                        author(s)/maintainer(s) for more details.
+                                    </span>
+                                </note>
+                            </xsl:when>
+                            <xsl:otherwise>
+                                <note>
+                                    <span class="auto-note">
+                                        This failure was explicitly marked as expected in 
+                                        <a href="http://cvs.sourceforge.net/viewcvs.py/boost/boost/status/explicit-failures-markup.xml">
+                                            status/explicit-failures-markup.xml</a> file in the Boost CVS. 
+                                        Please contact the library author(s)/maintainer(s) for the explanation of this markup.
+                                    </span>
+                                </note>
+                            </xsl:otherwise>
+                        </xsl:choose>
+                    </xsl:if>
+                    
+                    <xsl:if test="count( $test_failures_markup ) > 0">
+                        <xsl:for-each select="$test_failures_markup/note">
+                            <xsl:copy-of select="."/>
+                        </xsl:for-each>
+                    </xsl:if>
+                    
+                    <xsl:if test="count( $test_failures_markup2 ) > 0">
+                        <xsl:for-each select="$test_failures_markup2/note">
+                            <xsl:copy-of select="."/>
+                        </xsl:for-each>
+                    </xsl:if>
+                    
+                    
+                    <xsl:if test="$expected_results_test_case and $expected_results_test_case/@result = 'fail'">
+                        <xsl:choose>
+                            <xsl:when test="$unexpected_success">
+                                <note>
+                                    <span class="auto-note">
+                                        This test case used to fail in the reference ("last-known-good") release.
+                                    </span>
+                                </note>
+                            </xsl:when>
+                            <xsl:otherwise>
+                                <note>
+                                    <span class="auto-note">
+                                        This failure was present in the reference ("last-known-good") release.
+                                    </span>
+                                </note>
+                            </xsl:otherwise>
+                        </xsl:choose>
+                    </xsl:if>
+                </xsl:variable>
+                
+                
+                <xsl:attribute name="result"><xsl:value-of select="$actual_result"/></xsl:attribute>
+                <xsl:attribute name="expected-result"><xsl:value-of select="$expected_result"/></xsl:attribute>
+                <xsl:attribute name="expected-reason"><xsl:value-of select="$expected_reason"/></xsl:attribute>
+                <xsl:attribute name="status"><xsl:value-of select="$status"/></xsl:attribute>
+                <xsl:attribute name="is-new"><xsl:value-of select="$is_new"/></xsl:attribute>
+                <xsl:element name="notes"><xsl:copy-of select="$notes"/></xsl:element>
+                
+                <xsl:apply-templates select="$test_log/node()" />
+            </xsl:for-each>
         </xsl:element>
     </xsl:template>
 

+ 32 - 0
tools/regression/xsl_reports/xsl/v2/expected_results_2_new_format.xsl

@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="utf-8"?>
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+                xmlns:set="http://exslt.org/sets"
+                extension-element-prefixes="set"
+                version="1.0">
+
+  <xsl:output method="xml" encoding="utf-8"/>
+
+  <xsl:template match="/">
+      <expected-failures>
+          <xsl:variable name="toolsets" select="set:distinct(//test-result/@toolset)"/>
+          <xsl:for-each select="$toolsets">
+              <xsl:variable name="toolset" select="."/>
+              <toolset name="{$toolset}">
+                  <xsl:variable name="toolset_test_cases" select="//test-result[@toolset = $toolset]"/>
+                  <xsl:variable name="libraries" select="set:distinct($toolset_test_cases/@library)"/>
+                  <xsl:for-each select="$libraries">
+                      <xsl:variable name="library" select="."/>
+                      <library name="{$library}">
+                          <xsl:variable name="test_results" select="$toolset_test_cases[@library = $library]"/>
+                          <xsl:for-each select="$test_results">
+                              <xsl:variable name="test_result" select="."/>
+                              <test-result test-name="{$test_result/@test-name}" result="{$test_result/@result}"/>
+                          </xsl:for-each>
+                      </library>
+                  </xsl:for-each>
+              </toolset>
+          </xsl:for-each>
+      </expected-failures>
+  </xsl:template>
+
+</xsl:stylesheet>

粤ICP备19079148号