Rene Rivera 18 лет назад
Родитель
Commit
6481902f8f
100 измененных файлов с 11578 добавлено и 0 удалено
  1. 70 0
      regression/boost_svn_export_archive.sh
  2. 53 0
      regression/build/Jamfile.v2
  3. 59 0
      regression/build/vcide/compiler_status.vcproj
  4. 64 0
      regression/build/vcide/library_status.vcproj
  5. 21 0
      regression/build/vcide/process_jam_log.sln
  6. 72 0
      regression/build/vcide/process_jam_log.vcproj
  7. 37 0
      regression/build/vcide/regression.sln
  8. 1017 0
      regression/compiler_status.cpp
  9. 167 0
      regression/detail/tiny_xml.cpp
  10. 70 0
      regression/detail/tiny_xml.hpp
  11. 17 0
      regression/detail/tiny_xml_test.cpp
  12. 12 0
      regression/detail/tiny_xml_test.txt
  13. 48 0
      regression/index.htm
  14. 45 0
      regression/index.shtml
  15. 983 0
      regression/library_status.cpp
  16. 166 0
      regression/library_status.html
  17. 15 0
      regression/library_test.bat
  18. 14 0
      regression/library_test.sh
  19. 85 0
      regression/library_test_all.sh
  20. 809 0
      regression/process_jam_log.cpp
  21. 197 0
      regression/regression-logs.pl
  22. 191 0
      regression/run_tests.sh
  23. 21 0
      regression/test/Jamfile.v2
  24. 10 0
      regression/test/compile-fail~fail.cpp
  25. 9 0
      regression/test/compile-fail~pass.cpp
  26. 9 0
      regression/test/compile~fail.cpp
  27. 9 0
      regression/test/compile~pass.cpp
  28. 18 0
      regression/test/compile~warn.cpp
  29. 9 0
      regression/test/run-fail~compile-fail.cpp
  30. 16 0
      regression/test/run-fail~fail-warn.cpp
  31. 12 0
      regression/test/run-fail~fail.cpp
  32. 15 0
      regression/test/run-fail~pass.cpp
  33. 20 0
      regression/test/run-fail~warn.cpp
  34. 9 0
      regression/test/run~compile-fail.cpp
  35. 17 0
      regression/test/run~fail-note.cpp
  36. 20 0
      regression/test/run~fail-warn.cpp
  37. 14 0
      regression/test/run~fail.cpp
  38. 17 0
      regression/test/run~note.cpp
  39. 12 0
      regression/test/run~pass.cpp
  40. 24 0
      regression/test/run~warn-note.cpp
  41. 18 0
      regression/test/run~warn.cpp
  42. 11 0
      regression/test/test-boost-build/ignored_rc/ignored_rc.jam
  43. 12 0
      regression/test/test-boost-build/ignored_rc/recognized_rc.jam
  44. 9 0
      regression/test/test-boost-build/missing_dependencies/Jamfile.v2
  45. 7 0
      regression/test/test-boost-build/missing_dependencies/lib/Jamfile.v2
  46. 1 0
      regression/test/test-boost-build/missing_dependencies/lib/lib.cpp
  47. 1 0
      regression/test/test-boost-build/missing_dependencies/test.cpp
  48. 1 0
      regression/test/test-cases/Huber2629/.cvsignore
  49. 36 0
      regression/test/test-cases/Huber2629/bjam.log
  50. 27 0
      regression/test/test-cases/Huber2629/expected/results.xml
  51. 1 0
      regression/test/test-cases/general/.cvsignore
  52. 325 0
      regression/test/test-cases/general/bjam.log
  53. 167 0
      regression/test/test-cases/general/expected/results.xml
  54. 33 0
      regression/test/test-cases/incremental/bjam.log
  55. 38 0
      regression/test/test-cases/incremental/bjam.log.1
  56. 9 0
      regression/test/test-cases/incremental/expected/results.xml
  57. 10 0
      regression/test/test.bat
  58. 181 0
      regression/test/test.py
  59. 833 0
      regression/xsl_reports/boost_wide_report.py
  60. 179 0
      regression/xsl_reports/boostbook_report.py
  61. 52 0
      regression/xsl_reports/build_results.sh
  62. 631 0
      regression/xsl_reports/email_maintainers.py
  63. 5 0
      regression/xsl_reports/empty_expected_results.xml
  64. 174 0
      regression/xsl_reports/make_snapshot.py
  65. 369 0
      regression/xsl_reports/report.py
  66. 2 0
      regression/xsl_reports/runner/__init__.py
  67. 500 0
      regression/xsl_reports/runner/collect_and_upload_logs.py
  68. 229 0
      regression/xsl_reports/runner/default.css
  69. 485 0
      regression/xsl_reports/runner/instructions.html
  70. 258 0
      regression/xsl_reports/runner/instructions.rst
  71. 1 0
      regression/xsl_reports/runner/instructions2html
  72. 1042 0
      regression/xsl_reports/runner/regression.py
  73. 165 0
      regression/xsl_reports/test/common.py
  74. 3 0
      regression/xsl_reports/test/expected_results.xml
  75. 159 0
      regression/xsl_reports/test/generate_test_results.py
  76. 85 0
      regression/xsl_reports/test/generate_test_results_v1.py
  77. 36 0
      regression/xsl_reports/test/restrict_to_library.xsl
  78. 32 0
      regression/xsl_reports/test/run_notes_regression.py
  79. 35 0
      regression/xsl_reports/test/run_v1.py
  80. 34 0
      regression/xsl_reports/test/test.py
  81. 36 0
      regression/xsl_reports/test/test_boost_wide_report.py
  82. 107 0
      regression/xsl_reports/test_results.xsd
  83. 13 0
      regression/xsl_reports/utils/__init__.py
  84. 30 0
      regression/xsl_reports/utils/accept_args.py
  85. 13 0
      regression/xsl_reports/utils/char_translation_table.py
  86. 9 0
      regression/xsl_reports/utils/check_existance.py
  87. 22 0
      regression/xsl_reports/utils/checked_system.py
  88. 49 0
      regression/xsl_reports/utils/libxslt.py
  89. 18 0
      regression/xsl_reports/utils/log.py
  90. 7 0
      regression/xsl_reports/utils/makedirs.py
  91. 17 0
      regression/xsl_reports/utils/rename.py
  92. 13 0
      regression/xsl_reports/utils/send_mail.py
  93. 48 0
      regression/xsl_reports/utils/sourceforge.py
  94. 16 0
      regression/xsl_reports/utils/tar.py
  95. 12 0
      regression/xsl_reports/utils/zip.py
  96. 144 0
      regression/xsl_reports/xsl/add_expected_results.xsl
  97. 182 0
      regression/xsl_reports/xsl/common.xsl
  98. 36 0
      regression/xsl_reports/xsl/html/issues_legend.html
  99. 72 0
      regression/xsl_reports/xsl/html/library_developer_legend.html
  100. 65 0
      regression/xsl_reports/xsl/html/library_user_legend.html

+ 70 - 0
regression/boost_svn_export_archive.sh

@@ -0,0 +1,70 @@
+#!/bin/sh
+
+#~ Copyright Redshift Software, Inc. 2007
+#~ Distributed under the Boost Software License, Version 1.0.
+#~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
+
+export PATH=/bin:/usr/bin:${PATH}
+
+timestamp=`date +%F-%H-%M-%S-%Z`
+branch=$1
+revision=`svn info file:///home/subversion/boost/${branch} | grep '^Revision:' | cut --byte=11-`
+tag=boost-${1/\/*}-${timestamp}
+export_dir=boost-$$
+
+# Remove files as listed in stdin, the assumption is that processing
+# of the file is complete and can be removed.
+rm_c()
+{
+  while read f; do
+    rm -f ${f}
+  done
+}
+# Generate the export file tree, and incrementally output the files
+# created.
+svn_export()
+{
+  svn export -r ${revision} file:///home/subversion/boost/${branch} ${tag}
+  echo "Revision: ${revision}" > ${tag}/svn_info.txt
+  echo "---- ${tag}/svn_info.txt"
+}
+# Create the archive incrementally, deleting files as we are done
+# adding them to the archive.
+make_archive()
+{
+  svn_export \
+    | cut --bytes=6- \
+    | star -c -D -to-stdout -d artype=pax list=- 2>/dev/null \
+    | bzip2 -6 -c \
+    | tee $1 \
+    | tar -jtf - \
+    | rm_c
+}
+
+run()
+{
+  cd /tmp
+  rm -rf ${export_dir}
+  mkdir ${export_dir}
+  cd ${export_dir}
+  mkfifo out.tbz2
+  make_archive out.tbz2 &
+  cat out.tbz2
+  cd /tmp
+  rm -rf ${export_dir}
+}
+
+run_debug()
+{
+  rm -rf ${export_dir}
+  mkdir ${export_dir}
+  cd ${export_dir}
+  mkfifo out.tbz2
+  make_archive out.tbz2 &
+  cat out.tbz2 > ../${tag}.tar.bz2
+  cd ..
+  rm -rf ${export_dir}
+}
+
+run
+#run_debug

+ 53 - 0
regression/build/Jamfile.v2

@@ -0,0 +1,53 @@
+# Regression test status reporting tools build Jamfile
+
+exe process_jam_log
+    :
+    ../process_jam_log.cpp ../detail/tiny_xml.cpp
+    /boost/filesystem//boost_filesystem
+    :
+    :
+    release
+    ;
+
+exe compiler_status
+    :
+    ../compiler_status.cpp ../detail/tiny_xml.cpp
+    /boost/filesystem//boost_filesystem
+    :
+    :
+    release
+    ;
+
+exe library_status
+    :
+    ../library_status.cpp ../detail/tiny_xml.cpp
+    /boost/filesystem//boost_filesystem
+    :
+    :
+    release
+    ;
+explicit library_status ;    
+
+install dist-bin
+    :
+    process_jam_log
+    compiler_status
+    library_status
+    :
+    <install-type>EXE
+    <location>../../../dist/bin
+    :
+    release
+    ;
+
+install dist-lib
+    :
+    process_jam_log
+    compiler_status
+    library_status
+    :
+    <install-type>LIB
+    <location>../../../dist/lib
+    :
+    release
+    ;

+ 59 - 0
regression/build/vcide/compiler_status.vcproj

@@ -0,0 +1,59 @@
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+	ProjectType="Visual C++"
+	Version="7.10"
+	Name="compiler_status"
+	ProjectGUID="{81F22EF9-A1B8-46CB-9C2D-56FD4327B942}"
+	Keyword="MakeFileProj">
+	<Platforms>
+		<Platform
+			Name="Win32"/>
+	</Platforms>
+	<Configurations>
+		<Configuration
+			Name="Debug|Win32"
+			OutputDirectory="Debug"
+			IntermediateDirectory="Debug"
+			ConfigurationType="0">
+			<Tool
+				Name="VCNMakeTool"
+				BuildCommandLine="cd ..\..\..\..\tools\regression\build
+..\..\jam\src\bin.ntx86\bjam.exe compiler_status.exe variant=debug"
+				ReBuildCommandLine="cd ..\..\..\..\tools\regression\build
+...\..\jam\src\bin.ntx86\bjam.exe -a compiler_status.exe variant=debug"
+				CleanCommandLine="cd ..\..\..\..\tools\regression\build
+call bjam --v2 msvc-7.1 debug clean
+"
+				Output="compiler_status.exe"/>
+		</Configuration>
+		<Configuration
+			Name="Release|Win32"
+			OutputDirectory="Release"
+			IntermediateDirectory="Release"
+			ConfigurationType="0">
+			<Tool
+				Name="VCNMakeTool"
+				BuildCommandLine="cd ..
+..\..\jam\src\bin.ntx86\bjam compiler_status variant=release link=static"
+				ReBuildCommandLine="cd ..
+..\..\jam\src\bin.ntx86\bjam -a compiler_status variant=release link=static"
+				CleanCommandLine="cd ..
+..\..\jam\src\bin.ntx86\bjam clean"
+				Output="..\..\..\..\bin.v2\tools\regression\build\msvc-7.1\release\link-static\compiler_status.exe"/>
+		</Configuration>
+	</Configurations>
+	<References>
+	</References>
+	<Files>
+		<Filter
+			Name="Source Files"
+			Filter="cpp;c;cxx;def;odl;idl;hpj;bat;asm;asmx"
+			UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}">
+			<File
+				RelativePath="..\..\compiler_status.cpp">
+			</File>
+		</Filter>
+	</Files>
+	<Globals>
+	</Globals>
+</VisualStudioProject>

+ 64 - 0
regression/build/vcide/library_status.vcproj

@@ -0,0 +1,64 @@
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+	ProjectType="Visual C++"
+	Version="7.10"
+	Name="library_status"
+	ProjectGUID="{465BDB84-92B5-4C60-AF26-8BD1A61A089E}"
+	Keyword="MakeFileProj">
+	<Platforms>
+		<Platform
+			Name="Win32"/>
+	</Platforms>
+	<Configurations>
+		<Configuration
+			Name="Debug|Win32"
+			OutputDirectory="Debug"
+			IntermediateDirectory="Debug"
+			ConfigurationType="0">
+			<Tool
+				Name="VCNMakeTool"
+				BuildCommandLine="cd ..
+..\..\jam\src\bin.ntx86\bjam library_status variant=debug link=static"
+				ReBuildCommandLine="cd ..
+..\..\jam\src\bin.ntx86\bjam -a library_status variant=debug link=static"
+				CleanCommandLine="cd ..
+..\..\jam\src\bin.ntx86\bjam clean"
+				Output="..\..\..\..\bin.v2\tools\regression\build\msvc-7.1\debug\link-static\library_status.exe"/>
+		</Configuration>
+		<Configuration
+			Name="Release|Win32"
+			OutputDirectory="Release"
+			IntermediateDirectory="Release"
+			ConfigurationType="0">
+			<Tool
+				Name="VCNMakeTool"
+				BuildCommandLine="cd ..
+..\..\jam\src\bin.ntx86\bjam library_status variant=release link=static"
+				ReBuildCommandLine="cd ..
+..\..\jam\src\bin.ntx86\bjam -a library_status variant=release link=static"
+				CleanCommandLine="cd ..
+..\..\jam\src\bin.ntx86\bjam clean"
+				Output="..\..\..\..\bin.v2\tools\regression\build\msvc-7.1\release\link-static\library_status.exe"/>
+		</Configuration>
+	</Configurations>
+	<References>
+	</References>
+	<Files>
+		<Filter
+			Name="Source Files"
+			Filter="cpp;c;cxx;def;odl;idl;hpj;bat;asm;asmx"
+			UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}">
+			<File
+				RelativePath="..\..\library_status.cpp">
+			</File>
+			<File
+				RelativePath="..\..\detail\tiny_xml.cpp">
+			</File>
+			<File
+				RelativePath="..\..\detail\tiny_xml.hpp">
+			</File>
+		</Filter>
+	</Files>
+	<Globals>
+	</Globals>
+</VisualStudioProject>

+ 21 - 0
regression/build/vcide/process_jam_log.sln

@@ -0,0 +1,21 @@
+Microsoft Visual Studio Solution File, Format Version 8.00
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "process_jam_log", "process_jam_log.vcproj", "{9A751791-929F-496A-8DE7-B61020619BFA}"
+	ProjectSection(ProjectDependencies) = postProject
+	EndProjectSection
+EndProject
+Global
+	GlobalSection(SolutionConfiguration) = preSolution
+		Debug = Debug
+		Release = Release
+	EndGlobalSection
+	GlobalSection(ProjectConfiguration) = postSolution
+		{9A751791-929F-496A-8DE7-B61020619BFA}.Debug.ActiveCfg = Debug|Win32
+		{9A751791-929F-496A-8DE7-B61020619BFA}.Debug.Build.0 = Debug|Win32
+		{9A751791-929F-496A-8DE7-B61020619BFA}.Release.ActiveCfg = Release|Win32
+		{9A751791-929F-496A-8DE7-B61020619BFA}.Release.Build.0 = Release|Win32
+	EndGlobalSection
+	GlobalSection(ExtensibilityGlobals) = postSolution
+	EndGlobalSection
+	GlobalSection(ExtensibilityAddIns) = postSolution
+	EndGlobalSection
+EndGlobal

+ 72 - 0
regression/build/vcide/process_jam_log.vcproj

@@ -0,0 +1,72 @@
+<?xml version="1.0" encoding="windows-1251"?>
+<VisualStudioProject
+	ProjectType="Visual C++"
+	Version="7.10"
+	Name="process_jam_log"
+	ProjectGUID="{9A751791-929F-496A-8DE7-B61020619BFA}"
+	RootNamespace="process_jam_log"
+	Keyword="MakeFileProj">
+	<Platforms>
+		<Platform
+			Name="Win32"/>
+	</Platforms>
+	<Configurations>
+		<Configuration
+			Name="Debug|Win32"
+			OutputDirectory="Debug"
+			IntermediateDirectory="Debug"
+			ConfigurationType="0">
+			<Tool
+				Name="VCNMakeTool"
+				BuildCommandLine="cd ..\..\..\..\tools\regression\build
+..\..\jam\src\bin.ntx86\bjam.exe process_jam_log variant=debug
+"
+				ReBuildCommandLine="cd ..\..\..\..\tools\regression\build
+call bjam --v2 msvc-7.1 debug clean
+call bjam --v2 msvc-7.1 debug"
+				CleanCommandLine="cd ..\..\..\..\tools\regression\build
+call bjam --v2 msvc-7.1 debug clean
+"
+				Output="../../../../bin.v2/tools/regression/build/msvc-7.1/debug/link-static/process_jam_log.exe"/>
+		</Configuration>
+		<Configuration
+			Name="Release|Win32"
+			OutputDirectory="Release"
+			IntermediateDirectory="Release"
+			ConfigurationType="0">
+			<Tool
+				Name="VCNMakeTool"
+				BuildCommandLine="cd ..
+..\..\jam\src\bin.ntx86\bjam process_jam_log variant=release link=static"
+				ReBuildCommandLine="cd ..
+..\..\jam\src\bin.ntx86\bjam -a process_jam_log variant=release link=static"
+				CleanCommandLine="cd ..
+..\..\jam\src\bin.ntx86\bjam clean"
+				Output="..\..\..\..\bin.v2\tools\regression\build\msvc-7.1\release\link-static\process_jam_log.exe"/>
+		</Configuration>
+	</Configurations>
+	<References>
+	</References>
+	<Files>
+		<Filter
+			Name="Source Files"
+			Filter="cpp;c;cxx;def;odl;idl;hpj;bat;asm;asmx"
+			UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}">
+		</Filter>
+		<Filter
+			Name="Header Files"
+			Filter="h;hpp;hxx;hm;inl;inc;xsd"
+			UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}">
+		</Filter>
+		<Filter
+			Name="Resource Files"
+			Filter="rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx"
+			UniqueIdentifier="{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}">
+		</Filter>
+		<File
+			RelativePath=".\readme.txt">
+		</File>
+	</Files>
+	<Globals>
+	</Globals>
+</VisualStudioProject>

+ 37 - 0
regression/build/vcide/regression.sln

@@ -0,0 +1,37 @@
+Microsoft Visual Studio Solution File, Format Version 8.00
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "compiler_status", "compiler_status.vcproj", "{81F22EF9-A1B8-46CB-9C2D-56FD4327B942}"
+	ProjectSection(ProjectDependencies) = postProject
+	EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "process_jam_log", "process_jam_log.vcproj", "{9A751791-929F-496A-8DE7-B61020619BFA}"
+	ProjectSection(ProjectDependencies) = postProject
+	EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "library_status", "library_status.vcproj", "{465BDB84-92B5-4C60-AF26-8BD1A61A089E}"
+	ProjectSection(ProjectDependencies) = postProject
+	EndProjectSection
+EndProject
+Global
+	GlobalSection(SolutionConfiguration) = preSolution
+		Debug = Debug
+		Release = Release
+	EndGlobalSection
+	GlobalSection(ProjectConfiguration) = postSolution
+		{81F22EF9-A1B8-46CB-9C2D-56FD4327B942}.Debug.ActiveCfg = Debug|Win32
+		{81F22EF9-A1B8-46CB-9C2D-56FD4327B942}.Debug.Build.0 = Debug|Win32
+		{81F22EF9-A1B8-46CB-9C2D-56FD4327B942}.Release.ActiveCfg = Release|Win32
+		{81F22EF9-A1B8-46CB-9C2D-56FD4327B942}.Release.Build.0 = Release|Win32
+		{9A751791-929F-496A-8DE7-B61020619BFA}.Debug.ActiveCfg = Debug|Win32
+		{9A751791-929F-496A-8DE7-B61020619BFA}.Debug.Build.0 = Debug|Win32
+		{9A751791-929F-496A-8DE7-B61020619BFA}.Release.ActiveCfg = Release|Win32
+		{9A751791-929F-496A-8DE7-B61020619BFA}.Release.Build.0 = Release|Win32
+		{465BDB84-92B5-4C60-AF26-8BD1A61A089E}.Debug.ActiveCfg = Debug|Win32
+		{465BDB84-92B5-4C60-AF26-8BD1A61A089E}.Debug.Build.0 = Debug|Win32
+		{465BDB84-92B5-4C60-AF26-8BD1A61A089E}.Release.ActiveCfg = Release|Win32
+		{465BDB84-92B5-4C60-AF26-8BD1A61A089E}.Release.Build.0 = Release|Win32
+	EndGlobalSection
+	GlobalSection(ExtensibilityGlobals) = postSolution
+	EndGlobalSection
+	GlobalSection(ExtensibilityAddIns) = postSolution
+	EndGlobalSection
+EndGlobal

+ 1017 - 0
regression/compiler_status.cpp

@@ -0,0 +1,1017 @@
+//  Generate Compiler Status HTML from jam regression test output  -----------//
+
+//  Copyright Beman Dawes 2002.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+//  See http://www.boost.org/tools/regression/ for documentation.
+
+/*******************************************************************************
+
+    This program was designed to work unchanged on all platforms and
+    configurations.  All output which is platform or configuration dependent
+    is obtained from external sources such as the .xml file from
+    process_jam_log execution, the tools/build/xxx-tools.jam files, or the
+    output of the config_info tests.
+
+    Please avoid adding platform or configuration dependencies during
+    program maintenance.
+
+*******************************************************************************/
+
+#include "boost/config.hpp"
+#include "boost/filesystem/operations.hpp"
+#include "boost/filesystem/fstream.hpp"
+#include "detail/tiny_xml.hpp"
+namespace fs = boost::filesystem;
+namespace xml = boost::tiny_xml;
+
+#include <cstdlib>  // for abort, exit
+#include <cctype>   // for toupper
+#include <string>
+#include <vector>
+#include <set>
+#include <map>
+#include <algorithm>
+#include <iostream>
+#include <fstream>
+#include <ctime>
+#include <stdexcept>
+#include <cassert>
+
+using std::string;
+
+const string pass_msg( "Pass" );
+const string warn_msg( "<i>Warn</i>" );
+const string fail_msg( "<font color=\"#FF0000\"><i>Fail</i></font>" );
+const string note_msg( "<sup>*</sup>" );
+const string missing_residue_msg( "<i>Missing</i>" );
+
+const std::size_t max_compile_msg_size = 10000;
+
+namespace
+{
+  fs::path boost_root;  // boost-root complete path
+  fs::path locate_root; // locate-root (AKA ALL_LOCATE_TARGET) complete path
+
+  bool compile_time;
+  bool run_time;
+  bool ignore_pass;
+  bool no_warn;
+  bool no_links;
+  bool boost_build_v2 = true;
+
+  fs::path jamfile_path;
+
+  fs::directory_iterator end_itr;
+
+  // It's important for reliability that we find the same compilers for each
+  // test, and that they match the column header.  So save the names at the
+  // time column headings are generated.
+  std::vector<string> toolsets;
+
+  fs::ifstream jamfile;
+  fs::ofstream report;
+  fs::ofstream links_file;
+  string links_name;
+
+  fs::path notes_path;
+  string notes_html;
+
+  fs::path notes_map_path;
+  typedef std::multimap< string, string > notes_map; // key is test_name-toolset,
+                                                // value is note bookmark
+  notes_map notes;
+
+  string specific_compiler; // if running on one toolset only
+
+  const string empty_string;
+
+  // prefix for library and test hyperlink prefix
+  string cvs_root ( "http://boost.cvs.sourceforge.net/" );
+  string url_prefix_dir_view( cvs_root + "boost/boost" );
+  string url_prefix_checkout_view( cvs_root + "*checkout*/boost/boost" );
+  string url_suffix_text_view( "?view=markup&rev=HEAD" );
+
+//  get revision number (as a string) if boost_root is svn working copy  -----//
+
+  string revision( const fs::path & boost_root )
+  {
+    string rev;
+    fs::path entries( boost_root / ".svn" / "entries" );
+    fs::ifstream entries_file( entries );
+    if ( entries_file )
+    {
+      std::getline( entries_file, rev );
+      std::getline( entries_file, rev );
+      std::getline( entries_file, rev );
+      std::getline( entries_file, rev );  // revision number as a string
+    }
+    return rev;
+  }
+
+
+//  build notes_bookmarks from notes HTML  -----------------------------------//
+
+  void build_notes_bookmarks()
+  {
+    if ( notes_map_path.empty() ) return;
+    fs::ifstream notes_map_file( notes_map_path );
+    if ( !notes_map_file )
+    {
+      std::cerr << "Could not open --notes-map input file: " << notes_map_path.string() << std::endl;
+      std::exit( 1 );
+    }
+    string line;
+    while( std::getline( notes_map_file, line ) )
+    {
+      string::size_type pos = 0;
+      if ( (pos = line.find( ',', pos )) == string::npos ) continue;
+      string key(line.substr( 0, pos ) );
+      string bookmark( line.substr( pos+1 ) );
+
+//      std::cout << "inserting \"" << key << "\",\"" << bookmark << "\"\n";
+      notes.insert( notes_map::value_type( key, bookmark ) );
+    }
+  }
+
+//  load_notes_html  ---------------------------------------------------------//
+
+  bool load_notes_html()
+  {
+    if ( notes_path.empty() ) return false;
+    fs::ifstream notes_file( notes_path );
+    if ( !notes_file )
+    {
+      std::cerr << "Could not open --notes input file: " << notes_path.string() << std::endl;
+      std::exit( 1 );
+    }
+    string line;
+    bool in_body( false );
+    while( std::getline( notes_file, line ) )
+    {
+      if ( in_body && line.find( "</body>" ) != string::npos ) in_body = false;
+      if ( in_body ) notes_html += line;
+      else if ( line.find( "<body>" ) ) in_body = true;
+    }
+    return true;
+  }
+
+//  relative path between two paths  -----------------------------------------//
+
+  void relative_path( const fs::path & from, const fs::path & to,
+    fs::path & target )
+  {
+    if ( from.string().size() <= to.string().size() ) return;
+    target /= "..";
+    relative_path( from.branch_path(), to, target );
+    return;
+  }
+
+//  extract object library name from target directory string  ----------------//
+
+  string extract_object_library_name( const string & s )
+  {
+    string t( s );
+    string::size_type pos = t.find( "/build/" );
+    if ( pos != string::npos ) pos += 7;
+    else if ( (pos = t.find( "/test/" )) != string::npos ) pos += 6;
+    else return "";
+    return t.substr( pos, t.find( "/", pos ) - pos );
+  }
+
+//  find_file  ---------------------------------------------------------------//
+//  given a directory to recursively search
+
+  bool find_file( const fs::path & dir_path, const string & name,
+    fs::path & path_found, const string & ignore_dir_named="" )
+  {
+    if ( !fs::exists( dir_path ) ) return false;
+    for ( fs::directory_iterator itr( dir_path ); itr != end_itr; ++itr )
+      if ( fs::is_directory( *itr )
+        && itr->leaf() != ignore_dir_named )
+      {
+        if ( find_file( *itr, name, path_found ) ) return true;
+      }
+      else if ( itr->leaf() == name )
+      {
+        path_found = *itr;
+        return true;
+      }
+    return false;
+  }
+
+//  platform_desc  -----------------------------------------------------------//
+
+  string platform_desc()
+  {
+    string result = BOOST_PLATFORM;
+    result[0] = std::toupper( result[0] );
+    return result;
+  }
+
+//  version_desc  ------------------------------------------------------------//
+//  from locate-root/status/bin/config_info.test/xxx/.../config_info.output
+
+  string version_desc( const string & compiler_name )
+  {
+    string result;
+    fs::path dot_output_path;
+    if ( find_file( locate_root / "bin/boost/status/config_info.test"
+      / compiler_name, "config_info.output", dot_output_path )
+      || find_file( locate_root / "status/bin/config_info.test"
+      / compiler_name, "config_info.output", dot_output_path ) )
+    {
+      fs::ifstream file( dot_output_path );
+      if ( file )
+      {
+        if( std::getline( file, result ) )
+        {
+          string::size_type pos = result.find( "version " );
+          if ( pos != string::npos )
+          {
+            result.erase( 0, pos+8 );
+          }
+          else result.clear();
+        }
+      }
+    }
+    return result;
+  }
+
+//  compiler_desc  -----------------------------------------------------------//
+//  from boost-root/tools/build/xxx-tools.jam
+
+  string compiler_desc( const string & compiler_name )
+  {
+    string result;
+    fs::path tools_path( boost_root / "tools/build/v1" / (compiler_name
+      + "-tools.jam") );
+    if ( !fs::exists( tools_path ) )
+      tools_path = boost_root / "tools/build" / (compiler_name + "-tools.jam");
+    fs::ifstream file( tools_path );
+    if ( file )
+    {
+      while( std::getline( file, result ) )
+      {
+        if ( result.substr( 0, 3 ) == "#//" )
+        {
+          result.erase( 0, 3 );
+          return result;
+        }
+      }
+      result.clear();
+    }
+    return result;
+  }
+
+//  target_directory  --------------------------------------------------------//
+//  this amounts to a request to find a unique leaf directory
+
+  fs::path target_directory( const fs::path & root )
+  {
+    if ( !fs::exists( root ) ) return fs::path("no-such-path");
+    fs::path child;
+    for ( fs::directory_iterator itr( root ); itr != end_itr; ++itr )
+    {
+      if ( fs::is_directory( *itr ) )
+      {
+        // SunCC creates an internal subdirectory everywhere it writes
+        // object files.  This confuses the target_directory() algorithm.
+        // This patch ignores the SunCC internal directory. Jens Maurer
+        if ( (*itr).leaf() == "SunWS_cache" ) continue;
+        // SGI does something similar for template instantiations. Jens Maurer
+        if(  (*itr).leaf() == "ii_files" ) continue; 
+
+        if ( child.empty() ) child = *itr;
+        else
+        {
+          std::cout << "Warning: only first of two target possibilities will be reported for: \n "
+            << root.string() << ": " << child.leaf()
+            << " and " << (*itr).leaf() << "\n";
+        }
+      }
+    }
+    if ( child.empty() ) return root; // this dir has no children
+    return target_directory( child );
+  }
+
+//  element_content  ---------------------------------------------------------//
+
+  const string & element_content(
+    const xml::element & root, const string & name )
+  {
+    static string empty_string;
+    xml::element_list::const_iterator itr;
+    for ( itr = root.elements.begin();
+          itr != root.elements.end() && (*itr)->name != name;
+          ++itr ) {}
+    return itr != root.elements.end() ? (*itr)->content : empty_string;
+  }
+
+//  find_element  ------------------------------------------------------------//
+
+  const xml::element empty_element;
+
+  const xml::element & find_element(
+    const xml::element & root, const string & name )
+  {
+    xml::element_list::const_iterator itr;
+    for ( itr = root.elements.begin();
+          itr != root.elements.end() && (*itr)->name != name;
+          ++itr ) {}
+    return itr != root.elements.end() ? *((*itr).get()) : empty_element;
+  }
+
+//  attribute_value  ----------------------------------------------------------//
+
+const string & attribute_value( const xml::element & element,
+                                const string & attribute_name )
+{
+  static const string empty_string;
+  xml::attribute_list::const_iterator atr;
+  for ( atr = element.attributes.begin();
+        atr != element.attributes.end() && atr->name != attribute_name;
+        ++atr ) {}
+  return atr == element.attributes.end() ? empty_string : atr->value;
+}
+
+//  find_bin_path  -----------------------------------------------------------//
+
+// Takes a relative path from boost root to a Jamfile.
+// Returns the directory where the build targets from
+// that Jamfile are located. If not found, emits a warning 
+// and returns empty path.
+const fs::path find_bin_path(const string& relative)
+{
+  fs::path bin_path;
+  if (boost_build_v2)
+  {
+    bin_path = locate_root / "bin.v2" / relative;
+    if (!fs::exists(bin_path))
+    {
+      std::cerr << "warning: could not find build results for '" 
+                << relative << "'.\n";
+      std::cerr << "warning: tried directory " 
+                << bin_path.native_directory_string() << "\n";
+      bin_path = "";
+    }
+  }
+  else
+  {
+    bin_path = locate_root / "bin/boost" / relative;
+    if (!fs::exists(bin_path))
+    {
+      bin_path = locate_root / "bin" / relative / "bin";
+      if (!fs::exists(bin_path))
+      {
+        bin_path = fs::path( locate_root / relative / "bin" );
+        if (!fs::exists(bin_path))
+        {
+          bin_path = fs::path( locate_root / "bin/boost/libs" /
+            relative.substr( relative.find( '/' )+1 ) );
+        }
+      }
+    }
+    if (!fs::exists(bin_path))
+    {
+      std::cerr << "warning: could not find build results for '" 
+                << relative << "'.\n";
+      bin_path = "";
+    }
+  }
+  return bin_path;
+}
+
+
+//  generate_report  ---------------------------------------------------------//
+
+  // return 0 if nothing generated, 1 otherwise, except 2 if compiler msgs
+  int generate_report( const xml::element & db,
+                       const string & source_library_name,
+                       const string & test_type,
+                       const string & test_name, // possibly object library name
+                       const string & toolset,
+                       bool pass,
+                       bool always_show_run_output = false )
+  {
+    // compile msgs sometimes modified, so make a local copy
+    string compile( ((pass && no_warn)
+      ? empty_string :  element_content( db, "compile" )) );
+
+    const string & link( pass ? empty_string : element_content( db, "link" ) );
+    const string & run( (pass && !always_show_run_output)
+      ? empty_string : element_content( db, "run" ) );
+    string lib( (pass ? empty_string : element_content( db, "lib" )) );
+
+    // some compilers output the filename even if there are no errors or
+    // warnings; detect this if one line of output and it contains no space.
+    string::size_type pos = compile.find( '\n', 1 );
+    if ( pos != string::npos && compile.size()-pos <= 2
+        && compile.find( ' ' ) == string::npos ) compile.clear();
+
+    if ( lib.empty()
+      && (compile.empty() || test_type == "compile_fail")
+      && link.empty() && run.empty() ) return 0;
+
+    int result = 1; // some kind of msg for sure
+
+    // limit compile message length
+    if ( compile.size() > max_compile_msg_size )
+    {
+      compile.erase( max_compile_msg_size );
+      compile += "...\n   (remainder deleted because of excessive size)\n";
+    }
+
+    links_file << "<h2><a name=\""
+      << source_library_name << "-" << test_name << "-" << toolset << "\">"
+      << source_library_name << " - " << test_name << " - " << toolset << "</a></h2>\n";
+
+    if ( !compile.empty() )
+    {
+      ++result;
+      links_file << "<h3>Compiler output:</h3><pre>"
+        << compile << "</pre>\n";
+    }
+    if ( !link.empty() )
+      links_file << "<h3>Linker output:</h3><pre>" << link << "</pre>\n";
+    if ( !run.empty() )
+      links_file << "<h3>Run output:</h3><pre>" << run << "</pre>\n";
+
+    // for an object library failure, generate a reference to the object
+    // library failure message, and (once only) generate the object
+    // library failure message itself
+    static std::set< string > failed_lib_target_dirs; // only generate once
+    if ( !lib.empty() )
+    {
+      if ( lib[0] == '\n' ) lib.erase( 0, 1 );
+      string object_library_name( extract_object_library_name( lib ) );
+
+      // changing the target directory naming scheme breaks
+      // extract_object_library_name()
+      assert( !object_library_name.empty() );
+      if ( object_library_name.empty() )
+        std::cerr << "Failed to extract object library name from " << lib << "\n";
+
+      links_file << "<h3>Library build failure: </h3>\n"
+        "See <a href=\"#"
+        << source_library_name << "-"
+        << object_library_name << "-" << toolset << "\">"
+        << source_library_name << " - "
+        << object_library_name << " - " << toolset << "</a>";
+
+      if ( failed_lib_target_dirs.find( lib ) == failed_lib_target_dirs.end() )
+      {
+        failed_lib_target_dirs.insert( lib );
+        fs::path pth( locate_root / lib / "test_log.xml" );
+        fs::ifstream file( pth );
+        if ( file )
+        {
+          xml::element_ptr db = xml::parse( file, pth.string() );
+          generate_report( *db, source_library_name, test_type, object_library_name, toolset, false );
+        }
+        else
+        {
+          links_file << "<h2><a name=\""
+            << object_library_name << "-" << toolset << "\">"
+            << object_library_name << " - " << toolset << "</a></h2>\n"
+            "test_log.xml not found\n";
+        }
+      }
+    }
+    return result;
+  }
+
+  //  add_notes --------------------------------------------------------------//
+
+  void add_notes( const string & key, bool fail, string & sep, string & target )
+  {
+    notes_map::const_iterator itr = notes.lower_bound( key );
+    if ( itr != notes.end() && itr->first == key )
+    {
+      for ( ; itr != notes.end() && itr->first == key; ++itr )
+      {
+        string note_desc( itr->second[0] == '-'
+          ? itr->second.substr( 1 ) : itr->second );
+        if ( fail || itr->second[0] == '-' )
+        {
+          target += sep;
+          sep = ",";
+          target += "<a href=\"";
+          target += "#";
+          target += note_desc;
+          target += "\">";
+          target += note_desc;
+          target += "</a>";
+        }
+      }
+    }
+  }
+
+  //  get_notes  -------------------------------------------------------------//
+
+  string get_notes( const string & toolset,
+                    const string & library, const string & test, bool fail )
+  {
+    string sep;
+    string target( "<sup>" );
+    add_notes( toolset + "/" + library + "/" + test, fail, sep, target ); 
+    add_notes( "*/" + library + "/" + test, fail, sep, target ); 
+    add_notes( toolset + "/" + library + "/*", fail, sep, target ); 
+    add_notes( "*/" + library + "/*", fail, sep, target );
+    if ( target == "<sup>" ) target.clear();
+    else target += "</sup>";
+    return target;
+  }
+
+  //  do_cell  ---------------------------------------------------------------//
+
+  bool do_cell( const string & lib_name,
+    const fs::path & test_dir,
+    const string & test_type,
+    const string & test_name,
+    const string & toolset,
+    string & target,
+    bool always_show_run_output )
+  // return true if any results except simple pass_msg
+  {
+    fs::path target_dir( target_directory( test_dir / toolset ) );
+    bool pass = false;
+
+    if ( !fs::exists( target_dir / "test_log.xml" ) )
+    {
+      std::cerr << "Missing jam_log.xml in target:\n "
+        << target_dir.string() << "\n";
+      target += "<td>" + missing_residue_msg + "</td>";
+      return true;
+    }
+
+    int anything_generated = 0;
+    bool note = false;
+
+    fs::path pth( target_dir / "test_log.xml" );
+    fs::ifstream file( pth );
+    if ( !file ) // could not open jam_log.xml
+    {
+      std::cerr << "Can't open jam_log.xml in target:\n "
+        << target_dir.string() << "\n";
+      target += "<td>" + missing_residue_msg + "</td>";
+      return false;
+    }
+
+    xml::element_ptr dbp = xml::parse( file, pth.string() );
+    const xml::element & db( *dbp );
+
+    std::string test_type_base( test_type );
+    if ( test_type_base == "run_pyd" ) test_type_base = "run";
+    else if ( test_type_base.size() > 5 )
+    {
+      const string::size_type trailer = test_type_base.size() - 5;
+      if ( test_type_base.substr( trailer ) == "_fail" )
+      {
+        test_type_base.erase( trailer );
+      }
+    }
+    const xml::element & test_type_element( find_element( db, test_type_base ) );
+
+    pass = !test_type_element.name.empty()
+      && attribute_value( test_type_element, "result" ) != "fail";
+
+    if ( !no_links )
+    {
+      note = attribute_value( test_type_element, "result" ) == "note";
+
+      // generate bookmarked report of results, and link to it
+      anything_generated
+        = generate_report( db, lib_name, test_type, test_name, toolset, pass,
+          always_show_run_output || note );
+    }
+
+    target += "<td>";
+
+    // generate the status table cell pass/warn/fail HTML
+    if ( anything_generated != 0 )
+    {
+      target += "<a href=\"";
+      target += links_name;
+      target += "#";
+      target += lib_name;
+      target += "-";
+      target += test_name;
+      target += "-";
+      target += toolset;
+      target += "\">";
+      target += pass
+        ? (anything_generated < 2 ? pass_msg : warn_msg)
+        : fail_msg;
+      target += "</a>";
+      if ( pass && note ) target += note_msg;
+    }
+    else  target += pass ? pass_msg : fail_msg;
+
+    // if notes, generate the superscript HTML
+    if ( !notes.empty() ) 
+      target += get_notes( toolset, lib_name, test_name, !pass );
+
+    // generate compile-time if requested
+    if ( compile_time )
+    {
+      const xml::element & compile_element( find_element( db, "compile" ) );
+
+      if ( !compile_element.name.empty() )
+      {
+        string times = attribute_value( compile_element, "timings" );
+        if ( !times.empty() )
+        {
+          target += "<br>";
+          target += times.substr( 0, times.find( " " ) );
+        }
+      }
+    }
+      
+    // generate run-time if requested
+    if ( run_time )
+    {
+      const xml::element & run_element( find_element( db, "run" ) );
+
+      if ( !run_element.name.empty() )
+      {
+        string times = attribute_value( run_element, "timings" );
+        if ( !times.empty() )
+        {
+          target += "<br>";
+          target += times.substr( 0, times.find( " " ) );
+        }
+      }
+    }
+      
+    target += "</td>";
+    return (anything_generated != 0) || !pass;
+  }
+
+//  do_row  ------------------------------------------------------------------//
+
+  void do_row(
+    const fs::path & test_dir, // locate_root / "status/bin/any_test.test"
+    const string & test_name, // "any_test"
+    string & target )
+  {
+    // get library name, test-type, test-program path, etc., from the .xml file
+    string lib_name;
+    string test_path( test_name ); // test_name is default if missing .test
+    string test_type( "unknown" );
+    bool always_show_run_output( false );
+    fs::path xml_file_path;
+    if ( find_file( test_dir, "test_log.xml", xml_file_path ) )
+    {
+      fs::ifstream file( xml_file_path );
+      if ( file )
+      {
+        xml::element_ptr dbp = xml::parse( file, xml_file_path.string() );
+        const xml::element & db( *dbp );
+        test_path = attribute_value( db, "test-program" );
+        lib_name = attribute_value( db, "library" );
+        test_type = attribute_value( db, "test-type" );
+        always_show_run_output
+          = attribute_value( db, "show-run-output" ) == "true";
+      }
+    }
+
+    // generate the library name, test name, and test type table data
+    string::size_type row_start_pos = target.size();
+    target += "<tr><td><a href=\"" + url_prefix_dir_view + "/libs/" + lib_name
+      + "\">"  + lib_name  + "</a></td>";
+    target += "<td><a href=\"" + url_prefix_checkout_view + "/" + test_path
+      + url_suffix_text_view + "\">" + test_name + "</a>";
+
+    if ( compile_time ) target += "<br> Compile time:";
+    if ( run_time ) target += "<br> Run time:";
+
+    target += "</td>";
+    target += "<td>" + test_type + "</td>";
+
+    bool no_warn_save = no_warn;
+    //if ( test_type.find( "fail" ) != string::npos ) no_warn = true;
+
+    // for each compiler, generate <td>...</td> html
+    bool anything_to_report = false;
+    for ( std::vector<string>::const_iterator itr=toolsets.begin();
+      itr != toolsets.end(); ++itr )
+    {
+      anything_to_report |= do_cell( lib_name, test_dir, test_type, test_name, *itr, target,
+        always_show_run_output );
+    }
+
+    target += "</tr>";
+    if ( ignore_pass && !anything_to_report ) target.erase( row_start_pos );
+    no_warn = no_warn_save;
+  }
+
+//  do_rows_for_sub_tree  ----------------------------------------------------//
+
+  void do_rows_for_sub_tree(
+    const fs::path & bin_dir, std::vector<string> & results )
+  {
+    for ( fs::directory_iterator itr( bin_dir ); itr != end_itr; ++itr )
+    {
+      if ( fs::is_directory( *itr )
+        && itr->string().find( ".test" ) == (itr->string().size()-5) )
+      {
+        results.push_back( std::string() ); 
+        do_row( *itr,
+                itr->leaf().substr( 0, itr->leaf().size()-5 ),
+                results[results.size()-1] );
+      }
+    }
+  }
+
+//  do_table_body  -----------------------------------------------------------//
+
+  void do_table_body( const fs::path & bin_dir )
+  {
+    // rows are held in a vector so they can be sorted, if desired.
+    std::vector<string> results;
+
+    // do primary bin directory
+    do_rows_for_sub_tree( bin_dir, results );
+
+    // do subinclude bin directories
+    jamfile.clear();
+    jamfile.seekg(0);
+    string line;
+    while( std::getline( jamfile, line ) )
+    {
+      bool v2(false);
+      string::size_type pos( line.find( "subinclude" ) );
+      if ( pos == string::npos ) {
+        pos = line.find( "build-project" );
+        v2 = true;
+      }
+      if ( pos != string::npos
+        && line.find( '#' ) > pos )
+      {
+        if (v2)
+          pos = line.find_first_not_of( " \t./", pos+13 );
+        else
+          pos = line.find_first_not_of( " \t./", pos+10 );
+      
+        if ( pos == string::npos ) continue;
+        string subinclude_bin_dir(
+          line.substr( pos, line.find_first_of( " \t", pos )-pos ) );
+
+        fs::path bin_path = find_bin_path(subinclude_bin_dir);
+        if (!bin_path.empty())
+          do_rows_for_sub_tree( bin_path, results );
+      }
+    }
+
+
+    std::sort( results.begin(), results.end() );
+
+    for ( std::vector<string>::iterator v(results.begin());
+      v != results.end(); ++v )
+      { report << *v << "\n"; }
+  }
+
+//  do_table  ----------------------------------------------------------------//
+
+  void do_table()
+  {
+    // Find test result locations, trying:
+    // - Boost.Build V1 location with ALL_LOCATE_TARGET
+    // - Boost.Build V2 location with top-lelve "build-dir" 
+    // - Boost.Build V1 location without ALL_LOCATE_TARGET
+    string relative( fs::initial_path().string() );
+    relative.erase( 0, boost_root.string().size()+1 );    
+    fs::path bin_path = find_bin_path(relative);
+
+    report << "<table border=\"1\" cellspacing=\"0\" cellpadding=\"5\">\n";
+
+    // generate the column headings
+
+    report << "<tr><td>Library</td><td>Test Name</td>\n"
+      "<td><a href=\"compiler_status.html#test-type\">Test Type</a></td>\n";
+
+    fs::directory_iterator itr( bin_path );
+    while ( itr != end_itr 
+      && ((itr->string().find( ".test" ) != (itr->string().size()-5))
+      || !fs::is_directory( *itr )))
+      ++itr; // bypass chaff
+    if ( itr != end_itr )
+    {
+      fs::directory_iterator compiler_itr( *itr );
+      if ( specific_compiler.empty() )
+        std::clog << "Using " << itr->string() << " to determine compilers\n";
+      for (; compiler_itr != end_itr; ++compiler_itr )
+      {
+        if ( fs::is_directory( *compiler_itr )  // check just to be sure
+          && compiler_itr->leaf() != "test" ) // avoid strange directory (Jamfile bug?)
+        {
+          if ( specific_compiler.size() != 0
+            && specific_compiler != compiler_itr->leaf() ) continue;
+          toolsets.push_back( compiler_itr->leaf() );
+          string desc( compiler_desc( compiler_itr->leaf() ) );
+          string vers( version_desc( compiler_itr->leaf() ) );
+          report << "<td>"
+               << (desc.size() ? desc : compiler_itr->leaf())
+               << (vers.size() ? (string( "<br>" ) + vers ) : string( "" ))
+               << "</td>\n";
+        }
+      }
+    }
+
+    report << "</tr>\n";
+
+    // now the rest of the table body
+
+    do_table_body( bin_path );
+
+    report << "</table>\n";
+  }
+
+} // unnamed namespace
+
+//  main  --------------------------------------------------------------------//
+
+#define BOOST_NO_CPP_MAIN_SUCCESS_MESSAGE
+#include <boost/test/included/prg_exec_monitor.hpp>
+
+int cpp_main( int argc, char * argv[] ) // note name!
+{
+  fs::path comment_path;
+  while ( argc > 1 && *argv[1] == '-' )
+  {
+    if ( argc > 2 && std::strcmp( argv[1], "--compiler" ) == 0 )
+      { specific_compiler = argv[2]; --argc; ++argv; }
+    else if ( argc > 2 && std::strcmp( argv[1], "--locate-root" ) == 0 )
+      { locate_root = fs::path( argv[2], fs::native ); --argc; ++argv; }
+    else if ( argc > 2 && std::strcmp( argv[1], "--comment" ) == 0 )
+      { comment_path = fs::path( argv[2], fs::native ); --argc; ++argv; }
+    else if ( argc > 2 && std::strcmp( argv[1], "--notes" ) == 0 )
+      { notes_path = fs::path( argv[2], fs::native ); --argc; ++argv; }
+    else if ( argc > 2 && std::strcmp( argv[1], "--notes-map" ) == 0 )
+      { notes_map_path = fs::path( argv[2], fs::native ); --argc; ++argv; }
+    else if ( std::strcmp( argv[1], "--ignore-pass" ) == 0 ) ignore_pass = true;
+    else if ( std::strcmp( argv[1], "--no-warn" ) == 0 ) no_warn = true;
+    else if ( std::strcmp( argv[1], "--v1" ) == 0 ) boost_build_v2 = false;
+    else if ( std::strcmp( argv[1], "--v2" ) == 0 ) boost_build_v2 = true;
+    else if ( argc > 2 && std::strcmp( argv[1], "--jamfile" ) == 0)
+      { jamfile_path = fs::path( argv[2], fs::native ); --argc; ++argv; }
+    else if ( std::strcmp( argv[1], "--compile-time" ) == 0 ) compile_time = true;
+    else if ( std::strcmp( argv[1], "--run-time" ) == 0 ) run_time = true;
+    else { std::cerr << "Unknown option: " << argv[1] << "\n"; argc = 1; }
+    --argc;
+    ++argv;
+  }
+
+  if ( argc != 3 && argc != 4 )
+  {
+    std::cerr <<
+      "Usage: compiler_status [options...] boost-root status-file [links-file]\n"
+      "  boost-root is the path to the boost tree root directory.\n"
+      "  status-file and links-file are paths to the output files.\n"
+      "Must be run from directory containing Jamfile\n"
+      "  options: --compiler name     Run for named compiler only\n"
+      "           --ignore-pass       Do not report tests which pass all compilers\n"
+      "           --no-warn           Warnings not reported if test passes\n"
+      "           --locate-root path  Path to ALL_LOCATE_TARGET for bjam;\n"
+      "                               default boost-root.\n"
+      "           --comment path      Path to file containing HTML\n"
+      "                               to be copied into status-file.\n"
+      "           --notes path        Path to file containing HTML\n"
+      "                               to be copied into status-file.\n"
+      "           --notes-map path    Path to file of toolset/test,n lines, where\n"
+      "                               n is number of note bookmark in --notes file.\n"
+      "           --jamfile path      Path to Jamfile. By default \"Jamfile\".\n"
+      "           --v1                Assume Boost.Build version 1.\n"
+      "           --v2                Assume Boost.Build version 2. (default)\n"
+      "           --ignore-pass       Ignore passing tests.\n"
+      "           --no-warn           Do not report warnings.\n"
+      "           --compile-time      Show compile time.\n"
+      "           --run-time          Show run time.\n"
+      "Example: compiler_status --compiler gcc /boost-root cs.html cs-links.html\n"
+      "Note: Only the leaf of the links-file path and --notes file string are\n"
+      "used in status-file HTML links. Thus for browsing, status-file,\n"
+      "links-file, and --notes file must all be in the same directory.\n"
+      ;
+    return 1;
+  }
+
+  boost_root = fs::path( argv[1], fs::native );
+  if ( locate_root.empty() ) locate_root = boost_root;
+  
+  if (jamfile_path.empty())
+    if (boost_build_v2)
+      jamfile_path = "Jamfile.v2";
+    else
+      jamfile_path = "Jamfile";
+  jamfile_path = fs::complete( jamfile_path, fs::initial_path() );
+  jamfile.open( jamfile_path );
+  if ( !jamfile )
+  {
+    std::cerr << "Could not open Jamfile: " << jamfile_path.native_file_string() << std::endl;
+    return 1;
+  }
+
+  report.open( fs::path( argv[2], fs::native ) );
+  if ( !report )
+  {
+    std::cerr << "Could not open report output file: " << argv[2] << std::endl;
+    return 1;
+  }
+
+  if ( argc == 4 )
+  {
+    fs::path links_path( argv[3], fs::native );
+    links_name = links_path.leaf();
+    links_file.open( links_path );
+    if ( !links_file )
+    {
+      std::cerr << "Could not open links output file: " << argv[3] << std::endl;
+      return 1;
+    }
+  }
+  else no_links = true;
+
+  build_notes_bookmarks();
+
+  char run_date[128];
+  std::time_t tod;
+  std::time( &tod );
+  std::strftime( run_date, sizeof(run_date),
+    "%X UTC, %A %d %B %Y", std::gmtime( &tod ) );
+
+  std::string rev = revision( boost_root );
+
+  report << "<html>\n"
+          "<head>\n"
+          "<title>Boost Test Results</title>\n"
+          "</head>\n"
+          "<body bgcolor=\"#ffffff\" text=\"#000000\">\n"
+          "<table border=\"0\">\n"
+          "<tr>\n"
+          "<td><img border=\"0\" src=\"http://www.boost.org/boost.png\" width=\"277\" "
+          "height=\"86\"></td>\n"
+          "<td>\n"
+          "<h1>Boost Test Results - " + platform_desc() + "</h1>\n"
+          "<b>Run</b> "
+       << run_date;
+  if ( !rev.empty() ) report << ", <b>Revision</b> " << rev;
+  report << "\n";
+
+  
+  if ( compile_time )
+    report << "<p>Times reported are elapsed wall clock time in seconds.</p>\n";
+
+
+  if ( !comment_path.empty() )
+  {
+    fs::ifstream comment_file( comment_path );
+    if ( !comment_file )
+    {
+      std::cerr << "Could not open \"--comment\" input file: " << comment_path.string() << std::endl;
+      return 1;
+    }
+    char c;
+    while ( comment_file.get( c ) ) { report.put( c ); }
+  }
+
+  report << "</td>\n</table>\n<br>\n";
+
+  if ( !no_links )
+  {
+    links_file
+      << "<html>\n"
+         "<head>\n"
+         "<title>Boost Test Details</title>\n"
+         "</head>\n"
+         "<body bgcolor=\"#ffffff\" text=\"#000000\">\n"
+         "<table border=\"0\">\n"
+         "<tr>\n"
+         "<td><img border=\"0\" src=\"http://www.boost.org/boost.png\" width=\"277\" "
+         "height=\"86\"></td>\n"
+         "<td>\n"
+         "<h1>Boost Test Details - " + platform_desc() + "</h1>\n"
+         "<b>Run Date:</b> "
+      << run_date;
+    if ( !rev.empty() ) links_file << ", <b>Revision</b> " << rev;
+    links_file << "\n</td>\n</table>\n<br>\n";
+  }
+
+  do_table();
+
+  if ( load_notes_html() ) report << notes_html << "\n";
+
+  report << "</body>\n"
+          "</html>\n"
+          ;
+
+  if ( !no_links )
+  {
+    links_file
+      << "</body>\n"
+         "</html>\n"
+         ;
+  }
+  return 0;
+}

+ 167 - 0
regression/detail/tiny_xml.cpp

@@ -0,0 +1,167 @@
+//  tiny XML sub-set tools implementation  -----------------------------------//
+
+//  (C) Copyright Beman Dawes 2002.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+#include "tiny_xml.hpp"
+#include <cassert>
+#include <cstring>
+
+namespace
+{
+
+  void eat_whitespace( char & c, std::istream & in )
+  {
+    while ( c == ' ' || c == '\r' || c == '\n' || c == '\t' )
+      in.get( c );
+  }
+
+  std::string get_name( char & c, std::istream & in )
+  {
+    std::string result;
+    eat_whitespace( c, in );
+    while ( std::strchr(
+      "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.", c )
+      != 0 )
+    {
+      result += c;
+      if(!in.get( c ))
+        throw std::string("xml: unexpected eof");
+    }
+    return result;
+  }
+
+  void eat_delim( char & c, std::istream & in,
+                  char delim, const std::string & msg )
+  {
+    eat_whitespace( c, in );
+    if ( c != delim )
+      throw std::string("xml syntax error, expected ") + delim
+       + " (" + msg + ")";
+    in.get( c );
+  }
+
+  std::string get_value( char & c, std::istream & in )
+  {
+    std::string result;
+    while ( c != '\"' )
+    {
+      result += c;
+      in.get( c );
+    }
+    in.get( c );
+    return result;
+  }
+
+}
+
+namespace boost
+{
+  namespace tiny_xml
+  {
+
+  //  parse  -----------------------------------------------------------------//
+
+    element_ptr parse( std::istream & in, const std::string & msg )
+    {
+      char c = 0;  // current character
+      element_ptr e( new element );
+
+      if(!in.get( c ))
+        throw std::string("xml: unexpected eof");
+      if ( c == '<' )
+        if(!in.get( c ))
+          throw std::string("xml: unexpected eof");
+
+      e->name = get_name( c, in );
+      eat_whitespace( c, in );
+
+      // attributes
+      while ( c != '>' )
+      {
+        attribute a;
+        a.name = get_name( c, in );
+
+        eat_delim( c, in, '=', msg );
+        eat_delim( c, in, '\"', msg );
+
+        a.value = get_value( c, in );
+
+        e->attributes.push_back( a );
+        eat_whitespace( c, in );
+      }
+      if(!in.get( c )) // next after '>'
+        throw std::string("xml: unexpected eof");
+
+      eat_whitespace( c, in );
+
+      // sub-elements
+      while ( c == '<' )
+      {
+        if ( in.peek() == '/' ) break;
+        e->elements.push_back( parse( in, msg ) );
+        in.get( c ); // next after '>'
+        eat_whitespace( c, in );
+      }
+
+      // content
+      if ( c != '<' )
+      {
+        e->content += '\n';
+        while ( c != '<' )
+        {
+          e->content += c;
+          if(!in.get( c ))
+            throw std::string("xml: unexpected eof");
+        }
+      }
+
+      assert( c == '<' );
+      if(!in.get( c )) // next after '<'
+        throw std::string("xml: unexpected eof");
+
+      eat_delim( c, in, '/', msg );
+      std::string end_name( get_name( c, in ) );
+      if ( e->name != end_name )
+        throw std::string("xml syntax error: beginning name ")
+          + e->name + " did not match end name " + end_name
+          + " (" + msg + ")";
+
+      eat_delim( c, in, '>', msg );
+      return e;
+    }
+
+    //  write  ---------------------------------------------------------------//
+
+    void write( const element & e, std::ostream & out )
+    {
+      out << "<" << e.name;
+      if ( !e.attributes.empty() )
+      {
+        for( attribute_list::const_iterator itr = e.attributes.begin();
+             itr != e.attributes.end(); ++itr )
+        {
+          out << " " << itr->name << "=\"" << itr->value << "\"";
+        }
+      }
+      out << ">";
+      if ( !e.elements.empty() )
+      {
+        out << "\n";
+        for( element_list::const_iterator itr = e.elements.begin();
+             itr != e.elements.end(); ++itr )
+        {
+          write( **itr, out );
+        }
+      }
+      if ( !e.content.empty() )
+      {
+        out << e.content;
+      }
+      out << "</" << e.name << ">\n";
+    }
+
+  } // namespace tiny_xml
+} // namespace boost
+

+ 70 - 0
regression/detail/tiny_xml.hpp

@@ -0,0 +1,70 @@
+//  tiny XML sub-set tools  --------------------------------------------------//
+
+//  (C) Copyright Beman Dawes 2002.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+//  Provides self-contained tools for this XML sub-set:
+//
+//    element ::= { "<" name { name "=" "\"" value "\"" } ">"
+//                  {element} [contents] "</" name ">" }
+//
+//  The point of "self-contained" is to minimize tool-chain dependencies.
+
+#ifndef BOOST_TINY_XML_H
+#define BOOST_TINY_XML_H
+
+#include "boost/smart_ptr.hpp" // for shared_ptr
+#include "boost/utility.hpp"   // for noncopyable
+#include <list>
+#include <iostream>
+#include <string>
+
+namespace boost
+{
+  namespace tiny_xml
+  {
+    class element;
+    struct attribute
+    {
+      std::string name;
+      std::string value;
+
+      attribute(){}
+      attribute( const std::string & name, const std::string & value )
+        : name(name), value(value) {}
+    };
+    typedef boost::shared_ptr< element >  element_ptr;
+    typedef std::list< element_ptr  >     element_list;
+    typedef std::list< attribute >        attribute_list;
+
+    class element
+      : private boost::noncopyable  // because deep copy sematics would be required
+    {
+     public:
+      std::string     name;
+      attribute_list  attributes;
+      element_list    elements;
+      std::string     content;
+
+      element() {}
+      explicit element( const std::string & name ) : name(name) {}
+    };
+
+    element_ptr parse( std::istream & in, const std::string & msg );
+    // Precondition: stream positioned at either the initial "<"
+    // or the first character after the initial "<".
+    // Postcondition: stream positioned at the first character after final
+    //  ">" (or eof).
+    // Returns: an element_ptr to an element representing the parsed stream.
+    // Throws: std::string on syntax error. msg appended to what() string.
+
+    void write( const element & e, std::ostream & out );
+
+  }
+}
+
+#endif  // BOOST_TINY_XML_H
+
+
+

+ 17 - 0
regression/detail/tiny_xml_test.cpp

@@ -0,0 +1,17 @@
+//  tiny XML test program  ---------------------------------------------------//
+
+//  Copyright Beman Dawes 2002.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+#include "tiny_xml.hpp"
+
+#include <iostream>
+
+int main()
+{
+  boost::tiny_xml::element_ptr tree( boost::tiny_xml::parse( std::cin ) );
+  boost::tiny_xml::write( *tree, std::cout );
+  return 0;
+}
+

+ 12 - 0
regression/detail/tiny_xml_test.txt

@@ -0,0 +1,12 @@
+<root>
+<element-1 at-1="abcd" at-2 = "defg" >
+<element-1a>
+It's Howdy Doody time!
+</element-1a>
+<element-1b>It's not Howdy Doody time!</element-1b>
+</element-1>
+<element-2>
+It's
+Eastern Standard time!
+</element-2>
+</root>

+ 48 - 0
regression/index.htm

@@ -0,0 +1,48 @@
+<html>
+
+<head>
+<meta http-equiv="Content-Language" content="en-us">
+<meta http-equiv="Content-Type"
+content="text/html; charset=iso-8859-1">
+<meta name="ProgId" content="FrontPage.Editor.Document">
+<meta name="GENERATOR" content="Microsoft FrontPage Express 2.0">
+<title>Regression Test Reporting Tools</title>
+</head>
+
+<body bgcolor="#FFFFFF">
+
+<h1><img src="../../boost.png" alt="boost.png (6897 bytes)"
+align="center" width="277" height="86"> Regression Test Reporting
+Tools</h1>
+
+<p>Boost regression testing uses <a href="../build/index.html">Boost.Build</a>
+to run the actual builds and tests. A separate set of tools is
+used to generate the actual status reports.</p>
+
+<ul>
+    <li><a
+          href="xsl_reports/runner/instructions.html">Instructions</a>
+        for running regression tests and reporting results back to
+        Boost.</li> 
+    <li><a href="process_jam_log.cpp">process_jam_log.cpp</a> -
+        Processes the bjam outputs, creating a file named
+        test_log.xml for each test encountered.</li>
+    <li><a href="compiler_status.cpp">compiler_status.cpp</a> -
+        Generates HTML status tables from test_log.xml and other
+        files.</li>
+    <li><a href="build/Jamfile.v2">Jamfile.v2</a> - Builds
+        process_jam_log and compiler_status executables.</li>
+    <li><a href="library_status.html">Library Status</a> - Runs test
+        programs for one or all boost libraries on
+        your local installation and generates complete tables
+        to show which combinations of libraries, compilers,
+        compiler settings pass and fail at your local installation.</li>
+</ul>
+
+<hr>
+
+<p>Revised <!--webbot bot="Timestamp" startspan s-type="EDITED"
+s-format="%d %B, %Y" -->09 January, 2003<!--webbot bot="Timestamp"
+i-checksum="38582" endspan --></p>
+</body>
+</html>

+ 45 - 0
regression/index.shtml

@@ -0,0 +1,45 @@
+<!--
+Copyright Rene Rivera 2003-2004.
+
+Distributed under the Boost Software License, Version 1.0.
+(See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
+
+This is the page wrapper for the summary table of regression tests.
+This calls the script "regression-logs.pl" to generate the summary
+table. This file should be place at:
+    http://boost.sourceforge.net/regression-logs/index.shtml
+Which is at this location in the shell SourceForge services:
+    /home/groups/b/bo/boost/htdocs/regression-logs/index.shtml
+
+The regression-logs.pl script handles individual placement
+of the calling shtml file, so this file can be placed anywhere
+one wants a regression summary page, and has regression files
+in it.
+-->
+<html>
+<head>
+<title>Boost Regression Tests</title>
+<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+</head>
+
+<body bgcolor="#FFFFFF" text="#000000">
+<table border="0">
+<tr>
+<td><img border="0" src="../../boost.png" width="277" height="86"></td>
+<td><h1>Boost Regression Tests</h1></td>
+</tr>
+</table>
+
+<h3>Boost-wide reports</h3>
+<ul type="square">
+<li><a href="http://www.boost.org/regression-logs/developer">Developer report</a></li>
+<li><a href="http://www.boost.org/regression-logs/user">User report</a></li>
+<li><a href="http://www.boost.org/regression-logs/inspection_report.html">Inspection report</a></li>
+<li><a href="http://www.boost.org/regression-logs/license_report.html">License report</a></li>
+</ul>
+
+<h3>Reports by platform</h3>
+<!--#exec cmd="/usr/bin/perl /home/groups/b/bo/boost/cgi-bin/regression-logs.pl" -->
+
+</body>
+</html>

+ 983 - 0
regression/library_status.cpp

@@ -0,0 +1,983 @@
+//  Generate Compiler Status HTML from jam regression test output  -----------//
+
+//  Copyright Beman Dawes 2002.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+//  See http://www.boost.org/tools/regression/ for documentation.
+
+//Note: This version of the original program builds a large table
+//which includes all build variations such as build/release, static/dynamic, etc.
+
+
+/*******************************************************************************
+
+This program was designed to work unchanged on all platforms and
+configurations.  All output which is platform or configuration dependent
+is obtained from external sources such as the .xml file from
+process_jam_log execution, the tools/build/xxx-tools.jam files, or the
+output of the config_info tests.
+
+Please avoid adding platform or configuration dependencies during
+program maintenance.
+
+*******************************************************************************/
+
+#include "boost/filesystem/operations.hpp"
+#include "boost/filesystem/fstream.hpp"
+
+namespace fs = boost::filesystem;
+
+#include "detail/tiny_xml.hpp"
+namespace xml = boost::tiny_xml;
+
+#include "boost/iterator/transform_iterator.hpp"
+
+#include <cstdlib>  // for abort, exit
+#include <string>
+#include <vector>
+#include <set>
+#include <utility>  // for make_pair on STLPort
+#include <map>
+#include <algorithm>
+#include <iostream>
+#include <fstream>
+#include <ctime>
+#include <stdexcept>
+#include <cassert>
+#include <utility>
+
+using std::string;
+
+const string pass_msg( "Pass" );
+const string warn_msg( "<i>Warn</i>" );
+const string fail_msg( "<font color=\"#FF0000\"><i>Fail</i></font>" );
+const string note_msg( "<sup>*</sup>" );
+const string missing_residue_msg( "<i>Missing</i>" );
+
+const std::size_t max_compile_msg_size = 10000;
+
+namespace
+{
+    fs::path boost_root;  // boost-root complete path
+    fs::path locate_root; // locate-root (AKA ALL_LOCATE_TARGET) complete path
+    bool ignore_pass = false; // --ignore-pass: drop rows where nothing failed
+    bool no_warn = false;     // --no-warn: suppress warnings for passing tests
+    bool no_links = false;    // set when no links-file argument is supplied
+
+    fs::directory_iterator end_itr;  // default-constructed == end-of-directory sentinel
+
+    // transform pathname to something html can accept
+    struct char_xlate {
+        typedef char result_type;
+        result_type operator()(char c) const{
+            if(c == '/')
+                return '-';
+            return c;
+        }
+    };
+    typedef boost::transform_iterator<char_xlate, std::string::const_iterator> html_from_path; 
+
+    // Stream every element of the iterator range [p.first, p.second) to os.
+    // Used with html_from_path pairs to emit a path as an HTML-safe anchor name.
+    // Note: takes the pair by value because p.first is advanced while printing.
+    template<class I1, class I2>
+    std::ostream & operator<<(
+    std::ostream &os, 
+    std::pair<I1, I2> p
+    ){
+        while(p.first != p.second)
+            os << *p.first++;
+        return os;
+    }
+
+    // One node in the tree of build-variant subdirectories (toolset /
+    // debug|release / link-static|shared / ...) that form the table columns.
+    struct col_node {
+        int rows, cols;  // header spans; valid only after get_spans() has run
+        bool has_leaf;   // true if this directory itself contains a test_log.xml
+        typedef std::map<std::string, col_node> subcolumns_t;
+        subcolumns_t m_subcolumns;  // child directories, keyed by leaf name
+        // NOTE(review): declared but no definition is visible in this file;
+        // any odr-use would fail to link — confirm it is intentionally unused.
+        bool operator<(const col_node &cn) const;
+        col_node() :
+            has_leaf(false)
+        {}
+        // compute and cache (rows, cols) for this subtree; see definition below
+        std::pair<int, int> get_spans();
+    };
+
+    // Recursively compute the header spans for this column subtree:
+    // rows = depth of the deepest subcolumn chain (+1 for this level's header),
+    // cols = number of leaf cells underneath.  Caches the result in the
+    // rows/cols members and also returns it as (rows, cols).
+    std::pair<int, int> col_node::get_spans(){
+        rows = 1;
+        cols = 0;
+        if(has_leaf){
+            cols = 1;  // this directory contributes a cell of its own
+        }
+        if(! m_subcolumns.empty()){
+            subcolumns_t::iterator itr;
+            for(itr = m_subcolumns.begin(); itr != m_subcolumns.end(); ++itr){
+                std::pair<int, int> spans;
+                spans = itr->second.get_spans();
+                rows = std::max(rows, spans.first);
+                cols += spans.second;
+            }
+            ++rows;  // one extra header row for the subcolumn labels
+        }
+        return std::make_pair(rows, cols);
+    }
+
+    // Walk dir_root recursively, merging every subdirectory into node's
+    // column tree.  A directory that contains a test_log.xml marks its
+    // node as a leaf (i.e. a real result cell).
+    void build_node_tree(const fs::path & dir_root, col_node & node){
+        fs::path xml_file_path( dir_root / "test_log.xml" );
+        if (fs::exists( xml_file_path ) )
+        {
+            node.has_leaf = true;
+        }
+        fs::directory_iterator itr(dir_root);
+        while(itr != end_itr){
+            if(fs::is_directory(*itr)){
+                // insert() is a no-op if the subcolumn already exists, so
+                // results from several tests merge into one column tree
+                std::pair<col_node::subcolumns_t::iterator, bool> result 
+                    = node.m_subcolumns.insert(
+                        std::make_pair(itr->leaf(), col_node())
+                    );
+                build_node_tree(*itr, result.first->second);
+            }
+            ++itr;
+        }
+    }
+
+    fs::ofstream report;      // status-file output stream (argv[1])
+    fs::ofstream links_file;  // links-file output stream (argv[2], optional)
+    string links_name;        // leaf of the links-file path, used in hrefs
+
+    fs::path notes_path;      // --notes: HTML copied into the status file
+    string notes_html;        // body text collected by load_notes_html()
+
+    fs::path notes_map_path;  // --notes-map: csv of key,bookmark lines
+    typedef std::multimap< string, string > notes_map; // key is test_name-toolset,
+    // value is note bookmark
+    notes_map notes;
+
+    string specific_compiler; // if running on one toolset only
+
+    const string empty_string;
+
+    //  build notes_bookmarks from notes HTML  -----------------------------------//
+
+    // Parse the --notes-map file: each line is "key,bookmark".  Lines
+    // without a comma are skipped.  Entries go into the 'notes' multimap
+    // (a key may carry several bookmarks).  Exits the program if the file
+    // was named but cannot be opened; silently returns if no path was given.
+    void build_notes_bookmarks()
+    {
+        if ( notes_map_path.empty() ) return;
+        fs::ifstream notes_map_file( notes_map_path );
+        if ( !notes_map_file )
+        {
+            std::cerr << "Could not open --notes-map input file: " << notes_map_path.string() << std::endl;
+            std::exit( 1 );
+        }
+        string line;
+        while( std::getline( notes_map_file, line ) )
+        {
+            string::size_type pos = 0;
+            if ( (pos = line.find( ',', pos )) == string::npos ) continue;
+            string key(line.substr( 0, pos ) );
+            string bookmark( line.substr( pos+1 ) );
+
+            //      std::cout << "inserting \"" << key << "\",\"" << bookmark << "\"\n";
+            notes.insert( notes_map::value_type( key, bookmark ) );
+        }
+    }
+
+    //  load_notes_html  ---------------------------------------------------------//
+
+    // Read the --notes HTML file and accumulate everything between <body>
+    // and </body> into notes_html.  Returns false when no --notes path was
+    // given; exits the program if the file cannot be opened.
+    bool load_notes_html()
+    {
+        if ( notes_path.empty() ) return false;
+        fs::ifstream notes_file( notes_path );
+        if ( !notes_file )
+        {
+            std::cerr << "Could not open --notes input file: " << notes_path.string() << std::endl;
+            std::exit( 1 );
+        }
+        string line;
+        bool in_body( false );
+        while( std::getline( notes_file, line ) )
+        {
+            if ( in_body && line.find( "</body>" ) != string::npos ) in_body = false;
+            if ( in_body ) notes_html += line;
+            // bug fix: find() returns a position, not a bool.  The original
+            // test "if ( line.find( "<body>" ) )" was true for almost every
+            // line (npos is non-zero), so in_body was set long before <body>
+            // actually appeared.  Compare against npos instead.
+            else if ( line.find( "<body>" ) != string::npos ) in_body = true;
+        }
+        return true;
+    }
+
+    //  extract object library name from target directory string  ----------------//
+
+    // Given a target-directory path such as ".../<lib>/build/..." or
+    // ".../<lib>/test/...", return the path component that follows
+    // "/build/" (preferred) or "/test/".  Returns "" when neither
+    // marker is present.
+    string extract_object_library_name( const string & s )
+    {
+        string t( s );
+        string::size_type pos = t.find( "/build/" );
+        if ( pos != string::npos ) pos += 7;   // skip past "/build/"
+        else if ( (pos = t.find( "/test/" )) != string::npos ) pos += 6; // skip "/test/"
+        else return "";
+        // substr up to the next '/' (or to the end when find returns npos)
+        return t.substr( pos, t.find( "/", pos ) - pos );
+    }
+
+    //  element_content  ---------------------------------------------------------//
+
+    // Return the content of the first child element of 'root' named 'name',
+    // or a reference to a static empty string when no such child exists.
+    const string & element_content(
+        const xml::element & root, const string & name )
+    {
+        const static string empty_string;
+        xml::element_list::const_iterator itr;
+        // linear search for the first child with a matching name
+        for ( itr = root.elements.begin();
+            itr != root.elements.end() && (*itr)->name != name;
+            ++itr ) {}
+            return itr != root.elements.end() ? (*itr)->content : empty_string;
+    }
+
+    //  find_element  ------------------------------------------------------------//
+
+    // Return the first child element of 'root' named 'name', or a reference
+    // to a static default-constructed element (empty name) when absent.
+    const xml::element & find_element(
+        const xml::element & root, const string & name )
+    {
+        static const xml::element empty_element;
+        xml::element_list::const_iterator itr;
+        // linear search for the first child with a matching name
+        for ( itr = root.elements.begin();
+            itr != root.elements.end() && (*itr)->name != name;
+            ++itr ) {}
+            return itr != root.elements.end() ? *((*itr).get()) : empty_element;
+    }
+
+    //  attribute_value  ----------------------------------------------------------//
+
+    // Return the value of the named attribute of 'element', or a reference
+    // to a static empty string when the attribute is not present.
+    const string & attribute_value( 
+        const xml::element & element,
+        const string & attribute_name 
+    ){
+        xml::attribute_list::const_iterator atr;
+        for(
+            atr = element.attributes.begin();
+            atr != element.attributes.end();
+            ++atr
+        ){
+            if(atr->name == attribute_name)
+                return atr->value;
+        }
+        static const string empty_string;
+        return empty_string;
+    }
+
+    //  generate_report  ---------------------------------------------------------//
+
+    // Write the compile/link/run/lib messages for one test result into
+    // links_file, anchored by the HTML-translated target directory.
+    // Recurses once (via the test_log.xml of a failed object library) to
+    // emit the library build failure text.
+    // return 0 if nothing generated, 1 otherwise, except 2 if compiler msgs
+    int generate_report( 
+        const xml::element & db,
+        const std::string source_library_name,
+        const string & test_type,
+        const fs::path & target_dir,
+        bool pass,
+        bool always_show_run_output 
+        )
+    {
+        // compile msgs sometimes modified, so make a local copy
+        string compile( ((pass && no_warn)
+            ? empty_string :  element_content( db, "compile" )) );
+
+        const string & link( pass ? empty_string : element_content( db, "link" ) );
+        const string & run( (pass && !always_show_run_output)
+            ? empty_string : element_content( db, "run" ) );
+        string lib( (pass ? empty_string : element_content( db, "lib" )) );
+
+        // some compilers output the filename even if there are no errors or
+        // warnings; detect this if one line of output and it contains no space.
+        string::size_type pos = compile.find( '\n', 1 );
+        if ( pos != string::npos && compile.size()-pos <= 2
+            && compile.find( ' ' ) == string::npos ) compile.clear();
+
+        // nothing worth reporting for this cell
+        if ( lib.empty() 
+            && (
+                compile.empty() || test_type == "compile_fail"
+            ) 
+            && link.empty() 
+            && run.empty()
+        ) 
+            return 0; 
+
+        int result = 1; // some kind of msg for sure
+
+        // limit compile message length
+        if ( compile.size() > max_compile_msg_size )
+        {
+            compile.erase( max_compile_msg_size );
+            compile += "...\n   (remainder deleted because of excessive size)\n";
+        }
+
+        // anchor + heading: the HTML-safe target dir, used twice (name, text)
+        links_file << "<h2><a name=\"";
+        links_file << std::make_pair(
+            html_from_path(target_dir.string().begin()), 
+            html_from_path(target_dir.string().end())
+            )
+            << "\">"
+            << std::make_pair(
+            html_from_path(target_dir.string().begin()), 
+            html_from_path(target_dir.string().end())
+            )
+            ;
+        links_file << "</a></h2>\n";
+
+        if ( !compile.empty() )
+        {
+            ++result;  // 2 == compiler messages present (caller shows "Warn")
+            links_file << "<h3>Compiler output:</h3><pre>"
+                << compile << "</pre>\n";
+        }
+        if ( !link.empty() )
+            links_file << "<h3>Linker output:</h3><pre>" << link << "</pre>\n";
+        if ( !run.empty() )
+            links_file << "<h3>Run output:</h3><pre>" << run << "</pre>\n";
+
+        // for an object library failure, generate a reference to the object
+        // library failure message, and (once only) generate the object
+        // library failure message itself
+        static std::set< string > failed_lib_target_dirs; // only generate once
+        if ( !lib.empty() )
+        {
+            if ( lib[0] == '\n' ) lib.erase( 0, 1 );
+            string object_library_name( extract_object_library_name( lib ) );
+
+            // changing the target directory naming scheme breaks
+            // extract_object_library_name()
+            assert( !object_library_name.empty() );
+            if ( object_library_name.empty() )
+                std::cerr << "Failed to extract object library name from " << lib << "\n";
+
+            links_file << "<h3>Library build failure: </h3>\n"
+                "See <a href=\"#"
+                << source_library_name << "-"
+                << object_library_name << "-" 
+                << std::make_pair(
+                html_from_path(target_dir.string().begin()), 
+                html_from_path(target_dir.string().end())
+                )
+                // bug fix: the href attribute was never closed, so the link
+                // text below leaked into the attribute value (malformed HTML);
+                // cf. the correct "<a name=...>" pattern above.
+                << "\">"
+                << source_library_name << " - "
+                << object_library_name << " - " 
+                << std::make_pair(
+                html_from_path(target_dir.string().begin()), 
+                html_from_path(target_dir.string().end())
+                )
+                << "</a>";
+            if ( failed_lib_target_dirs.find( lib ) == failed_lib_target_dirs.end() )
+            {
+                failed_lib_target_dirs.insert( lib );
+                fs::path pth( locate_root / lib / "test_log.xml" );
+                fs::ifstream file( pth );
+                if ( file )
+                {
+                    // recurse once to emit the object library's own messages
+                    xml::element_ptr db = xml::parse( file, pth.string() );
+                    generate_report( 
+                        *db, 
+                        source_library_name, 
+                        test_type,
+                        target_dir,
+                        false,
+                        false
+                    );
+                }
+                else
+                {
+                    links_file << "<h2><a name=\""
+                        << object_library_name << "-" 
+                        << std::make_pair(
+                        html_from_path(target_dir.string().begin()), 
+                        html_from_path(target_dir.string().end())
+                        )
+                        << "\">"
+                        << object_library_name << " - " 
+                        << std::make_pair(
+                        html_from_path(target_dir.string().begin()), 
+                        html_from_path(target_dir.string().end())
+                        )
+                        << "</a></h2>\n"
+                        << "test_log.xml not found\n";
+                }
+            }
+        }
+        return result;
+    }
+
+    //  add_notes --------------------------------------------------------------//
+
+    // Append note links for 'key' to 'target'.  A bookmark starting with
+    // '-' is shown even when the test passed; others only on failure.
+    // 'sep' is the between-links separator, updated to "," after the first.
+    void add_notes( const string & key, bool fail, string & sep, string & target )
+    {
+        notes_map::const_iterator itr = notes.lower_bound( key );
+        if ( itr != notes.end() && itr->first == key )
+        {
+            for ( ; itr != notes.end() && itr->first == key; ++itr )
+            {
+                // strip the leading '-' marker from the bookmark, if any
+                string note_desc( itr->second[0] == '-'
+                    ? itr->second.substr( 1 ) : itr->second );
+                if ( fail || itr->second[0] == '-' )
+                {
+                    target += sep;
+                    sep = ",";
+                    target += "<a href=\"";
+                    target += "#";
+                    target += note_desc;
+                    target += "\">";
+                    target += note_desc;
+                    target += "</a>";
+                }
+            }
+        }
+    }
+
+    //  do_cell  ---------------------------------------------------------------//
+    // Emit one <td> result cell for the test_log.xml found in target_dir,
+    // appending the HTML to 'target'.  Also triggers generate_report() so
+    // the cell can link into the links file.
+    bool do_cell(
+        const fs::path & target_dir,
+        const string & lib_name,
+        string & target,
+        bool profile
+    ){
+        // return true if any results except pass_msg
+        bool pass = false;
+
+        fs::path xml_file_path( target_dir / "test_log.xml" );
+        if ( !fs::exists( xml_file_path ) )
+        {
+            // suppress message because there are too many of them.
+            // "missing" is a legitimate result as it's not a requirement
+            // that every test be run in every configuration.
+            //std::cerr << "Missing jam_log.xml in target:\n "
+            //    << target_dir.string() << "\n";
+            target += "<td align=\"right\">" + missing_residue_msg + "</td>";
+            return true;
+        }
+
+        int anything_generated = 0;
+        bool note = false;
+
+        fs::ifstream file( xml_file_path );
+        if ( !file ) // could not open jam_log.xml
+        {
+            // NOTE(review): the message says jam_log.xml but the file being
+            // opened is test_log.xml — confirm which name is intended.
+            std::cerr << "Can't open jam_log.xml in target:\n "
+                << target_dir.string() << "\n";
+            target += "<td>" + missing_residue_msg + "</td>";
+            return false;
+        }
+
+        string test_type( "unknown" );
+        bool always_show_run_output( false );
+
+        xml::element_ptr dbp = xml::parse( file, xml_file_path.string() );
+        const xml::element & db( *dbp );
+        test_type = attribute_value( db, "test-type" );
+        always_show_run_output
+            = attribute_value( db, "show-run-output" ) == "true";
+
+        // derive the base element name by stripping a "_fail" and/or
+        // "_pyd" suffix from the test type (e.g. "compile_fail" -> "compile")
+        std::string test_type_base( test_type );
+        if ( test_type_base.size() > 5 )
+        {
+            const string::size_type trailer = test_type_base.size() - 5;
+            if ( test_type_base.substr( trailer ) == "_fail" )
+            {
+                test_type_base.erase( trailer );
+            }
+        }
+        if ( test_type_base.size() > 4 )
+        {
+            const string::size_type trailer = test_type_base.size() - 4;
+            if ( test_type_base.substr( trailer ) == "_pyd" )
+            {
+                test_type_base.erase( trailer );
+            }
+        }
+        const xml::element & test_type_element( find_element( db, test_type_base ) );
+
+        // a missing element (empty name) counts as a failure
+        pass = !test_type_element.name.empty()
+            && attribute_value( test_type_element, "result" ) != "fail";
+
+        if (!no_links){
+            if(!test_type_element.name.empty())
+                note = attribute_value( test_type_element, "result" ) == "note";
+            anything_generated = 
+                generate_report(
+                db, 
+                lib_name, 
+                test_type,
+                target_dir,
+                pass,
+                always_show_run_output || note 
+            );
+        }
+
+        // generate the status table cell pass/warn/fail HTML
+        target += "<td align=\"right\">";
+        if ( anything_generated != 0 )
+        {
+            // link the cell text into the details written to the links file
+            target += "<a href=\"";
+            target += links_name;
+            target += "#";
+            std::copy(
+                html_from_path(target_dir.string().begin()), 
+                html_from_path(target_dir.string().end()),
+                std::back_inserter(target)
+                );
+            target += "\">";
+            target += pass
+                ? (anything_generated < 2 ? pass_msg : warn_msg)
+                : fail_msg;
+            target += "</a>";
+            if ( pass && note ) target += note_msg;
+        }
+        else  target += pass ? pass_msg : fail_msg;
+
+        // if profiling
+        if(profile && pass){
+            // add link to profile
+            target += " <a href=\"";
+            target += (target_dir / "profile.txt").string();
+            target += "\"><i>Profile</i></a>";
+        }
+        
+        // if notes, generate the superscript HTML
+//        if ( !notes.empty() ) 
+//            target += get_notes( toolset, lib_name, test_name, !pass );
+
+        target += "</td>";
+        return (anything_generated != 0) || !pass;
+    }
+
+    // Depth-first traversal of the column tree for one test row: emit a
+    // cell wherever the corresponding directory holds a test_log.xml.
+    // Returns true if any visited cell reported something other than a
+    // plain pass.  'profile' is passed down as true for "profile" variants
+    // so do_cell can add a link to profile.txt.
+    bool visit_node_tree(
+        const col_node & node,
+        fs::path dir_root,
+        const string & lib_name,
+        string & target,
+        bool profile
+    ){
+        bool retval = false;
+        if(node.has_leaf){
+            retval = do_cell(
+                dir_root,
+                lib_name,
+                target,
+                profile
+            );
+        }
+
+        col_node::subcolumns_t::const_iterator col_itr;
+        for(
+            col_itr = node.m_subcolumns.begin(); 
+            col_itr != node.m_subcolumns.end();
+            ++col_itr
+        ){
+            fs::path subdir = dir_root / col_itr->first;
+            retval |= visit_node_tree(
+                col_itr->second, 
+                subdir,
+                lib_name,
+                target,
+                col_itr->first == "profile"
+            );
+        }
+        return retval;
+    }
+
+    // emit results for each test: build one complete <tr> (name cell plus
+    // one result cell per column) and append it to 'target'.  The whole
+    // row is erased again when --ignore-pass is set and nothing failed.
+    void do_row(
+        col_node test_node,
+        const fs::path & test_dir,
+        const string & lib_name,
+        const string & test_name,
+        string & target 
+    ){
+        string::size_type row_start_pos = target.size();
+
+        target += "<tr>";
+
+        target += "<td>";
+        //target += "<a href=\"" + url_prefix_dir_view + "/libs/" + lib_name + "\">";
+        target += test_name;
+        // bug fix: the closing </a> was emitted even though the matching
+        // opening <a> above is commented out, leaving an unbalanced tag;
+        // disable it along with the anchor.
+        //target += "</a>";
+        target += "</td>";
+
+//        target += "<td>" + test_type + "</td>";
+
+        bool no_warn_save = no_warn;
+
+//        if ( test_type.find( "fail" ) != string::npos ) no_warn = true;
+
+        // emit cells on this row
+        bool anything_to_report = visit_node_tree(
+            test_node, 
+            test_dir,
+            lib_name,
+            target,
+            false
+        );
+
+        target += "</tr>";
+
+        // drop all-pass rows when --ignore-pass was requested
+        if ( ignore_pass 
+        && ! anything_to_report ) 
+            target.erase( row_start_pos );
+
+        no_warn = no_warn_save;
+    }
+
+    //  do_table_body  -----------------------------------------------------------//
+
+    // Emit one table row per test subdirectory of test_lib_dir.  Rows are
+    // collected first so they can be sorted (by the generated HTML, which
+    // effectively sorts by test name) before being written to the report.
+    void do_table_body(
+        col_node root_node, 
+        const string & lib_name,
+        const fs::path & test_lib_dir 
+    ){
+        // rows are held in a vector so they can be sorted, if desired.
+        std::vector<string> results;
+
+        for ( fs::directory_iterator itr( test_lib_dir ); itr != end_itr; ++itr )
+        {
+            if(! fs::is_directory(*itr))
+                continue;
+            
+            string test_name = itr->leaf();
+            // strip off the ".test" if there is one
+            string::size_type s = test_name.find( ".test" );
+            if(string::npos != s)
+                test_name.resize(s);
+
+            results.push_back( std::string() ); 
+            do_row(
+                root_node, //*test_node_itr++,
+                *itr, // test dir
+                lib_name,
+                test_name,
+                results[results.size()-1] 
+            );
+        }
+
+        std::sort( results.begin(), results.end() );
+
+        for ( 
+            std::vector<string>::iterator v(results.begin());
+            v != results.end(); 
+            ++v 
+        ){ 
+            report << *v << "\n"; 
+        }
+    }
+
+    //  column header-----------------------------------------------------------//
+    // Number of header rows needed: the deepest 'rows' span among the
+    // root's immediate subcolumns (get_spans() must have run already).
+    int header_depth(const col_node & root){
+        col_node::subcolumns_t::const_iterator itr;
+        int max_depth = 1;
+        for(itr = root.m_subcolumns.begin(); itr != root.m_subcolumns.end(); ++itr){
+            max_depth = std::max(max_depth, itr->second.rows);
+        }
+        return max_depth;
+    }
+
+    // Write one header <td> to the report, adding colspan/rowspan
+    // attributes only when they exceed 1.
+    void header_cell(int rows, int cols, const std::string & name){
+        // add row cells
+        report << "<td align=\"center\" " ;
+        if(1 < cols)
+            report << "colspan=\"" << cols << "\" " ;
+        if(1 < rows)
+            // span rows to the end the header
+            report << "rowspan=\"" << rows << "\" " ;
+        report << ">" ;
+        report << name;
+        report << "</td>\n";
+    }
+
+    // Emit the header cells belonging to one header row ('display_row').
+    // Recurses through the column tree; levels above display_row only
+    // forward the call to their children.  row_count is the total number
+    // of header rows, used to compute rowspans that reach the bottom.
+    void emit_column_headers(
+        const col_node & node, 
+        int display_row, 
+        int current_row,
+        int row_count
+    ){
+        if(current_row < display_row){
+            // not yet at the row being rendered: descend one level
+            if(! node.m_subcolumns.empty()){
+                col_node::subcolumns_t::const_iterator itr;
+                for(itr = node.m_subcolumns.begin(); itr != node.m_subcolumns.end(); ++itr){
+                    emit_column_headers(itr->second, display_row, current_row + 1, row_count);
+                }
+            }
+            return;
+        }
+        // a node that is both a leaf and has subcolumns needs a blank
+        // filler cell for its own result column
+        if(node.has_leaf && ! node.m_subcolumns.empty()){
+            header_cell(row_count - current_row, 1, std::string(""));
+        }
+
+        col_node::subcolumns_t::const_iterator itr;
+        for(itr = node.m_subcolumns.begin(); itr != node.m_subcolumns.end(); ++itr){
+            if(1 == itr->second.rows)
+                // no deeper levels: stretch this cell down to the last row
+                header_cell(row_count - current_row, itr->second.cols, itr->first);
+            else
+                header_cell(1, itr->second.cols, itr->first);
+        }
+    }
+
+    // Locate the bjam output directory for the library we were started in:
+    // walk up from the initial path until a "bin.v2" (v2) or "bin" (v1)
+    // subdirectory is found, then re-append the path components from
+    // "libs" downward.  Throws a std::string message on failure.
+    fs::path find_lib_test_dir(){
+        // walk up from the path were we started until we find
+        // bin or bin.v2
+
+        fs::path::const_iterator leaf_itr = fs::initial_path().end();
+        fs::path test_lib_dir = fs::initial_path();
+        for(;;){
+            if(fs::is_directory( test_lib_dir / "bin.v2")){
+                test_lib_dir /= "bin.v2";
+                break;
+            }
+            if(fs::is_directory( test_lib_dir / "bin")){
+                // v1 includes the word boost
+                test_lib_dir /= "bin";
+                test_lib_dir /= "boost";
+                break;
+            }
+            if(test_lib_dir.empty())
+                throw std::string("binary path not found");
+            // NOTE(review): on the first pass leaf_itr still equals end(),
+            // so this dereference is undefined behaviour; presumably the
+            // decrement was meant to happen before the comparison — confirm.
+            if(*leaf_itr != "libs")
+                --leaf_itr;
+            test_lib_dir.remove_leaf();
+        }
+
+        if(leaf_itr == fs::initial_path().end())
+            throw std::string("must be run from within a library directory");
+
+        while(leaf_itr != fs::initial_path().end()){
+            test_lib_dir /= *leaf_itr++;    // append "libs"
+        }
+        return test_lib_dir;
+    }
+
+    // Derive the library name from a path: the '/'-joined sequence of
+    // components that follow the "libs" component (e.g. ".../libs/foo/test"
+    // -> "foo/test" minus the final component counted out by 'count').
+    // Throws a std::string message when no "libs" component exists.
+    string find_lib_name(fs::path lib_test_dir){
+        unsigned int count;
+        fs::path::iterator e_itr = lib_test_dir.end();
+        // walk backwards until the "libs" component, counting the
+        // components that follow it
+        for(count =  0;; ++count){
+            if(*--e_itr == "libs")
+                break;
+            if(lib_test_dir.empty())
+                throw std::string("must be run from within a library directory");
+        }
+        string library_name;
+        // re-join the count-1 components after "libs" with '/'
+        for(;;){
+            library_name.append(*++e_itr);
+            if(1 == --count)
+                break;
+            library_name.append("/");
+        }
+        return library_name;
+    }
+
+    // Locate the boost root by walking up from the initial path until a
+    // directory containing a "boost" subdirectory is found.  Throws a
+    // std::string message when the filesystem root is reached first.
+    fs::path find_boost_root(){
+        fs::path boost_root = fs::initial_path();
+        for(;;){
+            if(fs::is_directory( boost_root / "boost")){
+                break;
+            }
+            if(boost_root.empty())
+                throw std::string("boost root not found");
+            boost_root.remove_leaf();
+        }
+
+        return boost_root;
+    }
+
+    //  do_table  ----------------------------------------------------------------//
+    // Build the column tree from the test output directories, then write
+    // the complete HTML results table (headers + body) to the report.
+    void do_table(const string & lib_name)
+    {
+        col_node root_node;
+
+        fs::path lib_test_dir = find_lib_test_dir();
+
+        // merge every test directory's variant subtree into one column tree
+        for ( fs::directory_iterator itr(lib_test_dir); itr != end_itr; ++itr )
+        {
+            if(! fs::is_directory(*itr))
+                continue;
+            build_node_tree(*itr, root_node);
+        }
+
+        // visit directory nodes and record nodetree
+        report << "<table border=\"1\" cellspacing=\"0\" cellpadding=\"5\">\n";
+
+        // emit
+        root_node.get_spans();  // compute row/col spans before emitting headers
+        int row_count = header_depth(root_node);
+        report << "<tr>\n";
+        report << "<td rowspan=\"" << row_count << "\">Test Name</td>\n";
+
+        // emit column headers
+        int row_index = 0;
+        for(;;){
+            emit_column_headers(root_node, row_index, 0, row_count);
+            report << "</tr>" ;
+            if(++row_index == row_count)
+                break;
+            report << "<tr>\n";
+        }
+
+        // now the rest of the table body
+        do_table_body(root_node, lib_name, lib_test_dir);
+
+        report << "</table>\n";
+   }
+}// unnamed namespace
+
+//  main  --------------------------------------------------------------------//
+
+#define BOOST_NO_CPP_MAIN_SUCCESS_MESSAGE
+#include <boost/test/included/prg_exec_monitor.hpp>
+
+// Program entry point (run under the Boost program execution monitor).
+// Parses command-line options, opens the report (and optional links)
+// output files, then emits the library status table.
+int cpp_main( int argc, char * argv[] ) // note name!
+{
+    fs::path comment_path;
+    // consume leading "--option [value]" pairs; each branch shifts past the
+    // value, and the common --argc/++argv below shifts past the option itself
+    while ( argc > 1 && *argv[1] == '-' )
+    {
+        if ( argc > 2 && std::strcmp( argv[1], "--compiler" ) == 0 )
+        { specific_compiler = argv[2]; --argc; ++argv; }
+        else if ( argc > 2 && std::strcmp( argv[1], "--locate-root" ) == 0 )
+        { locate_root = fs::path( argv[2], fs::native ); --argc; ++argv; }
+        else if ( argc > 2 && std::strcmp( argv[1], "--boost-root" ) == 0 )
+        { boost_root = fs::path( argv[2], fs::native ); --argc; ++argv; }
+        else if ( argc > 2 && std::strcmp( argv[1], "--comment" ) == 0 )
+        { comment_path = fs::path( argv[2], fs::native ); --argc; ++argv; }
+        else if ( argc > 2 && std::strcmp( argv[1], "--notes" ) == 0 )
+        { notes_path = fs::path( argv[2], fs::native ); --argc; ++argv; }
+        else if ( argc > 2 && std::strcmp( argv[1], "--notes-map" ) == 0 )
+        { notes_map_path = fs::path( argv[2], fs::native ); --argc; ++argv; }
+        else if ( std::strcmp( argv[1], "--ignore-pass" ) == 0 ) ignore_pass = true;
+        else if ( std::strcmp( argv[1], "--no-warn" ) == 0 ) no_warn = true;
+        // NOTE(review): --v2 looks like a bare flag, yet this branch shifts
+        // an extra argument (in addition to the common shift below) — confirm
+        // whether --v2 really takes a value.
+        else if ( std::strcmp( argv[1], "--v2" ) == 0 )
+        {--argc; ++argv ;} // skip
+        else if ( argc > 2 && std::strcmp( argv[1], "--jamfile" ) == 0)
+        {--argc; ++argv;} // skip
+        else { std::cerr << "Unknown option: " << argv[1] << "\n"; argc = 1; }
+        --argc;
+        ++argv;
+    }
+
+    if ( argc != 2 && argc != 3 )
+    {
+        std::cerr <<
+            "Usage: library_status [options...] status-file [links-file]\n"
+            "  boost-root is the path to the boost tree root directory.\n"
+            "  status-file and links-file are paths to the output files.\n"
+            "  options: --compiler name     Run for named compiler only\n"
+            "           --ignore-pass       Do not report tests which pass all compilers\n"
+            "           --no-warn           Warnings not reported if test passes\n"
+            "           --boost-root path default derived from current path.\n"
+            "           --locate-root path  Path to ALL_LOCATE_TARGET for bjam;\n"
+            "                               default boost-root.\n"
+            "           --comment path      Path to file containing HTML\n"
+            "                               to be copied into status-file.\n"
+            "           --notes path        Path to file containing HTML\n"
+            "                               to be copied into status-file.\n"
+            "           --notes-map path    Path to file of toolset/test,n lines, where\n"
+            "                               n is number of note bookmark in --notes file.\n"
+            // bug fix: the example named the wrong program (compiler_status)
+            // and showed a positional boost-root this Usage does not accept
+            "Example: library_status --compiler gcc cs.html cs-links.html\n"
+            "Note: Only the leaf of the links-file path and --notes file string are\n"
+            "used in status-file HTML links. Thus for browsing, status-file,\n"
+            "links-file, and --notes file must all be in the same directory.\n"
+            ;
+        return 1;
+    }
+
+    // derive defaults for paths not given on the command line
+    if(boost_root.empty())
+        boost_root = find_boost_root();
+    if ( locate_root.empty() ) 
+        locate_root = boost_root;
+
+    report.open( fs::path( argv[1], fs::native ) );
+    if ( !report )
+    {
+        // bug fix: the file that failed to open is argv[1], not argv[2]
+        std::cerr << "Could not open report output file: " << argv[1] << std::endl;
+        return 1;
+    }
+
+    if ( argc == 3 )
+    {
+        fs::path links_path( argv[2], fs::native );
+        links_name = links_path.leaf();
+        links_file.open( links_path );
+        if ( !links_file )
+        {
+            // bug fix: the links file is argv[2], not argv[3]
+            std::cerr << "Could not open links output file: " << argv[2] << std::endl;
+            return 1;
+        }
+    }
+    else no_links = true;
+
+    build_notes_bookmarks();
+
+    const string library_name = find_lib_name(fs::initial_path());
+
+    char run_date[128];
+    std::time_t tod;
+    std::time( &tod );
+    std::strftime( run_date, sizeof(run_date),
+        "%X UTC, %A %d %B %Y", std::gmtime( &tod ) );
+
+    report 
+        << "<html>\n"
+        << "<head>\n"
+        << "<title>Boost Library Status Automatic Test</title>\n"
+        << "</head>\n"
+        << "<body bgcolor=\"#ffffff\" text=\"#000000\">\n"
+        << "<table border=\"0\">\n"
+        << "<tr>\n"
+        << "<td><img border=\"0\" " 
+        << "src=\""
+        << boost_root / "boost.png"
+        << "\" width=\"277\" "
+        << "height=\"86\"></td>\n"
+        << "<td>\n"
+        << "<h1>Library Status: " + library_name + "</h1>\n"
+        << "<b>Run Date:</b> "
+        << run_date
+        << "\n"
+        ;
+
+    // copy the optional --comment HTML verbatim into the report header
+    if ( !comment_path.empty() )
+    {
+        fs::ifstream comment_file( comment_path );
+        if ( !comment_file )
+        {
+            std::cerr << "Could not open \"--comment\" input file: " << comment_path.string() << std::endl;
+            return 1;
+        }
+        char c;
+        while ( comment_file.get( c ) ) { report.put( c ); }
+    }
+
+    report << "</td>\n</table>\n<br>\n";
+
+    if ( !no_links )
+    {
+        links_file
+            << "<html>\n"
+            << "<head>\n"
+            << "<title>Boost Library Status Error Log</title>\n"
+            << "</head>\n"
+            << "<body bgcolor=\"#ffffff\" text=\"#000000\">\n"
+            << "<table border=\"0\">\n"
+            << "<tr>\n"
+            << "<td><img border=\"0\" src=\""
+            << boost_root / "boost.png"
+            << "\" width=\"277\" "
+            << "height=\"86\"></td>\n"
+            << "<td>\n"
+            << "<h1>Library Status: " + library_name + "</h1>\n"
+            << "<b>Run Date:</b> "
+            << run_date
+            << "\n</td>\n</table>\n<br>\n"
+            ;
+    }
+
+    do_table(library_name);
+
+    if ( load_notes_html() ) report << notes_html << "\n";
+
+    report << "</body>\n"
+        "</html>\n"
+        ;
+
+    if ( !no_links )
+    {
+        links_file
+            << "</body>\n"
+            "</html>\n"
+            ;
+    }
+    return 0;
+}

+ 166 - 0
regression/library_status.html

@@ -0,0 +1,166 @@
+<html>
+
+<head>
+<meta http-equiv="Content-Language" content="en-us">
+<meta http-equiv="Content-Type"
+content="text/html; charset=iso-8859-1">
+<title>Library Status</title>
+</head>
+
+<body bgcolor="#FFFFFF">
+
+<table border="0">
+<tr>
+<td><img border="0" src="../../boost.png" width="277" height="86" alt="boost.png (6897 bytes)"></td>
+<td><h1>Generating Library Status Tables</h1></td>
+</tr>
+</table>
+
+<h3>Purpose</h3>
+Any time one considers using a library as large and complex
+as the Boost libraries, one must have a way of validating
+that the library functions in one's environment.  This should
+be done when the library is installed and anytime questions
+are raised regarding its applicability and/or its usage.
+<p>
+The procedures described here permit a user to run any
+combination of tests on any or all libraries and generate
+a set of convenient tables which show which libraries
+pass which tests under what conditions.
+<h3>Preliminaries</h3>
+Generating these tables requires a couple of utility programs:
+<code>process_jam_log</code> and <code>library_status</code>. 
+These can be built by moving to the directory <code>tools/regression/build</code>
+and invoking bjam. If all goes well these utility programs
+will be found in the directory <code>dist/bin</code>.  From
+there they should be moved to a place in the current
+path.
+<p>
+<h3>Running Tests for One Library</h3>
+
+<ol>
+    <li>Start from your command line environment.
+    <li>set the current directory to:../libs/&lt;library name&gt;/test
+    <li>Invoke one of the following:
+    <ul>
+        <li><code>../../../tools/regression/library_test (*nix)</code>.
+        <li><code>..\..\..\tools\regression\library_test (windows)</code>.
+    </ul>
+    <li>This will display a short help message describing how to set
+    the command line arguments for the compilers and variants you want to
+    appear in the final table.
+    <li>Setting these arguments requires rudimentary knowledge of bjam
+    usage. Hopefully, if you've arrived at this page you've gained the
+    required knowledge during the installation and library build process.
+    <li>Rerun the above command with the argument set accordingly.
+    <li>When the command terminates, there should be a file named 
+    "library_status.html" in the current directory.
+    <li>Display this file with any web browser.
+</ol>
+There should appear a table similar to the following for the regex
+library.
+<p>
+<table border="1" cellspacing="0" cellpadding="5">
+<tr>
+<td rowspan="4">Test Name</td>
+<td align="center" colspan="4" >msvc-7.1</td>
+</tr><tr>
+<td align="center" colspan="2" >debug</td>
+<td align="center" colspan="2" >release</td>
+</tr><tr>
+<td align="center" >link-static</td>
+<td align="center" rowspan="2" >threading-multi</td>
+<td align="center" >link-static</td>
+<td align="center" rowspan="2" >threading-multi</td>
+</tr><tr>
+<td align="center" >threading-multi</td>
+<td align="center" >threading-multi</td>
+</tr><tr><td>bad_expression_test</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-bad_expression_test.test-msvc-7.1-debug-threading-multi"><i>Warn</i></a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-bad_expression_test.test-msvc-7.1-release-threading-multi"><i>Warn</i></a></td></tr>
+<tr><td>captures</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-captures-msvc-7.1-debug-threading-multi"><font color="#FF0000"><i>Fail</i></font></a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-captures-msvc-7.1-release-threading-multi"><font color="#FF0000"><i>Fail</i></font></a></td></tr>
+<tr><td>captures_test</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-captures_test.test-msvc-7.1-debug-threading-multi"><i>Warn</i></a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-captures_test.test-msvc-7.1-release-threading-multi"><i>Warn</i></a></td></tr>
+<tr><td>concept_check</a></td><td align="right"><i>Missing</i></td><td align="right">Pass</td><td align="right"><i>Missing</i></td><td align="right">Pass</td></tr>
+<tr><td>icu_concept_check</a></td><td align="right"><i>Missing</i></td><td align="right">Pass</td><td align="right"><i>Missing</i></td><td align="right">Pass</td></tr>
+<tr><td>object_cache_test</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-object_cache_test.test-msvc-7.1-debug-threading-multi"><i>Warn</i></a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-object_cache_test.test-msvc-7.1-release-threading-multi"><i>Warn</i></a></td></tr>
+<tr><td>posix_api_check</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-posix_api_check.test-msvc-7.1-debug-threading-multi"><i>Warn</i></a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-posix_api_check.test-msvc-7.1-release-threading-multi"><i>Warn</i></a></td></tr>
+<tr><td>posix_api_check_cpp</a></td><td align="right"><i>Missing</i></td><td align="right">Pass</td><td align="right"><i>Missing</i></td><td align="right">Pass</td></tr>
+<tr><td>recursion_test</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-recursion_test.test-msvc-7.1-debug-threading-multi"><i>Warn</i></a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-recursion_test.test-msvc-7.1-release-threading-multi"><i>Warn</i></a></td></tr>
+<tr><td>regex_config_info</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-regex_config_info.test-msvc-7.1-debug-threading-multi">Pass</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-regex_config_info.test-msvc-7.1-release-threading-multi">Pass</a></td></tr>
+<tr><td>regex_dll_config_info</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-regex_dll_config_info.test-msvc-7.1-debug-threading-multi">Pass</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-regex_dll_config_info.test-msvc-7.1-release-threading-multi">Pass</a></td></tr>
+<tr><td>regex_regress</a></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-regex_regress.test-msvc-7.1-debug-link-static-threading-multi">Pass</a><sup>*</sup></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-regex_regress.test-msvc-7.1-release-link-static-threading-multi">Pass</a><sup>*</sup></td><td align="right"><i>Missing</i></td></tr>
+<tr><td>regex_regress_dll</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-regex_regress_dll.test-msvc-7.1-debug-threading-multi">Pass</a><sup>*</sup></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-regex_regress_dll.test-msvc-7.1-release-threading-multi">Pass</a><sup>*</sup></td></tr>
+<tr><td>regex_regress_threaded</a></td><td align="right"><i>Missing</i></td><td align="right">Pass</td><td align="right"><i>Missing</i></td><td align="right">Pass</td></tr>
+<tr><td>static_mutex_test</a></td><td align="right"><i>Missing</i></td><td align="right">Pass</td><td align="right"><i>Missing</i></td><td align="right">Pass</td></tr>
+<tr><td>test_collate_info</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-test_collate_info.test-msvc-7.1-debug-threading-multi"><i>Warn</i></a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-test_collate_info.test-msvc-7.1-release-threading-multi"><i>Warn</i></a></td></tr>
+<tr><td>unicode_iterator_test</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-unicode_iterator_test.test-msvc-7.1-debug-threading-multi"><i>Warn</i></a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-unicode_iterator_test.test-msvc-7.1-release-threading-multi"><i>Warn</i></a></td></tr>
+<tr><td>wide_posix_api_check_c</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-wide_posix_api_check_c.test-msvc-7.1-debug-threading-multi"><i>Warn</i></a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-wide_posix_api_check_c.test-msvc-7.1-release-threading-multi"><i>Warn</i></a></td></tr>
+<tr><td>wide_posix_api_check_cpp</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-wide_posix_api_check_cpp.test-msvc-7.1-debug-threading-multi"><i>Warn</i></a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-wide_posix_api_check_cpp.test-msvc-7.1-release-threading-multi"><i>Warn</i></a></td></tr>
+</table>
+<p>
+This table was generated by invoking the following command line:
+<p>
+<code>
+../../../tools/regression/library_test --toolset=msvc-7.1 variant=debug,release
+</code>
+<p>
+from within the .../libs/regex/test directory.
+<p>
+This table shows the regex test results for both debug and release
+versions of the library.  Also it displays the fact that one of the
+tests is run specifically with the static linking/multi-threading
+versions of the runtime libraries.  The cells marked "Missing" correspond
+to tests that were not run for some reason or another.  This is usually
+because the corresponding <code>Jamfile.v2</code> excludes this test
+for the given combination of compiler and build attributes. In this
+example, all tests were run with the same compiler.  If additional
+compilers were used, they would appear as more columns in the table.
+<p>
+The table above is just an illustration so the links don't actually
+point to anything.  In the table you generated, the links will
+display a page describing any errors, warnings or other available 
+information about the tests.  If the test passes, usually, there
+is no additional information and hence no link.
+<p>
+The tables are cumulative. That is, if you run one set of tests
+now and tests with different attributes later, the table will
+contain all the results to date.  The test results are stored
+in <code>../bin.v2/libs/test/&lt;library&gt;/...</code>.
+To reinitialize the test results to empty, delete the corresponding
+files in this directory.
+<p>
+The procedure above assumes that the table are generated within
+the directory <code>../libs/&lt;library&gt;/test</code>.  This is the
+most common case since this directory contains the
+<code>Jamfile.v2</code> as well as the source code that is
+used by official boost testers.  However, this is just a convention.
+The table can be generated for other directories within the
+library.  One possibility would be to generate the table for
+all the examples in <code>../libs/&lt;library&gt;/example</code>. Or
+one might have a special directory of performance tests which
+take a long time to run and hence are not suitable for running
+by official boost testers.  Just remember that library
+status table is generated in the directory from which the
+<code>library_test</code> command is invoked.
+<p>
+<h3>Running Tests for All Libraries</h3>
+For those with *nix or cygwin command line shells, there is shell
+script that can be run from the boost root directory:
+<p>
+<code> tools/regression/library_test_all</code>
+<p>
+The command line arguments are the same as for running the test
+for one library.  This script creates all the html files in all
+the test directories as well as an html page in the <code>status</code>
+directory named <code>library_status_summary.html</code>.  This
+can be used to browse through all test results for all test in
+all libraries.
+<hr>
+
+<p>
+Copyright 2007 Robert Ramey. Distributed under the Boost Software License, Version 1.0. 
+(See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
+<p>
+Revised <!--webbot bot="Timestamp" startspan s-type="EDITED"
+s-format="%d %B, %Y" -->14 August, 2007<!--webbot bot="Timestamp"
+i-checksum="38582" endspan --></p>
+</body>
+</html>

+ 15 - 0
regression/library_test.bat

@@ -0,0 +1,15 @@
+@echo off
+rem Run bjam for one library's tests, feed the log through process_jam_log,
+rem and generate library_status.html / links.html in the current directory.
+rem With no arguments, print usage help instead.
+if not "%1" == "" goto bjam
+    echo Usage: %0 "<bjam arguments>"
+    echo where typical bjam arguments are:
+    echo   --toolset=msvc-7.1,gcc
+    echo   variant=debug,release,profile
+    echo   link=static,shared
+    echo   threading=single,multi
+    echo   -sBOOST_ARCHIVE_LIST="<archive name>"
+    goto end
+:bjam
+    rem capture both stdout and stderr so compiler diagnostics reach the log
+    bjam --dump-tests %* >bjam.log 2>&1
+    process_jam_log --v2 <bjam.log
+    library_status library_status.html links.html
+:end

+ 14 - 0
regression/library_test.sh

@@ -0,0 +1,14 @@
+if test $# -eq 0 
+then
+    echo "Usage: $0 <bjam arguments>"
+    echo "Typical bjam arguements are:"
+    echo "  --toolset=msvc-7.1,gcc"
+    echo "  variant=debug,release,profile"
+    echo "  link=static,shared"
+    echo "  threading=single,multi"
+    echo "  -sBOOST_ARCHIVE_LIST=<archive name>"
+else
+    bjam --dump-tests $@ >bjam.log 2>&1
+    process_jam_log --v2 <bjam.log
+    library_status library_status.html links.html
+fi

+ 85 - 0
regression/library_test_all.sh

@@ -0,0 +1,85 @@
+if test $# -eq 0
+then
+    echo "Usage: $0 <bjam arguments>"
+    echo "Typical bjam arguments are:"
+    echo "  --toolset=msvc-7.1,gcc"
+    echo "  variant=debug,release,profile"
+    echo "  link=static,shared"
+    echo "  threading=single,multi"
+    echo
+    echo "note: make sure this script is run from boost root directory !!!"
+    exit 1
+fi
+
+if ! test -e libs
+then
+    echo No libs directory found. Run from boost root directory !!!
+    exit 1
+fi
+
+#html header
+cat <<end >status/library_status_contents.html
+<!doctype HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html>
+<!--
+(C) Copyright 2007 Robert Ramey - http://www.rrsd.com . 
+Use, modification and distribution is subject to the Boost Software
+License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+http://www.boost.org/LICENSE_1_0.txt)
+-->
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<link rel="stylesheet" type="text/css" href="../boost.css">
+<title>Library Status Contents</title>
+<body>
+end
+
+cd >nul libs
+
+# runtests, create library pages, and body of summary page
+for lib_name in *
+do
+    if test -d $lib_name
+    then
+        cd >nul $lib_name
+
+        if test -e "test/Jamfile.v2"
+        then
+            cd >nul test
+            echo $lib_name
+            echo >>../../../status/library_status_contents.html "<a target=\"detail\" href=\"../libs/$lib_name/test/library_status.html\">$lib_name</a><br>"
+            ../../../tools/regression/library_test $@
+            cd >nul ..
+        fi
+
+        for sublib_name in *
+        do
+            if test -d $sublib_name
+            then
+                cd >nul $sublib_name
+                if test -e "test/Jamfile.v2"
+                then
+                    cd >nul test
+                    echo $lib_name/$sublib_name
+                    echo >>../../../../status/library_status_contents.html "<a target=\"detail\" href=\"../libs/$lib_name/$sublib_name/test/library_status.html\">$lib_name/$sublib_name</a><br>"
+                    ../../../../tools/regression/library_test $@
+                    cd >nul ..
+                fi
+                cd >nul ..
+            fi
+        done
+           
+        cd >nul ..
+    fi
+done
+
+
+cd >nul ..
+
+#html trailer
+cat <<end >>status/library_status_contents.html
+</body>
+</html>
+end
+
+

+ 809 - 0
regression/process_jam_log.cpp

@@ -0,0 +1,809 @@
+//  process jam regression test output into XML  -----------------------------//
+
+//  Copyright Beman Dawes 2002.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+//  See http://www.boost.org/tools/regression for documentation.
+
+#include "detail/tiny_xml.hpp"
+#include "boost/filesystem/operations.hpp"
+#include "boost/filesystem/fstream.hpp"
+#include "boost/filesystem/exception.hpp"
+#include "boost/filesystem/convenience.hpp"
+
+#include <iostream>
+#include <string>
+#include <cstring>
+#include <map>
+#include <utility> // for make_pair
+#include <ctime>
+#include <cctype>   // for tolower
+
+using std::string;
+namespace xml = boost::tiny_xml;
+namespace fs = boost::filesystem;
+
+#define BOOST_NO_CPP_MAIN_SUCCESS_MESSAGE
+#include <boost/test/included/prg_exec_monitor.hpp>
+
+// options 
+
+static bool echo = false;           // when true, target_directory() traces its input->output mapping to stdout
+static bool create_dirs = false;    // when true, create missing directories before writing test_log.xml
+static bool boost_build_v2 = false; // when true, parse targets in Boost.Build v2 log format
+
+namespace
+{
+  // Metadata known about a single test, looked up in the test_log ctor.
+  struct test_info
+  {
+    string      file_path; // relative boost-root
+    string      type;      // e.g. "lib", "pyd"; may be empty (then inferred from the target path)
+    bool        always_show_run_output;
+  };
+  typedef std::map< string, test_info > test2info_map;  // key is "<library>/<test-name>" (see lookup in test_log ctor)
+  test2info_map test2info;
+
+  fs::path boost_root;
+  fs::path locate_root; // ALL_LOCATE_TARGET (or boost_root if none)
+
+//  append_html  -------------------------------------------------------------//
+
+  // Append 'src' to 'target' with '<', '>' and '&' HTML-escaped.
+  // A few known noise lines (jam status output, cc1plus search-order
+  // warnings) are dropped entirely.
+  void append_html( const string & src, string & target )
+  {
+    // there are a few lines we want to ignore
+    if ( src.find( "th target..." ) != string::npos
+      || src.find( "cc1plus.exe: warning: changing search order for system directory" ) != string::npos
+      || src.find( "cc1plus.exe: warning:   as it has already been specified as a non-system directory" ) != string::npos
+      ) return;
+
+    // on some platforms (e.g. tru64cxx) the following line is a real performance boost
+    target.reserve(src.size() * 2 + target.size());
+
+    for ( string::size_type pos = 0; pos < src.size(); ++pos )
+    {
+      if ( src[pos] == '<' ) target += "&lt;";
+      else if ( src[pos] == '>' ) target += "&gt;";
+      else if ( src[pos] == '&' ) target += "&amp;";
+      else target += src[pos];
+    }
+  }
+
+ //  timestamp  ---------------------------------------------------------------//
+
+  // Current UTC time formatted for test_log action entries
+  // (strftime "%Y-%m-%d %X UTC").
+  string timestamp()
+  {
+    char run_date[128];
+    std::time_t tod;
+    std::time( &tod );
+    std::strftime( run_date, sizeof(run_date),
+      "%Y-%m-%d %X UTC", std::gmtime( &tod ) );
+    return string( run_date );
+  }
+
+//  convert path separators to forward slashes  ------------------------------//
+
+  // Normalize path separators in place: both '\' and '!' (used as a
+  // separator in some old-style jam paths -- see parse_skipped_msg notes)
+  // become '/'.
+  void convert_path_separators( string & s )
+  {
+    for ( string::iterator itr = s.begin(); itr != s.end(); ++itr )
+      if ( *itr == '\\' || *itr == '!' ) *itr = '/';
+  }
+
+//  trim_left ----------------------------------------------------------------//
+
+  // Return 's' with leading spaces removed ("" if 's' is all spaces).
+  std::string trim_left( std::string const& s )
+  {
+    std::string::size_type const pos( s.find_first_not_of(' ') );
+    return pos != std::string::npos
+        ? s.substr( pos, s.size() - pos + 1 )
+        : ""
+        ;
+  }
+  
+
+//  split --------------------------------------------------------------------//
+
+  // Split 's' into its space-separated tokens (recursively); the result
+  // always contains at least one element.
+  std::vector<std::string> split( std::string const& s )
+  {
+    std::string::size_type const pos( s.find_first_of(' ') );
+    std::vector<std::string> result( 1, s.substr( 0, pos ) );
+    if ( pos == std::string::npos )
+        return result;
+
+    std::vector<std::string> const rest( split( trim_left( s.substr( pos, s.size() - pos + 1 ) ) ) );
+    result.insert( result.end(), rest.begin(), rest.end() );    
+    return result;
+  }
+
+
+//  extract a target directory path from a jam target string  ----------------//
+//  s may be relative to the initial_path:
+//    ..\..\..\libs\foo\build\bin\libfoo.lib\vc7\debug\runtime-link-dynamic\boo.obj
+//  s may be absolute:
+//    d:\myboost\libs\foo\build\bin\libfoo.lib\vc7\debug\runtime-link-dynamic\boo.obj
+//  return path is always relative to the boost directory tree:
+//    libs/foo/build/bin/libfs.lib/vc7/debug/runtime-link-dynamic
+
+  // Extract the target directory (relative to the boost tree) from jam
+  // target string 's'; see the format notes above.  Traces the mapping
+  // to stdout when the 'echo' option is set.
+  string target_directory( const string & s )
+  {
+    string temp( s );
+    convert_path_separators( temp );
+    temp.erase( temp.find_last_of( "/" ) ); // remove leaf
+    temp = split( trim_left( temp ) ).back(); // last token is the path
+    if ( temp[0] == '.' ) temp.erase( 0, temp.find_first_not_of( "./" ) ); 
+    else temp.erase( 0, locate_root.string().size()+1 ); // strip absolute locate_root prefix
+    if ( echo )
+        std::cout << "\ttarget_directory( \"" << s << "\") -> \"" << temp << "\"" << std::endl;
+    return temp;
+  }
+
+  // Position of a binary-suffix component in 's' (tried in priority
+  // order: ".test/", ".dll/", ".so/", ".lib/", ".pyd/", ".a/");
+  // string::npos if none is present.
+  string::size_type target_name_end( const string & s )
+  {
+    string::size_type pos = s.find( ".test/" );
+    if ( pos == string::npos ) pos = s.find( ".dll/" );
+    if ( pos == string::npos ) pos = s.find( ".so/" );
+    if ( pos == string::npos ) pos = s.find( ".lib/" );
+    if ( pos == string::npos ) pos = s.find( ".pyd/" );
+    if ( pos == string::npos ) pos = s.find( ".a/" );
+    return pos;
+  }
+
+  // Extract the toolset name: the path element that follows the
+  // target-name component (or "build/"); "" if neither is present.
+  string toolset( const string & s )
+  {
+    string::size_type pos = target_name_end( s );
+    if ( pos == string::npos ) pos = s.find( "build/" );
+    if ( pos == string::npos ) return "";
+    pos = s.find( "/", pos ) + 1;
+    return s.substr( pos, s.find( "/", pos ) - pos );
+  }
+
+  // Extract the test name: the path element ending at the binary-suffix
+  // component found by target_name_end(); "" if no such component.
+  string test_name( const string & s )
+  {
+    string::size_type pos = target_name_end( s );
+    if ( pos == string::npos ) return "";
+    string::size_type pos_start = s.rfind( '/', pos ) + 1;
+    return s.substr( pos_start,
+      (s.find( ".test/" ) != string::npos
+        ? pos : s.find( "/", pos )) - pos_start );
+  }
+
+  // Take a path to a target directory of test, and
+  // returns library name corresponding to that path.
+  string test_path_to_library_name( string const& path )
+  {
+    std::string result;
+    string::size_type start_pos( path.find( "libs/" ) );
+    if ( start_pos != string::npos )
+    {
+      // The path format is ...libs/functional/hash/test/something.test/....      
+      // So, the part between "libs" and "test/something.test" can be considered
+      // as library name. But, for some libraries tests are located too deep,
+      // say numeric/ublas/test/test1 directory, and some libraries have tests
+      // in several subdirectories (regex/example and regex/test). So, nested
+      // directory may belong to several libraries.
+
+      // To disambiguate, it's possible to place a 'sublibs' file in
+      // a directory. It means that child directories are separate libraries.
+      // It's still possible to have tests in the directory that has 'sublibs'
+      // file.
+
+      std::string interesting;
+      start_pos += 5; // skip past "libs/"
+      string::size_type end_pos( path.find( ".test/", start_pos ) );
+      end_pos = path.rfind('/', end_pos);
+      if (path.substr(end_pos - 5, 5) == "/test")
+        interesting = path.substr( start_pos, end_pos - 5 - start_pos );
+      else
+        interesting = path.substr( start_pos, end_pos - start_pos );
+
+      // Take slash separate elements until we have corresponding 'sublibs'.
+      end_pos = 0;
+      for(;;)
+      {
+        end_pos = interesting.find('/', end_pos);
+        if (end_pos == string::npos) {
+          result = interesting;
+          break;
+        }
+        result = interesting.substr(0, end_pos);
+
+        if ( fs::exists( ( boost_root / "libs" ) / result / "sublibs" ) )
+        {
+          // descend one more path element; children are separate libraries
+          end_pos = end_pos + 1;
+        }
+        else
+          break;
+      }
+    }
+
+    return result;
+  }
+
+  // Tries to find target name in the string 'msg', starting from 
+  // position start.
+  // If found, extract the directory name from the target name and
+  // stores it in 'dir', and return the position after the target name.
+  // Otherwise, returns string::npos.
+  string::size_type parse_skipped_msg_aux(const string& msg,
+                                          string::size_type start,
+                                          string& dir)
+  {
+    dir.clear();
+    // targets appear in the message as "<...>"; grab the bracketed text
+    string::size_type start_pos = msg.find( '<', start );
+    if ( start_pos == string::npos ) return string::npos;
+    ++start_pos;
+    string::size_type end_pos = msg.find( '>', start_pos );
+    dir += msg.substr( start_pos, end_pos - start_pos );
+    if ( boost_build_v2 )
+    {
+        // The first letter is a magic value indicating
+        // the type of grist.
+        convert_path_separators( dir );
+        dir.erase( 0, 1 );
+        // We need path from root, not from 'status' dir.
+        if (dir.find("../") == 0)
+          dir.erase(0,3);
+        else // dir is always relative to the boost directory tree
+          dir.erase( 0, locate_root.string().size()+1 );
+    }
+    else
+    {
+      if ( dir[0] == '@' )
+      {
+        // new style build path, rooted build tree
+        convert_path_separators( dir );
+        dir.replace( 0, 1, "bin/" );
+      }
+      else
+      {
+        // old style build path, integrated build tree
+        start_pos = dir.rfind( '!' );
+        convert_path_separators( dir );
+        string::size_type path_sep_pos = dir.find( '/', start_pos + 1 );
+        if ( path_sep_pos != string::npos )
+           dir.insert( path_sep_pos, "/bin" );
+        else
+        {
+          // see http://article.gmane.org/gmane.comp.lib.boost.devel/146688;
+          // the following code assumes that: a) 'dir' is not empty,
+          // b) 'end_pos != string::npos' and c) 'msg' always ends with '...'
+          if ( dir[dir.size() - 1] == '@' )
+            dir += "/" + msg.substr( end_pos + 1, msg.size() - end_pos - 1 - 3 );
+        }
+      }
+    }
+    // position just past the target name, for the next call
+    return end_pos;
+  }
+  
+  // the format of paths is really kinky, so convert to normal form
+  //   first path is missing the leading "..\".
+  //   first path is missing "\bin" after "status".
+  //   second path is missing the leading "..\".
+  //   second path is missing "\bin" after "build".
+  //   second path uses "!" for some separators.
+  // Extract the two target directories from a "skipped" message; stops
+  // early (leaving second_dir untouched) if the first target is absent.
+  void parse_skipped_msg( const string & msg,
+    string & first_dir, string & second_dir )
+  {
+    string::size_type pos = parse_skipped_msg_aux(msg, 0, first_dir);
+    if (pos == string::npos)
+      return;
+    parse_skipped_msg_aux(msg, pos, second_dir);
+  }
+
+//  test_log hides database details  -----------------------------------------//
+
+  class test_log
+    : boost::noncopyable
+  {
+    const string & m_target_directory;
+    xml::element_ptr m_root;   // in-memory test_log.xml document
+  public:
+    // Load <locate_root>/<target_directory>/test_log.xml if present (and
+    // force_new_file is false); otherwise build a fresh "test-log" root
+    // element whose attributes identify the test, its library, type,
+    // toolset, and run-output policy.
+    test_log( const string & target_directory,
+              const string & test_name,
+              const string & toolset,
+              bool force_new_file )
+      : m_target_directory( target_directory )
+    {
+      if ( !force_new_file )
+      {
+        fs::path pth( locate_root / target_directory / "test_log.xml" );
+        fs::ifstream file( pth  );
+        if ( file )   // existing file
+        {
+          try
+          {
+            m_root = xml::parse( file, pth.string() );
+            return;
+          }
+          catch(...)
+          {
+            // unable to parse existing XML file, fall through
+          }
+        }
+      }
+
+      string library_name( test_path_to_library_name( target_directory ) );
+
+      test_info info;
+      test2info_map::iterator itr( test2info.find( library_name + "/" + test_name ) );
+      if ( itr != test2info.end() )
+        info = itr->second;
+      
+      // prefer the library deduced from the test's source path, when known
+      if ( !info.file_path.empty() )
+        library_name = test_path_to_library_name( info.file_path );
+      
+      // no explicit type recorded: infer "lib"/"pyd" from the target path
+      if ( info.type.empty() )
+      {
+        if ( target_directory.find( ".lib/" ) != string::npos
+          || target_directory.find( ".dll/" ) != string::npos 
+          || target_directory.find( ".so/" ) != string::npos 
+          || target_directory.find( ".dylib/" ) != string::npos 
+          || target_directory.find( "/build/" ) != string::npos 
+          )
+        {
+          info.type = "lib";
+        }
+        else if ( target_directory.find( ".pyd/" ) != string::npos )
+          info.type = "pyd";
+      }
+  
+      m_root.reset( new xml::element( "test-log" ) );
+      m_root->attributes.push_back(
+        xml::attribute( "library", library_name ) );
+      m_root->attributes.push_back(
+        xml::attribute( "test-name", test_name ) );
+      m_root->attributes.push_back(
+        xml::attribute( "test-type", info.type ) );
+      m_root->attributes.push_back(
+        xml::attribute( "test-program", info.file_path ) );
+      m_root->attributes.push_back(
+        xml::attribute( "target-directory", target_directory ) );
+      m_root->attributes.push_back(
+        xml::attribute( "toolset", toolset ) );
+      m_root->attributes.push_back(
+        xml::attribute( "show-run-output",
+          info.always_show_run_output ? "true" : "false" ) );
+    }
+
+    // Write the (possibly updated) document back to test_log.xml,
+    // creating missing directories first when the create_dirs option is set.
+    ~test_log()
+    {
+      fs::path pth( locate_root / m_target_directory / "test_log.xml" );
+      if ( create_dirs && !fs::exists( pth.branch_path() ) )
+          fs::create_directories( pth.branch_path() );
+      fs::ofstream file( pth );
+      if ( !file )
+      {
+        std::cout << "*****Warning - can't open output file: "
+          << pth.string() << "\n";
+      }
+      else xml::write( *m_root, file );
+    }
+
+    const string & target_directory() const { return m_target_directory; }
+
+    // Erase the first child element named action_name, if any.
+    void remove_action( const string & action_name )
+    // no effect if action_name not found
+    {
+      xml::element_list::iterator itr;
+      for ( itr = m_root->elements.begin();
+            itr != m_root->elements.end() && (*itr)->name != action_name;
+            ++itr ) {}
+      if ( itr != m_root->elements.end() ) m_root->elements.erase( itr );
+    }
+
+    // Replace any prior element named action_name with a fresh one holding
+    // the given result, timestamp, and message content.
+    void add_action( const string & action_name,
+                     const string & result,
+                     const string & timestamp,
+                     const string & content )
+    {
+      remove_action( action_name );
+      xml::element_ptr action( new xml::element(action_name) );
+      m_root->elements.push_back( action );
+      action->attributes.push_back( xml::attribute( "result", result ) );
+      action->attributes.push_back( xml::attribute( "timestamp", timestamp ) );
+      action->content = content;
+    }
+  };
+
+//  message_manager maps input messages into test_log actions  ---------------//
+
+  class message_manager
+    : boost::noncopyable
+  {
+    string  m_action_name;  // !empty() implies action pending
+                            // IOW, a start_message awaits stop_message
+    string  m_target_directory;
+    string  m_test_name;
+    string  m_toolset;
+
+    bool    m_note;  // if true, run result set to "note"
+                     // set false by start_message()
+
+    // data needed to stop further compile action after a compile failure
+    // detected in the same target directory
+    string  m_previous_target_directory;
+    bool    m_compile_failed;
+
+  public:
+    message_manager() : m_note(false) {}
+    ~message_manager() { /*assert( m_action_name.empty() );*/ }
+
+    bool note() const { return m_note; }
+    void note( bool value ) { m_note = value; }
+
+    void start_message( const string & action_name,
+                      const string & target_directory,
+                      const string & test_name,
+                      const string & toolset,
+                      const string & prior_content )
+    {
+      assert( !target_directory.empty() );
+
+      if ( !m_action_name.empty() ) stop_message( prior_content );
+      m_action_name = action_name;
+      m_target_directory = target_directory;
+      m_test_name = test_name;
+      m_toolset = toolset;
+      m_note = false;
+
+      if ( m_previous_target_directory != target_directory )
+      {
+        m_previous_target_directory = target_directory;
+        m_compile_failed = false;
+      }
+    }
+
+    void stop_message( const string & content )
+    {
+      if ( m_action_name.empty() ) return;
+      stop_message( m_action_name, m_target_directory,
+        "succeed", timestamp(), content );
+    }
+
+    void stop_message( const string & action_name,
+                     const string & target_directory,
+                     const string & result,
+                     const string & timestamp,
+                     const string & content )
+    // the only valid action_names are "compile", "link", "run", "lib"
+    {
+      // My understanding of the jam output is that there should never be
+      // a stop_message that was not preceeded by a matching start_message.
+      // That understanding is built into message_manager code.
+      assert( m_action_name == action_name );
+      assert( m_target_directory == target_directory );
+      assert( result == "succeed" || result == "fail" );
+
+      // if test_log.xml entry needed
+      if ( !m_compile_failed
+        || action_name != "compile"
+        || m_previous_target_directory != target_directory )
+      {
+        if ( action_name == "compile"
+          && result == "fail" ) m_compile_failed = true;
+
+        test_log tl( target_directory,
+          m_test_name, m_toolset, action_name == "compile" );
+        tl.remove_action( "lib" ); // always clear out lib residue
+
+        // dependency removal
+        if ( action_name == "lib" )
+        {
+          tl.remove_action( "compile" );
+          tl.remove_action( "link" );
+          tl.remove_action( "run" );
+        }
+        else if ( action_name == "compile" )
+        {
+          tl.remove_action( "link" );
+          tl.remove_action( "run" );
+          if ( result == "fail" ) m_compile_failed = true;
+        }
+        else if ( action_name == "link" )
+        {
+          tl.remove_action( "run" );
+        }
+
+        // dependency removal won't work right with random names, so assert
+        else { assert( action_name == "run" ); }
+
+        // add the "run" stop_message action
+        tl.add_action( action_name,
+           result == "succeed" && note() ? std::string("note") : result,
+          timestamp, content );
+      }
+
+      m_action_name = ""; // signal no pending action
+      m_previous_target_directory = target_directory;
+    }
+  };
+}
+
+
+//  main  --------------------------------------------------------------------//
+
+
+int cpp_main( int argc, char ** argv )
+{
+  // Turn off synchronization with corresponding C standard library files. This
+  // gives a significant speed improvement on platforms where the standard C++
+  // streams are implemented using standard C files.
+  std::ios::sync_with_stdio(false);
+
+  if ( argc <= 1 )
+    std::cout << "Usage: bjam [bjam-args] | process_jam_log [--echo] [--create-directories] [--v2] [locate-root]\n"
+                 "locate-root         - the same as the bjam ALL_LOCATE_TARGET\n"
+                 "                      parameter, if any. Default is boost-root.\n"
+                 "create-directories  - if the directory for xml file doesn't exists - creates it.\n"
+                 "                      usually used for processing logfile on different machine\n";
+
+  boost_root = fs::initial_path();
+
+  while ( !boost_root.empty()
+    && !fs::exists( boost_root / "libs" ) )
+  {
+    boost_root /=  "..";
+  }
+
+  if ( boost_root.empty() )
+  {
+    std::cout << "must be run from within the boost-root directory tree\n";
+    return 1;
+  }
+
+  boost_root.normalize();
+  
+  if ( argc > 1 && std::strcmp( argv[1], "--echo" ) == 0 )
+  {
+    echo = true;
+    --argc; ++argv;
+  }
+
+
+  if (argc > 1 && std::strcmp( argv[1], "--create-directories" ) == 0 )
+  {
+      create_dirs = true;
+      --argc; ++argv;
+  } 
+
+  if ( argc > 1 && std::strcmp( argv[1], "--v2" ) == 0 )
+  {
+    boost_build_v2 = true;
+    --argc; ++argv;
+  }
+
+
+  if (argc > 1)
+  {
+      locate_root = fs::path( argv[1], fs::native );
+      if ( !locate_root.is_complete() )
+        locate_root = ( fs::initial_path() / locate_root ).normalize();
+      
+      --argc; ++argv;
+  } 
+  else
+  {
+      locate_root = boost_root;
+  }
+
+  std::cout << "boost_root: " << boost_root.string() << '\n'
+            << "locate_root: " << locate_root.string() << '\n';
+
+  message_manager mgr;
+
+  string line;
+  string content;
+  bool capture_lines = false;
+
+  std::istream* input;
+  if (argc > 1)
+  {
+      input = new std::ifstream(argv[1]);
+  }
+  else
+  {
+      input = &std::cin;
+  }
+
+  // This loop looks at lines for certain signatures, and accordingly:
+  //   * Calls start_message() to start capturing lines. (start_message() will
+  //     automatically call stop_message() if needed.)
+  //   * Calls stop_message() to stop capturing lines.
+  //   * Capture lines if line capture on.
+
+  int line_num = 0;
+  while ( std::getline( *input, line ) )
+  {
+    ++line_num;
+    
+    std::vector<std::string> const line_parts( split( line ) );
+    std::string const line_start( line_parts[0] != "...failed" 
+        ? line_parts[0]
+        : line_parts[0] + " " + line_parts[1]
+        );
+    
+    if ( echo )
+    {
+      std::cout
+        << "line " << line_num << ": " << line << "\n"
+        << "\tline_start: " << line_start << "\n";        
+    }
+
+    // create map of test-name to test-info
+    if ( line_start.find( "boost-test(" ) == 0 )
+    {
+      string::size_type pos = line.find( '"' );
+      string test_name( line.substr( pos+1, line.find( '"', pos+1)-pos-1 ) );
+      test_info info;
+      info.always_show_run_output
+        = line.find( "\"always_show_run_output\"" ) != string::npos;
+      info.type = line.substr( 11, line.find( ')' )-11 );
+      for (unsigned int i = 0; i!=info.type.size(); ++i )
+        { info.type[i] = std::tolower( info.type[i] ); }
+      pos = line.find( ':' );
+      // the rest of line is missing if bjam didn't know how to make target
+      if ( pos + 1 != line.size() )
+      {
+        info.file_path = line.substr( pos+3,
+          line.find( "\"", pos+3 )-pos-3 );
+        convert_path_separators( info.file_path );
+        if ( info.file_path.find( "libs/libs/" ) == 0 ) info.file_path.erase( 0, 5 );
+        if ( test_name.find( "/" ) == string::npos )
+            test_name = "/" + test_name;
+        test2info.insert( std::make_pair( test_name, info ) );
+  //      std::cout << test_name << ", " << info.type << ", " << info.file_path << "\n";
+      }
+      else
+      {
+        std::cout << "*****Warning - missing test path: " << line << "\n"
+          << "  (Usually occurs when bjam doesn't know how to make a target)\n";
+      }
+      continue;
+    }
+
+    // these actions represent both the start of a new action
+    // and the end of a failed action
+    else if ( line_start.find( "C++-action" ) != string::npos
+      || line_start.find( "vc-C++" ) != string::npos
+      || line_start.find( "C-action" ) != string::npos
+      || line_start.find( "Cc-action" ) != string::npos
+      || line_start.find( "vc-Cc" ) != string::npos
+      || line_start.find( ".compile.") != string::npos
+      || line_start.find( "compile-") != string::npos
+      || line_start.find( "-compile") != string::npos
+      || line_start.find( "Link-action" ) != string::npos
+      || line_start.find( "vc-Link" ) != string::npos 
+      || line_start.find( "Archive-action" ) != string::npos
+      || line_start.find( ".archive") != string::npos
+      || ( line_start.find( ".link") != string::npos &&
+           // .linkonce is present in gcc linker messages about
+           // unresolved symbols. We don't have to parse those
+           line_start.find( ".linkonce" ) == string::npos )
+    )
+    {
+      if ( !test2info.size() )
+      {
+        std::cout << "*****Error - No \"boost-test\" lines encountered.\n"
+                     "     (Usually occurs when bjam was envoked without the --dump-tests option\n"
+                     "      or bjam was envoked in the wrong directory)\n";
+        return 1;
+      }
+
+      string action( ( line_start.find( "Link-action" ) != string::npos
+            || line_start.find( "vc-Link" ) != string::npos 
+            || line_start.find( "Archive-action" ) != string::npos
+            || line_start.find( ".archive") != string::npos
+            || line_start.find( ".link") != string::npos
+            )
+          ? "link" : "compile"
+        );
+      
+      if ( line_start.find( "...failed " ) != string::npos )
+      {
+        mgr.stop_message( action, target_directory( line ),
+          "fail", timestamp(), content );
+      }
+      else
+      {
+        string target_dir( target_directory( line ) );
+        mgr.start_message( action, target_dir,
+          test_name( target_dir ), toolset( target_dir ), content );
+      }
+      content = "\n";
+      capture_lines = true;
+    }
+
+    // these actions are only used to stop the previous action
+    else if ( line_start.find( "-Archive" ) != string::npos
+      || line_start.find( "MkDir" ) == 0 )
+    {
+      mgr.stop_message( content );
+      content.clear();
+      capture_lines = false;
+    }
+
+    else if ( line_start.find( "execute-test" ) != string::npos 
+             || line_start.find( "capture-output" ) != string::npos )
+    {
+      if ( line_start.find( "...failed " ) != string::npos )
+      {
+        mgr.stop_message( "run", target_directory( line ),
+          "fail", timestamp(), content );
+        content = "\n";
+        capture_lines = true;
+      }
+      else
+      {
+        string target_dir( target_directory( line ) );
+        mgr.start_message( "run", target_dir,
+          test_name( target_dir ), toolset( target_dir ), content );
+
+        // contents of .output file for content
+        capture_lines = false;
+        content = "\n";
+        fs::ifstream file( locate_root / target_dir
+          / (test_name(target_dir) + ".output") );
+        if ( file )
+        {
+          string ln;
+          while ( std::getline( file, ln ) )
+          {
+            if ( ln.find( "<note>" ) != string::npos ) mgr.note( true );
+            append_html( ln, content );
+            content += "\n";
+          }
+        }
+      }
+    }
+
+    // bjam indicates some prior dependency failed by a "...skipped" message
+    else if ( line_start.find( "...skipped" ) != string::npos 
+        && line.find( "<directory-grist>" ) == string::npos
+        )
+    {
+      mgr.stop_message( content );
+      content.clear();
+      capture_lines = false;
+
+      if ( line.find( " for lack of " ) != string::npos )
+      {
+        capture_lines = ( line.find( ".run for lack of " ) == string::npos );
+
+        string target_dir;
+        string lib_dir;
+
+        parse_skipped_msg( line, target_dir, lib_dir );
+
+        if ( target_dir != lib_dir ) // it's a lib problem
+        {
+          mgr.start_message( "lib", target_dir, 
+            test_name( target_dir ), toolset( target_dir ), content );
+          content = lib_dir;
+          mgr.stop_message( "lib", target_dir, "fail", timestamp(), content );
+          content = "\n";
+        }
+      }
+
+    }
+
+    else if ( line_start.find( "**passed**" ) != string::npos
+      || line_start.find( "failed-test-file" ) != string::npos
+      || line_start.find( "command-file-dump" ) != string::npos )
+    {
+      mgr.stop_message( content );
+      content = "\n";
+      capture_lines = true;
+    }
+
+    else if ( capture_lines ) // hang onto lines for possible later use
+    {
+      append_html( line, content );;
+      content += "\n";
+    }
+  }
+
+  mgr.stop_message( content );
+  if (input != &std::cin)
+      delete input;
+  return 0;
+}

+ 197 - 0
regression/regression-logs.pl

@@ -0,0 +1,197 @@
+#!/usr/bin/perl
+
+#~ Copyright 2003, Rene Rivera.
+#~ Use, modification and distribution are subject to the Boost Software
+#~ License Version 1.0. (See accompanying file LICENSE_1_0.txt or
+#~ http://www.boost.org/LICENSE_1_0.txt)
+
+use FileHandle;
+use Time::Local;
+
+# Get the whle percent value
+#
+sub percent_value
+{
+    my ($count,$total) = @_;
+    my $percent = int (($count/$total)*100+0.5);
+    if ($count > 0 && $percent == 0) { $percent = 1; }
+    if ($count < $total && $percent == 100) { $percent = 99; }
+    return $percent;
+}
+
+# Generate item html for the pass column.
+#
+sub result_info_pass
+{
+    my ($color,$pass,$warn,$fail,$missing) = @_;
+    my $percent = 100-percent_value($fail+$missing,$pass+$warn+$fail+$missing);
+    return "<font color=\"$color\"><font size=\"+1\">$percent%</font><br>($warn&nbsp;warnings)</font>";
+}
+
+# Generate item html for the fail column.
+#
+sub result_info_fail
+{
+    my ($color,$pass,$warn,$fail,$missing) = @_;
+    my $percent = percent_value($fail+$missing,$pass+$warn+$fail+$missing);
+    return "<font color=\"$color\"><font size=\"+1\">$percent%</font><br>($fail)</font>";
+}
+
+# Generate an age highlighted run date string.
+# Use as: data_info(run-date-html)
+#
+sub date_info
+{
+    my %m = ('January',0,'February',1,'March',2,'April',3,'May',4,'June',5,
+        'July',6,'August',7,'September',8,'October',9,'November',10,'December',11);
+    my @d = split(/ |:/,$_[0]);
+    my ($hour,$min,$sec,$day,$month,$year) = ($d[0],$d[1],$d[2],$d[4],$m{$d[5]},$d[6]);
+    #print "<!-- $hour.$min.$sec.$day.$month.$year -->\n";
+    my $test_t = timegm($sec,$min,$hour,$day,$month,$year);
+    my $age = time-$test_t;
+    my $age_days = $age/(60*60*24);
+    #print "<!-- $age_days days old -->\n";
+    my $age_html = "<font>";
+    if ($age_days <= 2) { }
+    elsif ($age_days <= 14) { $age_html = "<font color=\"#FF9900\">"; }
+    else { $age_html = "<font color=\"#FF0000\">"; }
+    return $age_html.$_[0]."</font>";
+}
+
+# Generate an age string based on the run date.
+# Use as: age_info(run-date-html)
+#
+sub age_info
+{
+    my %m = ('January',0,'February',1,'March',2,'April',3,'May',4,'June',5,
+        'July',6,'August',7,'September',8,'October',9,'November',10,'December',11);
+    my @d = split(/ |:/,$_[0]);
+    my ($hour,$min,$sec,$day,$month,$year) = ($d[0],$d[1],$d[2],$d[4],$m{$d[5]},$d[6]);
+    #print "<!-- $hour.$min.$sec.$day.$month.$year -->\n";
+    my $test_t = timegm($sec,$min,$hour,$day,$month,$year);
+    my $age = time-$test_t;
+    my $age_days = $age/(60*60*24);
+    #print "<!-- $age_days days old -->\n";
+    my $age_html = "<font>";
+    if ($age_days <= 2) { }
+    elsif ($age_days <= 14) { $age_html = "<font color=\"#FF9900\">"; }
+    else { $age_html = "<font color=\"#FF0000\">"; }
+    if ($age_days <= 1) { $age_html = $age_html."today"; }
+    elsif ($age_days <= 2) { $age_html = $age_html."yesterday"; }
+    elsif ($age_days < 14) { my $days = int $age_days; $age_html = $age_html.$days." days"; }
+    elsif ($age_days < 7*8) { my $weeks = int $age_days/7; $age_html = $age_html.$weeks." weeks"; }
+    else { my $months = int $age_days/28; $age_html = $age_html.$months." months"; }
+    return $age_html."</font>";
+}
+
+#~ foreach my $k (sort keys %ENV)
+#~ {
+    #~ print "<!-- $k = $ENV{$k} -->\n";
+#~ }
+my $logdir = "$ENV{PWD}";
+#~ my $logdir = "C:\\CVSROOTs\\Boost\\boost\\status";
+opendir LOGS, "$logdir";
+my @logs = grep /.*links[^.]*\.html$/, readdir LOGS;
+closedir LOGS;
+my @bgcolor = ( "bgcolor=\"#EEEEFF\"", "" );
+my $row = 0;
+print "<table>\n";
+print "<tr>\n",
+    "<th align=\"left\" bgcolor=\"#DDDDDD\">Platform</th>\n",
+    "<th align=\"left\" bgcolor=\"#DDDDDD\">Run Date</th>\n",
+    "<th align=\"left\" bgcolor=\"#DDDDDD\">Age</th>\n",
+    "<th align=\"left\" bgcolor=\"#DDDDDD\">Compilers</th>\n",
+    "<th align=\"left\" bgcolor=\"#DDDDDD\">Pass</th>\n",
+    "<th align=\"left\" bgcolor=\"#DDDDDD\">Fail</th>\n",
+    "</tr>\n";
+foreach $l (sort { lc($a) cmp lc($b) } @logs)
+{
+    my $log = $l;
+    $log =~ s/-links//s;
+    my ($spec) = ($log =~ /cs-([^\.]+)/);
+    my $fh = new FileHandle;
+    if ($fh->open("<$logdir/$log"))
+    {
+        my $content = join('',$fh->getlines());
+        $fh->close;
+        my ($status) = ($content =~ /(<h1>Compiler(.(?!<\/td>))+.)/si);
+        my ($platform) = ($status =~ /Status: ([^<]+)/si);
+        my ($run_date) = ($status =~ /Date:<\/b> ([^<]+)/si);
+        $run_date =~ s/, /<br>/g;
+        my ($compilers) = ($content =~ /Test Type<\/a><\/t[dh]>((.(?!<\/tr>))+.)/si);
+        if ($compilers eq "") { next; }
+        $compilers =~ s/-<br>//g;
+        $compilers =~ s/<\/td>//g;
+        my @compiler = ($compilers =~ /<td>(.*)$/gim);
+        my $count = @compiler;
+        my @results = ($content =~ /(>Pass<|>Warn<|>Fail<|>Missing<)/gi);
+        my $test_count = (scalar @results)/$count;
+        my @pass = map { 0 } (1..$count);
+        my @warn = map { 0 } (1..$count);
+        my @fail = map { 0 } (1..$count);
+        my @missing = map { 0 } (1..$count);
+        my @total = map { 0 } (1..$count);
+        #~ print "<!-- ",
+            #~ "pass = ",join(',',@pass)," ",
+            #~ "warn = ",join(',',@warn)," ",
+            #~ "fail = ",join(',',@fail)," ",
+            #~ "missing = ",join(',',@missing)," ",
+            #~ "total = ",join(',',@total)," ",
+            #~ " -->\n";
+        for my $t (1..$test_count)
+        {
+            my $r0 = (($t-1)*$count);
+            my $r1 = (($t-1)*$count+$count-1);
+            my @r = @results[(($t-1)*$count)..(($t-1)*$count+$count-1)];
+            #~ print "<!-- ",
+                #~ "result = ",join(',',@r)," ",
+                #~ "range = ",$r0,"..",$r1," (",(scalar @results),")",
+                #~ " -->\n";
+            for my $c (1..$count)
+            {
+                if ($r[$c-1] =~ /Pass/i) { ++$pass[$c-1]; }
+                elsif ($r[$c-1] =~ /Warn/i) { ++$warn[$c-1]; }
+                elsif ($r[$c-1] =~ /Fail/i) { ++$fail[$c-1]; }
+                elsif ($r[$c-1] =~ /Missing/i) { ++$missing[$c-1]; }
+                ++$total[$c-1];
+            }
+        }
+        #~ print "<!-- ",
+            #~ "pass = ",join(',',@pass)," ",
+            #~ "warn = ",join(',',@warn)," ",
+            #~ "fail = ",join(',',@fail)," ",
+            #~ "missing = ",join(',',@missing)," ",
+            #~ "total = ",join(',',@total)," ",
+            #~ " -->\n";
+        for my $comp (1..(scalar @compiler))
+        {
+            my @lines = split(/<br>/,$compiler[$comp-1]);
+            if (@lines > 2) { $compiler[$comp-1] = join(' ',@lines[0..(scalar @lines)-2])."<br>".$lines[(scalar @lines)-1]; }
+        }
+        print
+            "<tr>\n",
+            "<td rowspan=\"$count\" valign=\"top\"><font size=\"+1\">$platform</font><br>(<a href=\"./$log\">$spec</a>)</td>\n",
+            "<td rowspan=\"$count\" valign=\"top\">",$run_date,"</td>\n",
+            "<td rowspan=\"$count\" valign=\"top\">",age_info($run_date),"</td>\n",
+            "<td valign=\"top\" ",$bgcolor[$row],">",$compiler[0],"</td>\n",
+            "<td valign=\"top\" ",$bgcolor[$row],">",result_info_pass("#000000",$pass[0],$warn[0],$fail[0],$missing[0]),"</td>\n",
+            "<td valign=\"top\" ",$bgcolor[$row],">",result_info_fail("#FF0000",$pass[0],$warn[0],$fail[0],$missing[0]),"</td>\n",
+            "</tr>\n";
+        $row = ($row+1)%2;
+        foreach my $c (1..($count-1))
+        {
+            print
+                "<tr>\n",
+                "<td valign=\"top\" ",$bgcolor[$row],">",$compiler[$c],"</td>\n",
+                "<td valign=\"top\" ",$bgcolor[$row],">",result_info_pass("#000000",$pass[$c],$warn[$c],$fail[$c],$missing[$c]),"</td>\n",
+                "<td valign=\"top\" ",$bgcolor[$row],">",result_info_fail("#FF0000",$pass[$c],$warn[$c],$fail[$c],$missing[$c]),"</td>\n",
+                "</tr>\n";
+            $row = ($row+1)%2;
+        }
+        print
+            "<tr>\n",
+            "<td colspan=\"7\"><hr size=\"1\" noshade></td>\n",
+            "</tr>\n";
+    }
+}
+print "</table>\n";

+ 191 - 0
regression/run_tests.sh

@@ -0,0 +1,191 @@
+#!/bin/sh
+#
+# shell script for running the boost regression test suite and generating
+# a html table of results.
+
+# Set the following variables to configure the operation. Variables you
+# should set, i.e. usually required are listed first. Optional variables
+# have reasonable defaults for most situations.
+
+
+### THESE SHOULD BE CHANGED!
+
+#
+# "boost_root" points to the root of you boost installation:
+# This can be either a non-exitent directory or an already complete Boost
+# source tree.
+#
+boost_root="$HOME/CVSROOTs/Boost/boost_regression"
+
+#
+# Wether to fetch the most current Boost code from CVS (yes/no):
+# There are two contexts to use this script in: on an active Boost CVS
+# tree, and on a fresh Boost CVS tree. If "yes" is specified here an attempt
+# to fetch the latest CVS Boost files is made. For an active Boost CVS
+# the CVS connection information is used. If an empty tree is detected
+# the code is fetched with the anonymous read only information.
+#
+cvs_update=no
+
+#
+# "test_tools" are the Boost.Build toolsets to use for building and running the
+# regression tests. Specify a space separated list, of the Boost.Build toolsets.
+# Each will be built and tested in sequence.
+#
+test_tools=gcc
+
+#
+# "toolset" is the Boost.Build toolset to use for building the helper programs.
+# This is usually different than the toolsets one is testing. And this is
+# normally a toolset that corresponds to the compiler built into your platform.
+#
+toolset=gcc
+
+#
+# "comment_path" is the path to an html-file describing the test environment.
+# The content of this file will be embedded in the status pages being produced.
+#
+comment_path="$boost_root/../regression_comment.html"
+#
+# "test_dir" is the relative path to the directory to run the tests in,
+# defaults to "status" and runs all the tests, but could be a sub-directory
+# for example "libs/regex/test" to run the regex tests alone.
+#
+test_dir="status"
+
+
+### DEFAULTS ARE OK FOR THESE.
+
+#
+# "exe_suffix" the suffix used by exectable files:
+# In case your platform requires use of a special suffix for executables specify
+# it here, including the "." if needed. This should not be needed even in Windows
+# like platforms as they will execute without the suffix anyway.
+#
+exe_suffix=
+
+#
+# "bjam" points to your built bjam executable:
+# The location of the binary for running bjam. The default should work
+# under most circumstances.
+#
+bjam="$boost_root/tools/jam/src/bin/bjam$exe_suffix"
+
+#
+# "process_jam_log", and "compiler_status" paths to built helper programs:
+# The location of the executables of the regression help programs. These
+# are built locally so the default should work in most situations.
+#
+process_jam_log="$boost_root/dist/bin/process_jam_log$exe_suffix"
+compiler_status="$boost_root/dist/bin/compiler_status$exe_suffix"
+
+#
+# "boost_build_path" can point to additional locations to find toolset files.
+#
+boost_build_path="$HOME/.boost-build"
+
+
+### NO MORE CONFIGURABLE PARTS.
+
+#
+# Some setup.
+#
+boost_dir=`basename "$boost_root"`
+if test -n "${BOOST_BUILD_PATH}" ; then
+    BOOST_BUILD_PATH="$boost_build_path:$BOOST_BUILD_PATH"
+else
+    BOOST_BUILD_PATH="$boost_build_path"
+fi
+export BOOST_BUILD_PATH
+
+#
+# STEP 0:
+#
+# Get the source code:
+#
+if test ! -d "$boost_root" ; then
+    mkdir -p "$boost_root"
+    if test $? -ne 0 ; then
+        echo "creation of $boost_root directory failed."
+        exit 256
+    fi
+fi
+if test $cvs_update = yes ; then
+    echo fetching Boost:
+    echo "/1 :pserver:anonymous@cvs.sourceforge.net:2401/cvsroot/boost A" >> "$HOME/.cvspass"
+    cat "$HOME/.cvspass" | sort | uniq > "$HOME/.cvspass"
+    cd `dirname "$boost_root"`
+    if test -f boost/CVS/Root ; then
+        cvs -z3 -d `cat "$boost_dir/CVS/Root"` co -d "$boost_dir" boost
+    else
+        cvs -z3 -d :pserver:anonymous@cvs.sourceforge.net:2401/cvsroot/boost co -d "$boost_dir" boost
+    fi
+fi
+
+#
+# STEP 1:
+# rebuild bjam if required:
+#
+echo building bjam:
+cd "$boost_root/tools/jam/src" && \
+LOCATE_TARGET=bin sh ./build.sh
+if test $? != 0 ; then
+    echo "bjam build failed."
+    exit 256
+fi
+
+#
+# STEP 2:
+# rebuild the regression test helper programs if required:
+#
+echo building regression test helper programs:
+cd "$boost_root/tools/regression/build" && \
+"$bjam" $toolset release
+if test $? != 0 ; then
+    echo "helper program build failed."
+    exit 256
+fi
+
+#
+# STEP 5:
+# repeat steps 3 and 4 for each additional toolset:
+#
+for tool in $test_tools ; do
+
+#
+# STEP 3:
+# run the regression tests:
+#
+echo running the $tool regression tests:
+cd "$boost_root/$test_dir"
+"$bjam" $tool --dump-tests 2>&1 | tee regress.log
+
+#
+# STEP 4:
+# post process the results:
+#
+echo processing the regression test results for $tool:
+cat regress.log | "$process_jam_log" --v2
+if test $? != 0 ; then
+    echo "Failed regression log post processing."
+    exit 256
+fi
+
+done
+
+#
+# STEP 6:
+# create the html table:
+#
+uname=`uname`
+echo generating html tables:
+"$compiler_status" --v2  --comment "$comment_path" "$boost_root" cs-$uname.html cs-$uname-links.html
+if test $? != 0 ; then
+    echo "Failed HTML result table generation."
+    exit 256
+fi
+
+echo "done!"
+
+
+

+ 21 - 0
regression/test/Jamfile.v2

@@ -0,0 +1,21 @@
+test-suite testlib :
+    [ compile-fail compile-fail~fail.cpp ]
+    [ compile-fail compile-fail~pass.cpp ]
+    [ compile compile~fail.cpp  ]
+    [ compile compile~pass.cpp  ]
+    [ compile compile~warn.cpp  ]
+    [ link link~fail.cpp ]
+    [ link link~pass.cpp ]
+    [ link-fail link-fail~fail.cpp ]
+    [ link-fail link-fail~pass.cpp ]
+    [ run-fail run-fail~compile-fail.cpp ]
+    [ run-fail run-fail~fail-warn.cpp ]
+    [ run-fail run-fail~fail.cpp ]
+    [ run-fail run-fail~pass.cpp ]
+    [ run run~fail.cpp ]
+    [ run run~note.cpp ]
+    [ run run~pass.cpp ]
+    [ run run~warn-note.cpp ]
+    [ run run~warn.cpp ]
+
+    ;

+ 10 - 0
regression/test/compile-fail~fail.cpp

@@ -0,0 +1,10 @@
+//  (C) Copyright Beman Dawes 2003.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+//  Test naming convention: the portion of the name before the tilde ("~")
+//  identifies the bjam test type. The portion after the tilde
+//  identifies the correct result to be reported by compiler_status.
+
+int main() { return 0; }
+

+ 9 - 0
regression/test/compile-fail~pass.cpp

@@ -0,0 +1,9 @@
+//  (C) Copyright Beman Dawes 2003.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+//  Test naming convention: the portion of the name before the tilde ("~")
+//  identifies the bjam test type. The portion after the tilde
+//  identifies the correct result to be reported by compiler_status.
+
+#error example of a compile failure

+ 9 - 0
regression/test/compile~fail.cpp

@@ -0,0 +1,9 @@
+//  (C) Copyright Beman Dawes 2003.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+//  Test naming convention: the portion of the name before the tilde ("~")
+//  identifies the bjam test type. The portion after the tilde
+//  identifies the correct result to be reported by compiler_status.
+
+#error example of a compile failure

+ 9 - 0
regression/test/compile~pass.cpp

@@ -0,0 +1,9 @@
+//  (C) Copyright Beman Dawes 2003.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+//  Test naming convention: the portion of the name before the tilde ("~")
+//  identifies the bjam test type. The portion after the tilde
+//  identifies the correct result to be reported by compiler_status.
+
+int main() { return 0; }

+ 18 - 0
regression/test/compile~warn.cpp

@@ -0,0 +1,18 @@
+//  (C) Copyright Beman Dawes 2003.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+//  Test naming convention: the portion of the name before the tilde ("~")
+//  identifies the bjam test type. The portion after the tilde
+//  identifies the correct result to be reported by compiler_status.
+
+// provoke one or more compiler warnings
+
+int main(int argc, char * argv[] )
+{
+  short s;
+  unsigned long ul;
+  s = s & ul; // warning from many compilers
+  if ( s == ul ) {} // warning from GCC
+  return 0;
+}

+ 9 - 0
regression/test/run-fail~compile-fail.cpp

@@ -0,0 +1,9 @@
+//  (C) Copyright Beman Dawes 2003.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+//  Test naming convention: the portion of the name before the tilde ("~")
+//  identifies the bjam test type. The portion after the tilde
+//  identifies the correct result to be reported by compiler_status.
+
+#error example of a compile failure

+ 16 - 0
regression/test/run-fail~fail-warn.cpp

@@ -0,0 +1,16 @@
+//  (C) Copyright Beman Dawes 2003.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+//  Test naming convention: the portion of the name before the tilde ("~")
+//  identifies the bjam test type. The portion after the tilde
+//  identifies the correct result to be reported by compiler_status.
+
+int main()
+{
+  short s;
+  unsigned long ul;
+  s = s & ul; // warning from many compilers
+  if ( s == ul ) {} // warning from GCC
+  return 0;
+}

+ 12 - 0
regression/test/run-fail~fail.cpp

@@ -0,0 +1,12 @@
+//  (C) Copyright Beman Dawes 2003.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+//  Test naming convention: the portion of the name before the tilde ("~")
+//  identifies the bjam test type. The portion after the tilde
+//  identifies the correct result to be reported by compiler_status.
+
+int main()
+{
+  return 0;
+}

+ 15 - 0
regression/test/run-fail~pass.cpp

@@ -0,0 +1,15 @@
+//  (C) Copyright Beman Dawes 2003.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+//  Test naming convention: the portion of the name before the tilde ("~")
+//  identifies the bjam test type. The portion after the tilde
+//  identifies the correct result to be reported by compiler_status.
+
+#include <iostream>
+
+int main()
+{
+  std::cout << "example of output from a run-time failure\n";
+  return 1;
+}

+ 20 - 0
regression/test/run-fail~warn.cpp

@@ -0,0 +1,20 @@
+//  (C) Copyright Beman Dawes 2003.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+//  Test naming convention: the portion of the name before the tilde ("~")
+//  identifies the bjam test type. The portion after the tilde
+//  identifies the correct result to be reported by compiler_status.
+
+#include <iostream>
+
+int main()
+{
+  short s;
+  unsigned long ul;
+  s = s & ul; // warning from many compilers
+  if ( s == ul ) {} // warning from GCC
+
+  std::cout << "example of output from a run-time failure\n";
+  return 1;
+}

+ 9 - 0
regression/test/run~compile-fail.cpp

@@ -0,0 +1,9 @@
+//  (C) Copyright Beman Dawes 2003.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+//  Test naming convention: the portion of the name before the tilde ("~")
+//  identifies the bjam test type. The portion after the tilde
+//  identifies the correct result to be reported by compiler_status.
+
+#error example of a compile failure

+ 17 - 0
regression/test/run~fail-note.cpp

@@ -0,0 +1,17 @@
+//  (C) Copyright Beman Dawes 2003.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+//  Test naming convention: the portion of the name before the tilde ("~")
+//  identifies the bjam test type. The portion after the tilde
+//  identifies the correct result to be reported by compiler_status.
+
+#include <iostream>
+
+int main()
+{
+  std::cout << "example of output before a <note> line\n";
+  std::cout << "<note>\n";
+  std::cout << "example of output after a <note> line\n";
+  return 1;
+}

+ 20 - 0
regression/test/run~fail-warn.cpp

@@ -0,0 +1,20 @@
+//  (C) Copyright Beman Dawes 2003.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+//  Test naming convention: the portion of the name before the tilde ("~")
+//  identifies the bjam test type. The portion after the tilde
+//  identifies the correct result to be reported by compiler_status.
+
+#include <iostream>
+
+int main()
+{
+  short s;
+  unsigned long ul;
+  s = s & ul; // warning from many compilers
+  if ( s == ul ) {} // warning from GCC
+
+  std::cout << "example of output from a run-time failure\n";
+  return 1;
+}

+ 14 - 0
regression/test/run~fail.cpp

@@ -0,0 +1,14 @@
+//  (C) Copyright Beman Dawes 2003.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+//  Test naming convention: the portion of the name before the tilde ("~")
+//  identifies the bjam test type. The portion after the tilde
+//  identifies the correct result to be reported by compiler_status.
+
+#include <iostream>
+
+int main()
+{
+  return 1;
+}

+ 17 - 0
regression/test/run~note.cpp

@@ -0,0 +1,17 @@
+//  (C) Copyright Beman Dawes 2003.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+//  Test naming convention: the portion of the name before the tilde ("~")
+//  identifies the bjam test type. The portion after the tilde
+//  identifies the correct result to be reported by compiler_status.
+
+#include <iostream>
+
+int main()
+{
+  std::cout << "example of output before a <note> line\n";
+  std::cout << "<note>\n";
+  std::cout << "example of output after a <note> line\n";
+  return 0;
+}

+ 12 - 0
regression/test/run~pass.cpp

@@ -0,0 +1,12 @@
+//  (C) Copyright Beman Dawes 2003.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+//  Test naming convention: the portion of the name before the tilde ("~")
+//  identifies the bjam test type. The portion after the tilde
+//  identifies the correct result to be reported by compiler_status.
+
+int main()
+{
+  return 0;
+}

+ 24 - 0
regression/test/run~warn-note.cpp

@@ -0,0 +1,24 @@
+//  (C) Copyright Beman Dawes 2003.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+//  Test naming convention: the portion of the name before the tilde ("~")
+//  identifies the bjam test type. The portion after the tilde
+//  identifies the correct result to be reported by compiler_status.
+
+#include <iostream>
+
+int main()
+{
+  std::cout << "example of output before a <note> line\n";
+  std::cout << "<note>\n";
+  std::cout << "example of output after a <note> line\n";
+
+  // provoke a compiler warning to make sure <note> takes priority over
+  // a warning, but neither is lost from status reporting links HTML.
+  short s;
+  unsigned long ul;
+  s = s & ul; // warning from many compilers
+  if ( s == ul ) {} // warning from GCC
+  return 0;
+}

+ 18 - 0
regression/test/run~warn.cpp

@@ -0,0 +1,18 @@
+//  (C) Copyright Beman Dawes 2003.  Distributed under the Boost
+//  Software License, Version 1.0. (See accompanying file
+//  LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+//  Test naming convention: the portion of the name before the tilde ("~")
+//  identifies the bjam test type. The portion after the tilde
+//  identifies the correct result to be reported by compiler_status.
+
+// provoke one or more compiler warnings
+
+int main(int argc, char * argv[] )
+{
+  short s;
+  unsigned long ul;
+  s = s & ul; // warning from many compilers
+  if ( s == ul ) {} // warning from GCC
+  return 0;
+}

+ 11 - 0
regression/test/test-boost-build/ignored_rc/ignored_rc.jam

@@ -0,0 +1,11 @@
+rule failure
+   {
+   }
+
+actions failure
+   {
+   dir _
+   echo a
+   }
+
+failure f ;

+ 12 - 0
regression/test/test-boost-build/ignored_rc/recognized_rc.jam

@@ -0,0 +1,12 @@
+rule failure
+   {
+   }
+
+actions failure
+   {
+   dir _
+   if errorlevel 1 exit %errorlevel%
+   echo a
+   }
+
+failure f ;

+ 9 - 0
regression/test/test-boost-build/missing_dependencies/Jamfile.v2

@@ -0,0 +1,9 @@
+project 
+    : requirements
+      <library>/boost/filesystem//boost_filesystem
+      <define>BOOST_ALL_NO_LIB
+    ;
+
+   test-suite "missing_dependencies" :
+       [ run test.cpp  lib//<link>static ]                  
+       ;

+ 7 - 0
regression/test/test-boost-build/missing_dependencies/lib/Jamfile.v2

@@ -0,0 +1,7 @@
+SOURCES =
+    lib ;
+
+lib lib
+    :
+    $(SOURCES).cpp
+    ;

+ 1 - 0
regression/test/test-boost-build/missing_dependencies/lib/lib.cpp

@@ -0,0 +1 @@
+#error

+ 1 - 0
regression/test/test-boost-build/missing_dependencies/test.cpp

@@ -0,0 +1 @@
+int main() { return 0; }

+ 1 - 0
regression/test/test-cases/Huber2629/.cvsignore

@@ -0,0 +1 @@
+actual

+ 36 - 0
regression/test/test-cases/Huber2629/bjam.log

@@ -0,0 +1,36 @@
+boost-test(RUN) "statechart/DllTestNative" : "libs/statechart/test/TuTestMain.cpp"
+boost-test(RUN) "statechart/DllTestNormal" : "libs/statechart/test/TuTestMain.cpp"
+
+compile-c-c++ ..\..\..\bin.v2\libs\statechart\test\DllTestNormal.test\msvc-7.1\debug\threading-multi\TuTestMain.obj
+TuTestMain.cpp
+c:\Users\Misha\Stuff\boost\HEAD\boost\libs\statechart\test\TuTest.hpp(36) : warning C4275: non dll-interface class 'boost::statechart::event_base' used as base for dll-interface class 'boost::statechart::detail::rtti_policy::rtti_derived_type<MostDerived,Base>'
+        with
+        [
+            MostDerived=EvX,
+            Base=boost::statechart::event_base
+        ]
+        ..\..\..\boost\statechart\event_base.hpp(49) : see declaration of 'boost::statechart::event_base'
+compile-c-c++ ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLibTuTest.obj
+TuTest.cpp
+c:\Users\Misha\Stuff\boost\HEAD\boost\libs\statechart\test\TuTest.hpp(36) : warning C4275: non dll-interface class 'boost::statechart::event_base' used as base for dll-interface class 'boost::statechart::detail::rtti_policy::rtti_derived_type<MostDerived,Base>'
+        with
+        [
+            MostDerived=EvX,
+            Base=boost::statechart::event_base
+        ]
+        ..\..\..\boost\statechart\event_base.hpp(49) : see declaration of 'boost::statechart::event_base'
+msvc.link.dll ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.lib
+   Creating library ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.lib and object ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.exp
+
+        call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat"  >nul
+link /NOLOGO /INCREMENTAL:NO /DLL /DEBUG /subsystem:console /out:"..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll" /IMPLIB:"..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.lib"    @"..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll.rsp"
+        if %errorlevel% 1 exit %errorlevel%
+        if exist "..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll.manifest" (
+            mt -nologo -manifest "..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll.manifest" "-outputresource:..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll;2"
+        )
+    
+...failed msvc.link.dll ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.lib...
+...removing ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll
+...removing ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.lib
+...skipped <p..\..\..\bin.v2\libs\statechart\test\DllTestNormal.test\msvc-7.1\debug\threading-multi>DllTestNormal.exe for lack of <p..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi>DllTestNormalLib-vc71-mt-gd-1_35.lib...
+...skipped <p..\..\..\bin.v2\libs\statechart\test\DllTestNormal.test\msvc-7.1\debug\threading-multi>DllTestNormal.run for lack of <p..\..\..\bin.v2\libs\statechart\test\DllTestNormal.test\msvc-7.1\debug\threading-multi>DllTestNormal.exe...

+ 27 - 0
regression/test/test-cases/Huber2629/expected/results.xml

@@ -0,0 +1,27 @@
+<test-log library="statechart" test-name="DllTestNormal" test-type="run" test-program="libs/statechart/test/TuTestMain.cpp" target-directory="bin.v2/libs/statechart/test/DllTestNormal.test/msvc-7.1/debug/threading-multi" toolset="msvc-7.1" show-run-output="false">
+<lib result="fail" timestamp="">../../bin.v2/libs/statechart/test/msvc-7.1/debug/threading-multi</lib>
+</test-log>
+
+<test-log library="statechart" test-name="" test-type="" test-program="" target-directory="bin.v2/libs/statechart/test/msvc-7.1/debug/threading-multi" toolset="" show-run-output="true">
+<compile result="succeed" timestamp="">
+TuTest.cpp
+c:\Users\Misha\Stuff\boost\HEAD\boost\libs\statechart\test\TuTest.hpp(36) : warning C4275: non dll-interface class 'boost::statechart::event_base' used as base for dll-interface class 'boost::statechart::detail::rtti_policy::rtti_derived_type&lt;MostDerived,Base&gt;'
+        with
+        [
+            MostDerived=EvX,
+            Base=boost::statechart::event_base
+        ]
+        ..\..\..\boost\statechart\event_base.hpp(49) : see declaration of 'boost::statechart::event_base'
+</compile>
+<link result="fail" timestamp="">
+   Creating library ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.lib and object ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.exp
+
+        call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat"  &gt;nul
+link /NOLOGO /INCREMENTAL:NO /DLL /DEBUG /subsystem:console /out:"..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll" /IMPLIB:"..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.lib"    @"..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll.rsp"
+        if %errorlevel% 1 exit %errorlevel%
+        if exist "..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll.manifest" (
+            mt -nologo -manifest "..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll.manifest" "-outputresource:..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll;2"
+        )
+    
+</link>
+</test-log>

+ 1 - 0
regression/test/test-cases/general/.cvsignore

@@ -0,0 +1 @@
+actual

+ 325 - 0
regression/test/test-cases/general/bjam.log

@@ -0,0 +1,325 @@
+locate-root "..\..\..\bin.v2"
+C:\Users\Misha\Stuff\boost\HEAD\boost\tools\regression\test>C:\Users\Misha\Stuff\boost\HEAD\bin\..\boost\tools\jam\src\bin.ntx86\bjam.exe --dump-tests --v2 msvc-7.1 "-sBOOST_BUILD_PATH=C:\Users\Misha\Stuff\boost\HEAD\bin\.." "-sBOOST_ROOT="C:\Users\Misha\Stuff\boost\HEAD\bin\..\boost"  
+warning: Python location is not configured
+warning: the Boost.Python library won't be built
+Building Boost.Regex with the optional Unicode/ICU support disabled.
+Please refer to the Boost.Regex documentation for more information
+(and if you don't know what ICU is then you probably don't need it).
+boost-test(RUN) "testlib/run~warn" : "tools/regression/test/run~warn.cpp"
+boost-test(RUN) "testlib/run~warn-note" : "tools/regression/test/run~warn-note.cpp"
+boost-test(RUN) "testlib/run~pass" : "tools/regression/test/run~pass.cpp"
+boost-test(RUN) "testlib/run~note" : "tools/regression/test/run~note.cpp"
+boost-test(RUN) "testlib/run~fail" : "tools/regression/test/run~fail.cpp"
+boost-test(RUN_FAIL) "testlib/run-fail~pass" : "tools/regression/test/run-fail~pass.cpp"
+boost-test(RUN_FAIL) "testlib/run-fail~fail" : "tools/regression/test/run-fail~fail.cpp"
+boost-test(RUN_FAIL) "testlib/run-fail~fail-warn" : "tools/regression/test/run-fail~fail-warn.cpp"
+boost-test(RUN_FAIL) "testlib/run-fail~compile-fail" : "tools/regression/test/run-fail~compile-fail.cpp"
+boost-test(LINK_FAIL) "testlib/link-fail~pass" : "tools/regression/test/link-fail~pass.cpp"
+boost-test(LINK_FAIL) "testlib/link-fail~fail" : "tools/regression/test/link-fail~fail.cpp"
+boost-test(LINK) "testlib/link~pass" : "tools/regression/test/link~pass.cpp"
+boost-test(LINK) "testlib/link~fail" : "tools/regression/test/link~fail.cpp"
+boost-test(COMPILE) "testlib/compile~warn" : "tools/regression/test/compile~warn.cpp"
+boost-test(COMPILE) "testlib/compile~pass" : "tools/regression/test/compile~pass.cpp"
+boost-test(COMPILE) "testlib/compile~fail" : "tools/regression/test/compile~fail.cpp"
+boost-test(COMPILE_FAIL) "testlib/compile-fail~pass" : "tools/regression/test/compile-fail~pass.cpp"
+boost-test(COMPILE_FAIL) "testlib/compile-fail~fail" : "tools/regression/test/compile-fail~fail.cpp"
+...found 210 targets...
+...updating 157 targets...
+MkDir1 ..\..\..\bin.v2\tools\regression\test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile-fail~fail.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile-fail~fail.test\msvc-7.1
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile-fail~fail.test\msvc-7.1\debug
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile-fail~fail.test\msvc-7.1\debug\link-static
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile-fail~fail.test\msvc-7.1\debug\link-static\threading-multi
+compile-c-c++ ..\..\..\bin.v2\tools\regression\test\compile-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\compile-fail~fail.obj
+compile-fail~fail.cpp
+
+    call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat"  >nul
+cl /Zm800 -nologo @"..\..\..\bin.v2\tools\regression\test\compile-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\compile-fail~fail.obj.rsp"
+
+...failed compile-c-c++ ..\..\..\bin.v2\tools\regression\test\compile-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\compile-fail~fail.obj...
+...removing ..\..\..\bin.v2\tools\regression\test\compile-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\compile-fail~fail.obj
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile-fail~pass.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile-fail~pass.test\msvc-7.1
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile-fail~pass.test\msvc-7.1\debug
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile-fail~pass.test\msvc-7.1\debug\link-static
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile-fail~pass.test\msvc-7.1\debug\link-static\threading-multi
+compile-c-c++ ..\..\..\bin.v2\tools\regression\test\compile-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\compile-fail~pass.obj
+compile-fail~pass.cpp
+compile-fail~pass.cpp(9) : fatal error C1189: #error :  example of a compile failure
+(failed-as-expected) ..\..\..\bin.v2\tools\regression\test\compile-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\compile-fail~pass.obj
+**passed** ..\..\..\bin.v2\tools\regression\test\compile-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\compile-fail~pass.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~fail.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~fail.test\msvc-7.1
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~fail.test\msvc-7.1\debug
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~fail.test\msvc-7.1\debug\link-static
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~fail.test\msvc-7.1\debug\link-static\threading-multi
+compile-c-c++ ..\..\..\bin.v2\tools\regression\test\compile~fail.test\msvc-7.1\debug\link-static\threading-multi\compile~fail.obj
+compile~fail.cpp
+compile~fail.cpp(9) : fatal error C1189: #error :  example of a compile failure
+
+    call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat"  >nul
+cl /Zm800 -nologo @"..\..\..\bin.v2\tools\regression\test\compile~fail.test\msvc-7.1\debug\link-static\threading-multi\compile~fail.obj.rsp"
+
+...failed compile-c-c++ ..\..\..\bin.v2\tools\regression\test\compile~fail.test\msvc-7.1\debug\link-static\threading-multi\compile~fail.obj...
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~pass.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~pass.test\msvc-7.1
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~pass.test\msvc-7.1\debug
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~pass.test\msvc-7.1\debug\link-static
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~pass.test\msvc-7.1\debug\link-static\threading-multi
+compile-c-c++ ..\..\..\bin.v2\tools\regression\test\compile~pass.test\msvc-7.1\debug\link-static\threading-multi\compile~pass.obj
+compile~pass.cpp
+**passed** ..\..\..\bin.v2\tools\regression\test\compile~pass.test\msvc-7.1\debug\link-static\threading-multi\compile~pass.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~warn.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~warn.test\msvc-7.1
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~warn.test\msvc-7.1\debug
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~warn.test\msvc-7.1\debug\link-static
+MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~warn.test\msvc-7.1\debug\link-static\threading-multi
+compile-c-c++ ..\..\..\bin.v2\tools\regression\test\compile~warn.test\msvc-7.1\debug\link-static\threading-multi\compile~warn.obj
+compile~warn.cpp
+compile~warn.cpp(15) : warning C4244: '=' : conversion from 'unsigned long' to 'short', possible loss of data
+c:\users\misha\stuff\boost\head\boost\tools\regression\test\compile~warn.cpp(15) : warning C4700: local variable 'ul' used without having been initialized
+**passed** ..\..\..\bin.v2\tools\regression\test\compile~warn.test\msvc-7.1\debug\link-static\threading-multi\compile~warn.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\link~fail.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1
+MkDir1 ..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug
+MkDir1 ..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static
+MkDir1 ..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi
+compile-c-c++ ..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.obj
+link~fail.cpp
+msvc.link ..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe
+link~fail.obj : error LNK2019: unresolved external symbol "int __cdecl f(void)" (?f@@YAHXZ) referenced in function _main
+..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe : fatal error LNK1120: 1 unresolved externals
+
+        call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat"  >nul
+link /NOLOGO /INCREMENTAL:NO /DEBUG /subsystem:console /out:"..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe"   @"..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe.rsp"
+        if errorlevel 1 exit %errorlevel%
+        if exist "..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe.manifest" (
+            mt -nologo -manifest "..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe.manifest" "-outputresource:..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe;1"
+        )
+    
+...failed msvc.link ..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe...
+MkDir1 ..\..\..\bin.v2\tools\regression\test\link~pass.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\link~pass.test\msvc-7.1
+MkDir1 ..\..\..\bin.v2\tools\regression\test\link~pass.test\msvc-7.1\debug
+MkDir1 ..\..\..\bin.v2\tools\regression\test\link~pass.test\msvc-7.1\debug\link-static
+MkDir1 ..\..\..\bin.v2\tools\regression\test\link~pass.test\msvc-7.1\debug\link-static\threading-multi
+compile-c-c++ ..\..\..\bin.v2\tools\regression\test\link~pass.test\msvc-7.1\debug\link-static\threading-multi\link~pass.obj
+link~pass.cpp
+msvc.link ..\..\..\bin.v2\tools\regression\test\link~pass.test\msvc-7.1\debug\link-static\threading-multi\link~pass.exe
+**passed** ..\..\..\bin.v2\tools\regression\test\link~pass.test\msvc-7.1\debug\link-static\threading-multi\link~pass.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\link-fail~fail.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1
+MkDir1 ..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug
+MkDir1 ..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static
+MkDir1 ..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi
+compile-c-c++ ..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.obj
+link-fail~fail.cpp
+msvc.link ..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe
+
+        call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat"  >nul
+link /NOLOGO /INCREMENTAL:NO /DEBUG /subsystem:console /out:"..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe"   @"..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe.rsp"
+        if errorlevel 1 exit %errorlevel%
+        if exist "..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe.manifest" (
+            mt -nologo -manifest "..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe.manifest" "-outputresource:..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe;1"
+        )
+    
+...failed msvc.link ..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe...
+...removing ..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe
+MkDir1 ..\..\..\bin.v2\tools\regression\test\link-fail~pass.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\link-fail~pass.test\msvc-7.1
+MkDir1 ..\..\..\bin.v2\tools\regression\test\link-fail~pass.test\msvc-7.1\debug
+MkDir1 ..\..\..\bin.v2\tools\regression\test\link-fail~pass.test\msvc-7.1\debug\link-static
+MkDir1 ..\..\..\bin.v2\tools\regression\test\link-fail~pass.test\msvc-7.1\debug\link-static\threading-multi
+compile-c-c++ ..\..\..\bin.v2\tools\regression\test\link-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\link-fail~pass.obj
+link-fail~pass.cpp
+msvc.link ..\..\..\bin.v2\tools\regression\test\link-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\link-fail~pass.exe
+link-fail~pass.obj : error LNK2019: unresolved external symbol "int __cdecl f(void)" (?f@@YAHXZ) referenced in function _main
+..\..\..\bin.v2\tools\regression\test\link-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\link-fail~pass.exe : fatal error LNK1120: 1 unresolved externals
+(failed-as-expected) ..\..\..\bin.v2\tools\regression\test\link-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\link-fail~pass.exe
+**passed** ..\..\..\bin.v2\tools\regression\test\link-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\link-fail~pass.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test\msvc-7.1
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test\msvc-7.1\debug
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test\msvc-7.1\debug\link-static
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test\msvc-7.1\debug\link-static\threading-multi
+compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~compile-fail.obj
+run-fail~compile-fail.cpp
+run-fail~compile-fail.cpp(9) : fatal error C1189: #error :  example of a compile failure
+
+    call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat"  >nul
+cl /Zm800 -nologo @"..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~compile-fail.obj.rsp"
+
+...failed compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~compile-fail.obj...
+...skipped <p..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test\msvc-7.1\debug\link-static\threading-multi>run-fail~compile-fail.exe for lack of <p..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test\msvc-7.1\debug\link-static\threading-multi>run-fail~compile-fail.obj...
+...skipped <p..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test\msvc-7.1\debug\link-static\threading-multi>run-fail~compile-fail.run for lack of <p..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test\msvc-7.1\debug\link-static\threading-multi>run-fail~compile-fail.exe...
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi
+compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.obj
+run-fail~fail-warn.cpp
+run-fail~fail-warn.cpp(13) : warning C4244: '=' : conversion from 'unsigned long' to 'short', possible loss of data
+c:\users\misha\stuff\boost\head\boost\tools\regression\test\run-fail~fail-warn.cpp(13) : warning C4700: local variable 'ul' used without having been initialized
+msvc.link ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.exe
+testing.capture-output ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.run
+        1 file(s) copied.
+
+    
+     ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.exe   > ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.output 2>&1      
+    set status=%ERRORLEVEL%
+    echo. >> ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.output
+    echo EXIT STATUS: %status% >> ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.output
+    if %status% EQU 0 (
+        copy ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.output ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.run
+    )
+    set verbose=0
+    if %status% NEQ 0 (
+        set verbose=1
+    )
+    if %verbose% EQU 1 (
+        echo ====== BEGIN OUTPUT ======
+        type ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.output
+        echo ====== END OUTPUT ======
+    )    
+    exit %status%      
+
+...failed testing.capture-output ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.run...
+...removing ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.run
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi
+compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.obj
+run-fail~fail.cpp
+msvc.link ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.exe
+testing.capture-output ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.run
+        1 file(s) copied.
+
+    
+     ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.exe   > ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.output 2>&1      
+    set status=%ERRORLEVEL%
+    echo. >> ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.output
+    echo EXIT STATUS: %status% >> ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.output
+    if %status% EQU 0 (
+        copy ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.output ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.run
+    )
+    set verbose=0
+    if %status% NEQ 0 (
+        set verbose=1
+    )
+    if %verbose% EQU 1 (
+        echo ====== BEGIN OUTPUT ======
+        type ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.output
+        echo ====== END OUTPUT ======
+    )    
+    exit %status%      
+
+...failed testing.capture-output ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.run...
+...removing ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.run
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~pass.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~pass.test\msvc-7.1
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~pass.test\msvc-7.1\debug
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~pass.test\msvc-7.1\debug\link-static
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~pass.test\msvc-7.1\debug\link-static\threading-multi
+compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\run-fail~pass.obj
+run-fail~pass.cpp
+msvc.link ..\..\..\bin.v2\tools\regression\test\run-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\run-fail~pass.exe
+testing.capture-output ..\..\..\bin.v2\tools\regression\test\run-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\run-fail~pass.run
+====== BEGIN OUTPUT ======
+example of output from a run-time failure
+ 
+EXIT STATUS: 1 
+====== END OUTPUT ======
+
+    del /f /q "..\..\..\bin.v2\tools\regression\test\run-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\run-fail~pass.exe"
+
+...failed RmTemps ..\..\..\bin.v2\tools\regression\test\run-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\run-fail~pass.run...
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~fail.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug
+...on 100th target...
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi
+compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi\run~fail.obj
+run~fail.cpp
+msvc.link ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi\run~fail.exe
+testing.capture-output ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi\run~fail.run
+====== BEGIN OUTPUT ======
+ 
+EXIT STATUS: 1 
+====== END OUTPUT ======
+
+    
+     ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi\run~fail.exe   > ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi\run~fail.output 2>&1      
+    set status=%ERRORLEVEL%
+    echo. >> ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi\run~fail.output
+    echo EXIT STATUS: %status% >> ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi\run~fail.output
+    if %status% EQU 0 (
+        copy ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi\run~fail.output ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi\run~fail.run
+    )
+    set verbose=0
+    if %status% NEQ 0 (
+        set verbose=1
+    )
+    if %verbose% EQU 1 (
+        echo ====== BEGIN OUTPUT ======
+        type ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi\run~fail.output
+        echo ====== END OUTPUT ======
+    )    
+    exit %status%      
+
+...failed testing.capture-output ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi\run~fail.run...
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~note.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~note.test\msvc-7.1
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~note.test\msvc-7.1\debug
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~note.test\msvc-7.1\debug\link-static
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~note.test\msvc-7.1\debug\link-static\threading-multi
+compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run~note.test\msvc-7.1\debug\link-static\threading-multi\run~note.obj
+run~note.cpp
+msvc.link ..\..\..\bin.v2\tools\regression\test\run~note.test\msvc-7.1\debug\link-static\threading-multi\run~note.exe
+testing.capture-output ..\..\..\bin.v2\tools\regression\test\run~note.test\msvc-7.1\debug\link-static\threading-multi\run~note.run
+        1 file(s) copied.
+**passed** ..\..\..\bin.v2\tools\regression\test\run~note.test\msvc-7.1\debug\link-static\threading-multi\run~note.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi
+compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.obj
+run~pass.cpp
+msvc.link ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.exe
+testing.capture-output ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.run
+        1 file(s) copied.
+**passed** ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~warn-note.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~warn-note.test\msvc-7.1
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~warn-note.test\msvc-7.1\debug
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~warn-note.test\msvc-7.1\debug\link-static
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~warn-note.test\msvc-7.1\debug\link-static\threading-multi
+compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run~warn-note.test\msvc-7.1\debug\link-static\threading-multi\run~warn-note.obj
+run~warn-note.cpp
+run~warn-note.cpp(21) : warning C4244: '=' : conversion from 'unsigned long' to 'short', possible loss of data
+c:\users\misha\stuff\boost\head\boost\tools\regression\test\run~warn-note.cpp(21) : warning C4700: local variable 'ul' used without having been initialized
+msvc.link ..\..\..\bin.v2\tools\regression\test\run~warn-note.test\msvc-7.1\debug\link-static\threading-multi\run~warn-note.exe
+testing.capture-output ..\..\..\bin.v2\tools\regression\test\run~warn-note.test\msvc-7.1\debug\link-static\threading-multi\run~warn-note.run
+        1 file(s) copied.
+**passed** ..\..\..\bin.v2\tools\regression\test\run~warn-note.test\msvc-7.1\debug\link-static\threading-multi\run~warn-note.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~warn.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~warn.test\msvc-7.1
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~warn.test\msvc-7.1\debug
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~warn.test\msvc-7.1\debug\link-static
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~warn.test\msvc-7.1\debug\link-static\threading-multi
+compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run~warn.test\msvc-7.1\debug\link-static\threading-multi\run~warn.obj
+run~warn.cpp
+run~warn.cpp(15) : warning C4244: '=' : conversion from 'unsigned long' to 'short', possible loss of data
+c:\users\misha\stuff\boost\head\boost\tools\regression\test\run~warn.cpp(15) : warning C4700: local variable 'ul' used without having been initialized
+msvc.link ..\..\..\bin.v2\tools\regression\test\run~warn.test\msvc-7.1\debug\link-static\threading-multi\run~warn.exe
+testing.capture-output ..\..\..\bin.v2\tools\regression\test\run~warn.test\msvc-7.1\debug\link-static\threading-multi\run~warn.run
+        1 file(s) copied.
+**passed** ..\..\..\bin.v2\tools\regression\test\run~warn.test\msvc-7.1\debug\link-static\threading-multi\run~warn.test
+...failed updating 9 targets...
+...skipped 17 targets...
+...updated 131 targets...

+ 167 - 0
regression/test/test-cases/general/expected/results.xml

@@ -0,0 +1,167 @@
+<test-log library="" test-name="compile-fail~fail" test-type="" test-program="" target-directory="tools/regression/test/compile-fail~fail.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
+<compile result="fail" timestamp="xxx">
+compile-fail~fail.cpp
+
+    call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat"  &gt;nul
+cl /Zm800 -nologo @"C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\compile-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\compile-fail~fail.obj.rsp"
+
+</compile>
+</test-log>
+<test-log library="" test-name="compile-fail~pass" test-type="" test-program="" target-directory="tools/regression/test/compile-fail~pass.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
+<compile result="succeed" timestamp="xxx">
+compile-fail~pass.cpp
+compile-fail~pass.cpp(9) : fatal error C1189: #error :  example of a compile failure
+(failed-as-expected) C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\compile-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\compile-fail~pass.obj
+</compile>
+</test-log>
+<test-log library="" test-name="compile~fail" test-type="" test-program="" target-directory="tools/regression/test/compile~fail.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
+<compile result="fail" timestamp="xxx">
+compile~fail.cpp
+compile~fail.cpp(9) : fatal error C1189: #error :  example of a compile failure
+
+    call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat"  &gt;nul
+cl /Zm800 -nologo @"C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\compile~fail.test\msvc-7.1\debug\link-static\threading-multi\compile~fail.obj.rsp"
+
+</compile>
+</test-log>
+<test-log library="" test-name="compile~pass" test-type="" test-program="" target-directory="tools/regression/test/compile~pass.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
+<compile result="succeed" timestamp="xxx">
+compile~pass.cpp
+</compile>
+</test-log>
+<test-log library="" test-name="compile~warn" test-type="" test-program="" target-directory="tools/regression/test/compile~warn.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
+<compile result="succeed" timestamp="xxx">
+compile~warn.cpp
+compile~warn.cpp(15) : warning C4244: '=' : conversion from 'unsigned long' to 'short', possible loss of data
+c:\users\misha\stuff\boost\head\boost\tools\regression\test\compile~warn.cpp(15) : warning C4700: local variable 'ul' used without having been initialized
+</compile>
+</test-log>
+<test-log library="" test-name="link-fail~fail" test-type="" test-program="" target-directory="tools/regression/test/link-fail~fail.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
+<compile result="succeed" timestamp="xxx">
+link-fail~fail.cpp
+</compile>
+<link result="fail" timestamp="xxx">
+
+        call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat"  &gt;nul
+link /NOLOGO /INCREMENTAL:NO /DEBUG /subsystem:console /out:"C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe"   @"C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe.rsp"
+        if errorlevel 1 exit %errorlevel%
+        if exist "C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe.manifest" (
+            mt -nologo -manifest "C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe.manifest" "-outputresource:C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe;1"
+        )
+    
+</link>
+</test-log>
+<test-log library="" test-name="link-fail~pass" test-type="" test-program="" target-directory="tools/regression/test/link-fail~pass.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
+<compile result="succeed" timestamp="xxx">
+link-fail~pass.cpp
+</compile>
+<link result="succeed" timestamp="xxx">
+link-fail~pass.obj : error LNK2019: unresolved external symbol "int __cdecl f(void)" (?f@@YAHXZ) referenced in function _main
+C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\link-fail~pass.exe : fatal error LNK1120: 1 unresolved externals
+(failed-as-expected) C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\link-fail~pass.exe
+</link>
+</test-log>
+<test-log library="" test-name="link~fail" test-type="" test-program="" target-directory="tools/regression/test/link~fail.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
+<compile result="succeed" timestamp="xxx">
+link~fail.cpp
+</compile>
+<link result="fail" timestamp="xxx">
+link~fail.obj : error LNK2019: unresolved external symbol "int __cdecl f(void)" (?f@@YAHXZ) referenced in function _main
+C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe : fatal error LNK1120: 1 unresolved externals
+
+        call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat"  &gt;nul
+link /NOLOGO /INCREMENTAL:NO /DEBUG /subsystem:console /out:"C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe"   @"C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe.rsp"
+        if errorlevel 1 exit %errorlevel%
+        if exist "C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe.manifest" (
+            mt -nologo -manifest "C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe.manifest" "-outputresource:C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe;1"
+        )
+    
+</link>
+</test-log>
+<test-log library="" test-name="link~pass" test-type="" test-program="" target-directory="tools/regression/test/link~pass.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
+<compile result="succeed" timestamp="xxx">
+link~pass.cpp
+</compile>
+<link result="succeed" timestamp="xxx">
+</link>
+</test-log>
+<test-log library="" test-name="run-fail~compile-fail" test-type="" test-program="" target-directory="tools/regression/test/run-fail~compile-fail.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
+<compile result="fail" timestamp="xxx">
+run-fail~compile-fail.cpp
+run-fail~compile-fail.cpp(9) : fatal error C1189: #error :  example of a compile failure
+
+    call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat"  &gt;nul
+cl /Zm800 -nologo @"C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\run-fail~compile-fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~compile-fail.obj.rsp"
+
+</compile>
+</test-log>
+<test-log library="" test-name="run-fail~fail-warn" test-type="" test-program="" target-directory="tools/regression/test/run-fail~fail-warn.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
+<compile result="succeed" timestamp="xxx">
+run-fail~fail-warn.cpp
+run-fail~fail-warn.cpp(13) : warning C4244: '=' : conversion from 'unsigned long' to 'short', possible loss of data
+c:\users\misha\stuff\boost\head\boost\tools\regression\test\run-fail~fail-warn.cpp(13) : warning C4700: local variable 'ul' used without having been initialized
+</compile>
+<link result="succeed" timestamp="xxx"></link>
+<run result="fail" timestamp="xxx">
+</run>
+</test-log>
+<test-log library="" test-name="run-fail~fail" test-type="" test-program="" target-directory="tools/regression/test/run-fail~fail.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
+<compile result="succeed" timestamp="xxx">
+run-fail~fail.cpp
+</compile>
+<link result="succeed" timestamp="xxx"></link>
+<run result="fail" timestamp="xxx">
+</run>
+</test-log>
+<test-log library="" test-name="run-fail~pass" test-type="" test-program="" target-directory="tools/regression/test/run-fail~pass.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
+<compile result="succeed" timestamp="xxx">
+run-fail~pass.cpp
+</compile>
+<link result="succeed" timestamp="xxx"></link>
+<run result="succeed" timestamp="xxx">
+</run>
+</test-log>
+<test-log library="" test-name="run~fail" test-type="" test-program="" target-directory="tools/regression/test/run~fail.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
+<compile result="succeed" timestamp="xxx">
+run~fail.cpp
+</compile>
+<link result="succeed" timestamp="xxx"></link>
+<run result="fail" timestamp="xxx">
+</run>
+</test-log>
+<test-log library="" test-name="run~note" test-type="" test-program="" target-directory="tools/regression/test/run~note.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
+<compile result="succeed" timestamp="xxx">
+run~note.cpp
+</compile>
+<link result="succeed" timestamp="xxx"></link>
+<run result="succeed" timestamp="xxx">
+</run>
+</test-log>
+<test-log library="" test-name="run~pass" test-type="" test-program="" target-directory="tools/regression/test/run~pass.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
+<compile result="succeed" timestamp="xxx">
+run~pass.cpp
+</compile>
+<link result="succeed" timestamp="xxx"></link>
+<run result="succeed" timestamp="xxx">
+</run>
+</test-log>
+<test-log library="" test-name="run~warn-note" test-type="" test-program="" target-directory="tools/regression/test/run~warn-note.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
+<compile result="succeed" timestamp="xxx">
+run~warn-note.cpp
+run~warn-note.cpp(21) : warning C4244: '=' : conversion from 'unsigned long' to 'short', possible loss of data
+c:\users\misha\stuff\boost\head\boost\tools\regression\test\run~warn-note.cpp(21) : warning C4700: local variable 'ul' used without having been initialized
+</compile>
+<link result="succeed" timestamp="xxx"></link>
+<run result="succeed" timestamp="xxx">
+</run>
+</test-log>
+<test-log library="" test-name="run~warn" test-type="" test-program="" target-directory="tools/regression/test/run~warn.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
+<compile result="succeed" timestamp="xxx">
+run~warn.cpp
+run~warn.cpp(15) : warning C4244: '=' : conversion from 'unsigned long' to 'short', possible loss of data
+c:\users\misha\stuff\boost\head\boost\tools\regression\test\run~warn.cpp(15) : warning C4700: local variable 'ul' used without having been initialized
+</compile>
+<link result="succeed" timestamp="xxx"></link>
+<run result="succeed" timestamp="xxx">
+</run>
+</test-log>

+ 33 - 0
regression/test/test-cases/incremental/bjam.log

@@ -0,0 +1,33 @@
+locate-root "..\..\..\bin.v2"
+C:\Users\Misha\Stuff\boost\HEAD\boost\tools\regression\test>C:\Users\Misha\Stuff\boost\HEAD\bin\..\boost\tools\jam\src\bin.ntx86\bjam.exe --dump-tests --v2 msvc-7.1 "-sBOOST_BUILD_PATH=C:\Users\Misha\Stuff\boost\HEAD\bin\.." "-sBOOST_ROOT="C:\Users\Misha\Stuff\boost\HEAD\bin\..\boost"  
+
+boost-test(RUN) "testlib/run~pass" : "tools/regression/test/run~pass.cpp"
+boost-test(RUN) "testlib/run~pass" : "tools/regression/test/run~pass2s.cpp"
+
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi
+compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.obj
+run~pass.cpp
+msvc.link ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.exe
+testing.capture-output ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.run
+        1 file(s) copied.
+**passed** ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.test
+
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass2.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi
+compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.obj
+run~pass2.cpp
+msvc.link ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.exe
+testing.capture-output ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.run
+        1 file(s) copied.
+**passed** ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.test
+
+...failed updating 9 targets...
+...skipped 17 targets...
+...updated 131 targets...

+ 38 - 0
regression/test/test-cases/incremental/bjam.log.1

@@ -0,0 +1,38 @@
+locate-root "..\..\..\bin.v2"
+C:\Users\Misha\Stuff\boost\HEAD\boost\tools\regression\test>C:\Users\Misha\Stuff\boost\HEAD\bin\..\boost\tools\jam\src\bin.ntx86\bjam.exe --dump-tests --v2 msvc-7.1 "-sBOOST_BUILD_PATH=C:\Users\Misha\Stuff\boost\HEAD\bin\.." "-sBOOST_ROOT="C:\Users\Misha\Stuff\boost\HEAD\bin\..\boost"  
+
+boost-test(RUN) "testlib/run~pass" : "tools/regression/test/run~pass.cpp"
+
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi
+compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.obj
+run~pass.cpp
+
+    call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat"  >nul
+cl /Zm800 -nologo @"..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.obj.rsp"
+
+...failed compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.obj...
+
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass2.test
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static
+MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi
+compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.obj
+run~pass2.cpp
+
+msvc.link ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.exe
+
+        call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat"  >nul
+link /NOLOGO /INCREMENTAL:NO /DEBUG /subsystem:console /out:"..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.exe"   @"..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.exe.rsp"
+        if errorlevel 1 exit %errorlevel%
+        if exist "..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.exe.manifest" (
+            mt -nologo -manifest "..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.exe.manifest" "-outputresource:..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.exe;1"
+        )
+    
+...failed msvc.link ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.exe...
+...removing ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.exe
+
+
+...failed updating 9 targets...
+...skipped 17 targets...
+...updated 131 targets...

+ 9 - 0
regression/test/test-cases/incremental/expected/results.xml

@@ -0,0 +1,9 @@
+<test-log library="" test-name="run~pass" test-type="" test-program="" target-directory="tools/regression/test/run~pass.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
+<compile result="fail" timestamp="xxx">
+run~pass.cpp
+
+    call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat"  &gt;nul
+cl /Zm800 -nologo @"C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\incremental\actual\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.obj.rsp"
+
+</compile>
+</test-log>

+ 10 - 0
regression/test/test.bat

@@ -0,0 +1,10 @@
+set TEST_LOCATE_ROOT=d:\temp
+
+echo Begin test processing...
+bjam --dump-tests "-sALL_LOCATE_TARGET=%TEST_LOCATE_ROOT%" %* >bjam.log 2>&1
+echo Begin log processing...
+process_jam_log %TEST_LOCATE_ROOT% <bjam.log
+start bjam.log
+echo Begin compiler status processing...
+compiler_status --locate-root %TEST_LOCATE_ROOT% %BOOST_ROOT% test_status.html test_links.html
+start test_status.html

+ 181 - 0
regression/test/test.py

@@ -0,0 +1,181 @@
+# Copyright (c) MetaCommunications, Inc. 2003-2005
+#
+# Distributed under the Boost Software License, Version 1.0. 
+# (See accompanying file LICENSE_1_0.txt or copy at 
+# http://www.boost.org/LICENSE_1_0.txt)
+
+import difflib
+import os
+import re
+import shutil
+import string
+import sys
+
+
+
+def scan_for_test_cases():
+    return [ os.path.join( "test-cases", x ) for x in os.listdir( "test-cases" ) if x != "CVS" ]
+
+def clean_dir( dir ):
+    if os.path.exists( dir ):
+        shutil.rmtree( dir )
+    os.makedirs( dir )
+
+def system( commands ):
+    if sys.platform == 'win32':
+        f = open( 'tmp.cmd', 'w' )
+        f.write( string.join( commands, '\n' ) )
+        f.close()
+        rc = os.system( 'tmp.cmd' )
+        os.unlink( 'tmp.cmd' )
+        return rc
+    else:
+        rc = os.system( '&&'.join( commands ) )
+        return rc
+
+def checked_system( commands, valid_return_codes = [ 0 ] ):
+    rc = system( commands ) 
+    if rc not in [ 0 ] + valid_return_codes:
+        raise Exception( 'Command sequence "%s" failed with return code %d' % ( commands, rc ) )
+    return rc
+
+def list_recursively( dir ):
+    r = []
+    for root, dirs, files in os.walk( dir, topdown=False ):
+        root = root[ len( dir ) + 1 : ]
+        r.extend( [ os.path.join( root, x ) for x in dirs ] )
+        r.extend( [ os.path.join( root, x ) for x in files ] )
+
+    return r
+
+def find_process_jam_log():
+    root = "../../../"
+    
+    for root, dirs, files in os.walk( os.path.join( root, "bin.v2" ), topdown=False ):
+        if "process_jam_log.exe" in files:
+            return os.path.abspath( os.path.normpath( os.path.join( root, "process_jam_log.exe" ) ) )
+        if "process_jam_log" in files:
+            return os.path.abspath( os.path.normpath( os.path.join( root, "process_jam_log" ) ) )
+    return None
+
+def process_jam_log( executable, file, locate_root, results_dir ):
+    args = []
+    args.append( executable )
+    # args.append( '--echo' )
+    args.append( '--create-directories' )
+    args.append( '--v2' )
+    args.append( locate_root )
+    args.append( '<' )
+    args.append( file )
+
+    cmd = " ".join( args )
+    print "Running process_jam_log (%s)" % cmd
+    checked_system( [ cmd ] )
+    
+
+def read_file( file_path ):
+    f = open( file_path )
+    try:
+        return f.read()
+    finally:
+        f.close()
+
+def remove_timestamps( log_lines ):
+    return [ re.sub( "timestamp=\"[^\"]+\"", "timestamp=\"\"", x ) for x in log_lines ]    
+
+def determine_locate_root( bjam_log ):
+    locate_root = None
+    f = open( 'bjam.log' )
+    try:
+        locate_root_re = re.compile( r'locate-root\s+"(.*)"' )
+        for l in f.readlines():
+            m = locate_root_re.match( l )
+            if m:
+                locate_root = m.group(1)
+                break
+    finally:
+        f.close()
+    return locate_root
+
+def read_file( path ):    
+    f = open( path )
+    try:
+        return f.read()
+    finally:
+        f.close()
+
+def read_file_lines( path ):    
+    f = open( path )
+    try:
+        return f.readlines()
+    finally:
+        f.close()
+
+def write_file( path, content ):    
+    f = open( path, 'w' )
+    try:
+        return f.write( content )
+    finally:
+        f.close()
+
+def write_file_lines( path, content ):    
+    f = open( path, 'w' )
+    try:
+        return f.writelines( content )
+    finally:
+        f.close()
+
+        
+def run_test_cases( test_cases ):
+    process_jam_log_executable = find_process_jam_log()
+    print 'Found process_jam_log: %s' % process_jam_log_executable
+    initial_dir = os.getcwd()
+    for test_case in test_cases:
+        os.chdir( initial_dir )
+        print 'Running test case "%s"' % test_case
+        os.chdir( test_case )
+        if os.path.exists( "expected" ):
+            locate_root = determine_locate_root( 'bjam.log' )
+            print 'locate_root: %s' % locate_root
+            
+            actual_results_dir = os.path.join( test_case, "actual" )
+            clean_dir( "actual" )
+            os.chdir( "actual" )
+            root = os.getcwd()
+            i = 0
+            while 1:
+                if i == 0:
+                    bjam_log_file = 'bjam.log'
+                else:
+                    bjam_log_file = 'bjam.log.%0d' % i
+                i += 1
+                print 'Looking for %s' % bjam_log_file
+                if not os.path.exists( os.path.join( '..', bjam_log_file ) ):
+                    print '    does not exists'
+                    break
+                print '    found'
+                write_file_lines(bjam_log_file.replace( 'bjam', 'bjam_' ), 
+                                 [ x.replace( locate_root, root  ) for x in read_file_lines( os.path.join( '..', bjam_log_file ) ) ]  )
+                
+                process_jam_log( executable = process_jam_log_executable
+                                 , results_dir = "."
+                                 , locate_root = root 
+                                 , file=bjam_log_file.replace( 'bjam', 'bjam_' ) )
+            
+            actual_content = list_recursively( "." )
+            actual_content.sort()
+            result_xml = []
+            for test_log in [ x for x in actual_content if os.path.splitext( x )[1] == '.xml' ]:
+                print 'reading %s' % test_log
+                result = [ re.sub( r'timestamp="(.*)"', 'timestamp="xxx"', x ) for x in read_file_lines( test_log ) ]
+                result_xml.extend( result )
+                
+            write_file_lines( 'results.xml', result_xml )
+            os.chdir( '..' )
+            assert read_file( 'expected/results.xml' ) == read_file( 'actual/results.xml' )
+            os.chdir( '..' )
+        else:
+            raise '   Test case "%s" doesn\'t contain the expected results directory ("expected" )' % ( test_case )
+        
+run_test_cases( scan_for_test_cases() )
+# print find_process_jam_log()

+ 833 - 0
regression/xsl_reports/boost_wide_report.py

@@ -0,0 +1,833 @@
+
+# Copyright (c) MetaCommunications, Inc. 2003-2007
+#
+# Distributed under the Boost Software License, Version 1.0. 
+# (See accompanying file LICENSE_1_0.txt or copy at 
+# http://www.boost.org/LICENSE_1_0.txt)
+
+import shutil
+import codecs
+import xml.sax.handler
+import glob
+import re
+import os.path
+import os
+import string
+import time
+import sys
+import ftplib
+
+import utils
+import runner
+
+
+report_types = [ 'us', 'ds', 'ud', 'dd', 'l', 'p', 'i', 'n', 'ddr', 'dsr', 'udr', 'usr' ]
+
# Directory containing this script: derived from argv[0] when executed as
# a program, from the module's __file__ when imported.
if __name__ == '__main__':
    run_dir = os.path.abspath( os.path.dirname( sys.argv[ 0 ] ) )
else:
    run_dir = os.path.abspath( os.path.dirname( sys.modules[ __name__ ].__file__ ) )
+
+
def map_path( path ):
    """Resolve `path` relative to the directory this script lives in."""
    mapped = os.path.join( run_dir, path )
    return mapped
+
+
def xsl_path( xsl_file_name ):
    """Return the full path of the named stylesheet inside xsl/v2."""
    relative = os.path.join( 'xsl/v2', xsl_file_name )
    return map_path( relative )
+
class file_info:
    """Name, size and modification date of one local or remote report file."""

    def __init__( self, file_name, file_size, file_date ):
        self.name = file_name
        self.size = file_size
        self.date = file_date

    def __repr__( self ):
        fields = ( self.name, self.size, self.date )
        return "name: %s, size: %s, date %s" % fields
+
#
# Find the mod time from unix format directory listing line
#

def get_date( words ):
    """Parse the date fields of a unix-style `ls -l` listing line.

    `words` is the listing line already split into fields; the date occupies
    words[5:-1] as either ( month, day, "HH:MM" ) for current-year entries
    or ( month, day, year ) for older ones.

    Returns a 9-tuple ( year, month, day, hours, minutes, 0, 0, 0, 0 )
    compatible with time.struct_time / time.mktime.
    """
    date = words[ 5: -1 ]

    month_names = [ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" ]

    year = time.localtime()[0] # if the year is not specified it is the current year
    month = month_names.index( date[0] ) + 1
    day = int( date[1] )
    hours = 0
    minutes = 0

    if  date[2].find( ":" ) != -1:
        ( hours, minutes ) = [ int(x) for x in date[2].split( ":" ) ]
    else:
        # listings show the year instead of the time for past-year files,
        # so hours/minutes cannot be recovered in that case
        year = int( date[2] )

    return ( year, month, day, hours, minutes, 0, 0, 0, 0 )
+
def list_ftp( f ):
    """Return file_info records for the regular files in the current
    directory of the FTP connection `f`; directories are skipped and sizes
    are not collected (None)."""
    # f is an ftp object
    utils.log( "listing source content" )
    lines = []

    # 1. get all raw `dir` listing lines
    f.dir( lambda x: lines.append( x ) )

    # 2. split lines into words; the file name is the last (9th) field
    word_lines = [ x.split( None, 8 ) for x in lines ]

    # we don't need directories (mode string starts with "d")
    result = [ file_info( l[-1], None, get_date( l ) ) for l in word_lines if l[0][0] != "d" ]
    # fix: the log loop used to reuse `f`, shadowing the ftp connection
    for fi in result:
        utils.log( "    %s" % fi )
    return result
+
def list_dir( dir ):
    """Return file_info records for every *.zip file in local directory `dir`."""
    utils.log( "listing destination content %s" % dir )
    result = []
    for file_path in glob.glob( os.path.join( dir, "*.zip" ) ):
        if not os.path.isfile( file_path ):
            continue
        mt = time.localtime( os.path.getmtime( file_path ) )
        # zero the struct_time fields (weekday, yearday) that the remote
        # listing does not supply, so dates compare field-by-field
        mt = ( mt[0], mt[1], mt[2], mt[3], mt[4], mt[5], 0, 0, mt[8] )
        # no size (for now)
        result.append( file_info( os.path.basename( file_path ), None, mt ) )
    for fi in result:
        utils.log( "    %s" % fi )
    return result
+
def find_by_name( d, name ):
    """Return the first entry of `d` whose .name equals `name`, or None."""
    matches = [ entry for entry in d if entry.name == name ]
    if matches:
        return matches[0]
    return None
+
def diff( source_dir_content, destination_dir_content ):
    """Compare two file_info lists; return ( changed_files, obsolete_files ).

    A source file is "changed" when it is missing from the destination or
    its modification time differs; a destination file is "obsolete" when it
    no longer exists in the source.
    """
    utils.log( "Finding updated files" )
    changed = []
    obsolete = []
    for src in source_dir_content:
        match = find_by_name( destination_dir_content, src.name )
        if match is None or time.mktime( match.date ) != time.mktime( src.date ):
            changed.append( src.name )
    for dst in destination_dir_content:
        if find_by_name( source_dir_content, dst.name ) is None:
            obsolete.append( dst.name )
    utils.log( "   Updated files:" )
    for name in changed:
        utils.log( "    %s" % name )
    utils.log( "   Obsolete files:" )
    for name in obsolete:
        utils.log( "    %s" % name )
    return ( changed, obsolete )
+        
+def _modtime_timestamp( file ):
+    return os.stat( file ).st_mtime
+                
+
+root_paths = []
+
def shorten( file_path ):
    """Strip the longest known root prefix from `file_path` and normalize
    backslashes to forward slashes, for compact log output."""
    # try longer roots first so the most specific prefix wins
    root_paths.sort( key = len, reverse = True )
    for root in root_paths:
        if file_path.lower().startswith( root.lower() ):
            return file_path[ len( root ): ].replace( "\\", "/" )
    return file_path.replace( "\\", "/" )
+
class action:
    """Base class for make-style build steps.

    An action produces one target file (file_path_) from a list of
    dependency files.  run() rebuilds the target via the subclass-provided
    update() when the target is missing or older than any dependency, and
    clean()s it when a dependency has disappeared.
    """
    def __init__( self, file_path ):
        self.file_path_ = file_path  # the target file this action produces
        self.relevant_paths_ = [ self.file_path_ ]
        self.boost_paths_ = []
        self.dependencies_ = []  # input files the target is rebuilt from
        self.other_results_ = []  # extra outputs removed together with the target

    def run( self ):
        # Decide whether the target must be cleaned, rebuilt, or left alone.
        utils.log( "%s: run" % shorten( self.file_path_ ) )
        __log__ = 2  # NOTE(review): presumably a verbosity marker read by utils' logging via frame introspection -- confirm

        # a vanished dependency invalidates the target entirely
        for dependency in self.dependencies_:
            if not os.path.exists( dependency ):
                utils.log( "%s doesn't exists, removing target" % shorten( dependency ) )
                self.clean()
                return

        if not os.path.exists( self.file_path_ ):
            utils.log( "target doesn't exists, building" )
            self.update()
            return

        # timestamp comparison: rebuild when any dependency is newer
        dst_timestamp = _modtime_timestamp( self.file_path_ )
        utils.log( "    target: %s [%s]" % ( shorten( self.file_path_ ),  dst_timestamp ) )
        needs_updating = 0
        utils.log( "    dependencies:" )
        for dependency in  self.dependencies_:
            dm = _modtime_timestamp( dependency )
            update_mark = ""  # NOTE(review): never assigned a non-empty value, so stale entries are not visually marked
            if dm > dst_timestamp:
                needs_updating = 1
            utils.log( '        %s [%s] %s' % ( shorten( dependency ), dm, update_mark ) )
            
        if  needs_updating:
            utils.log( "target needs updating, rebuilding" )
            self.update()
            return
        else:
            utils.log( "target is up-to-date" )


    def clean( self ):
        # Remove the target and any secondary results; missing files are skipped.
        to_unlink = self.other_results_ + [ self.file_path_ ]
        for result in to_unlink:
            utils.log( '  Deleting obsolete "%s"' % shorten( result ) )
            if os.path.exists( result ):
                os.unlink( result )
+    
class merge_xml_action( action ):
    """Merges one unpacked test-run XML with the expected-results and
    failures-markup documents (via add_expected_results.xsl), producing the
    "merged" XML consumed by the report stylesheets."""

    def __init__( self, source, destination, expected_results_file, failures_markup_file ):
        action.__init__( self, destination )
        self.source_ = source
        self.destination_ = destination
        
        self.expected_results_file_ = expected_results_file
        self.failures_markup_file_  = failures_markup_file

        # the target must be rebuilt when any of these inputs change
        self.dependencies_.extend( [
            self.source_
            , self.expected_results_file_
            , self.failures_markup_file_
            ]
            )

        self.relevant_paths_.extend( [ self.source_ ] )
        self.boost_paths_.extend( [ self.expected_results_file_, self.failures_markup_file_ ] )


        
    def update( self ):
        # Trim oversized character content first, then apply the XSL merge.
        def filter_xml( src, dest ):
            # Copy the XML from src to dest, truncating any element's text
            # content at 64K so huge compiler logs don't bloat the reports.
            
            class xmlgen( xml.sax.saxutils.XMLGenerator ):
                def __init__( self, writer ):
                   xml.sax.saxutils.XMLGenerator.__init__( self, writer )
                  
                   self.trimmed = 0
                   self.character_content = ""

                def startElement( self, name, attrs):
                    self.flush()
                    xml.sax.saxutils.XMLGenerator.startElement( self, name, attrs )

                def endElement( self, name ):
                    self.flush()
                    xml.sax.saxutils.XMLGenerator.endElement( self, name )
                    
                def flush( self ):
                    # emit the buffered (possibly trimmed) text and reset state
                    content = self.character_content
                    self.character_content = ""
                    self.trimmed = 0
                    xml.sax.saxutils.XMLGenerator.characters( self, content )

                def characters( self, content ):
                    # buffer text; once past 64K, truncate and stop appending
                    if not self.trimmed:
                        max_size = pow( 2, 16 )
                        self.character_content += content
                        if len( self.character_content ) > max_size:
                            self.character_content = self.character_content[ : max_size ] + "...\n\n[The content has been trimmed by the report system because it exceeds %d bytes]" % max_size
                            self.trimmed = 1

            o = open( dest, "w" )
            try: 
                gen = xmlgen( o )
                xml.sax.parse( src, gen )
            finally:
                o.close()

            return dest

            
        utils.log( 'Merging "%s" with expected results...' % shorten( self.source_ ) )
        try:
            trimmed_source = filter_xml( self.source_, '%s-trimmed.xml' % os.path.splitext( self.source_ )[0] )
            utils.libxslt(
                  utils.log
                , trimmed_source
                , xsl_path( 'add_expected_results.xsl' )
                , self.file_path_
                , {
                    "expected_results_file" : self.expected_results_file_
                  , "failures_markup_file": self.failures_markup_file_
                  }
                )

            os.unlink( trimmed_source )

        except Exception, msg:
            # on any failure drop the (possibly partial) target so the next
            # run rebuilds it from scratch
            utils.log( '  Skipping "%s" due to errors (%s)' % ( self.source_, msg ) )
            if os.path.exists( self.file_path_ ):
                os.unlink( self.file_path_ )

        
    def _xml_timestamp( xml_path ):
        # Extract the timestamp attribute of the first <test-run> element.
        # NOTE(review): missing `self` parameter -- calling this as an
        # instance method would bind the instance to `xml_path`.  It also
        # relies on pre-2.6 string/instance exceptions: the handler raises
        # *itself* to abort parsing at the first <test-run> element.

        class timestamp_reader( xml.sax.handler.ContentHandler ):
            def startElement( self, name, attrs ):
                if name == 'test-run':
                    self.timestamp = attrs.getValue( 'timestamp' )
                    raise self

        try:
            xml.sax.parse( xml_path, timestamp_reader() )
            raise 'Cannot extract timestamp from "%s". Invalid XML file format?' % xml_path
        except timestamp_reader, x:
            return x.timestamp
+
+
+class make_links_action( action ):
+    def __init__( self, source, destination, output_dir, tag, run_date, comment_file, failures_markup_file ):
+        action.__init__( self, destination )
+        self.dependencies_.append( source )
+        self.source_ = source
+        self.output_dir_ = output_dir
+        self.tag_        = tag
+        self.run_date_   = run_date 
+        self.comment_file_ = comment_file
+        self.failures_markup_file_ = failures_markup_file
+        self.links_file_path_ = os.path.join( output_dir, 'links.html' )
+        
+    def update( self ):
+        utils.makedirs( os.path.join( os.path.dirname( self.links_file_path_ ), "output" ) )
+        utils.makedirs( os.path.join( os.path.dirname( self.links_file_path_ ), "developer", "output" ) )
+        utils.makedirs( os.path.join( os.path.dirname( self.links_file_path_ ), "user", "output" ) )
+        utils.log( '    Making test output files...' )
+        try:
+            utils.libxslt( 
+                  utils.log
+                , self.source_
+                , xsl_path( 'links_page.xsl' )
+                , self.links_file_path_
+                , {
+                    'source':                 self.tag_
+                  , 'run_date':               self.run_date_
+                  , 'comment_file':           self.comment_file_
+                  , 'explicit_markup_file':   self.failures_markup_file_
+                  }
+                )
+        except Exception, msg:
+            utils.log( '  Skipping "%s" due to errors (%s)' % ( self.source_, msg ) )
+
+        open( self.file_path_, "w" ).close()
+
+
+class unzip_action( action ):
+    def __init__( self, source, destination, unzip_func ):
+        action.__init__( self, destination )
+        self.dependencies_.append( source )
+        self.source_     = source
+        self.unzip_func_ = unzip_func
+
+    def update( self ):
+        try:
+            utils.log( '  Unzipping "%s" ... into "%s"' % ( shorten( self.source_ ), os.path.dirname( self.file_path_ ) ) )
+            self.unzip_func_( self.source_, os.path.dirname( self.file_path_ ) )
+        except Exception, msg:
+            utils.log( '  Skipping "%s" due to errors (%s)' % ( self.source_, msg ) )
+
+
def ftp_task( site, site_path , destination ):
    """Mirror the test-run zip archives from ftp://site/site_path into the
    local `destination` directory: download new/changed archives (preserving
    the remote modification time) and delete local archives that no longer
    exist remotely.  *.log.zip files and boostbook.zip are ignored."""
    __log__ = 1  # NOTE(review): presumably a verbosity marker read by utils' logging via frame introspection -- confirm
    utils.log( '' )
    utils.log( 'ftp_task: "ftp://%s/%s" -> %s' % ( site, site_path, destination ) )

    utils.log( '    logging on ftp site %s' % site )
    f = ftplib.FTP( site )
    f.login()  # anonymous login (no credentials supplied)
    utils.log( '    cwd to "%s"' % site_path )
    f.cwd( site_path )

    source_content = list_ftp( f )
    # keep only *.zip (but not *.log.zip) and skip boostbook.zip
    source_content = [ x for x in source_content if re.match( r'.+[.](?<!log[.])zip', x.name ) and x.name.lower() != 'boostbook.zip' ]
    destination_content = list_dir( destination )
    d = diff( source_content, destination_content )

    def synchronize():
        # download new/updated archives, copying the remote mtime onto the
        # local file so the next diff() compares correctly
        for source in d[0]:
            utils.log( 'Copying "%s"' % source )
            result = open( os.path.join( destination, source ), 'wb' )
            f.retrbinary( 'RETR %s' % source, result.write )
            result.close()
            mod_date = find_by_name( source_content, source ).date
            m = time.mktime( mod_date )
            os.utime( os.path.join( destination, source ), ( m, m ) )

        # remove local archives that disappeared from the ftp site
        for obsolete in d[1]:
            utils.log( 'Deleting "%s"' % obsolete )
            os.unlink( os.path.join( destination, obsolete ) )

    utils.log( "    Synchronizing..." )
    __log__ = 2
    synchronize()
    
    f.quit()
+
def unzip_archives_task( source_dir, processed_dir, unzip_func ):
    """Unpack every updated *.zip in source_dir into a same-named *.xml in
    processed_dir; stale XMLs whose archive disappeared are removed by the
    dependency checking in action.run()."""
    utils.log( '' )
    utils.log( 'unzip_archives_task: unpacking updated archives in "%s" into "%s"...' % ( source_dir, processed_dir ) )
    __log__ = 1  # NOTE(review): presumably a verbosity marker read by utils' logging via frame introspection -- confirm

    # targets: one xml per zip archive, plus xmls already present (so
    # orphaned ones get their dependencies checked and are cleaned up)
    target_files = [ os.path.join( processed_dir, os.path.basename( x.replace( ".zip", ".xml" ) )  ) for x in glob.glob( os.path.join( source_dir, "*.zip" ) ) ] + glob.glob( os.path.join( processed_dir, "*.xml" ) )
    actions = [ unzip_action( os.path.join( source_dir, os.path.basename( x.replace( ".xml", ".zip" ) ) ), x, unzip_func ) for x in target_files ]
    for a in actions:
        a.run()
+   
def merge_xmls_task( source_dir, processed_dir, merged_dir, expected_results_file, failures_markup_file ):    
    """For every processed XML, produce a same-named merged XML (expected
    results and failures markup folded in) in merged_dir; stale merged
    files are removed via dependency checking in action.run()."""
    utils.log( '' )
    utils.log( 'merge_xmls_task: merging updated XMLs in "%s"...' % source_dir )
    __log__ = 1  # NOTE(review): presumably a verbosity marker read by utils' logging via frame introspection -- confirm
        
    utils.makedirs( merged_dir )
    # targets: one merged xml per processed xml, plus merged xmls already
    # present (so orphaned ones are dependency-checked and cleaned up)
    target_files = [ os.path.join( merged_dir, os.path.basename( x ) ) for x in glob.glob( os.path.join( processed_dir, "*.xml" ) ) ] + glob.glob( os.path.join( merged_dir, "*.xml" ) )
    actions = [ merge_xml_action( os.path.join( processed_dir, os.path.basename( x ) )
                                  , x
                                  , expected_results_file
                                  , failures_markup_file ) for x in target_files ]

    for a in actions:
        a.run()
+
+
def make_links_task( input_dir, output_dir, tag, run_date, comment_file, extended_test_results, failures_markup_file ):
    """Produce the per-test links/output pages for every merged XML in
    input_dir; a `<name>.links` stamp file marks each XML as processed.

    NOTE(review): `extended_test_results` is accepted but never used here.
    """
    utils.log( '' )
    utils.log( 'make_links_task: make output files for test results in "%s"...' % input_dir )
    __log__ = 1  # NOTE(review): presumably a verbosity marker read by utils' logging via frame introspection -- confirm

    # targets: one .links stamp per xml, plus stamps already present (so
    # orphaned ones are dependency-checked and cleaned up)
    target_files = [ x + ".links"  for x in glob.glob( os.path.join( input_dir, "*.xml" ) ) ] + glob.glob( os.path.join( input_dir, "*.links" ) )
    actions = [ make_links_action( x.replace( ".links", "" )
                                   , x
                                   , output_dir
                                   , tag
                                   , run_date
                                   , comment_file
                                   , failures_markup_file 
                                   ) for x in target_files ]

    for a in actions:
        a.run()
+
+
class xmlgen( xml.sax.saxutils.XMLGenerator ):
    """An XMLGenerator that emits the XML declaration only once, so several
    documents can be streamed into a single output."""
    document_started = 0

    def startDocument( self ):
        if self.document_started:
            return
        xml.sax.saxutils.XMLGenerator.startDocument( self )
        self.document_started = 1
+
+
def merge_processed_test_runs( test_runs_dir, tag, writer ):
    """Concatenate every per-runner XML in test_runs_dir into `writer` as a
    single <all-test-runs> document; a run that fails to parse is skipped
    and the output stream is rewound past its partial content.

    NOTE(review): `tag` is accepted but never used here.
    """
    utils.log( '' )
    utils.log( 'merge_processed_test_runs: merging processed test runs from %s into a single XML...' % test_runs_dir )
    __log__ = 1  # NOTE(review): presumably a verbosity marker read by utils' logging via frame introspection -- confirm
    
    all_runs_xml = xmlgen( writer, encoding='utf-8' )
    all_runs_xml.startDocument()
    all_runs_xml.startElement( 'all-test-runs', {} )
    
    files = glob.glob( os.path.join( test_runs_dir, '*.xml' ) )
    for test_run in files:
        #file_pos = writer.stream.tell()
        file_pos = writer.tell()  # remember the position to roll back to on parse errors
        try:
            utils.log( '    Writing "%s" into the resulting XML...' % test_run )
            xml.sax.parse( test_run, all_runs_xml )
        except Exception, msg:
            # discard the partially-written run
            utils.log( '    Skipping "%s" due to errors (%s)' % ( test_run, msg ) )
            #writer.stream.seek( file_pos )
            #writer.stream.truncate()
            writer.seek( file_pos )
            writer.truncate()

    all_runs_xml.endElement( 'all-test-runs' )
    all_runs_xml.endDocument()
+
+
def execute_tasks(
          tag
        , user
        , run_date
        , comment_file
        , results_dir
        , output_dir
        , reports
        , warnings
        , extended_test_results
        , dont_collect_logs
        , expected_results_file
        , failures_markup_file
        ):
    """Run the full pipeline for one tag: fetch result archives (unless
    dont_collect_logs), unpack them, merge in expected results, generate
    links pages, combine everything into one XML, then build the HTML
    reports."""

    incoming_dir  = os.path.join( results_dir, 'incoming', tag )
    processed_dir = os.path.join( incoming_dir, 'processed' )
    merged_dir    = os.path.join( processed_dir, 'merged' )
    for directory in ( incoming_dir, processed_dir, merged_dir ):
        if not os.path.exists( directory ):
            os.makedirs( directory )

    if not dont_collect_logs:
        ftp_site = 'fx.meta-comm.com'
        site_path = '/boost-regression/%s' % tag
        ftp_task( ftp_site, site_path, incoming_dir )

    unzip_archives_task( incoming_dir, processed_dir, utils.unzip )
    merge_xmls_task( incoming_dir, processed_dir, merged_dir, expected_results_file, failures_markup_file )
    make_links_task( merged_dir
                     , output_dir
                     , tag
                     , run_date
                     , comment_file
                     , extended_test_results
                     , failures_markup_file )

    # combine all merged test runs into one big XML document
    results_xml_path = os.path.join( output_dir, 'extended_test_results.xml' )
    writer = open( results_xml_path, 'w' )
    merge_processed_test_runs( merged_dir, tag, writer )
    writer.close()

    make_result_pages(
          extended_test_results
        , expected_results_file
        , failures_markup_file
        , tag
        , run_date
        , comment_file
        , output_dir
        , reports
        , warnings
        )
+
+        
def make_result_pages(
          extended_test_results
        , expected_results_file
        , failures_markup_file
        , tag
        , run_date
        , comment_file
        , output_dir
        , reports
        , warnings
        ):
    """Apply the XSL stylesheets to the combined extended_test_results XML,
    producing only the report pages selected in `reports` (codes: i issues,
    dd/ud detailed, ds/us summary, ddr/udr and dsr/usr release variants,
    e expected results, n runner comments), then copy the master stylesheet
    and decode %20 sequences in generated file names."""

    utils.log( 'Producing the reports...' )
    __log__ = 1  # NOTE(review): presumably a verbosity marker read by utils' logging via frame introspection -- confirm

    warnings_text = '+'.join( warnings )
    
    if comment_file != '':
        comment_file = os.path.abspath( comment_file )
        
    links = os.path.join( output_dir, 'links.html' )
    
    # make sure the per-audience output directories exist
    utils.makedirs( os.path.join( output_dir, 'output' ) )
    for mode in ( 'developer', 'user' ):
        utils.makedirs( os.path.join( output_dir, mode , 'output' ) )
        
    issues = os.path.join( output_dir, 'developer', 'issues.html'  )
    if 'i' in reports:
        utils.log( '    Making issues list...' )
        utils.libxslt( 
              utils.log
            , extended_test_results
            , xsl_path( 'issues_page.xsl' )
            , issues
            , {
                  'source':                 tag
                , 'run_date':               run_date
                , 'warnings':               warnings_text
                , 'comment_file':           comment_file
                , 'expected_results_file':  expected_results_file
                , 'explicit_markup_file':   failures_markup_file
                , 'release':                "yes"
                }
            )

    # detailed reports ('dd' / 'ud')
    for mode in ( 'developer', 'user' ):
        if mode[0] + 'd' in reports:
            utils.log( '    Making detailed %s  report...' % mode )
            utils.libxslt( 
                  utils.log
                , extended_test_results
                , xsl_path( 'result_page.xsl' )
                , os.path.join( output_dir, mode, 'index.html' )
                , { 
                      'links_file':             'links.html'
                    , 'mode':                   mode
                    , 'source':                 tag
                    , 'run_date':               run_date
                    , 'warnings':               warnings_text
                    , 'comment_file':           comment_file
                    , 'expected_results_file':  expected_results_file
                    , 'explicit_markup_file' :  failures_markup_file
                    }
                )
    
    # summary reports ('ds' / 'us')
    for mode in ( 'developer', 'user' ):
        if mode[0] + 's' in reports:
            utils.log( '    Making summary %s  report...' % mode )
            utils.libxslt(
                  utils.log
                , extended_test_results
                , xsl_path( 'summary_page.xsl' )
                , os.path.join( output_dir, mode, 'summary.html' )
                , { 
                      'mode' :                  mode 
                    , 'source':                 tag
                    , 'run_date':               run_date 
                    , 'warnings':               warnings_text
                    , 'comment_file':           comment_file
                    , 'explicit_markup_file' :  failures_markup_file
                    }
                )

    # detailed release reports ('ddr' / 'udr')
    for mode in ( 'developer', 'user' ):
        if mode[0] + 'dr' in reports:
            utils.log( '    Making detailed %s release report...' % mode )
            utils.libxslt( 
                  utils.log
                , extended_test_results
                , xsl_path( 'result_page.xsl' )
                , os.path.join( output_dir, mode, 'index_release.html' )
                , { 
                      'links_file':             'links.html'
                    , 'mode':                   mode
                    , 'source':                 tag
                    , 'run_date':               run_date 
                    , 'warnings':               warnings_text
                    , 'comment_file':           comment_file
                    , 'expected_results_file':  expected_results_file
                    , 'explicit_markup_file' :  failures_markup_file
                    , 'release':                "yes"
                    }
                )

    # summary release reports ('dsr' / 'usr')
    for mode in ( 'developer', 'user' ):
        if mode[0] + 'sr' in reports:
            utils.log( '    Making summary %s release report...' % mode )
            utils.libxslt(
                  utils.log
                , extended_test_results
                , xsl_path( 'summary_page.xsl' )
                , os.path.join( output_dir, mode, 'summary_release.html' )
                , { 
                      'mode' :                  mode
                    , 'source':                 tag
                    , 'run_date':               run_date 
                    , 'warnings':               warnings_text
                    , 'comment_file':           comment_file
                    , 'explicit_markup_file' :  failures_markup_file
                    , 'release':                'yes'
                    }
                )
        
    if 'e' in reports:
        utils.log( '    Generating expected_results ...' )
        utils.libxslt(
              utils.log
            , extended_test_results
            , xsl_path( 'produce_expected_results.xsl' )
            , os.path.join( output_dir, 'expected_results.xml' )
            )

    if  'n' in reports:
        utils.log( '    Making runner comment files...' )
        utils.libxslt(
              utils.log
            , extended_test_results
            , xsl_path( 'runners.xsl' )
            , os.path.join( output_dir, 'runners.html' )
            )

    shutil.copyfile(
          xsl_path( 'html/master.css' )
        , os.path.join( output_dir, 'master.css' )
        )

    fix_file_names( output_dir )
+
+
def fix_file_names( dir ):
    """Walk `dir` and rename files whose names contain URL-encoded spaces
    (%20) to use real spaces; the xsl processor does not handle spaces
    correctly, so its output must be decoded by hand."""
    utils.log( 'Fixing encoded file names...' )
    for root, dirs, files in os.walk( dir ):
        encoded = [ name for name in files if "%20" in name ]
        for name in encoded:
            decoded = name.replace( "%20", " " )
            utils.rename( utils.log
                          , os.path.join( root, name )
                          , os.path.join( root, decoded ) )
+
+
def build_xsl_reports( 
          locate_root_dir
        , tag
        , expected_results_file
        , failures_markup_file
        , comment_file
        , results_dir
        , result_file_prefix
        , dont_collect_logs = 0
        , reports = report_types
        , warnings = []
        , user = None
        , upload = False
        ):
    """Top-level entry point: record the root paths, prepare the output
    directory, run the report pipeline, and optionally upload the
    resulting report archive to SourceForge."""

    run_date = time.strftime( '%Y-%m-%dT%H:%M:%SZ', time.gmtime() )

    root_paths.append( locate_root_dir )
    root_paths.append( results_dir )

    bin_boost_dir = os.path.join( locate_root_dir, 'bin', 'boost' )

    output_dir = os.path.join( results_dir, result_file_prefix )
    utils.makedirs( output_dir )

    if expected_results_file != '':
        expected_results_file = os.path.abspath( expected_results_file )
    else:
        # fall back to an empty expected-results document next to this script
        expected_results_file = os.path.abspath( map_path( 'empty_expected_results.xml' ) )

    extended_test_results = os.path.join( output_dir, 'extended_test_results.xml' )

    execute_tasks( tag, user, run_date, comment_file, results_dir, output_dir
                   , reports, warnings, extended_test_results, dont_collect_logs
                   , expected_results_file, failures_markup_file )

    if upload:
        upload_dir = 'regression-logs/'
        utils.log( 'Uploading  results into "%s" [connecting as %s]...' % ( upload_dir, user ) )

        archive_name = '%s.tar.gz' % result_file_prefix
        utils.tar( os.path.join( results_dir, result_file_prefix )
                   , archive_name )

        utils.sourceforge.upload( os.path.join( results_dir, archive_name ), upload_dir, user )
        utils.sourceforge.untar( os.path.join( upload_dir, archive_name ), user, background = True )
+
+
def accept_args( args ):
    """Parse the command line `args` and return the positional-argument
    tuple expected by build_xsl_reports.

    Defaults: --results-dir falls back to --locate-root, and
    --results-prefix to 'all', when not supplied.
    """
    args_spec = [ 
          'locate-root='
        , 'tag='
        , 'expected-results='
        , 'failures-markup='
        , 'comment='
        , 'results-dir='
        , 'results-prefix='
        , 'dont-collect-logs'
        , 'reports='
        , 'user='
        , 'upload'
        , 'help'
        ]
        
    # Defaults for value options only.  The boolean flags
    # ('--dont-collect-logs', '--upload') are detected by key *presence*
    # below, so they must not be pre-seeded here.  (A dead, mistyped
    # 'upload': False entry used to live here; it was never read.)
    options = { 
          '--comment': ''
        , '--expected-results': ''
        , '--failures-markup': ''
        , '--reports': string.join( report_types, ',' )
        , '--tag': None
        , '--user': None
        }
    
    utils.accept_args( args_spec, args, options, usage )
    if not options.has_key( '--results-dir' ):
        options[ '--results-dir' ] = options[ '--locate-root' ]

    if not options.has_key( '--results-prefix' ):
        options[ '--results-prefix' ] = 'all'
    
    return ( 
          options[ '--locate-root' ]
        , options[ '--tag' ]
        , options[ '--expected-results' ]
        , options[ '--failures-markup' ]
        , options[ '--comment' ]
        , options[ '--results-dir' ]
        , options[ '--results-prefix' ]
        , options.has_key( '--dont-collect-logs' )
        , options[ '--reports' ].split( ',' )
        , options[ '--user' ]
        , options.has_key( '--upload' )
        )
+
+
def usage():
    # Print command-line help; passed as the usage callback to
    # utils.accept_args() in accept_args().
    print 'Usage: %s [options]' % os.path.basename( sys.argv[0] )
    print    '''
\t--locate-root         the same as --locate-root in compiler_status
\t--tag                 the tag for the results (i.e. 'trunk')
\t--expected-results    the file with the results to be compared with
\t                      the current run
\t--failures-markup     the file with the failures markup
\t--comment             an html comment file (will be inserted in the reports)
\t--results-dir         the directory containing -links.html, -fail.html
\t                      files produced by compiler_status (by default the
\t                      same as specified in --locate-root)
\t--results-prefix      the prefix of -links.html, -fail.html
\t                      files produced by compiler_status
\t--user                SourceForge user name for a shell account
\t--upload              upload reports to SourceForge 

The following options are useful in debugging:

\t--dont-collect-logs dont collect the test logs
\t--reports           produce only the specified reports
\t                        us - user summary
\t                        ds - developer summary
\t                        ud - user detailed
\t                        dd - developer detailed
\t                        l  - links
\t                        p  - patches
\t                        x  - extended results file
\t                        i  - issues
\t                        n  - runner comment files
'''
+
def main():
    """Parse the command line and build the reports."""
    parsed = accept_args( sys.argv[ 1 : ] )
    build_xsl_reports( *parsed )
+
# Allow the module to be both imported (by other report scripts) and run
# directly as a command-line tool.
if __name__ == '__main__':
    main()

+ 179 - 0
regression/xsl_reports/boostbook_report.py

@@ -0,0 +1,179 @@
+import ftplib
+import optparse
+import os
+import time
+import urlparse
+import utils
+import shutil
+import sys
+import zipfile
+import xml.sax.saxutils
+
+
+import utils.libxslt
+
def get_date( words ):
    """Parse the date fields of an FTP LIST output line.

    'words' is the whitespace-split listing line; fields 5..-2 hold the
    date as either ('Mon', 'DD', 'HH:MM') for current-year entries or
    ('Mon', 'DD', 'YYYY') for older ones.  Returns a 9-element tuple
    compatible with time.struct_time (seconds/weekday/yearday/DST zeroed).
    """
    date = words[ 5: -1 ]

    month_names = [ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" ]

    year = time.localtime()[0] # If the year is not specified it is the current year
    month = month_names.index( date[0] ) + 1
    day = int( date[1] )
    hours = 0
    minutes = 0

    if  date[2].find( ":" ) != -1:
        ( hours, minutes ) = [ int(x) for x in date[2].split( ":" ) ]
    else:
        # There is no way to get a time of day for entries from past years.
        year = int( date[2] )

    return ( year, month, day, hours, minutes, 0, 0, 0, 0 )
+
+#def check_for_new_upload( target_dir, boostbook_info ):
+
def accept_args( args ):
    """Parse the given command-line 'args' and return the options object.

    Prints help and exits when no arguments are supplied or when the
    required -d/--destination option is missing.
    """
    parser = optparse.OptionParser()
    parser.add_option( '-t', '--tag', dest='tag', help="the tag for the results (i.e. 'RC_1_34_0')" )
    parser.add_option( '-d', '--destination', dest='destination', help='destination directory' )

    if len(args) == 0:
        parser.print_help()
        sys.exit( 1 )

    # Bug fix: parse the 'args' that were passed in; the original called
    # parse_args() with no arguments, which silently parsed sys.argv instead.
    (options, args) = parser.parse_args( args )
    if not options.destination:
        print( '-d is required' )
        parser.print_help()
        sys.exit( 1 )
    return options
+
def unzip( archive_path, result_dir ):
    """Extract every member of the zip archive at 'archive_path' under
    'result_dir', creating intermediate directories as needed."""
    utils.log( 'Unpacking %s into %s' % ( archive_path, result_dir ) )
    # The compression argument is ignored when reading; kept for clarity.
    z = zipfile.ZipFile( archive_path, 'r', zipfile.ZIP_DEFLATED )
    try:
        for f in z.infolist():
            dir = os.path.join( result_dir, os.path.dirname( f.filename ) )
            if not os.path.exists( dir ):
                os.makedirs( dir )
            # Explicit directory entries have no content to write out.
            if f.filename.endswith( '/' ):
                continue
            result = open( os.path.join( result_dir, f.filename ), 'wb' )
            try:
                result.write( z.read( f.filename ) )
            finally:
                result.close()
    finally:
        z.close()
+
def boostbook_report( options ):
    """Fetch the uploaded BoostBook.zip for options.tag from the regression
    FTP site, unpack it under options.destination, repack it as a tarball,
    and render the BoostBook build log into HTML via boostbook_log.xsl.

    Exits early when the local copy is already up to date.
    """
    site = 'fx.meta-comm.com'
    site_path = '/boost-regression/%s' % options.tag
    
    utils.log( 'Opening %s ...' % site )
    f = ftplib.FTP( site )
    f.login()
    utils.log( '   cd %s ...' % site_path )
    f.cwd( site_path )
    
    utils.log( '   dir' )
    lines = []
    f.dir( lambda x: lines.append( x ) )
    # Each listing line is split into at most 9 fields; the last one is
    # the file name, the middle ones carry the date (see get_date above).
    word_lines = [ x.split( None, 8 ) for x in lines ]
    boostbook_info = [ ( l[-1], get_date( l ) ) for l in word_lines if l[-1] == "BoostBook.zip" ]
    if len( boostbook_info ) > 0:
        boostbook_info = boostbook_info[0]
        utils.log( 'BoostBook found! (%s)' % ( boostbook_info, ) )
        local_copy = os.path.join( options.destination,'BoostBook-%s.zip' % options.tag )
        
        # The 'if 1:' blocks below look like manual toggles kept for
        # debugging individual stages of the pipeline.
        if 1: 
            if os.path.exists( local_copy ):
                utils.log( 'Local copy exists. Checking if it is older than uploaded one...' )
                uploaded_mtime = time.mktime( boostbook_info[1] )
                local_mtime    = os.path.getmtime( local_copy )
                utils.log( '    uploaded: %s %s, local: %s %s' % 
                           ( uploaded_mtime
                             , boostbook_info[1]
                             , local_mtime 
                             , time.localtime( local_mtime )) ) 
                # NOTE(review): 'modtime' is computed but never used.
                modtime = time.localtime( os.path.getmtime( local_copy ) )
                if uploaded_mtime <= local_mtime:
                    utils.log( 'Local copy is newer: exiting' )
                    sys.exit()
                
        if 1:
            # Download into a temporary name, then atomically swap it in and
            # stamp the local copy with the uploaded file's mtime.
            temp = os.path.join( options.destination,'BoostBook.zip' )
            result = open( temp, 'wb' )
            f.retrbinary( 'RETR %s' % boostbook_info[0], result.write )
            result.close()
            
            if os.path.exists( local_copy ):
                os.unlink( local_copy )
            os.rename( temp, local_copy )
            m = time.mktime( boostbook_info[1] )
            os.utime( local_copy, ( m, m ) )


        docs_name = os.path.splitext( os.path.basename( local_copy ) )[0]
        if 1:
            # Unpack into a fresh per-tag directory.
            unpacked_docs_dir = os.path.join( options.destination, docs_name )
            utils.log( 'Dir %s ' % unpacked_docs_dir )
            if os.path.exists( unpacked_docs_dir ):
                utils.log( 'Cleaning up...' )
                shutil.rmtree( unpacked_docs_dir )
            os.makedirs( unpacked_docs_dir )
            
            unzip( local_copy, unpacked_docs_dir )

        utils.system( [ 'cd %s' % unpacked_docs_dir
                       , 'tar -c -f ../%s.tar.gz -z --exclude=tarball *' % docs_name ] )
        
        # Turn boostbook.log into boostbook.log.xml, then render it to HTML.
        process_boostbook_build_log( os.path.join( unpacked_docs_dir, 'boostbook.log' ), read_timestamp( unpacked_docs_dir ) )
        utils.libxslt( log
                         , os.path.abspath( os.path.join( unpacked_docs_dir, 'boostbook.log.xml' ) )
                         , os.path.abspath( os.path.join( os.path.dirname( __file__ ), 'xsl', 'v2', 'boostbook_log.xsl' ) ) 
                         , os.path.abspath( os.path.join( unpacked_docs_dir, 'boostbook.log.html' ) ) )
+
+        
def log( msg ):
    # Minimal logger; passed as the log callback to utils.libxslt above.
    print msg
+    
def process_boostbook_build_log( path, timestamp ):
    """Convert the plain-text BoostBook build log at 'path' into an XML
    file '<path>.xml'.

    Each log line becomes a <line type="output|failure|skipped"> element;
    the root <build> element carries the given 'timestamp' (a string) and
    result="success" unless any '...failed' line was seen.
    """
    # Read the whole log up front; close the handle promptly (the original
    # leaked both file handles).
    input_file = open( path )
    try:
        lines = input_file.read().splitlines()
    finally:
        input_file.close()

    output_lines = []
    result = 'success'
    for line in lines:
        line_type = 'output'
        # Boost.Build marks failed actions with a leading '...failed'.
        if line.startswith( '...failed' ):
            line_type = 'failure'
            result = 'failure'

        # XSLT runtime errors are tagged as failures for the display, but
        # do not change the overall build result.
        if line.startswith( 'runtime error:' ):
            line_type = 'failure'

        if line.startswith( '...skipped' ):
            line_type = 'skipped'
        output_lines.append( ( line_type, line ) )

    f = open( path + '.xml', 'w' )
    try:
        g = xml.sax.saxutils.XMLGenerator( f )
        g.startDocument()
        g.startElement( 'build', { 'result': result, 'timestamp': timestamp } )
        for line in output_lines:
            g.startElement( 'line', { 'type': line[0] } )
            g.characters( line[1] )
            g.endElement( 'line' )
        g.endElement( 'build' )
        g.endDocument()
    finally:
        f.close()
+    
+        
def read_timestamp( docs_directory ):
    # Return the first line of the 'timestamp' file in the unpacked docs.
    timestamp_file = open( os.path.join( docs_directory, 'timestamp' ) )
    try:
        first_line = timestamp_file.readline()
    finally:
        timestamp_file.close()
    return first_line
+    
def main():
    # Entry point: parse arguments, then produce the BoostBook report.
    boostbook_report( accept_args( sys.argv[1:] ) )
+
# Allow use both as a standalone script and as an importable module.
if __name__ == '__main__':
    main()

+ 52 - 0
regression/xsl_reports/build_results.sh

@@ -0,0 +1,52 @@
+#!/bin/sh
+
+#~ Copyright Redshift Software, Inc. 2007
+#~ Distributed under the Boost Software License, Version 1.0.
+#~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
+
+set -e
+
build_all()
{
    # Run the full cycle for the tag directory given in $1:
    # refresh the checkout, rebuild the reports, upload the result.
    # Quoted expansions so paths containing spaces do not word-split.
    update_tools "${1}"
    build_results "${1}"
    upload_results "${1}"
}
+
update_tools()
{
    # Update the Boost checkout under the tag directory ($1).
    cwd=`pwd`
    cd "${1}/boost"
    svn up
    cd "${cwd}"
}
+
build_results()
{
    # Generate the wide report for the tag directory ($1).  The directory
    # name doubles as the --tag value (e.g. "trunk").
    cwd=`pwd`
    cd "${1}"
    root=`pwd`
    boost=${root}/boost
    python "${boost}/tools/regression/xsl_reports/boost_wide_report.py" \
        --locate-root="${root}" \
        --tag="${1}" \
        --expected-results="${boost}/status/expected_results.xml" \
        --failures-markup="${boost}/status/explicit-failures-markup.xml" \
        --comment="" \
        --user=""
    cd "${cwd}"
}
+
upload_results()
{
    # Zip the generated HTML (excluding the XML inputs), recompress with
    # bzip2, and ship the archive to the beta.boost.org incoming area,
    # where it is unpacked back to a plain .zip.
    cwd=`pwd`
    cd "${1}/all"
    rm -f ../../${1}.zip*
    zip -r -9 "../../${1}" * -x '*.xml'
    cd "${cwd}"
    bzip2 -9 "${1}.zip"
    scp "${1}.zip.bz2" beta.boost.org:/home/grafik/www.boost.org/testing/incoming/
    ssh beta.boost.org bunzip2 "/home/grafik/www.boost.org/testing/incoming/${1}.zip.bz2"
}
+
+build_all trunk

+ 631 - 0
regression/xsl_reports/email_maintainers.py

@@ -0,0 +1,631 @@
+#
+# Copyright (C) 2005, 2007 The Trustees of Indiana University 
+# Author: Douglas Gregor
+#
+# Distributed under the Boost Software License, Version 1.0. (See
+# accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt)
+#
+import re
+import smtplib
+import os
+import time
+import string
+import datetime
+import sys
+
# Addresses used in the generated email headers.
report_author = "Douglas Gregor <dgregor@osl.iu.edu>"
boost_dev_list = "Boost Developer List <boost@lists.boost.org>"
+
def sorted_keys( dict ):
    """Return the keys of 'dict' as a sorted list."""
    # sorted() replaces the hand-rolled keys()/sort() sequence.
    return sorted( dict )
+
+
class Platform:
    """Collects every reported failure for one test platform."""

    def __init__(self, name):
        self.name = name
        self.failures = []

    def addFailure(self, failure):
        """Record one more failure seen on this platform."""
        self.failures.append(failure)

    def isBroken(self):
        # A huge failure count indicates a misconfigured or broken platform
        # rather than genuine per-library regressions.
        return len(self.failures) > 300
+
class Failure:
    """
    A single test case failure in the report.
    """
    def __init__(self, test, platform):
        # The Test this failure belongs to and the Platform it occurred on.
        self.test = test
        self.platform = platform
        return
+
class Test:
    """Groups the failures of one named test within a library."""

    def __init__(self, library, name):
        self.library = library
        self.name = name
        self.failures = []

    def addFailure(self, failure):
        """Record one more failure of this test."""
        self.failures.append(failure)

    def numFailures(self):
        return len(self.failures)

    def numReportableFailures(self):
        """
        Count only the failures occurring on non-broken platforms; these
        are the ones reported to the maintainers of the library.
        """
        return sum( 1 for f in self.failures if not f.platform.isBroken() )
+
class Library:
    """Aggregates the failing tests and the maintainers of one library."""

    def __init__(self, name):
        self.name = name
        self.maintainers = []
        self.tests = []

    def addTest(self, test):
        """
        Add another test to the library.
        """
        self.tests.append(test)

    def addMaintainer(self, maintainer):
        """
        Add a new maintainer for this library.
        """
        self.maintainers.append(maintainer)

    def numFailures(self):
        return sum( t.numFailures() for t in self.tests )

    def numReportableFailures(self):
        # Failures on broken platforms are excluded from maintainer mail.
        return sum( t.numReportableFailures() for t in self.tests )
+
class Maintainer:
    """
    Information about the maintainer of a library
    """
    def __init__(self, name, email):
        self.name = name
        self.email = email
        self.libraries = list()
        return

    def addLibrary(self, library):
        # Register a library this person maintains.
        self.libraries.append(library)
        return

    def composeEmail(self, report):
        """
        Composes an e-mail to this maintainer with information about
        the failures in his or her libraries, omitting those that come
        from "broken" platforms. Returns the e-mail text if a message
        needs to be sent, or None otherwise.
        """

        # Determine if we need to send a message to this developer.
        requires_message = False
        for library in self.libraries:
            if library.numReportableFailures() > 0:
                requires_message = True
                break

        if not requires_message:
            return None

        # Build the message header
        message = """From: Douglas Gregor <dgregor@osl.iu.edu>
To: """
        message += self.name + ' <' + self.email + '>'
        message += """
Reply-To: boost@lists.boost.org
Subject: Failures in your Boost libraries as of """
        message += str(datetime.date.today()) + " [" + report.branch + "]"
        message += """

You are receiving this report because one or more of the libraries you
maintain has regression test failures that are not accounted for.
A full version of the report is sent to the Boost developer's mailing
list.

Detailed report:
"""
        message += '  ' + report.url + """

There are failures in these libraries you maintain:
"""

        # List the libraries this maintainer is responsible for and
        # the number of reportable failures in that library.
        for library in self.libraries:
            num_failures = library.numReportableFailures()
            if num_failures > 0:
                message += '  ' + library.name + ' (' + str(num_failures) + ')\n'
                pass
            pass

        # Provide the details for the failures in each library.
        for library in self.libraries:
            if library.numReportableFailures() > 0:
                message += '\n|' + library.name + '|\n'
                for test in library.tests:
                    if test.numReportableFailures() > 0:
                        message += '  ' + test.name + ':'
                        for failure in test.failures:
                            if not failure.platform.isBroken():
                                message += '  ' + failure.platform.name
                                pass
                            pass
                        message += '\n'
                        pass
                    pass
                pass
            pass

        return message
+
class Report:
    """
    The complete report of all failing test cases.
    """
    def __init__(self, branch = 'trunk'):
        self.branch = branch
        self.date = None            # report time string, parsed from the email
        self.url = None             # URL of the detailed HTML report
        self.libraries = dict()     # library name -> Library
        self.platforms = dict()     # platform name -> Platform
        self.maintainers = dict()   # maintainer name -> Maintainer
        return

    def getPlatform(self, name):
        """
        Retrieve the platform with the given name.
        """
        # Get-or-create: unknown names are added on first use.
        if self.platforms.has_key(name):
            return self.platforms[name]
        else:
            self.platforms[name] = Platform(name)
            return self.platforms[name]

    def getMaintainer(self, name, email):
        """
        Retrieve the maintainer with the given name and e-mail address.
        """
        # Get-or-create, keyed by name only: the email address from the
        # first occurrence of a name wins.
        if self.maintainers.has_key(name):
            return self.maintainers[name]
        else:
            self.maintainers[name] = Maintainer(name, email)
            return self.maintainers[name]

    def parseIssuesEmail(self):
        """
        Try to parse the issues e-mail file. Returns True if everything was
        successful, false otherwise.
        """
        # See if we actually got the file
        if not os.path.isfile('issues-email.txt'):
            return False

        # Determine the set of libraries that have unresolved failures
        date_regex = re.compile('Report time: (.*)')
        url_regex = re.compile('  (http://.*)')
        library_regex = re.compile('\|(.*)\|')
        failure_regex = re.compile('  ([^:]*):  (.*)')
        current_library = None
        for line in file('issues-email.txt', 'r'):
            # Check for the report time line
            m = date_regex.match(line)
            if m:
                self.date = m.group(1)
                continue

            # Check for the detailed report URL
            m = url_regex.match(line)
            if m:
                self.url = m.group(1)
                continue
                
            # Check for a library header
            m = library_regex.match(line)
            if m:
                current_library = Library(m.group(1))
                self.libraries[m.group(1)] = current_library
                continue
                
            # Check for a library test and its failures
            m = failure_regex.match(line)
            if m:
                test = Test(current_library, m.group(1))
                for platform_name in re.split('\s*', m.group(2)):
                    if platform_name != '':
                        platform = self.getPlatform(platform_name)
                        failure = Failure(test, platform)
                        test.addFailure(failure)
                        platform.addFailure(failure)
                        pass
                current_library.addTest(test)
                continue
            pass

        return True

    def getIssuesEmail(self):
        """
        Retrieve the issues email from beta.boost.org, trying a few
        times in case something wonky is happening. If we can retrieve
        the file, calls parseIssuesEmail and return True; otherwise,
        return False.
        """
        base_url = "http://beta.boost.org/development/tests/"
        base_url += self.branch
        base_url += "/developer/";
        got_issues = False

        # Ping the server by looking for an HTML file
        print "Pinging the server to initiate extraction..."
        ping_url = base_url + "issues.html"
        os.system('curl -O ' + ping_url)
        os.system('rm -f issues.html')
            
        # Poll up to 30 times, 30 seconds apart, for the e-mail file.
        for x in range(30):
            # Update issues-email.txt
            url = base_url + "issues-email.txt"
            print 'Retrieving issues email from ' + url
            os.system('rm -f issues-email.txt')
            os.system('curl -O ' + url)

            if self.parseIssuesEmail():
                return True

            print 'Failed to fetch issues email. '
            time.sleep (30)

        return False
        
    # Parses the file $BOOST_ROOT/libs/maintainers.txt to create a hash
    # mapping from the library name to the list of maintainers.
    def parseLibraryMaintainersFile(self):
        """
        Parse the maintainers file in ../../../libs/maintainers.txt to
        collect information about the maintainers of broken libraries.
        """
        lib_maintainer_regex = re.compile('(\S+)\s*(.*)')
        name_email_regex = re.compile('\s*(\w*(\s*\w+)+)\s*<\s*(\S*(\s*\S+)+)\S*>')
        # Maintainer addresses are spam-protected as "user - at - host".
        at_regex = re.compile('\s*-\s*at\s*-\s*')
        for line in file('../../../libs/maintainers.txt', 'r'):
            m = lib_maintainer_regex.match (line)
            if m:
                libname = m.group(1)
                # Only libraries already seen in the issues email matter.
                if self.libraries.has_key(m.group(1)):
                    library = self.libraries[m.group(1)]
                    for person in re.split('\s*,\s*', m.group(2)):
                        nmm = name_email_regex.match(person)
                        if nmm:
                            name = nmm.group(1)
                            email = nmm.group(3)
                            email = at_regex.sub('@', email)
                            maintainer = self.getMaintainer(name, email)
                            maintainer.addLibrary(library)
                            library.addMaintainer(maintainer)
                            pass
                        pass
                    pass
                pass
            pass
        pass

    def numFailures(self):
        # Total failure count across all libraries.
        count = 0
        for library in self.libraries:
            count += self.libraries[library].numFailures()
            pass
        return count

    def numReportableFailures(self):
        # Failure count excluding those on broken platforms.
        count = 0
        for library in self.libraries:
            count += self.libraries[library].numReportableFailures()
            pass
        return count

    def composeSummaryEmail(self):
        """
        Compose a message to send to the Boost developer's
        list. Return the message and return it.
        """
        message = """From: Douglas Gregor <dgregor@osl.iu.edu>
To: boost@lists.boost.org
Reply-To: boost@lists.boost.org
Subject: [Report] """
        # NOTE(review): 'branch' here is the module-level global set by the
        # main script, not self.branch - likely intended to be self.branch;
        # confirm before relying on this method outside the script.
        message += str(self.numFailures()) + " failures on " + branch
        message += " (" + str(datetime.date.today()) + ")"
        message += """

Boost regression test failures
"""
        message += "Report time: " + self.date + """

This report lists all regression test failures on release platforms.

Detailed report:
"""

        message += '  ' + self.url + '\n\n'

        if self.numFailures() == 0:
            message += "No failures! Yay!\n"
            return message
            
        # List the platforms that are broken
        any_broken_platforms = self.numReportableFailures() < self.numFailures()
        if any_broken_platforms:
            message += """The following platforms have a large number of failures:
"""
            for platform in sorted_keys( self.platforms ):
                if self.platforms[platform].isBroken():
                    message += '  ' + platform + '\n'

            message += '\n'
   
        # Display the number of failures
        message += (str(self.numFailures()) + ' failures in ' + 
                    str(len(self.libraries)) + ' libraries')
        if any_broken_platforms:
            message += ' (' + str(self.numReportableFailures()) + ' are from non-broken platforms)'
        message += '\n'

        # Display the number of failures per library
        for k in sorted_keys( self.libraries ):
            library = self.libraries[k]
            num_failures = library.numFailures()
            message += ('  ' + library.name + ' (' 
                        + str(library.numReportableFailures()))
            if library.numReportableFailures() < num_failures:
                message += (' of ' + str(num_failures) 
                            + ' failures are from non-broken platforms')
            message += ')\n'
            pass

        # If we have any broken platforms, tell the user how we're
        # displaying them.
        if any_broken_platforms:
            message += """
Test failures marked with a (*) represent tests that failed on
platforms that are considered broken. They are likely caused by
misconfiguration by the regression tester or a failure in a core
library such as Test or Config."""
        message += '\n'

        # Provide the details for the failures in each library.
        for k in sorted_keys( self.libraries ):
            library = self.libraries[k]
            message += '\n|' + library.name + '|\n'
            for test in library.tests:
                message += '  ' + test.name + ':'
                for failure in test.failures:
                    platform = failure.platform
                    message += '  ' + platform.name
                    if platform.isBroken():
                        message += '*'
                    pass
                message += '\n'
                pass
            pass

        return message
+
+# Send a message to "person" (a maintainer of a library that is
+# failing).
+# maintainers is the result of get_library_maintainers()
+def send_individualized_message (branch, person, maintainers):
+  # There are several states we could be in:
+  #   0 Initial state. Eat everything up to the "NNN failures in MMM
+  #     libraries" line
+  #   1 Suppress output within this library
+  #   2 Forward output within this library
+  state = 0
+ 
+  failures_in_lib_regex = re.compile('\d+ failur.*\d+ librar')
+  lib_failures_regex = re.compile('  (\S+) \((\d+)\)')
+  lib_start_regex = re.compile('\|(\S+)\|')
+  general_pass_regex = re.compile('  http://')
+  for line in file('issues-email.txt', 'r'):
+    if state == 0:
+        lfm = lib_failures_regex.match(line)
+        if lfm:
+            # Pass the line through if the current person is a
+            # maintainer of this library
+            if lfm.group(1) in maintainers and person in maintainers[lfm.group(1)]:
+                message += line
+                print line,
+                
+        elif failures_in_lib_regex.match(line):
+            message += "\nThere are failures in these libraries you maintain:\n"
+        elif general_pass_regex.match(line):
+            message += line
+            
+    lib_start = lib_start_regex.match(line)
+    if lib_start:
+        if state == 0:
+            message += '\n'
+            
+        if lib_start.group(1) in maintainers and person in maintainers[lib_start.group(1)]:
+            message += line
+            state = 2
+        else:
+            state = 1
+    else:
+        if state == 1:
+            pass
+        elif state == 2:
+            message += line
+
+  if '--debug' in sys.argv:
+      print '-----------------Message text----------------'
+      print message
+  else:
+      print
+      
+  if '--send' in sys.argv:
+      print "Sending..."
+      smtp = smtplib.SMTP('milliways.osl.iu.edu')
+      smtp.sendmail(from_addr = 'Douglas Gregor <dgregor@osl.iu.edu>',
+                    to_addrs = person[1],
+                    msg = message)
+      print "Done."
+
+
# Send a message to the developer's list
def send_boost_developers_message(branch, maintainers, failing_libraries):
  """Forward the full issues email to the Boost developer list, inserting
  a warning block for failing libraries that have no maintainer on file."""
  to_line = 'boost@lists.boost.org'
  from_line = 'Douglas Gregor <dgregor@osl.iu.edu>'

  message = """From: Douglas Gregor <dgregor@osl.iu.edu>
To: boost@lists.boost.org
Reply-To: boost@lists.boost.org
Subject: Boost regression testing notification ("""

  message += str(datetime.date.today()) + " [" + branch + "]"
  message += ")"

  message += """

"""

  for line in file('issues-email.txt', 'r'):
      # Right before the detailed report, put out a warning message if
      # any libraries with failures to not have maintainers listed.
      if line.startswith('Detailed report:'):
          missing_maintainers = False
          for lib in failing_libraries:
              if not(lib in maintainers) or maintainers[lib] == list():
                  missing_maintainers = True

          if missing_maintainers:
              message += """WARNING: The following libraries have failing regression tests but do
not have a maintainer on file. Once a maintainer is found, add an
entry to libs/maintainers.txt to eliminate this message.
"""

              for lib in failing_libraries:
                  if not(lib in maintainers) or maintainers[lib] == list():
                      message += '  ' + lib + '\n'
              message += '\n'
              
      message += line
      
  if '--send' in sys.argv:
      print 'Sending notification email...'
      smtp = smtplib.SMTP('milliways.osl.iu.edu')
      smtp.sendmail(from_addr = from_line, to_addrs = to_line, msg = message)
      print 'Done.'

  if '--debug' in sys.argv:
      print "----------Boost developer's message text----------"
      print message
+
###############################################################################
# Main program                                                                #
###############################################################################

# Parse command-line options
branch = "trunk"
for arg in sys.argv:
    if arg.startswith("--branch="):
        branch = arg[len("--branch="):]

report = Report(branch)

# Try to parse the issues e-mail; --no-get skips the download step and
# reuses a previously fetched issues-email.txt.
if '--no-get' in sys.argv:
    okay = report.parseIssuesEmail()
else:
    okay = report.getIssuesEmail()

if not okay:
    # Could not obtain/parse the email; notify the report author and bail.
    print 'Aborting.'
    if '--send' in sys.argv:
        message = """From: Douglas Gregor <dgregor@osl.iu.edu>
        To: Douglas Gregor <dgregor@osl.iu.edu>
        Reply-To: boost@lists.boost.org
        Subject: Regression status script failed on """
        message += str(datetime.date.today()) + " [" + branch + "]"
        smtp = smtplib.SMTP('milliways.osl.iu.edu')
        smtp.sendmail(from_addr = 'Douglas Gregor <dgregor@osl.iu.edu>',
                      to_addrs = 'dgregor@osl.iu.edu',
                      msg = message)
    sys.exit(1)

# Try to parse maintainers information
report.parseLibraryMaintainersFile()

# One notification per maintainer with reportable failures.  Nothing is
# actually sent unless --send is given; --debug prints the message text.
for maintainer_name in report.maintainers:
    maintainer = report.maintainers[maintainer_name]

    email = maintainer.composeEmail(report)
    if email:
        if '--send' in sys.argv:
            print ('Sending notification email to ' + maintainer.name + '...')
            smtp = smtplib.SMTP('milliways.osl.iu.edu')
            smtp.sendmail(from_addr = report_author, 
                          to_addrs = maintainer.email,
                          msg = email)
            print 'done.\n'
        else:
            print 'Would send a notification e-mail to',maintainer.name

        if '--debug' in sys.argv:
            print ('Message text for ' + maintainer.name + ':\n')
            print email
            
# Finally the overall summary to the developer list.
email = report.composeSummaryEmail()
if '--send' in sys.argv:
    print 'Sending summary email to Boost developer list...'
    smtp = smtplib.SMTP('milliways.osl.iu.edu')
    smtp.sendmail(from_addr = report_author, 
                  to_addrs = boost_dev_list,
                  msg = email)
    print 'done.\n'
if '--debug' in sys.argv:
    print 'Message text for summary:\n'
    print email

if not ('--send' in sys.argv):
    print 'Chickening out and not sending any e-mail.'
    print 'Use --send to actually send e-mail, --debug to see e-mails.'

+ 5 - 0
regression/xsl_reports/empty_expected_results.xml

@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="utf-8"?>
+<root>
+<expected-failures>
+</expected-failures>
+</root>

+ 174 - 0
regression/xsl_reports/make_snapshot.py

@@ -0,0 +1,174 @@
+
+# Copyright (c) MetaCommunications, Inc. 2003-2007
+#
+# Distributed under the Boost Software License, Version 1.0. 
+# (See accompanying file LICENSE_1_0.txt or copy at 
+# http://www.boost.org/LICENSE_1_0.txt)
+
+import tarfile
+import shutil
+import time
+import os.path
+import string
+import sys
+import traceback
+
+
def retry( f, args, max_attempts=5, sleep_secs=10 ):
    """Call f(*args), retrying on any exception.

    Makes up to max_attempts + 1 calls in total (one initial try plus
    max_attempts retries), sleeping sleep_secs seconds between attempts.
    Re-raises the last exception once all attempts are exhausted; returns
    whatever f returns on the first success.
    """
    for attempts in range( max_attempts, -1, -1 ):
        try:
            return f( *args )
        # Fix: 'except Exception, msg' is Python-2-only syntax (a SyntaxError
        # on Python 3); the 'as' form is valid from Python 2.6 onwards.
        except Exception as msg:
            utils.log( '%s failed with message "%s"' % ( f.__name__, msg ) )
            if attempts == 0:
                utils.log( 'Giving up.' )
                raise

            utils.log( 'Retrying (%d more attempts).' % attempts )
            time.sleep( sleep_secs )
+
+
def rmtree( path ):
    """Remove the directory tree at *path*; a no-op if it does not exist."""
    if not os.path.exists( path ):
        return
    if sys.platform == 'win32':
        # Force-delete files first (clears read-only ones), then drop the tree.
        os.system( 'del /f /s /q "%s" >nul 2>&1' % path )
        shutil.rmtree( path )
    else:
        os.system( 'rm -f -r "%s"' % path )
+
+
def svn_command( command ):
    """Run an SVN command line, raising Exception on a non-zero exit code."""
    utils.log( 'Executing SVN command "%s"' % command )
    rc = os.system( command )
    if rc == 0:
        return
    raise Exception( 'SVN command "%s" failed with code %d' % ( command, rc ) )
+
+
def svn_export( sources_dir, user, tag ):
    # Export the given Boost SVN tag into sources_dir (with retries).
    # Anonymous access goes over plain http; a named user authenticates
    # over https.
    if user is None or user == 'anonymous':
        command = 'svn export --force http://svn.boost.org/svn/boost/%s %s' % ( tag, sources_dir )
    else:
        command = 'svn export --force --non-interactive --username=%s https://svn.boost.org/svn/boost/%s %s' \
                  % ( user, tag, sources_dir )

    # NOTE(review): this chdir targets the *basename* of sources_dir, i.e. a
    # path relative to the current working directory. That only works when the
    # process is already inside the working directory -- it looks like
    # os.path.dirname( sources_dir ) was intended; confirm against callers.
    os.chdir( os.path.basename( sources_dir ) )
    retry(
         svn_command
       , ( command, )
       )
+
+
def make_tarball(
          working_dir
        , tag
        , user
        , site_dir
        ):
    """Export the Boost SVN *tag* into *working_dir*, archive it as a .tar.bz2
    tarball with accompanying .timestamp and .md5 files, and -- when
    *site_dir* is given -- move the three files to the site directory.
    """
    timestamp = time.time()
    timestamp_suffix = time.strftime( '%y-%m-%d-%H%M', time.gmtime( timestamp ) )

    # The export directory name carries the timestamp; the tarball name does not.
    tag_suffix = tag.split( '/' )[-1]
    sources_dir = os.path.join(
          working_dir
        , 'boost-%s-%s' % ( tag_suffix, timestamp_suffix )
        )

    if os.path.exists( sources_dir ):
        utils.log( 'Directory "%s" already exists, cleaning it up...' % sources_dir )
        rmtree( sources_dir )

    try:
        os.mkdir( sources_dir )
        utils.log( 'Exporting files from SVN...' )
        svn_export( sources_dir, user, tag )
    except:
        # Do not leave a half-populated export behind; re-raise afterwards.
        utils.log( 'Cleaning up...' )
        rmtree( sources_dir )
        raise


    tarball_name = 'boost-%s.tar.bz2' % tag_suffix
    tarball_path = os.path.join( working_dir, tarball_name )

    utils.log( 'Archiving "%s" to "%s"...' % ( sources_dir, tarball_path ) )
    tar = tarfile.open( tarball_path, 'w|bz2' )
    tar.posix = False # see http://tinyurl.com/4ebd8

    tar.add( sources_dir, os.path.basename( sources_dir ) )
    tar.close()

    tarball_timestamp_path = os.path.join( working_dir, 'boost-%s.timestamp' % tag_suffix )

    utils.log( 'Writing timestamp into "%s"...' % tarball_timestamp_path )
    timestamp_file = open( tarball_timestamp_path, 'w' )
    timestamp_file.write( '%f' % timestamp )
    timestamp_file.close()

    # md5sum is run from the tarball's own directory so the checksum file
    # references a bare file name rather than an absolute path.
    md5sum_path = os.path.join( working_dir, 'boost-%s.md5' % tag_suffix )
    utils.log( 'Writing md5 checksum into "%s"...' % md5sum_path )
    old_dir = os.getcwd()
    os.chdir( os.path.dirname( tarball_path ) )
    os.system( 'md5sum -b "%s" >"%s"' % ( os.path.basename( tarball_path ), md5sum_path ) )
    os.chdir( old_dir )
    
    if site_dir is not None:
        # Stage the tarball through <site_dir>/temp so the final rename into
        # site_dir happens within one filesystem.
        utils.log( 'Moving "%s" to the site location "%s"...' % ( tarball_name, site_dir ) )
        temp_site_dir = os.path.join( site_dir, 'temp' )
        if not os.path.exists( temp_site_dir ):
            os.mkdir( temp_site_dir )
                
        shutil.move( tarball_path, temp_site_dir )
        shutil.move( os.path.join( temp_site_dir, tarball_name ), site_dir )
        shutil.move( tarball_timestamp_path, site_dir )
        shutil.move( md5sum_path, site_dir )
        utils.log( 'Removing "%s"...' % sources_dir )
        rmtree( sources_dir )
+
+
def accept_args( args ):
    """Parse command-line *args*; return (working_dir, tag, user, site_dir)."""
    args_spec = [
          'working-dir='
        , 'tag='
        , 'user='
        , 'site-dir='
        , 'mail='
        , 'help'
        ]

    # Defaults for the optional arguments; --working-dir must be supplied.
    options = {
          '--tag': 'trunk'
        , '--user': None
        , '--site-dir': None
        }

    utils.accept_args( args_spec, args, options, usage )

    wanted = ( '--working-dir', '--tag', '--user', '--site-dir' )
    return tuple( options[ name ] for name in wanted )
+
+
def usage():
    """Print command-line help for the snapshot script."""
    sys.stdout.write( 'Usage: %s [options]\n' % os.path.basename( sys.argv[0] ) )
    sys.stdout.write( '''
\t--working-dir   working directory
\t--tag           snapshot tag (i.e. 'trunk')
\t--user          Boost SVN user ID (optional)
\t--site-dir      site directory to copy the snapshot to (optional)
''' )
    sys.stdout.write( '\n' )
+
def main():
    """Entry point: build a snapshot tarball from command-line options."""
    parsed = accept_args( sys.argv[ 1: ] )
    make_tarball( *parsed )
+
# When imported as a module, 'utils' is expected to be importable already.
if __name__ != '__main__':  import utils
else:
    # In the absence of relative imports: when run as a script, walk up from
    # the script location to the enclosing 'xsl_reports' directory and put it
    # on sys.path so the shared 'utils' package can be found.
    xsl_path = os.path.abspath( os.path.dirname( sys.argv[ 0 ] ) )
    while os.path.basename( xsl_path ) != 'xsl_reports': xsl_path = os.path.dirname( xsl_path )
    sys.path.append( xsl_path )

    import utils
    main()

+ 369 - 0
regression/xsl_reports/report.py

@@ -0,0 +1,369 @@
+
+# Copyright (c) MetaCommunications, Inc. 2003-2004
+#
+# Distributed under the Boost Software License, Version 1.0. 
+# (See accompanying file LICENSE_1_0.txt or copy at 
+# http://www.boost.org/LICENSE_1_0.txt)
+
+import shutil
+import os.path
+import os
+import string
+import time
+import sys
+
+import utils
+import runner
+
+
# Codes of the individual reports that can be produced (see usage()).
report_types = [ 'us', 'ds', 'ud', 'dd', 'l', 'p', 'x', 'i', 'n', 'ddr', 'dsr' ]

# Directory containing this script/module; bundled resources (XSL stylesheets,
# the empty expected-results file) are resolved relative to it.
run_dir = os.path.abspath( os.path.dirname(
    sys.argv[ 0 ] if __name__ == '__main__'
    else sys.modules[ __name__ ].__file__ ) )


def map_path( path ):
    """Resolve *path* relative to the report scripts directory."""
    return os.path.join( run_dir, path )


def xsl_path( xsl_file_name, v2 = 0 ):
    """Return the location of the named XSL stylesheet (v2 variant if *v2*)."""
    subdir = 'xsl/v2' if v2 else 'xsl'
    return map_path( os.path.join( subdir, xsl_file_name ) )
+
+
def make_result_pages( 
          test_results_file
        , expected_results_file
        , failures_markup_file
        , tag
        , run_date
        , comment_file
        , results_dir
        , result_prefix
        , reports
        , v2
        ):
    """Run the XSLT pipeline over *test_results_file*, writing the selected
    *reports* (codes from report_types) under <results_dir>/<result_prefix>/.

    Falls back to the bundled empty expected-results file when
    *expected_results_file* is not given.  *v2* selects the v2 stylesheets.
    """
    utils.log( 'Producing the reports...' )
    # Module-level debug flag convention; presumably consulted by utils.log --
    # confirm in utils.
    __log__ = 1
    
    output_dir = os.path.join( results_dir, result_prefix )
    utils.makedirs( output_dir )
    
    if comment_file != '':
        comment_file = os.path.abspath( comment_file )
        
    if expected_results_file != '':
        expected_results_file = os.path.abspath( expected_results_file )
    else:
        expected_results_file = os.path.abspath( map_path( 'empty_expected_results.xml' ) )
        

    # 'x': merge the raw results with expected results and failure markup into
    # extended_test_results.xml -- the input of every report below.
    extended_test_results = os.path.join( output_dir, 'extended_test_results.xml' )
    if 'x' in reports:    
        utils.log( '    Merging with expected results...' )
        utils.libxslt( 
              utils.log
            , test_results_file
            , xsl_path( 'add_expected_results.xsl', v2 )
            , extended_test_results
            , { 'expected_results_file': expected_results_file, 'failures_markup_file' : failures_markup_file }
            )

    links = os.path.join( output_dir, 'links.html' )
    
    utils.makedirs( os.path.join( output_dir, 'output' ) )
    for mode in ( 'developer', 'user' ):
        utils.makedirs( os.path.join( output_dir, mode , 'output' ) )
        
    # 'l': per-test output pages linked from the detailed reports.
    if 'l' in reports:        
        utils.log( '    Making test output files...' )
        utils.libxslt( 
              utils.log
            , extended_test_results
            , xsl_path( 'links_page.xsl', v2 )
            , links
            , {
                  'source':                 tag
                , 'run_date':               run_date 
                , 'comment_file':           comment_file
                , 'explicit_markup_file':   failures_markup_file
                }
            )


    # 'i': developer-facing list of open issues.
    issues = os.path.join( output_dir, 'developer', 'issues.html'  )
    if 'i' in reports:
        utils.log( '    Making issues list...' )
        utils.libxslt( 
              utils.log
            , extended_test_results
            , xsl_path( 'issues_page.xsl', v2 )
            , issues
            , {
                  'source':                 tag
                , 'run_date':               run_date
                , 'comment_file':           comment_file
                , 'explicit_markup_file':   failures_markup_file
                }
            )

    # 'dd'/'ud': detailed per-mode reports.
    for mode in ( 'developer', 'user' ):
        if mode[0] + 'd' in reports:
            utils.log( '    Making detailed %s  report...' % mode )
            utils.libxslt( 
                  utils.log
                , extended_test_results
                , xsl_path( 'result_page.xsl', v2 )
                , os.path.join( output_dir, mode, 'index.html' )
                , { 
                      'links_file':             'links.html'
                    , 'mode':                   mode
                    , 'source':                 tag
                    , 'run_date':               run_date 
                    , 'comment_file':           comment_file
                    , 'expected_results_file':  expected_results_file
                    , 'explicit_markup_file' :  failures_markup_file
                    }
                )
    
    # 'ds'/'us': summary per-mode reports.
    for mode in ( 'developer', 'user' ):
        if mode[0] + 's' in reports:
            utils.log( '    Making summary %s  report...' % mode )
            utils.libxslt(
                  utils.log
                , extended_test_results
                , xsl_path( 'summary_page.xsl', v2 )
                , os.path.join( output_dir, mode, 'summary.html' )
                , { 
                      'mode' :                  mode 
                    , 'source':                 tag
                    , 'run_date':               run_date 
                    , 'comment_file':           comment_file
                    , 'explicit_markup_file' :  failures_markup_file
                    }
                )

    # NOTE(review): the two release-report log messages below interpolate
    # 'mode', which at this point is just the leftover value from the loop
    # above -- the generated files themselves are hard-wired to 'developer'.
    if v2 and "ddr" in reports:
        utils.log( '    Making detailed %s release report...' % mode )
        utils.libxslt( 
                  utils.log
                , extended_test_results
                , xsl_path( 'result_page.xsl', v2 )
                , os.path.join( output_dir, "developer", 'index_release.html' )
                , { 
                      'links_file':             'links.html'
                    , 'mode':                   "developer"
                    , 'source':                 tag
                    , 'run_date':               run_date 
                    , 'comment_file':           comment_file
                    , 'expected_results_file':  expected_results_file
                    , 'explicit_markup_file' :  failures_markup_file
                    , 'release':                "yes"
                    }
                )

    if v2 and "dsr" in reports:
        utils.log( '    Making summary %s release report...' % mode )
        utils.libxslt(
                  utils.log
                , extended_test_results
                , xsl_path( 'summary_page.xsl', v2 )
                , os.path.join( output_dir, "developer", 'summary_release.html' )
                , { 
                      'mode' :                  "developer"
                    , 'source':                 tag
                    , 'run_date':               run_date 
                    , 'comment_file':           comment_file
                    , 'explicit_markup_file' :  failures_markup_file
                    , 'release':                'yes'
                    }
                )
        
    # NOTE(review): 'e' is accepted here but is not listed in report_types,
    # so it can only be requested explicitly via --reports.
    if 'e' in reports:
        utils.log( '    Generating expected_results ...' )
        utils.libxslt(
              utils.log
            , extended_test_results
            , xsl_path( 'produce_expected_results.xsl', v2 )
            , os.path.join( output_dir, 'expected_results.xml' )
            )

    if v2 and 'n' in reports:
        utils.log( '    Making runner comment files...' )
        utils.libxslt(
              utils.log
            , extended_test_results
            , xsl_path( 'runners.xsl', v2 )
            , os.path.join( output_dir, 'runners.html' )
            )

    shutil.copyfile(
          xsl_path( 'html/master.css', v2 )
        , os.path.join( output_dir, 'master.css' )
        )
+
+
def build_xsl_reports( 
          locate_root_dir
        , tag
        , expected_results_file
        , failures_markup_file
        , comment_file
        , results_dir
        , result_file_prefix
        , dont_collect_logs = 0
        , reports = report_types
        , v2 = 0
        , user = None
        , upload = False
        ):
    """Collect (or, for v2, merge) the test logs into test_results.xml, then
    generate the requested HTML reports via make_result_pages(); optionally
    upload the v2 report bundle to SourceForge.
    """
    # The parentheses are redundant -- this is a plain string assignment.
    ( run_date ) = time.strftime( '%Y-%m-%dT%H:%M:%SZ', time.gmtime() )
    
    test_results_file = os.path.join( results_dir, 'test_results.xml' )
    bin_boost_dir = os.path.join( locate_root_dir, 'bin', 'boost' )

    if v2:
        # v2 combines the logs of multiple runners into one results file.
        import merger
        merger.merge_logs(
              tag
            , user
            , results_dir
            , test_results_file
            , dont_collect_logs
            )
    else:
        # v1: scan the local build tree for test logs unless told not to.
        utils.log( '  dont_collect_logs: %s' % dont_collect_logs )
        if not dont_collect_logs:
            f = open( test_results_file, 'w+' )
            f.write( '<tests>\n' )
            runner.collect_test_logs( [ bin_boost_dir ], f )
            f.write( '</tests>\n' )
            f.close()

    make_result_pages( 
          test_results_file
        , expected_results_file
        , failures_markup_file
        , tag
        , run_date
        , comment_file
        , results_dir
        , result_file_prefix
        , reports
        , v2
        )

    if v2 and upload:
        upload_dir = 'regression-logs/'
        utils.log( 'Uploading v2 results into "%s" [connecting as %s]...' % ( upload_dir, user ) )
        
        archive_name = '%s.tar.gz' % result_file_prefix
        utils.tar( 
              os.path.join( results_dir, result_file_prefix )
            , archive_name
            )
        
        utils.sourceforge.upload( os.path.join( results_dir, archive_name ), upload_dir, user )
        # Unpacking happens server-side in the background.
        utils.sourceforge.untar( os.path.join( upload_dir, archive_name ), user, background = True )
+
+
def accept_args( args ):
    """Parse command-line *args* and return the positional tuple expected by
    build_xsl_reports().

    Flag options (--dont-collect-logs, --v2, --upload) are detected by key
    *presence* in 'options', so they must not be pre-seeded with defaults.
    """
    args_spec = [ 
          'locate-root='
        , 'tag='
        , 'expected-results='
        , 'failures-markup='
        , 'comment='
        , 'results-dir='
        , 'results-prefix='
        , 'dont-collect-logs'
        , 'reports='
        , 'v2'
        , 'user='
        , 'upload'
        , 'help'
        ]
        
    options = { 
          '--comment': ''
        , '--expected-results': ''
        , '--failures-markup': ''
        , '--reports': ','.join( report_types )
        , '--tag': None
        , '--user': None
        # Fix: removed the stray "'upload': False" entry.  Its key lacked the
        # '--' prefix, so it was dead data -- and a correctly spelled
        # '--upload' default would make the presence check below always true.
        }
    
    utils.accept_args( args_spec, args, options, usage )
    if '--results-dir' not in options:
        options[ '--results-dir' ] = options[ '--locate-root' ]

    if '--results-prefix' not in options:
        if '--v2' in options:
            options[ '--results-prefix' ] = 'all'
        else:
            options[ '--results-prefix' ] = ''
    
    return ( 
          options[ '--locate-root' ]
        , options[ '--tag' ]
        , options[ '--expected-results' ]
        , options[ '--failures-markup' ]
        , options[ '--comment' ]
        , options[ '--results-dir' ]
        , options[ '--results-prefix' ]
        , '--dont-collect-logs' in options
        , options[ '--reports' ].split( ',' )
        , '--v2' in options
        , options[ '--user' ]
        , '--upload' in options
        )
+
+
def usage():
    """Print command-line help for the report generator."""
    sys.stdout.write( 'Usage: %s [options]\n' % os.path.basename( sys.argv[0] ) )
    sys.stdout.write( '''
\t--locate-root         the same as --locate-root in compiler_status
\t--tag                 the tag for the results (i.e. 'CVS-HEAD')
\t--expected-results    the file with the results to be compared with
\t                      the current run
\t--failures-markup     the file with the failures markup
\t--comment             an html comment file (will be inserted in the reports)
\t--results-dir         the directory containing -links.html, -fail.html
\t                      files produced by compiler_status (by default the
\t                      same as specified in --locate-root)
\t--results-prefix      the prefix of -links.html, -fail.html
\t                      files produced by compiler_status
\t--v2                  v2 reports (combine multiple runners results into a 
\t                      single set of reports)

The following options are valid only for v2 reports:

\t--user                SourceForge user name for a shell account
\t--upload              upload v2 reports to SourceForge 

The following options are useful in debugging:

\t--dont-collect-logs dont collect the test logs
\t--reports           produce only the specified reports
\t                        us - user summary
\t                        ds - developer summary
\t                        ud - user detailed
\t                        dd - developer detailed
\t                        l  - links
\t                        p  - patches
\t                        x  - extended results file
\t                        i  - issues
''' )
    sys.stdout.write( '\n' )
+
def main():
    """Entry point: run build_xsl_reports() with the parsed options."""
    parsed = accept_args( sys.argv[ 1 : ] )
    build_xsl_reports( *parsed )

if __name__ == '__main__':
    main()

+ 2 - 0
regression/xsl_reports/runner/__init__.py

@@ -0,0 +1,2 @@
+
+from collect_and_upload_logs import *

+ 500 - 0
regression/xsl_reports/runner/collect_and_upload_logs.py

@@ -0,0 +1,500 @@
+
+# Copyright (c) MetaCommunications, Inc. 2003-2007
+#
+# Distributed under the Boost Software License, Version 1.0. 
+# (See accompanying file LICENSE_1_0.txt or copy at 
+# http://www.boost.org/LICENSE_1_0.txt)
+
+import xml.sax.saxutils
+import zipfile
+import ftplib
+import time
+import stat
+import xml.dom.minidom
+import xmlrpclib
+import httplib
+
+import os.path
+import string
+import sys
+
+
def process_xml_file( input_file, output_file ):
    # Append the contents of one test_log.xml to the combined results stream,
    # translating each line through utils.char_translation_table (defined in
    # utils -- presumably scrubs characters illegal in XML; confirm there).
    utils.log( 'Processing test log "%s"' % input_file )
    
    f = open( input_file, 'r' )
    xml = f.readlines()
    f.close()
    
    # string.translate with a 256-character table is a Python 2 idiom
    # (Python 3's str.translate takes a mapping instead).
    for i in range( 0, len(xml)):
        xml[i] = string.translate( xml[i], utils.char_translation_table )

    output_file.writelines( xml )
+
+
def process_test_log_files( output_file, dir, names ):
    """Directory-walk callback: append every 'test_log.xml' among *names*
    (entries of *dir*) to *output_file*."""
    for entry in names:
        if os.path.basename( entry ) != 'test_log.xml':
            continue
        process_xml_file( os.path.join( dir, entry ), output_file )
+
+
def collect_test_logs( input_dirs, test_results_writer ):
    """Recursively gather every test_log.xml under each of *input_dirs* and
    append their (scrubbed) contents to *test_results_writer*."""
    __log__ = 1
    utils.log( 'Collecting test logs ...' )
    for input_dir in input_dirs:
        utils.log( 'Walking directory "%s" ...' % input_dir )
        # Fix: os.path.walk is deprecated and removed in Python 3; os.walk
        # (available since Python 2.3) visits the same directories.  Passing
        # only file names to the callback also avoids matching a directory
        # that happens to be named test_log.xml.
        for dir_path, dir_names, file_names in os.walk( input_dir ):
            process_test_log_files( test_results_writer, dir_path, file_names )
+
# Mapping of test-log result attributes to Dart status strings.
dart_status_from_result = {
    'succeed': 'passed',
    'fail': 'failed',
    'note': 'passed',
    '': 'notrun'
    }

# Mapping of source tag to the Dart project name.
dart_project = {
    'trunk': 'Boost_HEAD',
    '': 'Boost_HEAD'
    }

# Mapping of run type to the Dart submission track.
dart_track = {
    'full': 'Nightly',
    'incremental': 'Continuous',
    '': 'Experimental'
    }

# 256-entry translation table: keeps newlines and printable ASCII, replaces
# every other byte with '?'.
ascii_only_table = ''.join(
    chr(code) if (chr(code) in '\n\r' or 32 <= code < 0x80) else '?'
    for code in range(256)
    )
+
class xmlrpcProxyTransport(xmlrpclib.Transport):
    # xmlrpclib transport that routes every request through an HTTP proxy.
    # Python 2 only: relies on the old httplib.HTTP class and xmlrpclib.
    def __init__(self, proxy):
        # NOTE(review): the base-class __init__ is not invoked; that matched
        # the xmlrpclib.Transport of this era -- verify against newer versions.
        self.proxy = proxy
    def make_connection(self, host):
        # Remember the real target host; connect to the proxy instead.
        self.realhost = host
        return httplib.HTTP(self.proxy)
    def send_request(self, connection, handler, request_body):
        # Proxies need the absolute URI in the request line.
        connection.putrequest('POST','http://%s%s' % (self.realhost,handler))
    def send_host(self, connection, host):
        connection.putheader('Host',self.realhost)
    
+    
+
def publish_test_logs(
    input_dirs,
    runner_id, tag, platform, comment_file, timestamp, user, source, run_type,
    dart_server = None,
    http_proxy = None,
    **unused
    ):
    """Walk *input_dirs* for test_log.xml files and submit their contents to
    the Dart server (one XML-RPC submission per toolset), optionally via an
    HTTP proxy.  A no-op unless *dart_server* is set.
    """
    __log__ = 1
    utils.log( 'Publishing test logs ...' )
    dart_rpc = None
    # One accumulated DartSubmission DOM per toolset, keyed by toolset name.
    dart_dom = {}
    
    def _publish_test_log_files_ ( unused, dir, names ):
        # Directory-walk callback: fold each test_log.xml into dart_dom.
        for file in names:
            if os.path.basename( file ) == 'test_log.xml':
                utils.log( 'Publishing test log "%s"' % os.path.join(dir,file) )
                if dart_server:
                    # Scrub non-printable/non-ASCII bytes before parsing.
                    log_xml = open(os.path.join(dir,file)).read().translate(ascii_only_table)
                    #~ utils.log( '--- XML:\n%s' % log_xml)
                    #~ It seems possible to get an empty XML result file :-(
                    if log_xml == "": continue
                    log_dom = xml.dom.minidom.parseString(log_xml)
                    test = {
                        'library': log_dom.documentElement.getAttribute('library'),
                        'test-name': log_dom.documentElement.getAttribute('test-name'),
                        'toolset': log_dom.documentElement.getAttribute('toolset')
                        }
                    if not test['test-name'] or test['test-name'] == '':
                        test['test-name'] = 'unknown'
                    if not test['toolset'] or test['toolset'] == '':
                        test['toolset'] = 'unknown'
                    # Lazily create the per-toolset submission envelope.
                    if not dart_dom.has_key(test['toolset']):
                        dart_dom[test['toolset']] = xml.dom.minidom.parseString(
'''<?xml version="1.0" encoding="UTF-8"?>
<DartSubmission version="2.0" createdby="collect_and_upload_logs.py">
    <Site>%(site)s</Site>
    <BuildName>%(buildname)s</BuildName>
    <Track>%(track)s</Track>
    <DateTimeStamp>%(datetimestamp)s</DateTimeStamp>
</DartSubmission>
'''                         % {
                                'site': runner_id,
                                'buildname': "%s -- %s (%s)" % (platform,test['toolset'],run_type),
                                'track': dart_track[run_type],
                                'datetimestamp' : timestamp
                            } )
                    submission_dom = dart_dom[test['toolset']]
                    # Each child element of the log (compile/link/run/...)
                    # becomes one <Test> entry in the submission.
                    for node in log_dom.documentElement.childNodes:
                        if node.nodeType == xml.dom.Node.ELEMENT_NODE:
                            if node.firstChild:
                                log_data = xml.sax.saxutils.escape(node.firstChild.data)
                            else:
                                log_data = ''
                            test_dom = xml.dom.minidom.parseString('''<?xml version="1.0" encoding="UTF-8"?>
<Test>
    <Name>.Test.Boost.%(tag)s.%(library)s.%(test-name)s.%(type)s</Name>
    <Status>%(result)s</Status>
    <Measurement name="Toolset" type="text/string">%(toolset)s</Measurement>
    <Measurement name="Timestamp" type="text/string">%(timestamp)s</Measurement>
    <Measurement name="Log" type="text/text">%(log)s</Measurement>
</Test>
    '''                         % {
                                    'tag': tag,
                                    'library': test['library'],
                                    'test-name': test['test-name'],
                                    'toolset': test['toolset'],
                                    'type': node.nodeName,
                                    'result': dart_status_from_result[node.getAttribute('result')],
                                    'timestamp': node.getAttribute('timestamp'),
                                    'log': log_data
                                })
                            submission_dom.documentElement.appendChild(
                                test_dom.documentElement.cloneNode(1) )
    
    for input_dir in input_dirs:
        utils.log( 'Walking directory "%s" ...' % input_dir )
        os.path.walk( input_dir, _publish_test_log_files_, None )
    if dart_server:
        # Ship one submission per toolset; failures are logged, not fatal.
        try:
            rpc_transport = None
            if http_proxy:
                rpc_transport = xmlrpcProxyTransport(http_proxy)
            dart_rpc = xmlrpclib.ServerProxy(
                'http://%s/%s/Command/' % (dart_server,dart_project[tag]),
                rpc_transport )
            for dom in dart_dom.values():
                #~ utils.log('Dart XML: %s' % dom.toxml('utf-8'))
                dart_rpc.Submit.put(xmlrpclib.Binary(dom.toxml('utf-8')))
        except Exception, e:
            utils.log('Dart server error: %s' % e)
+
+
def upload_to_ftp( tag, results_file, ftp_proxy, debug_level ):
    """Upload *results_file* into <site_path>/<tag>/ on the Boost regression
    FTP site, optionally connecting through *ftp_proxy*.  Creates the tag
    directory hierarchy on first use."""
    ftp_site = 'fx.meta-comm.com'
    site_path = '/boost-regression'
    utils.log( 'Uploading log archive "%s" to ftp://%s%s/%s' % ( results_file, ftp_site, site_path, tag ) )
    
    if not ftp_proxy:
        ftp = ftplib.FTP( ftp_site )
        ftp.set_debuglevel( debug_level )
        ftp.login()
    else:
        utils.log( '    Connecting through FTP proxy server "%s"' % ftp_proxy )
        ftp = ftplib.FTP( ftp_proxy )
        ftp.set_debuglevel( debug_level )
        ftp.set_pasv (0) # turn off PASV mode
        ftp.login( 'anonymous@%s' % ftp_site, 'anonymous@' )

    ftp.cwd( site_path )
    try:
        ftp.cwd( tag )
    except ftplib.error_perm:
        # Tag directory does not exist yet -- create it one level at a time.
        for dir in tag.split( '/' ):
            ftp.mkd( dir )
            ftp.cwd( dir )

    # Fix: the file handle was previously never closed (leaked on both the
    # success and failure path); close it even if the transfer raises.
    f = open( results_file, 'rb' )
    try:
        ftp.storbinary( 'STOR %s' % os.path.basename( results_file ), f )
    finally:
        f.close()
    ftp.quit()
+
+
def copy_comments( results_xml, comment_file ):
    """Embed the contents of *comment_file* (when present) into the results
    stream as a <comment> element; emits an empty element otherwise."""
    results_xml.startElement( 'comment', {} )

    if not os.path.exists( comment_file ):
        utils.log( 'Warning: comment file "%s" is not found.' % comment_file )
    else:
        utils.log( 'Reading comments file "%s"...' % comment_file )
        f = open( comment_file, 'r' )
        try:
            results_xml.characters( f.read() )
        finally:
            f.close()

    results_xml.endElement( 'comment' )
+
+
def compress_file( file_path, archive_path ):
    """Zip *file_path* into *archive_path*; when the zipfile module fails,
    fall back to a platform-specific 'zip_cmd' helper module located next to
    the script (raises if that is missing too)."""
    utils.log( 'Compressing "%s"...' % file_path )

    try:
        z = zipfile.ZipFile( archive_path, 'w', zipfile.ZIP_DEFLATED )
        z.write( file_path, os.path.basename( file_path ) )
        z.close()
        utils.log( 'Done writing "%s".'% archive_path )
    # 'as' form: valid on Python 2.6+, required on Python 3.
    except Exception as msg:
        # Fix: corrected the typo "falied" in the warning message.
        utils.log( 'Warning: Compressing failed (%s)' % msg )
        utils.log( '         Trying to compress using a platform-specific tool...' )
        try: import zip_cmd
        except ImportError:
            script_dir = os.path.dirname( os.path.abspath( sys.argv[0] ) )
            utils.log( 'Could not find \'zip_cmd\' module in the script directory (%s).' % script_dir )
            raise Exception( 'Compressing failed!' )
        else:
            if os.path.exists( archive_path ):
                # Fix: log before removing -- previously the "Removing stale"
                # message was emitted after the unlink it describes.
                utils.log( 'Removing stale "%s".' % archive_path )
                os.unlink( archive_path )

            zip_cmd.main( file_path, archive_path )
            utils.log( 'Done compressing "%s".' % archive_path )
+
+
def read_timestamp( file ):
    """Return the modification time of *file* as a UTC struct_time, falling
    back to the current UTC time (with a warning) when *file* is missing."""
    if os.path.exists( file ):
        return time.gmtime( os.stat( file ).st_mtime )

    result = time.gmtime()
    utils.log( 'Warning: timestamp file "%s" does not exist'% file )
    utils.log( 'Using current UTC time (%s)' % result )
    return result
+
+
def collect_logs( 
          results_dir
        , runner_id
        , tag
        , platform
        , comment_file
        , timestamp_file
        , user
        , source
        , run_type
        , dart_server = None
        , http_proxy = None
        , revision = ''
        , **unused
        ):
    """Assemble every test_log.xml under *results_dir* into a single
    <runner_id>.xml test-run document, then compress it to <runner_id>.zip.
    When *dart_server* is given, also publish the logs to Dart first.
    """
    # The run timestamp is taken from the mtime of timestamp_file.
    timestamp = time.strftime( '%Y-%m-%dT%H:%M:%SZ', read_timestamp( timestamp_file ) )
    
    if dart_server:
        publish_test_logs( [ results_dir ],
            runner_id, tag, platform, comment_file, timestamp, user, source, run_type,
            dart_server = dart_server,
            http_proxy = http_proxy )
    
    results_file = os.path.join( results_dir, '%s.xml' % runner_id )
    results_writer = open( results_file, 'w' )
    utils.log( 'Collecting test logs into "%s"...' % results_file )
        
    # The <test-run> root element carries all run metadata as attributes.
    results_xml = xml.sax.saxutils.XMLGenerator( results_writer )
    results_xml.startDocument()
    results_xml.startElement( 
          'test-run'
        , { 
              'tag':        tag
            , 'platform':   platform
            , 'runner':     runner_id
            , 'timestamp':  timestamp
            , 'source':     source
            , 'run-type':   run_type
            , 'revision':   revision
            }
        )
    
    copy_comments( results_xml, comment_file )
    collect_test_logs( [ results_dir ], results_writer )

    results_xml.endElement( "test-run" )
    results_xml.endDocument()
    results_writer.close()
    utils.log( 'Done writing "%s".' % results_file )

    compress_file(
          results_file
        , os.path.join( results_dir,'%s.zip' % runner_id )
        )
+
+
def upload_logs(
          results_dir
        , runner_id
        , tag
        , user
        , ftp_proxy
        , debug_level
        , send_bjam_log = False
        , timestamp_file = None
        , dart_server = None
        , **unused
        ):
    """Upload the previously built <runner_id>.zip results archive to the
    regression FTP site; optionally also compress and upload the full
    bjam.log (timestamped) into the tag's logs/ subdirectory.

    NOTE(review): *user* and *dart_server* are accepted but not used in this
    function body.
    """
    logs_archive = os.path.join( results_dir, '%s.zip' % runner_id )
    upload_to_ftp( tag, logs_archive, ftp_proxy, debug_level )
    if send_bjam_log:
        bjam_log_path = os.path.join( results_dir, 'bjam.log' )
        # Fall back to the bjam log's own mtime for the archive name.
        if not timestamp_file:
            timestamp_file = bjam_log_path

        timestamp = time.strftime( '%Y-%m-%d-%H-%M-%S', read_timestamp( timestamp_file ) )
        logs_archive = os.path.join( results_dir, '%s.%s.log.zip' % ( runner_id, timestamp ) )
        compress_file( bjam_log_path, logs_archive )
        upload_to_ftp( '%s/logs' % tag, logs_archive, ftp_proxy, debug_level )
+
+
def collect_and_upload_logs( 
          results_dir
        , runner_id
        , tag
        , platform
        , comment_file
        , timestamp_file
        , user
        , source
        , run_type
        , revision = None
        , ftp_proxy = None
        , debug_level = 0
        , send_bjam_log = False
        , dart_server = None
        , http_proxy = None
        , **unused
        ):
    """Convenience command: collect_logs() followed by upload_logs() with the
    same settings.  This is the default command of the script (see main())."""
    collect_logs( 
          results_dir
        , runner_id
        , tag
        , platform
        , comment_file
        , timestamp_file
        , user
        , source
        , run_type
        , revision = revision
        , dart_server = dart_server
        , http_proxy = http_proxy
        )
    
    upload_logs(
          results_dir
        , runner_id
        , tag
        , user
        , ftp_proxy
        , debug_level
        , send_bjam_log
        , timestamp_file
        , dart_server = dart_server
        )
+
+
def accept_args( args ):
    """Parse command-line *args* into the keyword-argument dict consumed by
    the command functions (collect_logs / upload_logs / collect_and_upload_logs)."""
    args_spec = [ 
          'locate-root='
        , 'runner='
        , 'tag='
        , 'platform='
        , 'comment='
        , 'timestamp='
        , 'source='
        , 'run-type='
        , 'user='
        , 'ftp-proxy='
        , 'proxy='
        , 'debug-level='
        , 'send-bjam-log'
        , 'help'
        , 'dart-server='
        , 'revision='
        ]
    
    options = {
          '--tag'           : 'trunk'
        , '--platform'      : sys.platform
        , '--comment'       : 'comment.html'
        , '--timestamp'     : 'timestamp'
        , '--user'          : None
        , '--source'        : 'SVN'
        , '--run-type'      : 'full'
        , '--ftp-proxy'     : None
        , '--proxy'         : None
        , '--debug-level'   : 0
        , '--dart-server'   : 'beta.boost.org:8081'
        , '--revision'      : None
        }
    
    utils.accept_args( args_spec, args, options, usage )
        
    return {
          'results_dir'     : options[ '--locate-root' ]
        , 'runner_id'       : options[ '--runner' ]
        , 'tag'             : options[ '--tag' ]
        , 'platform'        : options[ '--platform']
        , 'comment_file'    : options[ '--comment' ]
        , 'timestamp_file'  : options[ '--timestamp' ]
        , 'user'            : options[ '--user' ]
        , 'source'          : options[ '--source' ]
        , 'run_type'        : options[ '--run-type' ]
        , 'ftp_proxy'       : options[ '--ftp-proxy' ]
        , 'http_proxy'      : options[ '--proxy' ]
        , 'debug_level'     : int(options[ '--debug-level' ])
        , 'send_bjam_log'   : '--send-bjam-log' in options
        # Fix: the key used to be 'revision   ' (with trailing spaces), so the
        # value always fell into the command functions' **unused catch-all and
        # --revision silently never took effect.
        , 'revision'        : options[ '--revision' ]
        }
+
+
# Maps the optional first command-line argument to its implementation;
# 'collect-and-upload' is the default when no command is given.
commands = {
      'collect-and-upload'  : collect_and_upload_logs
    , 'collect-logs'        : collect_logs
    , 'upload-logs'         : upload_logs
    }
+
def usage():
    """Print command-line help, listing the available commands."""
    sys.stdout.write( 'Usage: %s [command] [options]\n' % os.path.basename( sys.argv[0] ) )
    sys.stdout.write( '''
Commands:
\t%s

Options:
\t--locate-root   directory to to scan for "test_log.xml" files
\t--runner        runner ID (e.g. "Metacomm")
\t--timestamp     path to a file which modification time will be used 
\t                as a timestamp of the run ("timestamp" by default)
\t--comment       an HTML comment file to be inserted in the reports
\t                ("comment.html" by default)
\t--tag           the tag for the results ("trunk" by default)
\t--user          SourceForge user name for a shell account (optional)
\t--source        where Boost sources came from ("SVN" or "tarball";
\t                "SVN" by default)
\t--run-type      "incremental" or "full" ("full" by default)
\t--send-bjam-log in addition to regular XML results, send in full bjam
\t                log of the regression run
\t--proxy         HTTP proxy server address and port (e.g.
\t                'http://www.someproxy.com:3128', optional)
\t--ftp-proxy     FTP proxy server (e.g. 'ftpproxy', optional)
\t--debug-level   debugging level; controls the amount of debugging 
\t                output printed; 0 by default (no debug output)
\t--dart-server   The dart server to send results to.
''' % '\n\t'.join( commands.keys() ) )
    sys.stdout.write( '\n' )
+
+    
def main():
    """Dispatch to the requested command; default is collect-and-upload."""
    if len(sys.argv) > 1 and sys.argv[1] in commands:
        command, args = sys.argv[1], sys.argv[ 2: ]
    else:
        command, args = 'collect-and-upload', sys.argv[ 1: ]

    commands[ command ]( **accept_args( args ) )
+
+
# When imported as a module, 'utils' is expected to be importable already.
if __name__ != '__main__':  import utils
else:
    # In the absence of relative imports: when run as a script, walk up from
    # the script location to the enclosing 'xsl_reports' directory and put it
    # on sys.path so the shared 'utils' package can be found.
    xsl_path = os.path.abspath( os.path.dirname( sys.argv[ 0 ] ) )
    while os.path.basename( xsl_path ) != 'xsl_reports': xsl_path = os.path.dirname( xsl_path )
    sys.path.append( xsl_path )

    import utils
    main()

+ 229 - 0
regression/xsl_reports/runner/default.css

@@ -0,0 +1,229 @@
+/*
+:Author: David Goodger
+:Contact: goodger@users.sourceforge.net
+:date: $Date$
+:version: $Revision$
+:copyright: This stylesheet has been placed in the public domain.
+
+Default cascading style sheet for the HTML output of Docutils.
+*/
+
+body {
+    background-color: #fffff5;
+}
+
+h2 {
+    text-decoration: underline;
+}
+
+.first {
+  margin-top: 0 }
+
+.last {
+  margin-bottom: 0 }
+
+a.toc-backref {
+  text-decoration: none ;
+  color: black }
+
+blockquote.epigraph {
+  margin: 2em 5em ; }
+
+dd {
+  margin-bottom: 0.5em }
+
+div.abstract {
+  margin: 2em 5em }
+
+div.abstract p.topic-title {
+  font-weight: bold ;
+  text-align: center }
+
+div.attention, div.caution, div.danger, div.error, div.hint,
+div.important, div.note, div.tip, div.warning, div.admonition {
+  margin: 2em ;
+  border: medium outset ;
+  padding: 1em }
+
+div.attention p.admonition-title, div.caution p.admonition-title,
+div.danger p.admonition-title, div.error p.admonition-title,
+div.warning p.admonition-title {
+  color: red ;
+  font-weight: bold ;
+  font-family: sans-serif }
+
+div.hint p.admonition-title, div.important p.admonition-title,
+div.note p.admonition-title, div.tip p.admonition-title,
+div.admonition p.admonition-title {
+  font-weight: bold ;
+  font-family: sans-serif }
+
+div.dedication {
+  margin: 2em 5em ;
+  text-align: center ;
+  font-style: italic }
+
+div.dedication p.topic-title {
+  font-weight: bold ;
+  font-style: normal }
+
+div.figure {
+  margin-left: 2em }
+
+div.sidebar {
+  margin-left: 1em ;
+  border: medium outset ;
+  padding: 0em 1em ;
+  background-color: #ffffee ;
+  width: 40% ;
+  float: right ;
+  clear: right }
+
+div.sidebar p.rubric {
+  font-family: sans-serif ;
+  font-size: medium }
+
+div.system-messages {
+  margin: 5em }
+
+div.system-messages h1 {
+  color: red }
+
+div.system-message {
+  border: medium outset ;
+  padding: 1em }
+
+div.system-message p.system-message-title {
+  color: red ;
+  font-weight: bold }
+
+div.topic {
+  margin: 2em }
+
+h1.title {
+  text-align: center }
+
+h2.subtitle {
+  text-align: center }
+
+ol.simple, ul.simple {
+  margin-bottom: 1em }
+
+ol.arabic {
+  list-style: decimal }
+
+ol.loweralpha {
+  list-style: lower-alpha }
+
+ol.upperalpha {
+  list-style: upper-alpha }
+
+ol.lowerroman {
+  list-style: lower-roman }
+
+ol.upperroman {
+  list-style: upper-roman }
+
+p.attribution {
+  text-align: right ;
+  margin-left: 50% }
+
+p.caption {
+  font-style: italic }
+
+p.credits {
+  font-style: italic ;
+  font-size: smaller }
+
+p.label {
+  white-space: nowrap }
+
+p.rubric {
+  font-weight: bold ;
+  font-size: larger ;
+  color: maroon ;
+  text-align: center }
+
+p.sidebar-title {
+  font-family: sans-serif ;
+  font-weight: bold ;
+  font-size: larger }
+
+p.sidebar-subtitle {
+  font-family: sans-serif ;
+  font-weight: bold }
+
+p.topic-title {
+  font-weight: bold }
+
+pre.address {
+  margin-bottom: 0 ;
+  margin-top: 0 ;
+  font-family: serif ;
+  font-size: 100% }
+
+pre.line-block {
+  font-family: serif ;
+  font-size: 100% }
+
+pre.literal-block, pre.doctest-block {
+  margin-left: 2em ;
+  margin-right: 2em ;
+  background-color: #eeeeee }
+
+span.classifier {
+  font-family: sans-serif ;
+  font-style: oblique }
+
+span.classifier-delimiter {
+  font-family: sans-serif ;
+  font-weight: bold }
+
+span.interpreted {
+  font-family: sans-serif }
+
+span.option {
+  white-space: nowrap }
+
+span.option-argument {
+  font-style: italic }
+
+span.pre {
+  white-space: pre }
+
+span.problematic {
+  color: red }
+
+table {
+  margin-top: 0.5em ;
+  margin-bottom: 0.5em }
+
+table.citation {
+  border-left: solid thin gray ;
+  padding-left: 0.5ex }
+
+table.docinfo {
+  margin: 2em 4em }
+
+table.footnote {
+  border-left: solid thin black ;
+  padding-left: 0.5ex }
+
+td, th {
+  padding-left: 0.5em ;
+  padding-right: 0.5em ;
+  vertical-align: top }
+
+th.docinfo-name, th.field-name {
+  font-weight: bold ;
+  text-align: left ;
+  white-space: nowrap }
+
+h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt {
+  font-size: 100% }
+
+tt {
+  background-color: #eeeeee }
+
+ul.auto-toc {
+  list-style-type: none }

+ 485 - 0
regression/xsl_reports/runner/instructions.html

@@ -0,0 +1,485 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<meta name="generator" content="Docutils 0.5: http://docutils.sourceforge.net/" />
+<title>Running Boost Regression Tests</title>
+<style type="text/css">
+
+/*
+:Author: David Goodger
+:Contact: goodger@users.sourceforge.net
+:date: $Date$
+:version: $Revision$
+:copyright: This stylesheet has been placed in the public domain.
+
+Default cascading style sheet for the HTML output of Docutils.
+*/
+
+body {
+    background-color: #fffff5;
+}
+
+h2 {
+    text-decoration: underline;
+}
+
+.first {
+  margin-top: 0 }
+
+.last {
+  margin-bottom: 0 }
+
+a.toc-backref {
+  text-decoration: none ;
+  color: black }
+
+blockquote.epigraph {
+  margin: 2em 5em ; }
+
+dd {
+  margin-bottom: 0.5em }
+
+div.abstract {
+  margin: 2em 5em }
+
+div.abstract p.topic-title {
+  font-weight: bold ;
+  text-align: center }
+
+div.attention, div.caution, div.danger, div.error, div.hint,
+div.important, div.note, div.tip, div.warning, div.admonition {
+  margin: 2em ;
+  border: medium outset ;
+  padding: 1em }
+
+div.attention p.admonition-title, div.caution p.admonition-title,
+div.danger p.admonition-title, div.error p.admonition-title,
+div.warning p.admonition-title {
+  color: red ;
+  font-weight: bold ;
+  font-family: sans-serif }
+
+div.hint p.admonition-title, div.important p.admonition-title,
+div.note p.admonition-title, div.tip p.admonition-title,
+div.admonition p.admonition-title {
+  font-weight: bold ;
+  font-family: sans-serif }
+
+div.dedication {
+  margin: 2em 5em ;
+  text-align: center ;
+  font-style: italic }
+
+div.dedication p.topic-title {
+  font-weight: bold ;
+  font-style: normal }
+
+div.figure {
+  margin-left: 2em }
+
+div.sidebar {
+  margin-left: 1em ;
+  border: medium outset ;
+  padding: 0em 1em ;
+  background-color: #ffffee ;
+  width: 40% ;
+  float: right ;
+  clear: right }
+
+div.sidebar p.rubric {
+  font-family: sans-serif ;
+  font-size: medium }
+
+div.system-messages {
+  margin: 5em }
+
+div.system-messages h1 {
+  color: red }
+
+div.system-message {
+  border: medium outset ;
+  padding: 1em }
+
+div.system-message p.system-message-title {
+  color: red ;
+  font-weight: bold }
+
+div.topic {
+  margin: 2em }
+
+h1.title {
+  text-align: center }
+
+h2.subtitle {
+  text-align: center }
+
+ol.simple, ul.simple {
+  margin-bottom: 1em }
+
+ol.arabic {
+  list-style: decimal }
+
+ol.loweralpha {
+  list-style: lower-alpha }
+
+ol.upperalpha {
+  list-style: upper-alpha }
+
+ol.lowerroman {
+  list-style: lower-roman }
+
+ol.upperroman {
+  list-style: upper-roman }
+
+p.attribution {
+  text-align: right ;
+  margin-left: 50% }
+
+p.caption {
+  font-style: italic }
+
+p.credits {
+  font-style: italic ;
+  font-size: smaller }
+
+p.label {
+  white-space: nowrap }
+
+p.rubric {
+  font-weight: bold ;
+  font-size: larger ;
+  color: maroon ;
+  text-align: center }
+
+p.sidebar-title {
+  font-family: sans-serif ;
+  font-weight: bold ;
+  font-size: larger }
+
+p.sidebar-subtitle {
+  font-family: sans-serif ;
+  font-weight: bold }
+
+p.topic-title {
+  font-weight: bold }
+
+pre.address {
+  margin-bottom: 0 ;
+  margin-top: 0 ;
+  font-family: serif ;
+  font-size: 100% }
+
+pre.line-block {
+  font-family: serif ;
+  font-size: 100% }
+
+pre.literal-block, pre.doctest-block {
+  margin-left: 2em ;
+  margin-right: 2em ;
+  background-color: #eeeeee }
+
+span.classifier {
+  font-family: sans-serif ;
+  font-style: oblique }
+
+span.classifier-delimiter {
+  font-family: sans-serif ;
+  font-weight: bold }
+
+span.interpreted {
+  font-family: sans-serif }
+
+span.option {
+  white-space: nowrap }
+
+span.option-argument {
+  font-style: italic }
+
+span.pre {
+  white-space: pre }
+
+span.problematic {
+  color: red }
+
+table {
+  margin-top: 0.5em ;
+  margin-bottom: 0.5em }
+
+table.citation {
+  border-left: solid thin gray ;
+  padding-left: 0.5ex }
+
+table.docinfo {
+  margin: 2em 4em }
+
+table.footnote {
+  border-left: solid thin black ;
+  padding-left: 0.5ex }
+
+td, th {
+  padding-left: 0.5em ;
+  padding-right: 0.5em ;
+  vertical-align: top }
+
+th.docinfo-name, th.field-name {
+  font-weight: bold ;
+  text-align: left ;
+  white-space: nowrap }
+
+h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt {
+  font-size: 100% }
+
+tt {
+  background-color: #eeeeee }
+
+ul.auto-toc {
+  list-style-type: none }
+
+</style>
+</head>
+<body>
+<div class="document" id="running-boost-regression-tests">
+<h1 class="title">Running Boost Regression Tests</h1>
+
+<div class="section" id="requirements">
+<h2>Requirements</h2>
+<ul class="simple">
+<li>Python 2.3 or higher</li>
+<li>Some spare disk space (~5 Gb per each tested compiler)</li>
+</ul>
+<p>That's it! You don't even need an SVN client installed.</p>
+</div>
+<div class="section" id="installation">
+<h2>Installation</h2>
+<ul class="simple">
+<li>Download regression driver <tt class="docutils literal"><span class="pre">regression.py</span></tt> from <a class="reference external" href="http://svn.boost.org/svn/boost/trunk/tools/regression/xsl_reports/runner/regression.py">here</a> (<a class="reference external" href="http://tinyurl.com/236tty">http://tinyurl.com/236tty</a>)
+and put it in the directory where you want all the regression
+test files to be placed.</li>
+</ul>
+<ul>
+<li><p class="first"><strong>Optional</strong>: If you already have <tt class="docutils literal"><span class="pre">bjam</span></tt> and/or <tt class="docutils literal"><span class="pre">process_jam_log</span></tt> executables
+you'd like to use, just put them in the same directory with <tt class="docutils literal"><span class="pre">regression.py</span></tt>, e.g.:</p>
+<pre class="literal-block">
+my_boost_regressions/
+    regression.py
+    bjam<em>[.exe]</em>
+</pre>
+</li>
+</ul>
+</div>
+<div class="section" id="running-tests">
+<h2>Running tests</h2>
+<p>To start a regression run, simply run <tt class="docutils literal"><span class="pre">regression.py</span></tt> providing it with the following
+two arguments:</p>
+<ul class="simple">
+<li>runner id (something unique of your choice that will identify your
+results in the reports <a class="footnote-reference" href="#runnerid1" id="id2">[1]</a>, <a class="footnote-reference" href="#runnerid2" id="id3">[2]</a>)</li>
+<li>a particular set of toolsets you want to test with <a class="footnote-reference" href="#toolsets" id="id4">[3]</a>.</li>
+</ul>
+<p>For example:</p>
+<pre class="literal-block">
+python regression.py --runner=Metacomm --toolsets=gcc-4.2.1,msvc-8.0
+</pre>
+<p>If you are interested in seeing all available options, run <tt class="docutils literal"><span class="pre">python</span> <span class="pre">regression.py</span></tt>
+or <tt class="docutils literal"><span class="pre">python</span> <span class="pre">regression.py</span> <span class="pre">--help</span></tt>. See also the <a class="reference internal" href="#advanced-use">Advanced use</a> section below.</p>
+<p><strong>Note</strong>: If you are behind a firewall/proxy server, everything should still &quot;just work&quot;.
+In the rare cases when it doesn't, you can explicitly specify the proxy server
+parameters through the <tt class="docutils literal"><span class="pre">--proxy</span></tt> option, e.g.:</p>
+<pre class="literal-block">
+python regression.py ... <strong>--proxy=http://www.someproxy.com:3128</strong>
+</pre>
+</div>
+<div class="section" id="details">
+<h2>Details</h2>
+<p>The regression run procedure will:</p>
+<ul class="simple">
+<li>Download the most recent tarball from <a class="reference external" href="http://www.meta-comm.com/engineering/boost/snapshot/">http://www.meta-comm.com/engineering/boost/snapshot/</a>,
+unpack it in the subdirectory <tt class="docutils literal"><span class="pre">boost</span></tt>.</li>
+<li>Build <tt class="docutils literal"><span class="pre">bjam</span></tt> and <tt class="docutils literal"><span class="pre">process_jam_log</span></tt> if needed. (<tt class="docutils literal"><span class="pre">process_jam_log</span></tt> is a
+utility, which extracts the test results from the log file produced by
+Boost.Build).</li>
+<li>Run regression tests, process and collect the results.</li>
+<li>Upload the results to <a class="reference external" href="ftp://fx.meta-comm.com/boost-regression">ftp://fx.meta-comm.com/boost-regression</a>.</li>
+</ul>
+<p>The report merger process running continuously on MetaCommunications site will
+merge all submitted test runs and publish them at
+<a class="reference external" href="http://engineering.meta-comm.com/boost-regression/">http://engineering.meta-comm.com/boost-regression/</a>.</p>
+</div>
+<div class="section" id="advanced-use">
+<h2>Advanced use</h2>
+<div class="section" id="providing-detailed-information-about-your-environment">
+<h3>Providing detailed information about your environment</h3>
+<p>Once you have your regression results displayed in the Boost-wide
+reports, you may consider providing a bit more information about
+yourself and your test environment. This additional information will
+be presented in the reports on a page associated with your runner ID.</p>
+<p>By default, the page's content is just a single line coming from the
+<tt class="docutils literal"><span class="pre">comment.html</span></tt> file in your <tt class="docutils literal"><span class="pre">regression.py</span></tt> directory, specifying
+the tested platform. You can put online a more detailed description of
+your environment, such as your hardware configuration, compiler builds,
+and test schedule, by simply altering the file's content. Also, please
+consider providing your name and email address for cases where Boost
+developers have questions specific to your particular set of results.</p>
+</div>
+<div class="section" id="incremental-runs">
+<h3>Incremental runs</h3>
+<p>You can run <tt class="docutils literal"><span class="pre">regression.py</span></tt> in incremental mode <a class="footnote-reference" href="#incremental" id="id5">[4]</a> by simply passing
+it an identically named command-line flag:</p>
+<pre class="literal-block">
+python regression.py ... <strong>--incremental</strong>
+</pre>
+</div>
+<div class="section" id="dealing-with-misbehaved-tests-compilers">
+<h3>Dealing with misbehaved tests/compilers</h3>
+<p>Depending on the environment/C++ runtime support library the test is compiled with,
+a test failure/termination may cause an appearance of a dialog window, requiring
+human intervention to proceed. Moreover, the test (or even the compiler itself)
+can fall into an infinite loop, or simply run for too long. To allow <tt class="docutils literal"><span class="pre">regression.py</span></tt>
+to take care of these obstacles, add the <tt class="docutils literal"><span class="pre">--monitored</span></tt> flag to the script
+invocation:</p>
+<pre class="literal-block">
+python regression.py ... <strong>--monitored</strong>
+</pre>
+<p>That's it. Knowing your intentions, the script will be able to automatically deal
+with the listed issues <a class="footnote-reference" href="#monitored" id="id6">[5]</a>.</p>
+</div>
+<div class="section" id="getting-sources-from-svn">
+<h3>Getting sources from SVN</h3>
+<p>If you already have an SVN client installed and configured, you might
+prefer to get the sources directly from the <a class="reference external" href="http://svn.boost.org/trac/boost/wiki/BoostSubversion">Boost Subversion
+Repository</a>. To communicate this to the script, you just need to
+pass it your Boost SVN user ID using the <tt class="docutils literal"><span class="pre">--user</span></tt> option; for
+instance:</p>
+<pre class="literal-block">
+python regression.py ... <strong>--user=agurtovoy</strong>
+</pre>
+<p>You can also specify the user as <tt class="docutils literal"><span class="pre">anonymous</span></tt>, requesting anonymous
+SVN access.</p>
+<p>The main advantage of obtaining the sources through SVN is an
+immediate availability of the most recent check-ins: the sources
+extracted from a tarball the script downloads by default can be up to
+one hour behind the actual repository state at the time of test run.</p>
+</div>
+<div class="section" id="integration-with-a-custom-driver-script">
+<h3>Integration with a custom driver script</h3>
+<p>Even if you've already been using a custom driver script, and for some
+reason you don't want <tt class="docutils literal"><span class="pre">regression.py</span></tt> to take over the entire test cycle,
+getting your regression results into <a class="reference external" href="http://www.boost.org/regression-logs/developer/">Boost-wide reports</a> is still easy!</p>
+<p>In fact, it's just a matter of modifying your script to perform two straightforward
+operations:</p>
+<ol class="arabic">
+<li><p class="first"><em>Timestamp file creation</em> needs to be done before the SVN update/checkout.
+The file's location doesn't matter (nor does the content), as long as you know how
+to access it later. Making your script to do something as simple as
+<tt class="docutils literal"><span class="pre">echo</span> <span class="pre">&gt;timestamp</span></tt> would work just fine.</p>
+</li>
+<li><p class="first"><em>Collecting and uploading logs</em> can be done any time after <tt class="docutils literal"><span class="pre">process_jam_log</span></tt>' s
+run, and is as simple as an invocation of the local copy of
+<tt class="docutils literal"><span class="pre">$BOOST_ROOT/tools/regression/xsl_reports/runner/collect_and_upload_logs.py</span></tt>
+script that was just obtained from the SVN with the rest of the sources.
+You'd need to provide <tt class="docutils literal"><span class="pre">collect_and_upload_logs.py</span></tt> with the following three
+arguments:</p>
+<pre class="literal-block">
+--locate-root   directory to scan for &quot;test_log.xml&quot; files
+--runner        runner ID (e.g. &quot;Metacomm&quot;)
+--timestamp     path to a file whose modification time will be used
+                as a timestamp of the run (&quot;timestamp&quot; by default)
+</pre>
+<p>For example, assuming that the run's resulting  binaries are in the
+<tt class="docutils literal"><span class="pre">$BOOST_ROOT/bin</span></tt> directory (the default Boost.Build setup), the
+<tt class="docutils literal"><span class="pre">collect_and_upload_logs.py</span></tt> invocation might look like this:</p>
+<pre class="literal-block">
+python $BOOST_ROOT/tools/regression/xsl_reports/runner/collect_and_upload_logs.py
+   --locate-root=$BOOST_ROOT/bin
+   --runner=Metacomm
+   --timestamp=timestamp
+</pre>
+</li>
+</ol>
+</div>
+<div class="section" id="patching-boost-sources">
+<h3>Patching Boost sources</h3>
+<p>You might encounter an occasional need to make local modifications to
+the Boost codebase before running the tests, without disturbing the
+automatic nature of the regression process. To implement this under
+<tt class="docutils literal"><span class="pre">regression.py</span></tt>:</p>
+<ol class="arabic simple">
+<li>Codify applying the desired modifications to the sources
+located in the <tt class="docutils literal"><span class="pre">./boost</span></tt> subdirectory in a single executable
+script named <tt class="docutils literal"><span class="pre">patch_boost</span></tt> (<tt class="docutils literal"><span class="pre">patch_boost.bat</span></tt> on Windows).</li>
+<li>Place the script in the <tt class="docutils literal"><span class="pre">regression.py</span></tt> directory.</li>
+</ol>
+<p>The driver will check for the existence of the <tt class="docutils literal"><span class="pre">patch_boost</span></tt> script,
+and, if found, execute it after obtaining the Boost sources.</p>
+</div>
+</div>
+<div class="section" id="feedback">
+<h2>Feedback</h2>
+<p>Please send all comments/suggestions regarding this document and the testing procedure
+itself to the <a class="reference external" href="http://lists.boost.org/mailman/listinfo.cgi/boost-testing">Boost Testing list</a>.</p>
+</div>
+<div class="section" id="notes">
+<h2>Notes</h2>
+<table class="docutils footnote" frame="void" id="runnerid1" rules="none">
+<colgroup><col class="label" /><col /></colgroup>
+<tbody valign="top">
+<tr><td class="label"><a class="fn-backref" href="#id2">[1]</a></td><td>If you are running regressions in alternation with a different
+set of compilers (e.g. for Intel in the morning and GCC at the end of the day), you need
+to provide a <em>different</em> runner id for each of these runs, e.g. <tt class="docutils literal"><span class="pre">your_name-intel</span></tt>, and
+<tt class="docutils literal"><span class="pre">your_name-gcc</span></tt>.</td></tr>
+</tbody>
+</table>
+<table class="docutils footnote" frame="void" id="runnerid2" rules="none">
+<colgroup><col class="label" /><col /></colgroup>
+<tbody valign="top">
+<tr><td class="label"><a class="fn-backref" href="#id3">[2]</a></td><td>The limitations of the reports' format/medium impose a direct dependency
+between the number of compilers you are testing with and the amount of space available
+for your runner id. If you are running regressions for a single compiler, please make
+sure to choose a short enough id that does not significantly disturb the reports' layout.</td></tr>
+</tbody>
+</table>
+<table class="docutils footnote" frame="void" id="toolsets" rules="none">
+<colgroup><col class="label" /><col /></colgroup>
+<tbody valign="top">
+<tr><td class="label"><a class="fn-backref" href="#id4">[3]</a></td><td>If <tt class="docutils literal"><span class="pre">--toolsets</span></tt> option is not provided, the script will try to use the
+platform's default toolset (<tt class="docutils literal"><span class="pre">gcc</span></tt> for most Unix-based systems).</td></tr>
+</tbody>
+</table>
+<table class="docutils footnote" frame="void" id="incremental" rules="none">
+<colgroup><col class="label" /><col /></colgroup>
+<tbody valign="top">
+<tr><td class="label"><a class="fn-backref" href="#id5">[4]</a></td><td><p class="first">By default, the script runs in what is known as <em>full mode</em>: on
+each <tt class="docutils literal"><span class="pre">regression.py</span></tt> invocation all the files that were left in place by the
+previous run -- including the binaries for the successfully built tests and libraries
+-- are deleted, and everything is rebuilt once again from scratch. By contrast, in
+<em>incremental mode</em> the already existing binaries are left intact, and only the
+tests and libraries whose source files have changed since the previous run are
+re-built and re-tested.</p>
+<p>The main advantage of incremental runs is a significantly shorter turnaround time,
+but unfortunately they don't always produce reliable results. Some types of changes
+to the codebase (changes to the bjam testing subsystem in particular)
+often require switching to a full mode for one cycle in order to produce
+trustworthy reports.</p>
+<p class="last">As a general guideline, if you can afford it, testing in full mode is preferable.</p>
+</td></tr>
+</tbody>
+</table>
+<table class="docutils footnote" frame="void" id="monitored" rules="none">
+<colgroup><col class="label" /><col /></colgroup>
+<tbody valign="top">
+<tr><td class="label"><a class="fn-backref" href="#id6">[5]</a></td><td>Note that at the moment this functionality is available only if you
+are running on a Windows platform. Contributions are welcome!</td></tr>
+</tbody>
+</table>
+</div>
+</div>
+<div class="footer">
+<hr class="footer" />
+Generated on: 2007-08-05 04:33 UTC.
+Generated by <a class="reference external" href="http://docutils.sourceforge.net/">Docutils</a> from <a class="reference external" href="http://docutils.sourceforge.net/rst.html">reStructuredText</a> source.
+
+</div>
+</body>
+</html>

+ 258 - 0
regression/xsl_reports/runner/instructions.rst

@@ -0,0 +1,258 @@
+Running Boost Regression Tests
+==============================
+
+
+Requirements
+------------
+
+* Python 2.3 or higher
+* Some spare disk space (~5 Gb per each tested compiler)
+
+That's it! You don't even need an SVN client installed.
+
+Installation
+------------
+
+* Download regression driver ``regression.py`` from here__ (http://tinyurl.com/236tty)
+  and put it in the directory where you want all the regression 
+  test files to be placed.
+
+__ http://svn.boost.org/svn/boost/trunk/tools/regression/xsl_reports/runner/regression.py
+
+
+* **Optional**: If you already have ``bjam`` and/or ``process_jam_log`` executables
+  you'd like to use, just put them in the same directory with ``regression.py``, e.g.:
+
+  .. parsed-literal::
+
+    my_boost_regressions/
+        regression.py
+        bjam\ *[.exe]*
+
+
+Running tests
+-------------
+
+To start a regression run, simply run ``regression.py`` providing it with the following
+two arguments:
+
+- runner id (something unique of your choice that will identify your 
+  results in the reports [#runnerid1]_, [#runnerid2]_)
+
+- a particular set of toolsets you want to test with [#toolsets]_.
+
+For example::
+
+    python regression.py --runner=Metacomm --toolsets=gcc-4.2.1,msvc-8.0
+    
+
+If you are interested in seeing all available options, run ``python regression.py``
+or ``python regression.py --help``. See also the `Advanced use`_ section below.
+  
+**Note**: If you are behind a firewall/proxy server, everything should still "just work". 
+In the rare cases when it doesn't, you can explicitly specify the proxy server 
+parameters through the ``--proxy`` option, e.g.:
+
+.. parsed-literal::
+
+    python regression.py ... **--proxy=http://www.someproxy.com:3128**
+
+
+Details
+-------
+
+The regression run procedure will:
+
+* Download the most recent tarball from http://www.meta-comm.com/engineering/boost/snapshot/,
+  unpack it in the subdirectory ``boost``.
+
+* Build ``bjam`` and ``process_jam_log`` if needed. (``process_jam_log`` is a
+  utility, which extracts the test results from the log file produced by 
+  Boost.Build).
+
+* Run regression tests, process and collect the results.
+
+* Upload the results to ftp://fx.meta-comm.com/boost-regression.
+
+
+The report merger process running continuously on MetaCommunications site will 
+merge all submitted test runs and publish them at 
+http://engineering.meta-comm.com/boost-regression/.
+
+
+Advanced use
+------------
+
+Providing detailed information about your environment
+.....................................................
+
+Once you have your regression results displayed in the Boost-wide
+reports, you may consider providing a bit more information about
+yourself and your test environment. This additional information will
+be presented in the reports on a page associated with your runner ID.
+
+By default, the page's content is just a single line coming from the
+``comment.html`` file in your ``regression.py`` directory, specifying
+the tested platform. You can put online a more detailed description of
+your environment, such as your hardware configuration, compiler builds,
+and test schedule, by simply altering the file's content. Also, please
+consider providing your name and email address for cases where Boost
+developers have questions specific to your particular set of results.
+
+
+Incremental runs
+................
+
+You can run ``regression.py`` in incremental mode [#incremental]_ by simply passing 
+it an identically named command-line flag:
+
+.. parsed-literal::
+
+      python regression.py ... **--incremental**
+
+
+Dealing with misbehaved tests/compilers
+.......................................
+
+Depending on the environment/C++ runtime support library the test is compiled with, 
+a test failure/termination may cause an appearance of a dialog window, requiring
+human intervention to proceed. Moreover, the test (or even the compiler itself)
+can fall into an infinite loop, or simply run for too long. To allow ``regression.py`` 
+to take care of these obstacles, add the ``--monitored`` flag to the script 
+invocation:
+
+.. parsed-literal::
+
+      python regression.py ... **--monitored**
+
+
+That's it. Knowing your intentions, the script will be able to automatically deal 
+with the listed issues [#monitored]_.
+
+
+Getting sources from SVN
+........................
+
+If you already have an SVN client installed and configured, you might
+prefer to get the sources directly from the `Boost Subversion
+Repository`__. To communicate this to the script, you just need to
+pass it your Boost SVN user ID using the ``--user`` option; for
+instance:
+
+__ http://svn.boost.org/trac/boost/wiki/BoostSubversion
+
+.. parsed-literal::
+
+      python regression.py ... **--user=agurtovoy**
+
+You can also specify the user as ``anonymous``, requesting anonymous
+SVN access.  
+
+The main advantage of obtaining the sources through SVN is an
+immediate availability of the most recent check-ins: the sources
+extracted from a tarball the script downloads by default can be up to
+one hour behind the actual repository state at the time of test run.
+
+
+Integration with a custom driver script
+.......................................
+
+Even if you've already been using a custom driver script, and for some 
+reason you don't want ``regression.py`` to take over the entire test cycle, 
+getting your regression results into `Boost-wide reports`__ is still easy!
+
+In fact, it's just a matter of modifying your script to perform two straightforward 
+operations:
+
+1. *Timestamp file creation* needs to be done before the SVN update/checkout.
+   The file's location doesn't matter (nor does the content), as long as you know how 
+   to access it later. Making your script to do something as simple as
+   ``echo >timestamp`` would work just fine.
+
+2. *Collecting and uploading logs* can be done any time after ``process_jam_log``' s
+   run, and is as simple as an invocation of the local copy of
+   ``$BOOST_ROOT/tools/regression/xsl_reports/runner/collect_and_upload_logs.py``
+   script that was just obtained from the SVN with the rest of the sources.
+   You'd need to provide ``collect_and_upload_logs.py`` with the following three
+   arguments::
+
+        --locate-root   directory to scan for "test_log.xml" files
+        --runner        runner ID (e.g. "Metacomm")
+        --timestamp     path to a file whose modification time will be used 
+                        as a timestamp of the run ("timestamp" by default)
+
+   For example, assuming that the run's resulting  binaries are in the
+   ``$BOOST_ROOT/bin`` directory (the default Boost.Build setup), the 
+   ``collect_and_upload_logs.py`` invocation might look like this::
+
+       python $BOOST_ROOT/tools/regression/xsl_reports/runner/collect_and_upload_logs.py 
+          --locate-root=$BOOST_ROOT/bin
+          --runner=Metacomm
+          --timestamp=timestamp
+
+
+__ http://www.boost.org/regression-logs/developer/
+
+
+Patching Boost sources
+......................
+
+You might encounter an occasional need to make local modifications to
+the Boost codebase before running the tests, without disturbing the
+automatic nature of the regression process. To implement this under
+``regression.py``:
+
+1. Codify applying the desired modifications to the sources
+   located in the ``./boost`` subdirectory in a single executable
+   script named ``patch_boost`` (``patch_boost.bat`` on Windows).
+
+2. Place the script in the ``regression.py`` directory.
+
+The driver will check for the existence of the ``patch_boost`` script,
+and, if found, execute it after obtaining the Boost sources.
+
+
+Feedback
+--------
+
+Please send all comments/suggestions regarding this document and the testing procedure 
+itself to the `Boost Testing list`__.
+
+__ http://lists.boost.org/mailman/listinfo.cgi/boost-testing
+
+
+Notes
+-----
+
+.. [#runnerid1] If you are running regressions in alternation with a different 
+   set of compilers (e.g. for Intel in the morning and GCC at the end of the day), you need 
+   to provide a *different* runner id for each of these runs, e.g. ``your_name-intel``, and
+   ``your_name-gcc``.
+
+.. [#runnerid2] The limitations of the reports' format/medium impose a direct dependency
+   between the number of compilers you are testing with and the amount of space available 
+   for your runner id. If you are running regressions for a single compiler, please make 
+   sure to choose a short enough id that does not significantly disturb the reports' layout.
+
+.. [#toolsets] If ``--toolsets`` option is not provided, the script will try to use the 
+   platform's default toolset (``gcc`` for most Unix-based systems).
+
+.. [#incremental] By default, the script runs in what is known as *full mode*: on 
+   each ``regression.py`` invocation all the files that were left in place by the 
+   previous run -- including the binaries for the successfully built tests and libraries 
+   -- are deleted, and everything is rebuilt once again from scratch. By contrast, in 
+   *incremental mode* the already existing binaries are left intact, and only the 
+   tests and libraries whose source files have changed since the previous run are 
+   re-built and re-tested.
+
+   The main advantage of incremental runs is a significantly shorter turnaround time, 
+   but unfortunately they don't always produce reliable results. Some types of changes
+   to the codebase (changes to the bjam testing subsystem in particular)
+   often require switching to a full mode for one cycle in order to produce 
+   trustworthy reports. 
+   
+   As a general guideline, if you can afford it, testing in full mode is preferable.
+
+.. [#monitored] Note that at the moment this functionality is available only if you 
+   are running on a Windows platform. Contributions are welcome!
+   

+ 1 - 0
regression/xsl_reports/runner/instructions2html

@@ -0,0 +1 @@
+rst2html.py -dtg --embed-stylesheet --stylesheet=default.css --initial-header-level=2 instructions.rst instructions.html

+ 1042 - 0
regression/xsl_reports/runner/regression.py

@@ -0,0 +1,1042 @@
+#!/usr/bin/python
+
+# Copyright (c) MetaCommunications, Inc. 2003-2007
+#
+# Distributed under the Boost Software License, Version 1.0.
+# (See accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt)
+
+import urllib
+import tarfile
+import socket
+import time
+import getopt
+import glob
+import shutil
+import stat
+import os.path
+import os
+import platform
+import traceback
+import string
+import sys
+
+# Filesystem layout: everything lives under the directory containing this
+# script (regression_root); per-run outputs go into results/.
+regression_root    = os.path.abspath( os.path.dirname( sys.argv[0] ) )
+regression_results = os.path.join( regression_root, 'results' )
+regression_log     = os.path.join( regression_results, 'bjam.log' )
+install_log        = os.path.join( regression_results, 'bjam_install.log' )
+boostbook_log      = os.path.join( regression_results, 'boostbook.log' )
+boostbook_archive_name = os.path.join( regression_results, 'boostbook.zip' )
+
+# Checkout/tarball destination and locations derived from it.
+boost_root      = os.path.join( regression_root, 'boost' )
+xsl_reports_dir = os.path.join( boost_root, 'tools', 'regression', 'xsl_reports' )
+timestamp_path  = os.path.join( regression_root, 'timestamp' )
+
+# Command templates; %(command)s is filled in by svn_command() below.
+svn_anonymous_command_line = 'svn %(command)s'
+svn_command_line           = 'svn --non-interactive --username=%(user)s %(command)s'
+
+# Per-platform descriptors for the two bootstrap tools.  Each dict carries:
+# 'name' (executable file name), 'build_cmd' (callable returning the shell
+# command to build it), 'is_supported_toolset' (predicate for bootstrap
+# toolsets), 'default_toolset', 'path', 'source_dir', 'build_path_root'.
+bjam = {}
+process_jam_log = {}
+
+
+if sys.platform == 'win32':
+    bjam[ 'name' ] = 'bjam.exe'
+    bjam[ 'build_cmd' ] = lambda toolset, v2: bjam_build_script_cmd( 'build.bat %s' % toolset )
+    bjam[ 'is_supported_toolset' ] = lambda x: x in [ \
+        'borland', 'como', 'gcc', 'gcc-nocygwin', 'intel-win32', 'metrowerks', 'mingw', \
+        'msvc', 'vc7', 'vc8' \
+        ]
+    process_jam_log[ 'name' ] = 'process_jam_log.exe'
+
+    # Bootstrap toolset used when none was specified: depends on whether
+    # Boost.Build v2 or v1 is in use.
+    def default_toolset(v2):
+        if v2:
+            return 'msvc'
+        else:
+            return 'vc-7_1'
+
+    process_jam_log[ 'default_toolset' ] = default_toolset
+    patch_boost_name = 'patch_boost.bat'
+else:
+    bjam[ 'name' ] = 'bjam'
+    bjam[ 'build_cmd' ] = lambda toolset, v2: bjam_build_script_cmd( './build.sh %s' % toolset )
+    bjam[ 'is_supported_toolset' ] = lambda x: x in [ \
+        'acc', 'como', 'darwin', 'gcc', 'intel-linux', 'kcc', 'kylix', 'mipspro', \
+        'pathscale', 'pgi', 'qcc', 'sun', 'sunpro', 'tru64cxx', 'vacpp' \
+        ]
+    process_jam_log[ 'name' ] = 'process_jam_log'
+    process_jam_log[ 'default_toolset' ] = lambda x: 'gcc'
+    patch_boost_name = 'patch_boost'
+
+# bjam's own build script picks its toolset, hence the empty default here.
+bjam[ 'default_toolset' ] = lambda x: ''
+bjam[ 'path' ] = os.path.join( regression_root, bjam[ 'name' ] )
+bjam[ 'source_dir' ] = os.path.join( boost_root, 'tools', 'jam', 'src' )
+bjam[ 'build_path_root' ] = lambda unused: bjam[ 'source_dir' ]
+
+process_jam_log[ 'path' ] = os.path.join( regression_root, process_jam_log[ 'name' ] )
+process_jam_log[ 'source_dir' ] = os.path.join( boost_root, 'tools', 'regression', 'build' )
+
+
+def process_jam_build_root(v2):
+    """Return the directory under which a freshly built process_jam_log
+    binary should be searched for (differs between Boost.Build v2 and v1)."""
+    if v2:
+        return os.path.join(boost_root, 'dist', 'bin')
+    else:
+        return os.path.join(
+            boost_root, 'bin', 'boost', 'tools', 'regression', 'build'
+            , process_jam_log[ 'name' ])
+
+
+process_jam_log[ 'build_path_root' ] = process_jam_build_root
+
+# process_jam_log is built with bjam itself, with any toolset.
+process_jam_log[ 'build_cmd' ] = lambda toolset, v2: bjam_command( toolset, v2 )
+process_jam_log[ 'is_supported_toolset' ] = lambda x : True
+
+# Optional Win32-only helpers downloaded on demand for monitored runs.
+build_monitor_url = 'http://engineering.meta-comm.com/resources/build_monitor.zip'
+pskill_url = 'http://www.sysinternals.com/files/pskill.zip'
+
+# Lazily bound to the xsl_reports 'utils' module by import_utils().
+utils = None
+
+def log( message ):
+    """Write a '# '-prefixed diagnostic line to stderr, flushing both
+    standard streams first so it interleaves correctly with child output."""
+    sys.stdout.flush()
+    sys.stderr.flush()
+    sys.stderr.write( '# %s\n' % message )
+    sys.stderr.flush()
+
+
+def platform_name():
+    """Return the platform name reported in the regression results."""
+    # See http://article.gmane.org/gmane.comp.lib.boost.testing/933
+    if sys.platform == 'win32':
+        return 'Windows'
+    elif sys.platform == 'cygwin':
+        return 'Windows/Cygwin'
+
+    return platform.system()
+
+
+def rmtree( path ):
+    """Recursively delete `path` if it exists.  On Win32 a shell 'del' pass
+    runs first (clears files that shutil.rmtree may fail on), then
+    shutil.rmtree removes the directory skeleton."""
+    if os.path.exists( path ):
+        if sys.platform == 'win32':
+            os.system( 'del /f /s /q "%s" >nul 2>&1' % path )
+            shutil.rmtree( unicode( path ) )
+        else:
+            os.system( 'rm -f -r "%s"' % path )
+
+
+def retry( f, args, max_attempts=5, sleep_secs=10 ):
+    """Call f(*args), retrying on any exception with a pause between tries.
+    Note: range(max_attempts, -1, -1) yields max_attempts+1 total attempts.
+    Re-raises the last exception when all attempts are exhausted."""
+    for attempts in range( max_attempts, -1, -1 ):
+        try:
+            return f( *args )
+        except Exception, msg:
+            log( '%s failed with message "%s"' % ( f.__name__, msg ) )
+            if attempts == 0:
+                log( 'Giving up.' )
+                raise
+
+            log( 'Retrying (%d more attempts).' % attempts )
+            time.sleep( sleep_secs )
+
+
+def cleanup( args, **unused ):
+    """Delete previous-run state.  args may contain 'source' (removes the
+    whole boost checkout) and/or 'bin' (removes build/test outputs and the
+    results directory); empty args means both."""
+    if args == []: args = [ 'source', 'bin' ]
+
+    if 'source' in args:
+        log( 'Cleaning up "%s" directory ...' % boost_root )
+        rmtree( boost_root )
+
+    if 'bin' in args:
+        boost_bin_dir = os.path.join( boost_root, 'bin' )
+        log( 'Cleaning up "%s" directory ...' % boost_bin_dir )
+        rmtree( boost_bin_dir )
+
+        boost_binv2_dir = os.path.join( boost_root, 'bin.v2' )
+        log( 'Cleaning up "%s" directory ...' % boost_binv2_dir )
+        rmtree( boost_binv2_dir )
+
+        log( 'Cleaning up "%s" directory ...' % regression_results )
+        rmtree( regression_results )
+
+
+def http_get( source_url, destination, proxy ):
+    """Download source_url to the local file `destination` in 16 KB chunks,
+    optionally through an HTTP proxy (proxy=None means direct)."""
+    if proxy is None: proxies = None
+    else:             proxies = { 'http' : proxy }
+
+    src = urllib.urlopen( source_url, proxies = proxies )
+
+    f = open( destination, 'wb' )
+    while True:
+        data = src.read( 16*1024 )
+        if len( data ) == 0: break
+        f.write( data )
+
+    f.close()
+    src.close()
+
+
+def tarball_name_for_tag( tag, timestamp = False ):
+    """Map a tag (possibly 'branch/name') to its snapshot tarball file name,
+    or to the corresponding .timestamp file name when timestamp is True."""
+    tag = tag.split( '/' )[-1]
+    if not timestamp: return 'boost-%s.tar.bz2' % tag
+    else:             return 'boost-%s.timestamp' % tag
+
+
+def download_boost_tarball( destination, tag, proxy, timestamp_only = False ):
+    """Fetch the boost snapshot for `tag` into `destination`, replacing any
+    existing file of the same name.  Returns the local tarball path."""
+    tarball_name = tarball_name_for_tag( tag, timestamp_only )
+    tarball_path = os.path.join( destination, tarball_name )
+    tarball_url = 'http://beta.boost.org/development/snapshot.php/%s' % tag
+
+    log( 'Downloading "%s" to "%s"...'  % ( tarball_url, os.path.dirname( tarball_path ) ) )
+    if os.path.exists( tarball_path ):
+        os.unlink( tarball_path )
+
+    http_get(
+          tarball_url
+        , tarball_path
+        , proxy
+        )
+
+    return tarball_path
+
+
+def find_boost_dirs( path ):
+    """Return the 'boost-*'/'boost_*' subdirectories directly under path."""
+    return [ x for x in glob.glob( os.path.join( path, 'boost[-_]*' ) )
+                        if os.path.isdir( x ) ]
+
+
+def unpack_tarball( tarball_path, destination  ):
+    log( 'Looking for old unpacked archives...' )
+    old_boost_dirs = find_boost_dirs( destination )
+
+    for old_boost_dir in old_boost_dirs:
+        if old_boost_dir != tarball_path:
+            log( 'Deleting old directory %s.' % old_boost_dir )
+            rmtree( old_boost_dir )
+
+    log( 'Unpacking boost tarball ("%s")...' % tarball_path )
+
+    tarball_name = os.path.basename( tarball_path )
+    extension = tarball_name[ tarball_name.find( '.' ) : ]
+
+    if extension in ( ".tar.gz", ".tar.bz2" ):
+        mode = os.path.splitext( extension )[1][1:]
+        tar = tarfile.open( tarball_path, 'r:%s' % mode )
+        for tarinfo in tar:
+            tar.extract( tarinfo, destination )
+            if sys.platform == 'win32' and not tarinfo.isdir():
+                # workaround what appears to be a Win32-specific bug in 'tarfile'
+                # (modification times for extracted files are not set properly)
+                f = os.path.join( destination, tarinfo.name )
+                os.chmod( f, stat.S_IWRITE )
+                os.utime( f, ( tarinfo.mtime, tarinfo.mtime ) )
+        tar.close()
+    elif extension in ( ".zip" ):
+        import zipfile
+
+        z = zipfile.ZipFile( tarball_path, 'r', zipfile.ZIP_DEFLATED )
+        for f in z.infolist():
+            destination_file_path = os.path.join( destination, f.filename )
+            if destination_file_path[-1] == "/": # directory
+                if not os.path.exists( destination_file_path  ):
+                    os.makedirs( destination_file_path  )
+            else: # file
+                result = open( destination_file_path, 'wb' )
+                result.write( z.read( f.filename ) )
+                result.close()
+        z.close()
+    else:
+        raise 'Do not know how to unpack archives with extension \"%s\"' % extension
+
+    boost_dir = find_boost_dirs( destination )[0]
+    log( '    Unpacked into directory "%s"' % boost_dir )
+
+    if os.path.exists( boost_root ):
+        log( 'Deleting "%s" directory...' % boost_root )
+        rmtree( boost_root )
+
+    log( 'Renaming "%s" into "%s"' % ( boost_dir, boost_root ) )
+    os.rename( boost_dir, boost_root )
+
+
+def svn_command( user, command ):
+    """Run an svn subcommand via os.system.  Anonymous/None users get the
+    plain command line; otherwise --non-interactive --username=<user> is
+    added.  Raises Exception on a non-zero exit code."""
+    if user is None or user == 'anonymous':
+        cmd = svn_anonymous_command_line % { 'command': command }
+    else:
+        cmd = svn_command_line % { 'user': user, 'command': command }
+
+    log( 'Executing SVN command "%s"' % cmd )
+    rc = os.system( cmd )
+    if rc != 0:
+        raise Exception( 'SVN command "%s" failed with code %d' % ( cmd, rc ) )
+
+
+def svn_repository_url( user, tag ):
+    """Return the repository URL for `tag`: https for authenticated users,
+    plain http for anonymous access."""
+    if user != 'anonymous': return 'https://svn.boost.org/svn/boost/%s' % tag
+    else:                   return 'http://svn.boost.org/svn/boost/%s' % tag
+
+
+def svn_checkout( user, tag, args ):
+    """Check out `tag` into the 'boost' subdirectory of regression_root."""
+    command = 'co %s boost' % svn_repository_url( user, tag )
+    os.chdir( regression_root )
+    svn_command( user, command )
+
+
+def svn_update( user, tag, args ):
+    """Run 'svn update' inside the existing boost working copy."""
+    os.chdir( boost_root )
+    svn_command( user, 'update' )
+
+
+def format_time( t ):
+    """Format a struct_time as an RFC-2822-style string (used in e-mails)."""
+    return time.strftime(
+          '%a, %d %b %Y %H:%M:%S +0000'
+        , t
+        )
+
+
+def refresh_timestamp():
+    if os.path.exists( timestamp_path ):
+       os. unlink( timestamp_path )
+
+    open( timestamp_path, 'w' ).close()
+
+
+def timestamp():
+    """Return the run start time (mtime of the timestamp marker file) as an
+    ISO-8601 UTC string."""
+    return time.strftime(
+          '%Y-%m-%dT%H:%M:%SZ'
+        , time.gmtime( os.stat( timestamp_path ).st_mtime )
+        )
+
+
+def get_tarball( tag, proxy, args, **unused ):
+    """Download and/or unpack the snapshot tarball for `tag`.  args may
+    restrict the steps to 'download' and/or 'unpack'; empty args does both."""
+    if args == []: args = [ 'download', 'unpack' ]
+
+    tarball_path = None
+
+    if 'download' in args:
+        tarball_path = download_boost_tarball( regression_root, tag, proxy )
+
+    if 'unpack' in args:
+        if not tarball_path:
+            tarball_path = os.path.join( regression_root, tarball_name_for_tag( tag ) )
+        unpack_tarball( tarball_path, regression_root )
+
+
+def get_source( user, tag, proxy, args, **unused ):
+    """Obtain fresh sources: SVN checkout when a user is given, snapshot
+    tarball otherwise.  Both paths go through retry()."""
+    refresh_timestamp()
+    log( 'Getting sources (%s)...' % timestamp() )
+
+    if user is not None:
+        retry(
+              svn_checkout
+            , ( user, tag, args )
+            )
+    else:
+        retry(
+              get_tarball
+            , ( tag, proxy, args )
+            )
+
+
+def update_source( user, tag, proxy, args, **unused ):
+    """Update existing sources in place when they are an SVN working copy
+    (or a user is given); otherwise fall back to a full get_source()."""
+    if user is not None or os.path.exists( os.path.join( boost_root, '.svn' ) ):
+        # Touch the timestamp file so timestamp() reflects this update.
+        open( timestamp_path, 'w' ).close()
+        log( 'Updating sources from SVN (%s)...' % timestamp() )
+        retry(
+              svn_update
+            , ( user, tag, args )
+            )
+    else:
+        get_source( user, tag, proxy, args )
+
+
+def tool_path( name_or_spec, v2=None ):
+    """Resolve a tool to an executable path.  A plain string is joined to
+    regression_root; a tool descriptor dict is resolved by trying, in order,
+    its preinstalled 'path', a cached 'build_path', and finally a recursive
+    search under its build_path_root.  Raises Exception when not found."""
+    if isinstance( name_or_spec, basestring ):
+        return os.path.join( regression_root, name_or_spec )
+
+    if os.path.exists( name_or_spec[ 'path' ] ):
+        return name_or_spec[ 'path' ]
+
+    if name_or_spec.has_key( 'build_path' ):
+        return name_or_spec[ 'build_path' ]
+
+    build_path_root = name_or_spec[ 'build_path_root' ]( v2 )
+    log( 'Searching for "%s" in "%s"...' % ( name_or_spec[ 'name' ], build_path_root ) )
+    for root, dirs, files in os.walk( build_path_root ):
+        if name_or_spec[ 'name' ] in files:
+            return os.path.join( root, name_or_spec[ 'name' ] )
+
+    raise Exception( 'Cannot find "%s" in any of the following locations:\n%s' % (
+          name_or_spec[ 'name' ]
+        , '\n'.join( [ name_or_spec[ 'path' ], build_path_root ] )
+        ) )
+
+
+def build_if_needed( tool, toolset, toolsets, v2 ):
+    if os.path.exists( tool[ 'path' ] ):
+        log( 'Found preinstalled "%s"; will use it.' % tool[ 'path' ] )
+        return
+
+    log( 'Preinstalled "%s" is not found; building one...' % tool[ 'path' ] )
+
+    if toolset is None:
+        if toolsets is not None:
+            toolset = string.split( toolsets, ',' )[0]
+            if not tool[ 'is_supported_toolset' ]( toolset ):
+                log( 'Warning: Specified toolset (%s) cannot be used to bootstrap "%s".'\
+                     % ( toolset, tool[ 'name' ] ) )
+
+                toolset = tool[ 'default_toolset' ](v2)
+                log( '         Using default toolset for the platform (%s).' % toolset )
+        else:
+            toolset = tool[ 'default_toolset' ](v2)
+            log( 'Warning: No bootstrap toolset for "%s" was specified.' % tool[ 'name' ] )
+            log( '         Using default toolset for the platform (%s).' % toolset )
+
+    if os.path.exists( tool[ 'source_dir' ] ):
+        log( 'Found "%s" source directory "%s"' % ( tool[ 'name' ], tool[ 'source_dir' ] ) )
+        build_cmd = tool[ 'build_cmd' ]( toolset, v2 )
+        log( 'Building "%s" (%s)...' % ( tool[ 'name'], build_cmd ) )
+        utils.system( [
+              'cd "%s"' % tool[ 'source_dir' ]
+            , build_cmd
+            ] )
+    else:
+        raise 'Could not find "%s" source directory "%s"' % ( tool[ 'name' ], tool[ 'source_dir' ] )
+
+    if not tool.has_key( 'build_path' ):
+        tool[ 'build_path' ] = tool_path( tool, v2 )
+
+    if not os.path.exists( tool[ 'build_path' ] ):
+        raise 'Failed to find "%s" after build.' % tool[ 'build_path' ]
+
+    log( '%s succesfully built in "%s" location' % ( tool[ 'name' ], tool[ 'build_path' ] ) )
+
+
+def import_utils():
+    """Lazily import the xsl_reports 'utils' module (available only after
+    the boost tree exists) and bind it to the module-level `utils` global."""
+    global utils
+    if utils is None:
+        sys.path.append( xsl_reports_dir )
+        import utils as utils_module
+        utils = utils_module
+
+
+def download_if_needed( tool_name, tool_url, proxy ):
+    """Download and unzip a helper tool next to the script unless it is
+    already present; the intermediate .zip is removed afterwards."""
+    path = tool_path( tool_name )
+    if not os.path.exists( path ):
+        log( 'Preinstalled "%s" is not found.' % path )
+        log( '  Downloading from %s...' % tool_url )
+
+        zip_path = '%s.zip' % os.path.splitext( path )[0]
+        http_get( tool_url, zip_path, proxy )
+
+        log( '  Unzipping %s...' % path )
+        utils.unzip( zip_path, os.path.dirname( path ) )
+
+        log( '  Removing %s...' % zip_path )
+        os.unlink( zip_path )
+        log( 'Done.' )
+
+
+def setup(
+          comment
+        , toolsets
+        , book
+        , bjam_toolset
+        , pjl_toolset
+        , monitored
+        , proxy
+        , v2
+        , args
+        , **unused
+        ):
+    """Prepare the environment for a run: execute an optional local patch
+    script, bootstrap bjam and process_jam_log, and (on Win32) fetch the
+    monitoring helpers when a monitored run was requested."""
+    import_utils()
+
+    patch_boost_path = os.path.join( regression_root, patch_boost_name )
+    if os.path.exists( patch_boost_path ):
+        log( 'Found patch file "%s". Executing it.' % patch_boost_path )
+        os.chdir( regression_root )
+        utils.system( [ patch_boost_path ] )
+
+    build_if_needed( bjam, bjam_toolset, toolsets, v2 )
+    build_if_needed( process_jam_log, pjl_toolset, toolsets, v2 )
+
+    if monitored:
+        if sys.platform == 'win32':
+            download_if_needed( 'build_monitor.exe', build_monitor_url, proxy )
+            download_if_needed( 'pskill.exe', pskill_url, proxy )
+        else:
+            log( 'Warning: Test monitoring is not supported on this platform (yet).'     )
+            log( '         Please consider contributing this piece!' )
+
+
+def bjam_build_script_cmd( cmd ):
+    """Prefix a bjam build-script command with the contents of the
+    BJAM_ENVIRONMENT_SETUP environment variable, when set."""
+    env_setup_key = 'BJAM_ENVIRONMENT_SETUP'
+    if os.environ.has_key( env_setup_key ):
+        return '%s & %s' % ( os.environ[env_setup_key], cmd )
+
+    return cmd
+
+
+def bjam_command( toolsets, v2 ):
+    """Build the base bjam invocation string: executable path, optional
+    --v2 flag, BOOST_BUILD_PATH/BOOST_ROOT settings and the requested
+    toolsets (comma-separated on input, space-separated on output)."""
+    build_path = regression_root
+    # A trailing backslash would escape the closing quote; double it.
+    if build_path[-1] == '\\': build_path += '\\'
+
+    v2_option = ""
+    if v2:
+        v2_option = "--v2"
+
+    result = '"%s" %s "-sBOOST_BUILD_PATH=%s" "-sBOOST_ROOT=%s"'\
+        % (
+            tool_path( bjam, v2 )
+          , v2_option
+          , build_path
+          , boost_root
+          )
+
+    if toolsets:
+        if v2:
+            result += ' ' + string.join(string.split( toolsets, ',' ), ' ' )
+        else:
+            result += ' "-sTOOLS=%s"' % string.join( string.split( toolsets, ',' ), ' ' )
+
+    return result
+
+
+def install( toolsets, v2, **unused ):
+    """Run 'bjam install' from the boost root, appending its output to
+    install_log inside the (created if needed) results directory."""
+    import_utils()
+    os.chdir( os.path.join( boost_root ) )
+
+    log( 'Making "%s" directory...' % regression_results )
+    utils.makedirs( regression_results )
+
+    install_cmd = '%s -d2 install >>%s 2>&1' % ( bjam_command( toolsets, v2 ), install_log )
+    log( 'Installing libraries (%s)...' % install_cmd )
+    utils.system( [ install_cmd ] )
+
+
+def start_build_monitor( timeout ):
+    """Launch build_monitor.exe (Win32 only) to watch bjam with the given
+    per-test timeout, converted here from minutes to seconds."""
+    if sys.platform == 'win32':
+        build_monitor_path = tool_path( 'build_monitor.exe' )
+        if os.path.exists( build_monitor_path ):
+            utils.system( [ 'start /belownormal "" "%s" bjam.exe %d' % ( build_monitor_path, timeout*60 ) ] )
+        else:
+            log( 'Warning: Build monitor is not found at "%s"' % build_monitor_path )
+
+
+def stop_build_monitor():
+    """Kill the build monitor process via pskill (Win32 only)."""
+    if sys.platform == 'win32':
+        build_monitor_path = tool_path( 'build_monitor.exe' )
+        if os.path.exists( build_monitor_path ):
+            utils.system( [ '"%s" build_monitor' %  tool_path( 'pskill.exe' ) ] )
+
+
+def run_process_jam_log(v2):
+    """Feed the bjam log through process_jam_log to produce per-test XML
+    results under regression_results.  Note: the v2 parameter is reused as
+    the '--v2' flag string after the boolean check."""
+    log( 'Getting test case results out of "%s"...' % regression_log )
+
+    if v2:
+        v2 = "--v2"
+    else:
+        v2 = ""
+
+    utils.checked_system( [
+        '"%s" %s "%s" <"%s"' % (
+              tool_path( process_jam_log, v2 )
+            , v2
+            , regression_results
+            , regression_log
+            )
+        ] )
+
+
+def test(
+          toolsets
+        , bjam_options
+        , monitored
+        , timeout
+        , v2
+        , args
+        , **unused
+        ):
+    """Run the regression tests from boost/status and process the resulting
+    bjam log.  args selects the phases: 'clean', 'test', 'process' (empty
+    args means test+process).  The build monitor brackets the whole run
+    when `monitored` is set."""
+    if args == []:
+        args = [ "test", "process" ]
+
+    import_utils()
+
+    try:
+        if monitored:
+            start_build_monitor( timeout )
+
+        cd = os.getcwd()
+        os.chdir( os.path.join( boost_root, 'status' ) )
+
+        log( 'Making "%s" directory...' % regression_results )
+        utils.makedirs( regression_results )
+
+        results_libs = os.path.join( regression_results, 'libs' )
+        results_status = os.path.join( regression_results, 'status' )
+
+        if "clean" in args:
+            rmtree( results_libs )
+            rmtree( results_status )
+
+        # Option directing build output into the results tree differs
+        # between Boost.Build v1 and v2.
+        build_dir_option = "-sALL_LOCATE_TARGET"
+        if v2:
+            build_dir_option = "--build-dir"
+
+        if "test" in args:
+            test_cmd = '%s -d2 --dump-tests %s "%s=%s" >>"%s" 2>&1' % (
+                  bjam_command( toolsets, v2 )
+                , bjam_options
+                , build_dir_option
+                , regression_results
+                , regression_log
+                )
+
+            log( 'Starting tests (%s)...' % test_cmd )
+            utils.system( [ test_cmd ] )
+
+        if "process" in args:
+            run_process_jam_log(v2)
+
+        os.chdir( cd )
+    finally:
+        if monitored:
+            stop_build_monitor()
+
+
+def build_book( **kargs ):
+    """Build the BoostBook HTML documentation in boost/doc with bjam --v2,
+    logging to boostbook_log (removed first if present)."""
+    # To do
+    # 1. PDF generation
+    # 2. Do we need to cleanup before the build?
+    # 3. Incremental builds
+    if not os.path.exists( regression_results ):
+        os.makedirs( regression_results )
+    import_utils()
+    cwd = os.getcwd()
+    try:
+        os.chdir( os.path.join( boost_root, 'doc' ) )
+        if os.path.exists( boostbook_log ):
+            os.unlink( boostbook_log )
+        utils.system( [ '%s --v2 html >>%s 2>&1' % ( tool_path( bjam, v2=True ), boostbook_log ) ] )
+        # utils.system( [ '%s --v2 pdf >>%s 2>&1' % ( tool_path( bjam, v2=True ), boostbook_log ) ] )
+    finally:
+        os.chdir( cwd )
+
+def collect_logs(
+          tag
+        , runner
+        , platform
+        , user
+        , comment
+        , incremental
+        , dart_server
+        , ftp_proxy
+        , args
+        , **unused
+        ):
+    import_utils()
+
+    if comment is None:
+        comment = 'comment.html'
+
+    comment_path = os.path.join( regression_root, comment )
+    if not os.path.exists( comment_path ):
+        log( 'Comment file "%s" not found; creating default comment.' % comment_path )
+        f = open( comment_path, 'w' )
+        f.write( '<p>Tests are run on %s platform.</p>' % platform_name() )
+        f.close()
+
+    run_type = ''
+    if incremental: run_type = 'incremental'
+    else:           run_type = 'full'
+
+    source = 'tarball'
+    revision = ''
+    svn_root_file = os.path.join( boost_root, '.svn' )
+    svn_info_file = os.path.join( boost_root, 'svn_info.txt' )
+    if os.path.exists( svn_root_file ):
+        source = 'SVN'
+        svn_command( 'user', 'info ' + boost_root + ' >' + svn_info_file )
+
+    if os.path.exists( svn_info_file ):
+        f = open( svn_info_file, 'r' )
+        svn_info = f.read()
+        f.close()
+        i = svn_info.find( 'Revision:' )
+        if i >= 0:
+            i += 10
+            while svn_info[i] >= '0' and svn_info[i] <= '9':
+              revision += svn_info[i]
+              i += 1
+      
+      
+    from runner import collect_logs
+    collect_logs(
+          regression_results
+        , runner
+        , tag
+        , platform
+        , comment_path
+        , timestamp_path
+        , user
+        , source
+        , run_type
+        , dart_server
+        , ftp_proxy
+        , revision
+        )
+
+
+def collect_book( **unused ):
+    """Zip the generated BoostBook HTML tree (plus the timestamp and the
+    build log) into boostbook_archive_name for upload."""
+    log( 'Collecting files for BoostBook into "%s"...' % boostbook_archive_name )
+    import zipfile
+    boostbook_archive = zipfile.ZipFile( boostbook_archive_name, 'w', zipfile.ZIP_DEFLATED )
+    html_root = os.path.join( boost_root, 'doc/html' )
+
+    boostbook_archive.writestr( 'timestamp', timestamp())
+    boostbook_archive.write( boostbook_log, os.path.basename( boostbook_log ) )
+    
+    def add_files( arg, dirname, names ):
+        # Archive entries are stored relative to html_root.
+        for name in names:
+            path = os.path.join( dirname, name )
+            if not os.path.isdir( path ):
+                boostbook_archive.write( path, path[ len( html_root ) + 1: ] )
+
+    os.path.walk( html_root, add_files, None ) 
+    
+    
+
+def upload_logs(
+          tag
+        , runner
+        , user
+        , ftp_proxy
+        , debug_level
+        , send_bjam_log
+        , dart_server
+        , **unused
+        ):
+    """Upload the collected results via runner.upload_logs, with retry()."""
+    import_utils()
+    from runner import upload_logs
+    retry(
+          upload_logs
+        , ( regression_results, runner, tag, user, ftp_proxy, debug_level,
+            send_bjam_log, timestamp_path, dart_server )
+        )
+
+
+def upload_book( tag, runner, ftp_proxy, debug_level, **unused ):
+    """Upload the BoostBook archive for `tag` via runner.upload_to_ftp."""
+    import_utils()
+    from runner import upload_to_ftp
+    upload_to_ftp( tag, boostbook_archive_name, ftp_proxy, debug_level )
+
+
+def update_itself( tag, **unused ):
+    """Replace this running script with the copy from the fresh checkout,
+    keeping a '~' backup of the current version."""
+    source = os.path.join( xsl_reports_dir, 'runner', os.path.basename( sys.argv[0] ) )
+    self = os.path.join( regression_root, os.path.basename( sys.argv[0] ) )
+    
+    # Through revision 38985, the update copy was not done if
+    # os.stat(self).st_mtime > os.stat(source).st_mtime. This was not
+    # reliable on all systems, so the copy is now done unconditionally.
+    log( '    Saving a backup copy of the current script...' )
+    os.chmod( self, stat.S_IWRITE ) # Win32 workaround
+    shutil.move( self, '%s~' % self )
+    log( 'Updating %s from %s...' % ( self, source )  )
+    shutil.copy2( source, self )
+
+
+def send_mail( smtp_login, mail, subject, msg = '', debug_level = 0 ):
+    """Send a notification e-mail to (and from) `mail`.  smtp_login has the
+    form '<user>:<password>@<host>'; when absent, 'mail.<maildomain>' is
+    used without authentication."""
+    import smtplib
+    if not smtp_login:
+        server_name = 'mail.%s' % mail.split( '@' )[-1]
+        user_name = None
+        password = None
+    else:
+        server_name = smtp_login.split( '@' )[-1]
+        ( user_name, password ) = string.split( smtp_login.split( '@' )[0], ':' )
+
+    log( '    Sending mail through "%s"...' % server_name )
+    smtp_server = smtplib.SMTP( server_name )
+    smtp_server.set_debuglevel( debug_level )
+    if user_name:
+        smtp_server.login( user_name, password )
+
+    smtp_server.sendmail(
+          mail
+        , [ mail ]
+        , 'Subject: %s\nTo: %s\n\n%s' % ( subject, mail, msg )
+        )
+
+
+def regression(
+          tag
+        , local
+        , runner
+        , platform
+        , user
+        , comment
+        , toolsets
+        , book
+        , bjam_options
+        , bjam_toolset
+        , pjl_toolset
+        , incremental
+        , send_bjam_log
+        , force_update
+        , have_source
+        , skip_tests
+        , monitored
+        , timeout
+        , mail = None
+        , smtp_login = None
+        , proxy = None
+        , ftp_proxy = None
+        , debug_level = 0
+        , v2 = 1
+        , dart_server = None
+        , args = []
+        ):
+    """Top-level driver: obtain sources (local tarball / SVN / snapshot),
+    set up tools, run tests, collect and upload results, optionally build
+    and upload BoostBook, then self-update.  Start/success/failure
+    notifications are mailed when `mail` is given; any exception is
+    reported by mail and re-raised."""
+
+    try:
+        mail_subject = 'Boost regression for %s on %s' % ( tag, string.split(socket.gethostname(), '.')[0] )
+        start_time = time.localtime()
+        if mail:
+            log( 'Sending start notification to "%s"' % mail )
+            send_mail(
+                  smtp_login
+                , mail
+                , '%s started at %s.' % ( mail_subject, format_time( start_time ) )
+                , debug_level = debug_level
+                )
+
+        if local is not None:
+            log( 'Using local file "%s"' % local )
+
+            # Derive the tag from the tarball file name (up to the first dot).
+            b = os.path.basename( local )
+            tag = b[ 0: b.find( '.' ) ]
+            log( 'Tag: "%s"' % tag  )
+
+            unpack_tarball( local, regression_root )
+        elif have_source:
+            if not incremental: cleanup( [ 'bin' ] )
+        else:
+            if incremental or force_update:
+                if not incremental: cleanup( [ 'bin' ] )
+                update_source( user, tag, proxy, [] )
+            else:
+                cleanup( [] )
+                get_source( user, tag, proxy, [] )
+
+        setup( comment, toolsets, book, bjam_toolset, pjl_toolset, monitored, proxy,
+               v2, [] )
+
+        # Not specifying --toolset in command line is not enough
+        # that would mean to use Boost.Build default ones
+        # We can skip test only if we were explicitly
+        # told to have no toolsets in command line "--toolset="
+        if toolsets != '': # --toolset=,
+            if not skip_tests: test( toolsets, bjam_options, monitored, timeout, v2, [] )
+            collect_logs( tag, runner, platform, user, comment, incremental, dart_server, proxy, [] )
+            upload_logs( tag, runner, user, ftp_proxy, debug_level, send_bjam_log, dart_server )
+
+        if book:
+            build_book()
+            collect_book()
+            upload_book( tag, runner, ftp_proxy, debug_level )
+
+        update_itself( tag )
+
+        if mail:
+            log( 'Sending report to "%s"' % mail )
+            end_time = time.localtime()
+            send_mail(
+                  smtp_login
+                , mail
+                , '%s completed successfully at %s.' % ( mail_subject, format_time( end_time ) )
+                , debug_level = debug_level
+                )
+    except:
+        if mail:
+            log( 'Sending report to "%s"' % mail )
+            traceback_ = '\n'.join( apply( traceback.format_exception, sys.exc_info() ) )
+            end_time = time.localtime()
+            send_mail(
+                  smtp_login
+                , mail
+                , '%s failed at %s.' % ( mail_subject, format_time( end_time ) )
+                , traceback_
+                , debug_level
+                )
+        raise
+
+
+def show_revision( **unused ):
+    """Print the script's SVN revision and last-modified date, extracted
+    from the $Date$/$Revision$ keyword-expansion strings."""
+    modified = '$Date$'
+    revision = '$Revision$'
+
+    import re
+    re_keyword_value = re.compile( r'^\$\w+:\s+(.*)\s+\$$' )
+    print '\n\tRevision: %s' % re_keyword_value.match( revision ).group( 1 )
+    print '\tLast modified on: %s\n' % re_keyword_value.match( modified ).group( 1 )
+
+
+def accept_args( args ):
+    args_spec = [
+          'tag='
+        , 'local='
+        , 'runner='
+        , 'platform='
+        , 'user='
+        , 'comment='
+        , 'toolsets='
+        , 'book'
+        , 'bjam-options='
+        , 'bjam-toolset='
+        , 'pjl-toolset='
+        , 'timeout='
+        , 'mail='
+        , 'smtp-login='
+        , 'proxy='
+        , 'ftp-proxy='
+        , 'debug-level='
+        , 'incremental'
+        , 'force-update'
+        , 'have-source'
+        , 'skip-tests'
+        , 'dont-send-bjam-log'
+        , 'monitored'
+        , 'help'
+        , 'v2'
+        , 'v1'
+        , 'dart-server='
+        ]
+
+    options = {
+          '--tag'           : 'trunk'
+        , '--local'         : None
+        , '--platform'      : platform_name()
+        , '--user'          : None
+        , '--comment'       : None
+        , '--toolsets'      : None
+        , '--book'          : False
+        , '--bjam-options'  : ''
+        , '--bjam-toolset'  : None
+        , '--pjl-toolset'   : None
+        , '--timeout'       : 5
+        , '--mail'          : None
+        , '--smtp-login'    : None
+        , '--proxy'         : None
+        , '--debug-level'   : 0
+        , '--ftp-proxy'     : None
+        , '--dart-server'   : 'beta.boost.org:8081'
+        }
+
+    ( option_pairs, other_args ) = getopt.getopt( args, '', args_spec )
+    map( lambda x: options.__setitem__( x[0], x[1] ), option_pairs )
+
+    if not options.has_key( '--runner' ) or options.has_key( '--help' ):
+        usage()
+        sys.exit( 1 )
+
+    return {
+          'tag'             : options[ '--tag' ]
+        , 'local'           : options[ '--local' ]
+        , 'runner'          : options[ '--runner' ]
+        , 'platform'        : options[ '--platform']
+        , 'user'            : options[ '--user' ]
+        , 'comment'         : options[ '--comment' ]
+        , 'toolsets'        : options[ '--toolsets' ]
+        , 'book'            : options.has_key( '--book' )
+        , 'bjam_options'    : options[ '--bjam-options' ]
+        , 'bjam_toolset'    : options[ '--bjam-toolset' ]
+        , 'pjl_toolset'     : options[ '--pjl-toolset' ]
+        , 'incremental'     : options.has_key( '--incremental' )
+        , 'send_bjam_log'   : not options.has_key( '--dont-send-bjam-log' )
+        , 'force_update'    : options.has_key( '--force-update' )
+        , 'have_source'     : options.has_key( '--have-source' )
+        , 'skip_tests'      : options.has_key( '--skip-tests' )
+        , 'monitored'       : options.has_key( '--monitored' )
+        , 'timeout'         : options[ '--timeout' ]
+        , 'mail'            : options[ '--mail' ]
+        , 'smtp_login'      : options[ '--smtp-login' ]
+        , 'proxy'           : options[ '--proxy' ]
+        , 'ftp_proxy'       : options[ '--ftp-proxy' ]
+        , 'debug_level'     : int(options[ '--debug-level' ])
+        , 'v2'              : not options.has_key( '--v1' )
+        , 'dart_server'     : options[ '--dart-server' ]
+        , 'args'            : other_args
+        }
+
+# Dispatch table mapping the first command-line argument to its handler.
+commands = {
+      'cleanup'         : cleanup
+    , 'get-source'      : get_source
+    , 'update-source'   : update_source
+    , 'setup'           : setup
+    , 'install'         : install
+    , 'test'            : test
+    , 'build-book'      : build_book
+    , 'collect-logs'    : collect_logs
+    , 'collect-book'    : collect_book
+    , 'upload-logs'     : upload_logs
+    , 'upload-book'     : upload_book
+    , 'update-itself'   : update_itself
+    , 'regression'      : regression
+    , 'show-revision'   : show_revision
+    }
+
+def usage():
+    print 'Usage:\n\t%s [command] options' % os.path.basename( sys.argv[0] )
+    print    '''
+Commands:
+\t%s
+
+Options:
+\t--runner        runner ID (e.g. 'Metacomm')
+\t--tag           the tag for the results ('trunk' by default)
+\t--local         the name of the boost tarball
+\t--comment       an HTML comment file to be inserted in the reports
+\t                ('comment.html' by default)
+\t--incremental   do incremental run (do not remove previous binaries)
+\t--dont-send-bjam-log 
+\t                do not send full bjam log of the regression run
+\t--force-update  do an SVN update (if applicable) instead of a clean
+\t                checkout, even when performing a full run
+\t--have-source   do neither a tarball download nor an SVN update;
+\t                used primarily for testing script changes
+\t--skip-tests    do no run bjam; used for testing script changes
+\t--monitored     do a monitored run
+\t--timeout       specifies the timeout, in minutes, for a single test
+\t                run/compilation (enforced only in monitored runs, 5 by
+\t                default)
+\t--user          Boost SVN user ID (optional)
+\t--toolsets      comma-separated list of toolsets to test with (optional)
+\t--book          build BoostBook (optional)
+\t--bjam-options  options to pass to the regression test (optional)
+\t--bjam-toolset  bootstrap toolset for 'bjam' executable (optional)
+\t--pjl-toolset   bootstrap toolset for 'process_jam_log' executable
+\t                (optional)
+\t--mail          email address to send run notification to (optional)
+\t--smtp-login    STMP server address/login information, in the following
+\t                form: <user>:<password>@<host>[:<port>] (optional).
+\t--proxy         HTTP proxy server address and port (e.g.
+\t                'http://www.someproxy.com:3128', optional)
+\t--ftp-proxy     FTP proxy server (e.g. 'ftpproxy', optional)
+\t--debug-level   debugging level; controls the amount of debugging
+\t                output printed; 0 by default (no debug output)
+\t--v1            Use Boost.Build V1
+\t--v2            Use Boost.Build V2 (default)
+\t--dart-server   The dart server to send results to.
+''' % '\n\t'.join( commands.keys() )
+
+    print 'Example:\n\t%s --runner=Metacomm\n' % os.path.basename( sys.argv[0] )
+    print 'For more documentation, see http://tinyurl.com/4f2zp\n'
+
+
+if __name__ == '__main__':
+    # First argument selects a command; anything else defaults to the full
+    # 'regression' run with all arguments treated as options.
+    if len(sys.argv) > 1 and sys.argv[1] in commands:
+        command = sys.argv[1]
+        args = sys.argv[ 2: ]
+        # Prepend an empty --runner default (overridable by a later
+        # --runner option) so accept_args' mandatory-runner check passes
+        # for subcommands; collect-logs/upload-logs must supply a real one.
+        if command not in [ 'collect-logs', 'upload-logs' ]:
+            args.insert( 0, '--runner=' )
+    else:
+        command = 'regression'
+        args = sys.argv[ 1: ]
+
+    commands[ command ]( **accept_args( args ) )

+ 165 - 0
regression/xsl_reports/test/common.py

@@ -0,0 +1,165 @@
+import xml.sax.saxutils
+import time
+
+def make_test_name( library_idx, test_idx ):
+    return "test_%02d_%02d" % ( library_idx, test_idx )
+
+def make_library_name( library_idx ):
+    if library_idx % 4 in ( 0, 1 ):
+        return "library_%02d/%02d" % ( int( library_idx / 4 ) * 4, library_idx % 4 )
+    else:
+        return "library_%02d" % library_idx
+
+def make_toolset_name( toolset_idx ):
+    return "toolset_%02d" % toolset_idx
+
+def make_library_target_directory( library_idx, toolset_idx, variant = None ):
+    base = "lib/%s/%s" % ( make_library_name( library_idx )
+                           , make_toolset_name( toolset_idx ) )
+    if variant is not None:
+        return "%s/%s" % ( base, variant )
+    else:
+        return base
+
+def make_test_target_directory( library_idx, toolset_idx, test_name, variant ):
+    base = "%s/%s/%s" % ( make_library_name( library_idx )
+                          , make_toolset_name( toolset_idx )
+                          , test_name )
+    if variant is not None:
+        return "%s/%s" % ( base, variant )
+    else:
+        return base
+
+def format_timestamp( timestamp ):
+    return time.strftime( "%Y-%m-%dT%H:%M:%SZ", timestamp )
+
+def make_test_log( xml_generator
+                   , library_idx
+                   , toolset_idx
+                   , test_name
+                   , test_type
+                   , test_result
+                   , show_run_output
+                   , variant ):
+    library = make_library_name( library_idx )
+    toolset_name = make_toolset_name( toolset_idx )
+    
+    target_directory = ""
+    if test_type != "lib":
+        target_directory = make_test_target_directory( library_idx, toolset_idx, test_name, variant )
+    else:
+        target_directory = make_library_target_directory( library_idx, toolset_idx, variant )
+        
+    xml_generator.startElement( "test-log", { "library": library
+                                  , "test-name":  test_name
+                                  , "toolset": toolset_name
+                                  , "test-type": test_type
+                                  , "test-program": "some_program"
+                                  , "target-directory": target_directory
+                                  , "show-run-output": show_run_output
+                                  } )
+
+    if test_type != "lib":
+
+        if test_result == "success" and ( toolset_idx + 1 ) % 4:
+            xml_generator.startElement( "compile", { "result": "success" } );
+            xml_generator.characters( "Compiling in %s" % target_directory )
+            xml_generator.endElement( "compile" )
+
+        if test_type.find( "link" ) == 0 or test_type.find( "run" ) == 0 and toolset_idx % 4:
+            xml_generator.startElement( "lib", { "result": test_result } );
+            xml_generator.characters( make_library_target_directory( library_idx, toolset_idx ) )
+            xml_generator.endElement( "lib" )
+
+            xml_generator.startElement( "link", { "result": "success" } );
+            xml_generator.characters( "Linking in %s" % target_directory )
+            xml_generator.endElement( "link" )
+
+        if test_type.find( "run" ) == 0 and ( toolset_idx + 2 ) % 4:
+            xml_generator.startElement( "run", { "result": test_result } );
+            xml_generator.characters( "Running in %s" % target_directory )
+            xml_generator.endElement( "run" )
+
+    else:
+        xml_generator.startElement( "compile", { "result": test_result } );
+        xml_generator.characters( "Compiling in %s" % make_library_target_directory( library_idx, toolset_idx ) )
+        xml_generator.endElement( "compile" )
+
+
+
+    xml_generator.endElement( "test-log" )
+
+
+def make_expicit_failure_markup( num_of_libs, num_of_toolsets, num_of_tests ):
+    g = xml.sax.saxutils.XMLGenerator( open( "explicit-failures-markup.xml", "w" ), "utf-8" )
+    g.startDocument()
+    g.startElement( "explicit-failures-markup", {} );
+
+    # required toolsets
+    for i_toolset in range( 0, num_of_toolsets ):
+        if i_toolset < 2:
+            g.startElement( "mark-toolset", { "name": "toolset_%02d" % i_toolset, "status":"required"} )
+            g.endElement( "mark-toolset" )
+
+    for i_library in range( 0, num_of_libs ):
+        g.startElement( "library", { "name": make_library_name( i_library ) } )
+        if i_library % 4 == 0:
+            g.startElement( "mark-unusable", {} )
+            for i_toolset in range( 0, num_of_toolsets ):
+                if i_toolset % 2 == 1:
+                    g.startElement( "toolset", { "name": make_toolset_name( i_toolset ) } )
+                    g.endElement( "toolset" )
+            g.startElement( "note", { "author": u"T. T\xe8st" } )
+            g.characters( "Test note" )
+            g.endElement( "note" )
+            g.endElement( "mark-unusable" )
+
+        for i_test in range( 0, num_of_tests ):
+
+            category = 0
+            explicitly_marked_failure = 0
+            unresearched = 0
+
+            if i_test % 2 == 0:
+                category = i_test % 3
+            
+            if i_test % 3 == 0:
+                explicitly_marked_failure = 1
+                if i_test % 2 == 0:
+                    unresearched = 1
+
+            if category or explicitly_marked_failure:
+                test_attrs = { "name": make_test_name( i_library, i_test ) }
+                if category:
+                    test_attrs[ "category" ] = "Category %s" % category
+                g.startElement( "test", test_attrs )
+                if explicitly_marked_failure:
+                    failure_attrs = {}
+                    if unresearched: failure_attrs[ "reason" ] = "not-researched"
+                    
+                    g.startElement( "mark-failure", failure_attrs )
+                    
+                    g.startElement( "toolset", { "name": make_toolset_name( 1 ) } )
+                    g.endElement( "toolset" )
+                    g.startElement( "toolset", { "name": make_toolset_name( 0 ) } )
+                    g.endElement( "toolset" )
+                    g.startElement( "toolset", { "name": make_toolset_name( 2 ) } )
+                    g.endElement( "toolset" )
+
+                    g.startElement( "note", {  "author": u"V. Ann\xf3tated" } )
+                    g.characters( "Some thoughtful note" )
+                    g.endElement( "note" )
+                    
+                    g.endElement( "mark-failure" )
+                    
+                g.endElement( "test" );
+        g.endElement( "library" )
+            
+        
+    g.endElement( "explicit-failures-markup" )
+    g.endDocument()
+
+
+def make_expected_results( num_of_libs, num_of_toolsets, num_of_tests ):
+    pass
+

+ 3 - 0
regression/xsl_reports/test/expected_results.xml

@@ -0,0 +1,3 @@
+<?xml version="1.0" encoding="utf-8"?>
+<expected-failures>
+</expected-failures>

+ 159 - 0
regression/xsl_reports/test/generate_test_results.py

@@ -0,0 +1,159 @@
+#
+# Generates test results for testing of boost_wide_report.py
+#
+import common
+import xml.sax.saxutils
+
+import os
+import time
+
+num_of_libs = 5
+num_of_runners = 5
+num_of_toolsets = 3
+num_of_tests = 10
+
+results_directory = "results/incoming/CVS-HEAD/processed"
+
+
+# Generated results follow the rules:
+#
+# * odd runners are testing on Win32, even runners are testing on Unix
+# * the third toolset has 2 variants
+#
+
+# Generated expected markup:
+#
+# * First two toolset are required
+# * Every fourth library is unusable on even toolsets
+# * Last two tests are corner-case tests
+# * Every 4th test is explicitly marked up as expected-failure
+
+
+def library_build_failed( library_idx ):
+    return library_idx % 2
+
+def test_run_source( runner_idx ):
+    if runner_idx % 2: return "tarball"
+    else:              return "cvs head"
+
+def test_run_type( runner_idx ):
+    if runner_idx % 2: return "incremental"
+    else:              return "full"
+
+
+def test_type( i ):
+    types = [ "compile", "compile_fail", "link", "link_fail", "run", "run_fail", "run_pyd" ]
+    return types[ i % len( types) ]
+
+
+def make_test_results():
+    if not os.path.exists( results_directory ):
+        os.makedirs( results_directory )
+
+    for i_runner in range( 0, num_of_runners ):
+        runner_id = "runner %02d" % i_runner
+        g = xml.sax.saxutils.XMLGenerator( open( os.path.join( results_directory, runner_id + ".xml" ), "w" ), "utf-8" )
+        g.startDocument()
+        if i_runner % 2:
+            platform = "Win32"
+        else:
+            platform = "Unix"
+            
+        g.startElement( "test-run", { "platform": platform
+                                      , "runner": runner_id
+                                      , "timestamp": common.format_timestamp( 
+                                                          time.gmtime( time.time() - i_runner * 24*60*60 )
+                                                        )
+                                      , "source": test_run_source( i_runner )
+                                      , "run-type": test_run_type( i_runner )
+                                      } )
+
+        g.startElement( "comment", {} )
+        g.characters( "<b>Runner</b> is who <i>running</i> does." )
+        g.endElement( "comment" )
+
+        for i_lib in range( 0, num_of_libs ):
+            for i_toolset in range( num_of_toolsets ):
+                if library_build_failed( i_lib ): test_result = "fail"
+                else:                             test_result = "success"
+                    
+                common.make_test_log( xml_generator = g
+                                      , library_idx = i_lib
+                                      , toolset_idx = i_toolset 
+                                      , test_name = ""
+                                      , test_type = "lib"
+                                      , test_result = test_result
+                                      , show_run_output = "false"
+                                      , variant = None )
+
+    
+        for i_lib in range( 0, num_of_libs ):
+            library_name = "library_%02d" % i_lib
+            if num_of_runners - 1 == i_runner and  i_lib % 2: 
+                continue 
+                
+            for i_toolset in range( num_of_toolsets ):
+                toolset_name = "toolset %02d" % ( i_toolset )
+
+                if num_of_runners - 1 == i_runner and i_toolset % 2:
+                    continue
+                
+                for i_test in range( num_of_tests ):
+                    test_name = "test_%02d_%02d" % ( i_lib, i_test )
+                    test_result = ""
+                    show_run_output = "false"
+                    
+                    if num_of_runners - 1 == i_runner and i_test % 2:
+                        continue
+                    
+                    if i_runner % 2: test_result = "success"
+                    else:            test_result = "fail"
+
+                    if i_runner == 1 and i_toolset == 2 and i_test % 6 == 0:
+                        test_result = "fail"
+                        
+                    if test_result == "success" and ( 0 == i_test % 5 ):
+                        show_run_output = "true"
+
+                    if i_toolset == 2:
+                        variants = [ "static-lib", "shared-lib" ]
+                    else:
+                        variants = [ None ]
+
+                    for variant in variants:
+                        common.make_test_log( xml_generator = g
+                                              , library_idx = i_lib
+                                              , toolset_idx = i_toolset
+                                              , test_name = test_name
+                                              , test_type = test_type( i_test )
+                                              , test_result = test_result
+                                              , show_run_output = show_run_output
+                                              , variant = variant )
+        g.endElement( "test-run" )
+        g.endDocument()
+
+
+
+## <test-log library="algorithm" test-name="container" test-type="run" test-program="libs/algorithm/string/test/container_test.cpp" target-directory="bin/boost/libs/algorithm/string/test/container.test/borland-5.6.4/debug" toolset="borland-5.6.4" show-run-output="false">
+## <compile result="fail" timestamp="2004-06-29 17:02:27 UTC">
+
+##     "C:\Progra~1\Borland\CBuilder6\bin\bcc32"  -j5 -g255 -q -c -P -w -Ve -Vx -a8 -b-   -v -Od -vi- -tWC -tWR -tWC -WM- -DBOOST_ALL_NO_LIB=1  -w-8001  -I"C:\Users\Administrator\boost\main\results\bin\boost\libs\algorithm\string\test"   -I"C:\Users\Administrator\boost\main\boost" -I"C:\Progra~1\Borland\CBuilder6\include"  -o"C:\Users\Administrator\boost\main\results\bin\boost\libs\algorithm\string\test\container.test\borland-5.6.4\debug\container_test.obj"  "..\libs\algorithm\string\test\container_test.cpp" 
+
+## ..\libs\algorithm\string\test\container_test.cpp:
+## Warning W8091 C:\Users\Administrator\boost\main\boost\libs/test/src/unit_test_result.cpp 323: template argument _InputIter passed to 'for_each' is a output iterator: input iterator required in function unit_test_result::~unit_test_result()
+## Warning W8091 C:\Users\Administrator\boost\main\boost\libs/test/src/unit_test_suite.cpp 63: template argument _InputIter passed to 'find_if' is a output iterator: input iterator required in function test_case::Impl::check_dependencies()
+## Warning W8091 C:\Users\Administrator\boost\main\boost\libs/test/src/unit_test_suite.cpp 204: template argument _InputIter passed to 'for_each' is a output iterator: input iterator required in function test_suite::~test_suite()
+## Error E2401 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 45: Invalid template argument list
+## Error E2040 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 46: Declaration terminated incorrectly
+## Error E2090 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 277: Qualifier 'algorithm' is not a class or namespace name
+## Error E2272 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 277: Identifier expected
+## Error E2090 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 278: Qualifier 'algorithm' is not a class or namespace name
+## Error E2228 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 278: Too many error or warning messages
+## *** 6 errors in Compile ***
+## </compile>
+## </test-log>
+        
+    
+make_test_results()
+common.make_expicit_failure_markup( num_of_libs, num_of_toolsets, num_of_tests  )
+

+ 85 - 0
regression/xsl_reports/test/generate_test_results_v1.py

@@ -0,0 +1,85 @@
+import xml.sax.saxutils
+
+import common
+
+import os
+import time
+
+num_of_libs = 2
+num_of_toolsets = 3
+num_of_tests = 10
+
+tag = "1_30_0"
+
+def library_build_failed( library_idx ):
+    return library_idx % 2
+
+def make_test_results():
+    if not os.path.exists( tag ):
+        os.makedirs( tag )
+        
+    g = xml.sax.saxutils.XMLGenerator( open( os.path.join( tag, "test.xml" ), "w" ) )
+    platform = "Win32"
+    g.startElement( "test-results", {} )
+
+    for i_lib in range( 0, num_of_libs ):
+        for i_toolset in range( num_of_toolsets ):
+            if library_build_failed( i_lib ): test_result = "fail"
+            else:                             test_result = "success"
+            
+            common.make_test_log( xml_generator = g
+                                  , library_idx = i_lib
+                                  , toolset_idx = i_toolset 
+                                  , test_name = ""
+                                  , test_type = "lib"
+                                  , test_result = test_result
+                                  , show_run_output = "false" )
+            
+    
+    for i_lib in range( 0, num_of_libs ):
+        library_name = "library_%02d" % i_lib
+
+        for i_toolset in range( num_of_toolsets ):
+            toolset_name = "toolset_%02d" % ( i_toolset )
+
+            for i_test in range( num_of_tests ):
+                test_name = "test_%02d_%02d" % ( i_lib, i_test )
+                test_result = ""
+                test_type = "run"
+                show_run_output = "false"
+
+                if i_lib % 2:  test_result = "success"
+                else:             test_result = "fail"
+
+                if test_result == "success" and ( 0 == i_test % 5 ):
+                    show_run_output = "true"
+
+                common.make_test_log( g, i_lib, i_toolset, test_name, test_type, test_result, show_run_output )
+
+    g.endElement( "test-results" )
+
+
+
+
+## <test-log library="algorithm" test-name="container" test-type="run" test-program="libs/algorithm/string/test/container_test.cpp" target-directory="bin/boost/libs/algorithm/string/test/container.test/borland-5.6.4/debug" toolset="borland-5.6.4" show-run-output="false">
+## <compile result="fail" timestamp="2004-06-29 17:02:27 UTC">
+
+##     "C:\Progra~1\Borland\CBuilder6\bin\bcc32"  -j5 -g255 -q -c -P -w -Ve -Vx -a8 -b-   -v -Od -vi- -tWC -tWR -tWC -WM- -DBOOST_ALL_NO_LIB=1  -w-8001  -I"C:\Users\Administrator\boost\main\results\bin\boost\libs\algorithm\string\test"   -I"C:\Users\Administrator\boost\main\boost" -I"C:\Progra~1\Borland\CBuilder6\include"  -o"C:\Users\Administrator\boost\main\results\bin\boost\libs\algorithm\string\test\container.test\borland-5.6.4\debug\container_test.obj"  "..\libs\algorithm\string\test\container_test.cpp" 
+
+## ..\libs\algorithm\string\test\container_test.cpp:
+## Warning W8091 C:\Users\Administrator\boost\main\boost\libs/test/src/unit_test_result.cpp 323: template argument _InputIter passed to 'for_each' is a output iterator: input iterator required in function unit_test_result::~unit_test_result()
+## Warning W8091 C:\Users\Administrator\boost\main\boost\libs/test/src/unit_test_suite.cpp 63: template argument _InputIter passed to 'find_if' is a output iterator: input iterator required in function test_case::Impl::check_dependencies()
+## Warning W8091 C:\Users\Administrator\boost\main\boost\libs/test/src/unit_test_suite.cpp 204: template argument _InputIter passed to 'for_each' is a output iterator: input iterator required in function test_suite::~test_suite()
+## Error E2401 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 45: Invalid template argument list
+## Error E2040 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 46: Declaration terminated incorrectly
+## Error E2090 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 277: Qualifier 'algorithm' is not a class or namespace name
+## Error E2272 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 277: Identifier expected
+## Error E2090 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 278: Qualifier 'algorithm' is not a class or namespace name
+## Error E2228 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 278: Too many error or warning messages
+## *** 6 errors in Compile ***
+## </compile>
+## </test-log>
+        
+    
+make_test_results( )
+common.make_expicit_failure_markup( num_of_libs, num_of_toolsets, num_of_tests  )

+ 36 - 0
regression/xsl_reports/test/restrict_to_library.xsl

@@ -0,0 +1,36 @@
+<?xml version="1.0" encoding="utf-8"?>
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+                version="1.0">
+
+    <xsl:output method="xml" encoding="ascii"/>
+    <xsl:param name="library"/>
+
+
+    <xsl:template match="/">
+        <xsl:message>
+            <xsl:value-of select="$library"/>
+        </xsl:message>
+        <xsl:apply-templates/>
+    </xsl:template>
+
+    <xsl:template match="*">
+        <xsl:copy>
+            <xsl:apply-templates select="@*"/>
+            <xsl:apply-templates />
+        </xsl:copy>
+    </xsl:template>
+    
+    <xsl:template match="test-log">          
+      <xsl:if test="@library=$library">
+          <xsl:copy>
+              <xsl:apply-templates select="@*"/>
+              <xsl:apply-templates/>
+          </xsl:copy>
+      </xsl:if>
+  </xsl:template>
+  
+  <xsl:template match="@*">
+      <xsl:copy-of select="."/>
+  </xsl:template>
+  
+</xsl:stylesheet>

+ 32 - 0
regression/xsl_reports/test/run_notes_regression.py

@@ -0,0 +1,32 @@
+import sys
+
+sys.path.append( '..' )
+
+import os
+
+import report
+import merger
+import utils
+
+
+tag = "1_32_0"
+
+# utils.makedirs( "results" )
+    
+all_xml_file = "a.xml"
+
+report.make_result_pages( 
+      test_results_file = os.path.abspath( all_xml_file )
+    , expected_results_file = ""
+    , failures_markup_file = os.path.abspath( "../../../../status/explicit-failures-markup.xml" )
+    , tag = tag
+    , run_date = "Today date"
+    , comment_file = os.path.abspath( "comment.html" )
+    , results_dir = "results"
+    , result_prefix = ""
+    , reports = [ "dd" ]
+    , v2 = 1
+    )
+
+
+

+ 35 - 0
regression/xsl_reports/test/run_v1.py

@@ -0,0 +1,35 @@
+import sys
+
+sys.path.append( '..' )
+
+import os
+
+import report
+import merger
+import utils
+
+
+tag = "1_30_0"
+
+utils.makedirs( "results" )
+    
+all_xml_file = "results/all.xml"
+all_xml_writer = open( all_xml_file, "w" )
+merger.merge_test_runs( ".", tag, all_xml_writer )
+all_xml_writer.close()
+
+report.make_result_pages( 
+      test_results_file = os.path.abspath( all_xml_file )
+    , expected_results_file = ""
+    , failures_markup_file = os.path.abspath( "explicit-failures-markup.xml" )
+    , source = tag
+    , run_date = "Today date"
+    , comment_file = os.path.abspath( "comment.html" )
+    , results_dir = os.path.abspath( "results" )
+    , result_prefix = ""
+    , reports = [ "l", "dd" ]
+    , v2 = 0
+    )
+
+
+

+ 34 - 0
regression/xsl_reports/test/test.py

@@ -0,0 +1,34 @@
+import sys
+
+sys.path.append( '..' )
+
+import os
+
+import boost_wide_report
+import common
+import utils
+import shutil
+import time
+
+tag = "CVS-HEAD"
+
+if os.path.exists( "results/incoming/CVS-HEAD/processed/merged" ):
+    shutil.rmtree( "results/incoming/CVS-HEAD/processed/merged"  )
+
+boost_wide_report.ftp_task = lambda ftp_site, site_path, incoming_dir: 1
+boost_wide_report.unzip_archives_task = lambda incoming_dir, processed_dir, unzip: 1
+
+boost_wide_report.execute_tasks(
+      tag = tag
+    , user = None
+    , run_date = common.format_timestamp( time.gmtime() )
+    , comment_file = os.path.abspath( "comment.html" )
+    , results_dir = os.path.abspath( "results" )
+    , output_dir = os.path.abspath( "output" )
+    , reports = [ "x", "ds", "dd", "dsr", "ddr", "us", "ud", "usr", "udr" ]
+    , warnings = [ 'Warning text 1', 'Warning text 2' ]
+    , extended_test_results = os.path.abspath( "output/extended_test_results.xml" )
+    , dont_collect_logs = 1
+    , expected_results_file = os.path.abspath( "expected_results.xml" )
+    , failures_markup_file = os.path.abspath( "explicit-failures-markup.xml" )
+    )

+ 36 - 0
regression/xsl_reports/test/test_boost_wide_report.py

@@ -0,0 +1,36 @@
+import unittest
+import sys
+import time
+
+sys.path.append( ".." )
+
+import boost_wide_report
+
+class test_boost_wide_report(unittest.TestCase):
+    def test_diff( self ):
+        test_cases = [
+              ( []
+                , []
+                , ( [], [] ) )
+            , ( [ boost_wide_report.file_info( "a", 1, time.localtime( 0 ) ) ]
+                , []
+                , ( [ "a" ], [] ) )
+            , ( []
+                , [ boost_wide_report.file_info( "a", 1, time.localtime( 0 ) ) ]
+                , ( [], [ "a" ] ) )
+            , ( [ boost_wide_report.file_info( "a", 1, time.localtime( 0 ) ) ]
+                , [ boost_wide_report.file_info( "a", 1, time.localtime( 1 ) ) ]
+                , ( [ "a" ], [] ) )
+            ]
+
+        for test_case in test_cases:
+            source_dir_content = test_case[0]
+            destination_dir_content = test_case[1]
+            expected_result = test_case[2]
+            d = boost_wide_report.diff( source_dir_content, destination_dir_content )
+            self.failUnlessEqual( d, expected_result )
+
+if __name__ == '__main__':
+    unittest.main()
+            
+            

+ 107 - 0
regression/xsl_reports/test_results.xsd

@@ -0,0 +1,107 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified">
+    <!-- 
+    The following online services can be used to validate collected test results:
+
+        - http://apps.gotdotnet.com/xmltools/xsdvalidator/
+        - http://tools.decisionsoft.com/schemaValidate.html
+    -->
+
+    <xs:simpleType name="test_result">
+        <xs:restriction base="xs:NMTOKEN">
+            <xs:enumeration value="fail"/>
+            <xs:enumeration value="succeed"/>
+        </xs:restriction>
+    </xs:simpleType>
+
+    <xs:simpleType name="run_test_result">
+        <xs:restriction base="xs:NMTOKEN">
+            <xs:enumeration value="fail"/>
+            <xs:enumeration value="succeed"/>
+            <xs:enumeration value="note"/>
+        </xs:restriction>
+    </xs:simpleType>
+
+    <xs:simpleType name="test_type">
+        <xs:restriction base="xs:NMTOKEN">
+            <xs:enumeration value="compile"/>
+            <xs:enumeration value="compile_fail"/>
+            <xs:enumeration value="lib"/>
+            <xs:enumeration value="pyd"/>
+            <xs:enumeration value="run"/>
+            <xs:enumeration value="run_fail"/>
+            <xs:enumeration value="run_pyd"/>
+        </xs:restriction>
+    </xs:simpleType>
+
+    <xs:element name="compile">
+        <xs:complexType>
+            <xs:simpleContent>
+                <xs:extension base="xs:string">
+                    <xs:attribute name="result" type="test_result" use="required"/>
+                    <xs:attribute name="timestamp" type="xs:string" use="required"/>
+                </xs:extension>
+            </xs:simpleContent>
+        </xs:complexType>
+    </xs:element>
+
+    <xs:element name="link">
+        <xs:complexType>
+            <xs:simpleContent>
+                <xs:extension base="xs:string">
+                    <xs:attribute name="result" type="test_result" use="required"/>
+                    <xs:attribute name="timestamp" type="xs:string" use="required"/>
+                </xs:extension>
+            </xs:simpleContent>
+        </xs:complexType>
+    </xs:element>
+
+    <xs:element name="lib">
+        <xs:complexType>
+            <xs:simpleContent>
+                <xs:extension base="xs:string">
+                    <xs:attribute name="result" type="test_result" use="required"/>
+                    <xs:attribute name="timestamp" type="xs:string" use="required"/>
+                </xs:extension>
+            </xs:simpleContent>
+        </xs:complexType>
+    </xs:element>
+
+    <xs:element name="run">
+        <xs:complexType>
+            <xs:simpleContent>
+                <xs:extension base="xs:string">
+                    <xs:attribute name="result" type="run_test_result" use="required"/>
+                    <xs:attribute name="timestamp" type="xs:string" use="required"/>
+                </xs:extension>
+            </xs:simpleContent>
+        </xs:complexType>
+    </xs:element>
+
+    <xs:element name="test-log">
+        <xs:complexType>
+            <xs:sequence>
+                <xs:element ref="compile" minOccurs="0"/>
+                <xs:element ref="link" minOccurs="0"/>
+                <xs:element ref="run" minOccurs="0"/>
+                <xs:element ref="lib" minOccurs="0"/>
+            </xs:sequence>
+            <xs:attribute name="library" type="xs:string" use="required"/>
+            <xs:attribute name="test-name" type="xs:string" use="required"/>
+            <xs:attribute name="test-type" type="test_type" use="required"/>
+            <xs:attribute name="test-program" type="xs:string" use="required"/>
+            <xs:attribute name="target-directory" type="xs:string" use="required"/>
+            <xs:attribute name="toolset" type="xs:string" use="required"/>
+            <xs:attribute name="show-run-output" type="xs:boolean" use="required"/>
+        </xs:complexType>
+    </xs:element>
+
+    <xs:element name="tests">
+        <xs:complexType>
+            <xs:sequence>
+                <xs:element ref="test-log" maxOccurs="unbounded"/>
+            </xs:sequence>
+        </xs:complexType>
+    </xs:element>
+
+</xs:schema>

+ 13 - 0
regression/xsl_reports/utils/__init__.py

@@ -0,0 +1,13 @@
+
+from accept_args import *
+from char_translation_table import *
+from check_existance import *
+from checked_system import *
+from libxslt import *
+from log import *
+from makedirs import *
+from rename import *
+from tar import *
+from zip import *
+
+import sourceforge

+ 30 - 0
regression/xsl_reports/utils/accept_args.py

@@ -0,0 +1,30 @@
+
+import getopt
+import re
+import sys
+
+def accept_args( args_spec, args, options, usage ):
+    
+    defaults_num = len(options)
+    
+    ( option_pairs, rest_args ) = getopt.getopt( args, '', args_spec )
+    map( lambda x: options.__setitem__( x[0], x[1] ), option_pairs )
+
+    if ( options.has_key( '--help' ) or len( options.keys() ) == defaults_num ):
+        usage()
+        sys.exit( 1 )
+
+    if len( rest_args ) > 0 and rest_args[0][0] == '@':
+        f = open( rest_args[0][1:], 'r' )
+        config_lines  = f.read().splitlines()
+        f.close()
+        for l in config_lines:
+            if re.search( r'^\s*#', l ): continue
+            if re.search( r'^\s*$', l ): continue
+            m = re.match( r'^(?P<name>.*?)=(?P<value>.*)', l )
+            if m:
+                options[ '--%s' % m.group( 'name' ) ] = m.group( 'value' )
+            else:
+                raise 'Invalid format of config line "%s"' % l
+
+    return rest_args

+ 13 - 0
regression/xsl_reports/utils/char_translation_table.py

@@ -0,0 +1,13 @@
+
+import string
+
+def chr_or_question_mark( c ):
+    if chr(c) in string.printable and c < 128 and c not in ( 0x09, 0x0b, 0x0c ):
+        return chr(c)
+    else:
+        return '?'
+
+char_translation_table = string.maketrans( 
+      ''.join( map( chr, range(0, 256) ) )
+    , ''.join( map( chr_or_question_mark, range(0, 256) ) )
+    )

+ 9 - 0
regression/xsl_reports/utils/check_existance.py

@@ -0,0 +1,9 @@
+
+import os
+
+def check_existance( name ):
+    a = os.popen( '%s --version' % name )
+    output = a.read()
+    rc = a.close()
+    if rc is not None:
+        raise Exception( '"%s" is required' % name )

+ 22 - 0
regression/xsl_reports/utils/checked_system.py

@@ -0,0 +1,22 @@
+
+import os
+import string
+import sys
+
+def system( commands ):
+    if sys.platform == 'win32':
+        f = open( 'tmp.cmd', 'w' )
+        f.write( string.join( commands, '\n' ) )
+        f.close()
+        rc = os.system( 'tmp.cmd' )
+        return rc
+    else:
+        rc = os.system( '&&'.join( commands ) )
+        return rc
+
+    
+def checked_system( commands, valid_return_codes = [ 0 ] ):
+    rc = system( commands ) 
+    if rc not in [ 0 ] + valid_return_codes:
+        raise Exception( 'Command sequence "%s" failed with return code %d' % ( commands, rc ) )
+    return rc

+ 49 - 0
regression/xsl_reports/utils/libxslt.py

@@ -0,0 +1,49 @@
+
+# Copyright (c) MetaCommunications, Inc. 2003-2007
+#
+# Distributed under the Boost Software License, Version 1.0. 
+# (See accompanying file LICENSE_1_0.txt or copy at 
+# http://www.boost.org/LICENSE_1_0.txt)
+
+import utils.makedirs
+import utils.rename
+import os.path
+import os
+import sys
+
+
+# Normalize a filesystem path for use on the xsltproc command line:
+# backslashes become forward slashes, and on win32 (when replace_spaces
+# is truthy) spaces are percent-encoded as '%20' so the path survives
+# shell quoting and URI handling inside libxslt.
+def xslt_param( path, replace_spaces = 1 ):
+    path = path.replace( '\\', '/' )
+    if sys.platform == 'win32' and replace_spaces:
+        path = path.replace( ' ', '%20' )
+    return path
+
+
+# Apply the stylesheet `xsl_file` to `xml_file` with xsltproc, writing to
+# `output_file`.  `log` is a callable used to echo the command line;
+# `parameters` is an optional dict of stylesheet parameters.
+# Raises Exception if xsltproc exits non-zero.
+# NOTE(review): utils.makedirs / utils.rename are imported as *modules*
+# at the top of this file; calling them as functions presumably relies on
+# utils/__init__.py re-exporting the functions -- verify.
+# NOTE(review): on win32 this chdir()s into the stylesheet directory and
+# never restores the previous cwd -- a process-wide side effect.
+# NOTE: the caller's `parameters` dict is mutated in place (values are
+# rewritten by xslt_param).
+def libxslt( log, xml_file, xsl_file, output_file, parameters = None ):
+
+    utils.makedirs( os.path.dirname( output_file ) )
+    
+    if sys.platform == 'win32':
+        os.chdir( os.path.dirname( xsl_file ) )
+
+    transform_command = 'xsltproc'
+    transform_command = transform_command + ' -o ' + '"%s"' % xslt_param( output_file )
+
+    if parameters is not None:
+         for i in parameters:
+             if parameters[i]:
+                 parameters[i] = xslt_param( parameters[i] )
+                 # Each value is wrapped in single quotes so libxslt treats
+                 # it as a string literal, not an XPath expression.
+                 transform_command = transform_command + ' --param %s "\'%s\'" ' % ( i, parameters[ i ] )
+
+    transform_command = transform_command + ' "%s" ' % xslt_param( xsl_file )
+    transform_command = transform_command + ' "%s" ' % xslt_param( xml_file )
+    log( transform_command )
+    rc = os.system( transform_command )
+    if rc != 0:
+        raise Exception( '"%s" failed with return code %d' % ( transform_command, rc ) )
+
+    # On win32 the -o argument had spaces encoded as '%20', so xsltproc may
+    # have created the file under that literal name; rename it back to the
+    # intended path.  ('xlst' is a long-standing typo for 'xslt'.)
+    output_file = xslt_param( output_file, 0 )
+    xlst_output_file = xslt_param( output_file )
+    if output_file != xlst_output_file and os.path.exists( xlst_output_file ):
+        utils.rename( log, xlst_output_file, output_file )
+

+ 18 - 0
regression/xsl_reports/utils/log.py

@@ -0,0 +1,18 @@
+
+import inspect
+import sys
+
+# Compute the current log indentation level by walking the call stack
+# (skipping the innermost three frames: this function, stdlog, and its
+# caller) and summing every local variable named '__log__' found in the
+# remaining frames.  Callers opt into deeper indentation by setting
+# __log__ = 1 in their local scope.
+# NOTE: dict.has_key() is Python 2 only.
+def log_level():
+   frames = inspect.stack()
+   level = 0
+   for i in frames[ 3: ]:
+       if i[0].f_locals.has_key( '__log__' ):
+           level = level + i[0].f_locals[ '__log__' ]
+   return level
+
+
+# Write one log line to stderr, prefixed with '# ' and indented four
+# spaces per stack-derived log level, then flush so output interleaves
+# correctly with child-process output.
+def stdlog( message ):
+    sys.stderr.write( '# ' + '    ' * log_level() +  message + '\n' )
+    sys.stderr.flush()
+
+# Module-level alias: the rest of the scripts call utils.log.log(...).
+log = stdlog

+ 7 - 0
regression/xsl_reports/utils/makedirs.py

@@ -0,0 +1,7 @@
+
+import os.path
+import os
+
+# Create `path` (including intermediate directories) if it does not
+# already exist.  NOTE(review): the exists/makedirs pair is not atomic;
+# a concurrent creator can still make os.makedirs raise OSError.
+def makedirs( path ):
+    if not os.path.exists( path ):
+        os.makedirs( path )

+ 17 - 0
regression/xsl_reports/utils/rename.py

@@ -0,0 +1,17 @@
+
+# Copyright (c) MetaCommunications, Inc. 2003-2007
+#
+# Distributed under the Boost Software License, Version 1.0. 
+# (See accompanying file LICENSE_1_0.txt or copy at 
+# http://www.boost.org/LICENSE_1_0.txt)
+
+import os.path
+import os
+
+
+# Rename `src` to `dst`, logging the operation via the `log` callable.
+# An existing destination is unlinked first because os.rename() on
+# Windows fails when the target already exists.
+def rename( log, src, dst ):
+    log( 'Renaming %s to %s' % ( src, dst ) )
+    if os.path.exists( dst ):
+        os.unlink( dst )
+
+    os.rename( src, dst )

+ 13 - 0
regression/xsl_reports/utils/send_mail.py

@@ -0,0 +1,13 @@
+
+import smtplib
+
+# Send a plain-text message from `mail` to itself.
+# NOTE(review): assumes the recipient's SMTP server is reachable at
+# 'mail.<domain>' (derived from the address) -- confirm this convention
+# still holds.  The SMTP connection is never quit()/closed explicitly.
+# The headers are assembled by hand; only Subject and To are set.
+def send_mail( mail, subject, msg = '' ):
+    smtp_server = smtplib.SMTP( 'mail.%s' % mail.split( '@' )[-1] )
+    smtp_server.sendmail( 
+          mail
+        , [ mail ]
+        , 'Subject: %s\n' % subject
+            + 'To: %s\n' % mail
+            + '\n'
+            + msg 
+        )

+ 48 - 0
regression/xsl_reports/utils/sourceforge.py

@@ -0,0 +1,48 @@
+
+import utils.checked_system
+import os
+import sys
+
+# Remote document root of the Boost site on the SourceForge shell host.
+site_dir = '/home/groups/b/bo/boost/htdocs/'
+
+# Pull `source` (path relative to site_dir) from the SourceForge web area
+# into the local `destination` via rsync over ssh, authenticating as
+# `user`.  On win32 the destination is converted to a POSIX path with
+# cygpath so the Cygwin rsync understands it.
+# NOTE(review): utils.checked_system is imported as a module at the top
+# of this file; calling it presumably relies on utils/__init__.py
+# re-exporting the function -- verify.
+def download( source, destination, user ):
+    if sys.platform == 'win32':
+        destination = os.popen( 'cygpath "%s"' % destination ).read().splitlines()[0]
+
+    utils.checked_system( [ 
+          'rsync -v -r -z --progress %(user)s@shell.sourceforge.net:%(site_dir)s%(source)s %(dest)s'
+                % { 'user': user, 'site_dir': site_dir, 'source': source, 'dest': destination }
+        ] )
+
+
+# Push the local `source` to `destination` (relative to site_dir) on the
+# SourceForge web area via rsync over ssh, authenticating as `user`.
+# Mirror image of download(); on win32 the *source* path is converted
+# with cygpath for the Cygwin rsync.
+def upload( source, destination, user ):
+    if sys.platform == 'win32':
+        source = os.popen( 'cygpath "%s"' % source ).read().splitlines()[0]
+        
+    utils.checked_system( [ 
+          'rsync -v -r -z --progress %(source)s %(user)s@shell.sourceforge.net:%(site_dir)s%(dest)s'
+                % { 'user': user, 'site_dir': site_dir, 'source': source, 'dest': destination }
+        ] )
+
+
+# Run a command sequence on the SourceForge shell host over ssh as
+# `user`; the commands are joined with '&&' so a failure aborts the rest.
+# With background=True, ssh -f detaches after authentication so the
+# remote commands keep running without blocking the caller.
+# Shadows utils.checked_system for remote execution within this module.
+def checked_system( commands, user, background = False ):
+    if not background:
+        cmd = 'ssh -l %s shell.sourceforge.net "%s"'
+    else:
+        cmd = 'ssh -f -l %s shell.sourceforge.net "%s"'
+
+    utils.checked_system( 
+          [ cmd % ( user, '&&'.join( commands ) ) ]
+        )
+
+
+# On the SourceForge shell host: cd into the archive's directory under
+# site_dir, extract the gzipped tarball in place (forcing files
+# writable so --overwrite can replace read-only ones), then delete the
+# archive.  Runs via the remote checked_system() above.
+def untar( archive, user, background ):
+    checked_system( 
+          [
+              'cd %s' % os.path.join( site_dir, os.path.dirname( archive ) )
+            , 'tar -x -z --overwrite --mode=+w -f %s' % os.path.basename( archive )
+            , 'rm -f %s' % archive
+            ]
+        , user = user
+        , background = background
+        )

+ 16 - 0
regression/xsl_reports/utils/tar.py

@@ -0,0 +1,16 @@
+
+import utils.checked_system
+import os.path
+
+# Create a gzipped tarball of everything inside `source_dir`; the
+# archive is written to the *parent* directory (../archive_name) so it
+# does not tar itself.  Note '*' is expanded by the shell and therefore
+# skips dot-files at the top level.
+def tar( source_dir, archive_name ):
+    utils.checked_system( [
+          'cd %s' % source_dir
+        , 'tar -c -f ../%s -z *' % archive_name
+        ] )
+
+# Extract `archive_path` into its own directory (cd first so relative
+# member paths land next to the archive).
+# NOTE(review): extraction uses -j (bzip2) while tar() above creates -z
+# (gzip) archives -- presumably the two functions serve different archive
+# sources; confirm before "fixing" the asymmetry.
+def untar( archive_path ):
+    #utils.checked_system( [ 'tar -xjf "%s"' % archive_path ] )
+    utils.checked_system( [ 
+          'cd %s' % os.path.dirname( archive_path )
+        , 'tar -xjf "%s"' % os.path.basename( archive_path )
+        ] )

+ 12 - 0
regression/xsl_reports/utils/zip.py

@@ -0,0 +1,12 @@
+
+import zipfile
+import os.path
+
+# Extract every member of the zip archive at `archive_path` into
+# `result_dir`.  The ZIP_DEFLATED argument only matters for writing and
+# is ignored in 'r' mode.
+# NOTE(review): member names are joined onto result_dir unchecked, so a
+# malicious archive with '..' components could escape result_dir
+# (zip-slip) -- only use on trusted archives.
+# NOTE(review): subdirectories are not created first; archives whose
+# members live in folders would make open() fail -- presumably all
+# expected archives are flat; confirm.
+def unzip( archive_path, result_dir ):
+    z = zipfile.ZipFile( archive_path, 'r', zipfile.ZIP_DEFLATED ) 
+    for f in z.infolist():
+        result = open( os.path.join( result_dir, f.filename ), 'wb' )
+        result.write( z.read( f.filename ) )
+        result.close()
+
+    z.close()

+ 144 - 0
regression/xsl_reports/xsl/add_expected_results.xsl

@@ -0,0 +1,144 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+
+Copyright MetaCommunications, Inc. 2003-2004.
+
+Distributed under the Boost Software License, Version 1.0. (See
+accompanying file LICENSE_1_0.txt or copy at
+http://www.boost.org/LICENSE_1_0.txt)
+
+-->
+
+<xsl:stylesheet 
+    xmlns:xsl="http://www.w3.org/1999/XSL/Transform" 
+    xmlns:meta="http://www.meta-comm.com"
+    exclude-result-prefixes="meta"
+    version="1.0">
+
+    <xsl:import href="common.xsl"/>
+  
+    <xsl:output method="xml" encoding="utf-8"/>
+      
+    <xsl:param name="expected_results_file"/>
+    <xsl:param name="failures_markup_file"/>
+    <xsl:variable name="expected_results" select="document( $expected_results_file )" />
+    <xsl:variable name="failures_markup" select="document( $failures_markup_file )" />
+
+    <!-- Entry point: process the whole document with the templates below. -->
+    <xsl:template match="/">
+        <xsl:apply-templates/>
+    </xsl:template>
+      
+    <!-- Annotate each test-log element with computed result/expected-result/
+         status/is-new attributes and a notes child, derived from the
+         expected-results and failures-markup documents loaded above. -->
+    <xsl:template match="test-log">
+        <xsl:variable name="library" select="@library"/>
+        <xsl:variable name="test-name" select="@test-name"/>
+        <xsl:variable name="toolset" select="@toolset"/>
+
+        <xsl:element name="{local-name()}">
+        <xsl:apply-templates select="@*"/>
+
+        <!-- 'fail' if any child step failed, else 'success'. -->
+        <xsl:variable name="actual_result">
+            <xsl:choose>
+            <!-- Hack: needs to be researched (and removed). See M.Wille's incident. -->
+            <xsl:when test="run/@result='succeed' and lib/@result='fail'">
+                <xsl:text>success</xsl:text>
+            </xsl:when>
+            <xsl:when test="./*/@result = 'fail'" >
+                <xsl:text>fail</xsl:text>
+            </xsl:when>
+            <xsl:otherwise>
+                <xsl:text>success</xsl:text>
+            </xsl:otherwise>
+            </xsl:choose>
+        </xsl:variable>
+
+        <xsl:variable name="expected_results_test_case" select="$expected_results//*/test-result[ @library=$library and ( @test-name=$test-name or @test-name='*' ) and @toolset = $toolset]"/>
+        <xsl:variable name="new_failures_markup" select="$failures_markup//library[@name=$library]/mark-expected-failures[ meta:re_match( test/@name, $test-name ) and meta:re_match( toolset/@name, $toolset ) ]"/>
+        <!-- NOTE: this local variable shadows the global $failures_markup
+             document variable for the rest of the template (legal in
+             XSLT 1.0: a local may shadow a global). -->
+        <xsl:variable name="failures_markup" select="$failures_markup//library[@name=$library]/test[ meta:re_match( @name, $test-name ) ]/mark-failure[ meta:re_match( toolset/@name, $toolset ) ]"/>
+        <!-- A test is "new" when no expected-results entry matches it. -->
+        <xsl:variable name="is_new">
+            <xsl:choose>
+                <xsl:when test="$expected_results_test_case">
+                <xsl:text>no</xsl:text>
+                </xsl:when>
+                <xsl:otherwise>yes</xsl:otherwise>
+            </xsl:choose>
+        </xsl:variable>
+
+        <!-- Explicit failure markup overrides the expected-results file. -->
+        <xsl:variable name="expected_result">
+            <xsl:choose>
+            <xsl:when test='count( $failures_markup ) &gt; 0 or count( $new_failures_markup ) &gt; 0'>
+                <xsl:text>fail</xsl:text>
+            </xsl:when>
+              
+            <xsl:otherwise>
+                <xsl:choose>
+                <xsl:when test="$expected_results_test_case and $expected_results_test_case/@result = 'fail'">
+                    <xsl:text>fail</xsl:text>
+                </xsl:when>
+                  
+                <xsl:otherwise>success</xsl:otherwise>
+                </xsl:choose>
+            </xsl:otherwise>
+            </xsl:choose>
+        </xsl:variable>
+
+        <!-- NOTE: both branches below are identical (expected/unexpected
+             comparison); the outer choose is redundant but kept as-is. -->
+        <xsl:variable name="status">
+            <xsl:choose>
+            <xsl:when test="count( $failures_markup ) &gt; 0 or count( $new_failures_markup ) &gt; 0">
+                <xsl:choose>
+                <xsl:when test="$expected_result = $actual_result">expected</xsl:when>
+                <xsl:otherwise>unexpected</xsl:otherwise>
+                </xsl:choose>
+            </xsl:when>
+
+            <xsl:otherwise>
+                <xsl:choose>
+                <xsl:when test="$expected_result = $actual_result">expected</xsl:when>
+                <xsl:otherwise>unexpected</xsl:otherwise>
+                </xsl:choose>
+            </xsl:otherwise>
+              
+            </xsl:choose>
+        </xsl:variable>
+
+        <!-- Collect notes from per-test markup first, else from the
+             library-wide mark-expected-failures markup. -->
+        <xsl:variable name="notes">
+            <xsl:choose>
+
+            <xsl:when test='count( $failures_markup ) &gt; 0'>
+                <xsl:for-each select="$failures_markup/note">
+                <xsl:copy-of select="."/>
+                </xsl:for-each>
+            </xsl:when>
+
+            <xsl:when test='count( $new_failures_markup ) &gt; 0'>
+                <xsl:for-each select="$new_failures_markup/note">
+                <xsl:copy-of select="."/>
+                </xsl:for-each>
+            </xsl:when>
+              
+            </xsl:choose>
+        </xsl:variable>
+
+        <xsl:attribute name="result"><xsl:value-of select="$actual_result"/></xsl:attribute>
+        <xsl:attribute name="expected-result"><xsl:value-of select="$expected_result"/></xsl:attribute>
+        <xsl:attribute name="status"><xsl:value-of select="$status"/></xsl:attribute>
+        <xsl:attribute name="is-new"><xsl:value-of select="$is_new"/></xsl:attribute>
+        <!--<a><xsl:value-of select="count( $failures_markup )"/></a>-->
+        <xsl:element name="notes"><xsl:copy-of select="$notes"/></xsl:element>
+
+
+        <xsl:apply-templates select="node()" />
+        </xsl:element>
+    </xsl:template>
+
+    <!-- Identity-style copy for every other element (rebuilds the element
+         rather than xsl:copy, dropping namespace nodes). -->
+    <xsl:template match="*">
+        <xsl:element name="{local-name()}">
+        <xsl:apply-templates select="@*"/>
+        <xsl:apply-templates select="node()" />
+        </xsl:element>
+    </xsl:template>
+
+    <!-- Copy attributes through unchanged. -->
+    <xsl:template match="@*">
+        <xsl:copy-of select="." />
+    </xsl:template>
+
+</xsl:stylesheet>

+ 182 - 0
regression/xsl_reports/xsl/common.xsl

@@ -0,0 +1,182 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+
+Copyright MetaCommunications, Inc. 2003-2004.
+
+Distributed under the Boost Software License, Version 1.0. (See
+accompanying file LICENSE_1_0.txt or copy at
+http://www.boost.org/LICENSE_1_0.txt)
+
+-->
+
+<xsl:stylesheet 
+    xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+    xmlns:exsl="http://exslt.org/common"
+    xmlns:func="http://exslt.org/functions"
+    xmlns:str="http://exslt.org/strings"
+    xmlns:meta="http://www.meta-comm.com"
+    extension-element-prefixes="func"
+    version="1.0">
+
+    <xsl:variable name="output_directory" select="'output'"/>
+
+    <!-- Emit one <toolset> element per entry in $toolsets, each carrying
+         required="yes|no" (whether it appears in $required-toolsets) and a
+         sort key so required toolsets ('a') come before the rest ('z');
+         output is sorted accordingly. -->
+    <xsl:template name="get_toolsets">
+    <xsl:param name="toolsets"/>
+    <xsl:param name="required-toolsets"/>
+
+    <xsl:variable name="toolset_output">
+        <xsl:for-each select="$toolsets">
+        <xsl:variable name="toolset" select="."/>
+        <xsl:element name="toolset">
+            <xsl:attribute name="toolset"><xsl:value-of select="$toolset"/></xsl:attribute>
+            <xsl:choose>
+            <!-- FIX: was $required_toolsets (underscore), which is never
+                 declared -- an undefined-variable error in XSLT 1.0; the
+                 parameter is named 'required-toolsets'. -->
+            <xsl:when test="$required-toolsets[ $toolset = @name ]">
+                <xsl:attribute name="required">yes</xsl:attribute>
+                <xsl:attribute name="sort">a</xsl:attribute>
+            </xsl:when>
+            <xsl:otherwise>
+                <xsl:attribute name="required">no</xsl:attribute>
+                <xsl:attribute name="sort">z</xsl:attribute>
+            </xsl:otherwise>
+            </xsl:choose>
+        </xsl:element>
+        </xsl:for-each>
+    </xsl:variable>
+
+    <xsl:for-each select="exsl:node-set( $toolset_output )/toolset">
+        <xsl:sort select="concat( @sort, ' ', @toolset)" order="ascending"/>
+        <xsl:copy-of select="."/>
+    </xsl:for-each>
+
+    </xsl:template>
+
+  <!-- True when a test-log's output should be shown in the report: any
+       non-success on a usable library/toolset combination, or when the
+       test explicitly requests show-run-output. -->
+  <func:function name="meta:show_output">
+      <xsl:param name="explicit_markup"/>     
+      <xsl:param name="test_log"/>     
+      <func:result select="$test_log/@result != 'success' and not( meta:is_unusable( $explicit_markup, $test_log/@library, $test_log/@toolset )) or $test_log/@show-run-output = 'true'"/>
+  </func:function>
+
+    <!-- True for the test types counted as test cases in the report
+         (compile, compile_fail, run, run_pyd). -->
+    <func:function name="meta:is_test_log_a_test_case">
+        <xsl:param name="test_log"/>      
+        <func:result select="$test_log/@test-type='compile' or $test_log/@test-type='compile_fail' or $test_log/@test-type='run' or $test_log/@test-type='run_pyd'"/>
+    </func:function>
+
+    <!-- Node-set (truthy when non-empty) of mark-unusable entries for the
+         given library/toolset; '*' in the markup matches any toolset. -->
+    <func:function name="meta:is_unusable">
+        <xsl:param name="explicit_markup"/>
+        <xsl:param name="library"/>
+        <xsl:param name="toolset"/>
+        
+        <func:result select="$explicit_markup//library[ @name = $library ]/mark-unusable[ toolset/@name = $toolset or toolset/@name='*' ]"/>
+    </func:function>
+
+    <!-- Minimal glob matcher: supports literal patterns, '*', '*tail',
+         'head*' and '*infix*'.
+         NOTE(review): a pattern with an interior '*' (e.g. 'a*b') matches
+         no branch, so no func:result is produced -- libxslt reports an
+         error in that case; presumably such patterns never occur in the
+         markup files. -->
+    <func:function name="meta:re_match">
+        <xsl:param name="pattern"/>
+        <xsl:param name="text"/>
+        
+        <xsl:choose>
+            <xsl:when test="not( contains( $pattern, '*' ) )">
+                <func:result select="$text = $pattern"/>
+            </xsl:when>
+            <xsl:when test="$pattern = '*'">
+                <func:result select="1 = 1"/>
+            </xsl:when>
+            <xsl:when test="substring( $pattern, 1, 1 ) = '*' and substring( $pattern, string-length($pattern), 1 ) = '*' ">
+                <func:result select="contains( $text, substring( $pattern, 2, string-length($pattern) - 2 ) ) "/>
+            </xsl:when>
+            <xsl:when test="substring( $pattern, 1, 1 ) = '*'">
+                <xsl:variable name="pattern_tail" select="substring( $pattern, 2, string-length($pattern) - 1 )"/>
+                <func:result select="substring( $text, string-length($text) - string-length($pattern_tail) + 1, string-length($pattern_tail) ) = $pattern_tail"/>
+            </xsl:when>
+            <xsl:when test="substring( $pattern, string-length($pattern), 1 ) = '*' ">
+                <xsl:variable name="pattern_head" select="substring( $pattern, 1, string-length($pattern) - 2 )"/>
+                <func:result select="substring( $text, 1, string-length($pattern_head) ) = $pattern_head "/>
+            </xsl:when>
+        </xsl:choose>
+    </func:function>
+
+    <!-- Turn a path into a flat file-name token: '/' then '.' become '-'.
+         (The '/'-to-nothing mapping in the inner translate's second pass
+         is moot: the first pass already removed all slashes.) -->
+    <func:function name="meta:encode_path">
+        <xsl:param name="path"/>
+        <func:result select="translate( translate( $path, '/', '-' ), './', '-' )"/>
+    </func:function>
+
+    <!-- Identity mapping today; kept as a hook so display names for
+         toolsets can be introduced in one place later. -->
+    <func:function name="meta:toolset_name">
+        <xsl:param name="name"/>
+        <func:result select="$name"/>
+    </func:function>
+
+    <!-- Build '<output_directory>/<encoded path>.html' for a per-test
+         output page. -->
+    <func:function name="meta:output_file_path">
+        <xsl:param name="path"/>
+        <func:result select="concat( $output_directory, '/', meta:encode_path( $path ), '.html' )"/>
+    </func:function>
+
+    <!-- Render a div.notes containing one div per note; each note may
+         reference (by @refid) a shared note defined in the explicit
+         markup document, which show_note merges in. -->
+    <xsl:template name="show_notes">
+        <xsl:param name="explicit_markup"/>
+        <xsl:param name="notes"/>
+        <div class="notes">
+            <xsl:for-each select="$notes">
+            <div>
+                <xsl:variable name="refid" select="@refid"/>
+                <xsl:call-template name="show_note">
+                    <xsl:with-param name="note" select="."/>
+                    <xsl:with-param name="reference" select="$explicit_markup//note[ $refid = @id ]"/>
+                </xsl:call-template>
+            </div>
+            </xsl:for-each>
+        </div>
+    </xsl:template>
+
+    <!-- Render one note: an "[ author date ]" header (author/date taken
+         from the note itself, falling back to the referenced shared
+         note), followed by the referenced note's body and then the
+         note's own body. -->
+    <xsl:template name="show_note">
+        <xsl:param name="note"/>
+        <xsl:param name="reference"/>
+        <div class="note">
+            <xsl:variable name="author">
+                <xsl:choose>
+                    <xsl:when test="$note/@author">
+                        <xsl:value-of select="$note/@author"/>
+                    </xsl:when>
+                    <xsl:when test="$reference">
+                        <xsl:value-of select="$reference/@author"/>                               
+                    </xsl:when>
+                    <xsl:otherwise>
+                        <xsl:text/>
+                    </xsl:otherwise>
+                </xsl:choose>
+            </xsl:variable>
+
+            <xsl:variable name="date">
+                <xsl:choose>
+                    <xsl:when test="$note/@date">
+                        <xsl:value-of select="$note/@date"/>
+                    </xsl:when>
+                    <xsl:when test="$reference">
+                        <xsl:value-of select="$reference/@date"/>                      
+                    </xsl:when>
+                    <xsl:otherwise>
+                        <xsl:text/>
+                    </xsl:otherwise>
+                </xsl:choose>
+            </xsl:variable>
+
+        <span class="note-header">
+            <xsl:choose>
+                <xsl:when test="$author != '' and $date != ''">
+                    [&#160;<xsl:value-of select="$author"/>&#160;<xsl:value-of select="$date"/>&#160;]
+                </xsl:when>
+                <xsl:when test="$author != ''">
+                    [&#160;<xsl:value-of select="$author"/>&#160;]                        
+                </xsl:when>
+                <xsl:when test="$date != ''">
+                    [&#160;<xsl:value-of select="$date"/>&#160;]                        
+                </xsl:when>
+            </xsl:choose>
+        </span>
+
+        <xsl:if test="$reference">
+            <xsl:copy-of select="$reference/node()"/>                
+        </xsl:if>
+        <xsl:copy-of select="$note/node()"/>      
+
+        </div>
+    </xsl:template>
+
+</xsl:stylesheet>

+ 36 - 0
regression/xsl_reports/xsl/html/issues_legend.html

@@ -0,0 +1,36 @@
+<!--
+
+Copyright MetaCommunications, Inc. 2003-2004.
+
+Distributed under the Boost Software License, Version 1.0. (See
+accompanying file LICENSE_1_0.txt or copy at
+http://www.boost.org/LICENSE_1_0.txt)
+
+-->
+
+<div class="legend">
+<table border="0" summary="report description">
+<tr>
+    <td>
+    <table border="0" summary="legend">
+        <tr>
+            <td>
+                <table width="100%" summary="unexpected new fail legend">
+                <tr class="library-row-single"><td class="library-fail-unexpected-new">&lt;toolset&gt;</td></tr>
+                </table>
+            </td>
+            <td class="legend-item">Failure on a newly added test/compiler.</td>
+        </tr>
+        <tr>
+            <td>
+                <table width="100%" summary="unexpected fail legend">
+                <tr class="library-row-single"><td class="library-fail-unexpected">&lt;toolset&gt;</td></tr>
+                </table>
+            </td>
+            <td class="legend-item">Unexpected failure.</td>
+        </tr>
+    </table>
+    </td>
+</tr>
+</table>
+</div>

+ 72 - 0
regression/xsl_reports/xsl/html/library_developer_legend.html

@@ -0,0 +1,72 @@
+<!--
+
+Copyright MetaCommunications, Inc. 2003-2004.
+
+Distributed under the Boost Software License, Version 1.0. (See
+accompanying file LICENSE_1_0.txt or copy at
+http://www.boost.org/LICENSE_1_0.txt)
+
+-->
+
+<div class="legend">
+<table border="0" summary="report description">
+<tr>
+    <td>
+    <table border="0" summary="legend">
+        <tr>
+            <td>
+                <table width="100%" summary="success legend">
+                <tr class="library-row-single"><td class="library-success-expected">pass</td></tr>
+                </table>
+            </td>
+            <td class="legend-item">Success.</td>
+        </tr>
+        <tr>
+            <td>
+                <table width="100%" summary="unexpected pass legend">
+                <tr class="library-row-single"><td class="library-success-unexpected">pass</td></tr>
+                </table>
+            </td>
+            <td class="legend-item">Unexpected success.</td>
+        </tr>
+        <tr>
+            <td>
+                <table width="100%" summary="expected fail legend">
+                <tr class="library-row-single"><td class="library-fail-expected">fail</td></tr>
+                </table>
+            </td>
+            <td class="legend-item">Expected failure.</td>
+        </tr>
+    </table>
+    </td>
+    <td>
+    <table border="0" summary="legend">
+        <tr>
+            <td>
+                <table width="100%" summary="unexpected new fail legend">
+                <tr class="library-row-single"><td class="library-fail-unexpected-new">fail</td></tr>
+                </table>
+            </td>
+            <td class="legend-item">Failure on a newly added test/compiler.</td>
+        </tr>
+        <tr>
+            <td>
+                <table width="100%" summary="unexpected fail legend">
+                <tr class="library-row-single"><td class="library-fail-unexpected">fail</td></tr>
+                </table>
+            </td>
+            <td class="legend-item">Unexpected failure.</td>
+        </tr>
+        <tr>
+            <td>
+                <table width="100%" summary="unusable legend">
+                <tr class="library-row-single"><td class="library-unusable">n/a</td></tr>
+                </table>
+            </td>
+            <td class="legend-item">The library author marked it as unusable on particular platform/toolset.</td>
+        </tr>
+    </table>
+    </td>
+</tr>
+</table>
+</div>

+ 65 - 0
regression/xsl_reports/xsl/html/library_user_legend.html

@@ -0,0 +1,65 @@
+<!--
+
+Copyright MetaCommunications, Inc. 2003-2004.
+
+Distributed under the Boost Software License, Version 1.0. (See
+accompanying file LICENSE_1_0.txt or copy at
+http://www.boost.org/LICENSE_1_0.txt)
+
+-->
+
+<div class="legend">
+<table border="0" summary="report description">
+<tr>
+    <td>
+    <table border="0" summary="legend">
+        <tr>
+            <td>
+                <table width="100%" summary="success legend">
+                    <tr class="library-row-single"><td class="library-user-success">pass</td></tr>
+                </table>
+            </td>
+            <td class="legend-item">
+                The test passes.
+            </td>
+        </tr>
+        <tr>
+            <td>
+                <table width="100%" summary="fail legend">
+                    <tr class="library-row-single"><td class="library-user-fail-expected">fail</td></tr>
+                </table>
+            </td>
+            <td class="legend-item">
+                A known test failure; click on the link to see the log.
+            </td>
+        </tr>
+    </table>
+    </td>
+    <td>
+    <table border="0" summary="legend">
+        <tr>
+            <td>
+                <table width="100%" summary="unexpected fail legend">
+                    <tr class="library-row-single"><td class="library-user-fail-unexpected">unexp.</td></tr>
+                </table>
+            </td>
+            <td class="legend-item">
+                The test is known to pass, but is currently failing; 
+                click on the link to see the log.
+            </td>
+        </tr>
+        <tr>
+            <td>
+                <table width="100%" summary="unusable legend">
+                    <tr class="library-row-single"><td class="library-unusable">n/a</td></tr>
+                </table>
+            </td>
+            <td class="legend-item">
+                The library author marked it as unusable on particular platform/toolset.
+            </td>
+        </tr>
+    </table>
+  </td>
+</tr>
+</table>
+</div>

Некоторые файлы не были показаны из-за большого количества измененных файлов

粤ICP备19079148号