Attachment "623787.patch" to
ticket [623787ffff]
added by
dgp
2004-02-24 03:10:50.
Index: library/tcltest/pkgIndex.tcl
===================================================================
RCS file: /cvsroot/tcl/tcl/library/tcltest/pkgIndex.tcl,v
retrieving revision 1.17
diff -u -r1.17 pkgIndex.tcl
--- library/tcltest/pkgIndex.tcl 3 Feb 2004 18:51:10 -0000 1.17
+++ library/tcltest/pkgIndex.tcl 23 Feb 2004 20:01:01 -0000
@@ -9,4 +9,4 @@
# full path name of this file's directory.
if {![package vsatisfies [package provide Tcl] 8.3]} {return}
-package ifneeded tcltest 2.2.5 [list source [file join $dir tcltest.tcl]]
+package ifneeded tcltest 2.3 [list source [file join $dir tcltest.tcl]]
Index: library/tcltest/tcltest.tcl
===================================================================
RCS file: /cvsroot/tcl/tcl/library/tcltest/tcltest.tcl,v
retrieving revision 1.87
diff -u -r1.87 tcltest.tcl
--- library/tcltest/tcltest.tcl 18 Feb 2004 01:41:42 -0000 1.87
+++ library/tcltest/tcltest.tcl 23 Feb 2004 20:01:01 -0000
@@ -14,17 +14,31 @@
# Copyright (c) 1998-1999 by Scriptics Corporation.
# Copyright (c) 2000 by Ajuba Solutions
# Contributions from Don Porter, NIST, 2002. (not subject to US copyright)
-# All rights reserved.
+# Contributions from David N. Welton <[email protected]>, 2003.
+#
+# See the 'license.terms' file in the Tcl distribution for licensing terms.
#
# RCS: @(#) $Id: tcltest.tcl,v 1.87 2004/02/18 01:41:42 dgp Exp $
package require Tcl 8.3 ;# uses [glob -directory]
+ namespace eval tcltest {
+ # This namespace holds information about the files evaluated.
+ namespace eval files {}
+ # This namespace is for information about test constraints.
+ namespace eval constraints {}
+}
+
+source [file join [file dirname [info script]] tcltestapi.tcl]
+source [file join [file dirname [info script]] constraints.tcl]
+source [file join [file dirname [info script]] testresults.tcl]
+source [file join [file dirname [info script]] files.tcl]
+
namespace eval tcltest {
# When the version number changes, be sure to update the pkgIndex.tcl file,
# and the install directory in the Makefiles. When the minor version
# changes (new feature) be sure to update the man page as well.
- variable Version 2.2.5
+ variable Version 2.3
# Compatibility support for dumb variables defined in tcltest 1
# Do not use these. Call [package provide Tcl] and [info patchlevel]
@@ -32,7 +46,12 @@
variable version [package provide Tcl]
variable patchLevel [info patchlevel]
-##### Export the public tcltest procs; several categories
+ # Default callbacks.
+ testcallback tcltest::printcallback
+ printstatscallback tcltest::PrintStats
+ restorecallback tcltest::Restore
+
+ ##### Export the public tcltest procs; several categories
#
# Export the main functional commands that do useful things
namespace export cleanupTests loadTestedCommands makeDirectory \
@@ -172,29 +191,6 @@
# save the original environment so that it can be restored later
ArrayDefault originalEnv [array get ::env]
- # initialize numTests array to keep track of the number of tests
- # that pass, fail, and are skipped.
- ArrayDefault numTests [list Total 0 Passed 0 Skipped 0 Failed 0]
-
- # createdNewFiles will store test files as indices and the list of
- # files (that should not have been) left behind by the test files
- # as values.
- ArrayDefault createdNewFiles {}
-
- # initialize skippedBecause array to keep track of constraints that
- # kept tests from running; a constraint name of "userSpecifiedSkip"
- # means that the test appeared on the list of tests that matched the
- # -skip value given to the flag; "userSpecifiedNonMatch" means that
- # the test didn't match the argument given to the -match flag; both
- # of these constraints are counted only if tcltest::debug is set to
- # true.
- ArrayDefault skippedBecause {}
-
- # initialize the testConstraints array to keep track of valid
- # predefined constraints (see the explanation for the
- # InitConstraints proc for more details).
- ArrayDefault testConstraints {}
-
##### Initialize internal variables of tcltest, but only if the caller
# has not already pre-initialized them. This is done to support
# compatibility with older tests that directly access internals
@@ -220,10 +216,9 @@
# The currentFailure var stores the boolean value of whether the
# current test file has had any failures. The failFiles list
# stores the names of test files that had failures.
- Default numTestFiles 0 AcceptInteger
+# Default numTestFiles 0 AcceptInteger
Default testSingleFile true AcceptBoolean
Default currentFailure false AcceptBoolean
- Default failFiles {} AcceptList
# Tests should remove all files they create. The test suite will
# check the current working dir for files created by the tests.
@@ -249,8 +244,12 @@
# Kept only for compatibility
Default constraintsSpecified {} AcceptList
- trace variable constraintsSpecified r {set ::tcltest::constraintsSpecified \
- [array names ::tcltest::testConstraints] ;# }
+ # trace variable constraintsSpecified r {set
+ # ::tcltest::constraintsSpecified [array names
+ # ::tcltest::testConstraints] ;# }
+ trace variable constraintsSpecified r \
+ {set ::tcltest::constraintsSpecified [::tcltest::constraints::getlist] ;# }
+
# tests that use threads need to know which is the main thread
Default mainThread 1
@@ -280,7 +279,7 @@
# Set the location of the execuatble
Default tcltest [info nameofexecutable]
trace variable tcltest w [namespace code {testConstraint stdio \
- [eval [ConstraintInitializer stdio]] ;#}]
+ [eval [constraints::ConstraintInitializer stdio]] ;#}]
# save the platform information so it can be restored later
Default originalTclPlatform [array get ::tcl_platform]
@@ -487,6 +486,7 @@
variable OptionControlledVariables
set Usage($option) $usage
set Verify($option) $verify
+
if {[catch {$verify $value} msg]} {
return -code error $msg
} else {
@@ -520,6 +520,7 @@
proc MatchingOption {option} {
variable Option
+
set match [array names Option $option*]
switch -- [llength $match] {
0 {
@@ -576,6 +577,7 @@
if {$n == 0} {
return [lsort [array names Option]]
}
+
if {$n == 1} {
if {[catch {MatchingOption [lindex $args 0]} option]} {
return -code error $option
@@ -605,7 +607,7 @@
set code [catch {eval Configure $args} msg]
return -code $code $msg
}
-
+
proc AcceptVerbose { level } {
set level [AcceptList $level]
if {[llength $level] == 1} {
@@ -701,9 +703,9 @@
# Don't run only the "-constraint" specified tests by default
proc ClearUnselectedConstraints args {
variable Option
- variable testConstraints
+
if {!$Option(-limitconstraints)} {return}
- foreach c [array names testConstraints] {
+ foreach c [constraints::getlist] {
if {[lsearch -exact $Option(-constraints) $c] == -1} {
testConstraint $c 0
}
@@ -777,8 +779,9 @@
trace variable Option(-loadfile) w [namespace code ReadLoadScript]
proc AcceptOutFile { file } {
- if {[string equal stderr $file]} {return $file}
- if {[string equal stdout $file]} {return $file}
+ if {[string equal stderr $file] || [string equal stdout $file]} {
+ return $file
+ }
return [file join [temporaryDirectory] $file]
}
@@ -796,6 +799,36 @@
trace variable Option(-errfile) w \
[namespace code {errorChannel $Option(-errfile) ;#}]
+ Option -api 0 {
+ Use tcltest results API to access test results.
+ } AcceptBoolean api
+
+ Option -dump 0 {
+ Dump the results of the tests so that they can be read for use with the results API.
+ } AcceptBoolean dump
+
+ namespace eval files {
+ set filelist [list]
+ tcltest::Default failFiles {} AcceptList
+ }
+
+ namespace eval constraints {
+ # initialize skippedBecause array to keep track of constraints
+ # that kept tests from running; a constraint name of
+ # "userSpecifiedSkip" means that the test appeared on the list
+ # of tests that matched the -skip value given to the flag;
+ # "userSpecifiedNonMatch" means that the test didn't match the
+ # argument given to the -match flag; both of these constraints
+ # are counted only if tcltest::debug is set to true.
+ array set skippedBecause {}
+
+ # initialize the vars namespace to keep track of valid
+ # predefined constraints (see the explanation for the
+ # InitConstraints proc for more details).
+ namespace eval vars {}
+ #array set testConstraints {}
+ }
+
}
#####################################################################
@@ -925,21 +958,24 @@
# none
proc tcltest::testConstraint {constraint {value ""}} {
- variable testConstraints
variable Option
+
DebugPuts 3 "entering testConstraint $constraint $value"
+
if {[llength [info level 0]] == 2} {
- return $testConstraints($constraint)
+ return [constraints::cset $constraint]
+ # return $testConstraints($constraint)
}
# Check for boolean values
if {[catch {expr {$value && $value}} msg]} {
return -code error $msg
}
- if {[limitConstraints]
- && [lsearch -exact $Option(-constraints) $constraint] == -1} {
+ if {[limitConstraints] && \
+ [lsearch -exact $Option(-constraints) $constraint] == -1} {
set value 0
}
- set testConstraints($constraint) $value
+ constraints::cset $constraint $value
+ # set testConstraints($constraint) $value
}
# tcltest::interpreter --
@@ -969,34 +1005,6 @@
#####################################################################
-# tcltest::AddToSkippedBecause --
-#
-# Increments the variable used to track how many tests were
-# skipped because of a particular constraint.
-#
-# Arguments:
-# constraint The name of the constraint to be modified
-#
-# Results:
-# Modifies tcltest::skippedBecause; sets the variable to 1 if
-# didn't previously exist - otherwise, it just increments it.
-#
-# Side effects:
-# None.
-
-proc tcltest::AddToSkippedBecause { constraint {value 1}} {
- # add the constraint to the list of constraints that kept tests
- # from running
- variable skippedBecause
-
- if {[info exists skippedBecause($constraint)]} {
- incr skippedBecause($constraint) $value
- } else {
- set skippedBecause($constraint) $value
- }
- return
-}
-
# tcltest::PrintError --
#
# Prints errors to tcltest::errorChannel and then flushes that
@@ -1058,274 +1066,7 @@
return
}
-# tcltest::SafeFetch --
-#
-# The following trace procedure makes it so that we can safely
-# refer to non-existent members of the testConstraints array
-# without causing an error. Instead, reading a non-existent
-# member will return 0. This is necessary because tests are
-# allowed to use constraint "X" without ensuring that
-# testConstraints("X") is defined.
-#
-# Arguments:
-# n1 - name of the array (testConstraints)
-# n2 - array key value (constraint name)
-# op - operation performed on testConstraints (generally r)
-#
-# Results:
-# none
-#
-# Side effects:
-# sets testConstraints($n2) to 0 if it's referenced but never
-# before used
-
-proc tcltest::SafeFetch {n1 n2 op} {
- variable testConstraints
- DebugPuts 3 "entering SafeFetch $n1 $n2 $op"
- if {[string equal {} $n2]} {return}
- if {![info exists testConstraints($n2)]} {
- if {[catch {testConstraint $n2 [eval [ConstraintInitializer $n2]]}]} {
- testConstraint $n2 0
- }
- }
-}
-
-# tcltest::ConstraintInitializer --
-#
-# Get or set a script that when evaluated in the tcltest namespace
-# will return a boolean value with which to initialize the
-# associated constraint.
-#
-# Arguments:
-# constraint - name of the constraint initialized by the script
-# script - the initializer script
-#
-# Results
-# boolean value of the constraint - enabled or disabled
-#
-# Side effects:
-# Constraint is initialized for future reference by [test]
-proc tcltest::ConstraintInitializer {constraint {script ""}} {
- variable ConstraintInitializer
- DebugPuts 3 "entering ConstraintInitializer $constraint $script"
- if {[llength [info level 0]] == 2} {
- return $ConstraintInitializer($constraint)
- }
- # Check for boolean values
- if {![info complete $script]} {
- return -code error "ConstraintInitializer must be complete script"
- }
- set ConstraintInitializer($constraint) $script
-}
-
-# tcltest::InitConstraints --
-#
-# Call all registered constraint initializers to force initialization
-# of all known constraints.
-# See the tcltest man page for the list of built-in constraints defined
-# in this procedure.
-#
-# Arguments:
-# none
-#
-# Results:
-# The testConstraints array is reset to have an index for each
-# built-in test constraint.
-#
-# Side Effects:
-# None.
-#
-
-proc tcltest::InitConstraints {} {
- variable ConstraintInitializer
- initConstraintsHook
- foreach constraint [array names ConstraintInitializer] {
- testConstraint $constraint
- }
-}
-
-proc tcltest::DefineConstraintInitializers {} {
- ConstraintInitializer singleTestInterp {singleProcess}
-
- # All the 'pc' constraints are here for backward compatibility and
- # are not documented. They have been replaced with equivalent 'win'
- # constraints.
-
- ConstraintInitializer unixOnly \
- {string equal $::tcl_platform(platform) unix}
- ConstraintInitializer macOnly \
- {string equal $::tcl_platform(platform) macintosh}
- ConstraintInitializer pcOnly \
- {string equal $::tcl_platform(platform) windows}
- ConstraintInitializer winOnly \
- {string equal $::tcl_platform(platform) windows}
-
- ConstraintInitializer unix {testConstraint unixOnly}
- ConstraintInitializer mac {testConstraint macOnly}
- ConstraintInitializer pc {testConstraint pcOnly}
- ConstraintInitializer win {testConstraint winOnly}
-
- ConstraintInitializer unixOrPc \
- {expr {[testConstraint unix] || [testConstraint pc]}}
- ConstraintInitializer macOrPc \
- {expr {[testConstraint mac] || [testConstraint pc]}}
- ConstraintInitializer unixOrWin \
- {expr {[testConstraint unix] || [testConstraint win]}}
- ConstraintInitializer macOrWin \
- {expr {[testConstraint mac] || [testConstraint win]}}
- ConstraintInitializer macOrUnix \
- {expr {[testConstraint mac] || [testConstraint unix]}}
-
- ConstraintInitializer nt {string equal $::tcl_platform(os) "Windows NT"}
- ConstraintInitializer 95 {string equal $::tcl_platform(os) "Windows 95"}
- ConstraintInitializer 98 {string equal $::tcl_platform(os) "Windows 98"}
-
- # The following Constraints switches are used to mark tests that
- # should work, but have been temporarily disabled on certain
- # platforms because they don't and we haven't gotten around to
- # fixing the underlying problem.
-
- ConstraintInitializer tempNotPc {expr {![testConstraint pc]}}
- ConstraintInitializer tempNotWin {expr {![testConstraint win]}}
- ConstraintInitializer tempNotMac {expr {![testConstraint mac]}}
- ConstraintInitializer tempNotUnix {expr {![testConstraint unix]}}
-
- # The following Constraints switches are used to mark tests that
- # crash on certain platforms, so that they can be reactivated again
- # when the underlying problem is fixed.
-
- ConstraintInitializer pcCrash {expr {![testConstraint pc]}}
- ConstraintInitializer winCrash {expr {![testConstraint win]}}
- ConstraintInitializer macCrash {expr {![testConstraint mac]}}
- ConstraintInitializer unixCrash {expr {![testConstraint unix]}}
-
- # Skip empty tests
- ConstraintInitializer emptyTest {format 0}
-
- # By default, tests that expose known bugs are skipped.
-
- ConstraintInitializer knownBug {format 0}
-
- # By default, non-portable tests are skipped.
-
- ConstraintInitializer nonPortable {format 0}
-
- # Some tests require user interaction.
-
- ConstraintInitializer userInteraction {format 0}
-
- # Some tests must be skipped if the interpreter is not in
- # interactive mode
-
- ConstraintInitializer interactive \
- {expr {[info exists ::tcl_interactive] && $::tcl_interactive}}
-
- # Some tests can only be run if the installation came from a CD
- # image instead of a web image. Some tests must be skipped if you
- # are running as root on Unix. Other tests can only be run if you
- # are running as root on Unix.
-
- ConstraintInitializer root {expr \
- {[string equal unix $::tcl_platform(platform)]
- && ([string equal root $::tcl_platform(user)]
- || [string equal "" $::tcl_platform(user)])}}
- ConstraintInitializer notRoot {expr {![testConstraint root]}}
-
- # Set nonBlockFiles constraint: 1 means this platform supports
- # setting files into nonblocking mode.
-
- ConstraintInitializer nonBlockFiles {
- set code [expr {[catch {set f [open defs r]}]
- || [catch {fconfigure $f -blocking off}]}]
- catch {close $f}
- set code
- }
-
- # Set asyncPipeClose constraint: 1 means this platform supports
- # async flush and async close on a pipe.
- #
- # Test for SCO Unix - cannot run async flushing tests because a
- # potential problem with select is apparently interfering.
- # (Mark Diekhans).
-
- ConstraintInitializer asyncPipeClose {expr {
- !([string equal unix $::tcl_platform(platform)]
- && ([catch {exec uname -X | fgrep {Release = 3.2v}}] == 0))}}
-
- # Test to see if we have a broken version of sprintf with respect
- # to the "e" format of floating-point numbers.
-
- ConstraintInitializer eformat {string equal [format %g 5e-5] 5e-05}
-
- # Test to see if execed commands such as cat, echo, rm and so forth
- # are present on this machine.
-
- ConstraintInitializer unixExecs {
- set code 1
- if {[string equal macintosh $::tcl_platform(platform)]} {
- set code 0
- }
- if {[string equal windows $::tcl_platform(platform)]} {
- if {[catch {
- set file _tcl_test_remove_me.txt
- makeFile {hello} $file
- }]} {
- set code 0
- } elseif {
- [catch {exec cat $file}] ||
- [catch {exec echo hello}] ||
- [catch {exec sh -c echo hello}] ||
- [catch {exec wc $file}] ||
- [catch {exec sleep 1}] ||
- [catch {exec echo abc > $file}] ||
- [catch {exec chmod 644 $file}] ||
- [catch {exec rm $file}] ||
- [llength [auto_execok mkdir]] == 0 ||
- [llength [auto_execok fgrep]] == 0 ||
- [llength [auto_execok grep]] == 0 ||
- [llength [auto_execok ps]] == 0
- } {
- set code 0
- }
- removeFile $file
- }
- set code
- }
-
- ConstraintInitializer stdio {
- set code 0
- if {![catch {set f [open "|[list [interpreter]]" w]}]} {
- if {![catch {puts $f exit}]} {
- if {![catch {close $f}]} {
- set code 1
- }
- }
- }
- set code
- }
-
- # Deliberately call socket with the wrong number of arguments. The
- # error message you get will indicate whether sockets are available
- # on this system.
-
- ConstraintInitializer socket {
- catch {socket} msg
- string compare $msg "sockets are not available on this system"
- }
-
- # Check for internationalization
- ConstraintInitializer hasIsoLocale {
- if {[llength [info commands testlocale]] == 0} {
- set code 0
- } else {
- set code [string length [SetIso8859_1_Locale]]
- RestoreLocale
- }
- set code
- }
-
-}
#####################################################################
# Usage and command line arguments processing.
@@ -1483,7 +1224,6 @@
proc tcltest::ProcessCmdLineArgs {} {
variable originalEnv
- variable testConstraints
# The "argv" var doesn't exist in some cases, so use {}.
if {![info exists ::argv]} {
@@ -1511,7 +1251,7 @@
DebugPuts 2 "Original environment (tcltest::originalEnv):"
DebugPArray 2 originalEnv
DebugPuts 2 "Constraints:"
- DebugPArray 2 testConstraints
+# DebugPArray 2 testConstraints
}
#####################################################################
@@ -1836,6 +1576,8 @@
global tcl_platform
variable testLevel
variable coreModTime
+ variable Option
+
DebugPuts 3 "test $name $args"
DebugDo 1 {
variable TestNames
@@ -1933,8 +1675,17 @@
}
}
+ # Save the body in either pass or fail cases.
+ testresults::newresult $name ;# This must be called before the first 'result'.
+ testresults::result testBody $name $body
+ testresults::result testDescription $name $description
+ testresults::result testMatch $name $match
+
if {[Skipped $name $constraints]} {
incr testLevel -1
+ uplevel [list [testcallback] $name]
+# if { ! $Option(-dump) } {
+# }
return
}
@@ -1952,12 +1703,6 @@
# Only run the test body if the setup was successful
if {!$setupFailure} {
- # Verbose notification of $body start
- if {[IsVerbose start]} {
- puts [outputChannel] "---- $name start"
- flush [outputChannel]
- }
-
set command [list [namespace origin RunTest] $name $body]
if {[info exists output] || [info exists errorOutput]} {
set testResult [uplevel 1 [list [namespace origin Eval] $command 0]]
@@ -1988,22 +1733,14 @@
} else {
set coreFailure 1
}
-
+
if {([preserveCore] > 1) && ($coreFailure)} {
- append coreMsg "\nMoving file to:\
- [file join [temporaryDirectory] core-$name]"
- catch {file rename -force \
- [file join [workingDirectory] core] \
- [file join [temporaryDirectory] core-$name]
- } msg
- if {[string length $msg] > 0} {
- append coreMsg "\nError:\
- Problem renaming core file: $msg"
- }
+ append coreMsg [CoreMsg $name]
}
}
}
+
# check if the return code matched the expected return code
set codeFailure 0
if {!$setupFailure && [lsearch -exact $returnCodes $returnCode] == -1} {
@@ -2048,51 +1785,45 @@
set scriptFailure 1
}
+
# if we didn't experience any failures, then we passed
variable numTests
if {!($setupFailure || $cleanupFailure || $coreFailure
|| $outputFailure || $errorFailure || $codeFailure
|| $scriptFailure)} {
if {$testLevel == 1} {
- incr numTests(Passed)
- if {[IsVerbose pass]} {
- puts [outputChannel] "++++ $name PASSED"
- }
+ testresults::incrtotal Passed
}
+ testresults::result testPassFail $name pass
incr testLevel -1
+ uplevel [list [testcallback] $name]
+# if { ! $Option(-dump) } {
+# }
return
}
-
+ testresults::result testPassFail $name fail
# We know the test failed, tally it...
if {$testLevel == 1} {
- incr numTests(Failed)
+ testresults::incrtotal Failed
}
# ... then report according to the type of failure
variable currentFailure true
- if {![IsVerbose body]} {
- set body ""
- }
- puts [outputChannel] "\n==== $name\
- [string trim $description] FAILED"
- if {[string length $body]} {
- puts [outputChannel] "==== Contents of test case:"
- puts [outputChannel] $body
- }
+
if {$setupFailure} {
- puts [outputChannel] "---- Test setup\
- failed:\n$setupMsg"
+ testresults::result testSetup $name $setupMsg
}
if {$scriptFailure} {
+ testresults::result testScriptFailure $name 1
if {$scriptCompare} {
- puts [outputChannel] "---- Error testing result: $scriptMatch"
+ testresults::result testScriptMatch $name $scriptMatch
} else {
- puts [outputChannel] "---- Result was:\n$actualAnswer"
- puts [outputChannel] "---- Result should have been\
- ($match matching):\n$result"
+ testresults::result testActualAnswer $name $actualAnswer
+ testresults::result testResult $name $result
}
}
if {$codeFailure} {
+ testresults::result testCodeFailure $name 1
switch -- $returnCode {
0 { set msg "Test completed normally" }
1 { set msg "Test generated error" }
@@ -2101,47 +1832,158 @@
4 { set msg "Test generated continue exception" }
default { set msg "Test generated exception" }
}
- puts [outputChannel] "---- $msg; Return code was: $returnCode"
- puts [outputChannel] "---- Return code should have been\
- one of: $returnCodes"
- if {[IsVerbose error]} {
- if {[info exists ::errorInfo]} {
- puts [outputChannel] "---- errorInfo: $::errorInfo"
- puts [outputChannel] "---- errorCode: $::errorCode"
- }
+ testresults::result testMsg $name $msg
+ testresults::result testReturnCode $name $returnCode
+ testresults::result testExpectedReturnCodes $name $returnCodes
+ if {[info exists ::errorInfo]} {
+ testresults::result testErrorInfo $name $::errorInfo
+ testresults::result testErrorCode $name $::errorCode
}
}
if {$outputFailure} {
+ testresults::result testOutputFailure $name 1
if {$outputCompare} {
- puts [outputChannel] "---- Error testing output: $outputMatch"
+ testresults::result testOutputMatch $name $outputMatch
} else {
- puts [outputChannel] "---- Output was:\n$outData"
- puts [outputChannel] "---- Output should have been\
- ($match matching):\n$output"
+ testresults::result testOutputData $name $outData
+ testresults::result testOutput $name $output
}
}
if {$errorFailure} {
+ testresults::result testErrorFailure $name 1
if {$errorCompare} {
- puts [outputChannel] "---- Error testing errorOutput: $errorMatch"
+ testresults::result testErrorMatch $name $errorMatch
} else {
- puts [outputChannel] "---- Error output was:\n$errData"
- puts [outputChannel] "---- Error output should have\
- been ($match matching):\n$errorOutput"
+ testresults::result testErrorData $name $errData
+ testresults::result testErrorOutput $name $errorOutput
}
}
if {$cleanupFailure} {
- puts [outputChannel] "---- Test cleanup failed:\n$cleanupMsg"
+ testresults::result testCleanupMsg $name $cleanupMsg
}
+
if {$coreFailure} {
- puts [outputChannel] "---- Core file produced while running\
- test! $coreMsg"
+ testresults::result testCore $name $coreMsg
}
- puts [outputChannel] "==== $name FAILED\n"
incr testLevel -1
+ uplevel [list [testcallback] $name]
+
return
}
+proc tcltest::printcallback {name} {
+
+ if { [testresults::results exists $name testSkipped] } {
+ if { [IsVerbose skip] } {
+ puts [outputChannel] \
+ "++++ $name SKIPPED: [testresults::results get $name testSkipped]"
+ }
+ testresults::results clear $name
+ return
+ }
+
+ # Don't run for some other reason - it doesn't match or something
+ # like that.
+ if { [testresults::results exists $name testDontRun] } {
+ testresults::results clear $name
+ return
+ }
+
+ # Verbose notification of $body start
+ if {[IsVerbose start]} {
+ puts [outputChannel] "---- $name start"
+ flush [outputChannel]
+ }
+
+# puts "DOING $name"
+# foreach v $::tcltest::testresults::testAPIvars {
+
+# if { [ array exists ::tcltest::testresults::$v ] } {
+# puts "VAR ::tcltest::testresults::$v"
+# parray ::tcltest::testresults::$v
+# }
+# }
+
+# puts -nonewline "test $name :"
+# parray ::tcltest::testresults::testPassFail
+ if { [testresults::results get $name testPassFail] == "pass" } {
+ if {[IsVerbose pass]} {
+ puts [outputChannel] "++++ $name PASSED"
+ }
+ testresults::results clear $name
+ return
+ }
+
+ if {![IsVerbose body]} {
+ set body ""
+ } else {
+ set body [testresults::results get $name testBody]
+ }
+ set description [testresults::results get $name testDescription]
+ puts [outputChannel] "\n==== $name\
+ [string trim $description] FAILED"
+ if {[string length $body]} {
+ puts [outputChannel] "==== Contents of test case:"
+ puts [outputChannel] $body
+ }
+ if { [testresults::results exists $name testSetup] } {
+ puts [outputChannel] "---- Test setup\
+ failed:\n[testresults::results get $name testSetup]"
+ }
+ if { [testresults::results exists $name testScriptFailure] } {
+ if { [testresults::results exists $name testScriptMatch] } {
+ puts [outputChannel] "---- Error testing result: [testresults::results get $name testScriptMatch]"
+ } else {
+ puts [outputChannel] "---- Result was:\n[testresults::results get $name testActualAnswer]"
+ puts [outputChannel] "---- Result should have been\
+ ([testresults::results get $name testMatch] matching):\n[testresults::results get $name testResult]"
+ }
+ }
+ if { [testresults::results exists $name testCodeFailure] } {
+ puts [outputChannel] "---- [testresults::results get $name testMsg]; Return code was: [testresults::results get $name testReturnCode]"
+ puts [outputChannel] "---- Return code should have been\
+ one of: [testresults::results get $name testExpectedReturnCodes]"
+ if {[IsVerbose error]} {
+ if { [testresults::results exists $name testErrorInfo] } {
+ puts [outputChannel] "---- errorInfo: [testresults::results get $name testErrorInfo]"
+ puts [outputChannel] "---- errorCode: [testresults::results get $name testErrorCode]"
+ }
+ }
+ }
+
+ if { [testresults::results exists $name testOutputFailure] } {
+ if { [testresults::results exists $name testOutputMatch] } {
+ puts [outputChannel] "---- Error testing output: [testresults::results get $name testOutputMatch]"
+ } else {
+ puts [outputChannel] "---- Output was:\n[testresults::results get $name testOutputData]"
+ puts [outputChannel] "---- Output should have been\
+ ([testresults::results get $name testMatch] matching):\n[testresults::results get $name testOutput]"
+ }
+ }
+
+ if { [testresults::results exists $name testErrorFailure] } {
+ if { [testresults::results exists $name testErrorMatch] } {
+ puts [outputChannel] "---- Error testing errorOutput: [testresults::results get $name testErrorMatch]"
+ } else {
+ puts [outputChannel] "---- Error output was:\n[testresults::results get $name testErrorData]"
+ puts [outputChannel] "---- Error output should have\
+ been ([testresults::results get $name testMatch] matching):\n[testresults::results get $name testErrorOutput]"
+ }
+ }
+
+ if { [testresults::results exists $name testCleanupMsg] } {
+ puts [outputChannel] "---- Test cleanup failed:\n[testresults::results get $name testCleanupMsg]"
+ }
+ if { [testresults::results exists $name testCore] } {
+ puts [outputChannel] "---- Core file produced while running\
+ test! [testresults::results get $name testCore]"
+ }
+
+ puts [outputChannel] "==== $name FAILED\n"
+ testresults::results clear $name
+}
+
# Skipped --
#
# Given a test name and it constraints, returns a boolean indicating
@@ -2152,18 +1994,18 @@
proc tcltest::Skipped {name constraints} {
variable testLevel
variable numTests
- variable testConstraints
if {$testLevel == 1} {
- incr numTests(Total)
+ testresults::incrtotal Total
}
# skip the test if it's name matches an element of skip
foreach pattern [skip] {
if {[string match $pattern $name]} {
if {$testLevel == 1} {
- incr numTests(Skipped)
- DebugDo 1 {AddToSkippedBecause userSpecifiedSkip}
+ testresults::incrtotal Skipped
+ DebugDo 1 {constraints::incrskippedbecause userSpecifiedSkip}
}
+ testresults::result testDontRun $name 1
return 1
}
}
@@ -2177,61 +2019,37 @@
}
if {!$ok} {
if {$testLevel == 1} {
- incr numTests(Skipped)
- DebugDo 1 {AddToSkippedBecause userSpecifiedNonMatch}
+ testresults::incrtotal Skipped
+ DebugDo 1 {constraints::incrskippedbecause userSpecifiedNonMatch}
}
+ testresults::result testDontRun $name 1
return 1
}
if {[string equal {} $constraints]} {
# If we're limited to the listed constraints and there aren't
# any listed, then we shouldn't run the test.
if {[limitConstraints]} {
- AddToSkippedBecause userSpecifiedLimitConstraint
+ constraints::incrskippedbecause userSpecifiedLimitConstraint
if {$testLevel == 1} {
- incr numTests(Skipped)
+ testresults::incrtotal Skipped
}
+ testresults::result testDontRun $name 1
return 1
}
} else {
# "constraints" argument exists;
# make sure that the constraints are satisfied.
- set doTest 0
- if {[string match {*[$\[]*} $constraints] != 0} {
- # full expression, e.g. {$foo > [info tclversion]}
- catch {set doTest [uplevel #0 expr $constraints]}
- } elseif {[regexp {[^.a-zA-Z0-9 \n\r\t]+} $constraints] != 0} {
- # something like {a || b} should be turned into
- # $testConstraints(a) || $testConstraints(b).
- regsub -all {[.\w]+} $constraints {$testConstraints(&)} c
- catch {set doTest [eval expr $c]}
- } elseif {![catch {llength $constraints}]} {
- # just simple constraints such as {unixOnly fonts}.
- set doTest 1
- foreach constraint $constraints {
- if {(![info exists testConstraints($constraint)]) \
- || (!$testConstraints($constraint))} {
- set doTest 0
-
- # store the constraint that kept the test from
- # running
- set constraints $constraint
- break
- }
- }
- }
-
- if {$doTest == 0} {
- if {[IsVerbose skip]} {
- puts [outputChannel] "++++ $name SKIPPED: $constraints"
- }
+ set retval [constraints::checktest $name constraints]
+ if {$retval == 1} {
+ tcltest::testresults::result testSkipped $name $constraints
if {$testLevel == 1} {
- incr numTests(Skipped)
- AddToSkippedBecause $constraints
+ tcltest::testresults::incrtotal Skipped
+ constraints::incrskippedbecause $constraints
}
- return 1
}
+ return $retval
}
return 0
}
@@ -2299,16 +2117,16 @@
proc tcltest::cleanupTests {{calledFromAllFile 0}} {
variable filesMade
variable filesExisted
- variable createdNewFiles
variable testSingleFile
variable numTests
- variable numTestFiles
+ #variable numTestFiles
variable failFiles
variable skippedBecause
variable currentFailure
variable originalEnv
variable originalTclPlatform
variable coreModTime
+ variable Option
FillFilesExisted
set testFileName [file tail [info script]]
@@ -2341,60 +2159,15 @@
}
set filesExisted $currentFiles
if {[llength $newFiles] > 0} {
- set createdNewFiles($testFileName) $newFiles
+ files::setcreated $testFileName $newFiles
}
}
if {$calledFromAllFile || $testSingleFile} {
-
- # print stats
-
- puts -nonewline [outputChannel] "$testFileName:"
- foreach index [list "Total" "Passed" "Skipped" "Failed"] {
- puts -nonewline [outputChannel] \
- "\t$index\t$numTests($index)"
- }
- puts [outputChannel] ""
-
- # print number test files sourced
- # print names of files that ran tests which failed
-
- if {$calledFromAllFile} {
- puts [outputChannel] \
- "Sourced $numTestFiles Test Files."
- set numTestFiles 0
- if {[llength $failFiles] > 0} {
- puts [outputChannel] \
- "Files with failing tests: $failFiles"
- set failFiles {}
- }
- }
-
- # if any tests were skipped, print the constraints that kept
- # them from running.
-
- set constraintList [array names skippedBecause]
- if {[llength $constraintList] > 0} {
- puts [outputChannel] \
- "Number of tests skipped for each constraint:"
- foreach constraint [lsort $constraintList] {
- puts [outputChannel] \
- "\t$skippedBecause($constraint)\t$constraint"
- unset skippedBecause($constraint)
- }
- }
-
- # report the names of test files in createdNewFiles, and reset
- # the array to be empty.
-
- set testFilesThatTurded [lsort [array names createdNewFiles]]
- if {[llength $testFilesThatTurded] > 0} {
- puts [outputChannel] "Warning: files left behind:"
- foreach testFile $testFilesThatTurded {
- puts [outputChannel] \
- "\t$testFile:\t$createdNewFiles($testFile)"
- unset createdNewFiles($testFile)
- }
+ if { $Option(-dump) } {
+ tcltest::DumpStats
+ } else {
+ uplevel [printstatscallback] $calledFromAllFile
}
# reset filesMade, filesExisted, and numTests
@@ -2409,100 +2182,24 @@
# loop is running, which is the real issue.
# Actually, this doesn't belong here at all. A package
# really has no business [exit]-ing an application.
- if {![catch {package present Tk}] && ![testConstraint interactive]} {
- exit
- }
+ #if {![catch {package present Tk}] && ![testConstraint interactive]} {
+ # exit
+ #}
} else {
# if we're deferring stat-reporting until all files are sourced,
# then add current file to failFile list if any tests in this
# file failed
- incr numTestFiles
- if {$currentFailure \
- && ([lsearch -exact $failFiles $testFileName] == -1)} {
- lappend failFiles $testFileName
+ if { $currentFailure } {
+ files::failed $testFileName
}
set currentFailure false
- # restore the environment to the state it was in before this package
- # was loaded
-
- set newEnv {}
- set changedEnv {}
- set removedEnv {}
- foreach index [array names ::env] {
- if {![info exists originalEnv($index)]} {
- lappend newEnv $index
- unset ::env($index)
- } else {
- if {$::env($index) != $originalEnv($index)} {
- lappend changedEnv $index
- set ::env($index) $originalEnv($index)
- }
- }
- }
- foreach index [array names originalEnv] {
- if {![info exists ::env($index)]} {
- lappend removedEnv $index
- set ::env($index) $originalEnv($index)
- }
- }
- if {[llength $newEnv] > 0} {
- puts [outputChannel] \
- "env array elements created:\t$newEnv"
- }
- if {[llength $changedEnv] > 0} {
- puts [outputChannel] \
- "env array elements changed:\t$changedEnv"
- }
- if {[llength $removedEnv] > 0} {
- puts [outputChannel] \
- "env array elements removed:\t$removedEnv"
- }
-
- set changedTclPlatform {}
- foreach index [array names originalTclPlatform] {
- if {$::tcl_platform($index) \
- != $originalTclPlatform($index)} {
- lappend changedTclPlatform $index
- set ::tcl_platform($index) $originalTclPlatform($index)
- }
- }
- if {[llength $changedTclPlatform] > 0} {
- puts [outputChannel] "tcl_platform array elements\
- changed:\t$changedTclPlatform"
- }
-
- if {[file exists [file join [workingDirectory] core]]} {
- if {[preserveCore] > 1} {
- puts "rename core file (> 1)"
- puts [outputChannel] "produced core file! \
- Moving file to: \
- [file join [temporaryDirectory] core-$testFileName]"
- catch {file rename -force \
- [file join [workingDirectory] core] \
- [file join [temporaryDirectory] core-$testFileName]
- } msg
- if {[string length $msg] > 0} {
- PrintError "Problem renaming file: $msg"
- }
- } else {
- # Print a message if there is a core file and (1) there
- # previously wasn't one or (2) the new one is different
- # from the old one.
-
- if {[info exists coreModTime]} {
- if {$coreModTime != [file mtime \
- [file join [workingDirectory] core]]} {
- puts [outputChannel] "A core file was created!"
- }
- } else {
- puts [outputChannel] "A core file was created!"
- }
- }
- }
+ files::add $testFileName
+ uplevel 1 [restorecallback]
}
+ # FIXME These don't really belong here any more.
flush [outputChannel]
flush [errorChannel]
return
@@ -2645,7 +2342,7 @@
proc tcltest::runAllTests { {shell ""} } {
variable testSingleFile
- variable numTestFiles
+ #variable numTestFiles
variable numTests
variable failFiles
@@ -2703,7 +2400,8 @@
flush [outputChannel]
if {[singleProcess]} {
- incr numTestFiles
+ files::add $file
+ # incr numTestFiles
uplevel 1 [list ::source $file]
} else {
# Pass along our configuration to the child processes.
@@ -2711,47 +2409,33 @@
# needs to read and process output of children.
set childargv [list]
foreach opt [Configure] {
- if {[string equal $opt -outfile]} {continue}
- lappend childargv $opt [Configure $opt]
+ if { ! [string equal $opt -outfile] && ! [string equal $opt -dump] } {
+ lappend childargv $opt [Configure $opt]
+ }
}
+ lappend childargv -dump 1
set cmd [linsert $childargv 0 | $shell $file]
+
if {[catch {
- incr numTestFiles
+ files::add $file
set pipeFd [open $cmd "r"]
+ # ----------------
while {[gets $pipeFd line] >= 0} {
- if {[regexp [join {
- {^([^:]+):\t}
- {Total\t([0-9]+)\t}
- {Passed\t([0-9]+)\t}
- {Skipped\t([0-9]+)\t}
- {Failed\t([0-9]+)}
- } ""] $line null testFile \
- Total Passed Skipped Failed]} {
- foreach index {Total Passed Skipped Failed} {
- incr numTests($index) [set $index]
- }
- if {$Failed > 0} {
- lappend failFiles $testFile
- }
- } elseif {[regexp [join {
- {^Number of tests skipped }
- {for each constraint:}
- {|^\t(\d+)\t(.+)$}
- } ""] $line match skipped constraint]} {
- if {[string match \t* $match]} {
- AddToSkippedBecause $constraint $skipped
- }
- } else {
- puts [outputChannel] $line
- }
+ puts [outputChannel] $line
}
close $pipeFd
+
} msg]} {
puts [outputChannel] "Test file error: $msg"
# append the name of the test to a list to be reported
# later
lappend testFileFailures $file
}
+ # FIXME - we probably ought to do something more elaborate
+ # with this.
+ catch {
+ source [file join [ResultsFile]]
+ }
}
}
@@ -2770,9 +2454,9 @@
set dir [file tail $directory]
puts [outputChannel] [string repeat ~ 44]
puts [outputChannel] "$dir test began at [eval $timeCmd]\n"
-
+
uplevel 1 [list ::source [file join $directory all.tcl]]
-
+
set endTime [eval $timeCmd]
puts [outputChannel] "\n$dir test ended at $endTime"
puts [outputChannel] ""
@@ -3253,12 +2937,7 @@
# Initialize the constraints and set up command line arguments
namespace eval tcltest {
# Define initializers for all the built-in contraint definitions
- DefineConstraintInitializers
-
- # Set up the constraints in the testConstraints array to be lazily
- # initialized by a registered initializer, or by "false" if no
- # initializer is registered.
- trace variable testConstraints r [namespace code SafeFetch]
+ constraints::DefineConstraintInitializers
# Only initialize constraints at package load time if an
# [initConstraintsHook] has been pre-defined. This is only
@@ -3267,7 +2946,7 @@
# straight away, without all this "hook" nonsense.
if {[string equal [namespace current] \
[namespace qualifiers [namespace which initConstraintsHook]]]} {
- InitConstraints
+ #InitConstraints
} else {
proc initConstraintsHook {} {}
}