###############################################################################
#
# test.eagle --
#
# Extensible Adaptable Generalized Logic Engine (Eagle)
# Eagle Test Package File
#
# Copyright (c) 2007-2012 by Joe Mistachkin. All rights reserved.
#
# See the file "license.terms" for information on usage and redistribution of
# this file, and for a DISCLAIMER OF ALL WARRANTIES.
#
# RCS: @(#) $Id: $
#
###############################################################################
#
# NOTE: Use our own namespace here because even though we do not directly
# support namespaces ourselves, we do not want to pollute the global
# namespace if this script actually ends up being evaluated in Tcl.
#
namespace eval ::Eagle {
proc dumpState {} {
  #
  # NOTE: Returns a flat list of name/value pairs describing every
  #       variable visible in the caller's scope, sorted by name.
  #       Each value is a two-element list: "array" plus the array
  #       contents -OR- "scalar" plus the scalar value.
  #
  set state [list]
  set names [lsort [uplevel 1 [list info vars]]]
  foreach name $names {
    if {[uplevel 1 [list array exists $name]]} then {
      set value [list array [uplevel 1 [list array get $name]]]
    } else {
      set value [list scalar [uplevel 1 [list set $name]]]
    }
    lappend state $name $value
  }
  return $state
}
proc trawputs { channel string } {
  #
  # NOTE: Writes a string verbatim (no trailing newline) to the given
  #       channel.  An empty channel name means the message is simply
  #       discarded.
  #
  if {[string length $channel] == 0} then {
    return
  }
  #
  # NOTE: When running in native Tcl with our output interception in
  #       effect, bypass it via the saved [puts] command; otherwise,
  #       use the normal [puts] command.
  #
  if {![isEagle] && \
      [llength [info commands ::tcl::save::puts]] > 0} then {
    ::tcl::save::puts -nonewline $channel $string
  } else {
    puts -nonewline $channel $string
  }
}
proc tputs { channel string } {
  #
  # NOTE: Sends a string to both the output channel and the test log.
  #
  trawputs $channel $string
  tlog $string
}
#
# NOTE: This is a shim designed to act like tclLog.
#
proc ttclLog { string } {
  #
  # NOTE: Writes the string, with a newline appended, to the
  #       configured test channel (and test log).
  #
  set value [appendArgs $string \n]
  tputs $::test_channel $value
}
proc doesTestLogFileExist { fileName } {
  #
  # NOTE: Returns true only when the named file exists and has a size
  #       greater than zero; any error while checking (e.g. access
  #       denied) is treated as "does not exist".
  #
  set code [catch {
    expr {[file exists $fileName] && [file size $fileName] > 0}
  } exists]
  if {$code == 0 && $exists} then {
    return true
  }
  return false
}
proc getTestLogStartSentry {} {
  #
  # NOTE: Returns the marker line written at the start of the test
  #       log file, creating the per-run identifier on first use.
  #
  if {![info exists ::test_run_id]} then {
    set ::test_run_id [getNewTestRunId]
  }
  set runId $::test_run_id
  return [appendArgs "**** START OF TEST LOG \"" $runId "\" ****\n"]
}
proc extractTestRunIdFromLogStartSentry { sentry } {
  #
  # NOTE: Pulls the test run identifier out of a start-of-log sentry
  #       line.  Tcl run identifiers are 56 upper-case hex digits;
  #       Eagle ones are 64.  Returns "<none>" when the sentry does
  #       not match either form.
  #
  set prefix {^\*\*\*\* START OF TEST LOG "}
  set suffix {" \*\*\*\*\n$}
  foreach middle [list {([0-9A-F]{56})} {([0-9A-F]{64})}] {
    set pattern [appendArgs $prefix $middle $suffix]
    if {[regexp -- $pattern $sentry dummy runId]} then {
      return $runId
    }
  }
  return <none>
}
proc doesTestLogHaveStartSentry {} {
  #
  # NOTE: Returns true when the configured test log file already
  #       contains the start-of-log sentry for this test run;
  #       otherwise, returns false.
  #
  set fileName [getTestLog]
  if {[string length $fileName] == 0} then {
    return false
  }
  if {![doesTestLogFileExist $fileName]} then {
    return false
  }
  set sentry [string trim [getTestLogStartSentry]]
  if {[string length $sentry] == 0} then {
    return false
  }
  set data [readFile $fileName]
  if {[string first $sentry $data] != -1} then {
    return true
  }
  return false
}
proc didTestLogHaveStartSentry { sentry varName } {
  #
  # NOTE: Checks whether a start sentry was already recorded for this
  #       test run.  Returns true when one exists; otherwise, false.
  #       On a mismatch between the recorded sentry and the supplied
  #       one, a diagnostic message is stored into the caller's
  #       variable named by varName.
  #
  if {![info exists ::test_log_sentry]} then {
    return false
  }
  if {$::test_log_sentry ne $sentry} then {
    upvar 1 $varName error
    set oldId [extractTestRunIdFromLogStartSentry $::test_log_sentry]
    set newId [extractTestRunIdFromLogStartSentry $sentry]
    set error [appendArgs \
        "---- test log start sentry mismatch error, was \"" \
        $oldId "\", now \"" $newId \"\n]
  }
  return true
}
proc setTestLogStartSentry { sentry varName } {
  #
  # NOTE: Records the start sentry for this test run, storing a
  #       human-readable status message into the caller's variable
  #       named by varName (noting whether the sentry was being set
  #       for the first time or reinitialized).
  #
  upvar 1 $varName result
  set newId [extractTestRunIdFromLogStartSentry $sentry]
  if {[info exists ::test_log_sentry]} then {
    set oldId [extractTestRunIdFromLogStartSentry $::test_log_sentry]
    set result [appendArgs \
        "---- test log start sentry reinitialized to \"" $newId \
        "\", was \"" $oldId \"\n]
  } else {
    set result [appendArgs \
        "---- test log start sentry initialized to \"" $newId \
        \"\n]
  }
  set ::test_log_sentry $sentry
}
#
# NOTE: Appends the start-of-log sentry to the named test log file,
#       when needed (i.e. the file is missing or empty and no sentry
#       has been recorded for this test run).  Private helper for
#       the [tlog] procedure.
#
proc tlogMaybeAppendStartSentry { fileName } {
  if {[doesTestLogFileExist $fileName]} then {
    return
  }
  set sentry [getTestLogStartSentry]
  if {[string length $sentry] == 0} then {
    return
  }
  #
  # BUGFIX: At this point, there should not be any record of a
  #         previously used test log sentry.  If there is, do
  #         not append a test log sentry again because the test
  #         log file may have been deleted and we need to make
  #         sure the test log is not considered as "complete".
  #
  if {[didTestLogHaveStartSentry $sentry sentryError]} then {
    if {[info exists sentryError]} then {
      appendSharedLogFile $fileName $sentryError
    }
  } else {
    setTestLogStartSentry $sentry sentryResult
    appendSharedLogFile $fileName $sentry
    if {[info exists sentryResult]} then {
      appendSharedLogFile $fileName $sentryResult
    }
  }
}
proc tlog { string } {
  #
  # NOTE: Appends a string to the test log file, first flushing any
  #       queued test log entries.  If no test log file has been
  #       configured, the message is ignored.  An empty string may
  #       be passed to flush the queue only.
  #
  set fileName [getTestLog]
  if {[string length $fileName] == 0} then {
    return
  }
  #
  # NOTE: Check for any queued test log data that needs to be sent to
  #       the log file prior to sending the current string.
  #
  if {[info exists ::test_log_queue]} then {
    #
    # NOTE: Process each queued test log entry, in order, sending them
    #       to the test log file (as long as they are not empty
    #       strings).  Each entry is removed from the queue after it
    #       is sent to the test log file.
    #
    foreach entry [lsort -integer [array names ::test_log_queue]] {
      set newString $::test_log_queue($entry)
      if {[string length $newString] > 0} then {
        tlogMaybeAppendStartSentry $fileName
        appendSharedLogFile $fileName $newString
      }
      unset ::test_log_queue($entry)
    }
    #
    # NOTE: If all entries in the test log queue were just processed,
    #       unset the entire array now.
    #
    # BUGFIX: This must check the *global* queue array; the previous
    #         check of the (nonexistent) local "test_log_queue" was
    #         always zero, which unconditionally discarded the global
    #         array, including any entries added during processing.
    #
    if {[array size ::test_log_queue] == 0} then {
      unset ::test_log_queue
    }
  }
  #
  # NOTE: If an empty string is supplied by the caller, do nothing.
  #
  if {[string length $string] > 0} then {
    tlogMaybeAppendStartSentry $fileName
    appendSharedLogFile $fileName $string
  }
}
proc getSoftwareRegistryKey { wow64 } {
  #
  # NOTE: Returns the registry key name used for installed software.
  #       The WoW64 key name is returned only when the caller asks
  #       for it -AND- the current machine is 64-bit (ia64, amd64,
  #       or arm64); otherwise, the native key name is returned.
  #
  if {!$wow64} then {
    return Software
  }
  if {![info exists ::tcl_platform(machine)]} then {
    return Software
  }
  set machines [list ia64 amd64 arm64]
  if {[lsearch -exact $machines $::tcl_platform(machine)] == -1} then {
    return Software
  }
  return Software\\Wow6432Node
}
proc haveConstraint { name } {
  #
  # NOTE: Returns non-zero when the named test constraint is present
  #       (Eagle) -OR- set to a true value (Tcl, via the Tcltest
  #       package).
  #
  if {![isEagle]} then {
    return [expr {
      [info exists ::tcltest::testConstraints($name)] && \
      $::tcltest::testConstraints($name)}]
  }
  return [expr {
    [info exists ::eagle_tests(Constraints)] && \
    [lsearch -exact $::eagle_tests(Constraints) $name] != -1}]
}
proc addConstraint { name {value 1} } {
  #
  # NOTE: Adds a test constraint.  In Eagle, the name is appended to
  #       the constraint list when the value is non-zero and it is
  #       not already present; in Tcl, this defers to the Tcltest
  #       package.  Always returns an empty string.
  #
  if {![isEagle]} then {
    ::tcltest::testConstraint $name $value
    return ""
  }
  if {[info exists ::eagle_tests(Constraints)] && \
      [lsearch -exact $::eagle_tests(Constraints) $name] == -1 && \
      $value} then {
    lappend ::eagle_tests(Constraints) $name
  }
  return ""
}
proc haveOrAddConstraint { name {value ""} } {
if {[isEagle]} then {
if {[llength [info level 0]] == 2} then {
return [haveConstraint $name]
}
return [addConstraint $name [expr {bool($value)}]]
} else {
return [::tcltest::testConstraint $name $value]
}
}
proc getConstraints {} {
  #
  # NOTE: Returns the list of test constraints currently in effect
  #       for this test run.
  #
  set result [list]
  if {[isEagle]} then {
    if {[catch {set ::eagle_tests(Constraints)} constraints] == 0} then {
      eval lappend result $constraints
    }
    return $result
  }
  foreach name [array names ::tcltest::testConstraints] {
    if {$::tcltest::testConstraints($name)} then {
      lappend result $name
    }
  }
  return $result
}
proc getCachedConstraints {} {
  #
  # NOTE: Returns the cached list of test constraints, when available
  #       and non-empty; otherwise, falls back to querying the active
  #       constraints.
  #
  if {![info exists ::test_constraints]} then {
    return [getConstraints]
  }
  if {[llength $::test_constraints] == 0} then {
    return [getConstraints]
  }
  return $::test_constraints
}
proc useCachedConstraints {} {
  #
  # NOTE: (Re-)adds every cached test constraint to the active set.
  #
  set names [getCachedConstraints]
  foreach name $names {
    addConstraint $name
  }
}
proc removeConstraint { name } {
  #
  # NOTE: Removes the named test constraint, if present.  Always
  #       returns an empty string.
  #
  if {![isEagle]} then {
    if {[info exists ::tcltest::testConstraints($name)]} then {
      unset ::tcltest::testConstraints($name)
    }
    return ""
  }
  if {[info exists ::eagle_tests(Constraints)]} then {
    set index [lsearch -exact $::eagle_tests(Constraints) $name]
    if {$index != -1} then {
      set ::eagle_tests(Constraints) [lreplace \
          $::eagle_tests(Constraints) $index $index]
    }
  }
  return ""
}
proc fixConstraints { constraints } {
  #
  # NOTE: Adapts an Eagle-style constraints string for use with the
  #       Tcltest package.
  #
  set result [string trim $constraints]
  if {[string length $result] == 0} then {
    return $result
  }
  #
  # HACK: Fixup for the semi-magical expression (via [expr]) test
  #       constraint syntax supported by the Tcltest package and not
  #       by the Eagle.Test package.  This needs to happen for Tcl
  #       in test constraints that contain any characters that are
  #       not alphanumeric, not a period, and not a colon (e.g. in
  #       this case, the exclamation point); however, it should only
  #       be required when the number of test constraints is greater
  #       than one.
  #
  if {[isEagle]} then {
    return $result
  }
  if {[string first ! $result] == -1} then {
    return $result
  }
  #
  # HACK: All of our test constraints assume they are "logically
  #       and-ed" together.
  #
  return [join [split $result] " && "]
}
proc isCorePublicKeyToken { publicKeyToken } {
  #
  # HACK: This list of "well-known" public key tokens is hard-coded.
  #
  set wellKnown [list \
      29c6297630be05eb 1e22ec67879739a2 358030063a832bc3]
  if {[isEagle]} then {
    #
    # NOTE: The "in" operator is hidden inside an [expr] string here
    #       so that native Tcl never tries to compile it.
    #
    set expr {$publicKeyToken in $wellKnown}
    if {[expr $expr]} then {
      return true
    }
    return false
  }
  if {[lsearch -exact $wellKnown $publicKeyToken] != -1} then {
    return true
  }
  return false
}
proc fixTimingConstraints { constraints } {
  #
  # HACK: In Eagle, when the right test constraint is present, *any*
  #       tests where PASSED / FAILED results can vary
  #       non-deterministically due to timing issues (e.g.
  #       performance) are forbidden from causing the overall test
  #       run to fail; this is done by appending the "fail.false"
  #       constraint.
  #
  if {[isEagle] && ([info exists ::no(failTimingTests)] || \
      [haveConstraint officialStableReleaseInProgress])} then {
    return [fixConstraints [concat $constraints [list fail.false]]]
  }
  return [fixConstraints $constraints]
}
proc testDebugBreak {} {
#
# NOTE: Breaks into an interactive debugger: Eagle's integrated
#       script debugger -OR- the TclPro Debugger for native Tcl.
#
if {[isEagle]} then {
#
# NOTE: In Eagle, simply break into the interactive loop using the
# integrated script debugger.
#
debug break
} else {
#
# NOTE: In native Tcl, attempt to use the TclPro Debugger interface.
# This requires that the TclPro Debugger interface package be
# present somewhere along the auto-path.
#
package require tcldebugger_attach; debugger_init; debugger_break
}
}
proc testArrayGet { varName {integer false} } {
  #
  # NOTE: Returns the name/value pairs of the array variable (in the
  #       caller's scope) named by varName, with the names sorted;
  #       an empty variable name yields an empty list.  When integer
  #       is true, names are sorted numerically.
  #
  if {[string length $varName] == 0} then {
    return [list]
  }
  #
  # NOTE: Refer to the array in the context of the caller.
  #
  upvar 1 $varName theArray
  #
  # NOTE: Build and evaluate the command that sorts the array names.
  #
  set sortCommand [list lsort]
  if {$integer} then {
    lappend sortCommand -integer
  }
  lappend sortCommand [array names theArray]
  set pairs [list]
  foreach key [eval $sortCommand] {
    lappend pairs $key $theArray($key)
  }
  return $pairs
}
proc testArrayGet2 { varName {pattern ""} {integer false} } {
#
# NOTE: Returns the results of [array get] in a well-defined order.
#
if {[string length $varName] == 0} then {
return [list]
}
#
# NOTE: Refer to the array in the context of the caller.
#
upvar 1 $varName array
#
# NOTE: Build the command that will sort the array names into order.
#
set command [list lsort]
if {$integer} then {lappend command -integer}
#
# NOTE: If there is a pattern, use it; otherwise, all elements are
# returned.
#
if {[string length $pattern] > 0} then {
lappend command [array names array $pattern]
} else {
lappend command [array names array]
}
set result [list]
foreach name [eval $command] {
lappend result $name $array($name)
}
return $result
}
proc testResultGet { script } {
  #
  # NOTE: Evaluates the script in the caller's scope, returning its
  #       result -OR- the literal string "<error>" when the script
  #       does not complete normally.
  #
  if {[catch {uplevel 1 $script} result] != 0} then {
    return <error>
  }
  return $result
}
proc testValueGet { varName {integer false} } {
  #
  # NOTE: Returns the sorted name/value pairs of an array variable
  #       -OR- the value of a scalar variable (wrapped in a single
  #       element list), resolved in the caller's scope; an empty
  #       variable name yields an empty list.  When integer is true,
  #       array names are sorted numerically.
  #
  if {[string length $varName] == 0} then {
    return [list]
  }
  #
  # NOTE: Is the specified variable (in the context of the caller)
  #       an array?  If not, just grab and wrap the scalar value.
  #
  if {![uplevel 1 [list array exists $varName]]} then {
    set varValue [uplevel 1 [list set $varName]]
    return [list $varValue]
  }
  #
  # NOTE: Refer to the array in the context of the caller.
  #
  upvar 1 $varName theArray
  if {$integer} then {
    set keys [lsort -integer [array names theArray]]
  } else {
    set keys [lsort [array names theArray]]
  }
  set pairs [list]
  foreach key $keys {
    lappend pairs $key $theArray($key)
  }
  return $pairs
}
proc getFirstLineOfError { error } {
  #
  # NOTE: Returns the first line of an error message, after
  #       normalizing CR/LF pairs to bare LF; when there is no
  #       non-empty first line, the whole (normalized) message is
  #       returned verbatim.
  #
  set error [string map [list \r\n \n] $error]
  set index [string first \n $error]
  if {$index != -1} then {
    incr index -1
    #
    # BUGFIX: Use ">= 0" here (not "> 0") so that a first line
    #         consisting of exactly one character is still returned
    #         (previously "a\nb" returned the whole string instead
    #         of just "a").
    #
    if {$index >= 0} then {
      return [string range $error 0 $index]
    }
  }
  return $error
}
proc calculateBogoCops { {milliseconds 2000} {legacy false} } {
#
# NOTE: Measures this machine's approximate performance, in Tcl
#       commands executed per second ("BogoCops"), by spinning for
#       the specified number of milliseconds.  Raises an error when
#       milliseconds is not greater than zero.  When legacy is true
#       (or when running in native Tcl), older measurement
#       techniques are used; on measurement failure, 0 is returned
#       as an obviously invalid result.
#
#
# NOTE: Verify that the number of milliseconds requested is greater
# than zero.
#
if {$milliseconds <= 0} then {
unset -nocomplain ::test_suite_running
error "number of milliseconds must be greater than zero"
}
#
# HACK: Different techniques are used to calculate the performance of
# the machine for Tcl and Eagle.
#
if {!$legacy && [isEagle]} then {
#
# BUGBUG: Tcl 8.4 does not like this expression (and Tcl tries to
# compile it even though it will only actually ever be
# evaluated in Eagle).
#
set expr {[catch {
string first -timeout [time nop -1 ---]
} timeout] in [list 0 2] && $timeout != -1}
#
# HACK: Attempt to determine if the "-timeout" option for [time] is
# available. If so, use it.
#
if {[expr $expr]} then {
set code [catch {
#
# NOTE: This is the most robust method, i.e. use the "-timeout"
# option to the [time] command.
#
set before [info cmdcount]
catch {time {nop} -1 -timeout $milliseconds}; # internal loop.
set after [info cmdcount]
#
# HACK: Mono has a bug that results in excessive trailing zeros
# here (Mono bug #655780).
#
if {[isMono]} then {
expr {double(($after - $before) / ($milliseconds / 1000.0))}
} else {
expr {($after - $before) / ($milliseconds / 1000.0)}
}
} result]
#
# NOTE: If we failed above, return an obviously invalid result
# instead.
#
if {$code == 0} then {
return $result
} else {
return 0
}
} else {
#
# HACK: This calculation method (i.e. using [after] to cancel the
# [time] command after the specified number of milliseconds)
# is no longer necessary as of Beta 45; however, it will be
# retained for backward compatibility with previous releases
# solely for the purpose of running comparative benchmarks.
#
# NOTE: Save the current readiness limit for later restoration
# and then set the current readiness limit to always check
# the interpreter readiness (default). If this was not
# done, the [interp cancel] command in this procedure may
# have no effect, which could cause this procedure to run
# forever.
#
set readylimit [interp readylimit {}]
interp readylimit {} 0
try {
#
# NOTE: Save the current background error handler for later
# restoration and then reset the current background
# error handler to nothing.
#
set bgerror [interp bgerror {}]
interp bgerror {} ""
try {
#
# NOTE: Save the current [after] flags for later restoration
# and then reset them to process events immediately.
#
set flags [after flags]
after flags =Immediate
try {
set code [catch {
#
# NOTE: First, make sure that the [after] event queue
# for the interpreter is totally empty.
#
catch {foreach id [after info] {after cancel $id}}
#
# NOTE: Schedule the event to cancel the script we are
# about to evaluate, capturing the name so we can
# cancel it later, if necessary.
#
set event [after $milliseconds [list interp cancel]]
#
# HACK: There is a potential "race condition" here. If the
# specified number of milliseconds elapses before (or
# after) entering the [catch] script block (below)
# then the resulting script cancellation error will
# not be caught and we will be unable to return the
# correct result to the caller.
#
set before [info cmdcount]
catch {time {nop} -1}; # uses the [time] internal busy loop.
set after [info cmdcount]
#
# HACK: Mono has a bug that results in excessive trailing
# zeros here (Mono bug #655780).
#
if {[isMono]} then {
expr {double(($after - $before) / ($milliseconds / 1000.0))}
} else {
expr {($after - $before) / ($milliseconds / 1000.0)}
}
} result]
#
# NOTE: If we failed due to the race condition explained
# above, return an obviously invalid result instead.
#
if {$code == 0} then {
return $result
} else {
return 0
}
} finally {
if {[info exists event]} then {
catch {after cancel $event}
}
after flags [appendArgs = $flags]
}
} finally {
interp bgerror {} $bgerror
}
} finally {
interp readylimit {} $readylimit
}
}
} else {
#
# NOTE: Record the initial Tcl command count.
#
set before [info cmdcount]
#
# NOTE: Calculate how many whole seconds we need to spin for.
#
set seconds [expr {$milliseconds / 1000}]
#
# NOTE: Calculate the starting and ending values of [clock seconds].
#
set now [clock seconds]
set start $now; set stop [expr {$now + $seconds}]
#
# NOTE: Do nothing for X seconds (i.e. except call [clock seconds]).
#
while {$start <= $now && $now < $stop} {set now [clock seconds]}
#
# NOTE: Record the final Tcl command count.
#
set after [info cmdcount]
#
# NOTE: Calculate approximately how many Tcl commands per second were
# executed during the timed loop (above). Due to various things,
# including overhead associated with [clock seconds], this number
# is not as accurate as the one for Eagle; however, it's generally
# good enough.
#
expr {($after - $before) / double($seconds)}
}
}
proc calculateRelativePerformance { type value } {
  #
  # NOTE: Scales an expected performance number by the ratio between
  #       the baseline machine performance and this machine's measured
  #       performance (when both calibration globals are available);
  #       type is "elapsed" or "iterations".  The value is returned
  #       unchanged when no calibration can be performed.
  #
  if {![info exists ::test_base_cops] || \
      ![info exists ::test_cops]} then {
    return $value
  }
  switch -exact -- $type {
    elapsed {
      if {$::test_cops != 0} then {
        set ratio [expr {$::test_base_cops / $::test_cops}]
        return [expr {double($value) * $ratio}]
      }
    }
    iterations {
      if {$::test_base_cops != 0} then {
        set ratio [expr {$::test_cops / $::test_base_cops}]
        return [expr {double($value) * $ratio}]
      }
    }
  }
  return $value
}
proc formatTimeStamp { seconds {gmt false} } {
  #
  # NOTE: Formats a [clock seconds] value as an ISO-8601 style
  #       timestamp, including the time zone.
  #
  if {![isEagle]} then {
    return [clock format $seconds -gmt $gmt -format "%Y-%m-%dT%H:%M:%S %Z"]
  }
  return [clock format $seconds -gmt $gmt -iso -isotimezone]
}
proc formatElapsedTime { seconds } {
  #
  # NOTE: Formats a number of whole seconds as a human readable
  #       elapsed time, using a TimeSpan instance when Eagle's
  #       [object] command is available.
  #
  if {![isEagle] || [llength [info commands object]] == 0} then {
    #
    # NOTE: Unfortunately, there is no built-in native Tcl command
    #       that can correctly format an elapsed time; therefore,
    #       just return the number of whole seconds.
    #
    return [appendArgs $seconds " seconds"]
  }
  #
  # NOTE: Create a TimeSpan instance based on the number of whole
  #       seconds and include its string representation.
  #
  set timeSpan [object invoke -create -alias TimeSpan FromSeconds \
      $seconds]
  return [appendArgs $seconds " seconds (" [$timeSpan ToString] \
      " elapsed time)"]
}
proc sourceIfValid { type fileName } {
  #
  # NOTE: Evaluates (via [source]) the named file in the caller's
  #       scope, when it exists, logging progress to the test
  #       channel; a [source] error is logged and then re-thrown.
  #       Does nothing when the file name is empty.
  #
  if {[string length $fileName] == 0} then {
    return
  }
  if {![file exists $fileName]} then {
    tputs $::test_channel [appendArgs \
        "---- skipped " $type " file: \"" $fileName \
        "\", it does not exist\n"]
    return
  }
  tputs $::test_channel [appendArgs \
      "---- evaluating " $type " file: \"" $fileName \"\n]
  if {[catch {uplevel 1 [list source $fileName]} error]} then {
    tputs $::test_channel [appendArgs \
        "---- error during " $type " file: " $error \n]
    #
    # NOTE: The error has been logged, now re-throw it.
    #
    unset -nocomplain ::test_suite_running
    error $error $::errorInfo $::errorCode
  }
}
proc processTestArguments { varName strict args } {
#
# NOTE: Processes a list of command line style test arguments,
#       storing recognized option values into the array variable
#       (in the caller's scope) named by varName.  When strict is
#       zero, a dangling unknown name followed by a known option
#       is resynchronized against that option.  Returns the list
#       of unknown arguments, formatted as name/value sublists.
#
#
# NOTE: Initially, there are no unknown (i.e. unprocessed) arguments.
#
set result [list]
#
# NOTE: We are going to place the configured options in the variable
# identified by the name provided by the caller.
#
if {[string length $varName] > 0} then {
upvar 1 $varName array
}
#
# TODO: Add more support for standard "tcltest" options here.
#
set options [list \
-breakOnLeak -configuration -constraints -exitOnComplete \
-file -logFile -logId -logPath -machine -match -namePrefix \
-no -notFile -platform -postTest -preTest -postWait -preWait \
-randomOrder -skip -startFile -stopFile -stopOnFailure \
-stopOnLeak -suffix -suite -tclsh -threshold -uncountedLeaks \
-verbose]
set length [llength $args]
for {set index 0} {$index < $length} {incr index} {
#
# NOTE: Grab the current list element, which should be the name of
# the test option.
#
set name [lindex $args $index]
#
# NOTE: Use the [tqputs] command here just in case the test log file
# has not been setup yet (i.e. by default, this procedure is
# almost always called by the test prologue file prior to the
# test log file having been setup and we do not want to just
# lose this output).
#
if {[lsearch -exact $options $name] != -1} then {
#
# NOTE: Is there another list element available for the value? If
# not, this is not a valid test option.
#
if {$index + 1 < $length} then {
incr index; set value [lindex $args $index]
set array($name) $value
tqputs $::test_channel [appendArgs \
"---- overrode test option \"" $name "\" with value \"" \
$value \"\n]
} else {
tqputs $::test_channel [appendArgs \
"---- no value for test option \"" $name "\", ignored\n"]
}
} elseif {[string index $name 0] eq "-"} then {
#
# NOTE: Is there another list element available for the value? If
# not, it does not conform to the standard command line name
# and value pattern.
#
if {$index + 1 < $length} then {
incr index; set value [lindex $args $index]
if {!$strict && [lsearch -exact $options $value] != -1} then {
incr index -1; # HACK: Resynchronize with valid test option.
lappend result [list $name]
tqputs $::test_channel [appendArgs \
"---- no value for unknown test option \"" $name \
"\", ignored, backing up one for test option \"" \
$value \"...\n]
} else {
lappend result [list $name $value]
tqputs $::test_channel [appendArgs \
"---- unknown test option \"" $name "\" with value \"" \
$value "\", ignored\n"]
}
} else {
lappend result [list $name]
tqputs $::test_channel [appendArgs \
"---- no value for unknown test option \"" $name \
"\", ignored\n"]
}
} else {
#
# NOTE: Is there another list element available for the value? If
# not, it does not conform to the standard command line name
# and value pattern.
#
if {$index + 1 < $length} then {
incr index; set value [lindex $args $index]
if {!$strict && [lsearch -exact $options $value] != -1} then {
incr index -1; # HACK: Resynchronize with valid test argument.
lappend result [list $name]
tqputs $::test_channel [appendArgs \
"---- no value for unknown argument \"" $name \
"\", ignored, backing up one for test option \"" \
$value \"...\n]
} else {
lappend result [list $name $value]
tqputs $::test_channel [appendArgs \
"---- unknown argument \"" $name "\" with value \"" \
$value "\", ignored\n"]
}
} else {
#
# NOTE: This is not an option of *any* kind that we know about.
# Ignore it and issue a warning.
#
lappend result [list $name]
tqputs $::test_channel [appendArgs \
"---- unknown argument \"" $name "\", ignored\n"]
}
}
}
#
# NOTE: Now, attempt to flush the test log queue, if available.
#
tlog ""
#
# NOTE: Return the nested list of unknown arguments, formatted as
# name/value pairs, to the caller.
#
return $result
}
proc getTclShellFileName { automatic kits machine } {
#
# NOTE: Attempts to locate a usable Tcl shell executable, first via
#       override environment variables (optionally machine-specific)
#       and then, in Eagle with automatic mode enabled, by probing
#       candidate shells that match the "best" available Tcl
#       library.  When kits is non-zero, TclKit shells are also
#       considered.  Falls back to simply "tclsh".
#
#
# NOTE: Start out with an empty list of candiate Tcl shells.
#
set shells [list]
#
# NOTE: Figure out the environment variables to be checked. If
# there was a machine specified, it will be used to check
# for machine-specific Tcl shells.
#
set names [list]
foreach name [list Eagle_Tcl_Shell Tcl_Shell EAGLE_TCLSH TCLSH] {
if {[string length $machine] > 0} then {
set platform [machineToPlatform $machine true]
if {[string length $platform] > 0} then {
lappend names [appendArgs $name _ $platform]
}
set platform [machineToPlatform $machine false]
if {[string length $platform] > 0} then {
lappend names [appendArgs $name _ $platform]
}
lappend names [appendArgs $name _ $machine]
}
lappend names $name
}
#
# NOTE: Check all environment variables (we know about) that
# may contain the path where the Tcl shell is located.
#
foreach name $names {
#
# NOTE: Grab the value of the environment variable. This
# will be an empty string if it was not set.
#
set value [getEnvironmentVariable $name]
#
# TODO: Possibly add a check if the file actually exists
# here.
#
if {[string length $value] > 0} then {
#
# NOTE: *EXTERNAL* Use verbatim, no normalization.
#
if {$automatic && [isEagle]} then {
#
# NOTE: In automatic mode, the environment variable
# value simply represents another candidate
# Tcl shell (i.e. it does not halt the search
# for other candidate Tcl shells).
#
lappend shells $value
} else {
#
# NOTE: In manual mode, the environment variable
# value represents an "override" and halts
# the search for other candidate Tcl shells.
#
return $value
}
}
}
#
# NOTE: The automatic Tcl shell detection is only available when
# running in Eagle.
#
if {[isEagle]} then {
#
# NOTE: Attempt to check for the "best" available dynamically
# loadable Tcl library and then attempt to use its
# "associated" Tcl shell. A very similar block of code
# is also used by the [checkForTclInstalls] procedure
# in the constraints package.
#
if {[catch {tcl select -architecture} tcl] == 0} then {
#
# NOTE: Should we also consider TclKit shells? If so, a bit
# more handling is required.
#
if {$kits} then {
#
# NOTE: Did we find one? Attempt to grab the patch
# level field from the returned dictionary value.
#
set dotPatchLevel [getDictionaryValue $tcl patchLevel]
#
# NOTE: Verify that the patch level we found is valid
# and that it conforms to the pattern we expect.
#
if {[string length $dotPatchLevel] > 0 && \
[regexp -- {^\d+\.\d+\.\d+$} $dotPatchLevel]} then {
#
# NOTE: Build the candidate TclKit shell executable file
# name with the dot-separated patch level. This is
# the common naming scheme on Unix.
#
set dotShell [appendArgs tclkit- $dotPatchLevel]
#
# NOTE: Build the candidate TclKit shell executable file
# name with the patch level, removing the dot. This
# is the common naming scheme on Windows.
#
set shell [appendArgs \
tclkit- [string map [list . ""] $dotPatchLevel]]
#
# NOTE: Always favor the TclKit shell executable file
# naming scheme for the current operating system
# first.
#
if {[isWindows]} then {
lappend shells $shell
lappend shells $dotShell
} else {
lappend shells $dotShell
lappend shells $shell
}
}
}
#
# NOTE: Did we find one? Attempt to grab the version
# field from the returned dictionary value.
#
set dotVersion [getDictionaryValue $tcl version]
#
# NOTE: Verify that the version we found is valid and that
# it conforms to the pattern we expect.
#
if {[string length $dotVersion] > 0 && \
[regexp -- {^\d+\.\d+$} $dotVersion]} then {
#
# NOTE: Gather the list of candidate Tcl shells to check
# using the range of versions we are interested in,
# starting with the "best" available version and
# ending with the absolute minimum version supported
# by the Eagle core library. A very similar block
# of code is also used by the [checkForTclShell]
# procedure in the constraints package.
#
foreach version [lsort -real -decreasing [tcl \
versionrange -maximumversion $dotVersion]] {
#
# NOTE: Build the candidate Tcl shell executable file name
# with the dot-separated version. This is the common
# naming scheme on Unix.
#
set dotShell [appendArgs tclsh $version]
#
# NOTE: Build the candidate Tcl shell executable file name
# with the version, removing the dot. This is the
# common naming scheme on Windows.
#
set shell [appendArgs tclsh [string map [list . ""] $version]]
#
# NOTE: Always favor the Tcl shell executable file naming
# scheme for the current operating system first.
#
if {[isWindows]} then {
lappend shells $shell
lappend shells $dotShell
} else {
lappend shells $dotShell
lappend shells $shell
}
}
}
}
#
# NOTE: Check each candidate Tcl shell and query its fully
# qualified path from it. If it cannot be executed,
# we know that candidate Tcl shell is not available.
#
if {![info exists ::no(getTclExecutableForTclShell)]} then {
foreach shell $shells {
if {[catch {
getTclExecutableForTclShell $shell [getTclShellVerbosity]
} executable] == 0 && $executable ne "error" && \
![string match "error: *" $executable]} then {
#
# NOTE: It looks like this Tcl shell is available.
# Return the fully qualified path to it now.
#
return $executable
}
}
}
}
#
# NOTE: Return the fallback default.
#
return tclsh
}
proc getTemporaryPath { {usable true} } {
  #
  # NOTE: Returns the temporary directory, honoring several override
  #       environment variables (checked in upper, lower, and title
  #       case).  When usable is true, an override is accepted only
  #       if it names an existing directory writable by us.
  #
  set names [list]
  foreach name [list \
      EAGLE_TEST_TEMP EAGLE_TEMP XDG_RUNTIME_DIR TEMP TMP] {
    #
    # NOTE: Make sure we handle all the reasonable "cases" of the
    #       environment variable names.
    #
    lappend names [string toupper $name] [string tolower $name] \
        [string totitle $name]
  }
  #
  # NOTE: Check if we can use any of the environment variables.
  #
  foreach name $names {
    set value [getEnvironmentVariable $name]
    if {[string length $value] == 0} then {
      continue
    }
    if {!$usable} then {
      return [file normalize $value]
    }
    if {[file isdirectory $value] && [file writable $value]} then {
      return [file normalize $value]
    }
  }
  if {[isEagle] && [llength [info commands object]] > 0} then {
    #
    # NOTE: Eagle fallback, use whatever is reported by the
    #       underlying framework and/or operating system.
    #
    return [file normalize [object invoke System.IO.Path GetTempPath]]
  }
  #
  # NOTE: Tcl fallback, *assume* that we can use the directory where
  #       the executable is running for temporary storage.
  #
  return [file normalize [file dirname [info nameofexecutable]]]
}
proc getTemporaryFileName { {seconds 5} } {
  #
  # NOTE: Returns a unique temporary file name.  In Eagle, this
  #       defers to [file tempname]; in native Tcl, random
  #       "tmpXXXX.tmp" style names are generated for up to the
  #       given number of seconds until one names a nonexistent
  #       file, else an error is raised.
  #
  if {[isEagle]} then {
    return [file tempname]
  }
  set path [getTemporaryPath]
  set now [clock seconds]
  set start $now
  set stop [expr {$now + $seconds}]
  while {$start <= $now && $now < $stop} {
    #
    # NOTE: Derive four pseudo-random hex digits from [rand()].
    #
    binary scan [binary format d* [expr {rand()}]] h* random
    set shortName [appendArgs \
        tmp [string index $random 0] [string index $random 1] \
        [string index $random end-1] [string index $random end] \
        .tmp]
    set candidate [file join $path $shortName]
    if {![file exists $candidate]} then {
      return $candidate
    }
    set now [clock seconds]
  }
  error "cannot generate temporary file name"
}
proc getFiles { directory pattern } {
  #
  # NOTE: Returns the sorted (dictionary order) list of readable
  #       regular files in the directory matching the pattern.
  #
  if {![isEagle]} then {
    return [lsort -dictionary [glob -directory $directory -types \
        {f r} -nocomplain -- $pattern]]
  }
  set result [list]
  if {![file exists $directory] || ![file isdirectory $directory]} then {
    return $result
  }
  foreach fileName [lsort -dictionary [file list $directory $pattern]] {
    if {[file isfile $fileName] && [file readable $fileName]} then {
      lappend result $fileName
    }
  }
  return $result
}
proc getTestFiles { directories matchFilePatterns skipFilePatterns } {
  #
  # NOTE: Returns the list of test files found in the given
  #       directories that match at least one of the match patterns
  #       and none of the skip patterns.
  #
  set result [list]
  foreach directory $directories {
    set includeNames [list]
    foreach pattern $matchFilePatterns {
      eval lappend includeNames [getFiles $directory $pattern]
    }
    set excludeNames [list]
    foreach pattern $skipFilePatterns {
      eval lappend excludeNames [getFiles $directory $pattern]
    }
    foreach fileName $includeNames {
      if {[lsearch -exact $excludeNames $fileName] == -1} then {
        lappend result $fileName
      }
    }
  }
  return $result
}
proc getTestRunId {} {
  #
  # NOTE: Returns the unique identifier for this test run, if one has
  #       been established; otherwise, returns an empty string.
  #
  if {[info exists ::test_run_id]} then {
    return $::test_run_id
  }
  return ""
}
proc getNewTestRunId {} {
  #
  # NOTE: Generates and returns a new identifier for a test run,
  #       combining the host name with process/thread/time values so
  #       the result is unique in both time and space.
  #
  # HACK: Yes, this is a bit ugly; however, it creates a nice unique
  #       identifier to represent the test run, which makes analyzing
  #       the test log files a lot easier.
  #
  if {[isEagle]} then {
    #
    # BUGBUG: Tcl 8.4 does not like this expression (and Tcl tries to
    #         compile it even though it will only actually ever be
    #         evaluated in Eagle); hence, it is hidden in a variable
    #         and evaluated later via [expr $expr].
    #
    set expr {random()}
    #
    # NOTE: Include the host name to make the result more unique in both
    #       time and space.  Also, hash the entire constructed string.
    #
    if {![isMono]} then {
      #
      # NOTE: When running on the .NET Framework, we can simply use the
      #       [string format] command (i.e. String.Format semantics).
      #
      return [hash normal sha256 [string format \
          "{0}{1:X8}{2:X8}{3:X16}{4:X16}{5:X16}" [info host] [pid] \
          [info tid] [clock now] [clock clicks] [expr $expr]]]
    } else {
      #
      # HACK: Mono does not currently support calling the String.Format
      #       overload that takes a variable number of arguments via
      #       reflection (Mono bug #636939).  Also, strip any leading
      #       minus signs for cleanliness.
      #
      return [hash normal sha256 [appendArgs [info host] [string \
          trimleft [pid] -] [string trimleft [info tid] -] [string \
          trimleft [clock now] -] [string trimleft [clock clicks] -] \
          [string trimleft [expr $expr] -]]]
    }
  } else {
    #
    # NOTE: Generate a random number using [expr] and then convert it
    #       to a 64-bit integer (via [binary scan] of its raw bytes).
    #
    binary scan [binary format d* [expr {rand()}]] w* random
    #
    # NOTE: Convert the host name to a hexadecimal string and include
    #       it in the result in an attempt to make it more unique in
    #       both time and space.
    #
    binary scan [info host] c* host
    set host [eval [list format [string repeat %X [llength $host]]] $host]
    #
    # NOTE: Build the final result with the [format] command, converting
    #       all the pieces to hexadecimal (except the host, which is
    #       already hexadecimal).
    #
    set pid [pid]; set seconds [clock seconds]; set clicks [clock clicks]
    return [appendArgs \
        $host [format [appendArgs % [getLengthModifier $pid] X% \
        [getLengthModifier $seconds] X% [getLengthModifier $clicks] X% \
        [getLengthModifier $random] X] $pid $seconds $clicks $random]]
  }
}
proc getDefaultTestLogPath { executable } {
  #
  # NOTE: Returns the default directory for test log files.  Normally,
  #       this is the configured temporary directory; when that is
  #       forbidden via "::no(temporaryTestLog)", the directory of the
  #       specified executable (or, on .NET Core, of the assemblies)
  #       is used instead.
  #
  if {![info exists ::no(temporaryTestLog)]} then {
    return [getTemporaryPath]
  }
  #
  # HACK: It seems that .NET Core considers itself to be the
  #       executable-of-record for the process; in that case,
  #       relocate the log file to be nearer the assemblies.
  #
  if {[isEagle] && [isDotNetCore]} then {
    return [info binary]
  }
  return [file dirname $executable]
}
proc getTestLogPath {} {
  #
  # NOTE: Returns the configured directory for test log files, or an
  #       empty string if it has not been set.
  #
  # BUGFIX: The global variable must be referenced as "::test_log_path"
  #         here; previously, the expression read the (nonexistent)
  #         local variable "test_log_path", raising an error whenever
  #         the global variable actually existed.  This also makes the
  #         procedure consistent with [getTestLog] and [getTestRunId].
  #
  return [expr {[info exists ::test_log_path] ? $::test_log_path : ""}]
}
proc getTestLogId {} {
  #
  # NOTE: Returns the test log identifier, prefixed with a period, for
  #       insertion into a log file name; otherwise, when the global
  #       "::test_log_id" variable does not exist, an empty string.
  #
  if {[info exists ::test_log_id]} then {
    set result ""
    append result . $::test_log_id
    return $result
  }
  return ""
}
proc getDefaultTestLog {} {
  #
  # NOTE: Builds and returns the fully normalized default test log
  #       file name, e.g. "<dir>/<exeTail>[.<logId>].test.<pid>.log",
  #       honoring the "::test_log_path" override when present.
  #
  set executable [info nameofexecutable]
  if {![info exists ::test_log_path]} then {
    set path [getDefaultTestLogPath $executable]
  } else {
    set path $::test_log_path
  }
  set fileNameOnly [appendArgs \
      [file tail $executable] [getTestLogId] .test. [pid] .log]
  return [file normalize [file join $path $fileNameOnly]]
}
proc getTestLog {} {
  #
  # NOTE: Returns the configured test log file name, if set; otherwise,
  #       returns an empty string.
  #
  if {[info exists ::test_log]} then {
    return $::test_log
  }
  return ""
}
proc getLastTestLog {} {
  #
  # NOTE: Returns the most recently modified test log file other than
  #       the current one, by scanning for sibling log files whose
  #       names differ only in the embedded numeric (process Id)
  #       portion.  Returns an empty string when nothing suitable is
  #       found.
  #
  # NOTE: Use the configured log file name -OR- what the configured
  #       log file name would be, by default, if it actually existed.
  #
  if {[info exists ::test_log]} then {
    set logFileName $::test_log
  } else {
    set logFileName [getDefaultTestLog]
  }
  set logFileName [file normalize $logFileName]
  set logTime [expr {[file exists $logFileName] ? \
      [file mtime $logFileName] : 0}]
  #
  # NOTE: Make the log file name into a pattern we can use to find
  #       the related log files (i.e. replace the embedded ".<pid>."
  #       digits with a "*" wildcard).  When the name contains no
  #       such digits, fall through and return an empty string.
  #
  if {[regsub -- {\.\d+\.} $logFileName {.*.} pattern]} then {
    #
    # NOTE: Track the best candidate as a {mtime fileName} pair.
    #
    set lastLogFile [list]
    foreach fileName [findFiles $pattern] {
      #
      # NOTE: Skip the current test log file, if found.
      #
      if {[isSameFileName $fileName $logFileName]} then {
        continue
      }
      #
      # NOTE: When was this log file last modified?
      #
      set time [file mtime $fileName]
      #
      # NOTE: Check if there has been no log file seen -OR- this
      #       log file has the latest modified time seen.
      #
      if {[llength $lastLogFile] == 0 || \
          $time > [lindex $lastLogFile 0]} then {
        #
        # NOTE: This is now the latest log file seen.
        #
        set lastLogFile [list $time $fileName]
      }
    }
    #
    # NOTE: Either return the last log file seen, if any -OR- the
    #       configured log file, if it actually exists.
    #
    if {[llength $lastLogFile] > 0} then {
      return [lindex $lastLogFile 1]
    } elseif {$logTime != 0} then {
      return $logFileName
    }
  }
  return ""
}
proc getTestSuite {} {
  #
  # NOTE: Determine the effective test suite name and return it.  The
  #       search order is: the "-suite" test flag, then the global
  #       "::test_suite" variable, then a built-in default based on
  #       whether we are running in Eagle or native Tcl.
  #
  if {[info exists ::test_flags(-suite)] && \
      [string length $::test_flags(-suite)] > 0} then {
    #
    # NOTE: The test suite name has been manually overridden via the
    #       test flags; therefore, use it.
    #
    return $::test_flags(-suite)
  }
  if {[info exists ::test_suite]} then {
    #
    # NOTE: Use the test suite name.  The default value is set by the
    #       test suite prologue; however, this may have been overridden.
    #
    return $::test_suite
  }
  if {[isEagle]} then {
    return "Eagle Test Suite for Eagle"
  }
  return "Eagle Test Suite for Tcl"
}
proc getTestSuiteFullName {} {
  #
  # NOTE: Returns the test suite name combined with the script file it
  #       was detected from, formatted as "fileName (suiteName)".  Any
  #       piece that cannot be determined is shown as "<none>".
  #
  set fileName ""
  if {[isEagle]} then {
    #
    # NOTE: In Eagle, try to locate the test suite script file in the
    #       call stack first.
    #
    set fileName [probeForScriptFileName [list \
        [file join * prologue.eagle] [file join * epilogue.eagle] \
        [file join * Test1.0 *]]]
  }
  if {[string length $fileName] == 0 && \
      [info exists ::test_suite_file]} then {
    set fileName $::test_suite_file
  }
  if {[string length $fileName] == 0} then {
    set fileName [info script]
  }
  if {[string length $fileName] == 0} then {
    set fileName <none>
  }
  set suiteName [getTestSuite]
  if {[string length $suiteName] == 0} then {
    set suiteName <none>
  }
  return [appendArgs $fileName " (" $suiteName )]
}
proc getTestMachine {} {
  #
  # NOTE: Determine the effective test machine and return it.  The
  #       search order is: the "-machine" test flag, the global
  #       "::test_machine" variable, then the platform machine.  If
  #       none are available, an empty string is returned.
  #
  if {[info exists ::test_flags(-machine)] && \
      [string length $::test_flags(-machine)] > 0} then {
    #
    # NOTE: The test machine has been manually overridden via the test
    #       flags; therefore, use it.
    #
    return $::test_flags(-machine)
  }
  if {[info exists ::test_machine]} then {
    #
    # NOTE: Use the test machine.  The default value is set by the test
    #       suite prologue; however, this may have been overridden.
    #
    return $::test_machine
  }
  if {[info exists ::tcl_platform(machine)]} then {
    #
    # NOTE: Use the build machine of Eagle itself.
    #
    return $::tcl_platform(machine)
  }
  return ""
}
proc getTestPlatform { {architecture false} } {
  #
  # NOTE: Determine the effective test platform and return it.  The
  #       search order is: the "-platform" test flag, the global
  #       "::test_platform" variable, then a value derived from the
  #       machine architecture.  If the platform cannot be determined,
  #       an empty string is returned.
  #
  if {[info exists ::test_flags(-platform)] && \
      [string length $::test_flags(-platform)] > 0} then {
    #
    # NOTE: The test platform has been manually overridden via the test
    #       flags; therefore, use it.
    #
    return $::test_flags(-platform)
  }
  if {[info exists ::test_platform]} then {
    #
    # NOTE: Use the test platform.  The default value is set by the test
    #       suite prologue; however, this may have been overridden.
    #
    return $::test_platform
  }
  #
  # NOTE: Without a machine name, the platform cannot be derived.
  #
  set machine [getTestMachine]
  if {[string length $machine] == 0} then {
    return ""
  }
  return [machineToPlatform $machine $architecture]
}
proc getTestConfiguration {} {
  #
  # NOTE: Determine the effective test configuration and return it.
  #       The search order is: the "-configuration" test flag, the
  #       global "::test_configuration" variable, then the build
  #       configuration of Eagle itself (typically "Debug" or
  #       "Release").  Otherwise, an empty string is returned.
  #
  if {[info exists ::test_flags(-configuration)] && \
      [string length $::test_flags(-configuration)] > 0} then {
    #
    # NOTE: The test configuration has been manually overridden via the
    #       test flags; therefore, use it.
    #
    return $::test_flags(-configuration)
  }
  if {[info exists ::test_configuration]} then {
    #
    # NOTE: Use the test configuration.  The default value is set by the
    #       test suite prologue; however, this may have been overridden.
    #
    return $::test_configuration
  }
  if {[info exists ::eagle_platform(configuration)]} then {
    #
    # NOTE: Use the build configuration of Eagle itself.
    #
    return $::eagle_platform(configuration)
  }
  return ""
}
proc getTestNamePrefix {} {
  #
  # NOTE: Determine the effective test name prefix and return it.  The
  #       search order is: the "-namePrefix" test flag, then the
  #       global "::test_name_prefix" variable.  Otherwise, an empty
  #       string is returned.
  #
  if {[info exists ::test_flags(-namePrefix)] && \
      [string length $::test_flags(-namePrefix)] > 0} then {
    #
    # NOTE: The test name prefix has been manually overridden via the
    #       test flags; therefore, use it.
    #
    return $::test_flags(-namePrefix)
  }
  if {[info exists ::test_name_prefix] && \
      [string length $::test_name_prefix] > 0} then {
    #
    # NOTE: Use the default test name prefix.
    #
    return $::test_name_prefix
  }
  return ""
}
proc getTestSuffix {} {
  #
  # NOTE: Determine the effective test suffix and return it.  The
  #       search order is: the "-suffix" test flag, the global
  #       "::test_suffix" variable (no default; by default, it does
  #       not exist), then the build "text" of Eagle itself, then the
  #       build suffix of Eagle itself (both typically "NetFx20",
  #       "NetFx40", etc. and empty by default).  Otherwise, an empty
  #       string is returned.
  #
  if {[info exists ::test_flags(-suffix)] && \
      [string length $::test_flags(-suffix)] > 0} then {
    #
    # NOTE: The test suffix has been manually overridden via the test
    #       flags; therefore, use it.
    #
    return $::test_flags(-suffix)
  }
  if {[info exists ::test_suffix]} then {
    return $::test_suffix
  }
  #
  # NOTE: Check the Eagle platform elements, preferring "text" over
  #       "suffix"; both must be non-empty to be used.
  #
  foreach element [list text suffix] {
    if {[info exists ::eagle_platform($element)] && \
        [string length $::eagle_platform($element)] > 0} then {
      return $::eagle_platform($element)
    }
  }
  return ""
}
proc getTestUncountedLeaks {} {
  #
  # NOTE: Returns the configured list of statistic names that should
  #       not be counted as leaks, or an empty list when unconfigured.
  #
  if {![info exists ::test_uncounted_leaks] || \
      [string length $::test_uncounted_leaks] == 0} then {
    return [list]
  }
  return $::test_uncounted_leaks
}
proc getRuntimeAssemblyName {} {
  #
  # NOTE: Returns the full assembly name of the core runtime assembly
  #       (mscorlib or System.Private.CoreLib) when running in Eagle;
  #       otherwise, returns an empty string, because native Tcl has
  #       no runtime assembly.
  #
  if {[isEagle]} then {
    if {[isDotNetCore]} then {
      if {[llength [info commands object]] > 0} then {
        #
        # HACK: The core runtime assembly (i.e. the one containing
        #       System.Object, et al) must have already been loaded
        #       (?), so just abuse the [object load] sub-command to
        #       return its assembly name.
        #
        return [lindex [object load System.Private.CoreLib] 0]
      } else {
        #
        # HACK: The [object] command is unavailable, just fake it.
        #
        return "System.Private.CoreLib, Version=4.0.0.0,\
Culture=neutral, PublicKeyToken=7cec85d7bea7798e"
      }
    } else {
      if {[llength [info commands object]] > 0} then {
        #
        # HACK: The core runtime assembly (i.e. the one containing
        #       System.Object, et al) must have already been loaded
        #       (?), so just abuse the [object load] sub-command to
        #       return its assembly name.
        #
        return [lindex [object load mscorlib] 0]
      } else {
        #
        # HACK: The [object] command is unavailable, just fake it,
        #       guessing the version from the reported CLR version.
        #
        if {[info exists ::eagle_platform(runtimeVersion)] && \
            [string index $::eagle_platform(runtimeVersion) 0] >= 4} then {
          #
          # BUGBUG: Does not handle a major CLR version greater than
          #         four (4), nor a two digit major version, because
          #         only the first character is compared.
          #
          return "mscorlib, Version=4.0.0.0, Culture=neutral,\
PublicKeyToken=b77a5c561934e089"
        } else {
          return "mscorlib, Version=2.0.0.0, Culture=neutral,\
PublicKeyToken=b77a5c561934e089"
        }
      }
    }
  } else {
    #
    # HACK: Native Tcl has no runtime assembly name as it is native.
    #
    return ""
  }
}
proc getTestAssemblyName {} {
  #
  # NOTE: Returns the simple name of the Eagle test assembly (i.e. the
  #       portion before the first comma of its full assembly name);
  #       in native Tcl, the constant "Eagle" is returned instead.
  #
  if {![isEagle]} then {
    return Eagle
  }
  set assembly [lindex [info assembly] 0]
  return [lindex [split $assembly ,] 0]
}
#
# NOTE: This procedure should return non-zero if the [exec] command may be
# used by the specified test package procedure.
#
proc canTestExec { procName } {
  #
  # NOTE: Returns non-zero if the [exec] command may be used by the
  #       specified test package procedure.  Use of [exec] can be
  #       disabled globally via "::no(exec)" or "::no(canTestExec)",
  #       or per-procedure via "::no(canTestExec.<procName>)".
  #
  if {[info exists ::no(exec)] || \
      [info exists ::no(canTestExec)]} then {
    return false
  }
  if {[string length $procName] > 0 && \
      [info exists "::no(canTestExec.$procName)"]} then {
    return false
  }
  return true
}
proc testExec { commandName options args } {
  #
  # NOTE: Executes an external command (with the given options and
  #       extra arguments) in the context of the caller, logging it
  #       to the test channel first.  Raises an error instead when
  #       [exec] use has been disabled for this procedure.
  #
  set command [list exec]
  foreach option $options {
    lappend command $option
  }
  lappend command -- $commandName
  foreach argument $args {
    lappend command $argument
  }
  set procName [lindex [info level [info level]] 0]
  if {[canTestExec $procName]} then {
    tputs $::test_channel [appendArgs "---- running command: " $command \n]
    return [uplevel 1 $command]
  }
  tputs $::test_channel [appendArgs "---- skipping command: " $command \n]
  error "test use of \[$procName\] has been disabled"
}
proc testClrExec { commandName options args } {
  #
  # NOTE: Executes a CLR (managed) executable in the context of the
  #       caller, automatically prefixing the command line with the
  #       "mono" or "dotnet exec" host when running on Mono or .NET
  #       Core, respectively.  The command is logged to the test
  #       channel first; an error is raised instead when [exec] use
  #       has been disabled for this procedure.
  #
  set command [list exec]
  if {[llength $options] > 0} then {eval lappend command $options}
  lappend command --
  #
  # HACK: Assume that Mono is somewhere along the PATH.
  #
  if {[isMono]} then {
    # NOTE: The managed executable name is quoted because it becomes
    #       an argument to the "mono" host process.
    lappend command mono \
        [appendArgs \" [file nativename $commandName] \"]
  } else {
    #
    # HACK: When running on .NET Core, need to insert "dotnet exec"
    #       command line arguments before command to execute.
    #
    if {[isDotNetCore]} then {
      lappend command dotnet exec \
          [appendArgs \" [file nativename $commandName] \"]
    } else {
      lappend command $commandName
    }
  }
  if {[llength $args] > 0} then {eval lappend command $args}
  set procName [lindex [info level [info level]] 0]
  if {![canTestExec $procName]} then {
    tputs $::test_channel [appendArgs "---- skipping command: " $command \n]
    error "test use of \[$procName\] has been disabled"
  } else {
    tputs $::test_channel [appendArgs "---- running command: " $command \n]
    return [uplevel 1 $command]
  }
}
proc execTestShell { options args } {
  #
  # NOTE: Executes a nested test shell (via [execShell]) in the context
  #       of the caller, logging the equivalent command line to the
  #       test channel first.  Raises an error instead when [exec] use
  #       has been disabled for this procedure.
  #
  set procName [lindex [info level [info level]] 0]
  #
  # NOTE: Build the command line detail used by both log messages.
  #
  set detail [string trim [appendArgs \
      $options " " -- " \"" [info nameofexecutable] "\" " $args]]
  if {[canTestExec $procName]} then {
    tputs $::test_channel [appendArgs \
        "---- running nested shell: exec " $detail \n]
    return [uplevel 1 execShell [list $options] $args]
  }
  tputs $::test_channel [appendArgs \
      "---- skipping nested shell: exec " $detail \n]
  error "test use of \[$procName\] has been disabled"
}
proc isRandomOrder {} {
  #
  # NOTE: Returns non-zero if the tests should be run in random order,
  #       i.e. "::test_random_order" exists and is a true boolean.
  #
  set value false
  if {[info exists ::test_random_order]} then {
    set value $::test_random_order
  }
  return [expr {[string is boolean -strict $value] && $value}]
}
proc isBreakOnLeak {} {
  #
  # NOTE: Returns non-zero if the debugger should be entered when a
  #       leak is detected, i.e. "::test_break_on_leak" exists and is
  #       a true boolean.
  #
  set value false
  if {[info exists ::test_break_on_leak]} then {
    set value $::test_break_on_leak
  }
  return [expr {[string is boolean -strict $value] && $value}]
}
proc isBreakOnDemand {} {
  #
  # NOTE: Returns non-zero if on-demand debugger breaks are enabled,
  #       i.e. the "isBreakOnDemand" environment variable exists and
  #       is a true boolean.
  #
  global env
  set value false
  if {[info exists env(isBreakOnDemand)]} then {
    set value $env(isBreakOnDemand)
  }
  return [expr {[string is boolean -strict $value] && $value}]
}
proc isStopOnFailure {} {
  #
  # NOTE: Returns non-zero if the test suite should stop on the first
  #       failure, i.e. "::test_stop_on_failure" exists and is a true
  #       boolean.
  #
  set value false
  if {[info exists ::test_stop_on_failure]} then {
    set value $::test_stop_on_failure
  }
  return [expr {[string is boolean -strict $value] && $value}]
}
proc isStopOnLeak {} {
  #
  # NOTE: Returns non-zero if the test suite should stop when a leak
  #       is detected, i.e. "::test_stop_on_leak" exists and is a
  #       true boolean.
  #
  set value false
  if {[info exists ::test_stop_on_leak]} then {
    set value $::test_stop_on_leak
  }
  return [expr {[string is boolean -strict $value] && $value}]
}
proc isExitOnComplete {} {
  #
  # NOTE: Returns non-zero if the shell should exit when the test
  #       suite completes, i.e. "::test_exit_on_complete" exists and
  #       is a true boolean.
  #
  set value false
  if {[info exists ::test_exit_on_complete]} then {
    set value $::test_exit_on_complete
  }
  return [expr {[string is boolean -strict $value] && $value}]
}
proc returnInfoScript {} {
  #
  # NOTE: Returns the [info script] value as seen from inside this
  #       procedure (useful for testing script name tracking).
  #
  set fileName [info script]
  return $fileName
}
proc runTestPrologue {} {
  #
  # NOTE: Sources the standard test prologue file "prologue.eagle"
  #       (from the directory named by "::test_path") in the context
  #       of the caller, after recording the calling script as the
  #       primary test suite file.
  #
  # NOTE: Verify that the global test path variable is available.
  #
  if {![info exists ::test_path]} then {
    error "cannot run test prologue, \"::test_path\" must be set"
  }
  #
  # NOTE: Reset the primary test suite file name to our caller.
  #
  if {![info exists ::no(testSuiteFile)]} then {
    set ::test_suite_file [info script]
  }
  #
  # HACK: We do not want to force every third-party test suite
  #       to come up with a half-baked solution to finding its
  #       own files.
  #
  if {![info exists ::no(prologue.eagle)] && ![info exists ::path]} then {
    set ::path [file normalize [file dirname [info script]]]
  }
  #
  # NOTE: Evaluate the standard test prologue in the context of
  #       the caller.
  #
  uplevel 1 [list source [file join $::test_path prologue.eagle]]
}
proc runTestEpilogue {} {
  #
  # NOTE: Sources the standard test epilogue file "epilogue.eagle"
  #       (from the directory named by "::test_path") in the context
  #       of the caller, then removes the "::path" variable that may
  #       have been set by [runTestPrologue].
  #
  # NOTE: Verify that the global test path variable is available.
  #
  if {![info exists ::test_path]} then {
    error "cannot run test epilogue, \"::test_path\" must be set"
  }
  #
  # NOTE: Reset the primary test suite file name to our caller.
  #
  if {![info exists ::no(testSuiteFile)]} then {
    set ::test_suite_file [info script]
  }
  #
  # NOTE: Evaluate the standard test epilogue in the context of
  #       the caller.
  #
  uplevel 1 [list source [file join $::test_path epilogue.eagle]]
  #
  # HACK: We do not want to force every third-party test suite
  #       to come up with a half-baked solution to finding its
  #       own files.
  #
  if {![info exists ::no(epilogue.eagle)] && [info exists ::path]} then {
    unset ::path
  }
}
proc hookPuts {} {
  #
  # NOTE: Intercepts the global [puts] command so that output destined
  #       for stdout is redirected to the test channel via [tputs].
  #       The original [puts] is saved as "::tcl::save::puts" and may
  #       be restored later via [unhookPuts].
  #
  # NOTE: This code was stolen from "tcltest" and heavily modified to
  #       work with Eagle.
  #
  proc [namespace current]::testPuts { args } {
    switch [llength $args] {
      1 {
        #
        # NOTE: Only the string to be printed is specified (stdout).
        #
        return [tputs $::test_channel [appendArgs [lindex $args 0] \n]]
      }
      2 {
        #
        # NOTE: Either -nonewline or channelId has been specified.
        #
        if {[lindex $args 0] eq "-nonewline"} then {
          return [tputs $::test_channel [lindex $args end]]
        } else {
          set channel [lindex $args 0]
          set newLine \n
        }
      }
      3 {
        #
        # NOTE: Both -nonewline and channelId are specified, unless
        #       it's an error.  The -nonewline option is supposed to
        #       be argv[0].  Any other 3 argument form falls through
        #       (channel stays unset) to the saved [puts] below.
        #
        if {[lindex $args 0] eq "-nonewline"} then {
          set channel [lindex $args 1]
          set newLine ""
        }
      }
    }
    #
    # NOTE: If a channel was captured above and it is stdout, write
    #       the output to the test channel instead.
    #
    if {[info exists channel] && $channel eq "stdout"} then {
      #
      # NOTE: Write output for stdout to the test channel.
      #
      return [tputs $::test_channel [appendArgs [lindex $args end] \
          $newLine]]
    }
    #
    # NOTE: If we haven't returned by now, we don't know how to
    #       handle the input.  Let puts handle it.
    #
    return [eval ::tcl::save::puts $args]
  }
  rename ::puts ::tcl::save::puts; # save Tcl command
  rename [namespace current]::testPuts ::puts; # insert our proc
}
proc unhookPuts {} {
  #
  # NOTE: Undoes the [puts] interception performed by [hookPuts],
  #       restoring the originally saved Tcl [puts] command.  This
  #       must only be called while the hook is actually in place.
  #
  rename ::puts ""; # remove our proc
  rename ::tcl::save::puts ::puts; # restore Tcl command
}
proc runTest { script } {
  #
  # NOTE: Evaluates a single test script in the context of the caller,
  #       handling result display/logging, stop-on-failure processing
  #       and (in Eagle) stale object reference cleanup.  In native
  #       Tcl, [puts] is hooked for the duration so test output goes
  #       to the test channel.  Always returns an empty string.
  #
  # NOTE: This should work properly in both Tcl and Eagle as long as the
  #       "init" script has been evaluated first.
  #
  if {![isEagle]} then {
    hookPuts
  }
  set code [catch {uplevel 1 $script} result]
  set error [expr {$code == 0 ? false : true}]
  if {[isEagle]} then {
    #
    # NOTE: Initially, the call to [tresult] (i.e. [host result]) will
    #       use the actual return code from the test command; however,
    #       if that return code was 3 (i.e. break), that indicates the
    #       test results should be highlighted in yellow -AND- that the
    #       test should still be considered successful even though the
    #       test was skipped.  If the return code was 4 (i.e. continue),
    #       that indicates the test results should be highlighted in
    #       dark yellow -AND- that the test should still be considered
    #       successful because failures are being ignored for it.
    #
    # NOTE(review): Return code 5 is treated the same as 3 below; its
    #               exact meaning is not documented here -- presumably
    #               an Eagle-specific extended return code.  TODO:
    #               confirm.
    #
    set tresultCode $code
    if {$code == 3 || $code == 5} then {
      set code 0; set error false
    } elseif {$code == 4} then {
      set code 0
    }
    #
    # NOTE: If the return code from the test command indicates success
    #       and the test results contain a clear indication of failure,
    #       reset both return codes to indicate that failure.
    #
    if {$code == 0 && [regexp -- {\s==== (.*?) FAILED\s} $result]} then {
      set code 1; set tresultCode $code
    }
    #
    # NOTE: Display and/or log the results for the test that we just
    #       completed.
    #
    if {[shouldWriteTestData $code]} then {
      tresult $tresultCode $result
    } else {
      tlog $result
    }
    #
    # NOTE: If the test failed with an actual error (i.e. not just a
    #       test failure), make sure we do not obscure the error
    #       message with test suite output.
    #
    if {$error} then {
      tputs $::test_channel \n; # emit a blank line.
    }
    #
    # NOTE: If this test failed and the stop-on-failure flag is set,
    #       raise an error now.  If we are being run from inside
    #       runAllTests, this will also serve to signal it to stop
    #       processing further test files.
    #
    if {$code != 0 && [isStopOnFailure]} then {
      tresult Error "OVERALL RESULT: STOP-ON-FAILURE\n"
      unset -nocomplain ::test_suite_running
      error ""; # no message
    }
    #
    # NOTE: Unless forbidden from doing so, attempt to automatically
    #       cleanup any stale (e.g. temporary) object references now.
    #
    if {![info exists ::no(cleanupReferences)]} then {
      catch {object cleanup -references}
    }
  } else {
    if {$error} then {
      #
      # HACK: Prevent spurious errors dealing with [test] command options
      #       that are missing from native Tcl.
      #
      set badOptionPattern {^bad option ".*?":\
must be -body, -cleanup, -constraints, -errorOutput,\
-match, -output, -result, -returnCodes, or -setup$}
      set badMatchValuePattern {^bad -match value ".*?": must be\
exact, glob, or regexp$}
      #
      # NOTE(review): The [isEagle] term below is always false in this
      #               branch (it is the non-Eagle path); the condition
      #               reduces to the two regexp checks.
      #
      if {[isEagle] || \
          (![regexp -- $badOptionPattern $result] && \
          ![regexp -- $badMatchValuePattern $result])} then {
        tputs $::test_channel [appendArgs \
            "ERROR (runTest): " $result \n]
      }
    }
    unhookPuts
  }
  #
  # HACK: Return an empty string here just in case we are being called
  #       via the [testShim] procedure.  Doing this should prevent any
  #       superfluous output from being displayed via [host result] in
  #       the outermost call to this procedure.
  #
  return ""
}
proc testShim { args } {
  #
  # NOTE: Shim installed in place of [test] by [tsource]: forwards the
  #       original arguments to the saved [test] command, wrapped in
  #       our standard [runTest] handler, and returns nothing.
  #
  uplevel 1 [list runTest [concat ::savedTest $args]]
  return ""
}
proc tsource { fileName {prologue true} {epilogue true} } {
  #
  # NOTE: Sources a test file in the context of the caller (which
  #       should be global), optionally surrounded by the standard
  #       test prologue/epilogue, with the [test] command temporarily
  #       replaced by [testShim] so each test runs via [runTest].  Any
  #       error raised by the file is re-raised only after [test] has
  #       been restored.
  #
  # NOTE: Run the test prologue in the context of the caller (which
  #       must be global)?
  #
  if {$prologue} then {
    uplevel 1 runTestPrologue
  }
  #
  # NOTE: Save the original [test] command and setup our test shim in
  #       its place.
  #
  rename ::test ::savedTest
  interp alias {} ::test {} testShim
  #
  # NOTE: Source the specified test file in the context of the caller
  #       (which should be global).  Errors are captured so that the
  #       shim can always be removed afterward.
  #
  set code [catch {uplevel 1 [list source $fileName]} result]
  set error [expr {$code == 0 ? false : true}]
  #
  # NOTE: Remove our test shim and restore the original (saved) [test]
  #       command.
  #
  interp alias {} ::test {}
  rename ::savedTest ::test
  #
  # NOTE: Run the test epilogue in the context of the caller (which
  #       must be global)?
  #
  if {$epilogue} then {
    uplevel 1 runTestEpilogue
  }
  #
  # NOTE: If the test raised an error, re-raise it now; otherwise,
  #       just return the result.
  #
  if {$error} then {
    unset -nocomplain ::test_suite_running
    error $result
  } else {
    return $result
  }
}
proc recordTestStatistics { varName index } {
  #
  # NOTE: Record counts of all object types that we track.  The counts
  #       are stored into the caller's array (varName) using keys of
  #       the form "<statistic>,<index>", so that two snapshots (e.g.
  #       "before" and "after") can later be compared by the procedure
  #       [reportTestStatistics] to detect leaks.
  #
  upvar 1 $varName array
  ###########################################################################
  if {![info exists array(uncounted,$index)]} then {
    set array(uncounted,$index) [getTestUncountedLeaks]
  }
  ###########################################################################
  set array(time,$index) [clock seconds]
  set array(afters,$index) [llength [after info]]
  set array(variables,$index) [llength [info globals]]
  set array(commands,$index) [llength [info commands]]
  set array(procedures,$index) [llength [info procs]]
  set array(namespaces,$index) [llength [namespace children ::]]
  ###########################################################################
  if {[info exists ::test_path]} then {
    set array(files,$index) [llength [getFiles $::test_path *]]
  } else {
    set array(files,$index) 0; # NOTE: Information not available.
  }
  ###########################################################################
  set array(temporaryFiles,$index) [llength [getFiles [getTemporaryPath] *]]
  set array(channels,$index) [llength [file channels]]
  set array(aliases,$index) [llength [interp aliases]]
  set array(interpreters,$index) [llength [interp slaves]]
  #
  # BUGFIX: The "env" array must be referenced via the global namespace
  #         (i.e. "::env") because this is a procedure body; previously,
  #         [array names env] referred to a nonexistent local variable
  #         and the "environment" count was always recorded as zero.
  #         (Compare [isBreakOnDemand], which declares "global env".)
  #
  set array(environment,$index) [llength [array names ::env]]
  set array(loaded,$index) [llength [info loaded]]
  ###########################################################################
  #
  # NOTE: These native resource types cannot be positively checked
  #       for leaks (i.e. because the "leak" may be from an external
  #       process).
  #
  if {![info exists ::no(uncountedTemporaryFiles)]} then {
    lappend array(uncounted,$index) temporaryFiles
  }
  ###########################################################################
  if {[isEagle]} then {
    #
    # NOTE: Support for some or all of these entity types may not be
    #       present in the interpreter; initialize all these counts
    #       to zero and then try to query each one individually below
    #       wrapped in a catch.
    #
    set array(previousPid,$index) 0
    set array(scopes,$index) 0
    set array(assemblies,$index) 0
    set array(processes,$index) 0
    set array(objects,$index) 0
    set array(objectCallbacks,$index) 0
    set array(objectTypes,$index) 0
    set array(objectInterfaces,$index) 0
    set array(objectNamespaces,$index) 0
    catch {set array(previousPid,$index) [expr {[info previouspid] != 0}]}
    catch {set array(scopes,$index) [llength [scope list]]}
    catch {set array(assemblies,$index) [llength [object assemblies]]}
    catch {set array(processes,$index) [llength [getProcesses ""]]}
    catch {set array(objects,$index) [llength [info objects]]}
    catch {set array(objectCallbacks,$index) [llength [info callbacks]]}
    catch {set array(objectTypes,$index) [llength [object types]]}
    catch {set array(objectInterfaces,$index) [llength [object interfaces]]}
    catch {set array(objectNamespaces,$index) [llength [object namespaces]]}
    #########################################################################
    #
    # NOTE: Support for some or all of these entity types may not be
    #       present in the interpreter; initialize all these counts
    #       to zero and then try to query each one individually below
    #       wrapped in a catch.
    #
    set array(connections,$index) 0
    set array(transactions,$index) 0
    set array(modules,$index) 0
    set array(delegates,$index) 0
    set array(tcl,$index) 0
    set array(tclInterps,$index) 0
    set array(tclThreads,$index) 0
    set array(tclCommands,$index) 0
    set array(scriptThreads,$index) 0
    catch {set array(connections,$index) [llength [info connections]]}
    catch {set array(transactions,$index) [llength [info transactions]]}
    catch {set array(modules,$index) [llength [info modules]]}
    catch {set array(delegates,$index) [llength [info delegates]]}
    if {[llength [info commands tcl]] > 0} then {
      catch {set array(tcl,$index) [tcl ready]}
      catch {set array(tclInterps,$index) [llength [tcl interps]]}
      catch {set array(tclThreads,$index) [llength [tcl threads]]}
      catch {set array(tclCommands,$index) [llength [tcl command list]]}
    }
    #
    # NOTE: Grab the number of active threads that are active because
    #       of ScriptThread object instances.  This only works if Eagle
    #       is Beta 31 or higher.
    #
    catch {
      set array(scriptThreads,$index) \
          [object invoke -flags +NonPublic ScriptThread activeCount]
    }
    #########################################################################
    #
    # NOTE: These managed resource types cannot be positively checked
    #       for leaks (i.e. because the "leak" may be from an external
    #       process).
    #
    if {![info exists ::no(uncountedAssemblies)]} then {
      lappend array(uncounted,$index) assemblies
    }
    if {![info exists ::no(uncountedProcesses)]} then {
      lappend array(uncounted,$index) processes
    }
  }
}
proc reportTestStatistics {
  channel fileName stop statsVarName filesVarName {quiet false} } {
  #
  # NOTE: Compares the "before" and "after" statistic snapshots (as
  #       recorded by [recordTestStatistics] into the array named by
  #       statsVarName) and reports anything that leaked to the given
  #       channel.  File names with leaking tests are added to the
  #       list named by filesVarName.  Returns [list leak <count>],
  #       where statistics present in the "uncounted" lists are still
  #       reported but excluded from the count.
  #
  set statistics [list afters variables commands procedures namespaces \
      files temporaryFiles channels aliases interpreters environment \
      loaded]
  if {[isEagle]} then {
    #
    # TODO: For now, tracking "leaked" assemblies is meaningless because
    #       the .NET Framework has no way to unload them without tearing
    #       down the entire application domain.
    #
    lappend statistics \
        previousPid scopes assemblies processes objects objectCallbacks \
        objectTypes objectInterfaces objectNamespaces connections \
        transactions modules delegates tcl tclInterps tclThreads \
        tclCommands scriptThreads
  }
  #
  # NOTE: Show what leaked, if anything.
  #
  set count 0; upvar 1 $statsVarName array
  foreach statistic $statistics {
    if {![info exists array($statistic,after)]} then {
      if {!$quiet} then {
        tputs $channel [appendArgs "==== \"" $fileName "\" MISSING " \
            $statistic " AFTER\n"]
      }
      continue
    }
    if {![info exists array($statistic,before)]} then {
      if {!$quiet} then {
        tputs $channel [appendArgs "==== \"" $fileName "\" MISSING " \
            $statistic " BEFORE\n"]
      }
      continue
    }
    if {$array($statistic,after) > $array($statistic,before)} then {
      lappend array(statistics,leaked) $statistic
      if {!$quiet} then {
        tputs $channel [appendArgs "==== \"" $fileName "\" LEAKED " \
            $statistic \n]
      }
      if {[info exists array($statistic,before,list)]} then {
        if {!$quiet} then {
          tputs $channel [appendArgs "---- " $statistic " BEFORE: " \
              [formatList $array($statistic,before,list)] \n]
        }
      }
      if {[info exists array($statistic,after,list)]} then {
        if {!$quiet} then {
          tputs $channel [appendArgs "---- " $statistic " AFTER: " \
              [formatList $array($statistic,after,list)] \n]
        }
      }
      #
      # NOTE: Skip counting this statistic if it was marked as
      #       "uncounted" in either snapshot.
      #
      if {[info exists array(uncounted,before)] && \
          [lsearch -exact $array(uncounted,before) $statistic] != -1} then {
        continue
      }
      if {[info exists array(uncounted,after)] && \
          [lsearch -exact $array(uncounted,after) $statistic] != -1} then {
        continue
      }
      incr count
    }
  }
  #
  # NOTE: Make sure this file name is recorded in the list of file names with
  #       leaking tests.
  #
  upvar 1 $filesVarName fileNames
  if {$count > 0 && \
      [lsearch -exact $fileNames [file tail $fileName]] == -1} then {
    lappend fileNames [file tail $fileName]
  }
  #
  # NOTE: Disable test suite interaction in "quiet" mode.  Just return the
  #       leak count.
  #
  if {!$quiet} then {
    #
    # NOTE: If we are supposed to stop or break into the debugger whenever
    #       a leak is detected, do it now.
    #
    if {$count > 0} then {
      #
      # BUGFIX: If we are already stopping (e.g. due to a test failure),
      #         do not try to stop again.
      #
      if {!$stop && [isStopOnLeak]} then {
        tresult Error "OVERALL RESULT: STOP-ON-LEAK\n"
        unset -nocomplain ::test_suite_running
        error ""; # no message
      } elseif {[isBreakOnLeak]} then {
        testDebugBreak
      }
    }
  }
  return [list leak $count]
}
proc formatList { list {default ""} {columns 1} } {
if {[catch {
set result ""
set count 1
foreach item $list {
if {[incr count -1] == 0} then {
set count $columns
append result \n
}
append result \t
if {[string length $item] > 0} then {
append result $item
} else {
append result <noItem>
}
}
}] == 0} then {
return [expr {[string length $result] > 0 ? $result : $default}]
} else {
return ""
}
}
proc formatListAsDict { list {default ""} } {
if {[catch {
set result ""
foreach {name value} $list {
append result \n\t
if {[string length $name] > 0} then {
append result $name
} else {
append result <noName>
}
append result ": "
if {[string length $value] > 0} then {
append result $value
} else {
append result <noValue>
}
}
}] == 0} then {
return [expr {[string length $result] > 0 ? $result : $default}]
} else {
return ""
}
}
proc pathToRegexp { path {list false} } {
  #
  # NOTE: Escapes all characters in the specified path that have any
  #       special meaning to the regular expression engine.  Mostly,
  #       the characters of interest are the directory separator and
  #       the file extension separator (e.g. backslash and period on
  #       Windows and/or forward slash and period on Unix); since a
  #       forward slash needs no escaping, Windows paths are the
  #       harder case.  When the "list" argument is non-zero, the
  #       path is first list-quoted via [list].
  #
  set map [list \
      \\ \\\\ \$ \\\$ ( \\( ) \\) * \\* + \\+ - \\- . \\. \
      ? \\? \[ \\\[ \] \\\] ^ \\^ \{ \\\{ \} \\\}]
  if {$list} then {
    set subject [list $path]
  } else {
    set subject $path
  }
  return [string map $map $subject]
}
proc assemblyNameToRegexp { assemblyName {list false} } {
  #
  # NOTE: Escape the regexp-special characters that can legally appear
  #       in an assembly name (normally just the period).
  #
  # HACK: For now, this simply reuses [pathToRegexp], which escapes a
  #       superset of the characters that matter here.
  #
  return [pathToRegexp $assemblyName $list]
}
proc inverseLsearchGlob { noCase patterns element } {
  #
  # NOTE: The inverse of [lsearch -glob]: match a single element
  #       against a list of glob patterns, returning the index of the
  #       first matching pattern or -1 when none match.  When $noCase
  #       is non-zero, matching is case-insensitive.
  #
  set index 0
  foreach pattern $patterns {
    if {$noCase} then {
      set matched [string match -nocase $pattern $element]
    } else {
      set matched [string match $pattern $element]
    }
    if {$matched} then {
      return $index
    }
    incr index
  }
  return -1
}
proc removePathFromFileNames { path fileNames } {
  #
  # NOTE: For every file name whose (normalized) directory equals the
  #       (normalized) supplied path, strip the directory and keep only
  #       the tail; all other file names are passed through unchanged.
  #
  set normalizedPath [file normalize $path]
  set trimmed [list]
  foreach fileName $fileNames {
    set directory [file normalize [file dirname $fileName]]
    if {$directory eq $normalizedPath} then {
      lappend trimmed [file tail $fileName]
    } else {
      lappend trimmed $fileName
    }
  }
  return $trimmed
}
proc formatDecimal { value {places 4} {zeros false} } {
#
# NOTE: Format a floating point value with up to $places digits after
#       the decimal point.  When $zeros is false, trailing zeros (and
#       any trailing decimal point) are removed.  An empty value
#       yields an empty string.
#
#
# NOTE: If the value is an empty string, do nothing and return an empty
# string.
#
if {[string length $value] == 0} then {
return ""
}
#
# NOTE: For now, use slightly different methods for formatting floating
# point numbers for native Tcl and Eagle.
#
if {[isEagle] && [llength [info commands object]] > 0} then {
#
# HACK: This works; however, in order to do this kind of thing cleanly,
# we really need the Tcl [format] command.
#
# NOTE: "{0:0.####}" style .NET format string: "#" keeps only non-zero
# trailing digits while "0" forces all $places digits to appear.
#
set result [object invoke String Format [appendArgs "{0:0." \
[string repeat [expr {$zeros ? "0" : "#"}] $places] "}"] \
[set object [object invoke -create Double Parse $value]]]
unset object; # dispose
} else {
#
# NOTE: See, nice and clean when done in Tcl?
#
set result [format [appendArgs %. $places f] $value]
#
# HACK: Since native Tcl does not appear to expose a method to only
# preserve non-zero trailing digits, we may need to manually
# remove extra trailing zeros.
#
if {!$zeros} then {
#
# NOTE: Remove all trailing zeros and the trailing decimal point,
# if necessary.
#
set result [string trimright [string trimright $result 0] .]
}
}
return $result
}
proc clearTestPercent { channel } {
  #
  # NOTE: Clear the host window title used for progress reporting
  #       (Eagle only).  The channel argument is accepted for
  #       signature parity with [reportTestPercent] but is unused.
  #
  if {![isEagle]} then {
    return
  }
  host title ""
}
proc reportTestPercent {
        channel percent doneFiles totalFiles failedFiles leakedFiles } {
  #
  # NOTE: Emit a one-line progress summary (tests total/failed/skipped
  #       plus file counts supplied by the caller) to the test channel
  #       and, under Eagle, mirror it to the host window title.
  #
  set isEagleHost [isEagle]
  if {$isEagleHost} then {
    set totalTests $::eagle_tests(Total)
    set failedTests $::eagle_tests(Failed)
    set skippedTests $::eagle_tests(Skipped)
  } else {
    set totalTests $::tcltest::numTests(Total)
    set failedTests $::tcltest::numTests(Failed)
    set skippedTests $::tcltest::numTests(Skipped)
  }
  set status [appendArgs \
      "---- test suite running, about " $percent "% complete (" \
      $totalTests " tests total, " $failedTests " tests failed, " \
      $skippedTests " tests skipped, " $doneFiles " files done, " \
      $totalFiles " files total, " $failedFiles " files failed, " \
      $leakedFiles " files leaked)..."]
  tputs $channel [appendArgs $status \n]
  if {$isEagleHost} then {
    host title $status
  }
}
proc reportArrayGet { varName } {
  #
  # NOTE: Flatten the named array (in the caller's scope) into a
  #       name/value list.  Unless disabled via ::no(reportArrayGet),
  #       the pairs are sorted by value, descending.
  #
  if {[string length $varName] == 0} then {
    return [list]
  }
  upvar 1 $varName array
  if {[info exists ::no(reportArrayGet)]} then {
    return [array get array]
  }
  set pairs [list]
  foreach {name value} [array get array] {
    lappend pairs [list $name $value]
  }
  #
  # HACK: This assumes that we are dealing with integer values.
  #
  set flattened [list]
  foreach pair [lsort -index 1 -integer -decreasing $pairs] {
    lappend flattened [lindex $pair 0] [lindex $pair 1]
  }
  return $flattened
}
proc reportTestStatisticCounts { channel statsVarName } {
  #
  # NOTE: Summarize the leak statistics recorded in the caller's
  #       statistics array (under the "statistics,leaked" element) and
  #       write the per-leak-type counts to the test channel.
  #
  upvar 1 $statsVarName array
  #
  # NOTE: Were any counts recorded during the testing?
  #
  if {[info exists array(statistics,leaked)]} then {
    #
    # NOTE: Process each leak type in the list, recording any duplicates
    #       in the temporary count array.
    #
    foreach statistic $array(statistics,leaked) {
      if {[info exists count($statistic)]} then {
        incr count($statistic)
      } else {
        set count($statistic) 1
      }
    }
    #
    # NOTE: Flatten the temporary count array into a dictionary formatted
    #       list and then possibly display it (i.e. if it actually
    #       contains any data).
    #
    set statistics [reportArrayGet count]
    #
    # BUGFIX: This previously checked [llength statistics], i.e. the
    #         length of the literal string "statistics" (always one),
    #         so the diagnostic was emitted even when empty.
    #
    if {[llength $statistics] > 0} then {
      tputs $channel [appendArgs "---- types of leaks detected: " \
          [formatListAsDict $statistics] \n]
    }
  }
}
proc runAllTests {
channel path fileNames skipFileNames startFileNames stopFileNames } {
#
# NOTE: Evaluate each candidate test file and track the overall
#       results.  Arguments:
#
#       channel --        output channel for diagnostics (via [tputs]).
#       path --           directory prepended to bare file names.
#       fileNames --      list of test file names to process.
#       skipFileNames --  glob patterns; matching files are skipped.
#       startFileNames -- glob patterns; files before the first match
#                         are skipped.
#       stopFileNames --  glob patterns; processing stops after the
#                         first matching file has been run.
#
#
# NOTE: Are we configured to run the test files in random order?
#
if {[isRandomOrder]} then {
set fileNames [lshuffle $fileNames]
}
#
# NOTE: Show the exact arguments we received since they may not
# have been displayed by the caller (or anybody else).
#
if {![info exists ::no(runMetadata)]} then {
tputs $channel [appendArgs "---- test run path: \"" $path \"\n]
tputs $channel [appendArgs "---- test run file names: " \
[formatList [removePathFromFileNames $path $fileNames]] \n]
tputs $channel [appendArgs "---- test run skip file names: " \
[formatList $skipFileNames] \n]
}
#
# NOTE: Keep going unless this becomes true (i.e. if one of the
# test files signals us to stop).
#
set stop false
#
# NOTE: So far, we have run no tests.
#
set count 0
#
# NOTE: So far, no files have had failing or leaking tests.
#
set failed [list]
set leaked [list]
#
# NOTE: Process each file name we have been given by the caller...
#
set total [llength $fileNames]; set lastPercent -1
foreach fileName $fileNames {
#
# NOTE: If configured to break into the debugger before running the
# test file, do it now.
#
if {[isBreakOnDemand]} then {
testDebugBreak
}
#
# NOTE: In terms of files, not tests, what percent done are we now?
#
set percent [formatDecimal \
[expr {$total != 0 ? 100.0 * ($count / double($total)) : 0}]]
if {$percent != $lastPercent} then {
if {![info exists ::no(runPercent)]} then {
reportTestPercent $channel $percent \
$count $total [llength $failed] [llength $leaked]
}
set lastPercent $percent
}
#
# NOTE: If the starting file names have been specified by the caller,
# skip over all the file names before one of them.
#
if {[llength $startFileNames] > 0} then {
if {[inverseLsearchGlob false $startFileNames \
[file tail $fileName]] != -1} then {
#
# NOTE: Now that we found the starting test file name, do not
# skip any more test files.
#
set startFileNames [list]
} else {
#
# NOTE: We have not found the starting test file name yet, skip
# over this test file.
#
continue
}
}
#
# NOTE: If the stopping file names have been specified by the caller,
# skip over all the file names after one of them.
#
if {[llength $stopFileNames] > 0} then {
if {[inverseLsearchGlob false $stopFileNames \
[file tail $fileName]] != -1} then {
#
# NOTE: Now that we found the stopping test file name, do not
# run any more test files.
#
set stopFileNames [list]
#
# NOTE: This will terminate the loop right after the test file
# cleanup code (i.e. at the bottom of the loop).
#
set stop true
}
}
#
# NOTE: Skipping over any file name that matches a pattern in the
# list of file names to skip.
#
if {[inverseLsearchGlob false $skipFileNames \
[file tail $fileName]] == -1} then {
#
# NOTE: Does the file name contain directory information?
#
if {[string length [file dirname $fileName]] <= 1} then {
#
# NOTE: If not, assume it is under the supplied test path.
#
set fileName [file normalize [file join $path $fileName]]
}
#
# NOTE: The "magic" pattern we are looking for to determine if
# a given file is part of the formal test suite.
#
set pattern {^(\s)*runTest .*$}
#
# NOTE: Skip files that are not part of the test suite.
#
set data [readFile $fileName]
#
# NOTE: Check for a match.
#
set match [regexp -line -- $pattern $data]
#
# NOTE: Failing that, in Eagle only, check if the data, when
# interpreted as Unicode, matches the pattern.
#
if {!$match && [isEagle]} then {
set match [regexp -line -- $pattern \
[encoding convertfrom unicode $data]]
}
#
# NOTE: Does this "look" like an actual test suite file?
#
if {$match} then {
#
# BUGFIX: Unset the current test file name so that variable
# accounting works correctly. It will be reset below
# prior to running any actual test(s).
#
unset -nocomplain ::test_file
#
# NOTE: Is resource leak checking explicitly disabled?
#
if {![info exists ::no(leak)]} then {
#
# NOTE: Get "before" resource counts for leak tracking.
#
recordTestStatistics leaks before
}
#
# NOTE: Let the test prologue code know which file we are
# evaluating.
#
set ::test_file $fileName
#
# NOTE: Record failed test count before this file.
#
if {[isEagle]} then {
set before $::eagle_tests(Failed)
} else {
set before $::tcltest::numTests(Failed)
}
#
# NOTE: Evaluate the test file, optionally waiting for a certain
# number of milliseconds before and/or after doing so.
#
if {[catch {
#
# NOTE: Are we being prevented from waiting before the file?
#
if {![info exists ::no(preWait)]} then {
if {[info exists ::test_wait(pre)] && \
[string is integer -strict $::test_wait(pre)]} then {
if {![info exists ::no(runMetadata)]} then {
tputs $channel [appendArgs \
"---- waiting for " $::test_wait(pre) \
" milliseconds before test file...\n"]
}
after $::test_wait(pre); # NOTE: Sleep.
}
}
#
# NOTE: Log that this test file has started.
#
if {![info exists ::no(runStartFile)]} then {
tputs $channel [appendArgs "==== \"" $fileName "\" START\n"]
}
#
# NOTE: Evaluate the file in the context of the caller,
# catching any errors. If an error is raised and
# the stop-on-failure flag is set, assume it was
# a test failure and that we need to stop any and
# all further processing of test files.
#
uplevel 1 [list source $fileName]
#
# NOTE: Log that this test file has ended.
#
if {![info exists ::no(runEndFile)]} then {
tputs $channel [appendArgs "==== \"" $fileName "\" END\n"]
}
#
# NOTE: Are we being prevented from waiting after the file?
#
if {![info exists ::no(postWait)]} then {
if {[info exists ::test_wait(post)] && \
[string is integer -strict $::test_wait(post)]} then {
if {![info exists ::no(runMetadata)]} then {
tputs $channel [appendArgs \
"---- waiting for " $::test_wait(post) \
" milliseconds after test file...\n"]
}
after $::test_wait(post); # NOTE: Sleep.
}
}
} error]} then {
#
# NOTE: Most likely, this error was caused by malformed or
# incorrect code in-between the tests themselves. We
# need to report this.
#
if {![info exists ::no(runErrorFile)]} then {
tputs $channel [appendArgs "==== \"" $fileName "\" ERROR \"" \
$error \"\n]
}
#
# NOTE: Stop further processing after this loop iteration?
#
if {[isStopOnFailure]} then {
#
# NOTE: This will terminate the loop right after the test
# file cleanup code (i.e. at the bottom of the loop).
#
set stop true
#
# BUGFIX: If there are no actual test failures recorded yet,
# make sure there is one now. This is necessary to
# handle the case where an error occurs in a test
# file that does not directly cause at least one of
# its contained tests to fail. Otherwise, the test
# suite will still be stopped; however, an overall
# result of success will be returned by the process.
#
if {[isEagle]} then {
if {$::eagle_tests(Failed) == 0} then {
incr ::eagle_tests(Total)
incr ::eagle_tests(Failed)
}
} else {
if {$::tcltest::numTests(Failed) == 0} then {
incr ::tcltest::numTests(Total)
incr ::tcltest::numTests(Failed)
}
}
} else {
#
# NOTE: At this point, we know the test file had an error that
# probably caused it to skip a bunch of tests -AND- the
# option to stop-testing-on-error is not enabled. That
# being said, we must not simply ignore the error. The
# overall results of the test suite run must now reflect
# the failure. Set a special variable for the epilogue
# to pick up on (later).
#
lappend ::test_suite_errors [list $fileName $error]
}
}
#
# NOTE: Record failed test count after this file.
#
if {[isEagle]} then {
set after $::eagle_tests(Failed)
} else {
set after $::tcltest::numTests(Failed)
}
#
# NOTE: Did this file have any failing tests?
#
if {$after > $before} then {
lappend failed [file tail $fileName]
}
#
# NOTE: In terms of files, not tests, what percent done are we now?
#
set percent [formatDecimal \
[expr {$total != 0 ? 100.0 * ($count / double($total)) : 0}]]
if {$percent != $lastPercent} then {
if {![info exists ::no(runPercent)]} then {
reportTestPercent $channel $percent \
$count $total [llength $failed] [llength $leaked]
}
set lastPercent $percent
}
#
# NOTE: Unset the current test file name, it is no longer
# needed.
#
unset -nocomplain ::test_file
#
# NOTE: Is resource leak checking explicitly disabled?
#
if {![info exists ::no(leak)]} then {
#
# NOTE: Get "after" resource counts for leak tracking.
#
recordTestStatistics leaks after
if {![info exists ::no(runStatistics)]} then {
#
# NOTE: Determine if any resource leaks have occurred and
# output diagnostics as necessary if they have.
#
reportTestStatistics $channel $fileName $stop leaks leaked
}
}
} else {
#
# NOTE: This entire file has been skipped. Record that fact in the
# test suite log file.
#
if {![info exists ::no(runNonTestFile)]} then {
tputs $channel [appendArgs \
"==== \"" $fileName "\" NON_TEST_FILE\n"]
}
}
#
# NOTE: Another file of some kind was processed. It may have been
# skipped; however, that does not matter.
#
incr count
#
# NOTE: In terms of files, not tests, what percent done are we now?
#
set percent [formatDecimal \
[expr {$total != 0 ? 100.0 * ($count / double($total)) : 0}]]
if {$percent != $lastPercent} then {
if {![info exists ::no(runPercent)]} then {
reportTestPercent $channel $percent \
$count $total [llength $failed] [llength $leaked]
}
set lastPercent $percent
}
#
# NOTE: If the test file raised an error (i.e. to indicate a
# test failure with the stop-on-failure flag enabled),
# break out of the test loop now.
#
if {$stop} then {
break
}
} else {
#
# NOTE: This entire file has been skipped. Record that fact in the
# test suite log file.
#
if {![info exists ::no(runSkippedFile)]} then {
tputs $channel [appendArgs "==== \"" $fileName "\" SKIPPED\n"]
}
#
# NOTE: This file was skipped.
#
incr count
}
#
# NOTE: In terms of files, not tests, what percent done are we now?
#
set percent [formatDecimal \
[expr {$total != 0 ? 100.0 * ($count / double($total)) : 0}]]
if {$percent != $lastPercent} then {
if {![info exists ::no(runPercent)]} then {
reportTestPercent $channel $percent \
$count $total [llength $failed] [llength $leaked]
}
set lastPercent $percent
}
}
#
# NOTE: Reset the host title because we may have changed it in the for
# loop (above).
#
if {![info exists ::no(runPercent)]} then {
clearTestPercent $channel
}
if {![info exists ::no(runMetadata)]} then {
tputs $channel [appendArgs "---- sourced " $count " test " \
[expr {$count > 1 ? "files" : "file"}] \n]
#
# NOTE: Show the files that had failing and/or leaking tests.
#
if {[llength $failed] > 0} then {
tputs $channel [appendArgs "---- files with failing tests: " \
[formatList $failed] \n]
}
if {[llength $leaked] > 0} then {
tputs $channel [appendArgs "---- files with leaking tests: " \
[formatList $leaked] \n]
}
}
if {![info exists ::no(runStatisticCounts)]} then {
reportTestStatisticCounts $channel leaks
}
}
proc isTestSuiteRunning {} {
  #
  # NOTE: Return non-zero (1) when the ::test_suite_running flag both
  #       exists and holds a true value; otherwise, return zero.
  #
  set running [info exists ::test_suite_running]
  if {$running} then {
    set running [expr {!!$::test_suite_running}]
  }
  return $running
}
proc getTestChannelOrDefault {} {
  #
  # NOTE: Return the configured test output channel, falling back to
  #       standard output when none has been set.
  #
  if {![info exists ::test_channel]} then {
    return stdout; # TODO: Good default?
  }
  return $::test_channel
}
proc tryVerifyTestPath {} {
  #
  # NOTE: Return true only when ::test_path exists, names a directory
  #       that is present on disk, and that directory is non-empty
  #       (according to [getFiles]); otherwise, return false.
  #
  if {![info exists ::test_path]} then {
    return false
  }
  if {![file exists $::test_path] || \
      ![file isdirectory $::test_path]} then {
    return false
  }
  if {[llength [getFiles $::test_path *]] == 0} then {
    return false
  }
  return true
}
proc checkForAndSetTestPath { whatIf {quiet false} } {
#
# NOTE: Attempt to locate the test suite directory and set ::test_path
#       to it, trying several well-known layouts in order until one
#       verifies (via [tryVerifyTestPath]).  When $whatIf is non-zero,
#       every candidate location is computed and reported even if an
#       earlier one already verified.  When $quiet is non-zero, the
#       per-candidate diagnostics are suppressed.  Does nothing when
#       ::test_path is already set or in a "safe" interpreter.
#
#
# NOTE: Everything in this procedure requires access to the file system;
# therefore, it cannot be used in a stock "safe" interpreter.
#
if {![interp issafe] && ![info exists ::test_path]} then {
#
# NOTE: Grab the name of the current script file. If this is an empty
# string, many test path checks will have to be skipped.
#
set script [info script]
#
# NOTE: Eagle and native Tcl have different requirements and possible
# locations for the test path; therefore, handle them separately.
#
if {[isEagle]} then {
#
# NOTE: Grab the base directory and the library directory. Without
# these, several test path checks will be skipped.
#
set library [getTestLibraryDirectory]; set base [info base]
if {[string length $library] > 0} then {
#
# NOTE: Try the source release directory structure. For this
# case, the final test path would be:
#
# $library/../../Library/Tests
#
set ::test_path [file normalize [file join [file dirname [file \
dirname $library]] Library Tests]]
if {!$quiet} then {
tqputs [getTestChannelOrDefault] [appendArgs \
"---- checking #1 for Eagle test path at \"" \
$::test_path \"...\n]
}
}
if {[string length $base] > 0 && \
($whatIf || ![tryVerifyTestPath])} then {
#
# NOTE: Try the source release directory structure again; this
# time, assume only the embedded script library was used.
# For this case, the final test path would be:
#
# $base/Library/Tests
#
set ::test_path [file normalize [file join $base Library Tests]]
if {!$quiet} then {
tqputs [getTestChannelOrDefault] [appendArgs \
"---- checking #2 for Eagle test path at \"" \
$::test_path \"...\n]
}
}
if {[string length $script] > 0 && \
($whatIf || ![tryVerifyTestPath])} then {
#
# NOTE: Try for the test package directory. For this case, the
# final test path would be:
#
# $script/../Test1.0
#
set ::test_path [file normalize [file join [file dirname [file \
dirname $script]] [appendArgs Test [info engine Version]]]]
if {!$quiet} then {
tqputs [getTestChannelOrDefault] [appendArgs \
"---- checking #3 for Eagle test path at \"" \
$::test_path \"...\n]
}
}
if {[string length $base] > 0 && \
($whatIf || ![tryVerifyTestPath])} then {
#
# NOTE: Try for the test package directory again; this time, use
# the base path and assume the source release directory
# structure. For this case, the final test path would be:
#
# $base/lib/Test1.0
#
set ::test_path [file normalize [file join $base lib [appendArgs \
Test [info engine Version]]]]
if {!$quiet} then {
tqputs [getTestChannelOrDefault] [appendArgs \
"---- checking #4 for Eagle test path at \"" \
$::test_path \"...\n]
}
}
if {[string length $base] > 0 && \
($whatIf || ![tryVerifyTestPath])} then {
#
# NOTE: Try for the test package directory again; this time, use
# the base path. For this case, the final test path would
# be:
#
# $base/Test1.0
#
set ::test_path [file normalize [file join $base [appendArgs \
Test [info engine Version]]]]
if {!$quiet} then {
tqputs [getTestChannelOrDefault] [appendArgs \
"---- checking #5 for Eagle test path at \"" \
$::test_path \"...\n]
}
}
if {[string length $library] > 0 && \
($whatIf || ![tryVerifyTestPath])} then {
#
# NOTE: This must be a binary release, no "Library" directory
# then. Also, binary releases have an upper-case "Tests"
# directory name that originates from the "update.bat"
# tool. This must match the casing used in "update.bat".
# For this case, the final test path would be:
#
# $library/../../Tests
#
set ::test_path [file normalize [file join [file dirname [file \
dirname $library]] Tests]]
if {!$quiet} then {
tqputs [getTestChannelOrDefault] [appendArgs \
"---- checking #6 for Eagle test path at \"" \
$::test_path \"...\n]
}
}
if {[string length $base] > 0 && \
($whatIf || ![tryVerifyTestPath])} then {
#
# NOTE: Fallback to using the base directory and checking for a
# "Tests" directory beneath it. For this case, the final
# test path would be:
#
# $base/Tests
#
set ::test_path [file normalize [file join $base Tests]]
if {!$quiet} then {
tqputs [getTestChannelOrDefault] [appendArgs \
"---- checking #7 for Eagle test path at \"" \
$::test_path \"...\n]
}
}
if {!$quiet} then {
tqputs [getTestChannelOrDefault] [appendArgs \
"---- final Eagle test path is \"" \
[expr {[info exists ::test_path] ? \
$::test_path : "<none>"}] \"\n]
}
} else {
if {[string length $script] > 0} then {
#
# NOTE: Try the source release directory structure. For this
# case, the final test path would be:
#
# $script/../../Library/Tests
#
set ::test_path [file normalize [file join [file dirname [file \
dirname [file dirname $script]]] Library Tests]]
if {!$quiet} then {
tqputs [getTestChannelOrDefault] [appendArgs \
"---- checking #1 for Tcl test path at \"" \
$::test_path \"...\n]
}
}
if {[string length $script] > 0 && \
($whatIf || ![tryVerifyTestPath])} then {
#
# NOTE: Try for the test package directory. For this case, the
# final test path would be:
#
# $script/../Test1.0
#
set ::test_path [file normalize [file join [file dirname [file \
dirname $script]] Test1.0]]
if {!$quiet} then {
tqputs [getTestChannelOrDefault] [appendArgs \
"---- checking #2 for Tcl test path at \"" \
$::test_path \"...\n]
}
}
if {[string length $script] > 0 && \
($whatIf || ![tryVerifyTestPath])} then {
#
# NOTE: This must be a binary release, no "Library" directory
# then. Also, binary releases have an upper-case "Tests"
# directory name that originates from the "update.bat"
# tool. This must match the casing used in "update.bat".
# For this case, the final test path would be:
#
# $script/../../Tests
#
set ::test_path [file normalize [file join [file dirname [file \
dirname [file dirname $script]]] Tests]]
if {!$quiet} then {
tqputs [getTestChannelOrDefault] [appendArgs \
"---- checking #3 for Tcl test path at \"" \
$::test_path \"...\n]
}
}
if {!$quiet} then {
tqputs [getTestChannelOrDefault] [appendArgs \
"---- final Tcl test path is \"" \
[expr {[info exists ::test_path] ? \
$::test_path : "<none>"}] \"\n]
}
}
}
}
proc configureTcltest { verbose match skip constraints imports force } {
#
# NOTE: Prepare the "tcltest" package (or the built-in Eagle
#       equivalent) for use by the test suite.  Arguments:
#
#       verbose --     tcltest verbosity string (native Tcl only);
#                      empty means use the default "pbste".
#       match --       test name patterns to run.
#       skip --        test name patterns to skip.
#       constraints -- test constraints to force on.
#       imports --     names to import from ::tcltest into the global
#                      namespace (e.g. "test").
#       force --       non-zero passes -force to [namespace import].
#
#
# NOTE: Eagle and native Tcl have different configuration requirements
# for the "tcltest" package. For Eagle, the necessary testing
# functionality is built-in. In native Tcl, the package must be
# loaded now and that cannot be done in a "safe" interpreter.
#
if {[isEagle]} then {
#
# HACK: Flag the "test" and "runTest" script library procedures so
# that they use the script location of their caller and not
# their own.
#
# BUGBUG: Even this does not yet fix the script location issues in
# the test suite:
#
# debug procedureflags test +ScriptLocation
# debug procedureflags runTest +ScriptLocation
#
# NOTE: Setup the necessary compatibility shims for the test suite.
#
namespace eval ::tcltest {}; # HACK: Force namespace creation now.
setupTestShims true [expr {![isTestSuiteRunning]}]
#
# NOTE: Fake having the package as the functionality is built-in.
#
package provide tcltest 2.2.10; # Tcl 8.4
} elseif {![interp issafe]} then {
#
# NOTE: Attempt to detect if the package is already loaded.
#
set loaded [expr {[catch {package present tcltest}] == 0}]
#
# NOTE: Always attempt to load the package.
#
package require tcltest
#
# NOTE: Configure it for our use (only when it was not loaded).
#
if {!$loaded} then {
if {[string length $verbose] > 0} then {
::tcltest::configure -verbose $verbose
} else {
::tcltest::configure -verbose pbste
}
}
#
# NOTE: We need to copy the Eagle test names to match over to Tcl.
#
if {[llength $match] > 0} then {
::tcltest::configure -match $match
}
#
# NOTE: We need to copy the Eagle test names to skip over to Tcl.
#
if {[llength $skip] > 0} then {
::tcltest::configure -skip $skip
}
#
# NOTE: We need to copy the Eagle test constraints over to Tcl.
#
if {[llength $constraints] > 0} then {
::tcltest::configure -constraints $constraints
}
#
# NOTE: For the benefit of the Eagle test suite, always add the
# pseudo-constraints "fail.false" and "fail.true".
#
::tcltest::testConstraint fail.false 1
::tcltest::testConstraint fail.true 1
#
# NOTE: We need the [test] command in the global namespace.
#
if {[llength $imports] > 0} then {
set command [list namespace import]
if {$force} then {
lappend command -force
}
foreach import $imports {
lappend command [appendArgs ::tcltest:: $import]
}
namespace eval :: $command
}
}
}
proc machineToPlatform { machine {architecture false} } {
  #
  # NOTE: Map a machine name (e.g. from the environment) to a platform
  #       name.  The "intel" machine is special: it maps to "Win32" on
  #       Windows unless $architecture is non-zero, in which case it
  #       always maps to "x86".  Cannot use "-nocase" matching here
  #       because Tcl 8.4 does not support it (pre-TIP #241); use
  #       [string tolower] instead.
  #
  set machine [string tolower $machine]
  if {$machine eq "intel"} then {
    if {!$architecture && [isWindows]} then {
      return Win32
    }
    return x86
  }
  foreach knownMachine [list arm ia64 msil amd64 ia32_on_win64 arm64] \
      platform [list arm itanium clr x64 wow64 arm64] {
    if {$machine eq $knownMachine} then {
      return $platform
    }
  }
  return unknown
}
proc architectureForPlatform { platform } {
  #
  # NOTE: Map a platform name (or machine name alias) back to its
  #       canonical processor architecture name.  Cannot use "-nocase"
  #       matching here because Tcl 8.4 does not support it (pre-TIP
  #       #241); use [string tolower] instead.
  #
  set platform [string tolower $platform]
  foreach {architecture aliases} [list \
      x86 [list intel win32 x86] \
      arm [list arm] \
      ia64 [list ia64 itanium] \
      msil [list msil clr] \
      x64 [list amd64 x64] \
      ia32_on_win64 [list ia32_on_win64 wow64] \
      arm64 [list arm64]] {
    if {[lsearch -exact $aliases $platform] != -1} then {
      return $architecture
    }
  }
  return unknown
}
if {[isEagle]} then {
###########################################################################
############################ BEGIN Eagle ONLY #############################
###########################################################################
proc initializeTests {} {
#
# NOTE: Reset all per-run test state in the global scope: the
#       "eagle_tests" counters and name lists, the active test
#       constraints, and the test verbosity flags (derived from the
#       testVerbose environment variable and/or test_flags(-verbose)).
#
uplevel #0 {
#
# NOTE: Reset the information in the global "tests" array, which is
# used to interface with the internal test tracking information
# in the interpreter via a variable trace.
#
set eagle_tests(Total) 0
set eagle_tests(Skipped) 0
set eagle_tests(Passed) 0
set eagle_tests(Failed) 0
#
# NOTE: Setup the lists of patterns to match test names against. In
# Eagle, these originate from the command line arguments and are
# passed to the interpreter via this virtual array.
#
if {[info exists test_flags(-match)]} then {
set eagle_tests(MatchNames) $test_flags(-match); # run these tests.
} else {
set eagle_tests(MatchNames) [list *]; # default to running all tests.
}
if {[info exists test_flags(-skip)]} then {
set eagle_tests(SkipNames) $test_flags(-skip); # skip these tests.
} else {
set eagle_tests(SkipNames) [list]; # default to skipping no tests.
}
#
# NOTE: What tests have been skipped, if any?
#
set eagle_tests(SkippedNames) [list]
#
# NOTE: What tests have failed, if any?
#
set eagle_tests(FailedNames) [list]
#
# NOTE: Initialize the list of active test constraints from the
# environment variable and/or the test flags.
#
set eagle_tests(Constraints) [getEnvironmentVariable testConstraints]
if {[info exists test_flags(-constraints)]} then {
eval lappend eagle_tests(Constraints) $test_flags(-constraints)
}
unset -nocomplain test_verbose
set test_verbose [getEnvironmentVariable testVerbose]
if {[string length $test_verbose] == 0} then {
set test_verbose Default
}
if {[info exists test_flags(-verbose)] && \
[string length $test_flags(-verbose)] > 0} then {
#
# NOTE: Map all test verbosity flags we support for script usage
# to their abbreviated names (which are all one letter) and
# then split them into a list.
#
set test_verbose [split [string map [list \
Body B Pass P Skip S Start T Error E Line L \
Fail F Reason R Time I Exit X StdOut O StdErr D] \
$test_flags(-verbose)] ""]
}
set eagle_tests(Verbose) $test_verbose; unset test_verbose
}
}
proc setupTestShims { setup {quiet false} } {
  #
  # NOTE: Add or remove the compatibility shims needed to evaluate
  #       test files written against the native Tcl "tcltest"
  #       package.  When $setup is non-zero, the [testConstraint]
  #       aliases and the stub procedures [::tcltest::cleanupTests]
  #       and [::tcltest::loadTestedCommands] are created if not
  #       already present; otherwise, the shims created earlier are
  #       removed.  When $quiet is non-zero, diagnostics are
  #       suppressed.
  #
  if {$setup} then {
    #
    # HACK: Compatibility shim(s) for use with various tests in the Tcl
    #       test suite.  Make sure these commands do not already exist
    #       prior to attempting to add them.
    #
    if {[llength [info commands testConstraint]] == 0} then {
      interp alias {} testConstraint {} haveOrAddConstraint
      if {!$quiet} then {
        tqputs [getTestChannelOrDefault] \
            "---- added \"testConstraint\" alias\n"
      }
    }
    if {[llength [info commands ::tcltest::testConstraint]] == 0} then {
      interp alias {} ::tcltest::testConstraint {} haveOrAddConstraint
      if {!$quiet} then {
        tqputs [getTestChannelOrDefault] \
            "---- added \"::tcltest::testConstraint\" alias\n"
      }
    }
    #
    # NOTE: This is needed by most tests in the Tcl test suite.  Make
    #       sure this command does not already exist prior to adding it.
    #
    if {[llength [info commands ::tcltest::cleanupTests]] == 0} then {
      proc ::tcltest::cleanupTests { args } {}
      if {!$quiet} then {
        tqputs [getTestChannelOrDefault] \
            "---- added \"::tcltest::cleanupTests\" procedure\n"
      }
    }
    #
    # NOTE: This is needed by some tests in the Tcl test suite.  Make
    #       sure this command does not already exist prior to adding it.
    #
    if {[llength [info commands \
        ::tcltest::loadTestedCommands]] == 0} then {
      proc ::tcltest::loadTestedCommands { args } {}
      if {!$quiet} then {
        tqputs [getTestChannelOrDefault] \
            "---- added \"::tcltest::loadTestedCommands\" procedure\n"
      }
    }
  } else {
    #
    # NOTE: Remove the compatibility shim command aliases that we setup
    #       earlier.
    #
    # BUGFIX: Use [getTestChannelOrDefault] here (instead of reading
    #         ::test_channel directly, as this branch previously did)
    #         so that removing the shims does not raise an error when
    #         the test channel has not been configured; this also
    #         matches the setup branch above.
    #
    if {[llength [info commands \
        ::tcltest::loadTestedCommands]] > 0} then {
      rename ::tcltest::loadTestedCommands ""
      if {!$quiet} then {
        tqputs [getTestChannelOrDefault] \
            "---- removed \"::tcltest::loadTestedCommands\" procedure\n"
      }
    }
    if {[llength [info commands ::tcltest::cleanupTests]] > 0} then {
      rename ::tcltest::cleanupTests ""
      if {!$quiet} then {
        tqputs [getTestChannelOrDefault] \
            "---- removed \"::tcltest::cleanupTests\" procedure\n"
      }
    }
    if {[llength [interp aliases ::tcltest::testConstraint]] > 0} then {
      interp alias {} ::tcltest::testConstraint {} {}
      if {!$quiet} then {
        tqputs [getTestChannelOrDefault] \
            "---- removed \"::tcltest::testConstraint\" alias\n"
      }
    }
    if {[llength [interp aliases testConstraint]] > 0} then {
      interp alias {} testConstraint {} {}
      if {!$quiet} then {
        tqputs [getTestChannelOrDefault] \
            "---- removed \"testConstraint\" alias\n"
      }
    }
  }
}
proc shouldWriteTestData { code } {
  #
  # NOTE: Consult Eagle's private TestOps.ShouldWriteTestData helper
  #       for the supplied return code; default to true whenever the
  #       [object] command is unavailable or the query fails.
  #
  # NOTE(review): this returns false precisely when the helper reports
  #       true -- confirm the intended inversion against TestOps.
  #
  if {[llength [info commands object]] == 0} then {
    return true
  }
  set failure [catch {
    object invoke -flags +NonPublic \
        Eagle._Components.Private.TestOps ShouldWriteTestData "" $code
  } writeTestData]
  if {!$failure && $writeTestData} then {
    return false
  }
  return true
}
proc probeForScriptFileName { {excludePatterns ""} {overridePatterns ""} } {
#
# NOTE: Attempt to determine the file name of a script on the active
#       interpreter's script location stack (Eagle only).  A location
#       matching one of $overridePatterns is returned immediately; a
#       location matching one of $excludePatterns is skipped.  Returns
#       an empty string when nothing suitable is found or when the
#       [object] command is unavailable.
#
# NOTE(review): the "in" operator and "lsearch -inverse" used below
#       are Eagle / Tcl 8.5+ features -- confirm this path is never
#       reached on Tcl 8.4.
#
if {[llength [info commands object]] > 0 && [catch {
set locations [object invoke -alias -flags +NonPublic \
Interpreter.GetActive ScriptLocations]
set count [$locations Count]
for {set index 0} {$index < $count} {incr index} {
set location [$locations -alias Peek $index]
if {[isNonNullObjectHandle $location]} then {
set locationFileName [file normalize [$location FileName]]
if {[string length $locationFileName] > 0} then {
if {[llength $overridePatterns] > 0 && \
[lsearch -inverse -glob -- \
$overridePatterns $locationFileName] != -1} then {
return $locationFileName
}
if {[llength $excludePatterns] == 0 || \
[lsearch -inverse -glob -- \
$excludePatterns $locationFileName] == -1} then {
return $locationFileName
}
}
}
}
return ""
} result] in [list 0 2]} then {
return $result
}
return ""
}
proc tresult { code result } {
  #
  # NOTE: Display a result (with its return code) on the host and
  #       record the result text in the test transcript log.
  #
  host result $code $result
  tlog $result
}
proc getPassedPercentage {} {
  #
  # NOTE: Return the percentage of tests that did not fail (passed
  #       plus skipped, over total), or zero when no tests were run.
  #
  if {$::eagle_tests(Total) <= 0} then {
    return 0; # no tests were run, etc.
  }
  set notFailed [expr {$::eagle_tests(Passed) + $::eagle_tests(Skipped)}]
  return [expr {100.0 * ($notFailed / double($::eagle_tests(Total)))}]
}
proc getSkippedPercentage {} {
  #
  # NOTE: Return the percentage of tests that were skipped, or zero
  #       when no tests were run.
  #
  if {$::eagle_tests(Total) <= 0} then {
    return 0; # no tests were run, etc.
  }
  return [expr {100.0 * \
      ($::eagle_tests(Skipped) / double($::eagle_tests(Total)))}]
}
proc getDisabledPercentage {} {
  #
  # NOTE: Return the percentage of tests that were disabled, or zero
  #       when no tests were run.
  #
  # NOTE(review): assumes ::eagle_tests(Disabled) is maintained
  #       elsewhere; [initializeTests] in this file does not set it --
  #       confirm the element always exists when tests have run.
  #
  if {$::eagle_tests(Total) <= 0} then {
    return 0; # no tests were run, etc.
  }
  return [expr {100.0 * \
      ($::eagle_tests(Disabled) / double($::eagle_tests(Total)))}]
}
proc testObjectMembers { args } {
  #
  # NOTE: Returns the sorted list of members for an object, using the
  #       [object members] sub-command evaluated in the context of the
  #       caller.  At least one argument (the object itself) must be
  #       supplied; any preceding arguments are passed through as
  #       options to [object members].
  #
  if {[llength $args] == 0} then {
    error [appendArgs \
        "wrong # args: should be \"" \
        [lindex [info level [info level]] 0] \
        " ?options? object\""]
  }
  set cmd [linsert $args 0 object members]
  return [lsort [uplevel 1 $cmd]]
}
proc createThread { script {parameterized false} {maxStackSize ""} } {
  #
  # NOTE: Creates (but does not start) a new thread object for the
  #       specified script, optionally parameterized and/or with a
  #       maximum stack size, returning an opaque object handle with
  #       an alias attached.
  #
  if {[isDotNetCore]} then {
    #
    # HACK: This seems to make .NET Core happier for reasons
    #       that are not entirely clear.
    #
    set typeName "System.Threading.Thread, mscorlib"
  } else {
    set typeName System.Threading.Thread
  }
  #
  # NOTE: Select the thread-start delegate type based on whether the
  #       script expects a parameter.
  #
  if {$parameterized} then {
    set startType System.Threading.ParameterizedThreadStart
  } else {
    set startType System.Threading.ThreadStart
  }
  #
  # NOTE: When a maximum stack size was supplied, use the constructor
  #       overload that accepts it; otherwise, use the single-argument
  #       overload.
  #
  if {[string length $maxStackSize] > 0} then {
    return [object create -alias -objectflags +NoReturnReference \
        -parametertypes [list $startType System.Int32] $typeName \
        $script $maxStackSize]
  } else {
    return [object create -alias -objectflags +NoReturnReference \
        -parametertypes [list $startType] $typeName $script]
  }
}
proc startThread { thread {parameterized false} {parameter null} } {
  #
  # NOTE: Starts the specified thread object, passing along the given
  #       parameter when the thread was created as parameterized.
  #
  if {!$parameterized} then {
    $thread Start
  } else {
    $thread Start $parameter
  }
}
proc cleanupThread { thread {timeout 2000} } {
#
# NOTE: Attempts to stop the specified thread object, escalating
#       through: Join-with-timeout (on .NET Core only), Interrupt,
#       Join-with-timeout, and finally Abort, checking liveness at
#       each step.  Returns true when the thread is no longer alive;
#       otherwise, false (i.e. it appears to be a zombie or an error
#       occurred), logging progress to the test channel throughout.
#
#
# HACK: When running on .NET Core, it seems that using the Interrupt
# method can result in a held lock being lost, even when using
# a lock statement, which should have try / finally semantics.
# Therefore, in that case, attempt to wait on the thread prior
# to attempting to use the Interrupt method.
#
if {[isDotNetCore]} then {
if {[$thread IsAlive]} then {
#
# NOTE: Join returns non-zero if the thread terminated within
#       the timeout; zero indicates the timeout elapsed.
#
if {[catch {$thread Join $timeout} error]} then {
tputs $::test_channel [appendArgs \
"---- failed to pre-join test thread \"" $thread "\": " \
$error \n]
} elseif {$error} then {
tputs $::test_channel [appendArgs \
"---- pre-joined test thread \"" $thread \"\n]
} else {
tputs $::test_channel [appendArgs \
"---- timeout pre-joining test thread \"" $thread " (" \
$timeout " milliseconds)\"\n"]
}
}
}
#
# NOTE: Begin the escalation proper: interrupt first, then join with
#       the timeout, then abort as a last resort.
#
if {[$thread IsAlive]} then {
if {[catch {$thread Interrupt} error]} then {
tputs $::test_channel [appendArgs \
"---- failed to interrupt test thread \"" $thread "\": " \
$error \n]
} else {
tputs $::test_channel [appendArgs "---- test thread \"" $thread \
"\" interrupted\n"]
}
if {[$thread IsAlive]} then {
if {[catch {$thread Join $timeout} error]} then {
tputs $::test_channel [appendArgs \
"---- failed to join test thread \"" $thread "\": " \
$error \n]
} elseif {$error} then {
tputs $::test_channel [appendArgs "---- joined test thread \"" \
$thread \"\n]
} else {
tputs $::test_channel [appendArgs \
"---- timeout joining test thread \"" $thread " (" \
$timeout " milliseconds)\"\n"]
}
if {[$thread IsAlive]} then {
#
# NOTE: Last resort: forcibly abort the thread.
#
if {[catch {$thread Abort} error]} then {
tputs $::test_channel [appendArgs \
"---- failed to abort test thread \"" $thread "\": " \
$error \n]
} else {
tputs $::test_channel [appendArgs "---- test thread \"" \
$thread "\" aborted\n"]
}
if {[$thread IsAlive]} then {
tputs $::test_channel [appendArgs "---- test thread \"" \
$thread "\" appears to be a zombie\n"]
} else {
return true; # aborted?
}
} else {
return true; # joined?
}
} else {
return true; # interrupted?
}
} else {
return true; # already dead?
}
return false; # still alive (or error).
}
proc reportTestConstraintCounts { channel skippedNames } {
  #
  # NOTE: Process the list of skipped tests, which is really a
  #       dictionary mapping test names to the list of constraints
  #       that caused them to be skipped.  Roll them up, on a
  #       per-constraint basis, producing a count for each constraint
  #       while tracking the widest count seen, to help align the
  #       final output.
  #
  set widest 0
  foreach {test constraints} $skippedNames {
    foreach constraint $constraints {
      if {![info exists skippedCounts($constraint)]} then {
        set skippedCounts($constraint) 0
      }
      incr skippedCounts($constraint)
      if {$skippedCounts($constraint) > $widest} then {
        set widest $skippedCounts($constraint)
      }
    }
  }
  #
  # NOTE: Produce the final output, which includes a single line
  #       header followed by one line per test constraint seen, with
  #       the counts right-aligned to the widest value.
  #
  if {$widest > 0 && [array size skippedCounts] > 0} then {
    set places [expr {int(log10($widest)) + 1}]
    tputs $channel "Number of tests skipped for each constraint:\n"
    foreach {name value} [reportArrayGet skippedCounts] {
      tputs $channel [appendArgs \
          \t [format [appendArgs % $places s] $value] \t $name \n]
    }
    tputs $channel \n
  }
}
proc purgeAndCleanup { channel name } {
  #
  # NOTE: Purges the calling context via [debug purge], runs the
  #       standard [debug cleanup] step, and then clears the various
  #       private caches maintained by the core library, reporting
  #       the result of every step to the specified channel.
  #
  catch {uplevel 1 [list debug purge]} result
  tputs $channel [appendArgs \
      "---- purge \"" $name "\" results: " $result \n]
  catch {uplevel 1 [list debug cleanup {
    Default -Miscellaneous
  }]} result
  tputs $channel [appendArgs \
      "---- cleanup \"" $name "\" results: " $result \n]
  #
  # NOTE: Next, invoke each (private) core library cache cleanup
  #       method in turn.  The [foreach] does not add a call frame,
  #       so [uplevel 1] still refers to our caller.
  #
  foreach {typeName methodName label} [list \
      Eagle._Components.Private.ProcessOps ClearOutputCache ProcessOps \
      Eagle._Components.Private.EnumOps ClearEnumCache EnumOps \
      Eagle._Components.Private.HelpOps ClearHelpCache HelpOps \
      Eagle._Components.Private.StringOps ClearPreambleEncodings \
      StringOps Eagle._Comparers.FileName ClearCache Comparers.FileName \
      Eagle._Components.Private.HashOps Cleanup HashOps \
      Eagle._Components.Private.FactoryOps Cleanup FactoryOps \
      Eagle._Components.Private.ScriptOps ClearInterpreterCache \
      ScriptOps Eagle._Components.Private.SyntaxOps ClearCache \
      SyntaxOps] {
    catch {uplevel 1 [list object invoke -flags +NonPublic \
        $typeName $methodName]} result
    tputs $channel [appendArgs \
        "---- " $label " cleanup results: " $result \n]
  }
}
proc evalWithTimeout { script {milliseconds 2000} {resultVarName ""} } {
#
# NOTE: Evaluates the specified script in the context of the caller,
#       using script cancellation to enforce the specified timeout,
#       in milliseconds.  Returns the Tcl return code from the
#       evaluation; the script result (or error message) is stored
#       into the optional result variable, in the caller's context.
#
#
# NOTE: Verify that the number of milliseconds requested is greater than
# zero.
#
if {$milliseconds <= 0} then {
error "number of milliseconds must be greater than zero"
}
#
# NOTE: Evaluate the specified script in the context of the caller,
# returning the result to the caller.
#
if {[string length $resultVarName] > 0} then {
upvar 1 $resultVarName result
}
return [catch {
#
# NOTE: Evaluate the script in the context of the caller, forcing
# any [vwait] that may be in the contained script to stop
# when it hits a script cancellation -AND- reset the script
# cancellation flags upon completion (i.e. important due to
# the use of script cancellation to enforce the timeout).
#
uplevel 1 [list \
debug secureeval -nocancel true -stoponerror true -timeout \
$milliseconds {} $script]
} result]
}
proc vwaitWithTimeout { varName {milliseconds 2000} } {
#
# NOTE: Waits for the named variable (in the context of the caller)
#       to be written -OR- for the specified timeout, in
#       milliseconds, to elapse.  Returns the [vwait] result when the
#       wait completes without error; otherwise, false (e.g. the wait
#       was canceled).
#
#
# NOTE: Verify that the number of milliseconds requested is positive
# or zero.
#
if {$milliseconds < 0} then {
error "number of milliseconds cannot be negative"
}
if {[catch {
#
# NOTE: Refer to the specified variable in the context of our
# caller.
#
upvar 1 $varName variable
#
# NOTE: Wait for the variable to be changed -OR- for the wait
# to be canceled.
#
vwait -eventwaitflags {+NoBgError StopOnError} -force -timeout \
$milliseconds -- variable
} result] == 0} then {
#
# NOTE: The wait completed successfully, the variable may have
# been changed.
#
return $result
} else {
#
# NOTE: The wait failed in some way, it may have been canceled
# and the variable may or may not have been changed.
#
return false
}
}
proc tclLoadForTest { {varName ""} {findFlags ""} {loadFlags ""} } {
  #
  # NOTE: Loads the native Tcl library into this interpreter, if it
  #       is not already loaded, honoring the optional find and load
  #       flags.  The optional variable, in the context of the
  #       caller, receives non-zero only when the library was
  #       actually loaded by this call.
  #
  if {[string length $varName] > 0} then {
    upvar 1 $varName loaded
  }
  set loaded 0
  if {[tcl ready]} then {
    return
  }
  #
  # NOTE: Build the [tcl load] command, adding each optional flag
  #       argument only when it is non-empty.
  #
  set command [list tcl load]
  foreach {option value} [list \
      -findflags $findFlags -loadflags $loadFlags] {
    if {[string length $value] > 0} then {
      lappend command $option $value
    }
  }
  uplevel 1 $command; set loaded 1
  set module [tcl module]
  set interp [tcl master]
  tputs $::test_channel [appendArgs \
      "---- native Tcl loaded, module \"" $module \
      "\", interpreter \"" $interp \"\n]
}
proc tclUnloadForTest { {force false} {varName ""} } {
  #
  # NOTE: Unloads the native Tcl library from this interpreter when
  #       forced -OR- when the optional variable, in the context of
  #       the caller, indicates it was loaded by a matching call to
  #       [tclLoadForTest].  The variable, if supplied, is always
  #       unset afterward.
  #
  set unload false
  if {$force} then {
    #
    # NOTE: This cannot check [tcl ready] as some tests actually
    #       rely upon errors from the [tcl unload] sub-command.
    #
    set unload true
  }
  if {[string length $varName] > 0} then {
    upvar 1 $varName loaded
    #
    # BUGFIX: The named variable may not actually exist in the
    #         context of the caller (e.g. [tclLoadForTest] was never
    #         called); previously, reading it in that case raised an
    #         error.  Check for existence first.
    #
    if {[info exists loaded] && $loaded && !$unload} then {
      set unload true
    }
    unset -nocomplain loaded
  }
  if {$unload} then {
    set module [tcl module]
    set interp [tcl master]
    uplevel 1 [list tcl unload]
    tputs $::test_channel [appendArgs \
        "---- native Tcl " [expr {$force ? "forcibly " : ""}] \
        "unloaded, module \"" $module "\", interpreter \"" \
        $interp \"\n]
  }
}
proc generateUniqueId { {text ""} {length 16} } {
#
# HACK: This should generate a reasonably random unique identifier
# suitable for non-cryptographic use by the test suite.
#
# NOTE: When text is supplied, the identifier is a deterministic
#       SHA-512 digest prefix of that text; otherwise, it is derived
#       from various per-process / per-moment values.
#
# NOTE(review): [string range ... 0 $length] is inclusive of both
#               endpoints, so the result actually has length + 1
#               characters (17 by default) -- confirm whether that is
#               intentional before changing it.
#
if {[string length $text] > 0} then {
return [string range \
[string tolower [hash normal sha512 $text]] 0 $length]
} else {
return [string range \
[string tolower [hash normal sha512 [appendArgs uniq \
[info context] [info tid] [pid] [clock clicks] [clock \
now]]]] 0 $length]
}
}
proc testExecTclScript { script {shell ""} {verbose 0} } {
#
# NOTE: Evaluates the specified script text using a native Tcl shell
#       (via a temporary script file) and returns its trimmed output.
#       The verbosity levels are cumulative: 0 = quiet; > 0 = log
#       errors; > 1 = log a unique script id and the script result;
#       > 2 = also log the script text; > 3 = on failure, return the
#       full error message instead of just "error".
#
try {
#
# NOTE: Get a temporary file name for the script we are going to
# use to query the machine type for the native Tcl shell.
#
set fileName [file tempname]
#
# NOTE: Since the native Tcl shell cannot simply evaluate a string
# supplied via the command line, write the script to be
# evaluated to the temporary file.
#
writeFile $fileName $script
#
# NOTE: Use the specified shell, if it is valid; otherwise, use
# the configured Tcl shell.
#
if {[string length $shell] == 0} then {
#
# NOTE: Before attempting to use the configured Tcl shell, make
# sure it has actually been set.
#
if {[info exists ::test_tclsh] && \
[string length $::test_tclsh] > 0} then {
#
# NOTE: Use the currently configured Tcl shell, which may or
# may not actually exist.
#
set shell $::test_tclsh
} else {
#
# NOTE: We cannot execute the native Tcl shell because one
# has not been specified, nor configured.
#
set error "error: \"::test_tclsh\" variable is missing"
if {$verbose > 0} then {
tputs $::test_channel [appendArgs \
"---- native Tcl script error: " $error \n]
}
return [expr {$verbose > 3 ? $error : "error"}]
}
}
#
# NOTE: Generate a unique identifier to make it easier to locate
# this script and its results in the test log file.
#
if {$verbose > 1} then {
set id [generateUniqueId $script]
}
#
# NOTE: When in "ultra-verbose" mode, emit native Tcl script to
# the log file.  The id is always available here because
# verbose > 2 implies verbose > 1, above.
#
if {$verbose > 2} then {
tputs $::test_channel [appendArgs \
"---- native Tcl script (" $id ") text: " [string trim \
$script] \n]
}
#
# NOTE: Evaluate the script using the native Tcl shell, trim the
# excess whitespace from the output, and return it to the
# caller.
#
if {[catch {string trim \
[testExec $shell [list -success Success] \
[appendArgs \" $fileName \"]]} result] == 0} then {
#
# NOTE: When in "super-verbose" mode, emit native Tcl script
# results to the log file.
#
if {$verbose > 1} then {
tputs $::test_channel [appendArgs \
"---- native Tcl script (" $id ") result: " $result \n]
}
#
# NOTE: Success, return the result to the caller.
#
return $result
} else {
#
# NOTE: We could not execute the native Tcl shell (perhaps one
# is not available?).
#
set error [appendArgs "error: " $result]
if {$verbose > 0} then {
tputs $::test_channel [appendArgs \
"---- native Tcl script error: " $error \n]
}
return [expr {$verbose > 3 ? $error : "error"}]
}
} finally {
#
# NOTE: Should we delete the temporary file we created, if any?
#
if {![info exists ::no(deleteTestExecTclFile)]} then {
#
# NOTE: Did we create a temporary file to hold the Tcl script?
#
if {[info exists fileName] && \
[string length $fileName] > 0 && \
[file exists $fileName]} then {
#
# NOTE: Delete the temporary file we used to query the machine
# type for the native Tcl shell.
#
catch {file delete $fileName}
}
}
}
}
proc getTclShellVerbosity {} {
  #
  # NOTE: Returns the configured native Tcl shell verbosity level, or
  #       zero when it is unset or not a strict integer.
  #
  if {![info exists ::test_tclsh_verbose]} then {
    return 0; # TODO: Good default?
  }
  if {![string is integer -strict $::test_tclsh_verbose]} then {
    return 0; # TODO: Good default?
  }
  return $::test_tclsh_verbose
}
proc getTclVersionForTclShell { {shell ""} {verbose 0} } {
#
# NOTE: Returns the [info tclversion] result from the native Tcl
#       shell, or an error indication on failure.
#
return [testExecTclScript {
puts -nonewline stdout [info tclversion]
} $shell $verbose]
}
proc getCommandsForTclShell { {shell ""} {verbose 0} } {
#
# NOTE: Returns the [info commands] result from the native Tcl
#       shell, or an error indication on failure.
#
return [testExecTclScript {
puts -nonewline stdout [info commands]
} $shell $verbose]
}
proc getMachineForTclShell { {shell ""} {verbose 0} } {
#
# NOTE: Returns the machine type (i.e. $tcl_platform(machine)) of
#       the native Tcl shell, or an error indication on failure.
#
return [testExecTclScript {
puts -nonewline stdout $tcl_platform(machine)
} $shell $verbose]
}
proc getTclExecutableForTclShell { {shell ""} {verbose 0} } {
#
# NOTE: Returns the [info nameofexecutable] result from the native
#       Tcl shell, or an error indication on failure.
#
return [testExecTclScript {
puts -nonewline stdout [info nameofexecutable]
} $shell $verbose]
}
proc getTkVersionForTclShell { {shell ""} {verbose 0} } {
#
# NOTE: Returns the Tk package version reported by the native Tcl
#       shell, or an error indication on failure.  The [exit] is
#       needed because loading Tk normally enters an event loop.
#
return [testExecTclScript {
puts -nonewline stdout [package require Tk]; exit
} $shell $verbose]
}
proc evalWithTclShell { script {raw false} {shell ""} {verbose 0} } {
#
# NOTE: Evaluates the specified script using the native Tcl shell.
#       When raw is true, the result is a two element list of the
#       return code and result; otherwise, just the result.  The
#       script text is substituted literally into the template via
#       [string map], so it must not contain the strings "%script%"
#       or "%raw%".
#
return [testExecTclScript [string map \
[list %script% $script %raw% $raw] {
if {%raw%} then {
set code [catch {%script%} result]
puts -nonewline stdout [list $code $result]
} else {
puts -nonewline stdout [eval {%script%}]
}
}] $shell $verbose]
}
proc getGarudaDll { {machine ""} } {
  #
  # NOTE: Returns the full path and file name of the Garuda DLL that
  #       matches the platform (i.e. machine type) of the native Tcl
  #       shell, built from the Eagle base path -- or an empty string
  #       when it cannot be determined.  Currently, this will only
  #       work correctly if the test suite is being run from inside
  #       the source tree.
  #
  if {![info exists ::base_path]} then {
    return ""; # NOTE: We are missing the base path, return nothing.
  }
  #
  # NOTE: Without an effective test configuration, we cannot build
  #       the path.
  #
  set configuration [getTestConfiguration]
  if {[string length $configuration] == 0} then {
    return ""
  }
  #
  # NOTE: Get the effective test suffix.  This is allowed to be an
  #       empty string.
  #
  set suffix [getTestSuffix]
  #
  # NOTE: If necessary, automatically detect the machine for the Tcl
  #       shell that we plan on using.
  #
  if {[string length $machine] == 0 && \
      ![info exists ::no(getMachineForTclShell)]} then {
    set machine [getMachineForTclShell "" [getTclShellVerbosity]]
  }
  #
  # NOTE: Build the full path and file name of the Garuda DLL, using
  #       the Eagle base path.
  #
  return [file join $::base_path bin \
      [machineToPlatform $machine] [appendArgs $configuration Dll \
      $suffix] [appendArgs Garuda [info sharedlibextension]]]
}
proc cleanupExcel {} {
#
# NOTE: Removes the object declarations and namespace imports
#       matching the Excel interop assembly patterns.
#
# TODO: These may need to be changed in later Excel versions.
#
object undeclare -declarepattern Microsoft.Office.Interop.Excel*
object unimport -importpattern Microsoft.Office.Interop.Excel
}
proc cleanupVisualBasic {} {
#
# NOTE: Removes the namespace imports matching the Microsoft
#       VisualBasic patterns.
#
# TODO: These may need to be changed in later framework versions.
#
object unimport -importpattern Microsoft.VisualBasic*
}
proc cleanupXml {} {
#
# NOTE: Removes the namespace imports matching the System.Xml
#       patterns.
#
# TODO: These may need to be changed in later framework versions.
#
object unimport -importpattern System.Xml*
}
proc cleanupWinForms {} {
#
# NOTE: Removes the namespace imports for the Windows Forms related
#       namespaces, one pattern at a time.
#
# TODO: These may need to be changed in later framework versions.
#
object unimport -importpattern System.Resources
object unimport -importpattern System.Windows.Forms
object unimport -importpattern System.Windows.Forms.Automation
object unimport -importpattern \
System.Windows.Forms.ComponentModel.Com2Interop
object unimport -importpattern System.Windows.Forms.Design
object unimport -importpattern System.Windows.Forms.Layout
object unimport -importpattern System.Windows.Forms.PropertyGridInternal
object unimport -importpattern System.Windows.Forms.VisualStyles
}
proc getTestLibraryDirectory {} {
  #
  # NOTE: Returns the directory containing the script library, or an
  #       empty string when it cannot be determined (e.g. [info
  #       library] does not work right in a "safe" interpreter).
  #
  if {[catch {info library} library]} then {
    return ""
  }
  #
  # NOTE: If the script library is embedded within the core library
  #       itself (i.e. the location refers to a file, not a
  #       directory), strip off the file name.  [file isfile] is
  #       false for nonexistent paths, so no separate existence
  #       check is needed.
  #
  if {[file isfile $library]} then {
    set library [file dirname $library]
  }
  return $library
}
#
# NOTE: Check for the test path in the various well-known locations
# and set the associated variable.  This can be suppressed via
# the ::no(checkForAndSetTestPath) variable.
#
if {![info exists ::no(checkForAndSetTestPath)]} then {
checkForAndSetTestPath false [expr {![isTestSuiteRunning]}]
}
#
# NOTE: Fake loading and configuring the "tcltest" package unless we
# are prevented (via the ::no(configureTcltest) variable).
#
if {![info exists ::no(configureTcltest)]} then {
configureTcltest "" [list] [list] [list] [list] false
}
###########################################################################
############################# END Eagle ONLY ##############################
###########################################################################
} else {
###########################################################################
############################# BEGIN Tcl ONLY ##############################
###########################################################################
proc getPassedPercentage {} {
  #
  # NOTE: Returns the percentage of tests that did not fail (i.e. the
  #       passed and skipped counts combined), relative to the total
  #       number of tests run via tcltest.  Returns zero when no
  #       tests were run.
  #
  set total $::tcltest::numTests(Total)
  if {$total <= 0} then {
    return 0; # no tests were run, etc.
  }
  set notFailed [expr {
    $::tcltest::numTests(Passed) + $::tcltest::numTests(Skipped)
  }]
  return [expr {100.0 * ($notFailed / double($total))}]
}
proc getSkippedPercentage {} {
  #
  # NOTE: Returns the percentage of tests that were skipped, relative
  #       to the total number of tests run via tcltest.  Returns zero
  #       when no tests were run.
  #
  set total $::tcltest::numTests(Total)
  if {$total <= 0} then {
    return 0; # no tests were run, etc.
  }
  return [expr {100.0 * ($::tcltest::numTests(Skipped) / double($total))}]
}
#
# NOTE: Check for the test path in the various well-known locations
# and set the associated variable.  This can be suppressed via
# the ::no(checkForAndSetTestPath) variable.
#
if {![info exists ::no(checkForAndSetTestPath)]} then {
checkForAndSetTestPath false [expr {![isTestSuiteRunning]}]
}
#
# NOTE: Load and configure the "tcltest" package unless we are prevented
# (via the ::no(configureTcltest) variable).
#
if {![info exists ::no(configureTcltest)]} then {
configureTcltest "" [list] [list] [list] [list test testConstraint] false
}
#
# NOTE: We need several of our test related commands in the global
# namespace as well.  Export the listed commands from the current
# namespace and import them globally.
#
exportAndImportPackageCommands [namespace current] [list \
dumpState trawputs tputs ttclLog doesTestLogFileExist \
getTestLogStartSentry extractTestRunIdFromLogStartSentry \
doesTestLogHaveStartSentry didTestLogHaveStartSentry \
setTestLogStartSentry tlog getSoftwareRegistryKey haveConstraint \
addConstraint haveOrAddConstraint getConstraints getCachedConstraints \
useCachedConstraints removeConstraint fixConstraints \
fixTimingConstraints testDebugBreak testArrayGet testArrayGet2 \
testResultGet testValueGet getFirstLineOfError calculateBogoCops \
calculateRelativePerformance formatTimeStamp formatElapsedTime \
sourceIfValid processTestArguments getTclShellFileName \
getTemporaryPath getTemporaryFileName getFiles getTestFiles \
getTestRunId getNewTestRunId getDefaultTestLogPath getTestLogPath \
getTestLogId getDefaultTestLog getTestLog getLastTestLog \
getTestSuite getTestSuiteFullName getTestMachine getTestPlatform \
getTestConfiguration getTestNamePrefix getTestSuffix \
getTestUncountedLeaks getRuntimeAssemblyName getTestAssemblyName \
canTestExec testExec testClrExec execTestShell isRandomOrder \
isBreakOnDemand isBreakOnLeak isStopOnFailure isStopOnLeak \
isExitOnComplete returnInfoScript runTestPrologue runTestEpilogue \
hookPuts unhookPuts runTest testShim tsource recordTestStatistics \
reportTestStatistics formatList formatListAsDict pathToRegexp \
assemblyNameToRegexp inverseLsearchGlob removePathFromFileNames \
formatDecimal clearTestPercent reportTestPercent reportArrayGet \
reportTestStatisticCounts runAllTests isTestSuiteRunning \
getTestChannelOrDefault tryVerifyTestPath checkForAndSetTestPath \
configureTcltest machineToPlatform architectureForPlatform \
getPassedPercentage getSkippedPercentage] false false
###########################################################################
############################## END Tcl ONLY ###############################
###########################################################################
}
#
# NOTE: Provide the Eagle "test" package to the interpreter.  When
# running in Eagle, the package version tracks the engine patch
# level; otherwise (i.e. in Tcl), a fixed "1.0" is used.
#
package provide Eagle.Test \
[expr {[isEagle] ? [info engine PatchLevel] : "1.0"}]
}