System.Data.SQLite

Check-in [e1e1a9456a]

Overview
Comment: Update Eagle script library in Externals to pre-beta22.
SHA1: e1e1a9456a4925aa9159db3ce0f4c317cbf2d0e3
User & Date: mistachkin 2012-01-21 07:13:34.006
Original Comment: Update Eagle library in Externals to pre-beta22.
Context
2012-01-22 13:04  More updates to documentation. check-in: b3eb32438b user: mistachkin tags: trunk
2012-01-21 07:13  Update Eagle script library in Externals to pre-beta22. check-in: e1e1a9456a user: mistachkin tags: trunk
2012-01-21 06:38  Update all version numbers for release 1.0.78.0. check-in: 5d55816324 user: mistachkin tags: trunk
Changes
Changes to Externals/Eagle/lib/Eagle1.0/test.eagle.

Old (lines 548-582):
      if {$code == 0 && [regexp -- {\s==== (.*?) FAILED\s} $result]} then {
        set code 1
      }

      #
      # NOTE: Display and log the result of the test we just completed.
      #
      host result $code $result
      tlog $result

      #
      # NOTE: If the test failed with an actual error (i.e. not just a
      #       test failure), make sure we do not obscure the error
      #       message with test suite output.
      #
      if {$error} then {
        tputs $::test_channel \n; # emit a blank line.
      }

      #
      # NOTE: If this test failed and the stop-on-failure flag is set,
      #       raise an error now.  If we are being run from inside
      #       runAllTests, this will also serve to signal it to stop
      #       processing further test files.
      #
      if {$code != 0 && [isStopOnFailure]} then {
        host result Error "OVERALL RESULT: STOP-ON-FAILURE\n"
        tlog "OVERALL RESULT: STOP-ON-FAILURE\n"

        error ""; # no message
      }
    } else {
      if {$error} then {
        tputs $::test_channel [appendArgs "ERROR (runTest): " $result \n]
      }

New (lines 548-580):
      if {$code == 0 && [regexp -- {\s==== (.*?) FAILED\s} $result]} then {
        set code 1
      }

      #
      # NOTE: Display and log the result of the test we just completed.
      #
      tresult $code $result

      #
      # NOTE: If the test failed with an actual error (i.e. not just a
      #       test failure), make sure we do not obscure the error
      #       message with test suite output.
      #
      if {$error} then {
        tputs $::test_channel \n; # emit a blank line.
      }

      #
      # NOTE: If this test failed and the stop-on-failure flag is set,
      #       raise an error now.  If we are being run from inside
      #       runAllTests, this will also serve to signal it to stop
      #       processing further test files.
      #
      if {$code != 0 && [isStopOnFailure]} then {
        tresult Error "OVERALL RESULT: STOP-ON-FAILURE\n"

        error ""; # no message
      }
    } else {
      if {$error} then {
        tputs $::test_channel [appendArgs "ERROR (runTest): " $result \n]
      }

Old (lines 1261-1274):
        set eagle_tests(constraints) [getEnvironmentVariable testConstraints]

        if {[info exists test_flags(-constraints)]} then {
            eval lappend eagle_tests(constraints) $test_flags(-constraints)
        }
      }
    }

    proc getPassPercentage {} {
      if {$::eagle_tests(total) > 0} then {
        return [expr \
            {100.0 * (($::eagle_tests(passed) + \
            $::eagle_tests(skipped)) / \
            double($::eagle_tests(total)))}]

New (lines 1259-1276):
        set eagle_tests(constraints) [getEnvironmentVariable testConstraints]

        if {[info exists test_flags(-constraints)]} then {
            eval lappend eagle_tests(constraints) $test_flags(-constraints)
        }
      }
    }

    proc tresult { code result } {
      host result $code $result; tlog $result
    }

    proc getPassPercentage {} {
      if {$::eagle_tests(total) > 0} then {
        return [expr \
            {100.0 * (($::eagle_tests(passed) + \
            $::eagle_tests(skipped)) / \
            double($::eagle_tests(total)))}]
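
The tresult procedure added above folds the previously paired host result and tlog calls into a single helper, so each result line is both shown on the interpreter host and appended to the test log. A minimal sketch of a call site before and after the refactoring (the message text is hypothetical):

# Before: every result line required two calls.
host result Ok "PASSED: 5\n"
tlog "PASSED: 5\n"

# After: one tresult call performs both actions.
tresult Ok "PASSED: 5\n"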

Old (lines 1627-1662):
    #
    # NOTE: Setup the test path relative to the library path.
    #
    if {![interp issafe] && ![info exists ::test_path]} then {
      #
      # NOTE: Try the source release directory structure.
      #
      set ::test_path [file join [file normalize [file dirname [file dirname \
          [info library]]]] Library Tests]

      if {![file exists $::test_path] || \
          ![file isdirectory $::test_path]} then {
        #
        # NOTE: Try for the test package directory.
        #
        set ::test_path [file join [file normalize [file dirname \
            [file dirname [info script]]]] Test1.0]
      }

      if {![file exists $::test_path] || \
          ![file isdirectory $::test_path]} then {
        #
        # NOTE: This must be a binary release, no "Library" directory then.
        #       Also, binary releases have an upper-case "Tests" directory
        #       name that originates from the "update.bat" tool.  This must
        #       match the casing used in "update.bat".
        #
        set ::test_path [file join [file normalize [file dirname [file dirname \
            [info library]]]] Tests]
      }
    }

    #
    # NOTE: Fake having the tcltest package unless we are prevented.
    #
    if {![info exists ::no(configureTcltest)]} then {

New (lines 1629-1665):
    #
    # NOTE: Setup the test path relative to the library path.
    #
    if {![interp issafe] && ![info exists ::test_path]} then {
      #
      # NOTE: Try the source release directory structure.
      #
      set ::test_path [file join [file normalize [file dirname \
          [file dirname [info library]]]] Library Tests]

      if {![file exists $::test_path] || \
          ![file isdirectory $::test_path]} then {
        #
        # NOTE: Try for the test package directory.
        #
        set ::test_path [file join [file normalize [file dirname \
            [file dirname [info script]]]] [appendArgs Test \
            [info engine Version]]]
      }

      if {![file exists $::test_path] || \
          ![file isdirectory $::test_path]} then {
        #
        # NOTE: This must be a binary release, no "Library" directory then.
        #       Also, binary releases have an upper-case "Tests" directory
        #       name that originates from the "update.bat" tool.  This must
        #       match the casing used in "update.bat".
        #
        set ::test_path [file join [file normalize [file dirname \
            [file dirname [info library]]]] Tests]
      }
    }

    #
    # NOTE: Fake having the tcltest package unless we are prevented.
    #
    if {![info exists ::no(configureTcltest)]} then {

Old (lines 1691-1705):

      return 0; # no tests were run, etc.
    }

    #
    # NOTE: Setup the test path relative to the path of this file.
    #
    if {![info exists ::test_path]} then {
      #
      # NOTE: Try the source release directory structure.
      #
      set ::test_path [file join [file normalize [file dirname \
          [file dirname [file dirname [info script]]]]] Library Tests]

      if {![file exists $::test_path] || \

New (lines 1694-1708):

      return 0; # no tests were run, etc.
    }

    #
    # NOTE: Setup the test path relative to the path of this file.
    #
    if {![interp issafe] && ![info exists ::test_path]} then {
      #
      # NOTE: Try the source release directory structure.
      #
      set ::test_path [file join [file normalize [file dirname \
          [file dirname [file dirname [info script]]]]] Library Tests]

      if {![file exists $::test_path] || \

Old (lines 1723-1737):
            [file dirname [file dirname [info script]]]]] Tests]
      }
    }

    #
    # NOTE: Load and configure the tcltest package unless we are prevented.
    #
    if {![info exists ::no(configureTcltest)]} then {
      configureTcltest [list test testConstraint] false
    }

    #
    # NOTE: We need several of our test related commands in the global
    #       namespace as well.
    #

New (lines 1726-1740):
            [file dirname [file dirname [info script]]]]] Tests]
      }
    }

    #
    # NOTE: Load and configure the tcltest package unless we are prevented.
    #
    if {![interp issafe] && ![info exists ::no(configureTcltest)]} then {
      configureTcltest [list test testConstraint] false
    }

    #
    # NOTE: We need several of our test related commands in the global
    #       namespace as well.
    #
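
In the test path hunks above, the hard-coded Test1.0 package directory name is now derived from the running engine's version. A minimal sketch, assuming an Eagle interpreter where [info engine Version] returns 1.0:

# Build the test package directory name from the engine version;
# with version 1.0 this yields "Test1.0", the value that was
# previously hard-coded.
set dir [appendArgs Test [info engine Version]]; # => Test1.0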
Changes to Externals/Eagle/lib/Test1.0/constraints.eagle.

Old (lines 766-779):
        addConstraint softwareUpdate

        tputs $channel trusted\n
      } else {
        tputs $channel untrusted\n
      }
    }

    proc checkForAdministrator { channel } {
      tputs $channel "---- checking for administrator... "

      if {[isAdministrator]} then {
        addConstraint administrator; # running as full admin.


New (lines 766-816):
        addConstraint softwareUpdate

        tputs $channel trusted\n
      } else {
        tputs $channel untrusted\n
      }
    }

    proc checkForStrongName { channel } {
      tputs $channel "---- checking for strong name... "

      set strongName [object invoke Interpreter.GetActive GetStrongName]

      if {[string length $strongName] > 0} then {
        #
        # NOTE: Yes, it appears that the core library was signed with a
        #       strong name key.
        #
        addConstraint strongName

        tputs $channel yes\n
      } else {
        tputs $channel no\n
      }
    }

    proc checkForCertificate { channel } {
      tputs $channel "---- checking for certificate... "

      set certificate [object invoke Interpreter.GetActive GetCertificate]

      if {[string length $certificate] > 0} then {
        #
        # NOTE: Yes, it appears that the core library was signed with a
        #       code-signing certificate.
        #
        addConstraint certificate

        tputs $channel [appendArgs "yes (" \
            [object invoke $certificate Subject] ")\n"]
      } else {
        tputs $channel no\n
      }
    }

    proc checkForAdministrator { channel } {
      tputs $channel "---- checking for administrator... "

      if {[isAdministrator]} then {
        addConstraint administrator; # running as full admin.

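The new checkForStrongName and checkForCertificate procedures follow the constraint-check template used throughout constraints.eagle: announce the probe, query the interpreter, add a constraint when the feature is present, and report the outcome. A minimal sketch of that template (checkForSomething and hasSomething are hypothetical names):

proc checkForSomething { channel } {
  tputs $channel "---- checking for something... "

  if {[hasSomething]} then {
    #
    # NOTE: The feature is present; tests may now depend on the
    #       "something" constraint.
    #
    addConstraint something

    tputs $channel yes\n
  } else {
    tputs $channel no\n
  }
}
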
Changes to Externals/Eagle/lib/Test1.0/epilogue.eagle.

Old (lines 15-57):
###############################################################################

if {![info exists no([file tail [info script]])]} then {
  if {[info level] > 0} then {
    error "cannot run, current level is not global"
  }

  if {[isEagle]} then {
    #
    # NOTE: Show the current state of the memory.
    #
    catch {debug memory} memory

    tputs $test_channel [appendArgs "---- ending memory: " \
        [formatListAsDict $memory] \n]

    unset memory

    #
    # NOTE: Show the current state of the native stack.
    #
    catch {debug stack true} stack

    tputs $test_channel [appendArgs "---- ending stack: " \
        [formatListAsDict $stack] \n]

    unset stack
  }

  #
  # NOTE: Show when the tests actually ended (now).
  #
  tputs $test_channel [appendArgs "---- tests ended at " \
      [clock format [clock seconds]] \n]

  if {[isEagle]} then {
    #
    # NOTE: Check for and display any duplicate test names that we found.  In
    #       theory, this checking may produce false positives if a test file
    #       (or the entire test suite) is run multiple times without resetting
    #       the test statistics and/or restarting Eagle; however, duplicate
    #       test names must be avoided and this is considered a good trade-off.
    #

New (lines 15-55):
###############################################################################

if {![info exists no([file tail [info script]])]} then {
  if {[info level] > 0} then {
    error "cannot run, current level is not global"
  }

  #
  # NOTE: Show when the tests actually ended (now).
  #
  tputs $test_channel [appendArgs "---- tests ended at " \
      [clock format [clock seconds]] \n]

  if {[isEagle]} then {
    #
    # NOTE: Show the current state of the memory.
    #
    catch {debug memory} memory

    tputs $test_channel [appendArgs "---- ending memory: " \
        [formatListAsDict $memory] \n]

    unset memory

    #
    # NOTE: Show the current state of the native stack.
    #
    catch {debug stack true} stack

    tputs $test_channel [appendArgs "---- ending stack: " \
        [formatListAsDict $stack] \n]

    unset stack

    #
    # NOTE: Check for and display any duplicate test names that we found.  In
    #       theory, this checking may produce false positives if a test file
    #       (or the entire test suite) is run multiple times without resetting
    #       the test statistics and/or restarting Eagle; however, duplicate
    #       test names must be avoided and this is considered a good trade-off.
    #

Old (lines 66-120):
    }

    unset -nocomplain name count

    tputs $test_channel \n; # NOTE: Blank line.

    if {$eagle_tests(passed) > 0} then {
      host result Ok [appendArgs "PASSED: " $eagle_tests(passed) \n]
      tlog [appendArgs "PASSED: " $eagle_tests(passed) \n]
    }

    if {$eagle_tests(failed) > 0} then {
      host result Error [appendArgs "FAILED: " $eagle_tests(failed) \n]
      tlog [appendArgs "FAILED: " $eagle_tests(failed) \n]

      if {[llength $eagle_tests(failedNames)] > 0} then {
        host result Error [appendArgs "FAILED: " $eagle_tests(failedNames) \n]
        tlog [appendArgs "FAILED: " $eagle_tests(failedNames) \n]
      }
    }

    if {$eagle_tests(skipped) > 0} then {
      host result Break [appendArgs "SKIPPED: " $eagle_tests(skipped) \n]
      tlog [appendArgs "SKIPPED: " $eagle_tests(skipped) \n]

      if {[llength $eagle_tests(skippedNames)] > 0} then {
        host result Break [appendArgs "SKIPPED: " $eagle_tests(skippedNames) \n]
        tlog [appendArgs "SKIPPED: " $eagle_tests(skippedNames) \n]
      }
    }

    if {$eagle_tests(total) > 0} then {
      host result Return [appendArgs "TOTAL: " $eagle_tests(total) \n]
      tlog [appendArgs "TOTAL: " $eagle_tests(total) \n]

      if {$eagle_tests(skipped) > 0} then {
        set percent [getSkipPercentage]
        host result Break [appendArgs "SKIP PERCENTAGE: " \
            [formatDecimal $percent] %\n]
        tlog [appendArgs "SKIP PERCENTAGE: " [formatDecimal $percent] %\n]
      }

      set percent [getPassPercentage]
      host result Return [appendArgs "PASS PERCENTAGE: " \
          [formatDecimal $percent] %\n]
      tlog [appendArgs "PASS PERCENTAGE: " [formatDecimal $percent] %\n]
    } else {
      #
      # NOTE: No tests.
      #
      set percent 0
    }

New (lines 64-110):
    }

    unset -nocomplain name count

    tputs $test_channel \n; # NOTE: Blank line.

    if {$eagle_tests(passed) > 0} then {
      tresult Ok [appendArgs "PASSED: " $eagle_tests(passed) \n]
    }

    if {$eagle_tests(failed) > 0} then {
      tresult Error [appendArgs "FAILED: " $eagle_tests(failed) \n]

      if {[llength $eagle_tests(failedNames)] > 0} then {
        tresult Error [appendArgs "FAILED: " $eagle_tests(failedNames) \n]
      }
    }

    if {$eagle_tests(skipped) > 0} then {
      tresult Break [appendArgs "SKIPPED: " $eagle_tests(skipped) \n]

      if {[llength $eagle_tests(skippedNames)] > 0} then {
        tresult Break [appendArgs "SKIPPED: " $eagle_tests(skippedNames) \n]
      }
    }

    if {$eagle_tests(total) > 0} then {
      tresult Return [appendArgs "TOTAL: " $eagle_tests(total) \n]

      if {$eagle_tests(skipped) > 0} then {
        set percent [getSkipPercentage]

        tresult Break [appendArgs \
            "SKIP PERCENTAGE: " [formatDecimal $percent] %\n]
      }

      set percent [getPassPercentage]

      tresult Return [appendArgs \
          "PASS PERCENTAGE: " [formatDecimal $percent] %\n]
    } else {
      #
      # NOTE: No tests.
      #
      set percent 0
    }


Old (lines 130-217):
      #
      set passedOrSkipped [expr {$eagle_tests(passed) + \
          $eagle_tests(skipped)}]

      if {$passedOrSkipped == $eagle_tests(total)} then {
        set exitCode Success

        host result Ok "OVERALL RESULT: SUCCESS\n"
        tlog "OVERALL RESULT: SUCCESS\n"
      } else {
        set exitCode Failure

        host result Error "OVERALL RESULT: FAILURE\n"
        tlog "OVERALL RESULT: FAILURE\n"
      }

      unset passedOrSkipped
    } else {
      #
      # NOTE: They specified a non-default test pass threshold.  Check to
      #       make sure that we meet or exceed the requirement and then
      #       set the exit code to success; otherwise, set it to failure.
      #
      if {$percent >= $test_threshold} then {
        set exitCode Success

        host result Ok [appendArgs "OVERALL RESULT: SUCCESS (" \
            $percent "% >= " $test_threshold %)\n]

        tlog [appendArgs "OVERALL RESULT: SUCCESS (" \
            $percent "% >= " $test_threshold %)\n]
      } else {
        set exitCode Failure

        host result Error [appendArgs \
            "OVERALL RESULT: FAILURE (" $percent "% < " $test_threshold %)\n]

        tlog [appendArgs \
            "OVERALL RESULT: FAILURE (" $percent "% < " $test_threshold %)\n]
      }
    }

    unset percent

    tputs $test_channel \n; # NOTE: Blank line.
  } else {
    tputs $test_channel \n; # NOTE: Blank line.

    if {$::tcltest::numTests(Passed) > 0} then {
      tputs $test_channel \
          [appendArgs "PASSED: " $::tcltest::numTests(Passed) \n]
    }

    if {$::tcltest::numTests(Failed) > 0} then {
      tputs $test_channel \
          [appendArgs "FAILED: " $::tcltest::numTests(Failed) \n]

      if {[llength $::tcltest::failFiles] > 0} then {
        tputs $test_channel \
            [appendArgs "FAILED: " $::tcltest::failFiles \n]
      }
    }

    if {$::tcltest::numTests(Skipped) > 0} then {
      tputs $test_channel \
          [appendArgs "SKIPPED: " $::tcltest::numTests(Skipped) \n]
    }

    if {$::tcltest::numTests(Total) > 0} then {
      tputs $test_channel \
          [appendArgs "TOTAL: " $::tcltest::numTests(Total) \n]

      if {$::tcltest::numTests(Skipped) > 0} then {
        set percent [getSkipPercentage]

        tputs $test_channel [appendArgs "SKIP PERCENTAGE: " \
            [formatDecimal $percent] %\n]
      }

      set percent [getPassPercentage]

      tputs $test_channel [appendArgs "PASS PERCENTAGE: " \
          [formatDecimal $percent] %\n]
    } else {
      #
      # NOTE: No tests.
      #
      set percent 0
    }

New (lines 120-201):
      #
      set passedOrSkipped [expr {$eagle_tests(passed) + \
          $eagle_tests(skipped)}]

      if {$passedOrSkipped == $eagle_tests(total)} then {
        set exitCode Success

        tresult Ok "OVERALL RESULT: SUCCESS\n"
      } else {
        set exitCode Failure

        tresult Error "OVERALL RESULT: FAILURE\n"
      }

      unset passedOrSkipped
    } else {
      #
      # NOTE: They specified a non-default test pass threshold.  Check to
      #       make sure that we meet or exceed the requirement and then
      #       set the exit code to success; otherwise, set it to failure.
      #
      if {$percent >= $test_threshold} then {
        set exitCode Success

        tresult Ok [appendArgs \
            "OVERALL RESULT: SUCCESS (" $percent "% >= " $test_threshold %)\n]
      } else {
        set exitCode Failure

        tresult Error [appendArgs \
            "OVERALL RESULT: FAILURE (" $percent "% < " $test_threshold %)\n]
      }
    }

    unset percent

    tputs $test_channel \n; # NOTE: Blank line.
  } else {
    tputs $test_channel \n; # NOTE: Blank line.

    if {$::tcltest::numTests(Passed) > 0} then {
      tputs $test_channel [appendArgs \
          "PASSED: " $::tcltest::numTests(Passed) \n]
    }

    if {$::tcltest::numTests(Failed) > 0} then {
      tputs $test_channel [appendArgs \
          "FAILED: " $::tcltest::numTests(Failed) \n]

      if {[llength $::tcltest::failFiles] > 0} then {
        tputs $test_channel [appendArgs \
            "FAILED: " $::tcltest::failFiles \n]
      }
    }

    if {$::tcltest::numTests(Skipped) > 0} then {
      tputs $test_channel [appendArgs \
          "SKIPPED: " $::tcltest::numTests(Skipped) \n]
    }

    if {$::tcltest::numTests(Total) > 0} then {
      tputs $test_channel [appendArgs \
          "TOTAL: " $::tcltest::numTests(Total) \n]

      if {$::tcltest::numTests(Skipped) > 0} then {
        set percent [getSkipPercentage]

        tputs $test_channel [appendArgs \
            "SKIP PERCENTAGE: " [formatDecimal $percent] %\n]
      }

      set percent [getPassPercentage]

      tputs $test_channel [appendArgs \
          "PASS PERCENTAGE: " [formatDecimal $percent] %\n]
    } else {
      #
      # NOTE: No tests.
      #
      set percent 0
    }


Old (lines 244-264):
      # NOTE: They specified a non-default test pass threshold.  Check to
      #       make sure that we meet or exceed the requirement and then
      #       set the exit code to success; otherwise, set it to failure.
      #
      if {$percent >= $test_threshold} then {
        set exitCode 0; # Success.

        tputs $test_channel [appendArgs "OVERALL RESULT: SUCCESS (" \
            $percent "% >= " $test_threshold %)\n]
      } else {
        set exitCode 1; # Failure.

        tputs $test_channel [appendArgs "OVERALL RESULT: FAILURE (" \
            $percent "% < " $test_threshold %)\n]
      }
    }

    unset percent

    tputs $test_channel \n; # NOTE: Blank line.


New (lines 228-248):
      # NOTE: They specified a non-default test pass threshold.  Check to
      #       make sure that we meet or exceed the requirement and then
      #       set the exit code to success; otherwise, set it to failure.
      #
      if {$percent >= $test_threshold} then {
        set exitCode 0; # Success.

        tputs $test_channel [appendArgs \
            "OVERALL RESULT: SUCCESS (" $percent "% >= " $test_threshold %)\n]
      } else {
        set exitCode 1; # Failure.

        tputs $test_channel [appendArgs \
            "OVERALL RESULT: FAILURE (" $percent "% < " $test_threshold %)\n]
      }
    }

    unset percent

    tputs $test_channel \n; # NOTE: Blank line.

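The summary logic in this epilogue rests on getPassPercentage (defined in test.eagle, shown earlier), which counts skipped tests as non-failures: 100 * (passed + skipped) / total. A worked example with hypothetical counts, showing how the threshold comparison above resolves:

# 96 passed + 2 skipped out of 100 total => 98.0 percent.
array set eagle_tests {passed 96 skipped 2 total 100}

set percent [expr {100.0 * (($eagle_tests(passed) + \
    $eagle_tests(skipped)) / double($eagle_tests(total)))}]

# With a non-default threshold of 95, 98.0 >= 95 yields SUCCESS.
set test_threshold 95

if {$percent >= $test_threshold} then {
  puts "OVERALL RESULT: SUCCESS ($percent% >= $test_threshold%)"
}
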
Changes to Externals/Eagle/lib/Test1.0/prologue.eagle.

Old (lines 525-538):
    #       does not control or change any hacks for Mono that may
    #       be present in the library itself.
    #
    # if {![info exists no(mono)] && [isMono]} then {
    #   set no(mono) true
    # }

    #
    # NOTE: Has administrator detection support been disabled?  We do
    #       this check [nearly] first as it may [eventually] be used
    #       to help determine if other constraints should be skipped.
    #
    if {![info exists no(administrator)]} then {
      checkForAdministrator $test_channel

New (lines 525-545):
    #       does not control or change any hacks for Mono that may
    #       be present in the library itself.
    #
    # if {![info exists no(mono)] && [isMono]} then {
    #   set no(mono) true
    # }

    ###########################################################################
    ######################### BEGIN Eagle Constraints #########################
    ###########################################################################

    tputs $test_channel [appendArgs \
        "---- start of Eagle specific test constraints...\n"]

    #
    # NOTE: Has administrator detection support been disabled?  We do
    #       this check [nearly] first as it may [eventually] be used
    #       to help determine if other constraints should be skipped.
    #
    if {![info exists no(administrator)]} then {
      checkForAdministrator $test_channel

Old (lines 613-626):
    #
    # NOTE: Has software update trust detection support been disabled?
    #
    if {![info exists no(softwareUpdate)]} then {
      checkForSoftwareUpdateTrust $test_channel
    }

    #
    # NOTE: Has database testing support been disabled?
    #
    if {![info exists no(sql)]} then {
      #
      # NOTE: Set the server name, if necessary.
      #

New (lines 620-647):
    #
    # NOTE: Has software update trust detection support been disabled?
    #
    if {![info exists no(softwareUpdate)]} then {
      checkForSoftwareUpdateTrust $test_channel
    }

    #
    # NOTE: Has strong name detection support been disabled?
    #
    if {![info exists no(strongName)]} then {
      checkForStrongName $test_channel
    }

    #
    # NOTE: Has certificate detection support been disabled?
    #
    if {![info exists no(certificate)]} then {
      checkForCertificate $test_channel
    }

    #
    # NOTE: Has database testing support been disabled?
    #
    if {![info exists no(sql)]} then {
      #
      # NOTE: Set the server name, if necessary.
      #

Old (lines 672-685):
            Security=SSPI;}}]}]
      }

      #
      # NOTE: Can we access the local database?
      #
      checkForDatabase $test_channel $test_database

      unset password user timeout database server
    }

    #
    # NOTE: Has symbol testing support been disabled?
    #
    if {![info exists no(assemblySymbols)]} then {

New (lines 693-707):
            Security=SSPI;}}]}]
      }

      #
      # NOTE: Can we access the local database?
      #
      checkForDatabase $test_channel $test_database

      unset password user timeout database server
    }

    #
    # NOTE: Has symbol testing support been disabled?
    #
    if {![info exists no(assemblySymbols)]} then {

Old (lines 955-968):
      }
    }

    #
    # NOTE: Has custom test method support been disabled?
    #
    if {![info exists no(test)]} then {
      #
      # NOTE: Has remoting testing support been disabled?
      #
      if {![info exists no(testRemoting)]} then {
        #
        # NOTE: For tests "remotingServer-1.*".
        #

New (lines 977-1001):
      }
    }

    #
    # NOTE: Has custom test method support been disabled?
    #
    if {![info exists no(test)]} then {
      #
      # NOTE: Has DateTime testing support been disabled?
      #
      if {![info exists no(testDateTime)]} then {
        #
        # NOTE: For test "vwait-1.11".
        #
        checkForObjectMember $test_channel Eagle._Tests.Default \
            *TestSetDateTimeNowCallback*
      }

      #
      # NOTE: Has remoting testing support been disabled?
      #
      if {![info exists no(testRemoting)]} then {
        #
        # NOTE: For tests "remotingServer-1.*".
        #

Old (lines 1097-1110):
            *set_Item*
      }

      #
      # NOTE: Has core marshaller testing support been disabled?
      #
      if {![info exists no(testMarshaller)]} then {
        #
        # NOTE: For test "basic-1.29".
        #
        checkForObjectMember $test_channel Eagle._Tests.Default \
            *TestExecuteStaticDelegates*

        #

New (lines 1130-1152):
            *set_Item*
      }

      #
      # NOTE: Has core marshaller testing support been disabled?
      #
      if {![info exists no(testMarshaller)]} then {
        #
        # NOTE: These are not currently used by any tests.
        #
        checkForObjectMember $test_channel Eagle._Tests.Default \
            *TestSaveObjects*

        checkForObjectMember $test_channel Eagle._Tests.Default \
            *TestRestoreObjects*

        #
        # NOTE: For test "basic-1.29".
        #
        checkForObjectMember $test_channel Eagle._Tests.Default \
            *TestExecuteStaticDelegates*

        #

Old (lines 1118-1133):
        #
        checkForObjectMember $test_channel Eagle._Tests.Default \
            *TestComplexMethod*

        #
        # NOTE: For test "object-2.3".
        #
        checkForObjectMember $test_channel Eagle._Components.Private.ArrayOps \
            *ToHexadecimalString*

        checkForObjectMember $test_channel Eagle._Tests.Default \
            *TestMulti2Array*

        checkForObjectMember $test_channel Eagle._Tests.Default \
            *TestMulti3Array*


New (lines 1160-1175):
        #
        checkForObjectMember $test_channel Eagle._Tests.Default \
            *TestComplexMethod*

        #
        # NOTE: For test "object-2.3".
        #
        checkForObjectMember $test_channel Eagle._Tests.Default \
            *TestToHexadecimalString*

        checkForObjectMember $test_channel Eagle._Tests.Default \
            *TestMulti2Array*

        checkForObjectMember $test_channel Eagle._Tests.Default \
            *TestMulti3Array*


Old (lines 1384-1426):
    #
    if {![info exists no(garudaDll)]} then {
      #
      # NOTE: For test "garuda-1.1".
      #
      checkForGarudaDll $test_channel
    }
  } else {
    #
    # HACK: Reset the test counts for tcltest.
    #
    set ::tcltest::numTests(Total) 0
    set ::tcltest::numTests(Skipped) 0
    set ::tcltest::numTests(Passed) 0
    set ::tcltest::numTests(Failed) 0

    #
    # HACK: Reset the list of failed files.
    #
    set ::tcltest::failFiles [list]

    #
    # NOTE: Has compile/runtime option testing support been disabled?
    #
    if {![info exists no(compileOptions)]} then {
      #
      # NOTE: Has dedicated test support been enabled (at compile-time)?
      #
      if {![info exists no(compileTest)]} then {
        #
        # NOTE: For test "tclLoad-1.16.1".
        #
        checkForCompileOption $test_channel TEST
      }
    }
  }

  #
  # NOTE: Has checking for the extra files needed by various tests been
  #       disabled?
  #
  if {![info exists no(checkForFile)]} then {
    #

New (lines 1426-1490):
    #
    if {![info exists no(garudaDll)]} then {
      #
      # NOTE: For test "garuda-1.1".
      #
      checkForGarudaDll $test_channel
    }

    ###########################################################################
    ########################## END Eagle Constraints ##########################
    ###########################################################################
  } else {
    #
    # HACK: Reset the test counts for tcltest.
    #
    set ::tcltest::numTests(Total) 0
    set ::tcltest::numTests(Skipped) 0
    set ::tcltest::numTests(Passed) 0
    set ::tcltest::numTests(Failed) 0

    #
    # HACK: Reset the list of failed files.
    #
    set ::tcltest::failFiles [list]

    ###########################################################################
    ########################## BEGIN Tcl Constraints ##########################
    ###########################################################################

    tputs $test_channel [appendArgs \
        "---- start of Tcl specific test constraints...\n"]

    #
    # NOTE: Has compile/runtime option testing support been disabled?
    #
    if {![info exists no(compileOptions)]} then {
      #
      # NOTE: Has dedicated test support been enabled (at compile-time)?
      #
      if {![info exists no(compileTest)]} then {
        #
        # NOTE: For test "tclLoad-1.16.1".
        #
        checkForCompileOption $test_channel TEST
      }
    }

    ###########################################################################
    ########################### END Tcl Constraints ###########################
    ###########################################################################
  }

  #############################################################################
  ####################### BEGIN Tcl & Eagle Constraints #######################
  #############################################################################

  tputs $test_channel [appendArgs \
      "---- start of common (Tcl & Eagle) test constraints...\n"]

  #
  # NOTE: Has checking for the extra files needed by various tests been
  #       disabled?
  #
  if {![info exists no(checkForFile)]} then {
    #
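
Every constraint check in this prologue is gated on an element of the no array, so a test runner can opt out of any individual detection step before the prologue is sourced. A minimal sketch (the chosen element names simply mirror the no(...) guards visible in this file):

# Skip network and performance constraint detection entirely.
set no(network) true
set no(performance) true

# Each guard in the prologue then short-circuits, e.g.:
#   if {![info exists no(network)]} then { checkForNetwork ... }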

Old (lines 1694-1739):
  #
  # NOTE: Has performance testing been disabled?
  #
  if {![info exists no(performance)]} then {
    checkForPerformance $test_channel
  }

  #
  # NOTE: Have very precise timing tests been disabled?
  #
  if {![info exists no(preciseTiming)]} then {
    checkForTiming $test_channel 25 preciseTiming; # 1/40th second.
  }

  #
  # NOTE: Have precise timing tests been disabled?
  #
  if {![info exists no(timing)]} then {
    checkForTiming $test_channel 50; # 1/20th second.
  }

  #
  # NOTE: Has interactive testing been disabled?
  #
  if {![info exists no(interactive)]} then {
    checkForInteractive $test_channel
  }

  if {![info exists no(userInteraction)]} then {
    checkForUserInteraction $test_channel
  }

  #
  # NOTE: Check for network connectivity to our test host (i.e.
  #       the Eagle distribution site).
  #
  if {![info exists no(network)]} then {
    checkForNetwork $test_channel $test_host $test_timeout
  }

  #
  # NOTE: For Eagle, dump the platform information, including
  #       the compile options.
  #
  if {[isEagle]} then {
    #

New (lines 1758-1807):
  #
  # NOTE: Has performance testing been disabled?
  #
  if {![info exists no(performance)]} then {
    checkForPerformance $test_channel
  }

  #
  # NOTE: Have precise timing tests been disabled?
  #
  if {![info exists no(timing)]} then {
    checkForTiming $test_channel 50; # 1/20th second.
  }

  #
  # NOTE: Have very precise timing tests been disabled?
  #
  if {![info exists no(preciseTiming)]} then {
    checkForTiming $test_channel 25 preciseTiming; # 1/40th second.
  }

  #
  # NOTE: Has interactive testing been disabled?
  #
  if {![info exists no(interactive)]} then {
    checkForInteractive $test_channel
  }

  if {![info exists no(userInteraction)]} then {
    checkForUserInteraction $test_channel
  }

  #
  # NOTE: Check for network connectivity to our test host (i.e.
  #       the Eagle distribution site).
  #
  if {![info exists no(network)]} then {
    checkForNetwork $test_channel $test_host $test_timeout
  }

  #############################################################################
  ######################## END Eagle & Tcl Constraints ########################
  #############################################################################

  #
  # NOTE: For Eagle, dump the platform information, including
  #       the compile options.
  #
  if {[isEagle]} then {
    #

Old (lines 1760-1774):
      tputs $test_channel [appendArgs \
          "---- checking for current BogoCops (commands-per-second)... "]

      if {![info exists test_cops]} then {
        set test_cops [calculateBogoCops]
      }

      tputs $test_channel [appendArgs $test_cops \n]

      set percent [expr {[calculateRelativePerformance iterations 1] * 100}]

      tputs $test_channel [appendArgs \
          "---- current BogoCops (commands-per-second) is " [formatDecimal \
          [expr {$percent > 100 ? $percent - 100 : $percent}] 2] "% " \
          [expr {$percent > 100 ? "faster than" : "as fast as"}] \

New (lines 1828-1842):
      tputs $test_channel [appendArgs \
          "---- checking for current BogoCops (commands-per-second)... "]

      if {![info exists test_cops]} then {
        set test_cops [calculateBogoCops]
      }

      tputs $test_channel [appendArgs [formatDecimal $test_cops] \n]

      set percent [expr {[calculateRelativePerformance iterations 1] * 100}]

      tputs $test_channel [appendArgs \
          "---- current BogoCops (commands-per-second) is " [formatDecimal \
          [expr {$percent > 100 ? $percent - 100 : $percent}] 2] "% " \
          [expr {$percent > 100 ? "faster than" : "as fast as"}] \

Old (lines 1803-1836):
        [list [getPlatformInfo patchLevel <none>]] " " \
        [list [getPlatformInfo tag <none>]] " " \
        [list [getPlatformInfo release <none>]] " " \
        [list [getPlatformInfo text <none>]] " " \
        [list [getPlatformInfo configuration <none>]] " " \
        [list [getPlatformInfo suffix <none>]] " " \
        [list $timeStamp] \n]

    tputs $test_channel [appendArgs "---- os: " \
        [getPlatformInfo os <none>] \n]

    tputs $test_channel [appendArgs "---- globalAssemblyCache: " \
        [getPlatformInfo globalAssemblyCache <none>] \n]

    tputs $test_channel [appendArgs "---- moduleVersionId: " \
        [getPlatformInfo moduleVersionId <none>] \n]

    tputs $test_channel [appendArgs "---- compileOptions: " \
        [formatList [getPlatformInfo compileOptions <none>]] \n]

    tputs $test_channel [appendArgs "---- strongName: " \
        [getPlatformInfo strongName <none>] \n]

    tputs $test_channel [appendArgs "---- certificate: " \
        [getPlatformInfo certificate <none>] \n]

    unset timeStamp
  }

  #
  # NOTE: Show the current test file name, if any.
  #
  tputs $test_channel [appendArgs "---- test file: " \
      [expr {[info exists test_file] && [string length $test_file] > 0 ? \

New (lines 1871-1904):
        [list [getPlatformInfo patchLevel <none>]] " " \
        [list [getPlatformInfo tag <none>]] " " \
        [list [getPlatformInfo release <none>]] " " \
        [list [getPlatformInfo text <none>]] " " \
        [list [getPlatformInfo configuration <none>]] " " \
        [list [getPlatformInfo suffix <none>]] " " \
        [list $timeStamp] \n]

    unset timeStamp

    tputs $test_channel [appendArgs "---- os: " \
        [getPlatformInfo os <none>] \n]

    tputs $test_channel [appendArgs "---- globalAssemblyCache: " \
        [getPlatformInfo globalAssemblyCache <none>] \n]

    tputs $test_channel [appendArgs "---- moduleVersionId: " \
        [getPlatformInfo moduleVersionId <none>] \n]

    tputs $test_channel [appendArgs "---- compileOptions: " \
        [formatList [getPlatformInfo compileOptions <none>]] \n]

    tputs $test_channel [appendArgs "---- strongName: " \
        [getPlatformInfo strongName <none>] \n]

    tputs $test_channel [appendArgs "---- certificate: " \
        [getPlatformInfo certificate <none>] \n]

  }

  #
  # NOTE: Show the current test file name, if any.
  #
  tputs $test_channel [appendArgs "---- test file: " \
      [expr {[info exists test_file] && [string length $test_file] > 0 ? \
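
The truncated expression above uses a guard pattern common to these files: display a global variable only when it exists and is non-empty, falling back to a placeholder. A minimal sketch of the same idiom (the fallback text is hypothetical):

set display [expr {[info exists ::test_file] && \
    [string length $::test_file] > 0 ? $::test_file : "<none>"}]

puts [appendArgs "---- test file: " $display]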