System.Data.SQLite

Check-in [6a82b4b74b]

Overview
Comment: Pickup changes to Eagle script library in externals.
SHA1: 6a82b4b74bc53b56dbe1e8b11f7fb9f309e930ba
User & Date: mistachkin 2018-04-26 13:57:54.968
Context
2018-04-28 15:59  Add preliminary support for the SQLITE_DBCONFIG_RESET_DATABASE control. check-in: 4e485e3e31 user: mistachkin tags: trunk
2018-04-26 13:57  Pickup changes to Eagle script library in externals. check-in: 6a82b4b74b user: mistachkin tags: trunk (this check-in)
2018-04-26 13:32  Fix the 'getExternalDirectory' test suite infrastructure procedure to account for the new relative location of the Eagle shell binary. check-in: b520434ab4 user: mistachkin tags: trunk
Changes
Changes to Externals/Eagle/lib/Eagle1.0/test.eagle.

Old (lines 16-55):

#
# NOTE: Use our own namespace here because even though we do not directly
#       support namespaces ourselves, we do not want to pollute the global
#       namespace if this script actually ends up being evaluated in Tcl.
#
namespace eval ::Eagle {
  proc tputs { channel string } {
    #
    # NOTE: If an output channel was provided, use it; otherwise,
    #       ignore the message.
    #
    if {[string length $channel] > 0} then {
      #
      # NOTE: Check if output is being actively intercepted by us.
      #
      if {![isEagle] && \
          [llength [info commands ::tcl::save::puts]] > 0} then {
        ::tcl::save::puts -nonewline $channel $string
      } else {
        puts -nonewline $channel $string
      }
    }

    tlog $string
  }

  #
  # NOTE: This is a shim designed to act like tclLog.
  #
  proc ttclLog { string } {
    tputs $::test_channel [appendArgs $string \n]
  }

  proc tlog { string } {
    #
    # NOTE: If a test log file was configured, use it; otherwise, ignore the
    #       message.
    #
    set fileName [getTestLog]

New (lines 16-96):

#
# NOTE: Use our own namespace here because even though we do not directly
#       support namespaces ourselves, we do not want to pollute the global
#       namespace if this script actually ends up being evaluated in Tcl.
#
namespace eval ::Eagle {
  proc trawputs { channel string } {
    #
    # NOTE: If an output channel was provided, use it; otherwise, ignore
    #       the message.
    #
    if {[string length $channel] > 0} then {
      #
      # NOTE: Check if output is being actively intercepted by us.
      #
      if {![isEagle] && \
          [llength [info commands ::tcl::save::puts]] > 0} then {
        ::tcl::save::puts -nonewline $channel $string
      } else {
        puts -nonewline $channel $string
      }
    }
  }

  proc tputs { channel string } {
    trawputs $channel $string; tlog $string
  }

  #
  # NOTE: This is a shim designed to act like tclLog.
  #
  proc ttclLog { string } {
    tputs $::test_channel [appendArgs $string \n]
  }

  proc doesTestLogFileExist { fileName } {
    if {[catch {
      expr {[file exists $fileName] && [file size $fileName] > 0}
    } result] == 0 && $result} then {
      return true
    } else {
      return false
    }
  }

  proc getTestLogStartSentry {} {
    if {![info exists ::test_run_id]} then {
      set ::test_run_id [getNewTestRunId]
    }

    return [appendArgs \
        "**** START OF TEST LOG \"" $::test_run_id "\" ****\n"]
  }

  proc doesTestLogHaveStartSentry {} {
    set fileName [getTestLog]

    if {[string length $fileName] > 0} then {
      if {[doesTestLogFileExist $fileName]} then {
        set sentry [string trim [getTestLogStartSentry]]

        if {[string length $sentry] > 0} then {
          set data [readFile $fileName]

          if {[string first $sentry $data] != -1} then {
            return true
          }
        }
      }
    }

    return false
  }

  proc tlog { string } {
    #
    # NOTE: If a test log file was configured, use it; otherwise, ignore the
    #       message.
    #
    set fileName [getTestLog]
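
In the new version, raw channel output (trawputs) is split out of tputs, and three helpers are added for the start-of-log sentry. A minimal usage sketch of the resulting surface, assuming the script library above is loaded and a test log file has been configured; the channel name stdout is only illustrative:

  #
  # NOTE: Writes only to the channel; nothing reaches the test log.
  #
  trawputs stdout "progress output, not worth logging\n"

  #
  # NOTE: Writes to the channel and also forwards the string to tlog,
  #       which appends it to the configured test log file.
  #
  tputs stdout "---- status line for the log\n"

  #
  # NOTE: True once this run's start sentry has been written to the log
  #       file; the epilogue change below uses this as a sanity check.
  #
  if {[doesTestLogHaveStartSentry]} then {
    tputs stdout "---- log file has the start sentry\n"
  }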

Old (lines 66-98):

        #       Each entry is removed from the queue after it is sent to the
        #       test log file.
        #
        foreach entry [lsort -integer [array names ::test_log_queue]] {
          set newString $::test_log_queue($entry)

          if {[string length $newString] > 0} then {
            appendSharedLogFile $fileName $newString
          }

          unset ::test_log_queue($entry)
        }

        #
        # NOTE: If all entries in the test log queue were just processed,
        #       unset the entire array now.
        #
        if {[array size test_log_queue] == 0} then {
          unset ::test_log_queue
        }
      }

      #
      # NOTE: If an empty string is supplied by the caller, do nothing.
      #
      if {[string length $string] > 0} then {
        appendSharedLogFile $fileName $string
      }
    }
  }

  proc getSoftwareRegistryKey { wow64 } {
    if {$wow64 && [info exists ::tcl_platform(machine)] && [lsearch -exact \

New (lines 107-155):

        #       Each entry is removed from the queue after it is sent to the
        #       test log file.
        #
        foreach entry [lsort -integer [array names ::test_log_queue]] {
          set newString $::test_log_queue($entry)

          if {[string length $newString] > 0} then {
            if {![doesTestLogFileExist $fileName]} then {
              set sentry [getTestLogStartSentry]

              if {[string length $sentry] > 0} then {
                appendSharedLogFile $fileName $sentry
              }
            }

            appendSharedLogFile $fileName $newString
          }

          unset ::test_log_queue($entry)
        }

        #
        # NOTE: If all entries in the test log queue were just processed,
        #       unset the entire array now.
        #
        if {[array size test_log_queue] == 0} then {
          unset ::test_log_queue
        }
      }

      #
      # NOTE: If an empty string is supplied by the caller, do nothing.
      #
      if {[string length $string] > 0} then {
        if {![doesTestLogFileExist $fileName]} then {
          set sentry [getTestLogStartSentry]

          if {[string length $sentry] > 0} then {
            appendSharedLogFile $fileName $sentry
          }
        }

        appendSharedLogFile $fileName $string
      }
    }
  }

  proc getSoftwareRegistryKey { wow64 } {
    if {$wow64 && [info exists ::tcl_platform(machine)] && [lsearch -exact \
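
The effect of the new guards, sketched: whenever the log file is missing or empty at write time, tlog first appends the start-of-log sentry, so every log produced by this version begins with a line the epilogue can later verify. The fixed identifier below is hypothetical; normally getTestLogStartSentry obtains one via getNewTestRunId:

  set ::test_run_id "0123456789abcdef"; # hypothetical id for this sketch
  tlog "first message\n"

  #
  # NOTE: The (previously empty) log file now begins with:
  #
  #       **** START OF TEST LOG "0123456789abcdef" ****
  #       first message
  #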

Old (lines 1078-1091):

      }
    }

    return $result
  }

  proc getTestRunId {} {
    #
    # HACK: Yes, this is a bit ugly; however, it creates a nice unique
    #       identifier to represent the test run, which makes analyzing
    #       the test log files a lot easier.
    #
    if {[isEagle]} then {
      #

New (lines 1135-1152):

      }
    }

    return $result
  }

  proc getTestRunId {} {
    return [expr {[info exists ::test_run_id] ? $::test_run_id : ""}]
  }

  proc getNewTestRunId {} {
    #
    # HACK: Yes, this is a bit ugly; however, it creates a nice unique
    #       identifier to represent the test run, which makes analyzing
    #       the test log files a lot easier.
    #
    if {[isEagle]} then {
      #
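
The old getTestRunId generated the identifier on every call; it is now split so that getNewTestRunId (its body is truncated in this hunk) creates a fresh identifier, while getTestRunId only reads the cached ::test_run_id, returning an empty string when none exists. A sketch of the intended call pattern:

  if {[string length [getTestRunId]] == 0} then {
    set ::test_run_id [getNewTestRunId]
  }

  #
  # NOTE: Stable for the rest of the run; both the start sentry and the
  #       "---- testRunId:" banner in the prologue use this cached value.
  #
  set runId [getTestRunId]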

Old (lines 4162-4182):

    }

    #
    # NOTE: We need several of our test related commands in the global
    #       namespace as well.
    #
    exportAndImportPackageCommands [namespace current] [list \
        tputs ttclLog tlog getSoftwareRegistryKey haveConstraint \
        addConstraint haveOrAddConstraint getConstraints \
        getCachedConstraints useCachedConstraints removeConstraint \
        fixConstraints fixTimingConstraints calculateBogoCops \
        calculateRelativePerformance formatTimeStamp formatElapsedTime \
        sourceIfValid processTestArguments getTclShellFileName \
        getTemporaryPath getFiles getTestFiles getTestRunId getTestLogId \
        getDefaultTestLog getTestLog getLastTestLog getTestSuite \
        getTestMachine getTestPlatform getTestConfiguration getTestSuffix \
        getTestUncountedLeaks getRuntimeAssemblyName getTestAssemblyName \
        canTestExec testExec testClrExec execTestShell isRandomOrder \
        isBreakOnDemand isBreakOnLeak isStopOnFailure isStopOnLeak \
        isExitOnComplete returnInfoScript runTestPrologue runTestEpilogue \
        hookPuts unhookPuts runTest testDebugBreak testArrayGet testShim \

New (lines 4223-4244):

    }

    #
    # NOTE: We need several of our test related commands in the global
    #       namespace as well.
    #
    exportAndImportPackageCommands [namespace current] [list \
        tputs ttclLog doesTestLogHaveStartSentry tlog \
        getSoftwareRegistryKey haveConstraint addConstraint \
        haveOrAddConstraint getConstraints getCachedConstraints \
        useCachedConstraints removeConstraint fixConstraints \
        fixTimingConstraints calculateBogoCops calculateRelativePerformance \
        formatTimeStamp formatElapsedTime sourceIfValid \
        processTestArguments getTclShellFileName getTemporaryPath \
        getFiles getTestFiles getTestRunId getNewTestRunId getTestLogId \
        getDefaultTestLog getTestLog getLastTestLog getTestSuite \
        getTestMachine getTestPlatform getTestConfiguration getTestSuffix \
        getTestUncountedLeaks getRuntimeAssemblyName getTestAssemblyName \
        canTestExec testExec testClrExec execTestShell isRandomOrder \
        isBreakOnDemand isBreakOnLeak isStopOnFailure isStopOnLeak \
        isExitOnComplete returnInfoScript runTestPrologue runTestEpilogue \
        hookPuts unhookPuts runTest testDebugBreak testArrayGet testShim \
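
The net change to this list is the addition of doesTestLogHaveStartSentry and getNewTestRunId (the rest is merely re-wrapped). After exportAndImportPackageCommands runs, they should be callable without qualification, e.g.:

  doesTestLogHaveStartSentry; # rather than ::Eagle::doesTestLogHaveStartSentry
  getNewTestRunId;            # rather than ::Eagle::getNewTestRunId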
Changes to Externals/Eagle/lib/Test1.0/epilogue.eagle.

Old (lines 75-88):

  }

  #
  # NOTE: Show the ending command count (for both Tcl and Eagle).
  #
  tputs $test_channel [appendArgs "---- ending command count: " \
      [info cmdcount] \n]

  if {[isEagle]} then {
    #
    # NOTE: We can only calculate the elapsed microseconds for the tests
    #       if the necessary variables exist and contain valid values.
    #
    if {[info exists test_timestamp(startCount)] && \

New (lines 75-98):

  }

  #
  # NOTE: Show the ending command count (for both Tcl and Eagle).
  #
  tputs $test_channel [appendArgs "---- ending command count: " \
      [info cmdcount] \n]

  #
  # NOTE: Check for the start-of-log sentry in the test log file.  If it
  #       is not found, there is a problem.
  #
  set haveStartSentry [doesTestLogHaveStartSentry]

  if {!$haveStartSentry} then {
    tputs $test_channel "---- missing start-of-log sentry\n"
  }

  if {[isEagle]} then {
    #
    # NOTE: We can only calculate the elapsed microseconds for the tests
    #       if the necessary variables exist and contain valid values.
    #
    if {[info exists test_timestamp(startCount)] && \
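
A hedged sketch of the condition this check is meant to catch: if something truncates or recreates the shared log file mid-run, the sentry written at the start is gone. The writeFile helper is assumed here as the library's counterpart to the readFile call used by doesTestLogHaveStartSentry:

  writeFile [getTestLog] ""; # hypothetical: log truncated mid-run

  #
  # NOTE: The check now fails, the warning above is emitted, and every
  #       success branch below (which also tests $haveStartSentry) is
  #       unreachable, forcing a failure exit code.
  #
  set haveStartSentry [doesTestLogHaveStartSentry]; # false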

Old (lines 223-236):

      #       Check to make sure that all tests pass and then set the
      #       exit code to success; otherwise, we set it to failure.
      #
      set passedOrSkipped [expr {$eagle_tests(Passed) + \
          $eagle_tests(Skipped)}]

      if {![info exists test_suite_errors] && \
          $passedOrSkipped == $eagle_tests(Total)} then {
        set exitCode Success

        if {$eagle_tests(Total) > 0} then {
          tresult Ok "OVERALL RESULT: SUCCESS\n"
        } else {
          tresult Ok "OVERALL RESULT: NONE\n"

New (lines 233-247):

      #       Check to make sure that all tests pass and then set the
      #       exit code to success; otherwise, we set it to failure.
      #
      set passedOrSkipped [expr {$eagle_tests(Passed) + \
          $eagle_tests(Skipped)}]

      if {![info exists test_suite_errors] && \
          $haveStartSentry && \
          $passedOrSkipped == $eagle_tests(Total)} then {
        set exitCode Success

        if {$eagle_tests(Total) > 0} then {
          tresult Ok "OVERALL RESULT: SUCCESS\n"
        } else {
          tresult Ok "OVERALL RESULT: NONE\n"

Old (lines 251-264):

    } else {
      #
      # NOTE: They specified a non-default test pass threshold.  Check to
      #       make sure that we meet or exceed the requirement and then
      #       set the exit code to success; otherwise, set it to failure.
      #
      if {![info exists test_suite_errors] && \
          $percent >= $test_threshold} then {
        set exitCode Success

        if {$eagle_tests(Total) > 0} then {
          tresult Ok [appendArgs \
              "OVERALL RESULT: SUCCESS (" \
              $percent "% >= " $test_threshold %)\n]

New (lines 262-276):

    } else {
      #
      # NOTE: They specified a non-default test pass threshold.  Check to
      #       make sure that we meet or exceed the requirement and then
      #       set the exit code to success; otherwise, set it to failure.
      #
      if {![info exists test_suite_errors] && \
          $haveStartSentry && \
          $percent >= $test_threshold} then {
        set exitCode Success

        if {$eagle_tests(Total) > 0} then {
          tresult Ok [appendArgs \
              "OVERALL RESULT: SUCCESS (" \
              $percent "% >= " $test_threshold %)\n]

Old (lines 339-396):

      # NOTE: The test pass threshold is set to the default value (100%).
      #       Check to make sure that all tests pass and then set the
      #       exit code to success; otherwise, we set it to failure.
      #
      set passedOrSkipped [expr {$::tcltest::numTests(Passed) + \
          $::tcltest::numTests(Skipped)}]

      if {$passedOrSkipped == $::tcltest::numTests(Total)} then {
        set exitCode 0; # Success.

        if {$::tcltest::numTests(Total) > 0} then {
          tputs $test_channel "OVERALL RESULT: SUCCESS\n"
        } else {
          tputs $test_channel "OVERALL RESULT: NONE\n"
        }
      } else {
        set exitCode 1; # Failure.

        tputs $test_channel "OVERALL RESULT: FAILURE\n"
      }

      unset passedOrSkipped
    } else {
      #
      # NOTE: They specified a non-default test pass threshold.  Check to
      #       make sure that we meet or exceed the requirement and then
      #       set the exit code to success; otherwise, set it to failure.
      #
      if {$percent >= $test_threshold} then {
        set exitCode 0; # Success.

        if {$::tcltest::numTests(Total) > 0} then {
          tputs $test_channel [appendArgs \
              "OVERALL RESULT: SUCCESS (" $percent "% >= " $test_threshold %)\n]
        } else {
          tputs $test_channel [appendArgs \
              "OVERALL RESULT: NONE (" $percent "% >= " $test_threshold %)\n]
        }
      } else {
        set exitCode 1; # Failure.

        tputs $test_channel [appendArgs \
            "OVERALL RESULT: FAILURE (" $percent "% < " $test_threshold %)\n]
      }
    }

    unset percent

    tputs $test_channel \n; # NOTE: Blank line.
  }

  #
  # NOTE: Call the Tcl test cleanup procedure now to give it a chance to do
  #       any custom cleanup that has been registered.
  #
  ::tcltest::cleanupTests

  #

New (lines 351-429):

      # NOTE: The test pass threshold is set to the default value (100%).
      #       Check to make sure that all tests pass and then set the
      #       exit code to success; otherwise, we set it to failure.
      #
      set passedOrSkipped [expr {$::tcltest::numTests(Passed) + \
          $::tcltest::numTests(Skipped)}]

      if {![info exists test_suite_errors] && \
          $haveStartSentry && \
          $passedOrSkipped == $::tcltest::numTests(Total)} then {
        set exitCode 0; # Success.

        if {$::tcltest::numTests(Total) > 0} then {
          tputs $test_channel "OVERALL RESULT: SUCCESS\n"
        } else {
          tputs $test_channel "OVERALL RESULT: NONE\n"
        }
      } else {
        set exitCode 1; # Failure.

        if {[info exists test_suite_errors]} then {
          tputs $test_channel [appendArgs "OVERALL ERRORS: " \
              [expr {[llength $test_suite_errors] > 0 ? \
              $test_suite_errors : "<empty>"}] \n]
        }

        tputs $test_channel "OVERALL RESULT: FAILURE\n"
      }

      unset passedOrSkipped
    } else {
      #
      # NOTE: They specified a non-default test pass threshold.  Check to
      #       make sure that we meet or exceed the requirement and then
      #       set the exit code to success; otherwise, set it to failure.
      #
      if {![info exists test_suite_errors] && \
          $haveStartSentry && \
          $percent >= $test_threshold} then {
        set exitCode 0; # Success.

        if {$::tcltest::numTests(Total) > 0} then {
          tputs $test_channel [appendArgs \
              "OVERALL RESULT: SUCCESS (" $percent "% >= " $test_threshold %)\n]
        } else {
          tputs $test_channel [appendArgs \
              "OVERALL RESULT: NONE (" $percent "% >= " $test_threshold %)\n]
        }
      } else {
        set exitCode 1; # Failure.

        if {[info exists test_suite_errors]} then {
          tputs $test_channel [appendArgs "OVERALL ERRORS: " \
              [expr {[llength $test_suite_errors] > 0 ? \
              $test_suite_errors : "<empty>"}] \n]
        }

        tputs $test_channel [appendArgs \
            "OVERALL RESULT: FAILURE (" $percent "% < " $test_threshold %)\n]
      }
    }

    unset percent

    tputs $test_channel \n; # NOTE: Blank line.
  }

  #
  # NOTE: We no longer need the result of the start-of-log sentry check.
  #
  unset haveStartSentry

  #
  # NOTE: Call the Tcl test cleanup procedure now to give it a chance to do
  #       any custom cleanup that has been registered.
  #
  ::tcltest::cleanupTests

  #
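
A condensed sketch of how the sentry now gates the exit code in both the Eagle and Tcl branches above (the real logic also checks the test counts or the pass threshold):

  if {![info exists test_suite_errors] && $haveStartSentry} then {
    set exitCode 0; # NOTE: Success remains possible.
  } else {
    set exitCode 1; # NOTE: Failure, even if every test passed.
  }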
Changes to Externals/Eagle/lib/Test1.0/prologue.eagle.

Old (lines 799-814):

  sourceIfValid prologue [getEnvironmentVariable testPrologue]

  #
  # NOTE: Show the name of the executable and the command line arguments, if
  #       any.  This must be done after the log file has been setup; otherwise,
  #       this information will not be visible in the log file.
  #
  tputs $test_channel [appendArgs "---- testRunId: " \
      [getTestRunId] \n]

  tputs $test_channel [appendArgs "---- processId: " \
      [pid] \n]

  set ppid(0) [catch {info ppid} ppid(1)]

  tputs $test_channel [appendArgs "---- parentProcessId: " \

New (lines 799-819):

  sourceIfValid prologue [getEnvironmentVariable testPrologue]

  #
  # NOTE: Show the name of the executable and the command line arguments, if
  #       any.  This must be done after the log file has been setup; otherwise,
  #       this information will not be visible in the log file.
  #
  if {![info exists test_run_id]} then {
    set test_run_id [getNewTestRunId]
  }

  tputs $test_channel [appendArgs "---- testRunId: " \
      [expr {[info exists test_run_id] ? \
          $test_run_id : "<none>"}] \n]

  tputs $test_channel [appendArgs "---- processId: " \
      [pid] \n]

  set ppid(0) [catch {info ppid} ppid(1)]

  tputs $test_channel [appendArgs "---- parentProcessId: " \
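
The prologue now materializes test_run_id eagerly, before the banner, so the identifier printed here matches the one embedded in the start sentry written by the first tlog call. A sketch of the resulting log preamble, with hypothetical identifier and process ids (other banner lines may intervene):

  **** START OF TEST LOG "0123456789abcdef" ****
  ...
  ---- testRunId: 0123456789abcdef
  ---- processId: 1234
  ---- parentProcessId: 1000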