diff --git a/.travis.yml b/.travis.yml
index 3a8e962c98ada..1ac75e6cd5df1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -134,7 +134,6 @@ script:
       /tmp/julia/bin/julia --check-bounds=yes runtests.jl $TESTSTORUN &&
       /tmp/julia/bin/julia --check-bounds=yes runtests.jl libgit2-online pkg
     - cd `dirname $TRAVIS_BUILD_DIR` && mv julia2 julia &&
-      rm -rf julia/deps/scratch/julia-env &&
       rm -f julia/deps/scratch/libgit2-*/CMakeFiles/CMakeOutput.log
 # uncomment the following if failures are suspected to be due to the out-of-memory killer
 # - dmesg
diff --git a/base/deprecated.jl b/base/deprecated.jl
index 1df19712e226d..89238179c026d 100644
--- a/base/deprecated.jl
+++ b/base/deprecated.jl
@@ -1134,4 +1134,7 @@ end)
 @deprecate cummin(A, dim=1) accumulate(min, A, dim=1)
 @deprecate cummax(A, dim=1) accumulate(max, A, dim=1)
 
+# #19567
+@deprecate runtests(tests = ["all"], numcores = ceil(Int, Sys.CPU_CORES / 2)) Base.Test.runtests(tests, test_dir=joinpath(JULIA_HOME, "../../test"), numcores=numcores)
+
 # End deprecations scheduled for 0.6
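The `@deprecate` above maps the old positional `Base.runtests` API onto the new keyword-based `Base.Test.runtests`; the right-hand side can refer to the old signature's argument names (`tests`, `numcores`), which is why it must forward those names rather than literals. A minimal, self-contained sketch of the same pattern, using hypothetical `old_sum`/`new_sum` names (0.6-era syntax):

```julia
# New API: the collection is positional, the scaling factor is a keyword.
new_sum(xs; scale=1) = scale * sum(xs)

# Old positional API, forwarded with a deprecation warning; `xs` and `scale`
# on the right-hand side are bound to whatever the caller passed.
Base.@deprecate old_sum(xs, scale=1) new_sum(xs, scale=scale)

old_sum([1, 2, 3], 2)  # warns once, then returns 12 via new_sum
```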
diff --git a/base/interactiveutil.jl b/base/interactiveutil.jl
index 1cc3bc35c10fd..83b8c7ad7f469 100644
--- a/base/interactiveutil.jl
+++ b/base/interactiveutil.jl
@@ -632,34 +632,6 @@ function workspace()
     nothing
 end
 
-# testing
-
-"""
-    runtests([tests=["all"] [, numcores=ceil(Int, Sys.CPU_CORES / 2) ]])
-
-Run the Julia unit tests listed in `tests`, which can be either a string or an array of
-strings, using `numcores` processors. (not exported)
-"""
-function runtests(tests = ["all"], numcores = ceil(Int, Sys.CPU_CORES / 2))
-    if isa(tests,AbstractString)
-        tests = split(tests)
-    end
-    ENV2 = copy(ENV)
-    ENV2["JULIA_CPU_CORES"] = "$numcores"
-    try
-        run(setenv(`$(julia_cmd()) $(joinpath(JULIA_HOME,
-            Base.DATAROOTDIR, "julia", "test", "runtests.jl")) $tests`, ENV2))
-    catch
-        buf = PipeBuffer()
-        versioninfo(buf)
-        error("A test has failed. Please submit a bug report (https://github.com/JuliaLang/julia/issues)\n" *
-              "including error messages above and the output of versioninfo():\n$(readstring(buf))")
-    end
-end
-
-# testing
-
 """
     whos(io::IO=STDOUT, m::Module=current_module(), pattern::Regex=r"")
diff --git a/base/test.jl b/base/test.jl
index 2df29630e4177..31971c09ff8ca 100644
--- a/base/test.jl
+++ b/base/test.jl
@@ -19,6 +19,9 @@ export @testset
 export @test_approx_eq, @test_approx_eq_eps, @inferred
 export detect_ambiguities
 export GenericString
+export runtests
+
+include("testdefs.jl")
 
 #-----------------------------------------------------------------------
 
@@ -407,7 +410,7 @@ finish(ts::FallbackTestSet) = ts
 """
     DefaultTestSet
 
-If using the DefaultTestSet, the test results will be recorded. If there
+If using the `DefaultTestSet`, the test results will be recorded. If there
 are any `Fail`s or `Error`s, an exception will be thrown only at the end,
 along with a summary of the test results.
 """
@@ -1075,6 +1078,83 @@ function detect_ambiguities(mods...; imported::Bool=false)
     return collect(ambs)
 end
 
+"""
+    build_results_testset(results)
+
+Construct a testset on the master node which will hold results from all the
+test files run on workers and on node 1. The loop goes through the results,
+inserting them as children of the overall testset if they are testsets,
+handling errors otherwise.
+
+Since the workers don't return information about passing/broken tests, only
+errors or failures, those `Result` types get passed `nothing` for their test
+expressions (and expected/received result in the case of `Broken`).
+
+If a test fails, returning a `RemoteException`, the error is displayed and
+the overall testset has a child testset inserted, with the (empty) Passes
+and Brokens from the worker and the full information about all errors and
+failures encountered running the tests. This information will be displayed
+as a summary at the end of the test run.
+
+If a test fails, returning an `Exception` that is not a `RemoteException`,
+it is likely the Julia process running the test has encountered some kind
+of internal error, such as a segfault. The entire testset is marked as
+Errored, and execution continues until the summary at the end of the test
+run, where the test file is printed out as the "failed expression".
+"""
+function build_results_testset(results)
+    o_ts = DefaultTestSet("Overall")
+    push_testset(o_ts)
+    for res in results
+        if isa(res[2][1], DefaultTestSet)
+            push_testset(res[2][1])
+            record(o_ts, res[2][1])
+            pop_testset()
+        elseif isa(res[2][1], Tuple{Int,Int})
+            fake = DefaultTestSet(res[1])
+            for i in 1:res[2][1][1]
+                record(fake, Pass(:test, nothing, nothing, nothing))
+            end
+            for i in 1:res[2][1][2]
+                record(fake, Broken(:test, nothing))
+            end
+            push_testset(fake)
+            record(o_ts, fake)
+            pop_testset()
+        elseif isa(res[2][1], RemoteException)
+            println("Worker $(res[2][1].pid) failed running test $(res[1]):")
+            Base.showerror(STDOUT, res[2][1].captured)
+            o_ts.anynonpass = true
+            if isa(res[2][1].captured.ex, TestSetException)
+                fake = DefaultTestSet(res[1])
+                for i in 1:res[2][1].captured.ex.pass
+                    record(fake, Pass(:test, nothing, nothing, nothing))
+                end
+                for i in 1:res[2][1].captured.ex.broken
+                    record(fake, Broken(:test, nothing))
+                end
+                for t in res[2][1].captured.ex.errors_and_fails
+                    record(fake, t)
+                end
+                push_testset(fake)
+                record(o_ts, fake)
+                pop_testset()
+            end
+        elseif isa(res[2][1], Exception)
+            # If this test raised an exception that is not a RemoteException, that means
+            # the test runner itself had some problem, so we may have hit a segfault
+            # or something similar. Record this testset as Errored.
+            o_ts.anynonpass = true
+            fake = DefaultTestSet(res[1])
+            record(fake, Error(:test_error, res[1], res[2][1], []))
+            push_testset(fake)
+            record(o_ts, fake)
+            pop_testset()
+        end
+    end
+    return o_ts
+end
+
 """
 The `GenericString` can be used to test generic string APIs that program
 to the `AbstractString` interface, in order to ensure that functions can work
@@ -1087,4 +1167,142 @@ Base.convert(::Type{GenericString}, s::AbstractString) = GenericString(s)
 Base.endof(s::GenericString) = endof(s.string)
 Base.next(s::GenericString, i::Int) = next(s.string, i)
 
+function move_to_node1!(tests, node1, ts)
+    for t in ts
+        if t in tests
+            splice!(tests, findfirst(tests, t))
+            push!(node1, t)
+        end
+    end
+end
+
+function print_test_statistics(test, test_stats, worker, alignments)
+    name_str = rpad(test*" ($worker)", alignments[1], " ")
+    time_str = @sprintf("%7.2f", test_stats[2])
+    time_str = rpad(time_str, alignments[2], " ")
+    gc_str = @sprintf("%5.2f", test_stats[5].total_time/10^9)
+    gc_str = rpad(gc_str, alignments[3], " ")
+    # since there may be quite a few digits in the percentage,
+    # the left-padding here is less to make sure everything fits
+    percent_str = @sprintf("%4.1f", 100*test_stats[5].total_time/(10^9*test_stats[2]))
+    percent_str = rpad(percent_str, alignments[4], " ")
+    alloc_str = @sprintf("%5.2f", test_stats[3]/2^20)
+    alloc_str = rpad(alloc_str, alignments[5], " ")
+    rss_str = @sprintf("%5.2f", test_stats[6]/2^20)
+    rss_str = rpad(rss_str, alignments[6], " ")
+    print_with_color(:white, name_str, " | ", time_str, " | ", gc_str, " | ", percent_str, " | ", alloc_str, " | ", rss_str, "\n")
+end
+
+function runtests(names=["all"]; test_dir=joinpath(JULIA_HOME, Base.DATAROOTDIR, "julia/test/"), numcores::Int=ceil(Int, Sys.CPU_CORES / 2))
+    include(joinpath(test_dir, "choosetests.jl"))
+    tests, n1_tests, bigmemtests, net_on = Main.choosetests(names)
+    tests = unique(tests)
+    n1_tests = unique(n1_tests)
+    bigmemtests = unique(bigmemtests)
+    # In a constrained memory environment, run the tests which may need a lot of memory after all others
+    max_worker_rss = if haskey(ENV, "JULIA_TEST_MAXRSS_MB")
+        parse(Int, ENV["JULIA_TEST_MAXRSS_MB"]) * 2^20
+    else
+        typemax(Csize_t)
+    end
+    node1_tests = String[]
+    move_to_node1!(tests, node1_tests, n1_tests)
+    if max_worker_rss != typemax(Csize_t)
+        move_to_node1!(tests, node1_tests, bigmemtests)
+    end
+
+    if haskey(ENV, "JULIA_TEST_EXEFLAGS")
+        test_exeflags = `$(Base.shell_split(ENV["JULIA_TEST_EXEFLAGS"]))`
+    else
+        test_exeflags = `--check-bounds=yes --startup-file=no --depwarn=error`
+    end
+
+    if haskey(ENV, "JULIA_TEST_EXENAME")
+        test_exename = `$(Base.shell_split(ENV["JULIA_TEST_EXENAME"]))`
+    else
+        test_exename = `$(joinpath(JULIA_HOME, Base.julia_exename()))`
+    end
+
+    cd(test_dir) do
+        n = 1
+        if net_on
+            n = min(numcores, length(tests))
+            n > 1 && addprocs(n; exename=test_exename, exeflags=test_exeflags)
+            BLAS.set_num_threads(1)
+        end
+
+        @everywhere include("../base/testdefs.jl")
+
+        # pretty print the information about gc and mem usage
+        name_align = max(length("Test (Worker)"), maximum(map(x -> length(x) + 3 + ndigits(nworkers()), tests)))
+        elapsed_align = length("Time (s)")
+        gc_align = length("GC (s)")
+        percent_align = length("GC %")
+        alloc_align = length("Alloc (MB)")
+        rss_align = length("RSS (MB)")
+        alignments = (name_align, elapsed_align, gc_align, percent_align, alloc_align, rss_align)
+        print_with_color(:white, rpad("Test (Worker)", name_align, " "), " | ")
+        print_with_color(:white, "Time (s) | GC (s) | GC % | Alloc (MB) | RSS (MB)\n")
+        results = []
+        @sync begin
+            for p in workers()
+                @async begin
+                    while length(tests) > 0
+                        test = shift!(tests)
+                        local resp
+                        try
+                            resp = remotecall_fetch(runtest, p, test)
+                        catch e
+                            resp = [e]
+                        end
+                        push!(results, (test, resp))
+                        if (isa(resp[end], Integer) && (resp[end] > max_worker_rss)) || isa(resp, Exception)
+                            if n > 1
+                                rmprocs(p, waitfor=0.5)
+                                p = addprocs(1; exename=test_exename, exeflags=test_exeflags)[1]
+                                remotecall_fetch(()->include("../base/testdefs.jl"), p)
+                            else
+                                # single-process testing: bail if the memory limit is reached, or on an exception
+                                isa(resp, Exception) ? rethrow(resp) : error("Halting tests. Memory limit reached: $resp > $max_worker_rss")
+                            end
+                        end
+                        if !isa(resp[1], Exception)
+                            print_test_statistics(test, resp, p, alignments)
+                        end
+                    end
+                end
+            end
+        end
+        # Free up memory =)
+        n > 1 && rmprocs(workers(), waitfor=5.0)
+        for t in node1_tests
+            # As above, try to run each test
+            # which must run on node 1. If
+            # the test fails, catch the error,
+            # and either way, append the results
+            # to the overall aggregator
+            local resp
+            try
+                resp = runtest(t)
+            catch e
+                resp = [e]
+            end
+            push!(results, (t, resp))
+            if !isa(resp[1], Exception)
+                print_test_statistics(t, resp, 1, alignments)
+            end
+        end
+        o_ts = build_results_testset(results)
+        println()
+        print_test_results(o_ts, 1)
+        if !o_ts.anynonpass
+            println("    \033[32;1mSUCCESS\033[0m")
+        else
+            println("    \033[31;1mFAILURE\033[0m")
+            print_test_errors(o_ts)
+            error()
+        end
+    end
+end
+
 end # module
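For reference, the `results` records that `build_results_testset` walks have the shape produced by `runtest` (added in `base/testdefs.jl` below): the five-element `@timed` tuple, with the testset collapsed to a `(passes, broken)` count tuple when the file passed, and the worker's `Sys.maxrss()` appended. A minimal sketch of one such record, assuming a 0.6-era Julia:

```julia
using Base.Test

# resp = [testset-or-counts, elapsed time, bytes allocated, gc time, GC_Diff, rss]
t = @timed @testset "demo" begin
    @test 1 + 1 == 2
end
resp = vcat(collect(t), Sys.maxrss())
resp[1]    # here a DefaultTestSet; a worker returns (passes, broken) on success
resp[end]  # resident set size, compared against the JULIA_TEST_MAXRSS_MB limit
```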
diff --git a/base/testdefs.jl b/base/testdefs.jl
new file mode 100644
index 0000000000000..a448de6afc16d
--- /dev/null
+++ b/base/testdefs.jl
@@ -0,0 +1,33 @@
+# This file is a part of Julia. License is MIT: http://julialang.org/license
+
+function runtest(name, isolate=true)
+    if isolate
+        mod_name = Symbol("TestMain_", replace(name, '/', '_'))
+        m = eval(Main, :(module $mod_name end))
+    else
+        m = Main
+    end
+    eval(m, :(using Base.Test))
+    ex = quote
+        @timed @testset $"$name" begin
+            include($"$name.jl")
+        end
+    end
+    res_and_time_data = eval(m, ex)
+    rss = Sys.maxrss()
+    # res_and_time_data[1] is the testset; collapse it to counts when everything passed
+    passes, fails, errors, broken, c_passes, c_fails, c_errors, c_broken = Base.Test.get_test_counts(res_and_time_data[1])
+    if res_and_time_data[1].anynonpass == false
+        res_and_time_data = (
+            (passes+c_passes, broken+c_broken),
+            res_and_time_data[2],
+            res_and_time_data[3],
+            res_and_time_data[4],
+            res_and_time_data[5])
+    end
+    vcat(collect(res_and_time_data), rss)
+end
+
+# looking in . messes things up badly
+#filter!(x->x!=".", LOAD_PATH)
+nothing
diff --git a/test/Makefile b/test/Makefile
index f67f61ff5bc59..c43f81947f5de 100644
--- a/test/Makefile
+++ b/test/Makefile
@@ -18,7 +18,7 @@ endif
 $(TESTS):
 	@cd $(SRCDIR) && \
 	$(ULIMIT_TEST) \
-	$(call PRINT_JULIA, $(call spawn,$(JULIA_EXECUTABLE)) --check-bounds=yes --startup-file=no ./runtests.jl $@)
+	$(call PRINT_JULIA, $(call spawn,$(JULIA_EXECUTABLE)) --check-bounds=yes --startup-file=no -e "Base.Test.runtests(ARGS)" $@)
 
 perf:
 	@$(MAKE) -C $(SRCDIR)/perf all
diff --git a/test/choosetests.jl b/test/choosetests.jl
index 5a377a54d1371..5273ab0d53830 100644
--- a/test/choosetests.jl
+++ b/test/choosetests.jl
@@ -1,8 +1,9 @@
 # This file is a part of Julia. License is MIT: http://julialang.org/license
 
-@doc """
+"""
+    choosetests(choices = []) -> tests, node1_tests, bigmemtests, net_on
 
-`tests, net_on = choosetests(choices)` selects a set of tests to be
+`choosetests(choices)` selects a set of tests to be
 run. `choices` should be a vector of test names; if empty or set to
 `["all"]`, all tests are selected.
 
@@ -12,7 +13,7 @@ directories.
 
 Upon return, `tests` is a vector of fully-expanded test names, and
 `net_on` is true if networking is available (required for some tests).
-""" ->
+"""
 function choosetests(choices = [])
     testnames = [
         "linalg", "subarray", "core", "inference", "keywordargs", "numbers",
@@ -162,5 +163,7 @@ function choosetests(choices = [])
 
     filter!(x -> !(x in skip_tests), tests)
 
-    tests, net_on
+    node1_tests = ["compile"]
+    bigmemtests = ["parallel"]
+    tests, node1_tests, bigmemtests, net_on
 end
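Since `choosetests` now returns four values, any out-of-tree caller has to destructure the new tuple. A sketch of consuming the new contract the way `Base.Test.runtests` does (assumes it is run from the `test/` directory of a source checkout):

```julia
include("choosetests.jl")
tests, node1_tests, bigmemtests, net_on = choosetests(["all"])
# Pull the tests that must run on the master process out of the parallel
# queue, mirroring move_to_node1! in base/test.jl:
for t in node1_tests
    t in tests && splice!(tests, findfirst(tests, t))
end
```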
diff --git a/test/runtests.jl b/test/runtests.jl
index 5172d85719437..4575563cdf570 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -1,197 +1,5 @@
 # This file is a part of Julia. License is MIT: http://julialang.org/license
 
-using Base.Test
-include("choosetests.jl")
-tests, net_on = choosetests(ARGS)
-tests = unique(tests)
-
-const max_worker_rss = if haskey(ENV, "JULIA_TEST_MAXRSS_MB")
-    parse(Int, ENV["JULIA_TEST_MAXRSS_MB"]) * 2^20
-else
-    typemax(Csize_t)
-end
-
-if haskey(ENV, "JULIA_TEST_EXEFLAGS")
-    const test_exeflags = `$(Base.shell_split(ENV["JULIA_TEST_EXEFLAGS"]))`
-else
-    const test_exeflags = `--check-bounds=yes --startup-file=no --depwarn=error`
-end
-
-if haskey(ENV, "JULIA_TEST_EXENAME")
-    const test_exename = `$(Base.shell_split(ENV["JULIA_TEST_EXENAME"]))`
-else
-    const test_exename = `$(joinpath(JULIA_HOME, Base.julia_exename()))`
-end
-
-const node1_tests = String[]
-function move_to_node1(t)
-    if t in tests
-        splice!(tests, findfirst(tests, t))
-        push!(node1_tests, t)
-    end
-end
-# Base.compile only works from node 1, so compile test is handled specially
-move_to_node1("compile")
-# In a constrained memory environment, run the parallel test after all other tests
-# since it starts a lot of workers and can easily exceed the maximum memory
-max_worker_rss != typemax(Csize_t) && move_to_node1("parallel")
-
-cd(dirname(@__FILE__)) do
-    n = 1
-    if net_on
-        n = min(Sys.CPU_CORES, length(tests))
-        n > 1 && addprocs(n; exename=test_exename, exeflags=test_exeflags)
-        BLAS.set_num_threads(1)
-    end
-    @everywhere include("testdefs.jl")
-
-    #pretty print the information about gc and mem usage
-    name_align = max(length("Test (Worker)"), maximum(map(x -> length(x) + 3 + ndigits(nworkers()), tests)))
-    elapsed_align = length("Time (s)")
-    gc_align = length("GC (s)")
-    percent_align = length("GC %")
-    alloc_align = length("Alloc (MB)")
-    rss_align = length("RSS (MB)")
-    print_with_color(:white, rpad("Test (Worker)",name_align," "), " | ")
-    print_with_color(:white, "Time (s) | GC (s) | GC % | Alloc (MB) | RSS (MB)\n")
-    results=[]
-    @sync begin
-        for p in workers()
-            @async begin
-                while length(tests) > 0
-                    test = shift!(tests)
-                    local resp
-                    try
-                        resp = remotecall_fetch(runtests, p, test)
-                    catch e
-                        resp = [e]
-                    end
-                    push!(results, (test, resp))
-                    if (isa(resp[end], Integer) && (resp[end] > max_worker_rss)) || isa(resp, Exception)
-                        if n > 1
-                            rmprocs(p, waitfor=0.5)
-                            p = addprocs(1; exename=test_exename, exeflags=test_exeflags)[1]
-                            remotecall_fetch(()->include("testdefs.jl"), p)
-                        else
-                            # single process testing, bail if mem limit reached, or, on an exception.
-                            isa(resp, Exception) ? rethrow(resp) : error("Halting tests. Memory limit reached : $resp > $max_worker_rss")
-                        end
-                    end
-                    if !isa(resp[1], Exception)
-                        print_with_color(:white, rpad(test*" ($p)", name_align, " "), " | ")
-                        time_str = @sprintf("%7.2f",resp[2])
-                        print_with_color(:white, rpad(time_str,elapsed_align," "), " | ")
-                        gc_str = @sprintf("%5.2f",resp[5].total_time/10^9)
-                        print_with_color(:white, rpad(gc_str,gc_align," "), " | ")
-
-                        # since there may be quite a few digits in the percentage,
-                        # the left-padding here is less to make sure everything fits
-                        percent_str = @sprintf("%4.1f",100*resp[5].total_time/(10^9*resp[2]))
-                        print_with_color(:white, rpad(percent_str,percent_align," "), " | ")
-                        alloc_str = @sprintf("%5.2f",resp[3]/2^20)
-                        print_with_color(:white, rpad(alloc_str,alloc_align," "), " | ")
-                        rss_str = @sprintf("%5.2f",resp[6]/2^20)
-                        print_with_color(:white, rpad(rss_str,rss_align," "), "\n")
-                    end
-                end
-            end
-        end
-    end
-    # Free up memory =)
-    n > 1 && rmprocs(workers(), waitfor=5.0)
-    for t in node1_tests
-        # As above, try to run each test
-        # which must run on node 1. If
-        # the test fails, catch the error,
-        # and either way, append the results
-        # to the overall aggregator
-        n > 1 && print("\tFrom worker 1:\t")
-        local resp
-        try
-            resp = runtests(t)
-        catch e
-            resp = [e]
-        end
-        push!(results, (t, resp))
-    end
-    #=
-    Construct a testset on the master node which will hold results from all the
-    test files run on workers and on node1. The loop goes through the results,
-    inserting them as children of the overall testset if they are testsets,
-    handling errors otherwise.
-
-    Since the workers don't return information about passing/broken tests, only
-    errors or failures, those Result types get passed `nothing` for their test
-    expressions (and expected/received result in the case of Broken).
-
-    If a test failed, returning a `RemoteException`, the error is displayed and
-    the overall testset has a child testset inserted, with the (empty) Passes
-    and Brokens from the worker and the full information about all errors and
-    failures encountered running the tests. This information will be displayed
-    as a summary at the end of the test run.
+using Base.Test
 
-    If a test failed, returning an `Exception` that is not a `RemoteException`,
-    it is likely the julia process running the test has encountered some kind
-    of internal error, such as a segfault. The entire testset is marked as
-    Errored, and execution continues until the summary at the end of the test
-    run, where the test file is printed out as the "failed expression".
-    =#
-    o_ts = Base.Test.DefaultTestSet("Overall")
-    Base.Test.push_testset(o_ts)
-    for res in results
-        if isa(res[2][1], Base.Test.DefaultTestSet)
-            Base.Test.push_testset(res[2][1])
-            Base.Test.record(o_ts, res[2][1])
-            Base.Test.pop_testset()
-        elseif isa(res[2][1], Tuple{Int,Int})
-            fake = Base.Test.DefaultTestSet(res[1])
-            for i in 1:res[2][1][1]
-                Base.Test.record(fake, Base.Test.Pass(:test, nothing, nothing, nothing))
-            end
-            for i in 1:res[2][1][2]
-                Base.Test.record(fake, Base.Test.Broken(:test, nothing))
-            end
-            Base.Test.push_testset(fake)
-            Base.Test.record(o_ts, fake)
-            Base.Test.pop_testset()
-        elseif isa(res[2][1], RemoteException)
-            println("Worker $(res[2][1].pid) failed running test $(res[1]):")
-            Base.showerror(STDOUT,res[2][1].captured)
-            o_ts.anynonpass = true
-            if isa(res[2][1].captured.ex, Base.Test.TestSetException)
-                fake = Base.Test.DefaultTestSet(res[1])
-                for i in 1:res[2][1].captured.ex.pass
-                    Base.Test.record(fake, Base.Test.Pass(:test, nothing, nothing, nothing))
-                end
-                for i in 1:res[2][1].captured.ex.broken
-                    Base.Test.record(fake, Base.Test.Broken(:test, nothing))
-                end
-                for t in res[2][1].captured.ex.errors_and_fails
-                    Base.Test.record(fake, t)
-                end
-                Base.Test.push_testset(fake)
-                Base.Test.record(o_ts, fake)
-                Base.Test.pop_testset()
-            end
-        elseif isa(res[2][1], Exception)
-            # If this test raised an exception that is not a RemoteException, that means
-            # the test runner itself had some problem, so we may have hit a segfault
-            # or something similar. Record this testset as Errored.
-            o_ts.anynonpass = true
-            fake = Base.Test.DefaultTestSet(res[1])
-            Base.Test.record(fake, Base.Test.Error(:test_error, res[1], res[2][1], []))
-            Base.Test.push_testset(fake)
-            Base.Test.record(o_ts, fake)
-            Base.Test.pop_testset()
-        end
-    end
-    println()
-    Base.Test.print_test_results(o_ts,1)
-    if !o_ts.anynonpass
-        println("    \033[32;1mSUCCESS\033[0m")
-    else
-        println("    \033[31;1mFAILURE\033[0m")
-        Base.Test.print_test_errors(o_ts)
-        error()
-    end
-end
+Base.Test.runtests(ARGS)
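With the harness living in `Base.Test`, the Makefile recipe above and a REPL session become equivalent entry points. For example (flags copied from the Makefile hunk; test names are illustrative):

```julia
# Shell: julia --check-bounds=yes --startup-file=no -e "Base.Test.runtests(ARGS)" core numbers
using Base.Test
Base.Test.runtests()                      # everything, on ceil(Int, Sys.CPU_CORES / 2) cores
Base.Test.runtests(["core", "numbers"])   # a subset
Base.Test.runtests(["all"], numcores=1)   # serial run
```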