Skip to content

Commit

Permalink
Merge pull request #23608 from JuliaLang/jb/no_importall
Browse files Browse the repository at this point in the history
remove uses of `importall` in Base
  • Loading branch information
JeffBezanson authored Sep 7, 2017
2 parents 6d1680a + 8052341 commit bc12af5
Show file tree
Hide file tree
Showing 8 changed files with 55 additions and 36 deletions.
22 changes: 19 additions & 3 deletions base/deprecated.jl
Original file line number Diff line number Diff line change
Expand Up @@ -225,6 +225,7 @@ for f in (:sin, :sinh, :sind, :asin, :asinh, :asind,
:cot, :coth, :cotd, :acot, :acotd,
:sec, :sech, :secd, :asech,
:csc, :csch, :cscd, :acsch)
@eval import .Math: $f
@eval @deprecate $f(A::SparseMatrixCSC) $f.(A)
end

Expand Down Expand Up @@ -259,6 +260,7 @@ for f in (
# base/complex.jl
:cis,
)
@eval import .Math: $f
@eval @dep_vectorize_1arg Number $f
end
# base/fastmath.jl
Expand All @@ -267,13 +269,15 @@ for f in ( :acos_fast, :acosh_fast, :angle_fast, :asin_fast, :asinh_fast,
:cosh_fast, :exp10_fast, :exp2_fast, :exp_fast, :expm1_fast,
:lgamma_fast, :log10_fast, :log1p_fast, :log2_fast, :log_fast,
:sin_fast, :sinh_fast, :sqrt_fast, :tan_fast, :tanh_fast )
@eval FastMath Base.@dep_vectorize_1arg Number $f
@eval import .FastMath: $f
@eval @dep_vectorize_1arg Number $f
end
for f in (
:trunc, :floor, :ceil, :round, # base/floatfuncs.jl
:rad2deg, :deg2rad, :exponent, :significand, # base/math.jl
:sind, :cosd, :tand, :asind, :acosd, :atand, :asecd, :acscd, :acotd, # base/special/trig.jl
)
@eval import .Math: $f
@eval @dep_vectorize_1arg Real $f
end
# base/complex.jl
Expand Down Expand Up @@ -312,11 +316,13 @@ for f in (
# base/math.jl
:log, :hypot, :atan2,
)
@eval import .Math: $f
@eval @dep_vectorize_2arg Number $f
end
# base/fastmath.jl
for f in (:pow_fast, :atan2_fast, :hypot_fast, :max_fast, :min_fast, :minmax_fast)
@eval FastMath Base.@dep_vectorize_2arg Number $f
@eval import .FastMath: $f
@eval @dep_vectorize_2arg Number $f
end
for f in (
:max, :min, # base/math.jl
Expand Down Expand Up @@ -737,6 +743,7 @@ end
for f in (:sec, :sech, :secd, :asec, :asech,
:csc, :csch, :cscd, :acsc, :acsch,
:cot, :coth, :cotd, :acot, :acoth)
@eval import .Math: $f
@eval @deprecate $f(A::AbstractArray{<:Number}) $f.(A)
end

Expand All @@ -746,6 +753,7 @@ end
@deprecate complex(A::AbstractArray, B::AbstractArray) complex.(A, B)

# Deprecate manually vectorized clamp methods in favor of compact broadcast syntax
import .Math: clamp
@deprecate clamp(A::AbstractArray, lo, hi) clamp.(A, lo, hi)

# Deprecate manually vectorized round methods in favor of compact broadcast syntax
Expand Down Expand Up @@ -845,7 +853,7 @@ end
@deprecate ~(A::AbstractArray) .~A
@deprecate ~(B::BitArray) .~B

function frexp(A::Array{<:AbstractFloat})
function Math.frexp(A::Array{<:AbstractFloat})
depwarn(string("`frexp(x::Array)` is discontinued. Though not a direct replacement, ",
"consider using dot-syntax to `broadcast` scalar `frexp` over `Array`s ",
"instead, for example `frexp.(rand(4))`."), :frexp)
Expand Down Expand Up @@ -1235,6 +1243,7 @@ end
for name in ("alnum", "alpha", "cntrl", "digit", "number", "graph",
"lower", "print", "punct", "space", "upper", "xdigit")
f = Symbol("is",name)
@eval import .UTF8proc: $f
@eval @deprecate ($f)(s::AbstractString) all($f, s)
end

Expand Down Expand Up @@ -1296,9 +1305,11 @@ next(p::Union{Process, ProcessChain}, i::Int) = (getindex(p, i), i + 1)
return i == 1 ? getfield(p, p.openstream) : p
end

import .LinAlg: cond
@deprecate cond(F::LinAlg.LU, p::Integer) cond(full(F), p)

# PR #21359
import .Random: srand
@deprecate srand(r::MersenneTwister, filename::AbstractString, n::Integer=4) srand(r, read!(filename, Array{UInt32}(Int(n))))
@deprecate srand(filename::AbstractString, n::Integer=4) srand(read!(filename, Array{UInt32}(Int(n))))
@deprecate MersenneTwister(filename::AbstractString) srand(MersenneTwister(0), read!(filename, Array{UInt32}(Int(4))))
Expand All @@ -1308,6 +1319,7 @@ end
@deprecate versioninfo(io::IO, verbose::Bool) versioninfo(io, verbose=verbose)

# PR #22188
import .LinAlg: cholfact, cholfact!
@deprecate cholfact!(A::StridedMatrix, uplo::Symbol, ::Type{Val{false}}) cholfact!(Hermitian(A, uplo), Val(false))
@deprecate cholfact!(A::StridedMatrix, uplo::Symbol) cholfact!(Hermitian(A, uplo))
@deprecate cholfact(A::StridedMatrix, uplo::Symbol, ::Type{Val{false}}) cholfact(Hermitian(A, uplo), Val(false))
Expand All @@ -1316,6 +1328,7 @@ end
@deprecate cholfact(A::StridedMatrix, uplo::Symbol, ::Type{Val{true}}; tol = 0.0) cholfact(Hermitian(A, uplo), Val(true), tol = tol)

# PR #22245
import .LinAlg: isposdef, isposdef!
@deprecate isposdef(A::AbstractMatrix, UL::Symbol) isposdef(Hermitian(A, UL))
@deprecate isposdef!(A::StridedMatrix, UL::Symbol) isposdef!(Hermitian(A, UL))

Expand Down Expand Up @@ -1425,6 +1438,7 @@ export conv, conv2, deconv, filt, filt!, xcorr
@deprecate cov(X::AbstractVecOrMat, Y::AbstractVecOrMat, vardim::Int, corrected::Bool) cov(X, Y, vardim, corrected=corrected)

# bkfact
import .LinAlg: bkfact, bkfact!
function bkfact(A::StridedMatrix, uplo::Symbol, symmetric::Bool = issymmetric(A), rook::Bool = false)
depwarn("bkfact with uplo and symmetric arguments deprecated. Please use bkfact($(symmetric ? "Symmetric(" : "Hermitian(")A, :$uplo))",
:bkfact)
Expand Down Expand Up @@ -1458,6 +1472,7 @@ end
@deprecate literal_pow(a, b, ::Type{Val{N}}) where {N} literal_pow(a, b, Val(N)) false
@eval IteratorsMD @deprecate split(t, V::Type{Val{n}}) where {n} split(t, Val(n)) false
@deprecate sqrtm(A::UpperTriangular{T},::Type{Val{realmatrix}}) where {T,realmatrix} sqrtm(A, Val(realmatrix))
import .LinAlg: lufact, lufact!, qrfact, qrfact!, cholfact, cholfact!
@deprecate lufact(A::AbstractMatrix, ::Type{Val{false}}) lufact(A, Val(false))
@deprecate lufact(A::AbstractMatrix, ::Type{Val{true}}) lufact(A, Val(true))
@deprecate lufact!(A::AbstractMatrix, ::Type{Val{false}}) lufact!(A, Val(false))
Expand Down Expand Up @@ -1688,6 +1703,7 @@ export hex2num
@deprecate cfunction(f, r, a::Tuple) cfunction(f, r, Tuple{a...})

# PR 23341
import .LinAlg: diagm
@deprecate diagm(A::SparseMatrixCSC) spdiagm(sparsevec(A))

# PR #23373
Expand Down
3 changes: 1 addition & 2 deletions base/pkg/entry.jl
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,7 @@ module Entry

import Base: thispatch, nextpatch, nextminor, nextmajor, check_new_version
import ..Reqs, ..Read, ..Query, ..Resolve, ..Cache, ..Write, ..Dir
import ...LibGit2
importall ...LibGit2
using ...LibGit2
import ...Pkg.PkgError
using ..Types

Expand Down
4 changes: 2 additions & 2 deletions base/pkg/write.jl
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,8 @@

module Write

import ...LibGit2, ..Cache, ..Read, ...Pkg.PkgError
importall ...LibGit2
import ..Cache, ..Read, ...Pkg.PkgError
using ...LibGit2

function prefetch(pkg::AbstractString, sha1::AbstractString)
isempty(Cache.prefetch(pkg, Read.url(pkg), sha1)) && return
Expand Down
4 changes: 2 additions & 2 deletions base/sharedarray.jl
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# This file is a part of Julia. License is MIT: https://julialang.org/license

import .Serializer: serialize_cycle_header, serialize_type, writetag, UNDEFREF_TAG
import .Distributed: RRID
import .Distributed: RRID, procs

mutable struct SharedArray{T,N} <: DenseArray{T,N}
id::RRID
Expand Down Expand Up @@ -499,7 +499,7 @@ function rand!(S::SharedArray{T}) where T
return S
end

function randn!(S::SharedArray)
function Random.randn!(S::SharedArray)
f = S->map!(x -> randn(), S.loc_subarr_1d, S.loc_subarr_1d)
@sync for p in procs(S)
@async remotecall_wait(f, p, S)
Expand Down
5 changes: 3 additions & 2 deletions base/sparse/cholmod.jl
Original file line number Diff line number Diff line change
Expand Up @@ -9,14 +9,15 @@ import Base.LinAlg: (\), A_mul_Bc, A_mul_Bt, Ac_ldiv_B, Ac_mul_B, At_ldiv_B, At_
cholfact, cholfact!, det, diag, ishermitian, isposdef,
issuccess, issymmetric, ldltfact, ldltfact!, logdet

importall ..SparseArrays
using ..SparseArrays

export
Dense,
Factor,
Sparse

import ..SparseArrays: AbstractSparseMatrix, SparseMatrixCSC, increment, indtype
import ..SparseArrays: AbstractSparseMatrix, SparseMatrixCSC, increment, indtype, sparse, speye,
spzeros, nnz

#########
# Setup #
Expand Down
2 changes: 1 addition & 1 deletion base/sparse/umfpack.jl
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ export UmfpackLU
import Base: (\), Ac_ldiv_B, At_ldiv_B, findnz, getindex, show, size
import Base.LinAlg: A_ldiv_B!, Ac_ldiv_B!, At_ldiv_B!, Factorization, det, lufact

importall ..SparseArrays
using ..SparseArrays
import ..SparseArrays: increment, increment!, decrement, decrement!, nnz

include("umfpack_h.jl")
Expand Down
2 changes: 1 addition & 1 deletion base/strings/strings.jl
Original file line number Diff line number Diff line change
Expand Up @@ -7,4 +7,4 @@ include("strings/search.jl")
include("strings/util.jl")
include("strings/io.jl")
include("strings/utf8proc.jl")
importall .UTF8proc
using .UTF8proc
49 changes: 26 additions & 23 deletions base/sysimg.jl
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ include("operators.jl")
include("pointer.jl")
include("refpointer.jl")
include("checked.jl")
importall .Checked
using .Checked

# buggy handling of ispure in type-inference means this should be
# after re-defining the basic operations that they might try to call
Expand Down Expand Up @@ -132,7 +132,7 @@ Matrix(m::Integer, n::Integer) = Matrix{Any}(Int(m), Int(n))
# numeric operations
include("hashing.jl")
include("rounding.jl")
importall .Rounding
using .Rounding
include("float.jl")
include("twiceprecision.jl")
include("complex.jl")
Expand All @@ -153,7 +153,7 @@ include("strings/string.jl")

# SIMD loops
include("simdloop.jl")
importall .SimdLoop
using .SimdLoop

# map-reduce operators
include("reduce.jl")
Expand Down Expand Up @@ -219,7 +219,7 @@ using .PermutedDimsArrays
include("nullable.jl")

include("broadcast.jl")
importall .Broadcast
using .Broadcast

# define the real ntuple functions
@generated function ntuple(f::F, ::Val{N}) where {F,N}
Expand All @@ -242,7 +242,7 @@ end

# base64 conversions (need broadcast)
include("base64.jl")
importall .Base64
using .Base64

# version
include("version.jl")
Expand All @@ -267,18 +267,19 @@ include("weakkeydict.jl")
include("stream.jl")
include("socket.jl")
include("filesystem.jl")
importall .Filesystem
using .Filesystem
include("process.jl")
include("multimedia.jl")
importall .Multimedia
using .Multimedia
include("grisu/grisu.jl")
import .Grisu.print_shortest
include("methodshow.jl")

# core math functions
include("floatfuncs.jl")
include("math.jl")
importall .Math
using .Math
import .Math: gamma
const √ = sqrt
const ∛ = cbrt

Expand All @@ -289,29 +290,29 @@ include("reducedim.jl") # macros in this file relies on string.jl

# basic data structures
include("ordering.jl")
importall .Order
using .Order

# Combinatorics
include("sort.jl")
importall .Sort
using .Sort

# Fast math
include("fastmath.jl")
importall .FastMath
using .FastMath

function deepcopy_internal end

# BigInts and BigFloats
include("gmp.jl")
importall .GMP
using .GMP

for T in [Signed, Integer, BigInt, Float32, Float64, Real, Complex, Rational]
@eval flipsign(x::$T, ::Unsigned) = +x
@eval copysign(x::$T, ::Unsigned) = +x
end

include("mpfr.jl")
importall .MPFR
using .MPFR
big(n::Integer) = convert(BigInt,n)
big(x::AbstractFloat) = convert(BigFloat,x)
big(q::Rational) = big(numerator(q))//big(denominator(q))
Expand All @@ -329,22 +330,24 @@ using .MathConstants: ℯ, π, pi
# random number generation
include("random/dSFMT.jl")
include("random/random.jl")
importall .Random
using .Random
import .Random: rand, rand!

# (s)printf macros
include("printf.jl")
importall .Printf
using .Printf

# metaprogramming
include("meta.jl")

# enums
include("Enums.jl")
importall .Enums
using .Enums

# concurrency and parallelism
include("serialize.jl")
importall .Serializer
using .Serializer
import .Serializer: serialize, deserialize
include("channels.jl")

# memory-mapped and shared arrays
Expand All @@ -353,7 +356,7 @@ import .Mmap

# utilities - timing, help, edit
include("datafmt.jl")
importall .DataFmt
using .DataFmt
include("deepcopy.jl")
include("interactiveutil.jl")
include("summarysize.jl")
Expand All @@ -372,14 +375,14 @@ include("client.jl")

# Stack frames and traces
include("stacktraces.jl")
importall .StackTraces
using .StackTraces

# misc useful functions & macros
include("util.jl")

# dense linear algebra
include("linalg/linalg.jl")
importall .LinAlg
using .LinAlg
const ⋅ = dot
const × = cross

Expand All @@ -394,20 +397,20 @@ include("pkg/pkg.jl")

# profiler
include("profile.jl")
importall .Profile
using .Profile

# dates
include("dates/Dates.jl")
import .Dates: Date, DateTime, DateFormat, @dateformat_str, now

# sparse matrices, vectors, and sparse linear algebra
include("sparse/sparse.jl")
importall .SparseArrays
using .SparseArrays

include("asyncmap.jl")

include("distributed/Distributed.jl")
importall .Distributed
using .Distributed
include("sharedarray.jl")

# code loading
Expand Down

4 comments on commit bc12af5

@nanosoldier
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Executing the daily benchmark build, I will reply here when finished:

@nanosoldier runbenchmarks(ALL, isdaily = true)

@nanosoldier
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Something went wrong when running your job:

NanosoldierError: failed to run benchmarks against primary commit: failed process: Process(`sudo cset shield -e su nanosoldier -- -c ./benchscript.sh`, ProcessExited(1)) [1]

Logs and partial data can be found here
cc @ararslan

@ararslan
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@nanosoldier runbenchmarks(ALL, isdaily=true)

@nanosoldier
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Your benchmark job has completed - possible performance regressions were detected. A full report can be found here. cc @ararslan

Please sign in to comment.