Merge pull request #959 from JuliaOpt/ml/06
initial implementation of new default solvers; passing on Julia 0.6
mlubin authored Feb 15, 2017
2 parents 55c3ebc + 4e1a016 commit 134b372
Showing 21 changed files with 237 additions and 106 deletions.
2 changes: 1 addition & 1 deletion REQUIRE
@@ -1,5 +1,5 @@
 julia 0.5
-MathProgBase 0.5.1 0.6
+MathProgBase 0.6 0.7
 ReverseDiffSparse 0.7 0.8
 ForwardDiff 0.3 0.4
 Calculus
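
For readers unfamiliar with the REQUIRE format: each entry gives an inclusive lower bound and an exclusive upper bound, so the updated line pins MathProgBase to the 0.6.x series. A minimal illustration (not part of the commit; the version is hypothetical):

```julia
# REQUIRE bounds are [lower, upper): "MathProgBase 0.6 0.7" means 0.6 <= v < 0.7.
v = v"0.6.1"                 # hypothetical installed version
@assert v"0.6" <= v < v"0.7"
```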
10 changes: 5 additions & 5 deletions src/JuMPArray.jl
@@ -3,14 +3,14 @@
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.

-immutable JuMPArray{T,N,NT<:NTuple} <: JuMPContainer{T,N}
+immutable JuMPArray{T,N,NT} <: JuMPContainer{T,N}
     innerArray::Array{T,N}
     indexsets::NT
-    lookup::NTuple{N,Dict}
+    lookup::NTuple{N,Any}
     meta::Dict{Symbol,Any}
 end

-@generated function JuMPArray{T,N}(innerArray::Array{T,N}, indexsets::NTuple{N})
+@generated function JuMPArray{T,N}(innerArray::Array{T,N}, indexsets::NTuple{N,Any})
     dicttuple = Expr(:tuple)
     for i in 1:N
         inner = quote
@@ -36,14 +36,14 @@ end

 Base.getindex(d::JuMPArray, ::Colon) = d.innerArray[:]

-@generated function Base.getindex{T,N,NT<:NTuple}(d::JuMPArray{T,N,NT}, idx...)
+@generated function Base.getindex{T,N,NT}(d::JuMPArray{T,N,NT}, idx...)
     if N != length(idx)
         error("Indexed into a JuMPArray with $(length(idx)) indices (expected $N indices)")
     end
     Expr(:call, :getindex, :(d.innerArray), _to_cartesian(d,NT,idx)...)
 end

-@generated function Base.setindex!{T,N,NT<:NTuple}(d::JuMPArray{T,N,NT}, v, idx...)
+@generated function Base.setindex!{T,N,NT}(d::JuMPArray{T,N,NT}, v, idx...)
     if N != length(idx)
         error("Indexed into a JuMPArray with $(length(idx)) indices (expected $N indices)")
     end
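
The NTuple changes above track Julia 0.6's type-system overhaul: on 0.6, `NTuple{N}` abbreviates the UnionAll `NTuple{N,T} where T`, and the repeated `T` must bind to a single concrete type, so heterogeneous index-set tuples no longer match it; `NTuple{N,Any}` (or an unconstrained parameter like the new `NT`) does. A sketch of the behavior, assuming Julia 0.6 (illustrative, not from the diff):

```julia
f(t::NTuple{2})     = :homogeneous   # T must be one concrete type
g(t::NTuple{2,Any}) = :any_pair      # matches any 2-tuple, by tuple covariance

f((1, 2))      # => :homogeneous
g((1, "a"))    # => :any_pair
# f((1, "a"))  # MethodError on 0.6: no single concrete T covers Int and String
```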
8 changes: 4 additions & 4 deletions src/JuMPContainer.jl
@@ -90,10 +90,10 @@ function gendict(instancename,T,idxsets...)
     end
     sizes = Expr(:tuple, [:(length($rng)) for rng in idxsets]...)
     if truearray
-        :($instancename = Array($T, $sizes...))
+        :($instancename = Array{$T}($sizes...))
     else
         indexsets = Expr(:tuple, idxsets...)
-        :($instancename = JuMPArray(Array($T, $sizes...), $indexsets))
+        :($instancename = JuMPArray(Array{$T}($sizes...), $indexsets))
     end
 end

@@ -111,7 +111,7 @@ for (accessor, inner) in ((:getdual, :_getDual), (:getlowerbound, :getlowerbound
 end


-_similar(x::Array) = Array(Float64,size(x))
+_similar(x::Array) = Array{Float64}(size(x))
 _similar{T}(x::Dict{T}) = Dict{T,Float64}()

 _innercontainer(x::JuMPArray) = x.innerArray

@@ -208,7 +208,7 @@ type KeyIterator{JA<:JuMPArray}
     next_k_cache::Array{Any,1}
     function KeyIterator(d)
         n = ndims(d.innerArray)
-        new(d, n, Array(Any, n+1))
+        new(d, n, Array{Any}(n+1))
     end
 end
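
The dominant pattern in this commit is the constructor migration from `Array(T, dims...)`, deprecated in Julia 0.6, to the curly-brace form `Array{T}(dims...)`. A minimal before/after sketch (0.5/0.6-era syntax; later Julia versions require an explicit `undef` argument):

```julia
a = Array(Float64, 3)    # Julia 0.5 form; emits a deprecation warning on 0.6
b = Array{Float64}(3)    # 0.6 form adopted throughout this commit
# Both yield an uninitialized 3-element Vector{Float64}.
```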
2 changes: 1 addition & 1 deletion src/affexpr.jl
@@ -185,7 +185,7 @@ addconstraint(m::Model, c::Array{LinearConstraint}) =
     error("The operators <=, >=, and == can only be used to specify scalar constraints. If you are trying to add a vectorized constraint, use the element-wise dot comparison operators (.<=, .>=, or .==) instead")

 function addVectorizedConstraint(m::Model, v::Array{LinearConstraint})
-    ret = Array(LinConstrRef, size(v))
+    ret = Array{LinConstrRef}(size(v))
     for I in eachindex(v)
         ret[I] = addconstraint(m, v[I])
     end
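
A usage sketch grounded in the error message above: vectorized constraints go through the dot comparison operators, which route into `addVectorizedConstraint`. The model and data here are illustrative, assuming a JuMP release contemporary with this commit:

```julia
using JuMP
m = Model()
@variable(m, x[1:3])
A = [1 2 3; 4 5 6]
b = [7, 8]
@constraint(m, A*x .<= b)   # element-wise; a scalar <= here raises the error above
```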
4 changes: 2 additions & 2 deletions src/macros.jl
@@ -345,8 +345,8 @@ macro constraint(args...)
         kwargs = Expr(:parameters)
     end
     kwsymbol = VERSION < v"0.6.0-dev" ? :kw : :(=)
-    append!(kwargs.args, collect(filter(x -> isexpr(x, kwsymbol), args))) # comma separated
-    args = collect(filter(x->!isexpr(x, kwsymbol), args))
+    append!(kwargs.args, filter(x -> isexpr(x, kwsymbol), collect(args))) # comma separated
+    args = filter(x->!isexpr(x, kwsymbol), collect(args))

     if length(args) < 2
         if length(kwargs.args) > 0
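
This hunk reorders `filter` and `collect`, apparently so that `filter` always receives a `Vector` rather than the splatted macro-argument tuple under Julia 0.6. A self-contained sketch, with `iskw` standing in for the macro's `isexpr(x, kwsymbol)` test (assumption, not JuMP code):

```julia
args = (:(x <= 1), Expr(:kw, :lowerbound, 0))   # a splatted-args tuple
iskw(a) = isa(a, Expr) && a.head == :kw
kwargs = filter(iskw, collect(args))            # filter the Vector, not the tuple
rest   = filter(a -> !iskw(a), collect(args))
```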
58 changes: 29 additions & 29 deletions src/nlp.jl
@@ -152,12 +152,12 @@ type NLPEvaluator <: MathProgBase.AbstractNLPEvaluator
             has_user_mv_operator |= ReverseDiffSparse.has_user_multivariate_operators(nlconstr.terms.nd)
         end
         d.disable_2ndorder = has_user_mv_operator
-        d.user_output_buffer = Array(Float64,m.nlpdata.largest_user_input_dimension)
-        d.jac_storage = Array(Float64,max(numVar,m.nlpdata.largest_user_input_dimension))
+        d.user_output_buffer = Array{Float64}(m.nlpdata.largest_user_input_dimension)
+        d.jac_storage = Array{Float64}(max(numVar,m.nlpdata.largest_user_input_dimension))
     else
         d.disable_2ndorder = false
-        d.user_output_buffer = Array(Float64,0)
-        d.jac_storage = Array(Float64,numVar)
+        d.user_output_buffer = Array{Float64}(0)
+        d.jac_storage = Array{Float64}(numVar)
     end

     d.eval_f_timer = 0
@@ -207,7 +207,7 @@ function FunctionStorage(nd::Vector{NodeData}, const_values,numVar, coloring_sto
     else
         hess_I = hess_J = Int[]
         rinfo = Coloring.RecoveryInfo()
-        seed_matrix = Array(Float64,0,0)
+        seed_matrix = Array{Float64}(0,0)
         linearity = [NONLINEAR]
     end
@@ -223,7 +223,7 @@ function SubexpressionStorage(nd::Vector{NodeData}, const_values,numVar, fixed_v
     reverse_storage = zeros(length(nd))
     linearity = classify_linearity(nd, adj, subexpression_linearity, fixed_variables)

-    empty_arr = Array(Float64,0)
+    empty_arr = Array{Float64}(0)

     return SubexpressionStorage(nd, adj, const_values, forward_storage, partials_storage, reverse_storage, empty_arr, empty_arr, empty_arr, linearity[1])
@@ -268,8 +268,8 @@ function MathProgBase.initialize(d::NLPEvaluator, requested_features::Vector{Sym

     d.has_nlobj = isa(nldata.nlobj, NonlinearExprData)
     max_expr_length = 0
-    main_expressions = Array(Vector{NodeData},0)
-    subexpr = Array(Vector{NodeData},0)
+    main_expressions = Array{Vector{NodeData}}(0)
+    subexpr = Array{Vector{NodeData}}(0)
     for nlexpr in nldata.nlexpr
         push!(subexpr, nlexpr.nd)
     end
@@ -281,12 +281,12 @@
     end
     d.subexpression_order, individual_order = order_subexpressions(main_expressions,subexpr)

-    d.subexpression_linearity = Array(Linearity, length(nldata.nlexpr))
-    subexpression_variables = Array(Vector{Int}, length(nldata.nlexpr))
-    subexpression_edgelist = Array(Set{Tuple{Int,Int}}, length(nldata.nlexpr))
-    d.subexpressions = Array(SubexpressionStorage, length(nldata.nlexpr))
-    d.subexpression_forward_values = Array(Float64, length(d.subexpressions))
-    d.subexpression_reverse_values = Array(Float64, length(d.subexpressions))
+    d.subexpression_linearity = Array{Linearity}(length(nldata.nlexpr))
+    subexpression_variables = Array{Vector{Int}}(length(nldata.nlexpr))
+    subexpression_edgelist = Array{Set{Tuple{Int,Int}}}(length(nldata.nlexpr))
+    d.subexpressions = Array{SubexpressionStorage}(length(nldata.nlexpr))
+    d.subexpression_forward_values = Array{Float64}(length(d.subexpressions))
+    d.subexpression_reverse_values = Array{Float64}(length(d.subexpressions))

     empty_edgelist = Set{Tuple{Int,Int}}()
     for k in d.subexpression_order # only load expressions which actually are used
@@ -324,7 +324,7 @@ function MathProgBase.initialize(d::NLPEvaluator, requested_features::Vector{Sym
     end

     if :ExprGraph in requested_features
-        d.subexpressions_as_julia_expressions = Array(Any,length(subexpr))
+        d.subexpressions_as_julia_expressions = Array{Any}(length(subexpr))
         for k in d.subexpression_order
             if d.subexpression_linearity[k] != CONSTANT || !SIMPLIFY
                 ex = d.subexpressions[k]
@@ -336,7 +336,7 @@ function MathProgBase.initialize(d::NLPEvaluator, requested_features::Vector{Sym
     end

     if SIMPLIFY
-        main_expressions = Array(Vector{NodeData},0)
+        main_expressions = Array{Vector{NodeData}}(0)

         # simplify objective and constraint expressions
         if d.has_nlobj
@@ -350,7 +350,7 @@ function MathProgBase.initialize(d::NLPEvaluator, requested_features::Vector{Sym
     # recompute dependencies after simplification
     d.subexpression_order, individual_order = order_subexpressions(main_expressions,subexpr)

-    subexpr = Array(Vector{NodeData},length(d.subexpressions))
+    subexpr = Array{Vector{NodeData}}(length(d.subexpressions))
     for k in d.subexpression_order
         subexpr[k] = d.subexpressions[k].nd
     end
@@ -379,13 +379,13 @@ function MathProgBase.initialize(d::NLPEvaluator, requested_features::Vector{Sym
     max_chunk = min(max_chunk, 10) # 10 is hardcoded upper bound to avoid excess memory allocation

     if d.want_hess || want_hess_storage # storage for Hess or HessVec
-        d.input_ϵ = Array(Float64,max_chunk*d.m.numCols)
-        d.output_ϵ = Array(Float64,max_chunk*d.m.numCols)
-        d.forward_storage_ϵ = Array(Float64,max_chunk*max_expr_length)
-        d.partials_storage_ϵ = Array(Float64,max_chunk*max_expr_length)
-        d.reverse_storage_ϵ = Array(Float64,max_chunk*max_expr_length)
-        d.subexpression_forward_values_ϵ = Array(Float64,max_chunk*length(d.subexpressions))
-        d.subexpression_reverse_values_ϵ = Array(Float64,max_chunk*length(d.subexpressions))
+        d.input_ϵ = Array{Float64}(max_chunk*d.m.numCols)
+        d.output_ϵ = Array{Float64}(max_chunk*d.m.numCols)
+        d.forward_storage_ϵ = Array{Float64}(max_chunk*max_expr_length)
+        d.partials_storage_ϵ = Array{Float64}(max_chunk*max_expr_length)
+        d.reverse_storage_ϵ = Array{Float64}(max_chunk*max_expr_length)
+        d.subexpression_forward_values_ϵ = Array{Float64}(max_chunk*length(d.subexpressions))
+        d.subexpression_reverse_values_ϵ = Array{Float64}(max_chunk*length(d.subexpressions))
         for k in d.subexpression_order
             subex = d.subexpressions[k]
             subex.forward_storage_ϵ = zeros(Float64,max_chunk*length(subex.nd))
@@ -396,7 +396,7 @@ if d.want_hess
     if d.want_hess
         d.hess_I, d.hess_J = _hesslag_structure(d)
         # JIT warm-up
-        MathProgBase.eval_hesslag(d, Array(Float64,length(d.hess_I)), d.m.colVal, 1.0, ones(MathProgBase.numconstr(d.m)))
+        MathProgBase.eval_hesslag(d, Array{Float64}(length(d.hess_I)), d.m.colVal, 1.0, ones(MathProgBase.numconstr(d.m)))
     end
 end
@@ -1311,7 +1311,7 @@ function getvalue(x::NonlinearExpression)
     # could be smarter and cache

     nldata::NLPData = m.nlpdata
-    subexpr = Array(Vector{NodeData},0)
+    subexpr = Array{Vector{NodeData}}(0)
     for nlexpr in nldata.nlexpr
         push!(subexpr, nlexpr.nd)
     end
@@ -1322,14 +1322,14 @@ function getvalue(x::NonlinearExpression)

     subexpression_order, individual_order = order_subexpressions(Vector{NodeData}[this_subexpr.nd],subexpr)

-    subexpr_values = Array(Float64, length(subexpr))
+    subexpr_values = Array{Float64}(length(subexpr))

     for k in subexpression_order
         max_len = max(max_len, length(nldata.nlexpr[k].nd))
     end

-    forward_storage = Array(Float64, max_len)
-    partials_storage = Array(Float64, max_len)
+    forward_storage = Array{Float64}(max_len)
+    partials_storage = Array{Float64}(max_len)
     user_input_buffer = zeros(nldata.largest_user_input_dimension)
     user_output_buffer = zeros(nldata.largest_user_input_dimension)
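
A side note on the allocation idiom visible throughout nlp.jl: `Array{Float64}(n)` leaves memory uninitialized and is reserved for scratch buffers that are always written before being read, while `zeros(...)` is kept where entries may be read or accumulated first. An illustrative contrast (assumption about intent, not from the diff itself; 0.6-era syntax):

```julia
scratch = Array{Float64}(4)   # uninitialized: contents are garbage until assigned
accum   = zeros(Float64, 4)   # zero-initialized: safe to `accum[i] += w` immediately
```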
28 changes: 14 additions & 14 deletions src/operators.jl
@@ -478,11 +478,11 @@ end
 _multiply_type(R,S) = typeof(one(R) * one(S))

 # Don't do size checks here in _return_array, defer that to (*)
-_return_array{R,S}(A::AbstractMatrix{R}, x::AbstractVector{S}) = _fillwithzeros(Array(_multiply_type(R,S), size(A,1)))
-_return_array{R,S}(A::AbstractMatrix{R}, x::AbstractMatrix{S}) = _fillwithzeros(Array(_multiply_type(R,S), size(A,1), size(x,2)))
+_return_array{R,S}(A::AbstractMatrix{R}, x::AbstractVector{S}) = _fillwithzeros(Array{_multiply_type(R,S)}(size(A,1)))
+_return_array{R,S}(A::AbstractMatrix{R}, x::AbstractMatrix{S}) = _fillwithzeros(Array{_multiply_type(R,S)}(size(A,1), size(x,2)))
 # these are for transpose return matrices
-_return_arrayt{R,S}(A::AbstractMatrix{R}, x::AbstractVector{S}) = _fillwithzeros(Array(_multiply_type(R,S), size(A,2)))
-_return_arrayt{R,S}(A::AbstractMatrix{R}, x::AbstractMatrix{S}) = _fillwithzeros(Array(_multiply_type(R,S), size(A,2), size(x, 2)))
+_return_arrayt{R,S}(A::AbstractMatrix{R}, x::AbstractVector{S}) = _fillwithzeros(Array{_multiply_type(R,S)}(size(A,2)))
+_return_arrayt{R,S}(A::AbstractMatrix{R}, x::AbstractMatrix{S}) = _fillwithzeros(Array{_multiply_type(R,S)}(size(A,2), size(x, 2)))

 # helper so we don't fill the buffer array with the same object
 function _fillwithzeros{T}(arr::Array{T})
@@ -497,28 +497,28 @@ typealias ArrayOrSparseMat{T} Union{Array{T}, SparseMatrixCSC{T}}

 for op in [:+, :-]; @eval begin
     function $op{T<:JuMPTypes}(lhs::Number,rhs::ArrayOrSparseMat{T})
-        ret = Array(typeof($op(lhs, zero(T))), size(rhs))
+        ret = Array{typeof($op(lhs, zero(T)))}(size(rhs))
         for I in eachindex(ret)
             ret[I] = $op(lhs, rhs[I])
         end
         ret
     end
     function $op{T<:JuMPTypes}(lhs::ArrayOrSparseMat{T},rhs::Number)
-        ret = Array(typeof($op(zero(T), rhs)), size(lhs))
+        ret = Array{typeof($op(zero(T), rhs))}(size(lhs))
         for I in eachindex(ret)
             ret[I] = $op(lhs[I], rhs)
         end
         ret
     end
     function $op{T<:JuMPTypes,S}(lhs::T,rhs::ArrayOrSparseMat{S})
-        ret = Array(typeof($op(lhs, zero(S))), size(rhs))
+        ret = Array{typeof($op(lhs, zero(S)))}(size(rhs))
         for I in eachindex(ret)
             ret[I] = $op(lhs, rhs[I])
         end
         ret
     end
     function $op{T<:JuMPTypes,S}(lhs::ArrayOrSparseMat{S},rhs::T)
-        ret = Array(typeof($op(zero(S), rhs)), size(lhs))
+        ret = Array{typeof($op(zero(S), rhs))}(size(lhs))
         for I in eachindex(ret)
             ret[I] = $op(lhs[I], rhs)
         end
Expand All @@ -528,28 +528,28 @@ end; end

for op in [:*, :/]; @eval begin
function $op{T<:JuMPTypes}(lhs::Number,rhs::Array{T})
ret = Array(typeof($op(lhs, zero(T))), size(rhs))
ret = Array{typeof($op(lhs, zero(T)))}(size(rhs))
for I in eachindex(ret)
ret[I] = $op(lhs, rhs[I])
end
ret
end
function $op{T<:JuMPTypes}(lhs::Array{T},rhs::Number)
ret = Array(typeof($op(zero(T), rhs)), size(lhs))
ret = Array{typeof($op(zero(T), rhs))}(size(lhs))
for I in eachindex(ret)
ret[I] = $op(lhs[I], rhs)
end
ret
end
function $op{T<:JuMPTypes,S}(lhs::T,rhs::Array{S})
ret = Array(typeof($op(lhs, zero(S))), size(rhs))
ret = Array{typeof($op(lhs, zero(S)))}(size(rhs))
for I in eachindex(ret)
ret[I] = $op(lhs, rhs[I])
end
ret
end
function $op{T<:JuMPTypes,S}(lhs::Array{S},rhs::T)
ret = Array(typeof($op(zero(S), rhs)), size(lhs))
ret = Array{typeof($op(zero(S), rhs))}(size(lhs))
for I in eachindex(ret)
ret[I] = $op(lhs[I], rhs)
end
Expand All @@ -573,7 +573,7 @@ end; end
for op in [:(+), :(-)]; @eval begin
function $op(lhs::Array{Variable},rhs::Array{Variable})
(sz = size(lhs)) == size(rhs) || error("Incompatible sizes for $op: $sz $op $(size(rhs))")
ret = Array(AffExpr, sz)
ret = Array{AffExpr}(sz)
for I in eachindex(ret)
ret[I] = $op(lhs[I], rhs[I])
end
@@ -596,7 +596,7 @@ for (dotop,op) in [(:.+,:+), (:.-,:-), (:.*,:*), (:./,:/)]
     @eval begin
         function $dotop{S<:$T1,T<:$T2}(lhs::$S1{S},rhs::$S2{T})
             size(lhs) == size(rhs) || error("Incompatible dimensions")
-            arr = Array(typeof($op(zero(S),zero(T))), size(rhs))
+            arr = Array{typeof($op(zero(S),zero(T)))}(size(rhs))
             @inbounds for i in eachindex(lhs)
                 arr[i] = $op(lhs[i], rhs[i])
             end
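
The `_fillwithzeros` helper above exists because `fill(zero(T), dims)` would store one shared mutable zero object in every slot. A sketch of the aliasing hazard with a stand-in mutable type (illustrative; JuMP's AffExpr is likewise mutable):

```julia
type Cell             # 0.5/0.6-era syntax for a mutable struct
    v::Float64
end
Base.zero(::Type{Cell}) = Cell(0.0)

bad = fill(zero(Cell), 3)          # three references to a single Cell object
bad[1].v = 5.0                     # ...so this "changes" every entry
good = [zero(Cell) for _ in 1:3]   # distinct zeros, as _fillwithzeros ensures
```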
4 changes: 0 additions & 4 deletions src/parsenlp.jl
@@ -198,11 +198,7 @@ function parseNLExpr_runtime(m::Model, x::Number, tape, parent, values)
     nothing
 end

-# Temporary hack for deprecation of @defNLExpr syntax
-const __last_model = Array(Model,1)
-
 function parseNLExpr_runtime(m::Model, x::Variable, tape, parent, values)
-    __last_model[1] = x.m
     x.m === m || error("Variable in nonlinear expression does not belong to corresponding model")
     push!(tape, NodeData(VARIABLE, x.col, parent))
     nothing
8 changes: 4 additions & 4 deletions src/print.jl
@@ -433,7 +433,7 @@ function cont_str(mode, j, sym::PrintSymbols)
         end
     end
     num_dims = length(data.indexsets)
-    idxvars = Array(String, num_dims)
+    idxvars = Array{String}(num_dims)
     dimidx = 1
     for i in 1:num_dims
         if data.indexexprs[i].idxvar == nothing
@@ -627,7 +627,7 @@ function val_str(mode, dict::JuMPDict{Float64})

     ndim = length(first(keys(dict.tupledict)))

-    key_strs = Array(AbstractString, length(dict), ndim)
+    key_strs = Array{String}(length(dict), ndim)
     for (i, key) in enumerate(sortedkeys)
         for j in 1:ndim
             key_strs[i,j] = string(key[j])
@@ -679,7 +679,7 @@ function aff_str(mode, a::AffExpr, show_constant=true)
     end

     elm = 1
-    term_str = Array(String, 2*length(a.vars))
+    term_str = Array{String}(2*length(a.vars))
     # For each model
     for m in keys(moddict)
         indvec = moddict[m]
@@ -741,7 +741,7 @@ function quad_str(mode, q::GenericQuadExpr, sym)
     Qnnz = length(V)

     # Odd terms are +/i, even terms are the variables/coeffs
-    term_str = Array(String, 2*Qnnz)
+    term_str = Array{String}(2*Qnnz)
     if Qnnz > 0
         for ind in 1:Qnnz
             val = abs(V[ind])
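
Note that the val_str hunk also narrows the element type from AbstractString to the concrete String: an array with an abstract element type boxes its elements and defeats type inference. A sketch of the difference (0.6-era syntax; illustrative):

```julia
abs_arr = Array{AbstractString}(2)   # abstract eltype: elements boxed, type-unstable access
con_arr = Array{String}(2)           # concrete eltype: compact and type-stable
```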
2 changes: 1 addition & 1 deletion src/quadexpr.jl
@@ -153,7 +153,7 @@ addconstraint(m::Model, c::Array{QuadConstraint}) =
     error("Vectorized constraint added without elementwise comparisons. Try using one of (.<=,.>=,.==).")

 function addVectorizedConstraint(m::Model, v::Array{QuadConstraint})
-    ret = Array(ConstraintRef{Model,QuadConstraint}, size(v))
+    ret = Array{ConstraintRef{Model,QuadConstraint}}(size(v))
     for I in eachindex(v)
         ret[I] = addconstraint(m, v[I])
     end
[Diffs for the remaining 11 of the 21 changed files were not loaded on this page.]
