Make things work for general AbstractArrays #980

Merged: 12 commits, Mar 1, 2017
14 changes: 4 additions & 10 deletions src/JuMP.jl
@@ -233,8 +233,8 @@ end
setobjective(m::Model, something::Any) =
error("in setobjective: needs three arguments: model, objective sense (:Max or :Min), and expression.")

setobjective(::Model, ::Symbol, x::Array) =
error("in setobjective: array of size $(size(x)) passed as objective; only scalar objectives are allowed")
setobjective(::Model, ::Symbol, x::AbstractArray) =
error("in setobjective: array of size $(_size(x)) passed as objective; only scalar objectives are allowed")

function setsolver(m::Model, solver::MathProgBase.AbstractMathProgSolver)
m.solver = solver
@@ -487,13 +487,7 @@ end

Base.copy(v::Variable, new_model::Model) = Variable(new_model, v.col)
Base.copy(x::Void, new_model::Model) = nothing
function Base.copy(v::Array{Variable}, new_model::Model)
ret = similar(v, Variable, size(v))
for I in eachindex(v)
ret[I] = Variable(new_model, v[I].col)
end
ret
end
Base.copy(v::AbstractArray{Variable}, new_model::Model) = (var -> Variable(new_model, var.col)).(v)

# Copy methods for variable containers
Base.copy(d::JuMPContainer) = map(copy, d)
@@ -529,7 +523,7 @@ type SDConstraint <: AbstractConstraint
end

# Special-case X ≥ 0, which is often convenient
function SDConstraint(lhs::Matrix, rhs::Number)
function SDConstraint(lhs::AbstractMatrix, rhs::Number)
Member: This change is not covered by tests.

Contributor Author: Fixed.

rhs == 0 || error("Cannot construct a semidefinite constraint with nonzero scalar bound $rhs")
SDConstraint(lhs)
end
12 changes: 7 additions & 5 deletions src/affexpr.jl
@@ -181,13 +181,15 @@ function addconstraint(m::Model, c::LinearConstraint)
end
return LinConstrRef(m,length(m.linconstr))
end
addconstraint(m::Model, c::Array{LinearConstraint}) =
addconstraint(m::Model, c::AbstractArray{LinearConstraint}) =
error("The operators <=, >=, and == can only be used to specify scalar constraints. If you are trying to add a vectorized constraint, use the element-wise dot comparison operators (.<=, .>=, or .==) instead")

function addVectorizedConstraint(m::Model, v::Array{LinearConstraint})
ret = Array{LinConstrRef}(size(v))
for I in eachindex(v)
ret[I] = addconstraint(m, v[I])
function addVectorizedConstraint(m::Model, v::AbstractArray{LinearConstraint})
Member: This is an internal function; it's unclear how it could be called with an AbstractArray.

Contributor: I'm somewhat uncertain if this method should be changed. Is there a compelling reason to pass a sparse matrix of linear constraints, for example?

Contributor Author: Without changing this method, the following fails:

m = Model()
v = @variable(m, [1:3])
x = OffsetArray(v, -3)
@constraint(m, x .== 0)

because there is no convert method that constructs an Array from an OffsetArray (a conscious design decision in OffsetArrays).
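
For illustration, a minimal sketch (not part of the PR) of how the similar/eachindex pattern in the new addVectorizedConstraint sidesteps that conversion: similar on an OffsetArray keeps the original axes, so no plain Array ever has to be constructed. Assumes OffsetArrays as above.

using OffsetArrays
x = OffsetArray([10, 20, 30], -3)   # indices -2:0
ret = similar(x, Float64)           # also indexed -2:0; no conversion to Array required
for i in eachindex(x)
    ret[i] = x[i] / 10              # same index set as x
end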

Member: This doesn't seem like the right fix. I think constructconstraint!(x::Array, sense::Symbol) = map(c->constructconstraint!(c,sense), x) should be changed to return a flat Vector of constraints. Then there's no need to touch addVectorizedConstraint.

Contributor: Since this also doesn't work:

t = OffsetArray(rand(3), -3)
t .== 0

I'm not sure this is desirable behavior.

Contributor Author: OK, will change. @joehuchette, that seems like a bug in Base.

Contributor Author: Done.

# Can't use map! because map! for sparse vectors needs zero to be defined for
# JuMP.GenericRangeConstraint{JuMP.GenericAffExpr{Float64,JuMP.Variable}} on 0.6
ret = similar(v, LinConstrRef)
for i in eachindex(v)
ret[i] = addconstraint(m, v[i])
end
ret
end
12 changes: 6 additions & 6 deletions src/macros.jl
@@ -304,21 +304,21 @@ function constructconstraint!(normexpr::SOCExpr, sense::Symbol)
end
end

constructconstraint!(x::Array, sense::Symbol) = map(c->constructconstraint!(c,sense), x)
constructconstraint!(x::AbstractArray, sense::Symbol) = map(c->constructconstraint!(c,sense), x)

_vectorize_like(x::Number, y::Array{AffExpr}) = fill(x, size(y))
function _vectorize_like{R<:Number}(x::Array{R}, y::Array{AffExpr})
_vectorize_like(x::Number, y::AbstractArray{AffExpr}) = (ret = similar(y, typeof(x)); fill!(ret, x))
function _vectorize_like{R<:Number}(x::AbstractArray{R}, y::AbstractArray{AffExpr})
for i in 1:max(ndims(x),ndims(y))
size(x,i) == size(y,i) || error("Unequal sizes for ranged constraint")
_size(x,i) == _size(y,i) || error("Unequal sizes for ranged constraint")
end
x
end
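
A minimal sketch (not part of the diff) of why fill is replaced by similar plus fill! here: fill(x, size(y)) always produces a one-based Array, whereas similar(y, typeof(x)) keeps y's axes, so the eachindex loop in constructconstraint! below sees matching bounds. Assumes OffsetArrays.

using OffsetArrays
y = OffsetArray(zeros(3), -3)    # stand-in for an offset array of expressions; indices -2:0
lb = fill!(similar(y, Int), 5)   # indexed -2:0, matching eachindex(y)
# fill(5, size(y)) would give a one-based Vector whose indices differ from eachindex(y)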

function constructconstraint!(x::Array{AffExpr}, lb, ub)
function constructconstraint!(x::AbstractArray{AffExpr}, lb, ub)
LB = _vectorize_like(lb,x)
UB = _vectorize_like(ub,x)
ret = similar(x, LinearConstraint)
map!(ret, 1:length(ret)) do i
map!(ret, eachindex(ret)) do i
constructconstraint!(x[i], LB[i], UB[i])
end
end
37 changes: 21 additions & 16 deletions src/norms.jl
@@ -37,27 +37,32 @@ function GenericNorm{C,V}(P, terms::Vector{GenericAffExpr{C,V}})
end
GenericNorm{P,C,V}(terms)
end
function GenericNorm{C, V}(P, terms::AbstractVector{GenericAffExpr{C,V}})
GenericNorm(P, [terms[i] for i in eachindex(terms)])
end
Base.copy{P,C,V}(x::GenericNorm{P,C,V}) = GenericNorm{P,C,V}(copy(x.terms))

# Handle the norm() function by flattening arguments into a vector
Base.norm{V<:AbstractJuMPScalar}(x::V, p::Real=2) = vecnorm(x,p)
Base.norm{V<:AbstractJuMPScalar}(x::Array{V}, p::Real=2) = vecnorm(x,p)
Base.norm{V<:AbstractJuMPScalar}(x::JuMPArray{V},p::Real=2) = vecnorm(x,p)
Base.norm{V<:AbstractJuMPScalar}(x::JuMPDict{V}, p::Real=2) = vecnorm(x,p)
Base.norm{C,V}(x::GenericAffExpr{C,V}, p::Real=2) = vecnorm(x,p)
Base.norm{C,V}(x::Array{GenericAffExpr{C,V}}, p::Real=2) = vecnorm(x,p)
Base.norm{C,V}(x::JuMPArray{GenericAffExpr{C,V}},p::Real=2) = vecnorm(x,p)
Base.norm{C,V}(x::JuMPDict{GenericAffExpr{C,V}}, p::Real=2) = vecnorm(x,p)
Base.norm{V<:AbstractJuMPScalar}(x::V, p::Real=2) = vecnorm(x,p)
Base.norm{V<:AbstractJuMPScalar}(x::AbstractVector{V}, p::Real=2) = vecnorm(x,p)
Base.norm{V<:AbstractJuMPScalar}(x::AbstractMatrix{V}, p::Real=2) = vecnorm(x,p)
Base.norm{V<:AbstractJuMPScalar}(x::JuMPArray{V}, p::Real=2) = vecnorm(x,p)
Base.norm{V<:AbstractJuMPScalar}(x::JuMPDict{V}, p::Real=2) = vecnorm(x,p)
Base.norm{C,V}(x::GenericAffExpr{C,V}, p::Real=2) = vecnorm(x,p)
Base.norm{C,V}(x::AbstractVector{GenericAffExpr{C,V}}, p::Real=2) = vecnorm(x,p)
Base.norm{C,V}(x::AbstractMatrix{GenericAffExpr{C,V}}, p::Real=2) = vecnorm(x,p)
Base.norm{C,V}(x::JuMPArray{GenericAffExpr{C,V}}, p::Real=2) = vecnorm(x,p)
Base.norm{C,V}(x::JuMPDict{GenericAffExpr{C,V}}, p::Real=2) = vecnorm(x,p)

_vecaff(C,V,x) = map(GenericAffExpr{C,V},vec(x))
Base.vecnorm{V<:AbstractJuMPScalar}(x::V, p::Real=2) = GenericNorm(p, [GenericAffExpr{Float64,V}(x)] )
Base.vecnorm{V<:AbstractJuMPScalar}(x::Array{V}, p::Real=2) = GenericNorm(p, _vecaff(Float64,V,x) )
Base.vecnorm{V<:AbstractJuMPScalar}(x::JuMPArray{V},p::Real=2) = GenericNorm(p, _vecaff(Float64,V,x.innerArray) )
Base.vecnorm{V<:AbstractJuMPScalar}(x::JuMPDict{V}, p::Real=2) = GenericNorm(p, _vecaff(Float64,V,collect(values(x))) )
Base.vecnorm{C,V}(x::GenericAffExpr{C,V}, p::Real=2) = GenericNorm(p, [x])
Base.vecnorm{C,V}(x::Array{GenericAffExpr{C,V}}, p::Real=2) = GenericNorm(p, vec(x))
Base.vecnorm{C,V}(x::JuMPArray{GenericAffExpr{C,V}},p::Real=2) = GenericNorm(p, vec(x.innerArray))
Base.vecnorm{C,V}(x::JuMPDict{GenericAffExpr{C,V}}, p::Real=2) = GenericNorm(p, collect(values(x)))
Base.vecnorm{V<:AbstractJuMPScalar}(x::V, p::Real=2) = GenericNorm(p, [GenericAffExpr{Float64,V}(x)] )
Base.vecnorm{V<:AbstractJuMPScalar}(x::AbstractArray{V}, p::Real=2) = GenericNorm(p, _vecaff(Float64,V,x) )
Base.vecnorm{V<:AbstractJuMPScalar}(x::JuMPArray{V}, p::Real=2) = GenericNorm(p, _vecaff(Float64,V,x.innerArray) )
Base.vecnorm{V<:AbstractJuMPScalar}(x::JuMPDict{V}, p::Real=2) = GenericNorm(p, _vecaff(Float64,V,collect(values(x))) )
Base.vecnorm{C,V}(x::GenericAffExpr{C,V}, p::Real=2) = GenericNorm(p, [x])
Base.vecnorm{C,V}(x::AbstractArray{GenericAffExpr{C,V}}, p::Real=2) = GenericNorm(p, vec(x))
Base.vecnorm{C,V}(x::JuMPArray{GenericAffExpr{C,V}}, p::Real=2) = GenericNorm(p, vec(x.innerArray))
Base.vecnorm{C,V}(x::JuMPDict{GenericAffExpr{C,V}}, p::Real=2) = GenericNorm(p, collect(values(x)))

# Called by the parseNorm macro for e.g. norm2{...}
# If the arguments are tightly typed, just pass to the constructor
65 changes: 27 additions & 38 deletions src/operators.jl
@@ -277,7 +277,8 @@ Base.sum(j::JuMPDict) = sum(values(j.tupledict))
Base.sum(j::JuMPArray{Variable}) = AffExpr(vec(j.innerArray), ones(length(j.innerArray)), 0.0)
Base.sum(j::JuMPDict{Variable}) = AffExpr(collect(values(j.tupledict)), ones(length(j.tupledict)), 0.0)
Base.sum(j::Array{Variable}) = AffExpr(vec(j), ones(length(j)), 0.0)
function Base.sum{T<:GenericAffExpr}(affs::Array{T})
Base.sum(j::AbstractArray{Variable}) = sum([j[i] for i in eachindex(j)]) # to handle non-one-indexed arrays.
Contributor: As long as vec(::AbstractArray{Variable}) will reliably work, I think the definition above should work fine.

Contributor Author: I tried that first, but AffExpr is a type alias of GenericAffExpr{Float64,Variable}, so that directly calls the inner constructor of AffExpr, which expects precisely a Vector{Variable}. I tried adding an inner constructor to GenericAffExpr that takes AbstractArrays (+ additional outer constructors), but ran into some more issues that I don't remember anymore. Basically, it was going to be a pretty big change, whereas sum seemed to be the only current case where handling AbstractVectors was needed when constructing AffExprs.

Contributor: Cool, thanks for the explanation.
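
For illustration, a sketch (assuming OffsetArrays, not part of the diff) of why the comprehension is enough: it collects into a plain Vector{Variable}, so the Array method above and the existing AffExpr inner constructor still do all the work.

using JuMP, OffsetArrays
m = Model()
v = @variable(m, [1:3])
x = OffsetArray(v, -3)   # indices -2:0
expr = sum(x)            # [x[i] for i in eachindex(x)] is a Vector{Variable}, so this falls
                         # through to Base.sum(::Array{Variable}) and yields an AffExpr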

function Base.sum{T<:GenericAffExpr}(affs::AbstractArray{T})
new_aff = zero(T)
for aff in affs
append!(new_aff, aff)
@@ -348,7 +349,10 @@ function Base.issymmetric{T<:JuMPTypes}(x::Matrix{T})
end

# Special-case because the base version wants to do fill!(::Array{Variable}, zero(AffExpr))
Base.diagm(x::Vector{Variable}) = diagm(convert(Vector{AffExpr}, x))
function Base.diagm(x::AbstractVector{Variable})
@assert one_indexed(x) # Base.diagm doesn't work for non-one-indexed arrays in general.
diagm(copy!(similar(x, AffExpr), x))
end
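
An illustrative usage sketch (assumed, not part of the diff): a one-based AbstractVector such as a view goes through the copy! into a Vector{AffExpr}, while a non-one-based input trips the assertion.

using JuMP
m = Model()
v = @variable(m, [1:3])
D = diagm(view(v, :))        # 3x3 Matrix{AffExpr} with v on the diagonal
# diagm(OffsetArray(v, -3))  # would fail the @assert: Base.diagm assumes one-based indices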

###############
# The _multiply!(buf,y,z) adds the results of y*z into the buffer buf. No bounds/size
@@ -448,7 +452,7 @@ _multiply!(ret, lhs, rhs) = A_mul_B!(ret, lhs, ret)

import Base.At_mul_B
import Base.Ac_mul_B
# these methods are called when one does A.'*v or A'*v respectively
At_mul_B{T<:JuMPTypes}(A::Union{Matrix{T},SparseMatrixCSC{T}}, x::Union{Matrix, Vector, SparseMatrixCSC}) = _matmult(A, x)
At_mul_B{T<:JuMPTypes,R<:JuMPTypes}(A::Union{Matrix{T},SparseMatrixCSC{T}}, x::Union{Matrix{R}, Vector{R}, SparseMatrixCSC{R}}) = _matmult(A, x)
At_mul_B{T<:JuMPTypes}(A::Union{Matrix,SparseMatrixCSC}, x::Union{Matrix{T}, Vector{T}, SparseMatrixCSC{T}}) = _matmult(A, x)
@@ -485,40 +489,37 @@ _return_arrayt{R,S}(A::AbstractMatrix{R}, x::AbstractVector{S}) = _fillwithzeros
_return_arrayt{R,S}(A::AbstractMatrix{R}, x::AbstractMatrix{S}) = _fillwithzeros(Array{_multiply_type(R,S)}(size(A,2), size(x, 2)))

# helper so we don't fill the buffer array with the same object
function _fillwithzeros{T}(arr::Array{T})
function _fillwithzeros{T}(arr::AbstractArray{T})
for I in eachindex(arr)
arr[I] = zero(T)
end
arr
end

# Let's be conservative and only define arithmetic for the basic types
typealias ArrayOrSparseMat{T} Union{Array{T}, SparseMatrixCSC{T}}

for op in [:+, :-]; @eval begin
function $op{T<:JuMPTypes}(lhs::Number,rhs::ArrayOrSparseMat{T})
ret = Array{typeof($op(lhs, zero(T)))}(size(rhs))
function $op{T<:JuMPTypes}(lhs::Number,rhs::AbstractArray{T})
ret = similar(rhs, typeof($op(lhs, zero(T))))
for I in eachindex(ret)
ret[I] = $op(lhs, rhs[I])
end
ret
end
function $op{T<:JuMPTypes}(lhs::ArrayOrSparseMat{T},rhs::Number)
ret = Array{typeof($op(zero(T), rhs))}(size(lhs))
function $op{T<:JuMPTypes}(lhs::AbstractArray{T},rhs::Number)
ret = similar(lhs, typeof($op(zero(T), rhs)))
for I in eachindex(ret)
ret[I] = $op(lhs[I], rhs)
end
ret
end
function $op{T<:JuMPTypes,S}(lhs::T,rhs::ArrayOrSparseMat{S})
ret = Array{typeof($op(lhs, zero(S)))}(size(rhs))
function $op{T<:JuMPTypes,S}(lhs::T,rhs::AbstractArray{S})
ret = similar(rhs, typeof($op(lhs, zero(S))))
for I in eachindex(ret)
ret[I] = $op(lhs, rhs[I])
end
ret
end
function $op{T<:JuMPTypes,S}(lhs::ArrayOrSparseMat{S},rhs::T)
ret = Array{typeof($op(zero(S), rhs))}(size(lhs))
function $op{T<:JuMPTypes,S}(lhs::AbstractArray{S},rhs::T)
ret = similar(lhs, typeof($op(zero(S), rhs)))
for I in eachindex(ret)
ret[I] = $op(lhs[I], rhs)
end
@@ -527,29 +528,29 @@ for op in [:+, :-]; @eval begin
end; end

for op in [:*, :/]; @eval begin
function $op{T<:JuMPTypes}(lhs::Number,rhs::Array{T})
ret = Array{typeof($op(lhs, zero(T)))}(size(rhs))
function $op{T<:JuMPTypes}(lhs::Number,rhs::AbstractArray{T})
ret = similar(rhs, typeof($op(lhs, zero(T))))
for I in eachindex(ret)
ret[I] = $op(lhs, rhs[I])
end
ret
end
function $op{T<:JuMPTypes}(lhs::Array{T},rhs::Number)
ret = Array{typeof($op(zero(T), rhs))}(size(lhs))
function $op{T<:JuMPTypes}(lhs::AbstractArray{T},rhs::Number)
ret = similar(lhs, typeof($op(zero(T), rhs)))
for I in eachindex(ret)
ret[I] = $op(lhs[I], rhs)
end
ret
end
function $op{T<:JuMPTypes,S}(lhs::T,rhs::Array{S})
ret = Array{typeof($op(lhs, zero(S)))}(size(rhs))
function $op{T<:JuMPTypes,S}(lhs::T,rhs::AbstractArray{S})
ret = similar(rhs, typeof($op(lhs, zero(S))))
for I in eachindex(ret)
ret[I] = $op(lhs, rhs[I])
end
ret
end
function $op{T<:JuMPTypes,S}(lhs::Array{S},rhs::T)
ret = Array{typeof($op(zero(S), rhs))}(size(lhs))
function $op{T<:JuMPTypes,S}(lhs::AbstractArray{S},rhs::T)
ret = similar(lhs, typeof($op(zero(S), rhs)))
for I in eachindex(ret)
ret[I] = $op(lhs[I], rhs)
end
@@ -569,18 +570,6 @@ end; end
(/){T<:JuMPTypes}(lhs::SparseMatrixCSC{T}, rhs::Number) =
SparseMatrixCSC(lhs.m, lhs.n, copy(lhs.colptr), copy(lhs.rowval), lhs.nzval ./ rhs)

# The following are primarily there for internal use in the macro code for @constraint
for op in [:(+), :(-)]; @eval begin
function $op(lhs::Array{Variable},rhs::Array{Variable})
(sz = size(lhs)) == size(rhs) || error("Incompatible sizes for $op: $sz $op $(size(rhs))")
ret = Array{AffExpr}(sz)
for I in eachindex(ret)
ret[I] = $op(lhs[I], rhs[I])
end
ret
end
end; end

for (dotop,op) in [(:.+,:+), (:.-,:-), (:.*,:*), (:./,:/)]
Member: Are these methods now covered by built-ins in Julia?

Contributor Author:

Member: Ok, thanks for checking

@eval begin
$dotop(lhs::Number,rhs::JuMPTypes) = $op(lhs,rhs)
@@ -611,15 +600,15 @@ for (dotop,op) in [(:.+,:+), (:.-,:-), (:.*,:*), (:./,:/)]
end


(+){T<:JuMPTypes}(x::Array{T}) = x
function (-){T<:JuMPTypes}(x::Array{T})
(+){T<:JuMPTypes}(x::AbstractArray{T}) = x
function (-){T<:JuMPTypes}(x::AbstractArray{T})
ret = similar(x, typeof(-one(T)))
for I in eachindex(ret)
ret[I] = -x[I]
end
ret
end
(*){T<:JuMPTypes}(x::Array{T}) = x
(*){T<:JuMPTypes}(x::AbstractArray{T}) = x

###############################################################################
# Add nonlinear function fallbacks for JuMP built-in types
8 changes: 4 additions & 4 deletions src/parseExpr_staged.jl
@@ -268,11 +268,11 @@ for T1 in (GenericAffExpr,GenericQuadExpr), T2 in (Number,Variable,GenericAffExp
@eval addtoexpr(::$T1, ::_NLExpr, ::$T2) = _nlexprerr()
end

addtoexpr{T<:GenericAffExpr}(ex::Array{T}, c::AbstractArray, x::AbstractArray) = append!.(ex, c*x)
addtoexpr{T<:GenericAffExpr}(ex::Array{T}, c::AbstractArray, x::Number) = append!.(ex, c*x)
addtoexpr{T<:GenericAffExpr}(ex::Array{T}, c::Number, x::AbstractArray) = append!.(ex, c*x)
addtoexpr{T<:GenericAffExpr}(ex::AbstractArray{T}, c::AbstractArray, x::AbstractArray) = append!.(ex, c*x)
addtoexpr{T<:GenericAffExpr}(ex::AbstractArray{T}, c::AbstractArray, x::Number) = append!.(ex, c*x)
addtoexpr{T<:GenericAffExpr}(ex::AbstractArray{T}, c::Number, x::AbstractArray) = append!.(ex, c*x)

addtoexpr(ex, c, x) = ex + c*x

@generated addtoexpr_reorder(ex, arg) = :(addtoexpr(ex, 1.0, arg))

12 changes: 7 additions & 5 deletions src/quadexpr.jl
@@ -149,13 +149,15 @@ function addconstraint(m::Model, c::QuadConstraint)
end
return ConstraintRef{Model,QuadConstraint}(m,length(m.quadconstr))
end
addconstraint(m::Model, c::Array{QuadConstraint}) =
addconstraint(m::Model, c::AbstractArray{QuadConstraint}) =
error("Vectorized constraint added without elementwise comparisons. Try using one of (.<=,.>=,.==).")

function addVectorizedConstraint(m::Model, v::Array{QuadConstraint})
ret = Array{ConstraintRef{Model,QuadConstraint}}(size(v))
for I in eachindex(v)
ret[I] = addconstraint(m, v[I])
function addVectorizedConstraint(m::Model, v::AbstractArray{QuadConstraint})
Member: This change is not covered by tests.

Contributor Author: I wasn't able to construct a test case involving nonstandard arrays of QuadConstraint that didn't already pass or call this unexported function directly, so I changed it back to Array.

# Can't use map! because map! for sparse vectors needs zero to be defined for
# JuMP.GenericRangeConstraint{JuMP.GenericAffExpr{Float64,JuMP.Variable}} on 0.6
ret = similar(v, ConstraintRef{Model,QuadConstraint})
for i in eachindex(v)
ret[i] = addconstraint(m, v[i])
end
ret
end
8 changes: 8 additions & 0 deletions src/utils.jl
@@ -101,3 +101,11 @@ function reinterpret_unsafe{T,R}(::Type{T},x::Vector{R})
p = reinterpret(Ptr{T},pointer(x))
return VectorView(0,div(len,sizeof(T)),p)
end

# For non-zero index arrays on 0.5; see
# https://github.com/alsam/OffsetArrays.jl#special-note-for-julia-05.
_size(A::AbstractArray) = map(length, indices(A))
_size(A) = size(A)
_size(A::AbstractArray, d) = d <= ndims(A) ? _size(A)[d] : 1

one_indexed(A) = all(x -> isa(x, Base.OneTo), indices(A))
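
A brief sketch of how these helpers behave (assuming OffsetArrays, per the note linked above):

using OffsetArrays
A = OffsetArray(rand(3), -3)   # indices -2:0
_size(A)                       # (3,), via map(length, indices(A))
_size(A, 2)                    # 1, mirroring size(A, d) for trailing dimensions
one_indexed(A)                 # false
one_indexed(rand(3))           # true: the indices are Base.OneTo
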
1 change: 1 addition & 0 deletions test/REQUIRE
@@ -4,3 +4,4 @@ GLPKMathProgInterface
Ipopt
ECOS
SCS
OffsetArrays 0.2.13
22 changes: 22 additions & 0 deletions test/model.jl
@@ -976,4 +976,26 @@ end
@test isnan(getdual(x))
end

@testset "Constraints with non-Array AbstractArrays" begin
m = Model()
v = @variable(m, [1:3])
for x in (OffsetArray(v, -length(v)), view(v, :), sparse(v))
# Version of diagm that works for OffsetArrays:
A = similar(x, typeof(zero(eltype(x))), (eachindex(x), eachindex(x)))
for i in eachindex(x), j in eachindex(x)
A[i, j] = ifelse(i == j, x[i], zero(eltype(x)))
end

# No tests, just to make sure that there are no MethodErrors.
@constraint(m, x + first(x) .== 0)
@constraint(m, x - first(x) .== 0)
@constraint(m, (x + 1) + first(x) .== 0)
@constraint(m, (x + 1) - first(x) .== 0)
@constraint(m, -x .<= 0)
@constraint(m, +x .<= 0)
@SDconstraint(m, A >= 0)

@test_throws ErrorException @objective(m, Min, x) # vector objective
end
end
end