diff --git a/NEWS.md b/NEWS.md index 0b1b6e50a..ae5d2153d 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,3 +1,21 @@ +# Changes in v0.14.0 + +## Breaking changes + +* Changes to the `sign` of atoms: + * The sign of `sumlargesteigs` has been changed from `Positive()` to `NoSign()`, to allow non-positive-semidefinite inputs. This has the potential + to break code that required that sign to be positive. If you run into this problem, please file an issue so we can figure out a workaround. [#412](https://github.com/jump-dev/Convex.jl/pull/412) +* Removal of deprecations + * `lambdamin` and `lambdamax` have been deprecated to `eigmin` and `eigmax` since Convex v0.13.0. This deprecation has been removed, so your code must be updated to call `eigmin` or `eigmax` instead. [#412](https://github.com/jump-dev/Convex.jl/pull/412) + * `norm(x, p)` where `x` is a matrix expression has been deprecated to `opnorm(x,p)` since Convex v0.8.0. This deprecation has been removed, so your code must be updated to call `opnorm(x, p)` instead. Currently, `norm(x,p)` for a matrix + expression `x` will error, but in Convex.jl v0.15.0 it will return `norm(vec(x), p)`. [#412](https://github.com/jump-dev/Convex.jl/pull/412) + * `Convex.clearmemory()` has been deprecated and unnecessary since Convex v0.12.5. This deprecation has been removed, so if this function is in your code, just delete it. [#412](https://github.com/jump-dev/Convex.jl/pull/412) + * `vecnorm(x, p)` has been deprecated to `norm(vec(x), p)` since Convex v0.8.0. This deprecation has been removed, so your code must be updated to call `norm(vec(x),p)` instead. + +## Other changes + +* updated `nuclearnorm` and `sumlargesteigs` to allow complex variables, and allow the argument of `sumlargesteigs` to be non-positive-semidefinite [#409](https://github.com/jump-dev/Convex.jl/pull/409). Thanks to @dstahlke! 
+ # Changes in v0.13.8 * add unary `+` for `Sign` and `ComplexSign` to allow single-argument `hcat` and `vcat` to work [#405](https://github.com/jump-dev/Convex.jl/pull/405). Thanks to @dstahlke! diff --git a/Project.toml b/Project.toml index 3114a2409..18aed18c0 100644 --- a/Project.toml +++ b/Project.toml @@ -1,6 +1,6 @@ name = "Convex" uuid = "f65535da-76fb-5f13-bab9-19810c17039a" -version = "0.13.8" +version = "0.14.0-DEV" [deps] AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" diff --git a/docs/Project.toml b/docs/Project.toml index c6d03ec9d..b2d4ffb59 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -19,4 +19,4 @@ SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [compat] -Documenter = "0.24" +Documenter = "0.26" diff --git a/docs/README.md b/docs/README.md index ed4f5dc7a..a51858e4c 100644 --- a/docs/README.md +++ b/docs/README.md @@ -14,7 +14,8 @@ julia --project=docs -e 'using Pkg; Pkg.instantiate(); include("docs/make.jl")' to generate the examples notebooks (which will be placed in `docs/notebooks`) and the documentation itself, which is generated into the `doc/build` folder, and can be previewed by opening a webserver there. Note that this command can -take some time. +take some time. To generate the documentation without updating the examples, +set `ENV["CONVEX_SKIP_EXAMPLES"]="true"` before including `docs/make.jl`. To generate a single Jupyter notebook, run e.g. diff --git a/docs/examples_literate/general_examples/huber_regression.jl b/docs/examples_literate/general_examples/huber_regression.jl index 13ff8e659..8e9e99110 100644 --- a/docs/examples_literate/general_examples/huber_regression.jl +++ b/docs/examples_literate/general_examples/huber_regression.jl @@ -7,8 +7,8 @@ if big_example n = 300 number_tests = 50 else - n = 100 - number_tests = 20 + n = 50 + number_tests = 10 end # Generate data for Huber regression. 
diff --git a/docs/make.jl b/docs/make.jl index 05a3add5b..8d817bcdc 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -4,129 +4,135 @@ using Documenter, Convex, Literate, Pkg previous_GKSwstype = get(ENV, "GKSwstype", "") ENV["GKSwstype"] = "100" -@info "Building examples..." - -filename(str) = first(splitext(last(splitdir(str)))) -filename_to_name(str) = uppercasefirst(replace(replace(filename(str), "-" => " "), "_" => " ")) - -fix_math_md(content) = replace(content, r"\$\$(.*?)\$\$"s => s"```math\1```") - -literate_path = joinpath(@__DIR__(), "examples_literate") build_path = joinpath(@__DIR__, "src", "examples") rm(build_path; force=true, recursive=true) isdir(build_path) || mkdir(build_path) +literate_path = joinpath(@__DIR__(), "examples_literate") +notebooks_path = joinpath(@__DIR__, "notebooks") -@info "[Examples] Preparing notebooks..." +filename(str) = first(splitext(last(splitdir(str)))) +filename_to_name(str) = uppercasefirst(replace(replace(filename(str), "-" => " "), "_" => " ")) +fix_math_md(content) = replace(content, r"\$\$(.*?)\$\$"s => s"```math\1```") -notebooks_path = joinpath(@__DIR__, "notebooks") -rm(notebooks_path, recursive=true, force=true) -mkdir(notebooks_path) - -for dir in readdir(literate_path) - dir_path = joinpath(literate_path, dir) - isdir(dir_path) || continue - @info "Processing directory $dir" - notebook_dir = joinpath(notebooks_path, dir) - isdir(notebook_dir) || mkdir(notebook_dir) - for file in readdir(dir_path) - file_path = joinpath(dir_path, file) - out_path = joinpath(notebooks_path, dir, file) - if endswith(file, ".jl") - Literate.notebook(file_path, notebook_dir, execute=false) - else - cp(file_path, out_path) + +SKIP_EXAMPLES = get(ENV, "CONVEX_SKIP_EXAMPLES", false) == "true" + +if SKIP_EXAMPLES + @info "Skipping examples" + examples_nav = String[] +else + @info "Building examples..." + + @info "[Examples] Preparing notebooks..." 
+ + rm(notebooks_path, recursive=true, force=true) + mkdir(notebooks_path) + + for dir in readdir(literate_path) + dir_path = joinpath(literate_path, dir) + isdir(dir_path) || continue + @info "Processing directory $dir" + notebook_dir = joinpath(notebooks_path, dir) + isdir(notebook_dir) || mkdir(notebook_dir) + for file in readdir(dir_path) + file_path = joinpath(dir_path, file) + out_path = joinpath(notebooks_path, dir, file) + if endswith(file, ".jl") + Literate.notebook(file_path, notebook_dir, execute=false) + else + cp(file_path, out_path) + end end end -end -# Copy `Project.toml` to notebooks -cp(joinpath(@__DIR__, "Project.toml"), joinpath(notebooks_path, "Project.toml")) + # Copy `Project.toml` to notebooks + cp(joinpath(@__DIR__, "Project.toml"), joinpath(notebooks_path, "Project.toml")) -# Add a README file to notebooks -open(joinpath(notebooks_path, "README.md"), "w") do io - print(io, """ - # Convex.jl example notebooks + # Add a README file to notebooks + open(joinpath(notebooks_path, "README.md"), "w") do io + print(io, """ + # Convex.jl example notebooks - Start Julia in this directory and set the project flag to point to this directory. E.g. run the command + Start Julia in this directory and set the project flag to point to this directory. E.g. run the command - ```julia - julia --project=. - ``` + ```julia + julia --project=. + ``` - in this directory. + in this directory. 
- Then add `IJulia` if it's not installed already in your global environment by + Then add `IJulia` if it's not installed already in your global environment by - ```julia - pkg> add IJulia - ``` + ```julia + pkg> add IJulia + ``` - Also call `instantiate` to download the required packages: + Also call `instantiate` to download the required packages: - ```julia - pkg> instantiate - ``` + ```julia + pkg> instantiate + ``` - Then launch Jupyter: + Then launch Jupyter: - ```julia - julia> using IJulia + ```julia + julia> using IJulia - julia> notebook(dir=pwd(); detached=true) - ``` + julia> notebook(dir=pwd(); detached=true) + ``` - This should allow you to try any of the notebooks. - """) -end + This should allow you to try any of the notebooks. + """) + end -# zip up the notebooks directory -zip_path = joinpath(build_path, "notebooks.zip") -run(Cmd(`zip $zip_path -r notebooks`; dir = @__DIR__)) - -@info "[Examples] Preparing markdown files..." - -for dir in readdir(literate_path) - dir_path = joinpath(literate_path, dir) - isdir(dir_path) || continue - @info "Processing directory $dir" - build_dir = joinpath(build_path, dir) - isdir(build_dir) || mkdir(build_dir) - for file in readdir(dir_path) - file_path = joinpath(dir_path, file) - out_path = joinpath(build_path, dir, file) - if endswith(file, ".jl") - postprocess = function(content) - """ - All of the examples can be found in Jupyter notebook form [here](../$(filename(zip_path)).zip). - - ```@setup $(filename(file)) - __START_TIME = time_ns() - @info "Starting example $(filename(file))" - ``` - """ * content * """ - ```@setup $(filename(file)) - __END_TIME = time_ns() - elapsed = string(round((__END_TIME - __START_TIME)*1e-9; sigdigits = 3), "s") - @info "Finished example $(filename(file)) after " * elapsed - ``` - """ + # zip up the notebooks directory + zip_path = joinpath(build_path, "notebooks.zip") + run(Cmd(`zip $zip_path -r notebooks`; dir = @__DIR__)) + + @info "[Examples] Preparing markdown files..." 
+ + for dir in readdir(literate_path) + dir_path = joinpath(literate_path, dir) + isdir(dir_path) || continue + @info "Processing directory $dir" + build_dir = joinpath(build_path, dir) + isdir(build_dir) || mkdir(build_dir) + for file in readdir(dir_path) + file_path = joinpath(dir_path, file) + out_path = joinpath(build_path, dir, file) + if endswith(file, ".jl") + postprocess = function(content) + """ + All of the examples can be found in Jupyter notebook form [here](../$(filename(zip_path)).zip). + + ```@setup $(filename(file)) + __START_TIME = time_ns() + @info "Starting example $(filename(file))" + ``` + """ * content * """ + ```@setup $(filename(file)) + __END_TIME = time_ns() + elapsed = string(round((__END_TIME - __START_TIME)*1e-9; sigdigits = 3), "s") + @info "Finished example $(filename(file)) after " * elapsed + ``` + """ + end + Literate.markdown(file_path, build_dir; preprocess = fix_math_md, documenter = true, postprocess = postprocess) + else + cp(file_path, out_path) end - Literate.markdown(file_path, build_dir; preprocess = fix_math_md, documenter = true, postprocess = postprocess) - else - cp(file_path, out_path) end end -end - -@info "Starting `makedocs`" + # Build nav tree for examples + function nav_dir(dir, path) + sort([ joinpath("examples", dir, file) for file in readdir(path) if endswith(file, ".md") && file != "index.md" ]) + end -# Build nav tree for examples -function nav_dir(dir, path) - sort([ joinpath("examples", dir, file) for file in readdir(path) if endswith(file, ".md") && file != "index.md" ]) + examples_nav = [ filename_to_name(dir) => nav_dir(dir, joinpath(build_path, dir)) for dir in readdir(build_path) if isdir(joinpath(build_path, dir)) ] end -examples_nav = [ filename_to_name(dir) => nav_dir(dir, joinpath(build_path, dir)) for dir in readdir(build_path) if isdir(joinpath(build_path, dir)) ] +@info "Starting `makedocs`" makedocs(; modules = [Convex], diff --git a/docs/src/operations.md b/docs/src/operations.md index 
ca45746be..887f45617 100644 --- a/docs/src/operations.md +++ b/docs/src/operations.md @@ -28,41 +28,43 @@ Linear Program Representable Functions An optimization problem using only these functions can be solved by any LP solver. -| operation | description | vexity | slope | notes | -| ------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | --------------------------------------------------------------------------------------------------------- | ------------------------------ | -| `x+y` or `x.+y` | addition | affine | increasing | none | -| `x-y` or `x.-y` | subtraction | affine | increasing in $x$ decreasing in $y$ | none none | -| `x*y` | multiplication | affine | increasing if constant term $\ge 0$ decreasing if constant term $\le 0$ not monotonic otherwise | PR: one argument is constant | -| `x/y` | division | affine | increasing | PR: $y$ is scalar constant | -| `dot(*)(x, y)` | elementwise multiplication | affine | increasing | PR: one argument is constant | -| `dot(/)(x, y)` | elementwise division | affine | increasing | PR: one argument is constant | -| `x[1:4, 2:3]` | indexing and slicing | affine | increasing | none | -| `diag(x, k)` | $k$-th diagonal of a matrix | affine | increasing | none | -| `diagm(x)` | construct diagonal matrix | affine | increasing | PR: $x$ is a vector | -| `x'` | transpose | affine | increasing | none | -| `vec(x)` | vector representation | affine | increasing | none | -| `dot(x,y)` | $\sum_i x_i y_i$ | affine | increasing | PR: one argument is constant | -| `kron(x,y)` | Kronecker product | affine | increasing | PR: one argument is constant | -| `vecdot(x,y)` | `dot(vec(x),vec(y))` | affine | increasing | PR: one argument is constant | -| `sum(x)` | $\sum_{ij} x_{ij}$ | affine | increasing | none | -| `sum(x, k)` | sum elements across dimension $k$ | affine | increasing | 
none | -| `sumlargest(x, k)` | sum of $k$ largest elements of $x$ | convex | increasing | none | -| `sumsmallest(x, k)` | sum of $k$ smallest elements of $x$ | concave | increasing | none | -| `dotsort(a, b)` | `dot(sort(a),sort(b))` | convex | increasing | PR: one argument is constant | -| `reshape(x, m, n)` | reshape into $m \times n$ | affine | increasing | none | -| `minimum(x)` | $\min(x)$ | concave | increasing | none | -| `maximum(x)` | $\max(x)$ | convex | increasing | none | -| `[x y]` or `[x; y]` `hcat(x, y)` or `vcat(x, y)` | stacking | affine | increasing | none | -| `tr(x)` | $\mathrm{tr} \left(X \right)$ | affine | increasing | none | -| `partialtrace(x,sys,dims)` | Partial trace | affine | increasing | none | -| `partialtranspose(x,sys,dims)` | Partial transpose | affine | increasing | none | -| `conv(h,x)` | $h \in \mathbb{R}^m$, $x \in \mathbb{R}^n$, $h\star x \in \mathbb{R}^{m+n-1}$; entry $i$ is given by $\sum_{j=1}^m h_jx_{i-j+1}$ with $x_k=0$ for $k$ out of bounds | affine | increasing if $h\ge 0$ decreasing if $h\le 0$ not monotonic otherwise | PR: $h$ is constant | -| `min(x,y)` | $\min(x,y)$ | concave | increasing | none | -| `max(x,y)` | $\max(x,y)$ | convex | increasing | none | -| `pos(x)` | $\max(x,0)$ | convex | increasing | none | -| `neg(x)` | $\max(-x,0)$ | convex | decreasing | none | -| `invpos(x)` | $1/x$ | convex | decreasing | IC: $x>0$ | -| `abs(x)` | $\left\|x\right\|$ | convex | increasing on $x \ge 0$ decreasing on $x \le 0$ | none | +| operation | description | vexity | slope | notes | +|--------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|-------------------------------------------------------------------------------------------------|------------------------------| +| `x+y` or `x.+y` | addition | affine | increasing | none | +| `x-y` or `x.-y` | 
subtraction | affine | increasing in $x$ decreasing in $y$ | none none | +| `x*y` | multiplication | affine | increasing if constant term $\ge 0$ decreasing if constant term $\le 0$ not monotonic otherwise | PR: one argument is constant | +| `x/y` | division | affine | increasing | PR: $y$ is scalar constant | +| `dot(*)(x, y)` | elementwise multiplication | affine | increasing | PR: one argument is constant | +| `dot(/)(x, y)` | elementwise division | affine | increasing | PR: one argument is constant | +| `x[1:4, 2:3]` | indexing and slicing | affine | increasing | none | +| `diag(x, k)` | $k$-th diagonal of a matrix | affine | increasing | none | +| `diagm(x)` | construct diagonal matrix | affine | increasing | PR: $x$ is a vector | +| `x'` | transpose | affine | increasing | none | +| `vec(x)` | vector representation | affine | increasing | none | +| `dot(x,y)` | $\sum_i x_i y_i$ | affine | increasing | PR: one argument is constant | +| `kron(x,y)` | Kronecker product | affine | increasing | PR: one argument is constant | +| `vecdot(x,y)` | `dot(vec(x),vec(y))` | affine | increasing | PR: one argument is constant | +| `sum(x)` | $\sum_{ij} x_{ij}$ | affine | increasing | none | +| `sum(x, k)` | sum elements across dimension $k$ | affine | increasing | none | +| `sumlargest(x, k)` | sum of $k$ largest elements of $x$ | convex | increasing | none | +| `sumsmallest(x, k)` | sum of $k$ smallest elements of $x$ | concave | increasing | none | +| `dotsort(a, b)` | `dot(sort(a),sort(b))` | convex | increasing | PR: one argument is constant | +| `reshape(x, m, n)` | reshape into $m \times n$ | affine | increasing | none | +| `minimum(x)` | $\min(x)$ | concave | increasing | none | +| `maximum(x)` | $\max(x)$ | convex | increasing | none | +| `[x y]` or `[x; y]` `hcat(x, y)` or `vcat(x, y)` | stacking | affine | increasing | none | +| `tr(x)` | $\mathrm{tr} \left(X \right)$ | affine | increasing | none | +| `partialtrace(x,sys,dims)` | Partial trace | affine | 
increasing | none | +| `partialtranspose(x,sys,dims)` | Partial transpose | affine | increasing | none | +| `conv(h,x)` | $h \in \mathbb{R}^m$, $x \in \mathbb{R}^n$, $h\star x \in \mathbb{R}^{m+n-1}$; entry $i$ is given by $\sum_{j=1}^m h_jx_{i-j+1}$ with $x_k=0$ for $k$ out of bounds | affine | increasing if $h\ge 0$ decreasing if $h\le 0$ not monotonic otherwise | PR: $h$ is constant | +| `min(x,y)` | $\min(x,y)$ | concave | increasing | none | +| `max(x,y)` | $\max(x,y)$ | convex | increasing | none | +| `pos(x)` | $\max(x,0)$ | convex | increasing | none | +| `neg(x)` | $\max(-x,0)$ | convex | decreasing | none | +| `invpos(x)` | $1/x$ | convex | decreasing | IC: $x>0$ | +| `abs(x)` | $\left\|x\right\|$ | convex | increasing on $x \ge 0$ decreasing on $x \le 0$ | none | +| `opnorm(x, 1)` | maximum absolute column sum: $\max_{1 ≤ j ≤ n} \sum_{i=1}^m \left\|x_{ij}\right\|$ | convex | increasing on $x \ge 0$ decreasing on $x \le 0$ | | +| `opnorm(x, Inf)` | maximum absolute row sum: $\max_{1 ≤ i ≤ m} \sum_{j=1}^n \left\|x_{ij}\right\|$ | convex | increasing on $x \ge 0$ decreasing on $x \le 0$ | | Second-Order Cone Representable Functions ----------------------------------------- @@ -72,19 +74,19 @@ solver (including ECOS, SCS, Mosek, Gurobi, and CPLEX). Of course, if an optimization problem has both LP and SOCP representable functions, then any solver that can solve both LPs and SOCPs can solve the problem. 
-| operation | description | vexity | slope | notes | -| ------------------- | ----------------------------------------------------------------------------------- | ----------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------ | -| `norm(x, p)` | $(\sum x_i^p)^{1/p}$ | convex | increasing on $x \ge 0$ decreasing on $x \le 0$ | PR: `p >= 1` | -| `vecnorm(x, p)` | $(\sum x_{ij}^p)^{1/p}$ | convex | increasing on $x \ge 0$ decreasing on $x \le 0$ | PR: `p >= 1` | -| `quadform(x, P)` | $x^T P x$ | convex in $x$ affine in $P$ | increasing on $x \ge 0$ decreasing on $x \le 0$ increasing in $P$ | PR: either $x$ or $P$ must be constant; if $x$ is not constant, then $P$ must be symmetric and positive semidefinite | -| `quadoverlin(x, y)` | $x^T x/y$ | convex | increasing on $x \ge 0$ decreasing on $x \le 0$ decreasing in $y$ | IC: $y > 0$ | -| `sumsquares(x)` | $\sum x_i^2$ | convex | increasing on $x \ge 0$ decreasing on $x \le 0$ | none | -| `sqrt(x)` | $\sqrt{x}$ | concave | decreasing | IC: $x>0$ | -| `square(x), x^2` | $x^2$ | convex | increasing on $x \ge 0$ decreasing on $x \le 0$ | PR : $x$ is scalar | -| `dot(^)(x,2)` | $x.^2$ | convex | increasing on $x \ge 0$ decreasing on $x \le 0$ | elementwise | -| `geomean(x, y)` | $\sqrt{xy}$ | concave | increasing | IC: $x\ge0$, $y\ge0$ | -| `huber(x, M=1)` | $\begin{cases} x^2 &\|x\| \leq M \\ 2M\|x\| - M^2 &\|x\| > M \end{cases}$ | convex | increasing on $x \ge 0$ decreasing on $x \le 0$ | PR: $M>=1$ | +| operation | description | vexity | slope | notes | 
+|---------------------|---------------------------------------------------------------------------|-----------------------------|-------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------| +| `norm(x, p)` | $(\sum x_i^p)^{1/p}$ | convex | increasing on $x \ge 0$ decreasing on $x \le 0$ | PR: `p >= 1` | +| `quadform(x, P)` | $x^T P x$ | convex in $x$ affine in $P$ | increasing on $x \ge 0$ decreasing on $x \le 0$ increasing in $P$ | PR: either $x$ or $P$ must be constant; if $x$ is not constant, then $P$ must be symmetric and positive semidefinite | +| `quadoverlin(x, y)` | $x^T x/y$ | convex | increasing on $x \ge 0$ decreasing on $x \le 0$ decreasing in $y$ | IC: $y > 0$ | +| `sumsquares(x)` | $\sum x_i^2$ | convex | increasing on $x \ge 0$ decreasing on $x \le 0$ | none | +| `sqrt(x)` | $\sqrt{x}$ | concave | decreasing | IC: $x>0$ | +| `square(x), x^2` | $x^2$ | convex | increasing on $x \ge 0$ decreasing on $x \le 0$ | PR : $x$ is scalar | +| `dot(^)(x,2)` | $x.^2$ | convex | increasing on $x \ge 0$ decreasing on $x \le 0$ | elementwise | +| `geomean(x, y)` | $\sqrt{xy}$ | concave | increasing | IC: $x\ge0$, $y\ge0$ | +| `huber(x, M=1)` | $\begin{cases} x^2 &\|x\| \leq M \\ 2M\|x\| - M^2 &\|x\| > M \end{cases}$ | convex | increasing on $x \ge 0$ decreasing on $x \le 0$ | PR: $M>=1$ | +Note that for `p=1` and `p=Inf`, the function `norm(x,p)` is a linear-program representable, and does not need a SOCP solver, and for a matrix `x`, `norm(x,p)` is defined as `norm(vec(x), p)`. Exponential Cone Representable Functions ---------------------------------------- @@ -92,13 +94,13 @@ Exponential Cone Representable Functions An optimization problem using these functions can be solved by any exponential cone solver (SCS). 
-| operation | description | vexity | slope | notes | -| ----------------- | ------------------------------------------ | ------- | ------------- | -------------- | -| `logsumexp(x)` | $\log(\sum_i \exp(x_i))$ | convex | increasing | none | -| `exp(x)` | $\exp(x)$ | convex | increasing | none | -| `log(x)` | $\log(x)$ | concave | increasing | IC: $x>0$ | +| operation | description | vexity | slope | notes | +|-------------------|-----------------------------------|---------|---------------|-----------| +| `logsumexp(x)` | $\log(\sum_i \exp(x_i))$ | convex | increasing | none | +| `exp(x)` | $\exp(x)$ | convex | increasing | none | +| `log(x)` | $\log(x)$ | concave | increasing | IC: $x>0$ | | `entropy(x)` | $\sum_{ij} -x_{ij} \log (x_{ij})$ | concave | not monotonic | IC: $x>0$ | -| `logisticloss(x)` | $\log(1 + \exp(x_i))$ | convex | increasing | none | +| `logisticloss(x)` | $\log(1 + \exp(x_i))$ | convex | increasing | none | Semidefinite Program Representable Functions -------------------------------------------- @@ -106,10 +108,10 @@ Semidefinite Program Representable Functions An optimization problem using these functions can be solved by any SDP solver (including SCS and Mosek). 
-| operation | description | vexity | slope | notes | -| ------------------ | --------------------------------- | ------- | ------------- | ------------------------------ | -| `nuclearnorm(x)` | sum of singular values of $x$ | convex | not monotonic | none | -| `operatornorm(x)` | max of singular values of $x$ | convex | not monotonic | none | +| operation | description | vexity | slope | notes | +|------------------|-------------------------------|--------|---------------|-------| +| `nuclearnorm(x)` | sum of singular values of $x$ | convex | not monotonic | none | +| `opnorm(x, 2)` (`operatornorm(x)`) | max of singular values of $x$ | convex | not monotonic | none | | | `eigmax(x)` | max eigenvalue of $x$ | convex | not monotonic | none | | `eigmin(x)` | min eigenvalue of $x$ | concave | not monotonic | none | | `matrixfrac(x, P)` | $x^TP^{-1}x$ | convex | not monotonic | IC: P is positive semidefinite | @@ -122,8 +124,8 @@ An optimization problem using these functions can be solved by any solver that supports exponential constraints *and* semidefinite constraints simultaneously (SCS). 
-| operation | description | vexity | slope | notes | -| ----------- | ----------------------------- | ------- | ---------- | ------------------------------ | +| operation | description | vexity | slope | notes | +|-------------|---------------------------|---------|------------|--------------------------------| | `logdet(x)` | log of determinant of $x$ | concave | increasing | IC: x is positive semidefinite | Promotions diff --git a/src/Convex.jl b/src/Convex.jl index ea34cea63..8ebe72d31 100644 --- a/src/Convex.jl +++ b/src/Convex.jl @@ -168,12 +168,5 @@ include("utilities/iteration.jl") include("utilities/broadcast.jl") include("problem_depot/problem_depot.jl") -# Deprecated workaround for memory leak (https://github.com/jump-dev/Convex.jl/issues/83) -function clearmemory() - Base.depwarn("Convex.clearmemory() is deprecated, as the memory leak it works around has been closed (in https://github.com/jump-dev/Convex.jl/pull/322). This function no longer does anything and will be removed in a future Convex.jl release.", :clearmemory ) -end - -@deprecate lambdamin eigmin -@deprecate lambdamax eigmax end diff --git a/src/atoms/affine/dot.jl b/src/atoms/affine/dot.jl index 1d50918c4..c02a0c7f7 100644 --- a/src/atoms/affine/dot.jl +++ b/src/atoms/affine/dot.jl @@ -13,10 +13,3 @@ _vecdot(x, y) = sum(broadcast(*, asvec(x), asvec(y))) dot(x::AbstractExpr, y::AbstractExpr) = _vecdot(x, y) dot(x::Value, y::AbstractExpr) = _vecdot(x, y) dot(x::AbstractExpr, y::Value) = _vecdot(x, y) - -if isdefined(LinearAlgebra, :vecdot) # defined but deprecated - import LinearAlgebra: vecdot -end -Base.@deprecate vecdot(x::AbstractExpr, y::AbstractExpr) dot(x, y) -Base.@deprecate vecdot(x::Value, y::AbstractExpr) dot(x, y) -Base.@deprecate vecdot(x::AbstractExpr, y::Value) dot(x, y) diff --git a/src/atoms/affine/partialtranspose.jl b/src/atoms/affine/partialtranspose.jl index e85380f4b..1da6144ce 100644 --- a/src/atoms/affine/partialtranspose.jl +++ b/src/atoms/affine/partialtranspose.jl @@ 
-68,8 +68,11 @@ end """ permutedims_matrix(dims, p) + Returns a matrix `M` so that for any vector `v` of length `prod(dims)`, + M*v == vec(permutedims(reshape(v, dims), p)) + """ function permutedims_matrix(dims, p) d = prod(dims) diff --git a/src/atoms/affine/stack.jl b/src/atoms/affine/stack.jl index fafb51e4f..65001dfd6 100644 --- a/src/atoms/affine/stack.jl +++ b/src/atoms/affine/stack.jl @@ -107,6 +107,11 @@ function conic_form!(x::HcatAtom, unique_conic_forms::UniqueConicForms) return get_conic_form(unique_conic_forms, x) end +# TODO: fix piracy! + +# * `Value` is not owned by Convex.jl +# * splatting creates zero-argument functions, which again are not owned by Convex.jl + hcat(args::AbstractExpr...) = HcatAtom(args...) hcat(args::AbstractExprOrValue...) = HcatAtom(map(arg -> convert(AbstractExpr, arg), args)...) hcat(args::Value...) = Base.cat(args..., dims=Val(2)) diff --git a/src/atoms/affine/sum.jl b/src/atoms/affine/sum.jl index e3f508fbb..dcf772b1a 100644 --- a/src/atoms/affine/sum.jl +++ b/src/atoms/affine/sum.jl @@ -71,4 +71,3 @@ function _sum(x::AbstractExpr, dimension::Integer) end end -Base.@deprecate sum(x::AbstractExpr, dim::Int) sum(x, dims=dim) diff --git a/src/atoms/sdp_cone/operatornorm.jl b/src/atoms/sdp_cone/operatornorm.jl index 3688cc07b..ba781d361 100644 --- a/src/atoms/sdp_cone/operatornorm.jl +++ b/src/atoms/sdp_cone/operatornorm.jl @@ -5,7 +5,6 @@ # All expressions and atoms are subtypes of AbstractExpr. # Please read expressions.jl first. 
############################################################################# -import LinearAlgebra: opnorm ### Operator norm @@ -41,7 +40,8 @@ end sigmamax(x::AbstractExpr) = OperatorNormAtom(x) -function opnorm(x::AbstractExpr, p::Real=2) + +function LinearAlgebra.opnorm(x::AbstractExpr, p::Real=2) if length(size(x)) <= 1 || minimum(size(x)) == 1 throw(ArgumentError("argument to `opnorm` must be a matrix")) end @@ -56,8 +56,6 @@ function opnorm(x::AbstractExpr, p::Real=2) end end -Base.@deprecate operatornorm(x::AbstractExpr) opnorm(x) - # Create the equivalent conic problem: # minimize t # subject to diff --git a/src/atoms/second_order_cone/norm.jl b/src/atoms/second_order_cone/norm.jl index 09e8927f6..1f99b4a72 100755 --- a/src/atoms/second_order_cone/norm.jl +++ b/src/atoms/second_order_cone/norm.jl @@ -5,10 +5,25 @@ norm_inf(x::AbstractExpr) = maximum(abs(x)) norm_1(x::AbstractExpr) = sum(abs(x)) norm_fro(x::AbstractExpr) = norm2(vec(x)) -# behavior of norm should be consistent with julia: -# * vector norms for vectors -# * operator norms for matrices -function norm(x::AbstractExpr, p::Real=2) +""" + norm(x::AbstractExpr, p::Real=2) + +Computes the `p`-norm `‖x‖ₚ = (∑ᵢ |xᵢ|^p)^(1/p)` of a vector expression `x`. + +This function uses specialized methods for `p=1, 2, Inf`. For `p > 1` otherwise, +this function uses the procedure documented at +[`rational_to_socp.pdf`](https://github.com/jump-dev/Convex.jl/raw/master/docs/supplementary/rational_to_socp.pdf), +based on the paper "Second-order cone programming" by F. Alizadeh and D. Goldfarb, +Mathematical Programming, Series B, 95:3-51, 2001. + +!!! warning + For versions of Convex.jl prior to v0.14.0, `norm` on a matrix expression returned + the operator norm ([`opnorm`](@ref)), which matches Julia v0.6 behavior. This functionality + was deprecated since Convex.jl v0.8.0, and has been removed. In the future, + `norm(x, p)` will return `‖vec(x)‖ₚ`, matching the behavior of [`norm`](@ref) + for numeric matrices. 
+""" +function LinearAlgebra.norm(x::AbstractExpr, p::Real=2) if length(size(x)) <= 1 || minimum(size(x))==1 # x is a vector if p == 1 @@ -24,16 +39,6 @@ function norm(x::AbstractExpr, p::Real=2) error("vector p-norms not defined for p < 1") end else - # TODO: After the deprecation period, allow this again but make it consistent with - # LinearAlgebra, i.e. make norm(x, p) for x a matrix the same as norm(vec(x), p). - Base.depwarn("`norm(x, p)` for matrices will in the future be equivalent to " * - "`norm(vec(x), p)`. Use `opnorm(x, p)` for the Julia 0.6 behavior of " * - "computing the operator norm for matrices.", :norm) - return opnorm(x, p) + error("In Convex.jl v0.13 and below, `norm(x, p)` meant `opnorm(x, p)` (but was deprecated since v0.8.0). In the future, `norm(x,p)` for matrices will be equivalent to `norm(vec(x),p)`. This is currently an error to ensure you update your code!") end end - -if isdefined(LinearAlgebra, :vecnorm) # deprecated but defined - import LinearAlgebra: vecnorm -end -Base.@deprecate vecnorm(x::AbstractExpr, p::Real=2) norm(vec(x), p) diff --git a/src/expressions.jl b/src/expressions.jl index ba8083991..830fd024c 100644 --- a/src/expressions.jl +++ b/src/expressions.jl @@ -115,5 +115,3 @@ lastindex(x::AbstractExpr) = length(x) axes(x::AbstractExpr) = (Base.OneTo(size(x, 1)), Base.OneTo(size(x, 2))) axes(x::AbstractExpr, n::Integer) = axes(x)[n] lastindex(x::AbstractExpr, n::Integer) = last(axes(x, n)) - -@deprecate get_vectorized_size(x::AbstractExpr) length(x) diff --git a/src/solution.jl b/src/solution.jl index aa3ab8b61..0dc5ba2a9 100644 --- a/src/solution.jl +++ b/src/solution.jl @@ -210,7 +210,8 @@ Optional keyword arguments: * `check_vexity` (default: `true`): emits a warning if the problem is not DCP * `verbose` (default: `true`): emits a warning if the problem was not solved optimally or `warmstart=true` but is not supported by the solver. 
-* `warmstart` (default: `false`): whether the solver should start the optimization from a previous optimal value (according to the current value of the variables in the problem, which can be set by [`value!`](@ref) and accessed by [`evaluate`](@ref)). +* `warmstart` (default: `false`): whether the solver should start the optimization from a previous optimal value (according to the current value + of the variables in the problem, which can be set by [`set_value!`](@ref) and accessed by [`evaluate`](@ref)). * `silent_solver`: whether the solver should be silent (and not emit output or logs) during the solution process. """ diff --git a/test/deprecations.jl b/test/deprecations.jl deleted file mode 100644 index 6f9e55d70..000000000 --- a/test/deprecations.jl +++ /dev/null @@ -1,7 +0,0 @@ -@testset "Deprecations" begin - A = Semidefinite(2) - @test_deprecated lambdamin(A) - @test_deprecated lambdamax(A) - - @test_deprecated Convex.clearmemory() -end diff --git a/test/runtests.jl b/test/runtests.jl index 5d718e15d..b98798011 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -32,7 +32,6 @@ end @testset "Convex" begin include("test_utilities.jl") - include("deprecations.jl") include("test_abstract_variable.jl") @testset "SCS with warmstarts" begin