diff --git a/Project.toml b/Project.toml
index ba7dc486..38aee786 100644
--- a/Project.toml
+++ b/Project.toml
@@ -15,7 +15,7 @@ LuxorGraphPlot = "1f49bdf2-22a7-4bc4-978b-948dc219fbbc"
 OMEinsum = "ebe7aa44-baf0-506c-a96f-8464559b3922"
 Polynomials = "f27b6e38-b328-58d1-80ce-0feddd5e7a45"
 Primes = "27ebfcd6-29c5-5fa9-bf4b-fb8fc14df3ae"
-Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
+ProblemReductions = "899c297d-f7d2-4ebf-8815-a35996def416"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 SIMDTypes = "94e857df-77ce-4151-89e5-788b33177be4"
 Serialization = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
@@ -41,7 +41,7 @@ LuxorGraphPlot = "0.5"
 OMEinsum = "0.8"
 Polynomials = "4"
 Primes = "0.5"
-Printf = "1"
+ProblemReductions = "0.2"
 Random = "1"
 SIMDTypes = "0.1"
 Serialization = "1"
diff --git a/docs/make.jl b/docs/make.jl
index c140fa32..a6978fed 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -1,6 +1,6 @@
 using Pkg
 using GenericTensorNetworks
-using GenericTensorNetworks: TropicalNumbers, Polynomials, OMEinsum, OMEinsum.OMEinsumContractionOrders, LuxorGraphPlot
+using GenericTensorNetworks: TropicalNumbers, Polynomials, OMEinsum, OMEinsum.OMEinsumContractionOrders, LuxorGraphPlot, ProblemReductions
 using Documenter
 using DocThemeIndigo
 using Literate
@@ -17,7 +17,7 @@ indigo = DocThemeIndigo.install(GenericTensorNetworks)
 DocMeta.setdocmeta!(GenericTensorNetworks, :DocTestSetup, :(using GenericTensorNetworks); recursive=true)
 makedocs(;
-    modules=[GenericTensorNetworks, TropicalNumbers, OMEinsum, OMEinsumContractionOrders, LuxorGraphPlot],
+    modules=[GenericTensorNetworks, ProblemReductions, TropicalNumbers, OMEinsum, OMEinsumContractionOrders, LuxorGraphPlot],
     authors="Jinguo Liu",
     repo="https://github.com/QuEraComputing/GenericTensorNetworks.jl/blob/{commit}{path}#{line}",
     sitename="GenericTensorNetworks.jl",
@@ -40,7 +40,6 @@ makedocs(;
         "Satisfiability problem" => "generated/Satisfiability.md",
         "Set covering problem" => "generated/SetCovering.md",
         "Set packing problem" => "generated/SetPacking.md",
-        #"Other problems" => "generated/Others.md",
     ],
     "Topics" => [
         "Gist" => "gist.md",
diff --git a/docs/src/ref.md b/docs/src/ref.md
index 17f96907..e4c4cea8 100644
--- a/docs/src/ref.md
+++ b/docs/src/ref.md
@@ -1,9 +1,9 @@
 # References
-## Graph problems
+## Constraint Satisfaction Problems
 ```@docs
 solve
 GenericTensorNetwork
-GraphProblem
+ConstraintSatisfactionProblem
 IndependentSet
 MaximalIS
 Matching
@@ -15,28 +15,35 @@ PaintShop
 Satisfiability
 SetCovering
 SetPacking
-OpenPitMining
 ```
 
-#### Graph Problem Interfaces
+#### Constraint Satisfaction Problem Interfaces
 
-To subtype [`GraphProblem`](@ref), a new type must contain a `code` field to represent the (optimized) tensor network.
-Interfaces [`GenericTensorNetworks.generate_tensors`](@ref), [`labels`](@ref), [`flavors`](@ref) and [`get_weights`](@ref) are required.
-[`nflavor`](@ref) is optional.
+To subtype [`ConstraintSatisfactionProblem`](@ref), a new type must contain a `code` field to represent the (optimized) tensor network.
+Interfaces [`GenericTensorNetworks.generate_tensors`](@ref), [`flavors`](@ref) and [`weights`](@ref) are required.
+[`num_flavors`](@ref) is optional.
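+
+A minimal sketch of what such a subtype might look like (the type `MyProblem` and its fields are hypothetical and only illustrate the interface functions named above):
+
+```julia
+struct MyProblem <: ConstraintSatisfactionProblem
+    graph::SimpleGraph{Int}   # hypothetical field
+    weights::Vector{Int}      # hypothetical field: one weight per local term
+end
+# each degree of freedom takes one of two flavors, 0 or 1
+ProblemReductions.flavors(::Type{<:MyProblem}) = [0, 1]
+ProblemReductions.weights(problem::MyProblem) = problem.weights
+```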
 ```@docs
 GenericTensorNetworks.generate_tensors
-labels
-energy_terms
 flavors
-get_weights
-chweights
-nflavor
+weights
+set_weights
+is_weighted
+num_flavors
 fixedvertices
 ```
 
-#### Graph Problem Utilities
+#### Constraint Satisfaction Problem Utilities
 
 ```@docs
+hard_constraints
+is_satisfied
+local_solution_spec
+solution_size
+energy_mode
+LargerSizeIsBetter
+SmallerSizeIsBetter
+energy
+
 is_independent_set
 is_maximal_independent_set
 is_dominating_set
@@ -46,10 +53,7 @@ is_set_covering
 is_set_packing
 
 cut_size
-spinglass_energy
 num_paint_shop_color_switch
-paint_shop_coloring_from_config
-mis_compactify!
 
 CNF
 CNFClause
@@ -60,8 +64,7 @@ satisfiable
 ¬
 ∧
 
-is_valid_mining
-print_mining
+mis_compactify!
 ```
 
 ## Properties
@@ -145,7 +148,12 @@ MergeGreedy
 ## Others
 
 #### Graph
+In addition to the `SimpleGraph` defined in [Graphs](https://github.com/JuliaGraphs/Graphs.jl), `GenericTensorNetworks` also defines the following types and functions.
+
 ```@docs
+HyperGraph
+UnitDiskGraph
+
 show_graph
 show_configs
 show_einsum
@@ -162,7 +170,6 @@ render_locs
 
 diagonal_coupled_graph
 square_lattice_graph
-unit_disk_graph
 line_graph
 
 random_diagonal_coupled_graph
diff --git a/examples/Coloring.jl b/examples/Coloring.jl
index 73e87dd1..1a4a7bb8 100644
--- a/examples/Coloring.jl
+++ b/examples/Coloring.jl
@@ -46,10 +46,11 @@ problem = GenericTensorNetwork(coloring)
 # ## Solving properties
 
 # ##### counting all possible coloring
-num_of_coloring = solve(problem, CountingMax())[]
+# The size of a coloring problem is the number of violations of the coloring constraint.
+num_of_coloring = solve(problem, CountingMin())[]
 
 # ##### finding one best coloring
-single_solution = solve(problem, SingleConfigMax())[]
+single_solution = solve(problem, SingleConfigMin())[]
 read_config(single_solution)
 
 is_vertex_coloring(graph, read_config(single_solution))
@@ -68,7 +69,7 @@ show_graph(linegraph, [(locations[e.src] .+ locations[e.dst])
 # Let us construct the tensor network and see if there are solutions.
 lineproblem = Coloring{3}(linegraph);
 
-num_of_coloring = solve(GenericTensorNetwork(lineproblem), CountingMax())[]
+num_of_coloring = solve(GenericTensorNetwork(lineproblem), CountingMin())[]
 read_size_count(num_of_coloring)
 
 # You will see that the maximum size 28 is smaller than the number of edges in the `linegraph`,
diff --git a/examples/PaintShop.jl b/examples/PaintShop.jl
index ae6ce5ae..7ae49017 100644
--- a/examples/PaintShop.jl
+++ b/examples/PaintShop.jl
@@ -12,7 +12,7 @@
 # In the following, we use a character to represent a car,
 # and define a binary paint shop problem as a string in which each character appears exactly twice.
 
-using GenericTensorNetworks, Graphs
+using GenericTensorNetworks, Graphs, GenericTensorNetworks.ProblemReductions
 
 sequence = collect("iadgbeadfcchghebif")
 
@@ -88,7 +88,7 @@ best_configs = solve(problem, ConfigsMin())[]
 
 # One can see two identical bitstrings corresponding to two different vertex configurations; they are related by the bit-flip symmetry.
-painting1 = paint_shop_coloring_from_config(pshop, read_config(best_configs)[1])
+painting1 = ProblemReductions.paint_shop_coloring_from_config(pshop, read_config(best_configs)[1])
 
 show_graph(graph, locations; format=:svg, texts=string.(sequence),
     edge_colors=[sequence[e.src] == sequence[e.dst] ?
"blue" : "black" for e in edges(graph)], diff --git a/examples/Satisfiability.jl b/examples/Satisfiability.jl index 1daaef6d..45c5b839 100644 --- a/examples/Satisfiability.jl +++ b/examples/Satisfiability.jl @@ -8,7 +8,7 @@ # One can specify a satisfiable problem in the [conjunctive normal form](https://en.wikipedia.org/wiki/Conjunctive_normal_form). # In boolean logic, a formula is in conjunctive normal form (CNF) if it is a conjunction (∧) of one or more clauses, # where a clause is a disjunction (∨) of literals. -using GenericTensorNetworks +using GenericTensorNetworks, GenericTensorNetworks.ProblemReductions @bools a b c d e f g @@ -48,15 +48,15 @@ problem = GenericTensorNetwork(sat) # ## Solving properties # #### Satisfiability and its counting -# The size of a satisfiability problem is defined by the number of satisfiable clauses. -num_satisfiable = solve(problem, SizeMax())[] +# The size of a satisfiability problem is defined by the number of unsatisfied clauses. +num_satisfiable = solve(problem, SizeMin())[] # The [`GraphPolynomial`](@ref) of a satisfiability problem counts the number of solutions that `k` clauses satisfied. num_satisfiable_count = read_size_count(solve(problem, GraphPolynomial())[]) # #### Find one of the solutions -single_config = read_config(solve(problem, SingleConfigMax())[]) +single_config = read_config(solve(problem, SingleConfigMin())[]) # One will see a bit vector printed. # One can create an assignment and check the validity with the following statement: -satisfiable(cnf, Dict(zip(labels(problem), single_config))) +satisfiable(cnf, Dict(zip(ProblemReductions.symbols(problem.problem), single_config))) diff --git a/examples/SetCovering.jl b/examples/SetCovering.jl index f6bd0d83..72fe493a 100644 --- a/examples/SetCovering.jl +++ b/examples/SetCovering.jl @@ -73,7 +73,7 @@ min_configs = read_config(solve(problem, ConfigsMin())[]) # Hence the two optimal solutions are ``\{z_1, z_3, z_5, z_6\}`` and ``\{z_2, z_3, z_4, z_5\}``. # The correctness of this result can be checked with the [`is_set_covering`](@ref) function. -all(c->is_set_covering(sets, c), min_configs) +all(c->is_set_covering(problem.problem, c), min_configs) # Similarly, if one is only interested in computing one of the minimum set coverings, # one can use the graph property [`SingleConfigMin`](@ref). diff --git a/examples/SetPacking.jl b/examples/SetPacking.jl index 9c48cb79..0c5a5f33 100644 --- a/examples/SetPacking.jl +++ b/examples/SetPacking.jl @@ -74,7 +74,7 @@ max_configs = read_config(solve(problem, ConfigsMax())[]) # Hence the only optimal solution is ``\{z_1, z_3, z_6\}`` that has size 3. # The correctness of this result can be checked with the [`is_set_packing`](@ref) function. -all(c->is_set_packing(sets, c), max_configs) +all(c->is_set_packing(problem.problem, c), max_configs) # Similarly, if one is only interested in computing one of the maximum set packing, # one can use the graph property [`SingleConfigMax`](@ref). 
diff --git a/examples/SpinGlass.jl b/examples/SpinGlass.jl
index ca40dffd..352a2ec7 100644
--- a/examples/SpinGlass.jl
+++ b/examples/SpinGlass.jl
@@ -26,7 +26,7 @@ show_graph(graph, locations; format=:svg)
 # ## Generic tensor network representation
 # An anti-ferromagnetic spin glass problem can be defined with the [`SpinGlass`](@ref) type as
-spinglass = SpinGlass(graph, fill(1, ne(graph)))
+spinglass = SpinGlass(graph, fill(1, ne(graph)), zeros(Int, nv(graph)))
 
 # The tensor network representation of the spin glass problem can be obtained by
 problem = GenericTensorNetwork(spinglass)
@@ -81,8 +81,10 @@ partition_function = solve(problem, GraphPolynomial())[]
 # The ground state of the spin glass problem can be found by the [`SingleConfigMin`](@ref) solver.
 ground_state = read_config(solve(problem, SingleConfigMin())[])
 
-# The energy of the ground state can be verified by the [`spinglass_energy`](@ref) function.
-Emin_verify = spinglass_energy(spinglass, ground_state)
+# The energy of the ground state can be verified by the [`energy`](@ref) function.
+# Note that the output ground state cannot be directly used as the input of the `energy` function;
+# it needs to be converted to a spin configuration first (boolean `0`/`1` maps to spin `1`/`-1`).
+Emin_verify = energy(problem.problem, 1 .- 2 .* Int.(ground_state))
 
 # You should see a result consistent with the `Emin` above.
 
@@ -117,7 +119,7 @@ weights = [-1, 1, -1, 1, -1, 1, -1, 1];
 # \end{align*}
 # ```
 # A spin glass problem can be defined with the [`SpinGlass`](@ref) type as
-hyperspinglass = SpinGlass(num_vertices, hyperedges, weights)
+hyperspinglass = SpinGlass(HyperGraph(num_vertices, hyperedges), weights, zeros(Int, num_vertices))
 
 # The tensor network representation of the spin glass problem can be obtained by
 hyperproblem = GenericTensorNetwork(hyperspinglass)
@@ -155,8 +157,8 @@ poly = solve(hyperproblem, GraphPolynomial())[]
 # The ground state of the spin glass problem can be found by the [`SingleConfigMin`](@ref) solver.
 ground_state = read_config(solve(hyperproblem, SingleConfigMin())[])
 
-# The energy of the ground state can be verified by the [`spinglass_energy`](@ref) function.
+# The energy of the ground state can be verified by the [`energy`](@ref) function.
 
-Emin_verify = spinglass_energy(hyperspinglass, ground_state)
+Emin_verify = energy(hyperproblem.problem, 1 .- 2 .* Int.(ground_state))
 
 # You should see a result consistent with the `Emin` above.
\ No newline at end of file
diff --git a/examples/weighted.jl b/examples/weighted.jl
index 1ea456be..f8c996b0 100644
--- a/examples/weighted.jl
+++ b/examples/weighted.jl
@@ -1,16 +1,16 @@
 # # Weighted problems
 # Let us use the maximum independent set problem on Petersen graph as an example.
-using GenericTensorNetworks, Graphs
+using GenericTensorNetworks, GenericTensorNetworks.ProblemReductions, Graphs
 
 graph = Graphs.smallgraph(:petersen)
 
 # The following code constructs a weighted MIS problem instance.
 problem = GenericTensorNetwork(IndependentSet(graph, collect(1:10)));
-GenericTensorNetworks.get_weights(problem)
+GenericTensorNetworks.weights(problem)
 
 # The tensor labels associated with the weights can be accessed by
-GenericTensorNetworks.energy_terms(problem)
+ProblemReductions.local_solution_spec(problem.problem)
 
 # Here, the `weights` keyword argument can be a vector for weighted graphs or `UnitWeight()` for unweighted graphs.
 # Most solution space properties that work for unweighted graphs also work for weighted graphs.
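+
+# For example (a sketch reusing the weighted `problem` constructed above), the size of the
+# maximum weighted independent set can be computed via the [`SizeMax`](@ref) property:
+solve(problem, SizeMax())[]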
diff --git a/src/GenericTensorNetworks.jl b/src/GenericTensorNetworks.jl
index 6ed2207b..36a94c1b 100644
--- a/src/GenericTensorNetworks.jl
+++ b/src/GenericTensorNetworks.jl
@@ -5,7 +5,7 @@ using TropicalNumbers
 using OMEinsum
 using OMEinsum: contraction_complexity, timespace_complexity, timespacereadwrite_complexity, getixsv, NestedEinsum, getixs, getiy, DynamicEinCode
 using Graphs, Random
-using DelimitedFiles, Serialization, Printf
+using DelimitedFiles, Serialization
 using LuxorGraphPlot
 using LuxorGraphPlot.Luxor.Colors: @colorant_str
 using LuxorGraphPlot: Layered
@@ -15,6 +15,12 @@ using FFTW
 using Primes
 using DocStringExtensions
 using Base.Cartesian
+using ProblemReductions
+import ProblemReductions: ConstraintSatisfactionProblem, AbstractSatisfiabilityProblem, UnitWeight, hard_constraints, is_satisfied, local_solution_spec, solution_size, energy_mode, LargerSizeIsBetter, SmallerSizeIsBetter
+import ProblemReductions: @bv_str, StaticElementVector, StaticBitVector, onehotv, _nints, hamming_distance
+import ProblemReductions: is_set_covering, is_vertex_coloring, is_set_packing, is_dominating_set, is_matching, is_maximal_independent_set, cut_size, is_independent_set
+import ProblemReductions: num_paint_shop_color_switch, spin_glass_from_matrix, CNF, CNFClause, BoolVar, satisfiable, @bools, ∨, ¬, ∧
+import ProblemReductions: flavors, set_weights, weights, is_weighted, num_flavors, variables, energy
 import AbstractTrees: children, printnode, print_tree
 import StatsBase
@@ -34,28 +40,32 @@ export CountingTropicalF64, CountingTropicalF32, TropicalF64, TropicalF32, ExtendedTropical
 export generate_samples, OnehotVec
 
 # Graphs
+export HyperGraph, SimpleGraph, UnitDiskGraph
 export random_regular_graph, diagonal_coupled_graph
-export square_lattice_graph, unit_disk_graph, random_diagonal_coupled_graph, random_square_lattice_graph
+export square_lattice_graph, random_diagonal_coupled_graph, random_square_lattice_graph
 export line_graph
+
+# Problems
+export AbstractProblem, ConstraintSatisfactionProblem
+export hard_constraints, is_satisfied, local_solution_spec, solution_size, energy_mode, LargerSizeIsBetter, SmallerSizeIsBetter
 
 # Tensor Networks (Graph problems)
-export GraphProblem, GenericTensorNetwork, optimize_code, UnitWeight, ZeroWeight
-export flavors, labels, nflavor, get_weights, fixedvertices, chweights, energy_terms
+export GenericTensorNetwork, optimize_code, UnitWeight
+export flavors, variables, num_flavors, weights, fixedvertices, set_weights, is_weighted
 export IndependentSet, mis_compactify!, is_independent_set
 export MaximalIS, is_maximal_independent_set
 export cut_size, MaxCut
-export spinglass_energy, spin_glass_from_matrix, SpinGlass
-export PaintShop, paintshop_from_pairs, num_paint_shop_color_switch, paint_shop_coloring_from_config, paint_shop_from_pairs
+export energy, spin_glass_from_matrix, SpinGlass
+export PaintShop, paintshop_from_pairs, num_paint_shop_color_switch
 export Coloring, is_vertex_coloring
 export Satisfiability, CNF, CNFClause, BoolVar, satisfiable, @bools, ∨, ¬, ∧
 export DominatingSet, is_dominating_set
 export Matching, is_matching
 export SetPacking, is_set_packing
 export SetCovering, is_set_covering
-export OpenPitMining, is_valid_mining, print_mining
 
 # Interfaces
-export solve, SizeMax, SizeMin, PartitionFunction, CountingAll, CountingMax, CountingMin, GraphPolynomial, SingleConfigMax, SingleConfigMin, ConfigsAll, ConfigsMax, ConfigsMin, Single
+export solve, SizeMax, SizeMin, PartitionFunction, CountingAll, CountingMax, CountingMin, GraphPolynomial,
+    SingleConfigMax, SingleConfigMin, ConfigsAll, ConfigsMax, ConfigsMin, Single, AllConfigs
 
 # Utilities
 export save_configs, load_configs, hamming_distribution, save_sumproduct, load_sumproduct
@@ -73,10 +83,8 @@ project_relative_path(xs...) = normpath(joinpath(dirname(dirname(pathof(@__MODULE__))), xs...))
 include("Mods.jl/src/Mods.jl")
 using .Mods
 
-include("utils.jl")
-include("bitvector.jl")
 include("arithematics.jl")
-include("networks/networks.jl")
+include("networks.jl")
 include("graph_polynomials.jl")
 include("configurations.jl")
 include("graphs.jl")
diff --git a/src/arithematics.jl b/src/arithematics.jl
index 4cc8c66e..62867f0a 100644
--- a/src/arithematics.jl
+++ b/src/arithematics.jl
@@ -1,5 +1,4 @@
 @enum TreeTag LEAF SUM PROD ZERO ONE
-
 # pirate
 Base.abs(x::Mod) = x
 Base.isless(x::Mod{N}, y::Mod{N}) where N = mod(x.val, N) < mod(y.val, N)
@@ -789,31 +788,31 @@ end
 # convert from counting type to bitstring type
 for F in [:set_type, :sampler_type, :treeset_type]
     @eval begin
-        function $F(::Type{T}, n::Int, nflavor::Int) where {OT, K, T<:TruncatedPoly{K,C,OT} where C}
-            TruncatedPoly{K, $F(n,nflavor),OT}
+        function $F(::Type{T}, n::Int, num_flavors::Int) where {OT, K, T<:TruncatedPoly{K,C,OT} where C}
+            TruncatedPoly{K, $F(n,num_flavors),OT}
         end
-        function $F(::Type{T}, n::Int, nflavor::Int) where {TX, T<:Polynomial{C,TX} where C}
-            Polynomial{$F(n,nflavor),:x}
+        function $F(::Type{T}, n::Int, num_flavors::Int) where {TX, T<:Polynomial{C,TX} where C}
+            Polynomial{$F(n,num_flavors),:x}
         end
-        function $F(::Type{T}, n::Int, nflavor::Int) where {TV, T<:CountingTropical{TV}}
-            CountingTropical{TV, $F(n,nflavor)}
+        function $F(::Type{T}, n::Int, num_flavors::Int) where {TV, T<:CountingTropical{TV}}
+            CountingTropical{TV, $F(n,num_flavors)}
         end
-        function $F(::Type{Real}, n::Int, nflavor::Int)
-            $F(n, nflavor)
+        function $F(::Type{Real}, n::Int, num_flavors::Int)
+            $F(n, num_flavors)
         end
     end
 end
 for (F,TP) in [(:set_type, :ConfigEnumerator), (:sampler_type, :ConfigSampler)]
-    @eval function $F(n::Integer, nflavor::Integer)
-        s = ceil(Int, log2(nflavor))
+    @eval function $F(n::Integer, num_flavors::Integer)
+        s = ceil(Int, log2(num_flavors))
         c = _nints(n,s)
         return $TP{n,s,c}
     end
 end
-function treeset_type(n::Integer, nflavor::Integer)
-    return SumProductTree{OnehotVec{n, nflavor}}
+function treeset_type(n::Integer, num_flavors::Integer)
+    return SumProductTree{OnehotVec{n, num_flavors}}
 end
-sampler_type(::Type{ExtendedTropical{K,T}}, n::Int, nflavor::Int) where {K,T} = ExtendedTropical{K, sampler_type(T, n, nflavor)}
+sampler_type(::Type{ExtendedTropical{K,T}}, n::Int, num_flavors::Int) where {K,T} = ExtendedTropical{K, sampler_type(T, n, num_flavors)}
 
 # utilities for creating onehot vectors
 onehotv(::Type{ConfigEnumerator{N,S,C}}, i::Integer, v) where {N,S,C} = ConfigEnumerator([onehotv(StaticElementVector{N,S,C}, i, v)])
diff --git a/src/bitvector.jl b/src/bitvector.jl
deleted file mode 100644
index 9a764f25..00000000
--- a/src/bitvector.jl
+++ /dev/null
@@ -1,201 +0,0 @@
-"""
-    StaticElementVector{N,S,C}
-    StaticElementVector(nflavor::Int, x::AbstractVector)
-
-`N` is the length of vector, `C` is the size of storage in unit of `UInt64`,
-`S` is the stride defined as the `log2(# of flavors)`.
-When the number of flavors is 2, it is a `StaticBitVector`.
-
-Fields
--------------------------------
-* `data` is a tuple of `UInt64` for storing the configuration of static elements.
-
-Examples
--------------------------------
-```jldoctest; setup=:(using GenericTensorNetworks)
-julia> ev = StaticElementVector(3, [1,2,0,1,2])
-12012
-
-julia> ev[2]
-0x0000000000000002
-
-julia> collect(Int, ev)
-5-element Vector{Int64}:
- 1
- 2
- 0
- 1
- 2
-```
-"""
-struct StaticElementVector{N,S,C} <: AbstractVector{UInt64}
-    data::NTuple{C,UInt64}
-end
-
-Base.length(::StaticElementVector{N,S,C}) where {N,S,C} = N
-Base.size(::StaticElementVector{N,S,C}) where {N,S,C} = (N,)
-Base.:(==)(x::StaticElementVector, y::AbstractVector) = [x...] == [y...]
-Base.:(==)(x::AbstractVector, y::StaticElementVector) = [x...] == [y...]
-Base.:(==)(x::StaticElementVector{N,S,C}, y::StaticElementVector{N,S,C}) where {N,S,C} = x.data == y.data
-Base.eltype(::Type{<:StaticElementVector}) = UInt64
-@inline function Base.getindex(x::StaticElementVector{N,S,C}, i::Integer) where {N,S,C}
-    @boundscheck i <= N || throw(BoundsError(x, i))
-    i1 = (i-1)*S+1 # start point
-    i2 = i*S # stop point
-    ii1 = (i1-1) ÷ 64
-    ii2 = (i2-1) ÷ 64
-    @inbounds if ii1 == ii2
-        (x.data[ii1+1] >> (i1-ii1*64-1)) & (1<<S - 1)
-    else # cross two integers
-        (x.data[ii1+1] >> (i1-ii1*64-1)) | (x.data[ii2+1] & (1<<(i2-ii1*64) - 1))
-    end
-end
-function StaticElementVector(nflavor::Int, x::AbstractVector)
-    if any(x->x<0 || x>=nflavor, x)
-        throw(ArgumentError("Vector elements must be in range `[0, $(nflavor-1)]`, got $x."))
-    end
-    N = length(x)
-    S = ceil(Int,log2(nflavor)) # sometimes can not devide 64.
-    convert(StaticElementVector{N,S,_nints(N,S)}, x)
-end
-function Base.convert(::Type{StaticElementVector{N,S,C}}, x::AbstractVector) where {N,S,C}
-    @assert length(x) == N
-    data = zeros(UInt64,C)
-    for i=1:N
-        i1 = (i-1)*S+1 # start point
-        i2 = i*S # stop point
-        ii1 = (i1-1) ÷ 64
-        ii2 = (i2-1) ÷ 64
-        @inbounds if ii1 == ii2
-            data[ii1+1] |= UInt64(x[i]) << (i1-ii1*64-1)
-        else # cross two integers
-            data[ii1+1] |= UInt64(x[i]) << (i1-ii1*64-1)
-            data[ii2+1] |= UInt64(x[i]) >> (i2-ii1*64)
-        end
-    end
-    return StaticElementVector{N,S,C}((data...,))
-end
-# joining two element sets
-Base.:(|)(x::StaticElementVector{N,S,C}, y::StaticElementVector{N,S,C}) where {N,S,C} = StaticElementVector{N,S,C}(x.data .| y.data)
-# intersection of two element sets
-Base.:(&)(x::StaticElementVector{N,S,C}, y::StaticElementVector{N,S,C}) where {N,S,C} = StaticElementVector{N,S,C}(x.data .& y.data)
-# difference of two element sets
-Base.:(⊻)(x::StaticElementVector{N,S,C}, y::StaticElementVector{N,S,C}) where {N,S,C} = StaticElementVector{N,S,C}(x.data .⊻ y.data)
-
-"""
-    onehotv(::Type{<:StaticElementVector}, i, v)
-    onehotv(::Type{<:StaticBitVector, i)
-
-Returns a static element vector, with the value at location `i` being `v` (1 if not specified).
-"""
-function onehotv(::Type{StaticElementVector{N,S,C}}, i, v) where {N,S,C}
-    x = zeros(Int,N)
-    x[i] = v
-    return convert(StaticElementVector{N,S,C}, x)
-end
-
-##### BitVectors
-"""
-    StaticBitVector{N,C} = StaticElementVector{N,1,C}
-    StaticBitVector(x::AbstractVector)
-
-Examples
--------------------------------
-```jldoctest; setup=:(using GenericTensorNetworks)
-julia> sb = StaticBitVector([1,0,0,1,1])
-10011
-
-julia> sb[3]
-0x0000000000000000
-
-julia> collect(Int, sb)
-5-element Vector{Int64}:
- 1
- 0
- 0
- 1
- 1
-```
-"""
-const StaticBitVector{N,C} = StaticElementVector{N,1,C}
-@inline function Base.getindex(x::StaticBitVector{N,C}, i::Integer) where {N,C}
-    @boundscheck (i <= N || throw(BoundsError(x, i))) # NOTE: still checks bounds in global scope, why?
-    i -= 1
-    ii = i ÷ 64
-    return @inbounds (x.data[ii+1] >> (i-ii*64)) & 1
-end
-
-function StaticBitVector(x::AbstractVector)
-    N = length(x)
-    StaticBitVector{N,_nints(N,1)}((convert(BitVector, x).chunks...,))
-end
-# to void casting StaticBitVector itself
-StaticBitVector(x::StaticBitVector) = x
-
-function Base.convert(::Type{StaticBitVector{N,C}}, x::AbstractVector) where {N,C}
-    @assert length(x) == N
-    StaticBitVector(x)
-end
-_nints(x,s) = (x*s-1)÷64+1
-
-@generated function Base.zero(::Type{StaticElementVector{N,S,C}}) where {N,S,C}
-    Expr(:call, :(StaticElementVector{$N,$S,$C}), Expr(:tuple, zeros(UInt64, C)...))
-end
-staticfalses(::Type{StaticBitVector{N,C}}) where {N,C} = zero(StaticBitVector{N,C})
-@generated function statictrues(::Type{StaticBitVector{N,C}}) where {N,C}
-    Expr(:call, :(StaticBitVector{$N,$C}), Expr(:tuple, fill(typemax(UInt64), C)...))
-end
-
-onehotv(::Type{StaticBitVector{N,C}}, i, v) where {N,C} = v > 0 ? onehotv(StaticBitVector{N,C}, i) : zero(StaticBitVector{N,C})
-function onehotv(::Type{StaticBitVector{N,C}}, i) where {N,C}
-    x = falses(N)
-    x[i] = true
-    return StaticBitVector(x)
-end
-function Base.iterate(x::StaticElementVector{N,S,C}, state=1) where {N,S,C}
-    if state > N
-        return nothing
-    else
-        return x[state], state+1
-    end
-end
-
-Base.show(io::IO, t::StaticElementVector) = Base.print(io, "$(join(Int.(t), ""))")
-Base.show(io::IO, ::MIME"text/plain", t::StaticElementVector) = Base.show(io, t)
-
-function Base.count_ones(x::StaticBitVector)
-    sum(v->count_ones(v),x.data)
-end
-
-"""
-    hamming_distance(x::StaticBitVector, y::StaticBitVector)
-
-Calculate the Hamming distance between two static bit vectors.
-"""
-hamming_distance(x::StaticBitVector, y::StaticBitVector) = count_ones(x ⊻ y)
-
-"""
-Constructing a static bit vector.
-"""
-macro bv_str(str)
-    return parse_vector(2, str)
-end
-
-function parse_vector(nflavor::Int, str::String)
-    val = Int[]
-    k = 1
-    for each in filter(x -> x != '_', str)
-        if each == '1'
-            push!(val, 1)
-            k += 1
-        elseif each == '0'
-            push!(val, 0)
-            k += 1
-        elseif each == '_'
-            continue
-        else
-            error("expect 0 or 1, got $each at $k-th bit")
-        end
-    end
-    return StaticElementVector(nflavor, val)
-end
diff --git a/src/bounding.jl b/src/bounding.jl
index 01dff20b..59b5fcd3 100644
--- a/src/bounding.jl
+++ b/src/bounding.jl
@@ -158,7 +158,7 @@ function solution_ad(code::Union{NestedEinsum,SlicedEinsum}, @nospecialize(xsa),
     @debug "generating masked tree..."
     mt = generate_masktree(SingleConfig(), code, c, ymask, size_dict)
     config = read_config!(code, mt, Dict())
-    if length(config) !== length(labels(code)) # equal to the # of degree of freedoms
+    if length(config) !== length(uniquelabels(code)) # equal to the # of degree of freedoms
         error("configuration `$(config)` is not fully determined!")
     end
     n, config
diff --git a/src/configurations.jl b/src/configurations.jl
index e4e4706c..e4ed5b1b 100644
--- a/src/configurations.jl
+++ b/src/configurations.jl
@@ -1,95 +1,80 @@
-function config_type(::Type{T}, n, nflavor; all::Bool, tree_storage::Bool) where T
+function config_type(::Type{T}, n, num_flavors; all::Bool, tree_storage::Bool) where T
     if all
         if tree_storage
-            return treeset_type(T, n, nflavor)
+            return treeset_type(T, n, num_flavors)
         else
-            return set_type(T, n, nflavor)
+            return set_type(T, n, num_flavors)
         end
     else
-        return sampler_type(T, n, nflavor)
+        return sampler_type(T, n, num_flavors)
     end
 end
 
 """
-    best_solutions(problem; all=false, usecuda=false, invert=false, tree_storage::Bool=false)
+    largest_solutions(net::GenericTensorNetwork; all=false, usecuda=false, invert=false, tree_storage::Bool=false, T=Float64)
 
-Find optimal solutions with bounding.
-
-* When `all` is true, the program will use set for enumerate all possible solutions, otherwise, it will return one solution for each size.
-* `usecuda` can not be true if you want to use set to enumerate all possible solutions.
-* If `invert` is true, find the minimum.
-* If `tree_storage` is true, use [`SumProductTree`](@ref) as the storage of solutions.
+Find optimal solutions with bounding. Please check [`solutions`](@ref) for argument descriptions.
 """
-function best_solutions(gp::GenericTensorNetwork; all=false, usecuda=false, invert=false, tree_storage::Bool=false, T=Float64)
+function largest_solutions(net::GenericTensorNetwork; all=false, usecuda=false, invert=false, tree_storage::Bool=false, T=Float64)
     if all && usecuda
         throw(ArgumentError("ConfigEnumerator can not be computed on GPU!"))
     end
-    xst = generate_tensors(_x(Tropical{T}; invert), gp)
-    ymask = trues(fill(nflavor(gp), length(getiyv(gp.code)))...)
+    xst = generate_tensors(_x(Tropical{T}; invert), net)
+    ymask = trues(fill(num_flavors(net), length(getiyv(net.code)))...)
     if usecuda
         xst = togpu.(xst)
         ymask = togpu(ymask)
     end
     if all
         # we use `Float64` as default because we want to support weighted graphs.
-        T = config_type(CountingTropical{T,T}, length(labels(gp)), nflavor(gp); all, tree_storage)
-        xs = generate_tensors(_x(T; invert), gp)
-        ret = bounding_contract(AllConfigs{1}(), gp.code, xst, ymask, xs)
+        T = config_type(CountingTropical{T,T}, length(variables(net)), num_flavors(net); all, tree_storage)
+        xs = generate_tensors(_x(T; invert), net)
+        ret = bounding_contract(AllConfigs{1}(), net.code, xst, ymask, xs)
         return invert ? asarray(post_invert_exponent.(ret), ret) : ret
     else
         @assert ndims(ymask) == 0
-        t, res = solution_ad(gp.code, xst, ymask)
+        t, res = solution_ad(net.code, xst, ymask)
         ret = fill(CountingTropical(asscalar(t).n, ConfigSampler(StaticBitVector(map(l->res[l], 1:length(res))))))
         return invert ?
             asarray(post_invert_exponent.(ret), ret) : ret
     end
 end
 
 """
-    solutions(problem, basetype; all, usecuda=false, invert=false, tree_storage::Bool=false)
+    solutions(net::GenericTensorNetwork, ::Type{BT}; all::Bool, usecuda::Bool=false, invert::Bool=false, tree_storage::Bool=false) where BT
 
-General routine to find solutions without bounding,
+Find all solutions, or the solutions with the largest or smallest sizes. Bounding is not supported.
 
-* `basetype` can be a type with counting field,
+### Arguments
+- `net` is a [`GenericTensorNetwork`](@ref) instance.
+- `BT` is the data type used for computing, which can be
   * `CountingTropical{Float64,Float64}` for finding optimal solutions,
   * `Polynomial{Float64, :x}` for enumerating all solutions,
   * `Max2Poly{Float64,Float64}` for optimal and suboptimal solutions.
-* When `all` is true, the program will use set for enumerate all possible solutions, otherwise, it will return one solution for each size.
-* `usecuda` can not be true if you want to use set to enumerate all possible solutions.
-* If `tree_storage` is true, use [`SumProductTree`](@ref) as the storage of solutions.
+
+### Keyword arguments
+- `all` is an indicator of whether to find all solutions or just one of them.
+- `usecuda` is an indicator of whether to use CUDA; it must be false if `all` is true.
+- `invert` is an indicator of whether to invert the size. If true, it finds the minimum instead of the maximum.
+- `tree_storage` is an indicator of whether to use the more compact [`SumProductTree`](@ref) as the storage.
 """
-function solutions(gp::GenericTensorNetwork, ::Type{BT}; all::Bool, usecuda::Bool=false, invert::Bool=false, tree_storage::Bool=false) where BT
+function solutions(net::GenericTensorNetwork, ::Type{BT}; all::Bool, usecuda::Bool=false, invert::Bool=false, tree_storage::Bool=false) where BT
     if all && usecuda
         throw(ArgumentError("ConfigEnumerator can not be computed on GPU!"))
     end
-    T = config_type(BT, length(labels(gp)), nflavor(gp); all, tree_storage)
-    ret = contractx(gp, _x(T; invert); usecuda=usecuda)
+    T = config_type(BT, length(variables(net)), num_flavors(net); all, tree_storage)
+    ret = contractx(net, _x(T; invert); usecuda=usecuda)
     return invert ? asarray(post_invert_exponent.(ret), ret) : ret
 end
 
-"""
-    best2_solutions(problem; all=true, usecuda=false, invert=false, tree_storage::Bool=false)
-
-Finding optimal and suboptimal solutions.
-"""
-best2_solutions(gp::GenericTensorNetwork; all=true, usecuda=false, invert::Bool=false, T=Float64) = solutions(gp, Max2Poly{T,T}; all, usecuda, invert)
-
-function bestk_solutions(gp::GenericTensorNetwork, k::Int; invert::Bool=false, tree_storage::Bool=false, T=Float64)
-    xst = generate_tensors(_x(Tropical{T}; invert), gp)
-    ymask = trues(fill(2, length(getiyv(gp.code)))...)
-    T = config_type(TruncatedPoly{k,T,T}, length(labels(gp)), nflavor(gp); all=true, tree_storage)
-    xs = generate_tensors(_x(T; invert), gp)
-    ret = bounding_contract(AllConfigs{k}(), gp.code, xst, ymask, xs)
+function largestk_solutions(net::GenericTensorNetwork, k::Int; invert::Bool=false, tree_storage::Bool=false, T=Float64)
+    xst = generate_tensors(_x(Tropical{T}; invert), net)
+    ymask = trues(fill(2, length(getiyv(net.code)))...)
+    T = config_type(TruncatedPoly{k,T,T}, length(variables(net)), num_flavors(net); all=true, tree_storage)
+    xs = generate_tensors(_x(T; invert), net)
+    ret = bounding_contract(AllConfigs{k}(), net.code, xst, ymask, xs)
     return invert ?
         asarray(post_invert_exponent.(ret), ret) : ret
 end
 
-"""
-    all_solutions(problem)
-
-Finding all solutions grouped by size.
-e.g. when the problem is [`MaximalIS`](@ref), it computes all maximal independent sets, or the maximal cliques of it complement.
-"""
-all_solutions(gp::GenericTensorNetwork; T=Float64) = solutions(gp, Polynomial{T,:x}, all=true, usecuda=false, tree_storage=false)
-
 # NOTE: do we have more efficient way to compute it?
 # NOTE: doing pair-wise Hamming distance might be biased?
 """
diff --git a/src/deprecate.jl b/src/deprecate.jl
index e2ffe8f3..2cb0c998 100644
--- a/src/deprecate.jl
+++ b/src/deprecate.jl
@@ -1,4 +1,21 @@
 @deprecate Independence(args...; kwargs...) IndependentSet(args...; kwargs...)
 @deprecate MaximalIndependence(args...; kwargs...) MaximalIS(args...; kwargs...)
-@deprecate NoWeight() UnitWeight()
-@deprecate HyperSpinGlass(args...; kwargs...) SpinGlass(args...; kwargs...)
\ No newline at end of file
+@deprecate UnitWeight() error("UnitWeight() is deprecated. Please use UnitWeight(::Int) instead.")
+@deprecate ZeroWeight() error("ZeroWeight() is deprecated. Please use ZeroWeight(::Int) instead.")
+@deprecate HyperSpinGlass(args...; kwargs...) SpinGlass(args...; kwargs...)
+
+@deprecate nflavor(args...; kwargs...) num_flavors(args...; kwargs...)
+@deprecate labels(args...; kwargs...) variables(args...; kwargs...)
+@deprecate get_weights(args...; kwargs...) weights(args...; kwargs...)
+@deprecate chweights(args...; kwargs...) set_weights(args...; kwargs...)
+
+@deprecate spinglass_energy(args...; kwargs...) energy(args...; kwargs...)
+@deprecate unit_disk_graph(args...; kwargs...) UnitDiskGraph(args...; kwargs...)
+@deprecate solve(problem::ConstraintSatisfactionProblem, args...; kwargs...) solve(GenericTensorNetwork(problem), args...; kwargs...)
+
+"""
+const GraphProblem = ConstraintSatisfactionProblem
+
+Deprecated. Use `ConstraintSatisfactionProblem` instead.
+"""
+const GraphProblem = ConstraintSatisfactionProblem
diff --git a/src/fileio.jl b/src/fileio.jl
index e152caf5..4859f95c 100644
--- a/src/fileio.jl
+++ b/src/fileio.jl
@@ -14,20 +14,20 @@ function save_configs(filename, data::ConfigEnumerator{N,S,C}; format::Symbol=:binary)
 end
 
 """
-    load_configs(filename; format=:binary, bitlength=nothing, nflavors=2)
+    load_configs(filename; format=:binary, bitlength=nothing, num_flavors=2)
 
 Load configurations from file `filename`. The format is `:binary` or `:text`.
 If the format is `:binary`, the bitstring length `bitlength` must be specified,
-`nflavors` specifies the degree of freedom.
+and `num_flavors` specifies the degree of freedom.
""" -function load_configs(filename; bitlength=nothing, format::Symbol=:binary, nflavors=2) +function load_configs(filename; bitlength=nothing, format::Symbol=:binary, num_flavors=2) if format == :binary bitlength === nothing && error("you need to specify `bitlength` for reading configurations from binary files.") - S = ceil(Int, log2(nflavors)) + S = ceil(Int, log2(num_flavors)) C = _nints(bitlength, S) return _from_raw_matrix(StaticElementVector{bitlength,S,C}, reshape(reinterpret(UInt64, read(filename)),C,:)) elseif format == :text - return from_plain_matrix(readdlm(filename); nflavors=nflavors) + return from_plain_matrix(readdlm(filename); num_flavors=num_flavors) else error("format must be `:binary` or `:text`, got `:$format`") end @@ -48,8 +48,8 @@ function plain_matrix(x::ConfigEnumerator{N,S,C}) where {N,S,C} return m end -function from_raw_matrix(m; bitlength, nflavors=2) - S = ceil(Int,log2(nflavors)) +function from_raw_matrix(m; bitlength, num_flavors=2) + S = ceil(Int,log2(num_flavors)) C = size(m, 1) T = StaticElementVector{bitlength,S,C} @assert bitlength*S <= C*64 @@ -62,8 +62,8 @@ function _from_raw_matrix(::Type{StaticElementVector{N,S,C}}, m::AbstractMatrix) end return ConfigEnumerator(data) end -function from_plain_matrix(m::Matrix; nflavors=2) - S = ceil(Int,log2(nflavors)) +function from_plain_matrix(m::Matrix; num_flavors=2) + S = ceil(Int,log2(num_flavors)) N = size(m, 1) C = _nints(N, S) T = StaticElementVector{N,S,C} diff --git a/src/graphs.jl b/src/graphs.jl index a0a5c5fb..449664b4 100644 --- a/src/graphs.jl +++ b/src/graphs.jl @@ -15,7 +15,7 @@ Create a masked square lattice graph. """ function square_lattice_graph(mask::AbstractMatrix{Bool}) locs = [(i, j) for i=1:size(mask, 1), j=1:size(mask, 2) if mask[i,j]] - unit_disk_graph(locs, 1.1) + UnitDiskGraph(locs, 1.1) end """ @@ -41,23 +41,7 @@ Create a masked diagonal coupled square lattice graph from a specified `mask`. """ function diagonal_coupled_graph(mask::AbstractMatrix{Bool}) locs = [(i, j) for i=1:size(mask, 1), j=1:size(mask, 2) if mask[i,j]] - unit_disk_graph(locs, 1.5) -end - -""" - unit_disk_graph(locs::AbstractVector, unit::Real) - -Create a unit disk graph with locations specified by `locs` and unit distance `unit`. -""" -function unit_disk_graph(locs::AbstractVector, unit::Real) - n = length(locs) - g = SimpleGraph(n) - for i=1:n, j=i+1:n - if sum(abs2, locs[i] .- locs[j]) < unit ^ 2 - add_edge!(g, i, j) - end - end - return g + UnitDiskGraph(locs, 1.5) end """ diff --git a/src/interfaces.jl b/src/interfaces.jl index 2dd8fe9d..5e9e7aa0 100644 --- a/src/interfaces.jl +++ b/src/interfaces.jl @@ -10,7 +10,7 @@ _asint(x::Type{Single}) = 1 The maximum-K set sizes. e.g. the largest size of the [`IndependentSet`](@ref) problem is also know as the independence number. * The corresponding tensor element type are max-plus tropical number [`Tropical`](@ref) if `K` is `Single` and [`ExtendedTropical`](@ref) if `K` is an integer. -* It is compatible with weighted graph problems. +* It is compatible with weighted Constraint Satisfaction Problems. * BLAS (on CPU) and GPU are supported only if `K` is `Single`, """ struct SizeMax{K} <: AbstractProperty end @@ -25,7 +25,7 @@ The minimum-K set sizes. e.g. the smallest size ofthe [`MaximalIS`](@ref) proble * The corresponding tensor element type are inverted max-plus tropical number [`Tropical`](@ref) if `K` is `Single` and inverted [`ExtendedTropical`](@ref) `K` is an integer. The inverted Tropical number emulates the min-plus tropical number. 
-* It is compatible with weighted graph problems.
+* It is compatible with weighted constraint satisfaction problems.
 * BLAS (on CPU) and GPU are supported only if `K` is `Single`,
 """
 struct SizeMin{K} <: AbstractProperty end
@@ -64,7 +64,7 @@ Counting the number of sets with largest-K size. e.g. for [`IndependentSet`](@ref) problem,
 it counts independent sets of size ``\\alpha(G), \\alpha(G)-1, \\ldots, \\alpha(G)-K+1``.
 
 * The corresponding tensor element type is [`CountingTropical`](@ref) if `K` is `Single`, and [`TruncatedPoly`](@ref)`{K}` if `K` is an integer.
-* Weighted graph problems is only supported if `K` is `Single`.
+* Weighted constraint satisfaction problems are only supported if `K` is `Single`.
 * GPU is supported,
 """
 struct CountingMax{K} <: AbstractProperty end
@@ -78,7 +78,7 @@ max_k(::CountingMax{K}) where K = K
 
 Counting the number of sets with smallest-K size.
 
 * The corresponding tensor element type is inverted [`CountingTropical`](@ref) if `K` is `Single`, and [`TruncatedPoly`](@ref)`{K}` if `K` is an integer.
-* Weighted graph problems is only supported if `K` is `Single`.
+* Weighted constraint satisfaction problems are only supported if `K` is `Single`.
 * GPU is supported,
 """
 struct CountingMin{K} <: AbstractProperty end
@@ -115,7 +115,7 @@ Method Argument
 * It has round-off error.
 * BLAS and GPU are supported, it is the fastest among all methods.
 
-Graph polynomials are not defined for weighted graph problems.
+Graph polynomials are not defined for weighted constraint satisfaction problems.
 """
 struct GraphPolynomial{METHOD} <: AbstractProperty
     kwargs
 end
 graph_polynomial_method(::GraphPolynomial{METHOD}) where METHOD = METHOD
@@ -130,7 +130,7 @@
 Finding single solution for largest-K sizes, e.g. for [`IndependentSet`](@ref) problem, it is one of the maximum independent sets.
 
 * The corresponding data type is [`CountingTropical{Float64,<:ConfigSampler}`](@ref) if `BOUNDED` is `false`, [`Tropical`](@ref) otherwise.
-* Weighted graph problems is supported.
+* Weighted constraint satisfaction problems are supported.
 * GPU is supported,
 
 Keyword Arguments
----------------------------
@@ -148,7 +148,7 @@ max_k(::SingleConfigMax{K}) where K = K
 
 Finding single solution with smallest-K size.
 
 * The corresponding data type is inverted [`CountingTropical{Float64,<:ConfigSampler}`](@ref) if `BOUNDED` is `false`, inverted [`Tropical`](@ref) otherwise.
-* Weighted graph problems is supported.
+* Weighted constraint satisfaction problems are supported.
 * GPU is supported,
 
 Keyword Arguments
----------------------------
@@ -184,7 +184,7 @@
 Find configurations with largest-K sizes, e.g. for [`IndependentSet`](@ref) problem,
 it is finding all independent sets of sizes ``\\alpha(G), \\alpha(G)-1, \\ldots, \\alpha(G)-K+1``.
 
 * The corresponding data type is [`CountingTropical`](@ref)`{Float64,<:ConfigEnumerator}` if `K` is `Single` and [`TruncatedPoly`](@ref)`{K,<:ConfigEnumerator}` if `K` is an integer.
-* Weighted graph problems is only supported if `K` is `Single`.
+* Weighted constraint satisfaction problems are only supported if `K` is `Single`.
 
 Keyword Arguments
----------------------------
@@ -203,7 +203,7 @@ tree_storage(::ConfigsMax{K,BOUNDED,TREESTORAGE}) where {K,BOUNDED,TREESTORAGE}
 
 Find configurations with smallest-K sizes.
 
 * The corresponding data type is inverted [`CountingTropical`](@ref)`{Float64,<:ConfigEnumerator}` if `K` is `Single` and inverted [`TruncatedPoly`](@ref)`{K,<:ConfigEnumerator}` if `K` is an integer.
-* Weighted graph problems is only supported if `K` is `Single`.
+* Weighted constraint satisfaction problems are only supported if `K` is `Single`.
 
 Keyword Arguments
----------------------------
@@ -275,21 +275,19 @@ function solve(gp::GenericTensorNetwork, property::AbstractProperty; T=Float64, usecuda=false)
         res = contractx(gp, pre_invert_exponent(TruncatedPoly(ntuple(i->i == min_k(property) ? one(T) : zero(T), min_k(property)), one(T))); usecuda=usecuda)
         return asarray(post_invert_exponent.(res), res)
     elseif property isa GraphPolynomial
-        ws = get_weights(gp)
-        if !(eltype(ws) <: Integer)
+        # cast integer-valued weights stored in non-integer types back to `Int`
+        if is_weighted(gp) && !(eltype(weights(gp)) <: Integer)
             @warn "Input weights are not Integer types, try casting to weights of `Int64` type..."
-            gp = chweights(gp, Int.(ws))
-            ws = get_weights(gp)
+            gp = set_weights(gp, Int.(weights(gp)))
         end
-        n = length(energy_terms(gp))
-        if ws isa UnitWeight || ws isa ZeroWeight || all(i->all(>=(0), get_weights(gp, i)), 1:n)
+        if !is_weighted(gp) || size_all_positive(gp.problem)
             return graph_polynomial(gp, Val(graph_polynomial_method(property)); usecuda=usecuda, T=T, property.kwargs...)
-        elseif all(i->all(<=(0), get_weights(gp, i)), 1:n)
-            res = graph_polynomial(chweights(gp, -ws), Val(graph_polynomial_method(property)); usecuda=usecuda, T=T, property.kwargs...)
+        elseif is_weighted(gp) && size_all_negative(gp.problem)
+            res = graph_polynomial(set_weights(gp, -weights(gp)), Val(graph_polynomial_method(property)); usecuda=usecuda, T=T, property.kwargs...)
             return asarray(invert_polynomial.(res), res)
         else
             if graph_polynomial_method(property) != :laurent
-                @warn "Weights are not all positive or all negative, switch to using laurent polynomial."
+                @warn "Weights are not all positive or all negative, switching to the Laurent polynomial."
             end
             return graph_polynomial(gp, Val(:laurent); usecuda=usecuda, T=T, property.kwargs...)
         end
@@ -312,23 +310,23 @@ function solve(gp::GenericTensorNetwork, property::AbstractProperty; T=Float64, usecuda=false)
     elseif property isa ConfigsAll
         return solutions(gp, Real; all=true, usecuda=usecuda, tree_storage=tree_storage(property))
     elseif property isa SingleConfigMax{Single,true}
-        return best_solutions(gp; all=false, usecuda=usecuda, T=T)
+        return largest_solutions(gp; all=false, usecuda=usecuda, T=T)
     elseif property isa (SingleConfigMax{K,true} where K)
         @warn "bounded `SingleConfigMax` property for `K != Single` is not implemented. Switching to the unbounded version."
         return solve(gp, SingleConfigMax{max_k(property),false}(); T, usecuda)
     elseif property isa SingleConfigMin{Single,true}
-        return best_solutions(gp; all=false, usecuda=usecuda, invert=true, T=T)
+        return largest_solutions(gp; all=false, usecuda=usecuda, invert=true, T=T)
     elseif property isa (SingleConfigMin{K,true} where K)
         @warn "bounded `SingleConfigMin` property for `K != Single` is not implemented. Switching to the unbounded version."
         return solve(gp, SingleConfigMin{min_k(property),false}(); T, usecuda)
     elseif property isa ConfigsMax{Single,true}
-        return best_solutions(gp; all=true, usecuda=usecuda, tree_storage=tree_storage(property), T=T)
+        return largest_solutions(gp; all=true, usecuda=usecuda, tree_storage=tree_storage(property), T=T)
     elseif property isa ConfigsMin{Single,true}
-        return best_solutions(gp; all=true, usecuda=usecuda, invert=true, tree_storage=tree_storage(property), T=T)
+        return largest_solutions(gp; all=true, usecuda=usecuda, invert=true, tree_storage=tree_storage(property), T=T)
     elseif property isa (ConfigsMax{K,true} where K)
-        return bestk_solutions(gp, max_k(property), tree_storage=tree_storage(property), T=T)
+        return largestk_solutions(gp, max_k(property), tree_storage=tree_storage(property), T=T)
     elseif property isa (ConfigsMin{K,true} where K)
-        return bestk_solutions(gp, min_k(property), invert=true, tree_storage=tree_storage(property), T=T)
+        return largestk_solutions(gp, min_k(property), invert=true, tree_storage=tree_storage(property), T=T)
     else
         error("unknown property: `$property`.")
     end
@@ -362,10 +360,8 @@ function assert_solvable(problem, property::CountingMin)
     end
 end
 function has_noninteger_weights(problem::GenericTensorNetwork)
-    for i in 1:length(energy_terms(problem))
-        if any(!isinteger, get_weights(problem, i))
-            return true
-        end
+    if ProblemReductions.is_weighted(problem.problem) && any(!isinteger, weights(problem))
+        return true
     end
     return false
 end
@@ -394,7 +390,7 @@ Memory estimation in number of bytes to compute certain `property` of a `problem`.
 
 `T` is the base type.
 """
 function estimate_memory(problem::GenericTensorNetwork, property::AbstractProperty; T=Float64)::Real
-    _estimate_memory(tensor_element_type(T, length(labels(problem)), nflavor(problem), property), problem)
+    _estimate_memory(tensor_element_type(T, length(variables(problem)), num_flavors(problem), property), problem)
 end
 function estimate_memory(problem::GenericTensorNetwork, property::Union{SingleConfigMax{K,BOUNDED},SingleConfigMin{K,BOUNDED}}; T=Float64) where {K, BOUNDED}
     tc, sc, rw = contraction_complexity(problem.code, _size_dict(problem))
@@ -402,30 +398,30 @@ function estimate_memory(problem::GenericTensorNetwork, property::Union{SingleConfigMax{K,BOUNDED},SingleConfigMin{K,BOUNDED}}; T=Float64) where {K, BOUNDED}
     if K === Single && BOUNDED
         return ceil(Int, exp2(rw - 1)) * sizeof(Tropical{T})
     elseif K === Single && !BOUNDED
-        n, nf = length(labels(problem)), nflavor(problem)
+        n, nf = length(variables(problem)), num_flavors(problem)
         return peak_memory(problem.code, _size_dict(problem)) * (sizeof(tensor_element_type(T, n, nf, property)))
     else  # NOTE: the integer `K` case does not respect bounding
-        n, nf = length(labels(problem)), nflavor(problem)
+        n, nf = length(variables(problem)), num_flavors(problem)
         TT = tensor_element_type(T, n, nf, property)
         return peak_memory(problem.code, _size_dict(problem)) * (sizeof(tensor_element_type(T, n, nf, SingleConfigMax{Single,BOUNDED}())) * K + sizeof(TT))
     end
 end
 function estimate_memory(problem::GenericTensorNetwork, ::GraphPolynomial{:polynomial}; T=Float64)
     # this is the upper bound
-    return peak_memory(problem.code, _size_dict(problem)) * (sizeof(T) * length(labels(problem)))
+    return peak_memory(problem.code, _size_dict(problem)) * (sizeof(T) * length(variables(problem)))
 end
 function estimate_memory(problem::GenericTensorNetwork, ::GraphPolynomial{:laurent}; T=Float64)
     # this is the upper bound
-    return peak_memory(problem.code, _size_dict(problem)) * (sizeof(T) * length(labels(problem)))
+    return peak_memory(problem.code,
+        _size_dict(problem)) * (sizeof(T) * length(variables(problem)))
 end
 function estimate_memory(problem::GenericTensorNetwork, ::Union{SizeMax{K},SizeMin{K}}; T=Float64) where K
     return peak_memory(problem.code, _size_dict(problem)) * (sizeof(T) * _asint(K))
 end
 
 function _size_dict(problem)
-    lbs = labels(problem)
-    nf = nflavor(problem)
+    lbs = variables(problem)
+    nf = num_flavors(problem)
     return Dict([lb=>nf for lb in lbs])
 end
 
@@ -444,23 +440,23 @@ for (PROP, ET) in [
     (:(GraphPolynomial{:laurent}), :(LaurentPolynomial{T, :x})),
     (:(GraphPolynomial{:fft}), :(Complex{T})), (:(GraphPolynomial{:finitefield}), :(Mod{N,Int32} where N))
     ]
-    @eval tensor_element_type(::Type{T}, n::Int, nflavor::Int, ::$PROP) where {T} = $ET
+    @eval tensor_element_type(::Type{T}, n::Int, num_flavors::Int, ::$PROP) where {T} = $ET
 end
 
 for (PROP, ET) in [
     (:(SizeMax{K}), :(ExtendedTropical{K,Tropical{T}})), (:(SizeMin{K}), :(ExtendedTropical{K,Tropical{T}})),
     (:(CountingMax{K}), :(TruncatedPoly{K,T,T})), (:(CountingMin{K}), :(TruncatedPoly{K,T,T})),
     ]
-    @eval tensor_element_type(::Type{T}, n::Int, nflavor::Int, ::$PROP) where {T, K} = $ET
+    @eval tensor_element_type(::Type{T}, n::Int, num_flavors::Int, ::$PROP) where {T, K} = $ET
 end
 
-function tensor_element_type(::Type{T}, n::Int, nflavor::Int, ::PROP) where {T, K, BOUNDED, PROP<:Union{SingleConfigMax{K,BOUNDED},SingleConfigMin{K,BOUNDED}}}
+function tensor_element_type(::Type{T}, n::Int, num_flavors::Int, ::PROP) where {T, K, BOUNDED, PROP<:Union{SingleConfigMax{K,BOUNDED},SingleConfigMin{K,BOUNDED}}}
     if K === Single && BOUNDED
         return Tropical{T}
     elseif K === Single && !BOUNDED
-        return sampler_type(CountingTropical{T,T}, n, nflavor)
+        return sampler_type(CountingTropical{T,T}, n, num_flavors)
     else  # NOTE: the integer `K` case does not respect bounding
-        return sampler_type(ExtendedTropical{K,CountingTropical{T,T}}, n, nflavor)
+        return sampler_type(ExtendedTropical{K,CountingTropical{T,T}}, n, num_flavors)
     end
 end
 
@@ -468,15 +464,22 @@ for (PROP, ET) in [
     (:(ConfigsMax{Single}), :(CountingTropical{T,T})), (:(ConfigsMin{Single}), :(CountingTropical{T,T})),
     (:(ConfigsAll), :(Real))
     ]
-    @eval function tensor_element_type(::Type{T}, n::Int, nflavor::Int, ::$PROP) where {T}
-        set_type($ET, n, nflavor)
+    @eval function tensor_element_type(::Type{T}, n::Int, num_flavors::Int, ::$PROP) where {T}
+        set_type($ET, n, num_flavors)
     end
 end
 
 for (PROP, ET) in [
     (:(ConfigsMax{K}), :(TruncatedPoly{K,T,T})), (:(ConfigsMin{K}), :(TruncatedPoly{K,T,T})),
     ]
-    @eval function tensor_element_type(::Type{T}, n::Int, nflavor::Int, ::$PROP) where {T, K}
-        set_type($ET, n, nflavor)
+    @eval function tensor_element_type(::Type{T}, n::Int, num_flavors::Int, ::$PROP) where {T, K}
+        set_type($ET, n, num_flavors)
     end
 end
+
+for GP in [:IndependentSet, :MaxCut, :DominatingSet, :Satisfiability, :Coloring, :PaintShop, :SetCovering, :SetPacking, :MaximalIS, :Matching]
+    @eval size_all_negative(problem::$(GP)) = all(<=(0), weights(problem))
+    @eval size_all_positive(problem::$(GP)) = all(>=(0), weights(problem))
+end
+size_all_negative(::SpinGlass) = false
+size_all_positive(::SpinGlass) = false
diff --git a/src/networks/networks.jl b/src/networks.jl
similarity index 51%
rename from src/networks/networks.jl
rename to src/networks.jl
index 6d242d95..41fefb9c 100644
--- a/src/networks/networks.jl
+++ b/src/networks.jl
@@ -1,33 +1,20 @@
-"""
-    GraphProblem
-
-The abstract base type of graph problems.
-""" -abstract type GraphProblem end -function generate_tensors(x::T, m::GraphProblem) where T - tensors = [energy_tensors(x, m)..., extra_tensors(T, m)...] - ixs = [energy_terms(m)..., extra_terms(m)...] - return add_labels!(tensors, ixs, labels(m)) +function generate_tensors(x::T, m::ConstraintSatisfactionProblem) where T + terms = ProblemReductions.size_terms(m) + tensors = [reshape(map(s -> !s.is_valid ? zero(x) : _pow(x, s.size), t.solution_sizes), ntuple(i->num_flavors(m), length(t.variables))) for t in terms] + ixs = [t.variables for t in terms] + return add_labels!(tensors, ixs, variables(m)) end -function rawcode(problem::GraphProblem; openvertices=()) - ixs = [energy_terms(problem)..., extra_terms(problem)...] +function rawcode(problem::ConstraintSatisfactionProblem; openvertices=()) + ixs = [t.variables for t in ProblemReductions.size_terms(problem)] LT = eltype(eltype(ixs)) return DynamicEinCode(ixs, collect(LT, openvertices)) # labels for edge tensors end -struct UnitWeight end -Base.getindex(::UnitWeight, i) = 1 -Base.eltype(::UnitWeight) = Int - -struct ZeroWeight end -Base.getindex(::ZeroWeight, i) = 0 -Base.eltype(::ZeroWeight) = Int - """ $TYPEDEF - GenericTensorNetwork(problem::GraphProblem; openvertices=(), fixedvertices=Dict(), optimizer=GreedyMethod()) + GenericTensorNetwork(problem::ConstraintSatisfactionProblem; openvertices=(), fixedvertices=Dict(), optimizer=GreedyMethod()) -The generic tensor network that generated from a [`GraphProblem`](@ref). +The generic tensor network that generated from a [`ConstraintSatisfactionProblem`](@ref). Positional arguments ------------------------------- @@ -40,11 +27,23 @@ struct GenericTensorNetwork{CFG, CT, LT} code::CT fixedvertices::Dict{LT,Int} end -function GenericTensorNetwork(problem::GraphProblem; openvertices=(), fixedvertices=Dict(), optimizer=GreedyMethod()) +function GenericTensorNetwork(problem::ConstraintSatisfactionProblem; openvertices=(), fixedvertices=Dict(), optimizer=GreedyMethod()) rcode = rawcode(problem; openvertices) - code = _optimize_code(rcode, uniformsize_fix(rcode, nflavor(problem), fixedvertices), optimizer, MergeVectors()) + code = _optimize_code(rcode, uniformsize_fix(rcode, num_flavors(problem), fixedvertices), optimizer, MergeVectors()) return GenericTensorNetwork(problem, code, Dict{labeltype(code),Int}(fixedvertices)) end +# a unified interface to optimize the contraction code +_optimize_code(code, size_dict, optimizer::Nothing, simplifier) = code +_optimize_code(code, size_dict, optimizer, simplifier) = optimize_code(code, size_dict, optimizer, simplifier) + +function Base.show(io::IO, tn::GenericTensorNetwork) + println(io, "$(typeof(tn))") + println(io, "- open vertices: $(getiyv(tn.code))") + println(io, "- fixed vertices: $(tn.fixedvertices)") + tc, sc, rw = contraction_complexity(tn) + print(io, "- contraction time = 2^$(round(tc; digits=3)), space = 2^$(round(sc; digits=3)), read-write = 2^$(round(rw; digits=3))") +end +Base.show(io::IO, ::MIME"text/plain", tn::GenericTensorNetwork) = Base.show(io, tn) function generate_tensors(x::T, tn::GenericTensorNetwork) where {T} ixs = getixsv(tn.code) isempty(ixs) && return Array{T}[] @@ -52,57 +51,10 @@ function generate_tensors(x::T, tn::GenericTensorNetwork) where {T} return select_dims(tensors, ixs, fixedvertices(tn)) end -######## Interfaces for graph problems ########## -""" - get_weights(problem::GraphProblem[, i::Int]) -> Vector - get_weights(problem::GenericTensorNetwork[, i::Int]) -> Vector - -The weights for the `problem` or the 
-weights for the degree of freedom specified by the `i`-th term if a second argument is provided.
-Weights are associated with [`energy_terms`](@ref) in the graph problem.
-In graph polynomial, integer weights can be the exponents.
-"""
-function get_weights end
-get_weights(gp::GenericTensorNetwork) = get_weights(gp.problem)
-get_weights(gp::GenericTensorNetwork, i::Int) = get_weights(gp.problem, i)
-
-"""
-    chweights(problem::GraphProblem, weights) -> GraphProblem
-    chweights(problem::GenericTensorNetwork, weights) -> GenericTensorNetwork
-
-Change the weights for the `problem` and return a new problem instance.
-Weights are associated with [`energy_terms`](@ref) in the graph problem.
-In graph polynomial, integer weights can be the exponents.
-"""
-function chweights end
-chweights(gp::GenericTensorNetwork, weights) = GenericTensorNetwork(chweights(gp.problem, weights), gp.code, gp.fixedvertices)
-
-"""
-    labels(problem::GraphProblem) -> Vector
-    labels(problem::GenericTensorNetwork) -> Vector
-
-The labels of a graph problem is defined as the degrees of freedoms in the graph problem.
-e.g. for the maximum independent set problems, they are the indices of vertices: 1, 2, 3...,
-while for the max cut problem, they are the edges.
-"""
-labels(gp::GenericTensorNetwork) = labels(gp.problem)
-
-"""
-    energy_terms(problem::GraphProblem) -> Vector
-    energy_terms(problem::GenericTensorNetwork) -> Vector
-
-The energy terms of a graph problem is defined as the tensor labels that carrying local energies (or weights) in the graph problem.
-"""
-function energy_terms end
-energy_terms(gp::GenericTensorNetwork) = energy_terms(gp.problem)
-
-"""
-    extra_terms(problem::GraphProblem) -> Vector
-    extra_terms(problem::GenericTensorNetwork) -> Vector
-
-The extra terms of a graph problem is defined as the tensor labels that not carrying local energies (or weights) in the graph problem.
-"""
-function extra_terms end
-extra_terms(gp::GenericTensorNetwork) = extra_terms(gp.problem)
+variables(gp::GenericTensorNetwork) = variables(gp.problem)
+set_weights(gp::GenericTensorNetwork, weights) = GenericTensorNetwork(set_weights(gp.problem, weights), gp.code, gp.fixedvertices)
+weights(gp::GenericTensorNetwork) = weights(gp.problem)
+is_weighted(gp::GenericTensorNetwork) = is_weighted(gp.problem)
 
 """
     fixedvertices(tnet::GenericTensorNetwork) -> Dict
 
 Returns the fixed vertices in the graph problem, which is a dictionary specifying the fixed dimensions.
 """
 fixedvertices(gtn::GenericTensorNetwork) = gtn.fixedvertices
 
 """
-    flavors(::Type{<:GraphProblem}) -> Vector
     flavors(::Type{<:GenericTensorNetwork}) -> Vector
 
 It returns a vector of integers as the flavors of a degree of freedom.
 Its size is the same as the degree of freedom on a single vertex/edge.
 """
-flavors(::GT) where GT<:GraphProblem = flavors(GT)
-flavors(::GenericTensorNetwork{GT}) where GT<:GraphProblem = flavors(GT)
+flavors(::GenericTensorNetwork{GT}) where GT<:ConstraintSatisfactionProblem = flavors(GT)
 
 """
-    nflavor(::Type{<:GraphProblem}) -> Int
-    nflavor(::Type{<:GenericTensorNetwork}) -> Int
-    nflavor(::GT) where GT<:GraphProblem -> Int
-    nflavor(::GenericTensorNetwork{GT}) where GT<:GraphProblem -> Int
+    num_flavors(::GenericTensorNetwork{GT}) where GT<:ConstraintSatisfactionProblem -> Int
 
 Bond size is equal to the number of flavors.
""" -nflavor(::Type{GT}) where GT = length(flavors(GT)) -nflavor(::Type{GenericTensorNetwork{GT}}) where GT = nflavor(GT) -nflavor(::GT) where GT<:GraphProblem = nflavor(GT) -nflavor(::GenericTensorNetwork{GT}) where GT<:GraphProblem = nflavor(GT) +num_flavors(::GenericTensorNetwork{GT}) where GT<:ConstraintSatisfactionProblem = num_flavors(GT) """ generate_tensors(func, problem::GenericTensorNetwork) @@ -151,7 +95,17 @@ julia> gp = GenericTensorNetwork(IndependentSet(smallgraph(:petersen))); julia> getixsv(gp.code) 25-element Vector{Vector{Int64}}: - [1] + [1, 2] + [1, 5] + [1, 6] + [2, 3] + [2, 7] + [3, 4] + [3, 8] + [4, 5] + [4, 9] + [5, 10] + ⋮ [2] [3] [4] @@ -161,16 +115,6 @@ julia> getixsv(gp.code) [8] [9] [10] - ⋮ - [3, 8] - [4, 5] - [4, 9] - [5, 10] - [6, 8] - [6, 9] - [7, 9] - [7, 10] - [8, 10] julia> gp.code(GenericTensorNetworks.generate_tensors(Tropical(1.0), gp)...) 0-dimensional Array{Tropical{Float64}, 0}: @@ -179,31 +123,17 @@ julia> gp.code(GenericTensorNetworks.generate_tensors(Tropical(1.0), gp)...) """ function generate_tensors end -# requires field `code` - -include("IndependentSet.jl") -include("MaximalIS.jl") -include("MaxCut.jl") -include("Matching.jl") -include("Coloring.jl") -include("PaintShop.jl") -include("Satisfiability.jl") -include("DominatingSet.jl") -include("SetPacking.jl") -include("SetCovering.jl") -include("OpenPitMining.jl") -include("SpinGlass.jl") - # forward the time, space and readwrite complexity -OMEinsum.contraction_complexity(gp::GenericTensorNetwork) = contraction_complexity(gp.code, uniformsize(gp.code, nflavor(gp))) +OMEinsum.contraction_complexity(gp::GenericTensorNetwork) = contraction_complexity(gp.code, uniformsize(gp.code, num_flavors(gp))) # the following two interfaces will be deprecated -OMEinsum.timespace_complexity(gp::GenericTensorNetwork) = timespace_complexity(gp.code, uniformsize(gp.code, nflavor(gp))) -OMEinsum.timespacereadwrite_complexity(gp::GenericTensorNetwork) = timespacereadwrite_complexity(gp.code, uniformsize(gp.code, nflavor(gp))) +OMEinsum.timespace_complexity(gp::GenericTensorNetwork) = timespace_complexity(gp.code, uniformsize(gp.code, num_flavors(gp))) +OMEinsum.timespacereadwrite_complexity(gp::GenericTensorNetwork) = timespacereadwrite_complexity(gp.code, uniformsize(gp.code, num_flavors(gp))) # contract the graph tensor network function contractx(gp::GenericTensorNetwork, x; usecuda=false) @debug "generating tensors for x = `$x` ..." xs = generate_tensors(x, gp) + length(xs) == 0 && return asarray(one(x)) # empty tensor network @debug "contracting tensors ..." if usecuda gp.code([togpu(x) for x in xs]...) @@ -255,4 +185,43 @@ function _pow(x::LaurentPolynomial{BS,X}, i) where {BS,X} @assert length(x.coeffs) == 1 return LaurentPolynomial(x.coeffs .^ i, x.order[]*i) end -end \ No newline at end of file +end + +####### Extra utilities ####### +"""Upload a tensor network to GPU, need `using CUDA` to activate this extension.""" +function togpu end + +""" + mis_compactify!(tropicaltensor; potential=nothing) + +Compactify tropical tensor for maximum independent set problem. It will eliminate +some entries by setting them to zero, by the criteria that removing these entry +does not change the MIS size of its parent graph (reference to be added). 
+ +### Arguments +- `tropicaltensor::AbstractArray{T}`: the tropical tensor + +### Keyword arguments +- `potential=nothing`: the maximum possible MIS contribution from each open vertex +""" +function mis_compactify!(a::AbstractArray{T, N}; potential=nothing) where {T <: TropicalTypes, N} + @assert potential === nothing || length(potential) == N "got unexpected potential length: $(length(potential)), expected $N" + for (ind_a, val_a) in enumerate(a) + for (ind_b, val_b) in enumerate(a) + bs_a = ind_a - 1 + bs_b = ind_b - 1 + if worse_than(bs_a, bs_b, val_a.n, val_b.n, potential) + @inbounds a[ind_a] = zero(T) + end + end + end + return a +end +function worse_than(bs_a::Integer, bs_b::Integer, val_a::T, val_b::T, potential::AbstractVector) where T + bs_a != bs_b && val_a + sum(k->readbit(bs_a, k) < readbit(bs_b, k) ? potential[k] : zero(T), 1:length(potential)) <= val_b +end +function worse_than(bs_a::Integer, bs_b::Integer, val_a::T, val_b::T, ::Nothing) where T + bs_a != bs_b && val_a <= val_b && (bs_b & bs_a) == bs_b +end +readbit(bs::Integer, k::Integer) = (bs >> (k-1)) & 1 + diff --git a/src/networks/Coloring.jl b/src/networks/Coloring.jl deleted file mode 100644 index 7ed7b682..00000000 --- a/src/networks/Coloring.jl +++ /dev/null @@ -1,55 +0,0 @@ -""" -$(TYPEDEF) - Coloring{K}(graph; weights=UnitWeight()) - -The [Vertex Coloring](https://queracomputing.github.io/GenericTensorNetworks.jl/dev/generated/Coloring/) problem. - -Positional arguments -------------------------------- -* `graph` is the problem graph. -* `weights` are associated with the edges of the `graph`, default to `UnitWeight()`. -""" -struct Coloring{K, WT<:Union{UnitWeight, Vector}} <: GraphProblem - graph::SimpleGraph{Int} - weights::WT - function Coloring{K}(graph::SimpleGraph, weights::Union{UnitWeight, Vector}=UnitWeight()) where {K} - @assert weights isa UnitWeight || length(weights) == ne(graph) - new{K, typeof(weights)}(graph, weights) - end -end - -flavors(::Type{<:Coloring{K}}) where K = collect(0:K-1) -energy_terms(gp::Coloring) = [[minmax(e.src,e.dst)...] for e in Graphs.edges(gp.graph)] -energy_tensors(x::T, c::Coloring{K}) where {K, T} = [_pow.(coloringb(x, K), get_weights(c, i)) for i=1:ne(c.graph)] -extra_terms(gp::Coloring) = [[i] for i in 1:nv(gp.graph)] -extra_tensors(::Type{T}, c::Coloring{K}) where {K,T} = [coloringv(T, K) for i=1:nv(c.graph)] -labels(gp::Coloring) = [1:nv(gp.graph)...] - -# weights interface -get_weights(c::Coloring) = c.weights -get_weights(c::Coloring{K}, i::Int) where K = fill(c.weights[i], K) -chweights(c::Coloring{K}, weights) where K = Coloring{K}(c.graph, weights) - -# coloring bond tensor -function coloringb(x::T, k::Int) where T - x = fill(x, k, k) - for i=1:k - x[i,i] = one(T) - end - return x -end -# coloring vertex tensor -coloringv(::Type{T}, k::Int) where T = fill(one(T), k) - -# utilities -""" - is_vertex_coloring(graph::SimpleGraph, config) - -Returns true if the coloring specified by config is a valid one, i.e. does not violate the contraints of vertices of an edges having different colors. 
-""" -function is_vertex_coloring(graph::SimpleGraph, config) - for e in edges(graph) - config[e.src] == config[e.dst] && return false - end - return true -end diff --git a/src/networks/DominatingSet.jl b/src/networks/DominatingSet.jl deleted file mode 100644 index ae8b00f2..00000000 --- a/src/networks/DominatingSet.jl +++ /dev/null @@ -1,47 +0,0 @@ -""" -$TYPEDEF - DominatingSet(graph; weights=UnitWeight()) - -The [dominating set](https://queracomputing.github.io/GenericTensorNetworks.jl/dev/generated/DominatingSet/) problem. - -Positional arguments -------------------------------- -* `graph` is the problem graph. -* `weights` are associated with the vertices of the `graph`, default to `UnitWeight()`. -""" -struct DominatingSet{WT<:Union{UnitWeight, Vector}} <: GraphProblem - graph::SimpleGraph{Int} - weights::WT - function DominatingSet(g::SimpleGraph, weights::Union{UnitWeight, Vector}=UnitWeight()) - @assert weights isa UnitWeight || length(weights) == nv(g) - new{typeof(weights)}(g, weights) - end -end - -flavors(::Type{<:DominatingSet}) = [0, 1] -energy_terms(gp::DominatingSet) = [[Graphs.neighbors(gp.graph, v)..., v] for v in Graphs.vertices(gp.graph)] -energy_tensors(x::T, c::DominatingSet) where T = [dominating_set_tensor(_pow.(Ref(x), get_weights(c, i))..., degree(c.graph, i)+1) for i=1:nv(c.graph)] -extra_terms(::DominatingSet) = Vector{Int}[] -extra_tensors(::Type{T}, ::DominatingSet) where T = Array{T}[] -labels(gp::DominatingSet) = [1:nv(gp.graph)...] - -# weights interface -get_weights(c::DominatingSet) = c.weights -get_weights(gp::DominatingSet, i::Int) = [0, gp.weights[i]] -chweights(c::DominatingSet, weights) = DominatingSet(c.graph, weights) - -function dominating_set_tensor(a::T, b::T, d::Int) where T - t = zeros(T, fill(2, d)...) - for i = 2:1<<(d-1) - t[i] = a - end - t[1<<(d-1)+1:end] .= Ref(b) - return t -end - -""" - is_dominating_set(g::SimpleGraph, config) - -Return true if `config` (a vector of boolean numbers as the mask of vertices) is a dominating set of graph `g`. -""" -is_dominating_set(g::SimpleGraph, config) = all(w->config[w] == 1 || any(v->!iszero(config[v]), neighbors(g, w)), Graphs.vertices(g)) \ No newline at end of file diff --git a/src/networks/IndependentSet.jl b/src/networks/IndependentSet.jl deleted file mode 100644 index 135bba60..00000000 --- a/src/networks/IndependentSet.jl +++ /dev/null @@ -1,94 +0,0 @@ -""" -$TYPEDEF - -The [independent set problem](https://queracomputing.github.io/GenericTensorNetworks.jl/dev/generated/IndependentSet/) in graph theory. - -Positional arguments -------------------------------- -* `graph` is the problem graph. -* `weights` are associated with the vertices of the `graph`, default to `UnitWeight()`. 
- -Examples -------------------------------- -```jldoctest; setup=:(using Random; Random.seed!(2)) -julia> using GenericTensorNetworks, Graphs - -julia> problem = IndependentSet(smallgraph(:petersen)); - -julia> net = GenericTensorNetwork(problem); - -julia> solve(net, ConfigsMax()) -0-dimensional Array{CountingTropical{Float64, ConfigEnumerator{10, 1, 1}}, 0}: -(4.0, {1010000011, 1001001100, 0100100110, 0101010001, 0010111000})ₜ -``` -""" -struct IndependentSet{WT} <: GraphProblem - graph::SimpleGraph{Int} - weights::WT - function IndependentSet(graph::SimpleGraph{Int}, weights::WT=UnitWeight()) where WT - @assert weights isa UnitWeight || length(weights) == nv(graph) "got unexpected weights for $(nv(graph))-vertex graph: $weights" - new{WT}(graph, weights) - end -end -flavors(::Type{<:IndependentSet}) = [0, 1] -energy_terms(gp::IndependentSet) = [[i] for i in 1:nv(gp.graph)] -energy_tensors(x::T, c::IndependentSet) where T = [misv(_pow.(Ref(x), get_weights(c, i))) for i=1:nv(c.graph)] -extra_terms(gp::IndependentSet) = [[minmax(e.src,e.dst)...] for e in Graphs.edges(gp.graph)] -extra_tensors(::Type{T}, gp::IndependentSet) where T = [misb(T) for i=1:ne(gp.graph)] -labels(gp::IndependentSet) = [1:nv(gp.graph)...] - -# weights interface -get_weights(c::IndependentSet) = c.weights -get_weights(gp::IndependentSet, i::Int) = [0, gp.weights[i]] -chweights(c::IndependentSet, weights) = IndependentSet(c.graph, weights) - -function misb(::Type{T}, n::Integer=2) where T - res = zeros(T, fill(2, n)...) - res[1] = one(T) - for i=1:n - res[1+1<<(i-1)] = one(T) - end - return res -end -misv(vals) = vals - -""" - mis_compactify!(tropicaltensor; potential=nothing) - -Compactify tropical tensor for maximum independent set problem. It will eliminate -some entries by setting them to zero, by the criteria that removing these entry -does not change the MIS size of its parent graph (reference to be added). - -### Arguments -- `tropicaltensor::AbstractArray{T}`: the tropical tensor - -### Keyword arguments -- `potential=nothing`: the maximum possible MIS contribution from each open vertex -""" -function mis_compactify!(a::AbstractArray{T, N}; potential=nothing) where {T <: TropicalTypes, N} - @assert potential === nothing || length(potential) == N "got unexpected potential length: $(length(potential)), expected $N" - for (ind_a, val_a) in enumerate(a) - for (ind_b, val_b) in enumerate(a) - bs_a = ind_a - 1 - bs_b = ind_b - 1 - if worse_than(bs_a, bs_b, val_a.n, val_b.n, potential) - @inbounds a[ind_a] = zero(T) - end - end - end - return a -end -function worse_than(bs_a::Integer, bs_b::Integer, val_a::T, val_b::T, potential::AbstractVector) where T - bs_a != bs_b && val_a + sum(k->readbit(bs_a, k) < readbit(bs_b, k) ? potential[k] : zero(T), 1:length(potential)) <= val_b -end -function worse_than(bs_a::Integer, bs_b::Integer, val_a::T, val_b::T, ::Nothing) where T - bs_a != bs_b && val_a <= val_b && (bs_b & bs_a) == bs_b -end -readbit(bs::Integer, k::Integer) = (bs >> (k-1)) & 1 - -""" - is_independent_set(g::SimpleGraph, config) - -Return true if `config` (a vector of boolean numbers as the mask of vertices) is an independent set of graph `g`. 
-""" -is_independent_set(g::SimpleGraph, config) = !any(e->config[e.src] == 1 && config[e.dst] == 1, edges(g)) diff --git a/src/networks/Matching.jl b/src/networks/Matching.jl deleted file mode 100644 index 9929443d..00000000 --- a/src/networks/Matching.jl +++ /dev/null @@ -1,66 +0,0 @@ -""" -$TYPEDEF - -The [Vertex matching](https://queracomputing.github.io/GenericTensorNetworks.jl/dev/generated/Matching/) problem. - -Positional arguments -------------------------------- -* `graph` is the problem graph. -* `weights` are associated with the edges of the `graph`. -""" -struct Matching{WT<:Union{UnitWeight,Vector}} <: GraphProblem - graph::SimpleGraph{Int} - weights::WT - function Matching(g::SimpleGraph, weights::Union{UnitWeight, Vector}=UnitWeight()) - @assert weights isa UnitWeight || length(weights) == ne(g) - new{typeof(weights)}(g, weights) - end -end - -flavors(::Type{<:Matching}) = [0, 1] -energy_terms(gp::Matching) = [[minmax(src(s), dst(s))] for s in edges(gp.graph)] # edge tensors -energy_tensors(x::T, c::Matching) where T = [_pow.(Ref(x), get_weights(c, i)) for i=1:ne(c.graph)] -extra_terms(gp::Matching) = [[minmax(i, j) for j in neighbors(gp.graph, i)] for i in Graphs.vertices(gp.graph)] -extra_tensors(::Type{T}, gp::Matching) where T = [match_tensor(T, degree(gp.graph, i)) for i=1:nv(gp.graph)] -labels(gp::Matching) = getindex.(energy_terms(gp)) - -# weights interface -get_weights(c::Matching) = c.weights -get_weights(m::Matching, i::Int) = [0, m.weights[i]] -chweights(c::Matching, weights) = Matching(c.graph, weights) - -function match_tensor(::Type{T}, n::Int) where T - t = zeros(T, fill(2, n)...) - for ci in CartesianIndices(t) - if sum(ci.I .- 1) <= 1 - t[ci] = one(T) - end - end - return t -end - -""" - is_matching(graph::SimpleGraph, config) - -Returns true if `config` is a valid matching on `graph`, and `false` if a vertex is double matched. -`config` is a vector of boolean variables, which has one to one correspondence with `edges(graph)`. -""" -function is_matching(g::SimpleGraph, config) - @assert ne(g) == length(config) - edges_mask = zeros(Bool, nv(g)) - for (e, c) in zip(edges(g), config) - if !iszero(c) - if edges_mask[e.src] - @debug "Vertex $(e.src) is double matched!" - return false - end - if edges_mask[e.dst] - @debug "Vertex $(e.dst) is double matched!" - return false - end - edges_mask[e.src] = true - edges_mask[e.dst] = true - end - end - return true -end \ No newline at end of file diff --git a/src/networks/MaxCut.jl b/src/networks/MaxCut.jl deleted file mode 100644 index 2acf970e..00000000 --- a/src/networks/MaxCut.jl +++ /dev/null @@ -1,58 +0,0 @@ -""" -$TYPEDEF - -The [cutting](https://queracomputing.github.io/GenericTensorNetworks.jl/dev/generated/MaxCut/) problem. - -Positional arguments -------------------------------- -* `graph` is the problem graph. -* `edge_weights` are associated with the edges of the `graph`. -* `vertex_weights` are associated with the vertices of the `graph`. 
-""" -struct MaxCut{WT1<:Union{UnitWeight, Vector},WT2<:Union{ZeroWeight, Vector}} <: GraphProblem - graph::SimpleGraph{Int} - edge_weights::WT1 - vertex_weights::WT2 - function MaxCut(g::SimpleGraph, - edge_weights::Union{UnitWeight, Vector}=UnitWeight(), - vertex_weights::Union{ZeroWeight, Vector}=ZeroWeight()) - @assert edge_weights isa UnitWeight || length(edge_weights) == ne(g) - @assert vertex_weights isa ZeroWeight || length(vertex_weights) == nv(g) - new{typeof(edge_weights), typeof(vertex_weights)}(g, edge_weights, vertex_weights) - end -end - -flavors(::Type{<:MaxCut}) = [0, 1] -# first `ne` indices are for edge weights, last `nv` indices are for vertex weights. -energy_terms(gp::MaxCut) = [[[minmax(e.src,e.dst)...] for e in Graphs.edges(gp.graph)]..., - [[v] for v in Graphs.vertices(gp.graph)]...] -energy_tensors(x::T, c::MaxCut) where T = [[maxcutb(_pow.(Ref(x), get_weights(c, i))...) for i=1:ne(c.graph)]..., - [Ref(x) .^ get_weights(c, i+ne(c.graph)) for i=1:nv(c.graph)]...] -extra_terms(::MaxCut) = Vector{Int}[] -extra_tensors(::Type{T}, ::MaxCut) where T = Array{T}[] -labels(gp::MaxCut) = [1:nv(gp.graph)...] - -# weights interface -get_weights(c::MaxCut) = [[c.edge_weights[i] for i=1:ne(c.graph)]..., [c.vertex_weights[i] for i=1:nv(c.graph)]...] -get_weights(gp::MaxCut, i::Int) = i <= ne(gp.graph) ? [0, gp.edge_weights[i]] : [0, gp.vertex_weights[i-ne(gp.graph)]] -chweights(c::MaxCut, weights) = MaxCut(c.graph, weights[1:ne(c.graph)], weights[ne(c.graph)+1:end]) - -function maxcutb(a, b) - return [a b; b a] -end - -""" - cut_size(g::SimpleGraph, config; edge_weights=UnitWeight(), vertex_weights=ZeroWeight()) - -Compute the cut size for the vertex configuration `config` (an iterator). -""" -function cut_size(g::SimpleGraph, config; edge_weights=UnitWeight(), vertex_weights=ZeroWeight()) - size = zero(promote_type(eltype(vertex_weights), eltype(edge_weights))) - for (i, e) in enumerate(edges(g)) - size += (config[e.src] != config[e.dst]) * edge_weights[i] - end - for v in vertices(g) - size += config[v] * vertex_weights[v] - end - return size -end diff --git a/src/networks/MaximalIS.jl b/src/networks/MaximalIS.jl deleted file mode 100644 index 50c44664..00000000 --- a/src/networks/MaximalIS.jl +++ /dev/null @@ -1,47 +0,0 @@ -""" -$TYPEDEF - -The [maximal independent set](https://queracomputing.github.io/GenericTensorNetworks.jl/dev/generated/MaximalIS/) problem. In the constructor, `weights` are the weights of vertices. - -Positional arguments -------------------------------- -* `graph` is the problem graph. -* `weights` are associated with the vertices of the `graph`. -""" -struct MaximalIS{WT<:Union{UnitWeight, Vector}} <: GraphProblem - graph::SimpleGraph - weights::WT - function MaximalIS(g::SimpleGraph, weights::Union{UnitWeight, Vector}=UnitWeight()) - @assert weights isa UnitWeight || length(weights) == nv(g) - new{typeof(weights)}(g, weights) - end -end - -flavors(::Type{<:MaximalIS}) = [0, 1] -energy_terms(gp::MaximalIS) = [[Graphs.neighbors(gp.graph, v)..., v] for v in Graphs.vertices(gp.graph)] -energy_tensors(x::T, c::MaximalIS) where T = [maximal_independent_set_tensor(_pow.(Ref(x), get_weights(c, i))..., degree(c.graph, i)+1) for i=1:nv(c.graph)] -extra_terms(::MaximalIS) = Vector{Int}[] -extra_tensors(::Type{T}, ::MaximalIS) where T = Array{T}[] -labels(gp::MaximalIS) = [1:nv(gp.graph)...] 
- -# weights interface -get_weights(c::MaximalIS) = c.weights -get_weights(gp::MaximalIS, i::Int) = [0, gp.weights[i]] -chweights(c::MaximalIS, weights) = MaximalIS(c.graph, weights) - -function maximal_independent_set_tensor(a::T, b::T, d::Int) where T - t = zeros(T, fill(2, d)...) - for i = 2:1<<(d-1) - t[i] = a - end - t[1<<(d-1)+1] = b - return t -end - - -""" - is_maximal_independent_set(g::SimpleGraph, config) - -Return true if `config` (a vector of boolean numbers as the mask of vertices) is a maximal independent set of graph `g`. -""" -is_maximal_independent_set(g::SimpleGraph, config) = !any(e->config[e.src] == 1 && config[e.dst] == 1, edges(g)) && all(w->config[w] == 1 || any(v->!iszero(config[v]), neighbors(g, w)), Graphs.vertices(g)) \ No newline at end of file diff --git a/src/networks/OpenPitMining.jl b/src/networks/OpenPitMining.jl deleted file mode 100644 index cdb47392..00000000 --- a/src/networks/OpenPitMining.jl +++ /dev/null @@ -1,215 +0,0 @@ -""" -$TYPEDEF - -The open pit mining problem. -This problem can be solved in polynomial time with the pseudoflow algorithm. - -Positional arguments -------------------------------- -* `rewards` is a matrix of rewards. -* `blocks` are the locations of the blocks. - -Example ------------------------------------ -```jldoctest; setup=:(using GenericTensorNetworks) -julia> rewards = [-4 -7 -7 -17 -7 -26; - 0 39 -7 -7 -4 0; - 0 0 1 8 0 0; - 0 0 0 0 0 0; - 0 0 0 0 0 0; - 0 0 0 0 0 0]; - -julia> gp = GenericTensorNetwork(OpenPitMining(rewards)); - -julia> res = solve(gp, SingleConfigMax())[] -(21.0, ConfigSampler{12, 1, 1}(111000100000))ₜ - -julia> is_valid_mining(rewards, res.c.data) -true - -julia> print_mining(rewards, res.c.data) - -4 -7 -7 -17 -7 -26 - ◼ 39 -7 -7 -4 ◼ - ◼ ◼ 1 8 ◼ ◼ - ◼ ◼ ◼ ◼ ◼ ◼ - ◼ ◼ ◼ ◼ ◼ ◼ - ◼ ◼ ◼ ◼ ◼ ◼ -``` - -You will the the mining is printed as green in an colored REPL. -""" -struct OpenPitMining{ET} <: GraphProblem - rewards::Matrix{ET} - blocks::Vector{Tuple{Int,Int}} # non-zero locations - function OpenPitMining(rewards::Matrix{ET}, blocks::Vector{Tuple{Int,Int}}) where ET - for (i, j) in blocks - checkbounds(rewards, i, j) - end - new{ET}(rewards, blocks) - end -end -function OpenPitMining(rewards::Matrix{ET}) where ET - # compute block locations - blocks = Tuple{Int,Int}[] - for i=1:size(rewards, 1), j=i:size(rewards,2)-i+1 - push!(blocks, (i,j)) - end - OpenPitMining(rewards, blocks) -end - -function mining_tensor(::Type{T}) where T - t = ones(T,2,2) - t[2,1] = zero(T) # first one is mined, but the second one is not mined. - return t -end - -flavors(::Type{<:OpenPitMining}) = [0, 1] -energy_terms(gp::OpenPitMining) = [[r] for r in gp.blocks] -energy_tensors(x::T, c::OpenPitMining) where T = [_pow.(Ref(x), get_weights(c, i)) for i=1:length(c.blocks)] -function extra_terms(gp::OpenPitMining) - depends = Pair{Tuple{Int,Int},Tuple{Int,Int}}[] - for i=1:size(gp.rewards, 1), j=i:size(gp.rewards,2)-i+1 - if i!=1 - push!(depends, (i,j)=>(i-1,j-1)) - push!(depends, (i,j)=>(i-1,j)) - push!(depends, (i,j)=>(i-1,j+1)) - end - end - return [[dep.first, dep.second] for dep in depends] -end -extra_tensors(::Type{T}, gp::OpenPitMining) where T = [mining_tensor(T) for _ in extra_terms(gp)] - -labels(gp::OpenPitMining) = gp.blocks - -# weights interface -get_weights(c::OpenPitMining) = [c.rewards[b...] for b in c.blocks] -get_weights(gp::OpenPitMining, i::Int) = [0, gp.rewards[gp.blocks[i]...]] -function chweights(c::OpenPitMining, weights) - rewards = copy(c.rewards) - for (w, b) in zip(weights, c.blocks) - rewards[b...] 
= w - end - OpenPitMining(rewards, c.blocks) -end - -""" - is_valid_mining(rewards::AbstractMatrix, config) - -Return true if `config` (a boolean mask for the feasible region) is a valid mining of `rewards`. -""" -function is_valid_mining(rewards::AbstractMatrix, config) - blocks = get_blocks(rewards) - assign = Dict(zip(blocks, config)) - for block in blocks - if block[1] != 1 && !iszero(assign[block]) - if iszero(assign[(block[1]-1, block[2]-1)]) || - iszero(assign[(block[1]-1, block[2])]) || - iszero(assign[(block[1]-1, block[2]+1)]) - return false - end - end - end - return true -end -function get_blocks(rewards) - blocks = Tuple{Int,Int}[] - for i=1:size(rewards, 1), j=i:size(rewards,2)-i+1 - push!(blocks, (i,j)) - end - return blocks -end - -""" - print_mining(rewards::AbstractMatrix, config) - -Printing the mining solution in a colored REPL. -""" -function print_mining(rewards::AbstractMatrix{T}, config) where T - k = 0 - for i=1:size(rewards, 1) - for j=1:size(rewards, 2) - if j >= i && j <= size(rewards,2)-i+1 - k += 1 - if T <: Integer - str = @sprintf " %6i " rewards[i,j] - else - str = @sprintf " %6.2F " rewards[i,j] - end - if iszero(config[k]) - printstyled(str; color = :red) - else - printstyled(str; color = :green) - end - else - str = @sprintf " %6s " "◼" - printstyled(str; color = :black) - end - end - println() - end -end - -function _open_pit_mining_branching!(rewards::AbstractMatrix{T}, mask::AbstractMatrix{Bool}, setmask::AbstractMatrix{Bool}, idx::Int) where T - # find next - idx < 1 && return zero(T) - i, j = divrem(idx-1, size(mask, 2)) .+ 1 # row-wise! - while i > j || size(mask, 1)-i+1 < j || setmask[i, j] # skip not allowed or already decided - idx -= 1 - idx < 1 && return zero(T) - i, j = divrem(idx-1, size(mask, 2)) .+ 1 # row-wise! - end - if rewards[i, j] < 0 # do not mine! - setmask[i, j] = true - return _open_pit_mining_branching!(rewards, mask, setmask, idx-1) - else - _mask = copy(mask) - _setmask = copy(setmask) - # CASE 1: try mine current block - # set mask - reward0 = set_recur!(mask, setmask, rewards, i, j) - reward1 = _open_pit_mining_branching!(rewards, mask, setmask, idx-1) + reward0 - - # CASE 1: try do not mine current block - # unset mask - _setmask[i, j] = true - reward2 = _open_pit_mining_branching!(rewards, _mask, _setmask, idx-1) - - # choose the right branch - if reward2 > reward1 - copyto!(mask, _mask) - copyto!(setmask, _setmask) - return reward2 - else - return reward1 - end - end -end - -function set_recur!(mask, setmask, rewards::AbstractMatrix{T}, i, j) where T - reward = zero(T) - for k=1:i - start = max(1, j-(i-k)) - stop = min(size(mask, 2), j+(i-k)) - @inbounds for l=start:stop - if !setmask[k,l] - mask[k,l] = true - setmask[k,l] = true - reward += rewards[k,l] - end - end - end - return reward -end - -""" - open_pit_mining_branching(rewards::AbstractMatrix) - -Solve the open pit mining problem with the naive branching algorithm. -NOTE: open pit mining can be solved in polynomial time, but this one runs in exponential time. 
-""" -function open_pit_mining_branching(rewards::AbstractMatrix{T}) where T - idx = length(rewards) - mask = falses(size(rewards)) - rewards = _open_pit_mining_branching!(rewards, mask, falses(size(rewards)), idx) - return rewards, mask -end \ No newline at end of file diff --git a/src/networks/PaintShop.jl b/src/networks/PaintShop.jl deleted file mode 100644 index c7fbdb45..00000000 --- a/src/networks/PaintShop.jl +++ /dev/null @@ -1,94 +0,0 @@ -""" -$TYPEDEF - -The [binary paint shop problem](https://queracomputing.github.io/GenericTensorNetworks.jl/dev/generated/PaintShop/). - -Positional arguments -------------------------------- -* `sequence` is a vector of symbols, each symbol is associated with a color. -* `isfirst` is a vector of boolean numbers, indicating whether the symbol is the first appearance in the sequence. - -Examples -------------------------------- -One can encode the paint shop problem `abaccb` as the following - -```jldoctest; setup=:(using GenericTensorNetworks) -julia> syms = collect("abaccb"); - -julia> pb = GenericTensorNetwork(PaintShop(syms)); - -julia> solve(pb, SizeMin())[] -2.0ₜ - -julia> solve(pb, ConfigsMin())[].c.data -2-element Vector{StaticBitVector{3, 1}}: - 100 - 011 -``` -In our definition, we find the maximum number of unchanged color in this sequence, i.e. (n-1) - (minimum number of color changes) -In the output of maximum configurations, the two configurations are defined on 5 bonds i.e. pairs of (i, i+1), `0` means color changed, while `1` means color not changed. -If we denote two "colors" as `r` and `b`, then the optimal painting is `rbbbrr` or `brrrbb`, both change the colors twice. -""" -struct PaintShop{LT} <: GraphProblem - sequence::Vector{LT} - isfirst::Vector{Bool} - function PaintShop(sequence::AbstractVector{T}) where T - @assert all(l->count(==(l), sequence)==2, sequence) - n = length(sequence) - isfirst = [findfirst(==(sequence[i]), sequence) == i for i=1:n] - new{eltype(sequence)}(sequence, isfirst) - end -end -function paint_shop_from_pairs(pairs::AbstractVector{Tuple{Int,Int}}) - n = length(pairs) - @assert sort!(vcat(collect.(pairs)...)) == collect(1:2n) - sequence = zeros(Int, 2*n) - @inbounds for i=1:n - sequence[pairs[i]] .= i - end - return PaintShop(sequence) -end - -flavors(::Type{<:PaintShop}) = [0, 1] -energy_terms(gp::PaintShop) = [[gp.sequence[i], gp.sequence[i+1]] for i in 1:length(gp.sequence)-1] -energy_tensors(x::T, c::PaintShop) where T = [flip_labels(paintshop_bond_tensor(_pow.(Ref(x), get_weights(c, i))...), c.isfirst[i], c.isfirst[i+1]) for i=1:length(c.sequence)-1] -extra_terms(::PaintShop{LT}) where LT = Vector{LT}[] -extra_tensors(::Type{T}, ::PaintShop) where T = Array{T}[] -labels(gp::PaintShop) = unique(gp.sequence) - -# weights interface -get_weights(c::PaintShop) = UnitWeight() -get_weights(::PaintShop, i::Int) = [0, 1] -chweights(c::PaintShop, weights) = c - -function paintshop_bond_tensor(a::T, b::T) where T - m = T[a b; b a] - return m -end -function flip_labels(m, if1, if2) - m = if1 ? m : m[[2,1],:] - m = if2 ? m : m[:,[2,1]] - return m -end - -""" - num_paint_shop_color_switch(sequence::AbstractVector, coloring) - -Returns the number of color switches. -""" -function num_paint_shop_color_switch(sequence::AbstractVector, coloring) - return count(i->coloring[i] != coloring[i+1], 1:length(sequence)-1) -end - -""" - paint_shop_coloring_from_config(p::PaintShop, config) - -Returns a valid painting from the paint shop configuration (given by the configuration solvers). 
-The `config` is a sequence of 0 and 1, where 0 means painting the first appearence of a car in blue, 1 otherwise. -""" -function paint_shop_coloring_from_config(p::PaintShop{LT}, config) where {LT} - d = Dict{LT,Bool}(zip(labels(p), config)) - return map(1:length(p.sequence)) do i - p.isfirst[i] ? d[p.sequence[i]] : ~d[p.sequence[i]] - end -end diff --git a/src/networks/Satisfiability.jl b/src/networks/Satisfiability.jl deleted file mode 100644 index f9dded34..00000000 --- a/src/networks/Satisfiability.jl +++ /dev/null @@ -1,184 +0,0 @@ -""" - BoolVar{T} - BoolVar(name, neg) - -Boolean variable for constructing CNF clauses. -""" -struct BoolVar{T} - name::T - neg::Bool -end -BoolVar(name) = BoolVar(name, false) -function Base.show(io::IO, b::BoolVar) - b.neg && print(io, "¬") - print(io, b.name) -end - -""" - CNFClause{T} - CNFClause(vars) - -A clause in [`CNF`](@ref), its value is the logical or of `vars`, where `vars` is a vector of [`BoolVar`](@ref). -""" -struct CNFClause{T} - vars::Vector{BoolVar{T}} -end -function Base.show(io::IO, b::CNFClause) - print(io, join(string.(b.vars), " ∨ ")) -end -Base.:(==)(x::CNFClause, y::CNFClause) = x.vars == y.vars - -""" - CNF{T} - CNF(clauses) - -Boolean expression in [conjunctive normal form](https://en.wikipedia.org/wiki/Conjunctive_normal_form). -`clauses` is a vector of [`CNFClause`](@ref), if and only if all clauses are satisfied, this CNF is satisfied. - -Example ------------------------- -```jldoctest; setup=:(using GenericTensorNetworks) -julia> @bools x y z - -julia> cnf = (x ∨ y) ∧ (¬y ∨ z) -(x ∨ y) ∧ (¬y ∨ z) - -julia> satisfiable(cnf, Dict([:x=>true, :y=>false, :z=>true])) -true - -julia> satisfiable(cnf, Dict([:x=>false, :y=>false, :z=>true])) -false -``` -""" -struct CNF{T} - clauses::Vector{CNFClause{T}} -end -function Base.show(io::IO, c::CNF) - print(io, join(["($k)" for k in c.clauses], " ∧ ")) -end -Base.:(==)(x::CNF, y::CNF) = x.clauses == y.clauses -Base.length(x::CNF) = length(x.clauses) - -""" - ¬(var::BoolVar) - -Negation of a boolean variables of type [`BoolVar`](@ref). -""" -¬(var::BoolVar{T}) where T = BoolVar(var.name, ~var.neg) - -""" - ∨(vars...) - -Logical `or` applied on [`BoolVar`](@ref) and [`CNFClause`](@ref). -Returns a [`CNFClause`](@ref). -""" -∨(var::BoolVar{T}, vars::BoolVar{T}...) where T = CNFClause([var, vars...]) -∨(c::CNFClause{T}, var::BoolVar{T}) where T = CNFClause([c.vars..., var]) -∨(c::CNFClause{T}, d::CNFClause{T}) where T = CNFClause([c.vars..., d.vars...]) -∨(var::BoolVar{T}, c::CNFClause) where T = CNFClause([var, c.vars...]) - -""" - ∧(vars...) - -Logical `and` applied on [`CNFClause`](@ref) and [`CNF`](@ref). -Returns a new [`CNF`](@ref). -""" -∧(c::CNFClause{T}, cs::CNFClause{T}...) where T = CNF([c, cs...]) -∧(c::CNFClause{T}, cs::CNF{T}) where T = CNF([c, cs.clauses...]) -∧(cs::CNF{T}, c::CNFClause{T}) where T = CNF([cs.clauses..., c]) -∧(cs::CNF{T}, ds::CNF{T}) where T = CNF([cs.clauses..., ds.clauses...]) - -""" - @bools(syms::Symbol...) - -Create some boolean variables of type [`BoolVar`](@ref) in current scope that can be used in create a [`CNF`](@ref). - -Example ------------------------- -```jldoctest; setup=:(using GenericTensorNetworks) -julia> @bools x y z - -julia> (x ∨ y) ∧ (¬y ∨ z) -(x ∨ y) ∧ (¬y ∨ z) -``` -""" -macro bools(syms::Symbol...) - esc(Expr(:block, [:($s = $BoolVar($(QuoteNode(s)))) for s in syms]..., nothing)) -end - -""" -$TYPEDEF - -The [satisfiability](https://queracomputing.github.io/GenericTensorNetworks.jl/dev/generated/Satisfiability/) problem. 
- -Positional arguments -------------------------------- -* `cnf` is a conjunctive normal form ([`CNF`](@ref)) for specifying the satisfiability problems. -* `weights` are associated with clauses. - -Examples -------------------------------- -```jldoctest; setup=:(using GenericTensorNetworks) -julia> @bools x y z a b c - -julia> c1 = x ∨ ¬y -x ∨ ¬y - -julia> c2 = c ∨ (¬a ∨ b) -c ∨ ¬a ∨ b - -julia> c3 = (z ∨ ¬a) ∨ y -z ∨ ¬a ∨ y - -julia> c4 = (c ∨ z) ∨ ¬b -c ∨ z ∨ ¬b - -julia> cnf = (c1 ∧ c4) ∧ (c2 ∧ c3) -(x ∨ ¬y) ∧ (c ∨ z ∨ ¬b) ∧ (c ∨ ¬a ∨ b) ∧ (z ∨ ¬a ∨ y) - -julia> gp = GenericTensorNetwork(Satisfiability(cnf)); - -julia> solve(gp, SizeMax())[] -4.0ₜ -``` -""" -struct Satisfiability{T,WT<:Union{UnitWeight, Vector}} <: GraphProblem - cnf::CNF{T} - weights::WT - function Satisfiability(cnf::CNF{T}, weights::WT=UnitWeight()) where {T, WT} - @assert weights isa UnitWeight || length(weights) == length(cnf) "weights size inconsistent! should be $(length(cnf)), got: $(length(weights))" - new{T, typeof(weights)}(cnf, weights) - end -end - -flavors(::Type{<:Satisfiability}) = [0, 1] # false, true -energy_terms(gp::Satisfiability) = [[getfield.(c.vars, :name)...] for c in gp.cnf.clauses] -energy_tensors(x::T, c::Satisfiability) where T = [tensor_for_clause(c.cnf.clauses[i], _pow.(Ref(x), get_weights(c, i))...) for i=1:length(c.cnf.clauses)] -extra_terms(::Satisfiability{T}) where T = Vector{T}[] -extra_tensors(::Type{T}, c::Satisfiability) where T = Array{T}[] -labels(gp::Satisfiability) = unique!(vcat(energy_terms(gp)...)) - -# weights interface -get_weights(c::Satisfiability) = c.weights -get_weights(s::Satisfiability, i::Int) = [0, s.weights[i]] -chweights(c::Satisfiability, weights) = Satisfiability(c.cnf, weights) - -""" - satisfiable(cnf::CNF, config::AbstractDict) - -Returns true if an assignment of variables satisfies a [`CNF`](@ref). -""" -function satisfiable(cnf::CNF{T}, config::AbstractDict{T}) where T - all(x->satisfiable(x, config), cnf.clauses) -end -function satisfiable(c::CNFClause{T}, config::AbstractDict{T}) where T - any(x->satisfiable(x, config), c.vars) -end -function satisfiable(v::BoolVar{T}, config::AbstractDict{T}) where T - config[v.name] == ~v.neg -end - -function tensor_for_clause(c::CNFClause{T}, a, b) where T - n = length(c.vars) - map(ci->any(i->~c.vars[i].neg == ci[i], 1:n) ? b : a, Iterators.product([[0, 1] for i=1:n]...)) -end diff --git a/src/networks/SetCovering.jl b/src/networks/SetCovering.jl deleted file mode 100644 index 8cffd53c..00000000 --- a/src/networks/SetCovering.jl +++ /dev/null @@ -1,76 +0,0 @@ -""" -$TYPEDEF - -The [set covering problem](https://queracomputing.github.io/GenericTensorNetworks.jl/dev/generated/SetCovering/). - -Positional arguments -------------------------------- -* `sets` is a vector of vectors, each set is associated with a weight specified in `weights`. -* `weights` are associated with sets. 
- -Examples -------------------------------- -```jldoctest; setup=:(using GenericTensorNetworks) -julia> sets = [[1, 2, 5], [1, 3], [2, 4], [3, 6], [2, 3, 6]]; # each set is a vertex - -julia> gp = GenericTensorNetwork(SetCovering(sets)); - -julia> res = solve(gp, ConfigsMin())[] -(3.0, {10110, 10101})ₜ -``` -""" -struct SetCovering{ET, WT<:Union{UnitWeight, Vector}} <: GraphProblem - sets::Vector{Vector{ET}} - weights::WT - function SetCovering(sets::Vector{Vector{ET}}, weights::Union{UnitWeight, Vector}=UnitWeight()) where {ET} - @assert weights isa UnitWeight || length(weights) == length(sets) - new{ET, typeof(weights)}(sets, weights) - end -end - -flavors(::Type{<:SetCovering}) = [0, 1] -energy_terms(gp::SetCovering) = [[i] for i=1:length(gp.sets)] -energy_tensors(x::T, c::SetCovering) where T = [misv(_pow.(Ref(x), get_weights(c, i))) for i=1:length(c.sets)] -function extra_terms(sc::SetCovering) - elements, count = cover_count(sc.sets) - return [count[e] for e in elements] -end -extra_tensors(::Type{T}, cfg::SetCovering) where T = [cover_tensor(T, ix) for ix in extra_terms(cfg)] -labels(gp::SetCovering) = [1:length(gp.sets)...] - -# weights interface -get_weights(c::SetCovering) = c.weights -get_weights(gp::SetCovering, i::Int) = [0, gp.weights[i]] -chweights(c::SetCovering, weights) = SetCovering(c.sets, weights) - -function cover_tensor(::Type{T}, set_indices::AbstractVector{Int}) where T - n = length(set_indices) - t = ones(T, fill(2, n)...) - t[1] = zero(T) - return t -end - -function cover_count(sets) - elements = vcat(sets...) - count = Dict{eltype(elements), Vector{Int}}() - for (iset, set) in enumerate(sets) - for e in set - if haskey(count, e) - push!(count[e], iset) - else - count[e] = [iset] - end - end - end - return elements, count -end - -""" - is_set_covering(sets::AbstractVector, config) - -Return true if `config` (a vector of boolean numbers as the mask of sets) is a set covering of `sets`. -""" -function is_set_covering(sets::AbstractVector, config) - insets = sets[(!iszero).(config)] - return length(unique!(vcat(insets...))) == length(unique!(vcat(sets...))) -end \ No newline at end of file diff --git a/src/networks/SetPacking.jl b/src/networks/SetPacking.jl deleted file mode 100644 index 97ee50ba..00000000 --- a/src/networks/SetPacking.jl +++ /dev/null @@ -1,58 +0,0 @@ -""" -$TYPEDEF - -The [set packing problem](https://queracomputing.github.io/GenericTensorNetworks.jl/dev/generated/SetPacking/), a generalization of independent set problem to hypergraphs. - -Positional arguments -------------------------------- -* `sets` is a vector of vectors, each set is associated with a weight specified in `weights`. -* `weights` are associated with sets. 
- -Examples -------------------------------- -```jldoctest; setup=:(using GenericTensorNetworks, Random; Random.seed!(2)) -julia> sets = [[1, 2, 5], [1, 3], [2, 4], [3, 6], [2, 3, 6]]; # each set is a vertex - -julia> gp = GenericTensorNetwork(SetPacking(sets)); - -julia> res = solve(gp, ConfigsMax())[] -(2.0, {00110, 10010, 01100})ₜ -``` -""" -struct SetPacking{ET,WT<:Union{UnitWeight, Vector}} <: GraphProblem - sets::Vector{Vector{ET}} - weights::WT - function SetPacking(sets::Vector{Vector{ET}}, weights::Union{UnitWeight, Vector}=UnitWeight()) where {ET} - @assert weights isa UnitWeight || length(weights) == length(sets) - new{ET, typeof(weights)}(sets, weights) - end -end - -flavors(::Type{<:SetPacking}) = [0, 1] -energy_terms(gp::SetPacking) = [[i] for i=1:length(gp.sets)] -energy_tensors(x::T, c::SetPacking) where T = [misv(_pow.(Ref(x), get_weights(c, i))) for i=1:length(c.sets)] -extra_terms(gp::SetPacking) = [[i,j] for i=1:length(gp.sets),j=1:length(gp.sets) if j>i && !isempty(gp.sets[i] ∩ gp.sets[j])] -extra_tensors(::Type{T}, gp::SetPacking) where T = [misb(T, length(ix)) for ix in extra_terms(gp)] -labels(gp::SetPacking) = [1:length(gp.sets)...] - -# weights interface -get_weights(c::SetPacking) = c.weights -get_weights(gp::SetPacking, i::Int) = [0, gp.weights[i]] -chweights(c::SetPacking, weights) = SetPacking(c.sets, weights) - -""" - is_set_packing(sets::AbstractVector, config) - -Return true if `config` (a vector of boolean numbers as the mask of sets) is a set packing of `sets`. -""" -function is_set_packing(sets::AbstractVector{ST}, config) where ST - d = Dict{eltype(ST), Int}() - for i=1:length(sets) - if !iszero(config[i]) - for e in sets[i] - d[e] = get(d, e, 0) + 1 - end - end - end - return all(isone, values(d)) -end \ No newline at end of file diff --git a/src/networks/SpinGlass.jl b/src/networks/SpinGlass.jl deleted file mode 100644 index 86f1e190..00000000 --- a/src/networks/SpinGlass.jl +++ /dev/null @@ -1,103 +0,0 @@ -""" -$(TYPEDEF) - SpinGlass(n, cliques; weights=UnitWeight()) - SpinGlass(graph::SimpleGraph, J=UnitWeight(), h=ZeroWeight()) - -The [spin-glass](https://queracomputing.github.io/GenericTensorNetworks.jl/dev/generated/SpinGlass/) problem. - -Positional arguments -------------------------------- -* `n` is the number of spins. -* `cliques` is a vector of cliques, each being a vector of vertices (integers). For simple graph, it is a vector of edges. -* `weights` are associated with the cliques. -""" -struct SpinGlass{WT<:Union{UnitWeight, Vector}} <: GraphProblem - n::Int - cliques::Vector{Vector{Int}} - weights::WT - function SpinGlass(n::Int, cliques::AbstractVector, weights::Union{UnitWeight, Vector}=UnitWeight()) - clqs = collect(collect.(cliques)) - @assert weights isa UnitWeight || length(weights) == length(clqs) - @assert all(c->all(b->1<=b<=n, c), cliques) "vertex index out of bound 1-$n, got: $cliques" - return new{typeof(weights)}(n, clqs, weights) - end -end -function SpinGlass(graph::SimpleGraph, J::Union{UnitWeight, Vector}, h::Union{ZeroWeight, Vector}=ZeroWeight()) - J_ = J isa UnitWeight ? fill(1, ne(graph)) : J - h_ = h isa ZeroWeight ? 
fill(0, nv(graph)) : h - @assert length(J_) == ne(graph) "length of J must be equal to the number of edges $(ne(graph)), got: $(length(J_))" - @assert length(h_) == nv(graph) "length of h must be equal to the number of vertices $(nv(graph)), got: $(length(h_))" - SpinGlass(nv(graph), [[[src(e), dst(e)] for e in edges(graph)]..., [[i] for i in 1:nv(graph)]...], [J_..., h_...]) -end -function spin_glass_from_matrix(M::AbstractMatrix, h::AbstractVector) - g = SimpleGraph((!iszero).(M)) - J = [M[e.src, e.dst] for e in edges(g)] - return SpinGlass(g, J, h) -end - -flavors(::Type{<:SpinGlass}) = [0, 1] -# first `ne` indices are for edge weights, last `n` indices are for vertex weights. -energy_terms(gp::SpinGlass) = gp.cliques -energy_tensors(x::T, c::SpinGlass) where T = [clique_tensor(length(c.cliques[i]), _pow.(Ref(x), get_weights(c, i))...) for i=1:length(c.cliques)] -extra_terms(sg::SpinGlass) = [[i] for i=1:sg.n] -extra_tensors(::Type{T}, c::SpinGlass) where T = [[one(T), one(T)] for i=1:c.n] -labels(gp::SpinGlass) = collect(1:gp.n) - -# weights interface -get_weights(c::SpinGlass) = c.weights -get_weights(gp::SpinGlass, i::Int) = [-gp.weights[i], gp.weights[i]] -chweights(c::SpinGlass, weights) = SpinGlass(c.n, c.cliques, weights) - -function clique_tensor(rank, a::T, b::T) where T - res = zeros(T, fill(2, rank)...) - for i=0:(1<x == 0 || x == 1, config) - s = 1 .- 2 .* Int.(config) # 0 -> spin 1, 1 -> spin -1 - for (i, spins) in enumerate(cliques) - size += prod(s[spins]) * weights[i] - end - return size -end -function spinglass_energy(g::SimpleGraph, config; J, h=ZeroWeight()) - eng = zero(promote_type(eltype(J), eltype(h))) - # NOTE: cast to Int to avoid using unsigned :nt - s = 1 .- 2 .* Int.(config) # 0 -> spin 1, 1 -> spin -1 - # coupling terms - for (i, e) in enumerate(edges(g)) - eng += (s[e.src] * s[e.dst]) * J[i] - end - # onsite terms - for (i, v) in enumerate(vertices(g)) - eng += s[v] * h[i] - end - return eng -end -function spinglass_energy(sg::SpinGlass, config) - spinglass_energy(sg.cliques, config; weights=sg.weights) -end diff --git a/src/utils.jl b/src/utils.jl deleted file mode 100644 index 479e2a4c..00000000 --- a/src/utils.jl +++ /dev/null @@ -1,19 +0,0 @@ -# Return a vector of unique labels in an Einsum token. -function labels(code::AbstractEinsum) - res = [] - for ix in getixsv(code) - for l in ix - if l ∉ res - push!(res, l) - end - end - end - return res -end - -# a unified interface to optimize the contraction code -_optimize_code(code, size_dict, optimizer::Nothing, simplifier) = code -_optimize_code(code, size_dict, optimizer, simplifier) = optimize_code(code, size_dict, optimizer, simplifier) - -# upload tensors to GPU -function togpu end \ No newline at end of file diff --git a/src/visualize.jl b/src/visualize.jl index 7d1e1e81..a10af734 100644 --- a/src/visualize.jl +++ b/src/visualize.jl @@ -100,20 +100,20 @@ function show_einsum(ein::AbstractEinsum; end """ - show_configs(gp::GraphProblem, locs, configs::AbstractMatrix; kwargs...) - show_configs(graph::SimpleGraph, locs, configs::AbstractMatrix; nflavor=2, kwargs...) + show_configs(gp::ConstraintSatisfactionProblem, locs, configs::AbstractMatrix; kwargs...) + show_configs(graph::SimpleGraph, locs, configs::AbstractMatrix; num_flavors=2, kwargs...) Show a gallery of configurations on a graph. """ -function show_configs(gp::GraphProblem, locs, configs::AbstractMatrix; kwargs...) - show_configs(gp.graph, locs, configs; nflavor=nflavor(gp), kwargs...) 
+function show_configs(gp::Union{Coloring,DominatingSet,IndependentSet,Matching,MaxCut,MaximalIndependentSet,SpinGlass}, locs, configs::AbstractMatrix; kwargs...) + show_configs(gp.graph, locs, configs; num_flavors=num_flavors(gp), kwargs...) end function show_configs(graph::SimpleGraph, locs, configs::AbstractMatrix; - nflavor::Int=2, + num_flavors::Int=2, kwargs...) - cmap = range(colorant"white", stop=colorant"red", length=nflavor) + cmap = range(colorant"white", stop=colorant"red", length=num_flavors) graphs = map(configs) do cfg - @assert all(0 .<= cfg .<= nflavor-1) + @assert all(0 .<= cfg .<= num_flavors-1) GraphViz(graph, locs; vertex_colors=cmap[cfg .+ 1]) end show_gallery(graphs; kwargs...) diff --git a/test/Project.toml b/test/Project.toml index cdc224b7..c6ebe75b 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -1,11 +1,13 @@ [deps] CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" +GenericTensorNetworks = "3521c873-ad32-4bb4-b63d-f4f178f42b49" Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" LuxorGraphPlot = "1f49bdf2-22a7-4bc4-978b-948dc219fbbc" OMEinsum = "ebe7aa44-baf0-506c-a96f-8464559b3922" Polynomials = "f27b6e38-b328-58d1-80ce-0feddd5e7a45" +ProblemReductions = "899c297d-f7d2-4ebf-8815-a35996def416" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" TropicalNumbers = "b3a74e9c-7526-4576-a4eb-79c0d4c32334" diff --git a/test/arithematics.jl b/test/arithematics.jl index b092cb77..3a8ed6df 100644 --- a/test/arithematics.jl +++ b/test/arithematics.jl @@ -1,7 +1,7 @@ using GenericTensorNetworks, Test, OMEinsum using GenericTensorNetworks.Mods, Polynomials, TropicalNumbers using Graphs, Random -using GenericTensorNetworks: StaticBitVector +using GenericTensorNetworks: StaticBitVector, @bv_str, _nints using LinearAlgebra @testset "truncated poly" begin diff --git a/test/bitvector.jl b/test/bitvector.jl index a5f6fa30..0b1c69a7 100644 --- a/test/bitvector.jl +++ b/test/bitvector.jl @@ -1,5 +1,5 @@ using Test, GenericTensorNetworks -using GenericTensorNetworks: statictrues, staticfalses, StaticBitVector, onehotv +using GenericTensorNetworks: statictrues, staticfalses, StaticBitVector, onehotv, @bv_str, StaticElementVector @testset "static bit vector" begin @test statictrues(StaticBitVector{3,1}) == trues(3) diff --git a/test/bounding.jl b/test/bounding.jl index c77ff2e7..cfd4f0b7 100644 --- a/test/bounding.jl +++ b/test/bounding.jl @@ -25,7 +25,7 @@ end rawcode = GenericTensorNetwork(IndependentSet(random_regular_graph(10, 3)); optimizer=nothing).code optcode = GenericTensorNetwork(IndependentSet(random_regular_graph(10, 3)); optimizer=GreedyMethod()).code xs = map(OMEinsum.getixs(rawcode)) do ix - length(ix)==1 ? GenericTensorNetworks.misv([one(TropicalF64), TropicalF64(1.0)]) : GenericTensorNetworks.misb(TropicalF64) + length(ix)==1 ? [one(TropicalF64), TropicalF64(1.0)] : TropicalF64.([0 0; 0 -Inf]) end y1 = rawcode(xs...) 
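
The `test/bounding.jl` change above replaces the removed `misv`/`misb` helpers with explicit tropical tensors. A small sketch (not part of this patch; uses only TropicalNumbers.jl) contracting those independent-set tensors by hand on a single edge:

```julia
# Sketch: the explicit independent-set tensors from the test above,
# contracted by hand on the tensor network of a single edge.
using TropicalNumbers

v = [one(TropicalF64), TropicalF64(1.0)]  # vertex tensor: selecting the vertex gains 1
b = TropicalF64.([0 0; 0 -Inf])           # edge tensor: selecting both endpoints is forbidden
mis = sum(v[i] * b[i, j] * v[j] for i in 1:2, j in 1:2)  # tropical: max over configurations
@assert mis == TropicalF64(1.0)           # the MIS of a single edge has size 1
```
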
y2 = bounding_contract(AllConfigs{1}(), rawcode, xs, BitArray(fill(true)), xs) diff --git a/test/configurations.jl b/test/configurations.jl index 534e44eb..9508bf3b 100644 --- a/test/configurations.jl +++ b/test/configurations.jl @@ -1,7 +1,8 @@ using GenericTensorNetworks, Test, Graphs using OMEinsum using TropicalNumbers: CountingTropicalF64 -using GenericTensorNetworks: _onehotv, _x, sampler_type, set_type, best_solutions, best2_solutions, solutions, all_solutions, bestk_solutions, AllConfigs, SingleConfig, max_size, max_size_count +using GenericTensorNetworks: _onehotv, _x, sampler_type, set_type, largest_solutions, solutions, largestk_solutions, AllConfigs, SingleConfig, max_size, max_size_count +using GenericTensorNetworks: graph_polynomial @testset "Config types" begin T = sampler_type(CountingTropical{Float32}, 5, 2) @@ -37,24 +38,21 @@ end for code in [rawcode, optcode] res0 = max_size(code) _, res1 = max_size_count(code) - res2 = best_solutions(code; all=true)[] + res2 = largest_solutions(code; all=true)[] res3 = solutions(code, CountingTropical{Float64}; all=false)[] res4 = solutions(code, CountingTropical{Float64}; all=true)[] @test res0 == res2.n == res3.n == res4.n @test res1 == length(res2.c) == length(res4.c) @test res3.c.data ∈ res2.c.data @test res3.c.data ∈ res4.c.data - res5 = best_solutions(code; all=false)[] + res5 = largest_solutions(code; all=false)[] @test res5.n == res0 @test res5.c.data ∈ res2.c.data - res6 = best2_solutions(code; all=true)[] - res6_ = bestk_solutions(code, 2)[] - res7 = all_solutions(code)[] + res6 = largestk_solutions(code, 2)[] + res7 = GenericTensorNetworks.solutions(code, Polynomial{Float64,:x}, all=true, usecuda=false, tree_storage=false)[] idp = graph_polynomial(code, Val(:finitefield))[] @test all(x->x ∈ res7.coeffs[end-1].data, res6.coeffs[1].data) @test all(x->x ∈ res7.coeffs[end].data, res6.coeffs[2].data) - @test all(x->x ∈ res7.coeffs[end-1].data, res6_.coeffs[1].data) - @test all(x->x ∈ res7.coeffs[end].data, res6_.coeffs[2].data) for (i, (s, c)) in enumerate(zip(res7.coeffs, idp.coeffs)) @test length(s) == c @test all(x->count_ones(x)==(i-1), s.data) diff --git a/test/cuda.jl b/test/cuda.jl index 905672c6..15e50367 100644 --- a/test/cuda.jl +++ b/test/cuda.jl @@ -58,9 +58,9 @@ end @testset "spinglass" begin g = Graphs.smallgraph("petersen") - gp = GenericTensorNetwork(SpinGlass(g, UnitWeight())) + gp = GenericTensorNetwork(SpinGlass(g, UnitWeight(ne(g)), zeros(Int, nv(g)))) usecuda=true @test solve(gp, CountingMax(); usecuda) isa CuArray - gp2 = GenericTensorNetwork(SpinGlass(g, UnitWeight()); openvertices=(2,)) + gp2 = GenericTensorNetwork(SpinGlass(g, UnitWeight(ne(g)), zeros(Int, nv(g))); openvertices=(2,)) @test solve(gp2, CountingMax(); usecuda) isa CuArray end diff --git a/test/deprecate.jl b/test/deprecate.jl new file mode 100644 index 00000000..7823755d --- /dev/null +++ b/test/deprecate.jl @@ -0,0 +1,22 @@ +using Test, GenericTensorNetworks, Graphs + +@testset "deprecate" begin + @test Independence(smallgraph(:petersen)) == IndependentSet(smallgraph(:petersen)) + @test MaximalIndependence(smallgraph(:petersen)) == MaximalIS(smallgraph(:petersen)) + @test_throws ErrorException UnitWeight() + @test_throws ErrorException ZeroWeight() + @test HyperSpinGlass(smallgraph(:petersen), ones(Int, ne(smallgraph(:petersen))), ones(Int, nv(smallgraph(:petersen)))) == SpinGlass(smallgraph(:petersen), ones(Int, ne(smallgraph(:petersen))), ones(Int, nv(smallgraph(:petersen)))) + + idp = IndependentSet(smallgraph(:petersen)) + @test 
nflavor(idp) == num_flavors(idp) + @test labels(idp) == variables(idp) + @test get_weights(idp) == GenericTensorNetworks.weights(idp) + @test chweights(idp, 2 * ones(Int, nv(idp.graph))) == set_weights(idp, 2 * ones(Int, nv(idp.graph))) + + @test GenericTensorNetworks.GraphProblem === ConstraintSatisfactionProblem + sg = SpinGlass(smallgraph(:petersen), ones(Int, ne(smallgraph(:petersen))), ones(Int, nv(smallgraph(:petersen)))) + cfg = rand([-1, 1], nv(sg.graph)) + @test spinglass_energy(sg, cfg) == energy(sg, cfg) + @test unit_disk_graph([(1, 2), (2, 2)], 1.6) isa UnitDiskGraph + @test solve(sg, SizeMax()) == solve(GenericTensorNetwork(sg), SizeMax()) +end diff --git a/test/fileio.jl b/test/fileio.jl index 7b245355..8cc63d03 100644 --- a/test/fileio.jl +++ b/test/fileio.jl @@ -6,26 +6,26 @@ using GenericTensorNetworks, Graphs, Test m = ConfigEnumerator([StaticBitVector(rand(Bool, 300)) for i=1:M]) bm = GenericTensorNetworks.plain_matrix(m) rm = GenericTensorNetworks.raw_matrix(m) - m1 = GenericTensorNetworks.from_raw_matrix(rm; bitlength=300, nflavors=2) - m2 = GenericTensorNetworks.from_plain_matrix(bm; nflavors=2) + m1 = GenericTensorNetworks.from_raw_matrix(rm; bitlength=300, num_flavors=2) + m2 = GenericTensorNetworks.from_plain_matrix(bm; num_flavors=2) @test m1 == m @test m2 == m save_configs(fname, m; format=:binary) @test_throws ErrorException load_configs("_test.bin"; format=:binary) - ma = load_configs(fname; format=:binary, bitlength=300, nflavors=2) + ma = load_configs(fname; format=:binary, bitlength=300, num_flavors=2) @test ma == m fname = tempname() save_configs(fname, m; format=:text) - mb = load_configs(fname; format=:text, nflavors=2) + mb = load_configs(fname; format=:text, num_flavors=2) @test mb == m M = 10 m = ConfigEnumerator([StaticElementVector(3, rand(0:2, 300)) for i=1:M]) bm = GenericTensorNetworks.plain_matrix(m) rm = GenericTensorNetworks.raw_matrix(m) - m1 = GenericTensorNetworks.from_raw_matrix(rm; bitlength=300, nflavors=3) - m2 = GenericTensorNetworks.from_plain_matrix(bm; nflavors=3) + m1 = GenericTensorNetworks.from_raw_matrix(rm; bitlength=300, num_flavors=3) + m2 = GenericTensorNetworks.from_plain_matrix(bm; num_flavors=3) @test m1 == m @test m2 == m @test Matrix(m) == bm @@ -34,12 +34,12 @@ using GenericTensorNetworks, Graphs, Test fname = tempname() save_configs(fname, m; format=:binary) @test_throws ErrorException load_configs(fname; format=:binary) - ma = load_configs(fname; format=:binary, bitlength=300, nflavors=3) + ma = load_configs(fname; format=:binary, bitlength=300, num_flavors=3) @test ma == m fname = tempname() save_configs(fname, m; format=:text) - mb = load_configs(fname; format=:text, nflavors=3) + mb = load_configs(fname; format=:text, num_flavors=3) @test mb == m end diff --git a/test/graph_polynomials.jl b/test/graph_polynomials.jl index e74a0a90..99e0cd89 100644 --- a/test/graph_polynomials.jl +++ b/test/graph_polynomials.jl @@ -3,17 +3,12 @@ using GenericTensorNetworks.Mods, Polynomials, TropicalNumbers using Graphs, Random using GenericTensorNetworks: StaticBitVector, graph_polynomial -@testset "bond and vertex tensor" begin - @test GenericTensorNetworks.misb(TropicalF64) == [TropicalF64(0) TropicalF64(0); TropicalF64(0) TropicalF64(-Inf)] - @test GenericTensorNetworks.misv([one(TropicalF64), TropicalF64(2.0)]) == [TropicalF64(0), TropicalF64(2.0)] -end - @testset "graph generator" begin g = diagonal_coupled_graph(trues(3, 3)) @test ne(g) == 20 g = diagonal_coupled_graph((x = trues(3, 3); x[2,2]=0; x)) @test ne(g) == 12 - @test 
length(GenericTensorNetworks.labels(GenericTensorNetwork(IndependentSet(g)).code)) == 8 + @test length(uniquelabels(GenericTensorNetwork(IndependentSet(g)).code)) == 8 end @testset "independence_polynomial" begin diff --git a/test/graphs.jl b/test/graphs.jl index 3fa63d49..5ba96c05 100644 --- a/test/graphs.jl +++ b/test/graphs.jl @@ -5,7 +5,7 @@ using GenericTensorNetworks, Test, Graphs @test nv(g) == 50 g = random_diagonal_coupled_graph(10, 10, 0.5) @test nv(g) == 50 - g = unit_disk_graph([(0.1, 0.2), (0.2, 0.3), (1.2, 1.4)], 1.0) + g = UnitDiskGraph([(0.1, 0.2), (0.2, 0.3), (1.2, 1.4)], 1.0) @test ne(g) == 1 @test nv(g) == 3 end diff --git a/test/networks/Coloring.jl b/test/networks/Coloring.jl index 79b5c8d7..81cf7de8 100644 --- a/test/networks/Coloring.jl +++ b/test/networks/Coloring.jl @@ -6,14 +6,14 @@ using Test, GenericTensorNetworks, Graphs add_edge!(g, i, j) end code = GenericTensorNetwork(Coloring{3}(g); optimizer=GreedyMethod()) - res = GenericTensorNetworks.best_solutions(code; all=true)[] + res = GenericTensorNetworks.largest_solutions(code; all=true, invert=true)[] @test length(res.c.data) == 12 g = smallgraph(:petersen) code = GenericTensorNetwork(Coloring{3}(g); optimizer=GreedyMethod()) - res = GenericTensorNetworks.best_solutions(code; all=true)[] + res = GenericTensorNetworks.largest_solutions(code; all=true, invert=true)[] @test length(res.c.data) == 120 - c = solve(code, SingleConfigMax())[] + c = solve(code, SingleConfigMin())[] @test c.c.data ∈ res.c.data @test is_vertex_coloring(g, c.c.data) end @@ -22,17 +22,17 @@ end @testset "weighted coloring" begin g = smallgraph(:petersen) problem = GenericTensorNetwork(Coloring{3}(g, fill(2, 15))) - @test get_weights(problem) == fill(2, 15) - @test get_weights(chweights(problem, fill(3, 15))) == fill(3, 15) - @test solve(problem, SizeMax())[].n == 30 - res = solve(problem, SingleConfigMax())[].c.data + @test GenericTensorNetworks.weights(problem) == fill(2, 15) + @test GenericTensorNetworks.weights(set_weights(problem, fill(3, 15))) == fill(3, 15) + @test solve(problem, SizeMin())[].n == 0 + res = solve(problem, SingleConfigMin())[].c.data @test is_vertex_coloring(g, res) end @testset "empty graph" begin g = SimpleGraph(4) pb = GenericTensorNetwork(Coloring{3}(g)) - @test solve(pb, SizeMax()) !== 4 + @test solve(pb, SizeMin()) !== 4 end @testset "planar gadget checking" begin @@ -53,7 +53,7 @@ end g = graph_crossing_gadget() problem = GenericTensorNetwork(Coloring{3}(g); openvertices=[3, 5]) - res = solve(problem, ConfigsMax()) + res = solve(problem, ConfigsMin()) for i=1:3 for ci in res[i,i].c @test ci[1] === ci[3] === ci[5] === ci[7] == i-1 diff --git a/test/networks/DominatingSet.jl b/test/networks/DominatingSet.jl index e8520a49..93403dc5 100644 --- a/test/networks/DominatingSet.jl +++ b/test/networks/DominatingSet.jl @@ -7,16 +7,13 @@ using GenericTensorNetworks, Test, Graphs @test is_dominating_set(g, mask) mask[1] = false @test !is_dominating_set(g, mask) - - @test GenericTensorNetworks.dominating_set_tensor(TropicalF64(0), TropicalF64(1), 3)[:,:,1] == TropicalF64[-Inf 0.0; 0 0] - @test GenericTensorNetworks.dominating_set_tensor(TropicalF64(0), TropicalF64(1), 3)[:,:,2] == TropicalF64[1.0 1.0; 1.0 1.0] end @testset "dominating set v.s. 
maximal IS" begin g = smallgraph(:petersen) gp1 = GenericTensorNetwork(DominatingSet(g)) - @test get_weights(gp1) == UnitWeight() - @test get_weights(chweights(gp1, fill(3, 10))) == fill(3, 10) + @test GenericTensorNetworks.weights(gp1) == UnitWeight(nv(g)) + @test GenericTensorNetworks.weights(set_weights(gp1, fill(3, 10))) == fill(3, 10) @test solve(gp1, SizeMax())[].n == 10 res1 = solve(gp1, ConfigsMin())[].c gp2 = GenericTensorNetwork(MaximalIS(g)) diff --git a/test/networks/IndependentSet.jl b/test/networks/IndependentSet.jl index 99212791..8c1cb4c9 100644 --- a/test/networks/IndependentSet.jl +++ b/test/networks/IndependentSet.jl @@ -5,8 +5,8 @@ using GenericTensorNetworks, Test, Graphs for (i,j) in [(1,2), (2,3), (4,5), (5,6), (1,6)] add_edge!(g, i, j) end - g = GenericTensorNetwork(IndependentSet(g); openvertices=[1,4,6,3]) - m = solve(g, SizeMax()) + net = GenericTensorNetwork(IndependentSet(g); openvertices=[1,4,6,3]) + m = solve(net, SizeMax()) @test m isa Array{Tropical{Float64}, 4} @test count(!iszero, m) == 12 m1 = mis_compactify!(copy(m)) @@ -14,12 +14,13 @@ using GenericTensorNetworks, Test, Graphs potential = zeros(Float64, 4) m2 = mis_compactify!(copy(m); potential) @test count(!iszero, m2) == 1 - @test get_weights(g) == UnitWeight() - @test get_weights(chweights(g, fill(3, 6))) == fill(3, 6) + @test GenericTensorNetworks.weights(net.problem) == UnitWeight(nv(net.problem.graph)) + @test GenericTensorNetworks.weights(set_weights(net.problem, fill(3, 6))) == fill(3, 6) end @testset "empty graph" begin g = SimpleGraph(4) pb = GenericTensorNetwork(IndependentSet(g)) + println(pb) @test solve(pb, SizeMax()) !== 4 end \ No newline at end of file diff --git a/test/networks/Matching.jl b/test/networks/Matching.jl index c20f33f8..8b8ef2da 100644 --- a/test/networks/Matching.jl +++ b/test/networks/Matching.jl @@ -1,18 +1,17 @@ using Test, GenericTensorNetworks, Graphs using GenericTensorNetworks: solutions - @testset "enumerating - matching" begin g = smallgraph(:petersen) code = GenericTensorNetwork(Matching(g); optimizer=GreedyMethod(), fixedvertices=Dict()) res = solutions(code, CountingTropicalF64; all=true)[] @test res.n == 5 @test length(res.c.data) == 6 - code = GenericTensorNetwork(Matching(g); optimizer=GreedyMethod(), fixedvertices=Dict((1,2)=>1)) - @test get_weights(code) == UnitWeight() - @test get_weights(chweights(code, fill(3, 15))) == fill(3, 15) + k = findfirst(x->x==Graphs.SimpleEdge(1,2), collect(edges(g))) + code = GenericTensorNetwork(Matching(g); optimizer=GreedyMethod(), fixedvertices=Dict(k=>1)) + @test GenericTensorNetworks.weights(code) == UnitWeight(ne(code.problem.graph)) + @test GenericTensorNetworks.weights(set_weights(code, fill(3, 15))) == fill(3, 15) res = solutions(code, CountingTropicalF64; all=true)[] @test res.n == 5 - k = findfirst(x->x==(1,2), labels(code)) @test length(res.c.data) == 2 && res.c.data[1][k] == 1 && res.c.data[2][k] == 1 end diff --git a/test/networks/MaxCut.jl b/test/networks/MaxCut.jl index b12b510d..85952a53 100644 --- a/test/networks/MaxCut.jl +++ b/test/networks/MaxCut.jl @@ -14,12 +14,12 @@ using GenericTensorNetworks: max_size, graph_polynomial # weighted ws = collect(1:ne(g)) gp = GenericTensorNetwork(MaxCut(g, ws)) - @test get_weights(gp) == [ws..., zeros(10)...] 
-    @test get_weights(chweights(gp, fill(3, 25))) == fill(3, 25)
+    @test GenericTensorNetworks.weights(gp) == ws
+    @test GenericTensorNetworks.weights(set_weights(gp, fill(3, 15))) == fill(3, 15)
     mc = max_size(gp)
     config = solve(gp, SingleConfigMax())[].c.data
     @test solve(gp, CountingMax())[].c == 2
-    @test cut_size(g, config; edge_weights=ws) == mc
+    @test cut_size(g, config; weights=ws) == mc
 end
 
 @testset "MaxCut" begin
@@ -37,7 +37,7 @@
         add_edge!(g, i, j)
     end
     code = GenericTensorNetwork(MaxCut(g); optimizer=GreedyMethod())
-    res = GenericTensorNetworks.best_solutions(code; all=true)[]
+    res = GenericTensorNetworks.largest_solutions(code; all=true)[]
    @test length(res.c.data) == 2
    @test cut_size(g, res.c.data[1]) == 5
 end
@@ -54,14 +54,13 @@
    @test optimal_config[1] == StaticBitVector(Array{Bool, 1}([1, 0, 1, 0, 0]))
 end
 
-@testset "vertex weights" begin
+@testset "edge weights" begin
     g = smallgraph(:petersen)
-    edge_weights = collect(1:ne(g))
-    vertex_weights = collect(1:nv(g))
-    gp = GenericTensorNetwork(MaxCut(g, edge_weights, vertex_weights))
+    weights = collect(1:ne(g))
+    gp = GenericTensorNetwork(MaxCut(g, weights))
     mc = max_size(gp)
     config = solve(gp, SingleConfigMax())[].c.data
-    @test solve(gp, CountingMax())[].c == 1
-    @test cut_size(g, config; edge_weights, vertex_weights) == mc
+    @test solve(gp, CountingMax())[].c == 2
+    @test cut_size(g, config; weights) == mc
 end
\ No newline at end of file
diff --git a/test/networks/MaximalIS.jl b/test/networks/MaximalIS.jl
index a8f96666..f143da12 100644
--- a/test/networks/MaximalIS.jl
+++ b/test/networks/MaximalIS.jl
@@ -4,8 +4,8 @@ using GenericTensorNetworks: graph_polynomial
 @testset "counting maximal IS" begin
     g = random_regular_graph(20, 3)
     gp = GenericTensorNetwork(MaximalIS(g), optimizer=KaHyParBipartite(sc_target=20))
-    @test get_weights(gp) == UnitWeight()
-    @test get_weights(chweights(gp, fill(3, 20))) == fill(3, 20)
+    @test GenericTensorNetworks.weights(gp) == UnitWeight(nv(g))
+    @test GenericTensorNetworks.weights(set_weights(gp, fill(3, 20))) == fill(3, 20)
     cs = graph_polynomial(gp, Val(:fft); r=1.0)[]
     gp = GenericTensorNetwork(MaximalIS(g), optimizer=SABipartite(sc_target=20))
     cs2 = graph_polynomial(gp, Val(:polynomial))[]
diff --git a/test/networks/OpenPitMining.jl b/test/networks/OpenPitMining.jl
deleted file mode 100644
index 622810aa..00000000
--- a/test/networks/OpenPitMining.jl
+++ /dev/null
@@ -1,19 +0,0 @@
-using GenericTensorNetworks, Test, Graphs
-
-@testset "open pit mining" begin
-    rewards = zeros(Int,6,6)
-    rewards[1,:] .= [-4,-7,-7,-17,-7,-26]
-    rewards[2,2:end-1] .= [39, -7, -7, -4]
-    rewards[3,3:end-2] .= [1, 8]
-    problem = GenericTensorNetwork(OpenPitMining(rewards))
-    @test get_weights(problem) == [-4,-7,-7,-17,-7,-26, 39, -7, -7, -4, 1, 8]
-    @test get_weights(chweights(problem, fill(3, 20))) == fill(3, 12)
-    res = solve(problem, SingleConfigMax())[]
-    @test is_valid_mining(rewards, res.c.data)
-    @test res.n == 21
-    print_mining(rewards, res.c.data)
-    val, mask = GenericTensorNetworks.open_pit_mining_branching(rewards)
-    @test val == res.n
-    res_b = map(block->mask[block...], problem.problem.blocks)
-    @test res_b == [res.c.data...]
-end \ No newline at end of file diff --git a/test/networks/PaintShop.jl b/test/networks/PaintShop.jl index 09b29c01..60783a7b 100644 --- a/test/networks/PaintShop.jl +++ b/test/networks/PaintShop.jl @@ -1,13 +1,11 @@ -using GenericTensorNetworks, Test +using GenericTensorNetworks, Test, ProblemReductions @testset "paint shop" begin syms = collect("abaccb") pb = GenericTensorNetwork(PaintShop(syms)) - @test get_weights(pb) == UnitWeight() - @test get_weights(chweights(pb, fill(3, 15))) == UnitWeight() @test solve(pb, SizeMin())[] == Tropical(2.0) config = solve(pb, SingleConfigMin())[].c.data - coloring = paint_shop_coloring_from_config(pb.problem, config) + coloring = ProblemReductions.paint_shop_coloring_from_config(pb.problem, config) @test num_paint_shop_color_switch(syms, coloring) == 2 @test bv"100" ∈ solve(pb, ConfigsMin())[].c.data end \ No newline at end of file diff --git a/test/networks/Satisfiability.jl b/test/networks/Satisfiability.jl index 194999be..2ca64ce6 100644 --- a/test/networks/Satisfiability.jl +++ b/test/networks/Satisfiability.jl @@ -21,8 +21,8 @@ using GenericTensorNetworks gp = GenericTensorNetwork(Satisfiability(cnf)) @test satisfiable(cnf, Dict(:x=>true, :y=>true, :z=>true, :a=>false, :b=>false, :c=>true)) @test !satisfiable(cnf, Dict(:x=>false, :y=>true, :z=>true, :a=>false, :b=>false, :c=>true)) - @test get_weights(gp) == UnitWeight() - @test get_weights(chweights(gp, fill(3, 4))) == fill(3,4) + @test GenericTensorNetworks.weights(gp) == UnitWeight(length(gp.problem.cnf)) + @test GenericTensorNetworks.weights(set_weights(gp, fill(3, 4))) == fill(3,4) @test_throws AssertionError Satisfiability(cnf, fill(3, 9)) end @@ -35,14 +35,14 @@ end cnf = (c1 ∧ c4) ∧ (c2 ∧ c3) gp = GenericTensorNetwork(Satisfiability(cnf)) - @test solve(gp, SizeMax())[].n == 4.0 - res = GenericTensorNetworks.best_solutions(gp; all=true)[].c.data + @test solve(gp, SizeMax())[].n == 2.0 + res = GenericTensorNetworks.largest_solutions(gp; invert=true, all=true)[].c.data for i=0:1<<6-1 v = StaticBitVector(Bool[i>>(k-1) & 1 for k=1:6]) if v ∈ res - @test satisfiable(gp.problem.cnf, Dict(zip(labels(gp), v))) + @test satisfiable(gp.problem.cnf, Dict(zip(ProblemReductions.symbols(gp.problem), v))) else - @test !satisfiable(gp.problem.cnf, Dict(zip(labels(gp), v))) + @test !satisfiable(gp.problem.cnf, Dict(zip(ProblemReductions.symbols(gp.problem), v))) end end end @@ -56,14 +56,14 @@ end cnf = (c1 ∧ c4) ∧ (c2 ∧ c3) gp = GenericTensorNetwork(Satisfiability(cnf, fill(2, length(cnf)))) - @test solve(gp, SizeMax())[].n == 8.0 - res = GenericTensorNetworks.best_solutions(gp; all=true)[].c.data + @test solve(gp, SizeMax())[].n == 4.0 + res = GenericTensorNetworks.largest_solutions(gp; invert=true, all=true)[].c.data for i=0:1<<6-1 v = StaticBitVector(Bool[i>>(k-1) & 1 for k=1:6]) if v ∈ res - @test satisfiable(gp.problem.cnf, Dict(zip(labels(gp), v))) + @test satisfiable(gp.problem.cnf, Dict(zip(ProblemReductions.symbols(gp.problem), v))) else - @test !satisfiable(gp.problem.cnf, Dict(zip(labels(gp), v))) + @test !satisfiable(gp.problem.cnf, Dict(zip(ProblemReductions.symbols(gp.problem), v))) end end end diff --git a/test/networks/SetCovering.jl b/test/networks/SetCovering.jl index 77a0dd69..ed25d30a 100644 --- a/test/networks/SetCovering.jl +++ b/test/networks/SetCovering.jl @@ -3,11 +3,12 @@ using GenericTensorNetworks, Test, Graphs @testset "set covering" begin sets = [[1, 2, 5], [1, 3], [2, 4], [3, 6], [2, 3, 6]] # each set is a vertex gp = GenericTensorNetwork(SetCovering(sets); 
optimizer=GreedyMethod())
-    @test get_weights(gp) == UnitWeight()
-    @test get_weights(chweights(gp, fill(3, 5))) == fill(3,5)
+    @test GenericTensorNetworks.weights(gp) == UnitWeight(length(sets))
+    @test GenericTensorNetworks.weights(set_weights(gp, fill(3, 5))) == fill(3,5)
     res = solve(gp, ConfigsMin())[]
     @test res.n == 3
     @test BitVector(Bool[1,0,1,1,0]) ∈ res.c.data
     @test BitVector(Bool[1,0,1,0,1]) ∈ res.c.data
-    @test all(x->is_set_covering(sets, x),res.c)
+    print(typeof(res.c.data))
+    @test all(x->is_set_covering(gp.problem,x),res.c.data)
 end
\ No newline at end of file
diff --git a/test/networks/SetPacking.jl b/test/networks/SetPacking.jl
index f2305bcb..f74a0d2b 100644
--- a/test/networks/SetPacking.jl
+++ b/test/networks/SetPacking.jl
@@ -3,12 +3,12 @@ using GenericTensorNetworks, Test, Graphs
 @testset "set packing" begin
     sets = [[1, 2, 5], [1, 3], [2, 4], [3, 6], [2, 3, 6]] # each set is a vertex
     gp = GenericTensorNetwork(SetPacking(sets); optimizer=GreedyMethod())
-    @test get_weights(gp) == UnitWeight()
-    @test get_weights(chweights(gp, fill(3, 5))) == fill(3,5)
-    res = GenericTensorNetworks.best_solutions(gp; all=true)[]
+    @test GenericTensorNetworks.weights(gp) == UnitWeight(length(sets))
+    @test GenericTensorNetworks.weights(set_weights(gp, fill(3, 5))) == fill(3,5)
+    res = GenericTensorNetworks.largest_solutions(gp; all=true)[]
     @test res.n == 2
     @test BitVector(Bool[0,0,1,1,0]) ∈ res.c.data
     @test BitVector(Bool[1,0,0,1,0]) ∈ res.c.data
     @test BitVector(Bool[0,1,1,0,0]) ∈ res.c.data
-    @test all(x->is_set_packing(sets, x),res.c)
+    @test all(x->is_set_packing(gp.problem, x),res.c)  # need GenericTensorNetworks: is_set_packing
 end
\ No newline at end of file
diff --git a/test/networks/SpinGlass.jl b/test/networks/SpinGlass.jl
index 4f087b16..7ea011a3 100644
--- a/test/networks/SpinGlass.jl
+++ b/test/networks/SpinGlass.jl
@@ -2,30 +2,28 @@ using GenericTensorNetworks, Test, Graphs
 @testset "memory estimation" begin
     g = smallgraph(:petersen)
-    ecliques = [[e.src, e.dst] for e in edges(g)]
-    cliques = [ecliques..., [[v] for v in vertices(g)]...]
     J = rand(15)
     h = randn(10) .* 0.5
-    weights = [J..., h...]
-    gp = GenericTensorNetwork(SpinGlass(10, cliques, weights))
-    cfg(x) = [(x>>i & 1) for i=0:9]
-    energies = [spinglass_energy(cliques, cfg(b); weights) for b=0:1<<10-1]
+    problem = SpinGlass(g, J, h)
+    gp = GenericTensorNetwork(problem)
+    cfg(x) = [(x>>i & 1) == 1 ? -1 : 1 for i=0:9]
+    energies = [energy(problem, cfg(b)) for b=0:1<<10-1]
     sorted_energies = sort(energies)
     @test solve(gp, SizeMax())[].n ≈ sorted_energies[end]
     @test solve(gp, SizeMin())[].n ≈ sorted_energies[1]
     @test getfield.(solve(gp, SizeMax(2))[].orders |> collect, :n) ≈ sorted_energies[end-1:end]
     res = solve(gp, SingleConfigMax(2))[].orders |> collect
     @test getfield.(res, :n) ≈ sorted_energies[end-1:end]
-    @test spinglass_energy(cliques, res[1].c.data; weights) ≈ res[end-1].n
-    @test spinglass_energy(cliques, res[2].c.data; weights) ≈ res[end].n
+    cfg2(x) = 1 .- 2 .* Int.(x)
+    @test energy(problem, cfg2(res[1].c.data)) ≈ res[end-1].n
+    @test energy(problem, cfg2(res[2].c.data)) ≈ res[end].n
     val, ind = findmax(energies)
-    # integer weights
-    weights = UnitWeight()
-    gp = GenericTensorNetwork(SpinGlass(10, ecliques, weights))
-    energies = [spinglass_energy(ecliques, cfg(b); weights) for b=0:1<<10-1]
+    problem = SpinGlass(g, UnitWeight(ne(g)), zeros(Int, nv(g)))
+    gp = GenericTensorNetwork(problem)
+    energies = [energy(problem, cfg(b)) for b=0:1<<10-1]
     sorted_energies = sort(energies)
     @test solve(gp, SizeMax())[].n ≈ sorted_energies[end]
     @test solve(gp, SizeMin())[].n ≈ sorted_energies[1]
 end
 
 @testset "memory estimation, simple graph" begin
     g = smallgraph(:petersen)
@@ -32,22 +30,22 @@ end
     J = rand(15)
     h = randn(10) .* 0.5
     gp = GenericTensorNetwork(SpinGlass(g, J, h))
-    cfg(x) = [(x>>i & 1) for i=0:9]
-    energies = [spinglass_energy(g, cfg(b); J=J, h) for b=0:1<<10-1]
+    cfg(x) = [(x>>i & 1) == 1 ? -1 : 1 for i=0:9]
+    energies = [energy(gp.problem, cfg(b)) for b=0:1<<10-1]
     sorted_energies = sort(energies)
     @test solve(gp, SizeMax())[].n ≈ sorted_energies[end]
     @test solve(gp, SizeMin())[].n ≈ sorted_energies[1]
     @test getfield.(solve(gp, SizeMax(2))[].orders |> collect, :n) ≈ sorted_energies[end-1:end]
     res = solve(gp, SingleConfigMax(2))[].orders |> collect
     @test getfield.(res, :n) ≈ sorted_energies[end-1:end]
-    @test spinglass_energy(g, res[1].c.data; J, h) ≈ res[end-1].n
-    @test spinglass_energy(g, res[2].c.data; J, h) ≈ res[end].n
+    cfg2(x) = 1 .- 2 .* Int.(x)
+    @test energy(gp.problem, cfg2(res[1].c.data)) ≈ res[end-1].n
+    @test energy(gp.problem, cfg2(res[2].c.data)) ≈ res[end].n
     val, ind = findmax(energies)
-    # integer weights
-    J = UnitWeight()
-    h = ZeroWeight()
+    J = UnitWeight(ne(g))
+    h = zeros(Int, nv(g))
     gp = GenericTensorNetwork(SpinGlass(g, J, h))
-    energies = [spinglass_energy(g, cfg(b); J=J, h) for b=0:1<<10-1]
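
The renames exercised by the tests above summarize the public-API migration in this change. A minimal usage sketch follows (illustrative code, not part of the patch; it reuses the Petersen-graph setup that appears throughout these tests):

    using GenericTensorNetworks, Graphs

    g = smallgraph(:petersen)

    # UnitDiskGraph (a graph type) replaces the unit_disk_graph constructor function.
    udg = UnitDiskGraph([(0.1, 0.2), (0.2, 0.3), (1.2, 1.4)], 1.0)

    # weights/set_weights replace get_weights/chweights, and UnitWeight now
    # carries its length explicitly.
    problem = IndependentSet(g)
    @assert GenericTensorNetworks.weights(problem) == UnitWeight(nv(g))
    reweighted = set_weights(problem, fill(3, nv(g)))

    # energy replaces spinglass_energy; it is evaluated on ±1 spin configurations.
    sg = SpinGlass(g, ones(Int, ne(g)), zeros(Int, nv(g)))
    cfg = rand([-1, 1], nv(g))
    E = energy(sg, cfg)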