From c4152e325870527ebbae4132ea5374e5e3784bab Mon Sep 17 00:00:00 2001 From: Miles Date: Sun, 28 Apr 2024 18:57:04 -0400 Subject: [PATCH 01/17] Removed unused ArrElem type --- src/treetensornetworks/opsum_to_ttn.jl | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn.jl index c54d3b01..b7543da8 100644 --- a/src/treetensornetworks/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn.jl @@ -584,32 +584,6 @@ function ttn(eltype::Type{<:Number}, os, sites::IndsNetwork; kwargs...) return NDTensors.convert_scalartype(eltype, ttn(os, sites; kwargs...)) end -# -# Tree adaptation of functionalities in ITensors.jl/src/physics/autompo/matelem.jl -# - -################################# -# ArrElem (simple sparse array) # -################################# - -struct ArrElem{T,N} - idxs::MVector{N,Int} - val::T -end - -function Base.:(==)(a1::ArrElem{T,N}, a2::ArrElem{T,N})::Bool where {T,N} - return (a1.idxs == a2.idxs && a1.val == a2.val) -end - -function Base.isless(a1::ArrElem{T,N}, a2::ArrElem{T,N})::Bool where {T,N} - for n in 1:N - if a1.idxs[n] != a2.idxs[n] - return a1.idxs[n] < a2.idxs[n] - end - end - return a1.val < a2.val -end - ##################################### # QNArrElem (sparse array with QNs) # ##################################### From c513ebfeb776792640116abbc97ddd17d0dafa30 Mon Sep 17 00:00:00 2001 From: Miles Date: Wed, 1 May 2024 12:39:48 -0400 Subject: [PATCH 02/17] Refactor sorteachterm, passing lt comparison from outside --- src/treetensornetworks/opsum_to_ttn.jl | 28 ++++++++++++++------------ 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn.jl index b7543da8..59d55818 100644 --- a/src/treetensornetworks/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn.jl @@ -8,8 +8,6 @@ using NamedGraphs.GraphsExtensions: GraphsExtensions, boundary_edges, degrees, is_leaf_vertex, vertex_path using StaticArrays: MVector -# convert ITensors.OpSum to TreeTensorNetwork - # # Utility methods # @@ -455,17 +453,10 @@ function _default_edge_ordering(g::AbstractGraph, root_vertex) return reverse(reverse.(post_order_dfs_edges(g, root_vertex))) end -# This is almost an exact copy of ITensors/src/opsum_to_mpo_generic:sorteachterm except for the site ordering being -# given via find_index_in_tree -# changed `isless_site` to use tree vertex ordering relative to root -function sorteachterm(os::OpSum, sites::IndsNetwork{V,<:Index}, root_vertex::V) where {V} +# This is almost an exact copy of ITensors/src/opsum_to_mpo_generic:sorteachterm +function sorteachterm(os::OpSum, sites; lt=isless) os = copy(os) - # linear ordering of vertices in tree graph relative to chosen root, chosen outward from root - ordering = _default_vertex_ordering(sites, root_vertex) - site_positions = Dict(zip(ordering, 1:length(ordering))) - findpos(op::Op) = site_positions[ITensors.site(op)] - isless_site(o1::Op, o2::Op) = findpos(o1) < findpos(o2) N = nv(sites) for n in eachindex(os) t = os[n] @@ -484,10 +475,12 @@ function sorteachterm(os::OpSum, sites::IndsNetwork{V,<:Index}, root_vertex::V) # Sort operators in t by site order, # and keep the permutation used, perm, for analysis below perm = Vector{Int}(undef, Nt) - sortperm!(perm, ITensors.terms(t); alg=InsertionSort, lt=isless_site) + sortperm!(perm, ITensors.terms(t); alg=InsertionSort, lt) t = coefficient(t) * Prod(ITensors.terms(t)[perm]) + # Everything below deals with 
fermionic operators: + # Identify fermionic operators, # zeroing perm for bosonic operators, # and inserting string "F" operators @@ -545,7 +538,16 @@ function ttn( is_leaf_vertex(sites, root_vertex) || error("Tree root must be a leaf vertex.") os = deepcopy(os) - os = sorteachterm(os, sites, root_vertex) + + # Build the isless_site function to pass to sorteachterm: + # * ordering = array of vertices ordered relative to chosen root, chosen outward from root + # * site_positions = map from vertex to where it is in ordering (inverse map of `ordering`) + ordering = _default_vertex_ordering(sites, root_vertex) + site_positions = Dict(zip(ordering, 1:length(ordering))) + findpos(op::Op) = site_positions[ITensors.site(op)] + isless_site(o1::Op, o2::Op) = findpos(o1) < findpos(o2) + + os = sorteachterm(os, sites; lt=isless_site) os = ITensorMPS.sortmergeterms(os) return ttn_svd(os, sites, root_vertex; kwargs...) end From 587e395eabe81d7ca8086c419b04527193a0d70a Mon Sep 17 00:00:00 2001 From: Miles Date: Thu, 2 May 2024 17:23:48 -0400 Subject: [PATCH 03/17] Minor code improvements --- src/treetensornetworks/opsum_to_ttn.jl | 113 ++++++++++++------------- 1 file changed, 54 insertions(+), 59 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn.jl index 59d55818..3895c9bd 100644 --- a/src/treetensornetworks/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn.jl @@ -62,12 +62,12 @@ function ttn_svd( vs = _default_vertex_ordering(sites, root_vertex) # TODO: Add check in ttn_svd that the ordering matches that of find_index_in_tree, which is used in sorteachterm #fermion-sign! # store edges in fixed ordering relative to root - es = _default_edge_ordering(sites, root_vertex) + ordered_edges = _default_edge_ordering(sites, root_vertex) # some things to keep track of # rank of every TTN tensor in network degrees = Dict(v => degree(sites, v) for v in vs) # link isometries for SVD compression of TTN - Vs = Dict(e => Dict{QN,Matrix{coefficient_type}}() for e in es) + Vs = Dict(e => Dict{QN,Matrix{coefficient_type}}() for e in ordered_edges) # map from term in Hamiltonian to incoming channel index for every edge inmaps = Dict{Pair{edgetype_sites,QN},Dict{Vector{Op},Int}}() # map from term in Hamiltonian to outgoing channel index for every edge @@ -93,6 +93,7 @@ function ttn_svd( end Hflux = -calc_qn(terms(first(terms(os)))) + # insert dummy indices on internal vertices, these will not show up in the final tensor is_internal = Dict{vertextype_sites,Bool}() for v in vs @@ -103,27 +104,29 @@ function ttn_svd( sites[v] = [Index(Hflux => 1)] end end - # bond coefficients for incoming edge channels + + # Bond coefficients for incoming edge channels. + # These become the "M" coefficient matrices that get SVD'd. 
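  # (Layout note, spelling out the comment above: for each incoming edge,
  # inbond_coefs maps every flux sector QN to a list of sparse entries
  # ITensorMPS.MatElem(bond_row, bond_col, bond_coef); later in ttn_svd each
  # list is densified into one of these "M" matrices and compressed by an SVD.)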
inbond_coefs = Dict( - e => Dict{QN,Vector{ITensorMPS.MatElem{coefficient_type}}}() for e in es + e => Dict{QN,Vector{ITensorMPS.MatElem{coefficient_type}}}() for e in ordered_edges ) # list of terms for which the coefficient has been added to a site factor site_coef_done = Prod{Op}[] - # temporary symbolic representation of TTN Hamiltonian + # Temporary symbolic representation of TTN Hamiltonian tempTTN = Dict(v => QNArrElem{Scaled{coefficient_type,Prod{Op}},degrees[v]}[] for v in vs) - # build compressed finite state machine representation + # Build compressed finite state machine representation (tempTTN) for v in vs - # for every vertex, find all edges that contain this vertex - edges = align_and_reorder_edges(incident_edges(sites, v), es) + # For every vertex, find all edges that contain this vertex + edges = align_and_reorder_edges(incident_edges(sites, v), ordered_edges) - # use the corresponding ordering as index order for tensor elements at this site + # Use the corresponding ordering as index order for tensor elements at this site dim_in = findfirst(e -> dst(e) == v, edges) edge_in = (isnothing(dim_in) ? [] : edges[dim_in]) dims_out = findall(e -> src(e) == v, edges) edges_out = edges[dims_out] - # for every site w except v, determine the incident edge to v that lies + # For every site w except v, determine the incident edge to v that lies # in the edge_path(w,v) subgraphs = split_at_vertex(sites, v) _boundary_edges = align_edges( @@ -142,7 +145,6 @@ function ttn_svd( for term in os # loop over OpSum and pick out terms that act on current vertex - factors = ITensors.terms(term) if v in ITensors.site.(factors) crosses_vertex = true @@ -253,15 +255,9 @@ function ttn_svd( # compress this tempTTN representation into dense form link_space = Dict{edgetype_sites,Index}() - for e in es - qi = Vector{Pair{QN,Int}}() - push!(qi, QN() => 1) - for (q, Vq) in Vs[e] - cols = size(Vq, 2) - push!(qi, q => cols) - end - push!(qi, Hflux => 1) - link_space[e] = Index(qi...; tags=edge_tag(e), dir=linkdir_ref) + for e in ordered_edges + operator_blocks = [q=>size(Vq,2) for (q,Vq) in Vs[e]] + link_space[e] = Index(QN()=>1,operator_blocks...,Hflux=>1; tags=edge_tag(e), dir=linkdir_ref) end H = ttn(sites0) # initialize TTN without the dummy indices added @@ -276,7 +272,7 @@ function ttn_svd( for v in vs # redo the whole thing like before # TODO: use neighborhood instead of going through all edges, see above - edges = align_and_reorder_edges(incident_edges(sites, v), es) + edges = align_and_reorder_edges(incident_edges(sites, v), ordered_edges) dim_in = findfirst(e -> dst(e) == v, edges) dims_out = findall(e -> src(e) == v, edges) # slice isometries at this vertex @@ -336,10 +332,9 @@ function ttn_svd( H[v] = ITensor() - # make the final arrow directions - _linkinds = copy(linkinds) + # Set the final arrow directions if !isnothing(dim_in) - _linkinds[dim_in] = dag(_linkinds[dim_in]) + linkinds[dim_in] = dag(linkinds[dim_in]) end for ((b, q_op), m) in blocks @@ -361,7 +356,7 @@ function ttn_svd( end end end - T = ITensors.BlockSparseTensor(coefficient_type, [b], _linkinds) + T = ITensors.BlockSparseTensor(coefficient_type, [b], linkinds) T[b] .= m iT = itensor(T) if !thishasqns @@ -401,7 +396,8 @@ function ttn_svd( idT_end_inds[dout] = linkdims[dout] end end - T = itensor(idT, _linkinds) + + T = itensor(idT, linkinds) if !thishasqns T = removeqns(T) end @@ -453,30 +449,37 @@ function _default_edge_ordering(g::AbstractGraph, root_vertex) return reverse(reverse.(post_order_dfs_edges(g, root_vertex))) end -# 
This is almost an exact copy of ITensors/src/opsum_to_mpo_generic:sorteachterm -function sorteachterm(os::OpSum, sites; lt=isless) - os = copy(os) - - N = nv(sites) - for n in eachindex(os) - t = os[n] - Nt = length(t) - +function check_terms_support(os::OpSum, sites) + for t in os if !all(map(v -> has_vertex(sites, v), ITensors.sites(t))) error( "The OpSum contains a term $t that does not have support on the underlying graph." ) end + end +end - prevsite = N + 1 #keep track of whether we are switching - #to a new site to make sure F string - #is only placed at most once for each site +# This code is very similar to ITensorMPS sorteachterm in opsum_generic.jl +function sorteachterm(os::OpSum, sites, root_vertex) + os = copy(os) + + # Build the isless_site function to pass to sortperm below: + # + ordering = array of vertices ordered relative to chosen root, chosen outward from root + # + site_positions = map from vertex to where it is in ordering (inverse map of `ordering`) + ordering = _default_vertex_ordering(sites, root_vertex) + site_positions = Dict(zip(ordering, 1:length(ordering))) + findpos(op::Op) = site_positions[ITensors.site(op)] + isless_site(o1::Op, o2::Op) = findpos(o1) < findpos(o2) + + N = nv(sites) + for j in eachindex(os) + t = os[j] # Sort operators in t by site order, # and keep the permutation used, perm, for analysis below - perm = Vector{Int}(undef, Nt) - sortperm!(perm, ITensors.terms(t); alg=InsertionSort, lt) - + Nt = length(t) + #perm = Vector{Int}(undef, Nt) + perm = sortperm(ITensors.terms(t); alg=InsertionSort, lt=isless_site) t = coefficient(t) * Prod(ITensors.terms(t)[perm]) # Everything below deals with fermionic operators: @@ -484,13 +487,14 @@ function sorteachterm(os::OpSum, sites; lt=isless) # Identify fermionic operators, # zeroing perm for bosonic operators, # and inserting string "F" operators - parity = +1 - for n in Nt:-1:1 + prevsite = typemax(Int) #keep track of whether we are switching to a new site + t_parity = +1 + for n in reverse(1:Nt) currsite = ITensors.site(t[n]) fermionic = has_fermion_string( ITensors.which_op(t[n]), only(sites[ITensors.site(t[n])]) ) - if !ITensors.using_auto_fermion() && (parity == -1) && (currsite < prevsite) + if !ITensors.using_auto_fermion() && (t_parity == -1) && (currsite < prevsite) error("No verified fermion support for automatic TTN constructor!") # no verified support, just throw error # Put local piece of Jordan-Wigner string emanating # from fermionic operators to the right @@ -500,14 +504,14 @@ function sorteachterm(os::OpSum, sites; lt=isless) prevsite = currsite if fermionic - parity = -parity + t_parity = -t_parity else # Ignore bosonic operators in perm # by zeroing corresponding entries perm[n] = 0 end end - if parity == -1 + if t_parity == -1 error("Parity-odd fermionic terms not yet supported by AutoTTN") end @@ -516,7 +520,7 @@ function sorteachterm(os::OpSum, sites; lt=isless) # and account for anti-commuting, fermionic operators # during above sort; put resulting sign into coef t *= ITensors.parity_sign(perm) - ITensors.terms(os)[n] = t + ITensors.terms(os)[j] = t end return os end @@ -536,18 +540,9 @@ function ttn( length(ITensors.terms(os)) == 0 && error("OpSum has no terms") is_tree(sites) || error("Site index graph must be a tree.") is_leaf_vertex(sites, root_vertex) || error("Tree root must be a leaf vertex.") - - os = deepcopy(os) - - # Build the isless_site function to pass to sorteachterm: - # * ordering = array of vertices ordered relative to chosen root, chosen outward from root - # * 
site_positions = map from vertex to where it is in ordering (inverse map of `ordering`) - ordering = _default_vertex_ordering(sites, root_vertex) - site_positions = Dict(zip(ordering, 1:length(ordering))) - findpos(op::Op) = site_positions[ITensors.site(op)] - isless_site(o1::Op, o2::Op) = findpos(o1) < findpos(o2) - - os = sorteachterm(os, sites; lt=isless_site) + check_terms_support(os,sites) + os = deepcopy(os) #TODO: do we need this? sorteachterm copies `os` again + os = sorteachterm(os, sites, root_vertex) os = ITensorMPS.sortmergeterms(os) return ttn_svd(os, sites, root_vertex; kwargs...) end From 9c94662640323a644b39b1d7239f42905fc88e45 Mon Sep 17 00:00:00 2001 From: Miles Date: Thu, 2 May 2024 17:57:46 -0400 Subject: [PATCH 04/17] Use more ITensors.Ops functions and rename some vars --- src/treetensornetworks/opsum_to_ttn.jl | 66 +++++++++++++------------- 1 file changed, 34 insertions(+), 32 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn.jl index 3895c9bd..9921b48e 100644 --- a/src/treetensornetworks/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn.jl @@ -3,7 +3,7 @@ using ITensors: flux, has_fermion_string, itensor, ops, removeqns, space, val using ITensors.ITensorMPS: ITensorMPS, cutoff, linkdims, truncate! using ITensors.LazyApply: Prod, Sum, coefficient using ITensors.NDTensors: Block, blockdim, maxdim, nblocks, nnzblocks -using ITensors.Ops: Op, OpSum +using ITensors.Ops: argument, coefficient, Op, OpSum, name, params, site, terms, which_op using NamedGraphs.GraphsExtensions: GraphsExtensions, boundary_edges, degrees, is_leaf_vertex, vertex_path using StaticArrays: MVector @@ -60,6 +60,7 @@ function ttn_svd( # traverse tree outwards from root vertex vs = _default_vertex_ordering(sites, root_vertex) + vert_number = Dict(zip(vs, 1:length(vs))) # TODO: Add check in ttn_svd that the ordering matches that of find_index_in_tree, which is used in sorteachterm #fermion-sign! 
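  # (vert_number records each vertex's position in this ordering — the tree
  # analogue of the left-to-right site numbering used when building an MPO.)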
# store edges in fixed ordering relative to root ordered_edges = _default_edge_ordering(sites, root_vertex) @@ -144,37 +145,38 @@ function ttn_svd( (isempty(dims_out) || isnothing(dim_in)) && @assert is_leaf_vertex(sites, v) for term in os - # loop over OpSum and pick out terms that act on current vertex - factors = ITensors.terms(term) - if v in ITensors.site.(factors) + # Loop over OpSum and pick out terms that act on current vertex + ops = ITensors.terms(term) + if v in ITensors.site.(ops) crosses_vertex = true else crosses_vertex = !isone( - length(Set([which_incident_edge[site] for site in ITensors.site.(factors)])) + length(Set([which_incident_edge[site] for site in ITensors.site.(ops)])) ) end - #if term doesn't cross vertex, skip it + # If term doesn't cross vertex, skip it crosses_vertex || continue + # filter out factor that acts on current vertex - onsite = filter(t -> (ITensors.site(t) == v), factors) - not_onsite_factors = setdiff(factors, onsite) + onsite = filter(t -> (ITensors.site(t) == v), ops) + not_onsite_ops = setdiff(ops, onsite) - # filter out factors that come in from the direction of the incoming edge + # filter out ops that come in from the direction of the incoming edge incoming = filter( - t -> which_incident_edge[ITensors.site(t)] == edge_in, not_onsite_factors + t -> which_incident_edge[ITensors.site(t)] == edge_in, not_onsite_ops ) - # also store all non-incoming factors in standard order, used for channel merging + # also store all non-incoming ops in standard order, used for channel merging not_incoming = filter( t -> (ITensors.site(t) == v) || which_incident_edge[ITensors.site(t)] != edge_in, - factors, + ops, ) - # for every outgoing edge, filter out factors that go out along that edge + # for every outgoing edge, filter out ops that go out along that edge outgoing = Dict( - e => filter(t -> which_incident_edge[ITensors.site(t)] == e, not_onsite_factors) for + e => filter(t -> which_incident_edge[ITensors.site(t)] == e, not_onsite_ops) for e in edges_out ) @@ -198,7 +200,7 @@ function ttn_svd( bond_row = ITensorMPS.posInLink!(cinmap, incoming) bond_col = ITensorMPS.posInLink!(coutmap, not_incoming) # get incoming channel - bond_coef = convert(coefficient_type, ITensors.coefficient(term)) + bond_coef = convert(coefficient_type, coefficient(term)) q_inbond_coefs = get!( inbond_coefs[edge_in], incoming_qn, ITensorMPS.MatElem{coefficient_type}[] ) @@ -217,11 +219,11 @@ function ttn_svd( # if term starts at this site, add its coefficient as a site factor site_coef = one(coefficient_type) if (isnothing(dim_in) || T_inds[dim_in] == -1) && - ITensors.argument(term) ∉ site_coef_done - site_coef = ITensors.coefficient(term) - # required since ITensors.coefficient seems to return ComplexF64 even if coefficient_type is determined to be real + argument(term) ∉ site_coef_done + site_coef = coefficient(term) + # required since coefficient seems to return ComplexF64 even if coefficient_type is determined to be real site_coef = convert(coefficient_type, site_coef) - push!(site_coef_done, ITensors.argument(term)) + push!(site_coef_done, argument(term)) end # add onsite identity for interactions passing through vertex if isempty(onsite) @@ -422,7 +424,7 @@ end function isfermionic(t::Vector{Op}, sites::IndsNetwork{V,<:Index}) where {V} p = +1 for op in t - if has_fermion_string(ITensors.name(op), only(sites[ITensors.site(op)])) + if has_fermion_string(name(op), only(sites[site(op)])) p *= -1 end end @@ -431,11 +433,11 @@ end # only(site(ops[1])) in ITensors breaks for Tuple 
site labels, had to drop the only function computeSiteProd(sites::IndsNetwork{V,<:Index}, ops::Prod{Op})::ITensor where {V} - v = ITensors.site(ops[1]) - T = op(sites[v], ITensors.which_op(ops[1]); ITensors.params(ops[1])...) + v = site(ops[1]) + T = op(sites[v], which_op(ops[1]); params(ops[1])...) for j in 2:length(ops) - (ITensors.site(ops[j]) != v) && error("Mismatch of vertex labels in computeSiteProd") - opj = op(sites[v], ITensors.which_op(ops[j]); ITensors.params(ops[j])...) + (site(ops[j]) != v) && error("Mismatch of vertex labels in computeSiteProd") + opj = op(sites[v], which_op(ops[j]); params(ops[j])...) T = product(T, opj) end return T @@ -468,7 +470,7 @@ function sorteachterm(os::OpSum, sites, root_vertex) # + site_positions = map from vertex to where it is in ordering (inverse map of `ordering`) ordering = _default_vertex_ordering(sites, root_vertex) site_positions = Dict(zip(ordering, 1:length(ordering))) - findpos(op::Op) = site_positions[ITensors.site(op)] + findpos(op::Op) = site_positions[site(op)] isless_site(o1::Op, o2::Op) = findpos(o1) < findpos(o2) N = nv(sites) @@ -479,8 +481,8 @@ function sorteachterm(os::OpSum, sites, root_vertex) # and keep the permutation used, perm, for analysis below Nt = length(t) #perm = Vector{Int}(undef, Nt) - perm = sortperm(ITensors.terms(t); alg=InsertionSort, lt=isless_site) - t = coefficient(t) * Prod(ITensors.terms(t)[perm]) + perm = sortperm(terms(t); alg=InsertionSort, lt=isless_site) + t = coefficient(t) * Prod(terms(t)[perm]) # Everything below deals with fermionic operators: @@ -490,16 +492,16 @@ function sorteachterm(os::OpSum, sites, root_vertex) prevsite = typemax(Int) #keep track of whether we are switching to a new site t_parity = +1 for n in reverse(1:Nt) - currsite = ITensors.site(t[n]) + currsite = site(t[n]) fermionic = has_fermion_string( - ITensors.which_op(t[n]), only(sites[ITensors.site(t[n])]) + which_op(t[n]), only(sites[site(t[n])]) ) if !ITensors.using_auto_fermion() && (t_parity == -1) && (currsite < prevsite) error("No verified fermion support for automatic TTN constructor!") # no verified support, just throw error # Put local piece of Jordan-Wigner string emanating # from fermionic operators to the right # (Remaining F operators will be put in by svdMPO) - terms(t)[n] = Op("$(ITensors.which_op(t[n])) * F", only(ITensors.site(t[n]))) + terms(t)[n] = Op("$(which_op(t[n])) * F", only(site(t[n]))) end prevsite = currsite @@ -520,7 +522,7 @@ function sorteachterm(os::OpSum, sites, root_vertex) # and account for anti-commuting, fermionic operators # during above sort; put resulting sign into coef t *= ITensors.parity_sign(perm) - ITensors.terms(os)[j] = t + terms(os)[j] = t end return os end @@ -537,7 +539,7 @@ function ttn( root_vertex=GraphsExtensions.default_root_vertex(sites), kwargs..., ) - length(ITensors.terms(os)) == 0 && error("OpSum has no terms") + length(terms(os)) == 0 && error("OpSum has no terms") is_tree(sites) || error("Site index graph must be a tree.") is_leaf_vertex(sites, root_vertex) || error("Tree root must be a leaf vertex.") check_terms_support(os,sites) From d3ef14b5b8f55488d9056f74086c8d0f84942f7e Mon Sep 17 00:00:00 2001 From: Miles Date: Thu, 2 May 2024 18:18:45 -0400 Subject: [PATCH 05/17] Split sparse TTN construction into separate function. Remove some unneeded data structures. 
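This stages the construction: the symbolic sparse representation is now built
by its own function, which ttn_svd then consumes. Excerpting the diff below
(reformatted here for readability; the interface keeps changing in follow-up
commits), the body of ttn_svd now composes as:

    # build the symbolic sparse TTN plus SVD isometries and bookkeeping...
    tempTTN, Vs, Hflux, is_internal = make_sparse_ttn(
      os, sites, root_vertex, coefficient_type; kws...
    )
    # ...then compress tempTTN into block-sparse ITensors as before.

The TODO list added at the top of make_sparse_ttn tracks the remaining
cleanup: dropping is_internal and Hflux from the returns and splitting the
SVD of the "M" coefficient matrices into a separate function.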
--- src/treetensornetworks/opsum_to_ttn.jl | 101 +++++++++++++++---------- 1 file changed, 59 insertions(+), 42 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn.jl index 9921b48e..c1aa417f 100644 --- a/src/treetensornetworks/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn.jl @@ -42,40 +42,32 @@ function ttn_svd(os::OpSum, sites::IndsNetwork, root_vertex; kwargs...) return ttn_svd(coefficient_type, os, sites, root_vertex; kwargs...) end -function ttn_svd( - coefficient_type::Type{<:Number}, +# +# TODO: +# - remove is_internal from returns? maybe make it inside ttn_svd beforehand +# - remove Hflux from returns along with is_internal +# - split SVD'ing of "M"'s into Vs into a separate function +# +function make_sparse_ttn( os::OpSum, - sites0::IndsNetwork, - root_vertex; + sites::IndsNetwork, + root_vertex, + coefficient_type; mindim::Int=1, maxdim::Int=typemax(Int), cutoff=eps(real(coefficient_type)) * 10, ) - linkdir_ref = ITensors.In # safe to always use autofermion default here - - sites = copy(sites0) # copy because of modification to handle internal indices edgetype_sites = edgetype(sites) vertextype_sites = vertextype(sites) - thishasqns = any(v -> hasqns(sites[v]), vertices(sites)) - # traverse tree outwards from root vertex vs = _default_vertex_ordering(sites, root_vertex) - vert_number = Dict(zip(vs, 1:length(vs))) - # TODO: Add check in ttn_svd that the ordering matches that of find_index_in_tree, which is used in sorteachterm #fermion-sign! - # store edges in fixed ordering relative to root ordered_edges = _default_edge_ordering(sites, root_vertex) - # some things to keep track of - # rank of every TTN tensor in network - degrees = Dict(v => degree(sites, v) for v in vs) - # link isometries for SVD compression of TTN - Vs = Dict(e => Dict{QN,Matrix{coefficient_type}}() for e in ordered_edges) - # map from term in Hamiltonian to incoming channel index for every edge inmaps = Dict{Pair{edgetype_sites,QN},Dict{Vector{Op},Int}}() - # map from term in Hamiltonian to outgoing channel index for every edge outmaps = Dict{Pair{edgetype_sites,QN},Dict{Vector{Op},Int}}() - op_cache = Dict{Pair{String,vertextype_sites},ITensor}() + Vs = Dict(e => Dict{QN,Matrix{coefficient_type}}() for e in ordered_edges) + op_cache = Dict{Pair{String,vertextype_sites},ITensor}() function calc_qn(term::Vector{Op}) q = QN() for st in term @@ -111,13 +103,18 @@ function ttn_svd( inbond_coefs = Dict( e => Dict{QN,Vector{ITensorMPS.MatElem{coefficient_type}}}() for e in ordered_edges ) - # list of terms for which the coefficient has been added to a site factor + + # List of terms for which the coefficient has been added to a site factor site_coef_done = Prod{Op}[] + # Temporary symbolic representation of TTN Hamiltonian - tempTTN = Dict(v => QNArrElem{Scaled{coefficient_type,Prod{Op}},degrees[v]}[] for v in vs) + tempTTN = Dict( + v => QNArrElem{Scaled{coefficient_type,Prod{Op}},degree(sites, v)}[] for v in vs + ) # Build compressed finite state machine representation (tempTTN) for v in vs + v_degree = degree(sites, v) # For every vertex, find all edges that contain this vertex edges = align_and_reorder_edges(incident_edges(sites, v), ordered_edges) @@ -151,14 +148,11 @@ function ttn_svd( crosses_vertex = true else crosses_vertex = - !isone( - length(Set([which_incident_edge[site] for site in ITensors.site.(ops)])) - ) + !isone(length(Set([which_incident_edge[site] for site in ITensors.site.(ops)]))) end # If term doesn't cross vertex, skip it 
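  # (Concretely: a term "crosses" v when it acts on v itself, or when its
  # operators reach v through more than one incident edge, i.e. its support
  # is not contained in a single subtree hanging off v.)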
crosses_vertex || continue - # filter out factor that acts on current vertex onsite = filter(t -> (ITensors.site(t) == v), ops) not_onsite_ops = setdiff(ops, onsite) @@ -187,8 +181,8 @@ function ttn_svd( site_qn = calc_qn(onsite) # initialize QNArrayElement indices and quantum numbers - T_inds = MVector{degrees[v]}(fill(-1, degrees[v])) - T_qns = MVector{degrees[v]}(fill(QN(), degrees[v])) + T_inds = MVector{v_degree}(fill(-1, v_degree)) + T_qns = MVector{v_degree}(fill(QN(), v_degree)) # initialize ArrayElement indices for inbond_coefs bond_row = -1 bond_col = -1 @@ -218,8 +212,7 @@ function ttn_svd( end # if term starts at this site, add its coefficient as a site factor site_coef = one(coefficient_type) - if (isnothing(dim_in) || T_inds[dim_in] == -1) && - argument(term) ∉ site_coef_done + if (isnothing(dim_in) || T_inds[dim_in] == -1) && argument(term) ∉ site_coef_done site_coef = coefficient(term) # required since coefficient seems to return ComplexF64 even if coefficient_type is determined to be real site_coef = convert(coefficient_type, site_coef) @@ -254,12 +247,37 @@ function ttn_svd( end end + return tempTTN, Vs, Hflux, is_internal +end + +function ttn_svd( + coefficient_type::Type{<:Number}, os::OpSum, sites0::IndsNetwork, root_vertex; kws... +) + linkdir_ref = ITensors.In # safe to always use autofermion default here + + sites = copy(sites0) # copy because of modification to handle internal indices + edgetype_sites = edgetype(sites) + thishasqns = any(v -> hasqns(sites[v]), vertices(sites)) + + # traverse tree outwards from root vertex + vs = _default_vertex_ordering(sites, root_vertex) + #vert_number = Dict(zip(vs, 1:length(vs))) + # TODO: Add check in ttn_svd that the ordering matches that of find_index_in_tree, which is used in sorteachterm #fermion-sign! + # store edges in fixed ordering relative to root + ordered_edges = _default_edge_ordering(sites, root_vertex) + + tempTTN, Vs, Hflux, is_internal = make_sparse_ttn( + os, sites, root_vertex, coefficient_type; kws... 
+ ) + # compress this tempTTN representation into dense form link_space = Dict{edgetype_sites,Index}() for e in ordered_edges - operator_blocks = [q=>size(Vq,2) for (q,Vq) in Vs[e]] - link_space[e] = Index(QN()=>1,operator_blocks...,Hflux=>1; tags=edge_tag(e), dir=linkdir_ref) + operator_blocks = [q => size(Vq, 2) for (q, Vq) in Vs[e]] + link_space[e] = Index( + QN() => 1, operator_blocks..., Hflux => 1; tags=edge_tag(e), dir=linkdir_ref + ) end H = ttn(sites0) # initialize TTN without the dummy indices added @@ -272,6 +290,7 @@ function ttn_svd( qnblockdim(i::Index, q::QN) = blockdim(i, qnblock(i, q)) for v in vs + v_degree = degree(sites, v) # redo the whole thing like before # TODO: use neighborhood instead of going through all edges, see above edges = align_and_reorder_edges(incident_edges(sites, v), ordered_edges) @@ -282,20 +301,20 @@ function ttn_svd( linkinds = [link_space[e] for e in edges] # construct blocks - blocks = Dict{Tuple{Block{degrees[v]},Vector{Op}},Array{coefficient_type,degrees[v]}}() + blocks = Dict{Tuple{Block{v_degree},Vector{Op}},Array{coefficient_type,v_degree}}() for el in tempTTN[v] t = el.val (abs(coefficient(t)) > eps(real(coefficient_type))) || continue - block_helper_inds = fill(-1, degrees[v]) # we manipulate T_inds later, and loose track of ending/starting information, so keep track of it here + block_helper_inds = fill(-1, v_degree) # we manipulate T_inds later, and loose track of ending/starting information, so keep track of it here T_inds = el.idxs T_qns = el.qn_idxs ct = convert(coefficient_type, coefficient(t)) sublinkdims = [ - (T_inds[i] == -1 ? 1 : qnblockdim(linkinds[i], T_qns[i])) for i in 1:degrees[v] + (T_inds[i] == -1 ? 1 : qnblockdim(linkinds[i], T_qns[i])) for i in 1:v_degree ] zero_arr() = zeros(coefficient_type, sublinkdims...) - terminal_dims = findall(d -> T_inds[d] == -1, 1:degrees[v]) # directions in which term starts or ends - normal_dims = findall(d -> T_inds[d] ≠ -1, 1:degrees[v]) # normal dimensions, do truncation thingies + terminal_dims = findall(d -> T_inds[d] == -1, 1:v_degree) # directions in which term starts or ends + normal_dims = findall(d -> T_inds[d] ≠ -1, 1:v_degree) # normal dimensions, do truncation thingies T_inds[terminal_dims] .= 1 # start in channel 1 ###?? block_helper_inds[terminal_dims] .= 1 for dout in filter(d -> d ∈ terminal_dims, dims_out) @@ -381,7 +400,7 @@ function ttn_svd( idT = zeros(coefficient_type, linkdims...) if isnothing(dim_in) # only one real starting identity - idT[ones(Int, degrees[v])...] = 1.0 + idT[ones(Int, v_degree)...] = 1.0 end # ending identities are a little more involved if !isnothing(dim_in) @@ -493,9 +512,7 @@ function sorteachterm(os::OpSum, sites, root_vertex) t_parity = +1 for n in reverse(1:Nt) currsite = site(t[n]) - fermionic = has_fermion_string( - which_op(t[n]), only(sites[site(t[n])]) - ) + fermionic = has_fermion_string(which_op(t[n]), only(sites[site(t[n])])) if !ITensors.using_auto_fermion() && (t_parity == -1) && (currsite < prevsite) error("No verified fermion support for automatic TTN constructor!") # no verified support, just throw error # Put local piece of Jordan-Wigner string emanating @@ -542,7 +559,7 @@ function ttn( length(terms(os)) == 0 && error("OpSum has no terms") is_tree(sites) || error("Site index graph must be a tree.") is_leaf_vertex(sites, root_vertex) || error("Tree root must be a leaf vertex.") - check_terms_support(os,sites) + check_terms_support(os, sites) os = deepcopy(os) #TODO: do we need this? 
sorteachterm copies `os` again os = sorteachterm(os, sites, root_vertex) os = ITensorMPS.sortmergeterms(os) From 5212239531fd328eeada7f74eedff70bc79ec0d7 Mon Sep 17 00:00:00 2001 From: Miles Date: Thu, 2 May 2024 21:15:01 -0400 Subject: [PATCH 06/17] Refactor last two part of ttn_svd into svd_bond_coefs and compress_ttn functions --- src/treetensornetworks/opsum_to_ttn.jl | 138 ++++++++++++++++--------- 1 file changed, 87 insertions(+), 51 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn.jl index c1aa417f..aa0787e8 100644 --- a/src/treetensornetworks/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn.jl @@ -49,24 +49,17 @@ end # - split SVD'ing of "M"'s into Vs into a separate function # function make_sparse_ttn( - os::OpSum, - sites::IndsNetwork, - root_vertex, - coefficient_type; - mindim::Int=1, - maxdim::Int=typemax(Int), - cutoff=eps(real(coefficient_type)) * 10, + os::OpSum, sites::IndsNetwork, root_vertex, coefficient_type, calc_qn ) edgetype_sites = edgetype(sites) vertextype_sites = vertextype(sites) - vs = _default_vertex_ordering(sites, root_vertex) + ordered_verts = _default_vertex_ordering(sites, root_vertex) ordered_edges = _default_edge_ordering(sites, root_vertex) inmaps = Dict{Pair{edgetype_sites,QN},Dict{Vector{Op},Int}}() outmaps = Dict{Pair{edgetype_sites,QN},Dict{Vector{Op},Int}}() - Vs = Dict(e => Dict{QN,Matrix{coefficient_type}}() for e in ordered_edges) - + #= op_cache = Dict{Pair{String,vertextype_sites},ITensor}() function calc_qn(term::Vector{Op}) q = QN() @@ -84,19 +77,7 @@ function make_sparse_ttn( end return q end - - Hflux = -calc_qn(terms(first(terms(os)))) - - # insert dummy indices on internal vertices, these will not show up in the final tensor - is_internal = Dict{vertextype_sites,Bool}() - for v in vs - is_internal[v] = isempty(sites[v]) - if isempty(sites[v]) - # FIXME: This logic only works for trivial flux, breaks for nonzero flux - # TODO: add assert or fix and add test! - sites[v] = [Index(Hflux => 1)] - end - end + =# # Bond coefficients for incoming edge channels. # These become the "M" coefficient matrices that get SVD'd. @@ -109,11 +90,12 @@ function make_sparse_ttn( # Temporary symbolic representation of TTN Hamiltonian tempTTN = Dict( - v => QNArrElem{Scaled{coefficient_type,Prod{Op}},degree(sites, v)}[] for v in vs + v => QNArrElem{Scaled{coefficient_type,Prod{Op}},degree(sites, v)}[] for + v in ordered_verts ) # Build compressed finite state machine representation (tempTTN) - for v in vs + for v in ordered_verts v_degree = degree(sites, v) # For every vertex, find all edges that contain this vertex edges = align_and_reorder_edges(incident_edges(sites, v), ordered_edges) @@ -231,47 +213,55 @@ function make_sparse_ttn( push!(tempTTN[v], el) end - ITensorMPS.remove_dups!(tempTTN[v]) - # manual truncation: isometry on incoming edge + end + + return tempTTN, inbond_coefs +end + +function svd_bond_coefs( + coefficient_type, sites, ordered_verts, ordered_edges, inbond_coefs; kws... +) + Vs = Dict(e => Dict{QN,Matrix{coefficient_type}}() for e in ordered_edges) + for v in ordered_verts + edges = align_and_reorder_edges(incident_edges(sites, v), ordered_edges) + dim_in = findfirst(e -> dst(e) == v, edges) if !isnothing(dim_in) && !isempty(inbond_coefs[edges[dim_in]]) for (q, mat) in inbond_coefs[edges[dim_in]] M = ITensorMPS.toMatrix(mat) U, S, V = svd(M) P = S .^ 2 - truncate!(P; maxdim, cutoff, mindim) + truncate!(P; kws...) 
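          # After truncation, P holds the kept squared singular values, so
          # length(P) below is the compressed channel dimension of this QN sector.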
tdim = length(P) nc = size(M, 2) Vs[edges[dim_in]][q] = Matrix{coefficient_type}(V[1:nc, 1:tdim]) end end end - - return tempTTN, Vs, Hflux, is_internal + return Vs end -function ttn_svd( - coefficient_type::Type{<:Number}, os::OpSum, sites0::IndsNetwork, root_vertex; kws... +function compress_ttn( + coefficient_type, sites0, ordered_verts, ordered_edges, Hflux, tempTTN, Vs ) - linkdir_ref = ITensors.In # safe to always use autofermion default here + # Insert dummy indices on internal vertices, these will not show up in the final tensor + # TODO: come up with a better solution for this + sites = copy(sites0) + is_internal = Dict{vertextype(sites),Bool}() + for v in ordered_verts + is_internal[v] = isempty(sites[v]) + if isempty(sites[v]) + # FIXME: This logic only works for trivial flux, breaks for nonzero flux + # TODO: add assert or fix and add test! + sites[v] = [Index(Hflux => 1)] + end + end - sites = copy(sites0) # copy because of modification to handle internal indices + linkdir_ref = ITensors.In # safe to always use autofermion default here + # Compress this tempTTN representation into dense form edgetype_sites = edgetype(sites) thishasqns = any(v -> hasqns(sites[v]), vertices(sites)) - # traverse tree outwards from root vertex - vs = _default_vertex_ordering(sites, root_vertex) - #vert_number = Dict(zip(vs, 1:length(vs))) - # TODO: Add check in ttn_svd that the ordering matches that of find_index_in_tree, which is used in sorteachterm #fermion-sign! - # store edges in fixed ordering relative to root - ordered_edges = _default_edge_ordering(sites, root_vertex) - - tempTTN, Vs, Hflux, is_internal = make_sparse_ttn( - os, sites, root_vertex, coefficient_type; kws... - ) - - # compress this tempTTN representation into dense form - link_space = Dict{edgetype_sites,Index}() for e in ordered_edges operator_blocks = [q => size(Vq, 2) for (q, Vq) in Vs[e]] @@ -289,7 +279,7 @@ function ttn_svd( end qnblockdim(i::Index, q::QN) = blockdim(i, qnblock(i, q)) - for v in vs + for v in ordered_verts v_degree = degree(sites, v) # redo the whole thing like before # TODO: use neighborhood instead of going through all edges, see above @@ -428,10 +418,57 @@ function ttn_svd( H[v] += T * ITensorNetworks.computeSiteProd(sites, Prod([(Op("Id", v))])) end end - return H end +struct TermQN{V} + sites::IndsNetwork + op_cache::Dict{Pair{String,V},ITensor} + TermQN{V}(s) where {V} = new{V}(s, Dict{Pair{String,V},ITensor}()) +end + +function (t::TermQN)(term) + q = QN() + for st in term + op_tensor = get(t.op_cache, ITensors.which_op(st) => ITensors.site(st), nothing) + if op_tensor === nothing + op_tensor = op( + t.sites[ITensors.site(st)], ITensors.which_op(st); ITensors.params(st)... + ) + t.op_cache[ITensors.which_op(st) => ITensors.site(st)] = op_tensor + end + if !isnothing(flux(op_tensor)) + q += flux(op_tensor) + end + end + return q +end + +function ttn_svd( + coefficient_type::Type{<:Number}, os::OpSum, sites::IndsNetwork, root_vertex; kws... +) + calc_qn = TermQN{vertextype(sites)}(sites) + + tempTTN, inbond_coefs = make_sparse_ttn(os, sites, root_vertex, coefficient_type, calc_qn) + + Hflux = -calc_qn(terms(first(terms(os)))) + + # Traverse tree outwards from root vertex + ordered_verts = _default_vertex_ordering(sites, root_vertex) + # Store edges in fixed ordering relative to root + ordered_edges = _default_edge_ordering(sites, root_vertex) + + Vs = svd_bond_coefs( + coefficient_type, sites, ordered_verts, ordered_edges, inbond_coefs; kws... 
+ ) + + T = compress_ttn( + coefficient_type, sites, ordered_verts, ordered_edges, Hflux, tempTTN, Vs + ) + + return T +end + # # Tree adaptations of functionalities in ITensors.jl/src/physics/autompo/opsum_to_mpo_generic.jl # @@ -489,8 +526,7 @@ function sorteachterm(os::OpSum, sites, root_vertex) # + site_positions = map from vertex to where it is in ordering (inverse map of `ordering`) ordering = _default_vertex_ordering(sites, root_vertex) site_positions = Dict(zip(ordering, 1:length(ordering))) - findpos(op::Op) = site_positions[site(op)] - isless_site(o1::Op, o2::Op) = findpos(o1) < findpos(o2) + isless_site(o1::Op, o2::Op) = site_positions[site(o1)] < site_positions[site(o2)] N = nv(sites) for j in eachindex(os) From 2fd3bc683c732ac760852b363e26ae6dccb0f512 Mon Sep 17 00:00:00 2001 From: Miles Date: Thu, 2 May 2024 22:14:50 -0400 Subject: [PATCH 07/17] Split MatElem and QNArrElem types into separate files --- src/ITensorNetworks.jl | 2 + src/treetensornetworks/matelem.jl | 41 +++++++ src/treetensornetworks/opsum_to_ttn.jl | 161 +++++++++++++------------ src/treetensornetworks/qnarrelem.jl | 30 +++++ 4 files changed, 157 insertions(+), 77 deletions(-) create mode 100644 src/treetensornetworks/matelem.jl create mode 100644 src/treetensornetworks/qnarrelem.jl diff --git a/src/ITensorNetworks.jl b/src/ITensorNetworks.jl index 2eca4ad4..e6d95931 100644 --- a/src/ITensorNetworks.jl +++ b/src/ITensorNetworks.jl @@ -40,6 +40,8 @@ include("solvers/local_solvers/contract.jl") include("solvers/local_solvers/linsolve.jl") include("treetensornetworks/abstracttreetensornetwork.jl") include("treetensornetworks/treetensornetwork.jl") +include("treetensornetworks/matelem.jl") +include("treetensornetworks/qnarrelem.jl") include("treetensornetworks/opsum_to_ttn.jl") include("treetensornetworks/projttns/abstractprojttn.jl") include("treetensornetworks/projttns/projttn.jl") diff --git a/src/treetensornetworks/matelem.jl b/src/treetensornetworks/matelem.jl new file mode 100644 index 00000000..d1b98dc0 --- /dev/null +++ b/src/treetensornetworks/matelem.jl @@ -0,0 +1,41 @@ + +# +# MatElem +# + +struct MatElem{T} + row::Int + col::Int + val::T +end + +#function Base.show(io::IO,m::MatElem) +# print(io,"($(m.row),$(m.col),$(m.val))") +#end + +function toMatrix(els::Vector{MatElem{T}})::Matrix{T} where {T} + nr = 0 + nc = 0 + for el in els + nr = max(nr, el.row) + nc = max(nc, el.col) + end + M = zeros(T, nr, nc) + for el in els + M[el.row, el.col] = el.val + end + return M +end + +function Base.:(==)(m1::MatElem{T}, m2::MatElem{T})::Bool where {T} + return (m1.row == m2.row && m1.col == m2.col && m1.val == m2.val) +end + +function Base.isless(m1::MatElem{T}, m2::MatElem{T})::Bool where {T} + if m1.row != m2.row + return m1.row < m2.row + elseif m1.col != m2.col + return m1.col < m2.col + end + return m1.val < m2.val +end diff --git a/src/treetensornetworks/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn.jl index aa0787e8..3fe0bcd3 100644 --- a/src/treetensornetworks/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn.jl @@ -1,6 +1,5 @@ using Graphs: degree, is_tree -using ITensors: flux, has_fermion_string, itensor, ops, removeqns, space, val -using ITensors.ITensorMPS: ITensorMPS, cutoff, linkdims, truncate! 
+using ITensors: flux, has_fermion_string, itensor, ops, removeqns, space, truncate!, val using ITensors.LazyApply: Prod, Sum, coefficient using ITensors.NDTensors: Block, blockdim, maxdim, nblocks, nnzblocks using ITensors.Ops: argument, coefficient, Op, OpSum, name, params, site, terms, which_op @@ -30,6 +29,13 @@ end # Tree adaptations of functionalities in ITensors.jl/src/physics/autompo/opsum_to_mpo.jl # +function determine_val_type(terms) + for t in terms + (!isreal(coefficient(t))) && return ComplexF64 + end + return Float64 +end + """ ttn_svd(os::OpSum, sites::IndsNetwork, root_vertex, kwargs...) @@ -38,16 +44,40 @@ Hamiltonian, compressing shared interaction channels. """ function ttn_svd(os::OpSum, sites::IndsNetwork, root_vertex; kwargs...) # Function barrier to improve type stability - coefficient_type = ITensorMPS.determineValType(terms(os)) + coefficient_type = determine_val_type(terms(os)) return ttn_svd(coefficient_type, os, sites, root_vertex; kwargs...) end -# -# TODO: -# - remove is_internal from returns? maybe make it inside ttn_svd beforehand -# - remove Hflux from returns along with is_internal -# - split SVD'ing of "M"'s into Vs into a separate function -# +function remove_dups!(v::Vector) + N = length(v) + (N == 0) && return nothing + sort!(v) + n = 1 + u = 2 + while u <= N + while u < N && v[u] == v[n] + u += 1 + end + if v[u] != v[n] + v[n + 1] = v[u] + n += 1 + end + u += 1 + end + resize!(v, n) + return nothing +end + +function pos_in_link!(linkmap::Dict, k) + isempty(k) && return -1 + pos = get(linkmap, k, -1) + if pos == -1 + pos = length(linkmap) + 1 + linkmap[k] = pos + end + return pos +end + function make_sparse_ttn( os::OpSum, sites::IndsNetwork, root_vertex, coefficient_type, calc_qn ) @@ -59,30 +89,10 @@ function make_sparse_ttn( inmaps = Dict{Pair{edgetype_sites,QN},Dict{Vector{Op},Int}}() outmaps = Dict{Pair{edgetype_sites,QN},Dict{Vector{Op},Int}}() - #= - op_cache = Dict{Pair{String,vertextype_sites},ITensor}() - function calc_qn(term::Vector{Op}) - q = QN() - for st in term - op_tensor = get(op_cache, ITensors.which_op(st) => ITensors.site(st), nothing) - if op_tensor === nothing - op_tensor = op( - sites[ITensors.site(st)], ITensors.which_op(st); ITensors.params(st)... - ) - op_cache[ITensors.which_op(st) => ITensors.site(st)] = op_tensor - end - if !isnothing(flux(op_tensor)) - q += flux(op_tensor) - end - end - return q - end - =# - # Bond coefficients for incoming edge channels. # These become the "M" coefficient matrices that get SVD'd. 
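  # (MatElem and toMatrix now come from the new file
  # src/treetensornetworks/matelem.jl added in this patch, replacing the
  # ITensorMPS.MatElem/ITensorMPS.toMatrix calls used before.)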
inbond_coefs = Dict( - e => Dict{QN,Vector{ITensorMPS.MatElem{coefficient_type}}}() for e in ordered_edges + e => Dict{QN,Vector{MatElem{coefficient_type}}}() for e in ordered_edges ) # List of terms for which the coefficient has been added to a site factor @@ -174,13 +184,13 @@ function make_sparse_ttn( coutmap = get!(outmaps, edge_in => not_incoming_qn, Dict{Vector{Op},Int}()) cinmap = get!(inmaps, edge_in => -incoming_qn, Dict{Vector{Op},Int}()) - bond_row = ITensorMPS.posInLink!(cinmap, incoming) - bond_col = ITensorMPS.posInLink!(coutmap, not_incoming) # get incoming channel + bond_row = pos_in_link!(cinmap, incoming) + bond_col = pos_in_link!(coutmap, not_incoming) # get incoming channel bond_coef = convert(coefficient_type, coefficient(term)) q_inbond_coefs = get!( - inbond_coefs[edge_in], incoming_qn, ITensorMPS.MatElem{coefficient_type}[] + inbond_coefs[edge_in], incoming_qn, MatElem{coefficient_type}[] ) - push!(q_inbond_coefs, ITensorMPS.MatElem(bond_row, bond_col, bond_coef)) + push!(q_inbond_coefs, MatElem(bond_row, bond_col, bond_coef)) T_inds[dim_in] = bond_col T_qns[dim_in] = -incoming_qn end @@ -189,7 +199,7 @@ function make_sparse_ttn( outmaps, edges[dout] => outgoing_qns[edges[dout]], Dict{Vector{Op},Int}() ) # add outgoing channel - T_inds[dout] = ITensorMPS.posInLink!(coutmap, outgoing[edges[dout]]) + T_inds[dout] = pos_in_link!(coutmap, outgoing[edges[dout]]) T_qns[dout] = outgoing_qns[edges[dout]] end # if term starts at this site, add its coefficient as a site factor @@ -213,7 +223,7 @@ function make_sparse_ttn( push!(tempTTN[v], el) end - ITensorMPS.remove_dups!(tempTTN[v]) + remove_dups!(tempTTN[v]) end return tempTTN, inbond_coefs @@ -228,7 +238,7 @@ function svd_bond_coefs( dim_in = findfirst(e -> dst(e) == v, edges) if !isnothing(dim_in) && !isempty(inbond_coefs[edges[dim_in]]) for (q, mat) in inbond_coefs[edges[dim_in]] - M = ITensorMPS.toMatrix(mat) + M = toMatrix(mat) U, S, V = svd(M) P = S .^ 2 truncate!(P; kws...) @@ -257,7 +267,7 @@ function compress_ttn( end end - linkdir_ref = ITensors.In # safe to always use autofermion default here + linkdir_ref = ITensors.In # safe to always use autofermion default here # Compress this tempTTN representation into dense form edgetype_sites = edgetype(sites) thishasqns = any(v -> hasqns(sites[v]), vertices(sites)) @@ -281,7 +291,7 @@ function compress_ttn( for v in ordered_verts v_degree = degree(sites, v) - # redo the whole thing like before + # Redo the whole thing like before # TODO: use neighborhood instead of going through all edges, see above edges = align_and_reorder_edges(incident_edges(sites, v), ordered_edges) dim_in = findfirst(e -> dst(e) == v, edges) @@ -421,6 +431,12 @@ function compress_ttn( return H end +# +# TermQN implements a function with an internal cache. +# Calling it on a term determines that term's flux +# but tries not to rebuild the corresponding ITensor. +# (Previously it was the function calc_qn.) +# struct TermQN{V} sites::IndsNetwork op_cache::Dict{Pair{String,V},ITensor} @@ -430,12 +446,10 @@ end function (t::TermQN)(term) q = QN() for st in term - op_tensor = get(t.op_cache, ITensors.which_op(st) => ITensors.site(st), nothing) + op_tensor = get(t.op_cache, which_op(st) => site(st), nothing) if op_tensor === nothing - op_tensor = op( - t.sites[ITensors.site(st)], ITensors.which_op(st); ITensors.params(st)... - ) - t.op_cache[ITensors.which_op(st) => ITensors.site(st)] = op_tensor + op_tensor = op(t.sites[site(st)], which_op(st); params(st)...) 
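      # Cache the freshly built operator tensor under (op name => site) so
      # repeated terms using the same local operator skip rebuilding it.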
+ t.op_cache[which_op(st) => site(st)] = op_tensor end if !isnothing(flux(op_tensor)) q += flux(op_tensor) @@ -451,8 +465,6 @@ function ttn_svd( tempTTN, inbond_coefs = make_sparse_ttn(os, sites, root_vertex, coefficient_type, calc_qn) - Hflux = -calc_qn(terms(first(terms(os)))) - # Traverse tree outwards from root vertex ordered_verts = _default_vertex_ordering(sites, root_vertex) # Store edges in fixed ordering relative to root @@ -462,6 +474,8 @@ function ttn_svd( coefficient_type, sites, ordered_verts, ordered_edges, inbond_coefs; kws... ) + Hflux = -calc_qn(terms(first(terms(os)))) + T = compress_ttn( coefficient_type, sites, ordered_verts, ordered_edges, Hflux, tempTTN, Vs ) @@ -580,6 +594,28 @@ function sorteachterm(os::OpSum, sites, root_vertex) return os end +function sortmergeterms(os::OpSum{C}) where {C} + os_sorted_terms = sort(terms(os)) + os = Sum(os_sorted_terms) + # Merge (add) terms with same operators + merge_os_data = Scaled{C,Prod{Op}}[] + last_term = copy(os[1]) + last_term_coef = coefficient(last_term) + for n in 2:length(os) + if argument(os[n]) == argument(last_term) + last_term_coef += coefficient(os[n]) + last_term = last_term_coef * argument(last_term) + else + push!(merge_os_data, last_term) + last_term = os[n] + last_term_coef = coefficient(last_term) + end + end + push!(merge_os_data, last_term) + os = Sum(merge_os_data) + return os +end + """ ttn(os::OpSum, sites::IndsNetwork{<:Index}; kwargs...) ttn(eltype::Type{<:Number}, os::OpSum, sites::IndsNetwork{<:Index}; kwargs...) @@ -598,7 +634,7 @@ function ttn( check_terms_support(os, sites) os = deepcopy(os) #TODO: do we need this? sorteachterm copies `os` again os = sorteachterm(os, sites, root_vertex) - os = ITensorMPS.sortmergeterms(os) + os = sortmergeterms(os) return ttn_svd(os, sites, root_vertex; kwargs...) end @@ -636,40 +672,11 @@ function ttn(eltype::Type{<:Number}, os, sites::IndsNetwork; kwargs...) return NDTensors.convert_scalartype(eltype, ttn(os, sites; kwargs...)) end -##################################### -# QNArrElem (sparse array with QNs) # -##################################### - -struct QNArrElem{T,N} - qn_idxs::MVector{N,QN} - idxs::MVector{N,Int} - val::T -end - -function Base.:(==)(a1::QNArrElem{T,N}, a2::QNArrElem{T,N})::Bool where {T,N} - return (a1.idxs == a2.idxs && a1.val == a2.val && a1.qn_idxs == a2.qn_idxs) -end - -function Base.isless(a1::QNArrElem{T,N}, a2::QNArrElem{T,N})::Bool where {T,N} - ###two separate loops s.t. 
the MPS case reduces to the ITensors Implementation of QNMatElem - for n in 1:N - if a1.qn_idxs[n] != a2.qn_idxs[n] - return a1.qn_idxs[n] < a2.qn_idxs[n] - end - end - for n in 1:N - if a1.idxs[n] != a2.idxs[n] - return a1.idxs[n] < a2.idxs[n] - end - end - return a1.val < a2.val -end - # # Sparse finite state machine construction # -# allow sparse arrays with ITensors.Sum entries +# Allow sparse arrays with ITensors.Sum entries function Base.zero(::Type{S}) where {S<:Sum} return S() end diff --git a/src/treetensornetworks/qnarrelem.jl b/src/treetensornetworks/qnarrelem.jl new file mode 100644 index 00000000..4f765f07 --- /dev/null +++ b/src/treetensornetworks/qnarrelem.jl @@ -0,0 +1,30 @@ +using StaticArrays: MVector + +##################################### +# QNArrElem (sparse array with QNs) # +##################################### + +struct QNArrElem{T,N} + qn_idxs::MVector{N,QN} + idxs::MVector{N,Int} + val::T +end + +function Base.:(==)(a1::QNArrElem{T,N}, a2::QNArrElem{T,N})::Bool where {T,N} + return (a1.idxs == a2.idxs && a1.val == a2.val && a1.qn_idxs == a2.qn_idxs) +end + +function Base.isless(a1::QNArrElem{T,N}, a2::QNArrElem{T,N})::Bool where {T,N} + ###two separate loops s.t. the MPS case reduces to the ITensors Implementation of QNMatElem + for n in 1:N + if a1.qn_idxs[n] != a2.qn_idxs[n] + return a1.qn_idxs[n] < a2.qn_idxs[n] + end + end + for n in 1:N + if a1.idxs[n] != a2.idxs[n] + return a1.idxs[n] < a2.idxs[n] + end + end + return a1.val < a2.val +end From 1a3a4f9c622f2df894b24d7f7a8c8b22c481bc58 Mon Sep 17 00:00:00 2001 From: Miles Date: Thu, 2 May 2024 22:34:12 -0400 Subject: [PATCH 08/17] Fix missing cutoff keyword in apply.jl --- src/apply.jl | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/apply.jl b/src/apply.jl index 8548acc8..62bc5676 100644 --- a/src/apply.jl +++ b/src/apply.jl @@ -266,7 +266,12 @@ function ITensors.apply( end function ITensors.apply( - o⃗::Scaled, ψ::AbstractITensorNetwork; normalize=false, ortho=false, apply_kwargs... + o⃗::Scaled, + ψ::AbstractITensorNetwork; + cutoff=nothing, + normalize=false, + ortho=false, + apply_kwargs..., ) return maybe_real(Ops.coefficient(o⃗)) * apply(Ops.argument(o⃗), ψ; cutoff, maxdim, normalize, ortho, apply_kwargs...) From 24e5eda5abe51d9776ac40c2903da39ba9e5e7d2 Mon Sep 17 00:00:00 2001 From: Miles Date: Sun, 5 May 2024 19:08:08 -0400 Subject: [PATCH 09/17] Rename tempTTN to temp_ttn --- src/treetensornetworks/opsum_to_ttn.jl | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn.jl index 3fe0bcd3..545dc83c 100644 --- a/src/treetensornetworks/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn.jl @@ -99,12 +99,12 @@ function make_sparse_ttn( site_coef_done = Prod{Op}[] # Temporary symbolic representation of TTN Hamiltonian - tempTTN = Dict( + temp_ttn = Dict( v => QNArrElem{Scaled{coefficient_type,Prod{Op}},degree(sites, v)}[] for v in ordered_verts ) - # Build compressed finite state machine representation (tempTTN) + # Build compressed finite state machine representation (temp_ttn) for v in ordered_verts v_degree = degree(sites, v) # For every vertex, find all edges that contain this vertex @@ -206,6 +206,9 @@ function make_sparse_ttn( site_coef = one(coefficient_type) if (isnothing(dim_in) || T_inds[dim_in] == -1) && argument(term) ∉ site_coef_done site_coef = coefficient(term) + # TODO: update this code if the behavior (bug?) 
+ # is changed where typeof(coefficient(term)) of an + # OpSum term is always ComplexFloat64 # required since coefficient seems to return ComplexF64 even if coefficient_type is determined to be real site_coef = convert(coefficient_type, site_coef) push!(site_coef_done, argument(term)) @@ -221,12 +224,12 @@ function make_sparse_ttn( # save indices and value of symbolic tensor entry el = QNArrElem(T_qns, T_inds, site_coef * Prod(onsite)) - push!(tempTTN[v], el) + push!(temp_ttn[v], el) end - remove_dups!(tempTTN[v]) + remove_dups!(temp_ttn[v]) end - return tempTTN, inbond_coefs + return temp_ttn, inbond_coefs end function svd_bond_coefs( @@ -252,7 +255,7 @@ function svd_bond_coefs( end function compress_ttn( - coefficient_type, sites0, ordered_verts, ordered_edges, Hflux, tempTTN, Vs + coefficient_type, sites0, ordered_verts, ordered_edges, Hflux, temp_ttn, Vs ) # Insert dummy indices on internal vertices, these will not show up in the final tensor # TODO: come up with a better solution for this @@ -268,7 +271,7 @@ function compress_ttn( end linkdir_ref = ITensors.In # safe to always use autofermion default here - # Compress this tempTTN representation into dense form + # Compress this temp_ttn representation into dense form edgetype_sites = edgetype(sites) thishasqns = any(v -> hasqns(sites[v]), vertices(sites)) @@ -302,7 +305,7 @@ function compress_ttn( # construct blocks blocks = Dict{Tuple{Block{v_degree},Vector{Op}},Array{coefficient_type,v_degree}}() - for el in tempTTN[v] + for el in temp_ttn[v] t = el.val (abs(coefficient(t)) > eps(real(coefficient_type))) || continue block_helper_inds = fill(-1, v_degree) # we manipulate T_inds later, and loose track of ending/starting information, so keep track of it here @@ -463,7 +466,7 @@ function ttn_svd( ) calc_qn = TermQN{vertextype(sites)}(sites) - tempTTN, inbond_coefs = make_sparse_ttn(os, sites, root_vertex, coefficient_type, calc_qn) + temp_ttn, inbond_coefs = make_sparse_ttn(os, sites, root_vertex, coefficient_type, calc_qn) # Traverse tree outwards from root vertex ordered_verts = _default_vertex_ordering(sites, root_vertex) @@ -477,7 +480,7 @@ function ttn_svd( Hflux = -calc_qn(terms(first(terms(os)))) T = compress_ttn( - coefficient_type, sites, ordered_verts, ordered_edges, Hflux, tempTTN, Vs + coefficient_type, sites, ordered_verts, ordered_edges, Hflux, temp_ttn, Vs ) return T From 7efd76b8bda028f50287d5637c11e02ebab5191a Mon Sep 17 00:00:00 2001 From: Miles Date: Sun, 5 May 2024 19:08:22 -0400 Subject: [PATCH 10/17] Improvement to determine_coefficient_type function --- src/treetensornetworks/opsum_to_ttn.jl | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn.jl index 545dc83c..18db1aad 100644 --- a/src/treetensornetworks/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn.jl @@ -29,11 +29,12 @@ end # Tree adaptations of functionalities in ITensors.jl/src/physics/autompo/opsum_to_mpo.jl # -function determine_val_type(terms) - for t in terms - (!isreal(coefficient(t))) && return ComplexF64 +function determine_coefficient_type(terms) + isempty(terms) && return Float64 + if all(t->isreal(coefficient(t)),terms) + return real(typeof(coefficient(first(terms)))) end - return Float64 + return typeof(coefficient(first(terms))) end """ @@ -44,7 +45,7 @@ Hamiltonian, compressing shared interaction channels. """ function ttn_svd(os::OpSum, sites::IndsNetwork, root_vertex; kwargs...) 
# Function barrier to improve type stability - coefficient_type = determine_val_type(terms(os)) + coefficient_type = determine_coefficient_type(terms(os)) return ttn_svd(coefficient_type, os, sites, root_vertex; kwargs...) end From e15392af830a1b393150641beac792e15be8b43f Mon Sep 17 00:00:00 2001 From: Miles Date: Sun, 5 May 2024 21:43:49 -0400 Subject: [PATCH 11/17] Minor cleanups to ttn_svd --- src/treetensornetworks/opsum_to_ttn.jl | 64 ++++++++++++-------------- 1 file changed, 29 insertions(+), 35 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn.jl index 18db1aad..2763bb59 100644 --- a/src/treetensornetworks/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn.jl @@ -1,3 +1,4 @@ +#using FillArrays: OneElement using Graphs: degree, is_tree using ITensors: flux, has_fermion_string, itensor, ops, removeqns, space, truncate!, val using ITensors.LazyApply: Prod, Sum, coefficient @@ -80,15 +81,11 @@ function pos_in_link!(linkmap::Dict, k) end function make_sparse_ttn( - os::OpSum, sites::IndsNetwork, root_vertex, coefficient_type, calc_qn + os::OpSum, sites::IndsNetwork, root_vertex, coefficient_type, calc_qn; + ordered_verts, ordered_edges ) - edgetype_sites = edgetype(sites) - vertextype_sites = vertextype(sites) - - ordered_verts = _default_vertex_ordering(sites, root_vertex) - ordered_edges = _default_edge_ordering(sites, root_vertex) - inmaps = Dict{Pair{edgetype_sites,QN},Dict{Vector{Op},Int}}() - outmaps = Dict{Pair{edgetype_sites,QN},Dict{Vector{Op},Int}}() + inmaps = Dict{Pair{edgetype(sites),QN},Dict{Vector{Op},Int}}() + outmaps = Dict{Pair{edgetype(sites),QN},Dict{Vector{Op},Int}}() # Bond coefficients for incoming edge channels. # These become the "M" coefficient matrices that get SVD'd. @@ -113,12 +110,12 @@ function make_sparse_ttn( # Use the corresponding ordering as index order for tensor elements at this site dim_in = findfirst(e -> dst(e) == v, edges) - edge_in = (isnothing(dim_in) ? [] : edges[dim_in]) + edge_in = (isnothing(dim_in) ? 
nothing : edges[dim_in]) dims_out = findall(e -> src(e) == v, edges) edges_out = edges[dims_out] - # For every site w except v, determine the incident edge to v that lies - # in the edge_path(w,v) + # For every site w except v, determine the incident edge to v + # that lies in the edge_path(w,v) subgraphs = split_at_vertex(sites, v) _boundary_edges = align_edges( [only(boundary_edges(underlying_graph(sites), subgraph)) for subgraph in subgraphs], @@ -141,29 +138,29 @@ function make_sparse_ttn( crosses_vertex = true else crosses_vertex = - !isone(length(Set([which_incident_edge[site] for site in ITensors.site.(ops)]))) + !isone(length(Set([which_incident_edge[site] for site in site.(ops)]))) end # If term doesn't cross vertex, skip it crosses_vertex || continue # filter out factor that acts on current vertex - onsite = filter(t -> (ITensors.site(t) == v), ops) - not_onsite_ops = setdiff(ops, onsite) + onsite = filter(t -> (site(t) == v), ops) + non_onsite_ops = setdiff(ops, onsite) # filter out ops that come in from the direction of the incoming edge incoming = filter( - t -> which_incident_edge[ITensors.site(t)] == edge_in, not_onsite_ops + t -> which_incident_edge[site(t)] == edge_in, non_onsite_ops ) # also store all non-incoming ops in standard order, used for channel merging not_incoming = filter( - t -> (ITensors.site(t) == v) || which_incident_edge[ITensors.site(t)] != edge_in, + t -> (site(t) == v) || which_incident_edge[site(t)] != edge_in, ops, ) # for every outgoing edge, filter out ops that go out along that edge outgoing = Dict( - e => filter(t -> which_incident_edge[ITensors.site(t)] == e, not_onsite_ops) for + e => filter(t -> which_incident_edge[site(t)] == e, non_onsite_ops) for e in edges_out ) @@ -191,6 +188,8 @@ function make_sparse_ttn( q_inbond_coefs = get!( inbond_coefs[edge_in], incoming_qn, MatElem{coefficient_type}[] ) + #TODO: for OneElement, fill in size here + #push!(q_inbond_coefs, OneElement(bond_coef,(bond_row, bond_col), ())) push!(q_inbond_coefs, MatElem(bond_row, bond_col, bond_coef)) T_inds[dim_in] = bond_col T_qns[dim_in] = -incoming_qn @@ -203,28 +202,22 @@ function make_sparse_ttn( T_inds[dout] = pos_in_link!(coutmap, outgoing[edges[dout]]) T_qns[dout] = outgoing_qns[edges[dout]] end - # if term starts at this site, add its coefficient as a site factor + # If term starts at this site, add its coefficient as a site factor site_coef = one(coefficient_type) if (isnothing(dim_in) || T_inds[dim_in] == -1) && argument(term) ∉ site_coef_done - site_coef = coefficient(term) - # TODO: update this code if the behavior (bug?) 
- # is changed where typeof(coefficient(term)) of an - # OpSum term is always ComplexFloat64 - # required since coefficient seems to return ComplexF64 even if coefficient_type is determined to be real - site_coef = convert(coefficient_type, site_coef) + site_coef = convert(coefficient_type,coefficient(term)) push!(site_coef_done, argument(term)) end - # add onsite identity for interactions passing through vertex + # Add onsite identity for interactions passing through vertex if isempty(onsite) if !ITensors.using_auto_fermion() && isfermionic(incoming, sites) - error("No verified fermion support for automatic TTN constructor!") + push!(onsite, Op("F", v)) else push!(onsite, Op("Id", v)) end end - # save indices and value of symbolic tensor entry + # Save indices and value of symbolic tensor entry el = QNArrElem(T_qns, T_inds, site_coef * Prod(onsite)) - push!(temp_ttn[v], el) end remove_dups!(temp_ttn[v]) @@ -234,7 +227,7 @@ function make_sparse_ttn( end function svd_bond_coefs( - coefficient_type, sites, ordered_verts, ordered_edges, inbond_coefs; kws... + coefficient_type, sites, inbond_coefs; ordered_verts, ordered_edges, kws... ) Vs = Dict(e => Dict{QN,Matrix{coefficient_type}}() for e in ordered_edges) for v in ordered_verts @@ -256,7 +249,7 @@ function svd_bond_coefs( end function compress_ttn( - coefficient_type, sites0, ordered_verts, ordered_edges, Hflux, temp_ttn, Vs + coefficient_type, sites0, Hflux, temp_ttn, Vs; ordered_verts, ordered_edges ) # Insert dummy indices on internal vertices, these will not show up in the final tensor # TODO: come up with a better solution for this @@ -463,25 +456,26 @@ function (t::TermQN)(term) end function ttn_svd( - coefficient_type::Type{<:Number}, os::OpSum, sites::IndsNetwork, root_vertex; kws... + coefficient_type::Type{<:Number}, os::OpSum, sites::IndsNetwork, root_vertex; + kws... ) calc_qn = TermQN{vertextype(sites)}(sites) - temp_ttn, inbond_coefs = make_sparse_ttn(os, sites, root_vertex, coefficient_type, calc_qn) - # Traverse tree outwards from root vertex ordered_verts = _default_vertex_ordering(sites, root_vertex) # Store edges in fixed ordering relative to root ordered_edges = _default_edge_ordering(sites, root_vertex) + temp_ttn, inbond_coefs = make_sparse_ttn(os, sites, root_vertex, coefficient_type, calc_qn; ordered_verts, ordered_edges) + Vs = svd_bond_coefs( - coefficient_type, sites, ordered_verts, ordered_edges, inbond_coefs; kws... + coefficient_type, sites, inbond_coefs; ordered_verts, ordered_edges, kws... 
) Hflux = -calc_qn(terms(first(terms(os)))) T = compress_ttn( - coefficient_type, sites, ordered_verts, ordered_edges, Hflux, temp_ttn, Vs + coefficient_type, sites, Hflux, temp_ttn, Vs; ordered_verts, ordered_edges ) return T From f970428f731d3e46de7a728141dbd10fb25b63d3 Mon Sep 17 00:00:00 2001 From: Miles Date: Sun, 5 May 2024 22:00:46 -0400 Subject: [PATCH 12/17] Variable name changes --- src/treetensornetworks/opsum_to_ttn.jl | 95 +++++++++++++------------- 1 file changed, 47 insertions(+), 48 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn.jl index 2763bb59..89c14776 100644 --- a/src/treetensornetworks/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn.jl @@ -80,8 +80,8 @@ function pos_in_link!(linkmap::Dict, k) return pos end -function make_sparse_ttn( - os::OpSum, sites::IndsNetwork, root_vertex, coefficient_type, calc_qn; +function make_symbolic_ttn( + opsum::OpSum, sites::IndsNetwork, root_vertex, coefficient_type, calc_qn; ordered_verts, ordered_edges ) inmaps = Dict{Pair{edgetype(sites),QN},Dict{Vector{Op},Int}}() @@ -97,15 +97,17 @@ function make_sparse_ttn( site_coef_done = Prod{Op}[] # Temporary symbolic representation of TTN Hamiltonian - temp_ttn = Dict( + symbolic_ttn = Dict( v => QNArrElem{Scaled{coefficient_type,Prod{Op}},degree(sites, v)}[] for v in ordered_verts ) - # Build compressed finite state machine representation (temp_ttn) + # Build compressed finite state machine representation (symbolic_ttn) for v in ordered_verts v_degree = degree(sites, v) # For every vertex, find all edges that contain this vertex + # (align_and_reorder_edges makes the output of indicident edges match the + # direction and ordering match that of ordered_edges) edges = align_and_reorder_edges(incident_edges(sites, v), ordered_edges) # Use the corresponding ordering as index order for tensor elements at this site @@ -116,22 +118,21 @@ function make_sparse_ttn( # For every site w except v, determine the incident edge to v # that lies in the edge_path(w,v) + # TODO: better way to make which_incident_edge below? 
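      # For orientation, a toy example of the map built below (hypothetical
      # path graph 1 - 2 - 3 - 4 with v = 2): split_at_vertex removes v,
      # leaving subgraphs [1] and [3, 4] whose boundary edges at v are 1 => 2
      # and 2 => 3, so, up to the edge directions fixed by align_edges,
      #
      #   which_incident_edge == Dict(1 => (1 => 2), 3 => (2 => 3), 4 => (2 => 3))
      #
      # i.e. every other vertex w maps to the unique edge at v lying on the
      # path from w to v.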
subgraphs = split_at_vertex(sites, v) - _boundary_edges = align_edges( - [only(boundary_edges(underlying_graph(sites), subgraph)) for subgraph in subgraphs], - edges, - ) + _boundary_edges = [only(boundary_edges(underlying_graph(sites), subgraph)) for subgraph in subgraphs] + _boundary_edges = align_edges(_boundary_edges, edges) which_incident_edge = Dict( Iterators.flatten([ subgraphs[i] .=> ((_boundary_edges[i]),) for i in eachindex(subgraphs) ]), ) - # sanity check, leaves only have single incoming or outgoing edge + # Sanity check, leaves only have single incoming or outgoing edge @assert !isempty(dims_out) || !isnothing(dim_in) (isempty(dims_out) || isnothing(dim_in)) && @assert is_leaf_vertex(sites, v) - for term in os + for term in opsum # Loop over OpSum and pick out terms that act on current vertex ops = ITensors.terms(term) if v in ITensors.site.(ops) @@ -143,53 +144,51 @@ function make_sparse_ttn( # If term doesn't cross vertex, skip it crosses_vertex || continue - # filter out factor that acts on current vertex - onsite = filter(t -> (site(t) == v), ops) - non_onsite_ops = setdiff(ops, onsite) + # Filter out ops that acts on current vertex + onsite_ops = filter(t -> (site(t) == v), ops) + non_onsite_ops = setdiff(ops, onsite_ops) - # filter out ops that come in from the direction of the incoming edge - incoming = filter( + # Filter out ops that come in from the direction of the incoming edge + incoming_ops = filter( t -> which_incident_edge[site(t)] == edge_in, non_onsite_ops ) - # also store all non-incoming ops in standard order, used for channel merging - not_incoming = filter( + # Also store all non-incoming ops in standard order, used for channel merging + non_incoming_ops = filter( t -> (site(t) == v) || which_incident_edge[site(t)] != edge_in, ops, ) - # for every outgoing edge, filter out ops that go out along that edge - outgoing = Dict( + # For every outgoing edge, filter out ops that go out along that edge + outgoing_ops = Dict( e => filter(t -> which_incident_edge[site(t)] == e, non_onsite_ops) for e in edges_out ) - # compute QNs - incoming_qn = calc_qn(incoming) - not_incoming_qn = calc_qn(not_incoming) - outgoing_qns = Dict(e => calc_qn(outgoing[e]) for e in edges_out) - site_qn = calc_qn(onsite) + # Compute QNs + incoming_qn = calc_qn(incoming_ops) + non_incoming_qn = calc_qn(non_incoming_ops) + outgoing_qns = Dict(e => calc_qn(outgoing_ops[e]) for e in edges_out) + site_qn = calc_qn(onsite_ops) - # initialize QNArrayElement indices and quantum numbers + # Initialize QNArrayElement indices and quantum numbers T_inds = MVector{v_degree}(fill(-1, v_degree)) T_qns = MVector{v_degree}(fill(QN(), v_degree)) # initialize ArrayElement indices for inbond_coefs bond_row = -1 bond_col = -1 - if !isempty(incoming) - # get the correct map from edge=>QN to term and channel - # this checks if term exists on edge=>QN ( otherwise insert it) and returns it's index - coutmap = get!(outmaps, edge_in => not_incoming_qn, Dict{Vector{Op},Int}()) + if !isempty(incoming_ops) + # Get the correct map from edge=>QN to term and channel. + # This checks if term exists on edge=>QN (otherwise insert it) and returns its index. 
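      # A sketch of that insert-or-look-up behavior (the actual pos_in_link!
      # helper is defined earlier in this file; this hypothetical version only
      # assumes channel indices are handed out densely in first-seen order):
      #
      #   function pos_in_link!(linkmap::Dict, k)
      #     pos = get(linkmap, k, -1)
      #     pos == -1 || return pos      # term already registered: reuse index
      #     pos = length(linkmap) + 1    # otherwise open a new channel
      #     linkmap[k] = pos
      #     return pos
      #   end
      #
      # so terms with identical operator content on an edge/QN sector share a
      # single channel index, which is what makes the later SVD compression of
      # shared interaction channels possible.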
+ coutmap = get!(outmaps, edge_in => non_incoming_qn, Dict{Vector{Op},Int}()) cinmap = get!(inmaps, edge_in => -incoming_qn, Dict{Vector{Op},Int}()) - bond_row = pos_in_link!(cinmap, incoming) - bond_col = pos_in_link!(coutmap, not_incoming) # get incoming channel + bond_row = pos_in_link!(cinmap, incoming_ops) + bond_col = pos_in_link!(coutmap, non_incoming_ops) # get incoming channel bond_coef = convert(coefficient_type, coefficient(term)) q_inbond_coefs = get!( inbond_coefs[edge_in], incoming_qn, MatElem{coefficient_type}[] ) - #TODO: for OneElement, fill in size here - #push!(q_inbond_coefs, OneElement(bond_coef,(bond_row, bond_col), ())) push!(q_inbond_coefs, MatElem(bond_row, bond_col, bond_coef)) T_inds[dim_in] = bond_col T_qns[dim_in] = -incoming_qn @@ -198,8 +197,8 @@ function make_sparse_ttn( coutmap = get!( outmaps, edges[dout] => outgoing_qns[edges[dout]], Dict{Vector{Op},Int}() ) - # add outgoing channel - T_inds[dout] = pos_in_link!(coutmap, outgoing[edges[dout]]) + # Add outgoing channel + T_inds[dout] = pos_in_link!(coutmap, outgoing_ops[edges[dout]]) T_qns[dout] = outgoing_qns[edges[dout]] end # If term starts at this site, add its coefficient as a site factor @@ -209,21 +208,21 @@ function make_sparse_ttn( push!(site_coef_done, argument(term)) end # Add onsite identity for interactions passing through vertex - if isempty(onsite) - if !ITensors.using_auto_fermion() && isfermionic(incoming, sites) - push!(onsite, Op("F", v)) + if isempty(onsite_ops) + if !ITensors.using_auto_fermion() && isfermionic(incoming_ops, sites) + push!(onsite_ops, Op("F", v)) else - push!(onsite, Op("Id", v)) + push!(onsite_ops, Op("Id", v)) end end # Save indices and value of symbolic tensor entry - el = QNArrElem(T_qns, T_inds, site_coef * Prod(onsite)) - push!(temp_ttn[v], el) + el = QNArrElem(T_qns, T_inds, site_coef * Prod(onsite_ops)) + push!(symbolic_ttn[v], el) end - remove_dups!(temp_ttn[v]) + remove_dups!(symbolic_ttn[v]) end - return temp_ttn, inbond_coefs + return symbolic_ttn, inbond_coefs end function svd_bond_coefs( @@ -249,7 +248,7 @@ function svd_bond_coefs( end function compress_ttn( - coefficient_type, sites0, Hflux, temp_ttn, Vs; ordered_verts, ordered_edges + coefficient_type, sites0, Hflux, symbolic_ttn, Vs; ordered_verts, ordered_edges ) # Insert dummy indices on internal vertices, these will not show up in the final tensor # TODO: come up with a better solution for this @@ -265,7 +264,7 @@ function compress_ttn( end linkdir_ref = ITensors.In # safe to always use autofermion default here - # Compress this temp_ttn representation into dense form + # Compress this symbolic_ttn representation into dense form edgetype_sites = edgetype(sites) thishasqns = any(v -> hasqns(sites[v]), vertices(sites)) @@ -299,7 +298,7 @@ function compress_ttn( # construct blocks blocks = Dict{Tuple{Block{v_degree},Vector{Op}},Array{coefficient_type,v_degree}}() - for el in temp_ttn[v] + for el in symbolic_ttn[v] t = el.val (abs(coefficient(t)) > eps(real(coefficient_type))) || continue block_helper_inds = fill(-1, v_degree) # we manipulate T_inds later, and loose track of ending/starting information, so keep track of it here @@ -466,7 +465,7 @@ function ttn_svd( # Store edges in fixed ordering relative to root ordered_edges = _default_edge_ordering(sites, root_vertex) - temp_ttn, inbond_coefs = make_sparse_ttn(os, sites, root_vertex, coefficient_type, calc_qn; ordered_verts, ordered_edges) + symbolic_ttn, inbond_coefs = make_symbolic_ttn(os, sites, root_vertex, coefficient_type, calc_qn; 
ordered_verts, ordered_edges) Vs = svd_bond_coefs( coefficient_type, sites, inbond_coefs; ordered_verts, ordered_edges, kws... @@ -475,7 +474,7 @@ function ttn_svd( Hflux = -calc_qn(terms(first(terms(os)))) T = compress_ttn( - coefficient_type, sites, Hflux, temp_ttn, Vs; ordered_verts, ordered_edges + coefficient_type, sites, Hflux, symbolic_ttn, Vs; ordered_verts, ordered_edges ) return T From eae510c88bb039ebec73b0dc0df86714a3f9f9d1 Mon Sep 17 00:00:00 2001 From: Miles Date: Mon, 6 May 2024 18:15:34 -0400 Subject: [PATCH 13/17] Make some args keyword & reorder some --- src/treetensornetworks/opsum_to_ttn.jl | 72 ++++++++++++++++---------- 1 file changed, 45 insertions(+), 27 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn.jl index 89c14776..3355c784 100644 --- a/src/treetensornetworks/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn.jl @@ -1,4 +1,5 @@ #using FillArrays: OneElement +#using DataGraphs: DataGraph using Graphs: degree, is_tree using ITensors: flux, has_fermion_string, itensor, ops, removeqns, space, truncate!, val using ITensors.LazyApply: Prod, Sum, coefficient @@ -32,7 +33,7 @@ end function determine_coefficient_type(terms) isempty(terms) && return Float64 - if all(t->isreal(coefficient(t)),terms) + if all(t -> isreal(coefficient(t)), terms) return real(typeof(coefficient(first(terms)))) end return typeof(coefficient(first(terms))) @@ -50,7 +51,12 @@ function ttn_svd(os::OpSum, sites::IndsNetwork, root_vertex; kwargs...) return ttn_svd(coefficient_type, os, sites, root_vertex; kwargs...) end -function remove_dups!(v::Vector) +# TODO: should be equivalent to `sort!(v); unique!(v)` however, +# Base.unique! is giving incorrect results for data involving the +# Prod type from LazyApply. This could be a combination of Base.unique! +# not strictly relying on isequal combined with an incorrect +# implementation of isequal/isless for Prod. +function _sort_unique!(v::Vector) N = length(v) (N == 0) && return nothing sort!(v) @@ -81,12 +87,19 @@ function pos_in_link!(linkmap::Dict, k) end function make_symbolic_ttn( - opsum::OpSum, sites::IndsNetwork, root_vertex, coefficient_type, calc_qn; - ordered_verts, ordered_edges + coefficient_type, + opsum::OpSum, + sites::IndsNetwork; + ordered_verts, + ordered_edges, + root_vertex, + term_qn_map, ) inmaps = Dict{Pair{edgetype(sites),QN},Dict{Vector{Op},Int}}() outmaps = Dict{Pair{edgetype(sites),QN},Dict{Vector{Op},Int}}() + #g = underlying_graph(sites) + # Bond coefficients for incoming edge channels. # These become the "M" coefficient matrices that get SVD'd. inbond_coefs = Dict( @@ -120,7 +133,9 @@ function make_symbolic_ttn( # that lies in the edge_path(w,v) # TODO: better way to make which_incident_edge below? 
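    # Aside on the `_sort_unique!` helper introduced above: per its TODO it is
    # meant to act like `sort!(v)` followed by `unique!(v)`, e.g.
    #
    #   v = [3, 1, 2, 1, 3]
    #   _sort_unique!(v)   # afterwards v == [1, 2, 3]
    #
    # with the hand-rolled deduplication kept only because Base.unique! was
    # observed to give wrong results for the Scaled/Prod terms from LazyApply.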
subgraphs = split_at_vertex(sites, v) - _boundary_edges = [only(boundary_edges(underlying_graph(sites), subgraph)) for subgraph in subgraphs] + _boundary_edges = [ + only(boundary_edges(underlying_graph(sites), subgraph)) for subgraph in subgraphs + ] _boundary_edges = align_edges(_boundary_edges, edges) which_incident_edge = Dict( Iterators.flatten([ @@ -149,14 +164,11 @@ function make_symbolic_ttn( non_onsite_ops = setdiff(ops, onsite_ops) # Filter out ops that come in from the direction of the incoming edge - incoming_ops = filter( - t -> which_incident_edge[site(t)] == edge_in, non_onsite_ops - ) + incoming_ops = filter(t -> which_incident_edge[site(t)] == edge_in, non_onsite_ops) # Also store all non-incoming ops in standard order, used for channel merging non_incoming_ops = filter( - t -> (site(t) == v) || which_incident_edge[site(t)] != edge_in, - ops, + t -> (site(t) == v) || which_incident_edge[site(t)] != edge_in, ops ) # For every outgoing edge, filter out ops that go out along that edge @@ -166,10 +178,10 @@ function make_symbolic_ttn( ) # Compute QNs - incoming_qn = calc_qn(incoming_ops) - non_incoming_qn = calc_qn(non_incoming_ops) - outgoing_qns = Dict(e => calc_qn(outgoing_ops[e]) for e in edges_out) - site_qn = calc_qn(onsite_ops) + incoming_qn = term_qn_map(incoming_ops) + non_incoming_qn = term_qn_map(non_incoming_ops) + outgoing_qns = Dict(e => term_qn_map(outgoing_ops[e]) for e in edges_out) + site_qn = term_qn_map(onsite_ops) # Initialize QNArrayElement indices and quantum numbers T_inds = MVector{v_degree}(fill(-1, v_degree)) @@ -204,7 +216,7 @@ function make_symbolic_ttn( # If term starts at this site, add its coefficient as a site factor site_coef = one(coefficient_type) if (isnothing(dim_in) || T_inds[dim_in] == -1) && argument(term) ∉ site_coef_done - site_coef = convert(coefficient_type,coefficient(term)) + site_coef = convert(coefficient_type, coefficient(term)) push!(site_coef_done, argument(term)) end # Add onsite identity for interactions passing through vertex @@ -219,7 +231,7 @@ function make_symbolic_ttn( el = QNArrElem(T_qns, T_inds, site_coef * Prod(onsite_ops)) push!(symbolic_ttn[v], el) end - remove_dups!(symbolic_ttn[v]) + _sort_unique!(symbolic_ttn[v]) end return symbolic_ttn, inbond_coefs @@ -265,10 +277,9 @@ function compress_ttn( linkdir_ref = ITensors.In # safe to always use autofermion default here # Compress this symbolic_ttn representation into dense form - edgetype_sites = edgetype(sites) thishasqns = any(v -> hasqns(sites[v]), vertices(sites)) - link_space = Dict{edgetype_sites,Index}() + link_space = Dict{edgetype(sites),Index}() for e in ordered_edges operator_blocks = [q => size(Vq, 2) for (q, Vq) in Vs[e]] link_space[e] = Index( @@ -428,18 +439,18 @@ function compress_ttn( end # -# TermQN implements a function with an internal cache. +# TermQNMap implements a function with an internal cache. # Calling it on a term determines that term's flux # but tries not to rebuild the corresponding ITensor. # (Previously it was the function calc_qn.) 
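# A generic sketch of the same callable-struct-with-cache pattern, using a
# hypothetical wrapper type `Cached` (not part of this codebase):
#
#   struct Cached{K,V,F}
#     f::F
#     cache::Dict{K,V}
#   end
#   Cached{K,V}(f::F) where {K,V,F} = Cached{K,V,F}(f, Dict{K,V}())
#   (c::Cached)(x) = get!(() -> c.f(x), c.cache, x)
#
# TermQNMap differs only in what it memoizes: it caches the operator ITensor
# for each (opname => site) pair in op_cache and recomputes the (cheap) flux
# accumulation on every call.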
# -struct TermQN{V} +struct TermQNMap{V} sites::IndsNetwork op_cache::Dict{Pair{String,V},ITensor} - TermQN{V}(s) where {V} = new{V}(s, Dict{Pair{String,V},ITensor}()) + TermQNMap{V}(s) where {V} = new{V}(s, Dict{Pair{String,V},ITensor}()) end -function (t::TermQN)(term) +function (t::TermQNMap)(term) q = QN() for st in term op_tensor = get(t.op_cache, which_op(st) => site(st), nothing) @@ -455,23 +466,30 @@ function (t::TermQN)(term) end function ttn_svd( - coefficient_type::Type{<:Number}, os::OpSum, sites::IndsNetwork, root_vertex; - kws... + coefficient_type::Type{<:Number}, os::OpSum, sites::IndsNetwork, root_vertex; kws... ) - calc_qn = TermQN{vertextype(sites)}(sites) + term_qn_map = TermQNMap{vertextype(sites)}(sites) # Traverse tree outwards from root vertex ordered_verts = _default_vertex_ordering(sites, root_vertex) # Store edges in fixed ordering relative to root ordered_edges = _default_edge_ordering(sites, root_vertex) - symbolic_ttn, inbond_coefs = make_symbolic_ttn(os, sites, root_vertex, coefficient_type, calc_qn; ordered_verts, ordered_edges) + symbolic_ttn, inbond_coefs = make_symbolic_ttn( + coefficient_type, + os, + sites; + ordered_verts, + ordered_edges, + root_vertex, + term_qn_map, + ) Vs = svd_bond_coefs( coefficient_type, sites, inbond_coefs; ordered_verts, ordered_edges, kws... ) - Hflux = -calc_qn(terms(first(terms(os)))) + Hflux = -term_qn_map(terms(first(terms(os)))) T = compress_ttn( coefficient_type, sites, Hflux, symbolic_ttn, Vs; ordered_verts, ordered_edges From e38712be8527ac135b63af73b868b5624016b3e4 Mon Sep 17 00:00:00 2001 From: Miles Date: Mon, 6 May 2024 18:47:02 -0400 Subject: [PATCH 14/17] Move OpSum to TTN code to opsum_to_ttn folder --- src/ITensorNetworks.jl | 6 +++--- src/treetensornetworks/{ => opsum_to_ttn}/matelem.jl | 0 src/treetensornetworks/{ => opsum_to_ttn}/opsum_to_ttn.jl | 0 src/treetensornetworks/{ => opsum_to_ttn}/qnarrelem.jl | 0 4 files changed, 3 insertions(+), 3 deletions(-) rename src/treetensornetworks/{ => opsum_to_ttn}/matelem.jl (100%) rename src/treetensornetworks/{ => opsum_to_ttn}/opsum_to_ttn.jl (100%) rename src/treetensornetworks/{ => opsum_to_ttn}/qnarrelem.jl (100%) diff --git a/src/ITensorNetworks.jl b/src/ITensorNetworks.jl index e6d95931..28da183a 100644 --- a/src/ITensorNetworks.jl +++ b/src/ITensorNetworks.jl @@ -40,9 +40,9 @@ include("solvers/local_solvers/contract.jl") include("solvers/local_solvers/linsolve.jl") include("treetensornetworks/abstracttreetensornetwork.jl") include("treetensornetworks/treetensornetwork.jl") -include("treetensornetworks/matelem.jl") -include("treetensornetworks/qnarrelem.jl") -include("treetensornetworks/opsum_to_ttn.jl") +include("treetensornetworks/opsum_to_ttn/matelem.jl") +include("treetensornetworks/opsum_to_ttn/qnarrelem.jl") +include("treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl") include("treetensornetworks/projttns/abstractprojttn.jl") include("treetensornetworks/projttns/projttn.jl") include("treetensornetworks/projttns/projttnsum.jl") diff --git a/src/treetensornetworks/matelem.jl b/src/treetensornetworks/opsum_to_ttn/matelem.jl similarity index 100% rename from src/treetensornetworks/matelem.jl rename to src/treetensornetworks/opsum_to_ttn/matelem.jl diff --git a/src/treetensornetworks/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl similarity index 100% rename from src/treetensornetworks/opsum_to_ttn.jl rename to src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl diff --git a/src/treetensornetworks/qnarrelem.jl 
b/src/treetensornetworks/opsum_to_ttn/qnarrelem.jl similarity index 100% rename from src/treetensornetworks/qnarrelem.jl rename to src/treetensornetworks/opsum_to_ttn/qnarrelem.jl From 74109a472e9f6f5601b02a7c822dc471b87eee90 Mon Sep 17 00:00:00 2001 From: Miles Date: Thu, 9 May 2024 21:10:12 -0400 Subject: [PATCH 15/17] Formatting --- src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl index 3355c784..920979cb 100644 --- a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl @@ -476,13 +476,7 @@ function ttn_svd( ordered_edges = _default_edge_ordering(sites, root_vertex) symbolic_ttn, inbond_coefs = make_symbolic_ttn( - coefficient_type, - os, - sites; - ordered_verts, - ordered_edges, - root_vertex, - term_qn_map, + coefficient_type, os, sites; ordered_verts, ordered_edges, root_vertex, term_qn_map ) Vs = svd_bond_coefs( From a5aff49e688d7c2a4f0948a4efdf15cda90469e6 Mon Sep 17 00:00:00 2001 From: Miles Date: Thu, 9 May 2024 21:40:31 -0400 Subject: [PATCH 16/17] Remove an unneeded dictionary --- .../opsum_to_ttn/opsum_to_ttn.jl | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl index 920979cb..a21eb522 100644 --- a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl @@ -129,8 +129,8 @@ function make_symbolic_ttn( dims_out = findall(e -> src(e) == v, edges) edges_out = edges[dims_out] - # For every site w except v, determine the incident edge to v - # that lies in the edge_path(w,v) + # For every site v' except v, determine the incident edge to v + # that lies in the edge_path(v',v) # TODO: better way to make which_incident_edge below? 
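      # Context for the dictionary removed in this patch: the loop below used
      # to precompute
      #
      #   outgoing_qns = Dict(e => term_qn_map(outgoing_ops[e]) for e in edges_out)
      #
      # and then read it twice per outgoing edge. Since TermQNMap memoizes its
      # per-operator lookups, calling term_qn_map(out_op) directly at both use
      # sites costs about the same, and the intermediate Dict was pure
      # allocation and bookkeeping overhead.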
subgraphs = split_at_vertex(sites, v) _boundary_edges = [ @@ -180,7 +180,6 @@ function make_symbolic_ttn( # Compute QNs incoming_qn = term_qn_map(incoming_ops) non_incoming_qn = term_qn_map(non_incoming_ops) - outgoing_qns = Dict(e => term_qn_map(outgoing_ops[e]) for e in edges_out) site_qn = term_qn_map(onsite_ops) # Initialize QNArrayElement indices and quantum numbers @@ -206,12 +205,12 @@ function make_symbolic_ttn( T_qns[dim_in] = -incoming_qn end for dout in dims_out - coutmap = get!( - outmaps, edges[dout] => outgoing_qns[edges[dout]], Dict{Vector{Op},Int}() - ) + out_edge = edges[dout] + out_op = outgoing_ops[out_edge] + coutmap = get!(outmaps, out_edge => term_qn_map(out_op), Dict{Vector{Op},Int}()) # Add outgoing channel - T_inds[dout] = pos_in_link!(coutmap, outgoing_ops[edges[dout]]) - T_qns[dout] = outgoing_qns[edges[dout]] + T_inds[dout] = pos_in_link!(coutmap, out_op) + T_qns[dout] = term_qn_map(out_op) end # If term starts at this site, add its coefficient as a site factor site_coef = one(coefficient_type) From 6c78f89bfb3ec0bd08c407ffbc23efd774cc0de8 Mon Sep 17 00:00:00 2001 From: Miles Date: Thu, 9 May 2024 21:40:40 -0400 Subject: [PATCH 17/17] Fix linkdims using issue --- .../test_solvers/test_dmrg.jl | 14 ++------------ .../test_solvers/test_dmrg_x.jl | 3 ++- 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/test/test_treetensornetworks/test_solvers/test_dmrg.jl b/test/test_treetensornetworks/test_solvers/test_dmrg.jl index 2e9f3f6d..d4ca53f6 100644 --- a/test/test_treetensornetworks/test_solvers/test_dmrg.jl +++ b/test/test_treetensornetworks/test_solvers/test_dmrg.jl @@ -3,19 +3,9 @@ using DataGraphs: edge_data, vertex_data using Dictionaries: Dictionary using Graphs: nv, vertices using ITensors: ITensors -using ITensors.ITensorMPS: ITensorMPS +using ITensors.ITensorMPS: ITensorMPS, linkdims using ITensorNetworks: - ITensorNetworks, - OpSum, - ttn, - apply, - dmrg, - inner, - linkdims, - mpo, - random_mps, - random_ttn, - siteinds + ITensorNetworks, OpSum, ttn, apply, dmrg, inner, mpo, random_mps, random_ttn, siteinds using ITensorNetworks.ITensorsExtensions: replace_vertices using ITensorNetworks.ModelHamiltonians: ModelHamiltonians using KrylovKit: eigsolve diff --git a/test/test_treetensornetworks/test_solvers/test_dmrg_x.jl b/test/test_treetensornetworks/test_solvers/test_dmrg_x.jl index c684fc36..467d116c 100644 --- a/test/test_treetensornetworks/test_solvers/test_dmrg_x.jl +++ b/test/test_treetensornetworks/test_solvers/test_dmrg_x.jl @@ -1,8 +1,9 @@ @eval module $(gensym()) using Dictionaries: Dictionary using Graphs: nv, vertices +using ITensors.ITensorMPS: linkdims using ITensorNetworks: - OpSum, ttn, apply, contract, dmrg_x, inner, linkdims, mpo, mps, random_mps, siteinds + OpSum, ttn, apply, contract, dmrg_x, inner, mpo, mps, random_mps, siteinds using ITensorNetworks.ModelHamiltonians: ModelHamiltonians using ITensors: @disable_warn_order, array, dag, onehot, uniqueind using LinearAlgebra: eigen, normalize
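# A closing usage sketch (not taken from the patches above): after this
# series the OpSum -> TTN entry point is unchanged, even though ttn_svd now
# runs in three stages, make_symbolic_ttn -> svd_bond_coefs -> compress_ttn.
# Names below follow the test imports; the model constructor is illustrative
# and assumes ModelHamiltonians provides a `heisenberg` builder:
#
#   using NamedGraphs.NamedGraphGenerators: named_comb_tree
#   using ITensorNetworks: ttn, siteinds
#   using ITensorNetworks.ModelHamiltonians: ModelHamiltonians
#
#   g = named_comb_tree((3, 2))          # a small tree graph
#   s = siteinds("S=1/2", g)             # IndsNetwork of physical site indices
#   H = ModelHamiltonians.heisenberg(g)  # symbolic OpSum over the vertices of g
#   T = ttn(H, s)                        # OpSum -> TTN, via ttn_svd internally
#
# and, per the final patch, bond dimensions of MPS/TTN results in the tests
# are now inspected with `linkdims` imported from ITensors.ITensorMPS rather
# than from ITensorNetworks.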