Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 4 additions & 14 deletions ext/TensorKitCUDAExt/cutensormap.jl
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,9 @@ const AdjointCuTensorMap{T, S, N₁, N₂} = AdjointTensorMap{T, S, N₁, N₂,
# Upload a host-resident TensorMap to the GPU: the flat data vector is copied
# into a `CuArray{T}` and wrapped in a CuTensorMap over the same space.
function CuTensorMap(t::TensorMap{T, S, N₁, N₂, A}) where {T, S, N₁, N₂, A}
    devdata = CuArray{T}(t.data)
    return CuTensorMap{T, S, N₁, N₂}(devdata, space(t))
end
# Host-to-device conversion: requesting a CuArray-backed TensorMap from an
# Array-backed one uploads the data and returns a CuTensorMap over the same space.
# NOTE(review): the requested storage type `DA` (including its memory kind) is
# not actually used — the result always gets default `CuArray{T}` memory; confirm
# this is intended for non-default `DA` parameters.
function TensorMap{T, S, N₁, N₂, DA}(t::TensorMap{T, S, N₁, N₂, HA}) where {T, S, N₁, N₂, DA <: CuArray{T}, HA <: Array{T}}
return CuTensorMap{T, S, N₁, N₂}(CuArray{T}(t.data), space(t))
end

# project_symmetric! doesn't yet work for GPU types, so do this on the host, then copy
function TensorKit.project_symmetric_and_check(::Type{T}, ::Type{A}, data::AbstractArray, V::TensorMapSpace; tol = sqrt(eps(real(float(eltype(data)))))) where {T, A <: CuVector{T}}
Expand Down Expand Up @@ -101,18 +104,6 @@ function TensorKit.scalar(t::CuTensorMap{T, S, 0, 0}) where {T, S}
return isempty(inds) ? zero(scalartype(t)) : @allowscalar @inbounds t.data[only(inds)]
end

function Base.convert(
TT::Type{CuTensorMap{T, S, N₁, N₂}},
t::AbstractTensorMap{<:Any, S, N₁, N₂}
) where {T, S, N₁, N₂}
if typeof(t) === TT
return t
else
tnew = TT(undef, space(t))
return copy!(tnew, t)
end
end

function LinearAlgebra.isposdef(t::CuTensorMap)
domain(t) == codomain(t) ||
throw(SpaceMismatch("`isposdef` requires domain and codomain to be the same"))
Expand All @@ -138,10 +129,9 @@ function Base.promote_rule(
return CuTensorMap{T, S, N₁, N₂}
end

TensorKit.promote_storage_rule(::Type{CuArray{T, N}}, ::Type{<:CuArray{T, N}}) where {T, N} =
# Promoting two CuArray storage types with matching eltype `T` and rank `N`
# (regardless of their memory kinds) yields a CuArray in CUDA's default memory.
TensorKit.promote_storage_rule(::Type{<:CuArray{T, N}}, ::Type{<:CuArray{T, N}}) where {T, N} =
CuArray{T, N, CUDA.default_memory}


# CuTensorMap exponentation:
function TensorKit.exp!(t::CuTensorMap)
domain(t) == codomain(t) ||
Expand Down
16 changes: 11 additions & 5 deletions src/tensors/abstracttensor.jl
Original file line number Diff line number Diff line change
Expand Up @@ -53,9 +53,7 @@ storagetype(t) = storagetype(typeof(t))
function storagetype(::Type{T}) where {T <: AbstractTensorMap}
if T isa Union
# attempt to be slightly more specific by promoting unions
Ma = storagetype(T.a)
Mb = storagetype(T.b)
return promote_storagetype(Ma, Mb)
return promote_storagetype(T.a, T.b)
else
# fallback definition by using scalartype
return similarstoragetype(scalartype(T))
Expand Down Expand Up @@ -103,11 +101,19 @@ similarstoragetype(X::Type, ::Type{T}) where {T <: Number} =

# implement on tensors
similarstoragetype(::Type{TT}) where {TT <: AbstractTensorMap} = similarstoragetype(storagetype(TT))
similarstoragetype(::Type{TT}, ::Type{T}) where {TT <: AbstractTensorMap, T <: Number} =
similarstoragetype(storagetype(TT), T)
# Similar-storage type of a tensor-map type with a new scalar type `T`:
# delegate to the array-level method on the tensor's storage type.
function similarstoragetype(::Type{TT}, ::Type{T}) where {TT <: AbstractTensorMap, T <: Number}
return similarstoragetype(storagetype(TT), T)
end
Comment thread
kshyatt marked this conversation as resolved.
# Given a tensor-map type with scalar type `T` and an explicit dense storage
# type `TA`, build the similar-storage type of `TA` carrying scalar type `T`.
# NOTE(review): since `TA <: DenseVector` is already a storage type, it is
# unclear whether the answer should not simply be `TA` itself — confirm the
# intended semantics of passing a storage type as the second argument.
function similarstoragetype(::Type{<:AbstractTensorMap{T, S, N₁, N₂}}, ::Type{TA}) where {T <: Number, TA <: DenseVector, S, N₁, N₂}
return similarstoragetype(TA, T)
end
# Instance-level variant: forward to the type-level method via `typeof(t)`.
similarstoragetype(t::AbstractTensorMap{T, S, N₁, N₂}, ::Type{TA}) where {T <: Number, TA <: DenseVector, S, N₁, N₂} =
    similarstoragetype(typeof(t), TA)

# implement on arrays
# A dense numeric vector type is already its own similar-storage type.
function similarstoragetype(::Type{V}) where {V <: DenseVector{<:Number}}
    return V
end
# Promoting a dense numeric storage type with itself is the identity.
function similarstoragetype(::Type{V}, ::Type{V}) where {V <: DenseVector{<:Number}}
    return V
end
Comment on lines +107 to +116
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Are we using these somewhere? I'm slightly confused about the meaning of these functions with last argument A <: DenseVector. In general, I thought this function mostly meant: for given tensor T1 (and possibly T2), create a new storagetype that is compatible with T1 but has scalartype T. In the case where we pass in a storagetype to begin with, doesn't that mean we already know that the answer has to be A?.

Base.@assume_effects :foldable similarstoragetype(::Type{A}) where {A <: AbstractArray{<:Number}} =
Core.Compiler.return_type(similar, Tuple{A, Int})
Base.@assume_effects :foldable similarstoragetype(::Type{A}, ::Type{T}) where {A <: AbstractArray, T <: Number} =
Expand Down
2 changes: 2 additions & 0 deletions src/tensors/adjoint.jl
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,8 @@ Base.adjoint(t::AbstractTensorMap) = AdjointTensorMap(t)
# The space of an adjoint is the adjoint of the parent's space.
space(t::AdjointTensorMap) = adjoint(space(parent(t)))
# Total dimension is unchanged by taking the adjoint.
dim(t::AdjointTensorMap) = dim(parent(t))
# The storage type of an adjoint is that of the wrapped tensor type `TT`.
storagetype(::Type{AdjointTensorMap{T, S, N₁, N₂, TT}}) where {T, S, N₁, N₂, TT} = storagetype(TT)
# Similar-storage with a new scalar type `T′`: delegate to the parent tensor type.
similarstoragetype(::AdjointTensorMap{T, S, N₁, N₂, TT}, ::Type{T′}) where {T, S, N₁, N₂, TT, T′ <: Number} = similarstoragetype(TT, T′)
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Isn't this also automatic from the definition of similarstoragetype(t::AbstractTensorMap, T) = similarstoragetype(storagetype(t), T) along with storagetype the line above?

similarstoragetype(::AdjointTensorMap{T, S, N₁, N₂, TT}, ::Type{TA}) where {T, S, N₁, N₂, TT, TA <: DenseVector} = similarstoragetype(TT, TA)
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same comment here about why the second argument would already be a <:DenseVector.


# Blocks and subblocks
#----------------------
Expand Down
21 changes: 14 additions & 7 deletions src/tensors/braidingtensor.jl
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I guess the changes here are no longer necessary since BraidingTensor now knows its own storagetype?

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm checking this now with MPSKit!

Original file line number Diff line number Diff line change
Expand Up @@ -189,12 +189,15 @@ end
has_shared_permute(t::BraidingTensor, ::Index2Tuple) = false
function add_transform!(
tdst::AbstractTensorMap,
tsrc::BraidingTensor, (p₁, p₂)::Index2Tuple,
tsrc::BraidingTensor{T, S},
(p₁, p₂)::Index2Tuple,
fusiontreetransform,
α::Number, β::Number, backend::AbstractBackend...
)
) where {T, S}
tsrc_map = similar(tdst, storagetype(tdst), space(tsrc))
copy!(tsrc_map, tsrc)
return add_transform!(
tdst, TensorMap(tsrc), (p₁, p₂), fusiontreetransform, α, β,
tdst, tsrc_map, (p₁, p₂), fusiontreetransform, α, β,
backend...
)
end
Expand Down Expand Up @@ -294,11 +297,15 @@ function planarcontract!(
backend, allocator
)
# special case only defined for contracting 2 indices
length(oindB) == length(cindB) == 2 ||
if !(length(oindB) == length(cindB) == 2)
# horrible!!!!!
tB′ = TensorMap(B)
tB = TensorMapWithStorage{eltype(B), similarstoragetype(A, eltype(B)), spacetype(tB′), numout(tB′), numin(tB′)}(tB′)
return planarcontract!(
C, A, (oindA, cindA), TensorMap(B), (cindB, oindB), (p1, p2),
α, β, backend, allocator
)
C, A, (oindA, cindA), tB, (cindB, oindB), (p1, p2),
α, β, backend, allocator
)
end

codA, domA = codomainind(A), domainind(A)
codB, domB = codomainind(B), domainind(B)
Expand Down
2 changes: 1 addition & 1 deletion src/tensors/diagonal.jl
Original file line number Diff line number Diff line change
Expand Up @@ -280,7 +280,7 @@ end
# ----------------
function TO.tensoradd_type(TC, A::DiagonalTensorMap, ::Index2Tuple{1, 1}, ::Bool)
M = similarstoragetype(A, TC)
return DiagonalTensorMap{TC, spacetype(A), M}
return DiagonalTensorMap{scalartype(M), spacetype(A), M}
end

function TO.tensorcontract_type(
Expand Down
12 changes: 7 additions & 5 deletions src/tensors/indexmanipulations.jl
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@ for (operation, manipulation) in (
$promote_op(::Type{T}, ::Type{I}) where {T <: Number, I <: Sector} =
sectorscalartype(I) <: Integer ? T :
sectorscalartype(I) <: Real ? float(T) : complex(T)
$promote_op(::Type{TA}, ::Type{I}) where {TA <: DenseVector, I <: Sector} =
similarstoragetype(TA, $promote_op(eltype(TA), I))
Comment on lines +20 to +21
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is also not really used I guess?

# TODO: currently the manipulations all use sectorscalartype, change to:
# $manipulation_scalartype(I) <: Integer ? T :
# $manipulation_scalartype(I) <: Real ? float(T) : complex(T)
Expand Down Expand Up @@ -342,11 +344,11 @@ See also [`insertrightunit`](@ref insertrightunit(::AbstractTensorMap, ::Val{i})
"""
function insertleftunit(
t::AbstractTensorMap, ::Val{i} = Val(numind(t) + 1);
copy::Bool = false, conj::Bool = false, dual::Bool = false
copy::Bool = false, conj::Bool = false, dual::Bool = false,
) where {i}
W = insertleftunit(space(t), Val(i); conj, dual)
if t isa TensorMap
return TensorMap{scalartype(t)}(copy ? Base.copy(t.data) : t.data, W)
return TensorMapWithStorage{scalartype(t), storagetype(t)}(copy ? Base.copy(t.data) : t.data, W)
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This doesn't look necessary, isn't the storagetype automatically deduced from the provided data vector?

else
tdst = similar(t, W)
for (c, b) in blocks(t)
Expand All @@ -371,11 +373,11 @@ See also [`insertleftunit`](@ref insertleftunit(::AbstractTensorMap, ::Val{i}) w
"""
function insertrightunit(
t::AbstractTensorMap, ::Val{i} = Val(numind(t));
copy::Bool = false, conj::Bool = false, dual::Bool = false
copy::Bool = false, conj::Bool = false, dual::Bool = false,
) where {i}
W = insertrightunit(space(t), Val(i); conj, dual)
if t isa TensorMap
return TensorMap{scalartype(t)}(copy ? Base.copy(t.data) : t.data, W)
return TensorMapWithStorage{scalartype(t), storagetype(t)}(copy ? Base.copy(t.data) : t.data, W)
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same comment here

else
tdst = similar(t, W)
for (c, b) in blocks(t)
Expand All @@ -400,7 +402,7 @@ and [`insertrightunit`](@ref insertrightunit(::AbstractTensorMap, ::Val{i}) wher
function removeunit(t::AbstractTensorMap, ::Val{i}; copy::Bool = false) where {i}
W = removeunit(space(t), Val(i))
if t isa TensorMap
return TensorMap{scalartype(t)}(copy ? Base.copy(t.data) : t.data, W)
return TensorMapWithStorage{scalartype(t), storagetype(t)}(copy ? Base.copy(t.data) : t.data, W)
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same comment here

else
tdst = similar(t, W)
for (c, b) in blocks(t)
Expand Down
3 changes: 2 additions & 1 deletion src/tensors/tensor.jl
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@ struct TensorMap{T, S <: IndexSpace, N₁, N₂, A <: DenseVector{T}} <: Abstrac
end
return TensorMap{T, S, N₁, N₂, A}(data, space)
end

# constructors from data
function TensorMap{T, S, N₁, N₂, A}(
data::A, space::TensorMapSpace{S, N₁, N₂}
Expand All @@ -34,6 +33,8 @@ struct TensorMap{T, S <: IndexSpace, N₁, N₂, A <: DenseVector{T}} <: Abstrac
return new{T, S, N₁, N₂, A}(data, space)
end
end
# Constructor from another TensorMap of exactly the requested type: identity —
# the input is returned unchanged (no copy is made).
TensorMap{T, S, N₁, N₂, A}(t::TensorMap{T, S, N₁, N₂, A}) where {T, S <: IndexSpace, N₁, N₂, A <: DenseVector{T}} = t

"""
Tensor{T, S, N, A<:DenseVector{T}} = TensorMap{T, S, N, 0, A}
Expand Down
Loading
Loading