From 3c5a575db7d5e0e377577928b4832d3e022e80a8 Mon Sep 17 00:00:00 2001 From: Katharine Hyatt Date: Wed, 18 Feb 2026 07:35:49 -0500 Subject: [PATCH 1/2] More tweaks for GPU support --- ext/TensorKitCUDAExt/cutensormap.jl | 18 +--- src/tensors/abstracttensor.jl | 16 ++-- src/tensors/adjoint.jl | 2 + src/tensors/braidingtensor.jl | 21 +++-- src/tensors/diagonal.jl | 2 +- src/tensors/indexmanipulations.jl | 12 +-- src/tensors/tensor.jl | 3 +- test/cuda/tensors.jl | 128 +++++++++++++--------------- 8 files changed, 101 insertions(+), 101 deletions(-) diff --git a/ext/TensorKitCUDAExt/cutensormap.jl b/ext/TensorKitCUDAExt/cutensormap.jl index 2fefb3a24..a1d2d1b66 100644 --- a/ext/TensorKitCUDAExt/cutensormap.jl +++ b/ext/TensorKitCUDAExt/cutensormap.jl @@ -6,6 +6,9 @@ const AdjointCuTensorMap{T, S, N₁, N₂} = AdjointTensorMap{T, S, N₁, N₂, function CuTensorMap(t::TensorMap{T, S, N₁, N₂, A}) where {T, S, N₁, N₂, A} return CuTensorMap{T, S, N₁, N₂}(CuArray{T}(t.data), space(t)) end +function TensorMap{T, S, N₁, N₂, DA}(t::TensorMap{T, S, N₁, N₂, HA}) where {T, S, N₁, N₂, DA <: CuArray{T}, HA <: Array{T}} + return CuTensorMap{T, S, N₁, N₂}(CuArray{T}(t.data), space(t)) +end # project_symmetric! doesn't yet work for GPU types, so do this on the host, then copy function TensorKit.project_symmetric_and_check(::Type{T}, ::Type{A}, data::AbstractArray, V::TensorMapSpace; tol = sqrt(eps(real(float(eltype(data)))))) where {T, A <: CuVector{T}} @@ -101,18 +104,6 @@ function TensorKit.scalar(t::CuTensorMap{T, S, 0, 0}) where {T, S} return isempty(inds) ? zero(scalartype(t)) : @allowscalar @inbounds t.data[only(inds)] end -function Base.convert( - TT::Type{CuTensorMap{T, S, N₁, N₂}}, - t::AbstractTensorMap{<:Any, S, N₁, N₂} - ) where {T, S, N₁, N₂} - if typeof(t) === TT - return t - else - tnew = TT(undef, space(t)) - return copy!(tnew, t) - end -end - function LinearAlgebra.isposdef(t::CuTensorMap) domain(t) == codomain(t) || throw(SpaceMismatch("`isposdef` requires domain and codomain to be the same")) @@ -138,10 +129,9 @@ function Base.promote_rule( return CuTensorMap{T, S, N₁, N₂} end -TensorKit.promote_storage_rule(::Type{CuArray{T, N}}, ::Type{<:CuArray{T, N}}) where {T, N} = +TensorKit.promote_storage_rule(::Type{<:CuArray{T, N}}, ::Type{<:CuArray{T, N}}) where {T, N} = CuArray{T, N, CUDA.default_memory} - # CuTensorMap exponentation: function TensorKit.exp!(t::CuTensorMap) domain(t) == codomain(t) || diff --git a/src/tensors/abstracttensor.jl b/src/tensors/abstracttensor.jl index 48bfb0dd7..774812312 100644 --- a/src/tensors/abstracttensor.jl +++ b/src/tensors/abstracttensor.jl @@ -53,9 +53,7 @@ storagetype(t) = storagetype(typeof(t)) function storagetype(::Type{T}) where {T <: AbstractTensorMap} if T isa Union # attempt to be slightly more specific by promoting unions - Ma = storagetype(T.a) - Mb = storagetype(T.b) - return promote_storagetype(Ma, Mb) + return promote_storagetype(T.a, T.b) else # fallback definition by using scalartype return similarstoragetype(scalartype(T)) @@ -103,11 +101,19 @@ similarstoragetype(X::Type, ::Type{T}) where {T <: Number} = # implement on tensors similarstoragetype(::Type{TT}) where {TT <: AbstractTensorMap} = similarstoragetype(storagetype(TT)) -similarstoragetype(::Type{TT}, ::Type{T}) where {TT <: AbstractTensorMap, T <: Number} = - similarstoragetype(storagetype(TT), T) +function similarstoragetype(::Type{TT}, ::Type{T}) where {TT <: AbstractTensorMap, T <: Number} + return similarstoragetype(storagetype(TT), T) +end +function 
similarstoragetype(::Type{<:AbstractTensorMap{T, S, N₁, N₂}}, ::Type{TA}) where {T <: Number, TA <: DenseVector, S, N₁, N₂}
+    return similarstoragetype(TA, T)
+end
+function similarstoragetype(t::AbstractTensorMap{T, S, N₁, N₂}, ::Type{TA}) where {T <: Number, TA <: DenseVector, S, N₁, N₂}
+    return similarstoragetype(typeof(t), TA)
+end
 
 # implement on arrays
 similarstoragetype(::Type{A}) where {A <: DenseVector{<:Number}} = A
+similarstoragetype(::Type{A}, ::Type{A}) where {A <: DenseVector{<:Number}} = A
 Base.@assume_effects :foldable similarstoragetype(::Type{A}) where {A <: AbstractArray{<:Number}} =
     Core.Compiler.return_type(similar, Tuple{A, Int})
 Base.@assume_effects :foldable similarstoragetype(::Type{A}, ::Type{T}) where {A <: AbstractArray, T <: Number} =
diff --git a/src/tensors/adjoint.jl b/src/tensors/adjoint.jl
index dfc1a4471..382f309b5 100644
--- a/src/tensors/adjoint.jl
+++ b/src/tensors/adjoint.jl
@@ -22,6 +22,8 @@ Base.adjoint(t::AbstractTensorMap) = AdjointTensorMap(t)
 space(t::AdjointTensorMap) = adjoint(space(parent(t)))
 dim(t::AdjointTensorMap) = dim(parent(t))
 storagetype(::Type{AdjointTensorMap{T, S, N₁, N₂, TT}}) where {T, S, N₁, N₂, TT} = storagetype(TT)
+similarstoragetype(::AdjointTensorMap{T, S, N₁, N₂, TT}, ::Type{T′}) where {T, S, N₁, N₂, TT, T′ <: Number} = similarstoragetype(TT, T′)
+similarstoragetype(::AdjointTensorMap{T, S, N₁, N₂, TT}, ::Type{TA}) where {T, S, N₁, N₂, TT, TA <: DenseVector} = similarstoragetype(TT, TA)
 
 # Blocks and subblocks
 #----------------------
diff --git a/src/tensors/braidingtensor.jl b/src/tensors/braidingtensor.jl
index f08dd8181..e0683c078 100644
--- a/src/tensors/braidingtensor.jl
+++ b/src/tensors/braidingtensor.jl
@@ -189,12 +189,15 @@ end
 has_shared_permute(t::BraidingTensor, ::Index2Tuple) = false
 function add_transform!(
         tdst::AbstractTensorMap,
-        tsrc::BraidingTensor, (p₁, p₂)::Index2Tuple,
+        tsrc::BraidingTensor{T, S},
+        (p₁, p₂)::Index2Tuple,
         fusiontreetransform,
         α::Number, β::Number, backend::AbstractBackend...
-    )
+    ) where {T, S}
+    tsrc_map = similar(tdst, storagetype(tdst), space(tsrc))
+    copy!(tsrc_map, tsrc)
     return add_transform!(
-        tdst, TensorMap(tsrc), (p₁, p₂), fusiontreetransform, α, β,
+        tdst, tsrc_map, (p₁, p₂), fusiontreetransform, α, β,
         backend...
     )
 end
@@ -294,11 +297,15 @@ function planarcontract!(
         backend, allocator
     )
     # special case only defined for contracting 2 indices
-    length(oindB) == length(cindB) == 2 ||
+    if !(length(oindB) == length(cindB) == 2)
+        # fall back: materialize B as a TensorMap with storage matching A (costly, but correct)
+ tB′ = TensorMap(B) + tB = TensorMapWithStorage{eltype(B), similarstoragetype(A, eltype(B)), spacetype(tB′), numout(tB′), numin(tB′)}(tB′) return planarcontract!( - C, A, (oindA, cindA), TensorMap(B), (cindB, oindB), (p1, p2), - α, β, backend, allocator - ) + C, A, (oindA, cindA), tB, (cindB, oindB), (p1, p2), + α, β, backend, allocator + ) + end codA, domA = codomainind(A), domainind(A) codB, domB = codomainind(B), domainind(B) diff --git a/src/tensors/diagonal.jl b/src/tensors/diagonal.jl index b2ac4134b..e73ad2787 100644 --- a/src/tensors/diagonal.jl +++ b/src/tensors/diagonal.jl @@ -280,7 +280,7 @@ end # ---------------- function TO.tensoradd_type(TC, A::DiagonalTensorMap, ::Index2Tuple{1, 1}, ::Bool) M = similarstoragetype(A, TC) - return DiagonalTensorMap{TC, spacetype(A), M} + return DiagonalTensorMap{scalartype(M), spacetype(A), M} end function TO.tensorcontract_type( diff --git a/src/tensors/indexmanipulations.jl b/src/tensors/indexmanipulations.jl index 3108abb17..e45789b44 100644 --- a/src/tensors/indexmanipulations.jl +++ b/src/tensors/indexmanipulations.jl @@ -17,6 +17,8 @@ for (operation, manipulation) in ( $promote_op(::Type{T}, ::Type{I}) where {T <: Number, I <: Sector} = sectorscalartype(I) <: Integer ? T : sectorscalartype(I) <: Real ? float(T) : complex(T) + $promote_op(::Type{TA}, ::Type{I}) where {TA <: DenseVector, I <: Sector} = + similarstoragetype(TA, $promote_op(eltype(TA), I)) # TODO: currently the manipulations all use sectorscalartype, change to: # $manipulation_scalartype(I) <: Integer ? T : # $manipulation_scalartype(I) <: Real ? float(T) : complex(T) @@ -342,11 +344,11 @@ See also [`insertrightunit`](@ref insertrightunit(::AbstractTensorMap, ::Val{i}) """ function insertleftunit( t::AbstractTensorMap, ::Val{i} = Val(numind(t) + 1); - copy::Bool = false, conj::Bool = false, dual::Bool = false + copy::Bool = false, conj::Bool = false, dual::Bool = false, ) where {i} W = insertleftunit(space(t), Val(i); conj, dual) if t isa TensorMap - return TensorMap{scalartype(t)}(copy ? Base.copy(t.data) : t.data, W) + return TensorMapWithStorage{scalartype(t), storagetype(t)}(copy ? Base.copy(t.data) : t.data, W) else tdst = similar(t, W) for (c, b) in blocks(t) @@ -371,11 +373,11 @@ See also [`insertleftunit`](@ref insertleftunit(::AbstractTensorMap, ::Val{i}) w """ function insertrightunit( t::AbstractTensorMap, ::Val{i} = Val(numind(t)); - copy::Bool = false, conj::Bool = false, dual::Bool = false + copy::Bool = false, conj::Bool = false, dual::Bool = false, ) where {i} W = insertrightunit(space(t), Val(i); conj, dual) if t isa TensorMap - return TensorMap{scalartype(t)}(copy ? Base.copy(t.data) : t.data, W) + return TensorMapWithStorage{scalartype(t), storagetype(t)}(copy ? Base.copy(t.data) : t.data, W) else tdst = similar(t, W) for (c, b) in blocks(t) @@ -400,7 +402,7 @@ and [`insertrightunit`](@ref insertrightunit(::AbstractTensorMap, ::Val{i}) wher function removeunit(t::AbstractTensorMap, ::Val{i}; copy::Bool = false) where {i} W = removeunit(space(t), Val(i)) if t isa TensorMap - return TensorMap{scalartype(t)}(copy ? Base.copy(t.data) : t.data, W) + return TensorMapWithStorage{scalartype(t), storagetype(t)}(copy ? 
Base.copy(t.data) : t.data, W) else tdst = similar(t, W) for (c, b) in blocks(t) diff --git a/src/tensors/tensor.jl b/src/tensors/tensor.jl index 342c83186..65e6adae6 100644 --- a/src/tensors/tensor.jl +++ b/src/tensors/tensor.jl @@ -21,7 +21,6 @@ struct TensorMap{T, S <: IndexSpace, N₁, N₂, A <: DenseVector{T}} <: Abstrac end return TensorMap{T, S, N₁, N₂, A}(data, space) end - # constructors from data function TensorMap{T, S, N₁, N₂, A}( data::A, space::TensorMapSpace{S, N₁, N₂} @@ -34,6 +33,8 @@ struct TensorMap{T, S <: IndexSpace, N₁, N₂, A <: DenseVector{T}} <: Abstrac return new{T, S, N₁, N₂, A}(data, space) end end +# constructors from another TensorMap -- no-op +TensorMap{T, S, N₁, N₂, A}(t::TensorMap{T, S, N₁, N₂, A}) where {T, S <: IndexSpace, N₁, N₂, A <: DenseVector{T}} = t """ Tensor{T, S, N, A<:DenseVector{T}} = TensorMap{T, S, N, 0, A} diff --git a/test/cuda/tensors.jl b/test/cuda/tensors.jl index 738440bef..7b958d915 100644 --- a/test/cuda/tensors.jl +++ b/test/cuda/tensors.jl @@ -14,6 +14,7 @@ for V in spacelist println("---------------------------------------") println("CUDA Tensors with symmetry: $Istr") println("---------------------------------------") + hasbraiding = BraidingStyle(I) isa HasBraiding symmetricbraiding = BraidingStyle(I) isa SymmetricBraiding @timedtestset "Tensors with symmetry: $Istr" verbose = true begin V1, V2, V3, V4, V5 = V @@ -209,8 +210,8 @@ for V in spacelist α = rand(T) @test norm(t, 2) ≈ norm(TensorKit.to_cpu(t), 2) @test dot(t2, t) ≈ dot(TensorKit.to_cpu(t2), TensorKit.to_cpu(t)) - @test TensorKit.to_cpu(α * t) ≈ α * TensorKit.to_cpu(t) - @test TensorKit.to_cpu(t + t) ≈ 2 * TensorKit.to_cpu(t) + @test adapt(Vector{T}, (α * t)) ≈ α * adapt(Vector{T}, t) + @test adapt(Vector{T}, (t + t)) ≈ 2 * adapt(Vector{T}, t) end end @timedtestset "Real and imaginary parts" begin @@ -263,16 +264,22 @@ for V in spacelist symmetricbraiding && @timedtestset "Permutations: test via inner product invariance" begin W = V1 ⊗ V2 ⊗ V3 ⊗ V4 ⊗ V5 t = cuRAND.rand(ComplexF64, W) + ht = adapt(Vector{ComplexF64}, t) t′ = cuRAND.randn!(similar(t)) + ht′ = adapt(Vector{ComplexF64}, t′) + dot_htt′ = dot(ht′, ht) + dot_tt′ = dot(t′, t) + @test dot_tt′ ≈ dot_htt′ + norm_t = norm(t) for k in 0:5 for p in permutations(1:5) p1 = ntuple(n -> p[n], k) p2 = ntuple(n -> p[k + n], 5 - k) t2 = @constinferred permute(t, (p1, p2)) - t2 = permute(t, (p1, p2)) - @test norm(t2) ≈ norm(t) t2′ = permute(t′, (p1, p2)) - @test dot(t2′, t2) ≈ dot(t′, t) ≈ dot(transpose(t2′), transpose(t2)) + @test norm(t2) ≈ norm_t + @test dot(t2′, t2) ≈ dot_tt′ + @test dot(transpose(t2′), transpose(t2)) ≈ dot_tt′ end t3 = @constinferred repartition(t, $k) t3 = repartition(t, k) @@ -293,29 +300,26 @@ for V in spacelist ht2 = permute(TensorKit.to_cpu(t), (p1, p2)) @test ht2 ≈ TensorKit.to_cpu(dt2) end - - dt3 = CUDA.@allowscalar repartition(t, k) + dt3 = repartition(t, k) ht3 = repartition(TensorKit.to_cpu(t), k) @test ht3 ≈ TensorKit.to_cpu(dt3) end end symmetricbraiding && @timedtestset "Full trace: test self-consistency" begin t = cuRAND.rand(ComplexF64, V1 ⊗ V2' ⊗ V2 ⊗ V1') - CUDA.@allowscalar begin - t2 = permute(t, ((1, 2), (4, 3))) - s = @constinferred tr(t2) - @test conj(s) ≈ tr(t2') - if !isdual(V1) - t2 = twist!(t2, 1) - end - if isdual(V2) - t2 = twist!(t2, 2) - end - ss = tr(t2) - @tensor s2 = t[a, b, b, a] - @tensor t3[a, b] := t[a, c, c, b] - @tensor s3 = t3[a, a] + t2 = permute(t, ((1, 2), (4, 3))) + s = @constinferred tr(t2) + @test conj(s) ≈ tr(t2') + if !isdual(V1) + t2 = twist!(t2, 1) + end + 
if isdual(V2) + t2 = twist!(t2, 2) end + ss = tr(t2) + @tensor s2 = t[a, b, b, a] + @tensor t3[a, b] := t[a, c, c, b] + @tensor s3 = t3[a, a] @test ss ≈ s2 @test ss ≈ s3 end @@ -328,20 +332,16 @@ for V in spacelist end symmetricbraiding && @timedtestset "Trace: test via conversion" begin t = cuRAND.rand(ComplexF64, V1 ⊗ V2' ⊗ V3 ⊗ V2 ⊗ V1' ⊗ V3') - CUDA.@allowscalar begin - @tensor t2[a, b] := t[c, d, b, d, c, a] - @tensor t3[a, b] := ad(t)[c, d, b, d, c, a] - end + @tensor t2[a, b] := t[c, d, b, d, c, a] + @tensor t3[a, b] := ad(t)[c, d, b, d, c, a] @test t3 ≈ ad(t2) end symmetricbraiding && @timedtestset "Trace and contraction" begin t1 = cuRAND.rand(ComplexF64, V1 ⊗ V2 ⊗ V3) t2 = cuRAND.rand(ComplexF64, V2' ⊗ V4 ⊗ V1') - CUDA.@allowscalar begin - t3 = t1 ⊗ t2 - @tensor ta[a, b] := t1[x, y, a] * t2[y, b, x] - @tensor tb[a, b] := t3[x, y, a, y, b, x] - end + t3 = t1 ⊗ t2 + @tensor ta[a, b] := t1[x, y, a] * t2[y, b, x] + @tensor tb[a, b] := t3[x, y, a, y, b, x] @test ta ≈ tb end if BraidingStyle(I) isa Bosonic && hasfusiontensor(I) @@ -360,44 +360,38 @@ for V in spacelist @test TensorKit.to_cpu(dHrA12) ≈ hHrA12 end end - BraidingStyle(I) isa HasBraiding && @timedtestset "Index flipping: test flipping inverse" begin + hasbraiding && @timedtestset "Index flipping: test flipping inverse" begin t = cuRAND.rand(ComplexF64, V1 ⊗ V2 ⊗ V3 ← (V4 ⊗ V5)') for i in 1:5 - CUDA.@allowscalar begin - @test t ≈ flip(flip(t, i), i; inv = true) - @test t ≈ flip(flip(t, i; inv = true), i) - end + @test t ≈ flip(flip(t, i), i; inv = true) + @test t ≈ flip(flip(t, i; inv = true), i) end end - #=@timedtestset "Index flipping: test via explicit flip" begin + symmetricbraiding && "Index flipping: test via explicit flip" begin t = cuRAND.rand(ComplexF64, V1 ⊗ V1' ← V1' ⊗ V1) - F1 = unitary(flip(V1), V1) + F1 = adapt(CuArray{ComplexF64}, unitary(flip(V1), V1)) - CUDA.@allowscalar begin - @tensor tf[a, b; c, d] := F1[a, a'] * t[a', b; c, d] - @test flip(t, 1) ≈ tf - @tensor tf[a, b; c, d] := conj(F1[b, b']) * t[a, b'; c, d] - @test twist!(flip(t, 2), 2) ≈ tf - @tensor tf[a, b; c, d] := F1[c, c'] * t[a, b; c', d] - @test flip(t, 3) ≈ tf - @tensor tf[a, b; c, d] := conj(F1[d, d']) * t[a, b; c, d'] - @test twist!(flip(t, 4), 4) ≈ tf - end + @tensor tf[a, b; c, d] := F1[a, a'] * t[a', b; c, d] + @test flip(t, 1) ≈ tf + @tensor tf[a, b; c, d] := conj(F1[b, b']) * t[a, b'; c, d] + @test twist!(flip(t, 2), 2) ≈ tf + @tensor tf[a, b; c, d] := F1[c, c'] * t[a, b; c', d] + @test flip(t, 3) ≈ tf + @tensor tf[a, b; c, d] := conj(F1[d, d']) * t[a, b; c, d'] + @test twist!(flip(t, 4), 4) ≈ tf end - @timedtestset "Index flipping: test via contraction" begin + symmetricbraiding && @timedtestset "Index flipping: test via contraction" begin t1 = cuRAND.rand(ComplexF64, V1 ⊗ V2 ⊗ V3 ← V4) t2 = cuRAND.rand(ComplexF64, V2' ⊗ V5 ← V4' ⊗ V1) - CUDA.@allowscalar begin - @tensor ta[a, b] := t1[x, y, a, z] * t2[y, b, z, x] - @tensor tb[a, b] := flip(t1, 1)[x, y, a, z] * flip(t2, 4)[y, b, z, x] - @test ta ≈ tb - @tensor tb[a, b] := flip(t1, (2, 4))[x, y, a, z] * flip(t2, (1, 3))[y, b, z, x] - @test ta ≈ tb - @tensor tb[a, b] := flip(t1, (1, 2, 4))[x, y, a, z] * flip(t2, (1, 3, 4))[y, b, z, x] - @tensor tb[a, b] := flip(t1, (1, 3))[x, y, a, z] * flip(t2, (2, 4))[y, b, z, x] - @test flip(ta, (1, 2)) ≈ tb - end - end=# # TODO + @tensor ta[a, b] := t1[x, y, a, z] * t2[y, b, z, x] + @tensor tb[a, b] := flip(t1, 1)[x, y, a, z] * flip(t2, 4)[y, b, z, x] + @test ta ≈ tb + @tensor tb[a, b] := flip(t1, (2, 4))[x, y, a, z] * flip(t2, (1, 3))[y, b, z, x] 
+ @test ta ≈ tb + @tensor tb[a, b] := flip(t1, (1, 2, 4))[x, y, a, z] * flip(t2, (1, 3, 4))[y, b, z, x] + @tensor tb[a, b] := flip(t1, (1, 3))[x, y, a, z] * flip(t2, (2, 4))[y, b, z, x] + @test flip(ta, (1, 2)) ≈ tb + end @timedtestset "Multiplication of isometries: test properties" begin W1 = V1 ⊗ V2 ⊗ V3 W2 = (V4 ⊗ V5)' @@ -551,10 +545,8 @@ for V in spacelist t1 = cuRAND.rand(T, V1, V5') t2 = cuRAND.rand(T, V2 ⊗ V3, V4') t = @constinferred (t1 ⊗ t2) - CUDA.@allowscalar begin - @tensor t′[1 2 3; 4 5] := t1[1; 4] * t2[2 3; 5] - end - @test t ≈ t′ # This should really not be broken + @tensor t′[1 2 3; 4 5] := t1[1; 4] * t2[2 3; 5] + @test t ≈ t′ end end end @@ -567,17 +559,17 @@ end V1, V2, V3, V4, V5 = Vlist1 W1, W2, W3, W4, W5 = Vlist2 for T in (Float32, ComplexF64) - t1 = rand(T, V2 ⊗ V3, (V4 ⊗ V5)') - t2 = rand(T, W2, (W3 ⊗ W4)') + t1 = CUDA.rand(T, V2 ⊗ V3, (V4 ⊗ V5)') + t2 = CUDA.rand(T, W2, (W3 ⊗ W4)') t = @constinferred (t1 ⊠ t2) d1 = dim(codomain(t1)) d2 = dim(codomain(t2)) d3 = dim(domain(t1)) d4 = dim(domain(t2)) - At = convert(Array, t) + At = convert(Array, adapt(Vector{T}, t)) @test reshape(At, (d1, d2, d3, d4)) ≈ - reshape(convert(Array, t1), (d1, 1, d3, 1)) .* - reshape(convert(Array, t2), (1, d2, 1, d4)) + reshape(convert(Array, adapt(Vector{T}, t1)), (d1, 1, d3, 1)) .* + reshape(convert(Array, adapt(Vector{T}, t2)), (1, d2, 1, d4)) end end end From 4c599c2c4ca833918e5fa6e1886cafcdfe33a9fd Mon Sep 17 00:00:00 2001 From: Katharine Hyatt Date: Mon, 27 Apr 2026 07:53:59 -0400 Subject: [PATCH 2/2] fix typo --- test/cuda/tensors.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/cuda/tensors.jl b/test/cuda/tensors.jl index 7b958d915..bc40d381e 100644 --- a/test/cuda/tensors.jl +++ b/test/cuda/tensors.jl @@ -367,7 +367,7 @@ for V in spacelist @test t ≈ flip(flip(t, i; inv = true), i) end end - symmetricbraiding && "Index flipping: test via explicit flip" begin + symmetricbraiding && @timedtestset "Index flipping: test via explicit flip" begin t = cuRAND.rand(ComplexF64, V1 ⊗ V1' ← V1' ⊗ V1) F1 = adapt(CuArray{ComplexF64}, unitary(flip(V1), V1))
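
For reviewers wanting to exercise the storage-conversion changes locally, a minimal round-trip sketch (not part of the patch; it assumes the TensorKitCUDAExt extension is active, i.e. CUDA.jl is loaded alongside TensorKit, and the spaces are purely illustrative):

    using TensorKit, CUDA, Adapt

    V = ℂ^2 ⊗ ℂ^3
    t_host = rand(ComplexF64, V, V)              # Array-backed TensorMap on the host
    t_dev  = adapt(CuArray{ComplexF64}, t_host)  # host → device (presumably hits the new CuArray-backed constructor)
    t_back = adapt(Vector{ComplexF64}, t_dev)    # device → host, as in test/cuda/tensors.jl
    @assert t_back ≈ t_host

The `adapt(CuArray{T}, t)` / `adapt(Vector{T}, t)` calls mirror the pattern the updated tests use in place of comparisons under `CUDA.@allowscalar`.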