From b2b3450272795305ca92cd994f6fb21bfb4d6707 Mon Sep 17 00:00:00 2001
From: mtfishman
Date: Mon, 16 Dec 2024 19:40:06 -0500
Subject: [PATCH] [WIP] Rewrite based on NamedDimsArrays

---
 .JuliaFormatter.toml | 1 +
 .../ISSUE_TEMPLATE/02_NDTensors_bug_report.md | 60 -
 .../02_NDTensors_feature_request.md | 24 -
 ...1_ITensors_bug_report.md => BUG_REPORT.md} | 6 +-
 ..._feature_request.md => FEATURE_REQUEST.md} | 4 +-
 .../PACKAGE_bug_report.md | 60 -
 .../PACKAGE_feature_request.md | 24 -
 .../generate_issue_templates.jl | 50 -
 .github/PULL_REQUEST_TEMPLATE.md | 2 +
 .github/workflows/CompatHelper.yml | 2 +-
 .github/workflows/Downstream.yml | 57 -
 .github/workflows/FormatCheck.yml | 13 +
 .github/workflows/LiterateCheck.yml | 15 +
 .github/workflows/Tests.yml | 42 +
 .github/workflows/comment_trigger_example.yml | 58 -
 .github/workflows/documentation.yml | 36 +-
 .github/workflows/format_check.yml | 35 -
 .github/workflows/format_pr.yml | 29 -
 .github/workflows/format_suggestions.yml | 27 -
 .../main_test_itensors_base_macos_windows.yml | 45 -
 .../workflows/test_itensors_base_ubuntu.yml | 46 -
 .github/workflows/test_ndtensors.yml | 40 -
 .gitignore | 21 +-
 .pre-commit-config.yaml | 10 +-
 CITATION.cff | 35 -
 CODE_OF_CONDUCT.md | 128 -
 Checklists.txt | 23 -
 LICENSE | 222 +-
 NDTensors/.JuliaFormatter.toml | 2 -
 NDTensors/LICENSE | 201 --
 NDTensors/Project.toml | 105 -
 .../NDTensorsAMDGPUExt/NDTensorsAMDGPUExt.jl | 12 -
 NDTensors/ext/NDTensorsAMDGPUExt/adapt.jl | 31 -
 NDTensors/ext/NDTensorsAMDGPUExt/append.jl | 8 -
 NDTensors/ext/NDTensorsAMDGPUExt/copyto.jl | 35 -
 NDTensors/ext/NDTensorsAMDGPUExt/indexing.jl | 23 -
 .../ext/NDTensorsAMDGPUExt/linearalgebra.jl | 22 -
 NDTensors/ext/NDTensorsAMDGPUExt/mul.jl | 45 -
 .../ext/NDTensorsAMDGPUExt/permutedims.jl | 23 -
 NDTensors/ext/NDTensorsAMDGPUExt/set_types.jl | 11 -
 .../ext/NDTensorsCUDAExt/NDTensorsCUDAExt.jl | 12 -
 NDTensors/ext/NDTensorsCUDAExt/adapt.jl | 26 -
 NDTensors/ext/NDTensorsCUDAExt/append.jl | 8 -
 NDTensors/ext/NDTensorsCUDAExt/copyto.jl | 30 -
 .../ext/NDTensorsCUDAExt/default_kwargs.jl | 4 -
 NDTensors/ext/NDTensorsCUDAExt/indexing.jl | 23 -
 NDTensors/ext/NDTensorsCUDAExt/iscu.jl | 4 -
 .../ext/NDTensorsCUDAExt/linearalgebra.jl | 60 -
 NDTensors/ext/NDTensorsCUDAExt/mul.jl | 47 -
 NDTensors/ext/NDTensorsCUDAExt/permutedims.jl | 23 -
 NDTensors/ext/NDTensorsCUDAExt/set_types.jl | 13 -
 .../NDTensorsGPUArraysCoreExt.jl | 4 -
 .../blocksparsetensor.jl | 26 -
 .../ext/NDTensorsGPUArraysCoreExt/contract.jl | 81 -
 .../ext/NDTensorsHDF5Ext/NDTensorsHDF5Ext.jl | 8 -
 NDTensors/ext/NDTensorsHDF5Ext/blocksparse.jl | 68 -
 NDTensors/ext/NDTensorsHDF5Ext/dense.jl | 37 -
 NDTensors/ext/NDTensorsHDF5Ext/diag.jl | 38 -
 NDTensors/ext/NDTensorsHDF5Ext/empty.jl | 23 -
 .../NDTensorsJLArraysExt.jl | 7 -
 NDTensors/ext/NDTensorsJLArraysExt/copyto.jl | 30 -
 .../ext/NDTensorsJLArraysExt/indexing.jl | 19 -
 .../ext/NDTensorsJLArraysExt/linearalgebra.jl | 40 -
 NDTensors/ext/NDTensorsJLArraysExt/mul.jl | 43 -
 .../ext/NDTensorsJLArraysExt/permutedims.jl | 24 -
 .../NDTensorsMappedArraysExt.jl | 25 -
 .../NDTensorsMetalExt/NDTensorsMetalExt.jl | 12 -
 NDTensors/ext/NDTensorsMetalExt/adapt.jl | 29 -
 NDTensors/ext/NDTensorsMetalExt/append.jl | 10 -
 NDTensors/ext/NDTensorsMetalExt/copyto.jl | 28 -
 NDTensors/ext/NDTensorsMetalExt/indexing.jl | 18 -
 .../ext/NDTensorsMetalExt/linearalgebra.jl | 49 -
 NDTensors/ext/NDTensorsMetalExt/mul.jl | 42 -
 .../ext/NDTensorsMetalExt/permutedims.jl | 40 -
 NDTensors/ext/NDTensorsMetalExt/set_types.jl | 13 -
 .../NDTensorsOctavianExt.jl | 9 -
 NDTensors/ext/NDTensorsOctavianExt/import.jl | 1 -
 .../ext/NDTensorsOctavianExt/octavian.jl | 18 -
 .../NDTensorsTBLISExt/NDTensorsTBLISExt.jl | 10 -
 NDTensors/ext/NDTensorsTBLISExt/contract.jl | 43 -
 .../NDTensorscuTENSORExt.jl | 3 -
 .../ext/NDTensorscuTENSORExt/contract.jl | 50 -
 NDTensors/src/NDTensors.jl | 237 --
 .../NDTensorsNamedDimsArraysExt.jl | 6 -
 .../src/NDTensorsNamedDimsArraysExt/fill.jl | 1 -
 .../NDTensorsNamedDimsArraysExt/similar.jl | 5 -
 .../generic_array_constructors.jl | 41 -
 NDTensors/src/abstractarray/iscu.jl | 7 -
 NDTensors/src/abstractarray/mul.jl | 12 -
 NDTensors/src/abstractarray/permutedims.jl | 9 -
 NDTensors/src/abstractarray/set_types.jl | 17 -
 NDTensors/src/abstractarray/similar.jl | 99 -
 .../abstractarray/tensoralgebra/contract.jl | 185 --
 NDTensors/src/abstractarray/to_shape.jl | 15 -
 NDTensors/src/adapt.jl | 37 -
 NDTensors/src/aliasstyle.jl | 34 -
 NDTensors/src/array/mul.jl | 4 -
 NDTensors/src/array/permutedims.jl | 24 -
 .../arraystorage/storage/arraystorage.jl | 73 -
 .../arraystorage/arraystorage/storage/conj.jl | 2 -
 .../arraystorage/storage/contract.jl | 61 -
 .../arraystorage/storage/permutedims.jl | 6 -
 .../arraystorage/tensor/arraystorage.jl | 26 -
 .../arraystorage/tensor/contract.jl | 31 -
 .../arraystorage/arraystorage/tensor/eigen.jl | 67 -
 .../arraystorage/tensor/eigen_generic.jl | 84 -
 .../arraystorage/tensor/indexing.jl | 8 -
 .../arraystorage/arraystorage/tensor/mul.jl | 6 -
 .../arraystorage/tensor/permutedims.jl | 14 -
 .../arraystorage/arraystorage/tensor/qr.jl | 11 -
 .../arraystorage/arraystorage/tensor/svd.jl | 127 -
 .../arraystorage/arraystorage/tensor/zeros.jl | 18 -
 .../storage/combiner/contract.jl | 14 -
 .../storage/combiner/contract_combine.jl | 143 --
 .../storage/combiner/contract_uncombine.jl | 156 --
 .../storage/combiner/contract_utils.jl | 129 -
 .../blocksparsearray/storage/contract.jl | 108 -
 .../blocksparsearray/tensor/contract.jl | 59 -
 .../combiner/storage/combinerarray.jl | 12 -
 .../arraystorage/combiner/storage/contract.jl | 76 -
 .../combiner/storage/contract_utils.jl | 52 -
 .../combiner/storage/promote_rule.jl | 1 -
 .../arraystorage/combiner/tensor/contract.jl | 31 -
 .../combiner/tensor/to_arraystorage.jl | 4 -
 .../diagonalarray/storage/contract.jl | 173 --
 .../diagonalarray/tensor/contract.jl | 25 -
 NDTensors/src/blocksparse/adapt.jl | 3 -
 NDTensors/src/blocksparse/block.jl | 180 --
 NDTensors/src/blocksparse/blockdims.jl | 221 --
 NDTensors/src/blocksparse/blockoffsets.jl | 173 --
 NDTensors/src/blocksparse/blocksparse.jl | 207 --
 .../src/blocksparse/blocksparsetensor.jl | 1002 --------
 NDTensors/src/blocksparse/combiner.jl | 162 --
 NDTensors/src/blocksparse/contract.jl | 76 -
 NDTensors/src/blocksparse/contract_folds.jl | 60 -
 NDTensors/src/blocksparse/contract_generic.jl | 162 --
 .../src/blocksparse/contract_sequential.jl | 105 -
 NDTensors/src/blocksparse/contract_threads.jl | 137 --
 .../src/blocksparse/contract_utilities.jl | 96 -
 NDTensors/src/blocksparse/diagblocksparse.jl | 700 ------
 NDTensors/src/blocksparse/fermions.jl | 7 -
 NDTensors/src/blocksparse/linearalgebra.jl | 427 ----
 NDTensors/src/blocksparse/similar.jl | 62 -
 NDTensors/src/combiner/combiner.jl | 171 --
 NDTensors/src/combiner/contract.jl | 103 -
 NDTensors/src/default_kwargs.jl | 11 -
 NDTensors/src/dense/dense.jl | 143 --
 NDTensors/src/dense/densetensor.jl | 318 ---
 .../src/dense/generic_array_constructors.jl | 31 -
 .../src/dense/linearalgebra/decompositions.jl | 96 -
 NDTensors/src/dense/set_types.jl | 15 -
 NDTensors/src/dense/tensoralgebra/contract.jl | 231 --
 NDTensors/src/dense/tensoralgebra/outer.jl | 35 -
 NDTensors/src/deprecated.jl | 8 -
 NDTensors/src/diag/diag.jl | 134 --
 NDTensors/src/diag/diagtensor.jl | 218 --
 NDTensors/src/diag/set_types.jl | 20 -
 NDTensors/src/diag/similar.jl | 30 -
 NDTensors/src/diag/tensoralgebra/contract.jl | 226 --
 NDTensors/src/diag/tensoralgebra/outer.jl | 30 -
 NDTensors/src/dims.jl | 99 -
 NDTensors/src/empty/EmptyTensor.jl | 184 --
 NDTensors/src/empty/adapt.jl | 8 -
 NDTensors/src/empty/empty.jl | 100 -
 NDTensors/src/empty/tensoralgebra/contract.jl | 74 -
 NDTensors/src/emptynumber.jl | 38 -
 NDTensors/src/exports.jl | 89 -
 NDTensors/src/imports.jl | 128 -
 .../lib/AMDGPUExtensions/.JuliaFormatter.toml | 2 -
 .../AMDGPUExtensions/src/AMDGPUExtensions.jl | 4 -
 NDTensors/src/lib/AMDGPUExtensions/src/roc.jl | 14 -
 .../src/lib/AMDGPUExtensions/test/runtests.jl | 9 -
 .../src/lib/AllocateData/.JuliaFormatter.toml | 2 -
 NDTensors/src/lib/AllocateData/README.md | 15 -
 .../src/lib/AllocateData/src/AllocateData.jl | 8 -
 .../AllocateDataLinearAlgebraExt.jl | 4 -
 .../AllocateDataLinearAlgebraExt/diagonal.jl | 16 -
 .../AllocateDataLinearAlgebraExt/hermitian.jl | 19 -
 .../src/lib/AllocateData/src/allocate.jl | 38 -
 NDTensors/src/lib/AllocateData/src/base.jl | 11 -
 .../src/lib/AllocateData/src/defaults.jl | 4 -
 .../src/lib/AllocateData/src/initializers.jl | 29 -
 NDTensors/src/lib/AllocateData/src/to_axis.jl | 16 -
 .../src/lib/AllocateData/test/Project.toml | 3 -
 .../src/lib/AllocateData/test/runtests.jl | 39 -
 .../lib/BackendSelection/.JuliaFormatter.toml | 2 -
 .../BackendSelection/src/BackendSelection.jl | 9 -
 .../BackendSelection/src/abstractbackend.jl | 4 -
 .../lib/BackendSelection/src/backend_types.jl | 57 -
 .../lib/BackendSelection/test/Project.toml | 3 -
 .../src/lib/BackendSelection/test/runtests.jl | 29 -
 .../lib/BaseExtensions/.JuliaFormatter.toml | 2 -
 .../lib/BaseExtensions/src/BaseExtensions.jl | 3 -
 .../src/lib/BaseExtensions/src/replace.jl | 32 -
 .../src/lib/BaseExtensions/test/Project.toml | 3 -
 .../src/lib/BaseExtensions/test/runtests.jl | 15 -
 .../BlockSparseArrays/.JuliaFormatter.toml | 2 -
 .../src/lib/BlockSparseArrays/Project.toml | 3 -
 NDTensors/src/lib/BlockSparseArrays/README.md | 192 --
 .../BlockSparseArrays/examples/Project.toml | 4 -
 .../lib/BlockSparseArrays/examples/README.jl | 206 --
 .../src/BlockSparseArraysAdaptExt.jl | 5 -
 .../src/BlockSparseArraysGradedAxesExt.jl | 156 --
 .../src/reducewhile.jl | 34 -
 .../test/Project.toml | 4 -
 .../test/runtests.jl | 322 ---
 .../src/BlockSparseArraysTensorAlgebraExt.jl | 24 -
 .../test/Project.toml | 4 -
 .../test/runtests.jl | 17 -
 .../BlockArraysExtensions.jl | 591 -----
 .../BlockArraysSparseArraysBaseExt.jl | 11 -
 .../src/BlockSparseArrays.jl | 28 -
 .../abstractblocksparsearray.jl | 78 -
 .../abstractblocksparsematrix.jl | 1 -
 .../abstractblocksparsevector.jl | 1 -
 .../abstractblocksparsearray/arraylayouts.jl | 53 -
 .../src/abstractblocksparsearray/broadcast.jl | 48 -
 .../src/abstractblocksparsearray/cat.jl | 7 -
 .../abstractblocksparsearray/linearalgebra.jl | 18 -
 .../src/abstractblocksparsearray/map.jl | 151 --
 .../sparsearrayinterface.jl | 38 -
 .../src/abstractblocksparsearray/views.jl | 294 ---
 .../wrappedabstractblocksparsearray.jl | 310 ---
 .../lib/BlockSparseArrays/src/backup/qr.jl | 143 --
 .../src/blocksparsearray/blocksparsearray.jl | 199 --
 .../src/blocksparsearray/defaults.jl | 42 -
 .../blocksparsearrayinterface/arraylayouts.jl | 48 -
 .../blocksparsearrayinterface.jl | 274 ---
 .../blocksparsearrayinterface/blockzero.jl | 45 -
 .../blocksparsearrayinterface/broadcast.jl | 39 -
 .../src/blocksparsearrayinterface/cat.jl | 26 -
 .../linearalgebra.jl | 12 -
 .../src/blocksparsearrayinterface/map.jl | 19 -
 .../src/blocksparsearrayinterface/views.jl | 3 -
 .../lib/BlockSparseArrays/test/Project.toml | 6 -
 .../test/TestBlockSparseArraysUtils.jl | 15 -
 .../lib/BlockSparseArrays/test/runtests.jl | 5 -
 .../lib/BlockSparseArrays/test/test_basics.jl | 1033 --------
 .../.JuliaFormatter.toml | 2 -
 .../src/BroadcastMapConversion.jl | 48 -
 .../BroadcastMapConversion/test/runtests.jl | 14 -
 .../lib/CUDAExtensions/.JuliaFormatter.toml | 2 -
 .../lib/CUDAExtensions/src/CUDAExtensions.jl | 4 -
 NDTensors/src/lib/CUDAExtensions/src/cuda.jl | 14 -
 .../src/lib/CUDAExtensions/test/runtests.jl | 9 -
 .../lib/DiagonalArrays/.JuliaFormatter.toml | 2 -
 NDTensors/src/lib/DiagonalArrays/README.md | 84 -
 .../lib/DiagonalArrays/examples/Project.toml | 4 -
 .../src/lib/DiagonalArrays/examples/README.jl | 79 -
 .../lib/DiagonalArrays/src/DiagonalArrays.jl | 14 -
 .../abstractdiagonalarray.jl | 3 -
 .../src/abstractdiagonalarray/arraylayouts.jl | 7 -
 .../diagonalarraydiaginterface.jl | 23 -
 .../sparsearrayinterface.jl | 22 -
 .../src/diaginterface/defaults.jl | 3 -
 .../src/diaginterface/diagindex.jl | 14 -
 .../src/diaginterface/diagindices.jl | 14 -
 .../src/diaginterface/diaginterface.jl | 57 -
 .../src/diagonalarray/arraylayouts.jl | 11 -
 .../src/diagonalarray/diagonalarray.jl | 107 -
 .../src/diagonalarray/diagonalmatrix.jl | 5 -
 .../src/diagonalarray/diagonalvector.jl | 5 -
 .../src/lib/DiagonalArrays/test/Project.toml | 2 -
 .../src/lib/DiagonalArrays/test/runtests.jl | 59 -
 NDTensors/src/lib/Expose/.JuliaFormatter.toml | 2 -
 NDTensors/src/lib/Expose/README.md | 10 -
 NDTensors/src/lib/Expose/TODO.md | 4 -
 NDTensors/src/lib/Expose/src/Expose.jl | 24 -
 NDTensors/src/lib/Expose/src/exposed.jl | 17 -
 .../lib/Expose/src/functions/abstractarray.jl | 21 -
 .../src/lib/Expose/src/functions/adapt.jl | 8 -
 .../src/lib/Expose/src/functions/append.jl | 3 -
 .../src/lib/Expose/src/functions/copyto.jl | 4 -
 .../lib/Expose/src/functions/linearalgebra.jl | 29 -
 NDTensors/src/lib/Expose/src/functions/mul.jl | 4 -
 .../lib/Expose/src/functions/permutedims.jl | 13 -
 NDTensors/src/lib/Expose/src/import.jl | 14 -
 NDTensors/src/lib/Expose/test/Project.toml | 5 -
 NDTensors/src/lib/Expose/test/runtests.jl | 265 ---
 .../.JuliaFormatter.toml | 2 -
 .../lib/GPUArraysCoreExtensions/Project.toml | 7 -
 .../src/GPUArraysCoreExtensions.jl | 4 -
 .../src/gpuarrayscore.jl | 18 -
 .../GPUArraysCoreExtensions/test/runtests.jl | 7 -
 .../src/lib/GradedAxes/.JuliaFormatter.toml | 2 -
 .../src/lib/GradedAxes/src/GradedAxes.jl | 9 -
 .../lib/GradedAxes/src/blockedunitrange.jl | 186 --
 NDTensors/src/lib/GradedAxes/src/dual.jl | 15 -
 NDTensors/src/lib/GradedAxes/src/fusion.jl | 112 -
 .../src/lib/GradedAxes/src/gradedunitrange.jl | 384 ---
 .../lib/GradedAxes/src/gradedunitrangedual.jl | 134 --
 .../GradedAxes/src/labelledunitrangedual.jl | 49 -
 NDTensors/src/lib/GradedAxes/src/onetoone.jl | 9 -
 .../src/lib/GradedAxes/test/Project.toml | 4 -
 NDTensors/src/lib/GradedAxes/test/runtests.jl | 8 -
 .../src/lib/GradedAxes/test/test_basics.jl | 256 --
 .../src/lib/GradedAxes/test/test_dual.jl | 280 ---
 .../GradedAxes/test/test_tensor_product.jl | 88 -
 .../lib/LabelledNumbers/.JuliaFormatter.toml | 2 -
 .../LabelledNumbers/src/LabelledNumbers.jl | 8 -
 .../src/LabelledNumbersBlockArraysExt.jl | 22 -
 .../LabelledNumbers/src/labelled_interface.jl | 62 -
 .../lib/LabelledNumbers/src/labelledarray.jl | 20 -
 .../LabelledNumbers/src/labelledinteger.jl | 123 -
 .../lib/LabelledNumbers/src/labellednumber.jl | 41 -
 .../LabelledNumbers/src/labelledunitrange.jl | 63 -
 .../src/lib/LabelledNumbers/test/Project.toml | 4 -
 .../src/lib/LabelledNumbers/test/runtests.jl | 132 --
 .../lib/MetalExtensions/.JuliaFormatter.toml | 2 -
 .../MetalExtensions/src/MetalExtensions.jl | 4 -
 .../src/lib/MetalExtensions/src/metal.jl | 15 -
 .../src/lib/MetalExtensions/test/runtests.jl | 7 -
 .../lib/NamedDimsArrays/.JuliaFormatter.toml | 2 -
 NDTensors/src/lib/NamedDimsArrays/README.md | 36 -
 .../examples/example_readme.jl | 29 -
 .../src/NamedDimsArraysAdaptExt.jl | 3 -
 .../src/adapt_structure.jl | 6 -
 .../NamedDimsArraysAdaptExt/test/Project.toml | 4 -
 .../NamedDimsArraysAdaptExt/test/runtests.jl | 13 -
 .../src/NamedDimsArraysSparseArraysBaseExt.jl | 3 -
 .../src/densearray.jl | 8 -
 .../test/Project.toml | 4 -
 .../test/runtests.jl | 12 -
 .../src/NamedDimsArraysTensorAlgebraExt.jl | 7 -
 .../src/contract.jl | 31 -
 .../src/eigen.jl | 47 -
 .../src/fusedims.jl | 52 -
 .../NamedDimsArraysTensorAlgebraExt/src/qr.jl | 18 -
 .../src/svd.jl | 53 -
 .../test/Project.toml | 3 -
 .../test/runtests.jl | 59 -
 .../lib/NamedDimsArrays/generate_readme.jl | 10 -
 .../NamedDimsArrays/src/NamedDimsArrays.jl | 27 -
 .../src/abstractnameddimsarray.jl | 176 --
 .../src/abstractnameddimsmatrix.jl | 1 -
 .../src/abstractnameddimsvector.jl | 1 -
 .../NamedDimsArrays/src/abstractnamedint.jl | 41 -
 .../src/abstractnamedunitrange.jl | 35 -
 .../src/lib/NamedDimsArrays/src/broadcast.jl | 49 -
 .../NamedDimsArrays/src/broadcast_shape.jl | 24 -
 .../lib/NamedDimsArrays/src/constructors.jl | 48 -
 NDTensors/src/lib/NamedDimsArrays/src/map.jl | 14 -
 NDTensors/src/lib/NamedDimsArrays/src/name.jl | 2 -
 .../lib/NamedDimsArrays/src/nameddimsarray.jl | 114 -
 .../src/lib/NamedDimsArrays/src/namedint.jl | 28 -
 .../lib/NamedDimsArrays/src/namedunitrange.jl | 12 -
 .../lib/NamedDimsArrays/src/permutedims.jl | 4 -
 .../lib/NamedDimsArrays/src/promote_shape.jl | 12 -
 .../src/lib/NamedDimsArrays/src/randname.jl | 5 -
 .../src/lib/NamedDimsArrays/src/similar.jl | 49 -
 .../src/lib/NamedDimsArrays/src/traits.jl | 1 -
 .../src/lib/NamedDimsArrays/test/Project.toml | 3 -
 .../src/lib/NamedDimsArrays/test/runtests.jl | 11 -
 .../test/test_NDTensorsNamedDimsArraysExt.jl | 33 -
 .../test/test_NamedDimsArraysAdaptExt.jl | 5 -
 ...test_NamedDimsArraysSparseArraysBaseExt.jl | 5 -
 .../test_NamedDimsArraysTensorAlgebraExt.jl | 5 -
 .../lib/NamedDimsArrays/test/test_basic.jl | 108 -
 .../test/test_tensoralgebra.jl | 80 -
 .../src/NestedPermutedDimsArrays.jl | 276 ---
 .../test/Project.toml | 2 -
 .../NestedPermutedDimsArrays/test/runtests.jl | 23 -
 .../RankFactorization/.JuliaFormatter.toml | 2 -
 .../src/RankFactorization.jl | 5 -
 .../RankFactorization/src/default_kwargs.jl | 11 -
 .../src/lib/RankFactorization/src/spectrum.jl | 23 -
 .../src/truncate_spectrum.jl | 105 -
 .../src/lib/SmallVectors/.JuliaFormatter.toml | 2 -
 NDTensors/src/lib/SmallVectors/README.md | 73 -
 .../SmallVectors/src/BaseExt/insertstyle.jl | 9 -
 .../src/lib/SmallVectors/src/BaseExt/sort.jl | 23 -
 .../SmallVectors/src/BaseExt/sortedunique.jl | 241 --
 .../SmallVectors/src/BaseExt/thawfreeze.jl | 6 -
 .../src/lib/SmallVectors/src/SmallVectors.jl | 45 -
 .../SmallVectors/src/abstractarray/insert.jl | 2 -
 .../abstractsmallvector.jl | 34 -
 .../src/abstractsmallvector/deque.jl | 324 ---
 .../src/msmallvector/msmallvector.jl | 80 -
 .../src/msmallvector/thawfreeze.jl | 2 -
 .../src/smallvector/insertstyle.jl | 1 -
 .../src/smallvector/smallvector.jl | 82 -
 .../src/smallvector/thawfreeze.jl | 2 -
 .../src/subsmallvector/subsmallvector.jl | 80 -
 .../src/lib/SmallVectors/test/runtests.jl | 158 --
 .../src/lib/SortedSets/.JuliaFormatter.toml | 2 -
 .../src/lib/SortedSets/src/BaseExt/sorted.jl | 54 -
 .../SortedSets/src/DictionariesExt/insert.jl | 35 -
 .../src/DictionariesExt/isinsertable.jl | 1 -
 .../SmallVectorsDictionariesExt/interface.jl | 3 -
 .../src/lib/SortedSets/src/SortedSets.jl | 21 -
 .../src/SortedSetsSmallVectorsExt/smallset.jl | 16 -
 .../src/lib/SortedSets/src/abstractset.jl | 88 -
 .../lib/SortedSets/src/abstractwrappedset.jl | 117 -
 NDTensors/src/lib/SortedSets/src/sortedset.jl | 301 ---
 NDTensors/src/lib/SortedSets/test/runtests.jl | 42 -
 .../lib/SparseArraysBase/.JuliaFormatter.toml | 2 -
 NDTensors/src/lib/SparseArraysBase/README.md | 117 -
 .../SparseArraysBase/src/SparseArraysBase.jl | 36 -
 .../SparseArraysBaseLinearAlgebraExt.jl | 15 -
 .../SparseArraysBaseSparseArraysExt.jl | 20 -
 .../abstractsparsearray.jl | 3 -
 .../abstractsparsematrix.jl | 1 -
 .../abstractsparsevector.jl | 1 -
 .../src/abstractsparsearray/arraylayouts.jl | 41 -
 .../src/abstractsparsearray/base.jl | 16 -
 .../src/abstractsparsearray/baseinterface.jl | 33 -
 .../src/abstractsparsearray/broadcast.jl | 4 -
 .../src/abstractsparsearray/cat.jl | 4 -
 .../src/abstractsparsearray/convert.jl | 7 -
 .../src/abstractsparsearray/map.jl | 42 -
 .../sparsearrayinterface.jl | 46 -
 .../wrappedabstractsparsearray.jl | 22 -
 .../src/sparsearraydok/arraylayouts.jl | 13 -
 .../src/sparsearraydok/defaults.jl | 5 -
 .../src/sparsearraydok/sparsearraydok.jl | 146 --
 .../src/sparsearraydok/sparsematrixdok.jl | 1 -
 .../src/sparsearraydok/sparsevectordok.jl | 1 -
 .../SparseArraysBaseLinearAlgebraExt.jl | 78 -
 .../src/sparsearrayinterface/arraylayouts.jl | 5 -
 .../src/sparsearrayinterface/base.jl | 126 -
 .../src/sparsearrayinterface/broadcast.jl | 37 -
 .../src/sparsearrayinterface/cat.jl | 64 -
 .../src/sparsearrayinterface/conversion.jl | 5 -
 .../src/sparsearrayinterface/copyto.jl | 15 -
 .../src/sparsearrayinterface/densearray.jl | 12 -
 .../src/sparsearrayinterface/indexing.jl | 216 --
 .../src/sparsearrayinterface/interface.jl | 18 -
 .../interface_optional.jl | 44 -
 .../src/sparsearrayinterface/map.jl | 120 -
 .../sparsearrayinterface/vectorinterface.jl | 28 -
 .../src/sparsearrayinterface/wrappers.jl | 51 -
 .../src/sparsearrayinterface/zero.jl | 5 -
 .../lib/SparseArraysBase/test/Project.toml | 7 -
 .../AbstractSparseArrays.jl | 74 -
 .../DiagonalArrays.jl | 95 -
 .../SparseArraysBaseTestUtils/Project.toml | 2 -
 .../SparseArraysBaseTestUtils/SparseArrays.jl | 166 --
 .../SparseArraysBaseTestUtils.jl | 5 -
 .../src/lib/SparseArraysBase/test/runtests.jl | 5 -
 .../test/test_abstractsparsearray.jl | 439 ----
 .../lib/SparseArraysBase/test/test_array.jl | 13 -
 .../test/test_diagonalarray.jl | 76 -
 .../test/test_sparsearraydok.jl | 139 --
 .../lib/SymmetrySectors/.JuliaFormatter.toml | 2 -
 .../src/lib/SymmetrySectors/Project.toml | 0
 .../SymmetrySectors/src/SymmetrySectors.jl | 17 -
 .../lib/SymmetrySectors/src/abstractsector.jl | 114 -
 .../src/namedtuple_operations.jl | 16 -
 .../src/sector_definitions/fib.jl | 45 -
 .../src/sector_definitions/ising.jl | 46 -
 .../src/sector_definitions/o2.jl | 75 -
 .../src/sector_definitions/su.jl | 175 --
 .../src/sector_definitions/su2k.jl | 27 -
 .../src/sector_definitions/trivial.jl | 38 -
 .../src/sector_definitions/u1.jl | 31 -
 .../src/sector_definitions/zn.jl | 25 -
 .../lib/SymmetrySectors/src/sector_product.jl | 238 --
 .../lib/SymmetrySectors/src/symmetry_style.jl | 17 -
 .../src/lib/SymmetrySectors/test/runtests.jl | 12 -
 .../SymmetrySectors/test/test_fusion_rules.jl | 287 ---
 .../test/test_sector_product.jl | 622 -----
 .../test/test_simple_sectors.jl | 215 --
 .../src/lib/TagSets/.JuliaFormatter.toml | 2 -
 NDTensors/src/lib/TagSets/README.md | 20 -
 .../src/lib/TagSets/examples/benchmark.jl | 47 -
 NDTensors/src/lib/TagSets/src/TagSets.jl | 89 -
 NDTensors/src/lib/TagSets/test/runtests.jl | 35 -
 .../lib/TensorAlgebra/.JuliaFormatter.toml | 2 -
 NDTensors/src/lib/TensorAlgebra/Project.toml | 3 -
 .../TensorAlgebraGradedAxesExt/Project.toml | 0
 .../src/TensorAlgebraGradedAxesExt.jl | 8 -
 .../test/Project.toml | 7 -
 .../test/runtests.jl | 7 -
 .../test/test_basics.jl | 30 -
 .../test/test_contract.jl | 34 -
 .../src/BaseExtensions/BaseExtensions.jl | 4 -
 .../src/BaseExtensions/indexin.jl | 5 -
 .../src/BaseExtensions/permutedims.jl | 20 -
 .../LinearAlgebraExtensions.jl | 3 -
 .../src/LinearAlgebraExtensions/qr.jl | 68 -
 .../lib/TensorAlgebra/src/TensorAlgebra.jl | 14 -
 .../TensorAlgebra/src/blockedpermutation.jl | 192 --
 .../src/contract/allocate_output.jl | 83 -
 .../src/contract/blockedperms.jl | 32 -
 .../TensorAlgebra/src/contract/contract.jl | 120 -
 .../contract/contract_matricize/contract.jl | 57 -
 .../src/contract/output_labels.jl | 21 -
 .../src/lib/TensorAlgebra/src/fusedims.jl | 69 -
 .../src/lib/TensorAlgebra/src/splitdims.jl | 68 -
 .../src/lib/TensorAlgebra/test/Project.toml | 9 -
 .../src/lib/TensorAlgebra/test/runtests.jl | 4 -
 .../src/lib/TensorAlgebra/test/test_basics.jl | 199 --
 .../.JuliaFormatter.toml | 2 -
 .../src/TypeParameterAccessors.jl | 21 -
 .../src/abstractposition.jl | 3 -
 .../src/abstracttypeparameter.jl | 12 -
 .../src/base/abstractarray.jl | 95 -
 .../TypeParameterAccessors/src/base/array.jl | 2 -
 .../src/base/linearalgebra.jl | 25 -
 .../src/base/similartype.jl | 88 -
 .../src/base/stridedviews.jl | 3 -
 .../src/default_parameters.jl | 62 -
 .../TypeParameterAccessors/src/interface.jl | 22 -
 .../src/is_parameter_specified.jl | 5 -
 .../lib/TypeParameterAccessors/src/ndims.jl | 6 -
 .../TypeParameterAccessors/src/parameter.jl | 3 -
 .../TypeParameterAccessors/src/parameters.jl | 18 -
 .../TypeParameterAccessors/src/position.jl | 11 -
 .../src/set_parameters.jl | 46 -
 .../src/specify_parameters.jl | 30 -
 .../TypeParameterAccessors/src/to_unionall.jl | 17 -
 .../src/type_parameters.jl | 17 -
 .../src/unspecify_parameters.jl | 45 -
 .../TypeParameterAccessors/test/Project.toml | 4 -
 .../TypeParameterAccessors/test/runtests.jl | 10 -
 .../test/test_basics.jl | 85 -
 .../test/test_custom_types.jl | 78 -
 .../test/test_defaults.jl | 75 -
 .../test/test_similartype.jl | 26 -
 .../test/test_wrappers.jl | 121 -
 .../test/utils/test_inferred.jl | 28 -
 .../UnallocatedArrays/.JuliaFormatter.toml | 2 -
 NDTensors/src/lib/UnallocatedArrays/README.md | 4 -
 .../src/UnallocatedArrays.jl | 11 -
 .../src/abstractfill/abstractfill.jl | 22 -
 .../src/abstractunallocatedarray.jl | 62 -
 .../lib/UnallocatedArrays/src/broadcast.jl | 28 -
 .../lib/UnallocatedArrays/src/set_types.jl | 14 -
 .../UnallocatedArrays/src/unallocatedfill.jl | 67 -
 .../UnallocatedArrays/src/unallocatedzeros.jl | 88 -
 .../lib/UnallocatedArrays/test/Project.toml | 5 -
 .../lib/UnallocatedArrays/test/runtests.jl | 326 ---
 .../lib/UnspecifiedTypes/.JuliaFormatter.toml | 2 -
 NDTensors/src/lib/UnspecifiedTypes/README.md | 3 -
 NDTensors/src/lib/UnspecifiedTypes/TODO.md | 6 -
 .../UnspecifiedTypes/src/UnspecifiedTypes.jl | 11 -
 .../UnspecifiedTypes/src/unspecifiedarray.jl | 1 -
 .../UnspecifiedTypes/src/unspecifiednumber.jl | 5 -
 .../UnspecifiedTypes/src/unspecifiedzero.jl | 39 -
 .../src/lib/UnspecifiedTypes/test/runtests.jl | 6 -
 NDTensors/src/linearalgebra/linearalgebra.jl | 483 ----
 NDTensors/src/linearalgebra/svd.jl | 71 -
 NDTensors/src/linearalgebra/symmetric.jl | 30 -
 NDTensors/src/nodata.jl | 6 -
 NDTensors/src/tensor/set_types.jl | 31 -
 NDTensors/src/tensor/similar.jl | 71 -
 NDTensors/src/tensor/tensor.jl | 497 ----
 .../src/tensoroperations/contraction_logic.jl | 648 ------
 .../generic_tensor_operations.jl | 234 --
 .../src/tensorstorage/default_storage.jl | 21 -
 NDTensors/src/tensorstorage/set_types.jl | 7 -
 NDTensors/src/tensorstorage/similar.jl | 83 -
 NDTensors/src/tensorstorage/tensorstorage.jl | 105 -
 NDTensors/src/truncate.jl | 105 -
 NDTensors/src/tupletools.jl | 272 ---
 .../NDTensorsTestUtils/NDTensorsTestUtils.jl | 10 -
 .../test/NDTensorsTestUtils/device_list.jl | 59 -
 .../NDTensorsTestUtils/is_supported_eltype.jl | 6 -
 NDTensors/test/Project.toml | 34 -
 .../test/backup/arraytensor/Project.toml | 3 -
 NDTensors/test/backup/arraytensor/array.jl | 51 -
 .../backup/arraytensor/blocksparsearray.jl | 52 -
 .../test/backup/arraytensor/diagonalarray.jl | 25 -
 NDTensors/test/backup/arraytensor/runtests.jl | 8 -
 NDTensors/test/broken/readwrite.jl | 75 -
 NDTensors/test/lib/Project.toml | 11 -
 NDTensors/test/lib/runtests.jl | 32 -
 NDTensors/test/runtests.jl | 20 -
 NDTensors/test/test_blocksparse.jl | 356 ---
 NDTensors/test/test_combiner.jl | 122 -
 NDTensors/test/test_dense.jl | 327 ---
 NDTensors/test/test_diag.jl | 118 -
 NDTensors/test/test_diagblocksparse.jl | 88 -
 NDTensors/test/test_emptynumber.jl | 39 -
 NDTensors/test/test_emptystorage.jl | 35 -
 NDTensors/test/test_linearalgebra.jl | 97 -
 NDTensors/test/test_tupletools.jl | 34 -
 Project.toml | 69 +-
 README.md | 44 +-
 benchmark/benchmarks.jl | 7 +
 docs/Project.toml | 10 +-
 docs/make.jl | 29 +-
 docs/make_index.jl | 9 +
 docs/make_local_notest.jl | 5 -
 docs/make_local_test.jl | 3 -
 docs/make_readme.jl | 9 +
 docs/settings.jl | 78 -
 docs/src/AdvancedUsageGuide.md | 1124 ---------
 docs/src/CodeTiming.md | 10 -
 docs/src/ContractionSequenceOptimization.md | 130 --
 docs/src/DMRG.md | 5 -
 docs/src/DMRGObserver.md | 43 -
 docs/src/DeveloperGuide.md | 100 -
 docs/src/Einsum.md | 192 --
 docs/src/HDF5FileFormats.md | 187 --
 docs/src/ITensorType.md | 139 --
 docs/src/IncludedSiteTypes.md | 388 ---
 docs/src/IndexSetType.md | 32 -
 docs/src/IndexType.md | 65 -
 docs/src/MPSandMPO.md | 162 --
 docs/src/Multithreading.md | 157 --
 docs/src/Observer.md | 200 --
 docs/src/OpSum.md | 14 -
 docs/src/ProjMPO.md | 23 -
 docs/src/ProjMPOSum.md | 22 -
 docs/src/QN.md | 28 -
 docs/src/QNTricks.md | 81 -
 docs/src/RunningOnGPUs.md | 75 -
 docs/src/SiteType.md | 17 -
 docs/src/Sweeps.md | 26 -
 docs/src/UpgradeGuide_0.1_to_0.2.md | 365 ---
 docs/src/assets/favicon.ico | Bin 318 -> 0 bytes
 docs/src/assets/logo.png | Bin 39665 -> 0 bytes
 docs/src/examples/DMRG.md | 552 -----
 docs/src/examples/ITensor.md | 558 -----
 docs/src/examples/MPSandMPO.md | 454 ----
 docs/src/examples/Physics.md | 549 -----
 docs/src/examples/combiner_itensor.png | Bin 62978 -> 0 bytes
 .../itensor_factorization_figures/QR_Ex1.png | Bin 43444 -> 0 bytes
 .../itensor_factorization_figures/SVD_Ex1.png | Bin 51458 -> 0 bytes
 .../itensor_trace_figures/delta_itensor.png | Bin 25198 -> 0 bytes
 .../itensor_trace_figures/trace_A.png | Bin 35845 -> 0 bytes
 docs/src/examples/mps_element.png | Bin 93910 -> 0 bytes
 docs/src/examples/mps_expect.png | Bin 131704 -> 0 bytes
 docs/src/examples/mps_from_tensor.png | Bin 48581 -> 0 bytes
 .../mps_onesite_figures/operator_app_mps.png | Bin 35673 -> 0 bytes
 .../mps_onesite_figures/operator_contract.png | Bin 43428 -> 0 bytes
 .../mps_onesite_figures/updated_mps.png | Bin 20774 -> 0 bytes
 docs/src/examples/mps_zz_correlation.png | Bin 139285 -> 0 bytes
 .../examples/twosite_figures/gate_app_mps.png | Bin 38503 -> 0 bytes
 .../twosite_figures/gate_contract.png | Bin 52837 -> 0 bytes
 .../examples/twosite_figures/gate_gauge.png | Bin 22874 -> 0 bytes
 .../src/examples/twosite_figures/gate_svd.png | Bin 25784 -> 0 bytes
 docs/src/faq/DMRG.md | 242 --
 docs/src/faq/Development.md | 35 -
 docs/src/faq/HPC.md | 88 -
 docs/src/faq/JuliaAndCpp.md | 68 -
 docs/src/faq/JuliaPkg.md | 31 -
 docs/src/faq/QN.md | 24 -
 docs/src/faq/RelationshipToOtherLibraries.md | 13 -
 docs/src/getting_started/DebugChecks.md | 59 -
 docs/src/getting_started/Installing.md | 79 -
 docs/src/getting_started/NextSteps.md | 16 -
 docs/src/getting_started/RunningCodes.md | 124 -
 .../getting_started/install_screenshot.png | Bin 93122 -> 0 bytes
 docs/src/index.md | 327 ---
 docs/src/svd_tensor.png | Bin 40211 -> 0 bytes
 docs/src/tutorials/DMRG.md | 127 -
 docs/src/tutorials/MPSTimeEvolution.md | 189 --
 docs/src/tutorials/QN_DMRG.md | 182 --
 docs/src/tutorials/tebd.jl | 46 -
 docs/src/tutorials/trotter_tevol.png | Bin 127752 -> 0 bytes
 examples/Project.toml | 2 +
 examples/README.jl | 41 +
 examples/basic_ops/basic_ops.jl | 64 -
 examples/basic_ops/qn_itensors.jl | 31 -
 examples/ctmrg/anisotropic/Project.toml | 4 -
 examples/ctmrg/anisotropic/README.md | 7 -
 examples/ctmrg/anisotropic/run.jl | 27 -
 examples/ctmrg/isotropic/Project.toml | 3 -
 examples/ctmrg/isotropic/run.jl | 57 -
 examples/src/2d_classical_ising.jl | 94 -
 examples/src/ctmrg_anisotropic.jl | 300 ---
 examples/src/ctmrg_isotropic.jl | 49 -
 examples/src/trg.jl | 51 -
 examples/trg/Project.toml | 3 -
 examples/trg/run.jl | 21 -
 .../ITensorsChainRulesCoreExt.jl | 19 -
 .../LazyApply/LazyApply.jl | 16 -
 .../NDTensors/dense.jl | 8 -
 .../NDTensors/tensor.jl | 34 -
 ext/ITensorsChainRulesCoreExt/indexset.jl | 32 -
 ext/ITensorsChainRulesCoreExt/itensor.jl | 204 --
 .../non_differentiable.jl | 19 -
 ext/ITensorsChainRulesCoreExt/projection.jl | 10 -
 ext/ITensorsChainRulesCoreExt/smallstrings.jl | 12 -
 ext/ITensorsChainRulesCoreExt/utils.jl | 7 -
 ext/ITensorsChainRulesCoreExt/zygoterules.jl | 10 -
 ext/ITensorsHDF5Ext/ITensorsHDF5Ext.jl | 8 -
 ext/ITensorsHDF5Ext/index.jl | 43 -
 ext/ITensorsHDF5Ext/indexset.jl | 24 -
 ext/ITensorsHDF5Ext/itensor.jl | 36 -
 ext/ITensorsHDF5Ext/qn.jl | 26 -
 ext/ITensorsHDF5Ext/qnindex.jl | 30 -
 ext/ITensorsHDF5Ext/tagset.jl | 20 -
 .../ITensorsVectorInterfaceExt.jl | 107 -
 .../ITensorsZygoteRulesExt.jl | 3 -
 ext/ITensorsZygoteRulesExt/itensors.jl | 12 -
 jenkins/Dockerfile | 16 -
 jenkins/Jenkinsfile | 151 --
 src/ITensors.jl | 165 +-
 src/adapt.jl | 5 -
 src/argsdict/argsdict.jl | 111 -
 src/broadcast.jl | 545 -----
 src/deprecated.jl | 34 -
 src/developer_tools.jl | 30 -
 src/exports.jl | 197 --
 src/fermions/fermions.jl | 378 ---
 src/global_variables.jl | 211 --
 src/imports.jl | 197 --
 src/index.jl | 667 ------
 src/indexset.jl | 935 --------
 src/itensor.jl | 2073 -----------------
 src/lastval.jl | 23 -
 .../src/ContractionSequenceOptimization.jl | 42 -
 .../src/breadth_first_constructive.jl | 235 --
 .../src/contraction_cost.jl | 37 -
 .../src/depth_first_constructive.jl | 85 -
 .../src/three_tensors.jl | 69 -
 .../src/utils.jl | 371 ---
 src/lib/ITensorVisualizationCore/README.md | 3 -
 .../src/ITensorVisualizationCore.jl | 16 -
 .../src/visualize_macro.jl | 271 ---
 .../examples/Project.toml | 4 -
 .../examples/example_dmrg.jl | 24 -
 .../examples/example_readme.jl | 29 -
 .../src/ITensorsNamedDimsArraysExt.jl | 8 -
 .../src/combiner.jl | 22 -
 .../ITensorsNamedDimsArraysExt/src/index.jl | 65 -
 .../src/indexing.jl | 8 -
 .../ITensorsNamedDimsArraysExt/src/itensor.jl | 47 -
 .../src/tensoralgebra.jl | 31 -
 .../src/to_nameddimsarray.jl | 77 -
 .../test/Project.toml | 5 -
 .../test/runtests.jl | 11 -
 .../test/test_basics.jl | 33 -
 .../test/test_examples.jl | 17 -
 src/lib/ITensorsOpsExt/src/ITensorsOpsExt.jl | 79 -
 .../src/ITensorsSiteTypesExt.jl | 79 -
 src/lib/LazyApply/src/LazyApply.jl | 386 ---
 src/lib/Ops/ops_itensor.jl | 73 -
 src/lib/Ops/src/Ops.jl | 4 -
 src/lib/Ops/src/op.jl | 335 ---
 src/lib/Ops/src/trotter.jl | 35 -
 src/lib/QuantumNumbers/src/QuantumNumbers.jl | 5 -
 src/lib/QuantumNumbers/src/arrow.jl | 16 -
 src/lib/QuantumNumbers/src/qn.jl | 339 ---
 src/lib/QuantumNumbers/src/qnval.jl | 63 -
 src/lib/SiteTypes/src/SiteTypes.jl | 19 -
 .../src/SiteTypesChainRulesCoreExt.jl | 7 -
 src/lib/SiteTypes/src/sitetype.jl | 834 -------
 src/lib/SiteTypes/src/sitetypes/aliases.jl | 40 -
 src/lib/SiteTypes/src/sitetypes/boson.jl | 32 -
 src/lib/SiteTypes/src/sitetypes/electron.jl | 336 ---
 src/lib/SiteTypes/src/sitetypes/fermion.jl | 111 -
 .../SiteTypes/src/sitetypes/generic_sites.jl | 49 -
 src/lib/SiteTypes/src/sitetypes/qubit.jl | 502 ----
 src/lib/SiteTypes/src/sitetypes/qudit.jl | 110 -
 src/lib/SiteTypes/src/sitetypes/spinhalf.jl | 66 -
 src/lib/SiteTypes/src/sitetypes/spinone.jl | 143 --
 src/lib/SiteTypes/src/sitetypes/tj.jl | 234 --
 .../SmallStringsChainRulesCoreExt.jl | 5 -
 src/lib/SmallStrings/src/SmallStrings.jl | 3 -
 src/lib/SmallStrings/src/smallstring.jl | 150 --
 src/lib/TagSets/src/TagSets.jl | 392 ----
 src/name.jl | 1 -
 src/not.jl | 26 -
 src/nullspace.jl | 190 --
 src/oneitensor.jl | 19 -
 src/packagecompile/compile.jl | 62 -
 src/qn/flux.jl | 84 -
 src/qn/qnindex.jl | 552 -----
 src/qn/qnindexset.jl | 34 -
 src/qn/qnitensor.jl | 590 -----
 src/readwrite.jl | 13 -
 src/set_operations.jl | 90 -
 src/set_types.jl | 5 -
 src/symmetrystyle.jl | 19 -
 src/tensor_operations/itensor_combiner.jl | 84 -
 src/tensor_operations/matrix_algebra.jl | 94 -
 src/tensor_operations/matrix_decomposition.jl | 857 -------
 src/tensor_operations/permutations.jl | 244 --
 src/tensor_operations/tensor_algebra.jl | 642 -----
 src/usings.jl | 28 -
 src/utils.jl | 27 -
 src/val.jl | 1 -
 test/Project.toml | 24 +-
 test/base/Project.toml | 8 -
 test/base/runtests.jl | 16 -
 test/base/test_argsdict.jl | 115 -
 test/base/test_broadcast.jl | 308 ---
 test/base/test_combiner.jl | 263 ---
 test/base/test_contract.jl | 314 ---
 test/base/test_ctmrg.jl | 52 -
 test/base/test_debug_checks.jl | 36 -
 test/base/test_decomp.jl | 503 ----
 test/base/test_diagitensor.jl | 562 -----
 test/base/test_empty.jl | 81 -
 test/base/test_emptyitensor.jl | 103 -
 test/base/test_examples.jl | 14 -
 test/base/test_exports.jl | 11 -
 test/base/test_fermions.jl | 782 -------
 test/base/test_global_variables.jl | 52 -
 test/base/test_index.jl | 185 --
 test/base/test_indexset.jl | 374 ---
 test/base/test_indices.jl | 244 --
 test/base/test_inference.jl | 102 -
 test/base/test_itensor.jl | 1963 ----------------
 test/base/test_itensor_scalar.jl | 67 -
 test/base/test_itensor_scalar_contract.jl | 102 -
 test/base/test_itensor_slice.jl | 59 -
 test/base/test_ndtensors.jl | 27 -
 test/base/test_not.jl | 53 -
 test/base/test_oneitensor.jl | 18 -
 test/base/test_phys_site_types.jl | 851 -------
 test/base/test_qn.jl | 150 --
 test/base/test_qncombiner.jl | 12 -
 test/base/test_qndiagitensor.jl | 134 --
 test/base/test_qnindex.jl | 70 -
 test/base/test_qnitensor.jl | 1917 ---------------
 test/base/test_readwrite.jl | 187 --
 test/base/test_sitetype.jl | 583 -----
 test/base/test_smallstring.jl | 75 -
 test/base/test_svd.jl | 232 --
 test/base/test_symmetrystyle.jl | 48 -
 test/base/test_tagset.jl | 145 --
 test/base/test_trg.jl | 24 -
 .../TestITensorsExportedNames.jl | 203 --
 test/base/utils/testfilev0.1.41.h5 | Bin 16680 -> 0 bytes
 test/base/utils/util.jl | 40 -
 test/basics/test_basics.jl | 6 +
 .../ITensorsChainRulesCoreExt/Project.toml | 6 -
 .../ext/ITensorsChainRulesCoreExt/runtests.jl | 16 -
 .../test_chainrules.jl | 522 -----
 .../test_chainrules_ops.jl | 283 ---
 .../utils/chainrulestestutils.jl | 66 -
 .../ITensorsVectorInterfaceExt/Project.toml | 3 -
 .../ITensorsVectorInterfaceExt/runtests.jl | 131 --
 .../ext/NDTensorsMappedArraysExt/Project.toml | 5 -
 test/ext/NDTensorsMappedArraysExt/runtests.jl | 24 -
 .../runtests.jl | 16 -
 .../test_itensor_contract.jl | 161 --
 .../ITensorsNamedDimsArraysExt/Project.toml | 4 -
 .../ITensorsNamedDimsArraysExt/runtests.jl | 8 -
 test/lib/LazyApply/outdated/test_lazyapply.jl | 44 -
 test/lib/LazyApply/runtests.jl | 16 -
 test/lib/LazyApply/test_lazyapply.jl | 36 -
 test/lib/Ops/runtests.jl | 16 -
 test/lib/Ops/test_ops.jl | 247 --
 test/lib/Ops/test_trotter.jl | 29 -
 test/runtests.jl | 85 +-
 test/test_aqua.jl | 7 +
 test/threading/runtests.jl | 16 -
 test/threading/test_threading.jl | 83 -
 844 files changed, 335 insertions(+), 71507 deletions(-)
 delete mode 100644 .github/ISSUE_TEMPLATE/02_NDTensors_bug_report.md
 delete mode 100644 .github/ISSUE_TEMPLATE/02_NDTensors_feature_request.md
 rename .github/ISSUE_TEMPLATE/{01_ITensors_bug_report.md => BUG_REPORT.md} (94%)
 rename .github/ISSUE_TEMPLATE/{01_ITensors_feature_request.md => FEATURE_REQUEST.md} (82%)
 delete mode 100644 .github/ISSUE_TEMPLATE/generate_issue_templates/PACKAGE_bug_report.md
 delete mode 100644 .github/ISSUE_TEMPLATE/generate_issue_templates/PACKAGE_feature_request.md
 delete mode 100644 .github/ISSUE_TEMPLATE/generate_issue_templates/generate_issue_templates.jl
 delete mode 100644 .github/workflows/Downstream.yml
 create mode 100644 .github/workflows/FormatCheck.yml
 create mode 100644 .github/workflows/LiterateCheck.yml
 create mode 100644 .github/workflows/Tests.yml
 delete mode 100644 .github/workflows/comment_trigger_example.yml
 delete mode 100644 .github/workflows/format_check.yml
 delete mode 100644 .github/workflows/format_pr.yml
 delete mode 100644 .github/workflows/format_suggestions.yml
 delete mode 100644 .github/workflows/main_test_itensors_base_macos_windows.yml
 delete mode 100644 .github/workflows/test_itensors_base_ubuntu.yml
 delete mode 100644 .github/workflows/test_ndtensors.yml
 delete mode 100644 CITATION.cff
 delete mode 100644 CODE_OF_CONDUCT.md
 delete mode 100644 Checklists.txt
 delete mode 100644 NDTensors/.JuliaFormatter.toml
 delete mode 100644 NDTensors/LICENSE
 delete mode 100644 NDTensors/Project.toml
 delete mode 100644 NDTensors/ext/NDTensorsAMDGPUExt/NDTensorsAMDGPUExt.jl
 delete mode 100644 NDTensors/ext/NDTensorsAMDGPUExt/adapt.jl
 delete mode 100644 NDTensors/ext/NDTensorsAMDGPUExt/append.jl
 delete mode 100644 NDTensors/ext/NDTensorsAMDGPUExt/copyto.jl
 delete mode 100644 NDTensors/ext/NDTensorsAMDGPUExt/indexing.jl
 delete mode 100644 NDTensors/ext/NDTensorsAMDGPUExt/linearalgebra.jl
 delete mode 100644 NDTensors/ext/NDTensorsAMDGPUExt/mul.jl
 delete mode 100644 NDTensors/ext/NDTensorsAMDGPUExt/permutedims.jl
 delete mode 100644 NDTensors/ext/NDTensorsAMDGPUExt/set_types.jl
 delete mode 100644 NDTensors/ext/NDTensorsCUDAExt/NDTensorsCUDAExt.jl
 delete mode 100644 NDTensors/ext/NDTensorsCUDAExt/adapt.jl
 delete mode 100644 NDTensors/ext/NDTensorsCUDAExt/append.jl
 delete mode 100644 NDTensors/ext/NDTensorsCUDAExt/copyto.jl
 delete mode 100644 NDTensors/ext/NDTensorsCUDAExt/default_kwargs.jl
 delete mode 100644 NDTensors/ext/NDTensorsCUDAExt/indexing.jl
 delete mode 100644 NDTensors/ext/NDTensorsCUDAExt/iscu.jl
 delete mode 100644 NDTensors/ext/NDTensorsCUDAExt/linearalgebra.jl
 delete mode 100644 NDTensors/ext/NDTensorsCUDAExt/mul.jl
 delete mode 100644 NDTensors/ext/NDTensorsCUDAExt/permutedims.jl
 delete mode 100644 NDTensors/ext/NDTensorsCUDAExt/set_types.jl
 delete mode 100644 NDTensors/ext/NDTensorsGPUArraysCoreExt/NDTensorsGPUArraysCoreExt.jl
 delete mode 100644 NDTensors/ext/NDTensorsGPUArraysCoreExt/blocksparsetensor.jl
 delete mode 100644 NDTensors/ext/NDTensorsGPUArraysCoreExt/contract.jl
 delete mode 100644 NDTensors/ext/NDTensorsHDF5Ext/NDTensorsHDF5Ext.jl
 delete mode 100644 NDTensors/ext/NDTensorsHDF5Ext/blocksparse.jl
 delete mode 100644 NDTensors/ext/NDTensorsHDF5Ext/dense.jl
 delete mode 100644 NDTensors/ext/NDTensorsHDF5Ext/diag.jl
 delete mode 100644 NDTensors/ext/NDTensorsHDF5Ext/empty.jl
 delete mode 100644 NDTensors/ext/NDTensorsJLArraysExt/NDTensorsJLArraysExt.jl
 delete mode 100644 NDTensors/ext/NDTensorsJLArraysExt/copyto.jl
 delete mode 100644 NDTensors/ext/NDTensorsJLArraysExt/indexing.jl
 delete mode 100644 NDTensors/ext/NDTensorsJLArraysExt/linearalgebra.jl
 delete mode 100644 NDTensors/ext/NDTensorsJLArraysExt/mul.jl
 delete mode 100644 NDTensors/ext/NDTensorsJLArraysExt/permutedims.jl
 delete mode 100644 NDTensors/ext/NDTensorsMappedArraysExt/NDTensorsMappedArraysExt.jl
 delete mode 100644 NDTensors/ext/NDTensorsMetalExt/NDTensorsMetalExt.jl
 delete mode 100644 NDTensors/ext/NDTensorsMetalExt/adapt.jl
 delete mode 100644 NDTensors/ext/NDTensorsMetalExt/append.jl
 delete mode 100644 NDTensors/ext/NDTensorsMetalExt/copyto.jl
 delete mode 100644 NDTensors/ext/NDTensorsMetalExt/indexing.jl
 delete mode 100644 NDTensors/ext/NDTensorsMetalExt/linearalgebra.jl
 delete mode 100644 NDTensors/ext/NDTensorsMetalExt/mul.jl
 delete mode 100644 NDTensors/ext/NDTensorsMetalExt/permutedims.jl
 delete mode 100644 NDTensors/ext/NDTensorsMetalExt/set_types.jl
 delete mode 100644 NDTensors/ext/NDTensorsOctavianExt/NDTensorsOctavianExt.jl
 delete mode 100644 NDTensors/ext/NDTensorsOctavianExt/import.jl
 delete mode 100644 NDTensors/ext/NDTensorsOctavianExt/octavian.jl
 delete mode 100644 NDTensors/ext/NDTensorsTBLISExt/NDTensorsTBLISExt.jl
 delete mode 100644 NDTensors/ext/NDTensorsTBLISExt/contract.jl
 delete mode 100644 NDTensors/ext/NDTensorscuTENSORExt/NDTensorscuTENSORExt.jl
 delete mode 100644 NDTensors/ext/NDTensorscuTENSORExt/contract.jl
 delete mode 100644 NDTensors/src/NDTensors.jl
 delete mode 100644 NDTensors/src/NDTensorsNamedDimsArraysExt/NDTensorsNamedDimsArraysExt.jl
 delete mode 100644 NDTensors/src/NDTensorsNamedDimsArraysExt/fill.jl
 delete mode 100644 NDTensors/src/NDTensorsNamedDimsArraysExt/similar.jl
 delete mode 100644 NDTensors/src/abstractarray/generic_array_constructors.jl
 delete mode 100644 NDTensors/src/abstractarray/iscu.jl
 delete mode 100644 NDTensors/src/abstractarray/mul.jl
 delete mode 100644 NDTensors/src/abstractarray/permutedims.jl
 delete mode 100644 NDTensors/src/abstractarray/set_types.jl
 delete mode 100644 NDTensors/src/abstractarray/similar.jl
 delete mode 100644 NDTensors/src/abstractarray/tensoralgebra/contract.jl
 delete mode 100644 NDTensors/src/abstractarray/to_shape.jl
 delete mode 100644 NDTensors/src/adapt.jl
 delete mode 100644 NDTensors/src/aliasstyle.jl
 delete mode 100644 NDTensors/src/array/mul.jl
 delete mode 100644 NDTensors/src/array/permutedims.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/arraystorage/storage/arraystorage.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/arraystorage/storage/conj.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/arraystorage/storage/contract.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/arraystorage/storage/permutedims.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/arraystorage/tensor/arraystorage.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/arraystorage/tensor/contract.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/arraystorage/tensor/eigen.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/arraystorage/tensor/eigen_generic.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/arraystorage/tensor/indexing.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/arraystorage/tensor/mul.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/arraystorage/tensor/permutedims.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/arraystorage/tensor/qr.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/arraystorage/tensor/svd.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/arraystorage/tensor/zeros.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/blocksparsearray/storage/combiner/contract.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/blocksparsearray/storage/combiner/contract_combine.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/blocksparsearray/storage/combiner/contract_uncombine.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/blocksparsearray/storage/combiner/contract_utils.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/blocksparsearray/storage/contract.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/blocksparsearray/tensor/contract.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/combiner/storage/combinerarray.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/combiner/storage/contract.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/combiner/storage/contract_utils.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/combiner/storage/promote_rule.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/combiner/tensor/contract.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/combiner/tensor/to_arraystorage.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/diagonalarray/storage/contract.jl
 delete mode 100644 NDTensors/src/backup/arraystorage/diagonalarray/tensor/contract.jl
 delete mode 100644 NDTensors/src/blocksparse/adapt.jl
 delete mode 100644 NDTensors/src/blocksparse/block.jl
 delete mode 100644 NDTensors/src/blocksparse/blockdims.jl
 delete mode 100644 NDTensors/src/blocksparse/blockoffsets.jl
 delete mode 100644 NDTensors/src/blocksparse/blocksparse.jl
 delete mode 100644 NDTensors/src/blocksparse/blocksparsetensor.jl
 delete mode 100644 NDTensors/src/blocksparse/combiner.jl
 delete mode 100644 NDTensors/src/blocksparse/contract.jl
 delete mode 100644 NDTensors/src/blocksparse/contract_folds.jl
 delete mode 100644 NDTensors/src/blocksparse/contract_generic.jl
 delete mode 100644 NDTensors/src/blocksparse/contract_sequential.jl
 delete mode 100644 NDTensors/src/blocksparse/contract_threads.jl
 delete mode 100644 NDTensors/src/blocksparse/contract_utilities.jl
 delete mode 100644 NDTensors/src/blocksparse/diagblocksparse.jl
 delete mode 100644 NDTensors/src/blocksparse/fermions.jl
 delete mode 100644 NDTensors/src/blocksparse/linearalgebra.jl
 delete mode 100644 NDTensors/src/blocksparse/similar.jl
 delete mode 100644 NDTensors/src/combiner/combiner.jl
 delete mode 100644 NDTensors/src/combiner/contract.jl
 delete mode 100644 NDTensors/src/default_kwargs.jl
 delete mode 100644 NDTensors/src/dense/dense.jl
 delete mode 100644 NDTensors/src/dense/densetensor.jl
 delete mode 100644 NDTensors/src/dense/generic_array_constructors.jl
 delete mode 100644 NDTensors/src/dense/linearalgebra/decompositions.jl
 delete mode 100644 NDTensors/src/dense/set_types.jl
 delete mode 100644 NDTensors/src/dense/tensoralgebra/contract.jl
 delete mode 100644 NDTensors/src/dense/tensoralgebra/outer.jl
 delete mode 100644 NDTensors/src/deprecated.jl
 delete mode 100644 NDTensors/src/diag/diag.jl
 delete mode 100644 NDTensors/src/diag/diagtensor.jl
 delete mode 100644 NDTensors/src/diag/set_types.jl
 delete mode 100644 NDTensors/src/diag/similar.jl
 delete mode 100644 NDTensors/src/diag/tensoralgebra/contract.jl
 delete mode 100644 NDTensors/src/diag/tensoralgebra/outer.jl
 delete mode 100644 NDTensors/src/dims.jl
 delete mode 100644 NDTensors/src/empty/EmptyTensor.jl
 delete mode 100644 NDTensors/src/empty/adapt.jl
 delete mode 100644 NDTensors/src/empty/empty.jl
 delete mode 100644 NDTensors/src/empty/tensoralgebra/contract.jl
 delete mode 100644 NDTensors/src/emptynumber.jl
 delete mode 100644 NDTensors/src/exports.jl
 delete mode 100644 NDTensors/src/imports.jl
 delete mode 100644 NDTensors/src/lib/AMDGPUExtensions/.JuliaFormatter.toml
 delete mode 100644 NDTensors/src/lib/AMDGPUExtensions/src/AMDGPUExtensions.jl
 delete mode 100644 NDTensors/src/lib/AMDGPUExtensions/src/roc.jl
 delete mode 100644 NDTensors/src/lib/AMDGPUExtensions/test/runtests.jl
 delete mode 100644 NDTensors/src/lib/AllocateData/.JuliaFormatter.toml
 delete mode 100644 NDTensors/src/lib/AllocateData/README.md
 delete mode 100644 NDTensors/src/lib/AllocateData/src/AllocateData.jl
 delete mode 100644 NDTensors/src/lib/AllocateData/src/AllocateDataLinearAlgebraExt/AllocateDataLinearAlgebraExt.jl
 delete mode 100644 NDTensors/src/lib/AllocateData/src/AllocateDataLinearAlgebraExt/diagonal.jl
 delete mode 100644 NDTensors/src/lib/AllocateData/src/AllocateDataLinearAlgebraExt/hermitian.jl
 delete mode 100644 NDTensors/src/lib/AllocateData/src/allocate.jl
 delete mode 100644 NDTensors/src/lib/AllocateData/src/base.jl
 delete mode 100644 NDTensors/src/lib/AllocateData/src/defaults.jl
 delete mode 100644 NDTensors/src/lib/AllocateData/src/initializers.jl
 delete mode 100644 NDTensors/src/lib/AllocateData/src/to_axis.jl
 delete mode 100644 NDTensors/src/lib/AllocateData/test/Project.toml
 delete mode 100644 NDTensors/src/lib/AllocateData/test/runtests.jl
 delete mode 100644 NDTensors/src/lib/BackendSelection/.JuliaFormatter.toml
 delete mode 100644 NDTensors/src/lib/BackendSelection/src/BackendSelection.jl
 delete mode 100644 NDTensors/src/lib/BackendSelection/src/abstractbackend.jl
 delete mode 100644 NDTensors/src/lib/BackendSelection/src/backend_types.jl
 delete mode 100644 NDTensors/src/lib/BackendSelection/test/Project.toml
 delete mode 100644 NDTensors/src/lib/BackendSelection/test/runtests.jl
 delete mode 100644 NDTensors/src/lib/BaseExtensions/.JuliaFormatter.toml
 delete mode 100644 NDTensors/src/lib/BaseExtensions/src/BaseExtensions.jl
 delete mode 100644 NDTensors/src/lib/BaseExtensions/src/replace.jl
 delete mode 100644 NDTensors/src/lib/BaseExtensions/test/Project.toml
 delete mode 100644 NDTensors/src/lib/BaseExtensions/test/runtests.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/.JuliaFormatter.toml
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/Project.toml
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/README.md
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/examples/Project.toml
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/examples/README.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysAdaptExt/src/BlockSparseArraysAdaptExt.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/src/BlockSparseArraysGradedAxesExt.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/src/reducewhile.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/Project.toml
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysTensorAlgebraExt/src/BlockSparseArraysTensorAlgebraExt.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysTensorAlgebraExt/test/Project.toml
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysTensorAlgebraExt/test/runtests.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/BlockArraysSparseArraysBaseExt/BlockArraysSparseArraysBaseExt.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/BlockSparseArrays.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/abstractblocksparsearray.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/abstractblocksparsematrix.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/abstractblocksparsevector.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/arraylayouts.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/broadcast.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/cat.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/linearalgebra.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/map.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/sparsearrayinterface.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/views.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/backup/qr.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/blocksparsearray/blocksparsearray.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/blocksparsearray/defaults.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/arraylayouts.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blockzero.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/broadcast.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/cat.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/linearalgebra.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/map.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/views.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/test/Project.toml
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/test/TestBlockSparseArraysUtils.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/test/runtests.jl
 delete mode 100644 NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl
 delete mode 100644 NDTensors/src/lib/BroadcastMapConversion/.JuliaFormatter.toml
 delete mode 100644 NDTensors/src/lib/BroadcastMapConversion/src/BroadcastMapConversion.jl
 delete mode 100644 NDTensors/src/lib/BroadcastMapConversion/test/runtests.jl
 delete mode 100644 NDTensors/src/lib/CUDAExtensions/.JuliaFormatter.toml
 delete mode 100644 NDTensors/src/lib/CUDAExtensions/src/CUDAExtensions.jl
 delete mode 100644 NDTensors/src/lib/CUDAExtensions/src/cuda.jl
 delete mode 100644 NDTensors/src/lib/CUDAExtensions/test/runtests.jl
 delete mode 100644 NDTensors/src/lib/DiagonalArrays/.JuliaFormatter.toml
 delete mode 100644 NDTensors/src/lib/DiagonalArrays/README.md
 delete mode 100644 NDTensors/src/lib/DiagonalArrays/examples/Project.toml
 delete mode 100644 NDTensors/src/lib/DiagonalArrays/examples/README.jl
 delete mode 100644 NDTensors/src/lib/DiagonalArrays/src/DiagonalArrays.jl
 delete mode 100644 NDTensors/src/lib/DiagonalArrays/src/abstractdiagonalarray/abstractdiagonalarray.jl
 delete mode 100644 NDTensors/src/lib/DiagonalArrays/src/abstractdiagonalarray/arraylayouts.jl
 delete mode 100644 NDTensors/src/lib/DiagonalArrays/src/abstractdiagonalarray/diagonalarraydiaginterface.jl
 delete mode 100644 NDTensors/src/lib/DiagonalArrays/src/abstractdiagonalarray/sparsearrayinterface.jl
 delete mode 100644 NDTensors/src/lib/DiagonalArrays/src/diaginterface/defaults.jl
 delete mode 100644 NDTensors/src/lib/DiagonalArrays/src/diaginterface/diagindex.jl
 delete mode 100644 NDTensors/src/lib/DiagonalArrays/src/diaginterface/diagindices.jl
 delete mode 100644 NDTensors/src/lib/DiagonalArrays/src/diaginterface/diaginterface.jl
 delete mode 100644 NDTensors/src/lib/DiagonalArrays/src/diagonalarray/arraylayouts.jl
 delete mode 100644 NDTensors/src/lib/DiagonalArrays/src/diagonalarray/diagonalarray.jl
 delete mode 100644 NDTensors/src/lib/DiagonalArrays/src/diagonalarray/diagonalmatrix.jl
 delete mode 100644 NDTensors/src/lib/DiagonalArrays/src/diagonalarray/diagonalvector.jl
 delete mode 100644 NDTensors/src/lib/DiagonalArrays/test/Project.toml
 delete mode 100644 NDTensors/src/lib/DiagonalArrays/test/runtests.jl
 delete mode 100644 NDTensors/src/lib/Expose/.JuliaFormatter.toml
 delete mode 100644 NDTensors/src/lib/Expose/README.md
 delete mode 100644 NDTensors/src/lib/Expose/TODO.md
 delete mode 100644 NDTensors/src/lib/Expose/src/Expose.jl
 delete mode 100644 NDTensors/src/lib/Expose/src/exposed.jl
 delete mode 100644 NDTensors/src/lib/Expose/src/functions/abstractarray.jl
 delete mode 100644 NDTensors/src/lib/Expose/src/functions/adapt.jl
 delete mode 100644 NDTensors/src/lib/Expose/src/functions/append.jl
 delete mode 100644 NDTensors/src/lib/Expose/src/functions/copyto.jl
 delete mode 100644 NDTensors/src/lib/Expose/src/functions/linearalgebra.jl
 delete mode 100644 NDTensors/src/lib/Expose/src/functions/mul.jl
 delete mode 100644 NDTensors/src/lib/Expose/src/functions/permutedims.jl
 delete mode 100644 NDTensors/src/lib/Expose/src/import.jl
 delete mode 100644 NDTensors/src/lib/Expose/test/Project.toml
 delete mode 100644 NDTensors/src/lib/Expose/test/runtests.jl
 delete mode 100644 NDTensors/src/lib/GPUArraysCoreExtensions/.JuliaFormatter.toml
 delete mode 100644 NDTensors/src/lib/GPUArraysCoreExtensions/Project.toml
 delete mode 100644 NDTensors/src/lib/GPUArraysCoreExtensions/src/GPUArraysCoreExtensions.jl
 delete mode 100644 NDTensors/src/lib/GPUArraysCoreExtensions/src/gpuarrayscore.jl
 delete mode 100644 NDTensors/src/lib/GPUArraysCoreExtensions/test/runtests.jl
 delete mode 100644 NDTensors/src/lib/GradedAxes/.JuliaFormatter.toml
 delete mode 100644 NDTensors/src/lib/GradedAxes/src/GradedAxes.jl
 delete mode 100644 NDTensors/src/lib/GradedAxes/src/blockedunitrange.jl
 delete mode 100644 NDTensors/src/lib/GradedAxes/src/dual.jl
 delete mode 100644 NDTensors/src/lib/GradedAxes/src/fusion.jl
 delete mode 100644 NDTensors/src/lib/GradedAxes/src/gradedunitrange.jl
 delete mode 100644 NDTensors/src/lib/GradedAxes/src/gradedunitrangedual.jl
 delete mode 100644 NDTensors/src/lib/GradedAxes/src/labelledunitrangedual.jl
 delete mode 100644 NDTensors/src/lib/GradedAxes/src/onetoone.jl
 delete mode 100644 NDTensors/src/lib/GradedAxes/test/Project.toml
 delete mode 100644 NDTensors/src/lib/GradedAxes/test/runtests.jl
 delete mode 100644 NDTensors/src/lib/GradedAxes/test/test_basics.jl
 delete mode 100644 NDTensors/src/lib/GradedAxes/test/test_dual.jl
 delete mode 100644 NDTensors/src/lib/GradedAxes/test/test_tensor_product.jl
 delete mode 100644 NDTensors/src/lib/LabelledNumbers/.JuliaFormatter.toml
 delete mode 100644 NDTensors/src/lib/LabelledNumbers/src/LabelledNumbers.jl
 delete mode 100644 NDTensors/src/lib/LabelledNumbers/src/LabelledNumbersBlockArraysExt.jl
 delete mode 100644 NDTensors/src/lib/LabelledNumbers/src/labelled_interface.jl
 delete mode 100644 NDTensors/src/lib/LabelledNumbers/src/labelledarray.jl
 delete mode 100644 NDTensors/src/lib/LabelledNumbers/src/labelledinteger.jl
 delete mode 100644 NDTensors/src/lib/LabelledNumbers/src/labellednumber.jl
 delete mode 100644 NDTensors/src/lib/LabelledNumbers/src/labelledunitrange.jl
 delete mode 100644 NDTensors/src/lib/LabelledNumbers/test/Project.toml
 delete mode 100644 NDTensors/src/lib/LabelledNumbers/test/runtests.jl
 delete mode 100644 NDTensors/src/lib/MetalExtensions/.JuliaFormatter.toml
 delete mode 100644 NDTensors/src/lib/MetalExtensions/src/MetalExtensions.jl
 delete mode 100644 NDTensors/src/lib/MetalExtensions/src/metal.jl
 delete mode 100644 NDTensors/src/lib/MetalExtensions/test/runtests.jl
 delete mode 100644 NDTensors/src/lib/NamedDimsArrays/.JuliaFormatter.toml
 delete mode 100644 NDTensors/src/lib/NamedDimsArrays/README.md
 delete mode 100644 NDTensors/src/lib/NamedDimsArrays/examples/example_readme.jl
 delete mode 100644 NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/src/NamedDimsArraysAdaptExt.jl
 delete mode 100644 NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/src/adapt_structure.jl
 delete mode 100644 NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/test/Project.toml
 delete mode 100644 NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/test/runtests.jl
 delete mode 100644 NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysSparseArraysBaseExt/src/NamedDimsArraysSparseArraysBaseExt.jl
 delete mode 100644 NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysSparseArraysBaseExt/src/densearray.jl
NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysSparseArraysBaseExt/test/Project.toml delete mode 100644 NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysSparseArraysBaseExt/test/runtests.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/NamedDimsArraysTensorAlgebraExt.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/contract.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/eigen.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/fusedims.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/qr.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/svd.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/test/Project.toml delete mode 100644 NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/test/runtests.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/generate_readme.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/src/NamedDimsArrays.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/src/abstractnameddimsarray.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/src/abstractnameddimsmatrix.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/src/abstractnameddimsvector.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/src/abstractnamedint.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/src/abstractnamedunitrange.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/src/broadcast.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/src/broadcast_shape.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/src/constructors.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/src/map.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/src/name.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/src/nameddimsarray.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/src/namedint.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/src/namedunitrange.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/src/permutedims.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/src/promote_shape.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/src/randname.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/src/similar.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/src/traits.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/test/Project.toml delete mode 100644 NDTensors/src/lib/NamedDimsArrays/test/runtests.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/test/test_NDTensorsNamedDimsArraysExt.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/test/test_NamedDimsArraysAdaptExt.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/test/test_NamedDimsArraysSparseArraysBaseExt.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/test/test_NamedDimsArraysTensorAlgebraExt.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/test/test_basic.jl delete mode 100644 NDTensors/src/lib/NamedDimsArrays/test/test_tensoralgebra.jl delete mode 100644 NDTensors/src/lib/NestedPermutedDimsArrays/src/NestedPermutedDimsArrays.jl delete mode 100644 NDTensors/src/lib/NestedPermutedDimsArrays/test/Project.toml delete mode 100644 NDTensors/src/lib/NestedPermutedDimsArrays/test/runtests.jl delete mode 100644 NDTensors/src/lib/RankFactorization/.JuliaFormatter.toml delete mode 100644 
NDTensors/src/lib/RankFactorization/src/RankFactorization.jl delete mode 100644 NDTensors/src/lib/RankFactorization/src/default_kwargs.jl delete mode 100644 NDTensors/src/lib/RankFactorization/src/spectrum.jl delete mode 100644 NDTensors/src/lib/RankFactorization/src/truncate_spectrum.jl delete mode 100644 NDTensors/src/lib/SmallVectors/.JuliaFormatter.toml delete mode 100644 NDTensors/src/lib/SmallVectors/README.md delete mode 100644 NDTensors/src/lib/SmallVectors/src/BaseExt/insertstyle.jl delete mode 100644 NDTensors/src/lib/SmallVectors/src/BaseExt/sort.jl delete mode 100644 NDTensors/src/lib/SmallVectors/src/BaseExt/sortedunique.jl delete mode 100644 NDTensors/src/lib/SmallVectors/src/BaseExt/thawfreeze.jl delete mode 100644 NDTensors/src/lib/SmallVectors/src/SmallVectors.jl delete mode 100644 NDTensors/src/lib/SmallVectors/src/abstractarray/insert.jl delete mode 100644 NDTensors/src/lib/SmallVectors/src/abstractsmallvector/abstractsmallvector.jl delete mode 100644 NDTensors/src/lib/SmallVectors/src/abstractsmallvector/deque.jl delete mode 100644 NDTensors/src/lib/SmallVectors/src/msmallvector/msmallvector.jl delete mode 100644 NDTensors/src/lib/SmallVectors/src/msmallvector/thawfreeze.jl delete mode 100644 NDTensors/src/lib/SmallVectors/src/smallvector/insertstyle.jl delete mode 100644 NDTensors/src/lib/SmallVectors/src/smallvector/smallvector.jl delete mode 100644 NDTensors/src/lib/SmallVectors/src/smallvector/thawfreeze.jl delete mode 100644 NDTensors/src/lib/SmallVectors/src/subsmallvector/subsmallvector.jl delete mode 100644 NDTensors/src/lib/SmallVectors/test/runtests.jl delete mode 100644 NDTensors/src/lib/SortedSets/.JuliaFormatter.toml delete mode 100644 NDTensors/src/lib/SortedSets/src/BaseExt/sorted.jl delete mode 100644 NDTensors/src/lib/SortedSets/src/DictionariesExt/insert.jl delete mode 100644 NDTensors/src/lib/SortedSets/src/DictionariesExt/isinsertable.jl delete mode 100644 NDTensors/src/lib/SortedSets/src/SmallVectorsDictionariesExt/interface.jl delete mode 100644 NDTensors/src/lib/SortedSets/src/SortedSets.jl delete mode 100644 NDTensors/src/lib/SortedSets/src/SortedSetsSmallVectorsExt/smallset.jl delete mode 100644 NDTensors/src/lib/SortedSets/src/abstractset.jl delete mode 100644 NDTensors/src/lib/SortedSets/src/abstractwrappedset.jl delete mode 100644 NDTensors/src/lib/SortedSets/src/sortedset.jl delete mode 100644 NDTensors/src/lib/SortedSets/test/runtests.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/.JuliaFormatter.toml delete mode 100644 NDTensors/src/lib/SparseArraysBase/README.md delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/SparseArraysBase.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/SparseArraysBaseLinearAlgebraExt.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/SparseArraysBaseSparseArraysExt.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/abstractsparsearray.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/abstractsparsematrix.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/abstractsparsevector.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/arraylayouts.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/base.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/baseinterface.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/broadcast.jl delete 
mode 100644 NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/cat.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/convert.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/map.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/sparsearrayinterface.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/wrappedabstractsparsearray.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/sparsearraydok/arraylayouts.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/sparsearraydok/defaults.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/sparsearraydok/sparsearraydok.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/sparsearraydok/sparsematrixdok.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/sparsearraydok/sparsevectordok.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/SparseArraysBaseLinearAlgebraExt.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/arraylayouts.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/base.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/broadcast.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/cat.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/conversion.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/copyto.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/densearray.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/indexing.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/interface.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/interface_optional.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/map.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/vectorinterface.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/wrappers.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/zero.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/test/Project.toml delete mode 100644 NDTensors/src/lib/SparseArraysBase/test/SparseArraysBaseTestUtils/AbstractSparseArrays.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/test/SparseArraysBaseTestUtils/DiagonalArrays.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/test/SparseArraysBaseTestUtils/Project.toml delete mode 100644 NDTensors/src/lib/SparseArraysBase/test/SparseArraysBaseTestUtils/SparseArrays.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/test/SparseArraysBaseTestUtils/SparseArraysBaseTestUtils.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/test/runtests.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/test/test_abstractsparsearray.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/test/test_array.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/test/test_diagonalarray.jl delete mode 100644 NDTensors/src/lib/SparseArraysBase/test/test_sparsearraydok.jl delete mode 100644 NDTensors/src/lib/SymmetrySectors/.JuliaFormatter.toml delete mode 100644 NDTensors/src/lib/SymmetrySectors/Project.toml delete mode 100644 NDTensors/src/lib/SymmetrySectors/src/SymmetrySectors.jl delete mode 100644 
NDTensors/src/lib/SymmetrySectors/src/abstractsector.jl delete mode 100644 NDTensors/src/lib/SymmetrySectors/src/namedtuple_operations.jl delete mode 100644 NDTensors/src/lib/SymmetrySectors/src/sector_definitions/fib.jl delete mode 100644 NDTensors/src/lib/SymmetrySectors/src/sector_definitions/ising.jl delete mode 100644 NDTensors/src/lib/SymmetrySectors/src/sector_definitions/o2.jl delete mode 100644 NDTensors/src/lib/SymmetrySectors/src/sector_definitions/su.jl delete mode 100644 NDTensors/src/lib/SymmetrySectors/src/sector_definitions/su2k.jl delete mode 100644 NDTensors/src/lib/SymmetrySectors/src/sector_definitions/trivial.jl delete mode 100644 NDTensors/src/lib/SymmetrySectors/src/sector_definitions/u1.jl delete mode 100644 NDTensors/src/lib/SymmetrySectors/src/sector_definitions/zn.jl delete mode 100644 NDTensors/src/lib/SymmetrySectors/src/sector_product.jl delete mode 100644 NDTensors/src/lib/SymmetrySectors/src/symmetry_style.jl delete mode 100644 NDTensors/src/lib/SymmetrySectors/test/runtests.jl delete mode 100644 NDTensors/src/lib/SymmetrySectors/test/test_fusion_rules.jl delete mode 100644 NDTensors/src/lib/SymmetrySectors/test/test_sector_product.jl delete mode 100644 NDTensors/src/lib/SymmetrySectors/test/test_simple_sectors.jl delete mode 100644 NDTensors/src/lib/TagSets/.JuliaFormatter.toml delete mode 100644 NDTensors/src/lib/TagSets/README.md delete mode 100644 NDTensors/src/lib/TagSets/examples/benchmark.jl delete mode 100644 NDTensors/src/lib/TagSets/src/TagSets.jl delete mode 100644 NDTensors/src/lib/TagSets/test/runtests.jl delete mode 100644 NDTensors/src/lib/TensorAlgebra/.JuliaFormatter.toml delete mode 100644 NDTensors/src/lib/TensorAlgebra/Project.toml delete mode 100644 NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/Project.toml delete mode 100644 NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/src/TensorAlgebraGradedAxesExt.jl delete mode 100644 NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/test/Project.toml delete mode 100644 NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/test/runtests.jl delete mode 100644 NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/test/test_basics.jl delete mode 100644 NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/test/test_contract.jl delete mode 100644 NDTensors/src/lib/TensorAlgebra/src/BaseExtensions/BaseExtensions.jl delete mode 100644 NDTensors/src/lib/TensorAlgebra/src/BaseExtensions/indexin.jl delete mode 100644 NDTensors/src/lib/TensorAlgebra/src/BaseExtensions/permutedims.jl delete mode 100644 NDTensors/src/lib/TensorAlgebra/src/LinearAlgebraExtensions/LinearAlgebraExtensions.jl delete mode 100644 NDTensors/src/lib/TensorAlgebra/src/LinearAlgebraExtensions/qr.jl delete mode 100644 NDTensors/src/lib/TensorAlgebra/src/TensorAlgebra.jl delete mode 100644 NDTensors/src/lib/TensorAlgebra/src/blockedpermutation.jl delete mode 100644 NDTensors/src/lib/TensorAlgebra/src/contract/allocate_output.jl delete mode 100644 NDTensors/src/lib/TensorAlgebra/src/contract/blockedperms.jl delete mode 100644 NDTensors/src/lib/TensorAlgebra/src/contract/contract.jl delete mode 100644 NDTensors/src/lib/TensorAlgebra/src/contract/contract_matricize/contract.jl delete mode 100644 NDTensors/src/lib/TensorAlgebra/src/contract/output_labels.jl delete mode 100644 NDTensors/src/lib/TensorAlgebra/src/fusedims.jl delete mode 100644 NDTensors/src/lib/TensorAlgebra/src/splitdims.jl delete mode 100644 NDTensors/src/lib/TensorAlgebra/test/Project.toml delete mode 
100644 NDTensors/src/lib/TensorAlgebra/test/runtests.jl delete mode 100644 NDTensors/src/lib/TensorAlgebra/test/test_basics.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/.JuliaFormatter.toml delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/src/TypeParameterAccessors.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/src/abstractposition.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/src/abstracttypeparameter.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/src/base/abstractarray.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/src/base/array.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/src/base/linearalgebra.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/src/base/similartype.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/src/base/stridedviews.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/src/default_parameters.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/src/interface.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/src/is_parameter_specified.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/src/ndims.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/src/parameter.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/src/parameters.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/src/position.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/src/set_parameters.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/src/specify_parameters.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/src/to_unionall.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/src/type_parameters.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/src/unspecify_parameters.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/test/Project.toml delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/test/runtests.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/test/test_basics.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/test/test_custom_types.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/test/test_defaults.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/test/test_similartype.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/test/test_wrappers.jl delete mode 100644 NDTensors/src/lib/TypeParameterAccessors/test/utils/test_inferred.jl delete mode 100644 NDTensors/src/lib/UnallocatedArrays/.JuliaFormatter.toml delete mode 100644 NDTensors/src/lib/UnallocatedArrays/README.md delete mode 100644 NDTensors/src/lib/UnallocatedArrays/src/UnallocatedArrays.jl delete mode 100644 NDTensors/src/lib/UnallocatedArrays/src/abstractfill/abstractfill.jl delete mode 100644 NDTensors/src/lib/UnallocatedArrays/src/abstractunallocatedarray.jl delete mode 100644 NDTensors/src/lib/UnallocatedArrays/src/broadcast.jl delete mode 100644 NDTensors/src/lib/UnallocatedArrays/src/set_types.jl delete mode 100644 NDTensors/src/lib/UnallocatedArrays/src/unallocatedfill.jl delete mode 100644 NDTensors/src/lib/UnallocatedArrays/src/unallocatedzeros.jl delete mode 100644 NDTensors/src/lib/UnallocatedArrays/test/Project.toml delete mode 100644 NDTensors/src/lib/UnallocatedArrays/test/runtests.jl delete mode 100644 NDTensors/src/lib/UnspecifiedTypes/.JuliaFormatter.toml delete mode 100644 NDTensors/src/lib/UnspecifiedTypes/README.md delete mode 100644 
NDTensors/src/lib/UnspecifiedTypes/TODO.md delete mode 100644 NDTensors/src/lib/UnspecifiedTypes/src/UnspecifiedTypes.jl delete mode 100644 NDTensors/src/lib/UnspecifiedTypes/src/unspecifiedarray.jl delete mode 100644 NDTensors/src/lib/UnspecifiedTypes/src/unspecifiednumber.jl delete mode 100644 NDTensors/src/lib/UnspecifiedTypes/src/unspecifiedzero.jl delete mode 100644 NDTensors/src/lib/UnspecifiedTypes/test/runtests.jl delete mode 100644 NDTensors/src/linearalgebra/linearalgebra.jl delete mode 100644 NDTensors/src/linearalgebra/svd.jl delete mode 100644 NDTensors/src/linearalgebra/symmetric.jl delete mode 100644 NDTensors/src/nodata.jl delete mode 100644 NDTensors/src/tensor/set_types.jl delete mode 100644 NDTensors/src/tensor/similar.jl delete mode 100644 NDTensors/src/tensor/tensor.jl delete mode 100644 NDTensors/src/tensoroperations/contraction_logic.jl delete mode 100644 NDTensors/src/tensoroperations/generic_tensor_operations.jl delete mode 100644 NDTensors/src/tensorstorage/default_storage.jl delete mode 100644 NDTensors/src/tensorstorage/set_types.jl delete mode 100644 NDTensors/src/tensorstorage/similar.jl delete mode 100644 NDTensors/src/tensorstorage/tensorstorage.jl delete mode 100644 NDTensors/src/truncate.jl delete mode 100644 NDTensors/src/tupletools.jl delete mode 100644 NDTensors/test/NDTensorsTestUtils/NDTensorsTestUtils.jl delete mode 100644 NDTensors/test/NDTensorsTestUtils/device_list.jl delete mode 100644 NDTensors/test/NDTensorsTestUtils/is_supported_eltype.jl delete mode 100644 NDTensors/test/Project.toml delete mode 100644 NDTensors/test/backup/arraytensor/Project.toml delete mode 100644 NDTensors/test/backup/arraytensor/array.jl delete mode 100644 NDTensors/test/backup/arraytensor/blocksparsearray.jl delete mode 100644 NDTensors/test/backup/arraytensor/diagonalarray.jl delete mode 100644 NDTensors/test/backup/arraytensor/runtests.jl delete mode 100644 NDTensors/test/broken/readwrite.jl delete mode 100644 NDTensors/test/lib/Project.toml delete mode 100644 NDTensors/test/lib/runtests.jl delete mode 100644 NDTensors/test/runtests.jl delete mode 100644 NDTensors/test/test_blocksparse.jl delete mode 100644 NDTensors/test/test_combiner.jl delete mode 100644 NDTensors/test/test_dense.jl delete mode 100644 NDTensors/test/test_diag.jl delete mode 100644 NDTensors/test/test_diagblocksparse.jl delete mode 100644 NDTensors/test/test_emptynumber.jl delete mode 100644 NDTensors/test/test_emptystorage.jl delete mode 100644 NDTensors/test/test_linearalgebra.jl delete mode 100644 NDTensors/test/test_tupletools.jl mode change 120000 => 100644 README.md create mode 100644 benchmark/benchmarks.jl create mode 100644 docs/make_index.jl delete mode 100644 docs/make_local_notest.jl delete mode 100644 docs/make_local_test.jl create mode 100644 docs/make_readme.jl delete mode 100644 docs/settings.jl delete mode 100644 docs/src/AdvancedUsageGuide.md delete mode 100644 docs/src/CodeTiming.md delete mode 100644 docs/src/ContractionSequenceOptimization.md delete mode 100644 docs/src/DMRG.md delete mode 100644 docs/src/DMRGObserver.md delete mode 100644 docs/src/DeveloperGuide.md delete mode 100644 docs/src/Einsum.md delete mode 100644 docs/src/HDF5FileFormats.md delete mode 100644 docs/src/ITensorType.md delete mode 100644 docs/src/IncludedSiteTypes.md delete mode 100644 docs/src/IndexSetType.md delete mode 100644 docs/src/IndexType.md delete mode 100644 docs/src/MPSandMPO.md delete mode 100644 docs/src/Multithreading.md delete mode 100644 docs/src/Observer.md delete mode 100644 
docs/src/OpSum.md delete mode 100644 docs/src/ProjMPO.md delete mode 100644 docs/src/ProjMPOSum.md delete mode 100644 docs/src/QN.md delete mode 100644 docs/src/QNTricks.md delete mode 100644 docs/src/RunningOnGPUs.md delete mode 100644 docs/src/SiteType.md delete mode 100644 docs/src/Sweeps.md delete mode 100644 docs/src/UpgradeGuide_0.1_to_0.2.md delete mode 100644 docs/src/assets/favicon.ico delete mode 100644 docs/src/assets/logo.png delete mode 100644 docs/src/examples/DMRG.md delete mode 100644 docs/src/examples/ITensor.md delete mode 100644 docs/src/examples/MPSandMPO.md delete mode 100644 docs/src/examples/Physics.md delete mode 100644 docs/src/examples/combiner_itensor.png delete mode 100644 docs/src/examples/itensor_factorization_figures/QR_Ex1.png delete mode 100644 docs/src/examples/itensor_factorization_figures/SVD_Ex1.png delete mode 100644 docs/src/examples/itensor_trace_figures/delta_itensor.png delete mode 100644 docs/src/examples/itensor_trace_figures/trace_A.png delete mode 100644 docs/src/examples/mps_element.png delete mode 100644 docs/src/examples/mps_expect.png delete mode 100644 docs/src/examples/mps_from_tensor.png delete mode 100644 docs/src/examples/mps_onesite_figures/operator_app_mps.png delete mode 100644 docs/src/examples/mps_onesite_figures/operator_contract.png delete mode 100644 docs/src/examples/mps_onesite_figures/updated_mps.png delete mode 100644 docs/src/examples/mps_zz_correlation.png delete mode 100644 docs/src/examples/twosite_figures/gate_app_mps.png delete mode 100644 docs/src/examples/twosite_figures/gate_contract.png delete mode 100644 docs/src/examples/twosite_figures/gate_gauge.png delete mode 100644 docs/src/examples/twosite_figures/gate_svd.png delete mode 100644 docs/src/faq/DMRG.md delete mode 100644 docs/src/faq/Development.md delete mode 100644 docs/src/faq/HPC.md delete mode 100644 docs/src/faq/JuliaAndCpp.md delete mode 100644 docs/src/faq/JuliaPkg.md delete mode 100644 docs/src/faq/QN.md delete mode 100644 docs/src/faq/RelationshipToOtherLibraries.md delete mode 100644 docs/src/getting_started/DebugChecks.md delete mode 100644 docs/src/getting_started/Installing.md delete mode 100644 docs/src/getting_started/NextSteps.md delete mode 100644 docs/src/getting_started/RunningCodes.md delete mode 100644 docs/src/getting_started/install_screenshot.png delete mode 100644 docs/src/index.md delete mode 100644 docs/src/svd_tensor.png delete mode 100644 docs/src/tutorials/DMRG.md delete mode 100644 docs/src/tutorials/MPSTimeEvolution.md delete mode 100644 docs/src/tutorials/QN_DMRG.md delete mode 100644 docs/src/tutorials/tebd.jl delete mode 100644 docs/src/tutorials/trotter_tevol.png create mode 100644 examples/Project.toml create mode 100644 examples/README.jl delete mode 100644 examples/basic_ops/basic_ops.jl delete mode 100644 examples/basic_ops/qn_itensors.jl delete mode 100644 examples/ctmrg/anisotropic/Project.toml delete mode 100644 examples/ctmrg/anisotropic/README.md delete mode 100644 examples/ctmrg/anisotropic/run.jl delete mode 100644 examples/ctmrg/isotropic/Project.toml delete mode 100644 examples/ctmrg/isotropic/run.jl delete mode 100644 examples/src/2d_classical_ising.jl delete mode 100644 examples/src/ctmrg_anisotropic.jl delete mode 100644 examples/src/ctmrg_isotropic.jl delete mode 100644 examples/src/trg.jl delete mode 100644 examples/trg/Project.toml delete mode 100644 examples/trg/run.jl delete mode 100644 ext/ITensorsChainRulesCoreExt/ITensorsChainRulesCoreExt.jl delete mode 100644 
ext/ITensorsChainRulesCoreExt/LazyApply/LazyApply.jl delete mode 100644 ext/ITensorsChainRulesCoreExt/NDTensors/dense.jl delete mode 100644 ext/ITensorsChainRulesCoreExt/NDTensors/tensor.jl delete mode 100644 ext/ITensorsChainRulesCoreExt/indexset.jl delete mode 100644 ext/ITensorsChainRulesCoreExt/itensor.jl delete mode 100644 ext/ITensorsChainRulesCoreExt/non_differentiable.jl delete mode 100644 ext/ITensorsChainRulesCoreExt/projection.jl delete mode 100644 ext/ITensorsChainRulesCoreExt/smallstrings.jl delete mode 100644 ext/ITensorsChainRulesCoreExt/utils.jl delete mode 100644 ext/ITensorsChainRulesCoreExt/zygoterules.jl delete mode 100644 ext/ITensorsHDF5Ext/ITensorsHDF5Ext.jl delete mode 100644 ext/ITensorsHDF5Ext/index.jl delete mode 100644 ext/ITensorsHDF5Ext/indexset.jl delete mode 100644 ext/ITensorsHDF5Ext/itensor.jl delete mode 100644 ext/ITensorsHDF5Ext/qn.jl delete mode 100644 ext/ITensorsHDF5Ext/qnindex.jl delete mode 100644 ext/ITensorsHDF5Ext/tagset.jl delete mode 100644 ext/ITensorsVectorInterfaceExt/ITensorsVectorInterfaceExt.jl delete mode 100644 ext/ITensorsZygoteRulesExt/ITensorsZygoteRulesExt.jl delete mode 100644 ext/ITensorsZygoteRulesExt/itensors.jl delete mode 100644 jenkins/Dockerfile delete mode 100644 jenkins/Jenkinsfile delete mode 100644 src/adapt.jl delete mode 100644 src/argsdict/argsdict.jl delete mode 100644 src/broadcast.jl delete mode 100644 src/deprecated.jl delete mode 100644 src/developer_tools.jl delete mode 100644 src/exports.jl delete mode 100644 src/fermions/fermions.jl delete mode 100644 src/global_variables.jl delete mode 100644 src/imports.jl delete mode 100644 src/index.jl delete mode 100644 src/indexset.jl delete mode 100644 src/itensor.jl delete mode 100644 src/lastval.jl delete mode 100644 src/lib/ContractionSequenceOptimization/src/ContractionSequenceOptimization.jl delete mode 100644 src/lib/ContractionSequenceOptimization/src/breadth_first_constructive.jl delete mode 100644 src/lib/ContractionSequenceOptimization/src/contraction_cost.jl delete mode 100644 src/lib/ContractionSequenceOptimization/src/depth_first_constructive.jl delete mode 100644 src/lib/ContractionSequenceOptimization/src/three_tensors.jl delete mode 100644 src/lib/ContractionSequenceOptimization/src/utils.jl delete mode 100644 src/lib/ITensorVisualizationCore/README.md delete mode 100644 src/lib/ITensorVisualizationCore/src/ITensorVisualizationCore.jl delete mode 100644 src/lib/ITensorVisualizationCore/src/visualize_macro.jl delete mode 100644 src/lib/ITensorsNamedDimsArraysExt/examples/Project.toml delete mode 100644 src/lib/ITensorsNamedDimsArraysExt/examples/example_dmrg.jl delete mode 100644 src/lib/ITensorsNamedDimsArraysExt/examples/example_readme.jl delete mode 100644 src/lib/ITensorsNamedDimsArraysExt/src/ITensorsNamedDimsArraysExt.jl delete mode 100644 src/lib/ITensorsNamedDimsArraysExt/src/combiner.jl delete mode 100644 src/lib/ITensorsNamedDimsArraysExt/src/index.jl delete mode 100644 src/lib/ITensorsNamedDimsArraysExt/src/indexing.jl delete mode 100644 src/lib/ITensorsNamedDimsArraysExt/src/itensor.jl delete mode 100644 src/lib/ITensorsNamedDimsArraysExt/src/tensoralgebra.jl delete mode 100644 src/lib/ITensorsNamedDimsArraysExt/src/to_nameddimsarray.jl delete mode 100644 src/lib/ITensorsNamedDimsArraysExt/test/Project.toml delete mode 100644 src/lib/ITensorsNamedDimsArraysExt/test/runtests.jl delete mode 100644 src/lib/ITensorsNamedDimsArraysExt/test/test_basics.jl delete mode 100644 src/lib/ITensorsNamedDimsArraysExt/test/test_examples.jl delete mode 100644 
src/lib/ITensorsOpsExt/src/ITensorsOpsExt.jl delete mode 100644 src/lib/ITensorsSiteTypesExt/src/ITensorsSiteTypesExt.jl delete mode 100644 src/lib/LazyApply/src/LazyApply.jl delete mode 100644 src/lib/Ops/ops_itensor.jl delete mode 100644 src/lib/Ops/src/Ops.jl delete mode 100644 src/lib/Ops/src/op.jl delete mode 100644 src/lib/Ops/src/trotter.jl delete mode 100644 src/lib/QuantumNumbers/src/QuantumNumbers.jl delete mode 100644 src/lib/QuantumNumbers/src/arrow.jl delete mode 100644 src/lib/QuantumNumbers/src/qn.jl delete mode 100644 src/lib/QuantumNumbers/src/qnval.jl delete mode 100644 src/lib/SiteTypes/src/SiteTypes.jl delete mode 100644 src/lib/SiteTypes/src/SiteTypesChainRulesCoreExt.jl delete mode 100644 src/lib/SiteTypes/src/sitetype.jl delete mode 100644 src/lib/SiteTypes/src/sitetypes/aliases.jl delete mode 100644 src/lib/SiteTypes/src/sitetypes/boson.jl delete mode 100644 src/lib/SiteTypes/src/sitetypes/electron.jl delete mode 100644 src/lib/SiteTypes/src/sitetypes/fermion.jl delete mode 100644 src/lib/SiteTypes/src/sitetypes/generic_sites.jl delete mode 100644 src/lib/SiteTypes/src/sitetypes/qubit.jl delete mode 100644 src/lib/SiteTypes/src/sitetypes/qudit.jl delete mode 100644 src/lib/SiteTypes/src/sitetypes/spinhalf.jl delete mode 100644 src/lib/SiteTypes/src/sitetypes/spinone.jl delete mode 100644 src/lib/SiteTypes/src/sitetypes/tj.jl delete mode 100644 src/lib/SmallStrings/ext/SmallStringsChainRulesCoreExt/SmallStringsChainRulesCoreExt.jl delete mode 100644 src/lib/SmallStrings/src/SmallStrings.jl delete mode 100644 src/lib/SmallStrings/src/smallstring.jl delete mode 100644 src/lib/TagSets/src/TagSets.jl delete mode 100644 src/name.jl delete mode 100644 src/not.jl delete mode 100644 src/nullspace.jl delete mode 100644 src/oneitensor.jl delete mode 100644 src/packagecompile/compile.jl delete mode 100644 src/qn/flux.jl delete mode 100644 src/qn/qnindex.jl delete mode 100644 src/qn/qnindexset.jl delete mode 100644 src/qn/qnitensor.jl delete mode 100644 src/readwrite.jl delete mode 100644 src/set_operations.jl delete mode 100644 src/set_types.jl delete mode 100644 src/symmetrystyle.jl delete mode 100644 src/tensor_operations/itensor_combiner.jl delete mode 100644 src/tensor_operations/matrix_algebra.jl delete mode 100644 src/tensor_operations/matrix_decomposition.jl delete mode 100644 src/tensor_operations/permutations.jl delete mode 100644 src/tensor_operations/tensor_algebra.jl delete mode 100644 src/usings.jl delete mode 100644 src/utils.jl delete mode 100644 src/val.jl delete mode 100644 test/base/Project.toml delete mode 100644 test/base/runtests.jl delete mode 100644 test/base/test_argsdict.jl delete mode 100644 test/base/test_broadcast.jl delete mode 100644 test/base/test_combiner.jl delete mode 100644 test/base/test_contract.jl delete mode 100644 test/base/test_ctmrg.jl delete mode 100644 test/base/test_debug_checks.jl delete mode 100644 test/base/test_decomp.jl delete mode 100644 test/base/test_diagitensor.jl delete mode 100644 test/base/test_empty.jl delete mode 100644 test/base/test_emptyitensor.jl delete mode 100644 test/base/test_examples.jl delete mode 100644 test/base/test_exports.jl delete mode 100644 test/base/test_fermions.jl delete mode 100644 test/base/test_global_variables.jl delete mode 100644 test/base/test_index.jl delete mode 100644 test/base/test_indexset.jl delete mode 100644 test/base/test_indices.jl delete mode 100644 test/base/test_inference.jl delete mode 100644 test/base/test_itensor.jl delete mode 100644 test/base/test_itensor_scalar.jl delete 
mode 100644 test/base/test_itensor_scalar_contract.jl delete mode 100644 test/base/test_itensor_slice.jl delete mode 100644 test/base/test_ndtensors.jl delete mode 100644 test/base/test_not.jl delete mode 100644 test/base/test_oneitensor.jl delete mode 100644 test/base/test_phys_site_types.jl delete mode 100644 test/base/test_qn.jl delete mode 100644 test/base/test_qncombiner.jl delete mode 100644 test/base/test_qndiagitensor.jl delete mode 100644 test/base/test_qnindex.jl delete mode 100644 test/base/test_qnitensor.jl delete mode 100644 test/base/test_readwrite.jl delete mode 100644 test/base/test_sitetype.jl delete mode 100644 test/base/test_smallstring.jl delete mode 100644 test/base/test_svd.jl delete mode 100644 test/base/test_symmetrystyle.jl delete mode 100644 test/base/test_tagset.jl delete mode 100644 test/base/test_trg.jl delete mode 100644 test/base/utils/TestITensorsExportedNames/TestITensorsExportedNames.jl delete mode 100644 test/base/utils/testfilev0.1.41.h5 delete mode 100644 test/base/utils/util.jl create mode 100644 test/basics/test_basics.jl delete mode 100644 test/ext/ITensorsChainRulesCoreExt/Project.toml delete mode 100644 test/ext/ITensorsChainRulesCoreExt/runtests.jl delete mode 100644 test/ext/ITensorsChainRulesCoreExt/test_chainrules.jl delete mode 100644 test/ext/ITensorsChainRulesCoreExt/test_chainrules_ops.jl delete mode 100644 test/ext/ITensorsChainRulesCoreExt/utils/chainrulestestutils.jl delete mode 100644 test/ext/ITensorsVectorInterfaceExt/Project.toml delete mode 100644 test/ext/ITensorsVectorInterfaceExt/runtests.jl delete mode 100644 test/ext/NDTensorsMappedArraysExt/Project.toml delete mode 100644 test/ext/NDTensorsMappedArraysExt/runtests.jl delete mode 100644 test/lib/ContractionSequenceOptimization/runtests.jl delete mode 100644 test/lib/ContractionSequenceOptimization/test_itensor_contract.jl delete mode 100644 test/lib/ITensorsNamedDimsArraysExt/Project.toml delete mode 100644 test/lib/ITensorsNamedDimsArraysExt/runtests.jl delete mode 100644 test/lib/LazyApply/outdated/test_lazyapply.jl delete mode 100644 test/lib/LazyApply/runtests.jl delete mode 100644 test/lib/LazyApply/test_lazyapply.jl delete mode 100644 test/lib/Ops/runtests.jl delete mode 100644 test/lib/Ops/test_ops.jl delete mode 100644 test/lib/Ops/test_trotter.jl create mode 100644 test/test_aqua.jl delete mode 100644 test/threading/runtests.jl delete mode 100644 test/threading/test_threading.jl diff --git a/.JuliaFormatter.toml b/.JuliaFormatter.toml index 08f664cdb9..4c49a86f07 100644 --- a/.JuliaFormatter.toml +++ b/.JuliaFormatter.toml @@ -1,2 +1,3 @@ +# See https://domluna.github.io/JuliaFormatter.jl/stable/ for a list of options style = "blue" indent = 2 diff --git a/.github/ISSUE_TEMPLATE/02_NDTensors_bug_report.md b/.github/ISSUE_TEMPLATE/02_NDTensors_bug_report.md deleted file mode 100644 index cb2aa59a82..0000000000 --- a/.github/ISSUE_TEMPLATE/02_NDTensors_bug_report.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -name: NDTensors.jl bug report -about: Create a bug report to help us improve NDTensors.jl -title: "[NDTensors] [BUG] YOUR SHORT DESCRIPTION OF THE BUG HERE" -labels: ["NDTensors", "bug"] -assignees: '' - ---- - -**Description of bug** - -Please give a brief description of the bug or unexpected behavior here. - -**Minimal code demonstrating the bug or unexpected behavior** - -If applicable, provide a minimal code that can be run to demonstrate the bug or unexpected behavior. 
-
-If you are unable to construct a minimal code that demonstrates the bug or unexpected behavior, provide detailed steps for how to reproduce the behavior you are seeing.
-
-<details>
-<summary>Minimal runnable code</summary>
-
-```julia
-[YOUR MINIMAL RUNNABLE CODE HERE]
-```
-
-
-**Expected output or behavior**
-
-Describe what you expected to happen.
-
-If you provided a minimal code that can be run to demonstrate the bug or unexpected behavior, describe what you expected the output would be.
-
-
-**Actual output or behavior**
-
-Describe what actually happened.
-
-If you provided a minimal code that demonstrates the bug or unexpected behavior, provide the output you get from that code. If the code leads to an error or warning, include the full error or warning below.
-
-<details>
-<summary>Output of minimal runnable code</summary>
-
-```julia
-[OUTPUT OF YOUR MINIMAL RUNNABLE CODE HERE]
-```
-
-
-
-**Version information**
-
- - Output from `versioninfo()`:
-```julia
-julia> versioninfo()
-[YOUR OUTPUT HERE]
-```
- - Output from `using Pkg; Pkg.status("ITensors")`:
-```julia
-julia> using Pkg; Pkg.status("ITensors")
-[YOUR OUTPUT HERE]
-```
diff --git a/.github/ISSUE_TEMPLATE/02_NDTensors_feature_request.md b/.github/ISSUE_TEMPLATE/02_NDTensors_feature_request.md
deleted file mode 100644
index db445b1ef8..0000000000
--- a/.github/ISSUE_TEMPLATE/02_NDTensors_feature_request.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-name: NDTensors.jl feature request
-about: Suggest an idea for NDTensors.jl
-title: "[NDTensors] [ENHANCEMENT] YOUR SHORT DESCRIPTION OF THE FEATURE REQUEST HERE"
-labels: ["NDTensors", "enhancement"]
-assignees: ''
-
----
-
-**Is your feature request related to a problem? Please describe.**
-
-A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-
-**Describe the solution you'd like**
-
-A clear and concise description of what you want to happen.
-
-**Describe alternatives you've considered**
-
-A clear and concise description of any alternative solutions or features you've considered.
-
-**Additional context**
-
-Add any other context or screenshots about the feature request here.
diff --git a/.github/ISSUE_TEMPLATE/01_ITensors_bug_report.md b/.github/ISSUE_TEMPLATE/BUG_REPORT.md
similarity index 94%
rename from .github/ISSUE_TEMPLATE/01_ITensors_bug_report.md
rename to .github/ISSUE_TEMPLATE/BUG_REPORT.md
index 56be1986ec..70dd1f1062 100644
--- a/.github/ISSUE_TEMPLATE/01_ITensors_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.md
@@ -1,8 +1,8 @@
 ---
 name: ITensors.jl bug report
 about: Create a bug report to help us improve ITensors.jl
-title: "[ITensors] [BUG] YOUR SHORT DESCRIPTION OF THE BUG HERE"
-labels: ["ITensors", "bug"]
+title: "[BUG] YOUR SHORT DESCRIPTION OF THE BUG HERE"
+labels: ["bug"]
 assignees: ''
 
 ---
@@ -22,6 +22,7 @@ If you are unable to construct a minimal code that demonstrates the bug or unexp
 ```julia
 [YOUR MINIMAL RUNNABLE CODE HERE]
 ```
+</details>
 
@@ -43,6 +44,7 @@ If you provided a minimal code that demonstrates the bug or unexpected behavior,
 ```julia
 [OUTPUT OF YOUR MINIMAL RUNNABLE CODE HERE]
 ```
+</details>
 
diff --git a/.github/ISSUE_TEMPLATE/01_ITensors_feature_request.md b/.github/ISSUE_TEMPLATE/FEATURE_REQUEST.md similarity index 82% rename from .github/ISSUE_TEMPLATE/01_ITensors_feature_request.md rename to .github/ISSUE_TEMPLATE/FEATURE_REQUEST.md index c4ecbbd7b1..e5601c2820 100644 --- a/.github/ISSUE_TEMPLATE/01_ITensors_feature_request.md +++ b/.github/ISSUE_TEMPLATE/FEATURE_REQUEST.md @@ -1,8 +1,8 @@ --- name: ITensors.jl feature request about: Suggest an idea for ITensors.jl -title: "[ITensors] [ENHANCEMENT] YOUR SHORT DESCRIPTION OF THE FEATURE REQUEST HERE" -labels: ["ITensors", "enhancement"] +title: "[ENHANCEMENT] YOUR SHORT DESCRIPTION OF THE FEATURE REQUEST HERE" +labels: ["enhancement"] assignees: '' --- diff --git a/.github/ISSUE_TEMPLATE/generate_issue_templates/PACKAGE_bug_report.md b/.github/ISSUE_TEMPLATE/generate_issue_templates/PACKAGE_bug_report.md deleted file mode 100644 index 3946e68dbe..0000000000 --- a/.github/ISSUE_TEMPLATE/generate_issue_templates/PACKAGE_bug_report.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -name: PACKAGE.jl bug report -about: Create a bug report to help us improve PACKAGE.jl -title: "[PACKAGE] [BUG] YOUR SHORT DESCRIPTION OF THE BUG HERE" -labels: ["PACKAGE", "bug"] -assignees: '' - ---- - -**Description of bug** - -Please give a brief description of the bug or unexpected behavior here. - -**Minimal code demonstrating the bug or unexpected behavior** - -If applicable, provide a minimal code that can be run to demonstrate the bug or unexpected behavior. - -If you are unable to construct a minimal code that demonstrates the bug or unexpected behavior, provide detailed steps for how to reproduce the behavior you are seeing. - -
-<details>
-<summary>Minimal runnable code</summary>
-
-```julia
-[YOUR MINIMAL RUNNABLE CODE HERE]
-```
-
-
-**Expected output or behavior**
-
-Describe what you expected to happen.
-
-If you provided a minimal code that can be run to demonstrate the bug or unexpected behavior, describe what you expected the output would be.
-
-
-**Actual output or behavior**
-
-Describe what actually happened.
-
-If you provided a minimal code that demonstrates the bug or unexpected behavior, provide the output you get from that code. If the code leads to an error or warning, include the full error or warning below.
-
-<details>
-<summary>Output of minimal runnable code</summary>
-
-```julia
-[OUTPUT OF YOUR MINIMAL RUNNABLE CODE HERE]
-```
-
- - -**Version information** - - - Output from `versioninfo()`: -```julia -julia> versioninfo() -[YOUR OUTPUT HERE] -``` - - Output from `using Pkg; Pkg.status("ITensors")`: -```julia -julia> using Pkg; Pkg.status("ITensors") -[YOUR OUTPUT HERE] -``` diff --git a/.github/ISSUE_TEMPLATE/generate_issue_templates/PACKAGE_feature_request.md b/.github/ISSUE_TEMPLATE/generate_issue_templates/PACKAGE_feature_request.md deleted file mode 100644 index fc6bea0a43..0000000000 --- a/.github/ISSUE_TEMPLATE/generate_issue_templates/PACKAGE_feature_request.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -name: PACKAGE.jl feature request -about: Suggest an idea for PACKAGE.jl -title: "[PACKAGE] [ENHANCEMENT] YOUR SHORT DESCRIPTION OF THE FEATURE REQUEST HERE" -labels: ["PACKAGE", "enhancement"] -assignees: '' - ---- - -**Is your feature request related to a problem? Please describe.** - -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] - -**Describe the solution you'd like** - -A clear and concise description of what you want to happen. - -**Describe alternatives you've considered** - -A clear and concise description of any alternative solutions or features you've considered. - -**Additional context** - -Add any other context or screenshots about the feature request here. diff --git a/.github/ISSUE_TEMPLATE/generate_issue_templates/generate_issue_templates.jl b/.github/ISSUE_TEMPLATE/generate_issue_templates/generate_issue_templates.jl deleted file mode 100644 index aaaa985ff5..0000000000 --- a/.github/ISSUE_TEMPLATE/generate_issue_templates/generate_issue_templates.jl +++ /dev/null @@ -1,50 +0,0 @@ -using FileUtils - -template_package_name = "PACKAGE" - -package_names = ["ITensors", "NDTensors"] - -package_ordering = Dict(["ITensors" => 1, "NDTensors" => 2]) - -function bug_report_file(package_name::String) - return "$(package_name)_bug_report.md" -end -function feature_request_file(package_name::String) - return "$(package_name)_feature_request.md" -end - -for package_name in package_names - @show package_name - - order = lpad(package_ordering[package_name], 2, "0") - - template_bug_report = bug_report_file(template_package_name) - new_bug_report = order * "_" * bug_report_file(package_name) - - if isfile(new_bug_report) - println("File $new_bug_report already exists, skipping") - else - println("Copying $template_bug_report to $new_bug_report") - cp(template_bug_report, new_bug_report) - - println("Replace $template_package_name with $package_name in $new_bug_report") - replace_in_file(new_bug_report, template_package_name => package_name) - - mv(new_bug_report, joinpath("..", new_bug_report); force=true) - end - - template_feature_request = feature_request_file(template_package_name) - new_feature_request = order * "_" * feature_request_file(package_name) - - if isfile(new_feature_request) - println("File $new_feature_request already exists, skipping") - else - println("Copying $template_feature_request to $new_feature_request") - cp(template_feature_request, new_feature_request) - - println("Replace $template_package_name with $package_name in $new_feature_request") - replace_in_file(new_feature_request, template_package_name => package_name) - - mv(new_feature_request, joinpath("..", new_feature_request); force=true) - end -end diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index a6406fd1ec..a20a53146c 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -11,6 +11,7 @@ If practical and applicable, 
please include a minimal demonstration of the previ
 ```julia
 [YOUR MINIMAL DEMONSTRATION OF PREVIOUS BEHAVIOR]
 ```
+</details>
 
 <details>
 <summary>Minimal demonstration of new behavior</summary>
 
@@ -18,6 +19,7 @@ If practical and applicable, please include a minimal demonstration of the previ
 ```julia
 [YOUR MINIMAL DEMONSTRATION OF NEW BEHAVIOR]
 ```
+</details>
 
# How Has This Been Tested? diff --git a/.github/workflows/CompatHelper.yml b/.github/workflows/CompatHelper.yml index 5c88486868..cba9134c67 100644 --- a/.github/workflows/CompatHelper.yml +++ b/.github/workflows/CompatHelper.yml @@ -13,4 +13,4 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} COMPATHELPER_PRIV: ${{ secrets.DOCUMENTER_KEY }} - run: julia -e 'using CompatHelper; CompatHelper.main(; subdirs=["", "NDTensors"])' + run: julia -e 'using CompatHelper; CompatHelper.main()' diff --git a/.github/workflows/Downstream.yml b/.github/workflows/Downstream.yml deleted file mode 100644 index 644dc6eb38..0000000000 --- a/.github/workflows/Downstream.yml +++ /dev/null @@ -1,57 +0,0 @@ -name: IntegrationTest -on: - push: - branches: [main] - tags: [v*] - pull_request: - -jobs: - test: - name: ${{ matrix.package.repo }} - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - julia-version: [1] - os: [ubuntu-latest] - package: - - {user: ITensor, repo: ITensorGaussianMPS.jl} - - {user: ITensor, repo: ITensorMPS.jl} - - {user: ITensor, repo: ITensorUnicodePlots.jl} - - {user: ITensor, repo: ITensorVisualizationBase.jl} - - steps: - - uses: actions/checkout@v4 - - uses: julia-actions/setup-julia@v2 - with: - version: ${{ matrix.julia-version }} - arch: x64 - - uses: julia-actions/julia-buildpkg@latest - - name: Clone Downstream - uses: actions/checkout@v4 - with: - repository: ${{ matrix.package.user }}/${{ matrix.package.repo }} - path: downstream - - name: Load this and run the downstream tests - shell: julia --color=yes --project=downstream {0} - run: | - using Pkg - try - # force it to use this PR's version of the package - Pkg.develop(PackageSpec(path=".")) # resolver may fail with main deps - Pkg.update() - Pkg.test(coverage=true) # resolver may fail with test time deps - catch err - err isa Pkg.Resolve.ResolverError || rethrow() - # If we can't resolve that means this is incompatible by SemVer and this is fine - # It means we marked this as a breaking change, so we don't need to worry about - # Mistakenly introducing a breaking change, as we have intentionally made one - @info "Not compatible with this release. No problem." 
exception=err - exit(0) # Exit immediately, as a success - end -## - uses: julia-actions/julia-processcoverage@v1 -## - uses: codecov/codecov-action@v4 -## with: -## token: ${{ secrets.CODECOV_TOKEN }} -## file: lcov.info -## fail_ci_if_error: true diff --git a/.github/workflows/FormatCheck.yml b/.github/workflows/FormatCheck.yml new file mode 100644 index 0000000000..3f78afc276 --- /dev/null +++ b/.github/workflows/FormatCheck.yml @@ -0,0 +1,13 @@ +name: "Format Check" + +on: + push: + branches: + - 'main' + tags: '*' + pull_request: + +jobs: + format-check: + name: "Format Check" + uses: "ITensor/ITensorActions/.github/workflows/FormatCheck.yml@main" diff --git a/.github/workflows/LiterateCheck.yml b/.github/workflows/LiterateCheck.yml new file mode 100644 index 0000000000..2ca5f27eea --- /dev/null +++ b/.github/workflows/LiterateCheck.yml @@ -0,0 +1,15 @@ +name: "Literate Check" + +on: + push: + branches: + - 'main' + tags: '*' + pull_request: + +jobs: + literate: + name: "Literate Check" + uses: "ITensor/ITensorActions/.github/workflows/LiterateCheck.yml@main" + with: + localregistry: https://github.com/ITensor/ITensorRegistry.git diff --git a/.github/workflows/Tests.yml b/.github/workflows/Tests.yml new file mode 100644 index 0000000000..5a0a306401 --- /dev/null +++ b/.github/workflows/Tests.yml @@ -0,0 +1,42 @@ +name: Tests +on: + push: + branches: + - 'master' + - 'main' + - 'release-' + tags: '*' + paths-ignore: + - 'docs/**' + pull_request: + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + # Cancel intermediate builds: only if it is a pull request build. + cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} + +jobs: + tests: + name: "Tests" + strategy: + fail-fast: false + matrix: + version: + - 'lts' # minimal supported version + - '1' # latest released Julia version + # group: + # - 'core' + # - 'optional' + os: + - ubuntu-latest + - macOS-latest + - windows-latest + uses: "ITensor/ITensorActions/.github/workflows/Tests.yml@main" + with: + group: "${{ matrix.group }}" + julia-version: "${{ matrix.version }}" + os: "${{ matrix.os }}" + localregistry: https://github.com/ITensor/ITensorRegistry.git + secrets: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/comment_trigger_example.yml b/.github/workflows/comment_trigger_example.yml deleted file mode 100644 index 675eba4490..0000000000 --- a/.github/workflows/comment_trigger_example.yml +++ /dev/null @@ -1,58 +0,0 @@ -name: Example comment trigger - -# https://dev.to/zirkelc/trigger-github-workflow-for-comment-on-pull-request-45l2 - -on: - issue_comment: - types: [created] - -jobs: - deploy: - name: Deploy - if: github.event.issue.pull_request && contains(github.event.comment.body, '/deploy') - runs-on: ubuntu-latest - steps: - - name: Get PR branch - uses: xt0rted/pull-request-comment-branch@v3 - id: comment-branch - - name: Set latest commit status as pending - uses: myrotvorets/set-commit-status-action@master - with: - sha: ${{ steps.comment-branch.outputs.head_sha }} - token: ${{ secrets.GITHUB_TOKEN }} - status: pending - - name: Checkout PR branch - uses: actions/checkout@v4 - with: - # https://github.com/actions/checkout/issues/331#issuecomment-1438220926 - ref: refs/pull/${{ github.event.issue.number }}/head - - name: Setup Node.js 16 - uses: actions/setup-node@v4 - with: - node-version: 16 - - name: Deploy - run: | - echo "Deploying..." 
- - name: Set latest commit status as ${{ job.status }} - uses: myrotvorets/set-commit-status-action@master - if: always() - with: - sha: ${{ steps.comment-branch.outputs.head_sha }} - token: ${{ secrets.GITHUB_TOKEN }} - status: ${{ job.status }} - - name: Add comment to PR - uses: actions/github-script@v7 - if: always() - with: - script: | - const name = '${{ github.workflow }}'; - const url = '${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}'; - const success = '${{ job.status }}' === 'success'; - const body = `${name}: ${success ? 'succeeded ✅' : 'failed ❌'}\n${url}`; - - await github.rest.issues.createComment({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - body: body - }) diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index bb8d13e7c5..01a7f7a4ae 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -1,4 +1,4 @@ -name: Documentation +name: "Documentation" on: push: @@ -6,26 +6,18 @@ on: - main tags: '*' pull_request: - branches: - - main - tags: '*' - workflow_dispatch: - branches: - - main - tags: '*' + schedule: + - cron: '1 4 * * 4' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref_name != github.event.repository.default_branch || github.ref != 'refs/tags/v*' }} jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: julia-actions/setup-julia@latest - with: - version: '1' - - name: Install dependencies - run: julia --project=docs/ -e 'using Pkg; Pkg.develop(path="./NDTensors"); Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()' - - name: Build and deploy - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # For authentication with GitHub Actions token - DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }} - run: julia --project=docs/ docs/make.jl + build-and-deploy-docs: + name: "Documentation" + uses: "ITensor/ITensorActions/.github/workflows/Documentation.yml@main" + with: + localregistry: https://github.com/ITensor/ITensorRegistry.git + secrets: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/format_check.yml b/.github/workflows/format_check.yml deleted file mode 100644 index bb6d9333f8..0000000000 --- a/.github/workflows/format_check.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: Format check -on: - push: - branches: [main] - tags: [v*] - pull_request: - -jobs: - format: - name: "Format Check" - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: julia-actions/setup-julia@v2 - with: - version: 1 - - name: Install JuliaFormatter and format - run: | - julia -e 'using Pkg; Pkg.add(PackageSpec(name="JuliaFormatter"))' - julia -e 'using JuliaFormatter; format(".", verbose=true)' - - name: Check format - run: | - julia -e ' - out = Cmd(`git diff --name-only`) |> read |> String - if out == "" - exit(0) - else - @error "The following files have not been formatted:" - write(stdout, out) - out_diff = Cmd(`git diff`) |> read |> String - @error "Diff:" - write(stdout, out_diff) - exit(1) - @error "" - end' diff --git a/.github/workflows/format_pr.yml b/.github/workflows/format_pr.yml deleted file mode 100644 index 7c27a7d871..0000000000 --- a/.github/workflows/format_pr.yml +++ /dev/null @@ -1,29 +0,0 @@ -name: format-pr -on: - schedule: - - cron: '0 0 * * *' -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Install JuliaFormatter and format - run: | - julia -e 
'import Pkg; Pkg.add("JuliaFormatter")' - julia -e 'using JuliaFormatter; format(".")' - # https://github.com/marketplace/actions/create-pull-request - # https://github.com/peter-evans/create-pull-request#reference-example - - name: Create Pull Request - id: cpr - uses: peter-evans/create-pull-request@v7 - with: - token: ${{ secrets.GITHUB_TOKEN }} - commit-message: Format .jl files - title: 'Automatic JuliaFormatter.jl run' - branch: auto-juliaformatter-pr - delete-branch: true - labels: formatting, automated pr, no changelog - - name: Check outputs - run: | - echo "Pull Request Number - ${{ steps.cpr.outputs.pull-request-number }}" - echo "Pull Request URL - ${{ steps.cpr.outputs.pull-request-url }}" diff --git a/.github/workflows/format_suggestions.yml b/.github/workflows/format_suggestions.yml deleted file mode 100644 index f80377a24f..0000000000 --- a/.github/workflows/format_suggestions.yml +++ /dev/null @@ -1,27 +0,0 @@ -name: Format suggestions - -on: - pull_request: - -concurrency: - # Skip intermediate builds: always. - # Cancel intermediate builds: only if it is a pull request build. - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} - -jobs: - format: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: julia-actions/setup-julia@latest - with: - version: 1 - - run: | - julia -e 'using Pkg; Pkg.add("JuliaFormatter")' - julia -e 'using JuliaFormatter; format("."; verbose=true)' - - uses: reviewdog/action-suggester@v1 - with: - tool_name: JuliaFormatter - fail_on_error: true - filter_mode: added diff --git a/.github/workflows/main_test_itensors_base_macos_windows.yml b/.github/workflows/main_test_itensors_base_macos_windows.yml deleted file mode 100644 index f9f39d16fd..0000000000 --- a/.github/workflows/main_test_itensors_base_macos_windows.yml +++ /dev/null @@ -1,45 +0,0 @@ -name: Run ITensors base tests (macOS and Windows) - -on: - push: - branches: - - main - -jobs: - test: - name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ matrix.threads }} thread(s) - runs-on: ${{ matrix.os }} - env: - JULIA_NUM_THREADS: ${{ matrix.threads }} - strategy: - matrix: - version: - - 'lts' - - '1' - os: - # - windows-latest # windows tests are failing for an unknow reason, disable for now - - macOS-latest - threads: - - '2' - arch: - - x64 - exclude: - # MacOS not available on x86 - - {os: 'macOS-latest', arch: 'x86'} - steps: - - uses: actions/checkout@v4 - - uses: julia-actions/setup-julia@latest - with: - version: ${{ matrix.version }} - arch: ${{ matrix.arch }} - - name: Install Julia dependencies and run tests - shell: julia {0} - run: | - using Pkg; - Pkg.activate(temp=true); - Pkg.develop(path="./NDTensors"); - Pkg.develop(path="."); - Pkg.test("ITensors"; coverage=true, test_args=["base"]); - - uses: julia-actions/julia-uploadcodecov@latest - env: - CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/test_itensors_base_ubuntu.yml b/.github/workflows/test_itensors_base_ubuntu.yml deleted file mode 100644 index 7004dae0f7..0000000000 --- a/.github/workflows/test_itensors_base_ubuntu.yml +++ /dev/null @@ -1,46 +0,0 @@ -name: Run ITensors base tests (Ubuntu) - -on: - push: - branches: - - main - tags: '*' - pull_request: - -jobs: - test: - name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ matrix.threads }} thread(s) - runs-on: ${{ matrix.os }} - env: - JULIA_NUM_THREADS: ${{ matrix.threads }} - strategy: - matrix: - version: 
- - 'lts' - - '1' - os: - - ubuntu-latest - threads: - - '2' - arch: - - x64 - exclude: - # MacOS not available on x86 - - {os: 'macOS-latest', arch: 'x86'} - steps: - - uses: actions/checkout@v4 - - uses: julia-actions/setup-julia@latest - with: - version: ${{ matrix.version }} - arch: ${{ matrix.arch }} - - name: Install Julia dependencies and run tests - shell: julia {0} - run: | - using Pkg; - Pkg.activate(temp=true); - Pkg.develop(path="./NDTensors"); - Pkg.develop(path="."); - Pkg.test("ITensors"; coverage=true, test_args=["base"]); - - uses: julia-actions/julia-uploadcodecov@latest - env: - CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/test_ndtensors.yml b/.github/workflows/test_ndtensors.yml deleted file mode 100644 index 1f3f4c8be1..0000000000 --- a/.github/workflows/test_ndtensors.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: Run NDTensors tests - -on: - push: - branches: - - main - tags: '*' - pull_request: - -jobs: - test: - name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ matrix.threads }} thread(s) - runs-on: ${{ matrix.os }} - env: - JULIA_NUM_THREADS: ${{ matrix.threads }} - strategy: - matrix: - version: - - 'lts' - - '1' - os: - - ubuntu-latest - threads: - - '1' - arch: - - x64 - steps: - - uses: actions/checkout@v4 - - uses: julia-actions/setup-julia@latest - with: - version: ${{ matrix.version }} - arch: ${{ matrix.arch }} - - name: Install Julia dependencies and run tests - shell: julia --depwarn=yes {0} - run: | - using Pkg; - Pkg.activate(temp=true); - Pkg.develop(path="./NDTensors"); - Pkg.develop(path="."); - Pkg.test("NDTensors"); diff --git a/.gitignore b/.gitignore index d60c751e5e..10593a9abb 100644 --- a/.gitignore +++ b/.gitignore @@ -1,17 +1,14 @@ -Manifest.toml -.tmp -.DS_Store -.benchmarkci +*.jl.*.cov +*.jl.cov +*.jl.mem *.o *.swp -*.swo -*.cov -benchmark/mult +.DS_Store +.benchmarkci +.tmp +.vscode/ +Manifest.toml benchmark/*.json docs/Manifest.toml docs/build/ -NDTensors/Manifest.toml -NDTensors/test/Manifest.toml -precompile/tmp -test/data.h5 -.vscode/ +docs/src/index.md diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f7ecc4ad06..65993659d3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -6,11 +6,9 @@ repos: - id: check-toml - id: check-yaml - id: end-of-file-fixer - exclude: '.*references/.*\.txt$' # do not check reference TN images exclude_types: [markdown] # incompatible with Literate.jl - - id: trailing-whitespace - exclude: '.*references/.*\.txt$' # do not check reference TN images -- repo: https://github.com/qiaojunfeng/pre-commit-julia-format - rev: v0.2.0 + +- repo: "https://github.com/domluna/JuliaFormatter.jl" + rev: v1.0.62 hooks: - - id: julia-format + - id: "julia-formatter" diff --git a/CITATION.cff b/CITATION.cff deleted file mode 100644 index 22c6e16b6c..0000000000 --- a/CITATION.cff +++ /dev/null @@ -1,35 +0,0 @@ -cff-version: 1.1.0 -authors: - - family-names: Fishman - given-names: Matthew - - family-names: White - given-names: Steven R. - orcid: "https://orcid.org/0000-0003-3496-0707" - - family-names: Stoudenmire - given-names: E. Miles - orcid: "https://orcid.org/0000-0003-3389-9692" -references: - - type: article - authors: - - family-names: Fishman - given-names: Matthew - - family-names: White - given-names: Steven R. - orcid: "https://orcid.org/0000-0003-3496-0707" - - family-names: Stoudenmire - given-names: E. 
Miles - orcid: "https://orcid.org/0000-0003-3389-9692" - title: "The ITensor Software Library for Tensor Network Calculations" - status: "preprint" - number: "2007.14822" - identifiers: "arXiv:2007.14822" - url: "https://arxiv.org/abs/2007.14822" -date-released: 2020-07-28 -repository-code: "https://arxiv.org/abs/2007.14822" -message: "Please cite the following article when using this software." -license: "Apache-2.0" -version: "0.2" -identifiers: - - type: "other" - value: "arXiv:2007.14822" -title: "The ITensor Software Library for Tensor Network Calculations" diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md deleted file mode 100644 index c79a13ca0b..0000000000 --- a/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,128 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -We as members, contributors, and leaders pledge to make participation in our -community a harassment-free experience for everyone, regardless of age, body -size, visible or invisible disability, ethnicity, sex characteristics, gender -identity and expression, level of experience, education, socio-economic status, -nationality, personal appearance, race, religion, or sexual identity -and orientation. - -We pledge to act and interact in ways that contribute to an open, welcoming, -diverse, inclusive, and healthy community. - -## Our Standards - -Examples of behavior that contributes to a positive environment for our -community include: - -* Demonstrating empathy and kindness toward other people -* Being respectful of differing opinions, viewpoints, and experiences -* Giving and gracefully accepting constructive feedback -* Accepting responsibility and apologizing to those affected by our mistakes, - and learning from the experience -* Focusing on what is best not just for us as individuals, but for the - overall community - -Examples of unacceptable behavior include: - -* The use of sexualized language or imagery, and sexual attention or - advances of any kind -* Trolling, insulting or derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or email - address, without their explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Enforcement Responsibilities - -Community leaders are responsible for clarifying and enforcing our standards of -acceptable behavior and will take appropriate and fair corrective action in -response to any behavior that they deem inappropriate, threatening, offensive, -or harmful. - -Community leaders have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are -not aligned to this Code of Conduct, and will communicate reasons for moderation -decisions when appropriate. - -## Scope - -This Code of Conduct applies within all community spaces, and also applies when -an individual is officially representing the community in public spaces. -Examples of representing our community include using an official e-mail address, -posting via an official social media account, or acting as an appointed -representative at an online or offline event. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement at -info@pastaq.org. -All complaints will be reviewed and investigated promptly and fairly. 
- -All community leaders are obligated to respect the privacy and security of the -reporter of any incident. - -## Enforcement Guidelines - -Community leaders will follow these Community Impact Guidelines in determining -the consequences for any action they deem in violation of this Code of Conduct: - -### 1. Correction - -**Community Impact**: Use of inappropriate language or other behavior deemed -unprofessional or unwelcome in the community. - -**Consequence**: A private, written warning from community leaders, providing -clarity around the nature of the violation and an explanation of why the -behavior was inappropriate. A public apology may be requested. - -### 2. Warning - -**Community Impact**: A violation through a single incident or series -of actions. - -**Consequence**: A warning with consequences for continued behavior. No -interaction with the people involved, including unsolicited interaction with -those enforcing the Code of Conduct, for a specified period of time. This -includes avoiding interactions in community spaces as well as external channels -like social media. Violating these terms may lead to a temporary or -permanent ban. - -### 3. Temporary Ban - -**Community Impact**: A serious violation of community standards, including -sustained inappropriate behavior. - -**Consequence**: A temporary ban from any sort of interaction or public -communication with the community for a specified period of time. No public or -private interaction with the people involved, including unsolicited interaction -with those enforcing the Code of Conduct, is allowed during this period. -Violating these terms may lead to a permanent ban. - -### 4. Permanent Ban - -**Community Impact**: Demonstrating a pattern of violation of community -standards, including sustained inappropriate behavior, harassment of an -individual, or aggression toward or disparagement of classes of individuals. - -**Consequence**: A permanent ban from any sort of public interaction within -the community. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 2.0, available at -https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. - -Community Impact Guidelines were inspired by [Mozilla's code of conduct -enforcement ladder](https://github.com/mozilla/diversity). - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see the FAQ at -https://www.contributor-covenant.org/faq. Translations are available at -https://www.contributor-covenant.org/translations. 
diff --git a/Checklists.txt b/Checklists.txt deleted file mode 100644 index f12962187e..0000000000 --- a/Checklists.txt +++ /dev/null @@ -1,23 +0,0 @@ -Checklist for Tagging a New Release ----------------------------------------------------------- -- Update the version = "x.y.z" line near the top of Project.toml - to be the new version number and commit this change -- On the Github website for ITensors.jl, click on Code tab, - then the link to the individual commits (showing total - number of commits) then click on a commit occurring on - or after the change to Project.toml -- In the comments under this commit, add the comment: - @JuliaRegistrator register -- Wait an hour or so for the JuliaRegistrator bot to - update the official Julia registry -- The TagBot bot should also create a new tag and version on Github - automatically (if you see a notice about "Trigger TagBot Issue" - it does not mean that TagBot isn't working or has an issue, it - is just literally a Github issue used to trigger TagBot to run) - -Checklist for Updating the Version of Documenter.jl Used ----------------------------------------------------------- -- Edit the docs/Project.toml file, changing only the version - number for Documenter (under the [compat] section). -- Create a new PR to confirm that the docs build correctly - with the new version. diff --git a/LICENSE b/LICENSE index 555297e50a..7f5c8c6360 100644 --- a/LICENSE +++ b/LICENSE @@ -1,201 +1,21 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2019 The Simons Foundation, Inc. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +MIT License + +Copyright (c) 2024 ITensor developers + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/NDTensors/.JuliaFormatter.toml b/NDTensors/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/LICENSE b/NDTensors/LICENSE deleted file mode 100644 index 555297e50a..0000000000 --- a/NDTensors/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2019 The Simons Foundation, Inc. - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/NDTensors/Project.toml b/NDTensors/Project.toml deleted file mode 100644 index 52bf4ccfc7..0000000000 --- a/NDTensors/Project.toml +++ /dev/null @@ -1,105 +0,0 @@ -name = "NDTensors" -uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" -authors = ["Matthew Fishman "] -version = "0.3.74" - -[deps] -Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" -Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" -ArrayLayouts = "4c555306-a7a7-4459-81d9-ec55ddd5c99a" -BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" -Compat = "34da2185-b29b-5c13-b0c7-acf172513d20" -Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4" -EllipsisNotation = "da5c29d0-fa7d-589e-88eb-ea29b0a81949" -FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b" -Folds = "41a02a25-b8f0-4f67-bc48-60067656b558" -Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196" -HalfIntegers = "f0d1745a-41c9-11e9-1dd9-e5d34d218721" -InlineStrings = "842dd82b-1e85-43dc-bf29-5d0ee9dffc48" -LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -MacroTools = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" -MappedArrays = "dbb5928d-eab1-5f90-85c2-b9b0edb7c900" -PackageExtensionCompat = "65ce6f38-6b18-4e1d-a461-8949797d7930" -Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" -SimpleTraits = "699a6c99-e7fa-54fc-8d76-47d257e15c1d" -SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" -SplitApplyCombine = "03a91e81-4c3e-53e1-a0a4-9c0c8f19dd66" -StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" -Strided = "5e0ebb24-38b0-5f93-81fe-25c709ecae67" -StridedViews = "4db3bf67-4bd7-4b4e-b153-31dc3fb37143" -TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" -TupleTools = "9d95972d-f1c8-5527-a6e0-b4b365fa01f6" -VectorInterface = "409d34a3-91d5-4945-b6ec-7529ddf182d8" - -[weakdeps] -AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e" -CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" -GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" -HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f" -JLArrays = "27aeb0d3-9eb9-45fb-866b-73c2ecf80fcb" -MappedArrays = "dbb5928d-eab1-5f90-85c2-b9b0edb7c900" -Metal = "dde4c033-4e86-420c-a63e-0dd931031962" -Octavian = "6fd5a793-0b7e-452c-907f-f8bfe9c57db4" -TBLIS = "48530278-0828-4a49-9772-0f3830dfa1e9" -cuTENSOR = "011b41b2-24ef-40a8-b3eb-fa098493e9e1" - -[extensions] -NDTensorsAMDGPUExt = ["AMDGPU", "GPUArraysCore"] -NDTensorsCUDAExt = ["CUDA", "GPUArraysCore"] 
-NDTensorsGPUArraysCoreExt = "GPUArraysCore" -NDTensorsHDF5Ext = "HDF5" -NDTensorsJLArraysExt = ["GPUArraysCore", "JLArrays"] -NDTensorsMappedArraysExt = ["MappedArrays"] -NDTensorsMetalExt = ["GPUArraysCore", "Metal"] -NDTensorsOctavianExt = "Octavian" -NDTensorsTBLISExt = "TBLIS" -NDTensorscuTENSORExt = "cuTENSOR" - -[compat] -AMDGPU = "0.9, 1" -Accessors = "0.1.33" -Adapt = "3.7, 4" -ArrayLayouts = "1.4" -BlockArrays = "1.1" -CUDA = "5" -Compat = "4.9" -Dictionaries = "0.4" -EllipsisNotation = "1.8" -FillArrays = "1" -Folds = "0.2.8" -Functors = "0.2, 0.3, 0.4, 0.5" -GPUArraysCore = "0.1" -HDF5 = "0.14, 0.15, 0.16, 0.17" -HalfIntegers = "1" -InlineStrings = "1" -JLArrays = "0.1, 0.2" -LinearAlgebra = "<0.0.1, 1.10" -MacroTools = "0.5" -MappedArrays = "0.4" -Metal = "1" -Octavian = "0.3" -PackageExtensionCompat = "1" -Random = "<0.0.1, 1.10" -SimpleTraits = "0.9.4" -SparseArrays = "<0.0.1, 1.10" -SplitApplyCombine = "1.2.2" -StaticArrays = "0.12, 1.0" -Strided = "2" -StridedViews = "0.2.2, 0.3" -TBLIS = "0.2" -TimerOutputs = "0.5.5" -TupleTools = "1.2.0" -VectorInterface = "0.4.2, 0.5" -cuTENSOR = "2" -julia = "1.10" - -[extras] -AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e" -CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" -GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" -HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f" -JLArrays = "27aeb0d3-9eb9-45fb-866b-73c2ecf80fcb" -Metal = "dde4c033-4e86-420c-a63e-0dd931031962" -Octavian = "6fd5a793-0b7e-452c-907f-f8bfe9c57db4" -TBLIS = "48530278-0828-4a49-9772-0f3830dfa1e9" -cuTENSOR = "011b41b2-24ef-40a8-b3eb-fa098493e9e1" diff --git a/NDTensors/ext/NDTensorsAMDGPUExt/NDTensorsAMDGPUExt.jl b/NDTensors/ext/NDTensorsAMDGPUExt/NDTensorsAMDGPUExt.jl deleted file mode 100644 index 34004a7b7f..0000000000 --- a/NDTensors/ext/NDTensorsAMDGPUExt/NDTensorsAMDGPUExt.jl +++ /dev/null @@ -1,12 +0,0 @@ -module NDTensorsAMDGPUExt - -include("append.jl") -include("copyto.jl") -include("set_types.jl") -include("adapt.jl") -include("indexing.jl") -include("linearalgebra.jl") -include("mul.jl") -include("permutedims.jl") - -end diff --git a/NDTensors/ext/NDTensorsAMDGPUExt/adapt.jl b/NDTensors/ext/NDTensorsAMDGPUExt/adapt.jl deleted file mode 100644 index 8ef943d674..0000000000 --- a/NDTensors/ext/NDTensorsAMDGPUExt/adapt.jl +++ /dev/null @@ -1,31 +0,0 @@ -using NDTensors: NDTensors, EmptyStorage, adapt_storagetype, emptytype -using NDTensors.AMDGPUExtensions: AMDGPUExtensions, ROCArrayAdaptor -using NDTensors.GPUArraysCoreExtensions: storagemode -using NDTensors.TypeParameterAccessors: - default_type_parameter, - set_type_parameter, - set_type_parameters, - type_parameter, - type_parameters -using Adapt: Adapt, adapt -using AMDGPU: AMDGPU, ROCArray, ROCVector -using Functors: fmap - -function AMDGPUExtensions.roc(xs; storagemode=default_type_parameter(ROCArray, storagemode)) - return fmap(x -> adapt(ROCArrayAdaptor{storagemode}(), x), xs) -end - -function Adapt.adapt_storage(adaptor::ROCArrayAdaptor, xs::AbstractArray) - new_parameters = (type_parameters(xs, (eltype, ndims))..., storagemode(adaptor)) - roctype = set_type_parameters(ROCArray, (eltype, ndims, storagemode), new_parameters) - return isbits(xs) ? 
xs : adapt(roctype, xs) -end - -function NDTensors.adapt_storagetype( - adaptor::ROCArrayAdaptor, ::Type{EmptyStorage{ElT,StoreT}} -) where {ElT,StoreT} - roctype = set_type_parameters( - ROCVector, (eltype, storagemode), (ElT, storagemode(adaptor)) - ) - return emptytype(adapt_storagetype(roctype, StoreT)) -end diff --git a/NDTensors/ext/NDTensorsAMDGPUExt/append.jl b/NDTensors/ext/NDTensorsAMDGPUExt/append.jl deleted file mode 100644 index c4b5d30947..0000000000 --- a/NDTensors/ext/NDTensorsAMDGPUExt/append.jl +++ /dev/null @@ -1,8 +0,0 @@ -using AMDGPU: ROCArray -using GPUArraysCore: @allowscalar -using NDTensors.Expose: Exposed, unexpose - -## Warning this append function uses scalar indexing and is therefore extremely slow -function Base.append!(Ecollection::Exposed{<:ROCArray}, collections...) - return @allowscalar append!(unexpose(Ecollection), collections...) -end diff --git a/NDTensors/ext/NDTensorsAMDGPUExt/copyto.jl b/NDTensors/ext/NDTensorsAMDGPUExt/copyto.jl deleted file mode 100644 index cba61603a2..0000000000 --- a/NDTensors/ext/NDTensorsAMDGPUExt/copyto.jl +++ /dev/null @@ -1,35 +0,0 @@ -using NDTensors.Expose: Exposed, expose, parent, unexpose -using LinearAlgebra: LinearAlgebra, Adjoint -using AMDGPU: ROCArray - -# Same definition as `MtlArray`. -function Base.copy(src::Exposed{<:ROCArray,<:Base.ReshapedArray}) - return reshape(copy(parent(src)), size(unexpose(src))) -end - -function Base.copy( - src::Exposed{ - <:ROCArray,<:SubArray{<:Any,<:Any,<:Base.ReshapedArray{<:Any,<:Any,<:Adjoint}} - }, -) - return copy(@view copy(expose(parent(src)))[parentindices(unexpose(src))...]) -end - -function Base.copyto!(dest::Exposed{<:ROCArray}, src::Exposed{<:ROCArray,<:SubArray}) - copyto!(dest, expose(copy(src))) - return unexpose(dest) -end - -function Base.copyto!( - dest::Exposed{<:ROCArray}, src::Exposed{<:ROCArray,<:Base.ReshapedArray} -) - copyto!(dest, expose(parent(src))) - return unexpose(dest) -end - -function Base.copyto!( - dest::Exposed{<:ROCArray}, src::Exposed{<:ROCArray,<:LinearAlgebra.Transpose} -) - copyto!(expose(transpose(dest)), expose(parent(src))) - return unexpose(dest) -end diff --git a/NDTensors/ext/NDTensorsAMDGPUExt/indexing.jl b/NDTensors/ext/NDTensorsAMDGPUExt/indexing.jl deleted file mode 100644 index 46ade03433..0000000000 --- a/NDTensors/ext/NDTensorsAMDGPUExt/indexing.jl +++ /dev/null @@ -1,23 +0,0 @@ -using AMDGPU: AMDGPU, ROCArray -using GPUArraysCore: @allowscalar -using NDTensors.Expose: Exposed, expose, parent, unexpose -using NDTensors.GPUArraysCoreExtensions: cpu - -function Base.getindex(E::Exposed{<:ROCArray}) - return @allowscalar unexpose(E)[] -end - -function Base.setindex!(E::Exposed{<:ROCArray}, x::Number) - @allowscalar unexpose(E)[] = x - return unexpose(E) -end - -function Base.getindex(E::Exposed{<:ROCArray,<:Adjoint}, i, j) - return (expose(parent(E))[j, i])' -end - -Base.any(f, E::Exposed{<:ROCArray,<:NDTensors.Tensor}) = any(f, data(unexpose(E))) - -function Base.print_array(io::IO, E::Exposed{<:ROCArray}) - return Base.print_array(io, expose(cpu(E))) -end diff --git a/NDTensors/ext/NDTensorsAMDGPUExt/linearalgebra.jl b/NDTensors/ext/NDTensorsAMDGPUExt/linearalgebra.jl deleted file mode 100644 index 642d2e6da0..0000000000 --- a/NDTensors/ext/NDTensorsAMDGPUExt/linearalgebra.jl +++ /dev/null @@ -1,22 +0,0 @@ -using NDTensors.AMDGPUExtensions: roc -using NDTensors.Expose: Expose, Exposed, expose, ql, ql_positive -using NDTensors.GPUArraysCoreExtensions: cpu -using NDTensors.TypeParameterAccessors: unwrap_array_type -using 
LinearAlgebra: svd -using Adapt: adapt -using AMDGPU: ROCMatrix - -function LinearAlgebra.svd(A::Exposed{<:ROCMatrix}; kwargs...) - U, S, V = svd(cpu(A)) - return roc.((U, S, V)) -end - -## TODO currently AMDGPU doesn't have ql so make a ql function -function Expose.ql(A::Exposed{<:ROCMatrix}) - Q, L = ql(expose(cpu(A))) - return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L) -end -function Expose.ql_positive(A::Exposed{<:ROCMatrix}) - Q, L = ql_positive(expose(cpu(A))) - return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L) -end diff --git a/NDTensors/ext/NDTensorsAMDGPUExt/mul.jl b/NDTensors/ext/NDTensorsAMDGPUExt/mul.jl deleted file mode 100644 index 8d332e8452..0000000000 --- a/NDTensors/ext/NDTensorsAMDGPUExt/mul.jl +++ /dev/null @@ -1,45 +0,0 @@ -using NDTensors.Expose: Exposed, expose, parent, unexpose -using LinearAlgebra: LinearAlgebra, Adjoint, Transpose, mul! -using AMDGPU: ROCArray - -# This was calling generic matrix multiplication. -function LinearAlgebra.mul!( - CM::Exposed{<:ROCArray,<:LinearAlgebra.Transpose}, - AM::Exposed{<:ROCArray}, - BM::Exposed{<:ROCArray}, - α, - β, -) - mul!(transpose(CM), transpose(BM), transpose(AM), α, β) - return unexpose(CM) -end - -# This was calling generic matrix multiplication. -function LinearAlgebra.mul!( - CM::Exposed{<:ROCArray,<:LinearAlgebra.Adjoint}, - AM::Exposed{<:ROCArray}, - BM::Exposed{<:ROCArray}, - α, - β, -) - mul!(CM', BM', AM', α, β) - return unexpose(CM) -end - -# Fix issue in AMDGPU.jl where it cannot distinguish -# Transpose{Reshape{Adjoint{ROCArray}}} as a ROCArray and calls generic matmul -function LinearAlgebra.mul!( - CM::Exposed{<:ROCArray}, - AM::Exposed{<:ROCArray}, - BM::Exposed{ - <:ROCArray, - <:LinearAlgebra.Transpose{ - <:Any,<:Base.ReshapedArray{<:Any,<:Any,<:LinearAlgebra.Adjoint} - }, - }, - α, - β, -) - mul!(CM, AM, expose(transpose(copy(expose(parent(BM))))), α, β) - return unexpose(CM) -end diff --git a/NDTensors/ext/NDTensorsAMDGPUExt/permutedims.jl b/NDTensors/ext/NDTensorsAMDGPUExt/permutedims.jl deleted file mode 100644 index cc284e6389..0000000000 --- a/NDTensors/ext/NDTensorsAMDGPUExt/permutedims.jl +++ /dev/null @@ -1,23 +0,0 @@ -using NDTensors.Expose: Exposed, expose, parent, unexpose -using AMDGPU: ROCArray - -function Base.permutedims!( - Edest::Exposed{<:ROCArray,<:Base.ReshapedArray}, Esrc::Exposed{<:ROCArray}, perm -) - Aperm = permutedims(Esrc, perm) - copyto!(expose(parent(Edest)), expose(Aperm)) - return unexpose(Edest) -end - -# There is an issue in AMDGPU where if Edest is a reshaped{<:Adjoint} -# .= can fail. 
So instead force Esrc into the shape of parent(Edest) -function Base.permutedims!( - Edest::Exposed{<:ROCArray,<:Base.ReshapedArray{<:Any,<:Any,<:Adjoint}}, - Esrc::Exposed{<:ROCArray}, - perm, - f, -) - Aperm = reshape(permutedims(Esrc, perm), size(parent(Edest))) - parent(Edest) .= f.(parent(Edest), Aperm) - return unexpose(Edest) -end diff --git a/NDTensors/ext/NDTensorsAMDGPUExt/set_types.jl b/NDTensors/ext/NDTensorsAMDGPUExt/set_types.jl deleted file mode 100644 index 759b140926..0000000000 --- a/NDTensors/ext/NDTensorsAMDGPUExt/set_types.jl +++ /dev/null @@ -1,11 +0,0 @@ -# TypeParameterAccessors definitions -using NDTensors.TypeParameterAccessors: - TypeParameterAccessors, Position, default_type_parameters -using NDTensors.GPUArraysCoreExtensions: storagemode -using AMDGPU: AMDGPU, ROCArray - -function TypeParameterAccessors.default_type_parameters(::Type{<:ROCArray}) - return (default_type_parameters(AbstractArray)..., AMDGPU.Mem.HIPBuffer) -end - -TypeParameterAccessors.position(::Type{<:ROCArray}, ::typeof(storagemode)) = Position(3) diff --git a/NDTensors/ext/NDTensorsCUDAExt/NDTensorsCUDAExt.jl b/NDTensors/ext/NDTensorsCUDAExt/NDTensorsCUDAExt.jl deleted file mode 100644 index 2a7a458ff9..0000000000 --- a/NDTensors/ext/NDTensorsCUDAExt/NDTensorsCUDAExt.jl +++ /dev/null @@ -1,12 +0,0 @@ -module NDTensorsCUDAExt -include("append.jl") -include("default_kwargs.jl") -include("copyto.jl") -include("set_types.jl") -include("iscu.jl") -include("adapt.jl") -include("indexing.jl") -include("linearalgebra.jl") -include("mul.jl") -include("permutedims.jl") -end diff --git a/NDTensors/ext/NDTensorsCUDAExt/adapt.jl b/NDTensors/ext/NDTensorsCUDAExt/adapt.jl deleted file mode 100644 index c47a9408be..0000000000 --- a/NDTensors/ext/NDTensorsCUDAExt/adapt.jl +++ /dev/null @@ -1,26 +0,0 @@ -using Adapt: Adapt -using CUDA: CUDA, CuArray, CuVector -using Functors: fmap -using NDTensors: NDTensors, EmptyStorage, adapt_storagetype, emptytype -using NDTensors.CUDAExtensions: CUDAExtensions, CuArrayAdaptor -using NDTensors.GPUArraysCoreExtensions: storagemode -using NDTensors.TypeParameterAccessors: - default_type_parameter, set_type_parameters, type_parameters - -function CUDAExtensions.cu(xs; storagemode=default_type_parameter(CuArray, storagemode)) - return fmap(x -> adapt(CuArrayAdaptor{storagemode}(), x), xs) -end - -## Could do this generically -function Adapt.adapt_storage(adaptor::CuArrayAdaptor, xs::AbstractArray) - params = (type_parameters(xs, (eltype, ndims))..., storagemode(adaptor)) - cutype = set_type_parameters(CuArray, (eltype, ndims, storagemode), params) - return isbits(xs) ? xs : adapt(cutype, xs) -end - -function NDTensors.adapt_storagetype( - adaptor::CuArrayAdaptor, ::Type{EmptyStorage{ElT,StoreT}} -) where {ElT,StoreT} - cutype = set_type_parameters(CuVector, (eltype, storagemode), (ElT, storagemode(adaptor))) - return emptytype(adapt_storagetype(cutype, StoreT)) -end diff --git a/NDTensors/ext/NDTensorsCUDAExt/append.jl b/NDTensors/ext/NDTensorsCUDAExt/append.jl deleted file mode 100644 index 9a974354ab..0000000000 --- a/NDTensors/ext/NDTensorsCUDAExt/append.jl +++ /dev/null @@ -1,8 +0,0 @@ -using CUDA: CuArray -using GPUArraysCore: @allowscalar -using NDTensors.Expose: Exposed, unexpose - -## Warning this append function uses scalar indexing and is therefore extremely slow -function Base.append!(Ecollection::Exposed{<:CuArray}, collections...) - return @allowscalar append!(unexpose(Ecollection), collections...) 
-end diff --git a/NDTensors/ext/NDTensorsCUDAExt/copyto.jl b/NDTensors/ext/NDTensorsCUDAExt/copyto.jl deleted file mode 100644 index c3f136a9a6..0000000000 --- a/NDTensors/ext/NDTensorsCUDAExt/copyto.jl +++ /dev/null @@ -1,30 +0,0 @@ -using CUDA: CuArray -using NDTensors.Expose: Exposed, expose, unexpose -using LinearAlgebra: Adjoint - -# Same definition as `MtlArray`. -function Base.copy(src::Exposed{<:CuArray,<:Base.ReshapedArray}) - return reshape(copy(parent(src)), size(unexpose(src))) -end - -function Base.copy( - src::Exposed{ - <:CuArray,<:SubArray{<:Any,<:Any,<:Base.ReshapedArray{<:Any,<:Any,<:Adjoint}} - }, -) - return copy(@view copy(expose(parent(src)))[parentindices(unexpose(src))...]) -end - -# Catches a bug in `copyto!` in CUDA backend. -function Base.copyto!(dest::Exposed{<:CuArray}, src::Exposed{<:CuArray,<:SubArray}) - copyto!(dest, expose(copy(src))) - return unexpose(dest) -end - -# Catches a bug in `copyto!` in CUDA backend. -function Base.copyto!( - dest::Exposed{<:CuArray}, src::Exposed{<:CuArray,<:Base.ReshapedArray} -) - copyto!(dest, expose(parent(src))) - return unexpose(dest) -end diff --git a/NDTensors/ext/NDTensorsCUDAExt/default_kwargs.jl b/NDTensors/ext/NDTensorsCUDAExt/default_kwargs.jl deleted file mode 100644 index 180e2143e3..0000000000 --- a/NDTensors/ext/NDTensorsCUDAExt/default_kwargs.jl +++ /dev/null @@ -1,4 +0,0 @@ -using CUDA: CuArray -using NDTensors: NDTensors - -NDTensors.default_svd_alg(::Type{<:CuArray}, a) = "qr_algorithm" diff --git a/NDTensors/ext/NDTensorsCUDAExt/indexing.jl b/NDTensors/ext/NDTensorsCUDAExt/indexing.jl deleted file mode 100644 index ac86e18da8..0000000000 --- a/NDTensors/ext/NDTensorsCUDAExt/indexing.jl +++ /dev/null @@ -1,23 +0,0 @@ -using CUDA: CuArray -using GPUArraysCore: @allowscalar -using NDTensors: NDTensors -using NDTensors.Expose: Exposed, expose, unexpose - -function Base.getindex(E::Exposed{<:CuArray}) - return @allowscalar unexpose(E)[] -end - -function Base.setindex!(E::Exposed{<:CuArray}, x::Number) - @allowscalar unexpose(E)[] = x - return unexpose(E) -end - -function Base.getindex(E::Exposed{<:CuArray,<:Adjoint}, i, j) - return (expose(parent(E))[j, i])' -end - -Base.any(f, E::Exposed{<:CuArray,<:NDTensors.Tensor}) = any(f, data(unexpose(E))) - -function Base.print_array(io::IO, E::Exposed{<:CuArray}) - return Base.print_array(io, expose(NDTensors.cpu(E))) -end diff --git a/NDTensors/ext/NDTensorsCUDAExt/iscu.jl b/NDTensors/ext/NDTensorsCUDAExt/iscu.jl deleted file mode 100644 index c0c7f30fa4..0000000000 --- a/NDTensors/ext/NDTensorsCUDAExt/iscu.jl +++ /dev/null @@ -1,4 +0,0 @@ -using CUDA: CuArray -using NDTensors: NDTensors - -NDTensors.iscu(::Type{<:CuArray}) = true diff --git a/NDTensors/ext/NDTensorsCUDAExt/linearalgebra.jl b/NDTensors/ext/NDTensorsCUDAExt/linearalgebra.jl deleted file mode 100644 index f76841e135..0000000000 --- a/NDTensors/ext/NDTensorsCUDAExt/linearalgebra.jl +++ /dev/null @@ -1,60 +0,0 @@ -using Adapt: adapt -using CUDA: CUDA, CuMatrix -using LinearAlgebra: Adjoint, svd -using NDTensors: NDTensors -using NDTensors.Expose: Expose, expose, ql, ql_positive -using NDTensors.GPUArraysCoreExtensions: cpu -using NDTensors.TypeParameterAccessors: unwrap_array_type -function NDTensors.svd_catch_error(A::CuMatrix; alg::String="jacobi_algorithm") - if alg == "jacobi_algorithm" - alg = CUDA.CUSOLVER.JacobiAlgorithm() - elseif alg == "qr_algorithm" - alg = CUDA.CUSOLVER.QRAlgorithm() - else - error( - "svd algorithm $alg is not currently supported. 
Please see the documentation for currently supported algorithms.", - ) - end - return NDTensors.svd_catch_error(A, alg) -end - -function NDTensors.svd_catch_error(A::CuMatrix, ::CUDA.CUSOLVER.JacobiAlgorithm) - USV = try - svd(A; alg=CUDA.CUSOLVER.JacobiAlgorithm()) - catch - return nothing - end - return USV -end - -function NDTensors.svd_catch_error(A::CuMatrix, ::CUDA.CUSOLVER.QRAlgorithm) - s = size(A) - if s[1] < s[2] - At = copy(Adjoint(A)) - - USV = try - svd(At; alg=CUDA.CUSOLVER.QRAlgorithm()) - catch - return nothing - end - MV, MS, MU = USV - USV = (MU, MS, MV) - else - USV = try - svd(A; alg=CUDA.CUSOLVER.QRAlgorithm()) - catch - return nothing - end - end - return USV -end - -## TODO currently CUDA doesn't have ql so make a ql function -function Expose.ql(A::Exposed{<:CuMatrix}) - Q, L = ql(expose(cpu(A))) - return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L) -end -function Expose.ql_positive(A::Exposed{<:CuMatrix}) - Q, L = ql_positive(expose(cpu(A))) - return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L) -end diff --git a/NDTensors/ext/NDTensorsCUDAExt/mul.jl b/NDTensors/ext/NDTensorsCUDAExt/mul.jl deleted file mode 100644 index 624e20aaad..0000000000 --- a/NDTensors/ext/NDTensorsCUDAExt/mul.jl +++ /dev/null @@ -1,47 +0,0 @@ -using CUDA: CuArray -using LinearAlgebra: LinearAlgebra, mul!, transpose -using NDTensors.Expose: Exposed, expose, unexpose - -# This was calling generic matrix multiplication. -# TODO: Raise an issue with `CUDA.jl`. -function LinearAlgebra.mul!( - CM::Exposed{<:CuArray,<:LinearAlgebra.Transpose}, - AM::Exposed{<:CuArray}, - BM::Exposed{<:CuArray}, - α, - β, -) - mul!(transpose(CM), transpose(BM), transpose(AM), α, β) - return unexpose(CM) -end - -# This was calling generic matrix multiplication. -# TODO: Raise an issue with `CUDA.jl`. -function LinearAlgebra.mul!( - CM::Exposed{<:CuArray,<:LinearAlgebra.Adjoint}, - AM::Exposed{<:CuArray}, - BM::Exposed{<:CuArray}, - α, - β, -) - mul!(CM', BM', AM', α, β) - return unexpose(CM) -end - -## Fix issue in CUDA.jl where it cannot distinguish Transpose{Reshape{Adjoint{CuArray}}} -## as a CuArray and calls generic matmul -function LinearAlgebra.mul!( - CM::Exposed{<:CuArray}, - AM::Exposed{<:CuArray}, - BM::Exposed{ - <:CuArray, - <:LinearAlgebra.Transpose{ - <:Any,<:Base.ReshapedArray{<:Any,<:Any,<:LinearAlgebra.Adjoint} - }, - }, - α, - β, -) - mul!(CM, AM, expose(transpose(copy(expose(parent(BM))))), α, β) - return unexpose(CM) -end diff --git a/NDTensors/ext/NDTensorsCUDAExt/permutedims.jl b/NDTensors/ext/NDTensorsCUDAExt/permutedims.jl deleted file mode 100644 index 032c55c40a..0000000000 --- a/NDTensors/ext/NDTensorsCUDAExt/permutedims.jl +++ /dev/null @@ -1,23 +0,0 @@ -using CUDA: CuArray -using NDTensors.Expose: Exposed, expose, unexpose - -function Base.permutedims!( - Edest::Exposed{<:CuArray,<:Base.ReshapedArray}, Esrc::Exposed{<:CuArray}, perm -) - Aperm = permutedims(Esrc, perm) - copyto!(expose(parent(Edest)), expose(Aperm)) - return unexpose(Edest) -end - -## Found an issue in CUDA where if Edest is a reshaped{<:Adjoint} -## .= can fail. 
So instead force Esrc into the shape of parent(Edest) -function Base.permutedims!( - Edest::Exposed{<:CuArray,<:Base.ReshapedArray{<:Any,<:Any,<:Adjoint}}, - Esrc::Exposed{<:CuArray}, - perm, - f, -) - Aperm = reshape(permutedims(Esrc, perm), size(parent(Edest))) - parent(Edest) .= f.(parent(Edest), Aperm) - return unexpose(Edest) -end diff --git a/NDTensors/ext/NDTensorsCUDAExt/set_types.jl b/NDTensors/ext/NDTensorsCUDAExt/set_types.jl deleted file mode 100644 index bab1149890..0000000000 --- a/NDTensors/ext/NDTensorsCUDAExt/set_types.jl +++ /dev/null @@ -1,13 +0,0 @@ -# TypeParameterAccessors definitions -using CUDA: CUDA, CuArray -using NDTensors.TypeParameterAccessors: - TypeParameterAccessors, Position, default_type_parameters -using NDTensors.GPUArraysCoreExtensions: storagemode - -function TypeParameterAccessors.position(::Type{<:CuArray}, ::typeof(storagemode)) - return Position(3) -end - -function TypeParameterAccessors.default_type_parameters(::Type{<:CuArray}) - return (default_type_parameters(AbstractArray)..., CUDA.Mem.DeviceBuffer) -end diff --git a/NDTensors/ext/NDTensorsGPUArraysCoreExt/NDTensorsGPUArraysCoreExt.jl b/NDTensors/ext/NDTensorsGPUArraysCoreExt/NDTensorsGPUArraysCoreExt.jl deleted file mode 100644 index c9e183cd52..0000000000 --- a/NDTensors/ext/NDTensorsGPUArraysCoreExt/NDTensorsGPUArraysCoreExt.jl +++ /dev/null @@ -1,4 +0,0 @@ -module NDTensorsGPUArraysCoreExt -include("contract.jl") -include("blocksparsetensor.jl") -end diff --git a/NDTensors/ext/NDTensorsGPUArraysCoreExt/blocksparsetensor.jl b/NDTensors/ext/NDTensorsGPUArraysCoreExt/blocksparsetensor.jl deleted file mode 100644 index 1073bd2495..0000000000 --- a/NDTensors/ext/NDTensorsGPUArraysCoreExt/blocksparsetensor.jl +++ /dev/null @@ -1,26 +0,0 @@ -using GPUArraysCore: @allowscalar, AbstractGPUArray -using NDTensors: NDTensors, BlockSparseTensor, dense, diag, map_diag! -using NDTensors.DiagonalArrays: diaglength -using NDTensors.Expose: Exposed, unexpose - -## TODO to circumvent issues with blocksparse and scalar indexing -## convert blocksparse GPU tensors to dense tensors and call diag -## copying will probably have some impact on timing but this code -## currently isn't used in the main code, just in tests. 
-function NDTensors.diag(ETensor::Exposed{<:AbstractGPUArray,<:BlockSparseTensor}) - return diag(dense(unexpose(ETensor))) -end - -## TODO scalar indexing is slow here -function NDTensors.map_diag!( - f::Function, - exposed_t_destination::Exposed{<:AbstractGPUArray,<:BlockSparseTensor}, - exposed_t_source::Exposed{<:AbstractGPUArray,<:BlockSparseTensor}, -) - t_destination = unexpose(exposed_t_destination) - t_source = unexpose(exposed_t_source) - @allowscalar for i in 1:diaglength(t_destination) - NDTensors.setdiagindex!(t_destination, f(NDTensors.getdiagindex(t_source, i)), i) - end - return t_destination -end diff --git a/NDTensors/ext/NDTensorsGPUArraysCoreExt/contract.jl b/NDTensors/ext/NDTensorsGPUArraysCoreExt/contract.jl deleted file mode 100644 index 26b13ed731..0000000000 --- a/NDTensors/ext/NDTensorsGPUArraysCoreExt/contract.jl +++ /dev/null @@ -1,81 +0,0 @@ -using Adapt: adapt -using GPUArraysCore: AbstractGPUArray -using NDTensors: NDTensors, DenseTensor, DiagTensor, contract!, dense, inds, Tensor -using NDTensors.Expose: Exposed, expose, unexpose -using NDTensors.TypeParameterAccessors: parenttype, set_ndims - -function NDTensors.contract!( - output_tensor::Exposed{<:AbstractGPUArray,<:DenseTensor}, - labelsoutput_tensor, - tensor1::Exposed{<:AbstractGPUArray,<:DiagTensor}, - labelstensor1, - tensor2::Exposed{<:AbstractGPUArray,<:DenseTensor}, - labelstensor2, - α::Number=one(Bool), - β::Number=zero(Bool); - convert_to_dense::Bool=true, -) - # Convert tensor1 to dense. - # TODO: Define `Exposed` overload for `dense`. - tensor1 = expose(dense(unexpose(tensor1))) - contract!( - output_tensor, labelsoutput_tensor, tensor1, labelstensor1, tensor2, labelstensor2, α, β - ) - return output_tensor -end - -function NDTensors.contract!( - output_tensor::Exposed{<:AbstractGPUArray,<:DenseTensor}, - labelsoutput_tensor, - tensor1::Exposed{<:AbstractGPUArray,<:DenseTensor}, - labelstensor1, - tensor2::Exposed{<:AbstractGPUArray,<:DiagTensor}, - labelstensor2, - α::Number=one(Bool), - β::Number=zero(Bool), -) - contract!( - output_tensor, labelsoutput_tensor, tensor2, labelstensor2, tensor1, labelstensor1, α, β - ) - return output_tensor -end - -## In this function we convert the DiagTensor to a dense tensor and -## Feed it back into contract -function NDTensors.contract!( - output_tensor::Exposed{<:AbstractGPUArray,<:DenseTensor}, - labelsoutput_tensor, - tensor1::Exposed{<:Number,<:DiagTensor}, - labelstensor1, - tensor2::Exposed{<:AbstractGPUArray,<:DenseTensor}, - labelstensor2, - α::Number=one(Bool), - β::Number=zero(Bool), -) - # Convert tensor1 to dense. - # TODO: Define `Exposed` overload for `dense`. - # TODO: This allocates on CPU first then moves over to GPU which could be optimized. 
- tensor1 = expose( - adapt(set_ndims(parenttype(typeof(tensor2)), 1), dense(unexpose(tensor1))) - ) - contract!( - output_tensor, labelsoutput_tensor, tensor1, labelstensor1, tensor2, labelstensor2, α, β - ) - return output_tensor -end - -function NDTensors.contract!( - output_tensor::Exposed{<:AbstractGPUArray,<:DenseTensor}, - labelsoutput_tensor, - tensor1::Exposed{<:AbstractGPUArray,<:DenseTensor}, - labelstensor1, - tensor2::Exposed{<:Number,<:DiagTensor}, - labelstensor2, - α::Number=one(Bool), - β::Number=zero(Bool), -) - contract!( - output_tensor, labelsoutput_tensor, tensor2, labelstensor2, tensor1, labelstensor1, α, β - ) - return output_tensor -end diff --git a/NDTensors/ext/NDTensorsHDF5Ext/NDTensorsHDF5Ext.jl b/NDTensors/ext/NDTensorsHDF5Ext/NDTensorsHDF5Ext.jl deleted file mode 100644 index 3df64b0c7b..0000000000 --- a/NDTensors/ext/NDTensorsHDF5Ext/NDTensorsHDF5Ext.jl +++ /dev/null @@ -1,8 +0,0 @@ -module NDTensorsHDF5Ext - -include("blocksparse.jl") -include("dense.jl") -include("diag.jl") -include("empty.jl") - -end # module NDTensorsHDF5Ext diff --git a/NDTensors/ext/NDTensorsHDF5Ext/blocksparse.jl b/NDTensors/ext/NDTensorsHDF5Ext/blocksparse.jl deleted file mode 100644 index 64c360adb8..0000000000 --- a/NDTensors/ext/NDTensorsHDF5Ext/blocksparse.jl +++ /dev/null @@ -1,68 +0,0 @@ -using HDF5: HDF5, attributes, create_group, open_group, read, write -using NDTensors: data, Block, blockoffsets, BlockOffsets, BlockSparse - -# Helper function for HDF5 write/read of BlockSparse -function offsets_to_array(boff::BlockOffsets{N}) where {N} - nblocks = length(boff) - asize = (N + 1) * nblocks - n = 1 - a = Vector{Int}(undef, asize) - for bo in pairs(boff) - for j in 1:N - a[n] = bo[1][j] - n += 1 - end - a[n] = bo[2] - n += 1 - end - return a -end - -# Helper function for HDF5 write/read of BlockSparse -function array_to_offsets(a, N::Int) - asize = length(a) - nblocks = div(asize, N + 1) - boff = BlockOffsets{N}() - j = 0 - for b in 1:nblocks - insert!(boff, Block(ntuple(i -> (a[j + i]), N)), a[j + N + 1]) - j += (N + 1) - end - return boff -end - -function HDF5.write(parent::Union{HDF5.File,HDF5.Group}, name::String, B::BlockSparse) - g = create_group(parent, name) - attributes(g)["type"] = "BlockSparse{$(eltype(B))}" - attributes(g)["version"] = 1 - if eltype(B) != Nothing - write(g, "ndims", ndims(B)) - write(g, "data", data(B)) - off_array = offsets_to_array(blockoffsets(B)) - write(g, "offsets", off_array) - end -end - -function HDF5.read( - parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, ::Type{Store} -) where {Store<:BlockSparse} - g = open_group(parent, name) - ElT = eltype(Store) - typestr = "BlockSparse{$ElT}" - if read(attributes(g)["type"]) != typestr - error("HDF5 group or file does not contain $typestr data") - end - N = read(g, "ndims") - off_array = read(g, "offsets") - boff = array_to_offsets(off_array, N) - # Attribute __complex__ is attached to the "data" dataset - # by the h5 library used by C++ version of ITensor: - if haskey(attributes(g["data"]), "__complex__") - M = read(g, "data") - nelt = size(M, 1) * size(M, 2) - data = Vector(reinterpret(ComplexF64, reshape(M, nelt))) - else - data = read(g, "data") - end - return BlockSparse(data, boff) -end diff --git a/NDTensors/ext/NDTensorsHDF5Ext/dense.jl b/NDTensors/ext/NDTensorsHDF5Ext/dense.jl deleted file mode 100644 index baab94b601..0000000000 --- a/NDTensors/ext/NDTensorsHDF5Ext/dense.jl +++ /dev/null @@ -1,37 +0,0 @@ -using HDF5: HDF5, attributes, create_group, open_group, read, write -using 
NDTensors: Dense - -function HDF5.write( - parent::Union{HDF5.File,HDF5.Group}, name::String, D::Store -) where {Store<:Dense} - g = create_group(parent, name) - attributes(g)["type"] = "Dense{$(eltype(Store))}" - attributes(g)["version"] = 1 - if eltype(D) != Nothing - write(g, "data", D.data) - end -end - -function HDF5.read( - parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, ::Type{Store} -) where {Store<:Dense} - g = open_group(parent, name) - ElT = eltype(Store) - typestr = "Dense{$ElT}" - if read(attributes(g)["type"]) != typestr - error("HDF5 group or file does not contain $typestr data") - end - if ElT == Nothing - return Dense{Nothing}() - end - # Attribute __complex__ is attached to the "data" dataset - # by the h5 library used by C++ version of ITensor: - if haskey(attributes(g["data"]), "__complex__") - M = read(g, "data") - nelt = size(M, 1) * size(M, 2) - data = Vector(reinterpret(ComplexF64, reshape(M, nelt))) - else - data = read(g, "data") - end - return Dense{ElT}(data) -end diff --git a/NDTensors/ext/NDTensorsHDF5Ext/diag.jl b/NDTensors/ext/NDTensorsHDF5Ext/diag.jl deleted file mode 100644 index b5e8215173..0000000000 --- a/NDTensors/ext/NDTensorsHDF5Ext/diag.jl +++ /dev/null @@ -1,38 +0,0 @@ -using HDF5: HDF5, attributes, create_group, open_group, read, write -using NDTensors: datatype, Dense, Diag - -function HDF5.write( - parent::Union{HDF5.File,HDF5.Group}, name::String, D::Store -) where {Store<:Diag} - g = create_group(parent, name) - attributes(g)["type"] = "Diag{$(eltype(Store)),$(datatype(Store))}" - attributes(g)["version"] = 1 - if eltype(D) != Nothing - write(g, "data", D.data) - end -end - -function HDF5.read( - parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, ::Type{Store} -) where {Store<:Diag} - g = open_group(parent, name) - ElT = eltype(Store) - DataT = datatype(Store) - typestr = "Diag{$ElT,$DataT}" - if read(attributes(g)["type"]) != typestr - error("HDF5 group or file does not contain $typestr data") - end - if ElT == Nothing - return Dense{Nothing}() - end - # Attribute __complex__ is attached to the "data" dataset - # by the h5 library used by C++ version of ITensor: - if haskey(attributes(g["data"]), "__complex__") - M = read(g, "data") - nelt = size(M, 1) * size(M, 2) - data = Vector(reinterpret(ComplexF64, reshape(M, nelt))) - else - data = read(g, "data") - end - return Diag{ElT,DataT}(data) -end diff --git a/NDTensors/ext/NDTensorsHDF5Ext/empty.jl b/NDTensors/ext/NDTensorsHDF5Ext/empty.jl deleted file mode 100644 index 5c5f8782d4..0000000000 --- a/NDTensors/ext/NDTensorsHDF5Ext/empty.jl +++ /dev/null @@ -1,23 +0,0 @@ -using HDF5: HDF5, attributes, create_group, open_group, read, write -using NDTensors: EmptyStorage - -# XXX: this seems a bit strange and fragile? -# Takes the type very literally. 
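## A minimal sketch of the type-string tagging these HDF5 methods share
## (assumes only HDF5.jl; the file path and tag are hypothetical):
using HDF5: HDF5, attributes, create_group, h5open, read, write
path = tempname() * ".h5"
h5open(path, "w") do file
  g = create_group(file, "store")
  attributes(g)["type"] = "Dense{Float64}"  # tag the group with its storage type
  write(g, "data", randn(4))
end
h5open(path, "r") do file
  # readers compare the tag literally before reconstructing the storage:
  read(attributes(file["store"])["type"]) == "Dense{Float64}" || error("unexpected type tag")
end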
-function HDF5.read( - parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, ::Type{StoreT} -) where {StoreT<:EmptyStorage} - g = open_group(parent, name) - typestr = string(StoreT) - if read(attributes(g)["type"]) != typestr - error("HDF5 group or file does not contain $typestr data") - end - return StoreT() -end - -function HDF5.write( - parent::Union{HDF5.File,HDF5.Group}, name::String, ::StoreT -) where {StoreT<:EmptyStorage} - g = create_group(parent, name) - attributes(g)["type"] = string(StoreT) - return attributes(g)["version"] = 1 -end diff --git a/NDTensors/ext/NDTensorsJLArraysExt/NDTensorsJLArraysExt.jl b/NDTensors/ext/NDTensorsJLArraysExt/NDTensorsJLArraysExt.jl deleted file mode 100644 index 8de2f1dcfd..0000000000 --- a/NDTensors/ext/NDTensorsJLArraysExt/NDTensorsJLArraysExt.jl +++ /dev/null @@ -1,7 +0,0 @@ -module NDTensorsJLArraysExt -include("copyto.jl") -include("indexing.jl") -include("linearalgebra.jl") -include("mul.jl") -include("permutedims.jl") -end diff --git a/NDTensors/ext/NDTensorsJLArraysExt/copyto.jl b/NDTensors/ext/NDTensorsJLArraysExt/copyto.jl deleted file mode 100644 index e0fe1eb99d..0000000000 --- a/NDTensors/ext/NDTensorsJLArraysExt/copyto.jl +++ /dev/null @@ -1,30 +0,0 @@ -using JLArrays: JLArray -using NDTensors.Expose: Exposed, expose, unexpose -using LinearAlgebra: Adjoint - -# Same definition as `CuArray`. -function Base.copy(src::Exposed{<:JLArray,<:Base.ReshapedArray}) - return reshape(copy(parent(src)), size(unexpose(src))) -end - -function Base.copy( - src::Exposed{ - <:JLArray,<:SubArray{<:Any,<:Any,<:Base.ReshapedArray{<:Any,<:Any,<:Adjoint}} - }, -) - return copy(@view copy(expose(parent(src)))[parentindices(unexpose(src))...]) -end - -# Catches a bug in `copyto!` in CUDA backend. -function Base.copyto!(dest::Exposed{<:JLArray}, src::Exposed{<:JLArray,<:SubArray}) - copyto!(dest, expose(copy(src))) - return unexpose(dest) -end - -# Catches a bug in `copyto!` in JLArray backend. 
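## The workaround shared by these `copyto!` methods: materialize or unwrap the
## awkward wrapper first (`copy` for views, `parent` for reshapes), then do a
## plain contiguous copy. CPU sketch of the same idea (illustrative):
dest = zeros(3)
src = view(randn(6), 1:2:5)  # a strided view, the kind of wrapper that trips the GPU path
copyto!(dest, copy(src))     # copying first gives a contiguous source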
-function Base.copyto!( - dest::Exposed{<:JLArray}, src::Exposed{<:JLArray,<:Base.ReshapedArray} -) - copyto!(dest, expose(parent(src))) - return unexpose(dest) -end diff --git a/NDTensors/ext/NDTensorsJLArraysExt/indexing.jl b/NDTensors/ext/NDTensorsJLArraysExt/indexing.jl deleted file mode 100644 index 0f6eeb0469..0000000000 --- a/NDTensors/ext/NDTensorsJLArraysExt/indexing.jl +++ /dev/null @@ -1,19 +0,0 @@ -using JLArrays: JLArray -using GPUArraysCore: @allowscalar -using NDTensors: NDTensors -using NDTensors.Expose: Exposed, expose, unexpose - -function Base.getindex(E::Exposed{<:JLArray}) - return @allowscalar unexpose(E)[] -end - -function Base.setindex!(E::Exposed{<:JLArray}, x::Number) - @allowscalar unexpose(E)[] = x - return unexpose(E) -end - -function Base.getindex(E::Exposed{<:JLArray,<:Adjoint}, i, j) - return (expose(parent(E))[j, i])' -end - -Base.any(f, E::Exposed{<:JLArray,<:NDTensors.Tensor}) = any(f, data(unexpose(E))) diff --git a/NDTensors/ext/NDTensorsJLArraysExt/linearalgebra.jl b/NDTensors/ext/NDTensorsJLArraysExt/linearalgebra.jl deleted file mode 100644 index 4d594050f1..0000000000 --- a/NDTensors/ext/NDTensorsJLArraysExt/linearalgebra.jl +++ /dev/null @@ -1,40 +0,0 @@ -using Adapt: adapt -using JLArrays: JLArray, JLMatrix -using LinearAlgebra: LinearAlgebra, Hermitian, Symmetric, qr, eigen -using NDTensors: NDTensors -using NDTensors.Expose: Expose, expose, qr, qr_positive, ql, ql_positive -using NDTensors.GPUArraysCoreExtensions: cpu -using NDTensors.TypeParameterAccessors: unwrap_array_type - -## TODO: this function exists because of the same issue below. When -## that issue is resolved we can rely on the abstractarray version of -## this operation. -function Expose.qr(A::Exposed{<:JLArray}) - Q, L = qr(unexpose(A)) - return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L) -end -## TODO: this should work using a JLArray but there is an error converting the Q from its packed QR form -## back into a JLArray; see https://github.com/JuliaGPU/GPUArrays.jl/issues/545.
To fix call cpu for now -function Expose.qr_positive(A::Exposed{<:JLArray}) - Q, L = qr_positive(expose(cpu(A))) - return adapt(unwrap_array_type(A), copy(Q)), adapt(unwrap_array_type(A), L) -end - -function Expose.ql(A::Exposed{<:JLMatrix}) - Q, L = ql(expose(cpu(A))) - return adapt(unwrap_array_type(A), copy(Q)), adapt(unwrap_array_type(A), L) -end -function Expose.ql_positive(A::Exposed{<:JLMatrix}) - Q, L = ql_positive(expose(cpu(A))) - return adapt(unwrap_array_type(A), copy(Q)), adapt(unwrap_array_type(A), L) -end - -function LinearAlgebra.eigen(A::Exposed{<:JLMatrix,<:Symmetric}) - q, l = (eigen(expose(cpu(A)))) - return adapt.(unwrap_array_type(A), (q, l)) -end - -function LinearAlgebra.eigen(A::Exposed{<:JLMatrix,<:Hermitian}) - q, l = (eigen(expose(Hermitian(cpu(unexpose(A).data))))) - return adapt.(JLArray, (q, l)) -end diff --git a/NDTensors/ext/NDTensorsJLArraysExt/mul.jl b/NDTensors/ext/NDTensorsJLArraysExt/mul.jl deleted file mode 100644 index 5b04e75df6..0000000000 --- a/NDTensors/ext/NDTensorsJLArraysExt/mul.jl +++ /dev/null @@ -1,43 +0,0 @@ -using JLArrays: JLArray -using LinearAlgebra: LinearAlgebra, mul!, transpose -using NDTensors.Expose: Exposed, expose, unexpose - -function LinearAlgebra.mul!( - CM::Exposed{<:JLArray,<:LinearAlgebra.Transpose}, - AM::Exposed{<:JLArray}, - BM::Exposed{<:JLArray}, - α, - β, -) - mul!(transpose(CM), transpose(BM), transpose(AM), α, β) - return unexpose(CM) -end - -function LinearAlgebra.mul!( - CM::Exposed{<:JLArray,<:LinearAlgebra.Adjoint}, - AM::Exposed{<:JLArray}, - BM::Exposed{<:JLArray}, - α, - β, -) - mul!(CM', BM', AM', α, β) - return unexpose(CM) -end - -## Fix issue in JLArrays.jl where it cannot distinguish Transpose{Reshape{Adjoint{JLArray}}} -## as a JLArray and calls generic matmul -function LinearAlgebra.mul!( - CM::Exposed{<:JLArray}, - AM::Exposed{<:JLArray}, - BM::Exposed{ - <:JLArray, - <:LinearAlgebra.Transpose{ - <:Any,<:Base.ReshapedArray{<:Any,<:Any,<:LinearAlgebra.Adjoint} - }, - }, - α, - β, -) - mul!(CM, AM, expose(transpose(copy(expose(parent(BM))))), α, β) - return unexpose(CM) -end diff --git a/NDTensors/ext/NDTensorsJLArraysExt/permutedims.jl b/NDTensors/ext/NDTensorsJLArraysExt/permutedims.jl deleted file mode 100644 index 4bbd5833c4..0000000000 --- a/NDTensors/ext/NDTensorsJLArraysExt/permutedims.jl +++ /dev/null @@ -1,24 +0,0 @@ -using JLArrays: JLArray -using LinearAlgebra: Adjoint -using NDTensors.Expose: Exposed, expose, unexpose - -function Base.permutedims!( - Edest::Exposed{<:JLArray,<:Base.ReshapedArray}, Esrc::Exposed{<:JLArray}, perm -) - Aperm = permutedims(Esrc, perm) - copyto!(expose(parent(Edest)), expose(Aperm)) - return unexpose(Edest) -end - -## Found an issue in CUDA where if Edest is a reshaped{<:Adjoint} -## .= can fail. 
So instead force Esrc into the shape of parent(Edest) -function Base.permutedims!( - Edest::Exposed{<:JLArray,<:Base.ReshapedArray{<:Any,<:Any,<:Adjoint}}, - Esrc::Exposed{<:JLArray}, - perm, - f, -) - Aperm = reshape(permutedims(Esrc, perm), size(parent(Edest))) - parent(Edest) .= f.(parent(Edest), Aperm) - return unexpose(Edest) -end diff --git a/NDTensors/ext/NDTensorsMappedArraysExt/NDTensorsMappedArraysExt.jl b/NDTensors/ext/NDTensorsMappedArraysExt/NDTensorsMappedArraysExt.jl deleted file mode 100644 index 74372f3f2c..0000000000 --- a/NDTensors/ext/NDTensorsMappedArraysExt/NDTensorsMappedArraysExt.jl +++ /dev/null @@ -1,25 +0,0 @@ -module NDTensorsMappedArraysExt -using MappedArrays: AbstractMappedArray -using NDTensors: NDTensors -function NDTensors.similar(arraytype::Type{<:AbstractMappedArray}, dims::Tuple{Vararg{Int}}) - return similar(Array{eltype(arraytype)}, dims) -end -function NDTensors.similartype(storagetype::Type{<:AbstractMappedArray}) - return Array{eltype(storagetype),ndims(storagetype)} -end -function NDTensors.similartype( - storagetype::Type{<:AbstractMappedArray}, dims::Tuple{Vararg{Int}} -) - return Array{eltype(storagetype),length(dims)} -end - -using MappedArrays: ReadonlyMappedArray -using NDTensors: AllowAlias -# It is a bit unfortunate that we have to define this, it fixes an ambiguity -# error with MappedArrays. -function (arraytype::Type{ReadonlyMappedArray{T,N,A,F}} where {T,N,A<:AbstractArray,F})( - ::AllowAlias, a::AbstractArray -) - return a -end -end diff --git a/NDTensors/ext/NDTensorsMetalExt/NDTensorsMetalExt.jl b/NDTensors/ext/NDTensorsMetalExt/NDTensorsMetalExt.jl deleted file mode 100644 index 8cca86ca2a..0000000000 --- a/NDTensors/ext/NDTensorsMetalExt/NDTensorsMetalExt.jl +++ /dev/null @@ -1,12 +0,0 @@ -module NDTensorsMetalExt - -include("adapt.jl") -include("set_types.jl") -include("indexing.jl") -include("linearalgebra.jl") -include("copyto.jl") -include("append.jl") -include("permutedims.jl") -include("mul.jl") - -end diff --git a/NDTensors/ext/NDTensorsMetalExt/adapt.jl b/NDTensors/ext/NDTensorsMetalExt/adapt.jl deleted file mode 100644 index 5034c71b29..0000000000 --- a/NDTensors/ext/NDTensorsMetalExt/adapt.jl +++ /dev/null @@ -1,29 +0,0 @@ -using Adapt: Adapt, adapt -using Functors: fmap -using Metal: MtlArray, MtlVector, DefaultStorageMode -using NDTensors: NDTensors, EmptyStorage, adapt_storagetype, emptytype -using NDTensors.Expose: Exposed -using NDTensors.MetalExtensions: MetalExtensions, MtlArrayAdaptor -using NDTensors.GPUArraysCoreExtensions: GPUArraysCoreExtensions -using NDTensors.TypeParameterAccessors: set_type_parameters, type_parameters - -GPUArraysCoreExtensions.cpu(e::Exposed{<:MtlArray}) = adapt(Array, e) - -function MetalExtensions.mtl(xs; storagemode=DefaultStorageMode) - return fmap(x -> adapt(MtlArrayAdaptor{storagemode}(), x), xs) -end - -function Adapt.adapt_storage(adaptor::MtlArrayAdaptor, xs::AbstractArray) - new_parameters = (type_parameters(xs, (eltype, ndims))..., storagemode(adaptor)) - mtltype = set_type_parameters(MtlArray, (eltype, ndims, storagemode), new_parameters) - return isbits(xs) ? 
xs : adapt(mtltype, xs) -end - -function NDTensors.adapt_storagetype( - adaptor::MtlArrayAdaptor, ::Type{EmptyStorage{ElT,StoreT}} -) where {ElT,StoreT} - mtltype = set_type_parameters( - MtlVector, (eltype, storagemode), (ElT, storagemode(adaptor)) - ) - return emptytype(adapt_storagetype(mtltype, StoreT)) -end diff --git a/NDTensors/ext/NDTensorsMetalExt/append.jl b/NDTensors/ext/NDTensorsMetalExt/append.jl deleted file mode 100644 index b48d2cfd4f..0000000000 --- a/NDTensors/ext/NDTensorsMetalExt/append.jl +++ /dev/null @@ -1,10 +0,0 @@ -## Right now append! is broken on Metal because of a missing resize! function, -## but once resize! is available in the next release, this will allow append! to work on Metal. -using GPUArraysCore: @allowscalar -using Metal: MtlArray -using NDTensors.Expose: Exposed, unexpose - -## Warning: this append function uses scalar indexing and is therefore extremely slow. -function Base.append!(Ecollection::Exposed{<:MtlArray}, collections...) - return @allowscalar append!(unexpose(Ecollection), collections...) -end diff --git a/NDTensors/ext/NDTensorsMetalExt/copyto.jl b/NDTensors/ext/NDTensorsMetalExt/copyto.jl deleted file mode 100644 index 6c7aeb4b3c..0000000000 --- a/NDTensors/ext/NDTensorsMetalExt/copyto.jl +++ /dev/null @@ -1,28 +0,0 @@ -using Metal: MtlArray -using NDTensors.Expose: Exposed, expose, unexpose - -function Base.copy(src::Exposed{<:MtlArray,<:Base.ReshapedArray}) - return reshape(copy(parent(src)), size(unexpose(src))) -end - -function Base.copy( - src::Exposed{ - <:MtlArray,<:SubArray{<:Any,<:Any,<:Base.ReshapedArray{<:Any,<:Any,<:Adjoint}} - }, -) - return copy(@view copy(expose(parent(src)))[parentindices(unexpose(src))...]) -end - -# Catches a bug in `copyto!` in Metal backend. -function Base.copyto!(dest::Exposed{<:MtlArray}, src::Exposed{<:MtlArray,<:SubArray}) - copyto!(dest, expose(copy(src))) - return unexpose(dest) -end - -# Catches a bug in `copyto!` in Metal backend. -function Base.copyto!( - dest::Exposed{<:MtlArray}, src::Exposed{<:MtlArray,<:Base.ReshapedArray} -) - copyto!(dest, expose(parent(src))) - return unexpose(dest) -end diff --git a/NDTensors/ext/NDTensorsMetalExt/indexing.jl b/NDTensors/ext/NDTensorsMetalExt/indexing.jl deleted file mode 100644 index 8a37e44e05..0000000000 --- a/NDTensors/ext/NDTensorsMetalExt/indexing.jl +++ /dev/null @@ -1,18 +0,0 @@ -using Metal: MtlArray -using GPUArraysCore: @allowscalar -using LinearAlgebra: Adjoint -using NDTensors.Expose: Exposed, expose, unexpose - -function Base.getindex(E::Exposed{<:MtlArray}) - return @allowscalar unexpose(E)[] -end - -function Base.setindex!(E::Exposed{<:MtlArray}, x::Number) - @allowscalar unexpose(E)[] = x - return unexpose(E) -end - -# Shared with `CuArray`. Move to `NDTensorsGPUArraysCoreExt`?
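## The adjoint-aware `getindex` below uses the identity `(A')[i, j] == (A[j, i])'`,
## so only the parent array is indexed and the adjoint wrapper never has to be
## materialized on the GPU. Quick CPU check of the identity (illustrative):
A = randn(ComplexF64, 3, 2)
@assert (A')[2, 3] == (A[3, 2])'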
-function Base.getindex(E::Exposed{<:MtlArray,<:Adjoint}, i, j) - return (expose(parent(E))[j, i])' -end diff --git a/NDTensors/ext/NDTensorsMetalExt/linearalgebra.jl b/NDTensors/ext/NDTensorsMetalExt/linearalgebra.jl deleted file mode 100644 index 28d592506a..0000000000 --- a/NDTensors/ext/NDTensorsMetalExt/linearalgebra.jl +++ /dev/null @@ -1,49 +0,0 @@ -using Metal: MtlMatrix -using LinearAlgebra: LinearAlgebra, qr, eigen, svd -using NDTensors.Expose: qr_positive, ql_positive, ql -using NDTensors.TypeParameterAccessors: - set_type_parameters, type_parameters, unwrap_array_type - -function LinearAlgebra.qr(A::Exposed{<:MtlMatrix}) - Q, R = qr(expose(NDTensors.cpu(A))) - return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), R) -end - -function NDTensors.Expose.qr_positive(A::Exposed{<:MtlMatrix}) - Q, R = qr_positive(expose(NDTensors.cpu(A))) - return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), R) -end - -function NDTensors.Expose.ql(A::Exposed{<:MtlMatrix}) - Q, L = ql(expose(NDTensors.cpu(A))) - return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L) -end -function NDTensors.Expose.ql_positive(A::Exposed{<:MtlMatrix}) - Q, L = ql_positive(expose(NDTensors.cpu(A))) - return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L) -end - -function LinearAlgebra.eigen(A::Exposed{<:MtlMatrix}) - Dcpu, Ucpu = eigen(expose(NDTensors.cpu(A))) - D = adapt( - set_type_parameters( - unwrap_array_type(A), (eltype, ndims), type_parameters(Dcpu, (eltype, ndims)) - ), - Dcpu, - ) - U = adapt(unwrap_array_type(A), Ucpu) - return D, U -end - -function LinearAlgebra.svd(A::Exposed{<:MtlMatrix}; kwargs...) - Ucpu, Scpu, Vcpu = svd(expose(NDTensors.cpu(A)); kwargs...) - U = adapt(unwrap_array_type(A), Ucpu) - S = adapt( - set_type_parameters( - unwrap_array_type(A), (eltype, ndims), type_parameters(Scpu, (eltype, ndims)) - ), - Scpu, - ) - V = adapt(unwrap_array_type(A), Vcpu) - return U, S, V -end diff --git a/NDTensors/ext/NDTensorsMetalExt/mul.jl b/NDTensors/ext/NDTensorsMetalExt/mul.jl deleted file mode 100644 index b6e13d9e74..0000000000 --- a/NDTensors/ext/NDTensorsMetalExt/mul.jl +++ /dev/null @@ -1,42 +0,0 @@ -using Metal: MtlArray -using LinearAlgebra: LinearAlgebra, Adjoint, Transpose, mul! -# This was calling generic matrix multiplication. -# TODO: Raise an issue with `Metal.jl`. -function LinearAlgebra.mul!( - CM::Exposed{<:MtlArray,<:Transpose}, - AM::Exposed{<:MtlArray}, - BM::Exposed{<:MtlArray}, - α, - β, -) - mul!(transpose(CM), transpose(BM), transpose(AM), α, β) - return unexpose(CM) -end - -# This was calling generic matrix multiplication. -# TODO: Raise an issue with `Metal.jl`. 
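## The rewrite these `mul!` overloads use: since `(A*B)' == B'*A'`, writing
## through the adjoint of the destination with swapped, adjointed arguments
## computes the same product while dispatching to a faster non-generic kernel.
## CPU check of the identity (illustrative):
using LinearAlgebra: mul!
A, B = randn(3, 4), randn(4, 2)
C = Matrix{Float64}(undef, 3, 2)
mul!(C', B', A')  # writes C' = B'*A', i.e. C = A*B
@assert C ≈ A * B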
-function LinearAlgebra.mul!( - CM::Exposed{<:MtlArray,<:Adjoint}, AM::Exposed{<:MtlArray}, BM::Exposed{<:MtlArray}, α, β -) - mul!(CM', BM', AM', α, β) - return unexpose(CM) -end - -## Fix issue in Metal.jl where it cannot distinguish Transpose{Reshape{Adjoint{MtlArray}}} -## as a MtlArray and calls generic matmul -function LinearAlgebra.mul!( - CM::Exposed{<:MtlArray}, - AM::Exposed{<:MtlArray}, - BM::Exposed{ - <:MtlArray, - <:LinearAlgebra.Transpose{ - <:Any,<:Base.ReshapedArray{<:Any,<:Any,<:LinearAlgebra.Adjoint} - }, - }, - α, - β, -) - B = copy(expose(parent(BM))) - mul!(CM, AM, expose(transpose(B)), α, β) - return unexpose(CM) -end diff --git a/NDTensors/ext/NDTensorsMetalExt/permutedims.jl b/NDTensors/ext/NDTensorsMetalExt/permutedims.jl deleted file mode 100644 index 5af55b8eb3..0000000000 --- a/NDTensors/ext/NDTensorsMetalExt/permutedims.jl +++ /dev/null @@ -1,40 +0,0 @@ -using Metal: MtlArray -using GPUArraysCore: @allowscalar -using NDTensors.Expose: Exposed, expose, unexpose -## There's an issue in Metal where `ReshapedArray`-wrapped arrays cannot be permuted using -## permutedims (it fails because Metal falls back to scalar indexing). -## These functions address the problem in different instances of permutedims. -function Base.permutedims(E::Exposed{<:MtlArray,<:Base.ReshapedArray}, perm) - A = copy(E) - return permutedims(A, perm) -end - -function Base.permutedims!( - Edest::Exposed{<:MtlArray,<:Base.ReshapedArray}, Esrc::Exposed{<:MtlArray}, perm -) - Aperm = permutedims(Esrc, perm) - copyto!(expose(parent(Edest)), expose(Aperm)) - return unexpose(Edest) -end - -function Base.permutedims!( - Edest::Exposed{<:MtlArray}, Esrc::Exposed{<:MtlArray,<:Base.ReshapedArray}, perm -) - Aperm = permutedims(Esrc, perm) - copyto!(Edest, expose(Aperm)) - return unexpose(Edest) -end - -## To get around the Metal issue here we copy and permute Esrc, -## then we reshape Esrc to the size of Edest's parent -## and broadcast into the parent. -function Base.permutedims!( - Edest::Exposed{<:MtlArray,<:Base.ReshapedArray}, - Esrc::Exposed{<:MtlArray,<:Base.ReshapedArray}, - perm, - f, -) - Aperm = reshape(permutedims(Esrc, perm), size(parent(Edest))) - parent(Edest) .= f.(parent(Edest), Aperm) - return unexpose(Edest) -end diff --git a/NDTensors/ext/NDTensorsMetalExt/set_types.jl b/NDTensors/ext/NDTensorsMetalExt/set_types.jl deleted file mode 100644 index 761fa089d4..0000000000 --- a/NDTensors/ext/NDTensorsMetalExt/set_types.jl +++ /dev/null @@ -1,13 +0,0 @@ -using Metal: Metal, MtlArray -# `TypeParameterAccessors.jl` definitions.
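## The `position` definitions below record where a named parameter sits in a
## type, e.g. `MtlArray{T,N,S}` keeps its storage mode in the third slot. A toy
## positional lookup in plain Julia (hypothetical helper, not the real
## TypeParameterAccessors API):
param_at(T::DataType, pos::Int) = T.parameters[pos]
@assert param_at(Array{Float32,2}, 2) == 2  # `ndims` sits at position 2 of `Array`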
- -using NDTensors.TypeParameterAccessors: TypeParameterAccessors, Position -using NDTensors.GPUArraysCoreExtensions: storagemode - -function TypeParameterAccessors.position(::Type{<:MtlArray}, ::typeof(storagemode)) - return Position(3) -end - -function TypeParameterAccessors.default_type_parameters(::Type{<:MtlArray}) - return (Float32, 1, Metal.DefaultStorageMode) -end diff --git a/NDTensors/ext/NDTensorsOctavianExt/NDTensorsOctavianExt.jl b/NDTensors/ext/NDTensorsOctavianExt/NDTensorsOctavianExt.jl deleted file mode 100644 index c25dd84870..0000000000 --- a/NDTensors/ext/NDTensorsOctavianExt/NDTensorsOctavianExt.jl +++ /dev/null @@ -1,9 +0,0 @@ -module NDTensorsOctavianExt - -using NDTensors -using Octavian - -include("import.jl") -include("octavian.jl") - -end diff --git a/NDTensors/ext/NDTensorsOctavianExt/import.jl b/NDTensors/ext/NDTensorsOctavianExt/import.jl deleted file mode 100644 index d133a69f86..0000000000 --- a/NDTensors/ext/NDTensorsOctavianExt/import.jl +++ /dev/null @@ -1 +0,0 @@ -import NDTensors: _gemm!, backend_octavian diff --git a/NDTensors/ext/NDTensorsOctavianExt/octavian.jl b/NDTensors/ext/NDTensorsOctavianExt/octavian.jl deleted file mode 100644 index 2cbd6d4a10..0000000000 --- a/NDTensors/ext/NDTensorsOctavianExt/octavian.jl +++ /dev/null @@ -1,18 +0,0 @@ -function NDTensors.backend_octavian() - return NDTensors.gemm_backend[] = :Octavian -end - -function _gemm!( - ::NDTensors.GemmBackend{:Octavian}, - tA, - tB, - alpha, - A::AbstractVecOrMat, - B::AbstractVecOrMat, - beta, - C::AbstractVecOrMat, -) - return Octavian.matmul!( - C, tA == 'T' ? transpose(A) : A, tB == 'T' ? transpose(B) : B, alpha, beta - ) -end diff --git a/NDTensors/ext/NDTensorsTBLISExt/NDTensorsTBLISExt.jl b/NDTensors/ext/NDTensorsTBLISExt/NDTensorsTBLISExt.jl deleted file mode 100644 index 7edc3c78d9..0000000000 --- a/NDTensors/ext/NDTensorsTBLISExt/NDTensorsTBLISExt.jl +++ /dev/null @@ -1,10 +0,0 @@ -module NDTensorsTBLISExt - -using NDTensors -using LinearAlgebra -using TBLIS - -import NDTensors.contract! 
- -include("contract.jl") -end diff --git a/NDTensors/ext/NDTensorsTBLISExt/contract.jl b/NDTensors/ext/NDTensorsTBLISExt/contract.jl deleted file mode 100644 index 4b2661ea6f..0000000000 --- a/NDTensors/ext/NDTensorsTBLISExt/contract.jl +++ /dev/null @@ -1,43 +0,0 @@ - -function contract!( - ::Val{:TBLIS}, - R::DenseTensor{ElT}, - labelsR, - T1::DenseTensor{ElT}, - labelsT1, - T2::DenseTensor{ElT}, - labelsT2, - α::ElT, - β::ElT, -) where {ElT<:LinearAlgebra.BlasReal} - # TBLIS Tensors - R_tblis = TBLIS.TTensor{ElT}(array(R), β) - T1_tblis = TBLIS.TTensor{ElT}(array(T1), α) - T2_tblis = TBLIS.TTensor{ElT}(array(T2)) - - function label_to_char(label) - # Start at 'a' - char_start = Char(96) - if label < 0 - # Start at 'z' - char_start = Char(123) - end - return char_start + label - end - - function labels_to_tblis(labels) - if isempty(labels) - return "" - end - str = prod(label_to_char.(labels)) - return str - end - - labelsT1_tblis = labels_to_tblis(labelsT1) - labelsT2_tblis = labels_to_tblis(labelsT2) - labelsR_tblis = labels_to_tblis(labelsR) - - TBLIS.mul!(R_tblis, T1_tblis, T2_tblis, labelsT1_tblis, labelsT2_tblis, labelsR_tblis) - - return R -end diff --git a/NDTensors/ext/NDTensorscuTENSORExt/NDTensorscuTENSORExt.jl b/NDTensors/ext/NDTensorscuTENSORExt/NDTensorscuTENSORExt.jl deleted file mode 100644 index 5c4ba0f06c..0000000000 --- a/NDTensors/ext/NDTensorscuTENSORExt/NDTensorscuTENSORExt.jl +++ /dev/null @@ -1,3 +0,0 @@ -module NDTensorscuTENSORExt -include("contract.jl") -end diff --git a/NDTensors/ext/NDTensorscuTENSORExt/contract.jl b/NDTensors/ext/NDTensorscuTENSORExt/contract.jl deleted file mode 100644 index 0e70969a31..0000000000 --- a/NDTensors/ext/NDTensorscuTENSORExt/contract.jl +++ /dev/null @@ -1,50 +0,0 @@ -using Base: ReshapedArray -using NDTensors: NDTensors, DenseTensor, array -using NDTensors.Expose: Exposed, expose, unexpose -using cuTENSOR: cuTENSOR, CuArray, CuTensor - -# Handle cases that can't be handled by `cuTENSOR.jl` -# right now. -function to_zero_offset_cuarray(a::CuArray) - return iszero(a.offset) ? a : copy(a) -end -function to_zero_offset_cuarray(a::ReshapedArray) - return copy(expose(a)) -end - -function NDTensors.contract!( - exposedR::Exposed{<:CuArray,<:DenseTensor}, - labelsR, - exposedT1::Exposed{<:CuArray,<:DenseTensor}, - labelsT1, - exposedT2::Exposed{<:CuArray,<:DenseTensor}, - labelsT2, - α::Number=one(Bool), - β::Number=zero(Bool), -) - R, T1, T2 = unexpose.((exposedR, exposedT1, exposedT2)) - zoffR = iszero(array(R).offset) - arrayR = zoffR ? array(R) : copy(array(R)) - arrayT1 = to_zero_offset_cuarray(array(T1)) - arrayT2 = to_zero_offset_cuarray(array(T2)) - # Promote to a common type. This is needed because as of - # cuTENSOR.jl v5.4.2, cuTENSOR contraction only performs - # limited sets of type promotions of inputs, see: - # https://github.com/JuliaGPU/CUDA.jl/blob/v5.4.2/lib/cutensor/src/types.jl#L11-L19 - elt = promote_type(eltype.((arrayR, arrayT1, arrayT2))...) 
- if elt !== eltype(arrayR) - return error( - "In cuTENSOR contraction, input tensors have element types `$(eltype(arrayT1))` and `$(eltype(arrayT2))` while the output has element type `$(eltype(arrayR))`.", - ) - end - arrayT1 = convert(CuArray{elt}, arrayT1) - arrayT2 = convert(CuArray{elt}, arrayT2) - cuR = CuTensor(arrayR, collect(labelsR)) - cuT1 = CuTensor(arrayT1, collect(labelsT1)) - cuT2 = CuTensor(arrayT2, collect(labelsT2)) - cuTENSOR.mul!(cuR, cuT1, cuT2, α, β) - if !zoffR - array(R) .= cuR.data - end - return R -end diff --git a/NDTensors/src/NDTensors.jl b/NDTensors/src/NDTensors.jl deleted file mode 100644 index 7e7cffb8dc..0000000000 --- a/NDTensors/src/NDTensors.jl +++ /dev/null @@ -1,237 +0,0 @@ -module NDTensors -##################################### -# Imports and exports -# -include("imports.jl") -include("exports.jl") - -##################################### -# General functionality -# -include("default_kwargs.jl") -include("aliasstyle.jl") -include("abstractarray/set_types.jl") -include("abstractarray/to_shape.jl") -include("abstractarray/iscu.jl") -include("abstractarray/similar.jl") -include("abstractarray/mul.jl") -include("abstractarray/permutedims.jl") -include("abstractarray/generic_array_constructors.jl") -include("array/permutedims.jl") -include("array/mul.jl") -include("tupletools.jl") -include("emptynumber.jl") -include("nodata.jl") -include("tensorstorage/tensorstorage.jl") -include("tensorstorage/set_types.jl") -include("tensorstorage/default_storage.jl") -include("tensorstorage/similar.jl") -include("tensor/tensor.jl") -include("dims.jl") -include("tensor/set_types.jl") -include("tensor/similar.jl") -include("adapt.jl") -include("tensoroperations/generic_tensor_operations.jl") -include("tensoroperations/contraction_logic.jl") -include("abstractarray/tensoralgebra/contract.jl") - -##################################### -# DenseTensor and DiagTensor -# -include("dense/dense.jl") -include("dense/densetensor.jl") -include("dense/tensoralgebra/contract.jl") -include("dense/linearalgebra/decompositions.jl") -include("dense/tensoralgebra/outer.jl") -include("dense/set_types.jl") -include("dense/generic_array_constructors.jl") -include("linearalgebra/symmetric.jl") -include("linearalgebra/linearalgebra.jl") -include("diag/diag.jl") -include("diag/set_types.jl") -include("diag/diagtensor.jl") -include("diag/similar.jl") -include("diag/tensoralgebra/contract.jl") -include("diag/tensoralgebra/outer.jl") -include("combiner/combiner.jl") -include("combiner/contract.jl") -include("truncate.jl") -include("linearalgebra/svd.jl") - -##################################### -# BlockSparseTensor -# -include("blocksparse/blockdims.jl") -include("blocksparse/block.jl") -include("blocksparse/blockoffsets.jl") -include("blocksparse/blocksparse.jl") -include("blocksparse/blocksparsetensor.jl") -include("blocksparse/fermions.jl") -include("blocksparse/contract.jl") -include("blocksparse/contract_utilities.jl") -include("blocksparse/contract_generic.jl") -include("blocksparse/contract_sequential.jl") -include("blocksparse/contract_folds.jl") -include("blocksparse/contract_threads.jl") -include("blocksparse/diagblocksparse.jl") -include("blocksparse/similar.jl") -include("blocksparse/combiner.jl") -include("blocksparse/linearalgebra.jl") - -##################################### -# Empty -# -include("empty/empty.jl") -include("empty/EmptyTensor.jl") -include("empty/tensoralgebra/contract.jl") -include("empty/adapt.jl") - -##################################### -# Deprecations -# 
-include("deprecated.jl") - -##################################### -# NDTensorsNamedDimsArraysExt -# I tried putting this inside of an -# `NDTensorsNamedDimsArraysExt` module -# but for some reason it kept overloading -# `Base.similar` instead of `NDTensors.similar`. -# -include("NDTensorsNamedDimsArraysExt/NDTensorsNamedDimsArraysExt.jl") - -##################################### -# A global timer used with TimerOutputs.jl -# - -const timer = TimerOutput() - -##################################### -# Optional block sparse multithreading -# - -blas_get_num_threads() = BLAS.get_num_threads() - -const _using_threaded_blocksparse = Ref(false) - -function enable_threaded_blocksparse_docstring(module_name) - return """ - $(module_name).enable_threaded_blocksparse() - $(module_name).disable_threaded_blocksparse() - - Enable or disable block sparse multithreading. - - Returns the current state of `$(module_name).using_threaded_blocksparse()`, i.e. `true` if threaded block sparse was previously enabled, and `false` if threaded block sparse was previously disabled. This is helpful for turning block sparse threading on or off temporarily. For example: - ```julia - using_threaded_blocksparse = $(module_name).enable_threaded_blocksparse() - # Run code that you want to be threaded - if !using_threaded_blocksparse - $(module_name).disable_threaded_blocksparse() - end - ``` - - Note that you need to start Julia with multiple threads. For example, to start Julia with 4 threads, you can use any of the following: - ``` - \$ julia --threads=4 - - \$ julia -t 4 - - \$ JULIA_NUM_THREADS=4 julia - ``` - - In addition, we have found that it is best to disable `BLAS` and `Strided` multithreading when using block sparse multithreading. You can do that with the commands `using LinearAlgebra; BLAS.set_num_threads(1)` and `$(module_name).Strided.disable_threads()`. - - See also: `$(module_name).enable_threaded_blocksparse`, `$(module_name).disable_threaded_blocksparse`, `$(module_name).using_threaded_blocksparse`. - """ -end - -function _enable_threaded_blocksparse() - current_using_threaded_blocksparse = using_threaded_blocksparse() - if !current_using_threaded_blocksparse - if Threads.nthreads() == 1 - println( - "WARNING: You are trying to enable block sparse multithreading, but you have started Julia with only a single thread. You can start Julia with `N` threads with `julia -t N`, and check the number of threads Julia can use with `Threads.nthreads()`. Your system has $(Sys.CPU_THREADS) threads available to use, which you can determine by running `Sys.CPU_THREADS`.\n", - ) - end - if BLAS.get_num_threads() > 1 && Threads.nthreads() > 1 - println( - "WARNING: You are enabling block sparse multithreading, but your BLAS configuration $(BLAS.get_config()) is currently set to use $(BLAS.get_num_threads()) threads. When using block sparse multithreading, we recommend setting BLAS to use only a single thread, otherwise you may see suboptimal performance. You can set it with `using LinearAlgebra; BLAS.set_num_threads(1)`.\n", - ) - end - if Strided.get_num_threads() > 1 - println( - "WARNING: You are enabling block sparse multithreading, but Strided.jl is currently set to use $(Strided.get_num_threads()) threads for performing dense tensor permutations. When using block sparse multithreading, we recommend setting Strided.jl to use only a single thread, otherwise you may see suboptimal performance. 
You can set it with `NDTensors.Strided.disable_threads()` and see the current number of threads it is using with `NDTensors.Strided.get_num_threads()`.\n", - ) - end - _using_threaded_blocksparse[] = true - end - return current_using_threaded_blocksparse -end - -function _disable_threaded_blocksparse() - current_using_threaded_blocksparse = using_threaded_blocksparse() - if current_using_threaded_blocksparse - _using_threaded_blocksparse[] = false - end - return current_using_threaded_blocksparse -end - -""" -$(enable_threaded_blocksparse_docstring(@__MODULE__)) -""" -using_threaded_blocksparse() = _using_threaded_blocksparse[] - -""" -$(enable_threaded_blocksparse_docstring(@__MODULE__)) -""" -enable_threaded_blocksparse() = _enable_threaded_blocksparse() - -""" -$(enable_threaded_blocksparse_docstring(@__MODULE__)) -""" -disable_threaded_blocksparse() = _disable_threaded_blocksparse() - -##################################### -# Optional auto fermion system -# - -const _using_auto_fermion = Ref(false) - -using_auto_fermion() = _using_auto_fermion[] - -function enable_auto_fermion() - _using_auto_fermion[] = true - return nothing -end - -function disable_auto_fermion() - _using_auto_fermion[] = false - return nothing -end - -##################################### -# Optional backends -# - -const _using_tblis = Ref(false) - -using_tblis() = _using_tblis[] - -function enable_tblis() - _using_tblis[] = true - return nothing -end - -function disable_tblis() - _using_tblis[] = false - return nothing -end - -function backend_octavian end - -using PackageExtensionCompat -function __init__() - @require_extensions -end - -end # module NDTensors diff --git a/NDTensors/src/NDTensorsNamedDimsArraysExt/NDTensorsNamedDimsArraysExt.jl b/NDTensors/src/NDTensorsNamedDimsArraysExt/NDTensorsNamedDimsArraysExt.jl deleted file mode 100644 index 2f389fc13a..0000000000 --- a/NDTensors/src/NDTensorsNamedDimsArraysExt/NDTensorsNamedDimsArraysExt.jl +++ /dev/null @@ -1,6 +0,0 @@ -# I tried putting this inside of an -# `NDTensorsNamedDimsArraysExt` module -# but for some reason it kept overloading -# `Base.similar` instead of `NDTensors.similar`. -include("similar.jl") -include("fill.jl") diff --git a/NDTensors/src/NDTensorsNamedDimsArraysExt/fill.jl b/NDTensors/src/NDTensorsNamedDimsArraysExt/fill.jl deleted file mode 100644 index cc1b70e056..0000000000 --- a/NDTensors/src/NDTensorsNamedDimsArraysExt/fill.jl +++ /dev/null @@ -1 +0,0 @@ -fill!!(a::NamedDimsArrays.NamedDimsArray, α) = fill!(a, α) diff --git a/NDTensors/src/NDTensorsNamedDimsArraysExt/similar.jl b/NDTensors/src/NDTensorsNamedDimsArraysExt/similar.jl deleted file mode 100644 index dfaa29b5db..0000000000 --- a/NDTensors/src/NDTensorsNamedDimsArraysExt/similar.jl +++ /dev/null @@ -1,5 +0,0 @@ -# I tried putting this inside the `NamedDimsArrays` module -# but for some reason it kept overloading `Base.similar`. 
-# NDTensors.similar similar(a::NamedDimsArrays.AbstractNamedDimsArray) = Base.similar(a) -similar(a::NamedDimsArrays.AbstractNamedDimsArray, elt::Type) = Base.similar(a, elt) diff --git a/NDTensors/src/abstractarray/generic_array_constructors.jl b/NDTensors/src/abstractarray/generic_array_constructors.jl deleted file mode 100644 index 40f34dd6b8..0000000000 --- a/NDTensors/src/abstractarray/generic_array_constructors.jl +++ /dev/null @@ -1,41 +0,0 @@ -using .TypeParameterAccessors: - unwrap_array_type, specify_default_type_parameters, type_parameter - -# Convert to Array, avoiding copying if possible -array(a::AbstractArray) = a -matrix(a::AbstractMatrix) = a -vector(a::AbstractVector) = a - -## Warning: to use these functions it is necessary to define `TypeParameterAccessors.position(::Type{<:YourArrayType}, ::typeof(ndims))` -# Implementation, catches if `ndims(arraytype) != length(dims)`. -## TODO convert ndims to `type_parameter(::, typeof(ndims))` -function generic_randn(arraytype::Type{<:AbstractArray}, dims...; rng=Random.default_rng()) - arraytype_specified = specify_type_parameter( - unwrap_array_type(arraytype), ndims, length(dims) - ) - arraytype_specified = specify_default_type_parameters(arraytype_specified) - @assert length(dims) == ndims(arraytype_specified) - data = similar(arraytype_specified, dims...) - return randn!(rng, data) -end - -function generic_randn( - arraytype::Type{<:AbstractArray}, dims::Tuple; rng=Random.default_rng() -) - return generic_randn(arraytype, dims...; rng) -end - -# Implementation, catches if `ndims(arraytype) != length(dims)`. -function generic_zeros(arraytype::Type{<:AbstractArray}, dims...) - arraytype_specified = specify_type_parameter( - unwrap_array_type(arraytype), ndims, length(dims) - ) - arraytype_specified = specify_default_type_parameters(arraytype_specified) - @assert length(dims) == ndims(arraytype_specified) - ElT = eltype(arraytype_specified) - return fill!(similar(arraytype_specified, dims...), zero(ElT)) -end - -function generic_zeros(arraytype::Type{<:AbstractArray}, dims::Tuple) - return generic_zeros(arraytype, dims...) -end diff --git a/NDTensors/src/abstractarray/iscu.jl b/NDTensors/src/abstractarray/iscu.jl deleted file mode 100644 index 6bbe96a0ad..0000000000 --- a/NDTensors/src/abstractarray/iscu.jl +++ /dev/null @@ -1,7 +0,0 @@ -using .TypeParameterAccessors: unwrap_array_type -# TODO: Make `isgpu`, `ismtl`, etc. -# For `isgpu`, will require a `NDTensorsGPUArrayCoreExt`. -iscu(A::AbstractArray) = iscu(typeof(A)) -function iscu(A::Type{<:AbstractArray}) - return (unwrap_array_type(A) == A ? false : iscu(unwrap_array_type(A))) -end diff --git a/NDTensors/src/abstractarray/mul.jl b/NDTensors/src/abstractarray/mul.jl deleted file mode 100644 index ff5841f189..0000000000 --- a/NDTensors/src/abstractarray/mul.jl +++ /dev/null @@ -1,12 +0,0 @@ -function mul!!(CM::AbstractArray, AM::AbstractArray, BM::AbstractArray, α, β) - CM = mul!(expose(CM), expose(AM), expose(BM), α, β) - return CM -end - -## TODO There is an issue in CUDA.jl -## When all are transpose CUDA.mul!
isn't being -## Called correctly in `NDTensorsCUDAExt` -function mul!!(CM::Transpose, AM::Transpose, BM::Transpose, α, β) - CM = mul!!(parent(CM), parent(BM), parent(AM), α, β) - return CM -end diff --git a/NDTensors/src/abstractarray/permutedims.jl b/NDTensors/src/abstractarray/permutedims.jl deleted file mode 100644 index 7fb99847fd..0000000000 --- a/NDTensors/src/abstractarray/permutedims.jl +++ /dev/null @@ -1,9 +0,0 @@ -function permutedims!!(B::AbstractArray, A::AbstractArray, perm) - permutedims!(expose(B), expose(A), perm) - return B -end - -function permutedims!!(B::AbstractArray, A::AbstractArray, perm, f) - permutedims!(expose(B), expose(A), perm, f) - return B -end diff --git a/NDTensors/src/abstractarray/set_types.jl b/NDTensors/src/abstractarray/set_types.jl deleted file mode 100644 index 823c2fa11e..0000000000 --- a/NDTensors/src/abstractarray/set_types.jl +++ /dev/null @@ -1,17 +0,0 @@ -using .TypeParameterAccessors: TypeParameterAccessors - -""" -# Do we still want to define things like this? -TODO: Use `Accessors.jl` notation: -```julia -@set eltype(arraytype) = eltype -``` -""" -# This is for uniform `Diag` storage which uses -# a Number as the data type. -# TODO: Delete this when we change to using a -# `FillArray` instead. This is a stand-in -# to make things work with the current design. -function TypeParameterAccessors.set_ndims(numbertype::Type{<:Number}, ndims) - return numbertype -end diff --git a/NDTensors/src/abstractarray/similar.jl b/NDTensors/src/abstractarray/similar.jl deleted file mode 100644 index 440b1b86b8..0000000000 --- a/NDTensors/src/abstractarray/similar.jl +++ /dev/null @@ -1,99 +0,0 @@ -using Base: DimOrInd, Dims, OneTo -using .TypeParameterAccessors: IsWrappedArray, unwrap_array_type, set_eltype, similartype - -## Custom `NDTensors.similar` implementation. -## More extensive than `Base.similar`. - -# This function actually allocates the data. -# NDTensors.similar -function similar(arraytype::Type{<:AbstractArray}, dims::Tuple) - shape = NDTensors.to_shape(arraytype, dims) - return similartype(arraytype, shape)(undef, NDTensors.to_shape(arraytype, shape)) -end - -# This function actually allocates the data. -# Catches conversions of dimensions specified by ranges -# dimensions specified by integers with `Base.to_shape`. -# NDTensors.similar -function similar(arraytype::Type{<:AbstractArray}, dims::Dims) - return similartype(arraytype, dims)(undef, dims) -end - -# NDTensors.similar -function similar(arraytype::Type{<:AbstractArray}, dims::DimOrInd...) - return similar(arraytype, NDTensors.to_shape(dims)) -end - -# Handles range inputs, `Base.to_shape` converts them to integer dimensions. -# See Julia's `base/abstractarray.jl`. -# NDTensors.similar -function similar( - arraytype::Type{<:AbstractArray}, - shape::Tuple{Union{Integer,OneTo},Vararg{Union{Integer,OneTo}}}, -) - return NDTensors.similar(arraytype, NDTensors.to_shape(shape)) -end - -# NDTensors.similar -function similar(arraytype::Type{<:AbstractArray}, eltype::Type, dims::Tuple) - return NDTensors.similar(similartype(arraytype, eltype, dims), dims) -end - -# TODO: Add an input `structure` which can store things like the nonzero -# structure of a sparse/block sparse tensor. -# NDTensors.similar -# function similar(arraytype::Type{<:AbstractArray}, structure) -# return NDTensors.similar(similartype(arraytype, structure), structure) -# end - -# TODO: Add an input `structure` which can store things like the nonzero -# structure of a sparse/block sparse tensor. 
-# NDTensors.similar -# function similar(arraytype::Type{<:AbstractArray}, eltype::Type, structure) -# return NDTensors.similar(similartype(arraytype, eltype, structure), structure) -# end - -# TODO: Add an input `structure` which can store things like the nonzero -# structure of a sparse/block sparse tensor. -# NDTensors.similar -# function similar(arraytype::Type{<:AbstractArray}, structure, dims::Tuple) -# return NDTensors.similar(similartype(arraytype, structure, dims), structure, dims) -# end - -# TODO: Add an input `structure` which can store things like the nonzero -# structure of a sparse/block sparse tensor. -# NDTensors.similar -# function similar(arraytype::Type{<:AbstractArray}, eltype::Type, structure, dims::Tuple) -# return NDTensors.similar(similartype(arraytype, eltype, structure, dims), structure, dims) -# end - -# TODO: Maybe makes an empty array, i.e. `similartype(arraytype, eltype)()`? -# NDTensors.similar -function similar(arraytype::Type{<:AbstractArray}, eltype::Type) - return error("Must specify dimensions.") -end - -## NDTensors.similar for instances - -# NDTensors.similar -function similar(array::AbstractArray, eltype::Type, dims::Tuple) - return NDTensors.similar(similartype(typeof(array), eltype), dims) -end - -# NDTensors.similar -function similar(array::AbstractArray, eltype::Type, dims::Int) - return NDTensors.similar(similartype(typeof(array), eltype), dims) -end - -# NDTensors.similar -similar(array::AbstractArray, dims::Tuple) = NDTensors.similar(typeof(array), dims) - -# Use the `size` to determine the dimensions -# NDTensors.similar -function similar(array::AbstractArray, eltype::Type) - return NDTensors.similar(typeof(array), eltype, size(array)) -end - -# Use the `size` to determine the dimensions -# NDTensors.similar -similar(array::AbstractArray) = NDTensors.similar(typeof(array), size(array)) diff --git a/NDTensors/src/abstractarray/tensoralgebra/contract.jl b/NDTensors/src/abstractarray/tensoralgebra/contract.jl deleted file mode 100644 index 160373832a..0000000000 --- a/NDTensors/src/abstractarray/tensoralgebra/contract.jl +++ /dev/null @@ -1,185 +0,0 @@ -using LinearAlgebra: BlasFloat -using .Expose: expose - -# TODO: Delete these exports -export backend_auto, backend_blas, backend_generic - -@eval struct GemmBackend{T} - (f::Type{<:GemmBackend})() = $(Expr(:new, :f)) -end -GemmBackend(s) = GemmBackend{Symbol(s)}() -macro GemmBackend_str(s) - return :(GemmBackend{$(Expr(:quote, Symbol(s)))}) -end - -const gemm_backend = Ref(:Auto) -function backend_auto() - return gemm_backend[] = :Auto -end -function backend_blas() - return gemm_backend[] = :BLAS -end -function backend_generic() - return gemm_backend[] = :Generic -end - -@inline function auto_select_backend( - ::Type{<:StridedVecOrMat{<:BlasFloat}}, - ::Type{<:StridedVecOrMat{<:BlasFloat}}, - ::Type{<:StridedVecOrMat{<:BlasFloat}}, -) - return GemmBackend(:BLAS) -end - -@inline function auto_select_backend( - ::Type{<:AbstractVecOrMat}, ::Type{<:AbstractVecOrMat}, ::Type{<:AbstractVecOrMat} -) - return GemmBackend(:Generic) -end - -function _gemm!( - tA, tB, alpha, A::TA, B::TB, beta, C::TC -) where {TA<:AbstractVecOrMat,TB<:AbstractVecOrMat,TC<:AbstractVecOrMat} - if gemm_backend[] == :Auto - _gemm!(auto_select_backend(TA, TB, TC), tA, tB, alpha, A, B, beta, C) - else - _gemm!(GemmBackend(gemm_backend[]), tA, tB, alpha, A, B, beta, C) - end -end - -# BLAS matmul -function _gemm!( - ::GemmBackend{:BLAS}, - tA, - tB, - alpha, - A::AbstractVecOrMat, - B::AbstractVecOrMat, - beta, - 
C::AbstractVecOrMat, -) - #@timeit_debug timer "BLAS.gemm!" begin - return BLAS.gemm!(tA, tB, alpha, A, B, beta, C) - #end # @timeit -end - -# generic matmul -function _gemm!( - ::GemmBackend{:Generic}, - tA, - tB, - alpha::AT, - A::AbstractVecOrMat, - B::AbstractVecOrMat, - beta::BT, - C::AbstractVecOrMat, -) where {AT,BT} - mul!( - expose(C), - expose(tA == 'T' ? transpose(A) : A), - expose(tB == 'T' ? transpose(B) : B), - alpha, - beta, - ) - return C -end - -# Non-trivial permutation -function _contract_scalar_perm!( - Rᵃ::AbstractArray{ElR}, Tᵃ::AbstractArray, perm, α, β=zero(ElR) -) where {ElR} - if iszero(β) - if iszero(α) - fill!(Rᵃ, 0) - else - Rᵃ = permutedims!!(Rᵃ, Tᵃ, perm, (r, t) -> α * t) - end - elseif isone(β) - if iszero(α) - # Rᵃ .= Rᵃ - # No-op - else - Rᵃ = permutedims!!(Rᵃ, Tᵃ, perm, (r, t) -> r + α * t) - end - else - if iszero(α) - # Rᵃ .= β .* Rᵃ - LinearAlgebra.scal!(length(Rᵃ), β, Rᵃ, 1) - else - Rᵃ .= α .* permutedims(expose(Tᵃ), perm) .+ β .* Rᵃ - end - end - return Rᵃ -end - -function _contract!( - CT::AbstractArray{El,NC}, - AT::AbstractArray{El,NA}, - BT::AbstractArray{El,NB}, - props::ContractionProperties, - α::Number=one(El), - β::Number=zero(El), -) where {El,NC,NA,NB} - tA = 'N' - if props.permuteA - #@timeit_debug timer "_contract!: permutedims A" begin - Ap = permutedims(expose(AT), props.PA) - #end # @timeit - AM = transpose(reshape(Ap, (props.dmid, props.dleft))) - else - # A doesn't have to be permuted - if Atrans(props) - AM = transpose(reshape(AT, (props.dmid, props.dleft))) - else - AM = reshape(AT, (props.dleft, props.dmid)) - end - end - - tB = 'N' - if props.permuteB - #@timeit_debug timer "_contract!: permutedims B" begin - Bp = permutedims(expose(BT), props.PB) - #end # @timeit - BM = reshape(Bp, (props.dmid, props.dright)) - else - if Btrans(props) - BM = transpose(reshape(BT, (props.dright, props.dmid))) - else - BM = reshape(BT, (props.dmid, props.dright)) - end - end - - # TODO: this logic may be wrong - if props.permuteC - # if we are computing C = α * A B + β * C - # we need to make sure C is permuted to the same - # ordering as A B which is the inverse of props.PC - if β ≠ 0 - CM = reshape(permutedims(expose(CT), invperm(props.PC)), (props.dleft, props.dright)) - else - # Need to copy here since we will be permuting - # into C later - CM = reshape(copy(CT), (props.dleft, props.dright)) - end - else - if Ctrans(props) - CM = transpose(reshape(CT, (props.dright, props.dleft))) - else - CM = reshape(CT, (props.dleft, props.dright)) - end - end - - #tC = similar(CM) - #_gemm!(tA, tB, El(α), AM, BM, El(β), CM) - CM = mul!!(CM, AM, BM, El(α), El(β)) - - if props.permuteC - Cr = reshape(CM, props.newCrange) - # TODO: use invperm(pC) here? - #@timeit_debug timer "_contract!: permutedims C" begin - CT .= permutedims(expose(Cr), props.PC) - #end # @timeit - end - - return CT -end diff --git a/NDTensors/src/abstractarray/to_shape.jl b/NDTensors/src/abstractarray/to_shape.jl deleted file mode 100644 index fc3f4a0edd..0000000000 --- a/NDTensors/src/abstractarray/to_shape.jl +++ /dev/null @@ -1,15 +0,0 @@ -# Like `Base.to_shape` but more general, can return -# `Index`, etc. Customize for an array/tensor -# with custom index types. -# NDTensors.to_shape -function to_shape(arraytype::Type{<:AbstractArray}, dims::Tuple) - return NDTensors.to_shape(dims) -end -# NDTensors.to_shape -to_shape(dims) = Base.to_shape(dims) - -# NDTensors.to_shape overloads for block dimensions.
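## The blocked-dimension overloads below collapse a dimension given as a vector
## of block sizes into its total extent. Plain-Julia sketch of the reduction
## (illustrative):
blockdims = ([2, 3], [1, 4])           # two dimensions, each split into blocks
@assert map(sum, blockdims) == (5, 5)  # total extents, matching `to_shape`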
-to_shape(dims::Tuple{Vararg{Vector{<:Integer}}}) = map(to_shape, dims) -# Each dimension. -# NDTensors.to_shape -to_shape(i::Vector{<:Integer}) = sum(to_shape, i) diff --git a/NDTensors/src/adapt.jl b/NDTensors/src/adapt.jl deleted file mode 100644 index df5770224f..0000000000 --- a/NDTensors/src/adapt.jl +++ /dev/null @@ -1,37 +0,0 @@ -using .GPUArraysCoreExtensions: GPUArraysCoreExtensions -adapt_structure(to, x::TensorStorage) = setdata(x, adapt(to, data(x))) -adapt_structure(to, x::Tensor) = setstorage(x, adapt(to, storage(x))) - -function GPUArraysCoreExtensions.cpu(eltype::Type{<:Number}, x) - return fmap(x -> adapt(Array{eltype}, x), x) -end -GPUArraysCoreExtensions.cpu(x) = fmap(x -> adapt(Array, x), x) - -adapt_structure(to::Type{<:Number}, x::TensorStorage) = setdata(x, convert.(to, data(x))) - -convert_scalartype(eltype::Type{<:Number}, x) = fmap(x -> adapt(eltype, x), x) - -single_precision(::Type{Float32}) = Float32 -single_precision(::Type{Float64}) = Float32 -single_precision(eltype::Type{<:Complex}) = Complex{single_precision(real(eltype))} - -single_precision(x) = fmap(x -> adapt(single_precision(eltype(x)), x), x) - -double_precision(::Type{Float32}) = Float64 -double_precision(::Type{Float64}) = Float64 -double_precision(eltype::Type{<:Complex}) = Complex{double_precision(real(eltype))} - -double_precision(x) = fmap(x -> adapt(double_precision(eltype(x)), x), x) - -# -# Used to adapt `EmptyStorage` types -# - -using .TypeParameterAccessors: specify_type_parameter, specify_type_parameters -function adapt_storagetype(to::Type{<:AbstractVector}, x::Type{<:TensorStorage}) - return set_datatype(x, specify_type_parameter(to, eltype, eltype(x))) -end - -function adapt_storagetype(to::Type{<:AbstractArray}, x::Type{<:TensorStorage}) - return set_datatype(x, specify_type_parameter(to, (ndims, eltype), (1, eltype(x)))) -end diff --git a/NDTensors/src/aliasstyle.jl b/NDTensors/src/aliasstyle.jl deleted file mode 100644 index 08225d9531..0000000000 --- a/NDTensors/src/aliasstyle.jl +++ /dev/null @@ -1,34 +0,0 @@ -""" - AliasStyle - -A trait that determines the aliasing behavior of a constructor or function, -for example whether or not a function or constructor might return an alias -of one of the inputs (i.e. the output shares memory with one of the inputs, -such that modifying the output also modifies the input or vice versa). - -See also [`AllowAlias`](@ref) and [`NeverAlias`](@ref). -""" -abstract type AliasStyle end - -""" - AllowAlias - -Singleton type used in a constructor or function indicating -that the constructor or function may return an alias of the input data when -possible, i.e. the output may share data with the input. For a constructor -`T(AllowAlias(), args...)`, this would act like `Base.convert(T, args...)`. - -See also [`AliasStyle`](@ref) and [`NeverAlias`](@ref). -""" -struct AllowAlias <: AliasStyle end - -""" - NeverAlias - -Singleton type used in a constructor or function indicating -that the constructor or function will never return an alias of the input data, -i.e. the output will never share data with one of the inputs. - -See also [`AliasStyle`](@ref) and [`AllowAlias`](@ref). 
-""" -struct NeverAlias <: AliasStyle end diff --git a/NDTensors/src/array/mul.jl b/NDTensors/src/array/mul.jl deleted file mode 100644 index d66c38f4fc..0000000000 --- a/NDTensors/src/array/mul.jl +++ /dev/null @@ -1,4 +0,0 @@ -function mul!(CM::Exposed{<:Array}, AM::Exposed{<:Array}, BM::Exposed{<:Array}, α, β) - @strided mul!(unexpose(CM), unexpose(AM), unexpose(BM), α, β) - return unexpose(CM) -end diff --git a/NDTensors/src/array/permutedims.jl b/NDTensors/src/array/permutedims.jl deleted file mode 100644 index 1a2875d9da..0000000000 --- a/NDTensors/src/array/permutedims.jl +++ /dev/null @@ -1,24 +0,0 @@ -using .Expose: Exposed, unexpose - -# TODO: Move to `Expose` module. -# Create the Exposed version of Base.permutedims -function permutedims(E::Exposed{<:Array}, perm) - ## Creating Mperm here to evaluate the permutation and - ## avoid returning a Stridedview - @strided Mperm = permutedims(unexpose(E), perm) - return Mperm -end - -function permutedims!(Edest::Exposed{<:Array}, Esrc::Exposed{<:Array}, perm) - a_dest = unexpose(Edest) - a_src = unexpose(Esrc) - @strided a_dest .= permutedims(a_src, perm) - return a_dest -end - -function permutedims!(Edest::Exposed{<:Array}, Esrc::Exposed{<:Array}, perm, f) - a_dest = unexpose(Edest) - a_src = unexpose(Esrc) - @strided a_dest .= f.(a_dest, permutedims(a_src, perm)) - return a_dest -end diff --git a/NDTensors/src/backup/arraystorage/arraystorage/storage/arraystorage.jl b/NDTensors/src/backup/arraystorage/arraystorage/storage/arraystorage.jl deleted file mode 100644 index 60a48bda3e..0000000000 --- a/NDTensors/src/backup/arraystorage/arraystorage/storage/arraystorage.jl +++ /dev/null @@ -1,73 +0,0 @@ -using .BlockSparseArrays: BlockSparseArray -using .DiagonalArrays: DiagonalArray - -# Used for dispatch to distinguish from Tensors wrapping TensorStorage. -# Remove once TensorStorage is removed. -const ArrayStorage{T,N} = Union{ - Array{T,N}, - ReshapedArray{T,N}, - SubArray{T,N}, - PermutedDimsArray{T,N}, - StridedView{T,N}, - DiagonalArray{T,N}, - BlockSparseArray{T,N}, - CombinerArray{N}, -} - -const MatrixStorage{T} = Union{ - ArrayStorage{T,2}, - Transpose{T}, - Adjoint{T}, - Symmetric{T}, - Hermitian{T}, - UpperTriangular{T}, - LowerTriangular{T}, - UnitUpperTriangular{T}, - UnitLowerTriangular{T}, - Diagonal{T}, -} - -const MatrixOrArrayStorage{T} = Union{MatrixStorage{T},ArrayStorage{T}} - -# TODO: Delete this, it is a hack to decide -# if an Index is blocked. -function is_blocked_ind(i) - return try - blockdim(i, 1) - true - catch - false - end -end - -# TODO: Delete once `TensorStorage` is removed. -function to_axes(inds::Tuple) - if any(is_blocked_ind, inds) - return BlockArrays.blockedrange.(map(i -> [blockdim(i, b) for b in 1:nblocks(i)], inds)) - else - return Base.OneTo.(dim.(inds)) - end -end - -# TODO: Delete once `Dense` is removed. -function to_arraystorage(x::DenseTensor) - return tensor(reshape(data(x), size(x)), inds(x)) -end - -# TODO: Delete once `Diag` is removed. -function to_arraystorage(x::DiagTensor) - return tensor(DiagonalArray(data(x), size(x)), inds(x)) -end - -# TODO: Delete once `BlockSparse` is removed. 
-function to_arraystorage(x::BlockSparseTensor) - blockinds = map(i -> [blockdim(i, b) for b in 1:nblocks(i)], inds(x)) - blocktype = set_ndims(datatype(x), ndims(x)) - # TODO: Make a simpler constructor: - # BlockSparseArray(blocktype, blockinds) - arraystorage = BlockSparseArray{eltype(x),ndims(x),blocktype}(blockinds) - for b in nzblocks(x) - arraystorage[BlockArrays.Block(Tuple(b)...)] = x[b] - end - return tensor(arraystorage, inds(x)) -end diff --git a/NDTensors/src/backup/arraystorage/arraystorage/storage/conj.jl b/NDTensors/src/backup/arraystorage/arraystorage/storage/conj.jl deleted file mode 100644 index 51c6a39985..0000000000 --- a/NDTensors/src/backup/arraystorage/arraystorage/storage/conj.jl +++ /dev/null @@ -1,2 +0,0 @@ -conj(as::AliasStyle, A::AbstractArray) = conj(A) -conj(as::AllowAlias, A::Array{<:Real}) = A diff --git a/NDTensors/src/backup/arraystorage/arraystorage/storage/contract.jl b/NDTensors/src/backup/arraystorage/arraystorage/storage/contract.jl deleted file mode 100644 index 39c9696b48..0000000000 --- a/NDTensors/src/backup/arraystorage/arraystorage/storage/contract.jl +++ /dev/null @@ -1,61 +0,0 @@ -# Generic AbstractArray code -function contract( - array1::MatrixOrArrayStorage, - labels1, - array2::MatrixOrArrayStorage, - labels2, - labelsR=contract_labels(labels1, labels2); - kwargs..., -) - output_array = contraction_output(array1, labels1, array2, labels2, labelsR) - contract!(output_array, labelsR, array1, labels1, array2, labels2; kwargs...) - return output_array -end - -function contraction_output( - array1::MatrixOrArrayStorage, array2::MatrixOrArrayStorage, indsR -) - arraytypeR = contraction_output_type(typeof(array1), typeof(array2), indsR) - return NDTensors.similar(arraytypeR, indsR) -end - -function contraction_output_type( - arraytype1::Type{<:MatrixOrArrayStorage}, arraytype2::Type{<:MatrixOrArrayStorage}, inds -) - return similartype(promote_type(arraytype1, arraytype2), inds) -end - -function contraction_output( - array1::MatrixOrArrayStorage, - labelsarray1, - array2::MatrixOrArrayStorage, - labelsarray2, - labelsoutput_array, -) - # TODO: Maybe use `axes` here to be more generic, for example for BlockArrays? - indsoutput_array = contract_inds( - size(array1), labelsarray1, size(array2), labelsarray2, labelsoutput_array - ) - output_array = contraction_output(array1, array2, indsoutput_array) - return output_array -end - -# Required interface for specific AbstractArray types -# TODO: Define `default_α` and `default_β`. -# TODO: Define this as a `ttgt` or `matricize` backend. -function contract!( - array_dest::MatrixOrArrayStorage, - labels_dest, - array1::MatrixOrArrayStorage, - labels1, - array2::MatrixOrArrayStorage, - labels2, - α=one(eltype(array_dest)), - β=zero(eltype(array_dest)); -) - props = ContractionProperties(labels1, labels2, labels_dest) - compute_contraction_properties!(props, array1, array2, array_dest) - # TODO: Change this to just `contract!`, or maybe `contract_ttgt!`? 
- _contract!(array_dest, array1, array2, props, α, β) - return array_dest -end diff --git a/NDTensors/src/backup/arraystorage/arraystorage/storage/permutedims.jl b/NDTensors/src/backup/arraystorage/arraystorage/storage/permutedims.jl deleted file mode 100644 index e44f502f9f..0000000000 --- a/NDTensors/src/backup/arraystorage/arraystorage/storage/permutedims.jl +++ /dev/null @@ -1,6 +0,0 @@ -function permutedims!( - output_array::MatrixOrArrayStorage, array::MatrixOrArrayStorage, perm, f::Function -) - output_array = permutedims!!(output_array, array, perm, f) - return output_array -end diff --git a/NDTensors/src/backup/arraystorage/arraystorage/tensor/arraystorage.jl b/NDTensors/src/backup/arraystorage/arraystorage/tensor/arraystorage.jl deleted file mode 100644 index d04fe8d65c..0000000000 --- a/NDTensors/src/backup/arraystorage/arraystorage/tensor/arraystorage.jl +++ /dev/null @@ -1,26 +0,0 @@ -const ArrayStorageTensor{T,N,S,I} = Tensor{T,N,S,I} where {S<:ArrayStorage{T,N}} -const MatrixStorageTensor{T,S,I} = Tensor{T,2,S,I} where {S<:MatrixStorage{T}} -const MatrixOrArrayStorageTensor{T,S,I} = - Tensor{T,N,S,I} where {N,S<:MatrixOrArrayStorage{T}} - -function Tensor(storage::MatrixOrArrayStorageTensor, inds::Tuple) - return Tensor(NeverAlias(), storage, inds) -end - -function Tensor(as::AliasStyle, storage::MatrixOrArrayStorage, inds::Tuple) - return Tensor{eltype(storage),length(inds),typeof(storage),typeof(inds)}( - as, storage, inds - ) -end - -array(tensor::MatrixOrArrayStorageTensor) = storage(tensor) - -# Linear algebra (matrix algebra) -function Base.adjoint(tens::MatrixStorageTensor) - return tensor(adjoint(storage(tens)), reverse(inds(tens))) -end - -# Linear algebra (matrix algebra) -function LinearAlgebra.Hermitian(tens::MatrixStorageTensor) - return tensor(Hermitian(storage(tens)), inds(tens)) -end diff --git a/NDTensors/src/backup/arraystorage/arraystorage/tensor/contract.jl b/NDTensors/src/backup/arraystorage/arraystorage/tensor/contract.jl deleted file mode 100644 index 309893bc31..0000000000 --- a/NDTensors/src/backup/arraystorage/arraystorage/tensor/contract.jl +++ /dev/null @@ -1,31 +0,0 @@ -# TODO: Just call `contraction_output(storage(tensor1), storage(tensor2), indsR)` -function contraction_output( - tensor1::MatrixOrArrayStorageTensor, tensor2::MatrixOrArrayStorageTensor, indsR -) - tensortypeR = contraction_output_type(typeof(tensor1), typeof(tensor2), indsR) - return NDTensors.similar(tensortypeR, indsR) -end - -# TODO: Define `default_α` and `default_β`. -function contract!( - tensor_dest::MatrixOrArrayStorageTensor, - labels_dest, - tensor1::MatrixOrArrayStorageTensor, - labels1, - tensor2::MatrixOrArrayStorageTensor, - labels2, - α=one(eltype(tensor_dest)), - β=zero(eltype(tensor_dest)); -) - contract!( - storage(tensor_dest), - labels_dest, - storage(tensor1), - labels1, - storage(tensor2), - labels2, - α, - β, - ) - return tensor_dest -end diff --git a/NDTensors/src/backup/arraystorage/arraystorage/tensor/eigen.jl b/NDTensors/src/backup/arraystorage/arraystorage/tensor/eigen.jl deleted file mode 100644 index 992c005d13..0000000000 --- a/NDTensors/src/backup/arraystorage/arraystorage/tensor/eigen.jl +++ /dev/null @@ -1,67 +0,0 @@ -# TODO: Rewrite this function to be more modern: -# 1. List keyword arguments in function signature. -# 2. Output `Spectrum` as a keyword argument that gets overwritten. -# 3. Make this into two layers, one that handles indices and one that works with `AbstractMatrix`. 
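# A minimal sketch of the TTGT ("transpose-transpose-GEMM-transpose")
# strategy that `ContractionProperties` and `_contract!` above implement:
# permute and reshape each operand into a matrix, run one matrix multiply,
# then permute the result back. A standalone toy example with made-up sizes,
# using only `Base` operations:
A = randn(2, 3, 4)                              # labels (i, j, k)
B = randn(4, 3, 5)                              # labels (k, j, l)
AM = reshape(A, 2, 12)                          # (i) × (j, k): already in (left, mid) order
BM = reshape(permutedims(B, (2, 1, 3)), 12, 5)  # permute to (j, k, l), then (mid) × (right)
C = AM * BM                                     # the GEMM step: C[i, l] = Σ_{j,k} A[i, j, k] * B[k, j, l]
@assert size(C) == (2, 5)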
-function LinearAlgebra.eigen( - T::MatrixOrArrayStorageTensor; - maxdim=nothing, - mindim=nothing, - cutoff=nothing, - use_absolute_cutoff=nothing, - use_relative_cutoff=nothing, - # These are getting passed erroneously. - # TODO: Make sure they don't get passed down - # to here. - which_decomp=nothing, - tags=nothing, - eigen_perturbation=nothing, - normalize=nothing, - ishermitian=nothing, - ortho=nothing, - svd_alg=nothing, -) - matrixT = matrix(T) - ## TODO Here I am calling parent to ensure that the correct `any` function - ## is envoked for non-cpu matrices - if any(!isfinite, parent(matrixT)) - throw( - ArgumentError( - "Trying to perform the eigendecomposition of a matrix containing NaNs or Infs" - ), - ) - end - - DM, VM = eigen(matrixT) - - # Sort by largest to smallest eigenvalues - # TODO: Replace `cpu` with `Expose` dispatch. - p = sortperm(cpu(DM); rev=true, by=abs) - DM = DM[p] - VM = VM[:, p] - - if any(!isnothing, (maxdim, cutoff)) - DM, truncerr, _ = truncate!!( - DM; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff - ) - dD = length(DM) - if dD < size(VM, 2) - VM = VM[:, 1:dD] - end - else - dD = length(DM) - truncerr = 0.0 - end - spec = Spectrum(DM, truncerr) - - # Make the new indices to go onto V - # TODO: Put in a separate function, such as - # `rewrap_inds` or something like that. - indstype = typeof(inds(T)) - l = eltype(indstype)(dD) - r = eltype(indstype)(dD) - Vinds = indstype((dag(ind(T, 2)), dag(r))) - Dinds = indstype((l, dag(r))) - V = tensor(VM, Vinds) - D = tensor(DiagonalMatrix(DM), Dinds) - return D, V, spec -end diff --git a/NDTensors/src/backup/arraystorage/arraystorage/tensor/eigen_generic.jl b/NDTensors/src/backup/arraystorage/arraystorage/tensor/eigen_generic.jl deleted file mode 100644 index 45628af24e..0000000000 --- a/NDTensors/src/backup/arraystorage/arraystorage/tensor/eigen_generic.jl +++ /dev/null @@ -1,84 +0,0 @@ -function truncate!!( - d::AbstractVector, - u::AbstractMatrix; - mindim, - maxdim, - cutoff, - use_absolute_cutoff, - use_relative_cutoff, -) - error("Not implemented") - # Sort by largest to smallest eigenvalues - # TODO: Replace `cpu` with `Expose` dispatch. - p = sortperm(cpu(d); rev=true, by=abs) - d = d[p] - u = u[:, p] - - if any(!isnothing, (maxdim, cutoff)) - d, truncerr, _ = truncate!!( - d; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff - ) - length_d = length(d) - if length_d < size(u, 2) - u = u[:, 1:length_d] - end - else - length_d = length(d) - # TODO: Make this `zero(eltype(d))`? - truncerr = 0.0 - end - spec = Spectrum(d, truncerr) - return d, u, spec -end - -# TODO: Rewrite this function to be more modern: -# 1. List keyword arguments in function signature. -# 2. Output `Spectrum` as a keyword argument that gets overwritten. -# 3. Make this into two layers, one that handles indices and one that works with `AbstractMatrix`. -function LinearAlgebra.eigen( - t::MatrixOrArrayStorageTensor; - maxdim=nothing, - mindim=nothing, - cutoff=nothing, - use_absolute_cutoff=nothing, - use_relative_cutoff=nothing, - # These are getting passed erroneously. - # TODO: Make sure they don't get passed down - # to here. 
- which_decomp=nothing, - tags=nothing, - eigen_perturbation=nothing, - normalize=nothing, - ishermitian=nothing, - ortho=nothing, - svd_alg=nothing, -) - a = storage(t) - ## TODO Here I am calling parent to ensure that the correct `any` function - ## is envoked for non-cpu matrices - if any(!isfinite, parent(a)) - throw( - ArgumentError( - "Trying to perform the eigendecomposition of a matrix containing NaNs or Infs" - ), - ) - end - d, u = eigen(a) - d, u, spec = truncate!!(d, u) - - error("Not implemented") - - # Make the new indices to go onto V - # TODO: Put in a separate function, such as - # `rewrap_inds` or something like that. - indstype = typeof(inds(t)) - - # TODO: Make this generic to dense or block sparse. - l = eltype(indstype)(axes(t, 1)) - r = eltype(indstype)(axes(t, 2)) - inds_d = indstype((l, dag(r))) - inds_u = indstype((dag(ind(T, 2)), dag(r))) - dₜ = tensor(DiagonalMatrix(DM), Dinds) - uₜ = tensor(u, u_inds) - return dₜ, uₜ, spec -end diff --git a/NDTensors/src/backup/arraystorage/arraystorage/tensor/indexing.jl b/NDTensors/src/backup/arraystorage/arraystorage/tensor/indexing.jl deleted file mode 100644 index b5efbf81a1..0000000000 --- a/NDTensors/src/backup/arraystorage/arraystorage/tensor/indexing.jl +++ /dev/null @@ -1,8 +0,0 @@ -function getindex(tensor::MatrixOrArrayStorageTensor, I::Integer...) - return storage(tensor)[I...] -end - -function setindex!(tensor::MatrixOrArrayStorageTensor, v, I::Integer...) - storage(tensor)[I...] = v - return tensor -end diff --git a/NDTensors/src/backup/arraystorage/arraystorage/tensor/mul.jl b/NDTensors/src/backup/arraystorage/arraystorage/tensor/mul.jl deleted file mode 100644 index 58593e58e1..0000000000 --- a/NDTensors/src/backup/arraystorage/arraystorage/tensor/mul.jl +++ /dev/null @@ -1,6 +0,0 @@ -function LinearAlgebra.mul!( - C::MatrixStorageTensor, A::MatrixStorageTensor, B::MatrixStorageTensor -) - mul!(storage(C), storage(A), storage(B)) - return C -end diff --git a/NDTensors/src/backup/arraystorage/arraystorage/tensor/permutedims.jl b/NDTensors/src/backup/arraystorage/arraystorage/tensor/permutedims.jl deleted file mode 100644 index 9c9543ee9c..0000000000 --- a/NDTensors/src/backup/arraystorage/arraystorage/tensor/permutedims.jl +++ /dev/null @@ -1,14 +0,0 @@ -function permutedims!( - tensor_dest::MatrixOrArrayStorageTensor, - tensor_src::MatrixOrArrayStorageTensor, - perm, - f::Function, -) - permutedims!(storage(tensor_dest), storage(tensor_src), perm, f) - return tensor_dest -end - -function permutedims(t::MatrixOrArrayStorageTensor, perm) - a_perm = permutedims(storage(t), perm) - return tensor(a_perm, permute(inds(t), perm)) -end diff --git a/NDTensors/src/backup/arraystorage/arraystorage/tensor/qr.jl b/NDTensors/src/backup/arraystorage/arraystorage/tensor/qr.jl deleted file mode 100644 index 7ee216341f..0000000000 --- a/NDTensors/src/backup/arraystorage/arraystorage/tensor/qr.jl +++ /dev/null @@ -1,11 +0,0 @@ -function qr(A::ArrayStorageTensor; positive=false) - positive && error("Not implemented") - Q, R = qr(storage(A)) - Q = convert(typeof(R), Q) - i, j = inds(A) - q = size(A, 1) < size(A, 2) ? 
i : j - q = sim(q) - Qₜ = tensor(Q, (i, q)) - Rₜ = tensor(R, (dag(q), j)) - return Qₜ, Rₜ -end diff --git a/NDTensors/src/backup/arraystorage/arraystorage/tensor/svd.jl b/NDTensors/src/backup/arraystorage/arraystorage/tensor/svd.jl deleted file mode 100644 index 947f5e82a7..0000000000 --- a/NDTensors/src/backup/arraystorage/arraystorage/tensor/svd.jl +++ /dev/null @@ -1,127 +0,0 @@ -using .DiagonalArrays: DiagIndices, DiagonalMatrix - -backup_svd_alg(::Algorithm"divide_and_conquer") = Algorithm"qr_iteration"() -backup_svd_alg(::Algorithm"qr_iteration") = Algorithm"recursive"() - -function svd(alg::Algorithm"divide_and_conquer", a::ArrayStorage) - USV = svd_catch_error(a; alg=LinearAlgebra.DivideAndConquer()) - if isnothing(USV) - return svd(backup_svd_alg(alg), a) - end - return USV -end - -function svd(alg::Algorithm"qr_iteration", a::ArrayStorage) - USV = svd_catch_error(a; alg=LinearAlgebra.QRIteration()) - if isnothing(USV) - return svd(backup_svd_alg(alg), a) - end - return USV -end - -function svd(alg::Algorithm"recursive", a::ArrayStorage) - return svd_recursive(a) -end - -function svd(::Algorithm"QRAlgorithm", a::ArrayStorage) - return error("Not implemented yet") -end - -function svd(::Algorithm"JacobiAlgorithm", a::ArrayStorage) - return error("Not implemented yet") -end - -function svd(alg::Algorithm, a::ArrayStorage) - return error( - "svd algorithm $alg is not currently supported. Please see the documentation for currently supported algorithms.", - ) -end - -""" - tsvd(a::ArrayStorage{<:Number,2}; kwargs...) - -svd of an order-2 DenseTensor -""" -function tsvd( - a::ArrayStorage; - mindim=nothing, - maxdim=nothing, - cutoff=nothing, - alg=nothing, - use_absolute_cutoff=nothing, - use_relative_cutoff=nothing, - # Only used by BlockSparse svd - min_blockdim=nothing, -) - alg = replace_nothing(alg, default_svd_alg(a)) - USV = svd(Algorithm(alg), a) - if isnothing(USV) - if any(isnan, a) - println("SVD failed, the matrix you were trying to SVD contains NaNs.") - else - println(lapack_svd_error_message(alg)) - end - return nothing - end - - U, S, V = USV - conj!(V) - - P = S .^ 2 - if any(!isnothing, (maxdim, cutoff)) - P, truncerr, _ = truncate!!( - P; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff - ) - else - truncerr = 0.0 - end - spec = Spectrum(P, truncerr) - dS = length(P) - if dS < length(S) - U = U[:, 1:dS] - # Fails on some GPU backends like Metal. - # resize!(MS, dS) - S = S[1:dS] - V = V[:, 1:dS] - end - return U, DiagonalMatrix(S), V, spec -end - -# TODO: Rewrite this function to be more modern: -# 1. Output `Spectrum` as a keyword argument that gets overwritten. -# 2. Dispatch on `alg`. -# 3. Make this into two layers, one that handles indices and one that works with `Matrix`. -""" - svd(T::ArrayStorageTensor{<:Number,2}; kwargs...) - -svd of an order-2 DenseTensor -""" -function svd( - T::ArrayStorageTensor; - mindim=nothing, - maxdim=nothing, - cutoff=nothing, - alg=nothing, - use_absolute_cutoff=nothing, - use_relative_cutoff=nothing, - # Only used by BlockSparse svd - min_blockdim=nothing, -) - U, S, V, spec = tsvd( - storage(T); mindim, maxdim, cutoff, alg, use_absolute_cutoff, use_relative_cutoff - ) - # Make the new indices to go onto U and V - # TODO: Put in a separate function, such as - # `rewrap_inds` or something like that. 
- dS = length(S[DiagIndices(:)]) - indstype = typeof(inds(T)) - u = eltype(indstype)(dS) - v = eltype(indstype)(dS) - Uinds = indstype((ind(T, 1), u)) - Sinds = indstype((u, v)) - Vinds = indstype((ind(T, 2), v)) - TU = tensor(U, Uinds) - TS = tensor(S, Sinds) - TV = tensor(V, Vinds) - return TU, TS, TV, spec -end diff --git a/NDTensors/src/backup/arraystorage/arraystorage/tensor/zeros.jl b/NDTensors/src/backup/arraystorage/arraystorage/tensor/zeros.jl deleted file mode 100644 index c990e5db30..0000000000 --- a/NDTensors/src/backup/arraystorage/arraystorage/tensor/zeros.jl +++ /dev/null @@ -1,18 +0,0 @@ -function _zeros(tensortype::Type{<:ArrayStorageTensor}, inds) - return tensor(generic_zeros(storagetype(tensortype), dims(inds)), inds) -end - -# To resolve ambiguity error with `Base.zeros`. -function zeros(tensortype::Type{<:ArrayStorageTensor}, inds) - return _zeros(tensortype, inds) -end - -# To resolve ambiguity error with `Base.zeros`. -function zeros(tensortype::Type{<:ArrayStorageTensor}, inds::Tuple{Vararg{Integer}}) - return _zeros(tensortype, inds) -end - -# To resolve ambiguity error with `Base.zeros`. -function zeros(tensortype::Type{<:ArrayStorageTensor}, inds::Tuple{}) - return _zeros(tensortype, inds) -end diff --git a/NDTensors/src/backup/arraystorage/blocksparsearray/storage/combiner/contract.jl b/NDTensors/src/backup/arraystorage/blocksparsearray/storage/combiner/contract.jl deleted file mode 100644 index d90c365310..0000000000 --- a/NDTensors/src/backup/arraystorage/blocksparsearray/storage/combiner/contract.jl +++ /dev/null @@ -1,14 +0,0 @@ -function contract(a_src::BlockSparseArray, labels_src, a_comb::CombinerArray, labels_comb) - # TODO: Special cases for index replacement, need - # to check for trivial block permutations. - return if is_combining(a_src, labels_src, a_comb, labels_comb) - contract_combine(a_src, labels_src, a_comb, labels_comb) - else - # TODO: Check this is actually uncombining. - contract_uncombine(a_src, labels_src, a_comb, labels_comb) - end -end - -function contract(a_comb::CombinerArray, labels_comb, a_src::BlockSparseArray, labels_src) - return contract(a_src, labels_src, a_comb, labels_comb) -end diff --git a/NDTensors/src/backup/arraystorage/blocksparsearray/storage/combiner/contract_combine.jl b/NDTensors/src/backup/arraystorage/blocksparsearray/storage/combiner/contract_combine.jl deleted file mode 100644 index 9050e7ea9a..0000000000 --- a/NDTensors/src/backup/arraystorage/blocksparsearray/storage/combiner/contract_combine.jl +++ /dev/null @@ -1,143 +0,0 @@ -using .TypeParameterAccessors: unwrap_array_type -function contract_combine( - a_src::BlockSparseArray, labels_src, a_comb::CombinerArray, labels_comb -) - labels_dest = contract_labels(labels_comb, labels_src) - axes_dest = contract_inds(axes(a_comb), labels_comb, axes(a_src), labels_src, labels_dest) - - ## TODO: Add this back. - ## #: - ## a_src = before_combiner_signs( - ## a_src, - ## labels_src, - ## axes(a_src), - ## a_comb, - ## labels_comb, - ## axes(a_comb), - ## labels_dest, - ## axes_dest, - ## ) - - # Account for permutation of data. 
- cpos_in_labels_comb = 1 - clabel = labels_comb[cpos_in_labels_comb] - labels_uc = deleteat(labels_comb, cpos_in_labels_comb) - cpos_in_labels_dest = findfirst(==(clabel), labels_dest) - labels_dest_uc = insertat(labels_dest, labels_uc, cpos_in_labels_dest) - perm = getperm(labels_dest_uc, labels_src) - ucpos_in_labels_src = Tuple(findall(x -> x in labels_uc, labels_src)) - a_dest = permutedims_combine( - a_src, axes_dest, perm, ucpos_in_labels_src, blockperm(a_comb), blockcomb(a_comb) - ) - - return a_dest, labels_dest -end - -function permutedims_combine( - a_src::BlockSparseArray, - axes_dest, - perm::Tuple, - combdims::Tuple, - blockperm::Vector{Int}, - blockcomb::Vector{Int}, -) - a_dest = permutedims_combine_output( - a_src, axes_dest, perm, combdims, blockperm, blockcomb - ) - - # Permute the indices - axes_perm = permute(axes(a_src), perm) - - # Now that the indices are permuted, compute - # which indices are now combined - combdims_perm = TupleTools.sort(_permute_combdims(combdims, perm)) - comb_ind_loc = minimum(combdims_perm) - - # Determine the new index before combining - axes_to_combine = getindices(axes_perm, combdims_perm) - axis_comb = ⊗(axes_to_combine...) - axis_comb = BlockArrays.blockedrange(length.(BlockArrays.blocks(axis_comb)[blockperm])) - - for b in nzblocks(a_src) - a_src_b = @view a_src[b] - b_perm = permute(b, perm) - b_perm_comb = combine_dims(b_perm, axes_perm, combdims_perm) - b_perm_comb = perm_block(b_perm_comb, comb_ind_loc, blockperm) - # TODO: Wrap this in `BlockArrays.Block`? - b_in_combined_dim = b_perm_comb.n[comb_ind_loc] - new_b_in_combined_dim = blockcomb[b_in_combined_dim] - offset = 0 - pos_in_new_combined_block = 1 - while b_in_combined_dim - pos_in_new_combined_block > 0 && - blockcomb[b_in_combined_dim - pos_in_new_combined_block] == new_b_in_combined_dim - # offset += blockdim(axis_comb, b_in_combined_dim - pos_in_new_combined_block) - offset += length( - axis_comb[BlockArrays.Block(b_in_combined_dim - pos_in_new_combined_block)] - ) - pos_in_new_combined_block += 1 - end - b_dest = setindex(b_perm_comb, new_b_in_combined_dim, comb_ind_loc) - a_dest_b_total = @view a_dest[b_dest] - # dimsa_dest_b_tot = size(a_dest_b_total) - - # TODO: Simplify this code. - subind = ntuple(ndims(a_src) - length(combdims) + 1) do i - if i == comb_ind_loc - range( - 1 + offset; - stop=offset + length(axis_comb[BlockArrays.Block(b_in_combined_dim)]), - ) - else - range(1; stop=size(a_dest_b_total)[i]) - end - end - - a_dest_b = @view a_dest_b_total[subind...] - a_dest_b = reshape(a_dest_b, permute(size(a_src_b), perm)) - # TODO: Make this `convert` call more general - # for GPUs using `unwrap_array_type`. - a_src_bₐ = convert(Array, a_src_b) - # TODO: Use `expose` to make more efficient and robust. - permutedims!(a_dest_b, a_src_bₐ, perm) - end - - return a_dest -end - -function permutedims_combine_output( - a_src::BlockSparseArray, - axes_dest, - perm::Tuple, - combdims::Tuple, - blockperm::Vector{Int}, - blockcomb::Vector{Int}, -) - # Permute the indices - axes_src = axes(a_src) - axes_perm = permute(axes_src, perm) - - # Now that the indices are permuted, compute - # which indices are now combined - combdims_perm = TupleTools.sort(_permute_combdims(combdims, perm)) - - # Permute the nonzero blocks (dimension-wise) - blocks = nzblocks(a_src) - - # TODO: Use `permute.(blocks, perm)`. 
- blocks_perm = BlockArrays.Block.(permute.(getfield.(blocks, :n), Ref(perm))) - - # Combine the nonzero blocks (dimension-wise) - blocks_perm_comb = combine_dims(blocks_perm, axes_perm, combdims_perm) - - # Permute the blocks (within the newly combined dimension) - comb_ind_loc = minimum(combdims_perm) - blocks_perm_comb = perm_blocks(blocks_perm_comb, comb_ind_loc, blockperm) - blocks_perm_comb = sort(blocks_perm_comb; lt=isblockless) - - # Combine the blocks (within the newly combined and permuted dimension) - blocks_perm_comb = combine_blocks(blocks_perm_comb, comb_ind_loc, blockcomb) - T = eltype(a_src) - N = length(axes_dest) - B = set_ndims(unwrap_array_type(a_src), length(axes_dest)) - return BlockSparseArray{T,N,B}(undef, blocks_perm_comb, axes_dest) -end diff --git a/NDTensors/src/backup/arraystorage/blocksparsearray/storage/combiner/contract_uncombine.jl b/NDTensors/src/backup/arraystorage/blocksparsearray/storage/combiner/contract_uncombine.jl deleted file mode 100644 index 742b900235..0000000000 --- a/NDTensors/src/backup/arraystorage/blocksparsearray/storage/combiner/contract_uncombine.jl +++ /dev/null @@ -1,156 +0,0 @@ -using .TypeParameterAccessors: unwrap_array_type -function contract_inds_uncombine(inds_src::Tuple, labels_src, inds_comb::Tuple, labels_comb) - cpos_in_labels_comb = 1 - clabel = labels_comb[cpos_in_labels_comb] - labels_uc = deleteat(labels_comb, cpos_in_labels_comb) - labels_dest = labels_src - cpos_in_labels_dest = findfirst(==(clabel), labels_dest) - # Move combined index to first position - perm = ntuple(identity, length(inds_src)) - if cpos_in_labels_dest != 1 - labels_dest_orig = labels_dest - labels_dest = deleteat(labels_dest, cpos_in_labels_dest) - labels_dest = insertafter(labels_dest, clabel, 0) - cpos_in_labels_dest = 1 - perm = getperm(labels_dest, labels_dest_orig) - inds_src = permute(inds_src, perm) - labels_src = permute(labels_src, perm) - end - labels_dest = insertat(labels_dest, labels_uc, cpos_in_labels_dest) - inds_dest = contract_inds(inds_comb, labels_comb, inds_src, labels_src, labels_dest) - return inds_dest, labels_dest, perm, cpos_in_labels_dest -end - -function contract_uncombine( - a_src::BlockSparseArray, labels_src, a_comb::CombinerArray, labels_comb -) - axes_dest, labels_dest, perm, cpos_in_labels_dest = contract_inds_uncombine( - axes(a_src), labels_src, axes(a_comb), labels_comb - ) - a_src = permutedims(a_src, perm) - - ## TODO: Add this back. - ## # : - ## a_src = before_combiner_signs( - ## a_src, - ## labels_src, - ## axes(a_src), - ## a_comb, - ## labels_comb, - ## axes(a_comb), - ## labels_dest, - ## axes_dest, - ## ) - - a_dest = uncombine( - a_src, - labels_src, - axes_dest, - labels_dest, - cpos_in_labels_dest, - blockperm(a_comb), - blockcomb(a_comb), - ) - - ## TODO: Add this back. 
-  ## # :
-  ## a_dest = after_combiner_signs(
-  ##   a_dest,
-  ##   labels_dest,
-  ##   axes_dest,
-  ##   a_comb,
-  ##   labels_comb,
-  ##   axes(a_comb),
-  ## )
-
-  return a_dest, labels_dest
-end
-
-function uncombine(
-  a_src::BlockSparseArray,
-  labels_src,
-  axes_dest,
-  labels_dest,
-  combdim::Int,
-  blockperm::Vector{Int},
-  blockcomb::Vector{Int},
-)
-  a_dest = uncombine_output(
-    a_src, labels_src, axes_dest, labels_dest, combdim, blockperm, blockcomb
-  )
-  invblockperm = invperm(blockperm)
-  # This is needed for reshaping the block
-  # TODO: It is already calculated in uncombine_output, use it from there
-  labels_uncomb_perm = setdiff(labels_dest, labels_src)
-  ind_uncomb_perm = ⊗(
-    axes_dest[map(x -> findfirst(==(x), labels_dest), labels_uncomb_perm)]...
-  )
-  ind_uncomb = BlockArrays.blockedrange(
-    length.(BlockArrays.blocks(ind_uncomb_perm)[blockperm])
-  )
-  # Same as axes(a_src) but with the blocks uncombined
-  axes_uncomb = insertat(axes(a_src), ind_uncomb, combdim)
-  axes_uncomb_perm = insertat(axes(a_src), ind_uncomb_perm, combdim)
-  for b in nzblocks(a_src)
-    a_src_b_tot = @view a_src[b]
-    bs_uncomb = uncombine_block(b, combdim, blockcomb)
-    offset = 0
-    for i in 1:length(bs_uncomb)
-      b_uncomb = bs_uncomb[i]
-      b_uncomb_perm = perm_block(b_uncomb, combdim, invblockperm)
-      b_uncomb_perm_reshape = reshape(b_uncomb_perm, axes_uncomb_perm, axes_dest)
-      a_dest_b = @view a_dest[b_uncomb_perm_reshape]
-      b_uncomb_in_combined_dim = b_uncomb_perm.n[combdim]
-      start = offset + 1
-      stop = offset + length(ind_uncomb_perm[BlockArrays.Block(b_uncomb_in_combined_dim)])
-      subind = ntuple(
-        i -> i == combdim ? range(start; stop=stop) : range(1; stop=size(a_src_b_tot)[i]),
-        ndims(a_src),
-      )
-      offset = stop
-      a_src_b = @view a_src_b_tot[subind...]
-
-      # Alternative (but maybe slower):
-      #copyto!(a_dest_b, a_src_b)
-
-      if length(a_src_b) == 1
-        # Call `cpu` to avoid allowscalar error on GPU.
-        # TODO: Replace with `@allowscalar`, requires adding
-        # `GPUArraysCore.jl` as a dependency, or use `expose`.
-        a_dest_b[] = cpu(a_src_b)[]
-      else
-        # TODO: Use `unspecify_parameters(unwrap_array_type(a_src))` instead of `Array`.
-        a_dest_bₐ = convert(Array, a_dest_b)
-        a_dest_bₐᵣ = reshape(a_dest_bₐ, size(a_src_b))
-        copyto!(expose(a_dest_bₐᵣ), expose(a_src_b))
-      end
-    end
-  end
-  return a_dest
-end
-
-function uncombine_output(
-  a_src::BlockSparseArray,
-  labels_src,
-  axes_dest,
-  labels_dest,
-  combdim::Int,
-  blockperm::Vector{Int},
-  blockcomb::Vector{Int},
-)
-  labels_uncomb_perm = setdiff(labels_dest, labels_src)
-  ind_uncomb_perm = ⊗(
-    axes_dest[map(x -> findfirst(==(x), labels_dest), labels_uncomb_perm)]...
-  )
-  axes_uncomb_perm = insertat(axes(a_src), ind_uncomb_perm, combdim)
-  # Uncombine the blocks of a_src
-  blocks_uncomb = uncombine_blocks(nzblocks(a_src), combdim, blockcomb)
-  blocks_uncomb_perm = perm_blocks(blocks_uncomb, combdim, invperm(blockperm))
-
-  # TODO: Should this be zero data instead of undef?
-  T = eltype(a_src)
-  N = length(axes_uncomb_perm)
-  B = unwrap_array_type(a_src)
-  a_uncomb_perm = BlockSparseArray{T,N,B}(undef, blocks_uncomb_perm, axes_uncomb_perm)
-  return reshape(a_uncomb_perm, axes_dest)
-end
diff --git a/NDTensors/src/backup/arraystorage/blocksparsearray/storage/combiner/contract_utils.jl b/NDTensors/src/backup/arraystorage/blocksparsearray/storage/combiner/contract_utils.jl
deleted file mode 100644
index ad52f741b1..0000000000
--- a/NDTensors/src/backup/arraystorage/blocksparsearray/storage/combiner/contract_utils.jl
+++ /dev/null
@@ -1,129 +0,0 @@
-# Needed for implementing block sparse combiner contraction.
-using .BlockSparseArrays: blocks, nonzero_keys
-using .BlockSparseArrays.BlockArrays: BlockArrays
-# TODO: Move to `BlockSparseArrays`, come up with better name.
-# `nonzero_block_keys`?
-nzblocks(a::BlockSparseArray) = BlockArrays.Block.(Tuple.(collect(nonzero_keys(blocks(a)))))
-
-# ⊗
-# TODO: Rename this function.
-function outer(i1::BlockArrays.BlockedUnitRange, i2::BlockArrays.BlockedUnitRange)
-  axes = (i1, i2)
-  return BlockArrays.blockedrange(
-    prod.(length, vec(collect(Iterators.product(BlockArrays.blocks.(axes)...))))
-  )
-end
-
-outer(i::BlockArrays.BlockedUnitRange) = i
-
-function combine_dims(
-  blocks::Vector{BlockArrays.Block{N,Int}}, inds, combdims::NTuple{NC,Int}
-) where {N,NC}
-  nblcks = map(i -> BlockArrays.blocklength(inds[i]), combdims)
-  blocks_comb = Vector{BlockArrays.Block{N - NC + 1,Int}}(undef, length(blocks))
-  for (i, block) in enumerate(blocks)
-    blocks_comb[i] = combine_dims(block, inds, combdims)
-  end
-  return blocks_comb
-end
-
-function getindices(b::BlockArrays.Block, I::Tuple)
-  return getindices(b.n, I)
-end
-deleteat(b::BlockArrays.Block, pos) = BlockArrays.Block(deleteat(b.n, pos))
-function insertafter(b::BlockArrays.Block, val, pos)
-  return BlockArrays.Block(insertafter(b.n, Int.(val), pos))
-end
-setindex(b::BlockArrays.Block, val, pos) = BlockArrays.Block(setindex(b.n, Int(val), pos))
-permute(s::BlockArrays.Block, perm::Tuple) = BlockArrays.Block(permute(s.n, perm))
-# define block ordering with reverse lexicographical order
-function isblockless(b1::BlockArrays.Block{N}, b2::BlockArrays.Block{N}) where {N}
-  return CartesianIndex(b1.n) < CartesianIndex(b2.n)
-end
-# In the dimension dim, permute the block
-function perm_block(block::BlockArrays.Block, dim::Int, perm)
-  iperm = invperm(perm)
-  return setindex(block, iperm[block.n[dim]], dim)
-end
-
-function combine_dims(block::BlockArrays.Block, inds, combdims::NTuple{NC,Int}) where {NC}
-  nblcks = map(i -> BlockArrays.blocklength(inds[i]), combdims)
-  slice = getindices(block, combdims)
-  slice_comb = LinearIndices(nblcks)[slice...]
- block_comb = deleteat(block, combdims) - block_comb = insertafter(block_comb, tuple(slice_comb), minimum(combdims) - 1) - return block_comb -end - -# In the dimension dim, permute the blocks -function perm_blocks(blocks::Vector{BlockArrays.Block{N,Int}}, dim::Int, perm) where {N} - blocks_perm = Vector{BlockArrays.Block{N,Int}}(undef, length(blocks)) - iperm = invperm(perm) - for (i, block) in enumerate(blocks) - blocks_perm[i] = setindex(block, iperm[block.n[dim]], dim) - end - return blocks_perm -end - -# In the dimension dim, combine the specified blocks -function combine_blocks( - blocks::Vector{<:BlockArrays.Block}, dim::Int, blockcomb::Vector{Int} -) - blocks_comb = copy(blocks) - nnz_comb = length(blocks) - for (i, block) in enumerate(blocks) - dimval = block.n[dim] - blocks_comb[i] = setindex(block, blockcomb[dimval], dim) - end - unique!(blocks_comb) - return blocks_comb -end - -# Uncombining utils - -# Uncombine the blocks along the dimension dim -# according to the pattern in blockcomb (for example, blockcomb -# is [1,2,2,3] and dim = 2, so the blocks (1,2),(2,3) get -# split into (1,2),(1,3),(2,4)) -function uncombine_blocks( - blocks::Vector{BlockArrays.Block{N,Int}}, dim::Int, blockcomb::Vector{Int} -) where {N} - blocks_uncomb = Vector{BlockArrays.Block{N,Int}}() - ncomb_tot = 0 - for i in 1:length(blocks) - block = blocks[i] - blockval = block.n[dim] - ncomb = _number_uncombined(blockval, blockcomb) - ncomb_shift = _number_uncombined_shift(blockval, blockcomb) - push!(blocks_uncomb, setindex(block, blockval + ncomb_shift, dim)) - for j in 1:(ncomb - 1) - push!(blocks_uncomb, setindex(block, blockval + ncomb_shift + j, dim)) - end - end - return blocks_uncomb -end - -function uncombine_block( - block::BlockArrays.Block{N}, dim::Int, blockcomb::Vector{Int} -) where {N} - blocks_uncomb = Vector{BlockArrays.Block{N,Int}}() - ncomb_tot = 0 - blockval = block.n[dim] - ncomb = _number_uncombined(blockval, blockcomb) - ncomb_shift = _number_uncombined_shift(blockval, blockcomb) - push!(blocks_uncomb, setindex(block, blockval + ncomb_shift, dim)) - for j in 1:(ncomb - 1) - push!(blocks_uncomb, setindex(block, blockval + ncomb_shift + j, dim)) - end - return blocks_uncomb -end - -# TODO: Rethink this function. -function reshape(blockT::BlockArrays.Block{NT}, indsT, indsR) where {NT} - nblocksT = BlockArrays.blocklength.(indsT) - nblocksR = BlockArrays.blocklength.(indsR) - blockR = Tuple( - CartesianIndices(nblocksR)[LinearIndices(nblocksT)[CartesianIndex(blockT.n)]] - ) - return BlockArrays.Block(blockR) -end diff --git a/NDTensors/src/backup/arraystorage/blocksparsearray/storage/contract.jl b/NDTensors/src/backup/arraystorage/blocksparsearray/storage/contract.jl deleted file mode 100644 index a02f9229d4..0000000000 --- a/NDTensors/src/backup/arraystorage/blocksparsearray/storage/contract.jl +++ /dev/null @@ -1,108 +0,0 @@ -# TODO: Define in `SparseArraysBase`. -using ..SparseArraysBase: SparseArrayDOK - -# TODO: This is inefficient, need to optimize. 
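# A minimal sketch of the label matching performed by `key_dest` below
# (illustrative labels and indices, not values from this package): stored
# entries of the two sparse operands pair up only when they agree on every
# shared (contracted) label.
labels1, labels2, labels_dest = (:i, :k), (:k, :j), (:i, :j)
I1, I2 = CartesianIndex(2, 5), CartesianIndex(5, 3)
k1 = Tuple(I1)[findfirst(==(:k), labels1)]  # value of shared label :k in the first entry
k2 = Tuple(I2)[findfirst(==(:k), labels2)]  # value of shared label :k in the second entry
@assert k1 == k2                            # they match, so the product accumulates at destination key (2, 3)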
-# Look at `contract_labels`, `contract_blocks` and `maybe_contract_blocks!` in: -# src/blocksparse/contract_utilities.jl -function key_dest(labels_dest, I1::CartesianIndex, labels1, I2::CartesianIndex, labels2) - labels_intersect = labels1 ∩ labels2 - for label_intersect in labels_intersect - i1 = Tuple(I1)[findfirst(==(label_intersect), labels1)] - i2 = Tuple(I2)[findfirst(==(label_intersect), labels2)] - if i1 ≠ i2 - return nothing - end - end - I_dest = CartesianIndex( - ntuple(length(labels_dest)) do j_dest - label_dest = labels_dest[j_dest] - j1 = findfirst(==(label_dest), labels1) - if !isnothing(j1) - return Tuple(I1)[j1] - end - j2 = findfirst(==(label_dest), labels2) - if !isnothing(j2) - return Tuple(I2)[j2] - end - return nothing - end, - ) - if any(isnothing, Tuple(I_dest)) - return nothing - end - return I_dest -end - -function default_contract_muladd(a1, labels1, a2, labels2, a_dest, labels_dest) - return muladd(a1, a2, a_dest) -end - -function contract!( - a_dest::SparseArrayDOK, - labels_dest, - a1::SparseArrayDOK, - labels1, - a2::SparseArrayDOK, - labels2; - muladd=default_contract_muladd, -) - for I1 in nonzero_keys(a1) - for I2 in nonzero_keys(a2) - # TODO: Cache information needed for `key_dest`, i.e. - # location of `labels_dest` in `labels1` and `labels2`. - I_dest = key_dest(labels_dest, I1, labels1, I2, labels2) - if !isnothing(I_dest) - a_dest[I_dest] = muladd( - a1[I1], labels1, a2[I2], labels2, a_dest[I_dest], labels_dest - ) - end - end - end - return a_dest -end - -function blocksparse_contract_muladd(a1, labels1, a2, labels2, a_dest, labels_dest) - # TODO: Use `contract!!` once we change to `UnallocatedZeros` - # for structural zero blocks. - # TODO: Check that `α` and `β` are correct. - contract!(a_dest, labels_dest, a1, labels1, a2, labels2, true, true) - return a_dest -end - -function contract!( - a_dest::BlockSparseArray, - labels_dest, - a1::BlockSparseArray, - labels1, - a2::BlockSparseArray, - labels2, -) - contract!( - blocks(a_dest), - labels_dest, - blocks(a1), - labels1, - blocks(a2), - labels2; - muladd=blocksparse_contract_muladd, - ) - return a_dest -end - -function contract(a1::BlockSparseArray, labels1, a2::BlockSparseArray, labels2) - labels_dest = contract_labels(labels1, labels2) - axes_dest = contract_inds(axes(a1), labels1, axes(a2), labels2, labels_dest) - # TODO: Do this through `allocate_output(::typeof(contract), ...)` - elt_dest = promote_type(eltype(a1), eltype(a2)) - a_dest = BlockSparseArray{elt_dest}(axes_dest) - contract!(a_dest, labels_dest, a1, labels1, a2, labels2) - return a_dest, labels_dest -end - -function contract(a1::BlockSparseArray, labels1, a2::Array, labels2) - return error("Not implemented") -end - -function contract(a1::Array, labels1, a2::BlockSparseArray, labels2) - return error("Not implemented") -end diff --git a/NDTensors/src/backup/arraystorage/blocksparsearray/tensor/contract.jl b/NDTensors/src/backup/arraystorage/blocksparsearray/tensor/contract.jl deleted file mode 100644 index c8d5f445c6..0000000000 --- a/NDTensors/src/backup/arraystorage/blocksparsearray/tensor/contract.jl +++ /dev/null @@ -1,59 +0,0 @@ -# TODO: This should be a generic `Tensor` definition. 
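# Every `contract` method below repeats the same three steps. A minimal
# sketch of the generic form the TODO above asks for (`contract_tensors` is a
# hypothetical name, and this assumes the `storage`, `inds`, `contract_inds`,
# and `tensor` functions from this package):
function contract_tensors(t1::Tensor, labels1, t2::Tensor, labels2)
  a_dest, labels_dest = contract(storage(t1), labels1, storage(t2), labels2)
  inds_dest = contract_inds(inds(t1), labels1, inds(t2), labels2, labels_dest)
  return tensor(a_dest, inds_dest)
end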
-function contract( - t1::Tensor{T1,N1,<:BlockSparseArray{T1,N1}}, - labels1, - t2::Tensor{T2,N2,<:BlockSparseArray{T2,N2}}, - labels2, -) where {T1,N1,T2,N2} - a_dest, labels_dest = contract(storage(t1), labels1, storage(t2), labels2) - inds_dest = contract_inds(inds(t1), labels1, inds(t2), labels2, labels_dest) - return tensor(a_dest, inds_dest) -end - -# TODO: This should be a generic `Tensor` definition. -function contract( - t1::Tensor{<:Any,<:Any,<:BlockSparseArray}, - labels1, - t2::MatrixOrArrayStorageTensor, - labels2, -) - a_dest, labels_dest = contract(storage(t1), labels1, storage(t2), labels2) - inds_dest = contract_inds(inds(t1), labels1, inds(t2), labels2, labels_dest) - return tensor(a_dest, inds_dest) -end - -# TODO: This should be a generic `Tensor` definition. -function contract( - t1::MatrixOrArrayStorageTensor, - labels1, - t2::Tensor{<:Any,<:Any,<:BlockSparseArray}, - labels2, -) - a_dest, labels_dest = contract(storage(t1), labels1, storage(t2), labels2) - inds_dest = contract_inds(inds(t1), labels1, inds(t2), labels2, labels_dest) - return tensor(a_dest, inds_dest) -end - -# TODO: This should be a generic `Tensor` definition. -function contract( - t1::Tensor{<:Any,<:Any,<:CombinerArray}, - labels1, - t2::Tensor{T,N,<:BlockSparseArray{T,N}}, - labels2, -) where {T,N} - a_dest, labels_dest = contract(storage(t1), labels1, storage(t2), labels2) - inds_dest = contract_inds(inds(t1), labels1, inds(t2), labels2, labels_dest) - return tensor(a_dest, inds_dest) -end - -# TODO: This should be a generic `Tensor` definition. -function contract( - t1::Tensor{T,N,<:BlockSparseArray{T,N}}, - labels1, - t2::Tensor{<:Any,<:Any,<:CombinerArray}, - labels2, -) where {T,N} - a_dest, labels_dest = contract(storage(t1), labels1, storage(t2), labels2) - inds_dest = contract_inds(inds(t1), labels1, inds(t2), labels2, labels_dest) - return tensor(a_dest, inds_dest) -end diff --git a/NDTensors/src/backup/arraystorage/combiner/storage/combinerarray.jl b/NDTensors/src/backup/arraystorage/combiner/storage/combinerarray.jl deleted file mode 100644 index 3409b6efed..0000000000 --- a/NDTensors/src/backup/arraystorage/combiner/storage/combinerarray.jl +++ /dev/null @@ -1,12 +0,0 @@ -struct CombinerArray{N,C,Axes<:Tuple{Vararg{Any,N}}} <: AbstractArray{Any,N} - combiner::C - axes::Axes -end - -Base.axes(a::CombinerArray) = a.axes -Base.size(a::CombinerArray) = length.(axes(a)) -Base.getindex(a::CombinerArray{0}) = true - -function Base.conj(aliasstyle::AliasStyle, a::CombinerArray) - return CombinerArray(conj(aliasstyle, a.combiner), axes(a)) -end diff --git a/NDTensors/src/backup/arraystorage/combiner/storage/contract.jl b/NDTensors/src/backup/arraystorage/combiner/storage/contract.jl deleted file mode 100644 index c95a6ec864..0000000000 --- a/NDTensors/src/backup/arraystorage/combiner/storage/contract.jl +++ /dev/null @@ -1,76 +0,0 @@ -function contract( - a_comb::CombinerArray, labels_comb, a_src::MatrixOrArrayStorage, labels_src -) - if ndims(a_comb) ≤ 1 - return contract_scalar(a_comb, labels_comb, a_src, labels_src) - elseif is_index_replacement(a_src, labels_src, a_comb, labels_comb) - return contract_replacement(a_comb, labels_comb, a_src, labels_src) - elseif is_combining(a_src, labels_src, a_comb, labels_comb) - return contract_combine(a_comb, labels_comb, a_src, labels_src) - else - # TODO: Check this is a proper uncombining. 
- return contract_uncombine(a_comb, labels_comb, a_src, labels_src) - end - return invalid_comb_contraction_error(t, labels_src, a_comb, labels_comb) -end - -function contract( - a_src::MatrixOrArrayStorage, labels_src, a_comb::CombinerArray, labels_comb -) - return contract(a_comb, labels_comb, a_src, labels_src) -end - -# Empty comb, acts as multiplying by 1 -function contract_scalar( - a_comb::CombinerArray, labels_comb, a_src::MatrixOrArrayStorage, labels_src -) - error("Not implemented") - return copy(a_src), labels_src -end - -function contract_replacement( - a_comb::CombinerArray, labels_comb, a_src::MatrixOrArrayStorage, labels_src -) - @assert length(labels_comb) == 2 - a_dest = copy(a_src) - replacement_label = only(setdiff(labels_comb, labels_src)) - common_label = only(intersect(labels_src, labels_comb)) - common_label_src_pos = findfirst(==(common_label), labels_src) - labels_dest = setindex(labels_src, replacement_label, common_label_src_pos) - return a_dest, labels_dest -end - -function contract_combine( - a_comb::CombinerArray, labels_comb, a_src::MatrixOrArrayStorage, labels_src -) - labels_dest = contract_labels(labels_comb, labels_src) - axes_dest = contract_inds(axes(a_comb), labels_comb, axes(a_src), labels_src, labels_dest) - cpos1, a_dest_cpos = intersect_positions(labels_comb, labels_dest) - labels_comb = deleteat(labels_comb, cpos1) - a_dest_vl = [labels_dest...] - for (ii, li) in enumerate(labels_comb) - insert!(a_dest_vl, a_dest_cpos + ii, li) - end - deleteat!(a_dest_vl, a_dest_cpos) - labels_perm = tuple(a_dest_vl...) - perm = getperm(labels_perm, labels_src) - tp_axes = permute(axes(a_src), perm) - a_dest = permutedims(a_src, perm) - return reshape(a_dest, length.(axes_dest)), labels_dest -end - -function contract_uncombine( - a_comb::CombinerArray, labels_comb, a_src::MatrixOrArrayStorage, labels_src -) - a_dest = copy(a_src) - - cpos1, cpos2 = intersect_positions(labels_comb, labels_src) - - axes_dest = deleteat(axes(a_comb), cpos1) - axes_dest = insertat(axes(a_src), axes_dest, cpos2) - - labels_dest = deleteat(labels_comb, cpos1) - labels_dest = insertat(labels_src, labels_dest, cpos2) - - return reshape(a_dest, length.(axes_dest)), labels_dest -end diff --git a/NDTensors/src/backup/arraystorage/combiner/storage/contract_utils.jl b/NDTensors/src/backup/arraystorage/combiner/storage/contract_utils.jl deleted file mode 100644 index a7fdbe0276..0000000000 --- a/NDTensors/src/backup/arraystorage/combiner/storage/contract_utils.jl +++ /dev/null @@ -1,52 +0,0 @@ -blockperm(a::CombinerArray) = blockperm(a.combiner) -blockcomb(a::CombinerArray) = blockcomb(a.combiner) - -function combinedind(a::CombinerArray) - return axes(a)[combinedind_position(a)] -end - -function is_index_replacement( - a::AbstractArray, a_labels, a_comb::CombinerArray, a_comb_labels -) - return (ndims(a_comb) == 2) && isone(count(∈(a_labels), a_comb_labels)) -end - -# Return if the combiner contraction is combining or uncombining. -# Check for valid contractions, for example when combining, -# only the combined index should be uncontracted, and when uncombining, -# only the combined index should be contracted. 
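# A minimal illustration of that convention (made-up labels): the combined
# index comes first on the combiner, so its label is absent from the source
# exactly when the contraction is a combining one.
labels_src = (:i, :j, :k)     # labels of the source array
labels_comb = (:c, :i, :j)    # combiner labels; :c is the combined index
is_combining_example = labels_comb[1] ∉ labels_src
@assert is_combining_example  # (:i, :j) are consumed and :c survives: a combining contraction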
-function is_combining(a::AbstractArray, a_labels, a_comb::CombinerArray, a_comb_labels) - is_combining = is_combining_no_check(a, a_labels, a_comb, a_comb_labels) - check_valid_combiner_contraction(is_combining, a, a_labels, a_comb, a_comb_labels) - return is_combining -end - -function is_combining_no_check( - a::AbstractArray, a_labels, a_comb::CombinerArray, a_comb_labels -) - return combinedind_label(a_comb, a_comb_labels) ∉ a_labels -end - -function combinedind_label(a_comb::CombinerArray, a_comb_labels) - return a_comb_labels[combinedind_position(a_comb)] -end - -# The position of the combined index/dimension. -# By convention, it is the first one. -combinedind_position(a_comb::CombinerArray) = 1 - -function check_valid_combiner_contraction( - is_combining::Bool, a::AbstractArray, a_labels, a_comb::CombinerArray, a_comb_labels -) - if !is_valid_combiner_contraction(is_combining, a, a_labels, a_comb, a_comb_labels) - return invalid_combiner_contraction_error(a, a_labels, a_comb, a_comb_labels) - end - return nothing -end - -function is_valid_combiner_contraction( - is_combining::Bool, a::AbstractArray, a_labels, a_comb::CombinerArray, a_comb_labels -) - in_a_labels_op = is_combining ? ∉(a_labels) : ∈(a_labels) - return isone(count(in_a_labels_op, a_comb_labels)) -end diff --git a/NDTensors/src/backup/arraystorage/combiner/storage/promote_rule.jl b/NDTensors/src/backup/arraystorage/combiner/storage/promote_rule.jl deleted file mode 100644 index 9d367979d8..0000000000 --- a/NDTensors/src/backup/arraystorage/combiner/storage/promote_rule.jl +++ /dev/null @@ -1 +0,0 @@ -promote_rule(::Type{<:CombinerArray}, arraytype::Type{<:MatrixOrArrayStorage}) = arraytype diff --git a/NDTensors/src/backup/arraystorage/combiner/tensor/contract.jl b/NDTensors/src/backup/arraystorage/combiner/tensor/contract.jl deleted file mode 100644 index ddffab1380..0000000000 --- a/NDTensors/src/backup/arraystorage/combiner/tensor/contract.jl +++ /dev/null @@ -1,31 +0,0 @@ -# TODO: This should be generic `Tensor` definition. -function contract( - t1::Tensor{<:Any,<:Any,<:CombinerArray}, labels1, t2::MatrixOrArrayStorageTensor, labels2 -) - a_dest, labels_dest = contract(storage(t1), labels1, storage(t2), labels2) - inds_dest = contract_inds(inds(t1), labels1, inds(t2), labels2, labels_dest) - return tensor(a_dest, inds_dest) -end - -# TODO: This should be generic `Tensor` definition. -function contract( - t1::MatrixOrArrayStorageTensor, labels1, t2::Tensor{<:Any,<:Any,<:CombinerArray}, labels2 -) - a_dest, labels_dest = contract(storage(t1), labels1, storage(t2), labels2) - inds_dest = contract_inds(inds(t1), labels1, inds(t2), labels2, labels_dest) - return tensor(a_dest, inds_dest) -end - -# TODO: Delete. -function contract( - t1::Tensor{<:Any,<:Any,<:Combiner}, labels1, t2::MatrixOrArrayStorageTensor, labels2 -) - return contract(to_arraystorage(t1), labels1, t2, labels2) -end - -# TODO: Delete. -function contract( - t1::MatrixOrArrayStorageTensor, labels1, t2::Tensor{<:Any,<:Any,<:Combiner}, labels2 -) - return contract(t1, labels1, to_arraystorage(t2), labels2) -end diff --git a/NDTensors/src/backup/arraystorage/combiner/tensor/to_arraystorage.jl b/NDTensors/src/backup/arraystorage/combiner/tensor/to_arraystorage.jl deleted file mode 100644 index ddbe6dfb4f..0000000000 --- a/NDTensors/src/backup/arraystorage/combiner/tensor/to_arraystorage.jl +++ /dev/null @@ -1,4 +0,0 @@ -# TODO: Delete when we directly use `CombinerArray` as storage. 
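# A minimal sketch of what dense combining amounts to (cf. `contract_combine`
# above; made-up sizes, plain `Base` operations): once the dimensions being
# fused are permuted to be adjacent and leading, the combine step is a plain
# reshape.
a = randn(2, 3, 4)
a_perm = permutedims(a, (2, 3, 1))  # bring the two dims to fuse (sizes 3 and 4) up front
a_comb = reshape(a_perm, 3 * 4, 2)  # fuse them into a single dimension of size 12
@assert size(a_comb) == (12, 2)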
-function to_arraystorage(t::CombinerTensor) - return tensor(CombinerArray(storage(t), to_axes(inds(t))), inds(t)) -end diff --git a/NDTensors/src/backup/arraystorage/diagonalarray/storage/contract.jl b/NDTensors/src/backup/arraystorage/diagonalarray/storage/contract.jl deleted file mode 100644 index 5e53d2bd22..0000000000 --- a/NDTensors/src/backup/arraystorage/diagonalarray/storage/contract.jl +++ /dev/null @@ -1,173 +0,0 @@ -using .SparseArraysBase: densearray -using .DiagonalArrays: DiagIndex, diaglength -using .TypeParameterAccessors: unwrap_array_type - -# TODO: Move to a different file. -function promote_rule( - storagetype1::Type{<:ArrayStorage}, storagetype2::Type{<:DiagonalArray} -) - # TODO: Replace with `unwrap_array_type` once - # https://github.com/ITensor/ITensors.jl/pull/1220 - # is merged. - return promote_type(storagetype1, unwrap_array_type(storagetype2)) -end - -# TODO: Move to a different file. -function promote_rule( - storagetype1::Type{<:DiagonalArray}, storagetype2::Type{<:DiagonalArray} -) - return error("Not implemented yet") -end - -function contraction_output_type( - arraytype1::Type{<:DiagonalArray}, arraytype2::Type{<:DiagonalArray}, inds -) - return error("Not implemented yet") -end - -default_convert_to_dense() = true - -# TODO: Modernize this function, rewrite in terms of `Array` and `DiagonalArray`. -# TODO: Move to `storage`. -function contract!( - C::ArrayStorage, - Clabels, - A::DiagonalArray, - Alabels, - B::ArrayStorage, - Blabels, - α::Number=one(eltype(C)), - β::Number=zero(eltype(C)); - convert_to_dense=default_convert_to_dense(), -) - if convert_to_dense - contract_dense!(C, Clabels, A, Alabels, B, Blabels, α, β) - return C - end - if !isone(α) || !iszero(β) - error( - "`contract!(::ArrayStorageTensor, ::DiagTensor, ::ArrayStorageTensor, α, β; convert_to_dense = false)` with `α ≠ 1` or `β ≠ 0` is not currently supported. 
You can call it with `convert_to_dense = true` instead.", - ) - end - astarts = zeros(Int, length(Alabels)) - bstart = 0 - cstart = 0 - b_cstride = 0 - nbu = 0 - for ib in 1:length(Blabels) - ia = findfirst(==(Blabels[ib]), Alabels) - if !isnothing(ia) - b_cstride += stride(B, ib) - bstart += astarts[ia] * stride(B, ib) - else - nbu += 1 - end - end - c_cstride = 0 - for ic in 1:length(Clabels) - ia = findfirst(==(Clabels[ic]), Alabels) - if !isnothing(ia) - c_cstride += stride(C, ic) - cstart += astarts[ia] * stride(C, ic) - end - end - # strides of the uncontracted dimensions of - # B - bustride = zeros(Int, nbu) - custride = zeros(Int, nbu) - # size of the uncontracted dimensions of - # B, to be used in CartesianIndices - busize = zeros(Int, nbu) - n = 1 - for ib in 1:length(Blabels) - if Blabels[ib] > 0 - bustride[n] = stride(B, ib) - busize[n] = size(B, ib) - ic = findfirst(==(Blabels[ib]), Clabels) - custride[n] = stride(C, ic) - n += 1 - end - end - boffset_orig = 1 - sum(strides(B)) - coffset_orig = 1 - sum(strides(C)) - cartesian_inds = CartesianIndices(Tuple(busize)) - for inds in cartesian_inds - boffset = boffset_orig - coffset = coffset_orig - for i in 1:nbu - ii = inds[i] - boffset += ii * bustride[i] - coffset += ii * custride[i] - end - c = zero(eltype(C)) - for j in 1:diaglength(A) - # With α == 0 && β == 1 - C[cstart + j * c_cstride + coffset] += - A[DiagIndex(j)] * B[bstart + j * b_cstride + boffset] - # XXX: not sure if this is correct - #C[cstart+j*c_cstride+coffset] += α * A[DiagIndex(j)] * B[bstart+j*b_cstride+boffset] + β * C[cstart+j*c_cstride+coffset] - end - end -end - -function contract!( - C::ArrayStorage{<:Any,0}, - Clabels, - A::DiagonalArray, - Alabels, - B::ArrayStorage, - Blabels, - α::Number=one(eltype(C)), - β::Number=zero(eltype(C)); - convert_to_dense=nothing, -) - # If all of B is contracted - # TODO: can also check NC+NB==NA - min_dim = min(minimum(size(A)), minimum(size(B))) - if length(Clabels) == 0 - # all indices are summed over, just add the product of the diagonal - # elements of A and B - # Assumes C starts set to 0 - c₁ = zero(eltype(C)) - for i in 1:min_dim - c₁ += A[DiagIndex(i)] * B[DiagIndex(i)] - end - C[DiagIndex(1)] = α * c₁ + β * C[DiagIndex(1)] - else - # not all indices are summed over, set the diagonals of the result - # to the product of the diagonals of A and B - # TODO: should we make this return a Diag storage? - for i in 1:min_dim - C[DiagIndex(i)] = α * A[DiagIndex(i)] * B[DiagIndex(i)] + β * C[DiagIndex(i)] - end - end - return C -end - -function contract_dense!( - C::ArrayStorage, - Clabels, - A::DiagonalArray, - Alabels, - B::ArrayStorage, - Blabels, - α::Number=one(eltype(C)), - β::Number=zero(eltype(C)), -) - return contract!(C, Clabels, densearray(A), Alabels, B, Blabels, α, β) -end - -# Overspecifying types to fix ambiguity error. 
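# A minimal sketch of the `convert_to_dense` fallback used by
# `contract_dense!` above: a diagonal operand can always be materialized and
# handed to the dense contraction path, trading memory for generality
# (standalone toy example with made-up values):
using LinearAlgebra
d = [2.0, 3.0]                      # the stored diagonal
B = randn(2, 4)
C_dense = Matrix(Diagonal(d)) * B   # densify the diagonal, then an ordinary matrix multiply
C_diag = d .* B                     # equivalent result that exploits diagonality
@assert C_dense ≈ C_diag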
-function contract!( - C::ArrayStorage, - Clabels, - A::ArrayStorage, - Alabels, - B::DiagonalArray, - Blabels, - α::Number=one(eltype(C)), - β::Number=zero(eltype(C)); - convert_to_dense=default_convert_to_dense(), -) - return contract!(C, Clabels, B, Blabels, A, Alabels, α, β; convert_to_dense) -end diff --git a/NDTensors/src/backup/arraystorage/diagonalarray/tensor/contract.jl b/NDTensors/src/backup/arraystorage/diagonalarray/tensor/contract.jl deleted file mode 100644 index 18fe5a1cff..0000000000 --- a/NDTensors/src/backup/arraystorage/diagonalarray/tensor/contract.jl +++ /dev/null @@ -1,25 +0,0 @@ -# The output must be initialized as zero since it is sparse, cannot be undefined -# Overspecifying types to fix ambiguity error. -# TODO: Rewrite in terms of `DiagonalArray` and `Array`, not `Tensor`. -function contraction_output( - T1::Tensor{T,N,<:DiagonalArray{T,N,<:AbstractVector{T}}}, T2::ArrayStorageTensor, indsR -) where {T,N} - return zero_contraction_output(T1, T2, indsR) -end - -# TODO: Rewrite in terms of `DiagonalArray` and `Array`, not `Tensor`. -function contraction_output( - T1::ArrayStorageTensor, T2::Tensor{T,N,<:DiagonalArray{T,N,<:AbstractVector{T}}}, indsR -) where {T,N} - return contraction_output(T2, T1, indsR) -end - -# Overspecifying types to fix ambiguity error. -# TODO: Rewrite in terms of `DiagonalArray`, not `Tensor`. -function contraction_output( - tensor1::Tensor{T1,N1,<:DiagonalArray{T1,N1,<:AbstractVector{T1}}}, - tensor2::Tensor{T2,N2,<:DiagonalArray{T2,N2,<:AbstractVector{T2}}}, - indsR, -) where {T1,N1,T2,N2} - return zero_contraction_output(tensor1, tensor2, indsR) -end diff --git a/NDTensors/src/blocksparse/adapt.jl b/NDTensors/src/blocksparse/adapt.jl deleted file mode 100644 index a5c40e5fe2..0000000000 --- a/NDTensors/src/blocksparse/adapt.jl +++ /dev/null @@ -1,3 +0,0 @@ -function set_datatype(storagetype::Type{<:BlockSparse}, datatype::Type{<:AbstractVector}) - return BlockSparse{eltype(datatype),datatype,ndims(storagetype)} -end diff --git a/NDTensors/src/blocksparse/block.jl b/NDTensors/src/blocksparse/block.jl deleted file mode 100644 index 5094669003..0000000000 --- a/NDTensors/src/blocksparse/block.jl +++ /dev/null @@ -1,180 +0,0 @@ - -# -# Block -# - -struct Block{N} - data::NTuple{N,UInt} - hash::UInt - function Block{N}(data::NTuple{N,UInt}) where {N} - h = _hash(data) - return new{N}(data, h) - end - function Block{0}(::Tuple{}) - h = _hash(()) - return new{0}((), h) - end -end - -# -# Constructors -# - -Block{N}(t::Tuple{Vararg{Any,N}}) where {N} = Block{N}(UInt.(t)) - -Block{N}(I::CartesianIndex{N}) where {N} = Block{N}(I.I) - -Block{N}(v::MVector{N}) where {N} = Block{N}(Tuple(v)) - -Block{N}(v::SVector{N}) where {N} = Block{N}(Tuple(v)) - -Block(b::Block) = b - -Block(I::CartesianIndex{N}) where {N} = Block{N}(I) - -Block(v::MVector{N}) where {N} = Block{N}(v) - -Block(v::SVector{N}) where {N} = Block{N}(v) - -Block(t::NTuple{N,UInt}) where {N} = Block{N}(t) - -Block(t::Tuple{Vararg{Any,N}}) where {N} = Block{N}(t) - -Block(::Tuple{}) = Block{0}(()) - -Block(I::Union{Integer,Block{1}}...) 
= Block(I) - -# -# Conversions -# - -CartesianIndex(b::Block) = CartesianIndex(Tuple(b)) - -Tuple(b::Block{N}) where {N} = NTuple{N,UInt}(b.data) - -convert(::Type{Block}, I::CartesianIndex{N}) where {N} = Block{N}(I.I) - -convert(::Type{Block{N}}, I::CartesianIndex{N}) where {N} = Block{N}(I.I) - -convert(::Type{Block}, t::Tuple) = Block(t) - -convert(::Type{Block{N}}, t::Tuple) where {N} = Block{N}(t) - -(::Type{IntT})(b::Block{1}) where {IntT<:Integer} = IntT(only(b)) - -# -# Getting and setting fields -# - -gethash(b::Block) = b.hash[] - -sethash!(b::Block, h::UInt) = (b.hash[] = h; return b) - -# -# Basic functions -# - -length(::Block{N}) where {N} = N - -isless(b1::Block, b2::Block) = isless(Tuple(b1), Tuple(b2)) - -iterate(b::Block, args...) = iterate(b.data, args...) - -@propagate_inbounds function getindex(b::Block, i::Integer) - return b.data[i] -end - -@propagate_inbounds function setindex(b::Block{N}, val, i::Integer) where {N} - return Block{N}(setindex(b.data, UInt(val), i)) -end - -ValLength(::Type{<:Block{N}}) where {N} = Val{N} - -deleteat(b::Block, pos) = Block(deleteat(Tuple(b), pos)) - -insertafter(b::Block, val, pos) = Block(insertafter(Tuple(b), UInt.(val), pos)) - -getindices(b::Block, I) = getindices(Tuple(b), I) - -# -# checkbounds -# - -# XXX: define this properly -checkbounds(::Tensor, ::Block) = nothing - -# -# Hashing -# - -# Borrowed from: -# https://github.com/JuliaLang/julia/issues/37073 -# This is the same as Julia's Base tuple hash, but is -# a bit faster. -_hash(t::Tuple) = _hash(t, zero(UInt)) -_hash(::Tuple{}, h::UInt) = h + Base.tuplehash_seed -@generated function _hash(b::NTuple{N}, h::UInt) where {N} - quote - out = h + Base.tuplehash_seed - @nexprs $N i -> out = hash(b[$N - i + 1], out) - end -end - -if VERSION < v"1.7.0-DEV.933" - # Stop inlining after some number of arguments to avoid code blowup - function _hash(t::Base.Any16, h::UInt) - out = h + Base.tuplehash_seed - for i in length(t):-1:1 - out = hash(t[i], out) - end - return out - end -else - # Stop inlining after some number of arguments to avoid code blowup - function _hash(t::Base.Any32, h::UInt) - out = h + Base.tuplehash_seed - for i in length(t):-1:1 - out = hash(t[i], out) - end - return out - end -end - -hash(b::Block) = UInt(b.hash) -hash(b::Block, h::UInt) = h + hash(b) - -# -# Custom NTuple{N, Int} hashes -# These are faster, but have a lot of collisions -# - -# Borrowed from: -# https://stackoverflow.com/questions/20511347/a-good-hash-function-for-a-vector -# This seems to have a lot of clashes -#function hash(b::Block, seed::UInt) -# h = UInt(0x9e3779b9) -# for n in b -# seed ⊻= n + h + (seed << 6) + (seed >> 2) -# end -# return seed -#end - -# Borrowed from: -# http://www.docjar.com/html/api/java/util/Arrays.java.html -# Could also consider uring the CPython tuple hash: -# https://github.com/python/cpython/blob/0430dfac629b4eb0e899a09b899a494aa92145f6/Objects/tupleobject.c#L406 -#function hash(b::Block, h::UInt) -# h += Base.tuplehash_seed -# for n in b -# h = 31 * h + n ⊻ (n >> 32) -# end -# return h -#end - -# -# Printing for Block type -# - -Base.show(io::IO, mime::MIME"text/plain", b::Block) = print(io, "Block$(Int.(Tuple(b)))") - -Base.show(io::IO, b::Block) = show(io, MIME("text/plain"), b) diff --git a/NDTensors/src/blocksparse/blockdims.jl b/NDTensors/src/blocksparse/blockdims.jl deleted file mode 100644 index 8de0439417..0000000000 --- a/NDTensors/src/blocksparse/blockdims.jl +++ /dev/null @@ -1,221 +0,0 @@ -using .TypeParameterAccessors: TypeParameterAccessors - 
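The `Block` type deleted above caches its hash at construction so that repeated lookups in the `BlockOffsets` dictionary never rehash the index tuple. A minimal sketch of that caching pattern with a hypothetical standalone type (not the NDTensors `Block`, which also uses a custom tuple hash):

```julia
struct MiniBlock{N}
    data::NTuple{N,UInt}
    hash::UInt
    # Hash once at construction; lookups below reuse the cached value.
    MiniBlock(t::NTuple{N,UInt}) where {N} = new{N}(t, hash(t))
end
Base.hash(b::MiniBlock, h::UInt) = h + b.hash
Base.:(==)(a::MiniBlock, b::MiniBlock) = a.data == b.data
```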
-""" - BlockDim - -An index for a BlockSparseTensor. -""" -const BlockDim = Vector{Int} - -# Makes for generic code -dim(d::BlockDim) = sum(d) - -""" - BlockDims{N} - -Dimensions used for BlockSparse NDTensors. -Each entry lists the block sizes in each dimension. -""" -const BlockDims{N} = NTuple{N,BlockDim} - -Base.ndims(ds::Type{<:BlockDims{N}}) where {N} = N - -function TypeParameterAccessors.similartype(::Type{<:BlockDims}, ::Type{Val{N}}) where {N} - return BlockDims{N} -end - -Base.copy(ds::BlockDims) = ds - -""" -dim(::BlockDims,::Integer) - -Return the total extent of the specified dimensions. -""" -function dim(ds::BlockDims{N}, i::Integer) where {N} - return sum(ds[i]) -end - -""" -dims(::BlockDims) - -Return the total extents of the dense space -the block dimensions live in. -""" -function dims(ds::BlockDims{N}) where {N} - return ntuple(i -> dim(ds, i), Val(N)) -end - -""" -dim(::BlockDims) - -Return the total extent of the dense space -the block dimensions live in. -""" -function dim(ds::BlockDims{N}) where {N} - return prod(dims(ds)) -end - -""" - nblocks(::BlockDim) - -The number of blocks of the BlockDim. -""" -function nblocks(ind::BlockDim) - return length(ind) -end - -""" - ndiagblocks(::Any) - -The number of blocks along the diagonal. -""" -function ndiagblocks(x) - return minimum(nblocks(x)) -end - -""" - nblocks(::BlockDims,i::Integer) - -The number of blocks in the specified dimension. -""" -function nblocks(inds::Tuple, i::Integer) - return nblocks(inds[i]) -end - -""" - nblocks(::BlockDims,is) - -The number of blocks in the specified dimensions. -""" -function nblocks(inds::Tuple, is::NTuple{N,Int}) where {N} - return ntuple(i -> nblocks(inds, is[i]), Val(N)) -end - -""" - nblocks(::BlockDims) - -A tuple of the number of blocks in each -dimension. -""" -function nblocks(inds::NTuple{N,<:Any}) where {N} - return ntuple(i -> nblocks(inds, i), Val(N)) -end - -function eachblock(inds::Tuple) - return (Block(b) for b in CartesianIndices(_Tuple(nblocks(inds)))) -end - -function eachdiagblock(inds::Tuple) - return (Block(ntuple(_ -> i, length(inds))) for i in 1:ndiagblocks(inds)) -end - -""" -blockdim(::BlockDim,::Integer) - -The size of the specified block in the specified -dimension. -""" -function blockdim(ind::BlockDim, i::Integer) - return ind[i] -end - -function blockdim(ind::Integer, i) - return error( - "`blockdim(i::Integer, b)` not currently defined for non-block index $i of type `$(typeof(i))`. In the future this may be defined for `b == Block(1)` or `b == 1` as `dim(i)` and error otherwise.", - ) -end - -""" - blockdim(::BlockDims,block,::Integer) - -The size of the specified block in the specified -dimension. -""" -function blockdim(inds, block, i::Integer) - return blockdim(inds[i], block[i]) -end - -""" - blockdims(::BlockDims,block) - -The size of the specified block. -""" -function blockdims(inds, block) - return ntuple(i -> blockdim(inds, block, i), ValLength(inds)) -end - -""" - blockdim(::BlockDims,block) - -The total size of the specified block. -""" -function blockdim(inds, block) - return prod(blockdims(inds, block)) -end - -""" - blockdiaglength(inds::BlockDims,block) - -The length of the diagonal of the specified block. -""" -function blockdiaglength(inds, block) - return minimum(blockdims(inds, block)) -end - -function outer(dim1, dim2, dim3, dims...; kwargs...) - return outer(outer(dim1, dim2), dim3, dims...; kwargs...) 
-end - -function outer(dim1::BlockDim, dim2::BlockDim) - dimR = BlockDim(undef, nblocks(dim1) * nblocks(dim2)) - for (i, t) in enumerate(Iterators.product(dim1, dim2)) - dimR[i] = prod(t) - end - return dimR -end - -function permuteblocks(dim::BlockDim, perm) - return dim[perm] -end - -# Given a CartesianIndex in the range dims(T), get the block it is in -# and the index within that block -function blockindex(T, i::Vararg{Integer,N}) where {N} - # Bounds check. - # Do something more robust like: - # @boundscheck Base.checkbounds_indices(Bool, map(Base.oneto, dims(T)), i) || throw_boundserror(T, i) - @boundscheck any(iszero, i) && Base.throw_boundserror(T, i) - - # Start in the (1,1,...,1) block - current_block_loc = @MVector ones(Int, N) - current_block_dims = blockdims(T, Tuple(current_block_loc)) - block_index = MVector(i) - for dim in 1:N - while block_index[dim] > current_block_dims[dim] - block_index[dim] -= current_block_dims[dim] - current_block_loc[dim] += 1 - current_block_dims = blockdims(T, Tuple(current_block_loc)) - end - end - return Tuple(block_index), Block{N}(current_block_loc) -end - -blockindex(T) = (), Block{0}(()) - -# -# This is to help with ITensor compatibility -# - -block(i::BlockDim, n::Integer) = i[n] - -resize(n::Int, newdim::Int) = newdim - -setblockdim!(dim1::BlockDim, newdim::Int, n::Int) = setindex!(dim1, newdim, n) - -setblock!(i::BlockDim, b::Int, n::Integer) = (i[n] = b) - -sim(dim::BlockDim) = copy(dim) - -dir(::BlockDim) = 0 - -dag(dim::BlockDim) = copy(dim) diff --git a/NDTensors/src/blocksparse/blockoffsets.jl b/NDTensors/src/blocksparse/blockoffsets.jl deleted file mode 100644 index 93bf9bd0d5..0000000000 --- a/NDTensors/src/blocksparse/blockoffsets.jl +++ /dev/null @@ -1,173 +0,0 @@ -using SparseArrays: SparseArrays - -# -# BlockOffsets -# - -const Blocks{N} = Vector{Block{N}} -const BlockOffset{N} = Pair{Block{N},Int} -# Use Dictionary from Dictionaries.jl (faster -# iteration than Base.Dict) -const BlockOffsets{N} = Dictionary{Block{N},Int} - -BlockOffset(block::Block{N}, offset::Int) where {N} = BlockOffset{N}(block, offset) - -Base.ndims(::Blocks{N}) where {N} = N -Base.ndims(::BlockOffset{N}) where {N} = N -Base.ndims(::BlockOffsets{N}) where {N} = N - -blocktype(bofs::BlockOffsets) = keytype(bofs) - -nzblock(bof::BlockOffset) = first(bof) - -offset(bof::BlockOffset) = last(bof) - -nzblock(block::Block) = block - -# Get the offset of the nth block in the block-offsets -# list -offset(bofs::BlockOffsets, n) = offset(bofs[n]) - -nnzblocks(bofs::BlockOffsets) = length(bofs) -nnzblocks(bs::Blocks) = length(bs) - -eachnzblock(bofs::BlockOffsets) = keys(bofs) - -nzblocks(bofs::BlockOffsets) = collect(eachnzblock(bofs)) - -# define block ordering with reverse lexicographical order -function isblockless(b1::Block{N}, b2::Block{N}) where {N} - return CartesianIndex(b1) < CartesianIndex(b2) -end - -function isblockless(bof1::BlockOffset{N}, bof2::BlockOffset{N}) where {N} - return isblockless(nzblock(bof1), nzblock(bof2)) -end - -function isblockless(bof1::BlockOffset{N}, b2::Block{N}) where {N} - return isblockless(nzblock(bof1), b2) -end - -function isblockless(b1::Block{N}, bof2::BlockOffset{N}) where {N} - return isblockless(b1, nzblock(bof2)) -end - -function offset(bofs::BlockOffsets{N}, block::Block{N}) where {N} - if !isassigned(bofs, block) - return nothing - end - return bofs[block] -end - -function SparseArrays.nnz(bofs::BlockOffsets, inds) - _nnz = 0 - nnzblocks(bofs) == 0 && return _nnz - for block in eachnzblock(bofs) - _nnz += blockdim(inds, block) - end - return _nnz -end
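The `blockoffsets` constructors just below assign each stored block the running total of elements stored before it, which is also the quantity the `nnz` method above sums back up. The bookkeeping in miniature, with a plain `Dict` standing in for the `Dictionaries.jl` type and `sizes[b]` playing the role of `blockdim(inds, block)` (a hypothetical helper, for illustration only):

```julia
function offsets_from_sizes(blocks::Vector{Symbol}, sizes::Dict{Symbol,Int})
    offsets = Dict{Symbol,Int}()
    nnz = 0
    for b in blocks
        offsets[b] = nnz   # offset of this block in the flat data vector
        nnz += sizes[b]    # accumulate the total stored length
    end
    return offsets, nnz
end

offsets_from_sizes([:a, :b], Dict(:a => 6, :b => 4))  # (Dict(:a => 0, :b => 6), 10)
```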
 -blockoffsets(blocks::Vector{<:NTuple}, inds) = blockoffsets(Block.(blocks), inds) - -# TODO: should this be a constructor? -function blockoffsets(blocks::Vector{<:Block{N}}, inds) where {N} - blockoffsets = BlockOffsets{N}() - nnz = 0 - for block in blocks - insert!(blockoffsets, block, nnz) - current_block_dim = blockdim(inds, block) - nnz += current_block_dim - end - return blockoffsets, nnz -end - -""" - diagblockoffsets(blocks::Blocks,inds) - -Get the blockoffsets only along the diagonal. -The offsets are along the diagonal. - -Assumes the blocks are along the diagonal. -""" -function diagblockoffsets( - blocks::Vector{BlockT}, inds -) where {BlockT<:Union{Block{N},Tuple{Vararg{Any,N}}}} where {N} - blockoffsets = BlockOffsets{N}() - nnzdiag = 0 - for (i, block) in enumerate(blocks) - insert!(blockoffsets, Block(block), nnzdiag) - current_block_diaglength = blockdiaglength(inds, block) - nnzdiag += current_block_diaglength - end - return blockoffsets, nnzdiag -end - -# Permute the blockoffsets and indices -function permutedims(boffs::BlockOffsets{N}, inds, perm::NTuple{N,Int}) where {N} - blocksR = Blocks{N}(undef, nnzblocks(boffs)) - for (i, block) in enumerate(keys(boffs)) - blocksR[i] = permute(block, perm) - end - indsR = permute(inds, perm) - blockoffsetsR, _ = blockoffsets(blocksR, indsR) - return blockoffsetsR, indsR -end - -function permutedims(blocks::Vector{Block{N}}, perm::NTuple{N,Int}) where {N} - blocks_perm = Vector{Block{N}}(undef, length(blocks)) - for (i, block) in enumerate(blocks) - blocks_perm[i] = permute(block, perm) - end - return blocks_perm -end - -""" -blockdim(T::BlockOffsets,nnz::Int,pos::Int) - -Get the block dimension of the block at position pos. -""" -function blockdim(boffs::BlockOffsets, nnz::Int, pos::Int) - if nnzblocks(boffs) == 0 - return 0 - elseif pos == nnzblocks(boffs) - return nnz - offset(boffs, pos) - end - return offset(boffs, pos + 1) - offset(boffs, pos) -end - -function Base.union( - boffs1::BlockOffsets{N}, nnz1::Int, boffs2::BlockOffsets{N}, nnz2::Int -) where {N} - n1, n2 = 1, 1 - boffsR = BlockOffset{N}[] - current_offset = 0 - while n1 <= length(boffs1) && n2 <= length(boffs2) - if isblockless(boffs1[n1], boffs2[n2]) - push!(boffsR, BlockOffset(nzblock(boffs1[n1]), current_offset)) - current_offset += blockdim(boffs1, nnz1, n1) - n1 += 1 - elseif isblockless(boffs2[n2], boffs1[n1]) - push!(boffsR, BlockOffset(nzblock(boffs2[n2]), current_offset)) - current_offset += blockdim(boffs2, nnz2, n2) - n2 += 1 - else - push!(boffsR, BlockOffset(nzblock(boffs1[n1]), current_offset)) - current_offset += blockdim(boffs1, nnz1, n1) - n1 += 1 - n2 += 1 - end - end - if n1 <= length(boffs1) - for n in n1:length(boffs1) - push!(boffsR, BlockOffset(nzblock(boffs1[n]), current_offset)) - current_offset += blockdim(boffs1, nnz1, n) - end - elseif n2 <= length(boffs2) - for n in n2:length(boffs2) - push!(boffsR, BlockOffset(nzblock(boffs2[n]), current_offset)) - current_offset += blockdim(boffs2, nnz2, n) - end - end - return boffsR, current_offset -end diff --git a/NDTensors/src/blocksparse/blocksparse.jl b/NDTensors/src/blocksparse/blocksparse.jl deleted file mode 100644 index 7724ac10fc..0000000000 --- a/NDTensors/src/blocksparse/blocksparse.jl +++ /dev/null @@ -1,207 +0,0 @@ -# -# BlockSparse storage -# - -struct BlockSparse{ElT,VecT,N} <: TensorStorage{ElT} - data::VecT - blockoffsets::BlockOffsets{N} # Block number-offset pairs - function BlockSparse( - data::VecT, blockoffsets::BlockOffsets{N} - ) where 
{VecT<:AbstractVector{ElT},N} where {ElT} - return new{ElT,VecT,N}(data, blockoffsets) - end -end - -# TODO: Implement as `fieldtype(storagetype, :data)`. -datatype(::Type{<:BlockSparse{<:Any,DataT}}) where {DataT} = DataT -# TODO: Implement as `ndims(blockoffsetstype(storagetype))`. -Base.ndims(storagetype::Type{<:BlockSparse{<:Any,<:Any,N}}) where {N} = N -# TODO: Implement as `fieldtype(storagetype, :blockoffsets)`. -blockoffsetstype(storagetype::Type{<:BlockSparse}) = BlockOffsets{ndims(storagetype)} - -function set_datatype(storagetype::Type{<:BlockSparse}, datatype::Type{<:AbstractVector}) - return BlockSparse{eltype(datatype),datatype,ndims(storagetype)} -end - -function TypeParameterAccessors.set_ndims(storagetype::Type{<:BlockSparse}, ndims::Int) - return BlockSparse{eltype(storagetype),datatype(storagetype),ndims} -end - -# TODO: Write as `(::Type{<:BlockSparse})()`. -BlockSparse{ElT,DataT,N}() where {ElT,DataT,N} = BlockSparse(DataT(), BlockOffsets{N}()) - -function BlockSparse( - datatype::Type{<:AbstractArray}, blockoffsets::BlockOffsets, dim::Integer; vargs... -) - return BlockSparse( - fill!(NDTensors.similar(datatype, dim), zero(eltype(datatype))), blockoffsets; vargs... - ) -end - -function BlockSparse( - eltype::Type{<:Number}, blockoffsets::BlockOffsets, dim::Integer; vargs... -) - return BlockSparse(Vector{eltype}, blockoffsets, dim; vargs...) -end - -function BlockSparse(x::Number, blockoffsets::BlockOffsets, dim::Integer; vargs...) - return BlockSparse(fill(x, dim), blockoffsets; vargs...) -end - -function BlockSparse( - ::Type{ElT}, ::UndefInitializer, blockoffsets::BlockOffsets, dim::Integer; vargs... -) where {ElT<:Number} - return BlockSparse(Vector{ElT}(undef, dim), blockoffsets; vargs...) -end - -function BlockSparse( - datatype::Type{<:AbstractArray}, - ::UndefInitializer, - blockoffsets::BlockOffsets, - dim::Integer; - vargs..., -) - return BlockSparse(datatype(undef, dim), blockoffsets; vargs...) -end - -function BlockSparse(blockoffsets::BlockOffsets, dim::Integer; vargs...) - return BlockSparse(Float64, blockoffsets, dim; vargs...) -end - -function BlockSparse(::UndefInitializer, blockoffsets::BlockOffsets, dim::Integer; vargs...) - return BlockSparse(Float64, undef, blockoffsets, dim; vargs...) -end - -copy(D::BlockSparse) = BlockSparse(copy(data(D)), copy(blockoffsets(D))) - -setdata(B::BlockSparse, ndata) = BlockSparse(ndata, copy(blockoffsets(B))) -function setdata(storagetype::Type{<:BlockSparse}, data) - return error("Not implemented, must specify block offsets as well") -end - -# -# Random -# - -function randn( - StorageT::Type{<:BlockSparse{ElT}}, blockoffsets::BlockOffsets, dim::Integer -) where {ElT<:Number} - return randn(Random.default_rng(), StorageT, blockoffsets, dim) -end - -function randn( - rng::AbstractRNG, ::Type{<:BlockSparse{ElT}}, blockoffsets::BlockOffsets, dim::Integer -) where {ElT<:Number} - return BlockSparse(randn(rng, ElT, dim), blockoffsets) -end - -#function BlockSparse{ElR}(data::VecT,offsets) where {ElR,VecT<:AbstractVector{ElT}} where {ElT} -# ElT == ElR ? BlockSparse(data,offsets) : BlockSparse(ElR.(data),offsets) -#end -#BlockSparse{ElT}() where {ElT} = BlockSparse(ElT[],BlockOffsets()) - -# TODO: check the offsets are the same? 
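All of the constructors above reduce to allocating one flat vector of the total stored length; each block then occupies a contiguous offset range inside it, which is what `blockview` further below slices out. A sketch with plain Base functions (an assumption: the deleted code routes allocation through `NDTensors.similar`/`generic_zeros` instead of `Base.similar`):

```julia
data = fill!(similar(Vector{Float64}, 10), 0.0)  # zero-filled flat storage
offset, blockdim = 6, 4                          # one block's offset and length
blk = @view data[(offset + 1):(offset + blockdim)]
blk .= 1.0   # mutating the block view mutates the flat storage
sum(data)    # 4.0: only that block's elements were touched
```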
-function copyto!(D1::BlockSparse, D2::BlockSparse) - blockoffsets(D1) ≠ blockoffsets(D2) && - error("Cannot copy between BlockSparse storages with different offsets") - copyto!(expose(data(D1)), expose(data(D2))) - return D1 -end - -Base.real(::Type{BlockSparse{T}}) where {T} = BlockSparse{real(T)} - -complex(::Type{BlockSparse{T}}) where {T} = BlockSparse{complex(T)} - -Base.ndims(::BlockSparse{T,V,N}) where {T,V,N} = N - -eltype(::BlockSparse{T}) where {T} = eltype(T) -# This is necessary since for some reason inference doesn't work -# with the more general definition (eltype(Nothing) === Any) -eltype(::BlockSparse{Nothing}) = Nothing -eltype(::Type{BlockSparse{T}}) where {T} = eltype(T) - -dense(::Type{<:BlockSparse{ElT,VecT}}) where {ElT,VecT} = Dense{ElT,VecT} - -can_contract(T1::Type{<:Dense}, T2::Type{<:BlockSparse}) = false -can_contract(T1::Type{<:BlockSparse}, T2::Type{<:Dense}) = can_contract(T2, T1) - -function promote_rule( - ::Type{<:BlockSparse{ElT1,VecT1,N}}, ::Type{<:BlockSparse{ElT2,VecT2,N}} -) where {ElT1,ElT2,VecT1,VecT2,N} - # Promote the element types properly. - ElT = promote_type(ElT1, ElT2) - VecT = promote_type(set_eltype(VecT1, ElT), set_eltype(VecT2, ElT)) - return BlockSparse{ElT,VecT,N} -end - -function promote_rule( - ::Type{<:BlockSparse{ElT1,VecT1,N1}}, ::Type{<:BlockSparse{ElT2,VecT2,N2}} -) where {ElT1,ElT2,VecT1,VecT2,N1,N2} - # Promote the element types properly. - ElT = promote_type(ElT1, ElT2) - VecT = promote_type(set_eltype(VecT1, ElT), set_eltype(VecT2, ElT)) - return BlockSparse{ElT,VecT,NR} where {NR} -end - -function promote_rule( - ::Type{<:BlockSparse{ElT1,VecT1,N1}}, ::Type{ElT2} -) where {ElT1,VecT1<:AbstractVector{ElT1},ElT2<:Number,N1} - ElR = promote_type(ElT1, ElT2) - VecR = set_eltype(VecT1, ElR) - return BlockSparse{ElR,VecR,N1} -end - -function convert( - ::Type{<:BlockSparse{ElR,VecR,N}}, D::BlockSparse{ElD,VecD,N} -) where {ElR,VecR,N,ElD,VecD} - return setdata(D, convert(VecR, data(D))) -end - -""" -isblocknz(T::BlockSparse, - block::Block) - -Check if the specified block is non-zero. -""" -function isblocknz(T::BlockSparse{ElT,VecT,N}, block::Block{N}) where {ElT,VecT,N} - return isassigned(blockoffsets(T), block) -end - -# If block is input as Tuple -isblocknz(T::BlockSparse, block) = isblocknz(T, Block(block)) - -# Given a specified block, return a Dense storage that is a view to the data -# in that block. Return nothing if the block is structurally zero -function blockview(T::BlockSparse, block) - #error("Block must be structurally non-zero to get a view") - !isblocknz(T, block) && return nothing - blockoffsetT = offset(T, block) - blockdimT = blockdim(T, block) - dataTslice = @view data(T)[(blockoffsetT + 1):(blockoffsetT + blockdimT)] - return Dense(dataTslice) -end - -# XXX this is not well defined with new Dictionary design -#function (D1::BlockSparse + D2::BlockSparse) -# # This could be of order nnzblocks, avoid? -# if blockoffsets(D1) == blockoffsets(D2) -# return BlockSparse(data(D1)+data(D2),blockoffsets(D1)) -# end -# blockoffsetsR,nnzR = union(blockoffsets(D1),nnz(D1), -# blockoffsets(D2),nnz(D2)) -# R = BlockSparse(undef,blockoffsetsR,nnzR) -# for (blockR,offsetR) in blockoffsets(R) -# blockview1 = blockview(D1,blockR) -# blockview2 = blockview(D2,blockR) -# blockviewR = blockview(R,blockR) -# if isnothing(blockview1) -# copyto!(blockviewR,blockview2) -# elseif isnothing(blockview2) -# copyto!(blockviewR,blockview1) -# else -# # TODO: is this fast? 
-# blockviewR .= blockview1 .+ blockview2 -# end -# end -# return R -#end diff --git a/NDTensors/src/blocksparse/blocksparsetensor.jl b/NDTensors/src/blocksparse/blocksparsetensor.jl deleted file mode 100644 index f367de836c..0000000000 --- a/NDTensors/src/blocksparse/blocksparsetensor.jl +++ /dev/null @@ -1,1002 +0,0 @@ -using SparseArrays: nnz -using .TypeParameterAccessors: similartype - -# -# BlockSparseTensor (Tensor using BlockSparse storage) -# - -const BlockSparseTensor{ElT,N,StoreT,IndsT} = - Tensor{ElT,N,StoreT,IndsT} where {StoreT<:BlockSparse} - -nonzeros(T::Tensor) = data(T) - -function BlockSparseTensor( - ::Type{ElT}, ::UndefInitializer, boffs::BlockOffsets, inds -) where {ElT<:Number} - nnz_tot = nnz(boffs, inds) - storage = BlockSparse(ElT, undef, boffs, nnz_tot) - return tensor(storage, inds) -end - -function BlockSparseTensor( - datatype::Type{<:AbstractArray}, ::UndefInitializer, boffs::BlockOffsets, inds -) - nnz_tot = nnz(boffs, inds) - storage = BlockSparse(datatype, undef, boffs, nnz_tot) - return tensor(storage, inds) -end - -function BlockSparseTensor( - ::Type{ElT}, ::UndefInitializer, blocks::Vector{BlockT}, inds -) where {ElT<:Number,BlockT<:Union{Block,NTuple}} - boffs, nnz = blockoffsets(blocks, inds) - storage = BlockSparse(ElT, undef, boffs, nnz) - return tensor(storage, inds) -end - -function BlockSparseTensor( - datatype::Type{<:AbstractArray}, - ::UndefInitializer, - blocks::Vector{<:Union{Block,NTuple}}, - inds, -) - boffs, nnz = blockoffsets(blocks, inds) - storage = BlockSparse(datatype, undef, boffs, nnz) - return tensor(storage, inds) -end - -""" - BlockSparseTensor(::UndefInitializer, blocks, inds) - -Construct a block sparse tensor with uninitialized memory -from indices and locations of non-zero blocks. -""" -function BlockSparseTensor(::UndefInitializer, blockoffsets, inds) - return BlockSparseTensor(default_eltype(), undef, blockoffsets, inds) -end - -function BlockSparseTensor( - datatype::Type{<:AbstractArray}, blockoffsets::BlockOffsets, inds -) - nnz_tot = nnz(blockoffsets, inds) - storage = BlockSparse(datatype, blockoffsets, nnz_tot) - return tensor(storage, inds) -end - -function BlockSparseTensor(eltype::Type{<:Number}, blockoffsets::BlockOffsets, inds) - return BlockSparseTensor(Vector{eltype}, blockoffsets, inds) -end - -function BlockSparseTensor(blockoffsets::BlockOffsets, inds) - return BlockSparseTensor(default_eltype(), blockoffsets, inds) -end - -""" - BlockSparseTensor(inds) - -Construct a block sparse tensor with no blocks. -""" -BlockSparseTensor(inds) = BlockSparseTensor(default_eltype(), inds) - -function BlockSparseTensor(datatype::Type{<:AbstractArray}, inds) - return BlockSparseTensor(datatype, BlockOffsets{length(inds)}(), inds) -end - -function BlockSparseTensor(eltype::Type{<:Number}, inds) - return BlockSparseTensor(Vector{eltype}, inds) -end - -""" - BlockSparseTensor(inds) - -Construct a block sparse tensor with no blocks. -""" -function BlockSparseTensor(inds::Vararg{DimT,N}) where {DimT,N} - return BlockSparseTensor(BlockOffsets{N}(), inds) -end - -""" - BlockSparseTensor(blocks::Vector{Block{N}}, inds) - -Construct a block sparse tensor with the specified blocks. -Defaults to setting structurally non-zero blocks to zero. 
-""" -function BlockSparseTensor(blocks::Vector{BlockT}, inds) where {BlockT<:Union{Block,NTuple}} - return BlockSparseTensor(default_eltype(), blocks, inds) -end - -function BlockSparseTensor( - ::Type{ElT}, blocks::Vector{BlockT}, inds -) where {ElT<:Number,BlockT<:Union{Block,NTuple}} - boffs, nnz = blockoffsets(blocks, inds) - storage = BlockSparse(ElT, boffs, nnz) - return tensor(storage, inds) -end - -function BlockSparseTensor( - datatype::Type{<:AbstractArray}, blocks::Vector{<:Union{Block,NTuple}}, inds -) - boffs, nnz = blockoffsets(blocks, inds) - storage = BlockSparse(datatype, boffs, nnz) - return tensor(storage, inds) -end - -function BlockSparseTensor( - x::Number, blocks::Vector{BlockT}, inds -) where {BlockT<:Union{Block,NTuple}} - boffs, nnz = blockoffsets(blocks, inds) - storage = BlockSparse(x, boffs, nnz) - return tensor(storage, inds) -end - -#complex(::Type{BlockSparseTensor{ElT,N,StoreT,IndsT}}) where {ElT<:Number,N,StoreT<:BlockSparse -# = Tensor{ElT,N,StoreT,IndsT} where {StoreT<:BlockSparse} - -function randn( - TensorT::Type{<:BlockSparseTensor{ElT,N}}, blocks::Vector{<:BlockT}, inds -) where {ElT,BlockT<:Union{Block{N},NTuple{N,<:Integer}}} where {N} - return randn(Random.default_rng(), TensorT, blocks, inds) -end - -function randn( - rng::AbstractRNG, ::Type{<:BlockSparseTensor{ElT,N}}, blocks::Vector{<:BlockT}, inds -) where {ElT,BlockT<:Union{Block{N},NTuple{N,<:Integer}}} where {N} - boffs, nnz = blockoffsets(blocks, inds) - storage = randn(rng, BlockSparse{ElT}, boffs, nnz) - return tensor(storage, inds) -end - -function randomBlockSparseTensor( - ::Type{ElT}, blocks::Vector{<:BlockT}, inds -) where {ElT,BlockT<:Union{Block{N},NTuple{N,<:Integer}}} where {N} - return randomBlockSparseTensor(Random.default_rng(), ElT, blocks, inds) -end - -function randomBlockSparseTensor( - rng::AbstractRNG, ::Type{ElT}, blocks::Vector{<:BlockT}, inds -) where {ElT,BlockT<:Union{Block{N},NTuple{N,<:Integer}}} where {N} - return randn(rng, BlockSparseTensor{ElT,N}, blocks, inds) -end - -function randomBlockSparseTensor(blocks::Vector, inds) - return randomBlockSparseTensor(Random.default_rng(), blocks, inds) -end - -function randomBlockSparseTensor(rng::AbstractRNG, blocks::Vector, inds) - return randomBlockSparseTensor(rng, default_eltype(), blocks, inds) -end - -""" -BlockSparseTensor(blocks::Vector{Block{N}}, - inds::BlockDims...) - -Construct a block sparse tensor with the specified blocks. -Defaults to setting structurally non-zero blocks to zero. 
-""" -function BlockSparseTensor( - blocks::Vector{BlockT}, inds::Vararg{BlockDim,N} -) where {BlockT<:Union{Block{N},NTuple{N,<:Integer}}} where {N} - return BlockSparseTensor(blocks, inds) -end - -function BlockSparseTensor{ElT}( - blocks::Vector{BlockT}, inds::Vararg{BlockDim,N} -) where {ElT<:Number,BlockT<:Union{Block{N},NTuple{N,<:Integer}}} where {N} - return BlockSparseTensor(ElT, blocks, inds) -end - -function zeros( - tensor::BlockSparseTensor{ElT,N}, blockoffsets::BlockOffsets{N}, inds -) where {ElT,N} - return BlockSparseTensor(datatype(tensor), blockoffsets, inds) -end - -function zeros( - tensortype::Type{<:BlockSparseTensor{ElT,N}}, blockoffsets::BlockOffsets{N}, inds -) where {ElT,N} - return BlockSparseTensor(datatype(tensortype), blockoffsets, inds) -end - -function zeros(tensortype::Type{<:BlockSparseTensor}, inds) - return BlockSparseTensor(datatype(tensortype), inds) -end - -zeros(tensor::BlockSparseTensor, inds) = zeros(typeof(tensor), inds) - -# Basic functionality for AbstractArray interface -IndexStyle(::Type{<:BlockSparseTensor}) = IndexCartesian() - -# Get the CartesianIndices for the range of indices -# of the specified -function blockindices(T::BlockSparseTensor{ElT,N}, block) where {ElT,N} - return CartesianIndex(blockstart(T, block)):CartesianIndex(blockend(T, block)) -end - -""" -indexoffset(T::BlockSparseTensor,i::Int...) -> offset,block,blockoffset - -Get the offset in the data of the specified -CartesianIndex. If it falls in a block that doesn't -exist, return nothing for the offset. -Also returns the block the index is found in and the offset -within the block. -""" -function indexoffset(T::BlockSparseTensor{ElT,N}, i::Vararg{Int,N}) where {ElT,N} - index_within_block, block = blockindex(T, i...) - block_dims = blockdims(T, block) - offset_within_block = LinearIndices(block_dims)[CartesianIndex(index_within_block)] - offset_of_block = offset(T, block) - offset_of_i = isnothing(offset_of_block) ? nothing : offset_of_block + offset_within_block - return offset_of_i, block, offset_within_block -end - -# TODO: Add a checkbounds -# TODO: write this nicer in terms of blockview? -# Could write: -# block,index_within_block = blockindex(T,i...) -# return blockview(T,block)[index_within_block] -@propagate_inbounds function getindex( - T::BlockSparseTensor{ElT,N}, i::Vararg{Int,N} -) where {ElT,N} - offset, _ = indexoffset(T, i...) - isnothing(offset) && return zero(ElT) - return storage(T)[offset] -end - -@propagate_inbounds function getindex(T::BlockSparseTensor{ElT,0}) where {ElT} - nnzblocks(T) == 0 && return zero(ElT) - return expose(storage(T))[] -end - -# These may not be valid if the Tensor has no blocks -#@propagate_inbounds getindex(T::BlockSparseTensor{<:Number,1},ind::Int) = storage(T)[ind] - -#@propagate_inbounds getindex(T::BlockSparseTensor{<:Number,0}) = storage(T)[1] - -# Add the specified block to the BlockSparseTensor -# Insert it such that the blocks remain ordered. -# Defaults to adding zeros. -# Returns the offset of the new block added. -# XXX rename to insertblock!, no need to return offset -using .TypeParameterAccessors: unwrap_array_type -using .Expose: Exposed, expose, unexpose -function insertblock_offset!(T::BlockSparseTensor{ElT,N}, newblock::Block{N}) where {ElT,N} - newdim = blockdim(T, newblock) - newoffset = nnz(T) - insert!(blockoffsets(T), newblock, newoffset) - # Insert new block into data - new_data = generic_zeros(unwrap_array_type(T), newdim) - # TODO: `append!` is broken on `Metal` since `resize!` - # isn't implemented. 
- append!(expose(data(T)), new_data) - return newoffset -end - -function insertblock!(T::BlockSparseTensor{<:Number,N}, block::Block{N}) where {N} - insertblock_offset!(T, block) - return T -end - -insertblock!(T::BlockSparseTensor, block) = insertblock!(T, Block(block)) - -# Insert missing diagonal blocks as zero blocks -function insert_diag_blocks!(T::AbstractArray) - for b in eachdiagblock(T) - blockT = blockview(T, b) - if isnothing(blockT) - # Block was not found in the list, insert it - insertblock!(T, b) - end - end -end - -# TODO: Add a checkbounds -@propagate_inbounds function setindex!( - T::BlockSparseTensor{ElT,N}, val, i::Vararg{Int,N} -) where {ElT,N} - offset, block, offset_within_block = indexoffset(T, i...) - if isnothing(offset) - offset_of_block = insertblock_offset!(T, block) - offset = offset_of_block + offset_within_block - end - storage(T)[offset] = val - return T -end - -hasblock(T::Tensor, block::Block) = isassigned(blockoffsets(T), block) - -@propagate_inbounds function setindex!( - T::BlockSparseTensor{ElT,N}, val, b::Block{N} -) where {ElT,N} - if !hasblock(T, b) - insertblock!(T, b) - end - Tb = T[b] - Tb .= val - return T -end - -getindex(T::BlockSparseTensor, block::Block) = blockview(T, block) - -to_indices(T::Tensor{<:Any,N}, b::Tuple{Block{N}}) where {N} = blockindices(T, b...) - -function blockview(T::BlockSparseTensor, block::Block) - return blockview(T, block, offset(T, block)) -end - -function blockview(T::BlockSparseTensor, block::Block, offset::Integer) - return blockview(T, BlockOffset(block, offset)) -end - -# Case where the block isn't found, return nothing -function blockview(T::BlockSparseTensor, block::Block, ::Nothing) - return nothing -end - -blockview(T::BlockSparseTensor, block) = blockview(T, Block(block)) - -function blockview(T::BlockSparseTensor, bof::BlockOffset) - blockT, offsetT = bof - blockdimsT = blockdims(T, blockT) - blockdimT = prod(blockdimsT) - dataTslice = @view data(storage(T))[(offsetT + 1):(offsetT + blockdimT)] - return tensor(Dense(dataTslice), blockdimsT) -end - -view(T::BlockSparseTensor, b::Block) = blockview(T, b) - -# convert to Dense -function dense(T::TensorT) where {TensorT<:BlockSparseTensor} - R = zeros(dense(TensorT), inds(T)) - ## Here this failed with scalar indexing (R[blockindices] = blockview) - ## We can fix this by using copyto the arrays - r = array(R) - for block in keys(blockoffsets(T)) - # TODO: make sure this assignment is efficient - rview = @view r[blockindices(T, block)] - copyto!(expose(rview), expose(array(blockview(T, block)))) - end - return tensor(Dense(r), inds(T)) -end - -function diag(ETensor::Exposed{<:AbstractArray,<:BlockSparseTensor}) - tensor = unexpose(ETensor) - tensordiag = NDTensors.similar( - dense(typeof(tensor)), eltype(tensor), (diaglength(tensor),) - ) - for j in 1:diaglength(tensor) - @inbounds tensordiag[j] = getdiagindex(tensor, j) - end - return tensordiag -end - -function Base.mapreduce( - f, op, t1::BlockSparseTensor, t_tail::BlockSparseTensor...; kwargs... -) - # TODO: Take advantage of block sparsity here. - return mapreduce(f, op, array(t1), array.(t_tail)...; kwargs...) -end - -# This is a special case that optimizes for a single tensor -# and takes advantage of block sparsity. Once the more general -# case handles block sparsity, this can be removed. -function Base.mapreduce(f, op, t::BlockSparseTensor; kwargs...) - elt = eltype(t) - if !iszero(f(zero(elt))) - return mapreduce(f, op, array(t); kwargs...) 
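The branch above falls back to a dense reduction whenever `f` does not map zero to zero; otherwise the reduction that follows visits only the stored values and, when the tensor has more elements than stored values, folds the structural zeros in through the `init` value. For example:

```julia
stored = [-2.0, 3.0]                     # stored entries of a larger tensor
# abs(0) == 0, so skipping the structural zeros is safe with init = 0.0:
mapreduce(abs, max, stored; init = 0.0)  # 3.0, same as over the dense array
```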
- end - if length(t) > nnz(t) - # Some elements are zero, account for that - # with the initial value. - init_kwargs = (; init=zero(elt)) - else - init_kwargs = (;) - end - return mapreduce(f, op, storage(t); kwargs..., init_kwargs...) -end - -function blocksparse_isequal(x, y) - return array(x) == array(y) -end -function Base.:(==)(x::BlockSparseTensor, y::BlockSparseTensor) - return blocksparse_isequal(x, y) -end -function Base.:(==)(x::BlockSparseTensor, y::Tensor) - return blocksparse_isequal(x, y) -end -function Base.:(==)(x::Tensor, y::BlockSparseTensor) - return blocksparse_isequal(x, y) -end - -## TODO currently this fails on GPU with scalar indexing -function map_diag!( - f::Function, - exposed_t_destination::Exposed{<:AbstractArray,<:BlockSparseTensor}, - exposed_t_source::Exposed{<:AbstractArray,<:BlockSparseTensor}, -) - t_destination = unexpose(exposed_t_destination) - t_source = unexpose(exposed_t_source) - for i in 1:diaglength(t_destination) - NDTensors.setdiagindex!(t_destination, f(NDTensors.getdiagindex(t_source, i)), i) - end - return t_destination -end -# -# Operations -# - -# TODO: extend to case with different block structures -function +(T1::BlockSparseTensor{<:Number,N}, T2::BlockSparseTensor{<:Number,N}) where {N} - inds(T1) ≠ inds(T2) && - error("Cannot add block sparse tensors with different block structure") - R = copy(T1) - return permutedims!!(R, T2, ntuple(identity, Val(N)), +) -end - -function permutedims(T::BlockSparseTensor{<:Number,N}, perm::NTuple{N,Int}) where {N} - blockoffsetsR, indsR = permutedims(blockoffsets(T), inds(T), perm) - R = NDTensors.similar(T, blockoffsetsR, indsR) - permutedims!(R, T, perm) - return R -end - -function _permute_combdims(combdims::NTuple{NC,Int}, perm::NTuple{NP,Int}) where {NC,NP} - res = MVector{NC,Int}(undef) - iperm = invperm(perm) - for i in 1:NC - res[i] = iperm[combdims[i]] - end - return Tuple(res) -end - -# -# These are functions to help with combining and uncombining -# - -# Note that combdims is expected to be contiguous and ordered -# smallest to largest -function combine_dims(blocks::Vector{Block{N}}, inds, combdims::NTuple{NC,Int}) where {N,NC} - nblcks = nblocks(inds, combdims) - blocks_comb = Vector{Block{N - NC + 1}}(undef, length(blocks)) - for (i, block) in enumerate(blocks) - blocks_comb[i] = combine_dims(block, inds, combdims) - end - return blocks_comb -end - -function combine_dims(block::Block, inds, combdims::NTuple{NC,Int}) where {NC} - nblcks = nblocks(inds, combdims) - slice = getindices(block, combdims) - slice_comb = LinearIndices(nblcks)[slice...] 
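The `LinearIndices` lookup just above is the heart of block combination: a tuple of per-dimension block positions becomes one linear block position in the combined dimension, and `CartesianIndices` inverts the mapping. Concretely:

```julia
nblcks = (2, 3)              # number of blocks in each combined dimension
LinearIndices(nblcks)[2, 3]  # 6: block (2, 3) maps to combined block 6
CartesianIndices(nblcks)[6]  # CartesianIndex(2, 3): the inverse mapping
```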
- block_comb = deleteat(block, combdims) - block_comb = insertafter(block_comb, tuple(slice_comb), minimum(combdims) - 1) - return block_comb -end - -# In the dimension dim, permute the blocks -function perm_blocks(blocks::Blocks{N}, dim::Int, perm) where {N} - blocks_perm = Blocks{N}(undef, nnzblocks(blocks)) - iperm = invperm(perm) - for (i, block) in enumerate(blocks) - blocks_perm[i] = setindex(block, iperm[block[dim]], dim) - end - return blocks_perm -end - -# In the dimension dim, permute the block -function perm_block(block::Block, dim::Int, perm) - iperm = invperm(perm) - return setindex(block, iperm[block[dim]], dim) -end - -# In the dimension dim, combine the specified blocks -function combine_blocks(blocks::Blocks, dim::Int, blockcomb::Vector{Int}) - blocks_comb = copy(blocks) - nnz_comb = nnzblocks(blocks) - for (i, block) in enumerate(blocks) - dimval = block[dim] - blocks_comb[i] = setindex(block, blockcomb[dimval], dim) - end - unique!(blocks_comb) - return blocks_comb -end - -function permutedims_combine_output( - T::BlockSparseTensor{ElT,N}, - is, - perm::NTuple{N,Int}, - combdims::NTuple{NC,Int}, - blockperm::Vector{Int}, - blockcomb::Vector{Int}, -) where {ElT,N,NC} - # Permute the indices - indsT = inds(T) - inds_perm = permute(indsT, perm) - - # Now that the indices are permuted, compute - # which indices are now combined - combdims_perm = TupleTools.sort(_permute_combdims(combdims, perm)) - - # Permute the nonzero blocks (dimension-wise) - blocks = nzblocks(T) - blocks_perm = permutedims(blocks, perm) - - # Combine the nonzero blocks (dimension-wise) - blocks_perm_comb = combine_dims(blocks_perm, inds_perm, combdims_perm) - - # Permute the blocks (within the newly combined dimension) - comb_ind_loc = minimum(combdims_perm) - blocks_perm_comb = perm_blocks(blocks_perm_comb, comb_ind_loc, blockperm) - blocks_perm_comb = sort(blocks_perm_comb; lt=isblockless) - - # Combine the blocks (within the newly combined and permuted dimension) - blocks_perm_comb = combine_blocks(blocks_perm_comb, comb_ind_loc, blockcomb) - - return BlockSparseTensor(unwrap_array_type(T), blocks_perm_comb, is) -end - -function permutedims_combine( - T::BlockSparseTensor{ElT,N}, - is, - perm::NTuple{N,Int}, - combdims::NTuple{NC,Int}, - blockperm::Vector{Int}, - blockcomb::Vector{Int}, -) where {ElT,N,NC} - R = permutedims_combine_output(T, is, perm, combdims, blockperm, blockcomb) - - # Permute the indices - inds_perm = permute(inds(T), perm) - - # Now that the indices are permuted, compute - # which indices are now combined - combdims_perm = TupleTools.sort(_permute_combdims(combdims, perm)) - comb_ind_loc = minimum(combdims_perm) - - # Determine the new index before combining - inds_to_combine = getindices(inds_perm, combdims_perm) - ind_comb = ⊗(inds_to_combine...) 
- ind_comb = permuteblocks(ind_comb, blockperm) - - for bof in pairs(blockoffsets(T)) - Tb = blockview(T, bof) - b = nzblock(bof) - b_perm = permute(b, perm) - b_perm_comb = combine_dims(b_perm, inds_perm, combdims_perm) - b_perm_comb = perm_block(b_perm_comb, comb_ind_loc, blockperm) - b_in_combined_dim = b_perm_comb[comb_ind_loc] - new_b_in_combined_dim = blockcomb[b_in_combined_dim] - offset = 0 - pos_in_new_combined_block = 1 - while b_in_combined_dim - pos_in_new_combined_block > 0 && - blockcomb[b_in_combined_dim - pos_in_new_combined_block] == new_b_in_combined_dim - offset += blockdim(ind_comb, b_in_combined_dim - pos_in_new_combined_block) - pos_in_new_combined_block += 1 - end - b_new = setindex(b_perm_comb, new_b_in_combined_dim, comb_ind_loc) - - Rb_total = blockview(R, b_new) - dimsRb_tot = dims(Rb_total) - subind = ntuple( - i -> if i == comb_ind_loc - range(1 + offset; stop=offset + blockdim(ind_comb, b_in_combined_dim)) - else - range(1; stop=dimsRb_tot[i]) - end, - N - NC + 1, - ) - Rb = @view array(Rb_total)[subind...] - - # XXX Are these equivalent? - #Tb_perm = permutedims(Tb,perm) - #copyto!(Rb,Tb_perm) - - # XXX Not sure what this was for - Rb = reshape(Rb, permute(dims(Tb), perm)) - # TODO: Make this `convert` call more general - # for GPUs. - Tbₐ = convert(Array, Tb) - ## @strided Rb .= permutedims(Tbₐ, perm) - permutedims!(expose(Rb), expose(Tbₐ), perm) - end - - return R -end - -# TODO: optimize by avoiding findfirst -function _number_uncombined(blockval::Integer, blockcomb::Vector) - if blockval == blockcomb[end] - return length(blockcomb) - findfirst(==(blockval), blockcomb) + 1 - end - return findfirst(==(blockval + 1), blockcomb) - findfirst(==(blockval), blockcomb) -end - -# TODO: optimize by avoiding findfirst -function _number_uncombined_shift(blockval::Integer, blockcomb::Vector) - if blockval == 1 - return 0 - end - ncomb_shift = 0 - for i in 1:(blockval - 1) - ncomb_shift += findfirst(==(i + 1), blockcomb) - findfirst(==(i), blockcomb) - 1 - end - return ncomb_shift -end - -# Uncombine the blocks along the dimension dim -# according to the pattern in blockcomb (for example, blockcomb -# is [1,2,2,3] and dim = 2, so the blocks (1,2),(2,3) get -# split into (1,2),(1,3),(2,4)) -function uncombine_blocks(blocks::Blocks{N}, dim::Int, blockcomb::Vector{Int}) where {N} - blocks_uncomb = Blocks{N}() - ncomb_tot = 0 - for i in 1:length(blocks) - block = blocks[i] - blockval = block[dim] - ncomb = _number_uncombined(blockval, blockcomb) - ncomb_shift = _number_uncombined_shift(blockval, blockcomb) - push!(blocks_uncomb, setindex(block, blockval + ncomb_shift, dim)) - for j in 1:(ncomb - 1) - push!(blocks_uncomb, setindex(block, blockval + ncomb_shift + j, dim)) - end - end - return blocks_uncomb -end - -function uncombine_block(block::Block{N}, dim::Int, blockcomb::Vector{Int}) where {N} - blocks_uncomb = Blocks{N}() - ncomb_tot = 0 - blockval = block[dim] - ncomb = _number_uncombined(blockval, blockcomb) - ncomb_shift = _number_uncombined_shift(blockval, blockcomb) - push!(blocks_uncomb, setindex(block, blockval + ncomb_shift, dim)) - for j in 1:(ncomb - 1) - push!(blocks_uncomb, setindex(block, blockval + ncomb_shift + j, dim)) - end - return blocks_uncomb -end - -function uncombine_output( - T::BlockSparseTensor{ElT,N}, - T_labels, - is, - is_labels, - combdim::Int, - blockperm::Vector{Int}, - blockcomb::Vector{Int}, -) where {ElT<:Number,N} - labels_uncomb_perm = setdiff(is_labels, T_labels) - ind_uncomb_perm = ⊗(is[map(x -> findfirst(==(x), is_labels), 
labels_uncomb_perm)]...) - inds_uncomb_perm = insertat(inds(T), ind_uncomb_perm, combdim) - # Uncombine the blocks of T - blocks_uncomb = uncombine_blocks(nzblocks(T), combdim, blockcomb) - blocks_uncomb_perm = perm_blocks(blocks_uncomb, combdim, invperm(blockperm)) - boffs_uncomb_perm, nnz_uncomb_perm = blockoffsets(blocks_uncomb_perm, inds_uncomb_perm) - T_uncomb_perm = tensor( - BlockSparse(unwrap_array_type(T), boffs_uncomb_perm, nnz_uncomb_perm), inds_uncomb_perm - ) - R = reshape(T_uncomb_perm, is) - return R -end - -function reshape(blockT::Block{NT}, indsT, indsR) where {NT} - nblocksT = nblocks(indsT) - nblocksR = nblocks(indsR) - blockR = Tuple( - CartesianIndices(nblocksR)[LinearIndices(nblocksT)[CartesianIndex(blockT)]] - ) - return blockR -end - -function uncombine( - T::BlockSparseTensor{<:Number,NT}, - T_labels, - is, - is_labels, - combdim::Int, - blockperm::Vector{Int}, - blockcomb::Vector{Int}, -) where {NT} - NR = length(is) - R = uncombine_output(T, T_labels, is, is_labels, combdim, blockperm, blockcomb) - invblockperm = invperm(blockperm) - # This is needed for reshaping the block - # TODO: It is already calculated in uncombine_output, use it from there - labels_uncomb_perm = setdiff(is_labels, T_labels) - ind_uncomb_perm = ⊗(is[map(x -> findfirst(==(x), is_labels), labels_uncomb_perm)]...) - ind_uncomb = permuteblocks(ind_uncomb_perm, blockperm) - # Same as inds(T) but with the blocks uncombined - inds_uncomb = insertat(inds(T), ind_uncomb, combdim) - inds_uncomb_perm = insertat(inds(T), ind_uncomb_perm, combdim) - for bof in pairs(blockoffsets(T)) - b = nzblock(bof) - Tb_tot = blockview(T, bof) - dimsTb_tot = dims(Tb_tot) - bs_uncomb = uncombine_block(b, combdim, blockcomb) - offset = 0 - for i in 1:length(bs_uncomb) - b_uncomb = bs_uncomb[i] - b_uncomb_perm = perm_block(b_uncomb, combdim, invblockperm) - b_uncomb_perm_reshape = reshape(b_uncomb_perm, inds_uncomb_perm, is) - Rb = blockview(R, b_uncomb_perm_reshape) - b_uncomb_in_combined_dim = b_uncomb_perm[combdim] - start = offset + 1 - stop = offset + blockdim(ind_uncomb_perm, b_uncomb_in_combined_dim) - subind = ntuple( - i -> i == combdim ? range(start; stop=stop) : range(1; stop=dimsTb_tot[i]), NT - ) - offset = stop - Tb = @view array(Tb_tot)[subind...] - - # Alternative (but maybe slower): - #copyto!(Rb,Tb) - - if length(Tb) == 1 - # Call `cpu` to avoid allowscalar error on GPU. - # TODO: Replace with `@allowscalar`, requires adding - # `GPUArraysCore.jl` as a dependency. 
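Stepping back, the `reshape(blockT::Block, indsT, indsR)` method defined above is exactly a linear/Cartesian round trip between the old and new block grids:

```julia
nblocksT, nblocksR = (2, 3), (6,)  # block counts before and after reshaping
blockT = CartesianIndex(2, 3)
Tuple(CartesianIndices(nblocksR)[LinearIndices(nblocksT)[blockT]])  # (6,)
```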
- Rb[] = cpu(Tb)[] - else - # XXX: this used to be: - # Rbₐᵣ = ReshapedArray(parent(Rbₐ), size(Tb), ()) - # however that doesn't work with subarrays - Rbₐ = convert(Array, Rb) - ## Rbₐᵣ = ReshapedArray(Rbₐ, size(Tb), ()) - Rbₐᵣ = reshape(Rbₐ, size(Tb)) - ## @strided Rbₐᵣ .= Tb - copyto!(expose(Rbₐᵣ), expose(Tb)) - end - end - end - return R -end - -function copyto!(R::BlockSparseTensor, T::BlockSparseTensor) - for bof in pairs(blockoffsets(T)) - copyto!(blockview(R, nzblock(bof)), blockview(T, bof)) - end - return R -end - -# TODO: handle case where: -# f(zero(ElR),zero(ElT)) != promote_type(ElR,ElT) -function permutedims!!( - R::BlockSparseTensor{ElR,N}, - T::BlockSparseTensor{ElT,N}, - perm::NTuple{N,Int}, - f::Function=(r, t) -> t, -) where {ElR,ElT,N} - RR = convert(promote_type(typeof(R), typeof(T)), R) - permutedims!(RR, T, perm, f) - return RR -end - -# -scale_blocks!(T, compute_fac::Function=(b) -> 1) = T - -# -function scale_blocks!( - T::BlockSparseTensor{<:Number,N}, compute_fac::Function=(b) -> 1 -) where {N} - for blockT in keys(blockoffsets(T)) - fac = compute_fac(blockT) - if fac != 1 - Tblock = blockview(T, blockT) - scale!(Tblock, fac) - end - end - return T -end - -# -permfactor(perm, block, inds) = 1 - -using .TypeParameterAccessors: set_type_parameters, parenttype -function permutedims!( - R::BlockSparseTensor{<:Number,N}, - T::BlockSparseTensor{<:Number,N}, - perm::NTuple{N,Int}, - f::Function=(r, t) -> t, -) where {N} - blocks_R = keys(blockoffsets(R)) - perm_blocks_T = map(b -> permute(b, perm), keys(blockoffsets(T))) - blocks = union(blocks_R, perm_blocks_T) - for block in blocks - block_T = permute(block, invperm(perm)) - - # Loop over non-zero blocks of T/R - Rblock = blockview(R, block) - Tblock = blockview(T, block_T) - - # - pfac = permfactor(perm, block_T, inds(T)) - f_fac = isone(pfac) ? f : ((r, t) -> f(r, pfac * t)) - - Rblock_exists = !isnothing(Rblock) - Tblock_exists = !isnothing(Tblock) - if !Rblock_exists - # Rblock doesn't exist - block_size = permute(size(Tblock), perm) - # TODO: Make GPU friendly. - DenseT = set_type_parameters(Dense, (eltype, parenttype), (eltype(R), datatype(R))) - Rblock = tensor(generic_zeros(DenseT, prod(block_size)), block_size) - elseif !Tblock_exists - # Tblock doesn't exist - block_size = permute(size(Rblock), invperm(perm)) - # TODO: Make GPU friendly. - DenseT = set_type_parameters(Dense, (eltype, parenttype), (eltype(T), datatype(T))) - Tblock = tensor(generic_zeros(DenseT, prod(block_size)), block_size) - end - permutedims!(Rblock, Tblock, perm, f_fac) - if !Rblock_exists - # Set missing nonzero block - ## To make sure no allowscalar issue grab the data - if !iszero(data(Rblock)) - R[block] = Rblock - end - end - end - return R -end - -const IntTuple = NTuple{N,Int} where {N} -const IntOrIntTuple = Union{Int,IntTuple} - -function permute_combine(inds::IndsT, pos::Vararg{IntOrIntTuple,N}) where {IndsT,N} - IndT = eltype(IndsT) - # Using SizedVector since setindex! doesn't - # work for MVector when eltype not isbitstype - newinds = SizedVector{N,IndT}(undef) - for i in 1:N - pos_i = pos[i] - newind_i = inds[pos_i[1]] - for p in 2:length(pos_i) - newind_i = newind_i ⊗ inds[pos_i[p]] - end - newinds[i] = newind_i - end - IndsR = similartype(IndsT, Val{N}) - indsR = IndsR(Tuple(newinds)) - return indsR -end - -""" -Indices are combined according to the grouping of the input, -for example (1,2),3 will combine the first two indices. 
-""" -function combine(inds::IndsT, com::Vararg{IntOrIntTuple,N}) where {IndsT,N} - IndT = eltype(IndsT) - # Using SizedVector since setindex! doesn't - # work for MVector when eltype not isbitstype - newinds = SizedVector{N,IndT}(undef) - i_orig = 1 - for i in 1:N - newind_i = inds[i_orig] - i_orig += 1 - for p in 2:length(com[i]) - newind_i = newind_i ⊗ inds[i_orig] - i_orig += 1 - end - newinds[i] = newind_i - end - IndsR = similartype(IndsT, Val{N}) - indsR = IndsR(Tuple(newinds)) - return indsR -end - -function permute_combine( - boffs::BlockOffsets, inds::IndsT, pos::Vararg{IntOrIntTuple,N} -) where {IndsT,N} - perm = flatten(pos...) - boffsp, indsp = permutedims(boffs, inds, perm) - indsR = combine(indsp, pos...) - boffsR = reshape(boffsp, indsp, indsR) - return boffsR, indsR -end - -function reshape(boffsT::BlockOffsets{NT}, indsT, indsR) where {NT} - NR = length(indsR) - boffsR = BlockOffsets{NR}() - nblocksT = nblocks(indsT) - nblocksR = nblocks(indsR) - for (blockT, offsetT) in pairs(boffsT) - blockR = Block( - CartesianIndices(nblocksR)[LinearIndices(nblocksT)[CartesianIndex(blockT)]] - ) - insert!(boffsR, blockR, offsetT) - end - return boffsR -end - -function reshape(boffsT::BlockOffsets{NT}, blocksR::Vector{Block{NR}}) where {NR,NT} - boffsR = BlockOffsets{NR}() - # TODO: check blocksR is ordered and are properly reshaped - # versions of the blocks of boffsT - for (i, (blockT, offsetT)) in enumerate(boffsT) - blockR = blocksR[i] - boffsR[blockR] = offsetT - end - return boffsR -end - -reshape(T::BlockSparse, boffsR::BlockOffsets) = BlockSparse(data(T), boffsR) - -function reshape(T::BlockSparseTensor, boffsR::BlockOffsets, indsR) - storeR = reshape(storage(T), boffsR) - return tensor(storeR, indsR) -end - -function reshape(T::BlockSparseTensor, indsR) - # TODO: add some checks that the block dimensions - # are consistent (e.g. nnzblocks(T) == nnzblocks(R), etc.) - boffsR = reshape(blockoffsets(T), inds(T), indsR) - R = reshape(T, boffsR, indsR) - return R -end - -function permute_combine( - T::BlockSparseTensor{ElT,NT,IndsT}, pos::Vararg{IntOrIntTuple,NR} -) where {ElT,NT,IndsT,NR} - boffsR, indsR = permute_combine(blockoffsets(T), inds(T), pos...) - - perm = flatten(pos...) 
- - length(perm) ≠ NT && error("Index positions must add up to order of Tensor ($NT)") - isperm(perm) || error("Index positions must be a permutation") - - if !is_trivial_permutation(perm) - Tp = permutedims(T, perm) - else - Tp = copy(T) - end - NR == NT && return Tp - R = reshape(Tp, boffsR, indsR) - return R -end - -# -# Print block sparse tensors -# - -#function summary(io::IO, -# T::BlockSparseTensor{ElT,N}) where {ElT,N} -# println(io,Base.dims2string(dims(T))," ",typeof(T)) -# for (dim,ind) in enumerate(inds(T)) -# println(io,"Dim $dim: ",ind) -# end -# println(io,"Number of nonzero blocks: ",nnzblocks(T)) -#end - -#function summary(io::IO, -# T::BlockSparseTensor{ElT,N}) where {ElT,N} -# println(io,typeof(T)) -# println(io,Base.dims2string(dims(T))," ",typeof(T)) -# for (dim,ind) in enumerate(inds(T)) -# println(io,"Dim $dim: ",ind) -# end -# println("Number of nonzero blocks: ",nnzblocks(T)) -#end - -function _range2string(rangestart::NTuple{N,Int}, rangeend::NTuple{N,Int}) where {N} - s = "" - for n in 1:N - s = string(s, rangestart[n], ":", rangeend[n]) - if n < N - s = string(s, ", ") - end - end - return s -end - -function Base.show(io::IO, mime::MIME"text/plain", T::BlockSparseTensor) - summary(io, T) - for (n, block) in enumerate(keys(blockoffsets(T))) - blockdimsT = blockdims(T, block) - println(io, block) - println(io, " [", _range2string(blockstart(T, block), blockend(T, block)), "]") - print_tensor(io, blockview(T, block)) - n < nnzblocks(T) && print(io, "\n\n") - end -end - -Base.show(io::IO, T::BlockSparseTensor) = show(io, MIME("text/plain"), T) diff --git a/NDTensors/src/blocksparse/combiner.jl b/NDTensors/src/blocksparse/combiner.jl deleted file mode 100644 index e993d441c1..0000000000 --- a/NDTensors/src/blocksparse/combiner.jl +++ /dev/null @@ -1,162 +0,0 @@ -#: -function before_combiner_signs( - tensor, - tensor_labels, - indstensor, - combiner_tensor, - combiner_tensor_labels, - indscombiner_tensor, - labelsoutput_tensor, - output_tensor_inds, -) - return tensor -end -function after_combiner_signs( - output_tensor, - labelsoutput_tensor, - output_tensor_inds, - combiner_tensor, - combiner_tensor_labels, - indscombiner_tensor, -) - return output_tensor -end - -function contract( - tensor::BlockSparseTensor, - tensor_labels, - combiner_tensor::CombinerTensor, - combiner_tensor_labels, -) - #@timeit_debug timer "Block sparse (un)combiner" begin - # Get the label marking the combined index - # By convention the combined index is the first one - # TODO: Consider storing the location of the combined - # index in preparation for multiple combined indices - # TODO: Use `combinedind_label(...)`, `uncombinedind_labels(...)`, etc. 
- cpos_in_combiner_tensor_labels = 1 - clabel = combiner_tensor_labels[cpos_in_combiner_tensor_labels] - c = combinedind(combiner_tensor) - labels_uc = deleteat(combiner_tensor_labels, cpos_in_combiner_tensor_labels) - is_combining_contraction = is_combining( - tensor, tensor_labels, combiner_tensor, combiner_tensor_labels - ) - if is_combining_contraction - output_tensor_labels = contract_labels(combiner_tensor_labels, tensor_labels) - cpos_in_output_tensor_labels = findfirst(==(clabel), output_tensor_labels) - output_tensor_labels_uc = insertat( - output_tensor_labels, labels_uc, cpos_in_output_tensor_labels - ) - output_tensor_inds = contract_inds( - inds(combiner_tensor), - combiner_tensor_labels, - inds(tensor), - tensor_labels, - output_tensor_labels, - ) - - #: - tensor = before_combiner_signs( - tensor, - tensor_labels, - inds(tensor), - combiner_tensor, - combiner_tensor_labels, - inds(combiner_tensor), - output_tensor_labels, - output_tensor_inds, - ) - - perm = getperm(output_tensor_labels_uc, tensor_labels) - ucpos_in_tensor_labels = Tuple(findall(x -> x in labels_uc, tensor_labels)) - output_tensor = permutedims_combine( - tensor, - output_tensor_inds, - perm, - ucpos_in_tensor_labels, - blockperm(combiner_tensor), - blockcomb(combiner_tensor), - ) - return output_tensor - else # Uncombining - output_tensor_labels = tensor_labels - cpos_in_output_tensor_labels = findfirst(==(clabel), output_tensor_labels) - # Move combined index to first position - if cpos_in_output_tensor_labels != 1 - output_tensor_labels_orig = output_tensor_labels - output_tensor_labels = deleteat(output_tensor_labels, cpos_in_output_tensor_labels) - output_tensor_labels = insertafter(output_tensor_labels, clabel, 0) - cpos_in_output_tensor_labels = 1 - perm = getperm(output_tensor_labels, output_tensor_labels_orig) - tensor = permutedims(tensor, perm) - tensor_labels = permute(tensor_labels, perm) - end - output_tensor_labels_uc = insertat( - output_tensor_labels, labels_uc, cpos_in_output_tensor_labels - ) - output_tensor_inds_uc = contract_inds( - inds(combiner_tensor), - combiner_tensor_labels, - inds(tensor), - tensor_labels, - output_tensor_labels_uc, - ) - - # : - tensor = before_combiner_signs( - tensor, - tensor_labels, - inds(tensor), - combiner_tensor, - combiner_tensor_labels, - inds(combiner_tensor), - output_tensor_labels_uc, - output_tensor_inds_uc, - ) - - output_tensor = uncombine( - tensor, - tensor_labels, - output_tensor_inds_uc, - output_tensor_labels_uc, - cpos_in_output_tensor_labels, - blockperm(combiner_tensor), - blockcomb(combiner_tensor), - ) - - # : - output_tensor = after_combiner_signs( - output_tensor, - output_tensor_labels_uc, - output_tensor_inds_uc, - combiner_tensor, - combiner_tensor_labels, - inds(combiner_tensor), - ) - - return output_tensor - end - return invalid_combiner_contraction_error( - combiner_tensor, tensor_labels, tensor, tensor_labels - ) -end - -function contract( - combiner_tensor::CombinerTensor, - combiner_tensor_labels, - tensor::BlockSparseTensor, - tensor_labels, -) - return contract(tensor, tensor_labels, combiner_tensor, combiner_tensor_labels) -end - -# Special case when no indices are combined -# TODO: No copy? Maybe use `AllowAlias`. 
-function contract( - tensor::BlockSparseTensor, - tensor_labels, - combiner_tensor::CombinerTensor{<:Any,0}, - combiner_tensor_labels, -) - return copy(tensor) -end diff --git a/NDTensors/src/blocksparse/contract.jl b/NDTensors/src/blocksparse/contract.jl deleted file mode 100644 index bfe413e7d1..0000000000 --- a/NDTensors/src/blocksparse/contract.jl +++ /dev/null @@ -1,76 +0,0 @@ -using .BackendSelection: Algorithm, @Algorithm_str - -function contract( - tensor1::BlockSparseTensor, - labelstensor1, - tensor2::BlockSparseTensor, - labelstensor2, - labelsR=contract_labels(labelstensor1, labelstensor2), -) - R, contraction_plan = contraction_output( - tensor1, labelstensor1, tensor2, labelstensor2, labelsR - ) - R = contract!( - R, labelsR, tensor1, labelstensor1, tensor2, labelstensor2, contraction_plan - ) - return R -end - -# Determine the contraction output and block contractions -function contraction_output( - tensor1::BlockSparseTensor, - labelstensor1, - tensor2::BlockSparseTensor, - labelstensor2, - labelsR, -) - indsR = contract_inds(inds(tensor1), labelstensor1, inds(tensor2), labelstensor2, labelsR) - TensorR = contraction_output_type(typeof(tensor1), typeof(tensor2), indsR) - blockoffsetsR, contraction_plan = contract_blockoffsets( - blockoffsets(tensor1), - inds(tensor1), - labelstensor1, - blockoffsets(tensor2), - inds(tensor2), - labelstensor2, - indsR, - labelsR, - ) - R = similar(TensorR, blockoffsetsR, indsR) - return R, contraction_plan -end - -function contract_blockoffsets( - boffs1::BlockOffsets, inds1, labels1, boffs2::BlockOffsets, inds2, labels2, indsR, labelsR -) - alg = Algorithm"sequential"() - if using_threaded_blocksparse() && nthreads() > 1 - alg = Algorithm"threaded_threads"() - # This code is a bit cleaner but slower: - # alg = Algorithm"threaded_folds"() - end - return contract_blockoffsets( - alg, boffs1, inds1, labels1, boffs2, inds2, labels2, indsR, labelsR - ) -end - -function contract!( - R::BlockSparseTensor, - labelsR, - tensor1::BlockSparseTensor, - labelstensor1, - tensor2::BlockSparseTensor, - labelstensor2, - contraction_plan, -) - if isempty(contraction_plan) - return R - end - alg = Algorithm"sequential"() - if using_threaded_blocksparse() && nthreads() > 1 - alg = Algorithm"threaded_folds"() - end - return contract!( - alg, R, labelsR, tensor1, labelstensor1, tensor2, labelstensor2, contraction_plan - ) -end diff --git a/NDTensors/src/blocksparse/contract_folds.jl b/NDTensors/src/blocksparse/contract_folds.jl deleted file mode 100644 index 9e483e2eb1..0000000000 --- a/NDTensors/src/blocksparse/contract_folds.jl +++ /dev/null @@ -1,60 +0,0 @@ -# Function barrier to improve type stability, -# since `Folds`/`FLoops` is not type stable: -# https://discourse.julialang.org/t/type-instability-in-floop-reduction/68598 -function contract_blocks!( - alg::Algorithm"threaded_folds", - contraction_plans, - boffs1, - boffs2, - labels1_to_labels2, - labels1_to_labelsR, - labels2_to_labelsR, - ValNR, -) - if nnzblocks(boffs1) > nnzblocks(boffs2) - Folds.foreach(eachnzblock(boffs1).values, ThreadedEx()) do block1 - for block2 in eachnzblock(boffs2) - maybe_contract_blocks!( - contraction_plans[threadid()], - block1, - block2, - labels1_to_labels2, - labels1_to_labelsR, - labels2_to_labelsR, - ValNR, - ) - end - end - else - Folds.foreach(eachnzblock(boffs2).values, ThreadedEx()) do block2 - for block1 in eachnzblock(boffs1) - maybe_contract_blocks!( - contraction_plans[threadid()], - block1, - block2, - labels1_to_labels2, - labels1_to_labelsR, - 
labels2_to_labelsR, - ValNR, - ) - end - end - end - return nothing -end - -function contract!( - ::Algorithm"threaded_folds", - R::BlockSparseTensor, - labelsR, - tensor1::BlockSparseTensor, - labelstensor1, - tensor2::BlockSparseTensor, - labelstensor2, - contraction_plan, -) - executor = ThreadedEx() - return contract!( - R, labelsR, tensor1, labelstensor1, tensor2, labelstensor2, contraction_plan, executor - ) -end diff --git a/NDTensors/src/blocksparse/contract_generic.jl b/NDTensors/src/blocksparse/contract_generic.jl deleted file mode 100644 index eed3bebb87..0000000000 --- a/NDTensors/src/blocksparse/contract_generic.jl +++ /dev/null @@ -1,162 +0,0 @@ -# A generic version that is used by both -# "threaded_folds" and "threaded_threads". -function contract_blockoffsets( - alg::Algorithm, - boffs1::BlockOffsets, - inds1, - labels1, - boffs2::BlockOffsets, - inds2, - labels2, - indsR, - labelsR, -) - N1 = ndims(boffs1) - N2 = ndims(boffs2) - NR = length(labelsR) - ValNR = ValLength(labelsR) - labels1_to_labels2, labels1_to_labelsR, labels2_to_labelsR = contract_labels( - labels1, labels2, labelsR - ) - - # Contraction plan element type - T = Tuple{Block{N1},Block{N2},Block{NR}} - - # Thread-local collections of block contractions. - # Could use: - # ```julia - # FLoops.@reduce(contraction_plans = append!(T[], [(block1, block2, blockR)])) - # ``` - # as a simpler alternative but it is slower. - - contraction_plans = Vector{T}[T[] for _ in 1:nthreads()] - - # - # Reserve some capacity - # In theory the maximum is length(boffs1) * length(boffs2) - # but in practice that is too much - #for contraction_plan in contraction_plans - # sizehint!(contraction_plan, max(length(boffs1), length(boffs2))) - #end - # - - contract_blocks!( - alg, - contraction_plans, - boffs1, - boffs2, - labels1_to_labels2, - labels1_to_labelsR, - labels2_to_labelsR, - ValNR, - ) - - contraction_plan = reduce(vcat, contraction_plans) - blockoffsetsR = BlockOffsets{NR}() - nnzR = 0 - for (_, _, blockR) in contraction_plan - if !isassigned(blockoffsetsR, blockR) - insert!(blockoffsetsR, blockR, nnzR) - nnzR += blockdim(indsR, blockR) - end - end - - return blockoffsetsR, contraction_plan -end - -# A generic version making use of `Folds.jl` which -# can take various Executor backends. -# Used for sequential and threaded contract functions. -function contract!( - R::BlockSparseTensor, - labelsR, - tensor1::BlockSparseTensor, - labelstensor1, - tensor2::BlockSparseTensor, - labelstensor2, - contraction_plan, - executor, -) - # Group the contraction plan by the output block, - # since the sets of contractions into the same block - # must be performed sequentially to reduce over those - # sets of contractions properly (and avoid race conditions). - # Same as: - # ```julia - # grouped_contraction_plan = group(last, contraction_plan) - # ``` - # but more efficient since we know the groups/keys already, - # since they are the nonzero blocks of the output tensor `R`.
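# A minimal standalone sketch of the grouping described above (toy integers
# stand in for `Block` objects; this is an illustration, not part of the
# original source). Contractions that write to the same output block are
# collected into one group so each group can be reduced sequentially,
# avoiding race conditions, while distinct groups can run in parallel:
let contraction_plan = [(1, 1, 10), (2, 1, 10), (1, 2, 20)]
  grouped = Dict{Int,Vector{NTuple{3,Int}}}()
  for block_contraction in contraction_plan
    # Key on the output block, `last(block_contraction)`:
    push!(get!(() -> NTuple{3,Int}[], grouped, last(block_contraction)), block_contraction)
  end
  @assert length(grouped[10]) == 2 # both contractions accumulate into output block 10
  @assert length(grouped[20]) == 1
end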
- grouped_contraction_plan = map(_ -> empty(contraction_plan), eachnzblock(R)) - for block_contraction in contraction_plan - push!(grouped_contraction_plan[last(block_contraction)], block_contraction) - end - _contract!( - R, - labelsR, - tensor1, - labelstensor1, - tensor2, - labelstensor2, - grouped_contraction_plan, - executor, - ) - return R -end - -using NDTensors.Expose: expose -# Function barrier to improve type stability, -# since `Folds`/`FLoops` is not type stable: -# https://discourse.julialang.org/t/type-instability-in-floop-reduction/68598 -function _contract!( - R::BlockSparseTensor, - labelsR, - tensor1::BlockSparseTensor, - labelstensor1, - tensor2::BlockSparseTensor, - labelstensor2, - grouped_contraction_plan, - executor, -) - Folds.foreach(grouped_contraction_plan.values, executor) do contraction_plan_group - # Start by overwriting the block: - # R .= α .* (tensor1 * tensor2) - β = zero(eltype(R)) - for block_contraction in contraction_plan_group - blocktensor1, blocktensor2, blockR = block_contraction - - # <fermions>: - α = compute_alpha( - eltype(R), - labelsR, - blockR, - inds(R), - labelstensor1, - blocktensor1, - inds(tensor1), - labelstensor2, - blocktensor2, - inds(tensor2), - ) - - contract!( - expose(R[blockR]), - labelsR, - expose(tensor1[blocktensor1]), - labelstensor1, - expose(tensor2[blocktensor2]), - labelstensor2, - α, - β, - ) - - if iszero(β) - # After the block has been overwritten, - # add into it: - # R .= α .* (tensor1 * tensor2) .+ β .* R - β = one(eltype(R)) - end - end - end - return nothing -end diff --git a/NDTensors/src/blocksparse/contract_sequential.jl b/NDTensors/src/blocksparse/contract_sequential.jl deleted file mode 100644 index 78a44cb340..0000000000 --- a/NDTensors/src/blocksparse/contract_sequential.jl +++ /dev/null @@ -1,105 +0,0 @@ -function contract_blockoffsets( - ::Algorithm"sequential", - boffs1::BlockOffsets, - inds1, - labels1, - boffs2::BlockOffsets, - inds2, - labels2, - indsR, - labelsR, -) - N1 = ndims(boffs1) - N2 = ndims(boffs2) - NR = length(labelsR) - ValNR = ValLength(labelsR) - labels1_to_labels2, labels1_to_labelsR, labels2_to_labelsR = contract_labels( - labels1, labels2, labelsR - ) - blockoffsetsR = BlockOffsets{NR}() - nnzR = 0 - contraction_plan = Tuple{Block{N1},Block{N2},Block{NR}}[] - # Reserve some capacity - # In theory the maximum is length(boffs1) * length(boffs2) - # but in practice that is too much - sizehint!(contraction_plan, max(length(boffs1), length(boffs2))) - for block1 in keys(boffs1) - for block2 in keys(boffs2) - if are_blocks_contracted(block1, block2, labels1_to_labels2) - blockR = contract_blocks( - block1, labels1_to_labelsR, block2, labels2_to_labelsR, ValNR - ) - push!(contraction_plan, (block1, block2, blockR)) - if !isassigned(blockoffsetsR, blockR) - insert!(blockoffsetsR, blockR, nnzR) - nnzR += blockdim(indsR, blockR) - end - end - end - end - return blockoffsetsR, contraction_plan -end - -function contract!( - ::Algorithm"sequential", - R::BlockSparseTensor, - labelsR, - tensor1::BlockSparseTensor, - labelstensor1, - tensor2::BlockSparseTensor, - labelstensor2, - contraction_plan, -) - executor = SequentialEx() - return contract!( - R, labelsR, tensor1, labelstensor1, tensor2, labelstensor2, contraction_plan, executor - ) -end -using NDTensors.Expose: expose -########################################################################### -# Old version -# TODO: DELETE, keeping around for now for testing/benchmarking.
-function contract!( - ::Algorithm"sequential_deprecated", - R::BlockSparseTensor{ElR,NR}, - labelsR, - T1::BlockSparseTensor{ElT1,N1}, - labelsT1, - T2::BlockSparseTensor{ElT2,N2}, - labelsT2, - contraction_plan, -) where {ElR,ElT1,ElT2,N1,N2,NR} - if isempty(contraction_plan) - return R - end - if using_threaded_blocksparse() && nthreads() > 1 - _contract_threaded_deprecated!(R, labelsR, T1, labelsT1, T2, labelsT2, contraction_plan) - return R - end - already_written_to = Dict{Block{NR},Bool}() - indsR = inds(R) - indsT1 = inds(T1) - indsT2 = inds(T2) - # In R .= α .* (T1 * T2) .+ β .* R - for (block1, block2, blockR) in contraction_plan - - # <fermions> - α = compute_alpha( - ElR, labelsR, blockR, indsR, labelsT1, block1, indsT1, labelsT2, block2, indsT2 - ) - - T1block = T1[block1] - T2block = T2[block2] - Rblock = R[blockR] - β = one(ElR) - if !haskey(already_written_to, blockR) - already_written_to[blockR] = true - # Overwrite the block of R - β = zero(ElR) - end - contract!( - expose(Rblock), labelsR, expose(T1block), labelsT1, expose(T2block), labelsT2, α, β - ) - end - return R -end diff --git a/NDTensors/src/blocksparse/contract_threads.jl b/NDTensors/src/blocksparse/contract_threads.jl deleted file mode 100644 index f7ba69ffc8..0000000000 --- a/NDTensors/src/blocksparse/contract_threads.jl +++ /dev/null @@ -1,137 +0,0 @@ -using NDTensors.Expose: expose -# TODO: This seems to be faster than the newer version using `Folds.jl` -# in `contract_folds.jl`, investigate why. -function contract_blocks!( - alg::Algorithm"threaded_threads", - contraction_plans, - boffs1, - boffs2, - labels1_to_labels2, - labels1_to_labelsR, - labels2_to_labelsR, - ValNR, -) - blocks1 = keys(boffs1) - blocks2 = keys(boffs2) - if length(blocks1) > length(blocks2) - @sync for blocks1_partition in - Iterators.partition(blocks1, max(1, length(blocks1) ÷ nthreads())) - @spawn for block1 in blocks1_partition - for block2 in blocks2 - maybe_contract_blocks!( - contraction_plans[threadid()], - block1, - block2, - labels1_to_labels2, - labels1_to_labelsR, - labels2_to_labelsR, - ValNR, - ) - end - end - end - else - @sync for blocks2_partition in - Iterators.partition(blocks2, max(1, length(blocks2) ÷ nthreads())) - @spawn for block2 in blocks2_partition - for block1 in blocks1 - maybe_contract_blocks!( - contraction_plans[threadid()], - block1, - block2, - labels1_to_labels2, - labels1_to_labelsR, - labels2_to_labelsR, - ValNR, - ) - end - end - end - end - return nothing -end - -########################################################################### -# Old version -# TODO: DELETE, keeping around for testing/benchmarking.
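# A minimal sketch (toy workload, not from the original source) of the
# partition-and-spawn pattern used by `contract_blocks!` above: the work is
# split into roughly `nthreads()` chunks and each chunk runs in its own task.
using Base.Threads: @spawn, nthreads
let items = 1:17
  chunks = Iterators.partition(items, max(1, length(items) ÷ nthreads()))
  tasks = [@spawn sum(chunk) for chunk in chunks]
  @assert sum(fetch.(tasks)) == sum(items) # every item is processed exactly once
end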
-function contract!( - ::Algorithm"threaded_threads", - R::BlockSparseTensor{ElR,NR}, - labelsR, - T1::BlockSparseTensor{ElT1,N1}, - labelsT1, - T2::BlockSparseTensor{ElT2,N2}, - labelsT2, - contraction_plan, -) where {ElR,ElT1,ElT2,N1,N2,NR} - # Sort the contraction plan by the output blocks - # This is to help determine which output blocks are the result - # of multiple contractions - sort!(contraction_plan; by=last) - - # Ranges of contractions to the same block - repeats = Vector{UnitRange{Int}}(undef, nnzblocks(R)) - ncontracted = 1 - posR = last(contraction_plan[1]) - posR_unique = posR - for n in 1:(nnzblocks(R) - 1) - start = ncontracted - while posR == posR_unique - ncontracted += 1 - posR = last(contraction_plan[ncontracted]) - end - posR_unique = posR - repeats[n] = start:(ncontracted - 1) - end - repeats[end] = ncontracted:length(contraction_plan) - - contraction_plan_blocks = Vector{Tuple{Tensor,Tensor,Tensor}}( - undef, length(contraction_plan) - ) - for ncontracted in 1:length(contraction_plan) - block1, block2, blockR = contraction_plan[ncontracted] - T1block = T1[block1] - T2block = T2[block2] - Rblock = R[blockR] - contraction_plan_blocks[ncontracted] = (T1block, T2block, Rblock) - end - - indsR = inds(R) - indsT1 = inds(T1) - indsT2 = inds(T2) - - α = one(ElR) - @sync for repeats_partition in - Iterators.partition(repeats, max(1, length(repeats) ÷ nthreads())) - @spawn for ncontracted_range in repeats_partition - # Overwrite the block since it hasn't been written to - # R .= α .* (T1 * T2) - β = zero(ElR) - for ncontracted in ncontracted_range - blockT1, blockT2, blockR = contraction_plan_blocks[ncontracted] - # R .= α .* (T1 * T2) .+ β .* R - - # <fermions>: - α = compute_alpha( - ElR, labelsR, blockR, indsR, labelsT1, blockT1, indsT1, labelsT2, blockT2, indsT2 - ) - - contract!( - expose(blockR), - labelsR, - expose(blockT1), - labelsT1, - expose(blockT2), - labelsT2, - α, - β, - ) - # Now keep adding to the block, since it has - # been written to - # R .= α .* (T1 * T2) .+ R - β = one(ElR) - end - end - end - return R -end diff --git a/NDTensors/src/blocksparse/contract_utilities.jl b/NDTensors/src/blocksparse/contract_utilities.jl deleted file mode 100644 index 219d6ac9b1..0000000000 --- a/NDTensors/src/blocksparse/contract_utilities.jl +++ /dev/null @@ -1,96 +0,0 @@ -# <fermions> -function compute_alpha( - ElR, - labelsR, - blockR, - indsR, - labelstensor1, - blocktensor1, - indstensor1, - labelstensor2, - blocktensor2, - indstensor2, -) - return one(ElR) -end - -function maybe_contract_blocks!( - contraction_plan, - block1, - block2, - labels1_to_labels2, - labels1_to_labelsR, - labels2_to_labelsR, - ValNR, -) - if are_blocks_contracted(block1, block2, labels1_to_labels2) - blockR = contract_blocks(block1, labels1_to_labelsR, block2, labels2_to_labelsR, ValNR) - push!(contraction_plan, (block1, block2, blockR)) - end - return nothing -end - -function contract_labels(labels1, labels2, labelsR) - labels1_to_labels2 = find_matching_positions(labels1, labels2) - labels1_to_labelsR = find_matching_positions(labels1, labelsR) - labels2_to_labelsR = find_matching_positions(labels2, labelsR) - return labels1_to_labels2, labels1_to_labelsR, labels2_to_labelsR -end - -""" - find_matching_positions(t1,t2) -> t1_to_t2 - -In a tuple of length(t1), store the positions in t2 -where the element of t1 is found. Otherwise, store 0 -to indicate that the element of t1 is not in t2. - -For example, for all t1[pos1] == t2[pos2], t1_to_t2[pos1] == pos2, -otherwise t1_to_t2[pos1] == 0.
-""" -function find_matching_positions(t1, t2) - t1_to_t2 = @MVector zeros(Int, length(t1)) - for pos1 in 1:length(t1) - for pos2 in 1:length(t2) - if t1[pos1] == t2[pos2] - t1_to_t2[pos1] = pos2 - end - end - end - return Tuple(t1_to_t2) -end - -function are_blocks_contracted(block1::Block, block2::Block, labels1_to_labels2::Tuple) - t1 = Tuple(block1) - t2 = Tuple(block2) - for i1 in 1:length(block1) - i2 = @inbounds labels1_to_labels2[i1] - if i2 > 0 - # This dimension is contracted - if @inbounds t1[i1] != @inbounds t2[i2] - return false - end - end - end - return true -end - -function contract_blocks( - block1::Block, labels1_to_labelsR, block2::Block, labels2_to_labelsR, ::Val{NR} -) where {NR} - blockR = ntuple(_ -> UInt(0), Val(NR)) - t1 = Tuple(block1) - t2 = Tuple(block2) - for i1 in 1:length(block1) - iR = @inbounds labels1_to_labelsR[i1] - if iR > 0 - blockR = @inbounds setindex(blockR, t1[i1], iR) - end - end - for i2 in 1:length(block2) - iR = @inbounds labels2_to_labelsR[i2] - if iR > 0 - blockR = @inbounds setindex(blockR, t2[i2], iR) - end - end - return Block{NR}(blockR) -end diff --git a/NDTensors/src/blocksparse/diagblocksparse.jl b/NDTensors/src/blocksparse/diagblocksparse.jl deleted file mode 100644 index 42882102d7..0000000000 --- a/NDTensors/src/blocksparse/diagblocksparse.jl +++ /dev/null @@ -1,700 +0,0 @@ -using .TypeParameterAccessors: similartype - -export DiagBlockSparse, DiagBlockSparseTensor - -# DiagBlockSparse can have either Vector storage, in which case -# it is a general DiagBlockSparse tensor, or scalar storage, -# in which case the diagonal has a uniform value -# TODO: Define as an `AbstractBlockSparse`, or -# `GenericBlockSparse` parametrized by `Dense` or `Diag`. -struct DiagBlockSparse{ElT,VecT,N} <: TensorStorage{ElT} - data::VecT - diagblockoffsets::BlockOffsets{N} # Block number-offset pairs - - # Nonuniform case - function DiagBlockSparse( - data::VecT, blockoffsets::BlockOffsets{N} - ) where {VecT<:AbstractVector{ElT},N} where {ElT} - return new{ElT,VecT,N}(data, blockoffsets) - end - - # Uniform case - function DiagBlockSparse(data::VecT, blockoffsets::BlockOffsets{N}) where {VecT<:Number,N} - return new{VecT,VecT,N}(data, blockoffsets) - end -end - -# Data and type accessors. -datatype(storage::DiagBlockSparse) = datatype(typeof(storage)) -datatype(storagetype::Type{<:DiagBlockSparse}) = fieldtype(storagetype, :data) -blockoffsets(storage::DiagBlockSparse) = getfield(storage, :diagblockoffsets) -blockoffsetstype(storage::DiagBlockSparse) = blockoffsetstype(typeof(storage)) -function blockoffsetstype(storagetype::Type{<:DiagBlockSparse}) - return fieldtype(storagetype, :diagblockoffsets) -end - -# TODO: Deprecate? -diagblockoffsets(storage::DiagBlockSparse) = blockoffsets(storage) - -function setdata(storagetype::Type{<:DiagBlockSparse}, data::AbstractArray) - error("Must specify `diagblockoffsets`.") - return DiagBlockSparse(data, blockoffsetstype(storagetype)()) -end - -# TODO: Move this to a `set_types.jl` file. 
-function set_datatype( - storagetype::Type{<:DiagBlockSparse}, datatype::Type{<:AbstractVector} -) - return DiagBlockSparse{eltype(datatype),datatype,ndims(storagetype)} -end - -function DiagBlockSparse( - ::Type{ElT}, boffs::BlockOffsets, diaglength::Integer -) where {ElT<:Number} - return DiagBlockSparse(zeros(ElT, diaglength), boffs) -end - -function DiagBlockSparse(boffs::BlockOffsets, diaglength::Integer) - return DiagBlockSparse(Float64, boffs, diaglength) -end - -function DiagBlockSparse( - ::Type{ElT}, ::UndefInitializer, boffs::BlockOffsets, diaglength::Integer -) where {ElT<:Number} - return DiagBlockSparse(Vector{ElT}(undef, diaglength), boffs) -end - -function DiagBlockSparse( - datatype::Type{<:AbstractArray}, - ::UndefInitializer, - boffs::BlockOffsets, - diaglength::Integer, -) - return DiagBlockSparse(datatype(undef, diaglength), boffs) -end - -function DiagBlockSparse(::UndefInitializer, boffs::BlockOffsets, diaglength::Integer) - return DiagBlockSparse(Float64, undef, boffs, diaglength) -end - -function findblock( - D::DiagBlockSparse{<:Number,<:Union{Number,AbstractVector},N}, block::Block{N}; vargs... -) where {N} - return findblock(diagblockoffsets(D), block; vargs...) -end - -const NonuniformDiagBlockSparse{ElT,VecT} = - DiagBlockSparse{ElT,VecT} where {VecT<:AbstractVector} -const UniformDiagBlockSparse{ElT,VecT} = DiagBlockSparse{ElT,VecT} where {VecT<:Number} - -@propagate_inbounds function getindex(D::NonuniformDiagBlockSparse, i::Int) - return data(D)[i] -end - -getindex(D::UniformDiagBlockSparse, i::Int) = data(D) - -@propagate_inbounds function setindex!(D::DiagBlockSparse, val, i::Int) - data(D)[i] = val - return D -end - -function setindex!(D::UniformDiagBlockSparse, val, i::Int) - return error("Cannot set elements of a uniform DiagBlockSparse storage") -end - -#fill!(D::DiagBlockSparse,v) = fill!(data(D),v) - -copy(D::DiagBlockSparse) = DiagBlockSparse(copy(data(D)), copy(diagblockoffsets(D))) - -setdata(D::DiagBlockSparse, ndata) = DiagBlockSparse(ndata, diagblockoffsets(D)) - -# TODO: Move this to a `set_types.jl` file. -# TODO: Remove this once uniform diagonal tensors use FillArrays for the data. -function set_datatype(storagetype::Type{<:UniformDiagBlockSparse}, datatype::Type) - return DiagBlockSparse{datatype,datatype,ndims(storagetype)} -end - -# TODO: Make this more generic. For example, use an -# `is_composite_mutable` trait, and if `!is_composite_mutable`, -# automatically forward `NeverAlias` to `AllowAlias` since -# aliasing doesn't matter for immutable types. 
-function conj(::NeverAlias, storage::UniformDiagBlockSparse) - return conj(AllowAlias(), storage) -end - -## convert to complex -## TODO: this could be a generic TensorStorage function -#complex(D::DiagBlockSparse) = DiagBlockSparse(complex(data(D)), diagblockoffsets(D)) - -#conj(D::DiagBlockSparse{<:Real}) = D -#conj(D::DiagBlockSparse{<:Complex}) = DiagBlockSparse(conj(data(D)), diagblockoffsets(D)) - -# TODO: make this generic for all storage types -eltype(::DiagBlockSparse{ElT}) where {ElT} = ElT -eltype(::Type{<:DiagBlockSparse{ElT}}) where {ElT} = ElT - -# Deal with uniform DiagBlockSparse conversion -#convert(::Type{<:DiagBlockSparse{ElT,VecT}},D::DiagBlockSparse) where {ElT,VecT} = DiagBlockSparse(convert(VecT,data(D))) - -size(D::DiagBlockSparse) = size(data(D)) - -# TODO: make this work for other storage besides Vector -function zeros(::Type{<:NonuniformDiagBlockSparse{ElT}}, dim::Int64) where {ElT} - return DiagBlockSparse(zeros(ElT, dim)) -end -function zeros(::Type{<:UniformDiagBlockSparse{ElT}}, dim::Int64) where {ElT} - return DiagBlockSparse(zero(ElT)) -end - -# -# Type promotions involving DiagBlockSparse -# Useful for knowing how conversions should work when adding and contracting -# - -function promote_rule( - ::Type{<:UniformDiagBlockSparse{ElT1}}, ::Type{<:UniformDiagBlockSparse{ElT2}} -) where {ElT1,ElT2} - ElR = promote_type(ElT1, ElT2) - return DiagBlockSparse{ElR,ElR} -end - -function promote_rule( - ::Type{<:NonuniformDiagBlockSparse{ElT1,VecT1}}, - ::Type{<:NonuniformDiagBlockSparse{ElT2,VecT2}}, -) where {ElT1,VecT1<:AbstractVector,ElT2,VecT2<:AbstractVector} - ElR = promote_type(ElT1, ElT2) - VecR = promote_type(VecT1, VecT2) - return DiagBlockSparse{ElR,VecR} -end - -# This is an internal definition, is there a more general way? -#promote_type(::Type{Vector{ElT1}}, -# ::Type{ElT2}) where {ElT1<:Number, -# ElT2<:Number} = Vector{promote_type(ElT1,ElT2)} -# -#promote_type(::Type{ElT1}, -# ::Type{Vector{ElT2}}) where {ElT1<:Number, -# ElT2<:Number} = promote_type(Vector{ElT2},ElT1) - -# TODO: how do we make this work more generally for T2<:AbstractVector{S2}? -# Make a similartype(AbstractVector{S2},T1) -> AbstractVector{T1} function? 
-function promote_rule( - ::Type{<:UniformDiagBlockSparse{ElT1,VecT1}}, - ::Type{<:NonuniformDiagBlockSparse{ElT2,Vector{ElT2}}}, -) where {ElT1,VecT1<:Number,ElT2} - ElR = promote_type(ElT1, ElT2) - VecR = Vector{ElR} - return DiagBlockSparse{ElR,VecR} -end - -function promote_rule( - ::Type{BlockSparseT1}, ::Type{<:NonuniformDiagBlockSparse{ElT2,VecT2,N2}} -) where {BlockSparseT1<:BlockSparse,ElT2<:Number,VecT2<:AbstractVector,N2} - return promote_type(BlockSparseT1, BlockSparse{ElT2,VecT2,N2}) -end - -function promote_rule( - ::Type{BlockSparseT1}, ::Type{<:UniformDiagBlockSparse{ElT2,ElT2}} -) where {BlockSparseT1<:BlockSparse,ElT2<:Number} - return promote_type(BlockSparseT1, ElT2) -end - -# Convert a DiagBlockSparse storage type to the closest Dense storage type -dense(::Type{<:NonuniformDiagBlockSparse{ElT,VecT}}) where {ElT,VecT} = Dense{ElT,VecT} -dense(::Type{<:UniformDiagBlockSparse{ElT}}) where {ElT} = Dense{ElT,Vector{ElT}} - -const DiagBlockSparseTensor{ElT,N,StoreT,IndsT} = - Tensor{ElT,N,StoreT,IndsT} where {StoreT<:DiagBlockSparse} -const NonuniformDiagBlockSparseTensor{ElT,N,StoreT,IndsT} = - Tensor{ElT,N,StoreT,IndsT} where {StoreT<:NonuniformDiagBlockSparse} -const UniformDiagBlockSparseTensor{ElT,N,StoreT,IndsT} = - Tensor{ElT,N,StoreT,IndsT} where {StoreT<:UniformDiagBlockSparse} - -function DiagBlockSparseTensor( - ::Type{ElT}, ::UndefInitializer, blocks::Vector, inds -) where {ElT} - blockoffsets, nnz = diagblockoffsets(blocks, inds) - storage = DiagBlockSparse(ElT, undef, blockoffsets, nnz) - return tensor(storage, inds) -end - -function DiagBlockSparseTensor(::UndefInitializer, blocks::Vector, inds) - return DiagBlockSparseTensor(Float64, undef, blocks, inds) -end - -function DiagBlockSparseTensor(::Type{ElT}, blocks::Vector, inds) where {ElT} - blockoffsets, nnz = diagblockoffsets(blocks, inds) - storage = DiagBlockSparse(ElT, blockoffsets, nnz) - return tensor(storage, inds) -end - -DiagBlockSparseTensor(blocks::Vector, inds) = DiagBlockSparseTensor(Float64, blocks, inds) - -# Uniform case -function DiagBlockSparseTensor(x::Number, blocks::Vector, inds) - blockoffsets, nnz = diagblockoffsets(blocks, inds) - storage = DiagBlockSparse(x, blockoffsets) - return tensor(storage, inds) -end - -diagblockoffsets(T::DiagBlockSparseTensor) = diagblockoffsets(storage(T)) - -""" -blockview(T::DiagBlockSparseTensor, block::Block) - -Given a block in the block-offset list, return a Diag Tensor -that is a view to the data in that block (to avoid block lookup if the position -is known already). -""" -function blockview(T::DiagBlockSparseTensor, blockT::Block) - return blockview(T, BlockOffset(blockT, offset(T, blockT))) -end - -getindex(T::DiagBlockSparseTensor, block::Block) = blockview(T, block) - -function blockview(T::DiagBlockSparseTensor, bof::BlockOffset) - blockT, offsetT = bof - blockdimsT = blockdims(T, blockT) - blockdiaglengthT = minimum(blockdimsT) - dataTslice = @view data(storage(T))[(offsetT + 1):(offsetT + blockdiaglengthT)] - return tensor(Diag(dataTslice), blockdimsT) -end - -function blockview(T::UniformDiagBlockSparseTensor, bof::BlockOffset) - blockT, offsetT = bof - blockdimsT = blockdims(T, blockT) - return tensor(Diag(getdiagindex(T, 1)), blockdimsT) -end - -IndexStyle(::Type{<:DiagBlockSparseTensor}) = IndexCartesian() - -# TODO: this needs to be better (promote element type, check order compatibility, -# etc. 
-function convert( - ::Type{<:DenseTensor{ElT,N}}, T::DiagBlockSparseTensor{ElT,N} -) where {ElT<:Number,N} - return dense(T) -end - -# These are rules for determining the output of a pairwise contraction of NDTensors -# (given the indices of the output tensors) -function contraction_output_type( - TensorT1::Type{<:DiagBlockSparseTensor}, TensorT2::Type{<:BlockSparseTensor}, indsR::Tuple -) - return similartype(promote_type(TensorT1, TensorT2), indsR) -end - -function contraction_output_type( - TensorT1::Type{<:BlockSparseTensor}, TensorT2::Type{<:DiagBlockSparseTensor}, indsR::Tuple -) - return contraction_output_type(TensorT2, TensorT1, indsR) -end - -# This performs the logic that DiagBlockSparseTensor*DiagBlockSparseTensor -> DiagBlockSparseTensor if it is not an outer -# product but -> DenseTensor if it is -# TODO: if the tensors are both order 2 (or less), or if there is an Index replacement, -# then they remain diagonal. Should we limit DiagBlockSparseTensor*DiagBlockSparseTensor to cases that -# result in a DiagBlockSparseTensor, for efficiency and type stability? What about a general -# SparseTensor result? -function contraction_output_type( - TensorT1::Type{<:DiagBlockSparseTensor{<:Number,N1}}, - TensorT2::Type{<:DiagBlockSparseTensor{<:Number,N2}}, - indsR::Tuple, -) where {N1,N2} - if ValLength(indsR) === Val{N1 + N2} - # Turn into is_outer(inds1,inds2,indsR) function? - # How does type inference work with arithmetic of compile-time values? - return similartype(dense(promote_type(TensorT1, TensorT2)), indsR) - end - return similartype(promote_type(TensorT1, TensorT2), indsR) -end - -# The output must be initialized as zero since it is sparse, cannot be undefined -function contraction_output(T1::DiagBlockSparseTensor, T2::Tensor, indsR) - return zero_contraction_output(T1, T2, indsR) -end -function contraction_output(T1::Tensor, T2::DiagBlockSparseTensor, indsR) - return contraction_output(T2, T1, indsR) -end - -# function contraction_output(T1::DiagBlockSparseTensor, T2::DiagBlockSparseTensor, indsR) -# return zero_contraction_output(T1, T2, indsR) -# end - -# Determine the contraction output and block contractions -function contraction_output( - tensor1::DiagBlockSparseTensor, - labelstensor1, - tensor2::DiagBlockSparseTensor, - labelstensor2, - labelsR, -) - indsR = contract_inds(inds(tensor1), labelstensor1, inds(tensor2), labelstensor2, labelsR) - TensorR = contraction_output_type(typeof(tensor1), typeof(tensor2), indsR) - blockoffsetsR, contraction_plan = contract_blockoffsets( - blockoffsets(tensor1), - inds(tensor1), - labelstensor1, - blockoffsets(tensor2), - inds(tensor2), - labelstensor2, - indsR, - labelsR, - ) - R = similar(TensorR, blockoffsetsR, indsR) - return R # , contraction_plan -end - -## TODO: Is there a way to make this generic?
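# A sketch of the dimension-count rule used by `contraction_output_type`
# above (the helper name here is illustrative, not from the original source):
# a contraction is an outer product exactly when no indices are shared, i.e.
# the output order equals the sum of the input orders, and only then does
# diag * diag produce a dense result.
is_outer_contraction(ndims1::Int, ndims2::Int, ndimsR::Int) = ndimsR == ndims1 + ndims2
@assert is_outer_contraction(2, 2, 4)  # (i,j) ⊗ (k,l) -> dense order-4 result
@assert !is_outer_contraction(2, 2, 2) # (i,j) * (j,k) -> result stays diagonal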
-# NDTensors.similar -function similar( - tensortype::Type{<:DiagBlockSparseTensor}, blockoffsets::BlockOffsets, dims::Tuple -) - return Tensor(similar(storagetype(tensortype), blockoffsets, dims), dims) -end - -# NDTensors.similar -function similar( - storagetype::Type{<:DiagBlockSparse}, blockoffsets::BlockOffsets, dims::Tuple -) - # TODO: Improve this with FillArrays.jl - # data = similar(datatype(storagetype), nnz(blockoffsets, dims)) - data = zero(datatype(storagetype)) - return DiagBlockSparse(data, blockoffsets) -end - -function array(T::DiagBlockSparseTensor{ElT,N}) where {ElT,N} - return array(dense(T)) -end -matrix(T::DiagBlockSparseTensor{<:Number,2}) = array(T) -vector(T::DiagBlockSparseTensor{<:Number,1}) = array(T) - -function Array{ElT,N}(T::DiagBlockSparseTensor{ElT,N}) where {ElT,N} - return array(T) -end - -function Array(T::DiagBlockSparseTensor{ElT,N}) where {ElT,N} - return Array{ElT,N}(T) -end - -getdiagindex(T::DiagBlockSparseTensor{<:Number}, ind::Int) = storage(T)[ind] - -# XXX: handle case of missing diagonal blocks -function setdiagindex!(T::DiagBlockSparseTensor{<:Number}, val, ind::Int) - storage(T)[ind] = val - return T -end - -function setdiag(T::DiagBlockSparseTensor, val, ind::Int) - return tensor(DiagBlockSparse(val), inds(T)) -end - -function setdiag(T::UniformDiagBlockSparseTensor, val, ind::Int) - return tensor(DiagBlockSparse(val, blockoffsets(T)), inds(T)) -end - -@propagate_inbounds function getindex( - T::DiagBlockSparseTensor{ElT,N}, inds::Vararg{Int,N} -) where {ElT,N} - if all(==(inds[1]), inds) - return storage(T)[inds[1]] - else - return zero(eltype(ElT)) - end -end - -@propagate_inbounds function getindex(T::DiagBlockSparseTensor{<:Number,1}, ind::Int) - return storage(T)[ind] -end - -@propagate_inbounds function getindex(T::DiagBlockSparseTensor{<:Number,0}) - return storage(T)[1] -end - -# Set diagonal elements -# Throw error for off-diagonal -@propagate_inbounds function setindex!( - T::DiagBlockSparseTensor{<:Number,N}, val, inds::Vararg{Int,N} -) where {N} - all(==(inds[1]), inds) || - error("Cannot set off-diagonal element of DiagBlockSparse storage") - storage(T)[inds[1]] = val - return T -end - -@propagate_inbounds function setindex!(T::DiagBlockSparseTensor{<:Number,1}, val, ind::Int) - storage(T)[ind] = val - return T -end - -@propagate_inbounds function setindex!(T::DiagBlockSparseTensor{<:Number,0}, val) - storage(T)[1] = val - return T -end - -function setindex!( - T::UniformDiagBlockSparseTensor{<:Number,N}, val, inds::Vararg{Int,N} -) where {N} - return error("Cannot set elements of a uniform DiagBlockSparse storage") -end - -# TODO: make a fill!! 
that works for uniform and non-uniform -#fill!(T::DiagBlockSparseTensor,v) = fill!(storage(T),v) - -function dense( - ::Type{<:Tensor{ElT,N,StoreT,IndsT}} -) where {ElT,N,StoreT<:DiagBlockSparse,IndsT} - return Tensor{ElT,N,dense(StoreT),IndsT} -end - -# convert to Dense -function dense(T::TensorT) where {TensorT<:DiagBlockSparseTensor} - R = zeros(dense(TensorT), inds(T)) - for i in 1:diaglength(T) - setdiagindex!(R, getdiagindex(T, i), i) - end - return R -end - -# convert to BlockSparse -function denseblocks(D::Tensor) - nzblocksD = nzblocks(D) - T = BlockSparseTensor(eltype(D), nzblocksD, inds(D)) - for b in nzblocksD - for n in 1:diaglength(D) - setdiagindex!(T, getdiagindex(D, n), n) - end - end - return T -end - -function outer!( - R::DenseTensor{<:Number,NR}, - T1::DiagBlockSparseTensor{<:Number,N1}, - T2::DiagBlockSparseTensor{<:Number,N2}, -) where {NR,N1,N2} - for i1 in 1:diaglength(T1), i2 in 1:diaglength(T2) - indsR = CartesianIndex{NR}(ntuple(r -> r ≤ N1 ? i1 : i2, Val(NR))) - R[indsR] = getdiagindex(T1, i1) * getdiagindex(T2, i2) - end - return R -end - -# TODO: write an optimized version of this? -function outer!(R::DenseTensor{ElR}, T1::DenseTensor, T2::DiagBlockSparseTensor) where {ElR} - R .= zero(ElR) - outer!(R, T1, dense(T2)) - return R -end - -function outer!(R::DenseTensor{ElR}, T1::DiagBlockSparseTensor, T2::DenseTensor) where {ElR} - R .= zero(ElR) - outer!(R, dense(T1), T2) - return R -end - -# Write an in-place version -function outer( - T1::DiagBlockSparseTensor{ElT1,N1}, T2::DiagBlockSparseTensor{ElT2,N2} -) where {ElT1,ElT2,N1,N2} - indsR = unioninds(inds(T1), inds(T2)) - R = tensor(Dense(zeros(promote_type(ElT1, ElT2), dim(indsR))), indsR) - outer!(R, T1, T2) - return R -end - -function permutedims!( - R::DiagBlockSparseTensor{<:Number,N}, - T::DiagBlockSparseTensor{<:Number,N}, - perm::NTuple{N,Int}, - f::Function=(r, t) -> t, -) where {N} - # TODO: check that inds(R)==permute(inds(T),perm)?
- for i in 1:diaglength(R) - @inbounds setdiagindex!(R, f(getdiagindex(R, i), getdiagindex(T, i)), i) - end - return R -end - -function permutedims( - T::UniformDiagBlockSparseTensor{ElT,N}, perm::NTuple{N,Int}, f::Function=identity -) where {ElT,N} - R = tensor(DiagBlockSparse(f(getdiagindex(T, 1))), permute(inds(T), perm)) - return R -end - -# Version that may overwrite in-place or may return the result -function permutedims!!( - R::NonuniformDiagBlockSparseTensor{<:Number,N}, - T::NonuniformDiagBlockSparseTensor{<:Number,N}, - perm::NTuple{N,Int}, - f::Function=(r, t) -> t, -) where {N} - RR = convert(promote_type(typeof(R), typeof(T)), R) - permutedims!(RR, T, perm, f) - return RR -end - -function permutedims!!( - R::UniformDiagBlockSparseTensor{ElR,N}, - T::UniformDiagBlockSparseTensor{ElT,N}, - perm::NTuple{N,Int}, - f::Function=(r, t) -> t, -) where {ElR,ElT,N} - RR = convert(promote_type(typeof(R), typeof(T)), R) - RR = tensor(DiagBlockSparse(f(getdiagindex(RR, 1), getdiagindex(T, 1))), inds(RR)) - return RR -end - -function permutedims!( - R::DenseTensor{ElR,N}, - T::DiagBlockSparseTensor{ElT,N}, - perm::NTuple{N,Int}, - f::Function=(r, t) -> t, -) where {ElR,ElT,N} - for i in 1:diaglength(T) - @inbounds setdiagindex!(R, f(getdiagindex(R, i), getdiagindex(T, i)), i) - end - return R -end - -function permutedims!!( - R::DenseTensor{ElR,N}, - T::DiagBlockSparseTensor{ElT,N}, - perm::NTuple{N,Int}, - f::Function=(r, t) -> t, -) where {ElR,ElT,N} - permutedims!(R, T, perm, f) - return R -end - -function _contract!!( - R::UniformDiagBlockSparseTensor{ElR,NR}, - labelsR, - T1::UniformDiagBlockSparseTensor{<:Number,N1}, - labelsT1, - T2::UniformDiagBlockSparseTensor{<:Number,N2}, - labelsT2, -) where {ElR,NR,N1,N2} - if NR == 0 # If all indices of A and B are contracted - # all indices are summed over, just add the product of the diagonal - # elements of A and B - R = setdiag(R, diaglength(T1) * getdiagindex(T1, 1) * getdiagindex(T2, 1), 1) - else - # not all indices are summed over, set the diagonals of the result - # to the product of the diagonals of A and B - R = setdiag(R, getdiagindex(T1, 1) * getdiagindex(T2, 1), 1) - end - return R -end - -# TODO: Improve this with FillArrays.jl -norm(S::UniformDiagBlockSparseTensor) = sqrt(mindim(S) * abs2(data(S))) - -function contraction_output( - T1::TensorT1, labelsT1, T2::TensorT2, labelsT2, labelsR -) where {TensorT1<:BlockSparseTensor,TensorT2<:DiagBlockSparseTensor} - indsR = contract_inds(inds(T1), labelsT1, inds(T2), labelsT2, labelsR) - TensorR = contraction_output_type(TensorT1, TensorT2, indsR) - blockoffsetsR, contraction_plan = contract_blockoffsets( - blockoffsets(T1), - inds(T1), - labelsT1, - blockoffsets(T2), - inds(T2), - labelsT2, - indsR, - labelsR, - ) - R = zeros(TensorR, blockoffsetsR, indsR) - return R, contraction_plan -end - -function contract( - T1::BlockSparseTensor, - labelsT1, - T2::DiagBlockSparseTensor, - labelsT2, - labelsR=contract_labels(labelsT1, labelsT2), -) - R, contraction_plan = contraction_output(T1, labelsT1, T2, labelsT2, labelsR) - R = contract!(R, labelsR, T1, labelsT1, T2, labelsT2, contraction_plan) - return R -end - -function contract( - T1::DiagBlockSparseTensor, - labelsT1, - T2::BlockSparseTensor, - labelsT2, - labelsR=contract_labels(labelsT2, labelsT1), -) - return contract(T2, labelsT2, T1, labelsT1, labelsR) -end - -function contract!( - R::BlockSparseTensor{ElR,NR}, - labelsR, - T1::BlockSparseTensor, - labelsT1, - T2::DiagBlockSparseTensor, - labelsT2, - contraction_plan, -) where 
{ElR<:Number,NR} - if any(b -> !allequal(Tuple(b)), nzblocks(T2)) - return error( - "When contracting a BlockSparse tensor with a DiagBlockSparse tensor, the DiagBlockSparse tensor must be block diagonal for the time being.", - ) - end - already_written_to = Dict{Block{NR},Bool}() - indsR = inds(R) - indsT1 = inds(T1) - indsT2 = inds(T2) - # In R .= α .* (T1 * T2) .+ β .* R - α = one(ElR) - for (block1, block2, blockR) in contraction_plan - T1block = T1[block1] - T2block = T2[block2] - Rblock = R[blockR] - - # <fermions> - α = compute_alpha( - ElR, labelsR, blockR, indsR, labelsT1, block1, indsT1, labelsT2, block2, indsT2 - ) - - β = one(ElR) - if !haskey(already_written_to, blockR) - already_written_to[blockR] = true - # Overwrite the block of R - β = zero(ElR) - end - contract!( - expose(Rblock), labelsR, expose(T1block), labelsT1, expose(T2block), labelsT2, α, β - ) - end - return R -end - -function contract!( - C::BlockSparseTensor, - Clabels, - A::BlockSparseTensor, - Alabels, - B::DiagBlockSparseTensor, - Blabels, -) - return contract!(C, Clabels, B, Blabels, A, Alabels) -end - -function Base.show(io::IO, mime::MIME"text/plain", T::DiagBlockSparseTensor) - summary(io, T) - for (n, block) in enumerate(keys(diagblockoffsets(T))) - blockdimsT = blockdims(T, block) - println(io, block) - println(io, " [", _range2string(blockstart(T, block), blockend(T, block)), "]") - print_tensor(io, blockview(T, block)) - n < nnzblocks(T) && print(io, "\n\n") - end -end - -show(io::IO, T::DiagBlockSparseTensor) = show(io, MIME("text/plain"), T) diff --git a/NDTensors/src/blocksparse/fermions.jl b/NDTensors/src/blocksparse/fermions.jl deleted file mode 100644 index 3fdb96fd9c..0000000000 --- a/NDTensors/src/blocksparse/fermions.jl +++ /dev/null @@ -1,7 +0,0 @@ -block_parity(i, block) = 0 -block_sign(i, block) = 1 - -right_arrow_sign(i, block) = 1 -left_arrow_sign(i, block) = 1 - -permfactor(perm, args...) = 1 diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl deleted file mode 100644 index 23e332ba4c..0000000000 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ /dev/null @@ -1,427 +0,0 @@ -using .TypeParameterAccessors: unwrap_array_type -using .Expose: expose -const BlockSparseMatrix{ElT,StoreT,IndsT} = BlockSparseTensor{ElT,2,StoreT,IndsT} -const DiagBlockSparseMatrix{ElT,StoreT,IndsT} = DiagBlockSparseTensor{ElT,2,StoreT,IndsT} -const DiagMatrix{ElT,StoreT,IndsT} = DiagTensor{ElT,2,StoreT,IndsT} - -function _truncated_blockdim( - S::DiagMatrix, docut::Real; singular_values=false, truncate=true, min_blockdim=nothing -) - min_blockdim = replace_nothing(min_blockdim, 0) - # TODO: Replace `cpu` with `Expose` dispatch. - S = cpu(S) - full_dim = diaglength(S) - !truncate && return full_dim - min_blockdim = min(min_blockdim, full_dim) - newdim = 0 - val = singular_values ? getdiagindex(S, newdim + 1)^2 : abs(getdiagindex(S, newdim + 1)) - while newdim + 1 ≤ full_dim && val > docut - newdim += 1 - if newdim + 1 ≤ full_dim - val = - singular_values ? getdiagindex(S, newdim + 1)^2 : abs(getdiagindex(S, newdim + 1)) - end - end - (newdim >= min_blockdim) || (newdim = min_blockdim) - return newdim -end - -""" - svd(T::BlockSparseTensor{<:Number,2}; kwargs...) - -svd of an order-2 BlockSparseTensor. - -This function assumes that there is one block -per row/column, otherwise it fails. -This assumption makes it so the result can be -computed from the dense svds of separate blocks.
-""" -function svd( - T::Tensor{ElT,2,<:BlockSparse}; - min_blockdim=nothing, - mindim=nothing, - maxdim=nothing, - cutoff=nothing, - alg=nothing, - use_absolute_cutoff=nothing, - use_relative_cutoff=nothing, -) where {ElT} - Us = Vector{DenseTensor{ElT,2}}(undef, nnzblocks(T)) - Ss = Vector{DiagTensor{real(ElT),2}}(undef, nnzblocks(T)) - Vs = Vector{DenseTensor{ElT,2}}(undef, nnzblocks(T)) - - # Sorted eigenvalues - d = Vector{real(ElT)}() - - for (n, b) in enumerate(eachnzblock(T)) - blockT = blockview(T, b) - USVb = svd(blockT; alg) - if isnothing(USVb) - return nothing - end - Ub, Sb, Vb = USVb - Us[n] = Ub - Ss[n] = Sb - Vs[n] = Vb - # Previously this was: - # vector(diag(Sb)) - # But it broke, did `diag(::Tensor)` change types? - # TODO: call this a function `diagonal`, i.e.: - # https://github.com/JuliaLang/julia/issues/30250 - # or make `diag(::Tensor)` return a view by default. - append!(expose(d), data(Sb)) - end - - # Square the singular values to get - # the eigenvalues - d .= d .^ 2 - sort!(d; rev=true) - - # Get the list of blocks of T - # that are not dropped - nzblocksT = nzblocks(T) - - dropblocks = Int[] - if any(!isnothing, (maxdim, cutoff)) - d, truncerr, docut = truncate!!( - d; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff - ) - for n in 1:nnzblocks(T) - blockdim = _truncated_blockdim( - Ss[n], docut; min_blockdim, singular_values=true, truncate=true - ) - if blockdim == 0 - push!(dropblocks, n) - else - # TODO: Replace call to `data` with `diagview`. - Strunc = tensor(Diag(data(Ss[n])[1:blockdim]), (blockdim, blockdim)) - Us[n] = Us[n][1:dim(Us[n], 1), 1:blockdim] - Ss[n] = Strunc - Vs[n] = Vs[n][1:dim(Vs[n], 1), 1:blockdim] - end - end - deleteat!(Us, dropblocks) - deleteat!(Ss, dropblocks) - deleteat!(Vs, dropblocks) - deleteat!(nzblocksT, dropblocks) - else - truncerr, docut = 0.0, 0.0 - end - - # The number of non-zero blocks of T remaining - nnzblocksT = length(nzblocksT) - - # - # Make indices of U and V - # that connect to S - # - i1 = ind(T, 1) - i2 = ind(T, 2) - uind = dag(sim(i1)) - vind = dag(sim(i2)) - resize!(uind, nnzblocksT) - resize!(vind, nnzblocksT) - for (n, blockT) in enumerate(nzblocksT) - Udim = size(Us[n], 2) - b1 = block(i1, blockT[1]) - setblock!(uind, resize(b1, Udim), n) - Vdim = size(Vs[n], 2) - b2 = block(i2, blockT[2]) - setblock!(vind, resize(b2, Vdim), n) - end - - # - # Put the blocks into U,S,V - # - - nzblocksU = Vector{Block{2}}(undef, nnzblocksT) - nzblocksS = Vector{Block{2}}(undef, nnzblocksT) - nzblocksV = Vector{Block{2}}(undef, nnzblocksT) - - for (n, blockT) in enumerate(nzblocksT) - blockU = (blockT[1], UInt(n)) - nzblocksU[n] = blockU - - blockS = (n, n) - nzblocksS[n] = blockS - - blockV = (blockT[2], UInt(n)) - nzblocksV[n] = blockV - end - - indsU = setindex(inds(T), uind, 2) - - indsV = setindex(inds(T), vind, 1) - indsV = permute(indsV, (2, 1)) - - indsS = setindex(inds(T), dag(uind), 1) - indsS = setindex(indsS, dag(vind), 2) - - U = BlockSparseTensor(unwrap_array_type(T), undef, nzblocksU, indsU) - S = DiagBlockSparseTensor( - set_eltype(unwrap_array_type(T), real(ElT)), undef, nzblocksS, indsS - ) - V = BlockSparseTensor(unwrap_array_type(T), undef, nzblocksV, indsV) - - for n in 1:nnzblocksT - Ub, Sb, Vb = Us[n], Ss[n], Vs[n] - - blockU = nzblocksU[n] - blockS = nzblocksS[n] - blockV = nzblocksV[n] - - if VERSION < v"1.5" - # In v1.3 and v1.4 of Julia, Ub has - # a very complicated view wrapper that - # can't be handled efficiently - Ub = copy(Ub) - Vb = copy(Vb) - end - - # - sU = 
right_arrow_sign(uind, blockU[2]) - - if sU == -1 - Ub *= -1 - end - copyto!(expose(blockview(U, blockU)), expose(Ub)) - - blockviewS = blockview(S, blockS) - # TODO: Replace `data` with `diagview`. - copyto!(expose(data(blockviewS)), expose(data(Sb))) - - # <fermions> - sV = left_arrow_sign(vind, blockV[2]) - # This sign (sVP) accounts for the fact that - # V is transposed, i.e. the index connecting to S - # is the second index: - sVP = 1 - if using_auto_fermion() - sVP = -block_sign(vind, blockV[2]) - end - - if (sV * sVP) == -1 - Vb *= -1 - end - copyto!(blockview(V, blockV), Vb) - end - return U, S, V, Spectrum(d, truncerr) -end - -_eigen_eltypes(T::Hermitian{ElT,<:BlockSparseMatrix{ElT}}) where {ElT} = real(ElT), ElT - -_eigen_eltypes(T::BlockSparseMatrix{ElT}) where {ElT} = complex(ElT), complex(ElT) - -function LinearAlgebra.eigen( - T::Union{Hermitian{ElT,<:Tensor{ElT,2,<:BlockSparse}},Tensor{ElT,2,<:BlockSparse}}; - min_blockdim=nothing, - mindim=nothing, - maxdim=nothing, - cutoff=nothing, - use_absolute_cutoff=nothing, - use_relative_cutoff=nothing, -) where {ElT<:Union{Real,Complex}} - ElD, ElV = _eigen_eltypes(T) - - # Sorted eigenvalues - d = Vector{real(ElT)}() - - for b in eachnzblock(T) - all(==(b[1]), b) || error("Eigen currently only supports block diagonal matrices.") - end - - b = first(eachnzblock(T)) - blockT = blockview(T, b) - Db, Vb = eigen(expose(blockT)) - Ds = [Db] - Vs = [Vb] - append!(expose(d), abs.(data(Db))) - for (n, b) in enumerate(eachnzblock(T)) - n == 1 && continue - blockT = blockview(T, b) - Db, Vb = eigen(expose(blockT)) - push!(Ds, Db) - push!(Vs, Vb) - append!(expose(d), abs.(data(Db))) - end - - dropblocks = Int[] - sort!(d; rev=true, by=abs) - - if any(!isnothing, (maxdim, cutoff)) - d, truncerr, docut = truncate!!( - d; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff - ) - for n in 1:nnzblocks(T) - blockdim = _truncated_blockdim( - Ds[n], docut; min_blockdim, singular_values=false, truncate=true - ) - if blockdim == 0 - push!(dropblocks, n) - else - # TODO: Replace call to `data` with `diagview`.
- Dtrunc = tensor(Diag(data(Ds[n])[1:blockdim]), (blockdim, blockdim)) - Ds[n] = Dtrunc - new_size = (dim(Vs[n], 1), blockdim) - new_data = array(Vs[n])[1:new_size[1], 1:new_size[2]] - Vs[n] = tensor(Dense(new_data), new_size) - end - end - deleteat!(Ds, dropblocks) - deleteat!(Vs, dropblocks) - else - truncerr = 0.0 - end - - # Get the list of blocks of T - # that are not dropped - nzblocksT = nzblocks(T) - deleteat!(nzblocksT, dropblocks) - - # The number of blocks of T remaining - nnzblocksT = nnzblocks(T) - length(dropblocks) - - # - # Put the blocks into D, V - # - - i1, i2 = inds(T) - l = sim(i1) - - lkeepblocks = Int[bT[1] for bT in nzblocksT] - ldropblocks = setdiff(1:nblocks(l), lkeepblocks) - deleteat!(l, ldropblocks) - - # l may have too many blocks - (nblocks(l) > nnzblocksT) && error("New index l in eigen has too many blocks") - - # Truncation may have changed - # some block sizes - for n in 1:nnzblocksT - setblockdim!(l, minimum(dims(Ds[n])), n) - end - - r = dag(sim(l)) - - indsD = (l, r) - indsV = (dag(i2), r) - - nzblocksD = Vector{Block{2}}(undef, nnzblocksT) - nzblocksV = Vector{Block{2}}(undef, nnzblocksT) - for n in 1:nnzblocksT - blockT = nzblocksT[n] - - blockD = (n, n) - nzblocksD[n] = blockD - - blockV = (blockT[1], n) - nzblocksV[n] = blockV - end - - D = DiagBlockSparseTensor( - set_ndims(set_eltype(unwrap_array_type(T), ElD), 1), undef, nzblocksD, indsD - ) - V = BlockSparseTensor(set_eltype(unwrap_array_type(T), ElV), undef, nzblocksV, indsV) - - for n in 1:nnzblocksT - Db, Vb = Ds[n], Vs[n] - - blockD = nzblocksD[n] - blockviewD = blockview(D, blockD) - # TODO: Replace `data` with `diagview`. - copyto!(expose(data(blockviewD)), expose(data(Db))) - - blockV = nzblocksV[n] - copyto!(blockview(V, blockV), Vb) - end - - return D, V, Spectrum(d, truncerr) -end - -Expose.ql(T::BlockSparseTensor{<:Any,2}; kwargs...) = qx(ql, T; kwargs...) -qr(T::BlockSparseTensor{<:Any,2}; kwargs...) = qx(qr, T; kwargs...) - -# -# Generic function to implement block sparse qr/ql decomposition. It calls -# the dense qr or ql for each block. The X tensor = R or L. -# This code thanks to Niklas Tausendpfund -# https://github.com/ntausend/variance_iTensor/blob/main/Hubig_variance_test.ipynb -# -function qx(qx::Function, T::BlockSparseTensor{<:Any,2}; positive=nothing) - ElT = eltype(T) - # getting total number of blocks - nnzblocksT = nnzblocks(T) - nzblocksT = nzblocks(T) - - Qs = Vector{DenseTensor{ElT,2}}(undef, nnzblocksT) - Xs = Vector{DenseTensor{ElT,2}}(undef, nnzblocksT) - - for (jj, b) in enumerate(eachnzblock(T)) - blockT = blockview(T, b) - QXb = qx(blockT; positive) - - if (isnothing(QXb)) - return nothing - end - - Q, X = QXb - Qs[jj] = Q - Xs[jj] = X - end - - # - # Make the new index connecting Q and R - # - itl = ind(T, 1) #left index of T - iq = dag(sim(itl)) #start with similar to the left index of T - resize!(iq, nnzblocksT) #adjust the size to match the block count - for (n, blockT) in enumerate(nzblocksT) - Qdim = size(Qs[n], 2) #get the block dim on right side of Q.
- b1 = block(itl, blockT[1]) - setblock!(iq, resize(b1, Qdim), n) - end - - indsQ = setindex(inds(T), iq, 2) - indsX = setindex(inds(T), dag(iq), 1) - - nzblocksQ = Vector{Block{2}}(undef, nnzblocksT) - nzblocksX = Vector{Block{2}}(undef, nnzblocksT) - - for n in 1:nnzblocksT - blockT = nzblocksT[n] - nzblocksQ[n] = (blockT[1], UInt(n)) - nzblocksX[n] = (UInt(n), blockT[2]) - end - - Q = BlockSparseTensor(unwrap_array_type(T), undef, nzblocksQ, indsQ) - X = BlockSparseTensor(unwrap_array_type(T), undef, nzblocksX, indsX) - - for n in 1:nnzblocksT - copyto!(blockview(Q, nzblocksQ[n]), Qs[n]) - copyto!(blockview(X, nzblocksX[n]), Xs[n]) - end - - Q = adapt(unwrap_array_type(T), Q) - X = adapt(unwrap_array_type(T), X) - return Q, X -end - -function exp( - T::Union{BlockSparseMatrix{ElT},Hermitian{ElT,<:BlockSparseMatrix{ElT}}} -) where {ElT<:Union{Real,Complex}} - expT = BlockSparseTensor(ElT, undef, nzblocks(T), inds(T)) - for b in eachnzblock(T) - all(==(b[1]), b) || error("exp currently supports only block-diagonal matrices") - end - for b in eachdiagblock(T) - blockT = blockview(T, b) - if isnothing(blockT) - # Block was not found in the list, treat as 0 - id_block = Matrix{ElT}(I, blockdims(T, b)) - insertblock!(expT, b) - blockview(expT, b) .= id_block - else - blockview(expT, b) .= exp(blockT) - end - end - return expT -end diff --git a/NDTensors/src/blocksparse/similar.jl b/NDTensors/src/blocksparse/similar.jl deleted file mode 100644 index 5ef594c1c4..0000000000 --- a/NDTensors/src/blocksparse/similar.jl +++ /dev/null @@ -1,62 +0,0 @@ -using SparseArrays: nnz -using .TypeParameterAccessors: similartype - -# NDTensors.similar -function similar(storagetype::Type{<:BlockSparse}, blockoffsets::BlockOffsets, dims::Tuple) - data = similar(datatype(storagetype), nnz(blockoffsets, dims)) - return BlockSparse(data, blockoffsets) -end - -# NDTensors.similar -function similar(storagetype::Type{<:BlockSparse}, dims::Tuple) - # Create an empty BlockSparse storage - return similartype(storagetype, dims)() -end - -# NDTensors.similar -function similar(storagetype::Type{<:BlockSparse}, dims::Dims) - # Create an empty BlockSparse storage - return similartype(storagetype, dims)() -end - -## TODO: Is there a way to make this generic? -# NDTensors.similar -function similar( - tensortype::Type{<:BlockSparseTensor}, blockoffsets::BlockOffsets, dims::Tuple -) - return Tensor(similar(storagetype(tensortype), blockoffsets, dims), dims) -end - -# NDTensors.similar -function similar(tensor::BlockSparseTensor, blockoffsets::BlockOffsets, dims::Tuple) - return similar(typeof(tensor), blockoffsets, dims) -end - -## ## TODO: Determine if the methods below are needed. 
-## similar(D::DiagBlockSparse, n::Int) = setdata(D, similar(data(D), n)) -## -## function similar(D::DiagBlockSparse, ::Type{ElR}, n::Int) where {ElR} -## return setdata(D, similar(data(D), ElR, n)) -## end -## -## # TODO: write in terms of ::Int, not inds -## similar(D::NonuniformDiagBlockSparse) = setdata(D, similar(data(D))) -## -## similar(D::NonuniformDiagBlockSparse, ::Type{S}) where {S} = setdata(D, similar(data(D), S)) -## #similar(D::NonuniformDiagBlockSparse,inds) = DiagBlockSparse(similar(data(D),minimum(dims(inds))), diagblockoffsets(D)) -## #function similar(D::Type{<:NonuniformDiagBlockSparse{ElT,VecT}},inds) where {ElT,VecT} -## # return DiagBlockSparse(similar(VecT,diaglength(inds)), diagblockoffsets(D)) -## #end -## -## similar(D::UniformDiagBlockSparse) = setdata(D, zero(eltype(D))) -## similar(D::UniformDiagBlockSparse, inds) = similar(D) -## function similar(::Type{<:UniformDiagBlockSparse{ElT}}, inds) where {ElT} -## return DiagBlockSparse(zero(ElT), diagblockoffsets(D)) -## end -## -## # Needed to get slice of DiagBlockSparseTensor like T[1:3,1:3] -## function similar( -## T::DiagBlockSparseTensor{<:Number,N}, ::Type{ElR}, inds::Dims{N} -## ) where {ElR<:Number,N} -## return tensor(similar(storage(T), ElR, minimum(inds)), inds) -## end diff --git a/NDTensors/src/combiner/combiner.jl b/NDTensors/src/combiner/combiner.jl deleted file mode 100644 index c807e96204..0000000000 --- a/NDTensors/src/combiner/combiner.jl +++ /dev/null @@ -1,171 +0,0 @@ -export Combiner - -# TODO: Have combiner store the locations -# of the uncombined and combined indices -# This can generalize to a Combiner that combines -# multiple sets of indices, e.g. (i,j),(k,l) -> (a,b) -struct Combiner <: TensorStorage{Number} - perm::Vector{Int} - comb::Vector{Int} - cind::Vector{Int} - isconj::Bool - function Combiner(perm::Vector{Int}, comb::Vector{Int}, cind::Vector{Int}, isconj::Bool) - return new(perm, comb, cind, isconj) - end -end - -Combiner() = Combiner(Int[], Int[], Int[1], false) - -Combiner(perm::Vector{Int}, comb::Vector{Int}) = Combiner(perm, comb, Int[1], false) - -data(::Combiner) = NoData() -datatype(::Type{<:Combiner}) = NoData -setdata(C::Combiner, data::NoData) = C -blockperm(C::Combiner) = C.perm -blockcomb(C::Combiner) = C.comb -cinds(C::Combiner) = C.cind -isconj(C::Combiner) = C.isconj -setisconj(C::Combiner, isconj) = Combiner(blockperm(C), blockcomb(C), cinds(C), isconj) - -function copy(C::Combiner) - return Combiner(copy(blockperm(C)), copy(blockcomb(C)), copy(cinds(C)), isconj(C)) -end - -eltype(::Type{<:Combiner}) = Number - -eltype(::Combiner) = eltype(Combiner) - -promote_rule(::Type{<:Combiner}, StorageT::Type{<:Dense}) = StorageT - -conj(::AllowAlias, C::Combiner) = setisconj(C, !isconj(C)) -conj(::NeverAlias, C::Combiner) = conj(AllowAlias(), copy(C)) - -# -# CombinerTensor (Tensor using Combiner storage) -# - -const CombinerTensor{ElT,N,StoreT,IndsT} = - Tensor{ElT,N,StoreT,IndsT} where {StoreT<:Combiner} - -# The position of the combined index/dimension. -# By convention, it is the first one. -combinedind_position(combiner_tensor::CombinerTensor) = 1 - -function combinedind(combiner_tensor::CombinerTensor) - return inds(combiner_tensor)[combinedind_position(combiner_tensor)] -end -# TODO: Rewrite in terms of `combinedind_position`.
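# A conceptual sketch of what combining does for plain dense data (an
# illustration only; the actual CombinerTensor above additionally tracks
# block permutation/combination via `perm` and `comb`): combining indices
# (j, k) into one index of dimension dim(j) * dim(k) is a reshape, and
# uncombining is the inverse reshape.
let A = reshape(collect(1.0:24.0), 2, 3, 4)
  combined = reshape(A, 2, 12)            # combine dims 2 and 3 into one dim
  uncombined = reshape(combined, 2, 3, 4) # uncombining restores the original shape
  @assert uncombined == A
end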
-function uncombinedinds(combiner_tensor::CombinerTensor) - return deleteat(inds(combiner_tensor), combinedind_position(combiner_tensor)) -end - -function combinedind_label(combiner_tensor::CombinerTensor, combiner_tensor_labels) - return combiner_tensor_labels[combinedind_position(combiner_tensor)] -end - -function uncombinedind_labels(combiner_tensor::CombinerTensor, combiner_tensor_labels) - return deleteat(combiner_tensor_labels, combinedind_position(combiner_tensor)) -end - -blockperm(C::CombinerTensor) = blockperm(storage(C)) -blockcomb(C::CombinerTensor) = blockcomb(storage(C)) - -function is_index_replacement( - tensor::Tensor, tensor_labels, combiner_tensor::CombinerTensor, combiner_tensor_labels -) - return (ndims(combiner_tensor) == 2) && - isone(count(∈(tensor_labels), combiner_tensor_labels)) -end - -# Return if the combiner contraction is combining or uncombining. -# Check for valid contractions, for example when combining, -# only the combined index should be uncontracted, and when uncombining, -# only the combined index should be contracted. -function is_combining( - tensor::Tensor, tensor_labels, combiner_tensor::CombinerTensor, combiner_tensor_labels -) - is_combining = is_combining_no_check( - tensor, tensor_labels, combiner_tensor, combiner_tensor_labels - ) - check_valid_combiner_contraction( - is_combining, tensor, tensor_labels, combiner_tensor, combiner_tensor_labels - ) - return is_combining -end - -function is_combining_no_check( - tensor::Tensor, tensor_labels, combiner_tensor::CombinerTensor, combiner_tensor_labels -) - return combinedind_label(combiner_tensor, combiner_tensor_labels) ∉ tensor_labels -end - -function check_valid_combiner_contraction( - is_combining::Bool, - tensor::Tensor, - tensor_labels, - combiner_tensor::CombinerTensor, - combiner_tensor_labels, -) - if !is_valid_combiner_contraction( - is_combining, tensor, tensor_labels, combiner_tensor, combiner_tensor_labels - ) - return invalid_combiner_contraction_error( - tensor, tensor_labels, combiner_tensor, combiner_tensor_labels - ) - end - return nothing -end - -function is_valid_combiner_contraction( - is_combining::Bool, - tensor::Tensor, - tensor_labels, - combiner_tensor::CombinerTensor, - combiner_tensor_labels, -) - in_tensor_labels_op = is_combining ? ∉(tensor_labels) : ∈(tensor_labels) - return isone(count(in_tensor_labels_op, combiner_tensor_labels)) -end - -function invalid_combiner_contraction_error( - tensor::Tensor, tensor_labels, combiner_tensor::CombinerTensor, combiner_tensor_labels -) - return error( - """ - Trying to contract a tensor with indices: - - $(inds(tensor)) - - and labels: - - $(tensor_labels) - - with a combiner tensor with indices: - - $(inds(combiner_tensor)) - - and labels: - - $(combiner_tensor_labels). - - This is not a valid combiner contraction. - - If you are combining, the combined index of the combiner should be the only one uncontracted. - - If you are uncombining, the combined index of the combiner should be the only one contracted. - - By convention, the combined index should be the index in position $(combinedind_position(combiner_tensor)) of the combiner tensor. 
- """, - ) -end - -function Base.show(io::IO, mime::MIME"text/plain", S::Combiner) - println(io, "Permutation of blocks: ", S.perm) - return println(io, "Combination of blocks: ", S.comb) -end - -function Base.show(io::IO, mime::MIME"text/plain", T::CombinerTensor) - summary(io, T) - println(io) - return show(io, mime, storage(T)) -end diff --git a/NDTensors/src/combiner/contract.jl b/NDTensors/src/combiner/contract.jl deleted file mode 100644 index aae09061ba..0000000000 --- a/NDTensors/src/combiner/contract.jl +++ /dev/null @@ -1,103 +0,0 @@ -function contraction_output( - ::TensorT1, ::TensorT2, indsR::Tuple -) where {TensorT1<:CombinerTensor,TensorT2<:DenseTensor} - TensorR = contraction_output_type(TensorT1, TensorT2, indsR) - return similar(TensorR, indsR) -end - -function contraction_output( - T1::TensorT1, T2::TensorT2, indsR -) where {TensorT1<:DenseTensor,TensorT2<:CombinerTensor} - return contraction_output(T2, T1, indsR) -end - -function contract!!( - output_tensor::Tensor, - output_tensor_labels, - combiner_tensor::CombinerTensor, - combiner_tensor_labels, - tensor::Tensor, - tensor_labels, -) - if ndims(combiner_tensor) ≤ 1 - # Empty combiner, acts as multiplying by 1 - output_tensor = permutedims!!( - output_tensor, tensor, getperm(output_tensor_labels, tensor_labels) - ) - return output_tensor - end - if is_index_replacement(tensor, tensor_labels, combiner_tensor, combiner_tensor_labels) - ui = setdiff(combiner_tensor_labels, tensor_labels)[] - newind = inds(combiner_tensor)[findfirst(==(ui), combiner_tensor_labels)] - cpos1, cpos2 = intersect_positions(combiner_tensor_labels, tensor_labels) - output_tensor_storage = copy(storage(tensor)) - output_tensor_inds = setindex(inds(tensor), newind, cpos2) - return NDTensors.tensor(output_tensor_storage, output_tensor_inds) - end - is_combining_contraction = is_combining( - tensor, tensor_labels, combiner_tensor, combiner_tensor_labels - ) - if is_combining_contraction - Alabels, Blabels = tensor_labels, combiner_tensor_labels - final_labels = contract_labels(Blabels, Alabels) - final_labels_n = contract_labels(combiner_tensor_labels, tensor_labels) - output_tensor_inds = inds(output_tensor) - if final_labels != final_labels_n - perm = getperm(final_labels_n, final_labels) - output_tensor_inds = permute(inds(output_tensor), perm) - output_tensor_labels = permute(output_tensor_labels, perm) - end - cpos1, output_tensor_cpos = intersect_positions( - combiner_tensor_labels, output_tensor_labels - ) - labels_comb = deleteat(combiner_tensor_labels, cpos1) - output_tensor_vl = [output_tensor_labels...] - for (ii, li) in enumerate(labels_comb) - insert!(output_tensor_vl, output_tensor_cpos + ii, li) - end - deleteat!(output_tensor_vl, output_tensor_cpos) - labels_perm = tuple(output_tensor_vl...) 
-    perm = getperm(labels_perm, tensor_labels)
-    tensorp = reshape(output_tensor, permute(inds(tensor), perm))
-    permutedims!(tensorp, tensor, perm)
-    return reshape(tensorp, output_tensor_inds)
-  else # Uncombining
-    cpos1, cpos2 = intersect_positions(combiner_tensor_labels, tensor_labels)
-    output_tensor_storage = copy(storage(tensor))
-    indsC = deleteat(inds(combiner_tensor), cpos1)
-    output_tensor_inds = insertat(inds(tensor), indsC, cpos2)
-    return NDTensors.tensor(output_tensor_storage, output_tensor_inds)
-  end
-  return invalid_combiner_contraction_error(
-    tensor, tensor_labels, combiner_tensor, combiner_tensor_labels
-  )
-end
-
-function contract!!(
-  output_tensor::Tensor,
-  output_tensor_labels,
-  tensor::Tensor,
-  tensor_labels,
-  combiner_tensor::CombinerTensor,
-  combiner_tensor_labels,
-)
-  return contract!!(
-    output_tensor,
-    output_tensor_labels,
-    combiner_tensor,
-    combiner_tensor_labels,
-    tensor,
-    tensor_labels,
-  )
-end
-
-function contract(
-  diag_tensor::DiagTensor,
-  diag_tensor_labels,
-  combiner_tensor::CombinerTensor,
-  combiner_tensor_labels,
-)
-  return contract(
-    dense(diag_tensor), diag_tensor_labels, combiner_tensor, combiner_tensor_labels
-  )
-end
diff --git a/NDTensors/src/default_kwargs.jl b/NDTensors/src/default_kwargs.jl
deleted file mode 100644
index f8e0536ebe..0000000000
--- a/NDTensors/src/default_kwargs.jl
+++ /dev/null
@@ -1,11 +0,0 @@
-using .TypeParameterAccessors: unwrap_array_type
-replace_nothing(::Nothing, replacement) = replacement
-replace_nothing(value, replacement) = value
-
-default_maxdim(a) = minimum(size(a))
-default_mindim(a) = true
-default_cutoff(a) = zero(eltype(a))
-default_svd_alg(a) = default_svd_alg(unwrap_array_type(a), a)
-default_svd_alg(::Type{<:AbstractArray}, a) = "divide_and_conquer"
-default_use_absolute_cutoff(a) = false
-default_use_relative_cutoff(a) = true
diff --git a/NDTensors/src/dense/dense.jl b/NDTensors/src/dense/dense.jl
deleted file mode 100644
index cce7744d39..0000000000
--- a/NDTensors/src/dense/dense.jl
+++ /dev/null
@@ -1,143 +0,0 @@
-#
-# Dense storage
-#
-
-struct Dense{ElT,DataT<:AbstractArray} <: TensorStorage{ElT}
-  data::DataT
-  function Dense{ElT,DataT}(data::DataT) where {ElT,DataT<:AbstractVector}
-    @assert ElT == eltype(DataT)
-    return new{ElT,DataT}(data)
-  end
-
-  # Catch-all for non-vector data: raise a proper error.
-  function Dense{ElT,DataT}(data::DataT) where {ElT,DataT<:AbstractArray}
-    return error("Only Vector-based datatypes are currently supported.")
-  end
-end
-
-# Start with high-information constructors and move to low-information constructors.
-function Dense{ElT,DataT}() where {ElT,DataT<:AbstractArray}
-  return Dense{ElT,DataT}(DataT())
-end
-
-# Construct from a set of indices
-# This will fail if zero(ElT) is not defined for the ElT
-function Dense{ElT,DataT}(inds::Tuple) where {ElT,DataT<:AbstractArray}
-  return Dense{ElT,DataT}(generic_zeros(DataT, dim(inds)))
-end
-
-function Dense{ElT,DataT}(dim::Integer) where {ElT,DataT<:AbstractArray}
-  return Dense{ElT,DataT}(generic_zeros(DataT, dim))
-end
-
-function Dense{ElT,DataT}(::UndefInitializer, inds::Tuple) where {ElT,DataT<:AbstractArray}
-  return Dense{ElT,DataT}(similar(DataT, dim(inds)))
-end
-
-function Dense{ElT,DataT}(x, dim::Integer) where {ElT,DataT<:AbstractVector}
-  return Dense{ElT,DataT}(fill!(similar(DataT, dim), ElT(x)))
-end
-
-function Dense{ElR,DataT}(data::AbstractArray) where {ElR,DataT<:AbstractArray}
-  data = convert(DataT, data)
-  return Dense{ElR,DataT}(data)
-end
-
-# This function is ill-defined. It cannot transform a complex type to real...
-function Dense{ElR}(data::AbstractArray{ElT}) where {ElR,ElT}
-  return Dense{ElR}(convert(similartype(typeof(data), ElR), data))
-end
-
-function Dense{ElT}(data::AbstractArray{ElT}) where {ElT}
-  return Dense{ElT,typeof(data)}(data)
-end
-
-function Dense{ElT}(inds::Tuple) where {ElT}
-  return Dense{ElT}(dim(inds))
-end
-
-function Dense{ElT}(dim::Integer) where {ElT}
-  return Dense{ElT,default_datatype(ElT)}(dim)
-end
-
-Dense{ElT}() where {ElT} = Dense{ElT,default_datatype(ElT)}()
-
-function Dense(data::AbstractVector)
-  return Dense{eltype(data)}(data)
-end
-
-function Dense(data::DataT) where {DataT<:AbstractArray{<:Any,N}} where {N}
-  #println("Warning: Only vector-based datatypes are currently supported by Dense. The data structure provided will be vectorized.")
-  return Dense(vec(data))
-end
-
-function Dense(DataT::Type{<:AbstractArray}, dim::Integer)
-  ElT = eltype(DataT)
-  return Dense{ElT,DataT}(dim)
-end
-
-Dense(ElT::Type{<:Number}, dim::Integer) = Dense{ElT}(dim)
-
-function Dense(ElT::Type{<:Number}, ::UndefInitializer, dim::Integer)
-  return Dense{ElT,default_datatype(ElT)}(undef, (dim,))
-end
-
-function Dense(::UndefInitializer, dim::Integer)
-  datatype = default_datatype()
-  return Dense{eltype(datatype),datatype}(undef, (dim,))
-end
-
-function Dense(x::Number, dim::Integer)
-  ElT = typeof(x)
-  return Dense{ElT,default_datatype(ElT)}(x, dim)
-end
-
-Dense(dim::Integer) = Dense(default_eltype(), dim)
-
-Dense(::Type{ElT}) where {ElT} = Dense{ElT}()
-
-## End Dense initializers
-
-setdata(D::Dense, ndata) = Dense(ndata)
-setdata(storagetype::Type{<:Dense}, data) = Dense(data)
-
-function copy(D::Dense)
-  return Dense(copy(expose(data(D))))
-end
-
-function Base.real(T::Type{<:Dense})
-  return set_datatype(T, similartype(datatype(T), real(eltype(T))))
-end
-
-function complex(T::Type{<:Dense})
-  return set_datatype(T, similartype(datatype(T), complex(eltype(T))))
-end
-
-# TODO: Define a generic `dense` for `Tensor`, `TensorStorage`.
-dense(storagetype::Type{<:Dense}) = storagetype
-
-# TODO: make these more general, move to tensorstorage.jl
-datatype(storetype::Type{<:Dense{<:Any,DataT}}) where {DataT} = DataT
-
-using .TypeParameterAccessors: unwrap_array_type
-function promote_rule(
-  ::Type{<:Dense{ElT1,DataT1}}, ::Type{<:Dense{ElT2,DataT2}}
-) where {ElT1,DataT1,ElT2,DataT2}
-  ElR = promote_type(ElT1, ElT2)
-  VecR = promote_type(unwrap_array_type(DataT1), unwrap_array_type(DataT2))
-  VecR = similartype(VecR, ElR)
-  return Dense{ElR,VecR}
-end
-
-# This is for type promotion for Scalar*Dense
-function promote_rule(
-  ::Type{<:Dense{ElT1,DataT}}, ::Type{ElT2}
-) where {DataT,ElT1,ElT2<:Number}
-  ElR = promote_type(ElT1, ElT2)
-  DataR = set_eltype(DataT, ElR)
-  return Dense{ElR,DataR}
-end
-
-function convert(::Type{<:Dense{ElR,DataT}}, D::Dense) where {ElR,DataT}
-  return Dense(convert(DataT, data(D)))
-end
diff --git a/NDTensors/src/dense/densetensor.jl b/NDTensors/src/dense/densetensor.jl
deleted file mode 100644
index 119934bdfa..0000000000
--- a/NDTensors/src/dense/densetensor.jl
+++ /dev/null
@@ -1,318 +0,0 @@
-using SparseArrays: nnz
-
-#
-# DenseTensor (Tensor using Dense storage)
-#
-
-const DenseTensor{ElT,N,StoreT,IndsT} = Tensor{ElT,N,StoreT,IndsT} where {StoreT<:Dense}
-
-DenseTensor(::Type{ElT}, inds) where {ElT} = tensor(Dense(ElT, dim(inds)), inds)
-
-# Special convenience function for Int
-# dimensions
-DenseTensor(::Type{ElT}, inds::Int...)
where {ElT} = DenseTensor(ElT, inds) - -DenseTensor(inds) = tensor(Dense(dim(inds)), inds) - -DenseTensor(inds::Int...) = DenseTensor(inds) - -function DenseTensor(::Type{ElT}, ::UndefInitializer, inds) where {ElT} - return tensor(Dense(ElT, undef, dim(inds)), inds) -end - -function DenseTensor(::Type{ElT}, ::UndefInitializer, inds::Int...) where {ElT} - return DenseTensor(ElT, undef, inds) -end - -DenseTensor(::UndefInitializer, inds) = tensor(Dense(undef, dim(inds)), inds) - -DenseTensor(::UndefInitializer, inds::Int...) = DenseTensor(undef, inds) - -# -# Random constructors -# - -function randomDenseTensor(::Type{ElT}, inds) where {ElT} - return tensor(generic_randn(Dense{ElT}, dim(inds)), inds) -end - -randomDenseTensor(inds) = randomDenseTensor(default_eltype(), inds) - -## End Random Dense Tensor constructor - -# Basic functionality for AbstractArray interface -IndexStyle(::Type{<:DenseTensor}) = IndexLinear() - -# Override CartesianIndices iteration to iterate -# linearly through the Dense storage (faster) -iterate(T::DenseTensor, args...) = iterate(storage(T), args...) - -function _zeros(TensorT::Type{<:DenseTensor}, inds) - return tensor(generic_zeros(storagetype(TensorT), dim(inds)), inds) -end - -function zeros(TensorT::Type{<:DenseTensor}, inds) - return _zeros(TensorT, inds) -end - -# To fix method ambiguity with zeros(::Type, ::Tuple) -function zeros(TensorT::Type{<:DenseTensor}, inds::Dims) - return _zeros(TensorT, inds) -end - -function zeros(TensorT::Type{<:DenseTensor}, inds::Tuple{}) - return _zeros(TensorT, inds) -end - -convert(::Type{Array}, T::DenseTensor) = reshape(data(storage(T)), dims(inds(T))) - -# Create an Array that is a view of the Dense Tensor -# Useful for using Base Array functions -array(T::DenseTensor) = convert(Array, T) - -using .DiagonalArrays: DiagonalArrays, diagview - -function DiagonalArrays.diagview(T::DenseTensor) - return diagview(array(T)) -end - -function Array{ElT,N}(T::DenseTensor{ElT,N}) where {ElT,N} - return copy(array(T)) -end - -function Array(T::DenseTensor{ElT,N}) where {ElT,N} - return Array{ElT,N}(T) -end - -# -# Single index -# - -## TODO replace this with Exposed -@propagate_inbounds function getindex(T::DenseTensor{<:Number}) - return getindex(expose(data(T))) -end - -@propagate_inbounds function getindex(T::DenseTensor{<:Number}, I::Integer...) - Base.@_inline_meta - return getindex(expose(data(T)), Base._sub2ind(T, I...)) -end - -@propagate_inbounds function getindex(T::DenseTensor{<:Number}, I::CartesianIndex) - Base.@_inline_meta - return getindex(T, I.I...) -end - -@propagate_inbounds function setindex!( - T::DenseTensor{<:Number}, x::Number, I::Vararg{Integer} -) - Base.@_inline_meta - setindex!(data(T), x, Base._sub2ind(T, I...)) - return T -end - -@propagate_inbounds function setindex!( - T::DenseTensor{<:Number}, x::Number, I::CartesianIndex -) - Base.@_inline_meta - setindex!(T, x, I.I...) - return T -end - -@propagate_inbounds function setindex!(T::DenseTensor{<:Number}, x::Number) - setindex!(expose(data(T)), x) - return T -end - -# -# Linear indexing -# - -@propagate_inbounds @inline getindex(T::DenseTensor, i::Integer) = storage(T)[i] - -@propagate_inbounds @inline function setindex!(T::DenseTensor, v, i::Integer) - return (storage(T)[i] = v; T) -end - -# -# Slicing -# TODO: this doesn't allow colon right now -# Create a DenseView that stores a Dense and an offset? 
-# - -## @propagate_inbounds function _getindex( -## T::DenseTensor{ElT,N}, I::CartesianIndices{N} -## ) where {ElT,N} -## storeR = Dense(vec(@view array(T)[I])) -## indsR = Tuple(I[end] - I[1] + CartesianIndex(ntuple(_ -> 1, Val(N)))) -## return tensor(storeR, indsR) -## end -## -## @propagate_inbounds function getindex(T::DenseTensor{ElT,N}, I...) where {ElT,N} -## return _getindex(T, CartesianIndices(I)) -## end - -@propagate_inbounds function getindex(T::DenseTensor, I...) - AI = @view array(T)[I...] - storeR = Dense(vec(AI)) - indsR = size(AI) - return tensor(storeR, indsR) -end - -# Reshape a DenseTensor using the specified dimensions -# This returns a view into the same Tensor data -function reshape(T::DenseTensor, dims) - dim(T) == dim(dims) || error("Total new dimension must be the same as the old dimension") - return tensor(storage(T), dims) -end - -# This version fixes method ambiguity with AbstractArray reshape -function reshape(T::DenseTensor, dims::Dims) - dim(T) == dim(dims) || error("Total new dimension must be the same as the old dimension") - return tensor(storage(T), dims) -end - -function reshape(T::DenseTensor, dims::Int...) - return tensor(storage(T), tuple(dims...)) -end - -## TODO might have to look into these functions more -# If the storage data are regular Vectors, use Base.copyto! -function copyto!( - R::Tensor{<:Number,N,<:Dense{<:Number,<:Vector}}, - T::Tensor{<:Number,N,<:Dense{<:Number,<:Vector}}, -) where {N} - RA = array(R) - TA = array(T) - RA .= TA - return R -end - -# If they are something more complicated like views, use Strided copyto! -function copyto!( - R::DenseTensor{<:Number,N,StoreT}, T::DenseTensor{<:Number,N,StoreT} -) where {N,StoreT<:StridedArray} - RA = array(R) - TA = array(T) - @strided RA .= TA - return R -end - -# Maybe allocate output data. -# TODO: Remove this in favor of `map!` -# applied to `PermutedDimsArray`. -function permutedims!!(R::DenseTensor, T::DenseTensor, perm, f::Function) - Base.checkdims_perm(R, T, perm) - RR = convert(promote_type(typeof(R), typeof(T)), R) - permutedims!(RR, T, perm, f) - return RR -end - -function permutedims!!(R::DenseTensor, T::DenseTensor, perm) - Base.checkdims_perm(R, T, perm) - RR = convert(promote_type(typeof(R), typeof(T)), R) - permutedims!(RR, T, perm) - return RR -end - -# TODO: call permutedims!(R,T,perm,(r,t)->t)? -function permutedims!( - R::DenseTensor{<:Number,N,StoreT}, T::DenseTensor{<:Number,N,StoreT}, perm::NTuple{N,Int} -) where {N,StoreT<:StridedArray} - RA = array(R) - TA = array(T) - permutedims!(expose(RA), expose(TA), perm) - return R -end - -# TODO: call permutedims!(R,T,perm,(r,t)->t)? -function permutedims!( - R::DenseTensor{<:Number,N}, T::DenseTensor{<:Number,N}, perm::NTuple{N,Int} -) where {N} - RA = array(R) - TA = array(T) - permutedims!(expose(RA), expose(TA), perm) - return R -end - -function apply!( - R::DenseTensor{<:Number,N,StoreT}, - T::DenseTensor{<:Number,N,StoreT}, - f::Function=(r, t) -> t, -) where {N,StoreT<:StridedArray} - RA = array(R) - TA = array(T) - @strided RA .= f.(RA, TA) - return R -end - -function apply!(R::DenseTensor, T::DenseTensor, f::Function=(r, t) -> t) - RA = array(R) - TA = array(T) - RA .= f.(RA, TA) - return R -end - -function permutedims!( - R::DenseTensor{<:Number,N}, T::DenseTensor{<:Number,N}, perm, f::Function -) where {N} - if nnz(R) == 1 && nnz(T) == 1 - R[] = f(R[], T[]) - return R - end - RA = array(R) - TA = array(T) - return permutedims!!(RA, TA, perm, f) -end - -""" - NDTensors.permute_reshape(T::Tensor,pos...) 
-
-Takes a permutation that is split up into tuples. Index positions
-within each tuple are combined into a single index.
-
-For example:
-
-permute_reshape(T, (3, 2), 1)
-
-first permutes T as `permutedims(T, (3, 2, 1))`, then reshapes the result
-so that the original indices 3 and 2 are combined into one index.
-"""
-function permute_reshape(
-  T::DenseTensor{ElT,NT,IndsT}, pos::Vararg{Any,N}
-) where {ElT,NT,IndsT,N}
-  perm = flatten(pos...)
-
-  length(perm) ≠ NT && error("Index positions must add up to order of Tensor ($NT)")
-  isperm(perm) || error("Index positions must be a permutation")
-
-  dimsT = dims(T)
-  indsT = inds(T)
-  if !is_trivial_permutation(perm)
-    T = permutedims(T, perm)
-  end
-  if all(p -> length(p) == 1, pos) && N == NT
-    return T
-  end
-  newdims = MVector(ntuple(_ -> eltype(IndsT)(1), Val(N)))
-  for i in 1:N
-    if length(pos[i]) == 1
-      # No reshape needed, just use the
-      # original index
-      newdims[i] = indsT[pos[i][1]]
-    else
-      newdim_i = 1
-      for p in pos[i]
-        newdim_i *= dimsT[p]
-      end
-      newdims[i] = eltype(IndsT)(newdim_i)
-    end
-  end
-  newinds = similartype(IndsT, Val{N})(Tuple(newdims))
-  return reshape(T, newinds)
-end
-
-function Base.show(io::IO, mime::MIME"text/plain", T::DenseTensor)
-  summary(io, T)
-  return print_tensor(io, T)
-end
diff --git a/NDTensors/src/dense/generic_array_constructors.jl b/NDTensors/src/dense/generic_array_constructors.jl
deleted file mode 100644
index 41057bf1be..0000000000
--- a/NDTensors/src/dense/generic_array_constructors.jl
+++ /dev/null
@@ -1,31 +0,0 @@
-using .TypeParameterAccessors:
-  default_type_parameter,
-  parenttype,
-  set_eltype,
-  specify_default_type_parameters,
-  type_parameter
-## TODO: replace randn in ITensors with generic_randn
-## and replace zeros with generic_zeros
-
-# This is a file to write generic fills for NDTensors.
-# This includes random fills, zeros, ...
-
-function generic_randn(StoreT::Type{<:Dense}, dims::Integer; rng=Random.default_rng())
-  StoreT = specify_default_type_parameters(StoreT)
-  DataT = specify_type_parameter(type_parameter(StoreT, parenttype), eltype, eltype(StoreT))
-  @assert eltype(StoreT) == eltype(DataT)
-
-  data = generic_randn(DataT, dims; rng=rng)
-  StoreT = set_datatype(StoreT, typeof(data))
-  return StoreT(data)
-end
-
-function generic_zeros(StoreT::Type{<:Dense}, dims::Integer)
-  StoreT = specify_default_type_parameters(StoreT)
-  DataT = specify_type_parameter(type_parameter(StoreT, parenttype), eltype, eltype(StoreT))
-  @assert eltype(StoreT) == eltype(DataT)
-
-  data = generic_zeros(DataT, dims)
-  StoreT = set_datatype(StoreT, typeof(data))
-  return StoreT(data)
-end
diff --git a/NDTensors/src/dense/linearalgebra/decompositions.jl b/NDTensors/src/dense/linearalgebra/decompositions.jl
deleted file mode 100644
index d10991745f..0000000000
--- a/NDTensors/src/dense/linearalgebra/decompositions.jl
+++ /dev/null
@@ -1,96 +0,0 @@
-Strided.StridedView(T::DenseTensor) = StridedView(convert(Array, T))
-
-function drop_singletons(::Order{N}, labels, dims) where {N}
-  labelsᵣ = ntuple(zero, Val(N))
-  dimsᵣ = labelsᵣ
-  nkeep = 1
-  for n in 1:length(dims)
-    if dims[n] > 1
-      labelsᵣ = @inbounds setindex(labelsᵣ, labels[n], nkeep)
-      dimsᵣ = @inbounds setindex(dimsᵣ, dims[n], nkeep)
-      nkeep += 1
-    end
-  end
-  return labelsᵣ, dimsᵣ
-end
-
-# svd of an order-n tensor according to positions Lpos
-# and Rpos
-function svd(
-  T::DenseTensor{<:Number,N,IndsT}, Lpos::NTuple{NL,Int}, Rpos::NTuple{NR,Int}; kwargs...
-) where {N,IndsT,NL,NR}
-  M = permute_reshape(T, Lpos, Rpos)
-  UM, S, VM, spec = svd(M; kwargs...)
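# Note: `svd` above is the matrix SVD of the order-2 reshape `M`; below, `u`
# and `v` are the new bond indices of `U` and `V`, which get appended to the
# row and column index groups before reshaping the factors back to tensors.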
-  u = ind(UM, 2)
-  v = ind(VM, 2)
-
-  Linds = similartype(IndsT, Val{NL})(ntuple(i -> inds(T)[Lpos[i]], Val(NL)))
-  Uinds = push(Linds, u)
-
-  # TODO: do these positions need to be reversed?
-  Rinds = similartype(IndsT, Val{NR})(ntuple(i -> inds(T)[Rpos[i]], Val(NR)))
-  Vinds = push(Rinds, v)
-
-  U = reshape(UM, Uinds)
-  V = reshape(VM, Vinds)
-
-  return U, S, V, spec
-end
-
-# qr decomposition of an order-n tensor according to
-# positions Lpos and Rpos
-function qr(
-  T::DenseTensor{<:Number,N,IndsT}, Lpos::NTuple{NL,Int}, Rpos::NTuple{NR,Int}; kwargs...
-) where {N,IndsT,NL,NR}
-  M = permute_reshape(T, Lpos, Rpos)
-  QM, RM = qr(M; kwargs...)
-  q = ind(QM, 2)
-  r = ind(RM, 1)
-  # TODO: simplify this by permuting inds(T) by (Lpos,Rpos)
-  # then grab Linds,Rinds
-  Linds = similartype(IndsT, Val{NL})(ntuple(i -> inds(T)[Lpos[i]], Val(NL)))
-  Qinds = push(Linds, r)
-  Q = reshape(QM, Qinds)
-  Rinds = similartype(IndsT, Val{NR})(ntuple(i -> inds(T)[Rpos[i]], Val(NR)))
-  Rinds = pushfirst(Rinds, r)
-  R = reshape(RM, Rinds)
-  return Q, R
-end
-
-# polar decomposition of an order-n tensor according to positions Lpos
-# and Rpos
-function polar(
-  T::DenseTensor{<:Number,N,IndsT}, Lpos::NTuple{NL,Int}, Rpos::NTuple{NR,Int}
-) where {N,IndsT,NL,NR}
-  M = permute_reshape(T, Lpos, Rpos)
-  UM, PM = polar(M)
-
-  # TODO: turn these into functions
-  Linds = similartype(IndsT, Val{NL})(ntuple(i -> inds(T)[Lpos[i]], Val(NL)))
-  Rinds = similartype(IndsT, Val{NR})(ntuple(i -> inds(T)[Rpos[i]], Val(NR)))
-
-  # Use sim to create "similar" indices, in case
-  # the indices have identifiers. If not, this should
-  # act as an identity operator.
-  simRinds = sim(Rinds)
-  Uinds = (Linds..., simRinds...)
-  Pinds = (simRinds..., Rinds...)
-
-  U = reshape(UM, Uinds)
-  P = reshape(PM, Pinds)
-  return U, P
-end
-
-function LinearAlgebra.exp(
-  T::DenseTensor{ElT,N}, Lpos::NTuple{NL,Int}, Rpos::NTuple{NR,Int}; ishermitian::Bool=false
-) where {ElT,N,NL,NR}
-  M = permute_reshape(T, Lpos, Rpos)
-  indsTp = permute(inds(T), (Lpos..., Rpos...))
-  if ishermitian
-    expM = parent(exp(Hermitian(matrix(M))))
-    return tensor(Dense{ElT}(vec(expM)), indsTp)
-  else
-    expM = exp(M)
-    return reshape(expM, indsTp)
-  end
-end
diff --git a/NDTensors/src/dense/set_types.jl b/NDTensors/src/dense/set_types.jl
deleted file mode 100644
index 95f6487c1e..0000000000
--- a/NDTensors/src/dense/set_types.jl
+++ /dev/null
@@ -1,15 +0,0 @@
-using .TypeParameterAccessors: TypeParameterAccessors, Position, parenttype
-
-function set_datatype(storagetype::Type{<:Dense}, datatype::Type{<:AbstractVector})
-  return Dense{eltype(datatype),datatype}
-end
-
-function set_datatype(storagetype::Type{<:Dense}, datatype::Type{<:AbstractArray})
-  return error(
-    "Setting the `datatype` of the storage type `$storagetype` to a $(ndims(datatype))-dimensional array of type `$datatype` is not currently supported; use an `AbstractVector` instead.",
-  )
-end
-
-TypeParameterAccessors.default_type_parameters(::Type{<:Dense}) = (Float64, Vector)
-TypeParameterAccessors.position(::Type{<:Dense}, ::typeof(eltype)) = Position(1)
-TypeParameterAccessors.position(::Type{<:Dense}, ::typeof(parenttype)) = Position(2)
diff --git a/NDTensors/src/dense/tensoralgebra/contract.jl b/NDTensors/src/dense/tensoralgebra/contract.jl
deleted file mode 100644
index 60db4e9a22..0000000000
--- a/NDTensors/src/dense/tensoralgebra/contract.jl
+++ /dev/null
@@ -1,231 +0,0 @@
-using SparseArrays: nnz
-
-function contraction_output(tensor1::DenseTensor, tensor2::DenseTensor, indsR)
-  tensortypeR =
contraction_output_type(typeof(tensor1), typeof(tensor2), indsR) - return NDTensors.similar(tensortypeR, indsR) -end - -# Both are scalar-like tensors -function _contract_scalar!( - R::DenseTensor{ElR}, - labelsR, - T1::Number, - labelsT1, - T2::Number, - labelsT2, - α=one(ElR), - β=zero(ElR), -) where {ElR} - if iszero(β) - R[] = α * T1 * T2 - elseif iszero(α) - R[] = β * R[] - else - R[] = α * T1 * T2 + β * R[] - end - return R -end - -# Trivial permutation -# Version where R and T have different element types, so we can't call BLAS -# Instead use Julia's broadcasting (maybe consider Strided in the future) -function _contract_scalar_noperm!( - R::DenseTensor{ElR}, T::DenseTensor, α, β=zero(ElR) -) where {ElR} - Rᵈ = data(R) - Tᵈ = data(T) - if iszero(β) - if iszero(α) - fill!(Rᵈ, 0) - else - Rᵈ .= α .* Tᵈ - end - elseif isone(β) - if iszero(α) - # No-op - # Rᵈ .= Rᵈ - else - Rᵈ .= α .* Tᵈ .+ Rᵈ - end - else - if iszero(α) - # Rᵈ .= β .* Rᵈ - BLAS.scal!(length(Rᵈ), β, Rᵈ, 1) - else - Rᵈ .= α .* Tᵈ .+ β .* Rᵈ - end - end - return R -end - -# Trivial permutation -# Version where R and T are the same element type, so we can -# call BLAS -function _contract_scalar_noperm!( - R::DenseTensor{ElR}, T::DenseTensor{ElR}, α, β=zero(ElR) -) where {ElR} - Rᵈ = data(R) - Tᵈ = data(T) - if iszero(β) - if iszero(α) - fill!(Rᵈ, 0) - else - Rᵈ .= α .* Tᵈ - end - elseif isone(β) - if iszero(α) - # No-op - # Rᵈ .= Rᵈ - else - # Rᵈ .= α .* Tᵈ .+ Rᵈ - LinearAlgebra.axpy!(α, Tᵈ, Rᵈ) - end - else - if iszero(α) - Rᵈ .= β .* Rᵈ - else - # Rᵈ .= α .* Tᵈ .+ β .* Rᵈ - LinearAlgebra.axpby!(α, Tᵈ, β, Rᵈ) - end - end - return R -end - -function _contract_scalar_maybe_perm!( - ::Order{N}, R::DenseTensor{ElR,NR}, labelsR, T::DenseTensor, labelsT, α, β=zero(ElR) -) where {ElR,NR,N} - labelsRᵣ, dimsRᵣ = drop_singletons(Order(N), labelsR, dims(R)) - labelsTᵣ, dimsTᵣ = drop_singletons(Order(N), labelsT, dims(T)) - perm = getperm(labelsRᵣ, labelsTᵣ) - if is_trivial_permutation(perm) - # trivial permutation - _contract_scalar_noperm!(R, T, α, β) - else - # non-trivial permutation - Rᵣ = ReshapedArray(data(R), dimsRᵣ, ()) - Tᵣ = ReshapedArray(data(T), dimsTᵣ, ()) - _contract_scalar_perm!(Rᵣ, Tᵣ, perm, α, β) - end - return R -end - -function _contract_scalar_maybe_perm!( - R::DenseTensor{ElR,NR}, labelsR, T::DenseTensor, labelsT, α, β=zero(ElR) -) where {ElR,NR} - N = count(≠(1), dims(R)) - _contract_scalar_maybe_perm!(Order(N), R, labelsR, T, labelsT, α, β) - return R -end - -# XXX: handle case of non-trivial permutation -function _contract_scalar_maybe_perm!( - R::DenseTensor{ElR,NR}, - labelsR, - T₁::DenseTensor, - labelsT₁, - T₂::DenseTensor, - labelsT₂, - α=one(ElR), - β=zero(ElR), -) where {ElR,NR} - if nnz(T₁) == 1 - _contract_scalar_maybe_perm!(R, labelsR, T₂, labelsT₂, α * T₁[], β) - elseif nnz(T₂) == 1 - _contract_scalar_maybe_perm!(R, labelsR, T₁, labelsT₁, α * T₂[], β) - else - error("In _contract_scalar_perm!, one tensor must be a scalar") - end - return R -end - -# At least one of the tensors is size 1 -function _contract_scalar!( - R::DenseTensor{ElR}, - labelsR, - T1::DenseTensor, - labelsT1, - T2::DenseTensor, - labelsT2, - α=one(ElR), - β=zero(ElR), -) where {ElR} - if nnz(T1) == nnz(T2) == 1 - _contract_scalar!(R, labelsR, T1[], labelsT1, T2[], labelsT2, α, β) - else - _contract_scalar_maybe_perm!(R, labelsR, T1, labelsT1, T2, labelsT2, α, β) - end - return R -end - -function contract!( - R::DenseTensor{ElR,NR}, - labelsR, - T1::DenseTensor{ElT1,N1}, - labelsT1, - T2::DenseTensor{ElT2,N2}, - labelsT2, - 
α::Elα=one(ElR), - β::Elβ=zero(ElR), -) where {Elα,Elβ,ElR,ElT1,ElT2,NR,N1,N2} - # Special case for scalar tensors - if nnz(T1) == 1 || nnz(T2) == 1 - _contract_scalar!(R, labelsR, T1, labelsT1, T2, labelsT2, α, β) - return R - end - - if using_tblis() && ElR <: LinearAlgebra.BlasReal && (ElR == ElT1 == ElT2 == Elα == Elβ) - #@timeit_debug timer "TBLIS contract!" begin - contract!(Val(:TBLIS), R, labelsR, T1, labelsT1, T2, labelsT2, α, β) - #end - return R - end - - if N1 + N2 == NR - outer!(R, T1, T2) - labelsRp = (labelsT1..., labelsT2...) - perm = getperm(labelsR, labelsRp) - if !is_trivial_permutation(perm) - permutedims!(R, copy(R), perm) - end - return R - end - - props = ContractionProperties(labelsT1, labelsT2, labelsR) - compute_contraction_properties!(props, T1, T2, R) - - if ElT1 != ElT2 - # TODO: use promote instead - # T1, T2 = promote(T1, T2) - - ElT1T2 = promote_type(ElT1, ElT2) - if ElT1 != ElR - # TODO: get this working - # T1 = ElR.(T1) - T1 = one(ElT1T2) * T1 - end - if ElT2 != ElR - # TODO: get this working - # T2 = ElR.(T2) - T2 = one(ElT1T2) * T2 - end - end - - _contract!(R, T1, T2, props, α, β) - return R - #end -end - -function _contract!( - CT::DenseTensor{El,NC}, - AT::DenseTensor{El,NA}, - BT::DenseTensor{El,NB}, - props::ContractionProperties, - α::Number=one(El), - β::Number=zero(El), -) where {El,NC,NA,NB} - C = array(CT) - A = array(AT) - B = array(BT) - - return _contract!(C, A, B, props, α, β) -end diff --git a/NDTensors/src/dense/tensoralgebra/outer.jl b/NDTensors/src/dense/tensoralgebra/outer.jl deleted file mode 100644 index 3fd949b9f2..0000000000 --- a/NDTensors/src/dense/tensoralgebra/outer.jl +++ /dev/null @@ -1,35 +0,0 @@ -function outer!( - R::DenseTensor{ElR}, T1::DenseTensor{ElT1}, T2::DenseTensor{ElT2} -) where {ElR,ElT1,ElT2} - if ElT1 != ElT2 - # TODO: use promote instead - # T1,T2 = promote(T1,T2) - - ElT1T2 = promote_type(ElT1, ElT2) - if ElT1 != ElT1T2 - # TODO: get this working - # T1 = ElR.(T1) - T1 = one(ElT1T2) * T1 - end - if ElT2 != ElT1T2 - # TODO: get this working - # T2 = ElR.(T2) - T2 = one(ElT1T2) * T2 - end - end - - v1 = data(T1) - v2 = data(T2) - RM = reshape(R, length(v1), length(v2)) - ## There is no _gemm! defined for CUDA or Metal so it calls - ## generic matmul. Replace with mul!! to call correct mul! (ger) - mul!!(array(RM), v1, transpose(v2), one(ElR), zero(ElR)) - return R -end - -# TODO: call outer!!, make this generic -function outer(T1::DenseTensor{ElT1}, T2::DenseTensor{ElT2}) where {ElT1,ElT2} - array_outer = vec(array(T1)) * transpose(vec(array(T2))) - inds_outer = unioninds(inds(T1), inds(T2)) - return tensor(Dense{promote_type(ElT1, ElT2)}(vec(array_outer)), inds_outer) -end diff --git a/NDTensors/src/deprecated.jl b/NDTensors/src/deprecated.jl deleted file mode 100644 index f9afd61fde..0000000000 --- a/NDTensors/src/deprecated.jl +++ /dev/null @@ -1,8 +0,0 @@ - -# NDTensors.jl -@deprecate use_tblis() NDTensors.using_tblis() -@deprecate enable_tblis!() NDTensors.enable_tblis() -@deprecate disable_tblis!() NDTensors.disable_tblis() - -@deprecate addblock!! insertblock!! -@deprecate addblock! insertblock! 
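For reference, the dense `contract!` above (and `outer!` below it) follows the BLAS-like update convention `R = α * (T1 * T2) + β * R`, applied after the contracted labels have been matched up. A minimal sketch of that convention in plain Julia for the matrix case (`contract_update!` is an illustrative name, not an NDTensors function):

using LinearAlgebra: mul!

# R = α * (T1 * T2) + β * R; 5-argument mul! implements exactly this update.
function contract_update!(R::Matrix, T1::Matrix, T2::Matrix, α::Number=true, β::Number=false)
  return mul!(R, T1, T2, α, β)
end

T1, T2 = rand(2, 3), rand(3, 2)
R = ones(2, 2)
contract_update!(R, T1, T2, 2.0, 1.0)
@assert R ≈ 2.0 .* (T1 * T2) .+ 1.0  # β = 1 keeps the previous contents of R

The scalar special cases (`_contract_scalar!` and friends) implement this same update for length-1 tensors, where dispatching to BLAS is not worthwhile.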
diff --git a/NDTensors/src/diag/diag.jl b/NDTensors/src/diag/diag.jl deleted file mode 100644 index ad89444896..0000000000 --- a/NDTensors/src/diag/diag.jl +++ /dev/null @@ -1,134 +0,0 @@ - -# Diag can have either Vector storage, in which case -# it is a general Diag tensor, or scalar storage, -# in which case the diagonal has a uniform value -struct Diag{ElT,DataT} <: TensorStorage{ElT} - data::DataT - function Diag{ElT,DataT}(data) where {ElT,DataT<:AbstractVector{ElT}} - return new{ElT,DataT}(data) - end - function Diag{ElT,ElT}(data) where {ElT} - return new{ElT,ElT}(data) - end -end - -const NonuniformDiag{ElT,DataT} = Diag{ElT,DataT} where {DataT<:AbstractVector} - -const UniformDiag{ElT,DataT} = Diag{ElT,DataT} where {DataT<:Number} - -# Diag constructors -Diag(data::DataT) where {DataT<:AbstractVector{ElT}} where {ElT} = Diag{ElT,DataT}(data) - -Diag(data::ElT) where {ElT<:Number} = Diag{ElT,ElT}(data) - -function Diag{ElR}(data::AbstractVector{ElT}) where {ElR,ElT} - return Diag(convert(similartype(typeof(data), ElR), data)) -end - -Diag(::Type{ElT}, n::Integer) where {ElT<:Number} = Diag(zeros(ElT, n)) - -Diag(x::ElT, n::Integer) where {ElT<:Number} = Diag(fill(x, n)) - -# End Diag constructors - -datatype(::Type{<:Diag{<:Any,DataT}}) where {DataT} = DataT - -setdata(D::Diag, ndata) = Diag(ndata) -setdata(storagetype::Type{<:Diag}, data) = Diag(data) - -copy(D::Diag) = Diag(copy(data(D))) - -# Special printing for uniform Diag -function Base.show(io::IO, mime::MIME"text/plain", diag::UniformDiag) - println(io, typeof(diag)) - println(io, "Diag storage with uniform diagonal value:") - println(io, diag[1]) - return nothing -end - -getindex(D::UniformDiag, i::Int) = data(D) - -function setindex!(D::UniformDiag, val, i::Int) - return error("Cannot set elements of a uniform Diag storage") -end - -# Deal with uniform Diag conversion -function convert(::Type{<:Diag{ElT,DataT}}, D::Diag) where {ElT,DataT<:AbstractArray} - @assert data(D) isa AbstractArray - return Diag(convert(DataT, data(D))) -end - -function convert(::Type{<:Diag{ElT,DataT}}, D::Diag) where {ElT,DataT<:Number} - @assert data(D) isa Number - return Diag(convert(DataT, data(D))) -end - -function generic_zeros(diagT::Type{<:NonuniformDiag{ElT}}, dim::Integer) where {ElT} - return diagT(generic_zeros(datatype(diagT), dim)) -end - -generic_zeros(diagT::Type{<:UniformDiag{ElT}}, dim::Integer) where {ElT} = diagT(zero(ElT)) - -function generic_zeros(diagT::Type{<:Diag{ElT}}, dim::Integer) where {ElT} - return generic_zeros(diagT{default_datatype(ElT)}, dim) -end - -function generic_zeros(diagT::Type{<:Diag}, dim::Integer) - return generic_zeros(diagT{default_eltype()}, dim) -end - -# -# Type promotions involving Diag -# Useful for knowing how conversions should work when adding and contracting -# - -function promote_rule( - ::Type{<:UniformDiag{ElT1}}, ::Type{<:UniformDiag{ElT2}} -) where {ElT1,ElT2} - ElR = promote_type(ElT1, ElT2) - return Diag{ElR,ElR} -end - -function promote_rule( - ::Type{<:NonuniformDiag{ElT1,DataT1}}, ::Type{<:NonuniformDiag{ElT2,DataT2}} -) where {ElT1,DataT1<:AbstractVector,ElT2,DataT2<:AbstractVector} - ElR = promote_type(ElT1, ElT2) - VecR = promote_type(DataT1, DataT2) - return Diag{ElR,VecR} -end - -# This is an internal definition, is there a more general way? 
-#promote_type(::Type{Vector{ElT1}},
-#             ::Type{ElT2}) where {ElT1<:Number,
-#                                  ElT2<:Number} = Vector{promote_type(ElT1,ElT2)}
-#
-#promote_type(::Type{ElT1},
-#             ::Type{Vector{ElT2}}) where {ElT1<:Number,
-#                                          ElT2<:Number} = promote_type(Vector{ElT2},ElT1)
-
-# TODO: how do we make this work more generally for T2<:AbstractVector{S2}?
-# Make a similartype(AbstractVector{S2},T1) -> AbstractVector{T1} function?
-function promote_rule(
-  ::Type{<:UniformDiag{ElT1,DataT1}}, ::Type{<:NonuniformDiag{ElT2,AbstractArray{ElT2}}}
-) where {ElT1,DataT1<:Number,ElT2}
-  ElR = promote_type(ElT1, ElT2)
-
-  VecR = Vector{ElR}
-  return Diag{ElR,VecR}
-end
-
-function promote_rule(
-  ::Type{DenseT1}, ::Type{<:NonuniformDiag{ElT2,DataT2}}
-) where {DenseT1<:Dense,ElT2,DataT2<:AbstractVector}
-  return promote_type(DenseT1, Dense{ElT2,DataT2})
-end
-
-function promote_rule(
-  ::Type{DenseT1}, ::Type{<:UniformDiag{ElT2,DataT2}}
-) where {DenseT1<:Dense,ElT2,DataT2<:Number}
-  return promote_type(DenseT1, ElT2)
-end
-
-# Convert a Diag storage type to the closest Dense storage type
-dense(::Type{<:NonuniformDiag{ElT,DataT}}) where {ElT,DataT} = Dense{ElT,DataT}
-dense(::Type{<:UniformDiag{ElT}}) where {ElT} = Dense{ElT,default_datatype(ElT)}
diff --git a/NDTensors/src/diag/diagtensor.jl b/NDTensors/src/diag/diagtensor.jl
deleted file mode 100644
index 852fefa466..0000000000
--- a/NDTensors/src/diag/diagtensor.jl
+++ /dev/null
@@ -1,218 +0,0 @@
-using .DiagonalArrays: diaglength, diagview
-
-const DiagTensor{ElT,N,StoreT,IndsT} = Tensor{ElT,N,StoreT,IndsT} where {StoreT<:Diag}
-const NonuniformDiagTensor{ElT,N,StoreT,IndsT} =
-  Tensor{ElT,N,StoreT,IndsT} where {StoreT<:NonuniformDiag}
-const UniformDiagTensor{ElT,N,StoreT,IndsT} =
-  Tensor{ElT,N,StoreT,IndsT} where {StoreT<:UniformDiag}
-
-function diag(tensor::DiagTensor)
-  tensor_diag = NDTensors.similar(dense(typeof(tensor)), (diaglength(tensor),))
-  # TODO: Define `eachdiagindex`.
-  diagview(tensor_diag) .= diagview(tensor)
-  return tensor_diag
-end
-
-IndexStyle(::Type{<:DiagTensor}) = IndexCartesian()
-
-# TODO: this needs to be better (promote element type, check order compatibility,
-# etc.)
-function convert(::Type{<:DenseTensor{ElT,N}}, T::DiagTensor{ElT,N}) where {ElT<:Number,N}
-  return dense(T)
-end
-
-convert(::Type{Diagonal}, D::DiagTensor{<:Number,2}) = Diagonal(data(D))
-
-function Array{ElT,N}(T::DiagTensor{ElT,N}) where {ElT,N}
-  return array(T)
-end
-
-function Array(T::DiagTensor{ElT,N}) where {ElT,N}
-  return Array{ElT,N}(T)
-end
-
-function DiagonalArrays.diagview(T::NonuniformDiagTensor)
-  return data(T)
-end
-
-function zeros(tensortype::Type{<:DiagTensor}, inds)
-  return tensor(generic_zeros(storagetype(tensortype), mindim(inds)), inds)
-end
-
-function zeros(tensortype::Type{<:DiagTensor}, inds::Dims)
-  return tensor(generic_zeros(storagetype(tensortype), mindim(inds)), inds)
-end
-
-function zeros(tensortype::Type{<:DiagTensor}, inds::Tuple{})
-  return tensor(generic_zeros(storagetype(tensortype), mindim(inds)), inds)
-end
-
-# Compute the norm of a uniform diagonal tensor
-# TODO: Improve this with FillArrays.jl
-norm(S::UniformDiagTensor) = sqrt(mindim(S) * abs2(data(S)))
-
-"""
-getdiagindex(T::DiagTensor, i::Int)
-
-Get the ith value along the diagonal of the tensor.
-"""
-getdiagindex(T::DiagTensor{<:Number}, ind::Int) = storage(T)[ind]
-
-"""
-setdiagindex!(T::DiagTensor, val, i::Int)
-
-Set the ith value along the diagonal of the tensor.
-""" -setdiagindex!(T::DiagTensor{<:Number}, val, ind::Int) = (storage(T)[ind] = val) - -""" -setdiag(T::UniformDiagTensor,val) - -Set the entire diagonal of a uniform DiagTensor. -""" -setdiag(T::UniformDiagTensor, val) = tensor(Diag(val), inds(T)) - -@propagate_inbounds function getindex( - T::DiagTensor{ElT,N}, inds::Vararg{Int,N} -) where {ElT,N} - if all(==(inds[1]), inds) - return getdiagindex(T, inds[1]) - else - return zero(eltype(ElT)) - end -end -@propagate_inbounds getindex(T::DiagTensor{<:Number,1}, ind::Int) = storage(T)[ind] -using NDTensors.Expose: expose -@propagate_inbounds getindex(T::DiagTensor{<:Number,0}) = getindex(expose(storage(T))) - -# Set diagonal elements -# Throw error for off-diagonal -@propagate_inbounds function setindex!( - T::DiagTensor{<:Number,N}, val, inds::Vararg{Int,N} -) where {N} - all(==(inds[1]), inds) || error("Cannot set off-diagonal element of Diag storage") - setdiagindex!(T, val, inds[1]) - return T -end -@propagate_inbounds function setindex!(T::DiagTensor{<:Number,1}, val, ind::Int) - return (storage(T)[ind] = val) -end -@propagate_inbounds setindex!(T::DiagTensor{<:Number,0}, val) = (storage(T)[1] = val) - -function setindex!(T::UniformDiagTensor{<:Number,N}, val, inds::Vararg{Int,N}) where {N} - return error("Cannot set elements of a uniform Diag storage") -end - -# TODO: make a fill!! that works for uniform and non-uniform -#fill!(T::DiagTensor,v) = fill!(storage(T),v) - -function dense(::Type{<:Tensor{ElT,N,StoreT,IndsT}}) where {ElT,N,StoreT<:Diag,IndsT} - return Tensor{ElT,N,dense(StoreT),IndsT} -end - -using .TypeParameterAccessors: unwrap_array_type -# convert to Dense -function dense(T::DiagTensor) - R = zeros(dense(typeof(T)), inds(T)) - diagview(R) .= diagview(T) - return R -end - -denseblocks(T::DiagTensor) = dense(T) - -function permutedims!( - R::DiagTensor{<:Number,N}, - T::DiagTensor{<:Number,N}, - perm::NTuple{N,Int}, - f::Function=(r, t) -> t, -) where {N} - # TODO: check that inds(R)==permute(inds(T),perm)? 
- diagview(R) .= f.(diagview(R), diagview(T)) - return R -end - -function permutedims( - T::DiagTensor{<:Number,N}, perm::NTuple{N,Int}, f::Function=identity -) where {N} - R = NDTensors.similar(T) - g(r, t) = f(t) - permutedims!(R, T, perm, g) - return R -end - -function permutedims( - T::UniformDiagTensor{<:Number,N}, perm::NTuple{N,Int}, f::Function=identity -) where {N} - R = tensor(Diag(f(getdiagindex(T, 1))), permute(inds(T), perm)) - return R -end - -# Version that may overwrite in-place or may return the result -function permutedims!!( - R::NonuniformDiagTensor{<:Number,N}, - T::NonuniformDiagTensor{<:Number,N}, - perm::NTuple{N,Int}, - f::Function=(r, t) -> t, -) where {N} - R = convert(promote_type(typeof(R), typeof(T)), R) - permutedims!(R, T, perm, f) - return R -end - -function permutedims!!( - R::UniformDiagTensor{ElR,N}, - T::UniformDiagTensor{ElT,N}, - perm::NTuple{N,Int}, - f::Function=(r, t) -> t, -) where {ElR,ElT,N} - R = convert(promote_type(typeof(R), typeof(T)), R) - R = tensor(Diag(f(getdiagindex(R, 1), getdiagindex(T, 1))), inds(R)) - return R -end - -function permutedims!( - R::DenseTensor{ElR,N}, T::DiagTensor{ElT,N}, perm::NTuple{N,Int}, f::Function=(r, t) -> t -) where {ElR,ElT,N} - diagview(R) .= f.(diagview(R), diagview(T)) - return R -end - -function permutedims!!( - R::DenseTensor{ElR,N}, T::DiagTensor{ElT,N}, perm::NTuple{N,Int}, f::Function=(r, t) -> t -) where {ElR,ElT,N} - RR = convert(promote_type(typeof(R), typeof(T)), R) - permutedims!(RR, T, perm, f) - return RR -end - -# TODO: make a single implementation since this is -# the same as the version with the input types -# swapped. -function permutedims!!( - R::DiagTensor{ElR,N}, T::DenseTensor{ElT,N}, perm::NTuple{N,Int}, f::Function=(r, t) -> t -) where {ElR,ElT,N} - RR = convert(promote_type(typeof(R), typeof(T)), R) - permutedims!(RR, T, perm, f) - return RR -end - -function Base.mapreduce(f, op, t1::DiagTensor, t_tail::DiagTensor...; kwargs...) - elt = mapreduce(eltype, promote_type, (t1, t_tail...)) - if !iszero(f(zero(elt))) - return mapreduce(f, op, array(t1), array.(t_tail)...; kwargs...) - end - if length(t1) > diaglength(t1) - # Some elements are zero, account for that - # with the initial value. - init_kwargs = (; init=zero(elt)) - else - init_kwargs = (;) - end - return mapreduce(f, op, diagview(t1), diagview.(t_tail)...; kwargs..., init_kwargs...) -end - -function Base.show(io::IO, mime::MIME"text/plain", T::DiagTensor) - summary(io, T) - print_tensor(io, T) - return nothing -end diff --git a/NDTensors/src/diag/set_types.jl b/NDTensors/src/diag/set_types.jl deleted file mode 100644 index ec546e8c85..0000000000 --- a/NDTensors/src/diag/set_types.jl +++ /dev/null @@ -1,20 +0,0 @@ -using .TypeParameterAccessors: TypeParameterAccessors - -function TypeParameterAccessors.set_eltype(storagetype::Type{<:UniformDiag}, eltype::Type) - return Diag{eltype,eltype} -end - -function TypeParameterAccessors.set_eltype( - storagetype::Type{<:NonuniformDiag}, eltype::Type{<:AbstractArray} -) - return Diag{eltype,similartype(storagetype, eltype)} -end - -# TODO: Remove this once uniform diagonal tensors use FillArrays for the data. 
-function set_datatype(storagetype::Type{<:UniformDiag}, datatype::Type)
-  return Diag{datatype,datatype}
-end
-
-function set_datatype(storagetype::Type{<:NonuniformDiag}, datatype::Type{<:AbstractArray})
-  return Diag{eltype(datatype),datatype}
-end
diff --git a/NDTensors/src/diag/similar.jl b/NDTensors/src/diag/similar.jl
deleted file mode 100644
index bf7113aab7..0000000000
--- a/NDTensors/src/diag/similar.jl
+++ /dev/null
@@ -1,30 +0,0 @@
-using NDTensors.TypeParameterAccessors: TypeParameterAccessors
-
-# NDTensors.similar
-function similar(storagetype::Type{<:Diag}, dims::Dims)
-  return setdata(storagetype, similar(datatype(storagetype), mindim(dims)))
-end
-
-# TODO: Redesign UniformDiag to make it handled better
-# by generic code.
-function TypeParameterAccessors.similartype(storagetype::Type{<:UniformDiag}, eltype::Type)
-  # This will also set the `datatype`.
-  return set_eltype(storagetype, eltype)
-end
-
-# Needed to get a slice of a DiagTensor like T[1:3,1:3]
-function similar(
-  T::DiagTensor{<:Number,N}, ::Type{ElR}, inds::Dims{N}
-) where {ElR<:Number,N}
-  return tensor(similar(storage(T), ElR, minimum(inds)), inds)
-end
-
-similar(storage::NonuniformDiag) = setdata(storage, similar(data(storage)))
-
-similar(D::UniformDiag{ElT}) where {ElT} = Diag(zero(ElT))
-similar(D::UniformDiag, inds) = similar(D)
-similar(::Type{<:UniformDiag{ElT}}, inds) where {ElT} = Diag(zero(ElT))
-
-similar(D::Diag, n::Int) = Diag(similar(data(D), n))
-
-similar(D::Diag, ::Type{ElR}, n::Int) where {ElR} = Diag(similar(data(D), ElR, n))
diff --git a/NDTensors/src/diag/tensoralgebra/contract.jl b/NDTensors/src/diag/tensoralgebra/contract.jl
deleted file mode 100644
index 085737c890..0000000000
--- a/NDTensors/src/diag/tensoralgebra/contract.jl
+++ /dev/null
@@ -1,226 +0,0 @@
-# These are rules for determining the output of a pairwise contraction of NDTensors
-# (given the indices of the output tensor)
-function contraction_output_type(
-  tensortype1::Type{<:DiagTensor}, tensortype2::Type{<:DenseTensor}, indsR
-)
-  return similartype(promote_type(tensortype1, tensortype2), indsR)
-end
-function contraction_output_type(
-  tensortype1::Type{<:DenseTensor}, tensortype2::Type{<:DiagTensor}, indsR
-)
-  return contraction_output_type(tensortype2, tensortype1, indsR)
-end
-
-# This performs the logic that DiagTensor*DiagTensor -> DiagTensor if it is not an outer
-# product but -> DenseTensor if it is
-# TODO: if the tensors are both order 2 (or less), or if there is an Index replacement,
-# then they remain diagonal. Should we limit DiagTensor*DiagTensor to cases that
-# result in a DiagTensor, for efficiency and type stability? What about a general
-# SparseTensor result?
-function contraction_output_type(
-  tensortype1::Type{<:DiagTensor}, tensortype2::Type{<:DiagTensor}, indsR
-)
-  if length(indsR) == ndims(tensortype1) + ndims(tensortype2)
-    # Turn into is_outer(inds1,inds2,indsR) function?
-    # How does type inference work with arithmetic of compile time values?
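# Outer-product case: every element of the result can be nonzero,
# so the output is demoted to dense storage rather than staying diagonal.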
- return similartype(dense(promote_type(tensortype1, tensortype2)), indsR) - end - return similartype(promote_type(tensortype1, tensortype2), indsR) -end - -# The output must be initialized as zero since it is sparse, cannot be undefined -function contraction_output(T1::DiagTensor, T2::Tensor, indsR) - return zero_contraction_output(T1, T2, indsR) -end -contraction_output(T1::Tensor, T2::DiagTensor, indsR) = contraction_output(T2, T1, indsR) - -function contraction_output(T1::DiagTensor, T2::DiagTensor, indsR) - return zero_contraction_output(T1, T2, indsR) -end - -function _contract!!( - R::UniformDiagTensor{ElR,NR}, - labelsR, - T1::UniformDiagTensor{<:Number,N1}, - labelsT1, - T2::UniformDiagTensor{<:Number,N2}, - labelsT2, -) where {ElR,NR,N1,N2} - if NR == 0 # If all indices of A and B are contracted - # all indices are summed over, just add the product of the diagonal - # elements of A and B - R = setdiag(R, diaglength(T1) * getdiagindex(T1, 1) * getdiagindex(T2, 1)) - else - # not all indices are summed over, set the diagonals of the result - # to the product of the diagonals of A and B - R = setdiag(R, getdiagindex(T1, 1) * getdiagindex(T2, 1)) - end - return R -end - -function contract!( - output_tensor::Exposed{<:AbstractArray,<:DiagTensor}, - labelsoutput_tensor, - tensor1::Exposed, - labelstensor1, - tensor2::Exposed, - labelstensor2, - α::Number=one(Bool), - β::Number=zero(Bool), -) - @assert isone(α) - @assert iszero(β) - return contract!( - unexpose(output_tensor), - labelsoutput_tensor, - unexpose(tensor1), - labelstensor1, - unexpose(tensor2), - labelstensor2, - ) -end - -function contract!( - R::DiagTensor{ElR,NR}, - labelsR, - T1::DiagTensor{<:Number,N1}, - labelsT1, - T2::DiagTensor{<:Number,N2}, - labelsT2, -) where {ElR,NR,N1,N2} - if NR == 0 # If all indices of A and B are contracted - # All indices are summed over, just add the product of the diagonal - # elements of A and B. - # `expose` allows dispatching on the data type - # in order to allow scalar indexing on GPU. - expose(R)[] = mapreduce(*, +, diagview(T1), diagview(T2)) - else - diagview(R) .= diagview(T1) .* diagview(T2) - end - return R -end - -function contract!( - C::DenseTensor{ElC,NC}, - Clabels, - A::DiagTensor{ElA,NA}, - Alabels, - B::DenseTensor{ElB,NB}, - Blabels, - α::Number=one(ElC), - β::Number=zero(ElC); - convert_to_dense::Bool=true, -) where {ElA,NA,ElB,NB,ElC,NC} - #@timeit_debug timer "diag-dense contract!" begin - if all(i -> i < 0, Blabels) - # If all of B is contracted - # TODO: can also check NC+NB==NA - min_dim = min(minimum(dims(A)), minimum(dims(B))) - if length(Clabels) == 0 - # all indices are summed over, just add the product of the diagonal - # elements of A and B - # Assumes C starts set to 0 - c₁ = zero(ElC) - for i in 1:min_dim - c₁ += getdiagindex(A, i) * getdiagindex(B, i) - end - setdiagindex!(C, α * c₁ + β * getdiagindex(C, 1), 1) - else - # not all indices are summed over, set the diagonals of the result - # to the product of the diagonals of A and B - # TODO: should we make this return a Diag storage? - for i in 1:min_dim - setdiagindex!( - C, α * getdiagindex(A, i) * getdiagindex(B, i) + β * getdiagindex(C, i), i - ) - end - end - else - # Most general contraction - if convert_to_dense - contract!(C, Clabels, dense(A), Alabels, B, Blabels, α, β) - else - if !isone(α) || !iszero(β) - error( - "`contract!(::DenseTensor, ::DiagTensor, ::DenseTensor, α, β; convert_to_dense = false)` with `α ≠ 1` or `β ≠ 0` is not currently supported. 
You can call it with `convert_to_dense = true` instead.", - ) - end - astarts = zeros(Int, length(Alabels)) - bstart = 0 - cstart = 0 - b_cstride = 0 - nbu = 0 - for ib in 1:length(Blabels) - ia = findfirst(==(Blabels[ib]), Alabels) - if !isnothing(ia) - b_cstride += stride(B, ib) - bstart += astarts[ia] * stride(B, ib) - else - nbu += 1 - end - end - - c_cstride = 0 - for ic in 1:length(Clabels) - ia = findfirst(==(Clabels[ic]), Alabels) - if !isnothing(ia) - c_cstride += stride(C, ic) - cstart += astarts[ia] * stride(C, ic) - end - end - - # strides of the uncontracted dimensions of - # B - bustride = zeros(Int, nbu) - custride = zeros(Int, nbu) - # size of the uncontracted dimensions of - # B, to be used in CartesianIndices - busize = zeros(Int, nbu) - n = 1 - for ib in 1:length(Blabels) - if Blabels[ib] > 0 - bustride[n] = stride(B, ib) - busize[n] = size(B, ib) - ic = findfirst(==(Blabels[ib]), Clabels) - custride[n] = stride(C, ic) - n += 1 - end - end - - boffset_orig = 1 - sum(strides(B)) - coffset_orig = 1 - sum(strides(C)) - cartesian_inds = CartesianIndices(Tuple(busize)) - for inds in cartesian_inds - boffset = boffset_orig - coffset = coffset_orig - for i in 1:nbu - ii = inds[i] - boffset += ii * bustride[i] - coffset += ii * custride[i] - end - c = zero(ElC) - for j in 1:diaglength(A) - # With α == 0 && β == 1 - C[cstart + j * c_cstride + coffset] += - getdiagindex(A, j) * B[bstart + j * b_cstride + boffset] - # XXX: not sure if this is correct - #C[cstart+j*c_cstride+coffset] += α * getdiagindex(A, j)* B[bstart+j*b_cstride+boffset] + β * C[cstart+j*c_cstride+coffset] - end - end - end - end - #end # @timeit -end - -function contract!( - C::DenseTensor, - Clabels, - A::DenseTensor, - Alabels, - B::DiagTensor, - Blabels, - α::Number=one(eltype(C)), - β::Number=zero(eltype(C)), -) - return contract!(C, Clabels, B, Blabels, A, Alabels, α, β) -end diff --git a/NDTensors/src/diag/tensoralgebra/outer.jl b/NDTensors/src/diag/tensoralgebra/outer.jl deleted file mode 100644 index b01a4850a0..0000000000 --- a/NDTensors/src/diag/tensoralgebra/outer.jl +++ /dev/null @@ -1,30 +0,0 @@ -function outer!( - R::DenseTensor{<:Number,NR}, T1::DiagTensor{<:Number,N1}, T2::DiagTensor{<:Number,N2} -) where {NR,N1,N2} - for i1 in 1:diaglength(T1), i2 in 1:diaglength(T2) - indsR = CartesianIndex{NR}(ntuple(r -> r ≤ N1 ? i1 : i2, Val(NR))) - R[indsR] = getdiagindex(T1, i1) * getdiagindex(T2, i2) - end - return R -end - -# TODO: write an optimized version of this? 
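# The mixed dense/diag outer products below zero the output buffer and then
# densify the Diag argument.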
-function outer!(R::DenseTensor{ElR}, T1::DenseTensor, T2::DiagTensor) where {ElR}
-  R .= zero(ElR)
-  outer!(R, T1, dense(T2))
-  return R
-end
-
-function outer!(R::DenseTensor{ElR}, T1::DiagTensor, T2::DenseTensor) where {ElR}
-  R .= zero(ElR)
-  outer!(R, dense(T1), T2)
-  return R
-end
-
-# TODO: write an in-place version
-function outer(T1::DiagTensor{ElT1,N1}, T2::DiagTensor{ElT2,N2}) where {ElT1,ElT2,N1,N2}
-  indsR = unioninds(inds(T1), inds(T2))
-  R = tensor(Dense(generic_zeros(promote_type(ElT1, ElT2), dim(indsR))), indsR)
-  outer!(R, T1, T2)
-  return R
-end
diff --git a/NDTensors/src/dims.jl b/NDTensors/src/dims.jl
deleted file mode 100644
index c49895f18b..0000000000
--- a/NDTensors/src/dims.jl
+++ /dev/null
@@ -1,99 +0,0 @@
-using .DiagonalArrays: DiagonalArrays
-using .TypeParameterAccessors: TypeParameterAccessors
-
-export dense, dims, dim, mindim, diaglength
-
-# dim and dims are used in the Tensor interface; overload
-# Base's Dims here.
-dims(ds::Dims) = ds
-
-# Generic dims function
-dims(inds::Tuple) = ntuple(i -> dim(@inbounds inds[i]), Val(length(inds)))
-
-# Generic dim function
-dim(inds::Tuple) = prod(dims(inds))
-
-dims(::Tuple{}) = ()
-
-dim(::Tuple{}) = 1
-
-dense(ds::Dims) = ds
-
-dense(::Type{DimsT}) where {DimsT<:Dims} = DimsT
-
-dim(ds::Dims) = prod(ds)
-
-dim(ds::Dims, i::Int) = dims(ds)[i]
-
-mindim(inds::Tuple) = minimum(dims(inds))
-
-mindim(::Tuple{}) = 1
-
-DiagonalArrays.diaglength(inds::Tuple) = mindim(inds)
-
-"""
-    dim_to_strides(ds)
-
-Get the strides from the dimensions.
-
-This is unexported; call it as NDTensors.dim_to_strides.
-"""
-dim_to_strides(ds) = Base.size_to_strides(1, dims(ds)...)
-
-"""
-    dim_to_stride(ds, k::Int)
-
-Get the stride of dimension k from the dimensions.
-
-This is unexported; call it as NDTensors.dim_to_stride.
-"""
-dim_to_stride(ds, k::Int) = dim_to_strides(ds)[k]
-
-# This is to help with some generic programming in the Tensor
-# code (it helps to construct a Tuple(::NTuple{N,Int}) where the
-# only known thing for dispatch is a concrete type such
-# as Dims{4})
-TypeParameterAccessors.similartype(::Type{<:Dims}, ::Type{Val{N}}) where {N} = Dims{N}
-
-# This is to help with ITensor compatibility
-dim(i::Int) = i
-
-# This is to help with ITensor compatibility
-dir(::Int) = 0
-
-# This is to help with ITensor compatibility
-dag(i::Int) = i
-
-# This is to help with ITensor compatibility
-sim(i::Int) = i
-
-#
-# Order value type
-#
-
-# This more complicated definition makes Order(Ref(2)[]) faster.
-@eval struct Order{N}
-  (OrderT::Type{<:Order})() = $(Expr(:new, :OrderT))
-end
-
-@doc """
-    Order{N}
-
-A value type representing the order of an ITensor.
-""" Order
-
-"""
-    Order(N) = Order{N}()
-
-Create an instance of the value type Order representing
-the order of an ITensor.
-""" -Order(N) = Order{N}() - -#dims for tensor -# The size is obtained from the indices -dims(T::Tensor) = dims(inds(T)) -dim(T::Tensor) = dim(inds(T)) -dim(T::Tensor, i::Int) = dim(inds(T), i) -maxdim(T::Tensor) = maxdim(inds(T)) -mindim(T::Tensor) = mindim(inds(T)) diff --git a/NDTensors/src/empty/EmptyTensor.jl b/NDTensors/src/empty/EmptyTensor.jl deleted file mode 100644 index f5e04e0652..0000000000 --- a/NDTensors/src/empty/EmptyTensor.jl +++ /dev/null @@ -1,184 +0,0 @@ -# -# EmptyTensor (Tensor using EmptyStorage storage) -# - -const EmptyTensor{ElT,N,StoreT,IndsT} = - Tensor{ElT,N,StoreT,IndsT} where {StoreT<:EmptyStorage} - -## Start constructors -function EmptyTensor(::Type{ElT}, inds) where {ElT<:Number} - return tensor(EmptyStorage(ElT), inds) -end - -function EmptyTensor(::Type{StoreT}, inds) where {StoreT<:TensorStorage} - return tensor(empty(StoreT), inds) -end - -function EmptyBlockSparseTensor(::Type{ElT}, inds) where {ElT<:Number} - StoreT = BlockSparse{ElT,Vector{ElT},length(inds)} - return EmptyTensor(StoreT, inds) -end -## End constructors - -fulltype(::Type{EmptyStorage{ElT,StoreT}}) where {ElT,StoreT} = StoreT -fulltype(T::EmptyStorage) = fulltype(typeof(T)) - -fulltype(T::Tensor) = fulltype(typeof(T)) - -# Needed for correct `NDTensors.ndims` definitions, for -# example `EmptyStorage` that wraps a `BlockSparse` which -# can have non-unity dimensions. -function Base.ndims(storagetype::Type{<:EmptyStorage}) - return ndims(fulltype(storagetype)) -end - -# From an EmptyTensor, return the closest Tensor type -function fulltype(::Type{TensorT}) where {TensorT<:Tensor} - return Tensor{ - eltype(TensorT),ndims(TensorT),fulltype(storetype(TensorT)),indstype(TensorT) - } -end - -function fulltype( - ::Type{ElR}, ::Type{<:Tensor{ElT,N,EStoreT,IndsT}} -) where {ElR,ElT<:Number,N,EStoreT<:EmptyStorage{ElT,StoreT},IndsT} where {StoreT} - return Tensor{ElR,N,similartype(StoreT, ElR),IndsT} -end - -function emptytype(::Type{TensorT}) where {TensorT<:Tensor} - return Tensor{ - eltype(TensorT),ndims(TensorT),emptytype(storagetype(TensorT)),indstype(TensorT) - } -end - -# XXX TODO: add bounds checking -getindex(T::EmptyTensor, I::Integer...) = zero(eltype(T)) - -function getindex(T::EmptyTensor{Complex{EmptyNumber}}, I::Integer...) 
- return Complex(EmptyNumber(), EmptyNumber()) -end - -similar(T::EmptyTensor, inds::Tuple) = setinds(T, inds) -function similar(T::EmptyTensor, ::Type{ElT}) where {ElT<:Number} - return tensor(similar(storage(T), ElT), inds(T)) -end - -function randn!!(T::EmptyTensor) - return randn!!(Random.default_rng(), T) -end - -function randn!!(rng::AbstractRNG, T::EmptyTensor) - Tf = similar(fulltype(T), inds(T)) - randn!(rng, Tf) - return Tf -end - -# Default to Float64 -function randn!!(T::EmptyTensor{EmptyNumber}) - return randn!!(Random.default_rng(), T) -end - -# Default to Float64 -function randn!!(rng::AbstractRNG, T::EmptyTensor{EmptyNumber}) - return randn!!(rng, similar(T, Float64)) -end - -function _fill!!(::Type{ElT}, T::EmptyTensor, α::Number) where {ElT} - Tf = similar(fulltype(T), ElT, inds(T)) - fill!(Tf, α) - return Tf -end - -fill!!(T::EmptyTensor, α::Number) = _fill!!(eltype(T), T, α) - -# Determine the element type from the number you are filling with -fill!!(T::EmptyTensor{EmptyNumber}, α::Number) = _fill!!(eltype(α), T, α) - -isempty(::EmptyTensor) = true - -zero(empty::EmptyTensor) = empty - -function zeros(T::TensorT) where {TensorT<:EmptyTensor} - TensorR = fulltype(TensorT) - return zeros(TensorR, inds(T)) -end - -function zeros(::Type{ElT}, T::TensorT) where {ElT,TensorT<:EmptyTensor} - TensorR = fulltype(ElT, TensorT) - return zeros(TensorR, inds(T)) -end - -function insertblock(T::EmptyTensor{<:Number,N}, block) where {N} - R = zeros(T) - insertblock!(R, Block(block)) - return R -end - -insertblock!!(T::EmptyTensor{<:Number,N}, block) where {N} = insertblock(T, block) - -blockoffsets(tensor::EmptyTensor) = BlockOffsets{ndims(tensor)}() - -# Special case with element type of EmptyNumber: storage takes the type -# of the input. -@propagate_inbounds function _setindex(T::EmptyTensor{EmptyNumber}, x, I...) - R = zeros(typeof(x), T) - R[I...] = x - return R -end - -# Special case with element type of Complex{EmptyNumber}: storage takes the type -# of the complex version of the input. -@propagate_inbounds function _setindex(T::EmptyTensor{Complex{EmptyNumber}}, x, I...) - R = zeros(typeof(complex(x)), T) - R[I...] = x - return R -end - -@propagate_inbounds function _setindex(T::EmptyTensor, x, I...) - R = zeros(T) - R[I...] = x - return R -end - -@propagate_inbounds function setindex(T::EmptyTensor, x, I...) - return _setindex(T, x, I...) -end - -# This is needed to fix an ambiguity error with ArrayInterface.jl -# https://github.com/ITensor/NDTensors.jl/issues/62 -@propagate_inbounds function setindex(T::EmptyTensor, x, I::Int...) - return _setindex(T, x, I...) -end - -setindex!!(T::EmptyTensor, x, I...) = setindex(T, x, I...) 
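The `_setindex` methods above are the write-to-materialize pattern: setting an element of an empty tensor allocates real storage, with the element type taken from the value when the placeholder element type is `EmptyNumber`. A minimal self-contained sketch of the pattern in plain Julia (`EmptyPlaceholder` and this out-of-place `setindex` are illustrative stand-ins, not the NDTensors types):

# Placeholder carrying only the dimensions, no storage.
struct EmptyPlaceholder
  dims::Dims
end

# Out-of-place set: allocate full storage with the eltype of the value,
# write the element, and return the materialized array.
function setindex(t::EmptyPlaceholder, x, I...)
  R = zeros(typeof(x), t.dims...)
  R[I...] = x
  return R
end

t = EmptyPlaceholder((2, 2))
A = setindex(t, 3.5, 1, 2)
@assert A isa Matrix{Float64} && A[1, 2] == 3.5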
- -function promote_rule( - ::Type{T1}, ::Type{T2} -) where {T1<:EmptyStorage{EmptyNumber},T2<:TensorStorage} - return T2 -end -function promote_rule(::Type{T1}, ::Type{T2}) where {T1<:EmptyStorage,T2<:TensorStorage} - return promote_type(similartype(T2, eltype(T1)), T2) -end - -function permutedims!!(R::Tensor, T::EmptyTensor, perm::Tuple, f::Function=(r, t) -> t) - RR = convert(promote_type(typeof(R), typeof(T)), R) - RR = permutedims!!(RR, RR, ntuple(identity, Val(ndims(R))), (r, t) -> f(r, false)) - return RR -end - -function permutedims!!(R::EmptyTensor, T::Tensor, perm::Tuple, f::Function=(r, t) -> t) - RR = similar(promote_type(typeof(R), typeof(T)), inds(R)) - RR = permutedims!!(RR, T, perm, (r, t) -> f(false, t)) - return RR -end - -function permutedims!!(R::EmptyTensor, T::EmptyTensor, perm::Tuple, f::Function=(r, t) -> t) - RR = convert(promote_type(typeof(R), typeof(T)), R) - return RR -end - -function Base.show(io::IO, mime::MIME"text/plain", T::EmptyTensor) - summary(io, T) - return println(io) -end diff --git a/NDTensors/src/empty/adapt.jl b/NDTensors/src/empty/adapt.jl deleted file mode 100644 index 0a048aac9e..0000000000 --- a/NDTensors/src/empty/adapt.jl +++ /dev/null @@ -1,8 +0,0 @@ -function adapt_structure(to, x::EmptyStorage) - return adapt_storagetype(to, typeof(x))() -end - -function adapt_storagetype(to::Type{<:AbstractArray}, x::Type{<:EmptyStorage}) - d = datatype(storagetype(x)) - return emptytype(adapt_storagetype(adapt(to, d), fulltype(x))) -end diff --git a/NDTensors/src/empty/empty.jl b/NDTensors/src/empty/empty.jl deleted file mode 100644 index 4aab974e0b..0000000000 --- a/NDTensors/src/empty/empty.jl +++ /dev/null @@ -1,100 +0,0 @@ -using SparseArrays: SparseArrays -using .TypeParameterAccessors: TypeParameterAccessors, set_eltype, similartype - -# -# Represents a tensor order that could be set to any order. -# - -struct EmptyOrder end - -function TypeParameterAccessors.similartype( - StoreT::Type{<:TensorStorage{EmptyNumber}}, ElT::Type -) - return set_eltype(StoreT, ElT) -end - -function TypeParameterAccessors.similartype( - StoreT::Type{<:TensorStorage{EmptyNumber}}, DataT::Type{<:AbstractArray} -) - return set_datatype(StoreT, DataT) -end - -## TODO fix this similartype to use set eltype for BlockSparse -function TypeParameterAccessors.similartype( - ::Type{StoreT}, ::Type{ElT} -) where {StoreT<:BlockSparse{EmptyNumber},ElT} - return BlockSparse{ElT,similartype(datatype(StoreT), ElT),ndims(StoreT)} -end - -# -# Empty storage -# - -struct EmptyStorage{ElT,StoreT<:TensorStorage} <: TensorStorage{ElT} end - -function EmptyStorage(::Type{ElT}) where {ElT} - return empty(default_storagetype(default_datatype(ElT))) - #return emptytype(Dense{ElT,Vector{ElT}})() -end - -# TODO: should this be `EmptyNumber`? -EmptyStorage() = EmptyStorage(default_eltype()) - -storagetype(::Type{EmptyStorage{ElT,StoreT}}) where {ElT,StoreT} = StoreT -storagetype(::EmptyStorage{ElT,StoreT}) where {ElT,StoreT} = StoreT - -# Get the EmptyStorage version of the TensorStorage -function emptytype(storagetype::Type{<:TensorStorage}) - return EmptyStorage{eltype(storagetype),storagetype} -end - -empty(storagetype::Type{<:TensorStorage}) = emptytype(storagetype)() - -data(S::EmptyStorage) = NoData() - -## TODO Why is the norm of an empty tensor 0??? 
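# (With the definition below, it is the norm of `zero(ElT)`, which is zero.)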
-norm(::EmptyStorage{ElT}) where {ElT} = norm(zero(ElT))#EmptyNumber - -similar(S::EmptyStorage) = S -similar(S::EmptyStorage, ::Type{ElT}) where {ElT} = empty(similartype(fulltype(S), ElT)) - -copy(S::EmptyStorage) = S - -size(::EmptyStorage) = (0,) -length(::EmptyStorage) = 0 - -isempty(::EmptyStorage) = true - -nnzblocks(::EmptyStorage) = 0 - -SparseArrays.nnz(::EmptyStorage) = 0 - -function conj(::AllowAlias, S::EmptyStorage) - return S -end - -# TODO: promote the element type properly -(S::EmptyStorage * x::Number) = S -(x::Number * S::EmptyStorage) = S * x - -function Base.real(::Type{<:EmptyStorage{ElT,StoreT}}) where {ElT,StoreT} - return EmptyStorage{real(ElT),real(StoreT)} -end - -function complex(::Type{<:EmptyStorage{ElT,StoreT}}) where {ElT,StoreT} - return EmptyStorage{complex(ElT),complex(StoreT)} -end - -real(S::EmptyStorage) = real(typeof(S))() - -complex(S::EmptyStorage) = complex(typeof(S))() - -blockoffsets(storage::EmptyStorage) = BlockOffsets{ndims(storage)}() - -function Base.show(io::IO, mime::MIME"text/plain", S::EmptyStorage) - return println(io, typeof(S)) -end - -using .TypeParameterAccessors: TypeParameterAccessors -TypeParameterAccessors.parenttype(empty::Type{<:EmptyStorage}) = storagetype(empty) -zero(empty::EmptyStorage) = empty diff --git a/NDTensors/src/empty/tensoralgebra/contract.jl b/NDTensors/src/empty/tensoralgebra/contract.jl deleted file mode 100644 index d566bad441..0000000000 --- a/NDTensors/src/empty/tensoralgebra/contract.jl +++ /dev/null @@ -1,74 +0,0 @@ -# Version of contraction where output storage is empty -function contract!!(R::EmptyTensor, labelsR, T1::Tensor, labelsT1, T2::Tensor, labelsT2) - RR = contract(T1, labelsT1, T2, labelsT2, labelsR) - return RR -end - -# When one of the tensors is empty, return an empty -# tensor. -# XXX: make sure `R` is actually correct! -function contract!!( - R::EmptyTensor, labelsR, T1::EmptyTensor, labelsT1, T2::Tensor, labelsT2 -) - return R -end - -# When one of the tensors is empty, return an empty -# tensor. -# XXX: make sure `R` is actually correct! 
-function contract!!( - R::EmptyTensor, labelsR, T1::Tensor, labelsT1, T2::EmptyTensor, labelsT2 -) - return R -end - -function contract!!( - R::EmptyTensor, labelsR, T1::EmptyTensor, labelsT1, T2::EmptyTensor, labelsT2 -) - return R -end - -# For ambiguity with versions in combiner.jl -function contract!!( - R::EmptyTensor, labelsR, T1::CombinerTensor, labelsT1, T2::Tensor, labelsT2 -) - RR = contract(T1, labelsT1, T2, labelsT2, labelsR) - return RR -end - -# For ambiguity with versions in combiner.jl -function contract!!( - R::EmptyTensor, labelsR, T1::Tensor, labelsT1, T2::CombinerTensor, labelsT2 -) - RR = contract(T1, labelsT1, T2, labelsT2, labelsR) - return RR -end - -# For ambiguity with versions in combiner.jl -function contract!!( - R::EmptyTensor, labelsR, T1::EmptyTensor, labelsT1, T2::CombinerTensor, labelsT2 -) - RR = contraction_output(T1, labelsT1, T2, labelsT2, labelsR) - return RR -end - -function contraction_output(T1::EmptyTensor, T2::EmptyTensor, indsR::Tuple) - fulltypeR = contraction_output_type(fulltype(T1), fulltype(T2), indsR) - storagetypeR = storagetype(fulltypeR) - emptystoragetypeR = emptytype(storagetypeR) - return Tensor(emptystoragetypeR(), indsR) -end - -function contraction_output(T1::Tensor, T2::EmptyTensor, indsR) - fulltypeR = contraction_output_type(typeof(T1), fulltype(T2), indsR) - storagetypeR = storagetype(fulltypeR) - emptystoragetypeR = emptytype(storagetypeR) - return Tensor(emptystoragetypeR(), indsR) -end - -function contraction_output(T1::EmptyTensor, T2::Tensor, indsR) - fulltypeR = contraction_output_type(fulltype(T1), typeof(T2), indsR) - storagetypeR = storagetype(fulltypeR) - emptystoragetypeR = emptytype(storagetypeR) - return Tensor(emptystoragetypeR(), indsR) -end diff --git a/NDTensors/src/emptynumber.jl b/NDTensors/src/emptynumber.jl deleted file mode 100644 index 1d9799b740..0000000000 --- a/NDTensors/src/emptynumber.jl +++ /dev/null @@ -1,38 +0,0 @@ -# -# Represents a number that can be set to any type. -# - -struct EmptyNumber <: Real end - -zero(::Type{EmptyNumber}) = EmptyNumber() -zero(n::EmptyNumber) = zero(typeof(n)) - -# This helps handle a lot of basic algebra, like: -# EmptyNumber() + 2.3 == 2.3 -convert(::Type{T}, x::EmptyNumber) where {T<:Number} = T(zero(T)) - -# TODO: Should this be implemented? -#Complex(x::Real, ::EmptyNumber) = x - -# This is to help define `float(::EmptyNumber) = 0.0`. -# This helps with defining `norm` of `EmptyStorage{EmptyNumber}`. -AbstractFloat(::EmptyNumber) = zero(AbstractFloat) - -# Extra definitions fix ambiguity errors. 
-Base.promote_rule(::Type{EmptyNumber}, T::Type{<:Number}) = T -Base.promote_rule(T::Type{<:Number}, ::Type{EmptyNumber}) = T -Base.promote_rule(::Type{EmptyNumber}, ::Type{Bool}) = Bool -Base.promote_rule(::Type{Bool}, ::Type{EmptyNumber}) = Bool -Base.promote_rule(::Type{EmptyNumber}, T::Type{Complex{R}}) where {R<:Real} = T -Base.promote_rule(T::Type{Complex{R}}, ::Type{EmptyNumber}) where {R<:Real} = T - -# Basic arithmetic -(::EmptyNumber + ::EmptyNumber) = EmptyNumber() -(::EmptyNumber - ::EmptyNumber) = EmptyNumber() -(::Number * ::EmptyNumber) = EmptyNumber() -(::EmptyNumber * ::Number) = EmptyNumber() -(::EmptyNumber * ::EmptyNumber) = EmptyNumber() -(::EmptyNumber / ::Number) = EmptyNumber() -(::Number / ::EmptyNumber) = throw(DivideError()) -(::EmptyNumber / ::EmptyNumber) = throw(DivideError()) --(::EmptyNumber) = EmptyNumber() diff --git a/NDTensors/src/exports.jl b/NDTensors/src/exports.jl deleted file mode 100644 index 8e91f52019..0000000000 --- a/NDTensors/src/exports.jl +++ /dev/null @@ -1,89 +0,0 @@ -export - # NDTensors.jl - insertblock!!, - setindex, - setindex!!, - # blocksparse/blockdims.jl - BlockDims, - blockdim, - blockdims, - nblocks, - blockindex, - # blocksparse/blocksparse.jl - # Types - Block, - BlockOffset, - BlockOffsets, - BlockSparse, - # Methods - blockoffsets, - blockview, - eachnzblock, - findblock, - isblocknz, - nnzblocks, - nnz, - nzblock, - nzblocks, - - # blocksparse/blocksparsetensor.jl - # Types - BlockSparseTensor, - # Methods - blockview, - insertblock!, - randomBlockSparseTensor, - - # dense.jl - # Types - Dense, - DenseTensor, - # Symbols - ⊗, - # Methods - randomTensor, - array, - contract, - matrix, - outer, - permutedims!!, - read, - vector, - write, - - # diag.jl - # Types - Diag, - DiagTensor, - - # empty.jl - EmptyStorage, - EmptyTensor, - EmptyBlockSparseTensor, - - # tensorstorage.jl - data, - TensorStorage, - randn!, - scale!, - norm, - - # tensor.jl - Tensor, - tensor, - inds, - ind, - store, - - # truncate.jl - truncate!, - - # linearalgebra.jl - eigs, - entropy, - polar, - ql, - random_orthog, - random_unitary, - Spectrum, - truncerror diff --git a/NDTensors/src/imports.jl b/NDTensors/src/imports.jl deleted file mode 100644 index 6a5f6343c7..0000000000 --- a/NDTensors/src/imports.jl +++ /dev/null @@ -1,128 +0,0 @@ -# Makes `cpu` available as `NDTensors.cpu`. -# TODO: Define `cpu`, `cu`, etc. 
in a module `DeviceAbstractions`, -# similar to: -# https://github.com/JuliaGPU/KernelAbstractions.jl -# https://github.com/oschulz/HeterogeneousComputing.jl - -using Adapt -using Base.Threads -using Compat -using Dictionaries -using Folds -using InlineStrings -using Random -using LinearAlgebra -using StaticArrays -using Functors -using SimpleTraits -using SplitApplyCombine -using Strided -using TimerOutputs -using TupleTools - -for lib in [ - :AllocateData, - :BackendSelection, - :BaseExtensions, - :UnspecifiedTypes, - :TypeParameterAccessors, - :Expose, - :GPUArraysCoreExtensions, - :AMDGPUExtensions, - :CUDAExtensions, - :MetalExtensions, - :BroadcastMapConversion, - :RankFactorization, - :LabelledNumbers, - :GradedAxes, - :SymmetrySectors, - :TensorAlgebra, - :NestedPermutedDimsArrays, - :SparseArraysBase, - :DiagonalArrays, - :BlockSparseArrays, - :NamedDimsArrays, - :SmallVectors, - :SortedSets, - :TagSets, - :UnallocatedArrays, -] - include("lib/$(lib)/src/$(lib).jl") - @eval using .$lib: $lib -end -# TODO: This is defined for backwards compatibility, -# delete this alias once downstream packages change over -# to using `BackendSelection`. -const AlgorithmSelection = BackendSelection - -using Base: @propagate_inbounds, ReshapedArray, DimOrInd, OneTo - -using Base.Cartesian: @nexprs - -using Base.Threads: @spawn - -using .AMDGPUExtensions: roc -using .CUDAExtensions: cu -using .GPUArraysCoreExtensions: cpu -using .MetalExtensions: mtl - -import Base: - # Types - AbstractFloat, - Array, - CartesianIndex, - Complex, - IndexStyle, - Tuple, - # Symbols - +, - -, - *, - /, - # Methods - checkbounds, - complex, - convert, - conj, - copy, - copyto!, - eachindex, - eltype, - empty, - fill, - fill!, - getindex, - hash, - imag, - isempty, - isless, - iterate, - length, - map, - permutedims, - permutedims!, - print, - promote_rule, - randn, - real, - reshape, - setindex, - setindex!, - show, - size, - stride, - strides, - summary, - to_indices, - unsafe_convert, - view, - zero, - zeros - -import Base.Broadcast: Broadcasted, BroadcastStyle - -import Adapt: adapt_structure, adapt_storage - -import LinearAlgebra: diag, exp, norm, qr, svd, mul! - -import TupleTools: isperm diff --git a/NDTensors/src/lib/AMDGPUExtensions/.JuliaFormatter.toml b/NDTensors/src/lib/AMDGPUExtensions/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/AMDGPUExtensions/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/AMDGPUExtensions/src/AMDGPUExtensions.jl b/NDTensors/src/lib/AMDGPUExtensions/src/AMDGPUExtensions.jl deleted file mode 100644 index e9e77e6cc5..0000000000 --- a/NDTensors/src/lib/AMDGPUExtensions/src/AMDGPUExtensions.jl +++ /dev/null @@ -1,4 +0,0 @@ -module AMDGPUExtensions -include("roc.jl") - -end diff --git a/NDTensors/src/lib/AMDGPUExtensions/src/roc.jl b/NDTensors/src/lib/AMDGPUExtensions/src/roc.jl deleted file mode 100644 index 2cd0aca64f..0000000000 --- a/NDTensors/src/lib/AMDGPUExtensions/src/roc.jl +++ /dev/null @@ -1,14 +0,0 @@ -using NDTensors.TypeParameterAccessors: TypeParameterAccessors, Position -using NDTensors.GPUArraysCoreExtensions: storagemode -# Implemented in NDTensorsAMDGPUExt -function roc end - -## Here we need an ROCArrayAdaptor to prevent conversion of 64 bit numbers to 32 bit. -## We cannot write `adapt(CuVector, x)` because this -## will not allow us to properly utilize the buffer preference without changing the value of -## default_buffertype. 
Also `adapt(CuVector{<:Any, <:Any, Buffertype})` fails to work properly -struct ROCArrayAdaptor{B} end - -function TypeParameterAccessors.position(::Type{<:ROCArrayAdaptor}, ::typeof(storagemode)) - return Position(1) -end diff --git a/NDTensors/src/lib/AMDGPUExtensions/test/runtests.jl b/NDTensors/src/lib/AMDGPUExtensions/test/runtests.jl deleted file mode 100644 index da274f21da..0000000000 --- a/NDTensors/src/lib/AMDGPUExtensions/test/runtests.jl +++ /dev/null @@ -1,9 +0,0 @@ -@eval module $(gensym()) -using Test: @testset, @test -using NDTensors.AMDGPUExtensions: roc, ROCArrayAdaptor -using NDTensors.GPUArraysCoreExtensions: storagemode -@testset "roc and ROCArrayAdaptor" begin - @test roc isa Function - @test storagemode(ROCArrayAdaptor{1}) == 1 -end -end diff --git a/NDTensors/src/lib/AllocateData/.JuliaFormatter.toml b/NDTensors/src/lib/AllocateData/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/AllocateData/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/AllocateData/README.md b/NDTensors/src/lib/AllocateData/README.md deleted file mode 100644 index a0c5264c67..0000000000 --- a/NDTensors/src/lib/AllocateData/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# AllocateData.jl - -Generic interface for allocating data, such as array data. - -## See also - -- https://juliaobjects.github.io/ConstructionBase.jl -- https://github.com/oschulz/HeterogeneousComputing.jl -- https://github.com/JuliaGPU/KernelAbstractions.jl -- https://github.com/JuliaLang/julia/issues/11557 -- https://github.com/JuliaLang/julia/issues/18161 -- https://github.com/JuliaLang/julia/issues/22218 -- https://github.com/JuliaLang/julia/issues/25107 -- https://github.com/JuliaArrays/ArrayInterface.jl/issues/130 -- https://github.com/JuliaArrays/StaticArrays.jl/issues/32 diff --git a/NDTensors/src/lib/AllocateData/src/AllocateData.jl b/NDTensors/src/lib/AllocateData/src/AllocateData.jl deleted file mode 100644 index 03943c5c92..0000000000 --- a/NDTensors/src/lib/AllocateData/src/AllocateData.jl +++ /dev/null @@ -1,8 +0,0 @@ -module AllocateData -include("to_axis.jl") -include("initializers.jl") -include("defaults.jl") -include("allocate.jl") -include("base.jl") -include("AllocateDataLinearAlgebraExt/AllocateDataLinearAlgebraExt.jl") -end diff --git a/NDTensors/src/lib/AllocateData/src/AllocateDataLinearAlgebraExt/AllocateDataLinearAlgebraExt.jl b/NDTensors/src/lib/AllocateData/src/AllocateDataLinearAlgebraExt/AllocateDataLinearAlgebraExt.jl deleted file mode 100644 index b44d41caa3..0000000000 --- a/NDTensors/src/lib/AllocateData/src/AllocateDataLinearAlgebraExt/AllocateDataLinearAlgebraExt.jl +++ /dev/null @@ -1,4 +0,0 @@ -module AllocateDataLinearAlgebraExt -include("diagonal.jl") -include("hermitian.jl") -end diff --git a/NDTensors/src/lib/AllocateData/src/AllocateDataLinearAlgebraExt/diagonal.jl b/NDTensors/src/lib/AllocateData/src/AllocateDataLinearAlgebraExt/diagonal.jl deleted file mode 100644 index e3a85414ae..0000000000 --- a/NDTensors/src/lib/AllocateData/src/AllocateDataLinearAlgebraExt/diagonal.jl +++ /dev/null @@ -1,16 +0,0 @@ -using Compat: allequal -using LinearAlgebra: LinearAlgebra, Diagonal -using ..AllocateData: AllocateData, to_dim - -function LinearAlgebra.Diagonal{T}( - ::AllocateData.UndefInitializer, axes::Tuple{Vararg{AbstractUnitRange,2}} -) where {T} - dims = to_dim.(axes) - @assert allequal(dims) - diag_dim = first(dims) - if VERSION < v"1.7.0-DEV.986" - # 
https://github.com/JuliaLang/julia/pull/38282 - return Diagonal(Vector{T}(Base.undef, diag_dim)) - end - return Diagonal{T}(Base.undef, diag_dim) -end diff --git a/NDTensors/src/lib/AllocateData/src/AllocateDataLinearAlgebraExt/hermitian.jl b/NDTensors/src/lib/AllocateData/src/AllocateDataLinearAlgebraExt/hermitian.jl deleted file mode 100644 index b3fb75da2a..0000000000 --- a/NDTensors/src/lib/AllocateData/src/AllocateDataLinearAlgebraExt/hermitian.jl +++ /dev/null @@ -1,19 +0,0 @@ -using LinearAlgebra: LinearAlgebra, Hermitian -using ..AllocateData: AllocateData, to_dim - -function (arraytype::Type{<:LinearAlgebra.Hermitian})( - ::AllocateData.UndefInitializer, axes::Tuple{Vararg{AbstractUnitRange,2}} -) - # TODO: Check the parent type of `arraytype`. - a = Array{eltype(arraytype)}(AllocateData.undef, axes) - return Hermitian(a) -end - -## TODO: For some reason this is broken, and gives an error: -## ERROR: UndefVarError: `T` not defined -## function LinearAlgebra.Hermitian{T}( -## ::AllocateData.UndefInitializer, axes::Tuple{Vararg{AbstractUnitRange,2}} -## ) where {T} -## a = Array{T}(AllocateData.undef, axes) -## return Hermitian(a) -## end diff --git a/NDTensors/src/lib/AllocateData/src/allocate.jl b/NDTensors/src/lib/AllocateData/src/allocate.jl deleted file mode 100644 index 1d7f492bae..0000000000 --- a/NDTensors/src/lib/AllocateData/src/allocate.jl +++ /dev/null @@ -1,38 +0,0 @@ -# Allocate undefined memory. -function allocate( - arraytype::Type{<:AbstractArray}, - ::UndefInitializer, - axes::Tuple{Vararg{AbstractUnitRange}}, -) - # Defaults to `undef` constructor, like `Base.similar`. - return arraytype(undef, axes) -end - -function allocate( - arraytype::Type{<:AbstractArray}, initializer::AbstractInitializer, dims::Tuple -) - return allocate(arraytype, initializer, to_axis.(dims)) -end - -# TODO: Move to `allocate_zeros`. -# Allocate an array filled with zeros. -function allocate(arraytype::Type{<:AbstractArray}, ::ZeroInitializer, axes::Tuple) - a = allocate(arraytype, axes) - # TODO: Use `VectorInterface.zerovector!!`. - a .= zero(eltype(a)) - return a -end - -function allocate_zeros(arraytype::Type{<:AbstractArray}, axes::Tuple) - return allocate(arraytype, zero_init, axes) -end - -# Default initializes undefined memory -function allocate(arraytype::Type{<:AbstractArray}, axes::Tuple) - return allocate(arraytype, default_initializer(arraytype), axes) -end - -# Default initializes undefined memory -function allocate(axes::Tuple) - return allocate(default_arraytype(), axes) -end diff --git a/NDTensors/src/lib/AllocateData/src/base.jl b/NDTensors/src/lib/AllocateData/src/base.jl deleted file mode 100644 index 840b7603e6..0000000000 --- a/NDTensors/src/lib/AllocateData/src/base.jl +++ /dev/null @@ -1,11 +0,0 @@ -# TODO: Move to `AllocatedDataBaseExt`. 
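Taken together, the `allocate` methods above support the following usage, shown here as a hedged sketch consistent with the `AllocateData` tests further down (exact exports may differ):

```julia
# Usage sketch for the `allocate` interface (names as defined in the
# deleted `AllocateData` module; treat as illustrative).
using NDTensors.AllocateData: AllocateData, allocate, allocate_zeros, zero_init

a = allocate(Array{Float64}, (2, 3))             # default: undefined memory
b = allocate(Array{Float64}, undef, (2, 3))      # `Base.undef` is forwarded
c = allocate(Array{Float64}, zero_init, (2, 3))  # zero-initialized
d = allocate_zeros(Array{Float64}, (2, 3))       # same as `c`
@assert iszero(c) && iszero(d)
```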
-
-# `Base.UndefInitializer`
-function allocate(arraytype::Type{<:AbstractArray}, ::Base.UndefInitializer, axes::Tuple)
-  return allocate(arraytype, undef, axes)
-end
-
-# Work around limited `Array` constructors (don't accept `axes`)
-function Base.Array{T}(::UndefInitializer, axes::Tuple{Vararg{AbstractUnitRange}}) where {T}
-  return Array{T}(Base.undef, to_dim.(axes))
-end
diff --git a/NDTensors/src/lib/AllocateData/src/defaults.jl b/NDTensors/src/lib/AllocateData/src/defaults.jl
deleted file mode 100644
index c14a1a558a..0000000000
--- a/NDTensors/src/lib/AllocateData/src/defaults.jl
+++ /dev/null
@@ -1,4 +0,0 @@
-default_initializer(::Type{<:AbstractArray}) = undef
-default_eltype() = Float64
-default_arraytype(elt::Type) = Array{elt}
-default_arraytype() = default_arraytype(default_eltype())
diff --git a/NDTensors/src/lib/AllocateData/src/initializers.jl b/NDTensors/src/lib/AllocateData/src/initializers.jl
deleted file mode 100644
index 80b3700214..0000000000
--- a/NDTensors/src/lib/AllocateData/src/initializers.jl
+++ /dev/null
@@ -1,29 +0,0 @@
-# Like:
-# undef = UndefInitializer()
-# undef_blocks = UndefBlocksInitializer()
-abstract type AbstractInitializer end
-
-struct ZeroInitializer <: AbstractInitializer end
-const zero_init = ZeroInitializer()
-
-# Equivalent to `Base.UndefInitializer` and `Base.undef`,
-# but a subtype of `AbstractInitializer`.
-struct UndefInitializer <: AbstractInitializer end
-const undef = UndefInitializer()
-
-# TODO: Move to `AllocateDataBaseExt`.
-# Forward constructors to Base constructors.
-function (arraytype::Type{<:AbstractArray})(::AllocateData.UndefInitializer, axes::Tuple)
-  return arraytype(Base.undef, axes)
-end
-
-# TODO: Move to `AllocateDataBlockArraysExt`.
-using BlockArrays: BlockArrays
-
-struct UndefBlocksInitializer <: AbstractInitializer end
-const undef_blocks = UndefBlocksInitializer()
-
-# TODO: Move to `AllocateDataBlockArraysExt`.
-base_initializer(::BlockArrays.UndefBlocksInitializer) = BlockArrays.undef_blocks
-
-# TODO: Add `rand_init`, `randn_init`?
diff --git a/NDTensors/src/lib/AllocateData/src/to_axis.jl b/NDTensors/src/lib/AllocateData/src/to_axis.jl
deleted file mode 100644
index 27d4e85b3c..0000000000
--- a/NDTensors/src/lib/AllocateData/src/to_axis.jl
+++ /dev/null
@@ -1,16 +0,0 @@
-# Convert dimension to axis
-to_axis(dim::Int) = Base.OneTo(dim)
-to_axis(dim::Integer) = to_axis(Int(dim))
-
-function to_dim(axis::AbstractUnitRange)
-  # Assume 1-based indexing
-  @assert isone(first(axis))
-  return length(axis)
-end
-
-# TODO: Move to `AllocateDataBlockArraysExt`.
-using BlockArrays: blockedrange - -# Blocked dimension/axis -to_axis(dim::Vector{Int}) = blockedrange(dim) -to_axis(dim::Vector{<:Integer}) = blockedrange(Int.(dim)) diff --git a/NDTensors/src/lib/AllocateData/test/Project.toml b/NDTensors/src/lib/AllocateData/test/Project.toml deleted file mode 100644 index ef491a529c..0000000000 --- a/NDTensors/src/lib/AllocateData/test/Project.toml +++ /dev/null @@ -1,3 +0,0 @@ -[deps] -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/NDTensors/src/lib/AllocateData/test/runtests.jl b/NDTensors/src/lib/AllocateData/test/runtests.jl deleted file mode 100644 index b824651feb..0000000000 --- a/NDTensors/src/lib/AllocateData/test/runtests.jl +++ /dev/null @@ -1,39 +0,0 @@ -@eval module $(gensym()) -using NDTensors.AllocateData: AllocateData, allocate, allocate_zeros, zero_init -using LinearAlgebra: Diagonal, Hermitian -using NDTensors.DiagonalArrays: DiagonalArray -using NDTensors.BlockSparseArrays: BlockSparseArray -using NDTensors.SparseArraysBase: SparseArrayDOK -using Test: @test, @testset, @test_broken, @test_throws - -const arraytypes = ( - Array, Diagonal, Hermitian, DiagonalArray, BlockSparseArray, SparseArrayDOK -) -const elts = (Float32, Float64, ComplexF32, ComplexF64) -const initializerss = ((undef,), (AllocateData.undef,), (zero_init,), ()) -const axess = ((2, 2), (1:2, 1:2)) -@testset "AllocateData (arraytype=$arraytype, eltype=$elt, initializer=$initializers, axes=$axes)" for arraytype in - arraytypes, - elt in elts, - initializers in initializerss, - axes in axess - - a = allocate(arraytype{elt}, initializers..., axes) - @test a isa arraytype{elt} - @test ndims(a) == length(axes) - @test size(a) == (2, 2) - if !isempty(initializers) && only(initializers) isa AllocateData.ZeroInitializer - @test iszero(a) - end - a = allocate_zeros(arraytype{elt}, axes) - @test a isa arraytype{elt} - @test ndims(a) == length(axes) - @test size(a) == (2, 2) - @test iszero(a) - if !(arraytype <: BlockSparseArray) - @test_throws AssertionError allocate(arraytype{elt}, (1:2, 0:2)) - else - @test_broken error("Constructor should throw error for non-one-based axes.") - end -end -end diff --git a/NDTensors/src/lib/BackendSelection/.JuliaFormatter.toml b/NDTensors/src/lib/BackendSelection/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/BackendSelection/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/BackendSelection/src/BackendSelection.jl b/NDTensors/src/lib/BackendSelection/src/BackendSelection.jl deleted file mode 100644 index fccd7fcc1c..0000000000 --- a/NDTensors/src/lib/BackendSelection/src/BackendSelection.jl +++ /dev/null @@ -1,9 +0,0 @@ -module BackendSelection -include("abstractbackend.jl") -include("backend_types.jl") - -# TODO: This is defined for backwards compatibility, -# delete this alias once downstream packages change over -# to using `BackendSelection`. 
-const AlgorithmSelection = BackendSelection
-end
diff --git a/NDTensors/src/lib/BackendSelection/src/abstractbackend.jl b/NDTensors/src/lib/BackendSelection/src/abstractbackend.jl
deleted file mode 100644
index e0c81c7be7..0000000000
--- a/NDTensors/src/lib/BackendSelection/src/abstractbackend.jl
+++ /dev/null
@@ -1,4 +0,0 @@
-abstract type AbstractBackend end
-
-backend_string(::AbstractBackend) = error("Not implemented")
-parameters(::AbstractBackend) = error("Not implemented")
diff --git a/NDTensors/src/lib/BackendSelection/src/backend_types.jl b/NDTensors/src/lib/BackendSelection/src/backend_types.jl
deleted file mode 100644
index a07a46f22c..0000000000
--- a/NDTensors/src/lib/BackendSelection/src/backend_types.jl
+++ /dev/null
@@ -1,57 +0,0 @@
-for type in (:Algorithm, :Backend)
-  @eval begin
-    """
-        $($type)
-
-    A type representing a backend for a function.
-
-    For example, a function might have multiple backend
-    implementations, which internally are selected with a `$($type)` type.
-
-    This allows users to extend functionality with a new implementation but
-    use the same interface.
-    """
-    struct $type{Back,Kwargs<:NamedTuple} <: AbstractBackend
-      kwargs::Kwargs
-    end
-
-    $type{Back}(kwargs::NamedTuple) where {Back} = $type{Back,typeof(kwargs)}(kwargs)
-    $type{Back}(; kwargs...) where {Back} = $type{Back}(NamedTuple(kwargs))
-    $type(s; kwargs...) = $type{Symbol(s)}(NamedTuple(kwargs))
-
-    $type(backend::$type) = backend
-
-    # TODO: Use `SetParameters`.
-    backend_string(::$type{Back}) where {Back} = string(Back)
-    parameters(backend::$type) = getfield(backend, :kwargs)
-
-    function Base.show(io::IO, backend::$type)
-      return print(io, "$($type) type ", backend_string(backend), ", ", parameters(backend))
-    end
-    Base.print(io::IO, backend::$type) =
-      print(io, backend_string(backend), ", ", parameters(backend))
-  end
-end
-
-# TODO: See if these can be moved inside of `@eval`.
-"""
-    @Algorithm_str
-
-A convenience macro for writing [`Algorithm`](@ref) types, typically used when
-adding methods to a function that supports multiple algorithm
-backends.
-"""
-macro Algorithm_str(s)
-  return :(Algorithm{$(Expr(:quote, Symbol(s)))})
-end
-
-"""
-    @Backend_str
-
-A convenience macro for writing [`Backend`](@ref) types, typically used when
-adding methods to a function that supports multiple
-backends.
-"""
-macro Backend_str(s)
-  return :(Backend{$(Expr(:quote, Symbol(s)))})
-end
diff --git a/NDTensors/src/lib/BackendSelection/test/Project.toml b/NDTensors/src/lib/BackendSelection/test/Project.toml
deleted file mode 100644
index ef491a529c..0000000000
--- a/NDTensors/src/lib/BackendSelection/test/Project.toml
+++ /dev/null
@@ -1,3 +0,0 @@
-[deps]
-NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
-Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
diff --git a/NDTensors/src/lib/BackendSelection/test/runtests.jl b/NDTensors/src/lib/BackendSelection/test/runtests.jl
deleted file mode 100644
index 5abde35cbd..0000000000
--- a/NDTensors/src/lib/BackendSelection/test/runtests.jl
+++ /dev/null
@@ -1,29 +0,0 @@
-@eval module $(gensym())
-using Test: @test, @testset
-using NDTensors: NDTensors
-using NDTensors.BackendSelection:
-  BackendSelection, Algorithm, Backend, @Algorithm_str, @Backend_str
-# TODO: This is defined for backwards compatibility,
-# delete this alias once downstream packages change over
-# to using `BackendSelection`.
-using NDTensors.AlgorithmSelection: AlgorithmSelection -@testset "BackendSelection" begin - # TODO: This is defined for backwards compatibility, - # delete this alias once downstream packages change over - # to using `BackendSelection`. - @test AlgorithmSelection === BackendSelection - for type in (Algorithm, Backend) - @testset "$type" begin - @test type("backend") isa type{:backend} - @test type(:backend) isa type{:backend} - backend = type("backend"; x=2, y=3) - @test backend isa type{:backend} - @test BackendSelection.parameters(backend) === (; x=2, y=3) - end - end - # Macro syntax. - @test Algorithm"backend"(; x=2, y=3) === Algorithm("backend"; x=2, y=3) - @test Backend"backend"(; x=2, y=3) === Backend("backend"; x=2, y=3) - @test isnothing(show(Algorithm(""))) -end -end diff --git a/NDTensors/src/lib/BaseExtensions/.JuliaFormatter.toml b/NDTensors/src/lib/BaseExtensions/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/BaseExtensions/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/BaseExtensions/src/BaseExtensions.jl b/NDTensors/src/lib/BaseExtensions/src/BaseExtensions.jl deleted file mode 100644 index 747f3ac675..0000000000 --- a/NDTensors/src/lib/BaseExtensions/src/BaseExtensions.jl +++ /dev/null @@ -1,3 +0,0 @@ -module BaseExtensions -include("replace.jl") -end diff --git a/NDTensors/src/lib/BaseExtensions/src/replace.jl b/NDTensors/src/lib/BaseExtensions/src/replace.jl deleted file mode 100644 index 9749f85a7a..0000000000 --- a/NDTensors/src/lib/BaseExtensions/src/replace.jl +++ /dev/null @@ -1,32 +0,0 @@ -replace(collection, replacements::Pair...) = Base.replace(collection, replacements...) -@static if VERSION < v"1.7.0-DEV.15" - # https://github.com/JuliaLang/julia/pull/38216 - # TODO: Add to `Compat.jl` or delete when we drop Julia 1.6 support. - # `replace` for Tuples. - function _replace(f::Base.Callable, t::Tuple, count::Int) - return if count == 0 || isempty(t) - t - else - x = f(t[1]) - (x, _replace(f, Base.tail(t), count - !==(x, t[1]))...) 
- end - end - - function replace(f::Base.Callable, t::Tuple; count::Integer=typemax(Int)) - return _replace(f, t, Base.check_count(count)) - end - - function _replace(t::Tuple, count::Int, old_new::Tuple{Vararg{Pair}}) - return _replace(t, count) do x - Base.@_inline_meta - for o_n in old_new - isequal(first(o_n), x) && return last(o_n) - end - return x - end - end - - function replace(t::Tuple, old_new::Pair...; count::Integer=typemax(Int)) - return _replace(t, Base.check_count(count), old_new) - end -end diff --git a/NDTensors/src/lib/BaseExtensions/test/Project.toml b/NDTensors/src/lib/BaseExtensions/test/Project.toml deleted file mode 100644 index 90187321ef..0000000000 --- a/NDTensors/src/lib/BaseExtensions/test/Project.toml +++ /dev/null @@ -1,3 +0,0 @@ -[deps] -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" -SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f" diff --git a/NDTensors/src/lib/BaseExtensions/test/runtests.jl b/NDTensors/src/lib/BaseExtensions/test/runtests.jl deleted file mode 100644 index d29334d85f..0000000000 --- a/NDTensors/src/lib/BaseExtensions/test/runtests.jl +++ /dev/null @@ -1,15 +0,0 @@ -using SafeTestsets: @safetestset - -@safetestset "BaseExtensions" begin - using NDTensors.BaseExtensions: BaseExtensions - using Test: @test, @testset - @testset "replace $(typeof(collection))" for collection in - (["a", "b", "c"], ("a", "b", "c")) - r1 = BaseExtensions.replace(collection, "b" => "d") - @test r1 == typeof(collection)(["a", "d", "c"]) - @test typeof(r1) === typeof(collection) - r2 = BaseExtensions.replace(collection, "b" => "d", "a" => "e") - @test r2 == typeof(collection)(["e", "d", "c"]) - @test typeof(r2) === typeof(collection) - end -end diff --git a/NDTensors/src/lib/BlockSparseArrays/.JuliaFormatter.toml b/NDTensors/src/lib/BlockSparseArrays/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/BlockSparseArrays/Project.toml b/NDTensors/src/lib/BlockSparseArrays/Project.toml deleted file mode 100644 index 4b6396f4a0..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/Project.toml +++ /dev/null @@ -1,3 +0,0 @@ -[deps] -BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" -LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" diff --git a/NDTensors/src/lib/BlockSparseArrays/README.md b/NDTensors/src/lib/BlockSparseArrays/README.md deleted file mode 100644 index dd74c97225..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/README.md +++ /dev/null @@ -1,192 +0,0 @@ -# BlockSparseArrays.jl - -A Julia `BlockSparseArray` type based on the `BlockArrays.jl` interface. - -It wraps an elementwise `SparseArray` type that uses a dictionary-of-keys -to store non-zero values, specifically a `Dictionary` from `Dictionaries.jl`. -`BlockArrays` reinterprets the `SparseArray` as a blocked data structure. 
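Before the worked example below, the dictionary-of-keys idea can be pictured with a toy stand-in (a hypothetical `DOKArray`, not the actual `SparseArray` type used here): only explicitly written entries occupy memory, and unset entries read as zero.

```julia
# Toy dictionary-of-keys array (illustrative only): values live in a
# `Dictionary` keyed by `CartesianIndex`; missing keys read as zero.
using Dictionaries: Dictionary, set!

struct DOKArray{T,N} <: AbstractArray{T,N}
  data::Dictionary{CartesianIndex{N},T}
  dims::NTuple{N,Int}
end
function DOKArray{T}(dims::Int...) where {T}
  N = length(dims)
  return DOKArray{T,N}(Dictionary{CartesianIndex{N},T}(), dims)
end

Base.size(a::DOKArray) = a.dims
function Base.getindex(a::DOKArray{T,N}, I::Vararg{Int,N}) where {T,N}
  return get(a.data, CartesianIndex(I), zero(T))
end
function Base.setindex!(a::DOKArray{T,N}, x, I::Vararg{Int,N}) where {T,N}
  set!(a.data, CartesianIndex(I), convert(T, x))
  return a
end

a = DOKArray{Float64}(2, 2)
a[1, 2] = 3.0  # only this entry is stored; a[2, 1] still reads as 0.0
```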
-
-````julia
-using NDTensors.BlockSparseArrays
-using BlockArrays: BlockArrays, blockedrange
-using Test
-
-function main()
-  # Block dimensions
-  i1 = [2, 3]
-  i2 = [2, 3]
-
-  i_axes = (blockedrange(i1), blockedrange(i2))
-
-  function block_size(axes, block)
-    return length.(getindex.(axes, BlockArrays.Block.(block.n)))
-  end
-
-  # Data
-  nz_blocks = BlockArrays.Block.([(1, 1), (2, 2)])
-  nz_block_sizes = [block_size(i_axes, nz_block) for nz_block in nz_blocks]
-  nz_block_lengths = prod.(nz_block_sizes)
-
-  # Blocks with discontiguous underlying data
-  d_blocks = randn.(nz_block_sizes)
-
-  # Blocks with contiguous underlying data
-  # d_data = PseudoBlockVector(randn(sum(nz_block_lengths)), nz_block_lengths)
-  # d_blocks = [reshape(@view(d_data[Block(i)]), block_size(i_axes, nz_blocks[i])) for i in 1:length(nz_blocks)]
-
-  B = BlockSparseArray(nz_blocks, d_blocks, i_axes)
-
-  # Access a block
-  B[BlockArrays.Block(1, 1)]
-
-  # Access a zero block, returns a zero matrix
-  B[BlockArrays.Block(1, 2)]
-
-  # Set a zero block
-  B[BlockArrays.Block(1, 2)] = randn(2, 3)
-
-  # Matrix multiplication (not optimized for sparsity yet)
-  @test B * B ≈ Array(B) * Array(B)
-
-  permuted_B = permutedims(B, (2, 1))
-  @test permuted_B isa BlockSparseArray
-  @test permuted_B == permutedims(Array(B), (2, 1))
-
-  @test B + B ≈ Array(B) + Array(B)
-  @test 2B ≈ 2Array(B)
-
-  @test reshape(B, ([4, 6, 6, 9],)) isa BlockSparseArray{<:Any,1}
-
-  return nothing
-end
-
-main()
-````
-
-# BlockSparseArrays.jl and BlockArrays.jl interface
-
-````julia
-using NDTensors.BlockSparseArrays
-using BlockArrays: BlockArrays
-
-i1 = [2, 3]
-i2 = [2, 3]
-B = BlockSparseArray{Float64}(i1, i2)
-B[BlockArrays.Block(1, 1)] = randn(2, 2)
-B[BlockArrays.Block(2, 2)] = randn(3, 3)
-
-# Minimal interface
-
-# Specifies the block structure
-@show collect.(BlockArrays.blockaxes(axes(B, 1)))
-
-# Index range of a block
-@show axes(B, 1)[BlockArrays.Block(1)]
-
-# Last index of each block
-@show BlockArrays.blocklasts(axes(B, 1))
-
-# Find the block containing the index
-@show BlockArrays.findblock(axes(B, 1), 3)
-
-# Retrieve a block
-@show B[BlockArrays.Block(1, 1)]
-@show BlockArrays.viewblock(B, BlockArrays.Block(1, 1))
-
-# Check block bounds
-@show BlockArrays.blockcheckbounds(B, 2, 2)
-@show BlockArrays.blockcheckbounds(B, BlockArrays.Block(2, 2))
-
-# Derived interface
-
-# Specifies the block structure
-@show collect(Iterators.product(BlockArrays.blockaxes(B)...))
-
-# Iterate over block views
-@show sum.(BlockArrays.eachblock(B))
-
-# Reshape into 1-d
-@show BlockArrays.blockvec(B)[BlockArrays.Block(1)]
-
-# Array-of-array view
-@show BlockArrays.blocks(B)[1, 1] == B[BlockArrays.Block(1, 1)]
-
-# Access an index within a block
-@show B[BlockArrays.Block(1, 1)[1, 1]] == B[1, 1]
-````
-
-# Proposals for interfaces based on `BlockArrays.jl`, `SparseArrays`, and `BlockSparseArrays.jl`
-
-```julia
-# BlockSparseArray interface
-
-# Define `eachblockindex`
-eachblockindex(B::BlockArrays.AbstractBlockArray) = Iterators.product(BlockArrays.blockaxes(B)...)
- -eachblockindex(B::BlockArrays.AbstractBlockArray, b::Block) # indices in a block - -blocksize(B::BlockArrays.AbstractBlockArray, b::Block) # size of a block -blocksize(axes, b::Block) # size of a block - -blocklength(B::BlockArrays.AbstractBlockArray, b::Block) # length of a block -blocklength(axes, b::Block) # length of a block - -# Other functions -BlockArrays.blocksize(B) # number of blocks in each dimension -BlockArrays.blocksizes(B) # length of blocks in each dimension - -tuple_block(Block(2, 2)) == (Block(2), Block(2)) # Block.(b.n) -blocksize(axes, b::Block) = map(axis -> length(axis[Block(b.n)]), axes) -blocksize(B, Block(2, 2)) = size(B[Block(2, 2)]) # size of a specified block - -# SparseArrays interface - -findnz(S) # outputs nonzero keys and values (SparseArrayKit.nonzero_pairs) -nonzeros(S) # vector of structural nonzeros (SparseArrayKit.nonzero_values) -nnz(S) # number of nonzero values (SparseArrayKit.nonzero_length) -rowvals(S) # row that each nonzero value in `nonzeros(S)` is in -nzrange(S, c) # range of linear indices into `nonzeros(S)` for values in column `c` -findall(!iszero, S) # CartesianIndices of numerical nonzeros -issparse(S) -sparse(A) # convert to sparse -dropzeros!(S) -droptol!(S, tol) - -# BlockSparseArrays.jl + SparseArrays - -blockfindnz(B) # outputs nonzero block indices/keys and block views -blocknonzeros(B) -blocknnz(S) -blockfindall(!iszero, B) -isblocksparse(B) -blocksparse(A) -blockdropzeros!(B) -blockdroptol!(B, tol) - -# SparseArrayKit.jl interface - -nonzero_pairs(a) # SparseArrays.findnz -nonzero_keys(a) # SparseArrays.? -nonzero_values(a) # SparseArrays.nonzeros -nonzero_length(a) # SparseArrays.nnz - -# BlockSparseArrays.jl + SparseArrayKit.jl interface - -block_nonzero_pairs -block_nonzero_keys -block_nonzero_values -block_nonzero_length -``` - -You can generate this README with: -```julia -using Literate -using NDTensors.BlockSparseArrays -dir = joinpath(pkgdir(BlockSparseArrays), "src", "BlockSparseArrays") -Literate.markdown(joinpath(dir, "examples", "README.jl"), dir; flavor=Literate.CommonMarkFlavor()) -``` - ---- - -*This page was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).* - diff --git a/NDTensors/src/lib/BlockSparseArrays/examples/Project.toml b/NDTensors/src/lib/BlockSparseArrays/examples/Project.toml deleted file mode 100644 index d1bf575ce0..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/examples/Project.toml +++ /dev/null @@ -1,4 +0,0 @@ -[deps] -BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/NDTensors/src/lib/BlockSparseArrays/examples/README.jl b/NDTensors/src/lib/BlockSparseArrays/examples/README.jl deleted file mode 100644 index 9fff22ae7b..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/examples/README.jl +++ /dev/null @@ -1,206 +0,0 @@ -# # BlockSparseArrays.jl -# -# A Julia `BlockSparseArray` type based on the `BlockArrays.jl` interface. -# -# It wraps an elementwise `SparseArray` type that uses a dictionary-of-keys -# to store non-zero values, specifically a `Dictionary` from `Dictionaries.jl`. -# `BlockArrays` reinterprets the `SparseArray` as a blocked data structure. 
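The examples in both the generated README above and the Literate source below build their axes with `blockedrange`; a quick standalone illustration of what that produces (results shown in comments):

```julia
using BlockArrays: Block, blockedrange, blocklengths

r = blockedrange([2, 3])  # one axis split into two blocks: 1:2 and 3:5
length(r)          # 5
r[Block(1)]        # 1:2
r[Block(2)]        # 3:5
blocklengths(r)    # [2, 3]
```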
- -using BlockArrays: BlockArrays, PseudoBlockVector, blockedrange -using NDTensors.BlockSparseArrays: BlockSparseArray, block_stored_length -using Test: @test, @test_broken - -function main() - Block = BlockArrays.Block - - ## Block dimensions - i1 = [2, 3] - i2 = [2, 3] - - i_axes = (blockedrange(i1), blockedrange(i2)) - - function block_size(axes, block) - return length.(getindex.(axes, BlockArrays.Block.(block.n))) - end - - ## Data - nz_blocks = Block.([(1, 1), (2, 2)]) - nz_block_sizes = [block_size(i_axes, nz_block) for nz_block in nz_blocks] - nz_block_lengths = prod.(nz_block_sizes) - - ## Blocks with contiguous underlying data - d_data = PseudoBlockVector(randn(sum(nz_block_lengths)), nz_block_lengths) - d_blocks = [ - reshape(@view(d_data[Block(i)]), block_size(i_axes, nz_blocks[i])) for - i in 1:length(nz_blocks) - ] - b = BlockSparseArray(nz_blocks, d_blocks, i_axes) - - @test block_stored_length(b) == 2 - - ## Blocks with discontiguous underlying data - d_blocks = randn.(nz_block_sizes) - b = BlockSparseArray(nz_blocks, d_blocks, i_axes) - - @test block_stored_length(b) == 2 - - ## Access a block - @test b[Block(1, 1)] == d_blocks[1] - - ## Access a zero block, returns a zero matrix - @test b[Block(1, 2)] == zeros(2, 3) - - ## Set a zero block - a₁₂ = randn(2, 3) - b[Block(1, 2)] = a₁₂ - @test b[Block(1, 2)] == a₁₂ - - ## Matrix multiplication (not optimized for sparsity yet) - @test b * b ≈ Array(b) * Array(b) - - permuted_b = permutedims(b, (2, 1)) - ## TODO: Fix this, broken. - @test_broken permuted_b isa BlockSparseArray - @test permuted_b == permutedims(Array(b), (2, 1)) - - @test b + b ≈ Array(b) + Array(b) - @test b + b isa BlockSparseArray - @test block_stored_length(b + b) == 2 - - scaled_b = 2b - @test scaled_b ≈ 2Array(b) - ## TODO: Fix this, broken. - @test_broken scaled_b isa BlockSparseArray - - ## TODO: Fix this, broken. 
- @test_broken reshape(b, ([4, 6, 6, 9],)) isa BlockSparseArray{<:Any,1} - - return nothing -end - -main() - -# # BlockSparseArrays.jl and BlockArrays.jl interface - -using BlockArrays: BlockArrays -using NDTensors.BlockSparseArrays: BlockSparseArray - -i1 = [2, 3] -i2 = [2, 3] -B = BlockSparseArray{Float64}(i1, i2) -B[BlockArrays.Block(1, 1)] = randn(2, 2) -B[BlockArrays.Block(2, 2)] = randn(3, 3) - -## Minimal interface - -## Specifies the block structure -@show collect.(BlockArrays.blockaxes(axes(B, 1))) - -## Index range of a block -@show axes(B, 1)[BlockArrays.Block(1)] - -## Last index of each block -@show BlockArrays.blocklasts(axes(B, 1)) - -## Find the block containing the index -@show BlockArrays.findblock(axes(B, 1), 3) - -## Retrieve a block -@show B[BlockArrays.Block(1, 1)] -@show BlockArrays.viewblock(B, BlockArrays.Block(1, 1)) - -## Check block bounds -@show BlockArrays.blockcheckbounds(B, 2, 2) -@show BlockArrays.blockcheckbounds(B, BlockArrays.Block(2, 2)) - -## Derived interface - -## Specifies the block structure -@show collect(Iterators.product(BlockArrays.blockaxes(B)...)) - -## Iterate over block views -@show sum.(BlockArrays.eachblock(B)) - -## Reshape into 1-d -@show BlockArrays.blockvec(B)[BlockArrays.Block(1)] - -## Array-of-array view -@show BlockArrays.blocks(B)[1, 1] == B[BlockArrays.Block(1, 1)] - -## Access an index within a block -@show B[BlockArrays.Block(1, 1)[1, 1]] == B[1, 1] - -# # Proposals for interfaces based on `BlockArrays.jl`, `SparseArrays`, and `BlockSparseArrays.jl` - -#= -```julia -# BlockSparseArray interface - -# Define `eachblockindex` -eachblockindex(B::BlockArrays.AbstractBlockArray) = Iterators.product(BlockArrays.blockaxes(B)...) - -eachblockindex(B::BlockArrays.AbstractBlockArray, b::Block) # indices in a block - -blocksize(B::BlockArrays.AbstractBlockArray, b::Block) # size of a block -blocksize(axes, b::Block) # size of a block - -blocklength(B::BlockArrays.AbstractBlockArray, b::Block) # length of a block -blocklength(axes, b::Block) # length of a block - -# Other functions -BlockArrays.blocksize(B) # number of blocks in each dimension -BlockArrays.blocksizes(B) # length of blocks in each dimension - -tuple_block(Block(2, 2)) == (Block(2), Block(2)) # Block.(b.n) -blocksize(axes, b::Block) = map(axis -> length(axis[Block(b.n)]), axes) -blocksize(B, Block(2, 2)) = size(B[Block(2, 2)]) # size of a specified block - -# SparseArrays interface - -findnz(S) # outputs nonzero keys and values (SparseArrayKit.nonzero_pairs) -nonzeros(S) # vector of structural nonzeros (SparseArrayKit.nonzero_values) -nnz(S) # number of nonzero values (SparseArrayKit.nonzero_length) -rowvals(S) # row that each nonzero value in `nonzeros(S)` is in -nzrange(S, c) # range of linear indices into `nonzeros(S)` for values in column `c` -findall(!iszero, S) # CartesianIndices of numerical nonzeros -issparse(S) -sparse(A) # convert to sparse -dropzeros!(S) -droptol!(S, tol) - -# BlockSparseArrays.jl + SparseArrays - -blockfindnz(B) # outputs nonzero block indices/keys and block views -blocknonzeros(B) -blocknnz(S) -blockfindall(!iszero, B) -isblocksparse(B) -blocksparse(A) -blockdropzeros!(B) -blockdroptol!(B, tol) - -# SparseArrayKit.jl interface - -nonzero_pairs(a) # SparseArrays.findnz -nonzero_keys(a) # SparseArrays.? 
-nonzero_values(a) # SparseArrays.nonzeros -nonzero_length(a) # SparseArrays.nnz - -# BlockSparseArrays.jl + SparseArrayKit.jl interface - -block_nonzero_pairs -block_nonzero_keys -block_nonzero_values -block_nonzero_length -``` -=# - -#= -You can generate this README with: -```julia -using Literate -using NDTensors.BlockSparseArrays -dir = joinpath(pkgdir(BlockSparseArrays), "src", "BlockSparseArrays") -Literate.markdown(joinpath(dir, "examples", "README.jl"), dir; flavor=Literate.CommonMarkFlavor()) -``` -=# diff --git a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysAdaptExt/src/BlockSparseArraysAdaptExt.jl b/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysAdaptExt/src/BlockSparseArraysAdaptExt.jl deleted file mode 100644 index 68cbf05e35..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysAdaptExt/src/BlockSparseArraysAdaptExt.jl +++ /dev/null @@ -1,5 +0,0 @@ -module BlockSparseArraysAdaptExt -using Adapt: Adapt, adapt -using ..BlockSparseArrays: AbstractBlockSparseArray, map_stored_blocks -Adapt.adapt_structure(to, x::AbstractBlockSparseArray) = map_stored_blocks(adapt(to), x) -end diff --git a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/src/BlockSparseArraysGradedAxesExt.jl b/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/src/BlockSparseArraysGradedAxesExt.jl deleted file mode 100644 index 85fcaab49f..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/src/BlockSparseArraysGradedAxesExt.jl +++ /dev/null @@ -1,156 +0,0 @@ -module BlockSparseArraysGradedAxesExt -using BlockArrays: - AbstractBlockVector, - AbstractBlockedUnitRange, - Block, - BlockIndexRange, - blockedrange, - blocks -using ..BlockSparseArrays: - BlockSparseArrays, - AbstractBlockSparseArray, - AbstractBlockSparseMatrix, - BlockSparseArray, - BlockSparseMatrix, - BlockSparseVector, - block_merge -using ...GradedAxes: - GradedAxes, - AbstractGradedUnitRange, - OneToOne, - blockmergesortperm, - blocksortperm, - dual, - invblockperm, - nondual, - tensor_product -using LinearAlgebra: Adjoint, Transpose -using ...TensorAlgebra: - TensorAlgebra, FusionStyle, BlockReshapeFusion, SectorFusion, fusedims, splitdims - -# TODO: Make a `ReduceWhile` library. -include("reducewhile.jl") - -TensorAlgebra.FusionStyle(::AbstractGradedUnitRange) = SectorFusion() - -# TODO: Need to implement this! Will require implementing -# `block_merge(a::AbstractUnitRange, blockmerger::BlockedUnitRange)`. -function BlockSparseArrays.block_merge( - a::AbstractGradedUnitRange, blockmerger::AbstractBlockedUnitRange -) - return a -end - -# Sort the blocks by sector and then merge the common sectors. -function block_mergesort(a::AbstractArray) - I = blockmergesortperm.(axes(a)) - return a[I...] -end - -function TensorAlgebra.fusedims( - ::SectorFusion, a::AbstractArray, axes::AbstractUnitRange... -) - # First perform a fusion using a block reshape. - a_reshaped = fusedims(BlockReshapeFusion(), a, axes...) - # Sort the blocks by sector and merge the equivalent sectors. - return block_mergesort(a_reshaped) -end - -function TensorAlgebra.splitdims( - ::SectorFusion, a::AbstractArray, split_axes::AbstractUnitRange... -) - # First, fuse axes to get `blockmergesortperm`. - # Then unpermute the blocks. 
- axes_prod = - groupreducewhile(tensor_product, split_axes, ndims(a); init=OneToOne()) do i, axis - return length(axis) ≤ length(axes(a, i)) - end - blockperms = invblockperm.(blocksortperm.(axes_prod)) - # TODO: This is doing extra copies of the blocks, - # use `@view a[axes_prod...]` instead. - # That will require implementing some reindexing logic - # for this combination of slicing. - a_unblocked = a[axes_prod...] - a_blockpermed = a_unblocked[blockperms...] - return splitdims(BlockReshapeFusion(), a_blockpermed, split_axes...) -end - -# This is a temporary fix for `eachindex` being broken for BlockSparseArrays -# with mixed dual and non-dual axes. This shouldn't be needed once -# GradedAxes is rewritten using BlockArrays v1. -# TODO: Delete this once GradedAxes is rewritten. -function Base.eachindex(a::AbstractBlockSparseArray) - return CartesianIndices(nondual.(axes(a))) -end - -# TODO: Handle this through some kind of trait dispatch, maybe -# a `SymmetryStyle`-like trait to check if the block sparse -# matrix has graded axes. -function Base.axes(a::Adjoint{<:Any,<:AbstractBlockSparseMatrix}) - return dual.(reverse(axes(a'))) -end - -# This definition is only needed since calls like -# `a[[Block(1), Block(2)]]` where `a isa AbstractGradedUnitRange` -# returns a `BlockSparseVector` instead of a `BlockVector` -# due to limitations in the `BlockArray` type not allowing -# axes with non-Int element types. -# TODO: Remove this once that issue is fixed, -# see https://github.com/JuliaArrays/BlockArrays.jl/pull/405. -using BlockArrays: BlockRange -using NDTensors.LabelledNumbers: label -function GradedAxes.blocklabels(a::BlockSparseVector) - return map(BlockRange(a)) do block - return label(blocks(a)[Int(block)]) - end -end - -# This is a temporary fix for `show` being broken for BlockSparseArrays -# with mixed dual and non-dual axes. This shouldn't be needed once -# GradedAxes is rewritten using BlockArrays v1. -# TODO: Delete this once GradedAxes is rewritten. -function blocksparse_show( - io::IO, mime::MIME"text/plain", a::AbstractArray, axes_a::Tuple; kwargs... -) - println(io, "typeof(axes) = ", typeof(axes_a), "\n") - println( - io, - "Warning: To temporarily circumvent a bug in printing BlockSparseArrays with mixtures of dual and non-dual axes, the types of the dual axes printed below might not be accurate. The types printed above this message are the correct ones.\n", - ) - return invoke(show, Tuple{IO,MIME"text/plain",AbstractArray}, io, mime, a; kwargs...) -end - -# This is a temporary fix for `show` being broken for BlockSparseArrays -# with mixed dual and non-dual axes. This shouldn't be needed once -# GradedAxes is rewritten using BlockArrays v1. -# TODO: Delete this once GradedAxes is rewritten. -function Base.show(io::IO, mime::MIME"text/plain", a::BlockSparseArray; kwargs...) - axes_a = axes(a) - a_nondual = BlockSparseArray(blocks(a), nondual.(axes(a))) - return blocksparse_show(io, mime, a_nondual, axes_a; kwargs...) -end - -# This is a temporary fix for `show` being broken for BlockSparseArrays -# with mixed dual and non-dual axes. This shouldn't be needed once -# GradedAxes is rewritten using BlockArrays v1. -# TODO: Delete this once GradedAxes is rewritten. -function Base.show( - io::IO, mime::MIME"text/plain", a::Adjoint{<:Any,<:BlockSparseMatrix}; kwargs... -) - axes_a = axes(a) - a_nondual = BlockSparseArray(blocks(a'), dual.(nondual.(axes(a'))))' - return blocksparse_show(io, mime, a_nondual, axes_a; kwargs...) 
-end
-
-# This is a temporary fix for `show` being broken for BlockSparseArrays
-# with mixed dual and non-dual axes. This shouldn't be needed once
-# GradedAxes is rewritten using BlockArrays v1.
-# TODO: Delete this once GradedAxes is rewritten.
-function Base.show(
-  io::IO, mime::MIME"text/plain", a::Transpose{<:Any,<:BlockSparseMatrix}; kwargs...
-)
-  axes_a = axes(a)
-  a_nondual = transpose(BlockSparseArray(transpose(blocks(a)), nondual.(axes(a))))
-  return blocksparse_show(io, mime, a_nondual, axes_a; kwargs...)
-end
-end
diff --git a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/src/reducewhile.jl b/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/src/reducewhile.jl
deleted file mode 100644
index 661c95e340..0000000000
--- a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/src/reducewhile.jl
+++ /dev/null
@@ -1,34 +0,0 @@
-"""
-    reducewhile(f, op, collection, state; init)
-
-reducewhile(x -> length(x) < 3, vcat, ["a", "b", "c", "d"], 2; init=String[]) ==
-  (["b", "c"], 4)
-"""
-function reducewhile(f, op, collection, state; init)
-  prev_result = init
-  prev_state = state
-  result = prev_result
-  while f(result)
-    prev_result = result
-    prev_state = state
-    value_and_state = iterate(collection, state)
-    isnothing(value_and_state) && break
-    value, state = value_and_state
-    result = op(result, value)
-  end
-  return prev_result, prev_state
-end
-
-"""
-    groupreducewhile(f, op, collection, ngroups; init)
-
-groupreducewhile((i, x) -> length(x) ≤ i, vcat, ["a", "b", "c", "d", "e", "f"], 3; init=String[]) ==
-  (["a"], ["b", "c"], ["d", "e", "f"])
-"""
-function groupreducewhile(f, op, collection, ngroups; init)
-  state = firstindex(collection)
-  return ntuple(ngroups) do group_number
-    result, state = reducewhile(x -> f(group_number, x), op, collection, state; init)
-    return result
-  end
-end
diff --git a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/Project.toml b/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/Project.toml
deleted file mode 100644
index d1bf575ce0..0000000000
--- a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/Project.toml
+++ /dev/null
@@ -1,4 +0,0 @@
-[deps]
-BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
-NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
-Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
diff --git a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl b/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl
deleted file mode 100644
index 2c2a504df5..0000000000
--- a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl
+++ /dev/null
@@ -1,322 +0,0 @@
-@eval module $(gensym())
-using Test: @test, @testset
-using BlockArrays:
-  AbstractBlockArray, Block, BlockedOneTo, blockedrange, blocklengths, blocksize
-using NDTensors.BlockSparseArrays: BlockSparseArray, block_stored_length
-using NDTensors.GradedAxes:
-  GradedAxes,
-  GradedOneTo,
-  GradedUnitRange,
-  GradedUnitRangeDual,
-  blocklabels,
-  dual,
-  gradedrange,
-  isdual
-using NDTensors.LabelledNumbers: label
-using NDTensors.SparseArraysBase: stored_length
-using NDTensors.SymmetrySectors: U1
-using NDTensors.TensorAlgebra: fusedims, splitdims
-using LinearAlgebra: adjoint
-using Random: randn!
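The `reducewhile`/`groupreducewhile` helpers deleted above drive the axis grouping in the `SectorFusion` `splitdims` method: axes are consumed greedily until each group's fused length reaches a target. A hedged standalone sketch of that grouping, assuming `groupreducewhile` is in scope and using an illustrative `fuse` in place of `tensor_product`:

```julia
# Greedily partition four axes into two groups whose fused lengths
# match the target lengths 6 and 4 (hypothetical names, for illustration).
fuse(r1, r2) = Base.OneTo(length(r1) * length(r2))

split_axes = (Base.OneTo(2), Base.OneTo(3), Base.OneTo(2), Base.OneTo(2))
target = (6, 4)

grouped = groupreducewhile(fuse, split_axes, 2; init=Base.OneTo(1)) do i, axis
  length(axis) ≤ target[i]
end
@assert grouped == (Base.OneTo(6), Base.OneTo(4))
```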
-function blockdiagonal!(f, a::AbstractArray) - for i in 1:minimum(blocksize(a)) - b = Block(ntuple(Returns(i), ndims(a))) - a[b] = f(a[b]) - end - return a -end - -const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) -@testset "BlockSparseArraysGradedAxesExt (eltype=$elt)" for elt in elts - @testset "map" begin - d1 = gradedrange([U1(0) => 2, U1(1) => 2]) - d2 = gradedrange([U1(0) => 2, U1(1) => 2]) - a = BlockSparseArray{elt}(d1, d2, d1, d2) - blockdiagonal!(randn!, a) - @test axes(a, 1) isa GradedOneTo - @test axes(view(a, 1:4, 1:4, 1:4, 1:4), 1) isa GradedOneTo - - for b in (a + a, 2 * a) - @test size(b) == (4, 4, 4, 4) - @test blocksize(b) == (2, 2, 2, 2) - @test blocklengths.(axes(b)) == ([2, 2], [2, 2], [2, 2], [2, 2]) - @test stored_length(b) == 32 - @test block_stored_length(b) == 2 - for i in 1:ndims(a) - @test axes(b, i) isa GradedOneTo - end - @test label(axes(b, 1)[Block(1)]) == U1(0) - @test label(axes(b, 1)[Block(2)]) == U1(1) - @test Array(b) isa Array{elt} - @test Array(b) == b - @test 2 * Array(a) == b - end - - # Test mixing graded axes and dense axes - # in addition/broadcasting. - for b in (a + Array(a), Array(a) + a) - @test size(b) == (4, 4, 4, 4) - @test blocksize(b) == (2, 2, 2, 2) - @test blocklengths.(axes(b)) == ([2, 2], [2, 2], [2, 2], [2, 2]) - @test stored_length(b) == 256 - @test block_stored_length(b) == 16 - for i in 1:ndims(a) - @test axes(b, i) isa BlockedOneTo{Int} - end - @test Array(a) isa Array{elt} - @test Array(a) == a - @test 2 * Array(a) == b - end - - b = a[2:3, 2:3, 2:3, 2:3] - @test size(b) == (2, 2, 2, 2) - @test blocksize(b) == (2, 2, 2, 2) - @test stored_length(b) == 2 - @test block_stored_length(b) == 2 - for i in 1:ndims(a) - @test axes(b, i) isa GradedOneTo - end - @test label(axes(b, 1)[Block(1)]) == U1(0) - @test label(axes(b, 1)[Block(2)]) == U1(1) - @test Array(a) isa Array{elt} - @test Array(a) == a - end - # TODO: Add tests for various slicing operations. - @testset "fusedims" begin - d1 = gradedrange([U1(0) => 1, U1(1) => 1]) - d2 = gradedrange([U1(0) => 1, U1(1) => 1]) - a = BlockSparseArray{elt}(d1, d2, d1, d2) - blockdiagonal!(randn!, a) - m = fusedims(a, (1, 2), (3, 4)) - for ax in axes(m) - @test ax isa GradedOneTo - @test blocklabels(ax) == [U1(0), U1(1), U1(2)] - end - for I in CartesianIndices(m) - if I ∈ CartesianIndex.([(1, 1), (4, 4)]) - @test !iszero(m[I]) - else - @test iszero(m[I]) - end - end - @test a[1, 1, 1, 1] == m[1, 1] - @test a[2, 2, 2, 2] == m[4, 4] - @test blocksize(m) == (3, 3) - @test a == splitdims(m, (d1, d2), (d1, d2)) - end - - @testset "dual axes" begin - r = gradedrange([U1(0) => 2, U1(1) => 2]) - for ax in ((r, r), (dual(r), r), (r, dual(r)), (dual(r), dual(r))) - a = BlockSparseArray{elt}(ax...) 
- @views for b in [Block(1, 1), Block(2, 2)] - a[b] = randn(elt, size(a[b])) - end - for dim in 1:ndims(a) - @test typeof(ax[dim]) === typeof(axes(a, dim)) - @test isdual(ax[dim]) == isdual(axes(a, dim)) - end - @test @view(a[Block(1, 1)])[1, 1] == a[1, 1] - @test @view(a[Block(1, 1)])[2, 1] == a[2, 1] - @test @view(a[Block(1, 1)])[1, 2] == a[1, 2] - @test @view(a[Block(1, 1)])[2, 2] == a[2, 2] - @test @view(a[Block(2, 2)])[1, 1] == a[3, 3] - @test @view(a[Block(2, 2)])[2, 1] == a[4, 3] - @test @view(a[Block(2, 2)])[1, 2] == a[3, 4] - @test @view(a[Block(2, 2)])[2, 2] == a[4, 4] - @test @view(a[Block(1, 1)])[1:2, 1:2] == a[1:2, 1:2] - @test @view(a[Block(2, 2)])[1:2, 1:2] == a[3:4, 3:4] - a_dense = Array(a) - @test eachindex(a) == CartesianIndices(size(a)) - for I in eachindex(a) - @test a[I] == a_dense[I] - end - @test axes(a') == dual.(reverse(axes(a))) - - @test isdual(axes(a', 1)) ≠ isdual(axes(a, 2)) - @test isdual(axes(a', 2)) ≠ isdual(axes(a, 1)) - @test isnothing(show(devnull, MIME("text/plain"), a)) - - # Check preserving dual in tensor algebra. - for b in (a + a, 2 * a, 3 * a - a) - @test Array(b) ≈ 2 * Array(a) - for dim in 1:ndims(a) - @test isdual(axes(b, dim)) == isdual(axes(a, dim)) - end - end - - @test isnothing(show(devnull, MIME("text/plain"), @view(a[Block(1, 1)]))) - @test @view(a[Block(1, 1)]) == a[Block(1, 1)] - end - - @testset "GradedOneTo" begin - r = gradedrange([U1(0) => 2, U1(1) => 2]) - a = BlockSparseArray{elt}(r, r) - @views for i in [Block(1, 1), Block(2, 2)] - a[i] = randn(elt, size(a[i])) - end - b = 2 * a - @test block_stored_length(b) == 2 - @test Array(b) == 2 * Array(a) - for i in 1:2 - @test axes(b, i) isa GradedOneTo - @test axes(a[:, :], i) isa GradedOneTo - end - - I = [Block(1)[1:1]] - @test a[I, :] isa AbstractBlockArray - @test a[:, I] isa AbstractBlockArray - @test size(a[I, I]) == (1, 1) - @test !isdual(axes(a[I, I], 1)) - end - - @testset "GradedUnitRange" begin - r = gradedrange([U1(0) => 2, U1(1) => 2])[1:3] - a = BlockSparseArray{elt}(r, r) - @views for i in [Block(1, 1), Block(2, 2)] - a[i] = randn(elt, size(a[i])) - end - b = 2 * a - @test block_stored_length(b) == 2 - @test Array(b) == 2 * Array(a) - for i in 1:2 - @test axes(b, i) isa GradedUnitRange - @test axes(a[:, :], i) isa GradedUnitRange - end - - I = [Block(1)[1:1]] - @test a[I, :] isa AbstractBlockArray - @test axes(a[I, :], 1) isa GradedOneTo - @test axes(a[I, :], 2) isa GradedUnitRange - - @test a[:, I] isa AbstractBlockArray - @test axes(a[:, I], 2) isa GradedOneTo - @test axes(a[:, I], 1) isa GradedUnitRange - @test size(a[I, I]) == (1, 1) - @test !isdual(axes(a[I, I], 1)) - end - - # Test case when all axes are dual. 
- @testset "dual GradedOneTo" begin - r = gradedrange([U1(-1) => 2, U1(1) => 2]) - a = BlockSparseArray{elt}(dual(r), dual(r)) - @views for i in [Block(1, 1), Block(2, 2)] - a[i] = randn(elt, size(a[i])) - end - b = 2 * a - @test block_stored_length(b) == 2 - @test Array(b) == 2 * Array(a) - for i in 1:2 - @test axes(b, i) isa GradedUnitRangeDual - @test axes(a[:, :], i) isa GradedUnitRangeDual - end - I = [Block(1)[1:1]] - @test a[I, :] isa AbstractBlockArray - @test a[:, I] isa AbstractBlockArray - @test size(a[I, I]) == (1, 1) - @test isdual(axes(a[I, :], 2)) - @test isdual(axes(a[:, I], 1)) - @test isdual(axes(a[I, :], 1)) - @test isdual(axes(a[:, I], 2)) - @test isdual(axes(a[I, I], 1)) - @test isdual(axes(a[I, I], 2)) - end - - @testset "dual GradedUnitRange" begin - r = gradedrange([U1(0) => 2, U1(1) => 2])[1:3] - a = BlockSparseArray{elt}(dual(r), dual(r)) - @views for i in [Block(1, 1), Block(2, 2)] - a[i] = randn(elt, size(a[i])) - end - b = 2 * a - @test block_stored_length(b) == 2 - @test Array(b) == 2 * Array(a) - for i in 1:2 - @test axes(b, i) isa GradedUnitRangeDual - @test axes(a[:, :], i) isa GradedUnitRangeDual - end - - I = [Block(1)[1:1]] - @test a[I, :] isa AbstractBlockArray - @test a[:, I] isa AbstractBlockArray - @test size(a[I, I]) == (1, 1) - @test isdual(axes(a[I, :], 2)) - @test isdual(axes(a[:, I], 1)) - @test isdual(axes(a[I, :], 1)) - @test isdual(axes(a[:, I], 2)) - @test isdual(axes(a[I, I], 1)) - @test isdual(axes(a[I, I], 2)) - end - - @testset "dual BlockedUnitRange" begin # self dual - r = blockedrange([2, 2]) - a = BlockSparseArray{elt}(dual(r), dual(r)) - @views for i in [Block(1, 1), Block(2, 2)] - a[i] = randn(elt, size(a[i])) - end - b = 2 * a - @test block_stored_length(b) == 2 - @test Array(b) == 2 * Array(a) - @test a[:, :] isa BlockSparseArray - for i in 1:2 - @test axes(b, i) isa BlockedOneTo - @test axes(a[:, :], i) isa BlockedOneTo - end - - I = [Block(1)[1:1]] - @test a[I, :] isa BlockSparseArray - @test a[:, I] isa BlockSparseArray - @test size(a[I, I]) == (1, 1) - @test !isdual(axes(a[I, I], 1)) - end - - # Test case when all axes are dual from taking the adjoint. 
- for r in ( - gradedrange([U1(0) => 2, U1(1) => 2]), - gradedrange([U1(0) => 2, U1(1) => 2])[begin:end], - ) - a = BlockSparseArray{elt}(r, r) - @views for i in [Block(1, 1), Block(2, 2)] - a[i] = randn(elt, size(a[i])) - end - b = 2 * a' - @test block_stored_length(b) == 2 - @test Array(b) == 2 * Array(a)' - for ax in axes(b) - @test ax isa typeof(dual(r)) - end - - @test !isdual(axes(a, 1)) - @test !isdual(axes(a, 2)) - @test isdual(axes(a', 1)) - @test isdual(axes(a', 2)) - @test isdual(axes(b, 1)) - @test isdual(axes(b, 2)) - @test isdual(axes(copy(a'), 1)) - @test isdual(axes(copy(a'), 2)) - - I = [Block(1)[1:1]] - @test size(b[I, :]) == (1, 4) - @test size(b[:, I]) == (4, 1) - @test size(b[I, I]) == (1, 1) - end - end - @testset "Matrix multiplication" begin - r = gradedrange([U1(0) => 2, U1(1) => 3]) - a1 = BlockSparseArray{elt}(dual(r), r) - a1[Block(1, 2)] = randn(elt, size(@view(a1[Block(1, 2)]))) - a1[Block(2, 1)] = randn(elt, size(@view(a1[Block(2, 1)]))) - a2 = BlockSparseArray{elt}(dual(r), r) - a2[Block(1, 2)] = randn(elt, size(@view(a2[Block(1, 2)]))) - a2[Block(2, 1)] = randn(elt, size(@view(a2[Block(2, 1)]))) - @test Array(a1 * a2) ≈ Array(a1) * Array(a2) - @test Array(a1' * a2') ≈ Array(a1') * Array(a2') - - a2 = BlockSparseArray{elt}(r, dual(r)) - a2[Block(1, 2)] = randn(elt, size(@view(a2[Block(1, 2)]))) - a2[Block(2, 1)] = randn(elt, size(@view(a2[Block(2, 1)]))) - @test Array(a1' * a2) ≈ Array(a1') * Array(a2) - @test Array(a1 * a2') ≈ Array(a1) * Array(a2') - end -end -end diff --git a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysTensorAlgebraExt/src/BlockSparseArraysTensorAlgebraExt.jl b/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysTensorAlgebraExt/src/BlockSparseArraysTensorAlgebraExt.jl deleted file mode 100644 index 74ebd6593c..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysTensorAlgebraExt/src/BlockSparseArraysTensorAlgebraExt.jl +++ /dev/null @@ -1,24 +0,0 @@ -module BlockSparseArraysTensorAlgebraExt -using BlockArrays: AbstractBlockedUnitRange -using ..BlockSparseArrays: AbstractBlockSparseArray, block_reshape -using ...GradedAxes: tensor_product -using ...TensorAlgebra: TensorAlgebra, FusionStyle, BlockReshapeFusion - -function TensorAlgebra.:⊗(a1::AbstractBlockedUnitRange, a2::AbstractBlockedUnitRange) - return tensor_product(a1, a2) -end - -TensorAlgebra.FusionStyle(::AbstractBlockedUnitRange) = BlockReshapeFusion() - -function TensorAlgebra.fusedims( - ::BlockReshapeFusion, a::AbstractArray, axes::AbstractUnitRange... -) - return block_reshape(a, axes) -end - -function TensorAlgebra.splitdims( - ::BlockReshapeFusion, a::AbstractArray, axes::AbstractUnitRange... 
-)
-  return block_reshape(a, axes)
-end
-end
diff --git a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysTensorAlgebraExt/test/Project.toml b/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysTensorAlgebraExt/test/Project.toml
deleted file mode 100644
index d1bf575ce0..0000000000
--- a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysTensorAlgebraExt/test/Project.toml
+++ /dev/null
@@ -1,4 +0,0 @@
-[deps]
-BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
-NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
-Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
diff --git a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysTensorAlgebraExt/test/runtests.jl b/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysTensorAlgebraExt/test/runtests.jl
deleted file mode 100644
index e5c1e5069e..0000000000
--- a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysTensorAlgebraExt/test/runtests.jl
+++ /dev/null
@@ -1,17 +0,0 @@
-@eval module $(gensym())
-using Test: @test, @testset
-using NDTensors.BlockSparseArrays: BlockSparseArray
-using NDTensors.TensorAlgebra: contract
-using NDTensors.SparseArraysBase: densearray
-@testset "BlockSparseArraysTensorAlgebraExt (eltype=$elt)" for elt in (
-  Float32, Float64, Complex{Float32}, Complex{Float64}
-)
-  a1 = BlockSparseArray{elt}([1, 2], [2, 3], [3, 2])
-  a2 = BlockSparseArray{elt}([2, 2], [3, 2], [2, 3])
-  a_dest, dimnames_dest = contract(a1, (1, -1, -2), a2, (2, -2, -1))
-  a_dest_dense, dimnames_dest_dense = contract(
-    densearray(a1), (1, -1, -2), densearray(a2), (2, -2, -1)
-  )
-  @test a_dest ≈ a_dest_dense
-end
-end
diff --git a/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl b/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl
deleted file mode 100644
index c57906fdac..0000000000
--- a/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl
+++ /dev/null
@@ -1,591 +0,0 @@
-using ArrayLayouts: ArrayLayouts, MemoryLayout, sub_materialize
-using BlockArrays:
-  BlockArrays,
-  AbstractBlockArray,
-  AbstractBlockVector,
-  Block,
-  BlockIndex,
-  BlockIndexRange,
-  BlockRange,
-  BlockSlice,
-  BlockVector,
-  BlockedOneTo,
-  BlockedUnitRange,
-  BlockedVector,
-  block,
-  blockaxes,
-  blockedrange,
-  blockindex,
-  blocks,
-  findblock,
-  findblockindex
-using Compat: allequal
-using Dictionaries: Dictionary, Indices
-using ..GradedAxes: blockedunitrange_getindices, to_blockindices
-using ..SparseArraysBase: SparseArraysBase, stored_length, stored_indices
-
-# A return type for `blocks(array)` when `array` isn't blocked.
-# Represents a vector with just that single block.
-struct SingleBlockView{T,N,Array<:AbstractArray{T,N}} <: AbstractArray{T,N}
-  array::Array
-end
-blocks_maybe_single(a) = blocks(a)
-blocks_maybe_single(a::Array) = SingleBlockView(a)
-function Base.getindex(a::SingleBlockView{<:Any,N}, index::Vararg{Int,N}) where {N}
-  @assert all(isone, index)
-  return a.array
-end
-
-# A wrapper around a potentially blocked array that is not blocked.
-struct NonBlockedArray{T,N,Array<:AbstractArray{T,N}} <: AbstractArray{T,N}
-  array::Array
-end
-Base.size(a::NonBlockedArray) = size(a.array)
-Base.getindex(a::NonBlockedArray{<:Any,N}, I::Vararg{Integer,N}) where {N} = a.array[I...]
-# Views of `NonBlockedArray`/`NonBlockedVector` are eager.
-# This fixes an issue in Julia 1.11 where reindexing defaults to using views.
-# TODO: Maybe reconsider this design, and allow views to work in slicing.
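-# A small sketch of the eager `view` defined below: `view` just forwards to
-# `getindex`, so slicing a `NonBlockedVector` returns the indexed data
-# directly rather than a lazy `SubArray`:
-# ```julia
-# v = NonBlockedVector(collect(1:4))
-# view(v, 2:3) == v[2:3]  # both eagerly index into the wrapped array
-# ```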
-Base.view(a::NonBlockedArray, I...) = a[I...]
-BlockArrays.blocks(a::NonBlockedArray) = SingleBlockView(a.array)
-const NonBlockedVector{T,Array} = NonBlockedArray{T,1,Array}
-NonBlockedVector(array::AbstractVector) = NonBlockedArray(array)
-
-# BlockIndices works around an issue that the indices of BlockSlice
-# are restricted to AbstractUnitRange{Int}.
-struct BlockIndices{B,T<:Integer,I<:AbstractVector{T}} <: AbstractVector{T}
-  blocks::B
-  indices::I
-end
-for f in (:axes, :unsafe_indices, :axes1, :first, :last, :size, :length, :unsafe_length)
-  @eval Base.$f(S::BlockIndices) = Base.$f(S.indices)
-end
-Base.getindex(S::BlockIndices, i::Integer) = getindex(S.indices, i)
-function Base.getindex(S::BlockIndices, i::BlockSlice{<:Block{1}})
-  # TODO: Check that `i.indices` is consistent with `S.indices`.
-  # It seems like this isn't handling the case where `i` is a
-  # subslice of a block correctly (i.e. it ignores `i.indices`).
-  @assert length(S.indices[Block(i)]) == length(i.indices)
-  return BlockSlice(S.blocks[Int(Block(i))], S.indices[Block(i)])
-end
-
-# This is used in slicing like:
-# a = BlockSparseArray{Float64}([2, 2, 2, 2], [2, 2, 2, 2])
-# I = BlockedVector([Block(4), Block(3), Block(2), Block(1)], [2, 2])
-# a[I, I]
-function Base.getindex(
-  S::BlockIndices{<:AbstractBlockVector{<:Block{1}}}, i::BlockSlice{<:Block{1}}
-)
-  # TODO: Check for consistency of indices.
-  # Wrapping the indices in `NonBlockedVector` reinterprets the blocked indices
-  # as a single block, since the result shouldn't be blocked.
-  return NonBlockedVector(BlockIndices(S.blocks[Block(i)], S.indices[Block(i)]))
-end
-function Base.getindex(
-  S::BlockIndices{<:BlockedVector{<:Block{1},<:BlockRange{1}}}, i::BlockSlice{<:Block{1}}
-)
-  return i
-end
-# Views of `BlockIndices` are eager.
-# This fixes an issue in Julia 1.11 where reindexing defaults to using views.
-Base.view(S::BlockIndices, i) = S[i]
-
-# Used in indexing such as:
-# ```julia
-# a = BlockSparseArray{Float64}([2, 2, 2, 2], [2, 2, 2, 2])
-# I = BlockedVector([Block(4), Block(3), Block(2), Block(1)], [2, 2])
-# b = @view a[I, I]
-# @view b[Block(1, 1)[1:2, 2:2]]
-# ```
-# This is similar to the definition:
-# blocksparse_to_indices(a, inds, I::Tuple{UnitRange{<:Integer},Vararg{Any}})
-function Base.getindex(
-  a::NonBlockedVector{<:Integer,<:BlockIndices}, I::UnitRange{<:Integer}
-)
-  ax = only(axes(a.array.indices))
-  brs = to_blockindices(ax, I)
-  inds = blockedunitrange_getindices(ax, I)
-  return NonBlockedVector(a.array[BlockSlice(brs, inds)])
-end
-
-function Base.getindex(S::BlockIndices, i::BlockSlice{<:BlockRange{1}})
-  # TODO: Check that `i.indices` is consistent with `S.indices`.
-  # TODO: Turn this into a `blockedunitrange_getindices` definition.
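-  # Select the sub-blocks picked out by `i.block` and rebuild the flat index
-  # list blockwise with `mortar`, keeping blocks aligned with their indices.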
- subblocks = S.blocks[Int.(i.block)] - subindices = mortar( - map(1:length(i.block)) do I - r = blocks(i.indices)[I] - return S.indices[first(r)]:S.indices[last(r)] - end, - ) - return BlockIndices(subblocks, subindices) -end - -# Used when performing slices like: -# @views a[[Block(2), Block(1)]][2:4, 2:4] -function Base.getindex(S::BlockIndices, i::BlockSlice{<:BlockVector{<:BlockIndex{1}}}) - subblocks = mortar( - map(blocks(i.block)) do br - return S.blocks[Int(Block(br))][only(br.indices)] - end, - ) - subindices = mortar( - map(blocks(i.block)) do br - S.indices[br] - end, - ) - return BlockIndices(subblocks, subindices) -end - -# Similar to the definition of `BlockArrays.BlockSlices`: -# ```julia -# const BlockSlices = Union{Base.Slice,BlockSlice{<:BlockRange{1}}} -# ``` -# but includes `BlockIndices`, where the blocks aren't contiguous. -const BlockSliceCollection = Union{ - Base.Slice,BlockSlice{<:BlockRange{1}},BlockIndices{<:Vector{<:Block{1}}} -} -const SubBlockSliceCollection = BlockIndices{ - <:BlockVector{<:BlockIndex{1},<:Vector{<:BlockIndexRange{1}}} -} - -# TODO: This is type piracy. This is used in `reindex` when making -# views of blocks of sliced block arrays, for example: -# ```julia -# a = BlockSparseArray{elt}(undef, ([2, 3], [2, 3])) -# b = @view a[[Block(1)[1:1], Block(2)[1:2]], [Block(1)[1:1], Block(2)[1:2]]] -# b[Block(1, 1)] -# ``` -# Without this change, BlockArrays has the slicing behavior: -# ```julia -# julia> mortar([Block(1)[1:1], Block(2)[1:2]])[BlockSlice(Block(2), 2:3)] -# 2-element Vector{BlockIndex{1, Tuple{Int64}, Tuple{Int64}}}: -# Block(2)[1] -# Block(2)[2] -# ``` -# while with this change it has the slicing behavior: -# ```julia -# julia> mortar([Block(1)[1:1], Block(2)[1:2]])[BlockSlice(Block(2), 2:3)] -# Block(2)[1:2] -# ``` -# i.e. it preserves the types of the blocks better. Upstream this fix to -# BlockArrays.jl. Also consider overloading `reindex` so that it calls -# a custom `getindex` function to avoid type piracy in the meantime. -# Also fix this in BlockArrays: -# ```julia -# julia> mortar([Block(1)[1:1], Block(2)[1:2]])[Block(2)] -# 2-element Vector{BlockIndex{1, Tuple{Int64}, Tuple{Int64}}}: -# Block(2)[1] -# Block(2)[2] -# ``` -function Base.getindex( - a::BlockVector{<:BlockIndex{1},<:AbstractVector{<:BlockIndexRange{1}}}, - I::BlockSlice{<:Block{1}}, -) - # Check that the block slice corresponds to the correct block. - @assert I.indices == only(axes(a))[Block(I)] - return blocks(a)[Int(Block(I))] -end - -# Outputs a `BlockUnitRange`. -function sub_axis(a::AbstractUnitRange, indices) - return error("Not implemented") -end - -# TODO: Use `GradedAxes.blockedunitrange_getindices`. -# Outputs a `BlockUnitRange`. -function sub_axis(a::AbstractUnitRange, indices::AbstractUnitRange) - return only(axes(blockedunitrange_getindices(a, indices))) -end - -# TODO: Use `GradedAxes.blockedunitrange_getindices`. -# Outputs a `BlockUnitRange`. -function sub_axis(a::AbstractUnitRange, indices::BlockSlice{<:BlockRange{1}}) - return sub_axis(a, indices.block) -end - -# TODO: Use `GradedAxes.blockedunitrange_getindices`. -# Outputs a `BlockUnitRange`. -function sub_axis(a::AbstractUnitRange, indices::BlockSlice{<:Block{1}}) - return sub_axis(a, Block(indices)) -end - -# TODO: Use `GradedAxes.blockedunitrange_getindices`. -# Outputs a `BlockUnitRange`. 
-function sub_axis(a::AbstractUnitRange, indices::BlockSlice{<:BlockIndexRange{1}}) - return sub_axis(a, indices.block) -end - -function sub_axis(a::AbstractUnitRange, indices::BlockIndices) - return sub_axis(a, indices.blocks) -end - -# TODO: Use `GradedAxes.blockedunitrange_getindices`. -# Outputs a `BlockUnitRange`. -function sub_axis(a::AbstractUnitRange, indices::Block) - return only(axes(blockedunitrange_getindices(a, indices))) -end - -# TODO: Use `GradedAxes.blockedunitrange_getindices`. -# Outputs a `BlockUnitRange`. -function sub_axis(a::AbstractUnitRange, indices::BlockIndexRange) - return only(axes(blockedunitrange_getindices(a, indices))) -end - -# TODO: Use `GradedAxes.blockedunitrange_getindices`. -# Outputs a `BlockUnitRange`. -function sub_axis(a::AbstractUnitRange, indices::AbstractVector{<:Block}) - return blockedrange([length(a[index]) for index in indices]) -end - -# TODO: Use `GradedAxes.blockedunitrange_getindices`. -# TODO: Merge blocks. -function sub_axis(a::AbstractUnitRange, indices::BlockVector{<:Block}) - # `collect` is needed here, otherwise a `PseudoBlockVector` is - # constructed. - return blockedrange([length(a[index]) for index in collect(indices)]) -end - -# TODO: Use `Tuple` conversion once -# BlockArrays.jl PR is merged. -block_to_cartesianindex(b::Block) = CartesianIndex(b.n) - -function blocks_to_cartesianindices(i::Indices{<:Block}) - return block_to_cartesianindex.(i) -end - -function blocks_to_cartesianindices(d::Dictionary{<:Block}) - return Dictionary(blocks_to_cartesianindices(eachindex(d)), d) -end - -function block_reshape(a::AbstractArray, dims::Tuple{Vararg{Vector{Int}}}) - return block_reshape(a, blockedrange.(dims)) -end - -function block_reshape(a::AbstractArray, dims::Vararg{Vector{Int}}) - return block_reshape(a, dims) -end - -tuple_oneto(n) = ntuple(identity, n) - -function block_reshape(a::AbstractArray, axes::Tuple{Vararg{AbstractUnitRange}}) - reshaped_blocks_a = reshape(blocks(a), blocklength.(axes)) - reshaped_a = similar(a, axes) - for I in stored_indices(reshaped_blocks_a) - block_size_I = map(i -> length(axes[i][Block(I[i])]), tuple_oneto(length(axes))) - # TODO: Better converter here. - reshaped_a[Block(Tuple(I))] = reshape(reshaped_blocks_a[I], block_size_I) - end - return reshaped_a -end - -function block_reshape(a::AbstractArray, axes::Vararg{AbstractUnitRange}) - return block_reshape(a, axes) -end - -function cartesianindices(axes::Tuple, b::Block) - return CartesianIndices(ntuple(dim -> axes[dim][Tuple(b)[dim]], length(axes))) -end - -# Get the range within a block. -function blockindexrange(axis::AbstractUnitRange, r::AbstractUnitRange) - bi1 = findblockindex(axis, first(r)) - bi2 = findblockindex(axis, last(r)) - b = block(bi1) - # Range must fall within a single block. - @assert b == block(bi2) - i1 = blockindex(bi1) - i2 = blockindex(bi2) - return b[i1:i2] -end - -function blockindexrange( - axes::Tuple{Vararg{AbstractUnitRange,N}}, I::CartesianIndices{N} -) where {N} - brs = blockindexrange.(axes, I.indices) - b = Block(block.(brs)) - rs = map(br -> only(br.indices), brs) - return b[rs...] -end - -function blockindexrange(a::AbstractArray, I::CartesianIndices) - return blockindexrange(axes(a), I) -end - -# Get the blocks the range spans across. -function blockrange(axis::AbstractUnitRange, r::UnitRange) - return findblock(axis, first(r)):findblock(axis, last(r)) -end - -# Occurs when slicing with `a[2:4, 2:4]`. 
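-# Sketch (hypothetical axis): for `axis = blockedrange([2, 2, 2])`, a sliced
-# range covering elements 2:4 starts in block 1 and ends in block 2, so
-# `blockrange` returns `Block(1):Block(2)`.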
-function blockrange(axis::BlockedOneTo{<:Integer}, r::BlockedUnitRange{<:Integer}) - # TODO: Check the blocks are commensurate. - return findblock(axis, first(r)):findblock(axis, last(r)) -end - -function blockrange(axis::AbstractUnitRange, r::Int) - ## return findblock(axis, r) - return error("Slicing with integer values isn't supported.") -end - -function blockrange(axis::AbstractUnitRange, r::AbstractVector{<:Block{1}}) - for b in r - @assert b ∈ blockaxes(axis, 1) - end - return r -end - -# This handles changing the blocking, for example: -# a = BlockSparseArray{Float64}([2, 2, 2, 2], [2, 2, 2, 2]) -# I = blockedrange([4, 4]) -# a[I, I] -# TODO: Generalize to `AbstractBlockedUnitRange`. -function blockrange(axis::BlockedOneTo{<:Integer}, r::BlockedOneTo{<:Integer}) - # TODO: Probably this is incorrect and should be something like: - # return findblock(axis, first(r)):findblock(axis, last(r)) - return only(blockaxes(r)) -end - -# This handles block merging: -# a = BlockSparseArray{Float64}([2, 2, 2, 2], [2, 2, 2, 2]) -# I = BlockedVector(Block.(1:4), [2, 2]) -# I = BlockVector(Block.(1:4), [2, 2]) -# I = BlockedVector([Block(4), Block(3), Block(2), Block(1)], [2, 2]) -# I = BlockVector([Block(4), Block(3), Block(2), Block(1)], [2, 2]) -# a[I, I] -function blockrange(axis::BlockedOneTo{<:Integer}, r::AbstractBlockVector{<:Block{1}}) - for b in r - @assert b ∈ blockaxes(axis, 1) - end - return only(blockaxes(r)) -end - -using BlockArrays: BlockSlice -function blockrange(axis::AbstractUnitRange, r::BlockSlice) - return blockrange(axis, r.block) -end - -function blockrange(a::AbstractUnitRange, r::BlockIndices) - return blockrange(a, r.blocks) -end - -function blockrange(axis::AbstractUnitRange, r::Block{1}) - return r:r -end - -function blockrange(axis::AbstractUnitRange, r::BlockIndexRange) - return Block(r):Block(r) -end - -function blockrange(axis::AbstractUnitRange, r::AbstractVector{<:BlockIndexRange{1}}) - return error("Slicing not implemented for range of type `$(typeof(r))`.") -end - -function blockrange( - axis::AbstractUnitRange, - r::BlockVector{<:BlockIndex{1},<:AbstractVector{<:BlockIndexRange{1}}}, -) - return map(b -> Block(b), blocks(r)) -end - -# This handles slicing with `:`/`Colon()`. -function blockrange(axis::AbstractUnitRange, r::Base.Slice) - # TODO: Maybe use `BlockRange`, but that doesn't output - # the same thing. - return only(blockaxes(axis)) -end - -function blockrange(axis::AbstractUnitRange, r::NonBlockedVector) - return Block(1):Block(1) -end - -function blockrange(axis::AbstractUnitRange, r) - return error("Slicing not implemented for range of type `$(typeof(r))`.") -end - -# This takes a range of indices `indices` of array `a` -# and maps it to the range of indices within block `block`. -function blockindices(a::AbstractArray, block::Block, indices::Tuple) - return blockindices(axes(a), block, indices) -end - -function blockindices(axes::Tuple, block::Block, indices::Tuple) - return blockindices.(axes, Tuple(block), indices) -end - -function blockindices(axis::AbstractUnitRange, block::Block, indices::AbstractUnitRange) - indices_within_block = intersect(indices, axis[block]) - if iszero(length(indices_within_block)) - # Falls outside of block - return 1:0 - end - return only(blockindexrange(axis, indices_within_block).indices) -end - -# This catches the case of `Vector{<:Block{1}}`. -# `BlockRange` gets wrapped in a `BlockSlice`, which is handled properly -# by the version with `indices::AbstractUnitRange`. 
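-# Sketch (hypothetical axis): for `axis = blockedrange([2, 3])` and the
-# whole-block selection `indices = [Block(2), Block(1)]`,
-# `blockindices(axis, Block(2), indices)` returns `Base.OneTo(3)`, since all
-# of `Block(2)` is kept.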
-# TODO: This should get fixed in a better way inside of `BlockArrays`. -function blockindices( - axis::AbstractUnitRange, block::Block, indices::AbstractVector{<:Block{1}} -) - if block ∉ indices - # Falls outside of block - return 1:0 - end - return Base.OneTo(length(axis[block])) -end - -function blockindices(a::AbstractUnitRange, b::Block, r::BlockIndices) - return blockindices(a, b, r.blocks) -end - -function blockindices( - a::AbstractUnitRange, - b::Block, - r::BlockVector{<:BlockIndex{1},<:AbstractVector{<:BlockIndexRange{1}}}, -) - # TODO: Change to iterate over `BlockRange(r)` - # once https://github.com/JuliaArrays/BlockArrays.jl/issues/404 - # is fixed. - for bl in blocks(r) - if b == Block(bl) - return only(bl.indices) - end - end - return error("Block not found.") -end - -function cartesianindices(a::AbstractArray, b::Block) - return cartesianindices(axes(a), b) -end - -# Output which blocks of `axis` are contained within the unit range `range`. -# The start and end points must match. -function findblocks(axis::AbstractUnitRange, range::AbstractUnitRange) - # TODO: Add a test that the start and end points of the ranges match. - return findblock(axis, first(range)):findblock(axis, last(range)) -end - -function block_stored_indices(a::AbstractArray) - return Block.(Tuple.(stored_indices(blocks(a)))) -end - -_block(indices) = block(indices) -_block(indices::CartesianIndices) = Block(ntuple(Returns(1), ndims(indices))) - -function combine_axes(as::Vararg{Tuple}) - @assert allequal(length.(as)) - ndims = length(first(as)) - return ntuple(ndims) do dim - dim_axes = map(a -> a[dim], as) - return reduce(BlockArrays.combine_blockaxes, dim_axes) - end -end - -# Returns `BlockRange` -# Convert the block of the axes to blocks of the subaxes. -function subblocks(axes::Tuple, subaxes::Tuple, block::Block) - @assert length(axes) == length(subaxes) - return BlockRange( - ntuple(length(axes)) do dim - findblocks(subaxes[dim], axes[dim][Tuple(block)[dim]]) - end, - ) -end - -# Returns `Vector{<:Block}` -function subblocks(axes::Tuple, subaxes::Tuple, blocks) - return mapreduce(vcat, blocks; init=eltype(blocks)[]) do block - return vec(subblocks(axes, subaxes, block)) - end -end - -# Returns `Vector{<:CartesianIndices}` -function blocked_cartesianindices(axes::Tuple, subaxes::Tuple, blocks) - return map(subblocks(axes, subaxes, blocks)) do block - return cartesianindices(subaxes, block) - end -end - -# Represents a view of a block of a blocked array. -struct BlockView{T,N,Array<:AbstractArray{T,N}} <: AbstractArray{T,N} - array::Array - block::Tuple{Vararg{Block{1,Int},N}} -end -function Base.axes(a::BlockView) - # TODO: Try to avoid conversion to `Base.OneTo{Int}`, or just convert - # the element type to `Int` with `Int.(...)`. - # When the axes of `a.array` are `GradedOneTo`, the block is `LabelledUnitRange`, - # which has element type `LabelledInteger`. That causes conversion problems - # in some generic Base Julia code, for example when printing `BlockView`. - return ntuple(ndims(a)) do dim - return Base.OneTo{Int}(only(axes(axes(a.array, dim)[a.block[dim]]))) - end -end -function Base.size(a::BlockView) - return length.(axes(a)) -end -function Base.getindex(a::BlockView{<:Any,N}, index::Vararg{Int,N}) where {N} - return blocks(a.array)[Int.(a.block)...][index...] -end -function Base.setindex!(a::BlockView{<:Any,N}, value, index::Vararg{Int,N}) where {N} - blocks(a.array)[Int.(a.block)...] = blocks(a.array)[Int.(a.block)...] - blocks(a.array)[Int.(a.block)...][index...] 
= value - return a -end - -function SparseArraysBase.stored_length(a::BlockView) - # TODO: Store whether or not the block is stored already as - # a Bool in `BlockView`. - I = CartesianIndex(Int.(a.block)) - # TODO: Use `block_stored_indices`. - if I ∈ stored_indices(blocks(a.array)) - return stored_length(blocks(a.array)[I]) - end - return 0 -end - -## # Allow more fine-grained control: -## function ArrayLayouts.sub_materialize(layout, a::BlockView, ax) -## return blocks(a.array)[Int.(a.block)...] -## end -## function ArrayLayouts.sub_materialize(layout, a::BlockView) -## return sub_materialize(layout, a, axes(a)) -## end -## function ArrayLayouts.sub_materialize(a::BlockView) -## return sub_materialize(MemoryLayout(a), a) -## end -function ArrayLayouts.sub_materialize(a::BlockView) - return blocks(a.array)[Int.(a.block)...] -end - -function view!(a::AbstractArray{<:Any,N}, index::Block{N}) where {N} - return view!(a, Tuple(index)...) -end -function view!(a::AbstractArray{<:Any,N}, index::Vararg{Block{1},N}) where {N} - blocks(a)[Int.(index)...] = blocks(a)[Int.(index)...] - return blocks(a)[Int.(index)...] -end - -function view!(a::AbstractArray{<:Any,N}, index::BlockIndexRange{N}) where {N} - # TODO: Is there a better code pattern for this? - indices = ntuple(N) do dim - return Tuple(Block(index))[dim][index.indices[dim]] - end - return view!(a, indices...) -end -function view!(a::AbstractArray{<:Any,N}, index::Vararg{BlockIndexRange{1},N}) where {N} - b = view!(a, Block.(index)...) - r = map(index -> only(index.indices), index) - return @view b[r...] -end - -using MacroTools: @capture -using NDTensors.SparseArraysBase: is_getindex_expr -macro view!(expr) - if !is_getindex_expr(expr) - error("@view must be used with getindex syntax (as `@view! a[i,j,...]`)") - end - @capture(expr, array_[indices__]) - return :(view!($(esc(array)), $(esc.(indices)...))) -end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysSparseArraysBaseExt/BlockArraysSparseArraysBaseExt.jl b/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysSparseArraysBaseExt/BlockArraysSparseArraysBaseExt.jl deleted file mode 100644 index 56b0080d92..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysSparseArraysBaseExt/BlockArraysSparseArraysBaseExt.jl +++ /dev/null @@ -1,11 +0,0 @@ -using BlockArrays: AbstractBlockArray, BlocksView -using ..SparseArraysBase: SparseArraysBase, stored_length - -function SparseArraysBase.stored_length(a::AbstractBlockArray) - return sum(b -> stored_length(b), blocks(a); init=zero(Int)) -end - -# TODO: Handle `BlocksView` wrapping a sparse array? 
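-# For a dense `BlocksView`, every block position counts as stored, so its
-# storage indices are simply all of its Cartesian indices, as defined below.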
-function SparseArraysBase.storage_indices(a::BlocksView) - return CartesianIndices(a) -end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/BlockSparseArrays.jl b/NDTensors/src/lib/BlockSparseArrays/src/BlockSparseArrays.jl deleted file mode 100644 index 576fe03ce6..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/src/BlockSparseArrays.jl +++ /dev/null @@ -1,28 +0,0 @@ -module BlockSparseArrays -include("BlockArraysExtensions/BlockArraysExtensions.jl") -include("blocksparsearrayinterface/blocksparsearrayinterface.jl") -include("blocksparsearrayinterface/linearalgebra.jl") -include("blocksparsearrayinterface/blockzero.jl") -include("blocksparsearrayinterface/broadcast.jl") -include("blocksparsearrayinterface/map.jl") -include("blocksparsearrayinterface/arraylayouts.jl") -include("blocksparsearrayinterface/views.jl") -include("blocksparsearrayinterface/cat.jl") -include("abstractblocksparsearray/abstractblocksparsearray.jl") -include("abstractblocksparsearray/wrappedabstractblocksparsearray.jl") -include("abstractblocksparsearray/abstractblocksparsematrix.jl") -include("abstractblocksparsearray/abstractblocksparsevector.jl") -include("abstractblocksparsearray/views.jl") -include("abstractblocksparsearray/arraylayouts.jl") -include("abstractblocksparsearray/sparsearrayinterface.jl") -include("abstractblocksparsearray/broadcast.jl") -include("abstractblocksparsearray/map.jl") -include("abstractblocksparsearray/linearalgebra.jl") -include("abstractblocksparsearray/cat.jl") -include("blocksparsearray/defaults.jl") -include("blocksparsearray/blocksparsearray.jl") -include("BlockArraysSparseArraysBaseExt/BlockArraysSparseArraysBaseExt.jl") -include("../ext/BlockSparseArraysTensorAlgebraExt/src/BlockSparseArraysTensorAlgebraExt.jl") -include("../ext/BlockSparseArraysGradedAxesExt/src/BlockSparseArraysGradedAxesExt.jl") -include("../ext/BlockSparseArraysAdaptExt/src/BlockSparseArraysAdaptExt.jl") -end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/abstractblocksparsearray.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/abstractblocksparsearray.jl deleted file mode 100644 index cfe1ef5ab6..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/abstractblocksparsearray.jl +++ /dev/null @@ -1,78 +0,0 @@ -using BlockArrays: - BlockArrays, AbstractBlockArray, Block, BlockIndex, BlockedUnitRange, blocks -using ..SparseArraysBase: sparse_getindex, sparse_setindex! - -# TODO: Delete this. This function was replaced -# by `stored_length` but is still used in `NDTensors`. -function nonzero_keys end - -abstract type AbstractBlockSparseArray{T,N} <: AbstractBlockArray{T,N} end - -## Base `AbstractArray` interface - -Base.axes(::AbstractBlockSparseArray) = error("Not implemented") - -# TODO: Add some logic to unwrapping wrapped arrays. -# TODO: Decide what a good default is. -blockstype(arraytype::Type{<:AbstractBlockSparseArray}) = SparseArrayDOK{AbstractArray} -function blockstype(arraytype::Type{<:AbstractBlockSparseArray{T}}) where {T} - return SparseArrayDOK{AbstractArray{T}} -end -function blockstype(arraytype::Type{<:AbstractBlockSparseArray{T,N}}) where {T,N} - return SparseArrayDOK{AbstractArray{T,N},N} -end - -# Specialized in order to fix ambiguity error with `BlockArrays`. -function Base.getindex(a::AbstractBlockSparseArray{<:Any,N}, I::Vararg{Int,N}) where {N} - return blocksparse_getindex(a, I...) -end - -# Specialized in order to fix ambiguity error with `BlockArrays`. 
-function Base.getindex(a::AbstractBlockSparseArray{<:Any,0}) - return blocksparse_getindex(a) -end - -## # Fix ambiguity error with `BlockArrays`. -## function Base.getindex(a::AbstractBlockSparseArray{<:Any,N}, I::Block{N}) where {N} -## return ArrayLayouts.layout_getindex(a, I) -## end -## -## # Fix ambiguity error with `BlockArrays`. -## function Base.getindex(a::AbstractBlockSparseArray{<:Any,1}, I::Block{1}) -## return ArrayLayouts.layout_getindex(a, I) -## end -## -## # Fix ambiguity error with `BlockArrays`. -## function Base.getindex(a::AbstractBlockSparseArray, I::Vararg{AbstractVector}) -## ## return blocksparse_getindex(a, I...) -## return ArrayLayouts.layout_getindex(a, I...) -## end - -# Specialized in order to fix ambiguity error with `BlockArrays`. -function Base.setindex!( - a::AbstractBlockSparseArray{<:Any,N}, value, I::Vararg{Int,N} -) where {N} - blocksparse_setindex!(a, value, I...) - return a -end - -# Fix ambiguity error. -function Base.setindex!(a::AbstractBlockSparseArray{<:Any,0}, value) - blocksparse_setindex!(a, value) - return a -end - -function Base.setindex!( - a::AbstractBlockSparseArray{<:Any,N}, value, I::Vararg{Block{1},N} -) where {N} - blocksize = ntuple(dim -> length(axes(a, dim)[I[dim]]), N) - if size(value) ≠ blocksize - throw( - DimensionMismatch( - "Trying to set block $(Block(Int.(I)...)), which has a size $blocksize, with data of size $(size(value)).", - ), - ) - end - blocks(a)[Int.(I)...] = value - return a -end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/abstractblocksparsematrix.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/abstractblocksparsematrix.jl deleted file mode 100644 index 0c2c578781..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/abstractblocksparsematrix.jl +++ /dev/null @@ -1 +0,0 @@ -const AbstractBlockSparseMatrix{T} = AbstractBlockSparseArray{T,2} diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/abstractblocksparsevector.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/abstractblocksparsevector.jl deleted file mode 100644 index ae1441c5a8..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/abstractblocksparsevector.jl +++ /dev/null @@ -1 +0,0 @@ -const AbstractBlockSparseVector{T} = AbstractBlockSparseArray{T,1} diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/arraylayouts.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/arraylayouts.jl deleted file mode 100644 index 4e79b8fb81..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/arraylayouts.jl +++ /dev/null @@ -1,53 +0,0 @@ -using ArrayLayouts: ArrayLayouts, DualLayout, MemoryLayout, MulAdd -using BlockArrays: BlockLayout -using ..SparseArraysBase: SparseLayout -using ..TypeParameterAccessors: parenttype, similartype - -function ArrayLayouts.MemoryLayout(arraytype::Type{<:AnyAbstractBlockSparseArray}) - outer_layout = typeof(MemoryLayout(blockstype(arraytype))) - inner_layout = typeof(MemoryLayout(blocktype(arraytype))) - return BlockLayout{outer_layout,inner_layout}() -end - -# TODO: Generalize to `BlockSparseVectorLike`/`AnyBlockSparseVector`. -function ArrayLayouts.MemoryLayout( - arraytype::Type{<:Adjoint{<:Any,<:AbstractBlockSparseVector}} -) - return DualLayout{typeof(MemoryLayout(parenttype(arraytype)))}() -end -# TODO: Generalize to `BlockSparseVectorLike`/`AnyBlockSparseVector`. 
-function ArrayLayouts.MemoryLayout( - arraytype::Type{<:Transpose{<:Any,<:AbstractBlockSparseVector}} -) - return DualLayout{typeof(MemoryLayout(parenttype(arraytype)))}() -end - -function Base.similar( - mul::MulAdd{<:BlockLayout{<:SparseLayout},<:BlockLayout{<:SparseLayout},<:Any,<:Any,A,B}, - elt::Type, - axes, -) where {A,B} - # TODO: Check that this equals `similartype(blocktype(B), elt, axes)`, - # or maybe promote them? - output_blocktype = similartype(blocktype(A), elt, axes) - return similar(BlockSparseArray{elt,length(axes),output_blocktype}, axes) -end - -# Materialize a SubArray view. -function ArrayLayouts.sub_materialize(layout::BlockLayout{<:SparseLayout}, a, axes) - # TODO: Define `blocktype`/`blockstype` for `SubArray` wrapping `BlockSparseArray`. - # TODO: Use `similar`? - blocktype_a = blocktype(parent(a)) - a_dest = BlockSparseArray{eltype(a),length(axes),blocktype_a}(axes) - a_dest .= a - return a_dest -end - -# Materialize a SubArray view. -function ArrayLayouts.sub_materialize( - layout::BlockLayout{<:SparseLayout}, a, axes::Tuple{Vararg{Base.OneTo}} -) - a_dest = blocktype(a)(undef, length.(axes)) - a_dest .= a - return a_dest -end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/broadcast.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/broadcast.jl deleted file mode 100644 index 96841be6f1..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/broadcast.jl +++ /dev/null @@ -1,48 +0,0 @@ -using BlockArrays: AbstractBlockedUnitRange, BlockSlice -using Base.Broadcast: Broadcast - -function Broadcast.BroadcastStyle(arraytype::Type{<:AnyAbstractBlockSparseArray}) - return BlockSparseArrayStyle{ndims(arraytype)}() -end - -# Fix ambiguity error with `BlockArrays`. -function Broadcast.BroadcastStyle( - arraytype::Type{ - <:SubArray{ - <:Any, - <:Any, - <:AbstractBlockSparseArray, - <:Tuple{BlockSlice{<:Any,<:Any,<:AbstractBlockedUnitRange},Vararg{Any}}, - }, - }, -) - return BlockSparseArrayStyle{ndims(arraytype)}() -end -function Broadcast.BroadcastStyle( - arraytype::Type{ - <:SubArray{ - <:Any, - <:Any, - <:AbstractBlockSparseArray, - <:Tuple{ - BlockSlice{<:Any,<:Any,<:AbstractBlockedUnitRange}, - BlockSlice{<:Any,<:Any,<:AbstractBlockedUnitRange}, - Vararg{Any}, - }, - }, - }, -) - return BlockSparseArrayStyle{ndims(arraytype)}() -end -function Broadcast.BroadcastStyle( - arraytype::Type{ - <:SubArray{ - <:Any, - <:Any, - <:AbstractBlockSparseArray, - <:Tuple{Any,BlockSlice{<:Any,<:Any,<:AbstractBlockedUnitRange},Vararg{Any}}, - }, - }, -) - return BlockSparseArrayStyle{ndims(arraytype)}() -end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/cat.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/cat.jl deleted file mode 100644 index 3023037113..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/cat.jl +++ /dev/null @@ -1,7 +0,0 @@ -# TODO: Change to `AnyAbstractBlockSparseArray`. -function Base.cat(as::AnyAbstractBlockSparseArray...; dims) - # TODO: Use `sparse_cat` instead, currently - # that erroneously allocates too many blocks that are - # zero and shouldn't be stored. 
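-  # Usage sketch (hypothetical block sizes): concatenating two 2×2-blocked
-  # matrices along `dims=1` stacks the block rows, e.g.
-  # `blocksize(cat(a, a; dims=1)) == (4, 2)`.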
- return blocksparse_cat(as...; dims) -end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/linearalgebra.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/linearalgebra.jl deleted file mode 100644 index 144ea47593..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/linearalgebra.jl +++ /dev/null @@ -1,18 +0,0 @@ -using LinearAlgebra: Adjoint, Transpose - -# Like: https://github.com/JuliaLang/julia/blob/v1.11.1/stdlib/LinearAlgebra/src/transpose.jl#L184 -# but also takes the dual of the axes. -# Fixes an issue raised in: -# https://github.com/ITensor/ITensors.jl/issues/1336#issuecomment-2353434147 -function Base.copy(a::Adjoint{T,<:AbstractBlockSparseMatrix{T}}) where {T} - a_dest = similar(parent(a), axes(a)) - a_dest .= a - return a_dest -end - -# More efficient than the generic `LinearAlgebra` version. -function Base.copy(a::Transpose{T,<:AbstractBlockSparseMatrix{T}}) where {T} - a_dest = similar(parent(a), axes(a)) - a_dest .= a - return a_dest -end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/map.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/map.jl deleted file mode 100644 index 30ca37c53b..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/map.jl +++ /dev/null @@ -1,151 +0,0 @@ -using ArrayLayouts: LayoutArray -using BlockArrays: blockisequal -using LinearAlgebra: Adjoint, Transpose -using ..SparseArraysBase: - SparseArraysBase, - SparseArrayStyle, - sparse_map!, - sparse_copy!, - sparse_copyto!, - sparse_permutedims!, - sparse_mapreduce, - sparse_iszero, - sparse_isreal - -# Returns `Vector{<:CartesianIndices}` -function union_stored_blocked_cartesianindices(as::Vararg{AbstractArray}) - combined_axes = combine_axes(axes.(as)...) - stored_blocked_cartesianindices_as = map(as) do a - return blocked_cartesianindices(axes(a), combined_axes, block_stored_indices(a)) - end - return ∪(stored_blocked_cartesianindices_as...) -end - -# This is used by `map` to get the output axes. -# This is type piracy, try to avoid this, maybe requires defining `map`. -## Base.promote_shape(a1::Tuple{Vararg{BlockedUnitRange}}, a2::Tuple{Vararg{BlockedUnitRange}}) = combine_axes(a1, a2) - -reblock(a) = a - -# If the blocking of the slice doesn't match the blocking of the -# parent array, reblock according to the blocking of the parent array. -function reblock( - a::SubArray{<:Any,<:Any,<:AbstractBlockSparseArray,<:Tuple{Vararg{AbstractUnitRange}}} -) - # TODO: This relies on the behavior that slicing a block sparse - # array with a UnitRange inherits the blocking of the underlying - # block sparse array, we might change that default behavior - # so this might become something like `@blocked parent(a)[...]`. - return @view parent(a)[UnitRange{Int}.(parentindices(a))...] -end - -function reblock( - a::SubArray{<:Any,<:Any,<:AbstractBlockSparseArray,<:Tuple{Vararg{NonBlockedArray}}} -) - return @view parent(a)[map(I -> I.array, parentindices(a))...] -end - -function reblock( - a::SubArray{ - <:Any, - <:Any, - <:AbstractBlockSparseArray, - <:Tuple{Vararg{BlockIndices{<:AbstractBlockVector{<:Block{1}}}}}, - }, -) - # Remove the blocking. - return @view parent(a)[map(I -> Vector(I.blocks), parentindices(a))...] -end - -# TODO: Rewrite this so that it takes the blocking structure -# made by combining the blocking of the axes (i.e. the blocking that -# is used to determine `union_stored_blocked_cartesianindices(...)`). 
-# `reblock` is a partial solution to that, but a bit ad-hoc. -# TODO: Move to `blocksparsearrayinterface/map.jl`. -function SparseArraysBase.sparse_map!( - ::BlockSparseArrayStyle, f, a_dest::AbstractArray, a_srcs::Vararg{AbstractArray} -) - a_dest, a_srcs = reblock(a_dest), reblock.(a_srcs) - for I in union_stored_blocked_cartesianindices(a_dest, a_srcs...) - BI_dest = blockindexrange(a_dest, I) - BI_srcs = map(a_src -> blockindexrange(a_src, I), a_srcs) - # TODO: Investigate why this doesn't work: - # block_dest = @view a_dest[_block(BI_dest)] - block_dest = blocks_maybe_single(a_dest)[Int.(Tuple(_block(BI_dest)))...] - # TODO: Investigate why this doesn't work: - # block_srcs = ntuple(i -> @view(a_srcs[i][_block(BI_srcs[i])]), length(a_srcs)) - block_srcs = ntuple(length(a_srcs)) do i - return blocks_maybe_single(a_srcs[i])[Int.(Tuple(_block(BI_srcs[i])))...] - end - subblock_dest = @view block_dest[BI_dest.indices...] - subblock_srcs = ntuple(i -> @view(block_srcs[i][BI_srcs[i].indices...]), length(a_srcs)) - # TODO: Use `map!!` to handle immutable blocks. - map!(f, subblock_dest, subblock_srcs...) - # Replace the entire block, handles initializing new blocks - # or if blocks are immutable. - blocks(a_dest)[Int.(Tuple(_block(BI_dest)))...] = block_dest - end - return a_dest -end - -# TODO: Implement this. -# function SparseArraysBase.sparse_mapreduce(::BlockSparseArrayStyle, f, a_dest::AbstractArray, a_srcs::Vararg{AbstractArray}) -# end - -function Base.map!(f, a_dest::AbstractArray, a_srcs::Vararg{AnyAbstractBlockSparseArray}) - sparse_map!(f, a_dest, a_srcs...) - return a_dest -end - -function Base.map(f, as::Vararg{AnyAbstractBlockSparseArray}) - return f.(as...) -end - -function Base.copy!(a_dest::AbstractArray, a_src::AnyAbstractBlockSparseArray) - sparse_copy!(a_dest, a_src) - return a_dest -end - -function Base.copyto!(a_dest::AbstractArray, a_src::AnyAbstractBlockSparseArray) - sparse_copyto!(a_dest, a_src) - return a_dest -end - -# Fix ambiguity error -function Base.copyto!(a_dest::LayoutArray, a_src::AnyAbstractBlockSparseArray) - sparse_copyto!(a_dest, a_src) - return a_dest -end - -function Base.copyto!( - a_dest::AbstractMatrix, a_src::Transpose{T,<:AbstractBlockSparseMatrix{T}} -) where {T} - sparse_copyto!(a_dest, a_src) - return a_dest -end - -function Base.copyto!( - a_dest::AbstractMatrix, a_src::Adjoint{T,<:AbstractBlockSparseMatrix{T}} -) where {T} - sparse_copyto!(a_dest, a_src) - return a_dest -end - -function Base.permutedims!(a_dest, a_src::AnyAbstractBlockSparseArray, perm) - sparse_permutedims!(a_dest, a_src, perm) - return a_dest -end - -function Base.mapreduce(f, op, as::Vararg{AnyAbstractBlockSparseArray}; kwargs...) - return sparse_mapreduce(f, op, as...; kwargs...) -end - -# TODO: Why isn't this calling `mapreduce` already? -function Base.iszero(a::AnyAbstractBlockSparseArray) - return sparse_iszero(blocks(a)) -end - -# TODO: Why isn't this calling `mapreduce` already? 
-function Base.isreal(a::AnyAbstractBlockSparseArray) - return sparse_isreal(blocks(a)) -end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/sparsearrayinterface.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/sparsearrayinterface.jl deleted file mode 100644 index 31dbca27e7..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/sparsearrayinterface.jl +++ /dev/null @@ -1,38 +0,0 @@ -using BlockArrays: Block -using ..SparseArraysBase: SparseArraysBase, sparse_storage, stored_indices - -# Structure storing the block sparse storage -struct BlockSparseStorage{Arr<:AbstractBlockSparseArray} - array::Arr -end - -function blockindex_to_cartesianindex(a::AbstractArray, blockindex) - return CartesianIndex(getindex.(axes(a), getindex.(Block.(blockindex.I), blockindex.α))) -end - -function Base.keys(s::BlockSparseStorage) - stored_blockindices = Iterators.map(stored_indices(blocks(s.array))) do I - block_axes = axes(blocks(s.array)[I]) - blockindices = Block(Tuple(I))[block_axes...] - return Iterators.map( - blockindex -> blockindex_to_cartesianindex(s.array, blockindex), blockindices - ) - end - return Iterators.flatten(stored_blockindices) -end - -function Base.values(s::BlockSparseStorage) - return Iterators.map(I -> s.array[I], eachindex(s)) -end - -function Base.iterate(s::BlockSparseStorage, args...) - return iterate(values(s), args...) -end - -function SparseArraysBase.sparse_storage(a::AbstractBlockSparseArray) - return BlockSparseStorage(a) -end - -function SparseArraysBase.stored_length(a::AnyAbstractBlockSparseArray) - return sum(stored_length, sparse_storage(blocks(a)); init=zero(Int)) -end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/views.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/views.jl deleted file mode 100644 index 7283b850d3..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/views.jl +++ /dev/null @@ -1,294 +0,0 @@ -using BlockArrays: - AbstractBlockedUnitRange, - BlockArrays, - Block, - BlockIndexRange, - BlockedVector, - blocklength, - blocksize, - viewblock - -# This splits `BlockIndexRange{N}` into -# `NTuple{N,BlockIndexRange{1}}`. -# TODO: Move to `BlockArraysExtensions`. -to_tuple(x) = Tuple(x) -function to_tuple(x::BlockIndexRange{N}) where {N} - blocks = Tuple(Block(x)) - n = length(blocks) - return ntuple(dim -> blocks[dim][x.indices[dim]], n) -end - -# Override the default definition of `BlockArrays.blocksize`, -# which is incorrect for certain slices. -function BlockArrays.blocksize(a::SubArray{<:Any,<:Any,<:AnyAbstractBlockSparseArray}) - return blocklength.(axes(a)) -end -function BlockArrays.blocksize( - a::SubArray{<:Any,<:Any,<:AnyAbstractBlockSparseArray}, i::Int -) - # TODO: Maybe use `blocklength(axes(a, i))` which would be a bit faster. - return blocksize(a)[i] -end - -# These definitions circumvent some generic definitions in BlockArrays.jl: -# https://github.com/JuliaArrays/BlockArrays.jl/blob/master/src/views.jl -# which don't handle subslices of blocks properly. -function Base.view( - a::SubArray{ - <:Any,N,<:AnyAbstractBlockSparseArray,<:Tuple{Vararg{BlockSlice{<:BlockRange{1}},N}} - }, - I::Block{N}, -) where {N} - return blocksparse_view(a, I) -end -function Base.view( - a::SubArray{ - <:Any,N,<:AnyAbstractBlockSparseArray,<:Tuple{Vararg{BlockSlice{<:BlockRange{1}},N}} - }, - I::Vararg{Block{1},N}, -) where {N} - return blocksparse_view(a, I...) 
-end
-function Base.view(
-  a::SubArray{<:Any,1,<:AnyAbstractBlockSparseArray,<:Tuple{BlockSlice{<:BlockRange{1}}}},
-  I::Block{1},
-)
-  return blocksparse_view(a, I)
-end
-
-# Specialized code for getting the view of a block.
-function BlockArrays.viewblock(
-  a::AbstractBlockSparseArray{<:Any,N}, block::Block{N}
-) where {N}
-  return viewblock(a, Tuple(block)...)
-end
-
-# TODO: Define `blocksparse_viewblock`.
-function BlockArrays.viewblock(
-  a::AbstractBlockSparseArray{<:Any,N}, block::Vararg{Block{1},N}
-) where {N}
-  I = CartesianIndex(Int.(block))
-  # TODO: Use `block_stored_indices`.
-  if I ∈ stored_indices(blocks(a))
-    return blocks(a)[I]
-  end
-  return BlockView(a, block)
-end
-
-# Specialized code for getting the view of a subblock.
-function Base.view(
-  a::AbstractBlockSparseArray{<:Any,N}, block::BlockIndexRange{N}
-) where {N}
-  return view(a, to_tuple(block)...)
-end
-
-# Specialized code for getting the view of a subblock.
-function Base.view(
-  a::SubArray{T,N,<:AbstractBlockSparseArray{T,N}}, I::BlockIndexRange{N}
-) where {T,N}
-  return view(a, to_tuple(I)...)
-end
-function Base.view(a::AbstractBlockSparseArray{<:Any,N}, I::Vararg{Block{1},N}) where {N}
-  return viewblock(a, I...)
-end
-
-# TODO: Move to `GradedAxes` or `BlockArraysExtensions`.
-to_block(I::Block{1}) = I
-to_block(I::BlockIndexRange{1}) = Block(I)
-to_block_indices(I::Block{1}) = Colon()
-to_block_indices(I::BlockIndexRange{1}) = only(I.indices)
-
-function Base.view(
-  a::AbstractBlockSparseArray{<:Any,N}, I::Vararg{Union{Block{1},BlockIndexRange{1}},N}
-) where {N}
-  return @views a[to_block.(I)...][to_block_indices.(I)...]
-end
-
-function Base.view(
-  a::SubArray{T,N,<:AbstractBlockSparseArray{T,N}}, I::Vararg{Block{1},N}
-) where {T,N}
-  return viewblock(a, I...)
-end
-function Base.view(
-  a::SubArray{T,N,<:AbstractBlockSparseArray{T,N}},
-  I::Vararg{Union{Block{1},BlockIndexRange{1}},N},
-) where {T,N}
-  return @views a[to_block.(I)...][to_block_indices.(I)...]
-end
-# Generic fallback.
-function BlockArrays.viewblock(
-  a::SubArray{T,N,<:AbstractBlockSparseArray{T,N}}, I::Vararg{Block{1},N}
-) where {T,N}
-  return Base.invoke(view, Tuple{AbstractArray,Vararg{Any}}, a, I...)
-end
-
-function Base.view(
-  a::SubArray{
-    T,
-    N,
-    <:AbstractBlockSparseArray{T,N},
-    <:Tuple{Vararg{Union{BlockSliceCollection,SubBlockSliceCollection},N}},
-  },
-  block::Union{Block{N},BlockIndexRange{N}},
-) where {T,N}
-  return viewblock(a, block)
-end
-function Base.view(
-  a::SubArray{
-    T,
-    N,
-    <:AbstractBlockSparseArray{T,N},
-    <:Tuple{Vararg{Union{BlockSliceCollection,SubBlockSliceCollection},N}},
-  },
-  block::Vararg{Union{Block{1},BlockIndexRange{1}},N},
-) where {T,N}
-  return viewblock(a, block...)
-end
-function BlockArrays.viewblock(
-  a::SubArray{
-    T,
-    N,
-    <:AbstractBlockSparseArray{T,N},
-    <:Tuple{Vararg{Union{BlockSliceCollection,SubBlockSliceCollection},N}},
-  },
-  block::Union{Block{N},BlockIndexRange{N}},
-) where {T,N}
-  return viewblock(a, to_tuple(block)...)
-end
-
-# Fixes ambiguity error with `AnyAbstractBlockSparseArray` definition.
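-# Usage sketch: the methods below cover viewing a block of an array that was
-# sliced by whole block ranges, e.g.
-# ```julia
-# b = @view a[Block.(1:2), Block.(1:2)]
-# @view b[Block(1, 1)]
-# ```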
-function Base.view( - a::SubArray{ - T,N,<:AbstractBlockSparseArray{T,N},<:Tuple{Vararg{BlockSlice{<:BlockRange{1}},N}} - }, - block::Vararg{Block{1},N}, -) where {T,N} - return viewblock(a, block...) -end - -# XXX: TODO: Distinguish if a sub-view of the block needs to be taken! -# Define a new `SubBlockSlice` which is used in: -# `blocksparse_to_indices(a, inds, I::Tuple{UnitRange{<:Integer},Vararg{Any}})` -# in `blocksparsearrayinterface/blocksparsearrayinterface.jl`. -# TODO: Define `blocksparse_viewblock`. -function BlockArrays.viewblock( - a::SubArray{T,N,<:AbstractBlockSparseArray{T,N},<:Tuple{Vararg{BlockSliceCollection,N}}}, - block::Vararg{Block{1},N}, -) where {T,N} - I = CartesianIndex(Int.(block)) - # TODO: Use `block_stored_indices`. - if I ∈ stored_indices(blocks(a)) - return blocks(a)[I] - end - return BlockView(parent(a), Block.(Base.reindex(parentindices(blocks(a)), Tuple(I)))) -end - -function to_blockindexrange( - a::BlockIndices{<:BlockVector{<:BlockIndex{1},<:Vector{<:BlockIndexRange{1}}}}, - I::Block{1}, -) - # TODO: Ideally we would just use `a.blocks[I]` but that doesn't - # work right now. - return blocks(a.blocks)[Int(I)] -end -function to_blockindexrange( - a::Base.Slice{<:AbstractBlockedUnitRange{<:Integer}}, I::Block{1} -) - @assert I in only(blockaxes(a.indices)) - return I -end - -function BlockArrays.viewblock( - a::SubArray{ - T, - N, - <:AbstractBlockSparseArray{T,N}, - <:Tuple{Vararg{Union{BlockSliceCollection,SubBlockSliceCollection},N}}, - }, - block::Vararg{Block{1},N}, -) where {T,N} - brs = ntuple(dim -> to_blockindexrange(parentindices(a)[dim], block[dim]), ndims(a)) - return @view parent(a)[brs...] -end - -# TODO: Define `blocksparse_viewblock`. -function BlockArrays.viewblock( - a::SubArray{ - T, - N, - <:AbstractBlockSparseArray{T,N}, - <:Tuple{Vararg{Union{BlockSliceCollection,SubBlockSliceCollection},N}}, - }, - block::Vararg{BlockIndexRange{1},N}, -) where {T,N} - return view(viewblock(a, Block.(block)...), map(b -> only(b.indices), block)...) -end - -# Block slice of the result of slicing `@view a[2:5, 2:5]`. -# TODO: Move this to `BlockArraysExtensions`. -const BlockedSlice = BlockSlice{ - <:BlockVector{<:BlockIndex{1},<:Vector{<:BlockIndexRange{1}}} -} - -function Base.view( - a::SubArray{T,N,<:AbstractBlockSparseArray{T,N},<:Tuple{Vararg{BlockedSlice,N}}}, - block::Union{Block{N},BlockIndexRange{N}}, -) where {T,N} - return viewblock(a, block) -end -function Base.view( - a::SubArray{T,N,<:AbstractBlockSparseArray{T,N},<:Tuple{Vararg{BlockedSlice,N}}}, - block::Vararg{Union{Block{1},BlockIndexRange{1}},N}, -) where {T,N} - return viewblock(a, block...) -end -function BlockArrays.viewblock( - a::SubArray{T,N,<:AbstractBlockSparseArray{T,N},<:Tuple{Vararg{BlockedSlice,N}}}, - block::Union{Block{N},BlockIndexRange{N}}, -) where {T,N} - return viewblock(a, to_tuple(block)...) -end -# TODO: Define `blocksparse_viewblock`. -function BlockArrays.viewblock( - a::SubArray{T,N,<:AbstractBlockSparseArray{T,N},<:Tuple{Vararg{BlockedSlice,N}}}, - I::Vararg{Block{1},N}, -) where {T,N} - # TODO: Use `reindex`, `to_indices`, etc. - brs = ntuple(ndims(a)) do dim - # TODO: Ideally we would use this but it outputs a Vector, - # not a range: - # return parentindices(a)[dim].block[I[dim]] - return blocks(parentindices(a)[dim].block)[Int(I[dim])] - end - return @view parent(a)[brs...] -end -# TODO: Define `blocksparse_viewblock`. 
-function BlockArrays.viewblock( - a::SubArray{T,N,<:AbstractBlockSparseArray{T,N},<:Tuple{Vararg{BlockedSlice,N}}}, - block::Vararg{BlockIndexRange{1},N}, -) where {T,N} - return view(viewblock(a, Block.(block)...), map(b -> only(b.indices), block)...) -end - -# migrate wrapper layer for viewing `adjoint` and `transpose`. -for (f, F) in ((:adjoint, :Adjoint), (:transpose, :Transpose)) - @eval begin - function Base.view(A::$F{<:Any,<:AbstractBlockSparseVector}, b::Block{1}) - return $f(view(parent(A), b)) - end - - Base.view(A::$F{<:Any,<:AbstractBlockSparseMatrix}, b::Block{2}) = view(A, Tuple(b)...) - function Base.view(A::$F{<:Any,<:AbstractBlockSparseMatrix}, b1::Block{1}, b2::Block{1}) - return $f(view(parent(A), b2, b1)) - end - end -end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl deleted file mode 100644 index c961f67e5a..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl +++ /dev/null @@ -1,310 +0,0 @@ -using Adapt: Adapt, WrappedArray -using BlockArrays: - BlockArrays, - AbstractBlockVector, - AbstractBlockedUnitRange, - BlockIndexRange, - BlockRange, - blockedrange, - mortar, - unblock -using SplitApplyCombine: groupcount -using ..TypeParameterAccessors: similartype - -const WrappedAbstractBlockSparseArray{T,N} = WrappedArray{ - T,N,AbstractBlockSparseArray,AbstractBlockSparseArray{T,N} -} - -# TODO: Rename `AnyBlockSparseArray`. -const AnyAbstractBlockSparseArray{T,N} = Union{ - <:AbstractBlockSparseArray{T,N},<:WrappedAbstractBlockSparseArray{T,N} -} - -# a[1:2, 1:2] -function Base.to_indices( - a::AnyAbstractBlockSparseArray, inds, I::Tuple{UnitRange{<:Integer},Vararg{Any}} -) - return blocksparse_to_indices(a, inds, I) -end - -# a[[Block(2), Block(1)], [Block(2), Block(1)]] -function Base.to_indices( - a::AnyAbstractBlockSparseArray, inds, I::Tuple{Vector{<:Block{1}},Vararg{Any}} -) - return blocksparse_to_indices(a, inds, I) -end - -# a[BlockVector([Block(2), Block(1)], [2]), BlockVector([Block(2), Block(1)], [2])] -# a[BlockedVector([Block(2), Block(1)], [2]), BlockedVector([Block(2), Block(1)], [2])] -function Base.to_indices( - a::AnyAbstractBlockSparseArray, - inds, - I::Tuple{AbstractBlockVector{<:Block{1}},Vararg{Any}}, -) - return blocksparse_to_indices(a, inds, I) -end - -# a[mortar([Block(1)[1:2], Block(2)[1:3]])] -function Base.to_indices( - a::AnyAbstractBlockSparseArray, - inds, - I::Tuple{BlockVector{<:BlockIndex{1},<:Vector{<:BlockIndexRange{1}}},Vararg{Any}}, -) - return blocksparse_to_indices(a, inds, I) -end - -# a[[Block(1)[1:2], Block(2)[1:2]], [Block(1)[1:2], Block(2)[1:2]]] -function Base.to_indices( - a::AnyAbstractBlockSparseArray, inds, I::Tuple{Vector{<:BlockIndexRange{1}},Vararg{Any}} -) - return to_indices(a, inds, (mortar(I[1]), Base.tail(I)...)) -end - -# BlockArrays `AbstractBlockArray` interface -BlockArrays.blocks(a::AnyAbstractBlockSparseArray) = blocksparse_blocks(a) - -# Fix ambiguity error with `BlockArrays` -using BlockArrays: BlockSlice -function BlockArrays.blocks( - a::SubArray{<:Any,<:Any,<:AbstractBlockSparseArray,<:Tuple{Vararg{BlockSlice}}} -) - return blocksparse_blocks(a) -end - -using ..TypeParameterAccessors: parenttype -function blockstype(arraytype::Type{<:WrappedAbstractBlockSparseArray}) - return blockstype(parenttype(arraytype)) -end - -blocktype(a::AnyAbstractBlockSparseArray) = 
eltype(blocks(a))
-blocktype(arraytype::Type{<:AnyAbstractBlockSparseArray}) = eltype(blockstype(arraytype))
-
-using ArrayLayouts: ArrayLayouts
-function Base.getindex(
- a::AnyAbstractBlockSparseArray{<:Any,N}, I::CartesianIndices{N}
-) where {N}
- return ArrayLayouts.layout_getindex(a, I)
-end
-function Base.getindex(
- a::AnyAbstractBlockSparseArray{<:Any,N}, I::Vararg{AbstractUnitRange{<:Integer},N}
-) where {N}
- return ArrayLayouts.layout_getindex(a, I...)
-end
-# TODO: Define `AnyBlockSparseMatrix`.
-function Base.getindex(
- a::AnyAbstractBlockSparseArray{<:Any,2}, I::Vararg{AbstractUnitRange{<:Integer},2}
-)
- return ArrayLayouts.layout_getindex(a, I...)
-end
-# Fixes ambiguity error.
-function Base.getindex(a::AnyAbstractBlockSparseArray{<:Any,0})
- return ArrayLayouts.layout_getindex(a)
-end
-
-# TODO: Define `blocksparse_isassigned`.
-function Base.isassigned(
- a::AnyAbstractBlockSparseArray{<:Any,N}, index::Vararg{Block{1},N}
-) where {N}
- return isassigned(blocks(a), Int.(index)...)
-end
-
-# Fix ambiguity error.
-function Base.isassigned(a::AnyAbstractBlockSparseArray{<:Any,0})
- return isassigned(blocks(a))
-end
-
-function Base.isassigned(a::AnyAbstractBlockSparseArray{<:Any,N}, index::Block{N}) where {N}
- return isassigned(a, Tuple(index)...)
-end
-
-# TODO: Define `blocksparse_isassigned`.
-function Base.isassigned(
- a::AnyAbstractBlockSparseArray{<:Any,N}, index::Vararg{BlockIndex{1},N}
-) where {N}
- b = block.(index)
- return isassigned(a, b...) && isassigned(@view(a[b...]), blockindex.(index)...)
-end
-
-function Base.setindex!(
- a::AnyAbstractBlockSparseArray{<:Any,N}, value, I::BlockIndex{N}
-) where {N}
- blocksparse_setindex!(a, value, I)
- return a
-end
-# Fixes ambiguity error with BlockArrays.jl
-function Base.setindex!(a::AnyAbstractBlockSparseArray{<:Any,1}, value, I::BlockIndex{1})
- blocksparse_setindex!(a, value, I)
- return a
-end
-
-function Base.fill!(a::AbstractBlockSparseArray, value)
- if iszero(value)
- # This drops all of the blocks.
- sparse_zero!(blocks(a))
- return a
- end
- blocksparse_fill!(a, value)
- return a
-end
-
-function Base.fill!(a::AnyAbstractBlockSparseArray, value)
- # TODO: Even if `iszero(value)`, this doesn't drop
- # blocks from `a`, and additionally allocates
- # new blocks filled with zeros, unlike
- # `fill!(a::AbstractBlockSparseArray, value)`.
- # Consider changing that behavior when possible.
- blocksparse_fill!(a, value)
- return a
-end
-
-# Needed by `BlockArrays` matrix multiplication interface
-function Base.similar(
- arraytype::Type{<:AnyAbstractBlockSparseArray},
- axes::Tuple{Vararg{AbstractUnitRange{<:Integer}}},
-)
- return similar(arraytype, eltype(arraytype), axes)
-end
-
-# Fixes ambiguity error.
-function Base.similar(
- arraytype::Type{<:AnyAbstractBlockSparseArray}, axes::Tuple{Base.OneTo,Vararg{Base.OneTo}}
-)
- return similar(arraytype, eltype(arraytype), axes)
-end
-
-# Needed by `BlockArrays` matrix multiplication interface
-# TODO: This fixes an ambiguity error with `OffsetArrays.jl`, but
-# it only appears to be needed in older versions of Julia like v1.6.
-# Delete once we drop support for older versions of Julia.
-function Base.similar(
- arraytype::Type{<:AnyAbstractBlockSparseArray},
- axes::Tuple{AbstractUnitRange{<:Integer},Vararg{AbstractUnitRange{<:Integer}}},
-)
- return similar(arraytype, eltype(arraytype), axes)
-end
-
-# Fixes ambiguity error with `BlockArrays`.
-function Base.similar( - arraytype::Type{<:AnyAbstractBlockSparseArray}, - axes::Tuple{AbstractBlockedUnitRange{<:Integer},Vararg{AbstractUnitRange{<:Integer}}}, -) - return similar(arraytype, eltype(arraytype), axes) -end - -# Fixes ambiguity error with `BlockArrays`. -function Base.similar( - arraytype::Type{<:AnyAbstractBlockSparseArray}, - axes::Tuple{ - AbstractUnitRange{<:Integer}, - AbstractBlockedUnitRange{<:Integer}, - Vararg{AbstractUnitRange{<:Integer}}, - }, -) - return similar(arraytype, eltype(arraytype), axes) -end - -# Needed for disambiguation -function Base.similar( - arraytype::Type{<:AnyAbstractBlockSparseArray}, - axes::Tuple{Vararg{AbstractBlockedUnitRange{<:Integer}}}, -) - return similar(arraytype, eltype(arraytype), axes) -end - -function blocksparse_similar(a, elt::Type, axes::Tuple) - return BlockSparseArray{elt,length(axes),similartype(blocktype(a), elt, axes)}( - undef, axes - ) -end - -# Needed by `BlockArrays` matrix multiplication interface -# TODO: Define a `blocksparse_similar` function. -function Base.similar( - arraytype::Type{<:AnyAbstractBlockSparseArray}, - elt::Type, - axes::Tuple{Vararg{AbstractUnitRange{<:Integer}}}, -) - return blocksparse_similar(arraytype, elt, axes) -end - -# TODO: Define a `blocksparse_similar` function. -function Base.similar( - a::AnyAbstractBlockSparseArray, - elt::Type, - axes::Tuple{Vararg{AbstractUnitRange{<:Integer}}}, -) - return blocksparse_similar(a, elt, axes) -end - -# Fixes ambiguity error. -function Base.similar(a::AnyAbstractBlockSparseArray, elt::Type, axes::Tuple{}) - return blocksparse_similar(a, elt, axes) -end - -# Fixes ambiguity error with `BlockArrays`. -function Base.similar( - a::AnyAbstractBlockSparseArray, - elt::Type, - axes::Tuple{ - AbstractBlockedUnitRange{<:Integer},Vararg{AbstractBlockedUnitRange{<:Integer}} - }, -) - return blocksparse_similar(a, elt, axes) -end - -# Fixes ambiguity error with `OffsetArrays`. -function Base.similar( - a::AnyAbstractBlockSparseArray, - elt::Type, - axes::Tuple{AbstractUnitRange{<:Integer},Vararg{AbstractUnitRange{<:Integer}}}, -) - return blocksparse_similar(a, elt, axes) -end - -# Fixes ambiguity error with `BlockArrays`. -function Base.similar( - a::AnyAbstractBlockSparseArray, - elt::Type, - axes::Tuple{AbstractBlockedUnitRange{<:Integer},Vararg{AbstractUnitRange{<:Integer}}}, -) - return blocksparse_similar(a, elt, axes) -end - -# Fixes ambiguity errors with BlockArrays. -function Base.similar( - a::AnyAbstractBlockSparseArray, - elt::Type, - axes::Tuple{ - AbstractUnitRange{<:Integer}, - AbstractBlockedUnitRange{<:Integer}, - Vararg{AbstractUnitRange{<:Integer}}, - }, -) - return blocksparse_similar(a, elt, axes) -end - -# Fixes ambiguity error with `StaticArrays`. -function Base.similar( - a::AnyAbstractBlockSparseArray, elt::Type, axes::Tuple{Base.OneTo,Vararg{Base.OneTo}} -) - return blocksparse_similar(a, elt, axes) -end - -# TODO: Implement this in a more generic way using a smarter `copyto!`, -# which is ultimately what `Array{T,N}(::AbstractArray{<:Any,N})` calls. -# These are defined for now to avoid scalar indexing issues when there -# are blocks on GPU. -function Base.Array{T,N}(a::AnyAbstractBlockSparseArray{<:Any,N}) where {T,N} - # First make it dense, then move to CPU. - # Directly copying to CPU causes some issues with - # scalar indexing on GPU which we have to investigate. 
- a_dest = similartype(blocktype(a), T)(undef, size(a))
- a_dest .= a
- return Array{T,N}(a_dest)
-end
-function Base.Array{T}(a::AnyAbstractBlockSparseArray) where {T}
- return Array{T,ndims(a)}(a)
-end
-function Base.Array(a::AnyAbstractBlockSparseArray)
- return Array{eltype(a)}(a)
-end
diff --git a/NDTensors/src/lib/BlockSparseArrays/src/backup/qr.jl b/NDTensors/src/lib/BlockSparseArrays/src/backup/qr.jl
deleted file mode 100644
index 4cd4527358..0000000000
--- a/NDTensors/src/lib/BlockSparseArrays/src/backup/qr.jl
+++ /dev/null
@@ -1,143 +0,0 @@
-using ...SparseArraysBase: SparseArrayDOK
-
-# Check if the matrix has 1 or fewer entries
-# per row/column.
-function is_permutation_matrix(a::SparseMatrixCSC)
- return all(col -> length(nzrange(a, col)) ≤ 1, axes(a, 2))
-end
-
-# Check if the matrix has 1 or fewer entries
-# per row/column.
-function is_permutation_matrix(a::SparseArrayDOK{<:Any,2})
- keys = collect(Iterators.map(Tuple, nonzero_keys(a)))
- I = first.(keys)
- J = last.(keys)
- return allunique(I) && allunique(J)
-end
-
-function findnonzerorows(a::SparseMatrixCSC, col)
- return view(a.rowval, a.colptr[col]:(a.colptr[col + 1] - 1))
-end
-
-# TODO: Is this already defined?
-function SparseArrays.SparseMatrixCSC(a::SparseArrayDOK{<:Any,2})
- # Not defined:
- # a_csc = SparseMatrixCSC{eltype(a)}(size(a))
- a_csc = spzeros(eltype(a), size(a))
- for I in nonzero_keys(a)
- a_csc[I] = a[I]
- end
- return a_csc
-end
-
-# TODO: Is this already defined?
-# Get the sparse structure of a SparseArray as a SparseMatrixCSC.
-function sparse_structure(
- structure_type::Type{<:SparseMatrixCSC}, a::SparseArrayDOK{<:Any,2}
-)
- # Ideally this would work, but it is a bit too complicated for `map` right now:
- # return SparseMatrixCSC(map(x -> iszero(x) ? false : true, a))
- # TODO: Change to `spzeros(Bool, size(a))`.
- a_structure = structure_type(spzeros(Bool, size(a)...))
- for I in nonzero_keys(a)
- i, j = Tuple(I)
- a_structure[i, j] = true
- end
- return a_structure
-end
-
-# Get the sparsity structure as a `SparseMatrixCSC` with values
-# of `true` where there are structural nonzero blocks and `false`
-# otherwise.
-function block_sparse_structure(structure_type::Type, a::BlockSparseArray{<:Any,2})
- return sparse_structure(structure_type, blocks(a))
-end
-
-function is_block_permutation_matrix(a::BlockSparseArray{<:Any,2})
- return is_permutation_matrix(blocks(a))
-end
-
-qr_rank(alg::Algorithm"thin", a::AbstractArray{<:Any,2}) = minimum(size(a))
-
-# m × n → (m × min(m, n)) ⋅ (min(m, n) × n)
-function qr_block_sparse_structure(alg::Algorithm"thin", a::BlockSparseArray{<:Any,2})
- axes_row, axes_col = axes(a)
- a_csc = block_sparse_structure(SparseMatrixCSC, a)
- F = qr(float(a_csc))
- # Outputs full Q
- # q_csc = sparse(F.Q[invperm(F.prow), :])
- q_csc = (F.Q * sparse(I, size(a_csc, 1), minimum(size(a_csc))))[invperm(F.prow), :]
- r_csc = F.R[:, invperm(F.pcol)]
- nblocks = size(q_csc, 2)
- @assert nblocks == size(r_csc, 1)
- a_sparse = blocks(a)
- blocklengths_qr = Vector{Int}(undef, nblocks)
- for I in nonzero_keys(a_sparse)
- i, k = Tuple(I)
- # Get the nonzero columns associated
- # with the given row.
- j = only(findnonzerorows(r_csc, k))
- # @assert is_structural_nonzero(r, j, k)
- # @assert is_structural_nonzero(q, i, j)
- blocklengths_qr[j] = qr_rank(alg, @view(a[BlockArrays.Block(i, k)]))
- end
- axes_qr = blockedrange(blocklengths_qr)
- axes_q = (axes(a, 1), axes_qr)
- axes_r = (axes_qr, axes(a, 2))
- # TODO: Come up with a better format to output.
- # TODO: Get `axes_qr` as a permutation of the - # axes of `axes(a, 2)` to preserve sectors - # when using symmetric tensors. - return q_csc, axes_q, r_csc, axes_r -end - -# m × n → (m × m) ⋅ (m × n) -function qr_block_sparse_structure(alg::Algorithm"full", a::BlockSparseArray{<:Any,2}) - return error("Not implemented") -end - -function qr_blocks(a, structure_r, block_a) - i, k = block_a.n - j = only(findnonzerorows(structure_r, k)) - return BlockArrays.Block(i, j), BlockArrays.Block(j, k) -end - -# Block-preserving QR. -function LinearAlgebra.qr(a::BlockSparseArray{<:Any,2}; alg="thin") - return qr(Algorithm(alg), a) -end - -# Block-preserving QR. -function LinearAlgebra.qr(alg::Algorithm, a::BlockSparseArray{<:Any,2}) - if !is_block_permutation_matrix(a) - # Must have 1 or fewer blocks per row/column. - println("Block sparsity structure is:") - display(nonzero_blockkeys(a)) - error("Not a block permutation matrix") - end - eltype_a = eltype(a) - # TODO: `structure_q` isn't needed. - structure_q, axes_q, structure_r, axes_r = qr_block_sparse_structure(alg, a) - # TODO: Make this generic to GPU, use `similar`. - q = BlockSparseArray{eltype_a}(axes_q) - r = BlockSparseArray{eltype_a}(axes_r) - for block_a in nonzero_blockkeys(a) - # TODO: Make thin or full depending on `alg`. - q_b, r_b = qr(a[block_a]) - # Determine the block of Q and R - # TODO: Do the block locations change for `alg="full"`? - block_q, block_r = qr_blocks(a, structure_r, block_a) - - # TODO Make this generic to GPU. - q[block_q] = Matrix(q_b) - r[block_r] = r_b - end - # TODO: If `alg="full"`, fill in blocks of `q` - # with random unitaries. - # Which blocks should be filled? Seems to be based - # on the QNs... - # Maybe fill diagonal blocks. - # TODO: Also store `structure_r` in some way - # since that is needed for permuting the QNs. - return q, r -end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearray/blocksparsearray.jl b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearray/blocksparsearray.jl deleted file mode 100644 index 19de9fd0ab..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearray/blocksparsearray.jl +++ /dev/null @@ -1,199 +0,0 @@ -using BlockArrays: BlockArrays, Block, BlockedUnitRange, blockedrange, blocklength -using Dictionaries: Dictionary -using ..SparseArraysBase: SparseArrayDOK - -# TODO: Delete this. -## using BlockArrays: blocks - -struct BlockSparseArray{ - T, - N, - A<:AbstractArray{T,N}, - Blocks<:AbstractArray{A,N}, - Axes<:Tuple{Vararg{AbstractUnitRange,N}}, -} <: AbstractBlockSparseArray{T,N} - blocks::Blocks - axes::Axes -end - -# TODO: Can this definition be shortened? -const BlockSparseMatrix{T,A<:AbstractMatrix{T},Blocks<:AbstractMatrix{A},Axes<:Tuple{AbstractUnitRange,AbstractUnitRange}} = BlockSparseArray{ - T,2,A,Blocks,Axes -} - -# TODO: Can this definition be shortened? 
-const BlockSparseVector{T,A<:AbstractVector{T},Blocks<:AbstractVector{A},Axes<:Tuple{AbstractUnitRange}} = BlockSparseArray{ - T,1,A,Blocks,Axes -} - -function BlockSparseArray( - block_data::Dictionary{<:Block{N},<:AbstractArray{<:Any,N}}, - axes::Tuple{Vararg{AbstractUnitRange,N}}, -) where {N} - blocks = default_blocks(block_data, axes) - return BlockSparseArray(blocks, axes) -end - -function BlockSparseArray( - block_indices::Vector{<:Block{N}}, - block_data::Vector{<:AbstractArray{<:Any,N}}, - axes::Tuple{Vararg{AbstractUnitRange,N}}, -) where {N} - return BlockSparseArray(Dictionary(block_indices, block_data), axes) -end - -function BlockSparseArray{T,N,A,Blocks}( - blocks::AbstractArray{<:AbstractArray{T,N},N}, axes::Tuple{Vararg{AbstractUnitRange,N}} -) where {T,N,A<:AbstractArray{T,N},Blocks<:AbstractArray{A,N}} - return BlockSparseArray{T,N,A,Blocks,typeof(axes)}(blocks, axes) -end - -function BlockSparseArray{T,N,A}( - blocks::AbstractArray{<:AbstractArray{T,N},N}, axes::Tuple{Vararg{AbstractUnitRange,N}} -) where {T,N,A<:AbstractArray{T,N}} - return BlockSparseArray{T,N,A,typeof(blocks)}(blocks, axes) -end - -function BlockSparseArray{T,N}( - blocks::AbstractArray{<:AbstractArray{T,N},N}, axes::Tuple{Vararg{AbstractUnitRange,N}} -) where {T,N} - return BlockSparseArray{T,N,eltype(blocks),typeof(blocks),typeof(axes)}(blocks, axes) -end - -function BlockSparseArray{T,N}( - block_data::Dictionary{Block{N,Int},<:AbstractArray{T,N}}, - axes::Tuple{Vararg{AbstractUnitRange,N}}, -) where {T,N} - blocks = default_blocks(block_data, axes) - return BlockSparseArray{T,N}(blocks, axes) -end - -function BlockSparseArray{T,N,A}( - axes::Tuple{Vararg{AbstractUnitRange,N}} -) where {T,N,A<:AbstractArray{T,N}} - blocks = default_blocks(A, axes) - return BlockSparseArray{T,N,A}(blocks, axes) -end - -function BlockSparseArray{T,N,A}( - axes::Vararg{AbstractUnitRange,N} -) where {T,N,A<:AbstractArray{T,N}} - return BlockSparseArray{T,N,A}(axes) -end - -function BlockSparseArray{T,N,A}( - dims::Tuple{Vararg{Vector{Int},N}} -) where {T,N,A<:AbstractArray{T,N}} - return BlockSparseArray{T,N,A}(blockedrange.(dims)) -end - -# Fix ambiguity error. 
-function BlockSparseArray{T,0,A}(axes::Tuple{}) where {T,A<:AbstractArray{T,0}} - blocks = default_blocks(A, axes) - return BlockSparseArray{T,0,A}(blocks, axes) -end - -function BlockSparseArray{T,N,A}( - dims::Vararg{Vector{Int},N} -) where {T,N,A<:AbstractArray{T,N}} - return BlockSparseArray{T,N,A}(dims) -end - -function BlockSparseArray{T,N}(axes::Tuple{Vararg{AbstractUnitRange,N}}) where {T,N} - return BlockSparseArray{T,N,default_arraytype(T, axes)}(axes) -end - -function BlockSparseArray{T,N}(axes::Vararg{AbstractUnitRange,N}) where {T,N} - return BlockSparseArray{T,N}(axes) -end - -function BlockSparseArray{T,0}(axes::Tuple{}) where {T} - return BlockSparseArray{T,0,default_arraytype(T, axes)}(axes) -end - -function BlockSparseArray{T,N}(dims::Tuple{Vararg{Vector{Int},N}}) where {T,N} - return BlockSparseArray{T,N}(blockedrange.(dims)) -end - -function BlockSparseArray{T,N}(dims::Vararg{Vector{Int},N}) where {T,N} - return BlockSparseArray{T,N}(dims) -end - -function BlockSparseArray{T}(dims::Tuple{Vararg{Vector{Int}}}) where {T} - return BlockSparseArray{T,length(dims)}(dims) -end - -function BlockSparseArray{T}(axes::Tuple{Vararg{AbstractUnitRange}}) where {T} - return BlockSparseArray{T,length(axes)}(axes) -end - -function BlockSparseArray{T}(axes::Tuple{}) where {T} - return BlockSparseArray{T,length(axes)}(axes) -end - -function BlockSparseArray{T}(dims::Vararg{Vector{Int}}) where {T} - return BlockSparseArray{T}(dims) -end - -function BlockSparseArray{T}(axes::Vararg{AbstractUnitRange}) where {T} - return BlockSparseArray{T}(axes) -end - -function BlockSparseArray{T}() where {T} - return BlockSparseArray{T}(()) -end - -# undef -function BlockSparseArray{T,N,A,Blocks}( - ::UndefInitializer, args... -) where {T,N,A<:AbstractArray{T,N},Blocks<:AbstractArray{A,N}} - return BlockSparseArray{T,N,A,Blocks}(args...) -end - -function BlockSparseArray{T,N,A}( - ::UndefInitializer, args... -) where {T,N,A<:AbstractArray{T,N}} - return BlockSparseArray{T,N,A}(args...) -end - -function BlockSparseArray{T,N}(::UndefInitializer, args...) where {T,N} - return BlockSparseArray{T,N}(args...) -end - -function BlockSparseArray{T}(::UndefInitializer, args...) where {T} - return BlockSparseArray{T}(args...) -end - -# Base `AbstractArray` interface -Base.axes(a::BlockSparseArray) = a.axes - -# BlockArrays `AbstractBlockArray` interface. -# This is used by `blocks(::AnyAbstractBlockSparseArray)`. -blocksparse_blocks(a::BlockSparseArray) = a.blocks - -# TODO: Use `TypeParameterAccessors`. -function blockstype( - arraytype::Type{<:BlockSparseArray{T,N,A,Blocks}} -) where {T,N,A<:AbstractArray{T,N},Blocks<:AbstractArray{A,N}} - return Blocks -end -function blockstype( - arraytype::Type{<:BlockSparseArray{T,N,A}} -) where {T,N,A<:AbstractArray{T,N}} - return SparseArrayDOK{A,N} -end -function blockstype(arraytype::Type{<:BlockSparseArray{T,N}}) where {T,N} - return SparseArrayDOK{AbstractArray{T,N},N} -end -function blockstype(arraytype::Type{<:BlockSparseArray{T}}) where {T} - return SparseArrayDOK{AbstractArray{T}} -end -blockstype(arraytype::Type{<:BlockSparseArray}) = SparseArrayDOK{AbstractArray} - -## # Base interface -## function Base.similar( -## a::AbstractBlockSparseArray, elt::Type, axes::Tuple{Vararg{BlockedUnitRange}} -## ) -## # TODO: Preserve GPU data! 
-## return BlockSparseArray{elt}(undef, axes) -## end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearray/defaults.jl b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearray/defaults.jl deleted file mode 100644 index ab126aaa15..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearray/defaults.jl +++ /dev/null @@ -1,42 +0,0 @@ -using BlockArrays: Block -using Dictionaries: Dictionary -using ..SparseArraysBase: SparseArrayDOK - -# Construct the sparse structure storing the blocks -function default_blockdata( - block_data::Dictionary{<:CartesianIndex{N},<:AbstractArray{<:Any,N}}, - axes::Tuple{Vararg{AbstractUnitRange,N}}, -) where {N} - return error() -end - -function default_blocks( - block_indices::Vector{<:Block{N}}, - block_data::Vector{<:AbstractArray{<:Any,N}}, - axes::Tuple{Vararg{AbstractUnitRange,N}}, -) where {N} - return default_blocks(Dictionary(block_indices, block_data), axes) -end - -function default_blocks( - block_data::Dictionary{<:Block{N},<:AbstractArray{<:Any,N}}, - axes::Tuple{Vararg{AbstractUnitRange,N}}, -) where {N} - return default_blocks(blocks_to_cartesianindices(block_data), axes) -end - -function default_arraytype(elt::Type, axes::Tuple{Vararg{AbstractUnitRange}}) - return Array{elt,length(axes)} -end - -function default_blocks(blocktype::Type, axes::Tuple{Vararg{AbstractUnitRange}}) - block_data = Dictionary{Block{length(axes),Int},blocktype}() - return default_blocks(block_data, axes) -end - -function default_blocks( - block_data::Dictionary{<:CartesianIndex{N},<:AbstractArray{<:Any,N}}, - axes::Tuple{Vararg{AbstractUnitRange,N}}, -) where {N} - return SparseArrayDOK(block_data, blocklength.(axes), BlockZero(axes)) -end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/arraylayouts.jl b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/arraylayouts.jl deleted file mode 100644 index f7d02ae554..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/arraylayouts.jl +++ /dev/null @@ -1,48 +0,0 @@ -using ArrayLayouts: ArrayLayouts, Dot, MatMulMatAdd, MatMulVecAdd, MulAdd -using BlockArrays: BlockLayout -using ..SparseArraysBase: SparseLayout -using LinearAlgebra: dot, mul! - -function blocksparse_muladd!( - α::Number, a1::AbstractArray, a2::AbstractArray, β::Number, a_dest::AbstractArray -) - mul!(blocks(a_dest), blocks(a1), blocks(a2), α, β) - return a_dest -end - -function blocksparse_matmul!(m::MulAdd) - α, a1, a2, β, a_dest = m.α, m.A, m.B, m.β, m.C - blocksparse_muladd!(α, a1, a2, β, a_dest) - return a_dest -end - -function ArrayLayouts.materialize!( - m::MatMulMatAdd{ - <:BlockLayout{<:SparseLayout}, - <:BlockLayout{<:SparseLayout}, - <:BlockLayout{<:SparseLayout}, - }, -) - blocksparse_matmul!(m) - return m.C -end -function ArrayLayouts.materialize!( - m::MatMulVecAdd{ - <:BlockLayout{<:SparseLayout}, - <:BlockLayout{<:SparseLayout}, - <:BlockLayout{<:SparseLayout}, - }, -) - blocksparse_matmul!(m) - return m.C -end - -function blocksparse_dot(a1::AbstractArray, a2::AbstractArray) - # TODO: Add a check that the blocking of `a1` and `a2` are - # the same, or the same up to a reshape. 
- return dot(blocks(a1), blocks(a2))
-end
-
-function Base.copy(d::Dot{<:BlockLayout{<:SparseLayout},<:BlockLayout{<:SparseLayout}})
- return blocksparse_dot(d.A, d.B)
-end
diff --git a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl
deleted file mode 100644
index 4e9b85a958..0000000000
--- a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl
+++ /dev/null
@@ -1,274 +0,0 @@
-using BlockArrays:
- AbstractBlockVector,
- Block,
- BlockIndex,
- BlockRange,
- BlockSlice,
- BlockVector,
- BlockedUnitRange,
- BlockedVector,
- block,
- blockcheckbounds,
- blocklengths,
- blocks,
- findblockindex
-using LinearAlgebra: Adjoint, Transpose
-using ..SparseArraysBase: perm, iperm, stored_length, sparse_zero!
-
-blocksparse_blocks(a::AbstractArray) = error("Not implemented")
-
-blockstype(a::AbstractArray) = blockstype(typeof(a))
-
-function blocksparse_getindex(a::AbstractArray{<:Any,N}, I::Vararg{Int,N}) where {N}
- @boundscheck checkbounds(a, I...)
- return a[findblockindex.(axes(a), I)...]
-end
-
-# Fix ambiguity error.
-function blocksparse_getindex(a::AbstractArray{<:Any,0})
- # TODO: Use `Block()[]` once https://github.com/JuliaArrays/BlockArrays.jl/issues/430
- # is fixed.
- return a[BlockIndex{0,Tuple{},Tuple{}}((), ())]
-end
-
-# a[1:2, 1:2]
-# TODO: This definition means that the result of slicing a block sparse array
-# with a non-blocked unit range is blocked. We may want to change that behavior,
-# and make that explicit with `@blocked a[1:2, 1:2]`. See the discussion in
-# https://github.com/JuliaArrays/BlockArrays.jl/issues/347 and also
-# https://github.com/ITensor/ITensors.jl/issues/1336.
-function blocksparse_to_indices(a, inds, I::Tuple{UnitRange{<:Integer},Vararg{Any}})
- bs1 = to_blockindices(inds[1], I[1])
- I1 = BlockSlice(bs1, blockedunitrange_getindices(inds[1], I[1]))
- return (I1, to_indices(a, Base.tail(inds), Base.tail(I))...)
-end
-
-# Special case when there is no blocking.
-function blocksparse_to_indices(
- a,
- inds::Tuple{Base.OneTo{<:Integer},Vararg{Any}},
- I::Tuple{UnitRange{<:Integer},Vararg{Any}},
-)
- return (inds[1][I[1]], to_indices(a, Base.tail(inds), Base.tail(I))...)
-end
-
-# a[[Block(2), Block(1)], [Block(2), Block(1)]]
-function blocksparse_to_indices(a, inds, I::Tuple{Vector{<:Block{1}},Vararg{Any}})
- I1 = BlockIndices(I[1], blockedunitrange_getindices(inds[1], I[1]))
- return (I1, to_indices(a, Base.tail(inds), Base.tail(I))...)
-end
-
-# a[mortar([Block(1)[1:2], Block(2)[1:3]]), mortar([Block(1)[1:2], Block(2)[1:3]])]
-# a[[Block(1)[1:2], Block(2)[1:3]], [Block(1)[1:2], Block(2)[1:3]]]
-function blocksparse_to_indices(
- a, inds, I::Tuple{BlockVector{<:BlockIndex{1},<:Vector{<:BlockIndexRange{1}}},Vararg{Any}}
-)
- I1 = BlockIndices(I[1], blockedunitrange_getindices(inds[1], I[1]))
- return (I1, to_indices(a, Base.tail(inds), Base.tail(I))...)
-end
-
-# a[BlockVector([Block(2), Block(1)], [2]), BlockVector([Block(2), Block(1)], [2])]
-# Permute and merge blocks.
-# TODO: This isn't merging blocks yet; that still needs to be implemented.
-function blocksparse_to_indices(
- a, inds, I::Tuple{AbstractBlockVector{<:Block{1}},Vararg{Any}}
-)
- I1 = BlockIndices(I[1], blockedunitrange_getindices(inds[1], I[1]))
- return (I1, to_indices(a, Base.tail(inds), Base.tail(I))...)
-end
-
-# TODO: Need to implement this!
-function block_merge end
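# Illustrative sketch (not from the original sources) of the blockwise indexing
# that the `blocksparse_to_indices` methods above support, shown on a dense
# `BlockArray` from the registered BlockArrays.jl package, which implements the
# analogous indexing for densely stored blocks:
using BlockArrays: BlockArray, Block, blocklengths
A = BlockArray(ones(5, 5), [2, 3], [2, 3])
A[Block(2), Block(1)]                        # the (2, 1) block, a 3×2 Matrix
B = A[Block(1):Block(2), Block(1):Block(1)]  # a blocked 5×2 slice, blocking preserved
@assert blocklengths(axes(B, 1)) == [2, 3]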
-
-function blocksparse_setindex!(a::AbstractArray{<:Any,N}, value, I::Vararg{Int,N}) where {N}
- @boundscheck checkbounds(a, I...)
- a[findblockindex.(axes(a), I)...] = value
- return a
-end
-
-# Fix ambiguity error.
-function blocksparse_setindex!(a::AbstractArray{<:Any,0}, value)
- # TODO: Use `Block()[]` once https://github.com/JuliaArrays/BlockArrays.jl/issues/430
- # is fixed.
- a[BlockIndex{0,Tuple{},Tuple{}}((), ())] = value
- return a
-end
-
-function blocksparse_setindex!(a::AbstractArray{<:Any,N}, value, I::BlockIndex{N}) where {N}
- i = Int.(Tuple(block(I)))
- a_b = blocks(a)[i...]
- a_b[I.α...] = value
- # Set the block, required if it is structurally zero.
- blocks(a)[i...] = a_b
- return a
-end
-
-# Fix ambiguity error.
-function blocksparse_setindex!(a::AbstractArray{<:Any,0}, value, I::BlockIndex{0})
- a_b = blocks(a)[]
- a_b[] = value
- # Set the block, required if it is structurally zero.
- blocks(a)[] = a_b
- return a
-end
-
-function blocksparse_fill!(a::AbstractArray, value)
- for b in BlockRange(a)
- # We can't use:
- # ```julia
- # a[b] .= value
- # ```
- # since that would lead to a stack overflow,
- # because broadcasting calls `fill!`.
-
- # TODO: Ideally we would use:
- # ```julia
- # @view!(a[b]) .= value
- # ```
- # but that doesn't work on `SubArray` right now.
-
- # This line is needed to instantiate blocks
- # that aren't instantiated yet. Maybe
- # we can make this work without this line?
- blocks(a)[Int.(Tuple(b))...] = blocks(a)[Int.(Tuple(b))...]
- blocks(a)[Int.(Tuple(b))...] .= value
- end
- return a
-end
-
-function block_stored_length(a::AbstractArray)
- return stored_length(blocks(a))
-end
-
-# BlockArrays
-
-using ..SparseArraysBase: SparseArraysBase, AbstractSparseArray, AbstractSparseMatrix
-
-_perm(::PermutedDimsArray{<:Any,<:Any,perm}) where {perm} = perm
-_invperm(::PermutedDimsArray{<:Any,<:Any,<:Any,invperm}) where {invperm} = invperm
-_getindices(t::Tuple, indices) = map(i -> t[i], indices)
-_getindices(i::CartesianIndex, indices) = CartesianIndex(_getindices(Tuple(i), indices))
-
-# Represents the array of arrays of a `PermutedDimsArray`
-# wrapping a block sparse array, i.e. `blocks(a)` where `a` is a `PermutedDimsArray`.
-struct SparsePermutedDimsArrayBlocks{
- T,N,BlockType<:AbstractArray{T,N},Array<:PermutedDimsArray{T,N}
-} <: AbstractSparseArray{BlockType,N}
- array::Array
-end
-function blocksparse_blocks(a::PermutedDimsArray)
- return SparsePermutedDimsArrayBlocks{eltype(a),ndims(a),blocktype(parent(a)),typeof(a)}(a)
-end
-function Base.size(a::SparsePermutedDimsArrayBlocks)
- return _getindices(size(blocks(parent(a.array))), _perm(a.array))
-end
-function Base.getindex(
- a::SparsePermutedDimsArrayBlocks{<:Any,N}, index::Vararg{Int,N}
-) where {N}
- return PermutedDimsArray(
- blocks(parent(a.array))[_getindices(index, _invperm(a.array))...], _perm(a.array)
- )
-end
-function SparseArraysBase.stored_indices(a::SparsePermutedDimsArrayBlocks)
- return map(I -> _getindices(I, _perm(a.array)), stored_indices(blocks(parent(a.array))))
-end
-# TODO: Either make this the generic interface or define
-# `SparseArraysBase.sparse_storage`, which is used
-# to define this.
-function SparseArraysBase.stored_length(a::SparsePermutedDimsArrayBlocks)
- return length(stored_indices(a))
-end
-function SparseArraysBase.sparse_storage(a::SparsePermutedDimsArrayBlocks)
- return error("Not implemented")
-end
-
-reverse_index(index) = reverse(index)
-reverse_index(index::CartesianIndex) = CartesianIndex(reverse(Tuple(index)))
-
-blocksparse_blocks(a::Transpose) = transpose(blocks(parent(a)))
-blocksparse_blocks(a::Adjoint) = adjoint(blocks(parent(a)))
-
-# Represents the array of arrays of a `SubArray`
-# wrapping a block sparse array, i.e. `blocks(a)` where `a` is a `SubArray`.
-struct SparseSubArrayBlocks{T,N,BlockType<:AbstractArray{T,N},Array<:SubArray{T,N}} <:
- AbstractSparseArray{BlockType,N}
- array::Array
-end
-function blocksparse_blocks(a::SubArray)
- return SparseSubArrayBlocks{eltype(a),ndims(a),blocktype(parent(a)),typeof(a)}(a)
-end
-# TODO: Define this as `blockrange(a::AbstractArray, indices::Tuple{Vararg{AbstractUnitRange}})`.
-function blockrange(a::SparseSubArrayBlocks)
- blockranges = blockrange.(axes(parent(a.array)), a.array.indices)
- return map(blockrange -> Int.(blockrange), blockranges)
-end
-function Base.axes(a::SparseSubArrayBlocks)
- return Base.OneTo.(length.(blockrange(a)))
-end
-function Base.size(a::SparseSubArrayBlocks)
- return length.(axes(a))
-end
-function Base.getindex(a::SparseSubArrayBlocks{<:Any,N}, I::Vararg{Int,N}) where {N}
- # TODO: Should this be defined as `@view a.array[Block(I)]` instead?
- return @view a.array[Block(I)]
-
- ## parent_blocks = @view blocks(parent(a.array))[blockrange(a)...]
- ## parent_block = parent_blocks[I...]
- ## # TODO: Define this using `blockrange(a::AbstractArray, indices::Tuple{Vararg{AbstractUnitRange}})`.
- ## block = Block(ntuple(i -> blockrange(a)[i][I[i]], ndims(a)))
- ## return @view parent_block[blockindices(parent(a.array), block, a.array.indices)...]
-end
-# TODO: This should be handled by generic `AbstractSparseArray` code.
-function Base.getindex(a::SparseSubArrayBlocks{<:Any,N}, I::CartesianIndex{N}) where {N}
- return a[Tuple(I)...]
-end
-function Base.setindex!(a::SparseSubArrayBlocks{<:Any,N}, value, I::Vararg{Int,N}) where {N}
- parent_blocks = @view blocks(parent(a.array))[blockrange(a)...]
- # TODO: The following line is required to instantiate
- # uninstantiated blocks, maybe use `@view!` instead,
- # or some other code pattern.
- parent_blocks[I...] = parent_blocks[I...]
- # TODO: Define this using `blockrange(a::AbstractArray, indices::Tuple{Vararg{AbstractUnitRange}})`.
- block = Block(ntuple(i -> blockrange(a)[i][I[i]], ndims(a)))
- return parent_blocks[I...][blockindices(parent(a.array), block, a.array.indices)...] =
- value
-end
-function Base.isassigned(a::SparseSubArrayBlocks{<:Any,N}, I::Vararg{Int,N}) where {N}
- if CartesianIndex(I) ∉ CartesianIndices(a)
- return false
- end
- # TODO: Implement this properly.
- return true
-end
-function SparseArraysBase.stored_indices(a::SparseSubArrayBlocks)
- return stored_indices(view(blocks(parent(a.array)), blockrange(a)...))
-end
-# TODO: Either make this the generic interface or define
-# `SparseArraysBase.sparse_storage`, which is used
-# to define this.
-SparseArraysBase.stored_length(a::SparseSubArrayBlocks) = length(stored_indices(a)) - -## struct SparseSubArrayBlocksStorage{Array<:SparseSubArrayBlocks} -## array::Array -## end -function SparseArraysBase.sparse_storage(a::SparseSubArrayBlocks) - return map(I -> a[I], stored_indices(a)) -end - -function SparseArraysBase.getindex_zero_function(a::SparseSubArrayBlocks) - # TODO: Base it off of `getindex_zero_function(blocks(parent(a.array))`, but replace the - # axes with `axes(a.array)`. - return BlockZero(axes(a.array)) -end - -to_blocks_indices(I::BlockSlice{<:BlockRange{1}}) = Int.(I.block) -to_blocks_indices(I::BlockIndices{<:Vector{<:Block{1}}}) = Int.(I.blocks) - -function blocksparse_blocks( - a::SubArray{<:Any,<:Any,<:Any,<:Tuple{Vararg{BlockSliceCollection}}} -) - return @view blocks(parent(a))[map(to_blocks_indices, parentindices(a))...] -end - -using BlockArrays: BlocksView -SparseArraysBase.stored_length(a::BlocksView) = length(a) diff --git a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blockzero.jl b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blockzero.jl deleted file mode 100644 index 25665acd63..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blockzero.jl +++ /dev/null @@ -1,45 +0,0 @@ -using BlockArrays: Block, blockedrange - -# Extensions to BlockArrays.jl -blocktuple(b::Block) = Block.(b.n) -inttuple(b::Block) = b.n - -# The size of a block -function block_size(axes::Tuple{Vararg{AbstractUnitRange}}, block::Block) - return length.(getindex.(axes, blocktuple(block))) -end - -# The size of a block -function block_size(blockinds::Tuple{Vararg{AbstractVector}}, block::Block) - return block_size(blockedrange.(blockinds), block) -end - -struct BlockZero{Axes} - axes::Axes -end - -function (f::BlockZero)(a::AbstractArray, I) - return f(eltype(a), I) -end - -function (f::BlockZero)(arraytype::Type{<:SubArray{<:Any,<:Any,P}}, I) where {P} - return f(P, I) -end - -function (f::BlockZero)(arraytype::Type{<:AbstractArray}, I) - # TODO: Make sure this works for sparse or block sparse blocks, immutable - # blocks, diagonal blocks, etc.! - blck_size = block_size(f.axes, Block(Tuple(I))) - blck_type = similartype(arraytype, blck_size) - return fill!(blck_type(undef, blck_size), false) -end - -# Fallback so that `SparseArray` with scalar elements works. -function (f::BlockZero)(blocktype::Type{<:Number}, I) - return zero(blocktype) -end - -# Fallback to Array if it is abstract -function (f::BlockZero)(arraytype::Type{AbstractArray{T,N}}, I) where {T,N} - return f(Array{T,N}, I) -end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/broadcast.jl b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/broadcast.jl deleted file mode 100644 index 7ce8d024ef..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/broadcast.jl +++ /dev/null @@ -1,39 +0,0 @@ -using Base.Broadcast: BroadcastStyle, AbstractArrayStyle, DefaultArrayStyle, Broadcasted -using ..BroadcastMapConversion: map_function, map_args - -struct BlockSparseArrayStyle{N} <: AbstractArrayStyle{N} end - -# Define for new sparse array types. 
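# A self-contained sketch of the Base broadcasting hook referred to here and in
# the commented template just below. This is not from the original sources:
# `MyVec` and `MyStyle` are hypothetical names invented for this example, but the
# pattern (a style type plus `BroadcastStyle` and `similar` methods) is the
# standard Julia mechanism the deleted code uses for block sparse arrays.
using Base.Broadcast: AbstractArrayStyle, Broadcasted
struct MyVec <: AbstractVector{Float64}
  data::Vector{Float64}
end
Base.size(v::MyVec) = size(v.data)
Base.getindex(v::MyVec, i::Int) = v.data[i]
struct MyStyle <: AbstractArrayStyle{1} end
MyStyle(::Val{1}) = MyStyle()  # required `Val` constructor for style promotion
Base.BroadcastStyle(::Type{MyVec}) = MyStyle()
function Base.similar(bc::Broadcasted{MyStyle}, elt::Type)
  # Materialize results into a plain `Vector` for this sketch.
  return Vector{elt}(undef, length(axes(bc)[1]))
end
# `MyVec([1.0, 2.0]) .+ 1` now dispatches through `MyStyle`.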
-# function Broadcast.BroadcastStyle(arraytype::Type{<:MyBlockSparseArray})
-#   return BlockSparseArrayStyle{ndims(arraytype)}()
-# end
-
-BlockSparseArrayStyle(::Val{N}) where {N} = BlockSparseArrayStyle{N}()
-BlockSparseArrayStyle{M}(::Val{N}) where {M,N} = BlockSparseArrayStyle{N}()
-
-Broadcast.BroadcastStyle(a::BlockSparseArrayStyle, ::DefaultArrayStyle{0}) = a
-function Broadcast.BroadcastStyle(
- ::BlockSparseArrayStyle{N}, a::DefaultArrayStyle
-) where {N}
- return BroadcastStyle(DefaultArrayStyle{N}(), a)
-end
-function Broadcast.BroadcastStyle(
- ::BlockSparseArrayStyle{N}, ::Broadcast.Style{Tuple}
-) where {N}
- return DefaultArrayStyle{N}()
-end
-
-function Base.similar(bc::Broadcasted{<:BlockSparseArrayStyle}, elt::Type)
- # TODO: Make sure this handles GPU arrays properly.
- return similar(first(map_args(bc)), elt, combine_axes(axes.(map_args(bc))...))
-end
-
-# Broadcasting implementation
-function Base.copyto!(
- dest::AbstractArray{<:Any,N}, bc::Broadcasted{BlockSparseArrayStyle{N}}
-) where {N}
- # convert to map
- # flatten and only keep the AbstractArray arguments
- sparse_map!(map_function(bc), dest, map_args(bc)...)
- return dest
-end
diff --git a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/cat.jl b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/cat.jl
deleted file mode 100644
index b2d6596bd5..0000000000
--- a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/cat.jl
+++ /dev/null
@@ -1,26 +0,0 @@
-using BlockArrays: AbstractBlockedUnitRange, blockedrange, blocklengths
-using NDTensors.SparseArraysBase: SparseArraysBase, allocate_cat_output, sparse_cat!
-
-# TODO: Maybe move to `SparseArraysBaseBlockArraysExt`.
-# TODO: Handle dual graded unit ranges, for example in a new `SparseArraysBaseGradedAxesExt`.
-function SparseArraysBase.axis_cat(
- a1::AbstractBlockedUnitRange, a2::AbstractBlockedUnitRange
-)
- return blockedrange(vcat(blocklengths(a1), blocklengths(a2)))
-end
-
-# TODO: Delete this in favor of `sparse_cat!`, currently
-# that erroneously allocates too many blocks that are
-# zero and shouldn't be stored.
-function blocksparse_cat!(a_dest::AbstractArray, as::AbstractArray...; dims)
- sparse_cat!(blocks(a_dest), blocks.(as)...; dims)
- return a_dest
-end
-
-# TODO: Delete this in favor of `sparse_cat`, currently
-# that erroneously allocates too many blocks that are
-# zero and shouldn't be stored.
-function blocksparse_cat(as::AbstractArray...; dims)
- a_dest = allocate_cat_output(as...; dims)
- blocksparse_cat!(a_dest, as...; dims)
- return a_dest
-end
diff --git a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/linearalgebra.jl b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/linearalgebra.jl
deleted file mode 100644
index ac7f566a93..0000000000
--- a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/linearalgebra.jl
+++ /dev/null
@@ -1,12 +0,0 @@
-using LinearAlgebra: mul!
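# A self-contained sketch (not from the original sources) of the block-level
# multiplication that `blocksparse_mul!` below delegates to: a block sparse
# matmul multiplies and accumulates only the stored blocks. The `Dict`-of-blocks
# representation here is invented purely for illustration.
using LinearAlgebra: mul!
function blockwise_mul(A::Dict{Tuple{Int,Int},Matrix{Float64}},
                       B::Dict{Tuple{Int,Int},Matrix{Float64}})
  C = Dict{Tuple{Int,Int},Matrix{Float64}}()
  for ((i, k), a) in A, ((k2, j), b) in B
    k == k2 || continue  # only contract matching inner block indices
    c = get!(() -> zeros(size(a, 1), size(b, 2)), C, (i, j))
    mul!(c, a, b, 1.0, 1.0)  # c .+= a * b, without allocating a * b
  end
  return C
end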
-
-function blocksparse_mul!(
- a_dest::AbstractMatrix,
- a1::AbstractMatrix,
- a2::AbstractMatrix,
- α::Number=true,
- β::Number=false,
-)
- mul!(blocks(a_dest), blocks(a1), blocks(a2), α, β)
- return a_dest
-end
diff --git a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/map.jl b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/map.jl
deleted file mode 100644
index 2d537cdbf7..0000000000
--- a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/map.jl
+++ /dev/null
@@ -1,19 +0,0 @@
-function map_stored_blocks(f, a::AbstractArray)
- # TODO: Implement this as:
- # ```julia
- # mapped_blocks = SparseArraysInterface.map_stored(f, blocks(a))
- # BlockSparseArray(mapped_blocks, axes(a))
- # ```
- # TODO: `block_stored_indices` should output `Indices` storing
- # the stored Blocks, not a `Dictionary` from cartesian indices
- # to Blocks.
- bs = collect(block_stored_indices(a))
- ds = map(b -> f(@view(a[b])), bs)
- # We manually specify the block type using `Base.promote_op`
- # since `a[b]` may not be inferrable. For example, if `blocktype(a)`
- # is `Diagonal{Float64,Vector{Float64}}`, the non-stored blocks are `Matrix{Float64}`
- # since they can't necessarily be `Diagonal` if there are rectangular blocks.
- mapped_blocks = Dictionary{eltype(bs),eltype(ds)}(bs, ds)
- # TODO: Use `similartype(typeof(a), eltype(eltype(mapped_blocks)))(...)`.
- return BlockSparseArray(mapped_blocks, axes(a))
-end
diff --git a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/views.jl b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/views.jl
deleted file mode 100644
index 8e43f2625b..0000000000
--- a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/views.jl
+++ /dev/null
@@ -1,3 +0,0 @@
-function blocksparse_view(a, I...)
- return Base.invoke(view, Tuple{AbstractArray,Vararg{Any}}, a, I...)
-end diff --git a/NDTensors/src/lib/BlockSparseArrays/test/Project.toml b/NDTensors/src/lib/BlockSparseArrays/test/Project.toml deleted file mode 100644 index b0460803d3..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/test/Project.toml +++ /dev/null @@ -1,6 +0,0 @@ -[deps] -BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" -Compat = "34da2185-b29b-5c13-b0c7-acf172513d20" -GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" -JLArrays = "27aeb0d3-9eb9-45fb-866b-73c2ecf80fcb" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" diff --git a/NDTensors/src/lib/BlockSparseArrays/test/TestBlockSparseArraysUtils.jl b/NDTensors/src/lib/BlockSparseArrays/test/TestBlockSparseArraysUtils.jl deleted file mode 100644 index 36f453986b..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/test/TestBlockSparseArraysUtils.jl +++ /dev/null @@ -1,15 +0,0 @@ -module TestBlockSparseArraysUtils -using BlockArrays: BlockRange - -function set_blocks!(a::AbstractArray, f::Function, blocks::Function) - set_blocks!(a, f, filter(blocks, BlockRange(a))) - return a -end - -function set_blocks!(a::AbstractArray, f::Function, blocks::Vector) - for b in blocks - a[b] = f(eltype(a), size(@view(a[b]))) - end - return a -end -end diff --git a/NDTensors/src/lib/BlockSparseArrays/test/runtests.jl b/NDTensors/src/lib/BlockSparseArrays/test/runtests.jl deleted file mode 100644 index 2a8e2c5db9..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/test/runtests.jl +++ /dev/null @@ -1,5 +0,0 @@ -@eval module $(gensym()) -include("test_basics.jl") -include("../ext/BlockSparseArraysTensorAlgebraExt/test/runtests.jl") -include("../ext/BlockSparseArraysGradedAxesExt/test/runtests.jl") -end diff --git a/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl b/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl deleted file mode 100644 index 13eda130c2..0000000000 --- a/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl +++ /dev/null @@ -1,1033 +0,0 @@ -@eval module $(gensym()) -using BlockArrays: - Block, - BlockIndexRange, - BlockRange, - BlockSlice, - BlockVector, - BlockedOneTo, - BlockedUnitRange, - BlockedVector, - blockedrange, - blocklength, - blocklengths, - blocksize, - blocksizes, - mortar -using Compat: @compat -using GPUArraysCore: @allowscalar -using LinearAlgebra: Adjoint, Transpose, dot, mul!, norm -using NDTensors.BlockSparseArrays: - @view!, - BlockSparseArray, - BlockSparseMatrix, - BlockSparseVector, - BlockView, - block_stored_length, - block_reshape, - block_stored_indices, - blockstype, - blocktype, - view! -using NDTensors.GPUArraysCoreExtensions: cpu -using NDTensors.SparseArraysBase: stored_length -using NDTensors.SparseArraysBase: SparseArrayDOK, SparseMatrixDOK, SparseVectorDOK -using NDTensors.TensorAlgebra: contract -using Test: @test, @test_broken, @test_throws, @testset, @inferred -include("TestBlockSparseArraysUtils.jl") - -using NDTensors: NDTensors -include(joinpath(pkgdir(NDTensors), "test", "NDTensorsTestUtils", "NDTensorsTestUtils.jl")) -using .NDTensorsTestUtils: devices_list, is_supported_eltype -@testset "BlockSparseArrays (dev=$dev, eltype=$elt)" for dev in devices_list(copy(ARGS)), - elt in (Float32, Float64, Complex{Float32}, Complex{Float64}) - - if !is_supported_eltype(dev, elt) - continue - end - @testset "Broken" begin - # TODO: Fix this and turn it into a proper test. 
- a = dev(BlockSparseArray{elt}([2, 3], [2, 3])) - a[Block(1, 1)] = dev(randn(elt, 2, 2)) - a[Block(2, 2)] = dev(randn(elt, 3, 3)) - @test_broken a[:, 4] - - # TODO: Fix this and turn it into a proper test. - a = dev(BlockSparseArray{elt}([2, 3], [2, 3])) - a[Block(1, 1)] = dev(randn(elt, 2, 2)) - a[Block(2, 2)] = dev(randn(elt, 3, 3)) - @test_broken a[:, [2, 4]] - @test_broken a[[3, 5], [2, 4]] - - # TODO: Fix this and turn it into a proper test. - a = dev(BlockSparseArray{elt}([2, 3], [2, 3])) - a[Block(1, 1)] = dev(randn(elt, 2, 2)) - a[Block(2, 2)] = dev(randn(elt, 3, 3)) - @allowscalar @test a[2:4, 4] == Array(a)[2:4, 4] - @test_broken a[4, 2:4] - - @test a[Block(1), :] isa BlockSparseArray{elt} - @test adjoint(a) isa Adjoint{elt,<:BlockSparseArray} - @test_broken adjoint(a)[Block(1), :] isa Adjoint{elt,<:BlockSparseArray} - # could also be directly a BlockSparseArray - end - @testset "Constructors" begin - # BlockSparseMatrix - bs = ([2, 3], [3, 4]) - for T in ( - BlockSparseArray{elt}, - BlockSparseArray{elt,2}, - BlockSparseMatrix{elt}, - BlockSparseArray{elt,2,Matrix{elt}}, - BlockSparseMatrix{elt,Matrix{elt}}, - ## BlockSparseArray{elt,2,Matrix{elt},SparseMatrixDOK{Matrix{elt}}}, # TODO - ## BlockSparseMatrix{elt,Matrix{elt},SparseMatrixDOK{Matrix{elt}}}, # TODO - ) - for args in ( - bs, - (bs,), - blockedrange.(bs), - (blockedrange.(bs),), - (undef, bs), - (undef, bs...), - (undef, blockedrange.(bs)), - (undef, blockedrange.(bs)...), - ) - a = T(args...) - @test eltype(a) == elt - @test blocktype(a) == Matrix{elt} - @test blockstype(a) <: SparseMatrixDOK{Matrix{elt}} - @test blocklengths.(axes(a)) == ([2, 3], [3, 4]) - @test iszero(a) - @test iszero(block_stored_length(a)) - @test iszero(stored_length(a)) - end - end - - # BlockSparseVector - bs = ([2, 3],) - for T in ( - BlockSparseArray{elt}, - BlockSparseArray{elt,1}, - BlockSparseVector{elt}, - BlockSparseArray{elt,1,Vector{elt}}, - BlockSparseVector{elt,Vector{elt}}, - ## BlockSparseArray{elt,1,Vector{elt},SparseVectorDOK{Vector{elt}}}, # TODO - ## BlockSparseVector{elt,Vector{elt},SparseVectorDOK{Vector{elt}}}, # TODO - ) - for args in ( - bs, - (bs,), - blockedrange.(bs), - (blockedrange.(bs),), - (undef, bs), - (undef, bs...), - (undef, blockedrange.(bs)), - (undef, blockedrange.(bs)...), - ) - a = T(args...) 
- @test eltype(a) == elt - @test blocktype(a) == Vector{elt} - @test blockstype(a) <: SparseVectorDOK{Vector{elt}} - @test blocklengths.(axes(a)) == ([2, 3],) - @test iszero(a) - @test iszero(block_stored_length(a)) - @test iszero(stored_length(a)) - end - end - end - @testset "Basics" begin - a = dev(BlockSparseArray{elt}([2, 3], [2, 3])) - @allowscalar @test a == dev( - BlockSparseArray{elt}(blockedrange([2, 3]), blockedrange([2, 3])) - ) - @test eltype(a) === elt - @test axes(a) == (1:5, 1:5) - @test all(aᵢ -> aᵢ isa BlockedOneTo, axes(a)) - @test blocklength.(axes(a)) == (2, 2) - @test blocksize(a) == (2, 2) - @test size(a) == (5, 5) - @test block_stored_length(a) == 0 - @test iszero(a) - @allowscalar @test all(I -> iszero(a[I]), eachindex(a)) - @test_throws DimensionMismatch a[Block(1, 1)] = randn(elt, 2, 3) - - a = BlockSparseArray{elt}([2, 3], [2, 3]) - a[3, 3] = 33 - @test eltype(a) === elt - @test axes(a) == (1:5, 1:5) - @test all(aᵢ -> aᵢ isa BlockedOneTo, axes(a)) - @test blocklength.(axes(a)) == (2, 2) - @test blocksize(a) == (2, 2) - @test size(a) == (5, 5) - @test block_stored_length(a) == 1 - @test !iszero(a) - @test a[3, 3] == 33 - @test all(eachindex(a)) do I - if I == CartesianIndex(3, 3) - a[I] == 33 - else - iszero(a[I]) - end - end - - a[3, 3] = NaN - @test isnan(norm(a)) - - # Empty constructor - for a in (dev(BlockSparseArray{elt}()), dev(BlockSparseArray{elt}(undef))) - @test size(a) == () - @test isone(length(a)) - @test blocksize(a) == () - @test blocksizes(a) == fill(()) - @test iszero(block_stored_length(a)) - @test iszero(@allowscalar(a[])) - @test iszero(@allowscalar(a[CartesianIndex()])) - @test a[Block()] == dev(fill(0)) - @test iszero(@allowscalar(a[Block()][])) - # Broken: - ## @test b[Block()[]] == 2 - for b in ( - (b = copy(a); @allowscalar b[] = 2; b), - (b = copy(a); @allowscalar b[CartesianIndex()] = 2; b), - ) - @test size(b) == () - @test isone(length(b)) - @test blocksize(b) == () - @test blocksizes(b) == fill(()) - @test isone(block_stored_length(b)) - @test @allowscalar(b[]) == 2 - @test @allowscalar(b[CartesianIndex()]) == 2 - @test b[Block()] == dev(fill(2)) - @test @allowscalar(b[Block()][]) == 2 - # Broken: - ## @test b[Block()[]] == 2 - end - end - - @testset "Transpose" begin - a = dev(BlockSparseArray{elt}([2, 2], [3, 3, 1])) - a[Block(1, 1)] = dev(randn(elt, 2, 3)) - a[Block(2, 3)] = dev(randn(elt, 2, 1)) - - at = @inferred transpose(a) - @test at isa Transpose - @test size(at) == reverse(size(a)) - @test blocksize(at) == reverse(blocksize(a)) - @test stored_length(at) == stored_length(a) - @test block_stored_length(at) == block_stored_length(a) - for bind in block_stored_indices(a) - bindt = Block(reverse(Int.(Tuple(bind)))) - @test bindt in block_stored_indices(at) - end - - @test @views(at[Block(1, 1)]) == transpose(a[Block(1, 1)]) - @test @views(at[Block(1, 1)]) isa Transpose - @test @views(at[Block(3, 2)]) == transpose(a[Block(2, 3)]) - # TODO: BlockView == AbstractArray calls scalar code - @test @allowscalar @views(at[Block(1, 2)]) == transpose(a[Block(2, 1)]) - @test @views(at[Block(1, 2)]) isa Transpose - end - - @testset "Adjoint" begin - a = dev(BlockSparseArray{elt}([2, 2], [3, 3, 1])) - a[Block(1, 1)] = dev(randn(elt, 2, 3)) - a[Block(2, 3)] = dev(randn(elt, 2, 1)) - - at = @inferred adjoint(a) - @test at isa Adjoint - @test size(at) == reverse(size(a)) - @test blocksize(at) == reverse(blocksize(a)) - @test stored_length(at) == stored_length(a) - @test block_stored_length(at) == block_stored_length(a) - for bind in 
block_stored_indices(a) - bindt = Block(reverse(Int.(Tuple(bind)))) - @test bindt in block_stored_indices(at) - end - - @test @views(at[Block(1, 1)]) == adjoint(a[Block(1, 1)]) - @test @views(at[Block(1, 1)]) isa Adjoint - @test @views(at[Block(3, 2)]) == adjoint(a[Block(2, 3)]) - # TODO: BlockView == AbstractArray calls scalar code - @test @allowscalar @views(at[Block(1, 2)]) == adjoint(a[Block(2, 1)]) - @test @views(at[Block(1, 2)]) isa Adjoint - end - end - @testset "Tensor algebra" begin - a = dev(BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = dev(randn(elt, size(a[b]))) - end - @test eltype(a) == elt - @test block_stored_length(a) == 2 - @test stored_length(a) == 2 * 4 + 3 * 3 - - # TODO: Broken on GPU. - if dev ≠ cpu - a = dev(BlockSparseArray{elt}([2, 3], [3, 4])) - @test_broken a[Block(1, 2)] .= 2 - end - - # TODO: Broken on GPU. - a = BlockSparseArray{elt}([2, 3], [3, 4]) - a[Block(1, 2)] .= 2 - @test eltype(a) == elt - @test all(==(2), a[Block(1, 2)]) - @test iszero(a[Block(1, 1)]) - @test iszero(a[Block(2, 1)]) - @test iszero(a[Block(2, 2)]) - @test block_stored_length(a) == 1 - @test stored_length(a) == 2 * 4 - - # TODO: Broken on GPU. - if dev ≠ cpu - a = dev(BlockSparseArray{elt}([2, 3], [3, 4])) - @test_broken a[Block(1, 2)] .= 0 - end - - # TODO: Broken on GPU. - a = BlockSparseArray{elt}([2, 3], [3, 4]) - a[Block(1, 2)] .= 0 - @test eltype(a) == elt - @test iszero(a[Block(1, 1)]) - @test iszero(a[Block(2, 1)]) - @test iszero(a[Block(1, 2)]) - @test iszero(a[Block(2, 2)]) - @test block_stored_length(a) == 1 - @test stored_length(a) == 2 * 4 - - a = dev(BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = dev(randn(elt, size(a[b]))) - end - b = similar(a, complex(elt)) - @test eltype(b) == complex(eltype(a)) - @test iszero(b) - @test block_stored_length(b) == 0 - @test stored_length(b) == 0 - @test size(b) == size(a) - @test blocksize(b) == blocksize(a) - - a = dev(BlockSparseArray{elt}([2, 3], [3, 4])) - b = @view a[[Block(2), Block(1)], [Block(2), Block(1)]] - c = @view b[Block(1, 1)] - @test iszero(a) - @test iszero(stored_length(a)) - @test iszero(b) - @test iszero(stored_length(b)) - # TODO: Broken on GPU. - @test iszero(c) broken = dev ≠ cpu - @test iszero(stored_length(c)) - @allowscalar a[5, 7] = 1 - @test !iszero(a) - @test stored_length(a) == 3 * 4 - @test !iszero(b) - @test stored_length(b) == 3 * 4 - # TODO: Broken on GPU. 
- @test !iszero(c) broken = dev ≠ cpu - @test stored_length(c) == 3 * 4 - d = @view a[1:4, 1:6] - @test iszero(d) - @test stored_length(d) == 2 * 3 - - a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = randn(elt, size(a[b])) - end - b = copy(a) - b[1, 1] = 11 - @test b[1, 1] == 11 - @test a[1, 1] ≠ 11 - - a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = randn(elt, size(a[b])) - end - b = copy(a) - b .*= 2 - @test b ≈ 2a - - a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = randn(elt, size(a[b])) - end - b = copy(a) - b ./= 2 - @test b ≈ a / 2 - - a = dev(BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = dev(randn(elt, size(a[b]))) - end - b = 2 * a - @allowscalar @test Array(b) ≈ 2 * Array(a) - @test eltype(b) == elt - @test block_stored_length(b) == 2 - @test stored_length(b) == 2 * 4 + 3 * 3 - - a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = randn(elt, size(a[b])) - end - b = (2 + 3im) * a - @test Array(b) ≈ (2 + 3im) * Array(a) - @test eltype(b) == complex(elt) - @test block_stored_length(b) == 2 - @test stored_length(b) == 2 * 4 + 3 * 3 - - a = dev(BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = dev(randn(elt, size(a[b]))) - end - b = a + a - @allowscalar @test Array(b) ≈ 2 * Array(a) - @test eltype(b) == elt - @test block_stored_length(b) == 2 - @test stored_length(b) == 2 * 4 + 3 * 3 - - a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = randn(elt, size(a[b])) - end - x = BlockSparseArray{elt}(undef, ([3, 4], [2, 3])) - @views for b in [Block(1, 2), Block(2, 1)] - x[b] = randn(elt, size(x[b])) - end - b = a .+ a .+ 3 .* PermutedDimsArray(x, (2, 1)) - @test Array(b) ≈ 2 * Array(a) + 3 * permutedims(Array(x), (2, 1)) - @test eltype(b) == elt - @test block_stored_length(b) == 2 - @test stored_length(b) == 2 * 4 + 3 * 3 - - a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = randn(elt, size(a[b])) - end - b = permutedims(a, (2, 1)) - @test Array(b) ≈ permutedims(Array(a), (2, 1)) - @test eltype(b) == elt - @test block_stored_length(b) == 2 - @test stored_length(b) == 2 * 4 + 3 * 3 - - a = dev(BlockSparseArray{elt}([1, 1, 1], [1, 2, 3], [2, 2, 1], [1, 2, 1])) - a[Block(3, 2, 2, 3)] = dev(randn(elt, 1, 2, 2, 1)) - perm = (2, 3, 4, 1) - for b in (PermutedDimsArray(a, perm), permutedims(a, perm)) - @test Array(b) == permutedims(Array(a), perm) - @test issetequal(block_stored_indices(b), [Block(2, 2, 3, 3)]) - @test @allowscalar b[Block(2, 2, 3, 3)] == permutedims(a[Block(3, 2, 2, 3)], perm) - end - - a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = randn(elt, size(a[b])) - end - b = map(x -> 2x, a) - @test Array(b) ≈ 2 * Array(a) - @test eltype(b) == elt - @test size(b) == size(a) - @test blocksize(b) == (2, 2) - @test block_stored_length(b) == 2 - @test stored_length(b) == 2 * 4 + 3 * 3 - - a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = randn(elt, size(a[b])) - end - b = a[[Block(2), Block(1)], [Block(2), Block(1)]] - @test b[Block(1, 1)] == a[Block(2, 2)] - @test b[Block(1, 2)] == a[Block(2, 1)] - @test b[Block(2, 1)] == a[Block(1, 2)] - @test 
b[Block(2, 2)] == a[Block(1, 1)] - @test size(b) == size(a) - @test blocksize(b) == (2, 2) - @test stored_length(b) == stored_length(a) - @test block_stored_length(b) == 2 - - a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = randn(elt, size(a[b])) - end - b = a[Block(1):Block(2), Block(1):Block(2)] - @test b == a - @test size(b) == size(a) - @test blocksize(b) == (2, 2) - @test stored_length(b) == stored_length(a) - @test block_stored_length(b) == 2 - - a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = randn(elt, size(a[b])) - end - b = a[Block(1):Block(1), Block(1):Block(2)] - @test b == Array(a)[1:2, 1:end] - @test b[Block(1, 1)] == a[Block(1, 1)] - @test b[Block(1, 2)] == a[Block(1, 2)] - @test size(b) == (2, 7) - @test blocksize(b) == (1, 2) - @test stored_length(b) == stored_length(a[Block(1, 2)]) - @test block_stored_length(b) == 1 - - a = dev(BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = dev(randn(elt, size(a[b]))) - end - for b in (a[2:4, 2:4], @view(a[2:4, 2:4])) - @allowscalar @test b == Array(a)[2:4, 2:4] - @test size(b) == (3, 3) - @test blocksize(b) == (2, 2) - @test stored_length(b) == 1 * 1 + 2 * 2 - @test block_stored_length(b) == 2 - for f in (getindex, view) - # TODO: Broken on GPU. - @allowscalar begin - @test size(f(b, Block(1, 1))) == (1, 2) - @test size(f(b, Block(2, 1))) == (2, 2) - @test size(f(b, Block(1, 2))) == (1, 1) - @test size(f(b, Block(2, 2))) == (2, 1) - @test f(b, Block(1, 1)) == a[Block(1, 1)[2:2, 2:3]] - @test f(b, Block(2, 1)) == a[Block(2, 1)[1:2, 2:3]] - @test f(b, Block(1, 2)) == a[Block(1, 2)[2:2, 1:1]] - @test f(b, Block(2, 2)) == a[Block(2, 2)[1:2, 1:1]] - end - end - end - - a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = randn(elt, size(a[b])) - end - b = a[Block(2, 1)[1:2, 2:3]] - @test b == Array(a)[3:4, 2:3] - @test size(b) == (2, 2) - @test blocksize(b) == (1, 1) - @test stored_length(b) == 2 * 2 - @test block_stored_length(b) == 1 - - a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = randn(elt, size(a[b])) - end - b = PermutedDimsArray(a, (2, 1)) - @test block_stored_length(b) == 2 - @test Array(b) == permutedims(Array(a), (2, 1)) - c = 2 * b - @test block_stored_length(c) == 2 - @test Array(c) == 2 * permutedims(Array(a), (2, 1)) - - a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = randn(elt, size(a[b])) - end - b = a' - @test block_stored_length(b) == 2 - @test Array(b) == Array(a)' - c = 2 * b - @test block_stored_length(c) == 2 - @test Array(c) == 2 * Array(a)' - - a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = randn(elt, size(a[b])) - end - b = transpose(a) - @test block_stored_length(b) == 2 - @test Array(b) == transpose(Array(a)) - c = 2 * b - @test block_stored_length(c) == 2 - @test Array(c) == 2 * transpose(Array(a)) - - a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = randn(elt, size(a[b])) - end - b = a[Block(1), Block(1):Block(2)] - @test size(b) == (2, 7) - @test blocksize(b) == (1, 2) - @test b[Block(1, 1)] == a[Block(1, 1)] - @test b[Block(1, 2)] == a[Block(1, 2)] - - a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = randn(elt, 
size(a[b])) - end - b = copy(a) - x = randn(elt, size(@view(a[Block(2, 2)]))) - b[Block(2), Block(2)] = x - @test b[Block(2, 2)] == x - - a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = randn(elt, size(a[b])) - end - b = copy(a) - b[Block(1, 1)] .= 1 - @test b[Block(1, 1)] == trues(blocksizes(b)[1, 1]) - - a = BlockSparseArray{elt}([2, 3], [3, 4]) - b = @view a[Block(2, 2)] - @test size(b) == (3, 4) - for i in parentindices(b) - @test i isa Base.OneTo{Int} - end - @test parentindices(b)[1] == 1:3 - @test parentindices(b)[2] == 1:4 - - a = BlockSparseArray{elt}([2, 3], [3, 4]) - b = @view a[Block(2, 2)[1:2, 2:2]] - @test size(b) == (2, 1) - for i in parentindices(b) - @test i isa UnitRange{Int} - end - @test parentindices(b)[1] == 1:2 - @test parentindices(b)[2] == 2:2 - - a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) - x = randn(elt, 1, 2) - @view(a[Block(2, 2)])[1:1, 1:2] = x - @test a[Block(2, 2)][1:1, 1:2] == x - @test @view(a[Block(2, 2)])[1:1, 1:2] == x - @test a[3:3, 4:5] == x - - a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) - x = randn(elt, 1, 2) - @views a[Block(2, 2)][1:1, 1:2] = x - @test a[Block(2, 2)][1:1, 1:2] == x - @test @view(a[Block(2, 2)])[1:1, 1:2] == x - @test a[3:3, 4:5] == x - - a = BlockSparseArray{elt}([2, 3], [3, 4]) - b = @views a[Block(2, 2)][1:2, 2:3] - @test b isa SubArray{<:Any,<:Any,<:BlockView} - for i in parentindices(b) - @test i isa UnitRange{Int} - end - x = randn(elt, 2, 2) - b .= x - @test a[Block(2, 2)[1:2, 2:3]] == x - @test a[Block(2, 2)[1:2, 2:3]] == b - @test block_stored_length(a) == 1 - - a = BlockSparseArray{elt}([2, 3], [2, 3]) - @views for b in [Block(1, 1), Block(2, 2)] - a[b] = randn(elt, size(a[b])) - end - for I in (Block.(1:2), [Block(1), Block(2)]) - b = @view a[I, I] - for I in CartesianIndices(a) - @test b[I] == a[I] - end - for block in BlockRange(a) - @test b[block] == a[block] - end - end - - a = dev(BlockSparseArray{elt}([2, 3], [2, 3])) - @views for b in [Block(1, 1), Block(2, 2)] - # TODO: Use `blocksizes(a)[Int.(Tuple(b))...]` once available. - a[b] = dev(randn(elt, size(a[b]))) - end - for I in ([Block(2), Block(1)],) - b = @view a[I, I] - @test b[Block(1, 1)] == a[Block(2, 2)] - @test b[Block(2, 1)] == a[Block(1, 2)] - @test b[Block(1, 2)] == a[Block(2, 1)] - @test b[Block(2, 2)] == a[Block(1, 1)] - @allowscalar begin - @test b[1, 1] == a[3, 3] - @test b[4, 4] == a[1, 1] - b[4, 4] = 44 - @test b[4, 4] == 44 - end - end - - a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = randn(elt, size(a[b])) - end - b = a[Block(2):Block(2), Block(1):Block(2)] - @test block_stored_length(b) == 1 - @test b == Array(a)[3:5, 1:end] - - a = BlockSparseArray{elt}(undef, ([2, 3, 4], [2, 3, 4])) - # TODO: Define `block_diagindices`. - @views for b in [Block(1, 1), Block(2, 2), Block(3, 3)] - a[b] = randn(elt, size(a[b])) - end - for (I1, I2) in ( - (mortar([Block(2)[2:3], Block(3)[1:3]]), mortar([Block(2)[2:3], Block(3)[2:3]])), - ([Block(2)[2:3], Block(3)[1:3]], [Block(2)[2:3], Block(3)[2:3]]), - ) - for b in (a[I1, I2], @view(a[I1, I2])) - # TODO: Rename `block_stored_length`. - @test block_stored_length(b) == 2 - @test b[Block(1, 1)] == a[Block(2, 2)[2:3, 2:3]] - @test b[Block(2, 2)] == a[Block(3, 3)[1:3, 2:3]] - end - end - - a = dev(BlockSparseArray{elt}(undef, ([3, 3], [3, 3]))) - # TODO: Define `block_diagindices`. 
- @views for b in [Block(1, 1), Block(2, 2)] - a[b] = dev(randn(elt, size(a[b]))) - end - I = mortar([Block(1)[1:2], Block(2)[1:2]]) - b = a[:, I] - @test b[Block(1, 1)] == a[Block(1, 1)][:, 1:2] - @test b[Block(2, 1)] == a[Block(2, 1)][:, 1:2] - @test b[Block(1, 2)] == a[Block(1, 2)][:, 1:2] - @test b[Block(2, 2)] == a[Block(2, 2)][:, 1:2] - @test blocklengths.(axes(b)) == ([3, 3], [2, 2]) - # TODO: Rename `block_stored_length`. - @test blocksize(b) == (2, 2) - @test block_stored_length(b) == 2 - - a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) - @views for b in [Block(1, 2), Block(2, 1)] - a[b] = randn(elt, size(a[b])) - end - @test isassigned(a, 1, 1) - @test isassigned(a, 1, 1, 1) - @test !isassigned(a, 1, 1, 2) - @test isassigned(a, 5, 7) - @test isassigned(a, 5, 7, 1) - @test !isassigned(a, 5, 7, 2) - @test !isassigned(a, 0, 1) - @test !isassigned(a, 5, 8) - @test isassigned(a, Block(1), Block(1)) - @test isassigned(a, Block(2), Block(2)) - @test !isassigned(a, Block(1), Block(0)) - @test !isassigned(a, Block(3), Block(2)) - @test isassigned(a, Block(1, 1)) - @test isassigned(a, Block(2, 2)) - @test !isassigned(a, Block(1, 0)) - @test !isassigned(a, Block(3, 2)) - @test isassigned(a, Block(1)[1], Block(1)[1]) - @test isassigned(a, Block(2)[3], Block(2)[4]) - @test !isassigned(a, Block(1)[0], Block(1)[1]) - @test !isassigned(a, Block(2)[3], Block(2)[5]) - @test !isassigned(a, Block(1)[1], Block(0)[1]) - @test !isassigned(a, Block(3)[3], Block(2)[4]) - - a = BlockSparseArray{elt}([2, 3], [3, 4]) - @test iszero(a) - @test iszero(block_stored_length(a)) - fill!(a, 0) - @test iszero(a) - @test iszero(block_stored_length(a)) - fill!(a, 2) - @test !iszero(a) - @test all(==(2), a) - @test block_stored_length(a) == 4 - fill!(a, 0) - @test iszero(a) - @test iszero(block_stored_length(a)) - - a = BlockSparseArray{elt}([2, 3], [3, 4]) - @test iszero(a) - @test iszero(block_stored_length(a)) - a .= 0 - @test iszero(a) - @test iszero(block_stored_length(a)) - a .= 2 - @test !iszero(a) - @test all(==(2), a) - @test block_stored_length(a) == 4 - a .= 0 - @test iszero(a) - @test iszero(block_stored_length(a)) - - # TODO: Broken on GPU. - a = BlockSparseArray{elt}([2, 3], [3, 4]) - for I in (Block.(1:2), [Block(1), Block(2)]) - b = @view a[I, I] - x = randn(elt, 3, 4) - b[Block(2, 2)] = x - # This outputs a block of zeros; - # for some reason the block - # is not getting set. - # I think the issue is that: - # ```julia - # @view(@view(a[I, I])[Block(1, 1)]) - # ``` - # creates a doubly-wrapped SubArray - # instead of flattening down to a - # single SubArray wrapper. 
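# A minimal sketch probing that hypothesis (an illustrative assumption, not
# part of the original test; reuses `a` and `I` from the loop above):
#   c = @view a[I, I]
#   d = @view c[Block(1, 1)]
#   parent(d) isa SubArray  # true if the two view wrappers were not flattened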
- @test a[Block(2, 2)] == x - @test b[Block(2, 2)] == x - end - - function f1() - a = BlockSparseArray{elt}([2, 3], [3, 4]) - b = @view a[[Block(2), Block(1)], [Block(2), Block(1)]] - x = randn(elt, 3, 4) - b[Block(1, 1)] .= x - return (; a, b, x) - end - function f2() - a = BlockSparseArray{elt}([2, 3], [3, 4]) - b = @view a[[Block(2), Block(1)], [Block(2), Block(1)]] - x = randn(elt, 3, 4) - b[Block(1, 1)] = x - return (; a, b, x) - end - for abx in (f1(), f2()) - @compat (; a, b, x) = abx - @test b isa SubArray{<:Any,<:Any,<:BlockSparseArray} - @test block_stored_length(b) == 1 - @test b[Block(1, 1)] == x - @test @view(b[Block(1, 1)]) isa Matrix{elt} - for blck in [Block(2, 1), Block(1, 2), Block(2, 2)] - @test iszero(b[blck]) - end - @test block_stored_length(a) == 1 - @test a[Block(2, 2)] == x - for blck in [Block(1, 1), Block(2, 1), Block(1, 2)] - @test iszero(a[blck]) - end - @test_throws DimensionMismatch b[Block(1, 1)] .= randn(2, 3) - end - - a = BlockSparseArray{elt}([2, 3], [3, 4]) - b = @views a[[Block(2), Block(1)], [Block(2), Block(1)]][Block(2, 1)] - @test iszero(b) - @test size(b) == (2, 4) - x = randn(elt, 2, 4) - b .= x - @test b == x - @test a[Block(1, 2)] == x - @test block_stored_length(a) == 1 - - a = BlockSparseArray{elt}([4, 3, 2], [4, 3, 2]) - @views for B in [Block(1, 1), Block(2, 2), Block(3, 3)] - a[B] = randn(elt, size(a[B])) - end - b = @view a[[Block(3), Block(2), Block(1)], [Block(3), Block(2), Block(1)]] - @test b isa SubArray{<:Any,<:Any,<:BlockSparseArray} - c = @view b[4:8, 4:8] - @test c isa SubArray{<:Any,<:Any,<:BlockSparseArray} - @test size(c) == (5, 5) - @test block_stored_length(c) == 2 - @test blocksize(c) == (2, 2) - @test blocklengths.(axes(c)) == ([2, 3], [2, 3]) - @test size(c[Block(1, 1)]) == (2, 2) - @test c[Block(1, 1)] == a[Block(2, 2)[2:3, 2:3]] - @test size(c[Block(2, 2)]) == (3, 3) - @test c[Block(2, 2)] == a[Block(1, 1)[1:3, 1:3]] - @test size(c[Block(2, 1)]) == (3, 2) - @test iszero(c[Block(2, 1)]) - @test size(c[Block(1, 2)]) == (2, 3) - @test iszero(c[Block(1, 2)]) - - x = randn(elt, 3, 3) - c[Block(2, 2)] = x - @test c[Block(2, 2)] == x - @test a[Block(1, 1)[1:3, 1:3]] == x - - a = BlockSparseArray{elt}([2, 3], [3, 4]) - b = @view a[[Block(2), Block(1)], [Block(2), Block(1)]] - for index in parentindices(@view(b[Block(1, 1)])) - @test index isa Base.OneTo{Int} - end - - a = BlockSparseArray{elt}([2, 3], [3, 4]) - a[Block(1, 1)] = randn(elt, 2, 3) - b = @view a[Block(1, 1)[1:2, 1:1]] - @test b isa SubArray{elt,2,Matrix{elt}} - for i in parentindices(b) - @test i isa UnitRange{Int} - end - - a = BlockSparseArray{elt}([2, 2, 2, 2], [2, 2, 2, 2]) - @views for I in [Block(1, 1), Block(2, 2), Block(3, 3), Block(4, 4)] - a[I] = randn(elt, size(a[I])) - end - for I in (blockedrange([4, 4]), BlockedVector(Block.(1:4), [2, 2])) - b = @view a[I, I] - @test copy(b) == a - @test blocksize(b) == (2, 2) - @test blocklengths.(axes(b)) == ([4, 4], [4, 4]) - # TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539). 
- if VERSION < v"1.11-" - @test b[Block(1, 1)] == a[Block.(1:2), Block.(1:2)] - @test b[Block(2, 1)] == a[Block.(3:4), Block.(1:2)] - @test b[Block(1, 2)] == a[Block.(1:2), Block.(3:4)] - @test b[Block(2, 2)] == a[Block.(3:4), Block.(3:4)] - end - c = @view b[Block(2, 2)] - @test blocksize(c) == (1, 1) - @test c == a[Block.(3:4), Block.(3:4)] - end - - a = BlockSparseArray{elt}([2, 3], [2, 3]) - a[Block(1, 1)] = randn(elt, 2, 2) - a[Block(2, 2)] = randn(elt, 3, 3) - for I in (mortar([Block(1)[2:2], Block(2)[2:3]]), [Block(1)[2:2], Block(2)[2:3]]) - b = @view a[:, I] - @test b == Array(a)[:, [2, 4, 5]] - end - - # Merge and permute blocks. - a = BlockSparseArray{elt}([2, 2, 2, 2], [2, 2, 2, 2]) - @views for I in [Block(1, 1), Block(2, 2), Block(3, 3), Block(4, 4)] - a[I] = randn(elt, size(a[I])) - end - for I in ( - BlockVector([Block(4), Block(3), Block(2), Block(1)], [2, 2]), - BlockedVector([Block(4), Block(3), Block(2), Block(1)], [2, 2]), - ) - b = @view a[I, I] - J = [Block(4), Block(3), Block(2), Block(1)] - @test b == a[J, J] - @test copy(b) == a[J, J] - @test blocksize(b) == (2, 2) - @test blocklengths.(axes(b)) == ([4, 4], [4, 4]) - @test b[Block(1, 1)] == Array(a)[[7, 8, 5, 6], [7, 8, 5, 6]] - c = @views b[Block(1, 1)][2:3, 2:3] - @test c == Array(a)[[8, 5], [8, 5]] - @test copy(c) == Array(a)[[8, 5], [8, 5]] - c = @view b[Block(1, 1)[2:3, 2:3]] - @test c == Array(a)[[8, 5], [8, 5]] - @test copy(c) == Array(a)[[8, 5], [8, 5]] - end - - # TODO: Add more tests of this, it may - # only be working accidentally. - a = BlockSparseArray{elt}([2, 3], [2, 3]) - a[Block(1, 1)] = randn(elt, 2, 2) - a[Block(2, 2)] = randn(elt, 3, 3) - @test a[2:4, 4] == Array(a)[2:4, 4] - # TODO: Fix this. - @test_broken a[4, 2:4] == Array(a)[4, 2:4] - end - @testset "view!" begin - for blk in ((Block(2, 2),), (Block(2), Block(2))) - a = BlockSparseArray{elt}([2, 3], [2, 3]) - b = view!(a, blk...) - x = randn(elt, 3, 3) - b .= x - @test b == x - @test a[blk...] == x - @test @view(a[blk...]) == x - @test view!(a, blk...) == x - @test @view!(a[blk...]) == x - end - for blk in ((Block(2, 2),), (Block(2), Block(2))) - a = BlockSparseArray{elt}([2, 3], [2, 3]) - b = @view! a[blk...] - x = randn(elt, 3, 3) - b .= x - @test b == x - @test a[blk...] == x - @test @view(a[blk...]) == x - @test view!(a, blk...) == x - @test @view!(a[blk...]) == x - end - for blk in ((Block(2, 2)[2:3, 1:2],), (Block(2)[2:3], Block(2)[1:2])) - a = BlockSparseArray{elt}([2, 3], [2, 3]) - b = view!(a, blk...) - x = randn(elt, 2, 2) - b .= x - @test b == x - @test a[blk...] == x - @test @view(a[blk...]) == x - @test view!(a, blk...) == x - @test @view!(a[blk...]) == x - end - for blk in ((Block(2, 2)[2:3, 1:2],), (Block(2)[2:3], Block(2)[1:2])) - a = BlockSparseArray{elt}([2, 3], [2, 3]) - b = @view! a[blk...] - x = randn(elt, 2, 2) - b .= x - @test b == x - @test a[blk...] == x - @test @view(a[blk...]) == x - @test view!(a, blk...) 
== x - @test @view!(a[blk...]) == x - end - end - @testset "LinearAlgebra" begin - a1 = dev(BlockSparseArray{elt}([2, 3], [2, 3])) - a1[Block(1, 1)] = dev(randn(elt, size(@view(a1[Block(1, 1)])))) - a2 = dev(BlockSparseArray{elt}([2, 3], [2, 3])) - a2[Block(1, 1)] = dev(randn(elt, size(@view(a1[Block(1, 1)])))) - a_dest = a1 * a2 - @allowscalar @test Array(a_dest) ≈ Array(a1) * Array(a2) - @test a_dest isa BlockSparseArray{elt} - @test block_stored_length(a_dest) == 1 - end - @testset "Matrix multiplication" begin - a1 = dev(BlockSparseArray{elt}([2, 3], [2, 3])) - a1[Block(1, 2)] = dev(randn(elt, size(@view(a1[Block(1, 2)])))) - a1[Block(2, 1)] = dev(randn(elt, size(@view(a1[Block(2, 1)])))) - a2 = dev(BlockSparseArray{elt}([2, 3], [2, 3])) - a2[Block(1, 2)] = dev(randn(elt, size(@view(a2[Block(1, 2)])))) - a2[Block(2, 1)] = dev(randn(elt, size(@view(a2[Block(2, 1)])))) - for (a1′, a2′) in ((a1, a2), (a1', a2), (a1, a2'), (a1', a2')) - a_dest = a1′ * a2′ - @allowscalar @test Array(a_dest) ≈ Array(a1′) * Array(a2′) - end - end - @testset "Dot product" begin - a1 = dev(BlockSparseArray{elt}([2, 3, 4])) - a1[Block(1)] = dev(randn(elt, size(@view(a1[Block(1)])))) - a1[Block(3)] = dev(randn(elt, size(@view(a1[Block(3)])))) - a2 = dev(BlockSparseArray{elt}([2, 3, 4])) - a2[Block(2)] = dev(randn(elt, size(@view(a1[Block(2)])))) - a2[Block(3)] = dev(randn(elt, size(@view(a1[Block(3)])))) - @test a1' * a2 ≈ Array(a1)' * Array(a2) - @test dot(a1, a2) ≈ a1' * a2 - end - @testset "cat" begin - a1 = dev(BlockSparseArray{elt}([2, 3], [2, 3])) - a1[Block(2, 1)] = dev(randn(elt, size(@view(a1[Block(2, 1)])))) - a2 = dev(BlockSparseArray{elt}([2, 3], [2, 3])) - a2[Block(1, 2)] = dev(randn(elt, size(@view(a2[Block(1, 2)])))) - - a_dest = cat(a1, a2; dims=1) - @test block_stored_length(a_dest) == 2 - @test blocklengths.(axes(a_dest)) == ([2, 3, 2, 3], [2, 3]) - @test issetequal(block_stored_indices(a_dest), [Block(2, 1), Block(3, 2)]) - @test a_dest[Block(2, 1)] == a1[Block(2, 1)] - @test a_dest[Block(3, 2)] == a2[Block(1, 2)] - - a_dest = cat(a1, a2; dims=2) - @test block_stored_length(a_dest) == 2 - @test blocklengths.(axes(a_dest)) == ([2, 3], [2, 3, 2, 3]) - @test issetequal(block_stored_indices(a_dest), [Block(2, 1), Block(1, 4)]) - @test a_dest[Block(2, 1)] == a1[Block(2, 1)] - @test a_dest[Block(1, 4)] == a2[Block(1, 2)] - - a_dest = cat(a1, a2; dims=(1, 2)) - @test block_stored_length(a_dest) == 2 - @test blocklengths.(axes(a_dest)) == ([2, 3, 2, 3], [2, 3, 2, 3]) - @test issetequal(block_stored_indices(a_dest), [Block(2, 1), Block(3, 4)]) - @test a_dest[Block(2, 1)] == a1[Block(2, 1)] - @test a_dest[Block(3, 4)] == a2[Block(1, 2)] - end - @testset "TensorAlgebra" begin - a1 = dev(BlockSparseArray{elt}([2, 3], [2, 3])) - a1[Block(1, 1)] = dev(randn(elt, size(@view(a1[Block(1, 1)])))) - a2 = dev(BlockSparseArray{elt}([2, 3], [2, 3])) - a2[Block(1, 1)] = dev(randn(elt, size(@view(a1[Block(1, 1)])))) - # TODO: Make this work, requires customization of `TensorAlgebra.fusedims` and - # `TensorAlgebra.splitdims` in terms of `BlockSparseArrays.block_reshape`, - # and customization of `TensorAlgebra.:⊗` in terms of `GradedAxes.tensor_product`. 
- a_dest, dimnames_dest = contract(a1, (1, -1), a2, (-1, 2)) - @allowscalar begin - a_dest_dense, dimnames_dest_dense = contract(Array(a1), (1, -1), Array(a2), (-1, 2)) - @test a_dest ≈ a_dest_dense - end - end - @testset "block_reshape" begin - a = dev(BlockSparseArray{elt}(undef, ([3, 4], [2, 3]))) - a[Block(1, 2)] = dev(randn(elt, size(@view(a[Block(1, 2)])))) - a[Block(2, 1)] = dev(randn(elt, size(@view(a[Block(2, 1)])))) - b = block_reshape(a, [6, 8, 9, 12]) - @test reshape(a[Block(1, 2)], 9) == b[Block(3)] - @test reshape(a[Block(2, 1)], 8) == b[Block(2)] - @test block_stored_length(b) == 2 - @test stored_length(b) == 17 - end -end -end diff --git a/NDTensors/src/lib/BroadcastMapConversion/.JuliaFormatter.toml b/NDTensors/src/lib/BroadcastMapConversion/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/BroadcastMapConversion/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/BroadcastMapConversion/src/BroadcastMapConversion.jl b/NDTensors/src/lib/BroadcastMapConversion/src/BroadcastMapConversion.jl deleted file mode 100644 index 6edf0ae2f7..0000000000 --- a/NDTensors/src/lib/BroadcastMapConversion/src/BroadcastMapConversion.jl +++ /dev/null @@ -1,48 +0,0 @@ -module BroadcastMapConversion -# Convert broadcast call to map call by capturing array arguments -# with `map_args` and creating a map function with `map_function`. -# Logic from https://github.com/Jutho/Strided.jl/blob/v2.0.4/src/broadcast.jl. - -using Base.Broadcast: Broadcasted - -const WrappedScalarArgs = Union{AbstractArray{<:Any,0},Ref{<:Any}} - -function map_args(bc::Broadcasted, rest...) - return (map_args(bc.args...)..., map_args(rest...)...) -end -map_args(a::AbstractArray, rest...) = (a, map_args(rest...)...) -map_args(a, rest...) = map_args(rest...) -map_args() = () - -struct MapFunction{F,Args<:Tuple} - f::F - args::Args -end -struct Arg end - -# construct MapFunction -function map_function(bc::Broadcasted) - args = map_function_tuple(bc.args) - return MapFunction(bc.f, args) -end -map_function_tuple(t::Tuple{}) = t -map_function_tuple(t::Tuple) = (map_function(t[1]), map_function_tuple(Base.tail(t))...) -map_function(a::WrappedScalarArgs) = a[] -map_function(a::AbstractArray) = Arg() -map_function(a) = a - -# Evaluate MapFunction -(f::MapFunction)(args...) = apply(f, args)[1] -function apply(f::MapFunction, args) - args, newargs = apply_tuple(f.args, args) - return f.f(args...), newargs -end -apply(a::Arg, args::Tuple) = args[1], Base.tail(args) -apply(a, args) = a, args -apply_tuple(t::Tuple{}, args) = t, args -function apply_tuple(t::Tuple, args) - t1, newargs1 = apply(t[1], args) - ttail, newargs = apply_tuple(Base.tail(t), newargs1) - return (t1, ttail...), newargs -end -end diff --git a/NDTensors/src/lib/BroadcastMapConversion/test/runtests.jl b/NDTensors/src/lib/BroadcastMapConversion/test/runtests.jl deleted file mode 100644 index 92e5d6ae41..0000000000 --- a/NDTensors/src/lib/BroadcastMapConversion/test/runtests.jl +++ /dev/null @@ -1,14 +0,0 @@ -@eval module $(gensym()) -using Test: @test, @testset -using NDTensors.BroadcastMapConversion: map_function, map_args -@testset "BroadcastMapConversion" begin - using Base.Broadcast: Broadcasted - c = 2.2 - a = randn(2, 3) - b = randn(2, 3) - bc = Broadcasted(*, (c, a)) - @test copy(bc) ≈ c * a ≈ map(map_function(bc), map_args(bc)...) - bc = Broadcasted(+, (a, b)) - @test copy(bc) ≈ a + b ≈ map(map_function(bc), map_args(bc)...) 
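# A hedged sketch (not in the original tests) of how nested broadcasts are
# converted: scalars are captured inside the `MapFunction`, while array
# arguments are marked by `Arg()` placeholders and supplied by `map_args`:
#   bc = Broadcasted(+, (a, Broadcasted(*, (c, b))))
#   copy(bc) ≈ a + c * b ≈ map(map_function(bc), map_args(bc)...)  # holds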
-end -end diff --git a/NDTensors/src/lib/CUDAExtensions/.JuliaFormatter.toml b/NDTensors/src/lib/CUDAExtensions/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/CUDAExtensions/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/CUDAExtensions/src/CUDAExtensions.jl b/NDTensors/src/lib/CUDAExtensions/src/CUDAExtensions.jl deleted file mode 100644 index 63a030f9cb..0000000000 --- a/NDTensors/src/lib/CUDAExtensions/src/CUDAExtensions.jl +++ /dev/null @@ -1,4 +0,0 @@ -module CUDAExtensions -include("cuda.jl") - -end diff --git a/NDTensors/src/lib/CUDAExtensions/src/cuda.jl b/NDTensors/src/lib/CUDAExtensions/src/cuda.jl deleted file mode 100644 index 7e94917a21..0000000000 --- a/NDTensors/src/lib/CUDAExtensions/src/cuda.jl +++ /dev/null @@ -1,14 +0,0 @@ -using NDTensors.TypeParameterAccessors: TypeParameterAccessors, Position -using NDTensors.GPUArraysCoreExtensions: storagemode -# Implemented in NDTensorsCUDAExt -function cu end - -## Here we need a `CuArrayAdaptor` because the `CuArrayAdaptor` provided by CUDA -## converts 64-bit numbers to 32-bit. We cannot write `adapt(CuVector, x)` because this -## will not allow us to properly utilize the buffer preference without changing the value of -## default_buffertype. Also, `adapt(CuVector{<:Any, <:Any, Buffertype})` fails to work properly. -struct CuArrayAdaptor{B} end - -function TypeParameterAccessors.position(::Type{<:CuArrayAdaptor}, ::typeof(storagemode)) - return Position(1) -end diff --git a/NDTensors/src/lib/CUDAExtensions/test/runtests.jl b/NDTensors/src/lib/CUDAExtensions/test/runtests.jl deleted file mode 100644 index 9c736ea59f..0000000000 --- a/NDTensors/src/lib/CUDAExtensions/test/runtests.jl +++ /dev/null @@ -1,9 +0,0 @@ -@eval module $(gensym()) -using Test: @testset, @test -using NDTensors.CUDAExtensions: cu, CuArrayAdaptor -using NDTensors.GPUArraysCoreExtensions: storagemode -@testset "cu function exists" begin - @test cu isa Function - @test storagemode(CuArrayAdaptor{1}) == 1 -end -end diff --git a/NDTensors/src/lib/DiagonalArrays/.JuliaFormatter.toml b/NDTensors/src/lib/DiagonalArrays/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/DiagonalArrays/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/DiagonalArrays/README.md b/NDTensors/src/lib/DiagonalArrays/README.md deleted file mode 100644 index 1043d095f7..0000000000 --- a/NDTensors/src/lib/DiagonalArrays/README.md +++ /dev/null @@ -1,84 +0,0 @@ -# DiagonalArrays.jl - -A Julia `DiagonalArray` type. 
- -````julia -using NDTensors.DiagonalArrays: - DiagonalArray, DiagonalMatrix, DiagIndex, DiagIndices, diaglength, isdiagindex -using Test - -function main() - d = DiagonalMatrix([1.0, 2.0, 3.0]) - @test eltype(d) == Float64 - @test diaglength(d) == 3 - @test size(d) == (3, 3) - @test d[1, 1] == 1 - @test d[2, 2] == 2 - @test d[3, 3] == 3 - @test d[1, 2] == 0 - - d = DiagonalArray([1.0, 2.0, 3.0], 3, 4, 5) - @test eltype(d) == Float64 - @test diaglength(d) == 3 - @test d[1, 1, 1] == 1 - @test d[2, 2, 2] == 2 - @test d[3, 3, 3] == 3 - @test d[1, 2, 1] == 0 - - d[2, 2, 2] = 22 - @test d[2, 2, 2] == 22 - - d_r = reshape(d, 3, 20) - @test size(d_r) == (3, 20) - @test all(I -> d_r[I] == d[I], LinearIndices(d)) - - @test length(d[DiagIndices(:)]) == 3 - @test Array(d) == d - @test d[DiagIndex(2)] == d[2, 2, 2] - - d[DiagIndex(2)] = 222 - @test d[2, 2, 2] == 222 - - a = randn(3, 4, 5) - new_diag = randn(3) - a[DiagIndices(:)] = new_diag - d[DiagIndices(:)] = a[DiagIndices(:)] - - @test a[DiagIndices(:)] == new_diag - @test d[DiagIndices(:)] == new_diag - - permuted_d = permutedims(d, (3, 2, 1)) - @test permuted_d isa DiagonalArray - @test permuted_d[DiagIndices(:)] == d[DiagIndices(:)] - @test size(d) == (3, 4, 5) - @test size(permuted_d) == (5, 4, 3) - for I in eachindex(d) - if !isdiagindex(d, I) - @test iszero(d[I]) - else - @test !iszero(d[I]) - end - end - - mapped_d = map(x -> 2x, d) - @test mapped_d isa DiagonalArray - @test mapped_d == map(x -> 2x, Array(d)) - - return nothing -end - -main() -```` - -You can generate this README with: -```julia -using Literate -using NDTensors.DiagonalArrays -dir = joinpath(pkgdir(DiagonalArrays), "src", "lib", "DiagonalArrays") -Literate.markdown(joinpath(dir, "examples", "README.jl"), dir; flavor=Literate.CommonMarkFlavor()) -``` - ---- - -*This page was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).* - diff --git a/NDTensors/src/lib/DiagonalArrays/examples/Project.toml b/NDTensors/src/lib/DiagonalArrays/examples/Project.toml deleted file mode 100644 index b46e6ac7ac..0000000000 --- a/NDTensors/src/lib/DiagonalArrays/examples/Project.toml +++ /dev/null @@ -1,4 +0,0 @@ -[deps] -Literate = "98b081ad-f1c9-55d3-8b20-4c87d4299306" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/NDTensors/src/lib/DiagonalArrays/examples/README.jl b/NDTensors/src/lib/DiagonalArrays/examples/README.jl deleted file mode 100644 index 074adec5c9..0000000000 --- a/NDTensors/src/lib/DiagonalArrays/examples/README.jl +++ /dev/null @@ -1,79 +0,0 @@ -# # DiagonalArrays.jl -# -# A Julia `DiagonalArray` type. 
- -using NDTensors.DiagonalArrays: - DiagonalArray, DiagonalMatrix, DiagIndex, DiagIndices, diaglength, isdiagindex -using Test - -function main() - d = DiagonalMatrix([1.0, 2.0, 3.0]) - @test eltype(d) == Float64 - @test diaglength(d) == 3 - @test size(d) == (3, 3) - @test d[1, 1] == 1 - @test d[2, 2] == 2 - @test d[3, 3] == 3 - @test d[1, 2] == 0 - - d = DiagonalArray([1.0, 2.0, 3.0], 3, 4, 5) - @test eltype(d) == Float64 - @test diaglength(d) == 3 - @test d[1, 1, 1] == 1 - @test d[2, 2, 2] == 2 - @test d[3, 3, 3] == 3 - @test d[1, 2, 1] == 0 - - d[2, 2, 2] = 22 - @test d[2, 2, 2] == 22 - - d_r = reshape(d, 3, 20) - @test size(d_r) == (3, 20) - @test all(I -> d_r[I] == d[I], LinearIndices(d)) - - @test length(d[DiagIndices(:)]) == 3 - @test Array(d) == d - @test d[DiagIndex(2)] == d[2, 2, 2] - - d[DiagIndex(2)] = 222 - @test d[2, 2, 2] == 222 - - a = randn(3, 4, 5) - new_diag = randn(3) - a[DiagIndices(:)] = new_diag - d[DiagIndices(:)] = a[DiagIndices(:)] - - @test a[DiagIndices(:)] == new_diag - @test d[DiagIndices(:)] == new_diag - - permuted_d = permutedims(d, (3, 2, 1)) - @test permuted_d isa DiagonalArray - @test permuted_d[DiagIndices(:)] == d[DiagIndices(:)] - @test size(d) == (3, 4, 5) - @test size(permuted_d) == (5, 4, 3) - for I in eachindex(d) - if !isdiagindex(d, I) - @test iszero(d[I]) - else - @test !iszero(d[I]) - end - end - - mapped_d = map(x -> 2x, d) - @test mapped_d isa DiagonalArray - @test mapped_d == map(x -> 2x, Array(d)) - - return nothing -end - -main() - -#= -You can generate this README with: -```julia -using Literate -using NDTensors.DiagonalArrays -dir = joinpath(pkgdir(DiagonalArrays), "src", "lib", "DiagonalArrays") -Literate.markdown(joinpath(dir, "examples", "README.jl"), dir; flavor=Literate.CommonMarkFlavor()) -``` -=# diff --git a/NDTensors/src/lib/DiagonalArrays/src/DiagonalArrays.jl b/NDTensors/src/lib/DiagonalArrays/src/DiagonalArrays.jl deleted file mode 100644 index 94566ebd88..0000000000 --- a/NDTensors/src/lib/DiagonalArrays/src/DiagonalArrays.jl +++ /dev/null @@ -1,14 +0,0 @@ -module DiagonalArrays -include("diaginterface/defaults.jl") -include("diaginterface/diaginterface.jl") -include("diaginterface/diagindex.jl") -include("diaginterface/diagindices.jl") -include("abstractdiagonalarray/abstractdiagonalarray.jl") -include("abstractdiagonalarray/sparsearrayinterface.jl") -include("abstractdiagonalarray/diagonalarraydiaginterface.jl") -include("abstractdiagonalarray/arraylayouts.jl") -include("diagonalarray/diagonalarray.jl") -include("diagonalarray/diagonalmatrix.jl") -include("diagonalarray/diagonalvector.jl") -include("diagonalarray/arraylayouts.jl") -end diff --git a/NDTensors/src/lib/DiagonalArrays/src/abstractdiagonalarray/abstractdiagonalarray.jl b/NDTensors/src/lib/DiagonalArrays/src/abstractdiagonalarray/abstractdiagonalarray.jl deleted file mode 100644 index 8180a61472..0000000000 --- a/NDTensors/src/lib/DiagonalArrays/src/abstractdiagonalarray/abstractdiagonalarray.jl +++ /dev/null @@ -1,3 +0,0 @@ -using ..SparseArraysBase: AbstractSparseArray - -abstract type AbstractDiagonalArray{T,N} <: AbstractSparseArray{T,N} end diff --git a/NDTensors/src/lib/DiagonalArrays/src/abstractdiagonalarray/arraylayouts.jl b/NDTensors/src/lib/DiagonalArrays/src/abstractdiagonalarray/arraylayouts.jl deleted file mode 100644 index d8f0b41dbe..0000000000 --- a/NDTensors/src/lib/DiagonalArrays/src/abstractdiagonalarray/arraylayouts.jl +++ /dev/null @@ -1,7 +0,0 @@ -using ArrayLayouts: ArrayLayouts -using ..SparseArraysBase: AbstractSparseLayout - 
-abstract type AbstractDiagonalLayout <: AbstractSparseLayout end -struct DiagonalLayout <: AbstractDiagonalLayout end - -ArrayLayouts.MemoryLayout(::Type{<:AbstractDiagonalArray}) = DiagonalLayout() diff --git a/NDTensors/src/lib/DiagonalArrays/src/abstractdiagonalarray/diagonalarraydiaginterface.jl b/NDTensors/src/lib/DiagonalArrays/src/abstractdiagonalarray/diagonalarraydiaginterface.jl deleted file mode 100644 index bcc2c81cbd..0000000000 --- a/NDTensors/src/lib/DiagonalArrays/src/abstractdiagonalarray/diagonalarraydiaginterface.jl +++ /dev/null @@ -1,23 +0,0 @@ -using ..SparseArraysBase: SparseArraysBase, StorageIndex, StorageIndices - -SparseArraysBase.StorageIndex(i::DiagIndex) = StorageIndex(index(i)) - -function Base.getindex(a::AbstractDiagonalArray, i::DiagIndex) - return a[StorageIndex(i)] -end - -function Base.setindex!(a::AbstractDiagonalArray, value, i::DiagIndex) - a[StorageIndex(i)] = value - return a -end - -SparseArraysBase.StorageIndices(i::DiagIndices) = StorageIndices(indices(i)) - -function Base.getindex(a::AbstractDiagonalArray, i::DiagIndices) - return a[StorageIndices(i)] -end - -function Base.setindex!(a::AbstractDiagonalArray, value, i::DiagIndices) - a[StorageIndices(i)] = value - return a -end diff --git a/NDTensors/src/lib/DiagonalArrays/src/abstractdiagonalarray/sparsearrayinterface.jl b/NDTensors/src/lib/DiagonalArrays/src/abstractdiagonalarray/sparsearrayinterface.jl deleted file mode 100644 index 98762bbbca..0000000000 --- a/NDTensors/src/lib/DiagonalArrays/src/abstractdiagonalarray/sparsearrayinterface.jl +++ /dev/null @@ -1,22 +0,0 @@ -using Compat: Returns, allequal -using ..SparseArraysBase: SparseArraysBase - -# `SparseArraysBase` interface -function SparseArraysBase.index_to_storage_index( - a::AbstractDiagonalArray{<:Any,N}, I::CartesianIndex{N} -) where {N} - !allequal(Tuple(I)) && return nothing - return first(Tuple(I)) -end - -function SparseArraysBase.storage_index_to_index(a::AbstractDiagonalArray, I) - return CartesianIndex(ntuple(Returns(I), ndims(a))) -end - -## # 1-dimensional case can be `AbstractDiagonalArray`. -## function SparseArraysBase.sparse_similar( -## a::AbstractDiagonalArray, elt::Type, dims::Tuple{Int} -## ) -## # TODO: Handle preserving zero element function. 
-## return similar(a, elt, dims) -## end diff --git a/NDTensors/src/lib/DiagonalArrays/src/diaginterface/defaults.jl b/NDTensors/src/lib/DiagonalArrays/src/diaginterface/defaults.jl deleted file mode 100644 index 39e9395eb6..0000000000 --- a/NDTensors/src/lib/DiagonalArrays/src/diaginterface/defaults.jl +++ /dev/null @@ -1,3 +0,0 @@ -using Compat: Returns - -default_size(diag::AbstractVector, n) = ntuple(Returns(length(diag)), n) diff --git a/NDTensors/src/lib/DiagonalArrays/src/diaginterface/diagindex.jl b/NDTensors/src/lib/DiagonalArrays/src/diaginterface/diagindex.jl deleted file mode 100644 index e177e80697..0000000000 --- a/NDTensors/src/lib/DiagonalArrays/src/diaginterface/diagindex.jl +++ /dev/null @@ -1,14 +0,0 @@ -# Represents a linear offset along the diagonal -struct DiagIndex{I} - i::I -end -index(i::DiagIndex) = i.i - -function Base.getindex(a::AbstractArray, i::DiagIndex) - return getdiagindex(a, index(i)) -end - -function Base.setindex!(a::AbstractArray, value, i::DiagIndex) - setdiagindex!(a, value, index(i)) - return a -end diff --git a/NDTensors/src/lib/DiagonalArrays/src/diaginterface/diagindices.jl b/NDTensors/src/lib/DiagonalArrays/src/diaginterface/diagindices.jl deleted file mode 100644 index 08590d148e..0000000000 --- a/NDTensors/src/lib/DiagonalArrays/src/diaginterface/diagindices.jl +++ /dev/null @@ -1,14 +0,0 @@ -# Represents a set of linear offsets along the diagonal -struct DiagIndices{I} - i::I -end -indices(i::DiagIndices) = i.i - -function Base.getindex(a::AbstractArray, I::DiagIndices) - return getdiagindices(a, indices(I)) -end - -function Base.setindex!(a::AbstractArray, value, i::DiagIndices) - setdiagindices!(a, value, indices(i)) - return a -end diff --git a/NDTensors/src/lib/DiagonalArrays/src/diaginterface/diaginterface.jl b/NDTensors/src/lib/DiagonalArrays/src/diaginterface/diaginterface.jl deleted file mode 100644 index 19092010a4..0000000000 --- a/NDTensors/src/lib/DiagonalArrays/src/diaginterface/diaginterface.jl +++ /dev/null @@ -1,57 +0,0 @@ -using Compat: allequal - -diaglength(a::AbstractArray{<:Any,0}) = 1 - -function diaglength(a::AbstractArray) - return minimum(size(a)) -end - -function isdiagindex(a::AbstractArray{<:Any,N}, I::CartesianIndex{N}) where {N} - @boundscheck checkbounds(a, I) - return allequal(Tuple(I)) -end - -function diagstride(a::AbstractArray) - s = 1 - p = 1 - for i in 1:(ndims(a) - 1) - p *= size(a, i) - s += p - end - return s -end - -function diagindices(a::AbstractArray) - maxdiag = LinearIndices(a)[CartesianIndex(ntuple(Returns(diaglength(a)), ndims(a)))] - return 1:diagstride(a):maxdiag -end - -function diagindices(a::AbstractArray{<:Any,0}) - return Base.OneTo(1) -end - -function diagview(a::AbstractArray) - return @view a[diagindices(a)] -end - -function getdiagindex(a::AbstractArray, i::Integer) - return diagview(a)[i] -end - -function setdiagindex!(a::AbstractArray, v, i::Integer) - diagview(a)[i] = v - return a -end - -function getdiagindices(a::AbstractArray, I) - return @view diagview(a)[I] -end - -function getdiagindices(a::AbstractArray, I::Colon) - return diagview(a) -end - -function setdiagindices!(a::AbstractArray, v, i::Colon) - diagview(a) .= v - return a -end diff --git a/NDTensors/src/lib/DiagonalArrays/src/diagonalarray/arraylayouts.jl b/NDTensors/src/lib/DiagonalArrays/src/diagonalarray/arraylayouts.jl deleted file mode 100644 index 7f365db0f2..0000000000 --- a/NDTensors/src/lib/DiagonalArrays/src/diagonalarray/arraylayouts.jl +++ /dev/null @@ -1,11 +0,0 @@ -using ArrayLayouts: MulAdd - -# Default 
sparse array type for `AbstractDiagonalLayout`. -default_diagonalarraytype(elt::Type) = DiagonalArray{elt} - -# TODO: Preserve GPU memory! Implement `CuSparseArrayLayout`, `MtlSparseLayout`? -function Base.similar( - ::MulAdd{<:AbstractDiagonalLayout,<:AbstractDiagonalLayout}, elt::Type, axes -) - return similar(default_diagonalarraytype(elt), axes) -end diff --git a/NDTensors/src/lib/DiagonalArrays/src/diagonalarray/diagonalarray.jl b/NDTensors/src/lib/DiagonalArrays/src/diagonalarray/diagonalarray.jl deleted file mode 100644 index 6f3fc95ff3..0000000000 --- a/NDTensors/src/lib/DiagonalArrays/src/diagonalarray/diagonalarray.jl +++ /dev/null @@ -1,107 +0,0 @@ -using ..SparseArraysBase: Zero, getindex_zero_function -# TODO: Put into `DiagonalArraysSparseArraysBaseExt`? -using ..SparseArraysBase: SparseArraysBase, SparseArrayDOK - -struct DiagonalArray{T,N,Diag<:AbstractVector{T},Zero} <: AbstractDiagonalArray{T,N} - diag::Diag - dims::NTuple{N,Int} - zero::Zero -end - -function DiagonalArray{T,N}( - diag::AbstractVector{T}, d::Tuple{Vararg{Int,N}}, zero=Zero() -) where {T,N} - return DiagonalArray{T,N,typeof(diag),typeof(zero)}(diag, d, zero) -end - -function DiagonalArray{T,N}( - diag::AbstractVector, d::Tuple{Vararg{Int,N}}, zero=Zero() -) where {T,N} - return DiagonalArray{T,N}(T.(diag), d, zero) -end - -function DiagonalArray{T,N}(diag::AbstractVector, d::Vararg{Int,N}) where {T,N} - return DiagonalArray{T,N}(diag, d) -end - -function DiagonalArray{T}( - diag::AbstractVector, d::Tuple{Vararg{Int,N}}, zero=Zero() -) where {T,N} - return DiagonalArray{T,N}(diag, d, zero) -end - -function DiagonalArray{T}(diag::AbstractVector, d::Vararg{Int,N}) where {T,N} - return DiagonalArray{T,N}(diag, d) -end - -function DiagonalArray(diag::AbstractVector{T}, d::Tuple{Vararg{Int,N}}) where {T,N} - return DiagonalArray{T,N}(diag, d) -end - -function DiagonalArray(diag::AbstractVector{T}, d::Vararg{Int,N}) where {T,N} - return DiagonalArray{T,N}(diag, d) -end - -# Infer size from diagonal -function DiagonalArray{T,N}(diag::AbstractVector) where {T,N} - return DiagonalArray{T,N}(diag, default_size(diag, N)) -end - -function DiagonalArray{<:Any,N}(diag::AbstractVector{T}) where {T,N} - return DiagonalArray{T,N}(diag) -end - -# undef -function DiagonalArray{T,N}( - ::UndefInitializer, d::Tuple{Vararg{Int,N}}, zero=Zero() -) where {T,N} - return DiagonalArray{T,N}(Vector{T}(undef, minimum(d)), d, zero) -end - -function DiagonalArray{T,N}(::UndefInitializer, d::Vararg{Int,N}) where {T,N} - return DiagonalArray{T,N}(undef, d) -end - -function DiagonalArray{T}( - ::UndefInitializer, d::Tuple{Vararg{Int,N}}, zero=Zero() -) where {T,N} - return DiagonalArray{T,N}(undef, d, zero) -end - -# Axes version -function DiagonalArray{T}( - ::UndefInitializer, axes::Tuple{Vararg{AbstractUnitRange,N}}, zero=Zero() -) where {T,N} - @assert all(isone, first.(axes)) - return DiagonalArray{T,N}(undef, length.(axes), zero) -end - -function DiagonalArray{T}(::UndefInitializer, d::Vararg{Int,N}) where {T,N} - return DiagonalArray{T,N}(undef, d) -end - -# Minimal `AbstractArray` interface -Base.size(a::DiagonalArray) = a.dims - -function Base.similar(a::DiagonalArray, elt::Type, dims::Tuple{Vararg{Int}}) - # TODO: Preserve zero element function. - return DiagonalArray{elt}(undef, dims, getindex_zero_function(a)) -end - -# Minimal `SparseArraysBase` interface -SparseArraysBase.sparse_storage(a::DiagonalArray) = a.diag - -# `SparseArraysBase` -# Defines similar when the output can't be `DiagonalArray`, -# such as in `reshape`. 
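# For instance (a hedged illustration, not from the original source): for
# d = DiagonalArray([1.0, 2.0, 3.0], 3, 3), the stored entries of
# reshape(d, 9) sit at linear indices 1, 5, 9, which no single diagonal
# vector can represent, so the result falls back to a `SparseArrayDOK`.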
-# TODO: Put into `DiagonalArraysSparseArraysBaseExt`? -# TODO: Special case 2D to output `SparseMatrixCSC`? -function SparseArraysBase.sparse_similar( - a::DiagonalArray, elt::Type, dims::Tuple{Vararg{Int}} -) - return SparseArrayDOK{elt}(undef, dims, getindex_zero_function(a)) -end - -function SparseArraysBase.getindex_zero_function(a::DiagonalArray) - return a.zero -end diff --git a/NDTensors/src/lib/DiagonalArrays/src/diagonalarray/diagonalmatrix.jl b/NDTensors/src/lib/DiagonalArrays/src/diagonalarray/diagonalmatrix.jl deleted file mode 100644 index 873410db78..0000000000 --- a/NDTensors/src/lib/DiagonalArrays/src/diagonalarray/diagonalmatrix.jl +++ /dev/null @@ -1,5 +0,0 @@ -const DiagonalMatrix{T,Diag,Zero} = DiagonalArray{T,2,Diag,Zero} - -function DiagonalMatrix(diag::AbstractVector) - return DiagonalArray{<:Any,2}(diag) -end diff --git a/NDTensors/src/lib/DiagonalArrays/src/diagonalarray/diagonalvector.jl b/NDTensors/src/lib/DiagonalArrays/src/diagonalarray/diagonalvector.jl deleted file mode 100644 index 40e35e409d..0000000000 --- a/NDTensors/src/lib/DiagonalArrays/src/diagonalarray/diagonalvector.jl +++ /dev/null @@ -1,5 +0,0 @@ -const DiagonalVector{T,Diag,Zero} = DiagonalArray{T,1,Diag,Zero} - -function DiagonalVector(diag::AbstractVector) - return DiagonalArray{<:Any,1}(diag) -end diff --git a/NDTensors/src/lib/DiagonalArrays/test/Project.toml b/NDTensors/src/lib/DiagonalArrays/test/Project.toml deleted file mode 100644 index 9b1d5ccd25..0000000000 --- a/NDTensors/src/lib/DiagonalArrays/test/Project.toml +++ /dev/null @@ -1,2 +0,0 @@ -[deps] -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" diff --git a/NDTensors/src/lib/DiagonalArrays/test/runtests.jl b/NDTensors/src/lib/DiagonalArrays/test/runtests.jl deleted file mode 100644 index af439730e9..0000000000 --- a/NDTensors/src/lib/DiagonalArrays/test/runtests.jl +++ /dev/null @@ -1,59 +0,0 @@ -@eval module $(gensym()) -using Test: @test, @testset, @test_broken -using NDTensors.DiagonalArrays: DiagonalArrays, DiagonalArray, DiagonalMatrix, diaglength -using NDTensors.SparseArraysBase: SparseArrayDOK -using NDTensors.SparseArraysBase: stored_length -@testset "Test NDTensors.DiagonalArrays" begin - @testset "README" begin - @test include( - joinpath( - pkgdir(DiagonalArrays), "src", "lib", "DiagonalArrays", "examples", "README.jl" - ), - ) isa Any - end - @testset "DiagonalArray (eltype=$elt)" for elt in - (Float32, Float64, ComplexF32, ComplexF64) - @testset "Basics" begin - a = fill(one(elt), 2, 3) - @test diaglength(a) == 2 - a = fill(one(elt)) - @test diaglength(a) == 1 - end - @testset "Matrix multiplication" begin - a1 = DiagonalArray{elt}(undef, (2, 3)) - a1[1, 1] = 11 - a1[2, 2] = 22 - a2 = DiagonalArray{elt}(undef, (3, 4)) - a2[1, 1] = 11 - a2[2, 2] = 22 - a2[3, 3] = 33 - a_dest = a1 * a2 - # TODO: Use `densearray` to make generic to GPU. - @test Array(a_dest) ≈ Array(a1) * Array(a2) - # TODO: Make this work with `ArrayLayouts`. - @test stored_length(a_dest) == 2 - @test a_dest isa DiagonalMatrix{elt} - - # TODO: Make generic to GPU, use `allocate_randn`? - a2 = randn(elt, (3, 4)) - a_dest = a1 * a2 - # TODO: Use `densearray` to make generic to GPU. - @test Array(a_dest) ≈ Array(a1) * Array(a2) - @test stored_length(a_dest) == 8 - @test a_dest isa Matrix{elt} - - a2 = SparseArrayDOK{elt}(3, 4) - a2[1, 1] = 11 - a2[2, 2] = 22 - a2[3, 3] = 33 - a_dest = a1 * a2 - # TODO: Use `densearray` to make generic to GPU. - @test Array(a_dest) ≈ Array(a1) * Array(a2) - # TODO: Define `SparseMatrixDOK`. 
- # TODO: Make this work with `ArrayLayouts`. - @test stored_length(a_dest) == 2 - @test a_dest isa SparseArrayDOK{elt,2} - end - end -end -end diff --git a/NDTensors/src/lib/Expose/.JuliaFormatter.toml b/NDTensors/src/lib/Expose/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/Expose/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/Expose/README.md b/NDTensors/src/lib/Expose/README.md deleted file mode 100644 index ccfaa3ffc0..0000000000 --- a/NDTensors/src/lib/Expose/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# Expose - -A module that unwraps complex wrapped types to assist in the generic programming of type-based functions. Expose can be used to catch workflows before unexpected behavior occurs as a result of arbitrary type wrapping. - -Related: -- https://github.com/JuliaLinearAlgebra/ArrayLayouts.jl/issues/9 -- https://juliaarrays.github.io/ArrayInterface.jl/stable/wrapping/ -- https://github.com/JuliaGPU/Adapt.jl -- https://github.com/chengchingwen/StructWalk.jl -- https://github.com/FluxML/Functors.jl diff --git a/NDTensors/src/lib/Expose/TODO.md b/NDTensors/src/lib/Expose/TODO.md deleted file mode 100644 index d890175376..0000000000 --- a/NDTensors/src/lib/Expose/TODO.md +++ /dev/null @@ -1,4 +0,0 @@ -Replace all `leaf_parenttype` calls by wrapping the arrays in this `expose` type - -Fix the issue Ryan found in MPS -Make a GPUArrays extension that has generic GPU algorithms \ No newline at end of file diff --git a/NDTensors/src/lib/Expose/src/Expose.jl b/NDTensors/src/lib/Expose/src/Expose.jl deleted file mode 100644 index fb64927d7b..0000000000 --- a/NDTensors/src/lib/Expose/src/Expose.jl +++ /dev/null @@ -1,24 +0,0 @@ -module Expose -using SimpleTraits -using LinearAlgebra -using Base: ReshapedArray -using StridedViews -using Adapt: Adapt, adapt, adapt_structure - -include("exposed.jl") - -include("import.jl") -## TODO: Create functions which take the `Expose` type and launch functions -## using that type. -## Exposed-based functions: -include("functions/abstractarray.jl") -include("functions/append.jl") -include("functions/copyto.jl") -include("functions/linearalgebra.jl") -include("functions/mul.jl") -include("functions/permutedims.jl") -include("functions/adapt.jl") - -export IsWrappedArray, expose, Exposed, unexpose, cpu - -end diff --git a/NDTensors/src/lib/Expose/src/exposed.jl b/NDTensors/src/lib/Expose/src/exposed.jl deleted file mode 100644 index e3d51dd07a..0000000000 --- a/NDTensors/src/lib/Expose/src/exposed.jl +++ /dev/null @@ -1,17 +0,0 @@ -using NDTensors.TypeParameterAccessors: - TypeParameterAccessors, unwrap_array_type, parameter, parenttype, type_parameter -struct Exposed{Unwrapped,Object} - object::Object -end - -expose(object) = Exposed{unwrap_array_type(object),typeof(object)}(object) - -unexpose(E::Exposed) = E.object - -## TODO: Remove TypeParameterAccessors when SetParameters is removed. -TypeParameterAccessors.parenttype(type::Type{<:Exposed}) = parameter(type, parenttype) -function TypeParameterAccessors.position(::Type{<:Exposed}, ::typeof(parenttype)) - return TypeParameterAccessors.Position(1) -end -TypeParameterAccessors.unwrap_array_type(type::Type{<:Exposed}) = parenttype(type) -TypeParameterAccessors.unwrap_array_type(E::Exposed) = unwrap_array_type(typeof(E)) diff --git a/NDTensors/src/lib/Expose/src/functions/abstractarray.jl b/NDTensors/src/lib/Expose/src/functions/abstractarray.jl deleted file mode 100644 index ca6e573c56..0000000000 --- 
a/NDTensors/src/lib/Expose/src/functions/abstractarray.jl +++ /dev/null @@ -1,21 +0,0 @@ -parent(E::Exposed) = parent(unexpose(E)) - -transpose(E::Exposed) = transpose(unexpose(E)) - -adjoint(E::Exposed) = adjoint(unexpose(E)) -getindex(E::Exposed) = unexpose(E)[] - -function setindex!(E::Exposed, x::Number) - unexpose(E)[] = x - return unexpose(E) -end - -getindex(E::Exposed, I...) = unexpose(E)[I...] - -function copy(E::Exposed) - return copy(unexpose(E)) -end - -any(f, E::Exposed) = any(f, unexpose(E)) - -print_array(io::IO, E::Exposed) = print_array(io, unexpose(E)) diff --git a/NDTensors/src/lib/Expose/src/functions/adapt.jl b/NDTensors/src/lib/Expose/src/functions/adapt.jl deleted file mode 100644 index 6ebc8bf7d6..0000000000 --- a/NDTensors/src/lib/Expose/src/functions/adapt.jl +++ /dev/null @@ -1,8 +0,0 @@ -Adapt.adapt(to, x::Exposed) = adapt_structure(to, x) -Adapt.adapt_structure(to, x::Exposed) = adapt_structure(to, unexpose(x)) - -# https://github.com/JuliaGPU/Adapt.jl/pull/51 -# TODO: Remove once https://github.com/JuliaGPU/Adapt.jl/issues/71 is addressed. -function Adapt.adapt_structure(to, A::Exposed{<:Any,<:Hermitian}) - return Hermitian(adapt(to, parent(unexpose(A))), Symbol(unexpose(A).uplo)) -end diff --git a/NDTensors/src/lib/Expose/src/functions/append.jl b/NDTensors/src/lib/Expose/src/functions/append.jl deleted file mode 100644 index a72e895bed..0000000000 --- a/NDTensors/src/lib/Expose/src/functions/append.jl +++ /dev/null @@ -1,3 +0,0 @@ -function Base.append!(Ecollection::Exposed, collections...) - return append!(unexpose(Ecollection), collections...) -end diff --git a/NDTensors/src/lib/Expose/src/functions/copyto.jl b/NDTensors/src/lib/Expose/src/functions/copyto.jl deleted file mode 100644 index c2ffd3c29a..0000000000 --- a/NDTensors/src/lib/Expose/src/functions/copyto.jl +++ /dev/null @@ -1,4 +0,0 @@ -function copyto!(R::Exposed, T::Exposed) - copyto!(unexpose(R), unexpose(T)) - return unexpose(R) -end diff --git a/NDTensors/src/lib/Expose/src/functions/linearalgebra.jl b/NDTensors/src/lib/Expose/src/functions/linearalgebra.jl deleted file mode 100644 index 4dfa65f43b..0000000000 --- a/NDTensors/src/lib/Expose/src/functions/linearalgebra.jl +++ /dev/null @@ -1,29 +0,0 @@ -function qr(E::Exposed) - return qr(unexpose(E)) -end -## These functions do not exist in `LinearAlgebra` but were defined -## in NDTensors. Because Expose is imported before NDTensors, -## one cannot import these functions from NDTensors, so instead -## I define them here and extend them in NDTensors. -## I have done the same thing for the function `cpu`. -## Expose.qr_positive -function qr_positive(E::Exposed) - return qr_positive(unexpose(E)) -end - -## Expose.ql -function ql(E::Exposed) - return ql(unexpose(E)) -end -## Expose.ql_positive -function ql_positive(E::Exposed) - return ql_positive(unexpose(E)) -end - -function LinearAlgebra.eigen(E::Exposed) - return eigen(unexpose(E)) -end - -function svd(E::Exposed; kwargs...) - return svd(unexpose(E); kwargs...) 
-end diff --git a/NDTensors/src/lib/Expose/src/functions/mul.jl b/NDTensors/src/lib/Expose/src/functions/mul.jl deleted file mode 100644 index c858df7685..0000000000 --- a/NDTensors/src/lib/Expose/src/functions/mul.jl +++ /dev/null @@ -1,4 +0,0 @@ -function mul!(CM::Exposed, AM::Exposed, BM::Exposed, α, β) - mul!(unexpose(CM), unexpose(AM), unexpose(BM), α, β) - return unexpose(CM) -end diff --git a/NDTensors/src/lib/Expose/src/functions/permutedims.jl b/NDTensors/src/lib/Expose/src/functions/permutedims.jl deleted file mode 100644 index a4c78eec49..0000000000 --- a/NDTensors/src/lib/Expose/src/functions/permutedims.jl +++ /dev/null @@ -1,13 +0,0 @@ -function permutedims(E::Exposed, perm) - return permutedims(unexpose(E), perm) -end - -function permutedims!(Edest::Exposed, Esrc::Exposed, perm) - permutedims!(unexpose(Edest), unexpose(Esrc), perm) - return unexpose(Edest) -end - -function permutedims!(Edest::Exposed, Esrc::Exposed, perm, f) - unexpose(Edest) .= f.(unexpose(Edest), permutedims(Esrc, perm)) - return unexpose(Edest) -end diff --git a/NDTensors/src/lib/Expose/src/import.jl b/NDTensors/src/lib/Expose/src/import.jl deleted file mode 100644 index 4a1f789ad8..0000000000 --- a/NDTensors/src/lib/Expose/src/import.jl +++ /dev/null @@ -1,14 +0,0 @@ -import Base: - adjoint, - permutedims, - permutedims!, - copy, - copyto!, - parent, - print_array, - transpose, - getindex, - setindex!, - any - -import LinearAlgebra: mul!, qr, svd diff --git a/NDTensors/src/lib/Expose/test/Project.toml b/NDTensors/src/lib/Expose/test/Project.toml deleted file mode 100644 index 3cc424a3c3..0000000000 --- a/NDTensors/src/lib/Expose/test/Project.toml +++ /dev/null @@ -1,5 +0,0 @@ -[deps] -GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" -Metal = "dde4c033-4e86-420c-a63e-0dd931031962" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/NDTensors/src/lib/Expose/test/runtests.jl b/NDTensors/src/lib/Expose/test/runtests.jl deleted file mode 100644 index fe2f1d1ef8..0000000000 --- a/NDTensors/src/lib/Expose/test/runtests.jl +++ /dev/null @@ -1,265 +0,0 @@ -@eval module $(gensym()) -using Test: @testset, @test, @test_broken -using NDTensors.Expose -using NDTensors: NDTensors, mul!! 
-using LinearAlgebra: - LinearAlgebra, - Adjoint, - Diagonal, - Hermitian, - Symmetric, - Transpose, - eigen, - mul!, - norm, - qr, - svd -using GPUArraysCore: @allowscalar -include(joinpath(pkgdir(NDTensors), "test", "NDTensorsTestUtils", "NDTensorsTestUtils.jl")) -using .NDTensorsTestUtils: devices_list -using NDTensors.GPUArraysCoreExtensions: cpu - -@testset "Testing Expose $dev, $elt" for dev in devices_list(ARGS), - elt in (Float32, ComplexF32) - - v = dev(randn(elt, 10)) - vt = transpose(v) - va = v' - - E = expose(v) - Et = expose(vt) - Ea = expose(va) - v_type = typeof(v) - e_type = eltype(v) - @test typeof(E) == Exposed{v_type,v_type} - @test typeof(Et) == Exposed{v_type,Transpose{e_type,v_type}} - @test typeof(Ea) == Exposed{v_type,Adjoint{e_type,v_type}} - - @test parent(E) == v - @test parent(Et) == v - @test parent(Ea) == v - @test transpose(E) == vt - @test cpu(E) == cpu(v) - @test cpu(Et) == cpu(vt) - - m = reshape(v, (5, 2)) - mt = transpose(m) - ma = m' - E = expose(m) - Et = expose(mt) - Ea = expose(ma) - - m_type = typeof(m) - @test typeof(E) == Exposed{m_type,m_type} - @test typeof(Et) == Exposed{m_type,Transpose{e_type,m_type}} - @test typeof(Ea) == Exposed{m_type,Adjoint{e_type,m_type}} - - o = dev(randn(elt, 1)) - expose(o)[] = 2 - @test expose(o)[] == 2 - - fill!(m, 0) - @test any(!Base.isinf, expose(m)) - - mp = copy(Ea) - @test mp == ma - fill!(ma, 2.0) - copyto!(expose(mp), expose(ma)) - @test mp == ma - - q, r = qr(expose(mp)) - @test q * r ≈ mp - - q, r = Expose.qr_positive(expose(mp)) - @test q * r ≈ mp - - square = dev(rand(real(elt), (10, 10))) - square = (square + transpose(square)) / 2 - ## CUDA only supports Hermitian or Symmetric eigen decompositions, - ## so I symmetrize `square` and wrap it in `Symmetric` here. - l, U = eigen(expose(Symmetric(square))) - @test eltype(l) == real(elt) - @test eltype(U) == real(elt) - @test square * U ≈ U * Diagonal(l) - - square = dev(rand(elt, (10, 10))) - # Can use `hermitianpart` in Julia 1.10 - square = (square + square') / 2 - ## CUDA only supports Hermitian or Symmetric eigen decompositions, - ## so I take the Hermitian part of `square` and wrap it in `Hermitian` here. - l, U = eigen(expose(Hermitian(square))) - @test eltype(l) == real(elt) - @test eltype(U) == elt - @test square * U ≈ U * Diagonal(l) - - U, S, V, = svd(expose(mp)) - @test eltype(U) == elt - @test eltype(S) == real(elt) - @test eltype(V) == elt - @test U * Diagonal(S) * V' ≈ mp - - cm = dev(randn(elt, 2, 2)) - mul!(expose(cm), expose(mp), expose(mp'), 1.0, 0.0) - @test cm ≈ mp * mp' - - @test permutedims(expose(mp), (2, 1)) == transpose(mp) - fill!(mt, 3) - permutedims!(expose(m), expose(mt), (2, 1)) - @test norm(m) ≈ sqrt(3^2 * 10) - @test size(m) == (5, 2) - permutedims!(expose(m), expose(mt), (2, 1), +) - @test size(m) == (5, 2) - @test norm(m) ≈ sqrt(6^2 * 10) - - m = reshape(m, (5, 2, 1)) - mt = fill!(similar(m), 3.0) - m = permutedims(expose(m), (2, 1, 3)) - @test size(m) == (2, 5, 1) - permutedims!(expose(m), expose(mt), (2, 1, 3)) - @test norm(m) ≈ sqrt(3^2 * 10) - permutedims!(expose(m), expose(mt), (2, 1, 3), -) - @test norm(m) == 0 - - x = dev(rand(elt, 4, 4)) - y = dev(rand(elt, 4, 4)) - copyto!(expose(y), expose(x)) - @test y == x - - y = dev(rand(elt, 4, 4)) - x = Base.ReshapedArray(dev(rand(elt, 16)), (4, 4), ()) - copyto!(expose(y), expose(x)) - @test cpu(y) == cpu(x) - @test cpu(copy(expose(x))) == cpu(x) - - ## Tests for Metal because permutedims with ReshapedArray does not work properly: - ## transpose(ReshapedArray(MtlArray)) fails with scalar indexing, so we call copy 
to - ## evaluate tests in the following tests - y = dev(rand(elt, 4, 4)) - @test permutedims(expose(y), (2, 1)) == transpose(y) - y = Base.ReshapedArray(y, (2, 8), ()) - @test permutedims(expose(y), (2, 1)) == transpose(copy(expose(y))) - yt = dev(rand(elt, (8, 2))) - permutedims!(expose(y), expose(yt), (2, 1)) - @test copy(expose(y)) == transpose(yt) - yt = dev(rand(elt, 8, 2)) - permutedims!(expose(yt), expose(y), (2, 1)) - @test copy(expose(y)) == transpose(yt) - - y = reshape(dev(randn(elt, 8))', 2, 4) - x = Base.ReshapedArray(dev(randn(elt, 8, 8)'[1:8]), (2, 4), ()) - z = dev(fill!(Matrix{elt}(undef, (2, 4)), 0.0)) - for i in 1:2 - for j in 1:4 - @allowscalar z[i, j] = y[i, j] * x[i, j] - end - end - permutedims!(expose(y), expose(x), (1, 2), *) - @allowscalar @test reshape(z, size(y)) ≈ y - for i in 1:2 - for j in 1:4 - @allowscalar z[i, j] = x[i, j] * y[i, j] - end - end - permutedims!(expose(x), expose(y), (1, 2), *) - ## I copy x here because it is a ReshapedArray{SubArray} which causes `≈` - ## to throw an error - @test z ≈ copy(expose(x)) - - y = dev(rand(elt, 4, 4)) - x = @view dev(rand(elt, 8, 8))[1:4, 1:4] - copyto!(expose(y), expose(x)) - @test y == x - @test copy(x) == x - - y = dev(randn(elt, 16)) - x = reshape(dev(randn(elt, 4, 4))', 16) - copyto!(expose(y), expose(x)) - @allowscalar begin - @test y == x - @test copy(x) == x - end - - y = dev(randn(elt, 8)) - x = @view reshape(dev(randn(elt, 8, 8))', 64)[1:8] - copyto!(expose(y), expose(x)) - @allowscalar begin - @test y == x - ## temporarily use expose copy because this is broken in Metal 1.1 - @test copy(expose(x)) == x - end - - y = Base.ReshapedArray(dev(randn(elt, 16)), (4, 4), ()) - x = dev(randn(elt, 4, 4)) - permutedims!(expose(y), expose(x), (2, 1)) - @test cpu(y) == transpose(cpu(x)) - - ########################################## - ### Testing an issue with CUDA&Metal transpose/adjoint mul - A = dev(randn(elt, (3, 2))) - B = dev(randn(elt, (3, 4))) - C = dev(randn(elt, (4, 2))) - Cp = copy(C) - - ## This fails with scalar indexing - if dev != cpu - @test_broken mul!(transpose(C), transpose(A), B, true, false) - end - mul!(C, transpose(B), A, true, false) - mul!(expose(transpose(Cp)), expose(transpose(A)), expose(B), true, false) - @test C ≈ Cp - Cp = zero(C) - ## Try calling mul!! with transposes to verify that code works - Cpt = mul!!(transpose(Cp), transpose(A), B, true, false) - @test transpose(Cpt) ≈ C - - Cp = zero(C) - ## This fails with scalar indexing - if dev != cpu - @test_broken mul!(C', A', B, true, false) - end - mul!(C, B', A, true, false) - mul!(expose(Cp'), expose(A'), expose(B), true, false) - @test C ≈ Cp - Cp = zero(C) - Cpt = mul!!(Cp', A', B, true, false) - @test Cpt' ≈ C - - ################################## - ### Add test for transpose(reshape(adjoint )) failure in CUDA - - A = dev(transpose(reshape(randn(elt, 2, 12)', (12, 2)))) - B = dev(randn(elt, 2, 2)) - C = dev(zeros(elt, 2, 12)) - mul!(expose(C), expose(B), expose(A), true, false) - Cp = cpu(similar(C)) - mul!(expose(Cp), expose(cpu(B)), expose(cpu(A)), true, false) - @test cpu(C) ≈ Cp - zero(C) - mul!!(C, B, A, true, false) - @test cpu(C) ≈ Cp - - ################################## - ### Add test for append! 
to address scalar indexing in GPUs - ## For now, Metal doesn't have a `resize!` function, so all the tests are failing - if (dev == NDTensors.mtl) - continue - end - A = dev(randn(elt, 10)) - Ap = copy(A) - B = randn(elt, 3) - C = append!(expose(A), B) - - @test length(C) == 13 - @test sum(C) ≈ sum(Ap) + sum(B) - - A = Ap - B = dev(randn(elt, 29)) - Bp = copy(B) - C = append!(expose(B), A) - @test length(C) == 39 - @test sum(C) ≈ sum(Bp) + sum(Ap) - @allowscalar for i in 1:length(B) - @test C[i] == B[i] - end -end -end diff --git a/NDTensors/src/lib/GPUArraysCoreExtensions/.JuliaFormatter.toml b/NDTensors/src/lib/GPUArraysCoreExtensions/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/GPUArraysCoreExtensions/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/GPUArraysCoreExtensions/Project.toml b/NDTensors/src/lib/GPUArraysCoreExtensions/Project.toml deleted file mode 100644 index f85988b6fd..0000000000 --- a/NDTensors/src/lib/GPUArraysCoreExtensions/Project.toml +++ /dev/null @@ -1,7 +0,0 @@ -name = "GPUArraysCoreExtensions" - -[deps] -GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" - -[compat] -GPUArraysCore = "0.1" diff --git a/NDTensors/src/lib/GPUArraysCoreExtensions/src/GPUArraysCoreExtensions.jl b/NDTensors/src/lib/GPUArraysCoreExtensions/src/GPUArraysCoreExtensions.jl deleted file mode 100644 index 6845816fca..0000000000 --- a/NDTensors/src/lib/GPUArraysCoreExtensions/src/GPUArraysCoreExtensions.jl +++ /dev/null @@ -1,4 +0,0 @@ -module GPUArraysCoreExtensions -include("gpuarrayscore.jl") - -end diff --git a/NDTensors/src/lib/GPUArraysCoreExtensions/src/gpuarrayscore.jl b/NDTensors/src/lib/GPUArraysCoreExtensions/src/gpuarrayscore.jl deleted file mode 100644 index 20ccf11e1d..0000000000 --- a/NDTensors/src/lib/GPUArraysCoreExtensions/src/gpuarrayscore.jl +++ /dev/null @@ -1,18 +0,0 @@ -using NDTensors.Expose: Exposed, unexpose -using NDTensors.TypeParameterAccessors: - TypeParameterAccessors, type_parameter, set_type_parameter - -function storagemode(object) - return storagemode(typeof(object)) -end -function storagemode(type::Type) - return type_parameter(type, storagemode) -end - -function set_storagemode(type::Type, param) - return set_type_parameter(type, storagemode, param) -end - -function cpu end - -cpu(E::Exposed) = cpu(unexpose(E)) diff --git a/NDTensors/src/lib/GPUArraysCoreExtensions/test/runtests.jl b/NDTensors/src/lib/GPUArraysCoreExtensions/test/runtests.jl deleted file mode 100644 index 3e53b7f509..0000000000 --- a/NDTensors/src/lib/GPUArraysCoreExtensions/test/runtests.jl +++ /dev/null @@ -1,7 +0,0 @@ -@eval module $(gensym()) -using Test: @testset, @test -using NDTensors.GPUArraysCoreExtensions: storagemode -@testset "Test Base" begin - @test storagemode isa Function -end -end diff --git a/NDTensors/src/lib/GradedAxes/.JuliaFormatter.toml b/NDTensors/src/lib/GradedAxes/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/GradedAxes/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/GradedAxes/src/GradedAxes.jl b/NDTensors/src/lib/GradedAxes/src/GradedAxes.jl deleted file mode 100644 index ba17c175f0..0000000000 --- a/NDTensors/src/lib/GradedAxes/src/GradedAxes.jl +++ /dev/null @@ -1,9 +0,0 @@ -module GradedAxes -include("blockedunitrange.jl") -include("gradedunitrange.jl") -include("dual.jl") -include("labelledunitrangedual.jl") 
-include("gradedunitrangedual.jl") -include("onetoone.jl") -include("fusion.jl") -end diff --git a/NDTensors/src/lib/GradedAxes/src/blockedunitrange.jl b/NDTensors/src/lib/GradedAxes/src/blockedunitrange.jl deleted file mode 100644 index d913dd60ab..0000000000 --- a/NDTensors/src/lib/GradedAxes/src/blockedunitrange.jl +++ /dev/null @@ -1,186 +0,0 @@ -using BlockArrays: - BlockArrays, - AbstractBlockVector, - AbstractBlockedUnitRange, - Block, - BlockIndex, - BlockIndexRange, - BlockRange, - BlockSlice, - BlockVector, - BlockedOneTo, - BlockedUnitRange, - BlockedVector, - block, - blockindex, - findblock, - findblockindex - -# Custom `BlockedUnitRange` constructor that takes a unit range -# and a set of block lengths, similar to `BlockArray(::AbstractArray, blocklengths...)`. -function blockedunitrange(a::AbstractUnitRange, blocklengths) - blocklengths_shifted = copy(blocklengths) - blocklengths_shifted[1] += (first(a) - 1) - blocklasts = cumsum(blocklengths_shifted) - return BlockArrays._BlockedUnitRange(first(a), blocklasts) -end - -# TODO: Move this to a `BlockArraysExtensions` library. -# TODO: Rename this. `BlockArrays.findblock(a, k)` finds the -# block of the value `k`, while this finds the block of the index `k`. -# This could make use of the `BlockIndices` object, i.e. `block(BlockIndices(a)[index])`. -function blockedunitrange_findblock(a::AbstractBlockedUnitRange, index::Integer) - @boundscheck index in 1:length(a) || throw(BoundsError(a, index)) - return @inbounds findblock(a, index + first(a) - 1) -end - -# TODO: Move this to a `BlockArraysExtensions` library. -# TODO: Rename this. `BlockArrays.findblockindex(a, k)` finds the -# block index of the value `k`, while this finds the block index of the index `k`. -# This could make use of the `BlockIndices` object, i.e. `BlockIndices(a)[index]`. -function blockedunitrange_findblockindex(a::AbstractBlockedUnitRange, index::Integer) - @boundscheck index in 1:length(a) || throw(BoundsError()) - return @inbounds findblockindex(a, index + first(a) - 1) -end - -function blockedunitrange_getindices(a::AbstractUnitRange, indices) - return a[indices] -end - -# TODO: Move this to a `BlockArraysExtensions` library. -# Like `a[indices]` but preserves block structure. -# TODO: Consider calling this something else, for example -# `blocked_getindex`. See the discussion here: -# https://github.com/JuliaArrays/BlockArrays.jl/issues/347 -function blockedunitrange_getindices( - a::AbstractBlockedUnitRange, indices::AbstractUnitRange{<:Integer} -) - first_blockindex = blockedunitrange_findblockindex(a, first(indices)) - last_blockindex = blockedunitrange_findblockindex(a, last(indices)) - first_block = block(first_blockindex) - last_block = block(last_blockindex) - blocklengths = if first_block == last_block - [length(indices)] - else - map(first_block:last_block) do block - if block == first_block - return length(a[first_block]) - blockindex(first_blockindex) + 1 - end - if block == last_block - return blockindex(last_blockindex) - end - return length(a[block]) - end - end - return blockedunitrange(indices .+ (first(a) - 1), blocklengths) -end - -# TODO: Make sure this handles block labels (AbstractGradedUnitRange) correctly. -# TODO: Make a special case for `BlockedVector{<:Block{1},<:BlockRange{1}}`? 
-# For example: -# ```julia -# blocklengths = map(bs -> sum(b -> length(a[b]), bs), blocks(indices)) -# return blockedrange(blocklengths) -# ``` -function blockedunitrange_getindices( - a::AbstractBlockedUnitRange, indices::AbstractBlockVector{<:Block{1}} -) - blks = map(bs -> mortar(map(b -> a[b], bs)), blocks(indices)) - # We pass `length.(blks)` to `mortar` in order - # to pass block labels to the axes of the output, - # if they exist. This makes it so that - # `only(axes(a[indices])) isa `GradedUnitRange` - # if `a isa `GradedUnitRange`, for example. - # Note there is a more specialized definition: - # ```julia - # function blockedunitrange_getindices( - # a::AbstractGradedUnitRange, indices::AbstractBlockVector{<:Block{1}} - # ) - # ``` - # that does a better job of preserving labels, since `length` - # may drop labels for certain block types. - return mortar(blks, length.(blks)) -end - -# TODO: Move this to a `BlockArraysExtensions` library. -function blockedunitrange_getindices(a::AbstractBlockedUnitRange, indices::BlockIndexRange) - return a[block(indices)][only(indices.indices)] -end - -# TODO: Move this to a `BlockArraysExtensions` library. -function blockedunitrange_getindices(a::AbstractBlockedUnitRange, indices::BlockSlice) - # TODO: Is this a good definition? It ignores `indices.indices`. - return a[indices.block] -end - -# TODO: Move this to a `BlockArraysExtensions` library. -function blockedunitrange_getindices( - a::AbstractBlockedUnitRange, indices::Vector{<:Integer} -) - return map(index -> a[index], indices) -end - -# TODO: Move this to a `BlockArraysExtensions` library. -# TODO: Make a special definition for `BlockedVector{<:Block{1}}` in order -# to merge blocks. -function blockedunitrange_getindices( - a::AbstractBlockedUnitRange, indices::AbstractVector{<:Union{Block{1},BlockIndexRange{1}}} -) - # Without converting `indices` to `Vector`, - # mapping `indices` outputs a `BlockVector` - # which is harder to reason about. - blocks = map(index -> a[index], Vector(indices)) - # We pass `length.(blocks)` to `mortar` in order - # to pass block labels to the axes of the output, - # if they exist. This makes it so that - # `only(axes(a[indices])) isa `GradedUnitRange` - # if `a isa `GradedUnitRange`, for example. - return mortar(blocks, length.(blocks)) -end - -# TODO: Move this to a `BlockArraysExtensions` library. -function blockedunitrange_getindices(a::AbstractBlockedUnitRange, indices::Block{1}) - return a[indices] -end - -function blockedunitrange_getindices( - a::AbstractBlockedUnitRange, - indices::BlockVector{<:BlockIndex{1},<:Vector{<:BlockIndexRange{1}}}, -) - return mortar(map(b -> a[b], blocks(indices))) -end - -# TODO: Move this to a `BlockArraysExtensions` library. -function blockedunitrange_getindices(a::AbstractBlockedUnitRange, indices) - return error("Not implemented.") -end - -# The blocks of the corresponding slice. -_blocks(a::AbstractUnitRange, indices) = error("Not implemented") -function _blocks(a::AbstractUnitRange, indices::AbstractUnitRange) - return findblock(a, first(indices)):findblock(a, last(indices)) -end -function _blocks(a::AbstractUnitRange, indices::BlockRange) - return indices -end - -# Slice `a` by `I`, returning a: -# `BlockVector{<:BlockIndex{1},<:Vector{<:BlockIndexRange{1}}}` -# with the `BlockIndex{1}` corresponding to each value of `I`. 
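For orientation, the `to_blockindices` helper defined just below turns an ordinary integer range into per-block index ranges. A small sketch of the intended behavior using the public BlockArrays API (a hypothetical example, not part of the deleted file):

```julia
using BlockArrays: Block, block, blockindex, blockedrange, findblockindex

r = blockedrange([2, 3])  # blocks: 1:2 and 3:5
# Slicing with 2:4 touches Block(1) at position 2 and Block(2) at positions
# 1:2, so `to_blockindices(r, 2:4)` should yield [Block(1)[2:2], Block(2)[1:2]].
bi = findblockindex(r, 2)
block(bi)       # Block(1)
blockindex(bi)  # 2
```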
-function to_blockindices(a::AbstractBlockedUnitRange{<:Integer}, I::UnitRange{<:Integer}) - return mortar( - map(blocks(blockedunitrange_getindices(a, I))) do r - bi_first = findblockindex(a, first(r)) - bi_last = findblockindex(a, last(r)) - @assert block(bi_first) == block(bi_last) - return block(bi_first)[blockindex(bi_first):blockindex(bi_last)] - end, - ) -end - -# This handles non-blocked slices. -# For example: -# a = BlockSparseArray{Float64}([2, 2, 2, 2]) -# I = BlockedVector(Block.(1:4), [2, 2]) -# @views a[I][Block(1)] -to_blockindices(a::Base.OneTo{<:Integer}, I::UnitRange{<:Integer}) = I diff --git a/NDTensors/src/lib/GradedAxes/src/dual.jl b/NDTensors/src/lib/GradedAxes/src/dual.jl deleted file mode 100644 index 877ba1a857..0000000000 --- a/NDTensors/src/lib/GradedAxes/src/dual.jl +++ /dev/null @@ -1,15 +0,0 @@ -# default behavior: any object is self-dual -dual(x) = x -nondual(r::AbstractUnitRange) = r -isdual(::AbstractUnitRange) = false - -using NDTensors.LabelledNumbers: - LabelledStyle, IsLabelled, NotLabelled, label, labelled, unlabel - -dual(i::LabelledInteger) = labelled(unlabel(i), dual(label(i))) -label_dual(x) = label_dual(LabelledStyle(x), x) -label_dual(::NotLabelled, x) = x -label_dual(::IsLabelled, x) = labelled(unlabel(x), dual(label(x))) - -flip(a::AbstractUnitRange) = dual(label_dual(a)) -flip(g::AbstractGradedUnitRange) = dual(gradedrange(label_dual.(blocklengths(g)))) diff --git a/NDTensors/src/lib/GradedAxes/src/fusion.jl b/NDTensors/src/lib/GradedAxes/src/fusion.jl deleted file mode 100644 index 2e893ec1db..0000000000 --- a/NDTensors/src/lib/GradedAxes/src/fusion.jl +++ /dev/null @@ -1,112 +0,0 @@ -using BlockArrays: AbstractBlockedUnitRange, blocklengths - -# https://github.com/ITensor/ITensors.jl/blob/v0.3.57/NDTensors/src/lib/GradedAxes/src/tensor_product.jl -# https://en.wikipedia.org/wiki/Tensor_product -# https://github.com/KeitaNakamura/Tensorial.jl -function tensor_product( - a1::AbstractUnitRange, - a2::AbstractUnitRange, - a3::AbstractUnitRange, - a_rest::Vararg{AbstractUnitRange}, -) - return foldl(tensor_product, (a1, a2, a3, a_rest...)) -end - -flip_dual(r::AbstractUnitRange) = r -flip_dual(r::GradedUnitRangeDual) = flip(r) -function tensor_product(a1::AbstractUnitRange, a2::AbstractUnitRange) - return tensor_product(flip_dual(a1), flip_dual(a2)) -end - -function tensor_product(a1::Base.OneTo, a2::Base.OneTo) - return Base.OneTo(length(a1) * length(a2)) -end - -function tensor_product(::OneToOne, a2::AbstractUnitRange) - return a2 -end - -function tensor_product(a1::AbstractUnitRange, ::OneToOne) - return a1 -end - -function tensor_product(::OneToOne, ::OneToOne) - return OneToOne() -end - -function fuse_labels(x, y) - return error( - "`fuse_labels` not implemented for objects of types `$(typeof(x))` and `$(typeof(y))`." - ) -end -
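`fuse_labels` is the customization point a label type opts into; the `fuse_blocklengths` methods that follow use it to combine one pair of blocks at a time. A hedged sketch for a toy additive label (this `U1` mirrors the one defined in the test files later in this patch; it is illustrative, not part of the deleted source):

```julia
using NDTensors.GradedAxes: GradedAxes, gradedrange, tensor_product

struct U1
  n::Int
end
# Labels add under fusion and sort by charge.
GradedAxes.fuse_labels(x::U1, y::U1) = U1(x.n + y.n)
Base.isless(x::U1, y::U1) = x.n < y.n

g = gradedrange([U1(0) => 2, U1(1) => 3])
# Every pair of blocks fuses: lengths multiply and labels combine via
# `fuse_labels`, giving blocks of sizes [4, 6, 6, 9] labelled
# U1(0), U1(1), U1(1), U1(2).
tensor_product(g, g)
```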
-function fuse_blocklengths(x::Integer, y::Integer) - # return a blocked unit range to keep the non-abelian interface - return blockedrange([x * y]) -end - -using ..LabelledNumbers: LabelledInteger, label, labelled, unlabel -function fuse_blocklengths(x::LabelledInteger, y::LabelledInteger) - # return a blocked unit range to keep the non-abelian interface - return blockedrange([labelled(x * y, fuse_labels(label(x), label(y)))]) -end - -using BlockArrays: blockedrange, blocks -function tensor_product(a1::AbstractBlockedUnitRange, a2::AbstractBlockedUnitRange) - nested = map(Iterators.flatten((Iterators.product(blocks(a1), blocks(a2)),))) do it - return mapreduce(length, fuse_blocklengths, it) - end - new_blocklengths = mapreduce(blocklengths, vcat, nested) - return blockedrange(new_blocklengths) -end - -# convention: sort GradedUnitRangeDual according to nondual blocks -function blocksortperm(a::AbstractUnitRange) - return Block.(sortperm(blocklabels(nondual(a)))) -end - -using BlockArrays: Block, BlockVector -using SplitApplyCombine: groupcount -# Get the permutation for sorting, then group by common elements. -# groupsortperm([2, 1, 2, 3]) == [[2], [1, 3], [4]] -function groupsortperm(v; kwargs...) - perm = sortperm(v; kwargs...) - v_sorted = @view v[perm] - group_lengths = collect(groupcount(identity, v_sorted)) - return BlockVector(perm, group_lengths) -end - -# Used by `TensorAlgebra.splitdims` in `BlockSparseArraysGradedAxesExt`. -# Get the permutation for sorting, then group by common elements. -# groupsortperm([2, 1, 2, 3]) == [[2], [1, 3], [4]] -function blockmergesortperm(a::AbstractUnitRange) - return Block.(groupsortperm(blocklabels(nondual(a)))) -end - -# Used by `TensorAlgebra.splitdims` in `BlockSparseArraysGradedAxesExt`. -invblockperm(a::Vector{<:Block{1}}) = Block.(invperm(Int.(a))) - -function blockmergesort(g::AbstractGradedUnitRange) - glabels = blocklabels(g) - gblocklengths = blocklengths(g) - new_blocklengths = map(sort(unique(glabels))) do la - return labelled(sum(gblocklengths[findall(==(la), glabels)]; init=0), la) - end - return gradedrange(new_blocklengths) -end - -blockmergesort(g::GradedUnitRangeDual) = flip(blockmergesort(flip(g))) -blockmergesort(g::AbstractUnitRange) = g - -# fusion_product produces a sorted, non-dual GradedUnitRange -function fusion_product(g1, g2) - return blockmergesort(tensor_product(g1, g2)) -end - -fusion_product(g::AbstractUnitRange) = blockmergesort(g) -fusion_product(g::GradedUnitRangeDual) = fusion_product(flip(g)) - -# Recursive fusion_product. Simpler than `reduce`, and avoids type-stability issues with `reduce`. -function fusion_product(g1, g2, g3...) - return fusion_product(fusion_product(g1, g2), g3...)
-end diff --git a/NDTensors/src/lib/GradedAxes/src/gradedunitrange.jl b/NDTensors/src/lib/GradedAxes/src/gradedunitrange.jl deleted file mode 100644 index 76eaf42692..0000000000 --- a/NDTensors/src/lib/GradedAxes/src/gradedunitrange.jl +++ /dev/null @@ -1,384 +0,0 @@ -using BlockArrays: - BlockArrays, - AbstractBlockVector, - AbstractBlockedUnitRange, - Block, - BlockIndex, - BlockRange, - BlockSlice, - BlockVector, - BlockedOneTo, - BlockedUnitRange, - blockedrange, - BlockIndexRange, - blockfirsts, - blockisequal, - blocklength, - blocklengths, - findblock, - findblockindex, - mortar, - sortedunion -using Compat: allequal -using FillArrays: Fill -using ..LabelledNumbers: - LabelledNumbers, - LabelledInteger, - LabelledUnitRange, - label, - label_type, - labelled, - labelled_isequal, - unlabel - -abstract type AbstractGradedUnitRange{T,BlockLasts} <: - AbstractBlockedUnitRange{T,BlockLasts} end - -struct GradedUnitRange{T,BlockLasts<:Vector{T}} <: AbstractGradedUnitRange{T,BlockLasts} - first::T - lasts::BlockLasts -end - -struct GradedOneTo{T,BlockLasts<:Vector{T}} <: AbstractGradedUnitRange{T,BlockLasts} - lasts::BlockLasts - - # assume that lasts is sorted, no checks carried out here - function GradedOneTo(lasts::BlockLasts) where {T<:Integer,BlockLasts<:AbstractVector{T}} - Base.require_one_based_indexing(lasts) - isempty(lasts) || first(lasts) >= 0 || throw(ArgumentError("blocklasts must be >= 0")) - return new{T,BlockLasts}(lasts) - end - function GradedOneTo(lasts::BlockLasts) where {T<:Integer,BlockLasts<:Tuple{T,Vararg{T}}} - first(lasts) >= 0 || throw(ArgumentError("blocklasts must be >= 0")) - return new{T,BlockLasts}(lasts) - end -end - -function Base.show(io::IO, ::MIME"text/plain", g::AbstractGradedUnitRange) - v = map(b -> label(b) => unlabel(b), blocks(g)) - println(io, typeof(g)) - return print(io, join(repr.(v), '\n')) -end - -function Base.show(io::IO, g::AbstractGradedUnitRange) - v = map(b -> label(b) => unlabel(b), blocks(g)) - return print(io, nameof(typeof(g)), '[', join(repr.(v), ", "), ']') -end - -# == is just a range comparison that ignores labels. A dedicated function is needed to check equality. -struct NoLabel end -blocklabels(r::AbstractUnitRange) = Fill(NoLabel(), blocklength(r)) -blocklabels(la::LabelledUnitRange) = [label(la)] - -function LabelledNumbers.labelled_isequal(a1::AbstractUnitRange, a2::AbstractUnitRange) - return blockisequal(a1, a2) && (blocklabels(a1) == blocklabels(a2)) -end - -function space_isequal(a1::AbstractUnitRange, a2::AbstractUnitRange) - return (isdual(a1) == isdual(a2)) && labelled_isequal(a1, a2) -end - -# needed in BlockSparseArrays -function Base.AbstractUnitRange{T}( - a::AbstractGradedUnitRange{<:LabelledInteger{T}} -) where {T} - return unlabel_blocks(a) -end - -# TODO: Use `TypeParameterAccessors`. -Base.eltype(::Type{<:GradedUnitRange{T}}) where {T} = T -LabelledNumbers.label_type(g::AbstractGradedUnitRange) = label_type(typeof(g)) -LabelledNumbers.label_type(T::Type{<:AbstractGradedUnitRange}) = label_type(eltype(T)) - -function gradedrange(lblocklengths::AbstractVector{<:LabelledInteger}) - brange = blockedrange(unlabel.(lblocklengths)) - lblocklasts = labelled.(blocklasts(brange), label.(lblocklengths)) - return GradedOneTo(lblocklasts) -end - -# To help with generic code. -function BlockArrays.blockedrange(lblocklengths::AbstractVector{<:LabelledInteger}) - return gradedrange(lblocklengths) -end - -Base.last(a::AbstractGradedUnitRange) = isempty(a.lasts) ? first(a) - 1 : last(a.lasts)
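`gradedrange` builds a `GradedOneTo` by attaching one label per block length, and `blocklabels`/`blocklengths` read the structure back. A brief usage sketch (grounded in the `test_basics.jl` tests further down in this patch):

```julia
using BlockArrays: blocklengths
using NDTensors.GradedAxes: blocklabels, gradedrange
using NDTensors.LabelledNumbers: label, unlabel

g = gradedrange(["x" => 2, "y" => 3])  # blocks 1:2 ("x") and 3:5 ("y")
blocklabels(g)             # ["x", "y"]
unlabel.(blocklengths(g))  # [2, 3]
label(g[4])                # "y": element 4 lives in the second block
```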
- -function gradedrange(lblocklengths::AbstractVector{<:Pair{<:Any,<:Integer}}) - return gradedrange(labelled.(last.(lblocklengths), first.(lblocklengths))) -end - -function labelled_blocks(a::BlockedOneTo, labels) - # TODO: Use `blocklasts(a)`? That might - # cause a recursive loop. - return GradedOneTo(labelled.(a.lasts, labels)) -end -function labelled_blocks(a::BlockedUnitRange, labels) - # TODO: Use `first(a)` and `blocklasts(a)`? Those might - # cause a recursive loop. - return GradedUnitRange(labelled(a.first, labels[1]), labelled.(a.lasts, labels)) -end - -function BlockArrays.findblock(a::AbstractGradedUnitRange, index::Integer) - return blockedunitrange_findblock(unlabel_blocks(a), index) -end - -function blockedunitrange_findblock(a::AbstractGradedUnitRange, index::Integer) - return blockedunitrange_findblock(unlabel_blocks(a), index) -end - -function blockedunitrange_findblockindex(a::AbstractGradedUnitRange, index::Integer) - return blockedunitrange_findblockindex(unlabel_blocks(a), index) -end - -function BlockArrays.findblockindex(a::AbstractGradedUnitRange, index::Integer) - return blockedunitrange_findblockindex(unlabel_blocks(a), index) -end - -## Block label interface - -# Internal function -function get_label(a::AbstractUnitRange, index::Block{1}) - return label(blocklasts(a)[Int(index)]) -end - -# Internal function -function get_label(a::AbstractUnitRange, index::Integer) - return get_label(a, blockedunitrange_findblock(a, index)) -end - -function blocklabels(a::AbstractBlockVector) - return map(BlockRange(a)) do block - return label(@view(a[block])) - end -end - -function blocklabels(a::AbstractBlockedUnitRange) - # Using `a.lasts` here since that is what is stored - # inside of `BlockedUnitRange`, maybe change that. - # For example, it could be something like: - # - # map(BlockRange(a)) do block - # return label(@view(a[block])) - # end - # - return label.(a.lasts) -end - -# TODO: This relies on internals of `BlockArrays`, maybe redesign -# to try to avoid that. -# TODO: Define `set_grades`, `set_sector_labels`, `set_labels`. -function unlabel_blocks(a::GradedOneTo) - # TODO: Use `blocklasts(a)`. - return BlockedOneTo(unlabel.(a.lasts)) -end -function unlabel_blocks(a::GradedUnitRange) - return BlockArrays._BlockedUnitRange(a.first, unlabel.(a.lasts)) -end - -## BlockedUnitRange interface - -function Base.axes(ga::AbstractGradedUnitRange) - return map(axes(unlabel_blocks(ga))) do a - return labelled_blocks(a, blocklabels(ga)) - end -end - -function gradedunitrange_blockfirsts(a::AbstractGradedUnitRange) - return labelled.(blockfirsts(unlabel_blocks(a)), blocklabels(a)) -end -function BlockArrays.blockfirsts(a::AbstractGradedUnitRange) - return gradedunitrange_blockfirsts(a) -end - -function BlockArrays.blocklasts(a::AbstractGradedUnitRange) - return labelled.(blocklasts(unlabel_blocks(a)), blocklabels(a)) -end - -function BlockArrays.blocklengths(a::AbstractGradedUnitRange) - return labelled.(blocklengths(unlabel_blocks(a)), blocklabels(a)) -end - -function gradedunitrange_first(a::AbstractUnitRange) - return labelled(first(unlabel_blocks(a)), label(a[Block(1)])) -end -function Base.first(a::AbstractGradedUnitRange) - return gradedunitrange_first(a) -end - -Base.iterate(a::AbstractGradedUnitRange) = isempty(a) ?
nothing : (first(a), first(a)) -function Base.iterate(a::AbstractGradedUnitRange, i) - i == last(a) && return nothing - next = a[i + step(a)] - return (next, next) -end - -function firstblockindices(a::AbstractGradedUnitRange) - return labelled.(firstblockindices(unlabel_blocks(a)), blocklabels(a)) -end - -function blockedunitrange_getindices(a::AbstractGradedUnitRange, index::Block{1}) - return labelled(unlabel_blocks(a)[index], get_label(a, index)) -end - -function blockedunitrange_getindices(a::AbstractGradedUnitRange, indices::Vector{<:Integer}) - return map(index -> a[index], indices) -end - -function blockedunitrange_getindices( - a::AbstractGradedUnitRange, - indices::BlockVector{<:BlockIndex{1},<:Vector{<:BlockIndexRange{1}}}, -) - return mortar(map(b -> a[b], blocks(indices))) -end - -function blockedunitrange_getindices(a::AbstractGradedUnitRange, index) - return labelled(unlabel_blocks(a)[index], get_label(a, index)) -end - -function blockedunitrange_getindices(a::AbstractGradedUnitRange, indices::BlockIndexRange) - return a[block(indices)][only(indices.indices)] -end - -function blockedunitrange_getindices( - a::AbstractGradedUnitRange, indices::AbstractVector{<:Union{Block{1},BlockIndexRange{1}}} -) - # Without converting `indices` to `Vector`, - # mapping `indices` outputs a `BlockVector` - # which is harder to reason about. - blocks = map(index -> a[index], Vector(indices)) - # We pass `length.(blocks)` to `mortar` in order - # to pass block labels to the axes of the output, - # if they exist. This makes it so that - # `only(axes(a[indices])) isa `GradedUnitRange` - # if `a isa `GradedUnitRange`, for example. - return mortar(blocks, length.(blocks)) -end - -# The block labels of the corresponding slice. -function blocklabels(a::AbstractUnitRange, indices) - return map(_blocks(a, indices)) do block - return label(a[block]) - end -end - -function blockedunitrange_getindices( - ga::AbstractGradedUnitRange, indices::AbstractUnitRange{<:Integer} -) - a_indices = blockedunitrange_getindices(unlabel_blocks(ga), indices) - return labelled_blocks(a_indices, blocklabels(ga, indices)) -end - -function blockedunitrange_getindices(a::AbstractGradedUnitRange, indices::BlockSlice) - return a[indices.block] -end - -function blockedunitrange_getindices(ga::AbstractGradedUnitRange, indices::BlockRange) - return labelled_blocks(unlabel_blocks(ga)[indices], blocklabels(ga, indices)) -end - -function blockedunitrange_getindices(a::AbstractGradedUnitRange, indices::BlockIndex{1}) - return a[block(indices)][blockindex(indices)] -end - -function Base.getindex(a::AbstractGradedUnitRange, index::Integer) - return labelled(unlabel_blocks(a)[index], get_label(a, index)) -end - -function Base.getindex(a::AbstractGradedUnitRange, index::Block{1}) - return blockedunitrange_getindices(a, index) -end - -function Base.getindex(a::AbstractGradedUnitRange, indices::BlockIndexRange) - return blockedunitrange_getindices(a, indices) -end - -# fix ambiguities -function Base.getindex( - a::AbstractGradedUnitRange, indices::BlockArrays.BlockRange{1,<:Tuple{Base.OneTo}} -) - return blockedunitrange_getindices(a, indices) -end -function Base.getindex( - a::AbstractGradedUnitRange, indices::BlockRange{1,<:Tuple{AbstractUnitRange{Int}}} -) - return blockedunitrange_getindices(a, indices) -end - -function Base.getindex(a::AbstractGradedUnitRange, indices::BlockIndex{1}) - return blockedunitrange_getindices(a, indices) -end - -# Fixes ambiguity issues with: -# ```julia -# getindex(::BlockedUnitRange, ::BlockSlice) -# 
getindex(::GradedUnitRange, ::AbstractUnitRange{<:Integer}) -# getindex(::GradedUnitRange, ::Any) -# getindex(::AbstractUnitRange, ::AbstractUnitRange{<:Integer}) -# ``` -function Base.getindex(a::AbstractGradedUnitRange, indices::BlockSlice) - return blockedunitrange_getindices(a, indices) -end - -function Base.getindex(a::AbstractGradedUnitRange, indices) - return blockedunitrange_getindices(a, indices) -end - -function Base.getindex(a::AbstractGradedUnitRange, indices::AbstractUnitRange{<:Integer}) - return blockedunitrange_getindices(a, indices) -end - -# This fixes an issue that `combine_blockaxes` was promoting -# the element type of the axes to `Integer` in broadcasting operations -# that mixed dense and graded axes. -# TODO: Maybe come up with a more general solution. -function BlockArrays.combine_blockaxes( - a1::AbstractGradedUnitRange{<:LabelledInteger{T}}, a2::AbstractUnitRange{T} -) where {T<:Integer} - combined_blocklasts = sort!(union(unlabel.(blocklasts(a1)), blocklasts(a2))) - return BlockedOneTo(combined_blocklasts) -end -function BlockArrays.combine_blockaxes( - a1::AbstractUnitRange{T}, a2::AbstractGradedUnitRange{<:LabelledInteger{T}} -) where {T<:Integer} - return BlockArrays.combine_blockaxes(a2, a1) -end - -# preserve labels inside combine_blockaxes -function BlockArrays.combine_blockaxes(a::GradedOneTo, b::GradedOneTo) - return GradedOneTo(sortedunion(blocklasts(a), blocklasts(b))) -end -function BlockArrays.combine_blockaxes(a::GradedUnitRange, b::GradedUnitRange) - new_blocklasts = sortedunion(blocklasts(a), blocklasts(b)) - new_first = labelled(oneunit(eltype(new_blocklasts)), label(first(new_blocklasts))) - return GradedUnitRange(new_first, new_blocklasts) -end - -# preserve axes in SubArray -Base.axes(S::Base.Slice{<:AbstractGradedUnitRange}) = (S.indices,) - -# Version of length that checks that all blocks have the same label -# and returns a labelled length with that label. -function labelled_length(a::AbstractBlockVector{<:Integer}) - blocklabels = label.(blocks(a)) - @assert allequal(blocklabels) - return labelled(unlabel(length(a)), first(blocklabels)) -end - -# TODO: Make sure this handles block labels (AbstractGradedUnitRange) correctly. -# TODO: Make a special case for `BlockedVector{<:Block{1},<:BlockRange{1}}`? -# For example: -# ```julia -# blocklengths = map(bs -> sum(b -> length(a[b]), bs), blocks(indices)) -# return blockedrange(blocklengths) -# ``` -function blockedunitrange_getindices( - a::AbstractGradedUnitRange, indices::AbstractBlockVector{<:Block{1}} -) - blks = map(bs -> mortar(map(b -> a[b], bs)), blocks(indices)) - # We pass `length.(blks)` to `mortar` in order - # to pass block labels to the axes of the output, - # if they exist. This makes it so that - # `only(axes(a[indices])) isa `GradedUnitRange` - # if `a isa `GradedUnitRange`, for example. 
- return mortar(blks, labelled_length.(blks)) -end diff --git a/NDTensors/src/lib/GradedAxes/src/gradedunitrangedual.jl b/NDTensors/src/lib/GradedAxes/src/gradedunitrangedual.jl deleted file mode 100644 index a16da982f9..0000000000 --- a/NDTensors/src/lib/GradedAxes/src/gradedunitrangedual.jl +++ /dev/null @@ -1,134 +0,0 @@ -struct GradedUnitRangeDual{ - T,BlockLasts,NondualUnitRange<:AbstractGradedUnitRange{T,BlockLasts} -} <: AbstractGradedUnitRange{T,BlockLasts} - nondual_unitrange::NondualUnitRange -end - -dual(a::AbstractGradedUnitRange) = GradedUnitRangeDual(a) -nondual(a::GradedUnitRangeDual) = a.nondual_unitrange -dual(a::GradedUnitRangeDual) = nondual(a) -flip(a::GradedUnitRangeDual) = dual(flip(nondual(a))) -isdual(::GradedUnitRangeDual) = true -## TODO: Define this to instantiate a dual unit range. -## materialize_dual(a::GradedUnitRangeDual) = materialize_dual(nondual(a)) - -Base.first(a::GradedUnitRangeDual) = label_dual(first(nondual(a))) -Base.last(a::GradedUnitRangeDual) = label_dual(last(nondual(a))) -Base.step(a::GradedUnitRangeDual) = label_dual(step(nondual(a))) - -Base.view(a::GradedUnitRangeDual, index::Block{1}) = a[index] - -function blockedunitrange_getindices( - a::GradedUnitRangeDual, indices::AbstractUnitRange{<:Integer} -) - return dual(getindex(nondual(a), indices)) -end - -using BlockArrays: Block, BlockIndexRange, BlockRange - -function blockedunitrange_getindices(a::GradedUnitRangeDual, indices::Integer) - return label_dual(getindex(nondual(a), indices)) -end - -function blockedunitrange_getindices(a::GradedUnitRangeDual, indices::Block{1}) - return dual(getindex(nondual(a), indices)) -end - -function blockedunitrange_getindices(a::GradedUnitRangeDual, indices::BlockRange) - return dual(getindex(nondual(a), indices)) -end - -function blockedunitrange_getindices(a::GradedUnitRangeDual, indices::BlockIndexRange) - return dual(nondual(a)[indices]) -end - -# fix ambiguity -function blockedunitrange_getindices( - a::GradedUnitRangeDual, indices::BlockRange{1,<:Tuple{AbstractUnitRange{Int}}} -) - return dual(getindex(nondual(a), indices)) -end - -function BlockArrays.blocklengths(a::GradedUnitRangeDual) - return dual.(blocklengths(nondual(a))) -end - -# TODO: Move this to a `BlockArraysExtensions` library. -function blockedunitrange_getindices( - a::GradedUnitRangeDual, indices::Vector{<:BlockIndexRange{1}} -) - # dual v axes to stay consistent with other cases where axes(v) are used - return dual_axes(blockedunitrange_getindices(nondual(a), indices)) -end - -function blockedunitrange_getindices( - a::GradedUnitRangeDual, - indices::BlockVector{<:BlockIndex{1},<:Vector{<:BlockIndexRange{1}}}, -) - # dual v axis to preserve dual information - # axes(v) will appear in axes(view(::BlockSparseArray, [Block(1)[1:1]])) - return dual_axes(blockedunitrange_getindices(nondual(a), indices)) -end - -function blockedunitrange_getindices( - a::GradedUnitRangeDual, indices::AbstractVector{<:Union{Block{1},BlockIndexRange{1}}} -) - # dual v axis to preserve dual information - # axes(v) will appear in axes(view(::BlockSparseArray, [Block(1)])) - return dual_axes(blockedunitrange_getindices(nondual(a), indices)) -end - -# Fixes ambiguity error. -function blockedunitrange_getindices( - a::GradedUnitRangeDual, indices::AbstractBlockVector{<:Block{1}} -) - v = blockedunitrange_getindices(nondual(a), indices) - # v elements are not dualled by dual_axes due to different structure. - # take element dual here. 
- return dual_axes(dual.(v)) -end - -function dual_axes(v::BlockVector) - # dual both v elements and v axes - block_axes = dual.(axes(v)) - return mortar(dual.(blocks(v)), block_axes) -end - -Base.axes(a::GradedUnitRangeDual) = axes(nondual(a)) - -using BlockArrays: BlockArrays, Block, BlockSlice -using NDTensors.LabelledNumbers: LabelledUnitRange -function BlockArrays.BlockSlice(b::Block, a::LabelledUnitRange) - return BlockSlice(b, unlabel(a)) -end - -using BlockArrays: BlockArrays, BlockSlice -using NDTensors.GradedAxes: GradedUnitRangeDual, dual -function BlockArrays.BlockSlice(b::Block, r::GradedUnitRangeDual) - return BlockSlice(b, dual(r)) -end - -using NDTensors.LabelledNumbers: LabelledNumbers, LabelledUnitRange, label -function Base.iterate(a::GradedUnitRangeDual, i) - i == last(a) && return nothing - return dual.(iterate(nondual(a), i)) -end - -using NDTensors.LabelledNumbers: LabelledInteger, label, labelled, unlabel -using BlockArrays: BlockArrays, blockaxes, blocklasts, combine_blockaxes, findblock -BlockArrays.blockaxes(a::GradedUnitRangeDual) = blockaxes(nondual(a)) -BlockArrays.blockfirsts(a::GradedUnitRangeDual) = label_dual.(blockfirsts(nondual(a))) -BlockArrays.blocklasts(a::GradedUnitRangeDual) = label_dual.(blocklasts(nondual(a))) -function BlockArrays.findblock(a::GradedUnitRangeDual, index::Integer) - return findblock(nondual(a), index) -end - -blocklabels(a::GradedUnitRangeDual) = dual.(blocklabels(nondual(a))) - -function BlockArrays.combine_blockaxes(a1::GradedUnitRangeDual, a2::GradedUnitRangeDual) - return dual(combine_blockaxes(nondual(a1), nondual(a2))) -end - -function unlabel_blocks(a::GradedUnitRangeDual) - return unlabel_blocks(nondual(a)) -end diff --git a/NDTensors/src/lib/GradedAxes/src/labelledunitrangedual.jl b/NDTensors/src/lib/GradedAxes/src/labelledunitrangedual.jl deleted file mode 100644 index 466d64945b..0000000000 --- a/NDTensors/src/lib/GradedAxes/src/labelledunitrangedual.jl +++ /dev/null @@ -1,49 +0,0 @@ -# LabelledUnitRangeDual is obtained by slicing a GradedUnitRangeDual with a block - -using ..LabelledNumbers: LabelledNumbers, label, labelled, unlabel - -struct LabelledUnitRangeDual{T,NondualUnitRange<:AbstractUnitRange{T}} <: - AbstractUnitRange{T} - nondual_unitrange::NondualUnitRange -end - -dual(a::LabelledUnitRange) = LabelledUnitRangeDual(a) -nondual(a::LabelledUnitRangeDual) = a.nondual_unitrange -dual(a::LabelledUnitRangeDual) = nondual(a) -label_dual(::IsLabelled, a::LabelledUnitRangeDual) = dual(label_dual(nondual(a))) -isdual(::LabelledUnitRangeDual) = true -blocklabels(la::LabelledUnitRangeDual) = [label(la)] - -LabelledNumbers.label(a::LabelledUnitRangeDual) = dual(label(nondual(a))) -LabelledNumbers.unlabel(a::LabelledUnitRangeDual) = unlabel(nondual(a)) -LabelledNumbers.LabelledStyle(::LabelledUnitRangeDual) = IsLabelled() - -for f in [:first, :getindex, :last, :length, :step] - @eval Base.$f(a::LabelledUnitRangeDual, args...) 
= - labelled($f(unlabel(a), args...), label(a)) -end - -# fix ambiguities -Base.getindex(a::LabelledUnitRangeDual, i::Integer) = dual(nondual(a)[i]) -function Base.getindex(a::LabelledUnitRangeDual, indices::AbstractUnitRange{<:Integer}) - return dual(nondual(a)[indices]) -end - -function Base.iterate(a::LabelledUnitRangeDual, i) - i == last(a) && return nothing - next = convert(eltype(a), labelled(i + step(a), label(a))) - return (next, next) -end - -function Base.show(io::IO, ::MIME"text/plain", a::LabelledUnitRangeDual) - println(io, typeof(a)) - return print(io, label(a), " => ", unlabel(a)) -end - -function Base.show(io::IO, a::LabelledUnitRangeDual) - return print(io, nameof(typeof(a)), " ", label(a), " => ", unlabel(a)) -end - -function Base.AbstractUnitRange{T}(a::LabelledUnitRangeDual) where {T} - return AbstractUnitRange{T}(nondual(a)) -end diff --git a/NDTensors/src/lib/GradedAxes/src/onetoone.jl b/NDTensors/src/lib/GradedAxes/src/onetoone.jl deleted file mode 100644 index 426df396b1..0000000000 --- a/NDTensors/src/lib/GradedAxes/src/onetoone.jl +++ /dev/null @@ -1,9 +0,0 @@ -using BlockArrays: AbstractBlockedUnitRange -using ..LabelledNumbers: islabelled - -# Represents the range `1:1` or `Base.OneTo(1)`. -struct OneToOne{T} <: AbstractUnitRange{T} end -OneToOne() = OneToOne{Bool}() -Base.first(a::OneToOne) = one(eltype(a)) -Base.last(a::OneToOne) = one(eltype(a)) -BlockArrays.blockaxes(g::OneToOne) = (Block.(g),) # BlockArrays default crashes for OneToOne{Bool} diff --git a/NDTensors/src/lib/GradedAxes/test/Project.toml b/NDTensors/src/lib/GradedAxes/test/Project.toml deleted file mode 100644 index d1bf575ce0..0000000000 --- a/NDTensors/src/lib/GradedAxes/test/Project.toml +++ /dev/null @@ -1,4 +0,0 @@ -[deps] -BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/NDTensors/src/lib/GradedAxes/test/runtests.jl b/NDTensors/src/lib/GradedAxes/test/runtests.jl deleted file mode 100644 index c0fdca21be..0000000000 --- a/NDTensors/src/lib/GradedAxes/test/runtests.jl +++ /dev/null @@ -1,8 +0,0 @@ -@eval module $(gensym()) -using Test: @testset -@testset "GradedAxes" begin - include("test_basics.jl") - include("test_dual.jl") - include("test_tensor_product.jl") -end -end diff --git a/NDTensors/src/lib/GradedAxes/test/test_basics.jl b/NDTensors/src/lib/GradedAxes/test/test_basics.jl deleted file mode 100644 index 90faa59b93..0000000000 --- a/NDTensors/src/lib/GradedAxes/test/test_basics.jl +++ /dev/null @@ -1,256 +0,0 @@ -@eval module $(gensym()) -using BlockArrays: - Block, - BlockRange, - BlockSlice, - BlockVector, - blockedrange, - blockfirsts, - blocklasts, - blocklength, - blocklengths, - blocks, - combine_blockaxes, - mortar -using NDTensors.GradedAxes: - GradedOneTo, GradedUnitRange, OneToOne, blocklabels, gradedrange, space_isequal -using NDTensors.LabelledNumbers: - LabelledUnitRange, islabelled, label, labelled, labelled_isequal, unlabel -using Test: @test, @test_broken, @testset - -@testset "OneToOne" begin - a0 = OneToOne() - @test a0 isa OneToOne{Bool} - @test eltype(a0) == Bool - @test length(a0) == 1 - @test labelled_isequal(a0, a0) - @test a0[1] == true - @test a0[[1]] == [true] - - @test labelled_isequal(a0, 1:1) - @test labelled_isequal(1:1, a0) - @test !labelled_isequal(a0, 1:2) - @test !labelled_isequal(1:2, a0) -end - -@testset "GradedAxes basics" begin - a0 = OneToOne() - for a in ( - blockedrange([labelled(2, "x"), labelled(3, "y")]), - 
gradedrange([labelled(2, "x"), labelled(3, "y")]), - gradedrange(["x" => 2, "y" => 3]), - ) - @test a isa GradedOneTo - @test labelled_isequal(a, a) - @test !labelled_isequal(a0, a) - @test !labelled_isequal(a, a0) - @test !labelled_isequal(a, 1:5) - for x in iterate(a) - @test x == 1 - @test label(x) == "x" - end - for x in iterate(a, labelled(1, "x")) - @test x == 2 - @test label(x) == "x" - end - for x in iterate(a, labelled(2, "x")) - @test x == 3 - @test label(x) == "y" - end - for x in iterate(a, labelled(3, "y")) - @test x == 4 - @test label(x) == "y" - end - for x in iterate(a, labelled(4, "y")) - @test x == 5 - @test label(x) == "y" - end - @test isnothing(iterate(a, labelled(5, "y"))) - @test labelled_isequal(a, a) - @test length(a) == 5 - @test step(a) == 1 - @test !islabelled(step(a)) - @test length(blocks(a)) == 2 - @test blocks(a)[1] == 1:2 - @test label(blocks(a)[1]) == "x" - @test blocks(a)[2] == 3:5 - @test label(blocks(a)[2]) == "y" - @test a[Block(2)] == 3:5 - @test label(a[Block(2)]) == "y" - @test a[Block(2)] isa LabelledUnitRange - @test a[4] == 4 - @test label(a[4]) == "y" - @test unlabel(a[4]) == 4 - @test blocklengths(a) == [2, 3] - @test blocklabels(a) == ["x", "y"] - @test label.(blocklengths(a)) == ["x", "y"] - @test blockfirsts(a) == [1, 3] - @test label.(blockfirsts(a)) == ["x", "y"] - @test first(a) == 1 - @test label(first(a)) == "x" - @test blocklasts(a) == [2, 5] - @test label.(blocklasts(a)) == ["x", "y"] - @test last(a) == 5 - @test label(last(a)) == "y" - @test a[Block(2)] == 3:5 - @test label(a[Block(2)]) == "y" - @test length(a[Block(2)]) == 3 - @test blocklengths(only(axes(a))) == blocklengths(a) - @test blocklabels(only(axes(a))) == blocklabels(a) - - @test axes(Base.Slice(a)) isa Tuple{typeof(a)} - @test AbstractUnitRange{Int}(a) == 1:5 - b = combine_blockaxes(a, a) - @test b isa GradedOneTo - @test b == 1:5 - @test space_isequal(b, a) - end - - # Slicing operations - x = gradedrange(["x" => 2, "y" => 3]) - a = x[2:4] - @test a isa GradedUnitRange - @test length(a) == 3 - @test blocklength(a) == 2 - @test a[Block(1)] == 2:2 - @test label(a[Block(1)]) == "x" - @test a[Block(2)] == 3:4 - @test label(a[Block(2)]) == "y" - ax = only(axes(a)) - @test ax == 1:length(a) - @test length(ax) == length(a) - @test blocklengths(ax) == blocklengths(a) - @test blocklabels(ax) == blocklabels(a) - @test blockfirsts(a) == [2, 3] - - @test AbstractUnitRange{Int}(a) == 2:4 - b = combine_blockaxes(a, a) - @test b isa GradedUnitRange - @test b == 1:4 - - @test x[[2, 4]] == [labelled(2, "x"), labelled(4, "y")] - @test labelled_isequal(x[BlockRange(1)], gradedrange(["x" => 2])) - - # Regression test for ambiguity error. - x = gradedrange(["x" => 2, "y" => 3]) - a = x[BlockSlice(Block(1), Base.OneTo(2))] - @test length(a) == 2 - @test a == 1:2 - @test blocklength(a) == 1 - # TODO: Should this be a `GradedUnitRange`, - # or maybe just a `LabelledUnitRange`? 
- @test a isa LabelledUnitRange - @test length(a[Block(1)]) == 2 - @test label(a) == "x" - @test a[Block(1)] == 1:2 - @test label(a[Block(1)]) == "x" - - x = gradedrange(["x" => 2, "y" => 3]) - a = x[3:4] - @test a isa GradedUnitRange - @test length(a) == 2 - @test blocklength(a) == 1 - @test a[Block(1)] == 3:4 - @test label(a[Block(1)]) == "y" - ax = only(axes(a)) - @test ax == 1:length(a) - @test length(ax) == length(a) - @test blocklengths(ax) == blocklengths(a) - @test blocklabels(ax) == blocklabels(a) - @test axes(Base.Slice(a)) isa Tuple{typeof(a)} - - x = gradedrange(["x" => 2, "y" => 3]) - a = x[2:4][1:2] - @test a isa GradedUnitRange - @test length(a) == 2 - @test blocklength(a) == 2 - @test a[Block(1)] == 2:2 - @test label(a[Block(1)]) == "x" - @test a[Block(2)] == 3:3 - @test label(a[Block(2)]) == "y" - ax = only(axes(a)) - @test ax == 1:length(a) - @test length(ax) == length(a) - @test blocklengths(ax) == blocklengths(a) - @test blocklabels(ax) == blocklabels(a) - - x = gradedrange(["x" => 2, "y" => 3]) - a = x[Block(2)[2:3]] - @test a isa LabelledUnitRange - @test length(a) == 2 - @test a == 4:5 - @test label(a) == "y" - ax = only(axes(a)) - @test ax == 1:length(a) - @test length(ax) == length(a) - @test label(ax) == label(a) - - x = gradedrange(["x" => 2, "y" => 3, "z" => 4]) - a = x[Block(2):Block(3)] - @test a isa GradedUnitRange - @test length(a) == 7 - @test blocklength(a) == 2 - @test blocklengths(a) == [3, 4] - @test blocklabels(a) == ["y", "z"] - @test a[Block(1)] == 3:5 - @test a[Block(2)] == 6:9 - ax = only(axes(a)) - @test ax == 1:length(a) - @test length(ax) == length(a) - @test blocklengths(ax) == blocklengths(a) - @test blocklabels(ax) == blocklabels(a) - - x = gradedrange(["x" => 2, "y" => 3, "z" => 4]) - a = x[[Block(3), Block(2)]] - @test a isa BlockVector - @test length(a) == 7 - @test blocklength(a) == 2 - # TODO: `BlockArrays` doesn't define - # `blocklengths(::BlockVector)`, unbreak this test - # once it does. - @test_broken blocklengths(a) == [4, 3] - @test blocklabels(a) == ["z", "y"] - @test a[Block(1)] == 6:9 - @test a[Block(2)] == 3:5 - ax = only(axes(a)) - @test ax == 1:length(a) - @test length(ax) == length(a) - # TODO: Change to: - # @test blocklengths(ax) == blocklengths(a) - # once `blocklengths(::BlockVector)` is defined. - @test blocklengths(ax) == [4, 3] - @test blocklabels(ax) == blocklabels(a) - - x = gradedrange(["x" => 2, "y" => 3, "z" => 4]) - a = x[[Block(3)[2:3], Block(2)[2:3]]] - @test a isa BlockVector - @test length(a) == 4 - @test blocklength(a) == 2 - # TODO: `BlockArrays` doesn't define `blocklengths` - # for `BlockVector`, should it? - @test_broken blocklengths(a) == [2, 2] - @test blocklabels(a) == ["z", "y"] - @test a[Block(1)] == 7:8 - @test a[Block(2)] == 4:5 - ax = only(axes(a)) - @test ax == 1:length(a) - @test length(ax) == length(a) - # TODO: Change to: - # @test blocklengths(ax) == blocklengths(a) - # once `blocklengths(::BlockVector)` is defined.
- @test blocklengths(ax) == [2, 2] - @test blocklabels(ax) == blocklabels(a) - - x = gradedrange(["x" => 2, "y" => 3]) - I = mortar([Block(1)[1:1]]) - a = x[I] - @test length(a) == 1 - @test label(first(a)) == "x" - - x = gradedrange(["x" => 2, "y" => 3])[1:5] - I = mortar([Block(1)[1:1]]) - a = x[I] - @test length(a) == 1 - @test label(first(a)) == "x" -end -end diff --git a/NDTensors/src/lib/GradedAxes/test/test_dual.jl b/NDTensors/src/lib/GradedAxes/test/test_dual.jl deleted file mode 100644 index f2b3072dc1..0000000000 --- a/NDTensors/src/lib/GradedAxes/test/test_dual.jl +++ /dev/null @@ -1,280 +0,0 @@ -@eval module $(gensym()) -using BlockArrays: - Block, - BlockedOneTo, - blockaxes, - blockedrange, - blockfirsts, - blockisequal, - blocklasts, - blocklength, - blocklengths, - blocks, - findblock, - mortar, - combine_blockaxes -using NDTensors.GradedAxes: - AbstractGradedUnitRange, - GradedAxes, - GradedUnitRangeDual, - LabelledUnitRangeDual, - OneToOne, - blocklabels, - blockmergesortperm, - blocksortperm, - dual, - flip, - space_isequal, - gradedrange, - isdual, - nondual -using NDTensors.LabelledNumbers: - LabelledInteger, LabelledUnitRange, label, label_type, labelled, labelled_isequal, unlabel -using Test: @test, @test_broken, @testset -struct U1 - n::Int -end -GradedAxes.dual(c::U1) = U1(-c.n) -Base.isless(c1::U1, c2::U1) = c1.n < c2.n - -@testset "AbstractUnitRange" begin - a0 = OneToOne() - @test !isdual(a0) - @test dual(a0) isa OneToOne - @test space_isequal(a0, a0) - @test labelled_isequal(a0, a0) - @test space_isequal(a0, dual(a0)) - - a = 1:3 - ad = dual(a) - @test !isdual(a) - @test !isdual(ad) - @test ad isa UnitRange - @test space_isequal(ad, a) - - a = blockedrange([2, 3]) - ad = dual(a) - @test !isdual(a) - @test !isdual(ad) - @test ad isa BlockedOneTo - @test blockisequal(ad, a) -end - -@testset "LabelledUnitRangeDual" begin - la = labelled(1:2, U1(1)) - @test la isa LabelledUnitRange - @test label(la) == U1(1) - @test blocklabels(la) == [U1(1)] - @test unlabel(la) == 1:2 - @test la == 1:2 - @test !isdual(la) - @test labelled_isequal(la, la) - @test space_isequal(la, la) - @test label_type(la) == U1 - - @test iterate(la) == (1, 1) - @test iterate(la) == (1, 1) - @test iterate(la, 1) == (2, 2) - @test isnothing(iterate(la, 2)) - - lad = dual(la) - @test lad isa LabelledUnitRangeDual - @test label(lad) == U1(-1) - @test blocklabels(lad) == [U1(-1)] - @test unlabel(lad) == 1:2 - @test lad == 1:2 - @test labelled_isequal(lad, lad) - @test space_isequal(lad, lad) - @test !labelled_isequal(la, lad) - @test !space_isequal(la, lad) - @test isdual(lad) - @test nondual(lad) === la - @test dual(lad) === la - @test label_type(lad) == U1 - - @test iterate(lad) == (1, 1) - @test iterate(lad) == (1, 1) - @test iterate(lad, 1) == (2, 2) - @test isnothing(iterate(lad, 2)) - - lad2 = lad[1:1] - @test lad2 isa LabelledUnitRangeDual - @test label(lad2) == U1(-1) - @test unlabel(lad2) == 1:1 - - laf = flip(la) - @test laf isa LabelledUnitRangeDual - @test label(laf) == U1(1) - @test unlabel(laf) == 1:2 - @test labelled_isequal(la, laf) - @test !space_isequal(la, laf) - - ladf = flip(dual(la)) - @test ladf isa LabelledUnitRange - @test label(ladf) == U1(-1) - @test unlabel(ladf) == 1:2 - - lafd = dual(flip(la)) - @test lafd isa LabelledUnitRange - @test label(lafd) == U1(-1) - @test unlabel(lafd) == 1:2 - - # check default behavior for objects without dual - la = labelled(1:2, 'x') - lad = dual(la) - @test lad isa LabelledUnitRangeDual - @test label(lad) == 'x' - @test blocklabels(lad) == 
['x'] - @test unlabel(lad) == 1:2 - @test lad == 1:2 - @test labelled_isequal(lad, lad) - @test space_isequal(lad, lad) - @test labelled_isequal(la, lad) - @test !space_isequal(la, lad) - @test isdual(lad) - @test nondual(lad) === la - @test dual(lad) === la - - laf = flip(la) - @test laf isa LabelledUnitRangeDual - @test label(laf) == 'x' - @test unlabel(laf) == 1:2 - - ladf = flip(lad) - @test ladf isa LabelledUnitRange - @test label(ladf) == 'x' - @test unlabel(ladf) == 1:2 -end - -@testset "GradedUnitRangeDual" begin - for a in - [gradedrange([U1(0) => 2, U1(1) => 3]), gradedrange([U1(0) => 2, U1(1) => 3])[1:5]] - ad = dual(a) - @test ad isa GradedUnitRangeDual - @test ad isa AbstractGradedUnitRange - @test eltype(ad) == LabelledInteger{Int,U1} - @test blocklengths(ad) isa Vector - @test eltype(blocklengths(ad)) == eltype(blocklengths(a)) - - @test space_isequal(dual(ad), a) - @test space_isequal(nondual(ad), a) - @test space_isequal(nondual(a), a) - @test space_isequal(ad, ad) - @test !space_isequal(a, ad) - @test !space_isequal(ad, a) - - @test isdual(ad) - @test !isdual(a) - @test axes(Base.Slice(a)) isa Tuple{typeof(a)} - @test AbstractUnitRange{Int}(ad) == 1:5 - b = combine_blockaxes(ad, ad) - @test b isa GradedUnitRangeDual - @test b == 1:5 - @test space_isequal(b, ad) - - for x in iterate(ad) - @test x == 1 - @test label(x) == U1(0) - end - for x in iterate(ad, labelled(3, U1(-1))) - @test x == 4 - @test label(x) == U1(-1) - end - - @test blockfirsts(ad) == [labelled(1, U1(0)), labelled(3, U1(-1))] - @test blocklasts(ad) == [labelled(2, U1(0)), labelled(5, U1(-1))] - @test blocklength(ad) == 2 - @test blocklengths(ad) == [2, 3] - @test blocklabels(ad) == [U1(0), U1(-1)] - @test label.(blocklengths(ad)) == [U1(0), U1(-1)] - @test findblock(ad, 4) == Block(2) - @test only(blockaxes(ad)) == Block(1):Block(2) - @test blocks(ad) == [labelled(1:2, U1(0)), labelled(3:5, U1(-1))] - @test ad[4] == 4 - @test label(ad[4]) == U1(-1) - @test ad[2:4] == 2:4 - @test ad[2:4] isa GradedUnitRangeDual - @test label(ad[2:4][Block(2)]) == U1(-1) - @test ad[[2, 4]] == [2, 4] - @test label(ad[[2, 4]][2]) == U1(-1) - @test ad[Block(2)] == 3:5 - @test label(ad[Block(2)]) == U1(-1) - @test ad[Block(1):Block(2)][Block(2)] == 3:5 - @test label(ad[Block(1):Block(2)][Block(2)]) == U1(-1) - @test ad[[Block(2), Block(1)]][Block(1)] == 3:5 - @test label(ad[[Block(2), Block(1)]][Block(1)]) == U1(-1) - @test ad[[Block(2)[1:2], Block(1)[1:2]]][Block(1)] == 3:4 - @test label(ad[[Block(2)[1:2], Block(1)[1:2]]][Block(1)]) == U1(-1) - @test blocksortperm(a) == [Block(1), Block(2)] - @test blocksortperm(ad) == [Block(1), Block(2)] - @test blocklength(blockmergesortperm(a)) == 2 - @test blocklength(blockmergesortperm(ad)) == 2 - @test blockmergesortperm(a) == [Block(1), Block(2)] - @test blockmergesortperm(ad) == [Block(1), Block(2)] - - @test isdual(ad[Block(1)]) - @test isdual(ad[Block(1)[1:1]]) - @test ad[Block(1)] isa LabelledUnitRangeDual - @test ad[Block(1)[1:1]] isa LabelledUnitRangeDual - @test label(ad[Block(2)]) == U1(-1) - @test label(ad[Block(2)[1:1]]) == U1(-1) - - v = ad[[Block(2)[1:1]]] - @test v isa AbstractVector{LabelledInteger{Int64,U1}} - @test length(v) == 1 - @test label(first(v)) == U1(-1) - @test unlabel(first(v)) == 3 - @test isdual(v[Block(1)]) - @test isdual(axes(v, 1)) - @test blocklabels(axes(v, 1)) == [U1(-1)] - - v = ad[mortar([Block(2)[1:1]])] - @test v isa AbstractVector{LabelledInteger{Int64,U1}} - @test isdual(axes(v, 1)) # used in view(::BlockSparseVector, [Block(1)[1:1]]) - @test 
label(first(v)) == U1(-1) - @test unlabel(first(v)) == 3 - @test blocklabels(axes(v, 1)) == [U1(-1)] - - v = ad[[Block(2)]] - @test v isa AbstractVector{LabelledInteger{Int64,U1}} - @test isdual(axes(v, 1)) # used in view(::BlockSparseVector, [Block(1)]) - @test label(first(v)) == U1(-1) - @test unlabel(first(v)) == 3 - @test blocklabels(axes(v, 1)) == [U1(-1)] - - v = ad[mortar([[Block(2)], [Block(1)]])] - @test v isa AbstractVector{LabelledInteger{Int64,U1}} - @test isdual(axes(v, 1)) - @test label(first(v)) == U1(-1) - @test unlabel(first(v)) == 3 - @test blocklabels(axes(v, 1)) == [U1(-1), U1(0)] - end -end - -@testset "flip" begin - for a in - [gradedrange([U1(0) => 2, U1(1) => 3]), gradedrange([U1(0) => 2, U1(1) => 3])[1:5]] - ad = dual(a) - @test space_isequal(flip(a), dual(gradedrange([U1(0) => 2, U1(-1) => 3]))) - @test space_isequal(flip(ad), gradedrange([U1(0) => 2, U1(-1) => 3])) - - @test blocklabels(a) == [U1(0), U1(1)] - @test blocklabels(dual(a)) == [U1(0), U1(-1)] - @test blocklabels(flip(a)) == [U1(0), U1(1)] - @test blocklabels(flip(dual(a))) == [U1(0), U1(-1)] - @test blocklabels(dual(flip(a))) == [U1(0), U1(-1)] - - @test blocklengths(a) == [2, 3] - @test blocklengths(ad) == [2, 3] - @test blocklengths(flip(a)) == [2, 3] - @test blocklengths(flip(ad)) == [2, 3] - @test blocklengths(dual(flip(a))) == [2, 3] - - @test !isdual(a) - @test isdual(ad) - @test isdual(flip(a)) - @test !isdual(flip(ad)) - @test !isdual(dual(flip(a))) - end -end -end diff --git a/NDTensors/src/lib/GradedAxes/test/test_tensor_product.jl b/NDTensors/src/lib/GradedAxes/test/test_tensor_product.jl deleted file mode 100644 index 99e41454ff..0000000000 --- a/NDTensors/src/lib/GradedAxes/test/test_tensor_product.jl +++ /dev/null @@ -1,88 +0,0 @@ -@eval module $(gensym()) -using Test: @test, @testset - -using BlockArrays: blocklength, blocklengths - -using NDTensors.GradedAxes: - GradedAxes, - GradedOneTo, - OneToOne, - dual, - fusion_product, - flip, - gradedrange, - space_isequal, - isdual, - tensor_product - -using NDTensors.LabelledNumbers: labelled_isequal - -struct U1 - n::Int -end -GradedAxes.dual(c::U1) = U1(-c.n) -Base.isless(c1::U1, c2::U1) = c1.n < c2.n -GradedAxes.fuse_labels(x::U1, y::U1) = U1(x.n + y.n) - -@testset "GradedAxes.tensor_product" begin - GradedAxes.fuse_labels(x::String, y::String) = x * y - - g0 = OneToOne() - @test labelled_isequal(g0, g0) - @test labelled_isequal(tensor_product(g0, g0), g0) - - a = gradedrange(["x" => 2, "y" => 3]) - b = tensor_product(a, a) - @test b isa GradedOneTo - @test length(b) == 25 - @test blocklength(b) == 4 - @test blocklengths(b) == [4, 6, 6, 9] - @test labelled_isequal(b, gradedrange(["xx" => 4, "yx" => 6, "xy" => 6, "yy" => 9])) - - c = tensor_product(a, a, a) - @test c isa GradedOneTo - @test length(c) == 125 - @test blocklength(c) == 8 -end - -@testset "GradedAxes.fusion_product" begin - g0 = OneToOne() - @test labelled_isequal(fusion_product(g0, g0), g0) - - a = gradedrange([U1(1) => 1, U1(2) => 3, U1(1) => 1]) - - b = fusion_product(a) - @test labelled_isequal(b, gradedrange([U1(1) => 2, U1(2) => 3])) - - c = fusion_product(a, a) - @test labelled_isequal(c, gradedrange([U1(2) => 4, U1(3) => 12, U1(4) => 9])) - - d = fusion_product(a, a, a) - @test labelled_isequal( - d, gradedrange([U1(3) => 8, U1(4) => 36, U1(5) => 54, U1(6) => 27]) - ) -end - -@testset "dual and tensor_product" begin - a = gradedrange([U1(1) => 1, U1(2) => 3, U1(1) => 1]) - ad = dual(a) - - b = fusion_product(ad) - @test b isa GradedOneTo - @test !isdual(b) - @test 
space_isequal(b, gradedrange([U1(-2) => 3, U1(-1) => 2])) - - c = fusion_product(ad, ad) - @test c isa GradedOneTo - @test !isdual(c) - @test space_isequal(c, gradedrange([U1(-4) => 9, U1(-3) => 12, U1(-2) => 4])) - - d = fusion_product(ad, a) - @test !isdual(d) - @test space_isequal(d, gradedrange([U1(-1) => 6, U1(0) => 13, U1(1) => 6])) - - e = fusion_product(a, ad) - @test !isdual(e) - @test space_isequal(e, d) -end -end diff --git a/NDTensors/src/lib/LabelledNumbers/.JuliaFormatter.toml b/NDTensors/src/lib/LabelledNumbers/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/LabelledNumbers/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/LabelledNumbers/src/LabelledNumbers.jl b/NDTensors/src/lib/LabelledNumbers/src/LabelledNumbers.jl deleted file mode 100644 index 7f54a02f4f..0000000000 --- a/NDTensors/src/lib/LabelledNumbers/src/LabelledNumbers.jl +++ /dev/null @@ -1,8 +0,0 @@ -module LabelledNumbers -include("labelled_interface.jl") -include("labellednumber.jl") -include("labelledinteger.jl") -include("labelledarray.jl") -include("labelledunitrange.jl") -include("LabelledNumbersBlockArraysExt.jl") -end diff --git a/NDTensors/src/lib/LabelledNumbers/src/LabelledNumbersBlockArraysExt.jl b/NDTensors/src/lib/LabelledNumbers/src/LabelledNumbersBlockArraysExt.jl deleted file mode 100644 index 38c8b4e555..0000000000 --- a/NDTensors/src/lib/LabelledNumbers/src/LabelledNumbersBlockArraysExt.jl +++ /dev/null @@ -1,22 +0,0 @@ -using BlockArrays: BlockArrays, Block, BlockBoundsError, blockaxes, blockfirsts, blocklasts - -# Fixes ambiguity error with: -# ```julia -# getindex(::LabelledUnitRange, ::Any...) -# getindex(::AbstractArray{<:Any,N}, ::Block{N}) where {N} -# getindex(::AbstractArray, ::Block{1}, ::Any...) -# ``` -function Base.getindex(a::LabelledUnitRange, index::Block{1}) - @boundscheck index == Block(1) || throw(BlockBoundsError(a, index)) - return a -end - -function BlockArrays.blockaxes(a::LabelledUnitRange) - return blockaxes(unlabel(a)) -end -function BlockArrays.blockfirsts(a::LabelledUnitRange) - return blockfirsts(unlabel(a)) -end -function BlockArrays.blocklasts(a::LabelledUnitRange) - return blocklasts(unlabel(a)) -end diff --git a/NDTensors/src/lib/LabelledNumbers/src/labelled_interface.jl b/NDTensors/src/lib/LabelledNumbers/src/labelled_interface.jl deleted file mode 100644 index fda9ab0ebb..0000000000 --- a/NDTensors/src/lib/LabelledNumbers/src/labelled_interface.jl +++ /dev/null @@ -1,62 +0,0 @@ -# Labelled object interface. -abstract type LabelledStyle end -struct IsLabelled <: LabelledStyle end -struct NotLabelled <: LabelledStyle end -LabelledStyle(::Type) = NotLabelled() -LabelledStyle(object) = LabelledStyle(typeof(object)) -islabelled(::IsLabelled) = true -islabelled(::NotLabelled) = false -islabelled(object) = islabelled(LabelledStyle(object)) -label(object) = error("This object does not have a label.") -# TODO: Use `TypeParameterAccessors`. -label_type(::Type) = error("No label type defined.") -label_type(object) = typeof(label(object)) -labelled(object, label) = error("Can't add a label to this object.") -# TODO: Turn into a trait function.
-function set_label(object, label) - if islabelled(object) - object = unlabel(object) - end - return labelled(object, label) -end -unlabel(object) = object -unlabel_type(type::Type) = type -unlabel_type(object) = typeof(unlabel(object)) - -set_value(x, value) = labelled(value, label(x)) - -labelled_zero(x) = set_value(x, zero(unlabel(x))) -labelled_one(x) = one(unlabel(x)) -labelled_one(type::Type) = one(unlabel_type(type)) -labelled_oneunit(x) = set_value(x, one(x)) -# TODO: Implement this for types where the label is -# encoded in the type. -labelled_oneunit(type::Type) = error("Not implemented.") - -function labelled_binary_op(f, x, y) - return labelled_binary_op(f, LabelledStyle(x), x, LabelledStyle(y), y) -end -labelled_binary_op(f, ::LabelledStyle, x, ::LabelledStyle, y) = f(unlabel(x), unlabel(y)) - -# TODO: This is only needed for older Julia versions, like Julia 1.6. -# Delete once we drop support for older Julia versions. -labelled_minus(x) = set_value(x, -unlabel(x)) - -# TODO: This is only needed for older Julia versions, like Julia 1.6. -# Delete once we drop support for older Julia versions. -labelled_hash(x, h::UInt64) = hash(unlabel(x), h) - -for (fname, f) in [ - (:mul, :*), - (:add, :+), - (:minus, :-), - (:division, :/), - (:div, :÷), - (:isequal, :isequal), - (:isless, :isless), -] - labelled_fname = Symbol(:(labelled_), fname) - @eval begin - $labelled_fname(x, y) = labelled_binary_op($f, x, y) - end -end diff --git a/NDTensors/src/lib/LabelledNumbers/src/labelledarray.jl b/NDTensors/src/lib/LabelledNumbers/src/labelledarray.jl deleted file mode 100644 index f9dbedb8b7..0000000000 --- a/NDTensors/src/lib/LabelledNumbers/src/labelledarray.jl +++ /dev/null @@ -1,20 +0,0 @@ -struct LabelledArray{T,N,Value<:AbstractArray{T,N},Label} <: - AbstractArray{LabelledInteger{T,Label},N} - value::Value - label::Label -end -LabelledStyle(::Type{<:LabelledArray}) = IsLabelled() -label(lobject::LabelledArray) = lobject.label -# TODO: Use `TypeParameterAccessors`. -label_type(::Type{<:LabelledArray{<:Any,Label}}) where {Label} = Label -labelled(object::AbstractArray, label) = LabelledArray(object, label) -unlabel(lobject::LabelledArray) = lobject.value -unlabel_type(::Type{<:LabelledArray{Value}}) where {Value} = Value - -for f in [:axes] - @eval Base.$f(a::LabelledArray, args...) = $f(unlabel(a), args...) -end - -for f in [:first, :getindex, :last, :length] - @eval Base.$f(a::LabelledArray, args...) = labelled($f(unlabel(a), args...), label(a)) -end diff --git a/NDTensors/src/lib/LabelledNumbers/src/labelledinteger.jl b/NDTensors/src/lib/LabelledNumbers/src/labelledinteger.jl deleted file mode 100644 index 56ad8e30fc..0000000000 --- a/NDTensors/src/lib/LabelledNumbers/src/labelledinteger.jl +++ /dev/null @@ -1,123 +0,0 @@ -struct LabelledInteger{Value<:Integer,Label} <: Integer - value::Value - label::Label -end -LabelledStyle(::Type{<:LabelledInteger}) = IsLabelled() -# TODO: Define `set_value` and `set_label`? -label(lobject::LabelledInteger) = lobject.label -# TODO: Use `TypeParameterAccessors`. -label_type(::Type{<:LabelledInteger{<:Any,Label}}) where {Label} = Label -labelled(object::Integer, label) = LabelledInteger(object, label) -unlabel(lobject::LabelledInteger) = lobject.value -unlabel_type(::Type{<:LabelledInteger{Value}}) where {Value} = Value - -# When using as shapes of arrays. -# TODO: Preserve the label? For example: -# labelled(Base.to_shape(unlabel(x)), label(x)) -Base.to_shape(x::LabelledInteger) = Base.to_shape(unlabel(x)) - -# TODO: Define `labelled_convert`. 
-Base.convert(type::Type{<:Number}, x::LabelledInteger) = type(unlabel(x)) -# TODO: This is only needed for older Julia versions, like Julia 1.6. -# Delete once we drop support for older Julia versions. -function Base.convert(type::Type{<:LabelledInteger}, x::LabelledInteger) - return type(unlabel(x), label(x)) -end - -# Used by `Base.hash(::Integer)`. -# TODO: Define `labelled_trailing_zeros` to be used by other -# labelled number types. -Base.trailing_zeros(x::LabelledInteger) = trailing_zeros(unlabel(x)) - -# Used by `Base.hash(::Integer)`. -# TODO: Define `labelled_right_bit_shift` to be used by other -# labelled number types. -Base.:>>(x::LabelledInteger, y::Int) = >>(unlabel(x), y) - -Base.:(==)(x::LabelledInteger, y::LabelledInteger) = labelled_isequal(x, y) -Base.:(==)(x::LabelledInteger, y::Number) = labelled_isequal(x, y) -Base.:(==)(x::Number, y::LabelledInteger) = labelled_isequal(x, y) -Base.:<(x::LabelledInteger, y::LabelledInteger) = labelled_isless(x, y) -# This is only needed on older versions of Julia, like Julia 1.6. -# TODO: Delete once we drop support for Julia 1.6. -function Base.:<=(x::LabelledInteger, y::LabelledInteger) - return labelled_isless(x, y) || labelled_isequal(x, y) -end -# TODO: Define `labelled_colon`. -(::Base.Colon)(start::LabelledInteger, stop::LabelledInteger) = unlabel(start):unlabel(stop) -Base.zero(lobject::LabelledInteger) = labelled_zero(lobject) -Base.one(lobject::LabelledInteger) = labelled_one(lobject) -Base.one(type::Type{<:LabelledInteger}) = labelled_one(type) -Base.oneunit(lobject::LabelledInteger) = labelled_oneunit(lobject) -Base.oneunit(type::Type{<:LabelledInteger}) = oneunit(unlabel_type(type)) -Base.zero(type::Type{<:LabelledInteger}) = zero(unlabel_type(type)) - -Base.Int(x::LabelledInteger) = Int(unlabel(x)) - -Base.:+(x::LabelledInteger, y::LabelledInteger) = labelled_add(x, y) -Base.:+(x::LabelledInteger, y::Number) = labelled_add(x, y) -Base.:+(x::Number, y::LabelledInteger) = labelled_add(x, y) -# Fix ambiguity error with `+(::Integer, ::Integer)`. -Base.:+(x::LabelledInteger, y::Integer) = labelled_add(x, y) -Base.:+(x::Integer, y::LabelledInteger) = labelled_add(x, y) - -Base.:-(x::LabelledInteger, y::LabelledInteger) = labelled_minus(x, y) -Base.:-(x::LabelledInteger, y::Number) = labelled_minus(x, y) -Base.:-(x::Number, y::LabelledInteger) = labelled_minus(x, y) -# Fix ambiguity error with `-(::Integer, ::Integer)`. -Base.:-(x::LabelledInteger, y::Integer) = labelled_minus(x, y) -Base.:-(x::Integer, y::LabelledInteger) = labelled_minus(x, y) - -function Base.sub_with_overflow(x::LabelledInteger, y::LabelledInteger) - return labelled_binary_op(Base.sub_with_overflow, x, y) -end - -Base.:*(x::LabelledInteger, y::LabelledInteger) = labelled_mul(x, y) -Base.:*(x::LabelledInteger, y::Number) = labelled_mul(x, y) -Base.:*(x::Number, y::LabelledInteger) = labelled_mul(x, y) -# Fix ambiguity issue with `Base` `Integer`. -Base.:*(x::LabelledInteger, y::Integer) = labelled_mul(x, y) -# Fix ambiguity issue with `Base` `Integer`. -Base.:*(x::Integer, y::LabelledInteger) = labelled_mul(x, y) - -Base.:/(x::LabelledInteger, y::Number) = labelled_division(x, y) -Base.div(x::LabelledInteger, y::Number) = labelled_div(x, y) - -# TODO: This is only needed for older Julia versions, like Julia 1.6. -# Delete once we drop support for older Julia versions. -# TODO: Define in terms of a generic `labelled_minus` function. -# TODO: Define in terms of `set_value`? 
-Base.:-(x::LabelledInteger) = labelled_minus(x) - -# TODO: This is only needed for older Julia versions, like Julia 1.6. -# Delete once we drop support for older Julia versions. -Base.hash(x::LabelledInteger, h::UInt64) = labelled_hash(x, h) - -using Random: AbstractRNG, default_rng -default_eltype() = Float64 -for f in [:rand, :randn] - @eval begin - function Base.$f( - rng::AbstractRNG, - elt::Type{<:Number}, - dims::Tuple{LabelledInteger,Vararg{LabelledInteger}}, - ) - return $f(rng, elt, unlabel.(dims)) - end - function Base.$f( - rng::AbstractRNG, - elt::Type{<:Number}, - dim1::LabelledInteger, - dims::Vararg{LabelledInteger}, - ) - return $f(rng, elt, (dim1, dims...)) - end - Base.$f(elt::Type{<:Number}, dims::Tuple{LabelledInteger,Vararg{LabelledInteger}}) = - $f(default_rng(), elt, dims) - Base.$f(elt::Type{<:Number}, dim1::LabelledInteger, dims::Vararg{LabelledInteger}) = - $f(elt, (dim1, dims...)) - Base.$f(dims::Tuple{LabelledInteger,Vararg{LabelledInteger}}) = - $f(default_eltype(), dims) - Base.$f(dim1::LabelledInteger, dims::Vararg{LabelledInteger}) = $f((dim1, dims...)) - end -end diff --git a/NDTensors/src/lib/LabelledNumbers/src/labellednumber.jl b/NDTensors/src/lib/LabelledNumbers/src/labellednumber.jl deleted file mode 100644 index 09a30a456b..0000000000 --- a/NDTensors/src/lib/LabelledNumbers/src/labellednumber.jl +++ /dev/null @@ -1,41 +0,0 @@ -struct LabelledNumber{Value<:Number,Label} <: Number - value::Value - label::Label -end -LabelledStyle(::Type{<:LabelledNumber}) = IsLabelled() -label(lobject::LabelledNumber) = lobject.label -# TODO: Use `TypeParameterAccessors`. -label_type(::Type{<:LabelledNumber{<:Any,Label}}) where {Label} = Label -labelled(object::Number, label) = LabelledNumber(object, label) -unlabel(lobject::LabelledNumber) = lobject.value -unlabel_type(::Type{<:LabelledNumber{Value}}) where {Value} = Value - -# TODO: Define `labelled_convert`. -Base.convert(type::Type{<:Number}, x::LabelledNumber) = type(unlabel(x)) - -Base.:(==)(x::LabelledNumber, y::LabelledNumber) = labelled_isequal(x, y) -Base.:<(x::LabelledNumber, y::LabelledNumber) = labelled_isless(x, y) -# TODO: Define `labelled_colon`. -(::Base.Colon)(start::LabelledNumber, stop::LabelledNumber) = unlabel(start):unlabel(stop) -Base.zero(lobject::LabelledNumber) = labelled_zero(lobject) -Base.one(lobject::LabelledNumber) = labelled_one(lobject) -Base.one(type::Type{<:LabelledNumber}) = labelled_one(type) -Base.oneunit(lobject::LabelledNumber) = labelled_oneunit(lobject) -Base.oneunit(type::Type{<:LabelledNumber}) = error("Not implemented.") - -Base.:*(x::LabelledNumber, y::LabelledNumber) = labelled_mul(x, y) -Base.:*(x::LabelledNumber, y::Number) = labelled_mul(x, y) -Base.:*(x::Number, y::LabelledNumber) = labelled_mul(x, y) - -Base.:/(x::LabelledNumber, y::Number) = labelled_division(x, y) -Base.div(x::LabelledNumber, y::Number) = labelled_div(x, y) - -# TODO: This is only needed for older Julia versions, like Julia 1.6. -# Delete once we drop support for older Julia versions. -# TODO: Define in terms of a generic `labelled_minus` function. -# TODO: Define in terms of `set_value`? -Base.:-(x::LabelledNumber) = labelled_minus(x) - -# TODO: This is only needed for older Julia versions, like Julia 1.6. -# Delete once we drop support for older Julia versions.
-Base.hash(x::LabelledNumber, h::UInt64) = labelled_hash(x, h) diff --git a/NDTensors/src/lib/LabelledNumbers/src/labelledunitrange.jl b/NDTensors/src/lib/LabelledNumbers/src/labelledunitrange.jl deleted file mode 100644 index 4f432c9226..0000000000 --- a/NDTensors/src/lib/LabelledNumbers/src/labelledunitrange.jl +++ /dev/null @@ -1,63 +0,0 @@ -struct LabelledUnitRange{T,Value<:AbstractUnitRange{T},Label} <: - AbstractUnitRange{LabelledInteger{T,Label}} - value::Value - label::Label -end -LabelledStyle(::Type{<:LabelledUnitRange}) = IsLabelled() -label(lobject::LabelledUnitRange) = lobject.label -# TODO: Use `TypeParameterAccessors`. -label_type(::Type{<:LabelledUnitRange{<:Any,<:Any,Label}}) where {Label} = Label -labelled(object::AbstractUnitRange, label) = LabelledUnitRange(object, label) -unlabel(lobject::LabelledUnitRange) = lobject.value -unlabel_type(::Type{<:LabelledUnitRange{<:Any,Value}}) where {Value} = Value - -# Used by `CartesianIndices` constructor. -# TODO: Maybe reconsider this definition? Also, this should preserve -# the label if possible, currently it drops the label. -function Base.AbstractUnitRange{T}(a::LabelledUnitRange) where {T} - return AbstractUnitRange{T}(unlabel(a)) -end - -# TODO: Is this a good definition? -Base.unitrange(a::LabelledUnitRange) = a - -for f in [:first, :getindex, :last, :length, :step] - @eval Base.$f(a::LabelledUnitRange, args...) = labelled($f(unlabel(a), args...), label(a)) -end - -labelled_getindex(a, index) = labelled(unlabel(a)[index], label(a)) - -# This is required in Julia 1.11 and above since -# the generic `axes(a::AbstractRange)` definition was removed -# and replaced with a generic `axes(a)` definition that -# is written in terms of `Base.unchecked_oneto`, i.e.: -# ```julia -# map(Base.unchecked_oneto, size(A)) -# ``` -# which returns a `Base.OneTo` instead of a `LabelledUnitRange`. -Base.axes(a::LabelledUnitRange) = Base.oneto.(size(a)) - -# TODO: Delete this definition, this should output a `Base.OneTo`. -Base.OneTo(stop::LabelledInteger) = labelled(Base.OneTo(unlabel(stop)), label(stop)) - -# Fix ambiguity error with `AbstractRange` definition in `Base`. -Base.getindex(a::LabelledUnitRange, index::Integer) = labelled_getindex(a, index) -# Fix ambiguity error with `AbstractRange` definition in `Base`.
-function Base.getindex(a::LabelledUnitRange, indices::AbstractUnitRange{<:Integer}) - return labelled_getindex(a, indices) -end - -function Base.iterate(a::LabelledUnitRange, i) - i == last(a) && return nothing - next = convert(eltype(a), labelled(i + step(a), label(a))) - return (next, next) -end - -function Base.show(io::IO, ::MIME"text/plain", a::LabelledUnitRange) - println(io, typeof(a)) - return print(io, label(a), " => ", unlabel(a)) -end - -function Base.show(io::IO, a::LabelledUnitRange) - return print(io, nameof(typeof(a)), " ", label(a), " => ", unlabel(a)) -end diff --git a/NDTensors/src/lib/LabelledNumbers/test/Project.toml b/NDTensors/src/lib/LabelledNumbers/test/Project.toml deleted file mode 100644 index d1bf575ce0..0000000000 --- a/NDTensors/src/lib/LabelledNumbers/test/Project.toml +++ /dev/null @@ -1,4 +0,0 @@ -[deps] -BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/NDTensors/src/lib/LabelledNumbers/test/runtests.jl b/NDTensors/src/lib/LabelledNumbers/test/runtests.jl deleted file mode 100644 index f7b2084805..0000000000 --- a/NDTensors/src/lib/LabelledNumbers/test/runtests.jl +++ /dev/null @@ -1,132 +0,0 @@ -@eval module $(gensym()) -using LinearAlgebra: norm -using NDTensors.LabelledNumbers: - LabelledInteger, LabelledUnitRange, islabelled, label, labelled, unlabel -using Test: @test, @testset -@testset "LabelledNumbers" begin - @testset "Labelled number ($n)" for n in (2, 2.0) - x = labelled(2, "x") - @test typeof(x) == LabelledInteger{Int,String} - @test islabelled(x) - @test x == 2 - @test label(x) == "x" - @test unlabel(x) == 2 - @test !islabelled(unlabel(x)) - - @test labelled(1, "x") < labelled(2, "x") - @test !(labelled(2, "x") < labelled(2, "x")) - @test !(labelled(3, "x") < labelled(2, "x")) - - @test !(labelled(1, "x") > labelled(2, "x")) - @test !(labelled(2, "x") > labelled(2, "x")) - @test labelled(3, "x") > labelled(2, "x") - - @test labelled(1, "x") <= labelled(2, "x") - @test labelled(2, "x") <= labelled(2, "x") - @test !(labelled(3, "x") <= labelled(2, "x")) - - @test !(labelled(1, "x") >= labelled(2, "x")) - @test labelled(2, "x") >= labelled(2, "x") - @test labelled(3, "x") >= labelled(2, "x") - - @test x * 2 == 4 - @test !islabelled(x * 2) - @test 2 * x == 4 - @test !islabelled(2 * x) - @test x * x == 4 - @test !islabelled(x * x) - - @test x + 3 == 5 - @test !islabelled(x + 3) - @test 3 + x == 5 - @test !islabelled(3 + x) - @test x + x == 4 - @test !islabelled(x + x) - - @test x - 3 == -1 - @test !islabelled(x - 3) - @test 3 - x == 1 - @test !islabelled(3 - x) - @test x - x == 0 - @test !islabelled(x - x) - - @test x / 2 == 1 - @test x / 2 isa AbstractFloat - @test x / 2 isa Float64 - @test !islabelled(x / 2) - @test x ÷ 2 == 1 - @test x ÷ 2 isa Integer - @test x ÷ 2 isa Int - @test !islabelled(x ÷ 2) - @test -x == -2 - @test hash(x) == hash(2) - @test zero(x) == false - @test label(zero(x)) == "x" - @test one(x) == true - @test !islabelled(one(x)) - @test oneunit(x) == true - @test label(oneunit(x)) == "x" - @test islabelled(oneunit(x)) - @test one(typeof(x)) == true - @test !islabelled(one(typeof(x))) - end - @testset "randn" begin - d = labelled(2, "x") - - a = randn(Float32, d, d) - @test eltype(a) === Float32 - @test size(a) == (2, 2) - @test norm(a) > 0 - - a = rand(Float32, d, d) - @test eltype(a) === Float32 - @test size(a) == (2, 2) - @test norm(a) > 0 - - a = randn(d, d) - @test eltype(a) === Float64 - @test size(a) == (2, 
2) - @test norm(a) > 0 - - a = rand(d, d) - @test eltype(a) === Float64 - @test size(a) == (2, 2) - @test norm(a) > 0 - end - @testset "Labelled array ($a)" for a in (collect(2:5), 2:5) - x = labelled(a, "x") - @test eltype(x) == LabelledInteger{Int,String} - @test x == 2:5 - @test label(x) == "x" - @test unlabel(x) == 2:5 - @test first(iterate(x, 3)) == 4 - @test label(first(iterate(x, 3))) == "x" - @test collect(x) == 2:5 - @test label.(collect(x)) == fill("x", 4) - @test x[2] == 3 - @test label(x[2]) == "x" - @test x[2:3] == 3:4 - @test label(x[2:3]) == "x" - @test x[[2, 4]] == [3, 5] - @test label(x[[2, 4]]) == "x" - - if x isa AbstractUnitRange - @test step(x) == true - @test islabelled(step(x)) - @test label(step(x)) == "x" - end - end -end - -using BlockArrays: Block, blockaxes, blocklength, blocklengths -@testset "LabelledNumbersBlockArraysExt" begin - x = labelled(1:2, "x") - @test blockaxes(x) == (Block.(1:1),) - @test blocklength(x) == 1 - @test blocklengths(x) == [2] - a = x[Block(1)] - @test a == 1:2 - @test a isa LabelledUnitRange - @test label(a) == "x" -end -end diff --git a/NDTensors/src/lib/MetalExtensions/.JuliaFormatter.toml b/NDTensors/src/lib/MetalExtensions/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/MetalExtensions/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/MetalExtensions/src/MetalExtensions.jl b/NDTensors/src/lib/MetalExtensions/src/MetalExtensions.jl deleted file mode 100644 index 504f13f7aa..0000000000 --- a/NDTensors/src/lib/MetalExtensions/src/MetalExtensions.jl +++ /dev/null @@ -1,4 +0,0 @@ -module MetalExtensions -include("metal.jl") - -end diff --git a/NDTensors/src/lib/MetalExtensions/src/metal.jl b/NDTensors/src/lib/MetalExtensions/src/metal.jl deleted file mode 100644 index ab15cd3499..0000000000 --- a/NDTensors/src/lib/MetalExtensions/src/metal.jl +++ /dev/null @@ -1,15 +0,0 @@ -using NDTensors.TypeParameterAccessors: TypeParameterAccessors, Position -using NDTensors.GPUArraysCoreExtensions: storagemode -# Implemented in NDTensorsMetalExt -function mtl end - -## Here we need an MtlArrayAdaptor because the MtlArrayAdaptor provided by Metal -## converts 64 bit numbers to 32 bit. We cannot write `adapt(MtlArray, x)` because this -## will not allow us to properly utilize the buffer preference without changing the value of -## default_buffertype.
Also `adapt(MtlArray{<:Any, <:Any, Buffertype})` fails to work properly - -struct MtlArrayAdaptor{B} end - -function TypeParameterAccessors.position(::Type{<:MtlArrayAdaptor}, ::typeof(storagemode)) - return Position(1) -end diff --git a/NDTensors/src/lib/MetalExtensions/test/runtests.jl b/NDTensors/src/lib/MetalExtensions/test/runtests.jl deleted file mode 100644 index ca3b5cc32c..0000000000 --- a/NDTensors/src/lib/MetalExtensions/test/runtests.jl +++ /dev/null @@ -1,7 +0,0 @@ -@eval module $(gensym()) -using Test: @testset, @test -using NDTensors.MetalExtensions: mtl -@testset "mtl function exists" begin - @test mtl isa Function -end -end diff --git a/NDTensors/src/lib/NamedDimsArrays/.JuliaFormatter.toml b/NDTensors/src/lib/NamedDimsArrays/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/NamedDimsArrays/README.md b/NDTensors/src/lib/NamedDimsArrays/README.md deleted file mode 100644 index ab0c19d38f..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# NamedDimsArrays.jl - -````julia -using NDTensors.NamedDimsArrays: align, dimnames, named, unname -using NDTensors.TensorAlgebra: TensorAlgebra - -# Named dimensions -i = named(2, "i") -j = named(2, "j") -k = named(2, "k") - -# Arrays with named dimensions -na1 = randn(i, j) -na2 = randn(j, k) - -@show dimnames(na1) == ("i", "j") - -# Indexing -@show na1[j => 2, i => 1] == na1[1, 2] - -# Tensor contraction -na_dest = TensorAlgebra.contract(na1, na2) - -@show issetequal(dimnames(na_dest), ("i", "k")) -# `unname` removes the names and returns an `Array` -@show unname(na_dest, (i, k)) ≈ unname(na1) * unname(na2) - -# Permute dimensions (like `ITensors.permute`) -na1 = align(na1, (j, i)) -@show na1[i => 1, j => 2] == na1[2, 1] -```` - ---- - -*This page was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).* - diff --git a/NDTensors/src/lib/NamedDimsArrays/examples/example_readme.jl b/NDTensors/src/lib/NamedDimsArrays/examples/example_readme.jl deleted file mode 100644 index 389e2b3984..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/examples/example_readme.jl +++ /dev/null @@ -1,29 +0,0 @@ -# # NamedDimsArrays.jl - -using NDTensors.NamedDimsArrays: align, dimnames, named, unname -using NDTensors.TensorAlgebra: TensorAlgebra - -## Named dimensions -i = named(2, "i") -j = named(2, "j") -k = named(2, "k") - -## Arrays with named dimensions -na1 = randn(i, j) -na2 = randn(j, k) - -@show dimnames(na1) == ("i", "j") - -## Indexing -@show na1[j => 2, i => 1] == na1[1, 2] - -## Tensor contraction -na_dest = TensorAlgebra.contract(na1, na2) - -@show issetequal(dimnames(na_dest), ("i", "k")) -## `unname` removes the names and returns an `Array` -@show unname(na_dest, (i, k)) ≈ unname(na1) * unname(na2) - -## Permute dimensions (like `ITensors.permute`) -na1 = align(na1, (j, i)) -@show na1[i => 1, j => 2] == na1[2, 1] diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/src/NamedDimsArraysAdaptExt.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/src/NamedDimsArraysAdaptExt.jl deleted file mode 100644 index 027dd65ca6..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/src/NamedDimsArraysAdaptExt.jl +++ /dev/null @@ -1,3 +0,0 @@ -module NamedDimsArraysAdaptExt -include("adapt_structure.jl") -end diff --git 
a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/src/adapt_structure.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/src/adapt_structure.jl deleted file mode 100644 index 97c075d628..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/src/adapt_structure.jl +++ /dev/null @@ -1,6 +0,0 @@ -using Adapt: Adapt, adapt -using NDTensors.NamedDimsArrays: AbstractNamedDimsArray, dimnames, named, unname - -function Adapt.adapt_structure(to, na::AbstractNamedDimsArray) - return named(adapt(to, unname(na)), dimnames(na)) -end diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/test/Project.toml b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/test/Project.toml deleted file mode 100644 index a970d0f894..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/test/Project.toml +++ /dev/null @@ -1,4 +0,0 @@ -[deps] -Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/test/runtests.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/test/runtests.jl deleted file mode 100644 index 733da91df2..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/test/runtests.jl +++ /dev/null @@ -1,13 +0,0 @@ -@eval module $(gensym()) -using Test: @test, @testset -using Adapt: adapt -using NDTensors.NamedDimsArrays: named -@testset "NamedDimsArraysAdaptExt (eltype=$elt)" for elt in ( - Float32, Float64, Complex{Float32}, Complex{Float64} -) - na = named(randn(2, 2), ("i", "j")) - na_complex = adapt(Array{complex(elt)}, na) - @test na ≈ na_complex - @test eltype(na_complex) === complex(elt) -end -end diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysSparseArraysBaseExt/src/NamedDimsArraysSparseArraysBaseExt.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysSparseArraysBaseExt/src/NamedDimsArraysSparseArraysBaseExt.jl deleted file mode 100644 index aef726d2dc..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysSparseArraysBaseExt/src/NamedDimsArraysSparseArraysBaseExt.jl +++ /dev/null @@ -1,3 +0,0 @@ -module NamedDimsArraysSparseArraysBaseExt -include("densearray.jl") -end diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysSparseArraysBaseExt/src/densearray.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysSparseArraysBaseExt/src/densearray.jl deleted file mode 100644 index db9da0c76c..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysSparseArraysBaseExt/src/densearray.jl +++ /dev/null @@ -1,8 +0,0 @@ -using ..NamedDimsArrays: AbstractNamedDimsArray, dimnames, named, unname -using ...SparseArraysBase: SparseArraysBase, densearray - -# TODO: Use `Adapt` or some kind of rewrap function like in -# ArrayInterface.jl (https://github.com/JuliaArrays/ArrayInterface.jl/issues/136) -function SparseArraysBase.densearray(na::AbstractNamedDimsArray) - return named(densearray(unname(na)), dimnames(na)) -end diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysSparseArraysBaseExt/test/Project.toml b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysSparseArraysBaseExt/test/Project.toml deleted file mode 100644 index c13dac32a6..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysSparseArraysBaseExt/test/Project.toml +++ /dev/null @@ -1,4 +0,0 @@ -[deps] -LinearAlgebra = 
"37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysSparseArraysBaseExt/test/runtests.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysSparseArraysBaseExt/test/runtests.jl deleted file mode 100644 index bc87d642fb..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysSparseArraysBaseExt/test/runtests.jl +++ /dev/null @@ -1,12 +0,0 @@ -@eval module $(gensym()) -using LinearAlgebra: Diagonal -using Test: @test, @testset -using NDTensors.SparseArraysBase: densearray -using NDTensors.NamedDimsArrays: named, unname -@testset "NamedDimsArraysSparseArraysBaseExt (eltype=$elt)" for elt in (Float32, Float64) - na = named(Diagonal(randn(2)), ("i", "j")) - na_dense = densearray(na) - @test na ≈ na_dense - @test unname(na_dense) isa Array -end -end diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/NamedDimsArraysTensorAlgebraExt.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/NamedDimsArraysTensorAlgebraExt.jl deleted file mode 100644 index a397507f44..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/NamedDimsArraysTensorAlgebraExt.jl +++ /dev/null @@ -1,7 +0,0 @@ -module NamedDimsArraysTensorAlgebraExt -include("contract.jl") -include("fusedims.jl") -include("qr.jl") -include("eigen.jl") -include("svd.jl") -end diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/contract.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/contract.jl deleted file mode 100644 index f45391eea9..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/contract.jl +++ /dev/null @@ -1,31 +0,0 @@ -using ..NamedDimsArrays: AbstractNamedDimsArray, dimnames, named, unname -using ...TensorAlgebra: TensorAlgebra, blockedperms, contract, contract! - -function TensorAlgebra.contract!( - na_dest::AbstractNamedDimsArray, - na1::AbstractNamedDimsArray, - na2::AbstractNamedDimsArray, - α::Number=true, - β::Number=false, -) - contract!( - unname(na_dest), - dimnames(na_dest), - unname(na1), - dimnames(na1), - unname(na2), - dimnames(na2), - α, - β, - ) - return na_dest -end - -function TensorAlgebra.contract( - na1::AbstractNamedDimsArray, na2::AbstractNamedDimsArray, α::Number=true -) - a_dest, dimnames_dest = contract( - unname(na1), dimnames(na1), unname(na2), dimnames(na2), α - ) - return named(a_dest, dimnames_dest) -end diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/eigen.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/eigen.jl deleted file mode 100644 index 31a7e6d5b1..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/eigen.jl +++ /dev/null @@ -1,47 +0,0 @@ -## using ..ITensors: IndexID -using LinearAlgebra: LinearAlgebra, Diagonal, Hermitian, eigen -## using ..NDTensors.DiagonalArrays: DiagonalMatrix -using ...NDTensors.NamedDimsArrays: AbstractNamedDimsArray, dimnames, name, unname -using ...NDTensors.RankFactorization: Spectrum, truncate!! 
-function LinearAlgebra.eigen( - na::Hermitian{T,<:AbstractNamedDimsArray{T}}; - mindim=nothing, - maxdim=nothing, - cutoff=nothing, - use_absolute_cutoff=nothing, - use_relative_cutoff=nothing, -) where {T<:Union{Real,Complex}} - # TODO: Handle array wrappers around - # `AbstractNamedDimsArray` more elegantly. - d, u = eigen(Hermitian(unname(parent(na)))) - - # Sort by largest to smallest eigenvalues - # TODO: Replace `cpu` with `Expose` dispatch. - p = sortperm(d; rev=true, by=abs) - d = d[p] - u = u[:, p] - - length_d = length(d) - truncerr = zero(Float64) # Make more generic - if any(!isnothing, (maxdim, cutoff)) - d, truncerr, _ = truncate!!( - d; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff - ) - length_d = length(d) - if length_d < size(u, 2) - u = u[:, 1:length_d] - end - end - spec = Spectrum(d, truncerr) - - # TODO: Handle array wrappers more generally. - names_a = dimnames(parent(na)) - # TODO: Make this more generic, handle `dag`, etc. - l = randname(names_a[1]) # IndexID(rand(UInt64), "", 0) - r = randname(names_a[2]) # IndexID(rand(UInt64), "", 0) - names_d = (l, r) - nd = named(Diagonal(d), names_d) - names_u = (names_a[2], r) - nu = named(u, names_u) - return nd, nu, spec -end diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/fusedims.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/fusedims.jl deleted file mode 100644 index 9b0247925e..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/fusedims.jl +++ /dev/null @@ -1,52 +0,0 @@ -using ...NDTensors.TensorAlgebra: TensorAlgebra, blockedperm, fusedims, splitdims -using ...NDTensors.TensorAlgebra.BaseExtensions: BaseExtensions - -function TensorAlgebra.blockedperm(na::AbstractNamedDimsArray, nameddim_blocks::Tuple...) - # Extract names if named dimensions or axes were passed - dimname_blocks = map(group -> name.(group), nameddim_blocks) - dimnames_a = dimnames(na) - perms = map(dimname_blocks) do dimname_block - return BaseExtensions.indexin(dimname_block, dimnames_a) - end - return blockedperm(perms...) -end - -# i, j, k, l = named.((2, 2, 2, 2), ("i", "j", "k", "l")) -# a = randn(i, j, k, l) -# fusedims(a, (i, k) => "a") -# fusedims(a, (i, k) => "a", (j, l) => "b") -# TODO: Rewrite in terms of `fusedims(a, .., (1, 3))` interface. -function TensorAlgebra.fusedims(na::AbstractNamedDimsArray, fusions::Pair...) - dimnames_fuse = map(group -> name.(group), first.(fusions)) - dimnames_fused = map(name, last.(fusions)) - if sum(length, dimnames_fuse) < ndims(na) - # Not all names are specified - dimnames_unspecified = setdiff(dimnames(na), dimnames_fuse...) - dimnames_fuse = vcat(tuple.(dimnames_unspecified), collect(dimnames_fuse)) - dimnames_fused = vcat(dimnames_unspecified, collect(dimnames_fused)) - end - perm = blockedperm(na, dimnames_fuse...) - a_fused = fusedims(unname(na), perm) - return named(a_fused, dimnames_fused) -end - -function TensorAlgebra.splitdims(na::AbstractNamedDimsArray, splitters::Pair...) - fused_names = map(name, first.(splitters)) - split_namedlengths = last.(splitters) - splitters_unnamed = map(splitters) do splitter - fused_name, split_namedlengths = splitter - fused_dim = findfirst(isequal(fused_name), dimnames(na)) - split_lengths = unname.(split_namedlengths) - return fused_dim => split_lengths - end - a_split = splitdims(unname(na), splitters_unnamed...) - names_split = Any[tuple.(dimnames(na))...] 
- for splitter in splitters - fused_name, split_namedlengths = splitter - fused_dim = findfirst(isequal(fused_name), dimnames(na)) - split_names = name.(split_namedlengths) - names_split[fused_dim] = split_names - end - names_split = reduce((x, y) -> (x..., y...), names_split) - return named(a_split, names_split) -end diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/qr.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/qr.jl deleted file mode 100644 index 329b98574a..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/qr.jl +++ /dev/null @@ -1,18 +0,0 @@ -# using ..ITensors: IndexID -using LinearAlgebra: LinearAlgebra, qr -using ...NDTensors.NamedDimsArrays: AbstractNamedDimsArray, dimnames, name, randname, unname - -function LinearAlgebra.qr(na::AbstractNamedDimsArray; positive=nothing) - return qr(na, (dimnames(na, 1),), (dimnames(na, 2),); positive) -end - -function LinearAlgebra.qr( - na::AbstractNamedDimsArray, labels_codomain::Tuple, labels_domain::Tuple; positive=nothing -) - @assert isnothing(positive) || !positive - q, r = qr(unname(na), dimnames(na), name.(labels_codomain), name.(labels_domain)) - name_qr = randname(dimnames(na)[1]) - dimnames_q = (name.(labels_codomain)..., name_qr) - dimnames_r = (name_qr, name.(labels_domain)...) - return named(q, dimnames_q), named(r, dimnames_r) -end diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/svd.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/svd.jl deleted file mode 100644 index 787f89e3f0..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/svd.jl +++ /dev/null @@ -1,53 +0,0 @@ -using LinearAlgebra: Diagonal, LinearAlgebra, svd -using ...NDTensors.RankFactorization: Spectrum, truncate!! -function LinearAlgebra.svd( - na::AbstractNamedDimsArray; - mindim=nothing, - maxdim=nothing, - cutoff=nothing, - use_absolute_cutoff=nothing, - use_relative_cutoff=nothing, - alg=nothing, - min_blockdim=nothing, -) - # TODO: Handle array wrappers around - # `AbstractNamedDimsArray` more elegantly. - USV = svd(unname(na)) - u, s, v = USV.U, USV.S, USV.Vt - - # Sort by largest to smallest eigenvalues - # TODO: Replace `cpu` with `Expose` dispatch. - p = sortperm(s; rev=true, by=abs) - u = u[:, p] - s = s[p] - v = v[p, :] - - s² = s .^ 2 - length_s = length(s) - truncerr = zero(Float64) # Make more generic - if any(!isnothing, (maxdim, cutoff)) - s², truncerr, _ = truncate!!( - s²; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff - ) - length_s = length(s²) - # TODO: Avoid this if they are already the - # correct size. - u = u[:, 1:length_s] - s = s[1:length_s] - v = v[1:length_s, :] - end - spec = Spectrum(s², truncerr) - - # TODO: Handle array wrappers more generally. - names_a = dimnames(na) - # TODO: Make this more generic, handle `dag`, etc.
- l = randname(names_a[1]) # IndexID(rand(UInt64), "", 0) - r = randname(names_a[2]) # IndexID(rand(UInt64), "", 0) - names_u = (names_a[1], l) - nu = named(u, names_u) - names_s = (l, r) - ns = named(Diagonal(s), names_s) - names_v = (r, names_a[2]) - nv = named(v, names_v) - return nu, ns, nv, spec -end diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/test/Project.toml b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/test/Project.toml deleted file mode 100644 index ef491a529c..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/test/Project.toml +++ /dev/null @@ -1,3 +0,0 @@ -[deps] -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/test/runtests.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/test/runtests.jl deleted file mode 100644 index a73edfd8c6..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/test/runtests.jl +++ /dev/null @@ -1,59 +0,0 @@ -@eval module $(gensym()) -using Test: @test, @testset, @test_broken -using NDTensors.NamedDimsArrays: named, unname -using NDTensors.TensorAlgebra: TensorAlgebra, contract, fusedims, splitdims -using LinearAlgebra: qr -elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) -@testset "NamedDimsArraysTensorAlgebraExt (eltype=$(elt))" for elt in elts - @testset "contract" begin - i = named(2, "i") - j = named(2, "j") - k = named(2, "k") - na1 = randn(elt, i, j) - na2 = randn(elt, j, k) - na_dest = TensorAlgebra.contract(na1, na2) - @test eltype(na_dest) === elt - @test unname(na_dest, (i, k)) ≈ unname(na1) * unname(na2) - end - @testset "fusedims" begin - i, j, k, l = named.((2, 3, 4, 5), ("i", "j", "k", "l")) - na = randn(elt, i, j, k, l) - na_fused = fusedims(na, (k, i) => "a", (j, l) => "b") - # Fuse all dimensions. - @test unname(na_fused, ("a", "b")) ≈ - reshape(unname(na, (k, i, j, l)), (unname(k) * unname(i), unname(j) * unname(l))) - na_fused = fusedims(na, (k, i) => "a") - # Fuse a subset of dimensions. - @test unname(na_fused, ("a", "j", "l")) ≈ - reshape(unname(na, (k, i, j, l)), (unname(k) * unname(i), unname(j), unname(l))) - end - @testset "splitdims" begin - a, b = named.((6, 20), ("a", "b")) - i, j, k, l = named.((2, 3, 4, 5), ("i", "j", "k", "l")) - na = randn(elt, a, b) - # Split all dimensions. - na_split = splitdims(na, "a" => (k, i), "b" => (j, l)) - @test unname(na_split, ("k", "i", "j", "l")) ≈ - reshape(unname(na, ("a", "b")), (unname(k), unname(i), unname(j), unname(l))) - # Split a subset of dimensions. - na_split = splitdims(na, "a" => (j, i)) - @test unname(na_split, ("j", "i", "b")) ≈ - reshape(unname(na, ("a", "b")), (unname(j), unname(i), unname(b))) - end - @testset "qr" begin - dims = (2, 2, 2, 2) - i, j, k, l = named.(dims, ("i", "j", "k", "l")) - - na = randn(elt, i, j) - # TODO: Should this be allowed? - # TODO: Add support for specifying new name. - q, r = qr(na) - @test q * r ≈ na - - na = randn(elt, i, j, k, l) - # TODO: Add support for specifying new name. 
- q, r = qr(na, (i, k), (j, l)) - @test contract(q, r) ≈ na - end -end -end diff --git a/NDTensors/src/lib/NamedDimsArrays/generate_readme.jl b/NDTensors/src/lib/NamedDimsArrays/generate_readme.jl deleted file mode 100644 index 889a16915c..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/generate_readme.jl +++ /dev/null @@ -1,10 +0,0 @@ -using Literate -using NDTensors.NamedDimsArrays: NamedDimsArrays -Literate.markdown( - joinpath( - pkgdir(NamedDimsArrays), "src", "NamedDimsArrays", "examples", "example_readme.jl" - ), - joinpath(pkgdir(NamedDimsArrays), "src", "NamedDimsArrays"); - flavor=Literate.CommonMarkFlavor(), - name="README", -) diff --git a/NDTensors/src/lib/NamedDimsArrays/src/NamedDimsArrays.jl b/NDTensors/src/lib/NamedDimsArrays/src/NamedDimsArrays.jl deleted file mode 100644 index 71d1a78c95..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/src/NamedDimsArrays.jl +++ /dev/null @@ -1,27 +0,0 @@ -module NamedDimsArrays -include("traits.jl") -include("name.jl") -include("randname.jl") -include("abstractnamedint.jl") -include("abstractnamedunitrange.jl") -include("abstractnameddimsarray.jl") -include("abstractnameddimsmatrix.jl") -include("abstractnameddimsvector.jl") -include("namedint.jl") -include("namedunitrange.jl") -include("nameddimsarray.jl") -include("constructors.jl") -include("similar.jl") -include("permutedims.jl") -include("promote_shape.jl") -include("map.jl") -include("broadcast_shape.jl") -include("broadcast.jl") - -# Extensions -include("../ext/NamedDimsArraysAdaptExt/src/NamedDimsArraysAdaptExt.jl") -include( - "../ext/NamedDimsArraysSparseArraysBaseExt/src/NamedDimsArraysSparseArraysBaseExt.jl" -) -include("../ext/NamedDimsArraysTensorAlgebraExt/src/NamedDimsArraysTensorAlgebraExt.jl") -end diff --git a/NDTensors/src/lib/NamedDimsArrays/src/abstractnameddimsarray.jl b/NDTensors/src/lib/NamedDimsArrays/src/abstractnameddimsarray.jl deleted file mode 100644 index a73763bb90..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/src/abstractnameddimsarray.jl +++ /dev/null @@ -1,176 +0,0 @@ -using ..BaseExtensions: BaseExtensions -using NDTensors.TypeParameterAccessors: TypeParameterAccessors, parenttype - -# Some of the interface is inspired by: -# https://github.com/invenia/NamedDims.jl -# https://github.com/mcabbott/NamedPlus.jl - -abstract type AbstractNamedDimsArray{T,N,Parent,Names} <: AbstractArray{T,N} end - -# Required interface - -# Output the names. -# TODO: Define for `AbstractArray`. -dimnames(a::AbstractNamedDimsArray) = error("Not implemented") - -# Unwrapping the names -Base.parent(::AbstractNamedDimsArray) = error("Not implemented") - -## TODO remove TypeParameterAccessors when SetParameters is removed -function TypeParameterAccessors.position( - ::Type{<:AbstractNamedDimsArray}, ::typeof(parenttype) -) - return TypeParameterAccessors.Position(3) -end - -# Set the names of an unnamed AbstractArray -# `ndims(a) == length(names)` -# This is a constructor -## named(a::AbstractArray, names) = error("Not implemented") - -dimnames(a::AbstractNamedDimsArray, i::Int) = dimnames(a)[i] - -# Traits -# TODO: Define for `AbstractArray`. -# TODO: Define a trait type `IsNamed`. -isnamed(::AbstractNamedDimsArray) = true - -# AbstractArray interface -# TODO: Use `unname` instead of `parent`? - -# Helper function, move to `utils.jl`. -named_tuple(t::Tuple, names) = ntuple(i -> named(t[i], names[i]), length(t)) - -# TODO: Should `axes` output named axes or not? -# TODO: Use the proper type, `namedaxistype(a)`. 
-# Base.axes(a::AbstractNamedDimsArray) = named_tuple(axes(unname(a)), dimnames(a)) -Base.axes(a::AbstractNamedDimsArray) = axes(unname(a)) -namedaxes(a::AbstractNamedDimsArray) = named.(axes(unname(a)), dimnames(a)) -# TODO: Use the proper type, `namedlengthtype(a)`. -Base.size(a::AbstractNamedDimsArray) = size(unname(a)) -namedsize(a::AbstractNamedDimsArray) = named.(size(unname(a)), dimnames(a)) -Base.getindex(a::AbstractNamedDimsArray, I...) = unname(a)[I...] -function Base.setindex!(a::AbstractNamedDimsArray, x, I...) - unname(a)[I...] = x - return a -end - -# Derived interface - -# Output the names. -# TODO: Define for `AbstractArray`. -dimname(a::AbstractNamedDimsArray, i) = dimnames(a)[i] - -# Renaming -# Unname and set new names -# TODO: Define for `AbstractArray`. -rename(a::AbstractNamedDimsArray, names) = named(unname(a), names) - -# replacenames(a, :i => :a, :j => :b) -# `rename` in `NamedPlus.jl`. -# TODO: Define for `AbstractArray`. -function replacenames(na::AbstractNamedDimsArray, replacements::Pair...) - # `BaseExtension.replace` needed for `Tuple` support on Julia 1.6 and older. - return named(unname(na), BaseExtensions.replace(dimnames(na), replacements...)) -end - -# Either define new names or replace names -# TODO: Define for `AbstractArray`, use `isnamed` trait -# to add names or replace names. -setnames(a::AbstractArray, names) = named(a, names) -setnames(a::AbstractNamedDimsArray, names) = rename(a, names) - -# TODO: Move to `utils.jl` file. -# TODO: Use `Base.indexin`? -function getperm(x, y) - return map(yᵢ -> findfirst(isequal(yᵢ), x), y) -end - -# TODO: Define for `AbstractArray`, use `isnamed` trait? -function get_name_perm(a::AbstractNamedDimsArray, names::Tuple) - # TODO: Call `getperm(dimnames(a), dimnames(namedints))`. - return getperm(dimnames(a), names) -end - -# Fixes ambiguity error -# TODO: Define for `AbstractArray`, use `isnamed` trait? -function get_name_perm(a::AbstractNamedDimsArray, names::Tuple{}) - # TODO: Call `getperm(dimnames(a), dimnames(namedints))`. - @assert iszero(ndims(a)) - return () -end - -# TODO: Define for `AbstractArray`, use `isnamed` trait? -function get_name_perm( - a::AbstractNamedDimsArray, namedints::Tuple{Vararg{AbstractNamedInt}} -) - # TODO: Call `getperm(dimnames(a), dimnames(namedints))`. - return getperm(namedsize(a), namedints) -end - -# TODO: Define for `AbstractArray`, use `isnamed` trait? -function get_name_perm( - a::AbstractNamedDimsArray, new_namedaxes::Tuple{Vararg{AbstractNamedUnitRange}} -) - # TODO: Call `getperm(dimnames(a), dimnames(namedints))`. - return getperm(namedaxes(a), new_namedaxes) -end - -# Indexing -# a[:i => 2, :j => 3] -# TODO: Write a generic version using `dim`. -# TODO: Define a `NamedIndex` or `NamedInt` type for indexing? -# Base.getindex(a::AbstractArray, I::NamedInt...) -function Base.getindex(a::AbstractNamedDimsArray, I::Pair...) - perm = get_name_perm(a, first.(I)) - i = last.(I) - return unname(a)[map(p -> i[p], perm)...] -end - -# a[:i => 2, :j => 3] = 12 -# TODO: Write a generic version using `dim`. -# TODO: Define a `NamedIndex` or `NamedInt` type for indexing? -function Base.setindex!(a::AbstractNamedDimsArray, value, I::Pair...) - perm = get_name_perm(a, first.(I)) - i = last.(I) - unname(a)[map(p -> i[p], perm)...] = value - return a -end - -# Output the dimension of the specified name. -dim(a::AbstractNamedDimsArray, name) = findfirst(==(name), dimnames(a)) - -# Output the dimensions of the specified names.
-dims(a::AbstractNamedDimsArray, names) = map(name -> dim(a, name), names) - -# Unwrapping the names -# TODO: Use `isnamed` trait. -unname(a::AbstractNamedDimsArray) = parent(a) -unname(a::AbstractArray) = a - -# Permute into a certain order. -# align(a, (:j, :k, :i)) -# Like `named(nameless(a, names), names)` -# TODO: Use `isnamed` trait. -function align(na::AbstractNamedDimsArray, names) - perm = get_name_perm(na, names) - # TODO: Avoid permutation if it is a trivial permutation? - # return typeof(a)(permutedims(unname(a), perm), names) - return permutedims(na, perm) -end - -# Unwrapping names and permuting -# nameless(a, (:j, :i)) -# Could just call `unname`? -## nameless(a::AbstractNamedDimsArray, names) = unname(align(a, names)) -# TODO: Use `isnamed` trait. -unname(a::AbstractNamedDimsArray, names) = unname(align(a, names)) - -# In `TensorAlgebra` this is `fuse` and `unfuse`, -# in `NDTensors`/`ITensors` this is `combine` and `uncombine`. -# t = split(g, :n => (j=4, k=5)) -# join(t, (:i, :k) => :χ) - -# TensorAlgebra -# contract, fusedims, unfusedims, qr, eigen, svd, add, etc. -# Some of these can simply wrap `TensorAlgebra.jl` functions. diff --git a/NDTensors/src/lib/NamedDimsArrays/src/abstractnameddimsmatrix.jl b/NDTensors/src/lib/NamedDimsArrays/src/abstractnameddimsmatrix.jl deleted file mode 100644 index 5090e9842d..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/src/abstractnameddimsmatrix.jl +++ /dev/null @@ -1 +0,0 @@ -const AbstractNamedDimsMatrix{T,Parent,Names} = AbstractNamedDimsArray{T,2,Parent,Names} diff --git a/NDTensors/src/lib/NamedDimsArrays/src/abstractnameddimsvector.jl b/NDTensors/src/lib/NamedDimsArrays/src/abstractnameddimsvector.jl deleted file mode 100644 index d1f7b677cd..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/src/abstractnameddimsvector.jl +++ /dev/null @@ -1 +0,0 @@ -const AbstractNamedDimsVector{T,Parent,Names} = AbstractNamedDimsArray{T,1,Parent,Names} diff --git a/NDTensors/src/lib/NamedDimsArrays/src/abstractnamedint.jl b/NDTensors/src/lib/NamedDimsArrays/src/abstractnamedint.jl deleted file mode 100644 index 0c3851caef..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/src/abstractnamedint.jl +++ /dev/null @@ -1,41 +0,0 @@ -abstract type AbstractNamedInt{Value,Name} <: Integer end - -# Interface -unname(i::AbstractNamedInt) = error("Not implemented") -name(i::AbstractNamedInt) = error("Not implemented") - -# Derived -unname(::Type{<:AbstractNamedInt{Value}}) where {Value} = Value - -# Integer interface -# TODO: Should this make a random name, or require defining a way -# to combine names? -Base.:*(i1::AbstractNamedInt, i2::AbstractNamedInt) = unname(i1) * unname(i2) -Base.:-(i::AbstractNamedInt) = typeof(i)(-unname(i), name(i)) - -# TODO: Define for `NamedInt`, `NamedUnitRange` fallback? -# Base.OneTo(stop::AbstractNamedInt) = namedoneto(stop) -## nameduniterange_type(::Type{<:AbstractNamedInt}) = error("Not implemented") - -# TODO: Use conversion from `AbstractNamedInt` to `AbstractNamedUnitRange` -# instead of general `named`. -# Base.OneTo(stop::AbstractNamedInt) = namedoneto(stop) -Base.OneTo(stop::AbstractNamedInt) = named(Base.OneTo(unname(stop)), name(stop)) - -# TODO: Is this needed? -# Include the name as well?
-Base.:<(i1::AbstractNamedInt, i2::AbstractNamedInt) = unname(i1) < unname(i2) -## Base.zero(type::Type{<:AbstractNamedInt}) = zero(unname(type)) - -function Base.promote_rule(type1::Type{<:AbstractNamedInt}, type2::Type{<:Integer}) - return promote_type(unname(type1), type2) -end -(type::Type{<:Integer})(i::AbstractNamedInt) = type(unname(i)) -# TODO: Use conversion from `AbstractNamedInt` to `AbstractNamedUnitRange` -# instead of general `named`. -function Base.oftype(i1::AbstractNamedInt, i2::Integer) - return named(convert(typeof(unname(i1)), i2), name(i1)) -end - -# Traits -isnamed(::AbstractNamedInt) = true diff --git a/NDTensors/src/lib/NamedDimsArrays/src/abstractnamedunitrange.jl b/NDTensors/src/lib/NamedDimsArrays/src/abstractnamedunitrange.jl deleted file mode 100644 index d2d5630d28..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/src/abstractnamedunitrange.jl +++ /dev/null @@ -1,35 +0,0 @@ -abstract type AbstractNamedUnitRange{T,Value<:AbstractUnitRange{T},Name} <: - AbstractUnitRange{T} end - -# Required interface -unname(::AbstractNamedUnitRange) = error("Not implemented") -name(::AbstractNamedUnitRange) = error("Not implemented") - -# Traits -isnamed(::AbstractNamedUnitRange) = true - -# Unit range -Base.first(i::AbstractNamedUnitRange) = first(unname(i)) -Base.last(i::AbstractNamedUnitRange) = last(unname(i)) -Base.length(i::AbstractNamedUnitRange) = named(length(unname(i)), name(i)) - -# TODO: Use `isnamed` trait? -dimnames(a::Tuple{Vararg{AbstractNamedUnitRange}}) = name.(a) - -unname(a::Tuple{Vararg{AbstractNamedUnitRange}}) = unname.(a) -unname(a::Tuple{Vararg{AbstractNamedUnitRange}}, names) = unname(align(a, names)) - -function named(as::Tuple{Vararg{AbstractUnitRange}}, names) - return ntuple(j -> named(as[j], names[j]), length(as)) -end - -function get_name_perm(a::Tuple{Vararg{AbstractNamedUnitRange}}, names::Tuple) - return getperm(dimnames(a), names) -end - -# Permute into a certain order. -# align(a, (:j, :k, :i)) -function align(a::Tuple{Vararg{AbstractNamedUnitRange}}, names) - perm = get_name_perm(a, names) - return map(j -> a[j], perm) -end diff --git a/NDTensors/src/lib/NamedDimsArrays/src/broadcast.jl b/NDTensors/src/lib/NamedDimsArrays/src/broadcast.jl deleted file mode 100644 index d2c1198272..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/src/broadcast.jl +++ /dev/null @@ -1,49 +0,0 @@ -using Base.Broadcast: BroadcastStyle, AbstractArrayStyle, DefaultArrayStyle, Broadcasted -using ..BroadcastMapConversion: map_function, map_args - -struct NamedDimsArrayStyle{N} <: AbstractArrayStyle{N} end - -function Broadcast.BroadcastStyle(::Type{<:AbstractNamedDimsArray{<:Any,N}}) where {N} - return NamedDimsArrayStyle{N}() -end - -NamedDimsArrayStyle(::Val{N}) where {N} = NamedDimsArrayStyle{N}() -NamedDimsArrayStyle{M}(::Val{N}) where {M,N} = NamedDimsArrayStyle{N}() - -Broadcast.BroadcastStyle(a::NamedDimsArrayStyle, ::DefaultArrayStyle{0}) = a -function Broadcast.BroadcastStyle(::NamedDimsArrayStyle{N}, a::DefaultArrayStyle) where {N} - return BroadcastStyle(DefaultArrayStyle{N}(), a) -end -function Broadcast.BroadcastStyle( - ::NamedDimsArrayStyle{N}, ::Broadcast.Style{Tuple} -) where {N} - return DefaultArrayStyle{N}() -end - -# TODO: Is this needed? -# Define `output_names`, like `allocate_output`. 
-# function dimnames(bc::Broadcasted{<:NamedDimsArrayStyle}) -# return dimnames(first(map_args(bc))) -# end - -function Broadcast.check_broadcast_axes(shp, a::AbstractNamedDimsArray) - # Unless we output named axes from `axes(::NamedDimsArray)`, - # this check won't make sense since it has to check up - # to an unknown permutation. - return nothing -end - -# TODO: Use `allocate_output`, share logic with `map`. -function Base.similar(bc::Broadcasted{<:NamedDimsArrayStyle}, elt::Type) - return similar(first(map_args(bc)), elt) -end - -# Broadcasting implementation -function Base.copyto!( - dest::AbstractNamedDimsArray{<:Any,N}, bc::Broadcasted{NamedDimsArrayStyle{N}} -) where {N} - # convert to map - # flatten and only keep the AbstractArray arguments - map!(map_function(bc), dest, map_args(bc)...) - return dest -end diff --git a/NDTensors/src/lib/NamedDimsArrays/src/broadcast_shape.jl b/NDTensors/src/lib/NamedDimsArrays/src/broadcast_shape.jl deleted file mode 100644 index ad6986190f..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/src/broadcast_shape.jl +++ /dev/null @@ -1,24 +0,0 @@ -using Base.Broadcast: Broadcast, broadcast_shape, combine_axes - -# TODO: Have `axes` output named axes so Base functions "just work". -function Broadcast.combine_axes(na1::AbstractNamedDimsArray, nas::AbstractNamedDimsArray...) - return broadcast_shape(namedaxes(na1), combine_axes(nas...)) -end -function Broadcast.combine_axes(na1::AbstractNamedDimsArray, na2::AbstractNamedDimsArray) - return broadcast_shape(namedaxes(na1), namedaxes(na2)) -end -Broadcast.combine_axes(na::AbstractNamedDimsArray) = namedaxes(na) - -function Broadcast.broadcast_shape( - na1::Tuple{Vararg{AbstractNamedUnitRange}}, - na2::Tuple{Vararg{AbstractNamedUnitRange}}, - nas::Tuple{Vararg{AbstractNamedUnitRange}}..., -) - return broadcast_shape(broadcast_shape(na1, na2), nas...) -end - -function Broadcast.broadcast_shape( - na1::Tuple{Vararg{AbstractNamedUnitRange}}, na2::Tuple{Vararg{AbstractNamedUnitRange}} -) - return promote_shape(na1, na2) -end diff --git a/NDTensors/src/lib/NamedDimsArrays/src/constructors.jl b/NDTensors/src/lib/NamedDimsArrays/src/constructors.jl deleted file mode 100644 index 9e167beea0..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/src/constructors.jl +++ /dev/null @@ -1,48 +0,0 @@ -using Random: AbstractRNG, default_rng - -# TODO: Use `AbstractNamedUnitRange`, determine the `AbstractNamedDimsArray` -# from a default value. Useful for distinguishing between `NamedDimsArray` -# and `ITensor`.
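As a usage sketch for the constructors defined just below (an editor's addition, consistent with the README and tests elsewhere in this patch): standard constructors such as `rand`, `randn`, `zeros`, `ones`, and `fill` accept `NamedInt` dimensions and return arrays carrying those names.

```julia
# Named-dimension constructors (editor's sketch; names and sizes are hypothetical).
i, j = named.((2, 3), ("i", "j"))
a = randn(Float32, i, j)   # NamedDimsArray with dimnames ("i", "j")
b = fill(1.0, j, i)        # dimnames ("j", "i")
@assert dimnames(a) == ("i", "j")
```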
-# Convenient constructors -default_eltype() = Float64 -for f in [:rand, :randn] - @eval begin - function Base.$f( - rng::AbstractRNG, elt::Type{<:Number}, dims::Tuple{NamedInt,Vararg{NamedInt}} - ) - a = $f(rng, elt, unname.(dims)) - return named(a, name.(dims)) - end - function Base.$f( - rng::AbstractRNG, elt::Type{<:Number}, dim1::NamedInt, dims::Vararg{NamedInt} - ) - return $f(rng, elt, (dim1, dims...)) - end - Base.$f(elt::Type{<:Number}, dims::Tuple{NamedInt,Vararg{NamedInt}}) = - $f(default_rng(), elt, dims) - Base.$f(elt::Type{<:Number}, dim1::NamedInt, dims::Vararg{NamedInt}) = - $f(elt, (dim1, dims...)) - Base.$f(dims::Tuple{NamedInt,Vararg{NamedInt}}) = $f(default_eltype(), dims) - Base.$f(dim1::NamedInt, dims::Vararg{NamedInt}) = $f((dim1, dims...)) - end -end -for f in [:zeros, :ones] - @eval begin - function Base.$f(elt::Type{<:Number}, dims::Tuple{NamedInt,Vararg{NamedInt}}) - a = $f(elt, unname.(dims)) - return named(a, name.(dims)) - end - function Base.$f(elt::Type{<:Number}, dim1::NamedInt, dims::Vararg{NamedInt}) - return $f(elt, (dim1, dims...)) - end - Base.$f(dims::Tuple{NamedInt,Vararg{NamedInt}}) = $f(default_eltype(), dims) - Base.$f(dim1::NamedInt, dims::Vararg{NamedInt}) = $f((dim1, dims...)) - end -end -function Base.fill(value, dims::Tuple{NamedInt,Vararg{NamedInt}}) - a = fill(value, unname.(dims)) - return named(a, name.(dims)) -end -function Base.fill(value, dim1::NamedInt, dims::Vararg{NamedInt}) - return fill(value, (dim1, dims...)) -end diff --git a/NDTensors/src/lib/NamedDimsArrays/src/map.jl b/NDTensors/src/lib/NamedDimsArrays/src/map.jl deleted file mode 100644 index d7b38575ec..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/src/map.jl +++ /dev/null @@ -1,14 +0,0 @@ -# TODO: Handle maybe-mutation. -# TODO: Handle permutations more efficiently by fusing with `f`. -function Base.map!(f, na_dest::AbstractNamedDimsArray, nas::AbstractNamedDimsArray...) - a_dest = unname(na_dest) - as = map(na -> unname(na, dimnames(na_dest)), nas) - map!(f, a_dest, as...) - return na_dest -end - -function Base.map(f, nas::AbstractNamedDimsArray...) - na_dest = similar(first(nas)) - map!(f, na_dest, nas...) - return na_dest -end diff --git a/NDTensors/src/lib/NamedDimsArrays/src/name.jl b/NDTensors/src/lib/NamedDimsArrays/src/name.jl deleted file mode 100644 index 07613ddb28..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/src/name.jl +++ /dev/null @@ -1,2 +0,0 @@ -# In general an object is a name -name(x) = x diff --git a/NDTensors/src/lib/NamedDimsArrays/src/nameddimsarray.jl b/NDTensors/src/lib/NamedDimsArrays/src/nameddimsarray.jl deleted file mode 100644 index 5727c5366a..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/src/nameddimsarray.jl +++ /dev/null @@ -1,114 +0,0 @@ -function _NamedDimsArray end - -struct NamedDimsArray{T,N,Arr<:AbstractArray{T,N},Names<:Tuple{Vararg{Any,N}}} <: - AbstractNamedDimsArray{T,N,Arr,Names} - array::Arr - names::Names - global @inline function _NamedDimsArray(array::AbstractArray, names) - elt = eltype(array) - n = ndims(array) - names_tuple = Tuple{Vararg{Any,n}}(names) - arraytype = typeof(array) - namestype = typeof(names_tuple) - return new{elt,n,arraytype,namestype}(array, names_tuple) - end - - # TODO: Delete, maybe this aligns according to the new names? 
- global @inline function _NamedDimsArray(array::NamedDimsArray, names) - return error("Not implemented, already named.") - end -end - -function NamedDimsArray{T,N,Arr,Names}( - a::AbstractArray, names -) where {T,N,Arr<:AbstractArray{T,N},Names} - return _NamedDimsArray(convert(Arr, a), convert(Names, names)) -end - -# TODO: Combine with other constructor definitions. -function NamedDimsArray{T,N,Arr,Names}( - a::AbstractArray, names::Tuple{} -) where {T,N,Arr<:AbstractArray{T,N},Names} - return _NamedDimsArray(convert(Arr, a), convert(Names, names)) -end - -NamedDimsArray(a::AbstractArray, names) = _NamedDimsArray(a, names) - -# TODO: Check size consistency -# TODO: Combine with other constructor definitions. -function NamedDimsArray{T,N,Arr,Names}( - a::AbstractArray, namedsize::Tuple{Vararg{AbstractNamedInt}} -) where {T,N,Arr<:AbstractArray{T,N},Names} - @assert size(a) == unname.(namedsize) - return _NamedDimsArray(convert(Arr, a), convert(Names, name.(namedsize))) -end - -# TODO: Check axes consistency -# TODO: Combine with other constructor definitions. -function NamedDimsArray{T,N,Arr,Names}( - a::AbstractArray, namedaxes::Tuple{Vararg{AbstractNamedUnitRange}} -) where {T,N,Arr<:AbstractArray{T,N},Names} - @assert axes(a) == unname.(namedaxes) - return _NamedDimsArray(convert(Arr, a), convert(Names, name.(namedaxes))) -end - -# Required interface - -# Output the names. -dimnames(a::NamedDimsArray) = a.names - -# Unwrapping the names -Base.parent(a::NamedDimsArray) = a.array - -# Set the names of an unnamed AbstractArray -function named(a::AbstractArray, names) - @assert ndims(a) == length(names) - return NamedDimsArray(a, names) -end - -# TODO: Use `Undefs.jl` instead. -function undefs(arraytype::Type{<:AbstractArray}, axes::Tuple{Vararg{AbstractUnitRange}}) - return arraytype(undef, length.(axes)) -end - -# TODO: Use `AbstractNamedUnitRange`, determine the `AbstractNamedDimsArray` -# from a default value. Useful for distinguishing between `NamedDimsArray` -# and `ITensor`. -function undefs(arraytype::Type{<:AbstractArray}, axes::Tuple{Vararg{NamedUnitRange}}) - array = undefs(arraytype, unname.(axes)) - names = name.(axes) - return named(array, names) -end - -# TODO: Use `AbstractNamedUnitRange`, determine the `AbstractNamedDimsArray` -# from a default value. Useful for distinguishing between `NamedDimsArray` -# and `ITensor`. -function Base.similar( - arraytype::Type{<:AbstractArray}, axes::Tuple{NamedUnitRange,Vararg{NamedUnitRange}} -) - # TODO: Use `unname`? - return undefs(arraytype, axes) -end - -# TODO: Define `NamedInt`, `NamedUnitRange`, `NamedVector <: AbstractVector`, etc. -# See https://github.com/mcabbott/NamedPlus.jl/blob/v0.0.5/src/int.jl. - -# TODO: Define `similar_name`, with shorthand `sim`, that makes a random name. -# Used in matrix/tensor factorizations. - -# TODO: Think about how to interact with array wrapper types, see: -# https://github.com/mcabbott/NamedPlus.jl/blob/v0.0.5/src/recursion.jl - -# TODO: What should `size` and `axes` output? Could output tuples -# of `NamedInt` and `NamedUnitRange`. - -# TODO: Construct from `NamedInt` or `NamedUnitRange` in standard -# array constructors, like `zeros`, `rand`, `randn`, `undefs`, etc. -# See https://mkitti.github.io/Undefs.jl/stable/, -# https://github.com/mkitti/ArrayAllocators.jl - -# TODO: Define `ArrayConstructors.randn`, `ArrayConstructors.rand`, -# `ArrayConstructors.zeros`, `ArrayConstructors.fill`, etc. -# for generic constructors accepting `CuArray`, `Array`, etc. 
-# Also could define allocator types, `https://github.com/JuliaGPU/KernelAbstractions.jl` -# and `https://docs.juliahub.com/General/HeterogeneousComputing/stable/`. diff --git a/NDTensors/src/lib/NamedDimsArrays/src/namedint.jl b/NDTensors/src/lib/NamedDimsArrays/src/namedint.jl deleted file mode 100644 index 13b8a88141..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/src/namedint.jl +++ /dev/null @@ -1,28 +0,0 @@ -struct NamedInt{Value,Name} <: AbstractNamedInt{Value,Name} - value::Value - name::Name -end - -## Needs a `default_name(nametype::Type)` function. -## NamedInt{Value,Name}(i::Integer) where {Value,Name} = NamedInt{Value,Name}(i, default_name(Name)) - -# Interface -unname(i::NamedInt) = i.value -name(i::NamedInt) = i.name - -# Convenient constructor -named(i::Integer, name) = NamedInt(i, name) - -# TODO: Use `isnamed` trait? -dimnames(a::Tuple{Vararg{AbstractNamedInt}}) = name.(a) - -function get_name_perm(a::Tuple{Vararg{AbstractNamedInt}}, names::Tuple) - return getperm(dimnames(a), names) -end - -# Permute into a certain order. -# align(a, (:j, :k, :i)) -function align(a::Tuple{Vararg{AbstractNamedInt}}, names) - perm = get_name_perm(a, names) - return map(j -> a[j], perm) -end diff --git a/NDTensors/src/lib/NamedDimsArrays/src/namedunitrange.jl b/NDTensors/src/lib/NamedDimsArrays/src/namedunitrange.jl deleted file mode 100644 index 44867eae3e..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/src/namedunitrange.jl +++ /dev/null @@ -1,12 +0,0 @@ -struct NamedUnitRange{T,Value<:AbstractUnitRange{T},Name} <: - AbstractNamedUnitRange{T,Value,Name} - value::Value - name::Name -end - -# Interface -unname(i::NamedUnitRange) = i.value -name(i::NamedUnitRange) = i.name - -# Constructor -named(i::AbstractUnitRange, name) = NamedUnitRange(i, name) diff --git a/NDTensors/src/lib/NamedDimsArrays/src/permutedims.jl b/NDTensors/src/lib/NamedDimsArrays/src/permutedims.jl deleted file mode 100644 index 5e81a3bfd7..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/src/permutedims.jl +++ /dev/null @@ -1,4 +0,0 @@ -function Base.permutedims(na::AbstractNamedDimsArray, perm) - names = map(j -> dimnames(na)[j], perm) - return named(permutedims(unname(na), perm), names) -end diff --git a/NDTensors/src/lib/NamedDimsArrays/src/promote_shape.jl b/NDTensors/src/lib/NamedDimsArrays/src/promote_shape.jl deleted file mode 100644 index 84fe517ecf..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/src/promote_shape.jl +++ /dev/null @@ -1,12 +0,0 @@ -function Base.promote_shape(na1::AbstractNamedDimsArray, na2::AbstractNamedDimsArray) - return promote_shape(namedaxes(na1), namedaxes(na2)) -end - -function Base.promote_shape( - na1::Tuple{Vararg{AbstractNamedUnitRange}}, na2::Tuple{Vararg{AbstractNamedUnitRange}} -) - a1 = unname(na1) - a2 = unname(na2, dimnames(na1)) - a_promoted = promote_shape(a1, a2) - return named(a_promoted, dimnames(na1)) -end diff --git a/NDTensors/src/lib/NamedDimsArrays/src/randname.jl b/NDTensors/src/lib/NamedDimsArrays/src/randname.jl deleted file mode 100644 index 1143ff4ceb..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/src/randname.jl +++ /dev/null @@ -1,5 +0,0 @@ -using Random: randstring - -randname(::Any) = error("Not implemented") - -randname(::String) = randstring() diff --git a/NDTensors/src/lib/NamedDimsArrays/src/similar.jl b/NDTensors/src/lib/NamedDimsArrays/src/similar.jl deleted file mode 100644 index 441afecd19..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/src/similar.jl +++ /dev/null @@ -1,49 +0,0 @@ -# `similar` - -# Preserve the names
-Base.similar(na::AbstractNamedDimsArray) = named(similar(unname(na)), dimnames(na)) -function Base.similar(na::AbstractNamedDimsArray, elt::Type) - return named(similar(unname(na), elt), dimnames(na)) -end - -# Remove the names -# TODO: Make versions taking `NamedUnitRange` and `NamedInt`. -function Base.similar(na::AbstractNamedDimsArray, elt::Type, dims::Tuple{Vararg{Int64}}) - return similar(unname(na), elt, dims) -end - -# Remove the names -# TODO: Make versions taking `NamedUnitRange` and `NamedInt`. -function Base.similar( - na::AbstractNamedDimsArray, elt::Type, dims::Tuple{Integer,Vararg{Integer}} -) - return similar(unname(na), elt, dims) -end - -# Remove the names -# TODO: Make versions taking `NamedUnitRange` and `NamedInt`. -function Base.similar( - na::AbstractNamedDimsArray, - elt::Type, - dims::Tuple{Union{Integer,Base.OneTo},Vararg{Union{Integer,Base.OneTo}}}, -) - return similar(unname(na), elt, dims) -end - -# Remove the names -# TODO: Make versions taking `NamedUnitRange` and `NamedInt`. -function Base.similar( - na::AbstractNamedDimsArray, elt::Type, dims::Union{Integer,AbstractUnitRange}... -) - return similar(unname(na), elt, dims...) -end - -# Remove the names -# TODO: Make versions taking `NamedUnitRange` and `NamedInt`. -Base.similar(na::AbstractNamedDimsArray, dims::Tuple) = similar(unname(na), dims) - -# Remove the names -# TODO: Make versions taking `NamedUnitRange` and `NamedInt`. -function Base.similar(na::AbstractNamedDimsArray, dims::Union{Integer,AbstractUnitRange}...) - return similar(unname(na), dims...) -end diff --git a/NDTensors/src/lib/NamedDimsArrays/src/traits.jl b/NDTensors/src/lib/NamedDimsArrays/src/traits.jl deleted file mode 100644 index c971ae7a86..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/src/traits.jl +++ /dev/null @@ -1 +0,0 @@ -isnamed(::Any) = false diff --git a/NDTensors/src/lib/NamedDimsArrays/test/Project.toml b/NDTensors/src/lib/NamedDimsArrays/test/Project.toml deleted file mode 100644 index 18bbc81308..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/test/Project.toml +++ /dev/null @@ -1,3 +0,0 @@ -[deps] -Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" diff --git a/NDTensors/src/lib/NamedDimsArrays/test/runtests.jl b/NDTensors/src/lib/NamedDimsArrays/test/runtests.jl deleted file mode 100644 index df2a0fba66..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/test/runtests.jl +++ /dev/null @@ -1,11 +0,0 @@ -using Test: @testset - -@testset "NamedDimsArrays" begin - filenames = filter(readdir(@__DIR__)) do filename - startswith("test_")(filename) && endswith(".jl")(filename) - end - @testset "Test $(@__DIR__)/$filename" for filename in filenames - println("Running $(@__DIR__)/$filename") - @time include(filename) - end -end diff --git a/NDTensors/src/lib/NamedDimsArrays/test/test_NDTensorsNamedDimsArraysExt.jl b/NDTensors/src/lib/NamedDimsArrays/test/test_NDTensorsNamedDimsArraysExt.jl deleted file mode 100644 index 06eeb0c6e0..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/test/test_NDTensorsNamedDimsArraysExt.jl +++ /dev/null @@ -1,33 +0,0 @@ -@eval module $(gensym()) -using NDTensors.NamedDimsArrays: NamedDimsArray, dimnames -using NDTensors: NDTensors -using Test: @test, @testset - -@testset "NDTensorsNamedDimsArraysExt" begin - elt = Float64 - - a = NamedDimsArray(randn(elt, 2, 2), ("i", "j")) - b = NDTensors.similar(a) - @test b isa NamedDimsArray{elt} - @test eltype(b) === elt - @test dimnames(b) == ("i", "j") - @test size(b) == (2, 2) - - a = 
NamedDimsArray(randn(elt, 2, 2), ("i", "j")) - b = NDTensors.similar(a, Float32) - @test b isa NamedDimsArray{Float32} - @test eltype(b) === Float32 - @test dimnames(b) == ("i", "j") - @test size(b) == (2, 2) - - a = NamedDimsArray(randn(elt, 2, 2), ("i", "j")) - b = copy(a) - α = randn(elt) - b = NDTensors.fill!!(b, α) - @test b isa NamedDimsArray{elt} - @test eltype(b) === elt - @test dimnames(b) == ("i", "j") - @test size(b) == (2, 2) - @test all(==(α), b) -end -end diff --git a/NDTensors/src/lib/NamedDimsArrays/test/test_NamedDimsArraysAdaptExt.jl b/NDTensors/src/lib/NamedDimsArrays/test/test_NamedDimsArraysAdaptExt.jl deleted file mode 100644 index 10ffe6fb7d..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/test/test_NamedDimsArraysAdaptExt.jl +++ /dev/null @@ -1,5 +0,0 @@ -using Test: @testset - -@testset "NamedDimsArrays $(@__FILE__)" begin - include("../ext/NamedDimsArraysAdaptExt/test/runtests.jl") -end diff --git a/NDTensors/src/lib/NamedDimsArrays/test/test_NamedDimsArraysSparseArraysBaseExt.jl b/NDTensors/src/lib/NamedDimsArrays/test/test_NamedDimsArraysSparseArraysBaseExt.jl deleted file mode 100644 index 5cd5ef6498..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/test/test_NamedDimsArraysSparseArraysBaseExt.jl +++ /dev/null @@ -1,5 +0,0 @@ -using Test: @testset - -@testset "NamedDimsArrays $(@__FILE__)" begin - include("../ext/NamedDimsArraysSparseArraysBaseExt/test/runtests.jl") -end diff --git a/NDTensors/src/lib/NamedDimsArrays/test/test_NamedDimsArraysTensorAlgebraExt.jl b/NDTensors/src/lib/NamedDimsArrays/test/test_NamedDimsArraysTensorAlgebraExt.jl deleted file mode 100644 index da5e5dd230..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/test/test_NamedDimsArraysTensorAlgebraExt.jl +++ /dev/null @@ -1,5 +0,0 @@ -using Test: @testset - -@testset "NamedDimsArrays $(@__FILE__)" begin - include("../ext/NamedDimsArraysTensorAlgebraExt/test/runtests.jl") -end diff --git a/NDTensors/src/lib/NamedDimsArrays/test/test_basic.jl b/NDTensors/src/lib/NamedDimsArrays/test/test_basic.jl deleted file mode 100644 index 480e9a259a..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/test/test_basic.jl +++ /dev/null @@ -1,108 +0,0 @@ -@eval module $(gensym()) -using Test: @test, @testset -using NDTensors.NamedDimsArrays: - NamedDimsArrays, - NamedDimsArray, - align, - dimnames, - isnamed, - named, - namedaxes, - namedsize, - unname - -@testset "NamedDimsArrays $(@__FILE__)" begin - @testset "Basic functionality" begin - a = randn(3, 4) - na = named(a, ("i", "j")) - # TODO: Just call this `size`? - i, j = namedsize(na) - # TODO: Just call `namedaxes`? - ai, aj = namedaxes(na) - @test !isnamed(a) - @test isnamed(na) - @test dimnames(na) == ("i", "j") - @test na[1, 1] == a[1, 1] - na[1, 1] = 11 - @test na[1, 1] == 11 - # TODO: Should `size` output `namedsize`? - @test size(na) == (3, 4) - @test namedsize(na) == (named(3, "i"), named(4, "j")) - @test length(na) == 12 - # TODO: Should `axes` output `namedaxes`? 
- @test axes(na) == (1:3, 1:4) - @test namedaxes(na) == (named(1:3, "i"), named(1:4, "j")) - @test randn(named(3, "i"), named(4, "j")) isa NamedDimsArray - @test na["i" => 1, "j" => 2] == a[1, 2] - @test na["j" => 2, "i" => 1] == a[1, 2] - na["j" => 2, "i" => 1] = 12 - @test na[1, 2] == 12 - @test na[j => 1, i => 2] == a[2, 1] - @test na[aj => 1, ai => 2] == a[2, 1] - na[j => 1, i => 2] = 21 - @test na[2, 1] == 21 - na[aj => 1, ai => 2] = 2211 - @test na[2, 1] == 2211 - na′ = align(na, ("j", "i")) - @test a == permutedims(unname(na′), (2, 1)) - na′ = align(na, (j, i)) - @test a == permutedims(unname(na′), (2, 1)) - na′ = align(na, (aj, ai)) - @test a == permutedims(unname(na′), (2, 1)) - end - @testset "Shorthand constructors (eltype=$elt)" for elt in ( - Float32, ComplexF32, Float64, ComplexF64 - ) - i, j = named.((2, 2), ("i", "j")) - value = rand(elt) - for na in (zeros(elt, i, j), zeros(elt, (i, j))) - @test eltype(na) === elt - @test na isa NamedDimsArray - @test dimnames(na) == ("i", "j") - @test iszero(na) - end - for na in (fill(value, i, j), fill(value, (i, j))) - @test eltype(na) === elt - @test na isa NamedDimsArray - @test dimnames(na) == ("i", "j") - @test all(isequal(value), na) - end - for na in (rand(elt, i, j), rand(elt, (i, j))) - @test eltype(na) === elt - @test na isa NamedDimsArray - @test dimnames(na) == ("i", "j") - @test !iszero(na) - @test all(x -> real(x) > 0, na) - end - for na in (randn(elt, i, j), randn(elt, (i, j))) - @test eltype(na) === elt - @test na isa NamedDimsArray - @test dimnames(na) == ("i", "j") - @test !iszero(na) - end - end - @testset "Shorthand constructors (eltype=unspecified)" begin - i, j = named.((2, 2), ("i", "j")) - default_elt = Float64 - for na in (zeros(i, j), zeros((i, j))) - @test eltype(na) === default_elt - @test na isa NamedDimsArray - @test dimnames(na) == ("i", "j") - @test iszero(na) - end - for na in (rand(i, j), rand((i, j))) - @test eltype(na) === default_elt - @test na isa NamedDimsArray - @test dimnames(na) == ("i", "j") - @test !iszero(na) - @test all(x -> real(x) > 0, na) - end - for na in (randn(i, j), randn((i, j))) - @test eltype(na) === default_elt - @test na isa NamedDimsArray - @test dimnames(na) == ("i", "j") - @test !iszero(na) - end - end -end -end diff --git a/NDTensors/src/lib/NamedDimsArrays/test/test_tensoralgebra.jl b/NDTensors/src/lib/NamedDimsArrays/test/test_tensoralgebra.jl deleted file mode 100644 index 464f85510c..0000000000 --- a/NDTensors/src/lib/NamedDimsArrays/test/test_tensoralgebra.jl +++ /dev/null @@ -1,80 +0,0 @@ -@eval module $(gensym()) -using Test: @test, @testset -using NDTensors.NamedDimsArrays: dimnames, named, unname, isnamed -@testset "NamedDimsArrays $(@__FILE__) (eltype=$elt)" for elt in ( - Float32, ComplexF32, Float64, ComplexF64 -) - a = randn(elt, 2, 3) - na = named(a, ("i", "j")) - b = randn(elt, 3, 2) - nb = named(b, ("j", "i")) - - nc = similar(na) - @test size(nc) == (2, 3) - @test eltype(nc) == elt - @test dimnames(nc) == ("i", "j") - - nc = similar(na, Float32) - @test size(nc) == (2, 3) - @test eltype(nc) == Float32 - @test dimnames(nc) == ("i", "j") - - c = similar(na, (3, 4)) - @test size(c) == (3, 4) - @test eltype(c) == elt - @test c isa typeof(a) - - c = similar(na, 3, 4) - @test size(c) == (3, 4) - @test eltype(c) == elt - @test c isa typeof(a) - - c = similar(na, Float32, (3, 4)) - @test size(c) == (3, 4) - @test eltype(c) == Float32 - @test !isnamed(c) - - c = similar(na, Float32, 3, 4) - @test size(c) == (3, 4) - @test eltype(c) == Float32 - @test !isnamed(c) - - 
nc = permutedims(na, (2, 1)) - @test unname(nc) ≈ permutedims(unname(na), (2, 1)) - @test dimnames(nc) == ("j", "i") - @test nc ≈ na - - nc = 2 * na - @test unname(nc) ≈ 2 * a - @test eltype(nc) === elt - - nc = 2 .* na - @test unname(nc) ≈ 2 * a - @test eltype(nc) === elt - - nc = na + nb - @test unname(nc, ("i", "j")) ≈ a + permutedims(b, (2, 1)) - @test eltype(nc) === elt - - nc = na .+ nb - @test unname(nc, ("i", "j")) ≈ a + permutedims(b, (2, 1)) - @test eltype(nc) === elt - - nc = map(+, na, nb) - @test unname(nc, ("i", "j")) ≈ a + permutedims(b, (2, 1)) - @test eltype(nc) === elt - - nc = named(randn(elt, 2, 3), ("i", "j")) - map!(+, nc, na, nb) - @test unname(nc, ("i", "j")) ≈ a + permutedims(b, (2, 1)) - @test eltype(nc) === elt - - nc = na - nb - @test unname(nc, ("i", "j")) ≈ a - permutedims(b, (2, 1)) - @test eltype(nc) === elt - - nc = na .- nb - @test unname(nc, ("i", "j")) ≈ a - permutedims(b, (2, 1)) - @test eltype(nc) === elt -end -end diff --git a/NDTensors/src/lib/NestedPermutedDimsArrays/src/NestedPermutedDimsArrays.jl b/NDTensors/src/lib/NestedPermutedDimsArrays/src/NestedPermutedDimsArrays.jl deleted file mode 100644 index 9234f6aed1..0000000000 --- a/NDTensors/src/lib/NestedPermutedDimsArrays/src/NestedPermutedDimsArrays.jl +++ /dev/null @@ -1,276 +0,0 @@ -# Mostly copied from https://github.com/JuliaLang/julia/blob/master/base/permuteddimsarray.jl -# Like `PermutedDimsArrays` but singly nested, similar to `Adjoint` and `Transpose` -# (though those are fully recursive). -#= -TODO: Investigate replacing this with a `PermutedDimsArray` wrapped around a `MappedArrays.MappedArray`. -There are a few issues with that: -1. Just using a type alias leads to type piracy, for example the constructor is type piracy. -2. `setindex!(::NestedPermutedDimsArray, I...)` fails because no conversion is defined between `Array` -and `PermutedDimsArray`. -3. The type alias is tricky to define, ideally it would have similar type parameters to the current -`NestedPermutedDimsArrays.NestedPermutedDimsArray` definition which matches the type parameters -of `PermutedDimsArrays.PermutedDimsArray` but that seems to be difficult to achieve. -```julia -module NestedPermutedDimsArrays - -using MappedArrays: MultiMappedArray, mappedarray -export NestedPermutedDimsArray - -const NestedPermutedDimsArray{TT,T<:AbstractArray{TT},N,perm,iperm,AA<:AbstractArray{T}} = PermutedDimsArray{ - PermutedDimsArray{TT,N,perm,iperm,T}, - N, - perm, - iperm, - MultiMappedArray{ - PermutedDimsArray{TT,N,perm,iperm,T}, - N, - Tuple{AA}, - Type{PermutedDimsArray{TT,N,perm,iperm,T}}, - Type{PermutedDimsArray{TT,N,iperm,perm,T}}, - }, -} - -function NestedPermutedDimsArray(a::AbstractArray, perm) - iperm = invperm(perm) - f = PermutedDimsArray{eltype(eltype(a)),ndims(a),perm,iperm,eltype(a)} - finv = PermutedDimsArray{eltype(eltype(a)),ndims(a),iperm,perm,eltype(a)} - return PermutedDimsArray(mappedarray(f, finv, a), perm) -end - -end -``` -=# -module NestedPermutedDimsArrays - -import Base: permutedims, permutedims! 
-export NestedPermutedDimsArray - -# Some day we will want storage-order-aware iteration, so put perm in the parameters -struct NestedPermutedDimsArray{T,N,perm,iperm,AA<:AbstractArray} <: AbstractArray{T,N} - parent::AA - - function NestedPermutedDimsArray{T,N,perm,iperm,AA}( - data::AA - ) where {T,N,perm,iperm,AA<:AbstractArray} - (isa(perm, NTuple{N,Int}) && isa(iperm, NTuple{N,Int})) || - error("perm and iperm must both be NTuple{$N,Int}") - isperm(perm) || - throw(ArgumentError(string(perm, " is not a valid permutation of dimensions 1:", N))) - all(d -> iperm[perm[d]] == d, 1:N) || - throw(ArgumentError(string(perm, " and ", iperm, " must be inverses"))) - return new(data) - end -end - -""" - NestedPermutedDimsArray(A, perm) -> B - -Given an AbstractArray `A`, create a view `B` such that the -dimensions appear to be permuted. Similar to `permutedims`, except -that no copying occurs (`B` shares storage with `A`). - -See also [`permutedims`](@ref), [`invperm`](@ref). - -# Examples -```jldoctest -julia> A = rand(3,5,4); - -julia> B = NestedPermutedDimsArray(A, (3,1,2)); - -julia> size(B) -(4, 3, 5) - -julia> B[3,1,2] == A[1,2,3] -true -``` -""" -Base.@constprop :aggressive function NestedPermutedDimsArray( - data::AbstractArray{T,N}, perm -) where {T,N} - length(perm) == N || - throw(ArgumentError(string(perm, " is not a valid permutation of dimensions 1:", N))) - iperm = invperm(perm) - return NestedPermutedDimsArray{ - PermutedDimsArray{eltype(T),N,(perm...,),(iperm...,),T}, - N, - (perm...,), - (iperm...,), - typeof(data), - }( - data - ) -end - -Base.parent(A::NestedPermutedDimsArray) = A.parent -function Base.size(A::NestedPermutedDimsArray{T,N,perm}) where {T,N,perm} - return genperm(size(parent(A)), perm) -end -function Base.axes(A::NestedPermutedDimsArray{T,N,perm}) where {T,N,perm} - return genperm(axes(parent(A)), perm) -end -Base.has_offset_axes(A::NestedPermutedDimsArray) = Base.has_offset_axes(A.parent) -function Base.similar(A::NestedPermutedDimsArray, T::Type, dims::Base.Dims) - return similar(parent(A), T, dims) -end -function Base.cconvert(::Type{Ptr{T}}, A::NestedPermutedDimsArray{T}) where {T} - return Base.cconvert(Ptr{T}, parent(A)) -end - -# It's OK to return a pointer to the first element, and indeed quite -# useful for wrapping C routines that require a different storage -# order than used by Julia. But for an array with unconventional -# storage order, a linear offset is ambiguous---is it a memory offset -# or a linear index? -function Base.pointer(A::NestedPermutedDimsArray, i::Integer) - throw( - ArgumentError("pointer(A, i) is deliberately unsupported for NestedPermutedDimsArray") - ) -end - -function Base.strides(A::NestedPermutedDimsArray{T,N,perm}) where {T,N,perm} - s = strides(parent(A)) - return ntuple(d -> s[perm[d]], Val(N)) -end -function Base.elsize(::Type{<:NestedPermutedDimsArray{<:Any,<:Any,<:Any,<:Any,P}}) where {P} - return Base.elsize(P) -end - -@inline function Base.getindex( - A::NestedPermutedDimsArray{T,N,perm,iperm}, I::Vararg{Int,N} -) where {T,N,perm,iperm} - @boundscheck checkbounds(A, I...) - @inbounds val = PermutedDimsArray(getindex(A.parent, genperm(I, iperm)...), perm) - return val -end -@inline function Base.setindex!( - A::NestedPermutedDimsArray{T,N,perm,iperm}, val, I::Vararg{Int,N} -) where {T,N,perm,iperm} - @boundscheck checkbounds(A, I...) - @inbounds setindex!(A.parent, PermutedDimsArray(val, iperm), genperm(I, iperm)...) 
- return val -end - -function Base.isassigned( - A::NestedPermutedDimsArray{T,N,perm,iperm}, I::Vararg{Int,N} -) where {T,N,perm,iperm} - @boundscheck checkbounds(Bool, A, I...) || return false - @inbounds x = isassigned(A.parent, genperm(I, iperm)...) - return x -end - -@inline genperm(I::NTuple{N,Any}, perm::Dims{N}) where {N} = ntuple(d -> I[perm[d]], Val(N)) -@inline genperm(I, perm::AbstractVector{Int}) = genperm(I, (perm...,)) - -function Base.copyto!( - dest::NestedPermutedDimsArray{T,N}, src::AbstractArray{T,N} -) where {T,N} - checkbounds(dest, axes(src)...) - return _copy!(dest, src) -end -Base.copyto!(dest::NestedPermutedDimsArray, src::AbstractArray) = _copy!(dest, src) - -function _copy!(P::NestedPermutedDimsArray{T,N,perm}, src) where {T,N,perm} - # If dest/src are "close to dense," then it pays to be cache-friendly. - # Determine the first permuted dimension - d = 0 # d+1 will hold the first permuted dimension of src - while d < ndims(src) && perm[d + 1] == d + 1 - d += 1 - end - if d == ndims(src) - copyto!(parent(P), src) # it's not permuted - else - R1 = CartesianIndices(axes(src)[1:d]) - d1 = findfirst(isequal(d + 1), perm)::Int # first permuted dim of dest - R2 = CartesianIndices(axes(src)[(d + 2):(d1 - 1)]) - R3 = CartesianIndices(axes(src)[(d1 + 1):end]) - _permutedims!(P, src, R1, R2, R3, d + 1, d1) - end - return P -end - -@noinline function _permutedims!( - P::NestedPermutedDimsArray, src, R1::CartesianIndices{0}, R2, R3, ds, dp -) - ip, is = axes(src, dp), axes(src, ds) - for jo in first(ip):8:last(ip), io in first(is):8:last(is) - for I3 in R3, I2 in R2 - for j in jo:min(jo + 7, last(ip)) - for i in io:min(io + 7, last(is)) - @inbounds P[i, I2, j, I3] = src[i, I2, j, I3] - end - end - end - end - return P -end - -@noinline function _permutedims!(P::NestedPermutedDimsArray, src, R1, R2, R3, ds, dp) - ip, is = axes(src, dp), axes(src, ds) - for jo in first(ip):8:last(ip), io in first(is):8:last(is) - for I3 in R3, I2 in R2 - for j in jo:min(jo + 7, last(ip)) - for i in io:min(io + 7, last(is)) - for I1 in R1 - @inbounds P[I1, i, I2, j, I3] = src[I1, i, I2, j, I3] - end - end - end - end - end - return P -end - -const CommutativeOps = Union{ - typeof(+), - typeof(Base.add_sum), - typeof(min), - typeof(max), - typeof(Base._extrema_rf), - typeof(|), - typeof(&), -} - -function Base._mapreduce_dim( - f, op::CommutativeOps, init::Base._InitialValue, A::NestedPermutedDimsArray, dims::Colon -) - return Base._mapreduce_dim(f, op, init, parent(A), dims) -end -function Base._mapreduce_dim( - f::typeof(identity), - op::Union{typeof(Base.mul_prod),typeof(*)}, - init::Base._InitialValue, - A::NestedPermutedDimsArray{<:Union{Real,Complex}}, - dims::Colon, -) - return Base._mapreduce_dim(f, op, init, parent(A), dims) -end - -function Base.mapreducedim!( - f, op::CommutativeOps, B::AbstractArray{T,N}, A::NestedPermutedDimsArray{S,N,perm,iperm} -) where {T,S,N,perm,iperm} - C = NestedPermutedDimsArray{T,N,iperm,perm,typeof(B)}(B) # make the inverse permutation for the output - Base.mapreducedim!(f, op, C, parent(A)) - return B -end -function Base.mapreducedim!( - f::typeof(identity), - op::Union{typeof(Base.mul_prod),typeof(*)}, - B::AbstractArray{T,N}, - A::NestedPermutedDimsArray{<:Union{Real,Complex},N,perm,iperm}, -) where {T,N,perm,iperm} - C = NestedPermutedDimsArray{T,N,iperm,perm,typeof(B)}(B) # make the inverse permutation for the output - Base.mapreducedim!(f, op, C, parent(A)) - return B -end - -function Base.showarg( - io::IO, A::NestedPermutedDimsArray{T,N,perm}, 
toplevel -) where {T,N,perm} - print(io, "NestedPermutedDimsArray(") - Base.showarg(io, parent(A), false) - print(io, ", ", perm, ')') - toplevel && print(io, " with eltype ", eltype(A)) - return nothing -end - -end diff --git a/NDTensors/src/lib/NestedPermutedDimsArrays/test/Project.toml b/NDTensors/src/lib/NestedPermutedDimsArrays/test/Project.toml deleted file mode 100644 index 9b1d5ccd25..0000000000 --- a/NDTensors/src/lib/NestedPermutedDimsArrays/test/Project.toml +++ /dev/null @@ -1,2 +0,0 @@ -[deps] -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" diff --git a/NDTensors/src/lib/NestedPermutedDimsArrays/test/runtests.jl b/NDTensors/src/lib/NestedPermutedDimsArrays/test/runtests.jl deleted file mode 100644 index 704297fcc2..0000000000 --- a/NDTensors/src/lib/NestedPermutedDimsArrays/test/runtests.jl +++ /dev/null @@ -1,23 +0,0 @@ -@eval module $(gensym()) -using NDTensors.NestedPermutedDimsArrays: NestedPermutedDimsArray -using Test: @test, @testset -@testset "NestedPermutedDimsArrays" for elt in ( - Float32, Float64, Complex{Float32}, Complex{Float64} -) - a = map(_ -> randn(elt, 2, 3, 4), CartesianIndices((2, 3, 4))) - perm = (3, 1, 2) - p = NestedPermutedDimsArray(a, perm) - T = PermutedDimsArray{elt,3,perm,invperm(perm),eltype(a)} - @test typeof(p) === NestedPermutedDimsArray{T,3,perm,invperm(perm),typeof(a)} - @test size(p) == (4, 2, 3) - @test eltype(p) === T - for I in eachindex(p) - @test size(p[I]) == (4, 2, 3) - @test p[I] == permutedims(a[CartesianIndex(map(i -> Tuple(I)[i], invperm(perm)))], perm) - end - x = randn(elt, 4, 2, 3) - p[3, 1, 2] = x - @test p[3, 1, 2] == x - @test a[1, 2, 3] == permutedims(x, invperm(perm)) -end -end diff --git a/NDTensors/src/lib/RankFactorization/.JuliaFormatter.toml b/NDTensors/src/lib/RankFactorization/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/RankFactorization/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/RankFactorization/src/RankFactorization.jl b/NDTensors/src/lib/RankFactorization/src/RankFactorization.jl deleted file mode 100644 index a2b950ac93..0000000000 --- a/NDTensors/src/lib/RankFactorization/src/RankFactorization.jl +++ /dev/null @@ -1,5 +0,0 @@ -module RankFactorization -include("default_kwargs.jl") -include("truncate_spectrum.jl") -include("spectrum.jl") -end diff --git a/NDTensors/src/lib/RankFactorization/src/default_kwargs.jl b/NDTensors/src/lib/RankFactorization/src/default_kwargs.jl deleted file mode 100644 index d0dc670d4c..0000000000 --- a/NDTensors/src/lib/RankFactorization/src/default_kwargs.jl +++ /dev/null @@ -1,11 +0,0 @@ -using NDTensors.TypeParameterAccessors: unwrap_array_type -replace_nothing(::Nothing, replacement) = replacement -replace_nothing(value, replacement) = value - -default_maxdim(a) = minimum(size(a)) -default_mindim(a) = true -default_cutoff(a) = zero(eltype(a)) -default_svd_alg(a) = default_svd_alg(unwrap_array_type(a), a) -default_svd_alg(::Type{<:AbstractArray}, a) = "divide_and_conquer" -default_use_absolute_cutoff(a) = false -default_use_relative_cutoff(a) = true diff --git a/NDTensors/src/lib/RankFactorization/src/spectrum.jl b/NDTensors/src/lib/RankFactorization/src/spectrum.jl deleted file mode 100644 index 7d6e9c46b7..0000000000 --- a/NDTensors/src/lib/RankFactorization/src/spectrum.jl +++ /dev/null @@ -1,23 +0,0 @@ -""" - Spectrum -contains the (truncated) density matrix eigenvalue spectrum which is computed during a -decomposition done by `svd` or `eigen`. 
In addition, it stores the truncation error. -""" -struct Spectrum{VecT<:Union{AbstractVector,Nothing},ElT<:Real} - eigs::VecT - truncerr::ElT -end - -eigs(s::Spectrum) = s.eigs -truncerror(s::Spectrum) = s.truncerr - -function entropy(s::Spectrum) - S = 0.0 - eigs_s = eigs(s) - isnothing(eigs_s) && - error("Spectrum does not contain any eigenvalues, cannot compute the entropy") - for p in eigs_s - p > 1e-13 && (S -= p * log(p)) - end - return S -end diff --git a/NDTensors/src/lib/RankFactorization/src/truncate_spectrum.jl b/NDTensors/src/lib/RankFactorization/src/truncate_spectrum.jl deleted file mode 100644 index 6f0f8b1472..0000000000 --- a/NDTensors/src/lib/RankFactorization/src/truncate_spectrum.jl +++ /dev/null @@ -1,105 +0,0 @@ -using ..NDTensors.TypeParameterAccessors: unwrap_array_type - -## TODO write Exposed version of truncate -function truncate!!(P::AbstractArray; kwargs...) - return truncate!!(unwrap_array_type(P), P; kwargs...) -end - -# CPU version. -function truncate!!(::Type{<:Array}, P::AbstractArray; kwargs...) - truncerr, docut = truncate!(P; kwargs...) - return P, truncerr, docut -end - -# GPU fallback version, convert to CPU. -function truncate!!(::Type{<:AbstractArray}, P::AbstractArray; kwargs...) - P_cpu = cpu(P) - truncerr, docut = truncate!(P_cpu; kwargs...) - P = adapt(unwrap_array_type(P), P_cpu) - return P, truncerr, docut -end - -# CPU implementation. -function truncate!( - P::AbstractVector; - mindim=nothing, - maxdim=nothing, - cutoff=nothing, - use_absolute_cutoff=nothing, - use_relative_cutoff=nothing, -) - mindim = replace_nothing(mindim, default_mindim(P)) - maxdim = replace_nothing(maxdim, length(P)) - cutoff = replace_nothing(cutoff, typemin(eltype(P))) - use_absolute_cutoff = replace_nothing(use_absolute_cutoff, default_use_absolute_cutoff(P)) - use_relative_cutoff = replace_nothing(use_relative_cutoff, default_use_relative_cutoff(P)) - - origm = length(P) - docut = zero(eltype(P)) - - #if P[1] <= 0.0 - # P[1] = 0.0 - # resize!(P, 1) - # return 0.0, 0.0 - #end - - if origm == 1 - docut = abs(P[1]) / 2 - return zero(eltype(P)), docut - end - - s = sign(P[1]) - s < 0 && (P .*= s) - - #Zero out any negative weight - for n in origm:-1:1 - (P[n] >= zero(eltype(P))) && break - P[n] = zero(eltype(P)) - end - - n = origm - truncerr = zero(eltype(P)) - while n > maxdim - truncerr += P[n] - n -= 1 - end - - if use_absolute_cutoff - #Test if individual prob.
weights fall below cutoff -#rather than using *sum* of discarded weights - while P[n] <= cutoff && n > mindim - truncerr += P[n] - n -= 1 - end - else - scale = one(eltype(P)) - if use_relative_cutoff - scale = sum(P) - (scale == zero(eltype(P))) && (scale = one(eltype(P))) - end - - #Continue truncating until *sum* of discarded probability - #weight reaches the cutoff (or n == mindim) - while (truncerr + P[n] <= cutoff * scale) && (n > mindim) - truncerr += P[n] - n -= 1 - end - - truncerr /= scale - end - - if n < 1 - n = 1 - end - - if n < origm - docut = (P[n] + P[n + 1]) / 2 - if abs(P[n] - P[n + 1]) < eltype(P)(1e-3) * P[n] - docut += eltype(P)(1e-3) * P[n] - end - end - - s < 0 && (P .*= s) - resize!(P, n) - return truncerr, docut -end diff --git a/NDTensors/src/lib/SmallVectors/.JuliaFormatter.toml b/NDTensors/src/lib/SmallVectors/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/SmallVectors/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/SmallVectors/README.md b/NDTensors/src/lib/SmallVectors/README.md deleted file mode 100644 index 71ee904af7..0000000000 --- a/NDTensors/src/lib/SmallVectors/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# SmallVectors - -## Introduction - -A module that defines small (mutable and immutable) vectors with a maximum length. Externally they have a dynamic/runtime length, but internally they are backed by a statically sized vector. This makes it so that operations can be performed faster because they can remain on the stack, but it provides some more convenience compared to StaticArrays.jl where the length is encoded in the type. - -## Examples - -For example: -```julia -using NDTensors.SmallVectors - -mv = MSmallVector{10}([1, 2, 3]) # Mutable vector with length 3, maximum length 10 -push!(mv, 4) -mv[2] = 12 -sort!(mv; rev=true) - -v = SmallVector{10}([1, 2, 3]) # Immutable vector with length 3, maximum length 10 -v = SmallVectors.push(v, 4) -v = SmallVectors.setindex(v, 12, 2) -v = SmallVectors.sort(v; rev=true) -``` -This also has the advantage that you can efficiently store collections of `SmallVector`/`MSmallVector` that have different runtime lengths, as long as they have the same maximum length. - -## List of functionality - -`SmallVector` and `MSmallVector` are subtypes of `AbstractVector` and therefore can be used in `Base` `AbstractVector` functions, though `SmallVector` will fail for mutating functions like `setindex!` because it is immutable. - -`MSmallVector` has specialized implementations of `Base` functions that involve resizing such as: -- `resize!` -- `push!` -- `pushfirst!` -- `pop!` -- `popfirst!` -- `append!` -- `prepend!` -- `insert!` -- `deleteat!` -which are guaranteed to not reallocate memory, and instead just use the memory buffer that already exists, unlike Base's `Vector` which may have to reallocate memory depending on the operation. However, they will error if they involve operations that resize beyond the maximum length of the `MSmallVector`, which you can access with `SmallVectors.maxlength(v)`. - -In addition, `SmallVector` and `MSmallVector` implement basic non-mutating operations such as: -- `SmallVectors.setindex` -as well as non-mutating resizing operations: -- `SmallVectors.resize` -- `SmallVectors.push` -- `SmallVectors.pushfirst` -- `SmallVectors.pop` -- `SmallVectors.popfirst` -- `SmallVectors.append` -- `SmallVectors.prepend` -- `SmallVectors.insert` -- `SmallVectors.deleteat` -which output a new vector (see the short sketch below).
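For instance, here is a minimal editorial sketch of the non-mutating operations listed above (the values shown in the comments follow the behavior of the corresponding `Base` functions):
```julia
using NDTensors.SmallVectors

v = SmallVector{10}([1, 2, 3])
w = SmallVectors.insert(v, 2, 7)  # returns a new vector [1, 7, 2, 3]; v is unchanged
w = SmallVectors.deleteat(w, 1)   # [7, 2, 3]
w = SmallVectors.circshift(w, 1)  # [3, 7, 2]
```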
In addition, it implements: -- `SmallVectors.circshift` -- `sort` (overloaded from `Base`). - -Finally, it provides some new helpful functions that are not in `Base`: -- `SmallVectors.insertsorted[!]` -- `SmallVectors.insertsortedunique[!]` -- `SmallVectors.mergesorted[!]` -- `SmallVectors.mergesortedunique[!]` - -## TODO - -Add specialized overloads for: -- `splice[!]` -- `union[!]` (`∪`) -- `intersect[!]` (`∩`) -- `setdiff[!]` -- `symdiff[!]` -- `unique[!]` - -Please let us know if there are other operations that would warrant specialized implementations for `AbstractSmallVector`. diff --git a/NDTensors/src/lib/SmallVectors/src/BaseExt/insertstyle.jl b/NDTensors/src/lib/SmallVectors/src/BaseExt/insertstyle.jl deleted file mode 100644 index c5008fef87..0000000000 --- a/NDTensors/src/lib/SmallVectors/src/BaseExt/insertstyle.jl +++ /dev/null @@ -1,9 +0,0 @@ -# Trait determining the style of inserting into a structure -abstract type InsertStyle end -struct IsInsertable <: InsertStyle end -struct NotInsertable <: InsertStyle end -struct FastCopy <: InsertStyle end - -# Assume it is insertable -@inline InsertStyle(::Type) = IsInsertable() -@inline InsertStyle(x) = InsertStyle(typeof(x)) diff --git a/NDTensors/src/lib/SmallVectors/src/BaseExt/sort.jl b/NDTensors/src/lib/SmallVectors/src/BaseExt/sort.jl deleted file mode 100644 index 3c11321da0..0000000000 --- a/NDTensors/src/lib/SmallVectors/src/BaseExt/sort.jl +++ /dev/null @@ -1,23 +0,0 @@ -# Custom version of `sort` (`SmallVectors.sort`) that directly uses an `order::Ordering`. -function sort(v, order::Base.Sort.Ordering; alg::Base.Sort.Algorithm=Base.Sort.defalg(v)) - mv = thaw(v) - SmallVectors.sort!(mv, order; alg) - return freeze(mv) -end - -# Custom version of `sort!` (`SmallVectors.sort!`) that directly uses an `order::Ordering`. -function sort!( - v::AbstractVector{T}, - order::Base.Sort.Ordering; - alg::Base.Sort.Algorithm=Base.Sort.defalg(v), - scratch::Union{Vector{T},Nothing}=nothing, -) where {T} - if VERSION < v"1.9" - Base.sort!(v, alg, order) - else - Base.Sort._sort!( - v, Base.Sort.maybe_apply_initial_optimizations(alg), order, (; scratch) - ) - end - return v -end diff --git a/NDTensors/src/lib/SmallVectors/src/BaseExt/sortedunique.jl b/NDTensors/src/lib/SmallVectors/src/BaseExt/sortedunique.jl deleted file mode 100644 index b8c851a568..0000000000 --- a/NDTensors/src/lib/SmallVectors/src/BaseExt/sortedunique.jl +++ /dev/null @@ -1,241 +0,0 @@ -# Union two unique sorted collections into an -# output buffer, returning a unique sorted collection. - -using Base: Ordering, ord, lt - -function unionsortedunique!( - itr1, - itr2; - lt=isless, - by=identity, - rev::Union{Bool,Nothing}=nothing, - order::Ordering=Forward, -) - return unionsortedunique!(itr1, itr2, ord(lt, by, rev, order)) -end - -function unionsortedunique!(itr1, itr2, order::Ordering) - i1 = firstindex(itr1) - i2 = firstindex(itr2) - stop1 = lastindex(itr1) - stop2 = lastindex(itr2) - @inbounds while i1 ≤ stop1 && i2 ≤ stop2 - item1 = itr1[i1] - item2 = itr2[i2] - if lt(order, item1, item2) - i1 += 1 - elseif lt(order, item2, item1) - # TODO: Use `insertat!`? - resize!(itr1, length(itr1) + 1) - for j in length(itr1):-1:(i1 + 1) - itr1[j] = itr1[j - 1] - end - # Replace with the item from the second list - itr1[i1] = item2 - i1 += 1 - i2 += 1 - stop1 += 1 - else # They are equal - i1 += 1 - i2 += 1 - end - end - # TODO: Use `insertat!`? 
- resize!(itr1, length(itr1) + (stop2 - i2 + 1)) - @inbounds for j2 in i2:stop2 - itr1[i1] = itr2[j2] - i1 += 1 - end - return itr1 -end - -function unionsortedunique( - itr1, - itr2; - lt=isless, - by=identity, - rev::Union{Bool,Nothing}=nothing, - order::Ordering=Forward, -) - return unionsortedunique(itr1, itr2, ord(lt, by, rev, order)) -end - -# Union two unique sorted collections into an -# output buffer, returning a unique sorted collection. -function unionsortedunique(itr1, itr2, order::Ordering) - out = thaw_type(itr1)() - i1 = firstindex(itr1) - i2 = firstindex(itr2) - iout = firstindex(out) - stop1 = lastindex(itr1) - stop2 = lastindex(itr2) - stopout = lastindex(out) - @inbounds while i1 ≤ stop1 && i2 ≤ stop2 - iout > stopout && resize!(out, iout) - item1 = itr1[i1] - item2 = itr2[i2] - if lt(order, item1, item2) - out[iout] = item1 - iout += 1 - i1 += 1 - elseif lt(order, item2, item1) - out[iout] = item2 - iout += 1 - i2 += 1 - else # They are equal - out[iout] = item2 - iout += 1 - i1 += 1 - i2 += 1 - end - end - # TODO: Use `insertat!`? - r1 = i1:stop1 - resize!(out, length(out) + length(r1)) - @inbounds for j1 in r1 - out[iout] = itr1[j1] - iout += 1 - end - # TODO: Use `insertat!`? - r2 = i2:stop2 - resize!(out, length(out) + length(r2)) - @inbounds for j2 in r2 - out[iout] = itr2[j2] - iout += 1 - end - return freeze(out) -end - -function setdiffsortedunique!( - itr1, - itr2; - lt=isless, - by=identity, - rev::Union{Bool,Nothing}=nothing, - order::Ordering=Forward, -) - return setdiffsortedunique!(itr1, itr2, ord(lt, by, rev, order)) -end - -function setdiffsortedunique!(itr1, itr2, order::Ordering) - i1 = firstindex(itr1) - i2 = firstindex(itr2) - stop1 = lastindex(itr1) - stop2 = lastindex(itr2) - @inbounds while i1 ≤ stop1 && i2 ≤ stop2 - item1 = itr1[i1] - item2 = itr2[i2] - if lt(order, item1, item2) - i1 += 1 - elseif lt(order, item2, item1) - i2 += 1 - else # They are equal - # TODO: Use `deleteat!`? 
- for j1 in i1:(length(itr1) - 1) - itr1[j1] = itr1[j1 + 1] - end - resize!(itr1, length(itr1) - 1) - stop1 = lastindex(itr1) - i2 += 1 - end - end - return itr1 -end - -function setdiffsortedunique( - itr1, - itr2; - lt=isless, - by=identity, - rev::Union{Bool,Nothing}=nothing, - order::Ordering=Forward, -) - return setdiffsortedunique(itr1, itr2, ord(lt, by, rev, order)) -end - -function setdiffsortedunique(itr1, itr2, order::Ordering) - out = thaw_type(itr1)() - i1 = firstindex(itr1) - i2 = firstindex(itr2) - iout = firstindex(out) - stop1 = lastindex(itr1) - stop2 = lastindex(itr2) - stopout = lastindex(out) - @inbounds while i1 ≤ stop1 && i2 ≤ stop2 - item1 = itr1[i1] - item2 = itr2[i2] - if lt(order, item1, item2) - iout > stopout && resize!(out, iout) - out[iout] = item1 - iout += 1 - i1 += 1 - elseif lt(order, item2, item1) - i2 += 1 - else # They are equal - i1 += 1 - i2 += 1 - end - end - resize!(out, iout - 1) - return freeze(out) -end - -function intersectsortedunique!( - itr1, - itr2; - lt=isless, - by=identity, - rev::Union{Bool,Nothing}=nothing, - order::Ordering=Forward, -) - return intersectsortedunique!(itr1, itr2, ord(lt, by, rev, order)) -end - -function intersectsortedunique!(itr1, itr2, order::Ordering) - return error("Not implemented") -end - -function intersectsortedunique( - itr1, - itr2; - lt=isless, - by=identity, - rev::Union{Bool,Nothing}=nothing, - order::Ordering=Forward, -) - return intersectsortedunique(itr1, itr2, ord(lt, by, rev, order)) -end - -function intersectsortedunique(itr1, itr2, order::Ordering) - return error("Not implemented") -end - -function symdiffsortedunique!( - itr1, - itr2; - lt=isless, - by=identity, - rev::Union{Bool,Nothing}=nothing, - order::Ordering=Forward, -) - return symdiffsortedunique!(itr1, itr2, ord(lt, by, rev, order)) -end - -function symdiffsortedunique!(itr1, itr2, order::Ordering) - return error("Not implemented") -end - -function symdiffsortedunique( - itr1, - itr2; - lt=isless, - by=identity, - rev::Union{Bool,Nothing}=nothing, - order::Ordering=Forward, -) - return symdiffsortedunique(itr1, itr2, ord(lt, by, rev, order)) -end - -function symdiffsortedunique(itr1, itr2, order::Ordering) - return error("Not implemented") -end diff --git a/NDTensors/src/lib/SmallVectors/src/BaseExt/thawfreeze.jl b/NDTensors/src/lib/SmallVectors/src/BaseExt/thawfreeze.jl deleted file mode 100644 index 7f6cd037a5..0000000000 --- a/NDTensors/src/lib/SmallVectors/src/BaseExt/thawfreeze.jl +++ /dev/null @@ -1,6 +0,0 @@ -thaw(x) = copy(x) -freeze(x) = x - -thaw_type(::Type{<:AbstractArray{<:Any,N}}, ::Type{T}) where {T,N} = Array{T,N} -thaw_type(x::AbstractArray, ::Type{T}) where {T} = thaw_type(typeof(x), T) -thaw_type(x::AbstractArray{T}) where {T} = thaw_type(typeof(x), T) diff --git a/NDTensors/src/lib/SmallVectors/src/SmallVectors.jl b/NDTensors/src/lib/SmallVectors/src/SmallVectors.jl deleted file mode 100644 index e6c6f3330f..0000000000 --- a/NDTensors/src/lib/SmallVectors/src/SmallVectors.jl +++ /dev/null @@ -1,45 +0,0 @@ -module SmallVectors -using StaticArrays - -export AbstractSmallVector, - SmallVector, - MSmallVector, - SubSmallVector, - FastCopy, - InsertStyle, - IsInsertable, - NotInsertable, - insert, - delete, - thaw, - freeze, - maxlength, - unionsortedunique, - unionsortedunique!, - setdiffsortedunique, - setdiffsortedunique!, - intersectsortedunique, - intersectsortedunique!, - symdiffsortedunique, - symdiffsortedunique!, - thaw_type - -struct NotImplemented <: Exception - msg::String -end -NotImplemented() = 
NotImplemented("Not implemented.") - -include("BaseExt/insertstyle.jl") -include("BaseExt/thawfreeze.jl") -include("BaseExt/sort.jl") -include("BaseExt/sortedunique.jl") -include("abstractarray/insert.jl") -include("abstractsmallvector/abstractsmallvector.jl") -include("abstractsmallvector/deque.jl") -include("msmallvector/msmallvector.jl") -include("smallvector/smallvector.jl") -include("smallvector/insertstyle.jl") -include("msmallvector/thawfreeze.jl") -include("smallvector/thawfreeze.jl") -include("subsmallvector/subsmallvector.jl") -end diff --git a/NDTensors/src/lib/SmallVectors/src/abstractarray/insert.jl b/NDTensors/src/lib/SmallVectors/src/abstractarray/insert.jl deleted file mode 100644 index 3a864aabe6..0000000000 --- a/NDTensors/src/lib/SmallVectors/src/abstractarray/insert.jl +++ /dev/null @@ -1,2 +0,0 @@ -SmallVectors.insert(a::Vector, index::Integer, item) = insert!(copy(a), index, item) -delete(d::AbstractDict, key) = delete!(copy(d), key) diff --git a/NDTensors/src/lib/SmallVectors/src/abstractsmallvector/abstractsmallvector.jl b/NDTensors/src/lib/SmallVectors/src/abstractsmallvector/abstractsmallvector.jl deleted file mode 100644 index 382928489f..0000000000 --- a/NDTensors/src/lib/SmallVectors/src/abstractsmallvector/abstractsmallvector.jl +++ /dev/null @@ -1,34 +0,0 @@ -""" -A vector with a fixed maximum length, backed by a fixed size buffer. -""" -abstract type AbstractSmallVector{T} <: AbstractVector{T} end - -# Required buffer interface -buffer(vec::AbstractSmallVector) = throw(NotImplemented()) - -similar_type(vec::AbstractSmallVector) = typeof(vec) - -# Required buffer interface -maxlength(vec::AbstractSmallVector) = length(buffer(vec)) -maxlength(vectype::Type{<:AbstractSmallVector}) = error("Not implemented") - -function thaw_type(vectype::Type{<:AbstractSmallVector}, ::Type{T}) where {T} - return MSmallVector{maxlength(vectype),T} -end -thaw_type(vectype::Type{<:AbstractSmallVector{T}}) where {T} = thaw_type(vectype, T) - -# Required AbstractArray interface -Base.size(vec::AbstractSmallVector) = throw(NotImplemented()) - -# Derived AbstractArray interface -function Base.getindex(vec::AbstractSmallVector, index::Integer) - return throw(NotImplemented()) -end -function Base.setindex!(vec::AbstractSmallVector, item, index::Integer) - return throw(NotImplemented()) -end -Base.IndexStyle(::Type{<:AbstractSmallVector}) = IndexLinear() - -function Base.convert(::Type{T}, a::AbstractArray) where {T<:AbstractSmallVector} - return a isa T ? a : T(a)::T -end diff --git a/NDTensors/src/lib/SmallVectors/src/abstractsmallvector/deque.jl b/NDTensors/src/lib/SmallVectors/src/abstractsmallvector/deque.jl deleted file mode 100644 index 3002f79f0d..0000000000 --- a/NDTensors/src/lib/SmallVectors/src/abstractsmallvector/deque.jl +++ /dev/null @@ -1,324 +0,0 @@ -# TODO: add -# splice[!] -# union[!] (∪) -# intersect[!] (∩) -# setdiff[!] -# symdiff[!] -# unique[!] - -# unionsorted[!] -# setdiffsorted[!] -# deletesorted[!] (delete all or one?) -# deletesortedfirst[!] (delete all or one?) 
- -Base.resize!(vec::AbstractSmallVector, len) = throw(NotImplemented()) - -@inline function resize(vec::AbstractSmallVector, len) - mvec = thaw(vec) - resize!(mvec, len) - return convert(similar_type(vec), mvec) -end - -@inline function Base.empty!(vec::AbstractSmallVector) - resize!(vec, 0) - return vec -end - -@inline function empty(vec::AbstractSmallVector) - mvec = thaw(vec) - empty!(mvec) - return convert(similar_type(vec), mvec) -end - -@inline function StaticArrays.setindex(vec::AbstractSmallVector, item, index::Integer) - @boundscheck checkbounds(vec, index) - mvec = thaw(vec) - @inbounds mvec[index] = item - return convert(similar_type(vec), mvec) -end - -@inline function Base.push!(vec::AbstractSmallVector, item) - resize!(vec, length(vec) + 1) - @inbounds vec[length(vec)] = item - return vec -end - -@inline function StaticArrays.push(vec::AbstractSmallVector, item) - mvec = thaw(vec) - push!(mvec, item) - return convert(similar_type(vec), mvec) -end - -@inline function Base.pop!(vec::AbstractSmallVector) - resize!(vec, length(vec) - 1) - return vec -end - -@inline function StaticArrays.pop(vec::AbstractSmallVector) - mvec = thaw(vec) - pop!(mvec) - return convert(similar_type(vec), mvec) -end - -@inline function Base.pushfirst!(vec::AbstractSmallVector, item) - insert!(vec, firstindex(vec), item) - return vec -end - -# Don't `@inline`, makes it slower. -function StaticArrays.pushfirst(vec::AbstractSmallVector, item) - mvec = thaw(vec) - pushfirst!(mvec, item) - return convert(similar_type(vec), mvec) -end - -@inline function Base.popfirst!(vec::AbstractSmallVector) - circshift!(vec, -1) - resize!(vec, length(vec) - 1) - return vec -end - -# Don't `@inline`, makes it slower. -function StaticArrays.popfirst(vec::AbstractSmallVector) - mvec = thaw(vec) - popfirst!(mvec) - return convert(similar_type(vec), mvec) -end - -# This implementation of `midpoint` is performance-optimized but safe -# only if `lo <= hi`. -# TODO: Replace with `Base.midpoint`. -midpoint(lo::T, hi::T) where {T<:Integer} = lo + ((hi - lo) >>> 0x01) -midpoint(lo::Integer, hi::Integer) = midpoint(promote(lo, hi)...) - -@inline function Base.reverse!(vec::AbstractSmallVector) - start, stop = firstindex(vec), lastindex(vec) - r = stop - @inbounds for i in start:midpoint(start, stop - 1) - vec[i], vec[r] = vec[r], vec[i] - r -= 1 - end - return vec -end - -@inline function Base.reverse!( - vec::AbstractSmallVector, start::Integer, stop::Integer=lastindex(vec) -) - reverse!(smallview(vec, start, stop)) - return vec -end - -@inline function Base.circshift!(vec::AbstractSmallVector, shift::Integer) - start, stop = firstindex(vec), lastindex(vec) - n = length(vec) - n == 0 && return vec - shift = mod(shift, n) - shift == 0 && return vec - reverse!(smallview(vec, start, stop - shift)) - reverse!(smallview(vec, stop - shift + 1, stop)) - reverse!(smallview(vec, start, stop)) - return vec -end - -@inline function Base.insert!(vec::AbstractSmallVector, index::Integer, item) - resize!(vec, length(vec) + 1) - circshift!(smallview(vec, index, lastindex(vec)), 1) - @inbounds vec[index] = item - return vec -end - -# Don't @inline, makes it slower. 
-function StaticArrays.insert(vec::AbstractSmallVector, index::Integer, item) - mvec = thaw(vec) - insert!(mvec, index, item) - return convert(similar_type(vec), mvec) -end - -@inline function Base.deleteat!(vec::AbstractSmallVector, index::Integer) - circshift!(smallview(vec, index, lastindex(vec)), -1) - resize!(vec, length(vec) - 1) - return vec -end - -@inline function Base.deleteat!( - vec::AbstractSmallVector, indices::AbstractUnitRange{<:Integer} -) - f = first(indices) - n = length(indices) - circshift!(smallview(vec, f, lastindex(vec)), -n) - resize!(vec, length(vec) - n) - return vec -end - -# Don't @inline, makes it slower. -function StaticArrays.deleteat( - vec::AbstractSmallVector, index::Union{Integer,AbstractUnitRange{<:Integer}} -) - mvec = thaw(vec) - deleteat!(mvec, index) - return convert(similar_type(vec), mvec) -end - -# InsertionSortAlg -# https://github.com/JuliaLang/julia/blob/bed2cd540a11544ed4be381d471bbf590f0b745e/base/sort.jl#L722-L736 -# https://en.wikipedia.org/wiki/Insertion_sort#:~:text=Insertion%20sort%20is%20a%20simple,%2C%20heapsort%2C%20or%20merge%20sort. -# Alternatively could use `TupleTools.jl` or `StaticArrays.jl` for out-of-place sorting. -@inline function sort!(vec::AbstractSmallVector, order::Base.Sort.Ordering) - lo, hi = firstindex(vec), lastindex(vec) - lo_plus_1 = (lo + 1) - @inbounds for i in lo_plus_1:hi - j = i - x = vec[i] - jmax = j - for _ in jmax:-1:lo_plus_1 - y = vec[j - 1] - if !Base.Sort.lt(order, x, y) - break - end - vec[j] = y - j -= 1 - end - vec[j] = x - end - return vec -end - -@inline function Base.sort!( - vec::AbstractSmallVector; lt=isless, by=identity, rev::Bool=false -) - SmallVectors.sort!(vec, Base.Sort.ord(lt, by, rev)) - return vec -end - -# Don't @inline, makes it slower. -function sort(vec::AbstractSmallVector, order::Base.Sort.Ordering) - mvec = thaw(vec) - SmallVectors.sort!(mvec, order) - return convert(similar_type(vec), mvec) -end - -@inline function Base.sort( - vec::AbstractSmallVector; lt=isless, by=identity, rev::Bool=false -) - return SmallVectors.sort(vec, Base.Sort.ord(lt, by, rev)) -end - -@inline function insertsorted!(vec::AbstractSmallVector, item; kwargs...) - insert!(vec, searchsortedfirst(vec, item; kwargs...), item) - return vec -end - -function insertsorted(vec::AbstractSmallVector, item; kwargs...) - mvec = thaw(vec) - insertsorted!(mvec, item; kwargs...) - return convert(similar_type(vec), mvec) -end - -@inline function insertsortedunique!(vec::AbstractSmallVector, item; kwargs...) - r = searchsorted(vec, item; kwargs...) - if length(r) == 0 - insert!(vec, first(r), item) - end - return vec -end - -# Code repeated since inlining doesn't work. -function insertsortedunique(vec::AbstractSmallVector, item; kwargs...) - r = searchsorted(vec, item; kwargs...) - if length(r) == 0 - vec = insert(vec, first(r), item) - end - return vec -end - -@inline function mergesorted!(vec::AbstractSmallVector, item::AbstractVector; kwargs...) - for x in item - insertsorted!(vec, x; kwargs...) - end - return vec -end - -function mergesorted(vec::AbstractSmallVector, item; kwargs...) - mvec = thaw(vec) - mergesorted!(mvec, item; kwargs...) - return convert(similar_type(vec), mvec) -end - -@inline function mergesortedunique!( - vec::AbstractSmallVector, item::AbstractVector; kwargs... -) - for x in item - insertsortedunique!(vec, x; kwargs...) - end - return vec -end - -# Code repeated since inlining doesn't work. -function mergesortedunique(vec::AbstractSmallVector, item; kwargs...) 
- for x in item - vec = insertsortedunique(vec, x; kwargs...) - end - return vec -end - -Base.@propagate_inbounds function Base.copyto!( - vec::AbstractSmallVector, item::AbstractVector -) - for i in eachindex(item) - vec[i] = item[i] - end - return vec -end - -# Don't @inline, makes it slower. -function Base.circshift(vec::AbstractSmallVector, shift::Integer) - mvec = thaw(vec) - circshift!(mvec, shift) - return convert(similar_type(vec), mvec) -end - -@inline function Base.append!(vec::AbstractSmallVector, item::AbstractVector) - l = length(vec) - r = length(item) - resize!(vec, l + r) - @inbounds copyto!(smallview(vec, l + 1, l + r + 1), item) - return vec -end - -# Missing from `StaticArrays.jl`. -# Don't @inline, makes it slower. -function append(vec::AbstractSmallVector, item::AbstractVector) - mvec = thaw(vec) - append!(mvec, item) - return convert(similar_type(vec), mvec) -end - -@inline function Base.prepend!(vec::AbstractSmallVector, item::AbstractVector) - l = length(vec) - r = length(item) - resize!(vec, l + r) - circshift!(vec, length(item)) - @inbounds copyto!(vec, item) - return vec -end - -# Missing from `StaticArrays.jl`. -# Don't @inline, makes it slower. -function prepend(vec::AbstractSmallVector, item::AbstractVector) - mvec = thaw(vec) - prepend!(mvec, item) - return convert(similar_type(vec), mvec) -end - -# Don't @inline, makes it slower. -function smallvector_vcat(vec1::AbstractSmallVector, vec2::AbstractVector) - mvec1 = thaw(vec1) - append!(mvec1, vec2) - return convert(similar_type(vec1), mvec1) -end - -function Base.vcat(vec1::AbstractSmallVector{<:Number}, vec2::AbstractVector{<:Number}) - return smallvector_vcat(vec1, vec2) -end - -Base.vcat(vec1::AbstractSmallVector, vec2::AbstractVector) = smallvector_vcat(vec1, vec2) diff --git a/NDTensors/src/lib/SmallVectors/src/msmallvector/msmallvector.jl b/NDTensors/src/lib/SmallVectors/src/msmallvector/msmallvector.jl deleted file mode 100644 index 6d45801f93..0000000000 --- a/NDTensors/src/lib/SmallVectors/src/msmallvector/msmallvector.jl +++ /dev/null @@ -1,80 +0,0 @@ -""" -MSmallVector - -TODO: Make `buffer` field `const` (new in Julia 1.8) -""" -mutable struct MSmallVector{S,T} <: AbstractSmallVector{T} - buffer::MVector{S,T} - length::Int -end - -# Constructors -function MSmallVector{S}(buffer::AbstractVector, len::Int) where {S} - return MSmallVector{S,eltype(buffer)}(buffer, len) -end -function MSmallVector(buffer::AbstractVector, len::Int) - return MSmallVector{length(buffer),eltype(buffer)}(buffer, len) -end - -maxlength(::Type{<:MSmallVector{S}}) where {S} = S - -# Empty constructor -(msmallvector_type::Type{MSmallVector{S,T}} where {S,T})() = msmallvector_type(undef, 0) - -""" -`MSmallVector` constructor, uses `MVector` as a buffer. -```julia -MSmallVector{10}([1, 2, 3]) -MSmallVector{10}(SA[1, 2, 3]) -``` -""" -function MSmallVector{S,T}(vec::AbstractVector) where {S,T} - buffer = MVector{S,T}(undef) - copyto!(buffer, vec) - return MSmallVector(buffer, length(vec)) -end - -# Derive the buffer length. 
-MSmallVector(vec::AbstractSmallVector) = MSmallVector{length(buffer(vec))}(vec) - -function MSmallVector{S}(vec::AbstractVector) where {S} - return MSmallVector{S,eltype(vec)}(vec) -end - -function MSmallVector{S,T}(::UndefInitializer, dims::Tuple{Integer}) where {S,T} - return MSmallVector{S,T}(undef, prod(dims)) -end -function MSmallVector{S,T}(::UndefInitializer, length::Integer) where {S,T} - return MSmallVector{S,T}(MVector{S,T}(undef), length) -end - -# Buffer interface -buffer(vec::MSmallVector) = vec.buffer - -# Accessors -Base.size(vec::MSmallVector) = (vec.length,) - -# Required Base overloads -@inline function Base.getindex(vec::MSmallVector, index::Integer) - @boundscheck checkbounds(vec, index) - return @inbounds buffer(vec)[index] -end - -@inline function Base.setindex!(vec::MSmallVector, item, index::Integer) - @boundscheck checkbounds(vec, index) - @inbounds buffer(vec)[index] = item - return vec -end - -@inline function Base.resize!(vec::MSmallVector, len::Integer) - len < 0 && throw(ArgumentError("New length must be ≥ 0.")) - len > maxlength(vec) && - throw(ArgumentError("New length $len must be ≤ the maximum length $(maxlength(vec)).")) - vec.length = len - return vec -end - -# `similar` creates a `MSmallVector` by default. -function Base.similar(vec::AbstractSmallVector, elt::Type, dims::Dims) - return MSmallVector{length(buffer(vec)),elt}(undef, dims) -end diff --git a/NDTensors/src/lib/SmallVectors/src/msmallvector/thawfreeze.jl b/NDTensors/src/lib/SmallVectors/src/msmallvector/thawfreeze.jl deleted file mode 100644 index 563f20dd18..0000000000 --- a/NDTensors/src/lib/SmallVectors/src/msmallvector/thawfreeze.jl +++ /dev/null @@ -1,2 +0,0 @@ -thaw(vec::MSmallVector) = copy(vec) -freeze(vec::MSmallVector) = SmallVector(vec) diff --git a/NDTensors/src/lib/SmallVectors/src/smallvector/insertstyle.jl b/NDTensors/src/lib/SmallVectors/src/smallvector/insertstyle.jl deleted file mode 100644 index 027d1eb356..0000000000 --- a/NDTensors/src/lib/SmallVectors/src/smallvector/insertstyle.jl +++ /dev/null @@ -1 +0,0 @@ -InsertStyle(::Type{<:SmallVector}) = FastCopy() diff --git a/NDTensors/src/lib/SmallVectors/src/smallvector/smallvector.jl b/NDTensors/src/lib/SmallVectors/src/smallvector/smallvector.jl deleted file mode 100644 index 4b99e76cd6..0000000000 --- a/NDTensors/src/lib/SmallVectors/src/smallvector/smallvector.jl +++ /dev/null @@ -1,82 +0,0 @@ -""" -SmallVector -""" -struct SmallVector{S,T} <: AbstractSmallVector{T} - buffer::SVector{S,T} - length::Int -end - -# Accessors -# TODO: Use `Accessors.jl`. -@inline setbuffer(vec::SmallVector, buffer) = SmallVector(buffer, vec.length) -@inline setlength(vec::SmallVector, length) = SmallVector(vec.buffer, length) - -maxlength(::Type{<:SmallVector{S}}) where {S} = S - -# Constructors -function SmallVector{S}(buffer::AbstractVector, len::Int) where {S} - return SmallVector{S,eltype(buffer)}(buffer, len) -end -function SmallVector(buffer::AbstractVector, len::Int) - return SmallVector{length(buffer),eltype(buffer)}(buffer, len) -end - -""" -`SmallVector` constructor, uses `SVector` as buffer storage. -```julia -SmallVector{10}([1, 2, 3]) -SmallVector{10}(SA[1, 2, 3]) -``` -""" -function SmallVector{S,T}(vec::AbstractVector) where {S,T} - # TODO: This is a bit slower, but simpler. Check if this - # gets faster in newer Julia versions. 
- # return SmallVector{S,T}(MSmallVector{S,T}(vec)) - length(vec) > S && error("Data is too long for `SmallVector`.") - msvec = MVector{S,T}(undef) - @inbounds for i in eachindex(vec) - msvec[i] = vec[i] - end - svec = SVector(msvec) - return SmallVector{S,T}(svec, length(vec)) -end -# Special optimization codepath for `MSmallVector` -# to avoid a copy. -function SmallVector{S,T}(vec::MSmallVector) where {S,T} - return SmallVector{S,T}(buffer(vec), length(vec)) -end - -function SmallVector{S}(vec::AbstractVector) where {S} - return SmallVector{S,eltype(vec)}(vec) -end - -# Specialized constructor -function MSmallVector{S,T}(vec::SmallVector) where {S,T} - return MSmallVector{S,T}(buffer(vec), length(vec)) -end - -# Derive the buffer length. -SmallVector(vec::AbstractSmallVector) = SmallVector{length(buffer(vec))}(vec) - -# Empty constructor -(smallvector_type::Type{SmallVector{S,T}} where {S,T})() = smallvector_type(undef, 0) -function SmallVector{S,T}(::UndefInitializer, length::Integer) where {S,T} - return SmallVector{S,T}(SVector{S,T}(MVector{S,T}(undef)), length) -end - -# Buffer interface -buffer(vec::SmallVector) = vec.buffer - -# AbstractArray interface -Base.size(vec::SmallVector) = (vec.length,) - -# Base overloads -@inline function Base.getindex(vec::SmallVector, index::Integer) - @boundscheck checkbounds(vec, index) - return @inbounds buffer(vec)[index] -end - -Base.copy(vec::SmallVector) = vec - -# Optimization, default uses `similar`. -Base.copymutable(vec::SmallVector) = MSmallVector(vec) diff --git a/NDTensors/src/lib/SmallVectors/src/smallvector/thawfreeze.jl b/NDTensors/src/lib/SmallVectors/src/smallvector/thawfreeze.jl deleted file mode 100644 index 077e7d539e..0000000000 --- a/NDTensors/src/lib/SmallVectors/src/smallvector/thawfreeze.jl +++ /dev/null @@ -1,2 +0,0 @@ -thaw(vec::SmallVector) = MSmallVector(vec) -freeze(vec::SmallVector) = vec diff --git a/NDTensors/src/lib/SmallVectors/src/subsmallvector/subsmallvector.jl b/NDTensors/src/lib/SmallVectors/src/subsmallvector/subsmallvector.jl deleted file mode 100644 index 922eaa3521..0000000000 --- a/NDTensors/src/lib/SmallVectors/src/subsmallvector/subsmallvector.jl +++ /dev/null @@ -1,80 +0,0 @@ -abstract type AbstractSubSmallVector{T} <: AbstractSmallVector{T} end - -""" -SubSmallVector -""" -struct SubSmallVector{T,P} <: AbstractSubSmallVector{T} - parent::P - start::Int - stop::Int -end - -mutable struct SubMSmallVector{T,P<:AbstractVector{T}} <: AbstractSubSmallVector{T} - parent::P - start::Int - stop::Int -end - -# TODO: Use Accessors.jl -Base.parent(vec::SubSmallVector) = vec.parent -Base.parent(vec::SubMSmallVector) = vec.parent - -# buffer interface -buffer(vec::AbstractSubSmallVector) = buffer(parent(vec)) - -function smallview(vec::SmallVector, start::Integer, stop::Integer) - return SubSmallVector(vec, start, stop) -end -function smallview(vec::MSmallVector, start::Integer, stop::Integer) - return SubMSmallVector(vec, start, stop) -end - -function smallview(vec::SubSmallVector, start::Integer, stop::Integer) - return SubSmallVector(parent(vec), vec.start + start - 1, vec.start + stop - 1) -end -function smallview(vec::SubMSmallVector, start::Integer, stop::Integer) - return SubMSmallVector(parent(vec), vec.start + start - 1, vec.start + stop - 1) -end - -# Constructors -function SubSmallVector(vec::AbstractVector, start::Integer, stop::Integer) - return SubSmallVector{eltype(vec),typeof(vec)}(vec, start, stop) -end -function SubMSmallVector(vec::AbstractVector, start::Integer, stop::Integer) - return 
SubMSmallVector{eltype(vec),typeof(vec)}(vec, start, stop) -end - -# Accessors -Base.size(vec::AbstractSubSmallVector) = (vec.stop - vec.start + 1,) - -Base.@propagate_inbounds function Base.getindex(vec::AbstractSubSmallVector, index::Integer) - return parent(vec)[index + vec.start - 1] -end - -Base.@propagate_inbounds function Base.setindex!( - vec::AbstractSubSmallVector, item, index::Integer -) - buffer(vec)[index + vec.start - 1] = item - return vec -end - -function SubSmallVector{T,P}(vec::SubMSmallVector) where {T,P} - return SubSmallVector{T,P}(P(parent(vec)), vec.start, vec.stop) -end - -function Base.convert(smalltype::Type{<:SubSmallVector}, vec::SubMSmallVector) - return smalltype(vec) -end - -@inline function Base.resize!(vec::SubMSmallVector, len::Integer) - len < 0 && throw(ArgumentError("New length must be ≥ 0.")) - len > maxlength(vec) - vec.start + 1 && - throw(ArgumentError("New length $len must be ≤ the maximum length $(maxlength(vec)).")) - vec.stop = vec.start + len - 1 - return vec -end - -# Optimization, default uses `similar`. -function Base.copymutable(vec::SubSmallVector) - return SubMSmallVector(Base.copymutable(parent(vec)), vec.start, vec.stop) -end diff --git a/NDTensors/src/lib/SmallVectors/test/runtests.jl b/NDTensors/src/lib/SmallVectors/test/runtests.jl deleted file mode 100644 index 7bf559206f..0000000000 --- a/NDTensors/src/lib/SmallVectors/test/runtests.jl +++ /dev/null @@ -1,158 +0,0 @@ -using NDTensors.SmallVectors -using Test: @inferred, @test, @testset, @test_broken - -using NDTensors.SmallVectors: - setindex, - resize, - push, - pushfirst, - pop, - popfirst, - append, - prepend, - insert, - deleteat, - circshift, - insertsorted, - insertsorted!, - insertsortedunique, - insertsortedunique!, - mergesorted, - mergesorted!, - mergesortedunique, - mergesortedunique! - -function test_smallvectors() - return @testset "SmallVectors" begin - x = SmallVector{10}([1, 3, 5]) - mx = MSmallVector(x) - - @test x isa SmallVector{10,Int} - @test mx isa MSmallVector{10,Int} - @test eltype(x) === Int - @test eltype(mx) === Int - - # TODO: Test construction has zero allocations. - # TODO: Extend construction to arbitrary collections, like tuple. - ## in julia v"1.10" SmallVector(x) does 1 allocation - ## and vcat does 3 allocations (i.e. 
MSmallVector(x) does 3 allocations) - ## On my mac M1 a single allocation is 96B so allow for up to 3 allocations - alloc_size = 96 - nalloc_limit = 4 * alloc_size - - # conversion - @test @inferred(SmallVector(x)) == x - @test @allocated(SmallVector(x)) < nalloc_limit - @test @inferred(SmallVector(mx)) == x - @test @allocated(SmallVector(mx)) < nalloc_limit - - # length - @test @inferred(length(x)) == 3 - @test @allocated(length(x)) == 0 - @test @inferred(length(SmallVectors.buffer(x))) == 10 - @test @allocated(length(SmallVectors.buffer(x))) < nalloc_limit - - item = 115 - no_broken = (false, false, false, false) - for ( - f!, - f, - ans, - args, - nalloc, - f!_impl_broken, - f!_noalloc_broken, - f_impl_broken, - f_noalloc_broken, - ) in [ - (:push!, :push, [1, 3, 5, item], (item,), nalloc_limit, no_broken...), - (:append!, :append, [1, 3, 5, item], ([item],), nalloc_limit, no_broken...), - (:prepend!, :prepend, [item, 1, 3, 5], ([item],), nalloc_limit, no_broken...), - (:pushfirst!, :pushfirst, [item, 1, 3, 5], (item,), nalloc_limit, no_broken...), - (:setindex!, :setindex, [1, item, 5], (item, 2), nalloc_limit, no_broken...), - (:pop!, :pop, [1, 3], (), nalloc_limit, no_broken...), - (:popfirst!, :popfirst, [3, 5], (), nalloc_limit, no_broken...), - (:insert!, :insert, [1, item, 3, 5], (2, item), nalloc_limit, no_broken...), - (:deleteat!, :deleteat, [1, 5], (2,), nalloc_limit, no_broken...), - (:circshift!, :circshift, [5, 1, 3], (1,), nalloc_limit, no_broken...), - (:sort!, :sort, [1, 3, 5], (), nalloc_limit, no_broken...), - (:insertsorted!, :insertsorted, [1, 2, 3, 5], (2,), nalloc_limit, no_broken...), - (:insertsorted!, :insertsorted, [1, 3, 3, 5], (3,), nalloc_limit, no_broken...), - ( - :insertsortedunique!, - :insertsortedunique, - [1, 2, 3, 5], - (2,), - nalloc_limit, - no_broken..., - ), - ( - :insertsortedunique!, - :insertsortedunique, - [1, 3, 5], - (3,), - nalloc_limit, - no_broken..., - ), - (:mergesorted!, :mergesorted, [1, 2, 3, 3, 5], ([2, 3],), nalloc_limit, no_broken...), - ( - :mergesortedunique!, - :mergesortedunique, - [1, 2, 3, 5], - ([2, 3],), - nalloc_limit, - no_broken..., - ), - ] - mx_tmp = copy(mx) - @eval begin - if VERSION < v"1.7" - # broken kwarg wasn't added to @test yet - if $f!_impl_broken - @test_broken @inferred($f!(copy($mx), $args...)) == $ans - else - @test @inferred($f!(copy($mx), $args...)) == $ans - end - if $f!_noalloc_broken - @test_broken @allocated($f!($mx_tmp, $args...)) ≤ $nalloc - else - @test @allocated($f!($mx_tmp, $args...)) ≤ $nalloc - end - if $f_impl_broken - @test_broken @inferred($f($x, $args...)) == $ans - else - @test @inferred($f($x, $args...)) == $ans - end - if $f_noalloc_broken - @test_broken @allocated($f($x, $args...)) ≤ $nalloc - else - @test @allocated($f($x, $args...)) ≤ $nalloc - end - else - @test @inferred($f!(copy($mx), $args...)) == $ans broken = $f!_impl_broken - @test @allocated($f!($mx_tmp, $args...)) ≤ $nalloc broken = $f!_noalloc_broken - @test @inferred($f($x, $args...)) == $ans broken = $f_impl_broken - @test @allocated($f($x, $args...)) ≤ $nalloc broken = $f_noalloc_broken - end - end - end - - # Separated out since for some reason it breaks the `@inferred` - # check when `kwargs` are interpolated into `@eval`. 
- ans, kwargs = [5, 3, 1], (; rev=true) - mx_tmp = copy(mx) - @test @inferred(sort!(copy(mx); kwargs...)) == ans - @test @allocated(sort!(mx_tmp; kwargs...)) == 0 - @test @inferred(sort(x; kwargs...)) == ans - @test @allocated(sort(x; kwargs...)) ≤ nalloc_limit - - ans, args = [1, 3, 5, item], ([item],) - @test @inferred(vcat(x, args...)) == ans - @test @allocated(vcat(x, args...)) ≤ nalloc_limit - end -end - -# TODO: switch to: -# @testset "SmallVectors" test_smallvectors() -# (new in Julia 1.9) -test_smallvectors() diff --git a/NDTensors/src/lib/SortedSets/.JuliaFormatter.toml b/NDTensors/src/lib/SortedSets/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/SortedSets/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/SortedSets/src/BaseExt/sorted.jl b/NDTensors/src/lib/SortedSets/src/BaseExt/sorted.jl deleted file mode 100644 index 54a2873765..0000000000 --- a/NDTensors/src/lib/SortedSets/src/BaseExt/sorted.jl +++ /dev/null @@ -1,54 +0,0 @@ -# TODO: -# Add ` -# Version that uses an `Ordering`. -function _insorted( - x, - v::AbstractVector; - lt=isless, - by=identity, - rev::Union{Bool,Nothing}=nothing, - order::Ordering=Forward, -) - return _insorted(x, v, ord(lt, by, rev, order)) -end -_insorted(x, v::AbstractVector, o::Ordering) = !isempty(searchsorted(v, x, o)) - -function alluniquesorted( - vec; lt=isless, by=identity, rev::Bool=false, order::Ordering=Forward -) - return alluniquesorted(vec, ord(lt, by, rev, order)) -end - -function alluniquesorted(vec, order::Ordering) - length(vec) < 2 && return true - iter = eachindex(vec) - I = iterate(iter) - while I !== nothing - i, s = I - J = iterate(iter, s) - isnothing(J) && return true - j, _ = J - !lt(order, @inbounds(vec[i]), @inbounds(vec[j])) && return false - I = J - end - return true -end - -function uniquesorted(vec; lt=isless, by=identity, rev::Bool=false, order::Ordering=Forward) - return uniquesorted(vec, ord(lt, by, rev, order)) -end - -function uniquesorted(vec::AbstractVector, order::Ordering) - mvec = thaw(vec) - i = firstindex(mvec) - stopi = lastindex(mvec) - while i < stopi - if !lt(order, @inbounds(mvec[i]), @inbounds(mvec[i + 1])) - deleteat!(mvec, i) - stopi -= 1 - else - i += 1 - end - end - return freeze(mvec) -end diff --git a/NDTensors/src/lib/SortedSets/src/DictionariesExt/insert.jl b/NDTensors/src/lib/SortedSets/src/DictionariesExt/insert.jl deleted file mode 100644 index 847f312208..0000000000 --- a/NDTensors/src/lib/SortedSets/src/DictionariesExt/insert.jl +++ /dev/null @@ -1,35 +0,0 @@ -SmallVectors.insert(inds::AbstractIndices, i) = insert(InsertStyle(inds), inds, i) - -function SmallVectors.insert(::InsertStyle, inds::AbstractIndices, i) - return error("Not implemented") -end - -function SmallVectors.insert(::IsInsertable, inds::AbstractIndices, i) - inds = copy(inds) - insert!(inds, i) - return inds -end - -function SmallVectors.insert(::FastCopy, inds::AbstractIndices, i) - minds = thaw(inds) - insert!(minds, i) - return freeze(minds) -end - -SmallVectors.delete(inds::AbstractIndices, i) = delete(InsertStyle(inds), inds, i) - -function SmallVectors.delete(::InsertStyle, inds::AbstractIndices, i) - return error("Not implemented") -end - -function SmallVectors.delete(::IsInsertable, inds::AbstractIndices, i) - inds = copy(inds) - delete!(inds, i) - return inds -end - -function SmallVectors.delete(::FastCopy, inds::AbstractIndices, i) - minds = thaw(inds) - delete!(minds, i) - return freeze(minds) -end 
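The `InsertStyle`-dispatched methods above give any `AbstractIndices` an out-of-place update path: an insertable set is copied and mutated, while an immutable one is thawed, mutated, and frozen again. A minimal usage sketch, assuming the `SortedSet` and `SmallVectors.insert`/`SmallVectors.delete` APIs defined in this library:

```julia
using NDTensors.SortedSets: SortedSet
using NDTensors.SmallVectors: SmallVectors

s = SortedSet([1, 3, 5])
s2 = SmallVectors.insert(s, 4)  # out-of-place: SortedSet([1, 3, 4, 5])
s3 = SmallVectors.delete(s2, 1) # out-of-place: SortedSet([3, 4, 5])
# `s` itself is never mutated by either call.
```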
diff --git a/NDTensors/src/lib/SortedSets/src/DictionariesExt/isinsertable.jl b/NDTensors/src/lib/SortedSets/src/DictionariesExt/isinsertable.jl deleted file mode 100644 index 6c9599ce39..0000000000 --- a/NDTensors/src/lib/SortedSets/src/DictionariesExt/isinsertable.jl +++ /dev/null @@ -1 +0,0 @@ -Dictionaries.isinsertable(::AbstractArray) = true diff --git a/NDTensors/src/lib/SortedSets/src/SmallVectorsDictionariesExt/interface.jl b/NDTensors/src/lib/SortedSets/src/SmallVectorsDictionariesExt/interface.jl deleted file mode 100644 index 20dca8d56f..0000000000 --- a/NDTensors/src/lib/SortedSets/src/SmallVectorsDictionariesExt/interface.jl +++ /dev/null @@ -1,3 +0,0 @@ -Dictionaries.isinsertable(::AbstractSmallVector) = true -Dictionaries.isinsertable(::SmallVector) = false -Dictionaries.empty_type(::Type{SmallVector{S,T}}, ::Type{T}) where {S,T} = MSmallVector{S,T} diff --git a/NDTensors/src/lib/SortedSets/src/SortedSets.jl b/NDTensors/src/lib/SortedSets/src/SortedSets.jl deleted file mode 100644 index 777407f3c5..0000000000 --- a/NDTensors/src/lib/SortedSets/src/SortedSets.jl +++ /dev/null @@ -1,21 +0,0 @@ -module SortedSets -using Compat -using Dictionaries -using Random -using ..SmallVectors - -using Base: @propagate_inbounds -using Base.Order: Ordering, Forward, ord, lt - -export AbstractWrappedSet, SortedSet, SmallSet, MSmallSet - -include("BaseExt/sorted.jl") -include("DictionariesExt/insert.jl") -include("DictionariesExt/isinsertable.jl") -include("abstractset.jl") -include("abstractwrappedset.jl") -include("SmallVectorsDictionariesExt/interface.jl") -include("sortedset.jl") -include("SortedSetsSmallVectorsExt/smallset.jl") - -end diff --git a/NDTensors/src/lib/SortedSets/src/SortedSetsSmallVectorsExt/smallset.jl b/NDTensors/src/lib/SortedSets/src/SortedSetsSmallVectorsExt/smallset.jl deleted file mode 100644 index 8c70f8fe60..0000000000 --- a/NDTensors/src/lib/SortedSets/src/SortedSetsSmallVectorsExt/smallset.jl +++ /dev/null @@ -1,16 +0,0 @@ -const AbstractSmallSet{T} = SortedSet{T,<:AbstractSmallVector{T}} -const SmallSet{S,T} = SortedSet{T,SmallVector{S,T}} -const MSmallSet{S,T} = SortedSet{T,MSmallVector{S,T}} - -# Specialized constructors -@propagate_inbounds SmallSet{S}(; kwargs...) where {S} = SmallSet{S}([]; kwargs...) -@propagate_inbounds SmallSet{S}(iter; kwargs...) where {S} = - SmallSet{S}(collect(iter); kwargs...) -@propagate_inbounds SmallSet{S}(a::AbstractArray{I}; kwargs...) where {S,I} = - SmallSet{S,I}(a; kwargs...) - -@propagate_inbounds MSmallSet{S}(; kwargs...) where {S} = MSmallSet{S}([]; kwargs...) -@propagate_inbounds MSmallSet{S}(iter; kwargs...) where {S} = - MSmallSet{S}(collect(iter); kwargs...) -@propagate_inbounds MSmallSet{S}(a::AbstractArray{I}; kwargs...) where {S,I} = - MSmallSet{S,I}(a; kwargs...) diff --git a/NDTensors/src/lib/SortedSets/src/abstractset.jl b/NDTensors/src/lib/SortedSets/src/abstractset.jl deleted file mode 100644 index 57ccb90619..0000000000 --- a/NDTensors/src/lib/SortedSets/src/abstractset.jl +++ /dev/null @@ -1,88 +0,0 @@ -abstract type AbstractSet{T} <: AbstractIndices{T} end - -# Specialized versions of set operations for `AbstractSet` -# that allow more specialization. 
- -function Base.union(i::AbstractSet, itr) - return union(InsertStyle(i), i, itr) -end - -function Base.union(::InsertStyle, i::AbstractSet, itr) - return error("Not implemented") -end - -function Base.union(::IsInsertable, i::AbstractSet, itr) - out = copy(i) - union!(out, itr) - return out -end - -function Base.union(::NotInsertable, i::AbstractSet, itr) - out = empty(i) - union!(out, i) - union!(out, itr) - return out -end - -function Base.intersect(i::AbstractSet, itr) - return intersect(InsertStyle(i), i, itr) -end - -function Base.intersect(::InsertStyle, i::AbstractSet, itr) - return error("Not implemented") -end - -function Base.intersect(::IsInsertable, i::AbstractSet, itr) - out = copy(i) - intersect!(out, itr) - return out -end - -function Base.intersect(::NotInsertable, i::AbstractSet, itr) - out = empty(i) - union!(out, i) - intersect!(out, itr) - return out -end - -function Base.setdiff(i::AbstractSet, itr) - return setdiff(InsertStyle(i), i, itr) -end - -function Base.setdiff(::InsertStyle, i::AbstractSet, itr) - return error("Not implemented") -end - -function Base.setdiff(::IsInsertable, i::AbstractSet, itr) - out = copy(i) - setdiff!(out, itr) - return out -end - -function Base.setdiff(::NotInsertable, i::AbstractSet, itr) - out = empty(i) - union!(out, i) - setdiff!(out, itr) - return out -end - -function Base.symdiff(i::AbstractSet, itr) - return symdiff(InsertStyle(i), i, itr) -end - -function Base.symdiff(::InsertStyle, i::AbstractSet, itr) - return error("Not implemented") -end - -function Base.symdiff(::IsInsertable, i::AbstractSet, itr) - out = copy(i) - symdiff!(out, itr) - return out -end - -function Base.symdiff(::NotInsertable, i::AbstractSet, itr) - out = empty(i) - union!(out, i) - symdiff!(out, itr) - return out -end diff --git a/NDTensors/src/lib/SortedSets/src/abstractwrappedset.jl b/NDTensors/src/lib/SortedSets/src/abstractwrappedset.jl deleted file mode 100644 index e1a1094ff8..0000000000 --- a/NDTensors/src/lib/SortedSets/src/abstractwrappedset.jl +++ /dev/null @@ -1,117 +0,0 @@ -# AbstractWrappedSet: a wrapper around an `AbstractIndices` -# with methods automatically forwarded via `parent` -# and rewrapped via `rewrap`. -abstract type AbstractWrappedSet{T,D} <: AbstractIndices{T} end - -# Required interface -Base.parent(set::AbstractWrappedSet) = error("Not implemented") -function Dictionaries.empty_type(::Type{AbstractWrappedSet{I}}, ::Type{I}) where {I} - return error("Not implemented") -end -rewrap(::AbstractWrappedSet, data) = error("Not implemented") - -SmallVectors.thaw(set::AbstractWrappedSet) = rewrap(set, thaw(parent(set))) -SmallVectors.freeze(set::AbstractWrappedSet) = rewrap(set, freeze(parent(set))) - -# Traits -SmallVectors.InsertStyle(::Type{<:AbstractWrappedSet{T,D}}) where {T,D} = InsertStyle(D) - -# AbstractSet interface -@propagate_inbounds function Base.iterate(set::AbstractWrappedSet, state...) - return iterate(parent(set), state...) -end - -# `I` is needed to avoid ambiguity error. -@inline Base.in(item::I, set::AbstractWrappedSet{I}) where {I} = in(item, parent(set)) -@inline Base.IteratorSize(set::AbstractWrappedSet) = Base.IteratorSize(parent(set)) -@inline Base.length(set::AbstractWrappedSet) = length(parent(set)) - -@inline Dictionaries.istokenizable(set::AbstractWrappedSet) = istokenizable(parent(set)) -@inline Dictionaries.tokentype(set::AbstractWrappedSet) = tokentype(parent(set)) -@inline Dictionaries.iteratetoken(set::AbstractWrappedSet, s...) = - iterate(parent(set), s...) 
-@inline function Dictionaries.iteratetoken_reverse(set::AbstractWrappedSet)
-  return iteratetoken_reverse(parent(set))
-end
-@inline function Dictionaries.iteratetoken_reverse(set::AbstractWrappedSet, t)
-  return iteratetoken_reverse(parent(set), t)
-end
-
-@inline function Dictionaries.gettoken(set::AbstractWrappedSet, i)
-  return gettoken(parent(set), i)
-end
-@propagate_inbounds Dictionaries.gettokenvalue(set::AbstractWrappedSet, x) =
-  gettokenvalue(parent(set), x)
-
-@inline Dictionaries.isinsertable(set::AbstractWrappedSet) = isinsertable(parent(set))
-
-# Specify `I` to fix ambiguity error.
-@inline function Dictionaries.gettoken!(
-  set::AbstractWrappedSet{I}, i::I, values=()
-) where {I}
-  return gettoken!(parent(set), i, values)
-end
-
-@inline function Dictionaries.deletetoken!(set::AbstractWrappedSet, x, values=())
-  deletetoken!(parent(set), x, values)
-  return set
-end
-
-@inline function Base.empty!(set::AbstractWrappedSet, values=())
-  empty!(parent(set))
-  return set
-end
-
-# Not defined to be part of the `AbstractIndices` interface,
-# but seems to be needed.
-@inline function Base.filter!(pred, set::AbstractWrappedSet)
-  filter!(pred, parent(set))
-  return set
-end
-
-# TODO: Maybe require an implementation?
-@inline function Base.copy(set::AbstractWrappedSet, eltype::Type)
-  return typeof(set)(copy(parent(set), eltype))
-end
-
-# Not required for the `AbstractIndices` interface, but
-# helps with faster code paths.
-SmallVectors.insert(set::AbstractWrappedSet, item) = rewrap(set, insert(parent(set), item))
-function Base.insert!(set::AbstractWrappedSet, item)
-  insert!(parent(set), item)
-  return set
-end
-
-SmallVectors.delete(set::AbstractWrappedSet, item) = rewrap(set, delete(parent(set), item))
-function Base.delete!(set::AbstractWrappedSet, item)
-  delete!(parent(set), item)
-  return set
-end
-
-function Base.union(set1::AbstractWrappedSet, set2::AbstractWrappedSet)
-  return rewrap(set1, union(parent(set1), parent(set2)))
-end
-function Base.union(set1::AbstractWrappedSet, set2)
-  return rewrap(set1, union(parent(set1), set2))
-end
-
-function Base.intersect(set1::AbstractWrappedSet, set2::AbstractWrappedSet)
-  return rewrap(set1, intersect(parent(set1), parent(set2)))
-end
-function Base.intersect(set1::AbstractWrappedSet, set2)
-  return rewrap(set1, intersect(parent(set1), set2))
-end
-
-function Base.setdiff(set1::AbstractWrappedSet, set2::AbstractWrappedSet)
-  return rewrap(set1, setdiff(parent(set1), parent(set2)))
-end
-function Base.setdiff(set1::AbstractWrappedSet, set2)
-  return rewrap(set1, setdiff(parent(set1), set2))
-end
-
-function Base.symdiff(set1::AbstractWrappedSet, set2::AbstractWrappedSet)
-  return rewrap(set1, symdiff(parent(set1), parent(set2)))
-end
-function Base.symdiff(set1::AbstractWrappedSet, set2)
-  return rewrap(set1, symdiff(parent(set1), set2))
-end
diff --git a/NDTensors/src/lib/SortedSets/src/sortedset.jl b/NDTensors/src/lib/SortedSets/src/sortedset.jl
deleted file mode 100644
index dff1b441a0..0000000000
--- a/NDTensors/src/lib/SortedSets/src/sortedset.jl
+++ /dev/null
@@ -1,301 +0,0 @@
-"""
-    SortedSet(iter)
-
-Construct a `SortedSet <: AbstractSet` from an arbitrary Julia iterable with unique
-elements. Lookup takes advantage of the fact that the elements are sorted.
-
-`SortedSet` can be faster than `ArrayIndices`, whose naive search may only be optimal for
-small collections. Larger collections are better handled by hash-based containers like `Indices`.
-""" -struct SortedSet{T,Data<:AbstractArray{T},Order<:Ordering} <: AbstractSet{T} - data::Data - order::Order - global @inline _SortedSet( - data::Data, order::Order - ) where {T,Data<:AbstractArray{T},Order<:Ordering} = new{T,Data,Order}(data, order) -end - -@inline Base.parent(set::SortedSet) = getfield(set, :data) -@inline order(set::SortedSet) = getfield(set, :order) - -# Dictionaries.jl interface -const SortedIndices = SortedSet - -# Inner constructor. -# Sorts and makes unique as needed. -function SortedSet{T,Data,Order}( - a::Data, order::Order -) where {T,Data<:AbstractArray{T},Order<:Ordering} - if !issorted(a, order) - a = SmallVectors.sort(a, order) - end - if !alluniquesorted(a, order) - a = uniquesorted(a, order) - end - return _SortedSet(a, order) -end - -@inline function SortedSet{T,Data,Order}( - a::AbstractArray, order::Ordering -) where {T,Data<:AbstractArray{T},Order<:Ordering} - return SortedSet{T,Data,Order}(convert(Data, a), convert(Order, order)) -end - -@inline function SortedSet{T,Data}( - a::AbstractArray, order::Order -) where {T,Data<:AbstractArray{T},Order<:Ordering} - return SortedSet{T,Data,Order}(a, order) -end - -@inline function SortedSet(a::Data, order::Ordering) where {T,Data<:AbstractArray{T}} - return SortedSet{T,Data}(a, order) -end - -# Accept other inputs like `Tuple`. -@inline function SortedSet(itr, order::Ordering) - return SortedSet(collect(itr), order) -end - -@inline function SortedSet{T,Data}( - a::Data; lt=isless, by=identity, rev::Bool=false -) where {T,Data<:AbstractArray{T}} - return SortedSet{T,Data}(a, ord(lt, by, rev)) -end - -# Traits -@inline SmallVectors.InsertStyle(::Type{<:SortedSet{T,Data}}) where {T,Data} = - InsertStyle(Data) -@inline SmallVectors.thaw(set::SortedSet) = SortedSet(thaw(parent(set)), order(set)) -@inline SmallVectors.freeze(set::SortedSet) = SortedSet(freeze(parent(set)), order(set)) - -@propagate_inbounds SortedSet(; kwargs...) = SortedSet{Any}([]; kwargs...) -@propagate_inbounds SortedSet{T}(; kwargs...) where {T} = - SortedSet{T,Vector{T}}(T[]; kwargs...) -@propagate_inbounds SortedSet{T,Data}(; kwargs...) where {T,Data} = - SortedSet{T}(Data(); kwargs...) - -@propagate_inbounds SortedSet(iter; kwargs...) = SortedSet(collect(iter); kwargs...) -@propagate_inbounds SortedSet{T}(iter; kwargs...) where {T} = - SortedSet{T}(collect(T, iter); kwargs...) - -@propagate_inbounds SortedSet(a::AbstractArray{T}; kwargs...) where {T} = - SortedSet{T}(a; kwargs...) -@propagate_inbounds SortedSet{T}(a::AbstractArray{T}; kwargs...) where {T} = - SortedSet{T,typeof(a)}(a; kwargs...) - -@propagate_inbounds SortedSet{T,Data}( - a::AbstractArray; kwargs... -) where {T,Data<:AbstractArray{T}} = SortedSet{T,Data}(Data(a); kwargs...) 
- -function Base.convert(::Type{AbstractIndices{T}}, set::SortedSet) where {T} - return convert(SortedSet{T}, set) -end -function Base.convert(::Type{SortedSet}, set::AbstractIndices{T}) where {T} - return convert(SortedSet{T}, set) -end -function Base.convert(::Type{SortedSet{T}}, set::AbstractIndices) where {T} - return convert(SortedSet{T,Vector{T}}, set) -end -function Base.convert( - ::Type{SortedSet{T,Data}}, set::AbstractIndices -) where {T,Data<:AbstractArray{T}} - a = convert(Data, collect(T, set)) - return @inbounds SortedSet{T,typeof(a)}(a) -end - -Base.convert(::Type{SortedSet{T}}, set::SortedSet{T}) where {T} = set -function Base.convert( - ::Type{SortedSet{T}}, set::SortedSet{<:Any,Data} -) where {T,Data<:AbstractArray{T}} - return convert(SortedSet{T,Data}, set) -end -function Base.convert( - ::Type{SortedSet{T,Data}}, set::SortedSet{T,Data} -) where {T,Data<:AbstractArray{T}} - return set -end -function Base.convert( - ::Type{SortedSet{T,Data}}, set::SortedSet -) where {T,Data<:AbstractArray{T}} - a = convert(Data, parent(set)) - return @inbounds SortedSet{T,Data}(a) -end - -# Basic interface -@propagate_inbounds function Base.iterate(set::SortedSet{T}, state...) where {T} - return iterate(parent(set), state...) -end - -@inline function Base.in(i::T, set::SortedSet{T}) where {T} - return _insorted(i, parent(set), order(set)) -end -@inline Base.IteratorSize(::SortedSet) = Base.HasLength() -@inline Base.length(set::SortedSet) = length(parent(set)) - -function Base.:(==)(set1::SortedSet, set2::SortedSet) - if length(set1) ≠ length(set2) - return false - end - for (j1, j2) in zip(set1, set2) - if j1 ≠ j2 - return false - end - end - return true -end - -function Base.issetequal(set1::SortedSet, set2::SortedSet) - if length(set1) ≠ length(set2) - return false - end - if order(set1) ≠ order(set2) - # TODO: Make sure this actually sorts! - set2 = SortedSet(parent(set2), order(set1)) - end - for (j1, j2) in zip(set1, set2) - if lt(order(set1), j1, j2) || lt(order(set1), j2, j1) - return false - end - end - return true -end - -@inline Dictionaries.istokenizable(::SortedSet) = true -@inline Dictionaries.tokentype(::SortedSet) = Int -@inline Dictionaries.iteratetoken(set::SortedSet, s...) = - iterate(LinearIndices(parent(set)), s...) 
-@inline function Dictionaries.iteratetoken_reverse(set::SortedSet)
-  li = LinearIndices(parent(set))
-  if isempty(li)
-    return nothing
-  else
-    t = last(li)
-    return (t, t)
-  end
-end
-@inline function Dictionaries.iteratetoken_reverse(set::SortedSet, t)
-  li = LinearIndices(parent(set))
-  t -= 1
-  if t < first(li)
-    return nothing
-  else
-    return (t, t)
-  end
-end
-
-@inline function Dictionaries.gettoken(set::SortedSet, i)
-  a = parent(set)
-  r = searchsorted(a, i, order(set))
-  @assert 0 ≤ length(r) ≤ 1 # If > 1, the elements are not unique.
-  length(r) == 0 && return (false, 0)
-  return (true, convert(Int, only(r)))
-end
-@propagate_inbounds Dictionaries.gettokenvalue(set::SortedSet, x::Int) = parent(set)[x]
-
-@inline Dictionaries.isinsertable(set::SortedSet) = isinsertable(parent(set))
-
-@inline function Dictionaries.gettoken!(set::SortedSet{T}, i::T, values=()) where {T}
-  a = parent(set)
-  r = searchsorted(a, i, order(set))
-  @assert 0 ≤ length(r) ≤ 1 # If > 1, the elements are not unique.
-  if length(r) == 0
-    insert!(a, first(r), i)
-    foreach(v -> resize!(v, length(v) + 1), values)
-    # The new element is inserted at position `first(r)`.
-    return (false, convert(Int, first(r)))
-  end
-  return (true, convert(Int, only(r)))
-end
-
-@inline function Dictionaries.deletetoken!(set::SortedSet, x::Int, values=())
-  deleteat!(parent(set), x)
-  foreach(v -> deleteat!(v, x), values)
-  return set
-end
-
-@inline function Base.empty!(set::SortedSet, values=())
-  empty!(parent(set))
-  foreach(empty!, values)
-  return set
-end
-
-# TODO: Make into `MSmallVector`?
-# More generally, make a `thaw(::AbstractArray)` function to return
-# a mutable version of an AbstractArray.
-@inline Dictionaries.empty_type(::Type{SortedSet{T,D,Order}}, ::Type{T}) where {T,D,Order} =
-  SortedSet{T,Dictionaries.empty_type(D, T),Order}
-
-@inline Dictionaries.empty_type(::Type{<:AbstractVector}, ::Type{T}) where {T} = Vector{T}
-
-function Base.empty(set::SortedSet{T,D}, ::Type{T}) where {T,D}
-  return Dictionaries.empty_type(typeof(set), T)(D(), order(set))
-end
-
-@inline function Base.copy(set::SortedSet, ::Type{T}) where {T}
-  if T === eltype(set)
-    SortedSet(copy(parent(set)), order(set))
-  else
-    SortedSet(convert(AbstractArray{T}, parent(set)), order(set))
-  end
-end
-
-# TODO: Can this take advantage of sorting?
-@inline function Base.filter!(pred, set::SortedSet)
-  filter!(pred, parent(set))
-  return set
-end
-
-function Dictionaries.randtoken(rng::Random.AbstractRNG, set::SortedSet)
-  return rand(rng, keys(parent(set)))
-end
-
-@inline function Base.sort!(set::SortedSet; lt=isless, by=identity, rev::Bool=false)
-  @assert Base.Sort.ord(lt, by, rev) == order(set)
-  # No-op, should be sorted already.
-  return set
-end
-
-# Custom faster operations (not required for the interface).
-function Base.union!(set::SortedSet, items::SortedSet)
-  if order(set) ≠ order(items)
-    # Reorder `items` if the orderings are different.
-    items = SortedSet(parent(items), order(set))
-  end
-  unionsortedunique!(parent(set), parent(items), order(set))
-  return set
-end
-
-function Base.union(set::SortedSet, items::SortedSet)
-  if order(set) ≠ order(items)
-    # Reorder `items` if the orderings are different.
-    items = SortedSet(parent(items), order(set))
-  end
-  out = unionsortedunique(parent(set), parent(items), order(set))
-  return SortedSet(out, order(set))
-end
-
-function Base.union(set::SortedSet, items)
-  return union(set, SortedSet(items, order(set)))
-end
-
-function Base.intersect(set::SortedSet, items::SortedSet)
-  # TODO: Make an `intersectsortedunique`.
-  return intersect(NotInsertable(), set, items)
-end
-
-function Base.setdiff(set::SortedSet, items)
-  return setdiff(set, SortedSet(items, order(set)))
-end
-
-function Base.setdiff(set::SortedSet, items::SortedSet)
-  # TODO: Make a `setdiffsortedunique`.
-  return setdiff(NotInsertable(), set, items)
-end
-
-function Base.symdiff(set::SortedSet, items)
-  return symdiff(set, SortedSet(items, order(set)))
-end
-
-function Base.symdiff(set::SortedSet, items::SortedSet)
-  # TODO: Make a `symdiffsortedunique`.
-  return symdiff(NotInsertable(), set, items)
-end
diff --git a/NDTensors/src/lib/SortedSets/test/runtests.jl b/NDTensors/src/lib/SortedSets/test/runtests.jl
deleted file mode 100644
index 882fa15e18..0000000000
--- a/NDTensors/src/lib/SortedSets/test/runtests.jl
+++ /dev/null
@@ -1,42 +0,0 @@
-@eval module $(gensym())
-using Test: @test, @testset
-using NDTensors.SortedSets
-using NDTensors.SmallVectors
-
-@testset "Test NDTensors.SortedSets" begin
-  @testset "Basic operations" begin
-    for V in (Vector, MSmallVector{10}, SmallVector{10})
-      for by in (+, -)
-        s1 = SortedSet(V([1, 5, 3]); by)
-        s2 = SortedSet(V([2, 3, 6]); by)
-
-        @test thaw(s1) == s1
-        @test SmallVectors.insert(s1, 2) isa typeof(s1)
-        @test SmallVectors.insert(s1, 2) == SortedSet([1, 2, 3, 5]; by)
-        @test SmallVectors.delete(s1, 3) isa typeof(s1)
-        @test SmallVectors.delete(s1, 3) == SortedSet([1, 5]; by)
-
-        # Set interface
-        @test union(s1, s2) == SortedSet([1, 2, 3, 5, 6]; by)
-        @test union(s1, [3]) == s1
-        @test setdiff(s1, s2) == SortedSet([1, 5]; by)
-        @test symdiff(s1, s2) == SortedSet([1, 2, 5, 6]; by)
-        @test intersect(s1, s2) == SortedSet([3]; by)
-        if SmallVectors.InsertStyle(V) isa IsInsertable
-          @test insert!(copy(s1), 4) == SortedSet([1, 3, 4, 5]; by)
-          @test delete!(copy(s1), 3) == SortedSet([1, 5]; by)
-        end
-      end
-    end
-  end
-  @testset "Replacement behavior" begin
-    s1 = SortedSet([("a", 3), ("b", 2)]; by=first)
-    s2 = SortedSet([("a", 5)]; by=first)
-    s = union(s1, s2)
-    @test s ≠ s1
-    @test issetequal(s, s1)
-    @test ("a", 5) ∈ parent(s)
-    @test ("a", 3) ∉ parent(s)
-  end
-end
-end
diff --git a/NDTensors/src/lib/SparseArraysBase/.JuliaFormatter.toml b/NDTensors/src/lib/SparseArraysBase/.JuliaFormatter.toml
deleted file mode 100644
index 08f664cdb9..0000000000
--- a/NDTensors/src/lib/SparseArraysBase/.JuliaFormatter.toml
+++ /dev/null
@@ -1,2 +0,0 @@
-style = "blue"
-indent = 2
diff --git a/NDTensors/src/lib/SparseArraysBase/README.md b/NDTensors/src/lib/SparseArraysBase/README.md
deleted file mode 100644
index 629cc07e54..0000000000
--- a/NDTensors/src/lib/SparseArraysBase/README.md
+++ /dev/null
@@ -1,117 +0,0 @@
-# SparseArraysBase
-
-SparseArraysBase is a package that aims to expand on the sparse array functionality currently available in the Julia ecosystem.
-While SparseArrays.jl is centered mostly around `SparseMatrixCSC` and the SuiteSparse library, here we broaden the scope and consider generic sparse arrays.
-Abstractly, the mental model is a storage object that holds the stored values, together with a bijection between the array indices and the indices of the storage.
-For now, we focus on providing efficient implementations of Dictionary of Keys (DOK) style sparse storage formats, but may expand upon this in the future.
-As a result, for typical linear algebra routines, we still expect `SparseMatrixCSC` to be the object of choice.
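To make the storage-plus-bijection mental model concrete, here is a sketch of a diagonal matrix expressed through the three interface functions introduced in the tables below. The type and its field name are hypothetical, purely for illustration:

```julia
# Storage is the vector of diagonal entries; the bijection maps storage
# index `i` to array index `(i, i)`.
struct DiagonalMatrix{T} <: AbstractMatrix{T}
  diag::Vector{T}
end
Base.size(a::DiagonalMatrix) = (length(a.diag), length(a.diag))

sparse_storage(a::DiagonalMatrix) = a.diag
storage_index_to_index(a::DiagonalMatrix, I) = CartesianIndex(I, I)
function index_to_storage_index(a::DiagonalMatrix, I::CartesianIndex{2})
  i, j = Tuple(I)
  # Off-diagonal entries have no storage index.
  return i == j ? i : nothing
end
```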
-
-The design consists of roughly three components:
-- `AbstractSparseArray` interface functions
-- Overloaded Julia base methods
-- `SparseArrayDOK` struct that implements this
-
-## AbstractSparseArray
-
-The first part consists of typical functions that are useful in the context of sparse arrays.
-The minimal interface, which enables the usage of the rest of this package, consists of the following functions:
-
-| Signature | Description | Default |
-|-----------|-------------|---------|
-| `sparse_storage(a::AbstractArray)` | Returns the storage object of the sparse array | `a` |
-| `storage_index_to_index(a::AbstractArray, I)` | Converts a storage index to an array index | `I` |
-| `index_to_storage_index(a::AbstractArray, I)` | Converts an array index to a storage index | `I` |
-
-Using these primitives, several convenience functions are defined to facilitate the writing of sparse array algorithms.
-
-| Signature | Description | Default |
-|-----------|-------------|---------|
-| `storage_indices(a)` | Returns the indices of the storage | `eachindex(sparse_storage(a))` |
-| `stored_indices(a)` | Returns the indices of the stored values | `Iterators.map(Base.Fix1(storage_index_to_index, a), storage_indices(a))` |
-| `stored_length(a)` | Returns the number of stored values | `length(storage_indices(a))` |
-
-
-Notably, the design makes it possible to define sparse arrays without having to subtype `AbstractSparseArray`.
-To achieve this, each function `f` is defined in terms of `sparse_f`, rather than directly overloading `f`.
-
-
-## Overloaded Julia base methods
-
-The second part consists of overloading Julia base methods to work with sparse arrays.
-In particular, specialised implementations exist for the following functions, which are used to implement the corresponding `Base` methods:
-
-- `sparse_similar`
-- `sparse_reduce`
-- `sparse_map`
-- `sparse_map!`
-- `sparse_all`
-- `sparse_any`
-- `sparse_isequal`
-- `sparse_fill!`
-- `sparse_zero`, `sparse_zero!`, `sparse_iszero`
-- `sparse_one`, `sparse_one!`, `sparse_isone`
-- `sparse_reshape`, `sparse_reshape!`
-- `sparse_cat`, `sparse_cat!`
-- `sparse_copy!`, `sparse_copyto!`
-- `sparse_permutedims`, `sparse_permutedims!`
-- `sparse_mul!`, `sparse_dot`
-
-## SparseArrayDOK
-
-Finally, the `SparseArrayDOK` struct is provided as a concrete implementation of the `AbstractSparseArray` interface.
-It is a dictionary of keys (DOK) type sparse array, which stores the values in a `Dictionaries.jl` dictionary and maps the array indices to the keys of that dictionary.
-This model is particularly useful for sparse arrays with a small number of non-zero elements, or for arrays that are constructed incrementally, since it offers fast random access and insertion.
-The drawback is that sequential iteration is slower than for other sparse array types, leading to slower linear algebra operations.
-For the purposes of `SparseArraysBase`, this struct will serve as the canonical example of a sparse array, and will be returned by default when new sparse arrays are created.
-
-One particular feature of `SparseArrayDOK` is that it can be used in cases where the non-stored entries have to be constructed in a non-trivial way.
-Typically, sparse arrays use `zero(eltype(a))` to construct the non-stored entries, but this is not always sufficient.
-A concrete example is found in `BlockSparseArrays.jl`, where initializing a non-stored entry requires constructing a block of zeros of the appropriate size.
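A short usage sketch of this behavior, based on the `SparseArrayDOK` constructors and `getindex`/`setindex!` overloads defined later in this diff (the commented results are what the defaults should produce):

```julia
using NDTensors.SparseArraysBase: SparseArrayDOK, stored_length

a = SparseArrayDOK{Float64}(3, 3) # 3×3 array with no stored entries
a[1, 2] = 1.5                     # stores a single entry in the dictionary
a[2, 2]                           # unstored: produced by the zero function (0.0 by default)
stored_length(a)                  # 1, independent of the array size
```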
-
-
-
-## TODO
-Still need to implement `Base` functions:
-```julia
-[x] sparse_zero(a::AbstractArray) = similar(a)
-[x] sparse_iszero(a::AbstractArray) = iszero(nonzero_length(a)) # Uses `all`, make `sparse_all`?
-[x] sparse_one(a::AbstractArray) = ...
-[x] sparse_isreal(a::AbstractArray) = ... # Uses `all`, make `sparse_all`?
-[x] sparse_isequal(a1::AbstractArray, a2::AbstractArray) = ...
-[x] sparse_conj!(a::AbstractArray) = conj!(nonzeros(a))
-[x] sparse_reshape(a::AbstractArray, dims) = ...
-[ ] sparse_all(f, a::AbstractArray) = ...
-[ ] sparse_getindex(a::AbstractArray, 1:2, 2:3) = ... # Slicing
-```
-`LinearAlgebra` functions:
-```julia
-[ ] sparse_mul!
-[ ] sparse_lmul!
-[ ] sparse_ldiv!
-[ ] sparse_rdiv!
-[ ] sparse_axpby!
-[ ] sparse_axpy!
-[ ] sparse_norm
-[ ] sparse_dot/sparse_inner
-[ ] sparse_adjoint!
-[ ] sparse_transpose!
-
-# Using conversion to `SparseMatrixCSC`:
-[ ] sparse_qr
-[ ] sparse_eigen
-[ ] sparse_svd
-```
-`TensorAlgebra` functions:
-```julia
-[ ] add!
-[ ] contract!
-```
diff --git a/NDTensors/src/lib/SparseArraysBase/src/SparseArraysBase.jl b/NDTensors/src/lib/SparseArraysBase/src/SparseArraysBase.jl
deleted file mode 100644
index 0a9ff4fe4a..0000000000
--- a/NDTensors/src/lib/SparseArraysBase/src/SparseArraysBase.jl
+++ /dev/null
@@ -1,36 +0,0 @@
-module SparseArraysBase
-include("sparsearrayinterface/arraylayouts.jl")
-include("sparsearrayinterface/densearray.jl")
-include("sparsearrayinterface/vectorinterface.jl")
-include("sparsearrayinterface/interface.jl")
-include("sparsearrayinterface/interface_optional.jl")
-include("sparsearrayinterface/indexing.jl")
-include("sparsearrayinterface/base.jl")
-include("sparsearrayinterface/map.jl")
-include("sparsearrayinterface/copyto.jl")
-include("sparsearrayinterface/broadcast.jl")
-include("sparsearrayinterface/conversion.jl")
-include("sparsearrayinterface/wrappers.jl")
-include("sparsearrayinterface/zero.jl")
-include("sparsearrayinterface/cat.jl")
-include("sparsearrayinterface/SparseArraysBaseLinearAlgebraExt.jl")
-include("abstractsparsearray/abstractsparsearray.jl")
-include("abstractsparsearray/abstractsparsematrix.jl")
-include("abstractsparsearray/abstractsparsevector.jl")
-include("abstractsparsearray/wrappedabstractsparsearray.jl")
-include("abstractsparsearray/arraylayouts.jl")
-include("abstractsparsearray/sparsearrayinterface.jl")
-include("abstractsparsearray/base.jl")
-include("abstractsparsearray/broadcast.jl")
-include("abstractsparsearray/map.jl")
-include("abstractsparsearray/baseinterface.jl")
-include("abstractsparsearray/convert.jl")
-include("abstractsparsearray/cat.jl")
-include("abstractsparsearray/SparseArraysBaseSparseArraysExt.jl")
-include("abstractsparsearray/SparseArraysBaseLinearAlgebraExt.jl")
-include("sparsearraydok/defaults.jl")
-include("sparsearraydok/sparsearraydok.jl")
-include("sparsearraydok/sparsematrixdok.jl")
-include("sparsearraydok/sparsevectordok.jl")
-include("sparsearraydok/arraylayouts.jl")
-end
diff --git a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/SparseArraysBaseLinearAlgebraExt.jl b/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/SparseArraysBaseLinearAlgebraExt.jl
deleted file mode 100644
index b90dfb5f84..0000000000
--- a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/SparseArraysBaseLinearAlgebraExt.jl
+++ /dev/null
@@ -1,15 +0,0 @@
-using LinearAlgebra: LinearAlgebra
-
-LinearAlgebra.norm(a::AbstractSparseArray, p::Real=2) = sparse_norm(a, p)
-
-# a1 * a2 * α + a_dest * β
-function LinearAlgebra.mul!(
a_dest::AbstractMatrix, - a1::AbstractSparseMatrix, - a2::AbstractSparseMatrix, - α::Number=true, - β::Number=false, -) - sparse_mul!(a_dest, a1, a2, α, β) - return a_dest -end diff --git a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/SparseArraysBaseSparseArraysExt.jl b/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/SparseArraysBaseSparseArraysExt.jl deleted file mode 100644 index 492a0a37f7..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/SparseArraysBaseSparseArraysExt.jl +++ /dev/null @@ -1,20 +0,0 @@ -using Base: Forward -using SparseArrays: SparseArrays, SparseMatrixCSC, findnz, getcolptr, nonzeros, rowvals -using ..SparseArraysBase: stored_length - -# Julia Base `AbstractSparseArray` interface -SparseArrays.nnz(a::AbstractSparseArray) = stored_length(a) - -sparse_storage(a::SparseMatrixCSC) = nonzeros(a) -function storage_index_to_index(a::SparseMatrixCSC, I) - I1s, I2s = findnz(a) - return CartesianIndex(I1s[I], I2s[I]) -end -function index_to_storage_index(a::SparseMatrixCSC, I::CartesianIndex{2}) - i0, i1 = Tuple(I) - r1 = getcolptr(a)[i1] - r2 = getcolptr(a)[i1 + 1] - 1 - (r1 > r2) && return nothing - r1 = searchsortedfirst(rowvals(a), i0, r1, r2, Forward) - return ((r1 > r2) || (rowvals(a)[r1] != i0)) ? nothing : r1 -end diff --git a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/abstractsparsearray.jl b/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/abstractsparsearray.jl deleted file mode 100644 index 8c73f7e035..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/abstractsparsearray.jl +++ /dev/null @@ -1,3 +0,0 @@ -using ArrayLayouts: LayoutArray - -abstract type AbstractSparseArray{T,N} <: LayoutArray{T,N} end diff --git a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/abstractsparsematrix.jl b/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/abstractsparsematrix.jl deleted file mode 100644 index a545c562e6..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/abstractsparsematrix.jl +++ /dev/null @@ -1 +0,0 @@ -const AbstractSparseMatrix{T} = AbstractSparseArray{T,2} diff --git a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/abstractsparsevector.jl b/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/abstractsparsevector.jl deleted file mode 100644 index 033df09d63..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/abstractsparsevector.jl +++ /dev/null @@ -1 +0,0 @@ -const AbstractSparseVector{T} = AbstractSparseArray{T,1} diff --git a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/arraylayouts.jl b/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/arraylayouts.jl deleted file mode 100644 index 293d58f3ce..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/arraylayouts.jl +++ /dev/null @@ -1,41 +0,0 @@ -using ArrayLayouts: ArrayLayouts, Dot, DualLayout, MatMulMatAdd, MatMulVecAdd, MulAdd -using LinearAlgebra: Adjoint, Transpose -using ..TypeParameterAccessors: parenttype - -function ArrayLayouts.MemoryLayout(arraytype::Type{<:AnyAbstractSparseArray}) - return SparseLayout() -end - -# TODO: Generalize to `SparseVectorLike`/`AnySparseVector`. -function ArrayLayouts.MemoryLayout(arraytype::Type{<:Adjoint{<:Any,<:AbstractSparseVector}}) - return DualLayout{typeof(MemoryLayout(parenttype(arraytype)))}() -end -# TODO: Generalize to `SparseVectorLike`/`AnySparseVector`. 
-function ArrayLayouts.MemoryLayout( - arraytype::Type{<:Transpose{<:Any,<:AbstractSparseVector}} -) - return DualLayout{typeof(MemoryLayout(parenttype(arraytype)))}() -end - -function sparse_matmul!(m::MulAdd) - α, a1, a2, β, a_dest = m.α, m.A, m.B, m.β, m.C - sparse_mul!(a_dest, a1, a2, α, β) - return a_dest -end - -function ArrayLayouts.materialize!( - m::MatMulMatAdd{<:AbstractSparseLayout,<:AbstractSparseLayout,<:AbstractSparseLayout} -) - sparse_matmul!(m) - return m.C -end -function ArrayLayouts.materialize!( - m::MatMulVecAdd{<:AbstractSparseLayout,<:AbstractSparseLayout,<:AbstractSparseLayout} -) - sparse_matmul!(m) - return m.C -end - -function Base.copy(d::Dot{<:SparseLayout,<:SparseLayout}) - return sparse_dot(d.A, d.B) -end diff --git a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/base.jl b/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/base.jl deleted file mode 100644 index 0ea8265ed6..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/base.jl +++ /dev/null @@ -1,16 +0,0 @@ -# Base -function Base.:(==)(a1::AnyAbstractSparseArray, a2::AnyAbstractSparseArray) - return sparse_isequal(a1, a2) -end - -function Base.reshape(a::AnyAbstractSparseArray, dims::Tuple{Vararg{Int}}) - return sparse_reshape(a, dims) -end - -function Base.zero(a::AnyAbstractSparseArray) - return sparse_zero(a) -end - -function Base.one(a::AnyAbstractSparseArray) - return sparse_one(a) -end diff --git a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/baseinterface.jl b/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/baseinterface.jl deleted file mode 100644 index ba16da704d..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/baseinterface.jl +++ /dev/null @@ -1,33 +0,0 @@ -Base.size(a::AbstractSparseArray) = error("Not implemented") - -function Base.similar(a::AbstractSparseArray, elt::Type, dims::Tuple{Vararg{Int}}) - return error("Not implemented") -end - -function Base.getindex(a::AbstractSparseArray, I...) - return sparse_getindex(a, I...) -end - -# Fixes ambiguity error with `ArrayLayouts`. -function Base.getindex(a::AbstractSparseMatrix, I1::AbstractVector, I2::AbstractVector) - return sparse_getindex(a, I1, I2) -end - -# Fixes ambiguity error with `ArrayLayouts`. -function Base.getindex( - a::AbstractSparseMatrix, I1::AbstractUnitRange, I2::AbstractUnitRange -) - return sparse_getindex(a, I1, I2) -end - -function Base.isassigned(a::AbstractSparseArray, I::Integer...) - return sparse_isassigned(a, I...) -end - -function Base.setindex!(a::AbstractSparseArray, I...) - return sparse_setindex!(a, I...) 
-end - -function Base.fill!(a::AbstractSparseArray, value) - return sparse_fill!(a, value) -end diff --git a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/broadcast.jl b/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/broadcast.jl deleted file mode 100644 index 565fccb441..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/broadcast.jl +++ /dev/null @@ -1,4 +0,0 @@ -# Broadcasting -function Broadcast.BroadcastStyle(arraytype::Type{<:AnyAbstractSparseArray}) - return SparseArraysBase.SparseArrayStyle{ndims(arraytype)}() -end diff --git a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/cat.jl b/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/cat.jl deleted file mode 100644 index 3d0475159c..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/cat.jl +++ /dev/null @@ -1,4 +0,0 @@ -# TODO: Change to `AnyAbstractSparseArray`. -function Base.cat(as::AnyAbstractSparseArray...; dims) - return sparse_cat(as...; dims) -end diff --git a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/convert.jl b/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/convert.jl deleted file mode 100644 index 8b532ede73..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/convert.jl +++ /dev/null @@ -1,7 +0,0 @@ -Base.convert(type::Type{<:AbstractSparseArray}, a::AbstractArray) = type(a) - -Base.convert(::Type{T}, a::T) where {T<:AbstractSparseArray} = a - -function (::Type{T})(a::T) where {T<:AbstractSparseArray} - return copy(a) -end diff --git a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/map.jl b/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/map.jl deleted file mode 100644 index 106cfff579..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/map.jl +++ /dev/null @@ -1,42 +0,0 @@ -using ArrayLayouts: LayoutArray - -# Map -function Base.map!(f, a_dest::AbstractArray, a_srcs::Vararg{AnyAbstractSparseArray}) - SparseArraysBase.sparse_map!(f, a_dest, a_srcs...) - return a_dest -end - -function Base.copy!(a_dest::AbstractArray, a_src::AnyAbstractSparseArray) - SparseArraysBase.sparse_copy!(a_dest, a_src) - return a_dest -end - -function Base.copyto!(a_dest::AbstractArray, a_src::AnyAbstractSparseArray) - SparseArraysBase.sparse_copyto!(a_dest, a_src) - return a_dest -end - -# Fix ambiguity error -function Base.copyto!(a_dest::LayoutArray, a_src::AnyAbstractSparseArray) - SparseArraysBase.sparse_copyto!(a_dest, a_src) - return a_dest -end - -function Base.permutedims!(a_dest::AbstractArray, a_src::AnyAbstractSparseArray, perm) - SparseArraysBase.sparse_permutedims!(a_dest, a_src, perm) - return a_dest -end - -function Base.mapreduce(f, op, as::Vararg{AnyAbstractSparseArray}; kwargs...) - return SparseArraysBase.sparse_mapreduce(f, op, as...; kwargs...) -end - -# TODO: Why isn't this calling `mapreduce` already? -function Base.iszero(a::AnyAbstractSparseArray) - return SparseArraysBase.sparse_iszero(a) -end - -# TODO: Why isn't this calling `mapreduce` already? -function Base.isreal(a::AnyAbstractSparseArray) - return SparseArraysBase.sparse_isreal(a) -end diff --git a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/sparsearrayinterface.jl b/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/sparsearrayinterface.jl deleted file mode 100644 index 8b86d1be12..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/sparsearrayinterface.jl +++ /dev/null @@ -1,46 +0,0 @@ -using Dictionaries: set! 
- -sparse_storage(::AbstractSparseArray) = error("Not implemented") - -function index_to_storage_index( - a::AbstractSparseArray{<:Any,N}, I::CartesianIndex{N} -) where {N} - !isassigned(sparse_storage(a), I) && return nothing - return I -end - -function setindex_notstored!( - a::AbstractSparseArray{<:Any,N}, value, I::CartesianIndex{N} -) where {N} - iszero(value) && return a - return error("Setting the specified unstored index is not supported.") -end - -# TODO: Make this into a generic definition of all `AbstractArray`? -# TODO: Check if this is efficient, or determine if this mapping should -# be performed in `storage_index_to_index` and/or `index_to_storage_index`. -function sparse_storage(a::SubArray{<:Any,<:Any,<:AbstractSparseArray}) - parent_storage = sparse_storage(parent(a)) - all_sliced_storage_indices = map(keys(parent_storage)) do I - return map_index(a.indices, I) - end - sliced_storage_indices = filter(!isnothing, all_sliced_storage_indices) - sliced_parent_storage = map(I -> parent_storage[I], keys(sliced_storage_indices)) - return typeof(parent_storage)(sliced_storage_indices, sliced_parent_storage) -end - -# TODO: Make this into a generic definition of all `AbstractArray`? -function stored_indices( - a::AnyPermutedDimsArray{<:Any,<:Any,<:Any,<:Any,<:AbstractSparseArray} -) - return Iterators.map( - I -> CartesianIndex(map(i -> I[i], perm(a))), stored_indices(parent(a)) - ) -end - -# TODO: Make this into a generic definition of all `AbstractArray`? -function sparse_storage( - a::AnyPermutedDimsArray{<:Any,<:Any,<:Any,<:Any,<:AbstractSparseArray} -) - return sparse_storage(parent(a)) -end diff --git a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/wrappedabstractsparsearray.jl b/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/wrappedabstractsparsearray.jl deleted file mode 100644 index a4ee4bebe3..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/abstractsparsearray/wrappedabstractsparsearray.jl +++ /dev/null @@ -1,22 +0,0 @@ -using Adapt: WrappedArray -using LinearAlgebra: Adjoint, Transpose - -const WrappedAbstractSparseArray{T,N,A} = WrappedArray{ - T,N,<:AbstractSparseArray,<:AbstractSparseArray{T,N} -} - -const AnyAbstractSparseArray{T,N} = Union{ - <:AbstractSparseArray{T,N},<:WrappedAbstractSparseArray{T,N} -} - -function stored_indices(a::Adjoint) - return Iterators.map(I -> CartesianIndex(reverse(Tuple(I))), stored_indices(parent(a))) -end -stored_length(a::Adjoint) = stored_length(parent(a)) -sparse_storage(a::Adjoint) = Iterators.map(adjoint, sparse_storage(parent(a))) - -function stored_indices(a::Transpose) - return Iterators.map(I -> CartesianIndex(reverse(Tuple(I))), stored_indices(parent(a))) -end -stored_length(a::Transpose) = stored_length(parent(a)) -sparse_storage(a::Transpose) = Iterators.map(transpose, sparse_storage(parent(a))) diff --git a/NDTensors/src/lib/SparseArraysBase/src/sparsearraydok/arraylayouts.jl b/NDTensors/src/lib/SparseArraysBase/src/sparsearraydok/arraylayouts.jl deleted file mode 100644 index fc2740d377..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/sparsearraydok/arraylayouts.jl +++ /dev/null @@ -1,13 +0,0 @@ -using ArrayLayouts: ArrayLayouts, MemoryLayout, MulAdd - -ArrayLayouts.MemoryLayout(::Type{<:SparseArrayDOK}) = SparseLayout() - -# Default sparse array type for `AbstractSparseLayout`. -default_sparsearraytype(elt::Type) = SparseArrayDOK{elt} - -# TODO: Preserve GPU memory! Implement `CuSparseArrayLayout`, `MtlSparseLayout`? 
-function Base.similar( - ::MulAdd{<:AbstractSparseLayout,<:AbstractSparseLayout}, elt::Type, axes -) - return similar(default_sparsearraytype(elt), axes) -end diff --git a/NDTensors/src/lib/SparseArraysBase/src/sparsearraydok/defaults.jl b/NDTensors/src/lib/SparseArraysBase/src/sparsearraydok/defaults.jl deleted file mode 100644 index 1dc674b8b2..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/sparsearraydok/defaults.jl +++ /dev/null @@ -1,5 +0,0 @@ -using Dictionaries: Dictionary - -default_zero() = Zero() -default_data(type::Type, ndims::Int) = Dictionary{default_keytype(ndims),type}() -default_keytype(ndims::Int) = CartesianIndex{ndims} diff --git a/NDTensors/src/lib/SparseArraysBase/src/sparsearraydok/sparsearraydok.jl b/NDTensors/src/lib/SparseArraysBase/src/sparsearraydok/sparsearraydok.jl deleted file mode 100644 index 95a5b14017..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/sparsearraydok/sparsearraydok.jl +++ /dev/null @@ -1,146 +0,0 @@ -using Accessors: @set -using Dictionaries: Dictionary, set! -using MacroTools: @capture - -# TODO: Parametrize by `data`? -struct SparseArrayDOK{T,N,Zero} <: AbstractSparseArray{T,N} - data::Dictionary{CartesianIndex{N},T} - dims::Ref{NTuple{N,Int}} - zero::Zero - function SparseArrayDOK{T,N,Zero}(data, dims::NTuple{N,Int}, zero) where {T,N,Zero} - return new{T,N,Zero}(data, Ref(dims), zero) - end -end - -# Constructors -function SparseArrayDOK(data, dims::Tuple{Vararg{Int}}, zero) - return SparseArrayDOK{eltype(data),length(dims),typeof(zero)}(data, dims, zero) -end - -function SparseArrayDOK{T,N,Zero}(dims::Tuple{Vararg{Int}}, zero) where {T,N,Zero} - return SparseArrayDOK{T,N,Zero}(default_data(T, N), dims, zero) -end - -function SparseArrayDOK{T,N}(dims::Tuple{Vararg{Int}}, zero) where {T,N} - return SparseArrayDOK{T,N,typeof(zero)}(dims, zero) -end - -function SparseArrayDOK{T,N}(dims::Tuple{Vararg{Int}}) where {T,N} - return SparseArrayDOK{T,N}(dims, default_zero()) -end - -function SparseArrayDOK{T}(dims::Tuple{Vararg{Int}}) where {T} - return SparseArrayDOK{T,length(dims)}(dims) -end - -function SparseArrayDOK{T}(dims::Int...) where {T} - return SparseArrayDOK{T}(dims) -end - -# Specify zero function -function SparseArrayDOK{T}(dims::Tuple{Vararg{Int}}, zero) where {T} - return SparseArrayDOK{T,length(dims)}(dims, zero) -end - -# undef -function SparseArrayDOK{T,N,Zero}( - ::UndefInitializer, dims::Tuple{Vararg{Int}}, zero -) where {T,N,Zero} - return SparseArrayDOK{T,N,Zero}(dims, zero) -end - -function SparseArrayDOK{T,N}(::UndefInitializer, dims::Tuple{Vararg{Int}}, zero) where {T,N} - return SparseArrayDOK{T,N}(dims, zero) -end - -function SparseArrayDOK{T,N}(::UndefInitializer, dims::Tuple{Vararg{Int}}) where {T,N} - return SparseArrayDOK{T,N}(dims) -end - -function SparseArrayDOK{T}(::UndefInitializer, dims::Tuple{Vararg{Int}}) where {T} - return SparseArrayDOK{T}(dims) -end - -# Axes version -function SparseArrayDOK{T}( - ::UndefInitializer, axes::Tuple{Vararg{AbstractUnitRange}} -) where {T} - @assert all(isone, first.(axes)) - return SparseArrayDOK{T}(length.(axes)) -end - -function SparseArrayDOK{T}(::UndefInitializer, dims::Int...) where {T} - return SparseArrayDOK{T}(dims...) 
-end
-
-function SparseArrayDOK{T}(::UndefInitializer, dims::Tuple{Vararg{Int}}, zero) where {T}
-  return SparseArrayDOK{T}(dims, zero)
-end
-
-# Base `AbstractArray` interface
-Base.size(a::SparseArrayDOK) = a.dims[]
-
-getindex_zero_function(a::SparseArrayDOK) = a.zero
-function set_getindex_zero_function(a::SparseArrayDOK, f)
-  return @set a.zero = f
-end
-
-function setindex_notstored!(
-  a::SparseArrayDOK{<:Any,N}, value, I::CartesianIndex{N}
-) where {N}
-  set!(sparse_storage(a), I, value)
-  return a
-end
-
-function Base.similar(a::SparseArrayDOK, elt::Type, dims::Tuple{Vararg{Int}})
-  return SparseArrayDOK{elt}(undef, dims, getindex_zero_function(a))
-end
-
-# `SparseArraysBase` interface
-sparse_storage(a::SparseArrayDOK) = a.data
-
-function dropall!(a::SparseArrayDOK)
-  return empty!(sparse_storage(a))
-end
-
-SparseArrayDOK(a::AbstractArray) = SparseArrayDOK{eltype(a)}(a)
-
-SparseArrayDOK{T}(a::AbstractArray) where {T} = SparseArrayDOK{T,ndims(a)}(a)
-
-function SparseArrayDOK{T,N}(a::AbstractArray) where {T,N}
-  return sparse_convert(SparseArrayDOK{T,N}, a)
-end
-
-function Base.resize!(a::SparseArrayDOK{<:Any,N}, new_size::NTuple{N,Integer}) where {N}
-  a.dims[] = new_size
-  return a
-end
-
-function setindex_maybe_grow!(a::SparseArrayDOK{<:Any,N}, value, I::Vararg{Int,N}) where {N}
-  if any(I .> size(a))
-    resize!(a, max.(I, size(a)))
-  end
-  a[I...] = value
-  return a
-end
-
-function is_setindex!_expr(expr::Expr)
-  return is_assignment_expr(expr) && is_getindex_expr(first(expr.args))
-end
-is_setindex!_expr(x) = false
-
-is_getindex_expr(expr::Expr) = (expr.head === :ref)
-is_getindex_expr(x) = false
-
-is_assignment_expr(expr::Expr) = (expr.head === :(=))
-is_assignment_expr(expr) = false
-
-macro maybe_grow(expr)
-  if !is_setindex!_expr(expr)
-    error(
-      "@maybe_grow must be used with setindex! syntax (as @maybe_grow a[i,j,...] = value)"
-    )
-  end
-  @capture(expr, array_[indices__] = value_)
-  return :(setindex_maybe_grow!($(esc(array)), $(esc(value)), $(esc.(indices)...)))
-end
diff --git a/NDTensors/src/lib/SparseArraysBase/src/sparsearraydok/sparsematrixdok.jl b/NDTensors/src/lib/SparseArraysBase/src/sparsearraydok/sparsematrixdok.jl
deleted file mode 100644
index f568760c19..0000000000
--- a/NDTensors/src/lib/SparseArraysBase/src/sparsearraydok/sparsematrixdok.jl
+++ /dev/null
@@ -1 +0,0 @@
-const SparseMatrixDOK{T} = SparseArrayDOK{T,2}
diff --git a/NDTensors/src/lib/SparseArraysBase/src/sparsearraydok/sparsevectordok.jl b/NDTensors/src/lib/SparseArraysBase/src/sparsearraydok/sparsevectordok.jl
deleted file mode 100644
index 7cc7df00d6..0000000000
--- a/NDTensors/src/lib/SparseArraysBase/src/sparsearraydok/sparsevectordok.jl
+++ /dev/null
@@ -1 +0,0 @@
-const SparseVectorDOK{T} = SparseArrayDOK{T,1}
diff --git a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/SparseArraysBaseLinearAlgebraExt.jl b/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/SparseArraysBaseLinearAlgebraExt.jl
deleted file mode 100644
index 4e4ed259d0..0000000000
--- a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/SparseArraysBaseLinearAlgebraExt.jl
+++ /dev/null
@@ -1,78 +0,0 @@
-using LinearAlgebra: dot, mul!, norm
-
-# Unstored entries are zero, so the p-norm of the stored values
-# equals the p-norm of the whole array. Forward `p` so that
-# norms other than the default 2-norm are honored.
-sparse_norm(a::AbstractArray, p::Real=2) = norm(sparse_storage(a), p)
-
-function mul_indices(I1::CartesianIndex{2}, I2::CartesianIndex{2})
-  if I1[2] ≠ I2[1]
-    return nothing
-  end
-  return CartesianIndex(I1[1], I2[2])
-end
-
-# TODO: Is this needed? Maybe when multiplying vectors?
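`mul_indices` encodes the index bookkeeping of sparse matrix multiplication: a stored entry at `(i, k)` of `a1` only pairs with a stored entry at `(k, j)` of `a2`, producing the output index `(i, j)`; mismatched inner indices contribute nothing. The one-dimensional method that follows is the vector-vector analogue the TODO above asks about. A small worked example, assuming the definitions in this file:

mul_indices(CartesianIndex(1, 2), CartesianIndex(2, 3))  # CartesianIndex(1, 3)
mul_indices(CartesianIndex(1, 2), CartesianIndex(3, 4))  # nothing (inner indices differ)
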
-function mul_indices(I1::CartesianIndex{1}, I2::CartesianIndex{1})
-  if I1 ≠ I2
-    return nothing
-  end
-  return CartesianIndex(I1)
-end
-
-function default_mul!!(
-  a_dest::AbstractMatrix,
-  a1::AbstractMatrix,
-  a2::AbstractMatrix,
-  α::Number=true,
-  β::Number=false,
-)
-  mul!(a_dest, a1, a2, α, β)
-  return a_dest
-end
-
-function default_mul!!(
-  a_dest::Number, a1::Number, a2::Number, α::Number=true, β::Number=false
-)
-  return a1 * a2 * α + a_dest * β
-end
-
-# a1 * a2 * α + a_dest * β
-function sparse_mul!(
-  a_dest::AbstractArray,
-  a1::AbstractArray,
-  a2::AbstractArray,
-  α::Number=true,
-  β::Number=false;
-  (mul!!)=(default_mul!!),
-)
-  for I1 in stored_indices(a1)
-    for I2 in stored_indices(a2)
-      I_dest = mul_indices(I1, I2)
-      if !isnothing(I_dest)
-        a_dest[I_dest] = mul!!(a_dest[I_dest], a1[I1], a2[I2], α, β)
-      end
-    end
-  end
-  return a_dest
-end
-
-function sparse_dot(a1::AbstractArray, a2::AbstractArray)
-  # This requires that `a1` and `a2` have the same shape.
-  # TODO: Generalize (Base supports dot products of
-  # arrays with the same length but different sizes).
-  size(a1) == size(a2) ||
-    throw(DimensionMismatch("Sizes $(size(a1)) and $(size(a2)) don't match."))
-  dot_dest = zero(Base.promote_op(dot, eltype(a1), eltype(a2)))
-  # TODO: First check if the number of stored elements (`stored_length`) is
-  # smaller in `a1` or `a2` and use whichever one is smaller as the outer loop.
-  for I1 in stored_indices(a1)
-    # TODO: Overload and use `Base.isstored(a, I) = I in stored_indices(a)` instead.
-    # TODO: This assumes fast lookup of indices, which may not always be the case.
-    # It could be better to loop over `stored_indices(a2)` and check that
-    # `I1 == I2` instead (say using `mul_indices(I1, I2)`). We could have a trait
-    # `HasFastIsStored(a::AbstractArray)` to choose between the two.
-    if I1 in stored_indices(a2)
-      dot_dest += dot(a1[I1], a2[I1])
-    end
-  end
-  return dot_dest
-end
diff --git a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/arraylayouts.jl b/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/arraylayouts.jl
deleted file mode 100644
index 4ff6cc148d..0000000000
--- a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/arraylayouts.jl
+++ /dev/null
@@ -1,5 +0,0 @@
-using ArrayLayouts: MemoryLayout
-
-abstract type AbstractSparseLayout <: MemoryLayout end
-
-struct SparseLayout <: AbstractSparseLayout end
diff --git a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/base.jl b/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/base.jl
deleted file mode 100644
index 9a6fd24941..0000000000
--- a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/base.jl
+++ /dev/null
@@ -1,126 +0,0 @@
-# This is used when a sparse output structure not matching
-# the input structure is needed, for example when reshaping
-# a DiagonalArray. Overload:
-#
-# sparse_similar(a::AbstractArray, elt::Type, dims::Tuple{Vararg{Int}})
-#
-# as needed.
-function sparse_similar(a::AbstractArray, elt::Type)
-  return similar(a, elt, size(a))
-end
-
-function sparse_similar(a::AbstractArray, dims::Tuple{Vararg{Int}})
-  return sparse_similar(a, eltype(a), dims)
-end
-
-function sparse_similar(a::AbstractArray)
-  return sparse_similar(a, eltype(a), size(a))
-end
-
-function sparse_reduce(op, a::AbstractArray; kwargs...)
-  return sparse_mapreduce(identity, op, a; kwargs...)
-end - -function sparse_all(a::AbstractArray) - return sparse_reduce(&, a; init=true) -end - -function sparse_all(f, a::AbstractArray) - return sparse_mapreduce(f, &, a; init=true) -end - -function sparse_iszero(a::AbstractArray) - return sparse_all(iszero, a) -end - -function sparse_isreal(a::AbstractArray) - return sparse_all(isreal, a) -end - -# This is equivalent to: -# -# sparse_map!(Returns(x), a, a) -# -# but we don't use that here since `sparse_fill!` -# is used inside of `sparse_map!`. -function sparse_fill!(a::AbstractArray, x) - if iszero(x) - # This checks that `x` is compatible - # with `eltype(a)`. - x = convert(eltype(a), x) - sparse_zero!(a) - return a - end - for I in eachindex(a) - a[I] = x - end - return a -end - -# This could just call `sparse_fill!` -# but it avoids a zero construction and check. -function sparse_zero!(a::AbstractArray) - dropall!(a) - sparse_zerovector!(a) - return a -end - -function sparse_zero(a::AbstractArray) - # Need to overload `similar` for custom types - a = similar(a) - sparse_zerovector!(a) - return a -end - -# TODO: Is this a good definition? -function sparse_zero(arraytype::Type{<:AbstractArray}, dims::Tuple{Vararg{Int}}) - a = arraytype(undef, dims) - sparse_zerovector!(a) - return a -end - -function sparse_one!(a::AbstractMatrix) - sparse_zero!(a) - m, n = size(a) - @assert m == n - for i in 1:m - a[i, i] = one(eltype(a)) - end - return a -end - -function sparse_one(a::AbstractMatrix) - a = sparse_zero(a) - sparse_one!(a) - return a -end - -# TODO: Use `sparse_mapreduce(==, &, a1, a2)`? -function sparse_isequal(a1::AbstractArray, a2::AbstractArray) - Is = collect(stored_indices(a1)) - intersect!(Is, stored_indices(a2)) - if !(length(Is) == stored_length(a1) == stored_length(a2)) - return false - end - for I in Is - a1[I] == a2[I] || return false - end - return true -end - -function sparse_reshape!(a_dest::AbstractArray, a_src::AbstractArray, dims) - @assert length(a_src) == prod(dims) - sparse_zero!(a_dest) - linear_inds = LinearIndices(a_src) - dest_cartesian_inds = CartesianIndices(dims) - for I in stored_indices(a_src) - a_dest[dest_cartesian_inds[linear_inds[I]]] = a_src[I] - end - return a_dest -end - -function sparse_reshape(a::AbstractArray, dims) - a_reshaped = sparse_similar(a, dims) - sparse_reshape!(a_reshaped, a, dims) - return a_reshaped -end diff --git a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/broadcast.jl b/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/broadcast.jl deleted file mode 100644 index d113932d44..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/broadcast.jl +++ /dev/null @@ -1,37 +0,0 @@ -using Base.Broadcast: BroadcastStyle, AbstractArrayStyle, DefaultArrayStyle, Broadcasted -using ..BroadcastMapConversion: map_function, map_args - -struct SparseArrayStyle{N} <: AbstractArrayStyle{N} end - -# Define for new sparse array types. 
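The commented template just below shows the single `BroadcastStyle` overload a new sparse type needs. With it, broadcasting allocates a sparse destination via the `similar` method further down and lowers to `sparse_map!` via `copyto!`, so zero-preserving expressions only touch stored entries. A hedged sketch of the resulting behavior, assuming `SparseArrayDOK` (which is assumed to opt in elsewhere in this library):

using NDTensors.SparseArraysBase: SparseArrayDOK, stored_length

a = SparseArrayDOK{Float64}(2, 3)
a[1, 2] = 12.0
b = 2 .* a        # `x -> 2x` preserves zeros: only the stored entry is mapped
stored_length(b)  # 1
c = 2 .+ a        # `x -> 2 + x` does not preserve zeros: every entry is filled
stored_length(c)  # 6

Whether the function preserves zeros is decided at runtime by probing it with a `NotStoredValue` (see `preserves_zero` in map.jl further below), which is why no trait declarations are needed per function.
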
-# function Broadcast.BroadcastStyle(arraytype::Type{<:MySparseArray}) -# return SparseArrayStyle{ndims(arraytype)}() -# end - -SparseArrayStyle(::Val{N}) where {N} = SparseArrayStyle{N}() -SparseArrayStyle{M}(::Val{N}) where {M,N} = SparseArrayStyle{N}() - -Broadcast.BroadcastStyle(a::SparseArrayStyle, ::DefaultArrayStyle{0}) = a -function Broadcast.BroadcastStyle(::SparseArrayStyle{N}, a::DefaultArrayStyle) where {N} - return BroadcastStyle(DefaultArrayStyle{N}(), a) -end -function Broadcast.BroadcastStyle(::SparseArrayStyle{N}, ::Broadcast.Style{Tuple}) where {N} - return DefaultArrayStyle{N}() -end - -# TODO: Use `allocate_output`, share logic with `map`. -function Base.similar(bc::Broadcasted{<:SparseArrayStyle}, elt::Type) - # TODO: Is this a good definition? Probably should check that - # they have consistent axes. - return similar(first(map_args(bc)), elt) -end - -# Broadcasting implementation -function Base.copyto!( - dest::AbstractArray{<:Any,N}, bc::Broadcasted{SparseArrayStyle{N}} -) where {N} - # convert to map - # flatten and only keep the AbstractArray arguments - sparse_map!(map_function(bc), dest, map_args(bc)...) - return dest -end diff --git a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/cat.jl b/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/cat.jl deleted file mode 100644 index 9f2b3179a5..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/cat.jl +++ /dev/null @@ -1,64 +0,0 @@ -unval(x) = x -unval(::Val{x}) where {x} = x - -# TODO: Assert that `a1` and `a2` start at one. -axis_cat(a1::AbstractUnitRange, a2::AbstractUnitRange) = Base.OneTo(length(a1) + length(a2)) -function axis_cat( - a1::AbstractUnitRange, a2::AbstractUnitRange, a_rest::AbstractUnitRange... -) - return axis_cat(axis_cat(a1, a2), a_rest...) -end -function cat_axes(as::AbstractArray...; dims) - return ntuple(length(first(axes.(as)))) do dim - return if dim in unval(dims) - axis_cat(map(axes -> axes[dim], axes.(as))...) - else - axes(first(as))[dim] - end - end -end - -function allocate_cat_output(as::AbstractArray...; dims) - eltype_dest = promote_type(eltype.(as)...) - axes_dest = cat_axes(as...; dims) - # TODO: Promote the block types of the inputs rather than using - # just the first input. - # TODO: Make this customizable with `cat_similar`. - # TODO: Base the zero element constructor on those of the inputs, - # for example block sparse arrays. - return similar(first(as), eltype_dest, axes_dest...) -end - -# https://github.com/JuliaLang/julia/blob/v1.11.1/base/abstractarray.jl#L1748-L1857 -# https://docs.julialang.org/en/v1/base/arrays/#Concatenation-and-permutation -# This is very similar to the `Base.cat` implementation but handles zero values better. -function cat_offset!( - a_dest::AbstractArray, offsets, a1::AbstractArray, a_rest::AbstractArray...; dims -) - inds = ntuple(ndims(a_dest)) do dim - dim in unval(dims) ? offsets[dim] .+ axes(a1, dim) : axes(a_dest, dim) - end - a_dest[inds...] = a1 - new_offsets = ntuple(ndims(a_dest)) do dim - dim in unval(dims) ? offsets[dim] + size(a1, dim) : offsets[dim] - end - cat_offset!(a_dest, new_offsets, a_rest...; dims) - return a_dest -end -function cat_offset!(a_dest::AbstractArray, offsets; dims) - return a_dest -end - -# TODO: Define a generic `cat!` function. -function sparse_cat!(a_dest::AbstractArray, as::AbstractArray...; dims) - offsets = ntuple(zero, ndims(a_dest)) - # TODO: Fill `a_dest` with zeros if needed. 
- cat_offset!(a_dest, offsets, as...; dims) - return a_dest -end - -function sparse_cat(as::AbstractArray...; dims) - a_dest = allocate_cat_output(as...; dims) - sparse_cat!(a_dest, as...; dims) - return a_dest -end diff --git a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/conversion.jl b/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/conversion.jl deleted file mode 100644 index 57f1850ba0..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/conversion.jl +++ /dev/null @@ -1,5 +0,0 @@ -function sparse_convert(arraytype::Type{<:AbstractArray}, a::AbstractArray) - a_dest = sparse_zero(arraytype, size(a)) - sparse_copyto!(a_dest, a) - return a_dest -end diff --git a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/copyto.jl b/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/copyto.jl deleted file mode 100644 index 218502f3d9..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/copyto.jl +++ /dev/null @@ -1,15 +0,0 @@ -function sparse_copy!(dest::AbstractArray, src::AbstractArray) - @assert axes(dest) == axes(src) - sparse_map!(identity, dest, src) - return dest -end - -function sparse_copyto!(dest::AbstractArray, src::AbstractArray) - sparse_map!(identity, dest, src) - return dest -end - -function sparse_permutedims!(dest::AbstractArray, src::AbstractArray, perm) - sparse_copyto!(dest, PermutedDimsArray(src, perm)) - return dest -end diff --git a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/densearray.jl b/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/densearray.jl deleted file mode 100644 index 2f4fd028c7..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/densearray.jl +++ /dev/null @@ -1,12 +0,0 @@ -# Generic functionality for converting to a -# dense array, trying to preserve information -# about the array (such as which device it is on). -# TODO: Maybe call `densecopy`? -# TODO: Make sure this actually preserves the device, -# maybe use `NDTensors.TypeParameterAccessors.unwrap_array_type`. -function densearray(a::AbstractArray) - # TODO: `set_ndims(unwrap_array_type(a), ndims(a))(a)` - # Maybe define `densetype(a) = set_ndims(unwrap_array_type(a), ndims(a))`. - # Or could use `unspecify_parameters(unwrap_array_type(a))(a)`. - return Array(a) -end diff --git a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/indexing.jl b/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/indexing.jl deleted file mode 100644 index 5f8c1cad7e..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/indexing.jl +++ /dev/null @@ -1,216 +0,0 @@ -using ArrayLayouts: ArrayLayouts - -# An index into the storage of the sparse array. -struct StorageIndex{I} - i::I -end -index(i::StorageIndex) = i.i - -# Indicate if the index into the sparse array is -# stored or not. -abstract type MaybeStoredIndex{I} end - -# An index into a stored value of the sparse array. -# Stores both the index into the outer array -# as well as into the underlying storage. 
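That pairing is the `StoredIndex` type defined next; `NotStoredIndex` marks the complementary case. Element access first classifies an index with `storage_index` and then dispatches on the result, so the stored and unstored code paths are separated at the type level. A hedged sketch of the flow, assuming the definitions in this file and a `SparseArrayDOK` `a` with only `a[1, 2] = 12.0` stored:

i = storage_index(a, CartesianIndex(1, 2))  # StoredIndex wrapping the storage key
j = storage_index(a, CartesianIndex(2, 2))  # NotStoredIndex(CartesianIndex(2, 2))
sparse_getindex(a, i)  # 12.0, read straight out of `sparse_storage(a)`
sparse_getindex(a, j)  # 0.0, computed by `getindex_zero_function(a)`
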
-struct StoredIndex{Iouter,Istorage} <: MaybeStoredIndex{Iouter} - iouter::Iouter - istorage::StorageIndex{Istorage} -end -index(i::StoredIndex) = i.iouter -StorageIndex(i::StoredIndex) = i.istorage - -stored_length(a::AbstractArray) = length(sparse_storage(a)) - -struct NotStoredIndex{Iouter} <: MaybeStoredIndex{Iouter} - iouter::Iouter -end -index(i::NotStoredIndex) = i.iouter - -function MaybeStoredIndex(a::AbstractArray, I) - return MaybeStoredIndex(I, index_to_storage_index(a, I)) -end -MaybeStoredIndex(I, I_storage) = StoredIndex(I, StorageIndex(I_storage)) -MaybeStoredIndex(I, I_storage::Nothing) = NotStoredIndex(I) - -# Convert the index into an index into the storage. -# Return `NotStoredIndex(I)` if it isn't in the storage. -storage_index(a::AbstractArray, I...) = MaybeStoredIndex(a, I...) - -function storage_indices(a::AbstractArray) - return eachindex(sparse_storage(a)) -end - -# Derived -function index_to_storage_index(a::AbstractArray{<:Any,N}, I::Vararg{Int,N}) where {N} - return index_to_storage_index(a, CartesianIndex(I)) -end - -function sparse_getindex(a::AbstractArray, I::NotStoredIndex) - return getindex_notstored(a, index(I)) -end - -function sparse_getindex(a::AbstractArray, I::StoredIndex) - return sparse_getindex(a, StorageIndex(I)) -end - -function sparse_getindex(a::AbstractArray, I::StorageIndex) - return sparse_storage(a)[index(I)] -end - -function sparse_getindex(a::AbstractArray{<:Any,N}, I::Vararg{Int,N}) where {N} - return sparse_getindex(a, CartesianIndex(I)) -end - -function sparse_getindex(a::AbstractArray{<:Any,N}, I::CartesianIndex{N}) where {N} - return _sparse_getindex(a, I) -end - -# Ambiguity with linear indexing -function sparse_getindex(a::AbstractArray{<:Any,1}, I::CartesianIndex{1}) - return _sparse_getindex(a, I) -end - -# Implementation of element access -function _sparse_getindex(a::AbstractArray{<:Any,N}, I::CartesianIndex{N}) where {N} - @boundscheck checkbounds(a, I) - return sparse_getindex(a, storage_index(a, I)) -end - -# Handle trailing indices or linear indexing -function sparse_getindex(a::AbstractArray, I::Vararg{Int}) - return sparse_getindex(a, CartesianIndex(I)) -end - -# Fix ambiguity error. -function sparse_getindex(a::AbstractArray{<:Any,0}) - return sparse_getindex(a, CartesianIndex()) -end - -# Linear indexing -function sparse_getindex(a::AbstractArray, I::CartesianIndex{1}) - return sparse_getindex(a, CartesianIndices(a)[I]) -end - -# Handle trailing indices -function sparse_getindex(a::AbstractArray, I::CartesianIndex) - t = Tuple(I) - length(t) < ndims(a) && error("Not enough indices passed") - I′ = ntuple(i -> t[i], ndims(a)) - @assert all(i -> isone(I[i]), (ndims(a) + 1):length(I)) - return _sparse_getindex(a, CartesianIndex(I′)) -end - -# Slicing -function sparse_getindex(a::AbstractArray, I::AbstractVector...) 
- return copy(@view a[I...]) -end - -function ArrayLayouts.sub_materialize(::SparseLayout, a::AbstractArray, axes) - a_dest = similar(a, axes) - a_dest .= a - return a_dest -end - -# Update a nonzero value -function sparse_setindex!(a::AbstractArray, value, I::StorageIndex) - sparse_storage(a)[index(I)] = value - return a -end - -# Implementation of element access -function _sparse_setindex!(a::AbstractArray{<:Any,N}, value, I::CartesianIndex{N}) where {N} - @boundscheck checkbounds(a, I) - sparse_setindex!(a, value, storage_index(a, I)) - return a -end - -# Ambiguity with linear indexing -function sparse_setindex!(a::AbstractArray{<:Any,1}, value, I::CartesianIndex{1}) - _sparse_setindex!(a, value, I) - return a -end - -# Handle trailing indices or linear indexing -function sparse_setindex!(a::AbstractArray, value, I::Vararg{Int}) - sparse_setindex!(a, value, CartesianIndex(I)) - return a -end - -# Fix ambiguity error -function sparse_setindex!(a::AbstractArray, value) - sparse_setindex!(a, value, CartesianIndex()) - return a -end - -# Linear indexing -function sparse_setindex!(a::AbstractArray, value, I::CartesianIndex{1}) - sparse_setindex!(a, value, CartesianIndices(a)[I]) - return a -end - -# Slicing -# TODO: Make this handle more general slicing operations, -# base it off of `ArrayLayouts.sub_materialize`. -function sparse_setindex!(a::AbstractArray, value, I::AbstractUnitRange...) - inds = CartesianIndices(I) - for i in stored_indices(value) - if i in CartesianIndices(inds) - a[inds[i]] = value[i] - end - end - return a -end - -# Handle trailing indices -function sparse_setindex!(a::AbstractArray, value, I::CartesianIndex) - t = Tuple(I) - length(t) < ndims(a) && error("Not enough indices passed") - I′ = ntuple(i -> t[i], ndims(a)) - @assert all(i -> isone(I[i]), (ndims(a) + 1):length(I)) - return _sparse_setindex!(a, value, CartesianIndex(I′)) -end - -function sparse_setindex!(a::AbstractArray, value, I::StoredIndex) - sparse_setindex!(a, value, StorageIndex(I)) - return a -end - -function sparse_setindex!(a::AbstractArray, value, I::NotStoredIndex) - setindex_notstored!(a, value, index(I)) - return a -end - -# isassigned -function sparse_isassigned(a::AbstractArray{<:Any,N}, I::CartesianIndex{N}) where {N} - return sparse_isassigned(a, Tuple(I)...) -end -function sparse_isassigned(a::AbstractArray, I::Integer...) - # Check trailing dimensions are one. This is needed in generic - # AbstractArray show when `a isa AbstractVector`. - all(d -> isone(I[d]), (ndims(a) + 1):length(I)) || return false - return all(dim -> I[dim] ∈ axes(a, dim), 1:ndims(a)) -end - -# A set of indices into the storage of the sparse array. 
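That is the `StorageIndices` wrapper defined next: passing `StorageIndices(:)` reads or writes the backing storage as a whole, bypassing the per-index classification above. A hedged sketch, assuming a `SparseArrayDOK` `a` with a few stored entries:

sparse_getindex(a, StorageIndices(:))        # the backing storage (a `Dictionary` for DOK)
sparse_setindex!(a, 0.0, StorageIndices(:))  # broadcasts 0.0 over the stored values only
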
-struct StorageIndices{I} - i::I -end -indices(i::StorageIndices) = i.i - -function sparse_getindex(a::AbstractArray, I::StorageIndices{Colon}) - return sparse_storage(a) -end - -function sparse_getindex(a::AbstractArray, I::StorageIndices) - return error("Not implemented") -end - -function sparse_setindex!(a::AbstractArray, value, I::StorageIndices{Colon}) - sparse_storage(a) .= value - return a -end - -function sparse_setindex!(a::AbstractArray, value, I::StorageIndices) - return error("Not implemented") -end diff --git a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/interface.jl b/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/interface.jl deleted file mode 100644 index ed5f7cb1d8..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/interface.jl +++ /dev/null @@ -1,18 +0,0 @@ -# Also look into: -# https://juliaarrays.github.io/ArrayInterface.jl/stable/sparsearrays/ - -# Minimal sparse array interface. -# Data structure of the stored (generally nonzero) values. -# By default assume it is dense, so all values are stored. -sparse_storage(a::AbstractArray) = a - -# Minimal sparse array interface. -# Map an index into the stored data to a CartesianIndex of the -# outer array. -storage_index_to_index(a::AbstractArray, I) = I - -# Minimal interface -# Map a `CartesianIndex` to an index/key into the nonzero data structure -# returned by `storage`. -# Return `nothing` if the index corresponds to a structural zero (unstored) value. -index_to_storage_index(a::AbstractArray{<:Any,N}, I::CartesianIndex{N}) where {N} = I diff --git a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/interface_optional.jl b/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/interface_optional.jl deleted file mode 100644 index 710cd2570d..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/interface_optional.jl +++ /dev/null @@ -1,44 +0,0 @@ -# Optional interface. - -# Function for computing unstored zero values. -getindex_zero_function(::AbstractArray) = Zero() - -# Change the function for computing unstored values -set_getindex_zero_function(a::AbstractArray, f) = error("Not implemented") - -function getindex_notstored(a::AbstractArray, I) - return getindex_zero_function(a)(a, I) -end - -# Optional interface. -# Insert a new value into a location -# where there is not a stored value. -# Some types (like `Diagonal`) may not support this. -function setindex_notstored!(a::AbstractArray, value, I) - iszero(value) && return a - return throw(ArgumentError("Can't set nonzero values of $(typeof(a)).")) -end - -# Optional interface. -# Iterates over the indices of `a` where there are stored values. -# Can overload for faster iteration when there is more structure, -# for example with DiagonalArrays. -function stored_indices(a::AbstractArray) - return Iterators.map(Inz -> storage_index_to_index(a, Inz), storage_indices(a)) -end - -# Empty the sparse storage if possible. -# Array types should overload `Base.dataids` to opt-in -# to aliasing detection with `Base.mightalias` -# to avoid emptying an input array in the case of `sparse_map!`. -# `dropall!` is used to zero out the output array. -# See also `Base.unalias` and `Base.unaliascopy`. -# Interface is inspired by Base `SparseArrays.droptol!` -# and `SparseArrays.dropzeros!`, and is like -# `dropall!(a) = SparseArrays.droptol!(a, Inf)`. 
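The generic `dropall!` fallback below is a no-op, since a dense array has nothing to drop. On the zero-function side of this optional interface, one concrete use is element types without a canonical `zero`, such as arrays of matrices, where an unstored entry has to be materialized with the right shape. A hedged sketch using the positional zero-function argument of the `SparseArrayDOK` constructor from this library (`matrix_zero` is a hypothetical name):

using NDTensors.SparseArraysBase: SparseArrayDOK

# Hypothetical zero function: unstored entries are 2x2 zero matrices,
# since `zero(Matrix{Float64})` itself is not defined.
matrix_zero(a, I) = zeros(2, 2)
a = SparseArrayDOK{Matrix{Float64}}((3, 3), matrix_zero)
a[1, 1] = [1.0 2.0; 3.0 4.0]
a[2, 3]  # a 2x2 zero matrix, computed by `matrix_zero`
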
-dropall!(a::AbstractArray) = a - -# Overload -function sparse_similar(a::AbstractArray, elt::Type, dims::Tuple{Vararg{Int}}) - return similar(a, elt, dims) -end diff --git a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/map.jl b/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/map.jl deleted file mode 100644 index 0f9d9aad5f..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/map.jl +++ /dev/null @@ -1,120 +0,0 @@ -using Base.Broadcast: BroadcastStyle, combine_styles -using Compat: allequal -using LinearAlgebra: LinearAlgebra - -# Represents a value that isn't stored -# Used to hijack dispatch -struct NotStoredValue{Value} - value::Value -end -value(v::NotStoredValue) = v.value -stored_length(::NotStoredValue) = false -Base.:*(x::Number, y::NotStoredValue) = false -Base.:*(x::NotStoredValue, y::Number) = false -Base.:/(x::NotStoredValue, y::Number) = false -Base.:+(::NotStoredValue, ::NotStoredValue...) = false -Base.:-(::NotStoredValue, ::NotStoredValue...) = false -Base.:+(x::Number, ::NotStoredValue...) = x -Base.iszero(::NotStoredValue) = true -Base.isreal(::NotStoredValue) = true -Base.conj(x::NotStoredValue) = conj(value(x)) -Base.iterate(x::NotStoredValue) = (x, nothing) -Base.mapreduce(f, op, x::NotStoredValue) = f(x) -Base.zero(x::NotStoredValue) = zero(value(x)) -LinearAlgebra.norm(x::NotStoredValue, p::Real=2) = zero(value(x)) - -notstored_index(a::AbstractArray) = NotStoredIndex(first(eachindex(a))) - -# Get some not-stored value -function get_notstored(a::AbstractArray) - return sparse_getindex(a, notstored_index(a)) -end - -function apply_notstored(f, as::Vararg{AbstractArray}) - return apply(f, NotStoredValue.(get_notstored.(as))...) -end - -function apply(f, xs::Vararg{NotStoredValue}) - return f(xs...) - #return try - # return f(xs...) - #catch - # f(value(x)) - #end -end - -# Test if the function preserves zero values and therefore -# preserves the sparsity structure. -function preserves_zero(f, as...) - # return iszero(f(map(get_notstored, as)...)) - return iszero(apply_notstored(f, as...)) -end - -# Map a subset of indices -function sparse_map_indices!(f, a_dest::AbstractArray, indices, as::AbstractArray...) - for I in indices - a_dest[I] = f(map(a -> a[I], as)...) - end - return a_dest -end - -# Overload for custom `stored_indices` types. -function promote_indices(I1, I2) - return union(I1, I2) -end - -function promote_indices(I1, I2, Is...) - return promote_indices(promote_indices(I1, I2), Is...) -end - -# Base case -promote_indices(I) = I - -function promote_stored_indices(as::AbstractArray...) - return promote_indices(stored_indices.(as)...) -end - -function sparse_map_stored!(f, a_dest::AbstractArray, as::AbstractArray...) - # Need to zero out the destination. - sparse_zero!(a_dest) - Is = promote_stored_indices(as...) - sparse_map_indices!(f, a_dest, Is, as...) - return a_dest -end - -# Handle nonzero case, fill all values. -function sparse_map_all!(f, a_dest::AbstractArray, as::AbstractArray...) - Is = eachindex(a_dest) - sparse_map_indices!(f, a_dest, Is, as...) - return a_dest -end - -function sparse_map!(f, a_dest::AbstractArray, as::AbstractArray...) - return sparse_map!(combine_styles(as...), f, a_dest, as...) -end - -function sparse_map!(::BroadcastStyle, f, a_dest::AbstractArray, as::AbstractArray...) - @assert allequal(axes.((a_dest, as...))) - if preserves_zero(f, as...) - # Remove aliases to avoid overwriting inputs. - as = map(a -> Base.unalias(a_dest, a), as) - sparse_map_stored!(f, a_dest, as...) 
-  else
-    sparse_map_all!(f, a_dest, as...)
-  end
-  return a_dest
-end
-
-# `f::typeof(norm)`, `op::typeof(max)` used by `norm`.
-function reduce_init(f, op, a)
-  return f(zero(eltype(a)))
-end
-
-# TODO: Generalize to multiple arguments.
-# TODO: Define `sparse_mapreducedim!`.
-function sparse_mapreduce(f, op, a::AbstractArray; init=reduce_init(f, op, a), kwargs...)
-  output = mapreduce(f, op, sparse_storage(a); init, kwargs...)
-  f_notstored = apply_notstored(f, a)
-  @assert isequal(op(output, eltype(output)(f_notstored)), output)
-  return output
-end
diff --git a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/vectorinterface.jl b/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/vectorinterface.jl
deleted file mode 100644
index 787ec83019..0000000000
--- a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/vectorinterface.jl
+++ /dev/null
@@ -1,28 +0,0 @@
-using VectorInterface: zerovector!
-
-##################################################
-# TODO: Move to `DictionariesVectorInterfaceExt`.
-using VectorInterface: VectorInterface, scalartype, zerovector!, zerovector!!
-using Dictionaries: AbstractDictionary
-
-function VectorInterface.zerovector!(x::AbstractDictionary{<:Number})
-  return fill!(x, zero(scalartype(x)))
-end
-function VectorInterface.zerovector!(x::AbstractDictionary)
-  T = eltype(x)
-  for I in eachindex(x)
-    if isbitstype(T) || isassigned(x, I)
-      x[I] = zerovector!!(x[I])
-    else
-      x[I] = zero(eltype(x))
-    end
-  end
-  return x
-end
-##################################################
-
-function sparse_zerovector!(a::AbstractArray)
-  dropall!(a)
-  zerovector!(sparse_storage(a))
-  return a
-end
diff --git a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/wrappers.jl b/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/wrappers.jl
deleted file mode 100644
index a4fce3bb0c..0000000000
--- a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/wrappers.jl
+++ /dev/null
@@ -1,51 +0,0 @@
-using ..NestedPermutedDimsArrays: NestedPermutedDimsArray
-
-## PermutedDimsArray
-
-const AnyPermutedDimsArray{T,N,perm,iperm,P} = Union{
-  PermutedDimsArray{T,N,perm,iperm,P},NestedPermutedDimsArray{T,N,perm,iperm,P}
-}
-
-# TODO: Use `TypeParameterAccessors`.
-perm(::AnyPermutedDimsArray{<:Any,<:Any,Perm}) where {Perm} = Perm
-iperm(::AnyPermutedDimsArray{<:Any,<:Any,<:Any,IPerm}) where {IPerm} = IPerm
-
-# TODO: Use `Base.PermutedDimsArrays.genperm` or
-# https://github.com/jipolanco/StaticPermutations.jl?
-genperm(v, perm) = map(j -> v[j], perm)
-genperm(v::CartesianIndex, perm) = CartesianIndex(map(j -> Tuple(v)[j], perm))
-
-function storage_index_to_index(a::AnyPermutedDimsArray, I)
-  return genperm(storage_index_to_index(parent(a), I), perm(a))
-end
-
-function index_to_storage_index(
-  a::AnyPermutedDimsArray{<:Any,N}, I::CartesianIndex{N}
-) where {N}
-  return index_to_storage_index(parent(a), genperm(I, perm(a)))
-end
-
-# TODO: Add `getindex_zero_function` definition?
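The two translations above route through the wrapper's permutation: stored indices of the parent are translated outward on the way to the caller, and outer indices are translated back toward the parent before the storage lookup. A small worked example of `genperm`, assuming the definitions above:

genperm((10, 20, 30), (3, 1, 2))             # (30, 10, 20)
genperm(CartesianIndex(1, 2, 3), (3, 1, 2))  # CartesianIndex(3, 1, 2)
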
- -## SubArray - -function map_index( - indices::Tuple{Vararg{Any,N}}, cartesian_index::CartesianIndex{N} -) where {N} - index = Tuple(cartesian_index) - new_index = ntuple(length(indices)) do i - findfirst(==(index[i]), indices[i]) - end - any(isnothing, new_index) && return nothing - return CartesianIndex(new_index) -end - -function storage_index_to_index(a::SubArray, I) - return storage_index_to_index(parent(a), I) -end - -function index_to_storage_index(a::SubArray{<:Any,N}, I::CartesianIndex{N}) where {N} - return index_to_storage_index(parent(a), I) -end - -getindex_zero_function(a::SubArray) = getindex_zero_function(parent(a)) diff --git a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/zero.jl b/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/zero.jl deleted file mode 100644 index 72899b4445..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/src/sparsearrayinterface/zero.jl +++ /dev/null @@ -1,5 +0,0 @@ -# Represents a zero value and an index -# TODO: Rename `GetIndexZero`? -struct Zero end -(f::Zero)(a::AbstractArray, I) = f(eltype(a), I) -(::Zero)(type::Type, I) = zero(type) diff --git a/NDTensors/src/lib/SparseArraysBase/test/Project.toml b/NDTensors/src/lib/SparseArraysBase/test/Project.toml deleted file mode 100644 index d674f61e4e..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/test/Project.toml +++ /dev/null @@ -1,7 +0,0 @@ -[deps] -ArrayLayouts = "4c555306-a7a7-4459-81d9-ec55ddd5c99a" -Compat = "34da2185-b29b-5c13-b0c7-acf172513d20" -Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" -SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/NDTensors/src/lib/SparseArraysBase/test/SparseArraysBaseTestUtils/AbstractSparseArrays.jl b/NDTensors/src/lib/SparseArraysBase/test/SparseArraysBaseTestUtils/AbstractSparseArrays.jl deleted file mode 100644 index 5ae5f5c1ff..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/test/SparseArraysBaseTestUtils/AbstractSparseArrays.jl +++ /dev/null @@ -1,74 +0,0 @@ -module AbstractSparseArrays -using ArrayLayouts: ArrayLayouts, MatMulMatAdd, MemoryLayout, MulAdd -using NDTensors.SparseArraysBase: SparseArraysBase, AbstractSparseArray, Zero - -struct SparseArray{T,N,Zero} <: AbstractSparseArray{T,N} - data::Vector{T} - dims::Tuple{Vararg{Int,N}} - index_to_dataindex::Dict{CartesianIndex{N},Int} - dataindex_to_index::Vector{CartesianIndex{N}} - zero::Zero -end -function SparseArray{T,N}(dims::Tuple{Vararg{Int,N}}; zero=Zero()) where {T,N} - return SparseArray{T,N,typeof(zero)}( - T[], dims, Dict{CartesianIndex{N},Int}(), Vector{CartesianIndex{N}}(), zero - ) -end -function SparseArray{T,N}(dims::Vararg{Int,N}; kwargs...) where {T,N} - return SparseArray{T,N}(dims; kwargs...) -end -function SparseArray{T}(dims::Tuple{Vararg{Int}}; kwargs...) where {T} - return SparseArray{T,length(dims)}(dims; kwargs...) -end -function SparseArray{T}(::UndefInitializer, dims::Tuple{Vararg{Int}}; kwargs...) where {T} - return SparseArray{T}(dims; kwargs...) -end -SparseArray{T}(dims::Vararg{Int}; kwargs...) where {T} = SparseArray{T}(dims; kwargs...) 
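This test type is a coordinate-format sparse array: values live in a plain `Vector`, with a `Dict` mapping outer indices to data positions and a `Vector` mapping back. Once the "Minimal interface" methods below are defined, indexing is assumed to work through the generic `SparseArraysBase` machinery. A hedged usage sketch, assuming this module:

a = SparseArray{Float64,2}((2, 3))
a[1, 2] = 12.0        # `setindex_notstored!` appends to `data` and records both index maps
a.data                # [12.0]
a.dataindex_to_index  # [CartesianIndex(1, 2)]
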
- -# ArrayLayouts interface -struct SparseLayout <: MemoryLayout end -ArrayLayouts.MemoryLayout(::Type{<:SparseArray}) = SparseLayout() -function Base.similar(::MulAdd{<:SparseLayout,<:SparseLayout}, elt::Type, axes) - return similar(SparseArray{elt}, axes) -end -function ArrayLayouts.materialize!( - m::MatMulMatAdd{<:SparseLayout,<:SparseLayout,<:SparseLayout} -) - α, a1, a2, β, a_dest = m.α, m.A, m.B, m.β, m.C - SparseArraysBase.sparse_mul!(a_dest, a1, a2, α, β) - return a_dest -end - -# AbstractArray interface -Base.size(a::SparseArray) = a.dims -function Base.similar(a::SparseArray, elt::Type, dims::Tuple{Vararg{Int}}) - return SparseArray{elt}(dims) -end - -# Minimal interface -SparseArraysBase.getindex_zero_function(a::SparseArray) = a.zero -SparseArraysBase.sparse_storage(a::SparseArray) = a.data -function SparseArraysBase.index_to_storage_index( - a::SparseArray{<:Any,N}, I::CartesianIndex{N} -) where {N} - return get(a.index_to_dataindex, I, nothing) -end -SparseArraysBase.storage_index_to_index(a::SparseArray, I) = a.dataindex_to_index[I] -function SparseArraysBase.setindex_notstored!( - a::SparseArray{<:Any,N}, value, I::CartesianIndex{N} -) where {N} - push!(a.data, value) - push!(a.dataindex_to_index, I) - a.index_to_dataindex[I] = length(a.data) - return a -end - -# Empty the storage, helps with efficiency in `map!` to drop -# zeros. -function SparseArraysBase.dropall!(a::SparseArray) - empty!(a.data) - empty!(a.index_to_dataindex) - empty!(a.dataindex_to_index) - return a -end -end diff --git a/NDTensors/src/lib/SparseArraysBase/test/SparseArraysBaseTestUtils/DiagonalArrays.jl b/NDTensors/src/lib/SparseArraysBase/test/SparseArraysBaseTestUtils/DiagonalArrays.jl deleted file mode 100644 index 394a622694..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/test/SparseArraysBaseTestUtils/DiagonalArrays.jl +++ /dev/null @@ -1,95 +0,0 @@ -module DiagonalArrays -using NDTensors.SparseArraysBase: SparseArraysBase - -struct DiagonalArray{T,N} <: AbstractArray{T,N} - data::Vector{T} - dims::Tuple{Vararg{Int,N}} -end -function DiagonalArray{T,N}(::UndefInitializer, dims::Tuple{Vararg{Int,N}}) where {T,N} - return DiagonalArray{T,N}(Vector{T}(undef, minimum(dims)), dims) -end -function DiagonalArray{T,N}(::UndefInitializer, dims::Vararg{Int,N}) where {T,N} - return DiagonalArray{T,N}(undef, dims) -end -function DiagonalArray{T}(::UndefInitializer, dims::Tuple{Vararg{Int}}) where {T} - return DiagonalArray{T,length(dims)}(undef, dims) -end -function DiagonalArray{T}(::UndefInitializer, dims::Vararg{Int}) where {T} - return DiagonalArray{T}(undef, dims) -end - -# AbstractArray interface -Base.size(a::DiagonalArray) = a.dims -function Base.getindex(a::DiagonalArray, I...) - return SparseArraysBase.sparse_getindex(a, I...) -end -function Base.setindex!(a::DiagonalArray, I...) - return SparseArraysBase.sparse_setindex!(a, I...) 
-end -function Base.similar(a::DiagonalArray, elt::Type, dims::Tuple{Vararg{Int}}) - return DiagonalArray{elt}(undef, dims) -end - -# Minimal interface -SparseArraysBase.sparse_storage(a::DiagonalArray) = a.data -function SparseArraysBase.index_to_storage_index( - a::DiagonalArray{<:Any,N}, I::CartesianIndex{N} -) where {N} - !allequal(Tuple(I)) && return nothing - return first(Tuple(I)) -end -function SparseArraysBase.storage_index_to_index(a::DiagonalArray, I) - return CartesianIndex(ntuple(Returns(I), ndims(a))) -end -function SparseArraysBase.sparse_similar( - a::DiagonalArray, elt::Type, dims::Tuple{Vararg{Int}} -) - return Array{elt}(undef, dims) -end -function SparseArraysBase.sparse_similar(a::DiagonalArray, elt::Type, dims::Tuple{Int}) - return similar(a, elt, dims) -end - -# Broadcasting -function Broadcast.BroadcastStyle(arraytype::Type{<:DiagonalArray}) - return SparseArraysBase.SparseArrayStyle{ndims(arraytype)}() -end - -# Base -function Base.iszero(a::DiagonalArray) - return SparseArraysBase.sparse_iszero(a) -end -function Base.isreal(a::DiagonalArray) - return SparseArraysBase.sparse_isreal(a) -end -function Base.zero(a::DiagonalArray) - return SparseArraysBase.sparse_zero(a) -end -function Base.one(a::DiagonalArray) - return SparseArraysBase.sparse_one(a) -end -function Base.:(==)(a1::DiagonalArray, a2::DiagonalArray) - return SparseArraysBase.sparse_isequal(a1, a2) -end -function Base.reshape(a::DiagonalArray, dims::Tuple{Vararg{Int}}) - return SparseArraysBase.sparse_reshape(a, dims) -end - -# Map -function Base.map!(f, dest::AbstractArray, src::DiagonalArray) - SparseArraysBase.sparse_map!(f, dest, src) - return dest -end -function Base.copy!(dest::AbstractArray, src::DiagonalArray) - SparseArraysBase.sparse_copy!(dest, src) - return dest -end -function Base.copyto!(dest::AbstractArray, src::DiagonalArray) - SparseArraysBase.sparse_copyto!(dest, src) - return dest -end -function Base.permutedims!(dest::AbstractArray, src::DiagonalArray, perm) - SparseArraysBase.sparse_permutedims!(dest, src, perm) - return dest -end -end diff --git a/NDTensors/src/lib/SparseArraysBase/test/SparseArraysBaseTestUtils/Project.toml b/NDTensors/src/lib/SparseArraysBase/test/SparseArraysBaseTestUtils/Project.toml deleted file mode 100644 index 9b1d5ccd25..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/test/SparseArraysBaseTestUtils/Project.toml +++ /dev/null @@ -1,2 +0,0 @@ -[deps] -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" diff --git a/NDTensors/src/lib/SparseArraysBase/test/SparseArraysBaseTestUtils/SparseArrays.jl b/NDTensors/src/lib/SparseArraysBase/test/SparseArraysBaseTestUtils/SparseArrays.jl deleted file mode 100644 index 082adb173d..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/test/SparseArraysBaseTestUtils/SparseArrays.jl +++ /dev/null @@ -1,166 +0,0 @@ -module SparseArrays -using LinearAlgebra: LinearAlgebra -using NDTensors.SparseArraysBase: SparseArraysBase, Zero - -struct SparseArray{T,N,Zero} <: AbstractArray{T,N} - data::Vector{T} - dims::Tuple{Vararg{Int,N}} - index_to_dataindex::Dict{CartesianIndex{N},Int} - dataindex_to_index::Vector{CartesianIndex{N}} - zero::Zero -end -function SparseArray{T,N}(dims::Tuple{Vararg{Int,N}}; zero=Zero()) where {T,N} - return SparseArray{T,N,typeof(zero)}( - T[], dims, Dict{CartesianIndex{N},Int}(), Vector{CartesianIndex{N}}(), zero - ) -end -function SparseArray{T,N}(dims::Vararg{Int,N}; kwargs...) where {T,N} - return SparseArray{T,N}(dims; kwargs...) -end -function SparseArray{T}(dims::Tuple{Vararg{Int}}; kwargs...) 
where {T} - return SparseArray{T,length(dims)}(dims; kwargs...) -end -function SparseArray{T}(::UndefInitializer, dims::Tuple{Vararg{Int}}; kwargs...) where {T} - return SparseArray{T}(dims; kwargs...) -end -SparseArray{T}(dims::Vararg{Int}; kwargs...) where {T} = SparseArray{T}(dims; kwargs...) - -# LinearAlgebra interface -function LinearAlgebra.mul!( - a_dest::AbstractMatrix, - a1::SparseArray{<:Any,2}, - a2::SparseArray{<:Any,2}, - α::Number, - β::Number, -) - SparseArraysBase.sparse_mul!(a_dest, a1, a2, α, β) - return a_dest -end - -function LinearAlgebra.dot(a1::SparseArray, a2::SparseArray) - return SparseArraysBase.sparse_dot(a1, a2) -end - -# AbstractArray interface -Base.size(a::SparseArray) = a.dims -function Base.similar(a::SparseArray, elt::Type, dims::Tuple{Vararg{Int}}) - return SparseArray{elt}(dims) -end - -function Base.getindex(a::SparseArray, I...) - return SparseArraysBase.sparse_getindex(a, I...) -end -function Base.setindex!(a::SparseArray, value, I...) - return SparseArraysBase.sparse_setindex!(a, value, I...) -end -function Base.fill!(a::SparseArray, value) - return SparseArraysBase.sparse_fill!(a, value) -end - -# Minimal interface -SparseArraysBase.getindex_zero_function(a::SparseArray) = a.zero -SparseArraysBase.sparse_storage(a::SparseArray) = a.data -function SparseArraysBase.index_to_storage_index( - a::SparseArray{<:Any,N}, I::CartesianIndex{N} -) where {N} - return get(a.index_to_dataindex, I, nothing) -end -SparseArraysBase.storage_index_to_index(a::SparseArray, I) = a.dataindex_to_index[I] -function SparseArraysBase.setindex_notstored!( - a::SparseArray{<:Any,N}, value, I::CartesianIndex{N} -) where {N} - push!(a.data, value) - push!(a.dataindex_to_index, I) - a.index_to_dataindex[I] = length(a.data) - return a -end - -# TODO: Make this into a generic definition of all `AbstractArray`? -using NDTensors.SparseArraysBase: perm, stored_indices -function SparseArraysBase.stored_indices( - a::PermutedDimsArray{<:Any,<:Any,<:Any,<:Any,<:SparseArray} -) - return Iterators.map( - I -> CartesianIndex(map(i -> I[i], perm(a))), stored_indices(parent(a)) - ) -end - -# TODO: Make this into a generic definition of all `AbstractArray`? -using NDTensors.SparseArraysBase: sparse_storage -function SparseArraysBase.sparse_storage( - a::PermutedDimsArray{<:Any,<:Any,<:Any,<:Any,<:SparseArray} -) - return sparse_storage(parent(a)) -end - -# TODO: Make this into a generic definition of all `AbstractArray`? -using NDTensors.NestedPermutedDimsArrays: NestedPermutedDimsArray -function SparseArraysBase.stored_indices( - a::NestedPermutedDimsArray{<:Any,<:Any,<:Any,<:Any,<:SparseArray} -) - return Iterators.map( - I -> CartesianIndex(map(i -> I[i], perm(a))), stored_indices(parent(a)) - ) -end - -# TODO: Make this into a generic definition of all `AbstractArray`? -using NDTensors.NestedPermutedDimsArrays: NestedPermutedDimsArray -using NDTensors.SparseArraysBase: sparse_storage -function SparseArraysBase.sparse_storage( - a::NestedPermutedDimsArray{<:Any,<:Any,<:Any,<:Any,<:SparseArray} -) - return sparse_storage(parent(a)) -end - -# Empty the storage, helps with efficiency in `map!` to drop -# zeros. 
-function SparseArraysBase.dropall!(a::SparseArray) - empty!(a.data) - empty!(a.index_to_dataindex) - empty!(a.dataindex_to_index) - return a -end - -# Broadcasting -function Broadcast.BroadcastStyle(arraytype::Type{<:SparseArray}) - return SparseArraysBase.SparseArrayStyle{ndims(arraytype)}() -end - -# Map -function Base.map!(f, dest::AbstractArray, src::SparseArray) - SparseArraysBase.sparse_map!(f, dest, src) - return dest -end -function Base.copy!(dest::AbstractArray, src::SparseArray) - SparseArraysBase.sparse_copy!(dest, src) - return dest -end -function Base.copyto!(dest::AbstractArray, src::SparseArray) - SparseArraysBase.sparse_copyto!(dest, src) - return dest -end -function Base.permutedims!(dest::AbstractArray, src::SparseArray, perm) - SparseArraysBase.sparse_permutedims!(dest, src, perm) - return dest -end - -# Base -function Base.:(==)(a1::SparseArray, a2::SparseArray) - return SparseArraysBase.sparse_isequal(a1, a2) -end -function Base.reshape(a::SparseArray, dims::Tuple{Vararg{Int}}) - return SparseArraysBase.sparse_reshape(a, dims) -end -function Base.iszero(a::SparseArray) - return SparseArraysBase.sparse_iszero(a) -end -function Base.isreal(a::SparseArray) - return SparseArraysBase.sparse_isreal(a) -end -function Base.zero(a::SparseArray) - return SparseArraysBase.sparse_zero(a) -end -function Base.one(a::SparseArray) - return SparseArraysBase.sparse_one(a) -end -end diff --git a/NDTensors/src/lib/SparseArraysBase/test/SparseArraysBaseTestUtils/SparseArraysBaseTestUtils.jl b/NDTensors/src/lib/SparseArraysBase/test/SparseArraysBaseTestUtils/SparseArraysBaseTestUtils.jl deleted file mode 100644 index 9067f9df96..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/test/SparseArraysBaseTestUtils/SparseArraysBaseTestUtils.jl +++ /dev/null @@ -1,5 +0,0 @@ -module SparseArraysBaseTestUtils -include("AbstractSparseArrays.jl") -include("DiagonalArrays.jl") -include("SparseArrays.jl") -end diff --git a/NDTensors/src/lib/SparseArraysBase/test/runtests.jl b/NDTensors/src/lib/SparseArraysBase/test/runtests.jl deleted file mode 100644 index 40340f5eaa..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/test/runtests.jl +++ /dev/null @@ -1,5 +0,0 @@ -@eval module $(gensym()) -for filename in ["sparsearraydok", "abstractsparsearray", "array", "diagonalarray"] - include("test_$filename.jl") -end -end diff --git a/NDTensors/src/lib/SparseArraysBase/test/test_abstractsparsearray.jl b/NDTensors/src/lib/SparseArraysBase/test/test_abstractsparsearray.jl deleted file mode 100644 index b64955cb55..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/test/test_abstractsparsearray.jl +++ /dev/null @@ -1,439 +0,0 @@ -@eval module $(gensym()) -using LinearAlgebra: dot, mul!, norm -using NDTensors.SparseArraysBase: SparseArraysBase -using NDTensors.NestedPermutedDimsArrays: NestedPermutedDimsArray -include("SparseArraysBaseTestUtils/SparseArraysBaseTestUtils.jl") -using .SparseArraysBaseTestUtils.AbstractSparseArrays: AbstractSparseArrays -using .SparseArraysBaseTestUtils.SparseArrays: SparseArrays -using Test: @test, @testset -@testset "AbstractSparseArray (arraytype=$SparseArray, eltype=$elt)" for SparseArray in ( - AbstractSparseArrays.SparseArray, SparseArrays.SparseArray - ), - elt in (Float32, ComplexF32, Float64, ComplexF64) - - a = SparseArray{elt}(2, 3) - @test size(a) == (2, 3) - @test axes(a) == (1:2, 1:3) - @test SparseArraysBase.sparse_storage(a) == elt[] - @test iszero(SparseArraysBase.stored_length(a)) - @test collect(SparseArraysBase.stored_indices(a)) == CartesianIndex{2}[] - @test 
iszero(a) - @test iszero(norm(a)) - for I in eachindex(a) - @test iszero(a) - end - for I in CartesianIndices(a) - @test isassigned(a, Tuple(I)...) - @test isassigned(a, I) - end - @test !isassigned(a, 0, 1) - @test !isassigned(a, CartesianIndex(0, 1)) - @test !isassigned(a, 1, 4) - @test !isassigned(a, CartesianIndex(1, 4)) - - a = SparseArray{elt}(2, 3) - fill!(a, 0) - @test size(a) == (2, 3) - @test iszero(a) - @test iszero(SparseArraysBase.stored_length(a)) - - a_dense = SparseArraysBase.densearray(a) - @test a_dense == a - @test a_dense isa Array{elt,ndims(a)} - - a = SparseArray{elt}(2, 3) - fill!(a, 2) - @test size(a) == (2, 3) - @test !iszero(a) - @test SparseArraysBase.stored_length(a) == length(a) - for I in eachindex(a) - @test a[I] == 2 - end - - a = SparseArray{elt}(2, 3) - a[1, 2] = 12 - @test a[1, 2] == 12 - @test a[3] == 12 # linear indexing - @test size(a) == (2, 3) - @test axes(a) == (1:2, 1:3) - @test a[SparseArraysBase.StorageIndex(1)] == 12 - @test SparseArraysBase.sparse_storage(a) == elt[12] - @test isone(SparseArraysBase.stored_length(a)) - @test collect(SparseArraysBase.stored_indices(a)) == [CartesianIndex(1, 2)] - @test !iszero(a) - @test !iszero(norm(a)) - for I in eachindex(a) - if I == CartesianIndex(1, 2) - @test a[I] == 12 - else - @test iszero(a[I]) - end - end - for I in CartesianIndices(a) - @test isassigned(a, Tuple(I)...) - @test isassigned(a, I) - end - @test !isassigned(a, 0, 1) - @test !isassigned(a, CartesianIndex(0, 1)) - @test !isassigned(a, 1, 4) - @test !isassigned(a, CartesianIndex(1, 4)) - - a = SparseArray{elt}(2, 3) - a[1, 2] = 12 - a = map(x -> 2x, a) - for I in eachindex(a) - if I == CartesianIndex(1, 2) - @test a[I] == 2 * 12 - else - @test iszero(a[I]) - end - end - - a = SparseArray{elt}(2, 2, 2) - a[1, 2, 2] = 122 - a_r = reshape(a, 2, 4) - @test a_r[1, 4] == a[1, 2, 2] == 122 - - a = SparseArray{elt}(2, 3) - a[1, 2] = 12 - a = zero(a) - @test size(a) == (2, 3) - @test iszero(SparseArraysBase.stored_length(a)) - - a = SparseArray{elt}(2, 3) - a[1, 2] = 12 - b = SparseArray{elt}(2, 3) - b[2, 1] = 21 - @test a == a - @test a ≠ b - - a = SparseArray{elt}(2, 3) - a[1, 2] = 12 - @test isreal(a) - - a = SparseArray{elt}(2, 3) - a[1, 2] = randn(elt) - b = copy(a) - conj!(b) - for I in eachindex(a) - @test conj(a[I]) == b[I] - end - - a = SparseArray{elt}(2, 3) - a[1, 2] = randn(elt) - b = conj(a) - for I in eachindex(a) - @test conj(a[I]) == b[I] - end - - if !(elt <: Real) - a = SparseArray{elt}(2, 3) - a[1, 2] = 12 + 12im - @test !isreal(a) - end - - a = SparseArray{elt}(2, 2) - a[1, 2] = 12 - a = one(a) - @test size(a) == (2, 2) - @test isone(a[1, 1]) - @test isone(a[2, 2]) - @test iszero(a[1, 2]) - @test iszero(a[2, 1]) - - a = SparseArray{elt}(2, 3) - a[1, 2] = 12 - a = zero(a) - @test size(a) == (2, 3) - @test iszero(SparseArraysBase.stored_length(a)) - - a = SparseArray{elt}(2, 3) - a[1, 2] = 12 - a = copy(a) - @test size(a) == (2, 3) - @test axes(a) == (1:2, 1:3) - @test SparseArraysBase.sparse_storage(a) == elt[12] - @test isone(SparseArraysBase.stored_length(a)) - @test SparseArraysBase.storage_indices(a) == 1:1 - @test collect(SparseArraysBase.stored_indices(a)) == [CartesianIndex(1, 2)] - @test !iszero(a) - @test !iszero(norm(a)) - for I in eachindex(a) - if I == CartesianIndex(1, 2) - @test a[I] == 12 - else - @test iszero(a[I]) - end - end - - a = SparseArray{elt}(2, 3) - a[1, 2] = 12 - a = 2 * a - @test size(a) == (2, 3) - @test axes(a) == (1:2, 1:3) - @test SparseArraysBase.sparse_storage(a) == elt[24] - @test 
isone(SparseArraysBase.stored_length(a)) - @test collect(SparseArraysBase.stored_indices(a)) == [CartesianIndex(1, 2)] - @test !iszero(a) - @test !iszero(norm(a)) - for I in eachindex(a) - if I == CartesianIndex(1, 2) - @test a[I] == 24 - else - @test iszero(a[I]) - end - end - - a = SparseArray{elt}(2, 3) - a[1, 2] = 12 - b = SparseArray{elt}(2, 3) - b[2, 1] = 21 - c = a + b - @test size(c) == (2, 3) - @test axes(c) == (1:2, 1:3) - @test SparseArraysBase.sparse_storage(c) == elt[12, 21] - @test SparseArraysBase.stored_length(c) == 2 - @test collect(SparseArraysBase.stored_indices(c)) == - [CartesianIndex(1, 2), CartesianIndex(2, 1)] - @test !iszero(c) - @test !iszero(norm(c)) - for I in eachindex(c) - if I == CartesianIndex(1, 2) - @test c[I] == 12 - elseif I == CartesianIndex(2, 1) - @test c[I] == 21 - else - @test iszero(c[I]) - end - end - - a = SparseArray{elt}(2, 3) - a[1, 2] = 12 - b = permutedims(a, (2, 1)) - @test size(b) == (3, 2) - @test axes(b) == (1:3, 1:2) - @test SparseArraysBase.sparse_storage(b) == elt[12] - @test SparseArraysBase.stored_length(b) == 1 - @test collect(SparseArraysBase.stored_indices(b)) == [CartesianIndex(2, 1)] - @test !iszero(b) - @test !iszero(norm(b)) - for I in eachindex(b) - if I == CartesianIndex(2, 1) - @test b[I] == 12 - else - @test iszero(b[I]) - end - end - - a = SparseArray{elt}(2, 3) - a[1, 2] = 12 - b = PermutedDimsArray(a, (2, 1)) - @test size(b) == (3, 2) - @test axes(b) == (1:3, 1:2) - @test SparseArraysBase.sparse_storage(b) == elt[12] - @test SparseArraysBase.stored_length(b) == 1 - @test collect(SparseArraysBase.stored_indices(b)) == [CartesianIndex(2, 1)] - @test !iszero(b) - @test !iszero(norm(b)) - for I in eachindex(b) - if I == CartesianIndex(2, 1) - @test b[I] == 12 - else - @test iszero(b[I]) - end - end - - a = SparseArray{Matrix{elt}}( - 2, 3; zero=(a, I) -> (z = similar(eltype(a), 2, 3); fill!(z, false); z) - ) - a[1, 2] = randn(elt, 2, 3) - b = NestedPermutedDimsArray(a, (2, 1)) - @test size(b) == (3, 2) - @test axes(b) == (1:3, 1:2) - @test SparseArraysBase.sparse_storage(b) == [a[1, 2]] - @test SparseArraysBase.stored_length(b) == 1 - @test collect(SparseArraysBase.stored_indices(b)) == [CartesianIndex(2, 1)] - @test !iszero(b) - @test !iszero(norm(b)) - for I in eachindex(b) - if I == CartesianIndex(2, 1) - @test b[I] == permutedims(a[1, 2], (2, 1)) - else - @test iszero(b[I]) - end - end - - a = SparseArray{elt}(2, 3) - a[1, 2] = 12 - b = randn(elt, 2, 3) - b .= a - @test a == b - for I in eachindex(a) - @test a[I] == b[I] - end - - a = SparseArray{elt}(2, 3) - a[1, 2] = 12 - b = randn(elt, 2, 3) - b .= 2 .* a - @test 2 * a == b - for I in eachindex(a) - @test 2 * a[I] == b[I] - end - - a = SparseArray{elt}(2, 3) - a[1, 2] = 12 - b = randn(elt, 2, 3) - b .= 2 .+ a - @test 2 .+ a == b - for I in eachindex(a) - @test 2 + a[I] == b[I] - end - - a = SparseArray{elt}(2, 3) - a[1, 2] = 12 - b = randn(elt, 2, 3) - map!(x -> 2x, b, a) - @test 2 * a == b - for I in eachindex(a) - @test 2 * a[I] == b[I] - end - - a = SparseArray{elt}(2, 3) - a[1, 2] = 12 - b = zeros(elt, 2, 3) - b[2, 1] = 21 - @test Array(a) == a - @test a + b == Array(a) + b - @test b + a == Array(a) + b - @test b .+ 2 .* a == 2 * Array(a) + b - @test a .+ 2 .* b == Array(a) + 2b - @test a + b isa Matrix{elt} - @test b + a isa Matrix{elt} - @test SparseArraysBase.stored_length(a + b) == length(a) - - a = SparseArray{elt}(2, 3) - a[1, 2] = 12 - b = zeros(elt, 2, 3) - b[2, 1] = 21 - a′ = copy(a) - a′ .+= b - @test a′ == a + b - # TODO: Should this be: - # ```julia 
- # @test SparseArraysBase.stored_length(a′) == 2 - # ``` - # ? I.e. should it only store the nonzero values? - @test SparseArraysBase.stored_length(a′) == 6 - - # Matrix multiplication - a1 = SparseArray{elt}(2, 3) - a1[1, 2] = 12 - a1[2, 1] = 21 - a2 = SparseArray{elt}(3, 4) - a2[1, 1] = 11 - a2[2, 2] = 22 - a_dest = a1 * a2 - @test Array(a_dest) ≈ Array(a1) * Array(a2) - @test a_dest isa SparseArray{elt} - @test SparseArraysBase.stored_length(a_dest) == 2 - - # Dot product - a1 = SparseArray{elt}(4) - a1[1] = randn() - a1[3] = randn() - a2 = SparseArray{elt}(4) - a2[2] = randn() - a2[3] = randn() - a_dest = a1' * a2 - @test a_dest isa elt - @test a_dest ≈ Array(a1)' * Array(a2) - @test a_dest ≈ dot(a1, a2) - - # In-place matrix multiplication - a1 = SparseArray{elt}(2, 3) - a1[1, 2] = 12 - a1[2, 1] = 21 - a2 = SparseArray{elt}(3, 4) - a2[1, 1] = 11 - a2[2, 2] = 22 - a_dest = SparseArray{elt}(2, 4) - mul!(a_dest, a1, a2) - @test Array(a_dest) ≈ Array(a1) * Array(a2) - @test a_dest isa SparseArray{elt} - @test SparseArraysBase.stored_length(a_dest) == 2 - - # In-place matrix multiplication - a1 = SparseArray{elt}(2, 3) - a1[1, 2] = 12 - a1[2, 1] = 21 - a2 = SparseArray{elt}(3, 4) - a2[1, 1] = 11 - a2[2, 2] = 22 - a_dest = SparseArray{elt}(2, 4) - a_dest[1, 2] = 12 - a_dest[2, 1] = 21 - α = elt(2) - β = elt(3) - a_dest′ = copy(a_dest) - mul!(a_dest, a1, a2, α, β) - @test Array(a_dest) ≈ Array(a1) * Array(a2) * α + Array(a_dest′) * β - @test a_dest isa SparseArray{elt} - @test SparseArraysBase.stored_length(a_dest) == 2 - - # cat - a1 = SparseArray{elt}(2, 3) - a1[1, 2] = 12 - a1[2, 1] = 21 - a2 = SparseArray{elt}(2, 3) - a2[1, 1] = 11 - a2[2, 2] = 22 - - a_dest = cat(a1, a2; dims=1) - @test size(a_dest) == (4, 3) - @test SparseArraysBase.stored_length(a_dest) == 4 - @test a_dest[1, 2] == a1[1, 2] - @test a_dest[2, 1] == a1[2, 1] - @test a_dest[3, 1] == a2[1, 1] - @test a_dest[4, 2] == a2[2, 2] - - a_dest = cat(a1, a2; dims=2) - @test size(a_dest) == (2, 6) - @test SparseArraysBase.stored_length(a_dest) == 4 - @test a_dest[1, 2] == a1[1, 2] - @test a_dest[2, 1] == a1[2, 1] - @test a_dest[1, 4] == a2[1, 1] - @test a_dest[2, 5] == a2[2, 2] - - a_dest = cat(a1, a2; dims=(1, 2)) - @test size(a_dest) == (4, 6) - @test SparseArraysBase.stored_length(a_dest) == 4 - @test a_dest[1, 2] == a1[1, 2] - @test a_dest[2, 1] == a1[2, 1] - @test a_dest[3, 4] == a2[1, 1] - @test a_dest[4, 5] == a2[2, 2] - - ## # Sparse matrix of matrix multiplication - ## TODO: Make this work, seems to require - ## a custom zero constructor. 
- ## a1 = SparseArray{Matrix{elt}}(2, 3) - ## a1[1, 1] = zeros(elt, (2, 3)) - ## a1[1, 2] = randn(elt, (2, 3)) - ## a1[2, 1] = randn(elt, (2, 3)) - ## a1[2, 2] = zeros(elt, (2, 3)) - ## a2 = SparseArray{Matrix{elt}}(3, 4) - ## a2[1, 1] = randn(elt, (3, 4)) - ## a2[1, 2] = zeros(elt, (3, 4)) - ## a2[2, 2] = randn(elt, (3, 4)) - ## a2[2, 1] = zeros(elt, (3, 4)) - ## a_dest = SparseArray{Matrix{elt}}(2, 4) - ## a_dest[1, 1] = zeros(elt, (2, 4)) - ## a_dest[1, 2] = zeros(elt, (2, 4)) - ## a_dest[2, 2] = zeros(elt, (2, 4)) - ## a_dest[2, 1] = zeros(elt, (2, 4)) - ## mul!(a_dest, a1, a2) - ## @test Array(a_dest) ≈ Array(a1) * Array(a2) - ## @test a_dest isa SparseArray{Matrix{elt}} - ## @test SparseArraysBase.stored_length(a_dest) == 2 -end -end diff --git a/NDTensors/src/lib/SparseArraysBase/test/test_array.jl b/NDTensors/src/lib/SparseArraysBase/test/test_array.jl deleted file mode 100644 index 0037412edf..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/test/test_array.jl +++ /dev/null @@ -1,13 +0,0 @@ -@eval module $(gensym()) -using NDTensors.SparseArraysBase: SparseArraysBase -include("SparseArraysBaseTestUtils/SparseArraysBaseTestUtils.jl") -using Test: @test, @testset -@testset "Array (eltype=$elt)" for elt in (Float32, ComplexF32, Float64, ComplexF64) - a = randn(2, 3) - @test SparseArraysBase.sparse_storage(a) == a - @test SparseArraysBase.index_to_storage_index(a, CartesianIndex(1, 2)) == - CartesianIndex(1, 2) - @test SparseArraysBase.storage_index_to_index(a, CartesianIndex(1, 2)) == - CartesianIndex(1, 2) -end -end diff --git a/NDTensors/src/lib/SparseArraysBase/test/test_diagonalarray.jl b/NDTensors/src/lib/SparseArraysBase/test/test_diagonalarray.jl deleted file mode 100644 index 18075e3d35..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/test/test_diagonalarray.jl +++ /dev/null @@ -1,76 +0,0 @@ -@eval module $(gensym()) -using LinearAlgebra: norm -using NDTensors.SparseArraysBase: SparseArraysBase -include("SparseArraysBaseTestUtils/SparseArraysBaseTestUtils.jl") -using .SparseArraysBaseTestUtils.DiagonalArrays: DiagonalArray -using Test: @test, @testset, @test_throws -@testset "DiagonalArray (eltype=$elt)" for elt in (Float32, ComplexF32, Float64, ComplexF64) - # TODO: Test `fill!`.
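A sketch of what that missing `fill!` test could look like. The assumption that a nonzero fill value throws an `ArgumentError` (since off-diagonal entries cannot store it) mirrors the off-diagonal `setindex!` behavior tested just below, but is not confirmed by this patch:

```julia
# Hypothetical `fill!` test; the error type for nonzero fill values is an
# assumption, chosen to match `a[1, 2] = 12` throwing `ArgumentError` below.
a = DiagonalArray{elt}(undef, 2, 3)
fill!(a, 0)
@test iszero(a)
@test_throws ArgumentError fill!(a, one(elt))
```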
- - # Test - a = DiagonalArray{elt}(undef, 2, 3) - @test size(a) == (2, 3) - a[1, 1] = 11 - a[2, 2] = 22 - @test a[1, 1] == 11 - @test a[2, 2] == 22 - @test_throws ArgumentError a[1, 2] = 12 - @test SparseArraysBase.storage_indices(a) == 1:2 - @test collect(SparseArraysBase.stored_indices(a)) == - [CartesianIndex(1, 1), CartesianIndex(2, 2)] - a[1, 2] = 0 - @test a[1, 1] == 11 - @test a[2, 2] == 22 - - a_dense = SparseArraysBase.densearray(a) - @test a_dense == a - @test a_dense isa Array{elt,ndims(a)} - - b = similar(a) - @test b isa DiagonalArray - @test size(b) == (2, 3) - - a = DiagonalArray(elt[1, 2, 3], (3, 3)) - @test size(a) == (3, 3) - @test a[1, 1] == 1 - @test a[2, 2] == 2 - @test a[3, 3] == 3 - @test a[SparseArraysBase.StorageIndex(1)] == 1 - @test a[SparseArraysBase.StorageIndex(2)] == 2 - @test a[SparseArraysBase.StorageIndex(3)] == 3 - @test iszero(a[1, 2]) - - a = DiagonalArray(elt[1, 2, 3], (3, 3)) - a = 2 * a - @test size(a) == (3, 3) - @test a[1, 1] == 2 - @test a[2, 2] == 4 - @test a[3, 3] == 6 - @test iszero(a[1, 2]) - - a = DiagonalArray(elt[1, 2, 3], (3, 3)) - a_r = reshape(a, 9) - @test a_r isa DiagonalArray{elt,1} - for I in LinearIndices(a) - @test a[I] == a_r[I] - end - - # This needs `Base.reshape` with a custom destination - # calling `SparseArraysBase.sparse_reshape!` - # in order to specify an appropriate output - # type to work. - a = DiagonalArray(elt[1, 2], (2, 2, 2)) - a_r = reshape(a, 2, 4) - @test a_r isa Matrix{elt} - for I in LinearIndices(a) - @test a[I] == a_r[I] - end - - # Matrix multiplication! - a1 = DiagonalArray(elt[1, 2], (2, 2)) - a2 = DiagonalArray(elt[2, 3], (2, 2)) - a_dest = a1 * a2 - @test Array(a_dest) ≈ Array(a1) * Array(a2) - @test a_dest isa DiagonalArray{elt} -end -end diff --git a/NDTensors/src/lib/SparseArraysBase/test/test_sparsearraydok.jl b/NDTensors/src/lib/SparseArraysBase/test/test_sparsearraydok.jl deleted file mode 100644 index 92afb068bc..0000000000 --- a/NDTensors/src/lib/SparseArraysBase/test/test_sparsearraydok.jl +++ /dev/null @@ -1,139 +0,0 @@ -@eval module $(gensym()) - -# TODO: Test: -# zero (PermutedDimsArray) -# Custom zero type -# Slicing - -using Dictionaries: Dictionary -using Test: @test, @testset, @test_broken -using NDTensors.SparseArraysBase: - SparseArraysBase, SparseArrayDOK, SparseMatrixDOK, @maybe_grow -using NDTensors.SparseArraysBase: storage_indices, stored_length -using SparseArrays: SparseMatrixCSC, nnz -@testset "SparseArrayDOK (eltype=$elt)" for elt in - (Float32, ComplexF32, Float64, ComplexF64) - @testset "Basics" begin - a = SparseArrayDOK{elt}(3, 4) - @test a == SparseArrayDOK{elt}((3, 4)) - @test a == SparseArrayDOK{elt}(undef, 3, 4) - @test a == SparseArrayDOK{elt}(undef, (3, 4)) - @test iszero(a) - @test iszero(nnz(a)) - @test stored_length(a) == nnz(a) - @test size(a) == (3, 4) - @test eltype(a) == elt - for I in eachindex(a) - @test iszero(a[I]) - @test a[I] isa elt - end - @test isempty(storage_indices(a)) - - x12 = randn(elt) - x23 = randn(elt) - b = copy(a) - @test b isa SparseArrayDOK{elt} - @test iszero(b) - b[1, 2] = x12 - b[2, 3] = x23 - @test iszero(a) - @test !iszero(b) - @test b[1, 2] == x12 - @test b[2, 3] == x23 - @test iszero(stored_length(a)) - @test stored_length(b) == 2 - end - @testset "map/broadcast" begin - a = SparseArrayDOK{elt}(3, 4) - a[1, 1] = 11 - a[3, 4] = 34 - @test stored_length(a) == 2 - b = 2 * a - @test stored_length(b) == 2 - @test b[1, 1] == 2 * 11 - @test b[3, 4] == 2 * 34 - end - @testset "reshape" begin - a = SparseArrayDOK{elt}(2, 2, 2) - a[1, 2, 2] 
= 122 - b = reshape(a, 2, 4) - @test b[1, 4] == 122 - end - @testset "Matrix multiplication" begin - a1 = SparseArrayDOK{elt}(2, 3) - a1[1, 2] = 12 - a1[2, 1] = 21 - a2 = SparseArrayDOK{elt}(3, 4) - a2[1, 1] = 11 - a2[2, 2] = 22 - a2[3, 3] = 33 - a_dest = a1 * a2 - # TODO: Use `densearray` to make generic to GPU. - @test Array(a_dest) ≈ Array(a1) * Array(a2) - # TODO: Make this work with `ArrayLayouts`. - @test stored_length(a_dest) == 2 - @test a_dest isa SparseMatrixDOK{elt} - - a2 = randn(elt, (3, 4)) - a_dest = a1 * a2 - # TODO: Use `densearray` to make generic to GPU. - @test Array(a_dest) ≈ Array(a1) * Array(a2) - @test stored_length(a_dest) == 8 - @test a_dest isa Matrix{elt} - end - @testset "SparseMatrixCSC" begin - a = SparseArrayDOK{elt}(2, 2) - a[1, 2] = 12 - for (type, a′) in ((SparseMatrixCSC, a), (SparseArrayDOK, SparseMatrixCSC(a))) - b = type(a′) - @test b isa type{elt} - @test b[1, 2] == 12 - @test isone(nnz(b)) - for I in eachindex(b) - if I ≠ CartesianIndex(1, 2) - @test iszero(b[I]) - end - end - end - end - @testset "Maybe Grow Feature" begin - a = SparseArrayDOK{elt,2}((0, 0)) - SparseArraysBase.setindex_maybe_grow!(a, 230, 2, 3) - @test size(a) == (2, 3) - @test a[2, 3] == 230 - # Test @maybe_grow macro - @maybe_grow a[5, 5] = 550 - @test size(a) == (5, 5) - @test a[2, 3] == 230 - @test a[5, 5] == 550 - # Test that size remains same - # if we set at an index smaller than - # the maximum size: - @maybe_grow a[3, 4] = 340 - @test size(a) == (5, 5) - @test a[2, 3] == 230 - @test a[5, 5] == 550 - @test a[3, 4] == 340 - # Test vector case - v = SparseArrayDOK{elt,1}((0,)) - @maybe_grow v[5] = 50 - @test size(v) == (5,) - @test v[5] == 50 - # Test setting from a variable (to test macro escaping) - i = 6 - val = 60 - @maybe_grow v[i] = val - @test v[i] == val - i, j = 1, 2 - val = 120 - @maybe_grow a[i, j] = val - @test a[i, j] == val - end - @testset "Test Lower Level Constructor" begin - d = Dictionary{CartesianIndex{2},elt}() - a = SparseArrayDOK(d, (2, 2), zero(elt)) - a[1, 2] = 12.0 - @test a[1, 2] == 12.0 - end -end -end diff --git a/NDTensors/src/lib/SymmetrySectors/.JuliaFormatter.toml b/NDTensors/src/lib/SymmetrySectors/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/SymmetrySectors/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/SymmetrySectors/Project.toml b/NDTensors/src/lib/SymmetrySectors/Project.toml deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/NDTensors/src/lib/SymmetrySectors/src/SymmetrySectors.jl b/NDTensors/src/lib/SymmetrySectors/src/SymmetrySectors.jl deleted file mode 100644 index 93ecbda986..0000000000 --- a/NDTensors/src/lib/SymmetrySectors/src/SymmetrySectors.jl +++ /dev/null @@ -1,17 +0,0 @@ -module SymmetrySectors - -include("symmetry_style.jl") -include("abstractsector.jl") -include("sector_definitions/fib.jl") -include("sector_definitions/ising.jl") -include("sector_definitions/o2.jl") -include("sector_definitions/trivial.jl") -include("sector_definitions/su.jl") -include("sector_definitions/su2k.jl") -include("sector_definitions/u1.jl") -include("sector_definitions/zn.jl") - -include("namedtuple_operations.jl") -include("sector_product.jl") - -end diff --git a/NDTensors/src/lib/SymmetrySectors/src/abstractsector.jl b/NDTensors/src/lib/SymmetrySectors/src/abstractsector.jl deleted file mode 100644 index 2257e9fb36..0000000000 --- a/NDTensors/src/lib/SymmetrySectors/src/abstractsector.jl +++ /dev/null @@ -1,114 
+0,0 @@ -# This file defines the abstract type AbstractSector -# all fusion categories (Z{2}, SU2, Ising...) are subtypes of AbstractSector - -using BlockArrays: blocklengths -using ..LabelledNumbers: LabelledInteger, label, label_type, labelled, unlabel, unlabel_type -using ..GradedAxes: GradedAxes, blocklabels, fuse_blocklengths, gradedrange, tensor_product - -abstract type AbstractSector end - -# =================================== Base interface ===================================== -function Base.isless(c1::C, c2::C) where {C<:AbstractSector} - return isless(sector_label(c1), sector_label(c2)) -end - -# ================================= Sectors interface ==================================== -trivial(x) = trivial(typeof(x)) -function trivial(axis_type::Type{<:AbstractUnitRange}) - return gradedrange([trivial(eltype(axis_type))]) # always returns nondual -end -function trivial(la_type::Type{<:LabelledInteger}) - return labelled(one(unlabel_type(la_type)), trivial(label_type(la_type))) -end -function trivial(type::Type) - return error("`trivial` not defined for type $(type).") -end - -istrivial(c::AbstractSector) = (c == trivial(c)) - -function sector_label(c::AbstractSector) - return error("method `sector_label` not defined for type $(typeof(c))") -end - -block_dimensions(g::AbstractUnitRange) = block_dimensions(SymmetryStyle(g), g) -block_dimensions(::AbelianStyle, g) = unlabel.(blocklengths(g)) -function block_dimensions(::NotAbelianStyle, g) - return quantum_dimension.(blocklabels(g)) .* blocklengths(g) -end - -quantum_dimension(x) = quantum_dimension(SymmetryStyle(x), x) - -function quantum_dimension(::NotAbelianStyle, c::AbstractSector) - return error("method `quantum_dimension` not defined for type $(typeof(c))") -end - -quantum_dimension(::AbelianStyle, ::AbstractSector) = 1 -quantum_dimension(::AbelianStyle, g::AbstractUnitRange) = length(g) -quantum_dimension(::NotAbelianStyle, g::AbstractUnitRange) = sum(block_dimensions(g)) - -# =============================== Fusion rule interface ================================== -⊗(c1::AbstractSector, c2::AbstractSector) = fusion_rule(c1, c2) - -function fusion_rule(c1::AbstractSector, c2::AbstractSector) - return fusion_rule(combine_styles(SymmetryStyle(c1), SymmetryStyle(c2)), c1, c2) -end - -function fusion_rule(::NotAbelianStyle, c1::C, c2::C) where {C<:AbstractSector} - sector_degen_pairs = label_fusion_rule(C, sector_label(c1), sector_label(c2)) - return gradedrange(sector_degen_pairs) -end - -# abelian case: return Sector -function fusion_rule(::AbelianStyle, c1::C, c2::C) where {C<:AbstractSector} - return label(only(fusion_rule(NotAbelianStyle(), c1, c2))) -end - -function label_fusion_rule(sector_type::Type{<:AbstractSector}, l1, l2) - return [abelian_label_fusion_rule(sector_type, l1, l2) => 1] -end - -# ================================ GradedAxes interface ================================== -# tensor_product interface -function GradedAxes.fuse_blocklengths( - l1::LabelledInteger{<:Integer,<:AbstractSector}, - l2::LabelledInteger{<:Integer,<:AbstractSector}, -) - return fuse_blocklengths(combine_styles(SymmetryStyle(l1), SymmetryStyle(l2)), l1, l2) -end - -function GradedAxes.fuse_blocklengths( - ::NotAbelianStyle, l1::LabelledInteger, l2::LabelledInteger -) - fused = label(l1) ⊗ label(l2) - v = labelled.(l1 * l2 .* blocklengths(fused), blocklabels(fused)) - return gradedrange(v) -end - -function GradedAxes.fuse_blocklengths( - ::AbelianStyle, l1::LabelledInteger, l2::LabelledInteger -) - fused = label(l1) ⊗ label(l2) - return 
gradedrange([labelled(l1 * l2, fused)]) -end - -# cast to range -to_gradedrange(c::AbstractSector) = to_gradedrange(labelled(1, c)) -to_gradedrange(l::LabelledInteger) = gradedrange([l]) -to_gradedrange(g::AbstractUnitRange) = g - -# allow to fuse a Sector with a GradedUnitRange -function GradedAxes.tensor_product(c::AbstractSector, g::AbstractUnitRange) - return tensor_product(to_gradedrange(c), g) -end - -function GradedAxes.tensor_product(g::AbstractUnitRange, c::AbstractSector) - return tensor_product(g, to_gradedrange(c)) -end - -function GradedAxes.tensor_product(c1::AbstractSector, c2::AbstractSector) - return to_gradedrange(fusion_rule(c1, c2)) -end - -function GradedAxes.fusion_product(c::AbstractSector) - return to_gradedrange(c) -end diff --git a/NDTensors/src/lib/SymmetrySectors/src/namedtuple_operations.jl b/NDTensors/src/lib/SymmetrySectors/src/namedtuple_operations.jl deleted file mode 100644 index a325dea2b2..0000000000 --- a/NDTensors/src/lib/SymmetrySectors/src/namedtuple_operations.jl +++ /dev/null @@ -1,16 +0,0 @@ - -@generated function sort_keys(nt::NamedTuple{N}) where {N} - return :(NamedTuple{$(Tuple(sort(collect(N))))}(nt)) -end - -@generated function intersect_keys(nt1::NamedTuple{N1}, nt2::NamedTuple{N2}) where {N1,N2} - return :(NamedTuple{$(Tuple(intersect(N1, N2)))}(merge(nt2, nt1))) -end - -union_keys(ns1::NamedTuple, ns2::NamedTuple) = Base.merge(ns2, ns1) - -setdiff_keys(ns1::NamedTuple, ns2::NamedTuple) = Base.structdiff(ns1, ns2) - -function symdiff_keys(ns1::NamedTuple, ns2::NamedTuple) - return merge(Base.structdiff(ns1, ns2), Base.structdiff(ns2, ns1)) -end diff --git a/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/fib.jl b/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/fib.jl deleted file mode 100644 index d7fded55f2..0000000000 --- a/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/fib.jl +++ /dev/null @@ -1,45 +0,0 @@ -# -# Fibonacci category -# -# (same fusion rules as subcategory {0,1} of su2{3}) -# -using ..GradedAxes: GradedAxes - -struct Fib <: AbstractSector - l::Int -end - -# TODO: Use `Val` dispatch here? -function Fib(s::AbstractString) - if s == "1" - return Fib(0) - elseif s == "τ" - return Fib(1) - end - return error("Unrecognized input \"$s\" to Fib constructor") -end - -SymmetryStyle(::Type{Fib}) = NotAbelianStyle() - -GradedAxes.dual(f::Fib) = f - -sector_label(f::Fib) = f.l - -trivial(::Type{Fib}) = Fib(0) - -quantum_dimension(::NotAbelianStyle, f::Fib) = istrivial(f) ? 1.0 : ((1 + √5) / 2) - -# Fusion rules identical to su2₃ -function label_fusion_rule(::Type{Fib}, l1, l2) - suk_sectors_degen = label_fusion_rule(su2{3}, l1, l2) - suk_sectors = first.(suk_sectors_degen) - degen = last.(suk_sectors_degen) - sectors = Fib.(sector_label.(suk_sectors)) - return sectors .=> degen -end - -label_to_str(f::Fib) = istrivial(f) ? "1" : "τ" - -function Base.show(io::IO, f::Fib) - return print(io, "Fib(", label_to_str(f), ")") -end diff --git a/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/ising.jl b/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/ising.jl deleted file mode 100644 index d4a955069e..0000000000 --- a/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/ising.jl +++ /dev/null @@ -1,46 +0,0 @@ -# -# Ising category -# -# (same fusion rules as su2{2}) -# - -using HalfIntegers: Half, twice -using ..GradedAxes: GradedAxes - -struct Ising <: AbstractSector - l::Half{Int} -end - -# TODO: Use `Val` dispatch here? 
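One hypothetical shape for that `Val`-based constructor (the same TODO appears on the `Fib` string constructor above), shown only to make the TODO concrete; none of these methods exist in this patch, and the current loop-based implementation follows:

```julia
# Hypothetical sketch: dispatch on the label at compile time instead of
# looping over the candidate strings. Unknown labels would surface as a
# MethodError rather than the friendlier error message below.
Ising(s::AbstractString) = Ising(Val(Symbol(s)))
Ising(::Val{Symbol("1")}) = Ising(0)
Ising(::Val{Symbol("σ")}) = Ising(1//2)
Ising(::Val{Symbol("ψ")}) = Ising(1)
```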
-function Ising(s::AbstractString) - for (a, v) in enumerate(("1", "σ", "ψ")) - (v == s) && return Ising((a - 1)//2) - end - return error("Unrecognized input \"$s\" to Ising constructor") -end - -SymmetryStyle(::Type{Ising}) = NotAbelianStyle() - -GradedAxes.dual(i::Ising) = i - -sector_label(i::Ising) = i.l - -trivial(::Type{Ising}) = Ising(0) - -quantum_dimension(::NotAbelianStyle, i::Ising) = (sector_label(i) == 1//2) ? √2 : 1.0 - -# Fusion rules identical to su2₂ -function label_fusion_rule(::Type{Ising}, l1, l2) - suk_sectors_degen = label_fusion_rule(su2{2}, l1, l2) - suk_sectors = first.(suk_sectors_degen) - degen = last.(suk_sectors_degen) - sectors = Ising.(sector_label.(suk_sectors)) - return sectors .=> degen -end - -# TODO: Use `Val` dispatch here? -label_to_str(i::Ising) = ("1", "σ", "ψ")[twice(sector_label(i)) + 1] - -function Base.show(io::IO, f::Ising) - return print(io, "Ising(", label_to_str(f), ")") -end diff --git a/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/o2.jl b/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/o2.jl deleted file mode 100644 index 4a0dcf7646..0000000000 --- a/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/o2.jl +++ /dev/null @@ -1,75 +0,0 @@ -# -# Orthogonal group O(2) -# isomorphic to Z_2 ⋉ U(1) -# isomorphic to SU(2) subgroup with Sz conservation + Sz-reversal -# -# O(2) has 3 kinds of irreps: -# - trivial irrep, or "0e", corresponds to Sz=0 and even under Sz-reversal -# - "zero odd", or "0o" irrep, corresponds to Sz=0 and odd under Sz-reversal -# - 2-dimensional Sz=±|m| irrep, with m a half integer -# - -using HalfIntegers: Half, HalfInteger -using ..GradedAxes: GradedAxes - -# here we use only one half-integer as label: -# - l=0 for trivial -# - l=-1 for zero odd -# - l=+|m| for Sz=±|m| -struct O2 <: AbstractSector - l::Half{Int} -end - -SymmetryStyle(::Type{O2}) = NotAbelianStyle() - -sector_label(s::O2) = s.l - -trivial(::Type{O2}) = O2(0) -zero_odd(::Type{O2}) = O2(-1) - -is_zero_even_or_odd(s::O2) = is_zero_even_or_odd(sector_label(s)) -iszero_odd(s::O2) = iszero_odd(sector_label(s)) - -is_zero_even_or_odd(l::HalfInteger) = iszero_even(l) || iszero_odd(l) -iszero_even(l::HalfInteger) = l == sector_label(trivial(O2)) -iszero_odd(l::HalfInteger) = l == sector_label(zero_odd(O2)) - -quantum_dimension(::NotAbelianStyle, s::O2) = 2 - is_zero_even_or_odd(s) - -GradedAxes.dual(s::O2) = s - -function Base.show(io::IO, s::O2) - if iszero_odd(s) - disp = "0o" - elseif istrivial(s) - disp = "0e" - else - disp = "±" * string(sector_label(s)) - end - return print(io, "O(2)[", disp, "]") -end - -function label_fusion_rule(::Type{O2}, l1, l2) - if is_zero_even_or_odd(l1) - degens = [1] - if is_zero_even_or_odd(l2) - labels = l1 == l2 ? 
[sector_label(trivial(O2))] : [sector_label(zero_odd(O2))] - else - labels = [l2] - end - else - if is_zero_even_or_odd(l2) - degens = [1] - labels = [l1] - else - if l1 == l2 - degens = [1, 1, 1] - labels = [sector_label(zero_odd(O2)), sector_label(trivial(O2)), 2 * l1] - else - degens = [1, 1] - labels = [abs(l1 - l2), l1 + l2] - end - end - end - return O2.(labels) .=> degens -end diff --git a/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/su.jl b/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/su.jl deleted file mode 100644 index b44ef4d6eb..0000000000 --- a/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/su.jl +++ /dev/null @@ -1,175 +0,0 @@ -# -# Special unitary group SU(N) -# - -using HalfIntegers: HalfInteger, half, twice -using ...GradedAxes: GradedAxes - -struct SU{N,M} <: AbstractSector - # l is the first row of the - # Gelfand-Tsetlin (GT) pattern describing - # an SU(N) irrep - # this first row is identical to the Young tableau of the irrep - l::NTuple{M,Int} - - # M is there to avoid storing a N-Tuple with an extra zero. - # inner constructor enforces M = N - 1 - # It does NOT check for Young Tableau validity (non-increasing positive integers) - function SU{N,M}(t::NTuple{M,Integer}) where {N,M} - return N == M + 1 && M > 0 ? new{N,M}(t) : error("Invalid tuple length") - end -end - -SU{N}(t::Tuple) where {N} = SU{N,length(t)}(t) -SU(t::Tuple) = SU{length(t) + 1}(t) # infer N from tuple length - -SymmetryStyle(::Type{<:SU}) = NotAbelianStyle() - -sector_label(s::SU) = s.l - -groupdim(::SU{N}) where {N} = N - -trivial(::Type{<:SU{N}}) where {N} = SU{N}(ntuple(_ -> 0, Val(N - 1))) - -fundamental(::Type{<:SU{N}}) where {N} = SU{N}(ntuple(i -> i == 1, Val(N - 1))) - -function quantum_dimension(::NotAbelianStyle, s::SU) - N = groupdim(s) - l = (sector_label(s)..., 0) - d = 1 - for k1 in 1:N, k2 in (k1 + 1):N - d *= ((k2 - k1) + (l[k1] - l[k2]))//(k2 - k1) - end - return Int(d) -end - -function GradedAxes.dual(s::SU) - l = sector_label(s) - nl = reverse(cumsum((l[begin:(end - 1)] .- l[(begin + 1):end]..., l[end]))) - return typeof(s)(nl) -end - -function Base.show(io::IO, s::SU) - disp = join([string(l) for l in sector_label(s)], ", ") - return print(io, "SU(", groupdim(s), ")[", disp, "]") -end - -# display SU(N) irrep as a Young tableau with utf8 box char -function Base.show(io::IO, ::MIME"text/plain", s::SU) - if istrivial(s) # singlet = no box - return print(io, "●") - end - - N = groupdim(s) - l = sector_label(s) - println(io, "┌─" * "┬─"^(l[1] - 1) * "┐") - i = 1 - while i < N - 1 && l[i + 1] != 0 - println( - io, - "├─", - "┼─"^(l[i + 1] - 1 + (l[i] > l[i + 1])), - "┴─"^max(0, (l[i] - l[i + 1] - 1)), - "┤"^(l[i] == l[i + 1]), - "┘"^(l[i] > l[i + 1]), - ) - i += 1 - end - - print(io, "└─", "┴─"^max(0, l[i] - 1), "┘") - return nothing -end - -# -# Specializations for the case SU{2} -# - -# optimize implementation -quantum_dimension(s::SU{2}) = sector_label(s)[1] + 1 - -GradedAxes.dual(s::SU{2}) = s - -function label_fusion_rule(::Type{<:SU{2}}, s1, s2) - irreps = [SU{2}((i,)) for i in (abs(s1[1] - s2[1])):2:(s1[1] + s2[1])] - degen = ones(Int, length(irreps)) - return irreps .=> degen -end - -# define angular momentum-like interface using half-integers -SU2(h::Number) = SU{2}((twice(HalfInteger(h)),)) - -# display SU2 using half-integers -function Base.show(io::IO, s::SU{2}) - return print(io, "SU(2)[S=", half(quantum_dimension(s) - 1), "]") -end - -function Base.show(io::IO, ::MIME"text/plain", s::SU{2}) - return print(io, "S = ", 
half(quantum_dimension(s) - 1)) -end - -# -# Specializations for the case SU{3} -# aimed for testing non-abelian non self-conjugate representations -# TODO replace with generic implementation -# - -function label_fusion_rule(::Type{<:SU{3}}, left, right) - # Compute SU(3) fusion rules using Littlewood-Richardson rule for Young tableaus. - # See e.g. Di Francesco, Mathieu and Sénéchal, section 13.5.3. - if sum(right) > sum(left) # impose more boxes in left Young tableau - return label_fusion_rule(SU{3}, right, left) - end - - if right[1] == 0 # avoid issues with singlet - return [SU{3}(left) => 1] - end - - left_row1 = left[1] - left_row2 = left[2] - right_row1 = right[1] - right_row2 = right[2] - - irreps = [] - - # put a23 boxes on 2nd or 3rd line - a23max1 = 2 * left_row1 # row2a <= row1a - a23max2 = right_row1 # a2 + a3 <= total number of a - a23max = min(a23max1, a23max2) - for a23 in 0:a23max - a3min1 = left_row2 + 2 * a23 - left_row1 - right_row1 - a3min2 = left_row2 - left_row1 + a23 # no a below a: row2a <= row1 - a3min = max(0, a3min1, a3min2) - a3max1 = left_row2 # row3a <= row2a - a3max2 = a23 # a3 <= a2 + a3 - a3max3 = right_row1 - right_row2 # more a than b, right to left: b2 + b3 <= a1 + a2 - a3max = min(a3max1, a3max2, a3max3) - for a3 in a3min:a3max - a2 = a23 - a3 - row1a = left_row1 + right_row1 - a23 - row2a = left_row2 + a23 - a3 - - # cannot put any b on 1st line: row1ab = row1a - b3min1 = row2a + right_row2 - row1a # row2ab <= row1ab = row1a - b3min2 = right_row2 + a23 - right_row1 - b3min = max(0, b3min1, b3min2) - b3max1 = right_row2 # only other.row2 b boxes - b3max2 = (row2a + right_row2 - a3) ÷ 2 # row3ab >= row2ab - b3max3 = right_row1 - a3 # more a than b, right to left: b2 <= a1 - b3max4 = row2a - a3 # no b below b: row2a >= row3ab - b3max = min(b3max1, b3max2, b3max3, b3max4) - for b3 in b3min:b3max - b2 = right_row2 - b3 - row2ab = row2a + b2 - row3ab = a3 + b3 - yt = (row1a - row3ab, row2ab - row3ab) - - push!(irreps, yt) - end - end - end - - unique_labels = sort(unique(irreps)) - degen = [count(==(irr), irreps) for irr in unique_labels] - sectors = SU{3}.(unique_labels) - return sectors .=> degen -end diff --git a/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/su2k.jl b/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/su2k.jl deleted file mode 100644 index cdf9bfbb1a..0000000000 --- a/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/su2k.jl +++ /dev/null @@ -1,27 +0,0 @@ -# -# Quantum 'group' su2ₖ -# - -using HalfIntegers: Half -using ...GradedAxes: GradedAxes - -struct su2{k} <: AbstractSector - j::Half{Int} -end - -SymmetryStyle(::Type{<:su2}) = NotAbelianStyle() - -GradedAxes.dual(s::su2) = s - -sector_label(s::su2) = s.j - -level(::su2{k}) where {k} = k - -trivial(::Type{su2{k}}) where {k} = su2{k}(0) - -function label_fusion_rule(::Type{su2{k}}, j1, j2) where {k} - labels = collect(abs(j1 - j2):min(k - j1 - j2, j1 + j2)) - degen = ones(Int, length(labels)) - sectors = su2{k}.(labels) - return sectors .=> degen -end diff --git a/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/trivial.jl b/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/trivial.jl deleted file mode 100644 index 27b3fec88e..0000000000 --- a/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/trivial.jl +++ /dev/null @@ -1,38 +0,0 @@ -# -# Trivial sector -# acts as a trivial sector for any AbstractSector -# - -using ...GradedAxes: GradedAxes - -# Trivial is special as it does not have a label -struct TrivialSector <: AbstractSector end - 
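The methods that follow make `TrivialSector` a two-sided identity for fusion and for comparisons; concretely (these behaviors are exercised in the fusion-rule tests later in this patch):

```julia
# TrivialSector acts as the identity (see `test_fusion_rules.jl` below):
q = TrivialSector()
q ⊗ q == q              # fusing two trivial sectors is trivial
q ⊗ Z{2}(1) == Z{2}(1)  # abelian fusion returns the other sector unchanged
U1(0) == q              # any trivial sector compares equal to TrivialSector
```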
-SymmetryStyle(::Type{TrivialSector}) = AbelianStyle() - -trivial(::Type{TrivialSector}) = TrivialSector() - -GradedAxes.dual(::TrivialSector) = TrivialSector() - -# TrivialSector acts as trivial on any AbstractSector -function fusion_rule(::NotAbelianStyle, ::TrivialSector, c::AbstractSector) - return to_gradedrange(c) -end -function fusion_rule(::NotAbelianStyle, c::AbstractSector, ::TrivialSector) - return to_gradedrange(c) -end - -# abelian case: return Sector -fusion_rule(::AbelianStyle, c::AbstractSector, ::TrivialSector) = c -fusion_rule(::AbelianStyle, ::TrivialSector, c::AbstractSector) = c -fusion_rule(::AbelianStyle, ::TrivialSector, ::TrivialSector) = TrivialSector() - -# any trivial sector equals TrivialSector -Base.:(==)(c::AbstractSector, ::TrivialSector) = istrivial(c) -Base.:(==)(::TrivialSector, c::AbstractSector) = istrivial(c) -Base.:(==)(::TrivialSector, ::TrivialSector) = true - -# sorts as trivial for any Sector -Base.isless(c::AbstractSector, ::TrivialSector) = c < trivial(c) -Base.isless(::TrivialSector, c::AbstractSector) = trivial(c) < c -Base.isless(::TrivialSector, ::TrivialSector) = false # bypass default that calls label diff --git a/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/u1.jl b/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/u1.jl deleted file mode 100644 index 2d79796c34..0000000000 --- a/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/u1.jl +++ /dev/null @@ -1,31 +0,0 @@ -# -# U₁ group (circle group, or particle number, total Sz etc.) -# - -using ...GradedAxes: GradedAxes - -# Parametric type to allow both integer label as well as -# HalfInteger for easy conversion to/from SU(2) -struct U1{T} <: AbstractSector - n::T -end - -SymmetryStyle(::Type{<:U1}) = AbelianStyle() -sector_label(u::U1) = u.n - -set_sector_label(s::U1, sector_label) = typeof(s)(sector_label) -GradedAxes.dual(s::U1) = set_sector_label(s, -sector_label(s)) - -trivial(::Type{U1}) = trivial(U1{Int}) -trivial(::Type{U1{T}}) where {T} = U1(zero(T)) - -abelian_label_fusion_rule(sector_type::Type{<:U1}, n1, n2) = sector_type(n1 + n2) - -# hide label type in printing -function Base.show(io::IO, u::U1) - return print(io, "U(1)[", sector_label(u), "]") -end - -# enforce U1(Int32(1)) == U1(1) -Base.:(==)(s1::U1, s2::U1) = sector_label(s1) == sector_label(s2) -Base.isless(s1::U1, s2::U1) = sector_label(s1) < sector_label(s2) diff --git a/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/zn.jl b/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/zn.jl deleted file mode 100644 index 8628288dc3..0000000000 --- a/NDTensors/src/lib/SymmetrySectors/src/sector_definitions/zn.jl +++ /dev/null @@ -1,25 +0,0 @@ -# -# Cyclic group Zₙ -# - -using ...GradedAxes: GradedAxes - -struct Z{N} <: AbstractSector - m::Int - Z{N}(m) where {N} = new{N}(mod(m, N)) -end - -modulus(::Type{Z{N}}) where {N} = N -modulus(c::Z) = modulus(typeof(c)) - -SymmetryStyle(::Type{<:Z}) = AbelianStyle() -sector_label(c::Z) = c.m - -set_sector_label(s::Z, sector_label) = typeof(s)(sector_label) -GradedAxes.dual(s::Z) = set_sector_label(s, -sector_label(s)) - -trivial(sector_type::Type{<:Z}) = sector_type(0) - -function abelian_label_fusion_rule(sector_type::Type{<:Z}, n1, n2) - return sector_type(n1 + n2) -end diff --git a/NDTensors/src/lib/SymmetrySectors/src/sector_product.jl b/NDTensors/src/lib/SymmetrySectors/src/sector_product.jl deleted file mode 100644 index 12a72d022e..0000000000 --- a/NDTensors/src/lib/SymmetrySectors/src/sector_product.jl +++ /dev/null @@ -1,238 +0,0 @@ -# This 
files defines a structure for Cartesian product of 2 or more fusion sectors -# e.g. U(1)×U(1), U(1)×SU2(2)×SU(3) - -using BlockArrays: blocklengths -using ..LabelledNumbers: LabelledInteger, label, labelled, unlabel -using ..GradedAxes: AbstractGradedUnitRange, GradedAxes, dual - -# ===================================== Definition ======================================= -struct SectorProduct{Sectors} <: AbstractSector - arguments::Sectors - global _SectorProduct(l) = new{typeof(l)}(l) -end - -SectorProduct(c::SectorProduct) = _SectorProduct(arguments(c)) - -arguments(s::SectorProduct) = s.arguments - -# ================================= Sectors interface ==================================== -function SymmetryStyle(T::Type{<:SectorProduct}) - return arguments_symmetrystyle(arguments_type(T)) -end - -function quantum_dimension(::NotAbelianStyle, s::SectorProduct) - return mapreduce(quantum_dimension, *, arguments(s)) -end - -# use map instead of broadcast to support both Tuple and NamedTuple -GradedAxes.dual(s::SectorProduct) = SectorProduct(map(dual, arguments(s))) - -function trivial(type::Type{<:SectorProduct}) - return SectorProduct(arguments_trivial(arguments_type(type))) -end - -# =================================== Base interface ===================================== -function Base.:(==)(A::SectorProduct, B::SectorProduct) - return arguments_isequal(arguments(A), arguments(B)) -end - -function Base.show(io::IO, s::SectorProduct) - (length(arguments(s)) < 2) && print(io, "sector") - print(io, "(") - symbol = "" - for p in pairs(arguments(s)) - print(io, symbol) - sector_show(io, p[1], p[2]) - symbol = " × " - end - return print(io, ")") -end - -sector_show(io::IO, ::Int, v) = print(io, v) -sector_show(io::IO, k::Symbol, v) = print(io, "($k=$v,)") - -function Base.isless(s1::SectorProduct, s2::SectorProduct) - return arguments_isless(arguments(s1), arguments(s2)) -end - -# ======================================= shared ========================================= -# there are 2 implementations for SectorProduct -# - ordered-like with a Tuple -# - dictionary-like with a NamedTuple - -arguments_type(::Type{<:SectorProduct{T}}) where {T} = T - -arguments_maybe_insert_unspecified(s1, ::Any) = s1 -function sym_arguments_maybe_insert_unspecified(s1, s2) - return arguments_maybe_insert_unspecified(s1, s2), - arguments_maybe_insert_unspecified(s2, s1) -end - -function make_empty_match(a1, b1) - a2 = isempty(a1) ? empty(b1) : a1 - b2 = isempty(b1) ? empty(a2) : b1 - return a2, b2 -end - -function arguments_isequal(a1, b1) - return ==(sym_arguments_maybe_insert_unspecified(make_empty_match(a1, b1)...)...) -end - -function arguments_product(s1, s2) - isempty(s1) && return s2 - isempty(s2) && return s1 - return throw(ArgumentError("Mixing non-empty storage types is illegal")) -end - -function arguments_isless(a1, b1) - return isless(sym_arguments_maybe_insert_unspecified(make_empty_match(a1, b1)...)...) 
-end - -# ================================= Cartesian Product ==================================== -×(c1::AbstractSector, c2::AbstractSector) = ×(SectorProduct(c1), SectorProduct(c2)) -function ×(p1::SectorProduct, p2::SectorProduct) - return SectorProduct(arguments_product(arguments(p1), arguments(p2))) -end - -×(a, g::AbstractUnitRange) = ×(to_gradedrange(a), g) -×(g::AbstractUnitRange, b) = ×(g, to_gradedrange(b)) -×(nt1::NamedTuple, nt2::NamedTuple) = ×(SectorProduct(nt1), SectorProduct(nt2)) -×(c1::NamedTuple, c2::AbstractSector) = ×(SectorProduct(c1), SectorProduct(c2)) -×(c1::AbstractSector, c2::NamedTuple) = ×(SectorProduct(c1), SectorProduct(c2)) - -function ×(l1::LabelledInteger, l2::LabelledInteger) - c3 = label(l1) × label(l2) - m3 = unlabel(l1) * unlabel(l2) - return labelled(m3, c3) -end - -function ×(g1::AbstractUnitRange, g2::AbstractUnitRange) - v = map( - ((l1, l2),) -> l1 × l2, - Iterators.flatten((Iterators.product(blocklengths(g1), blocklengths(g2)),),), - ) - return gradedrange(v) -end - -# ==================================== Fusion rules ====================================== -# cast AbstractSector to SectorProduct -function fusion_rule(style::SymmetryStyle, c1::SectorProduct, c2::AbstractSector) - return fusion_rule(style, c1, SectorProduct(c2)) -end -function fusion_rule(style::SymmetryStyle, c1::AbstractSector, c2::SectorProduct) - return fusion_rule(style, SectorProduct(c1), c2) -end - -# generic case: fusion returns a GradedAxes, even for fusion with Empty -function fusion_rule(::NotAbelianStyle, s1::SectorProduct, s2::SectorProduct) - return to_gradedrange(arguments_fusion_rule(arguments(s1), arguments(s2))) -end - -# Abelian case: fusion returns SectorProduct -function fusion_rule(::AbelianStyle, s1::SectorProduct, s2::SectorProduct) - return label(only(fusion_rule(NotAbelianStyle(), s1, s2))) -end - -# lift ambiguities for TrivialSector -fusion_rule(::AbelianStyle, c::SectorProduct, ::TrivialSector) = c -fusion_rule(::AbelianStyle, ::TrivialSector, c::SectorProduct) = c -fusion_rule(::NotAbelianStyle, c::SectorProduct, ::TrivialSector) = to_gradedrange(c) -fusion_rule(::NotAbelianStyle, ::TrivialSector, c::SectorProduct) = to_gradedrange(c) - -function arguments_fusion_rule(sects1, sects2) - isempty(sects1) && return SectorProduct(sects2) - isempty(sects2) && return SectorProduct(sects1) - shared_sect = shared_arguments_fusion_rule(arguments_common(sects1, sects2)...) - diff_sect = SectorProduct(arguments_diff(sects1, sects2)) - return shared_sect × diff_sect -end - -# =============================== Ordered implementation ================================= -SectorProduct(t::Tuple) = _SectorProduct(t) -SectorProduct(sects::AbstractSector...) = SectorProduct(sects) - -function arguments_symmetrystyle(T::Type{<:Tuple}) - return mapreduce(SymmetryStyle, combine_styles, fieldtypes(T); init=AbelianStyle()) -end - -arguments_product(l1::Tuple, l2::Tuple) = (l1..., l2...) - -arguments_trivial(T::Type{<:Tuple}) = trivial.(fieldtypes(T)) - -function arguments_common(t1::Tuple, t2::Tuple) - n = min(length(t1), length(t2)) - return t1[begin:n], t2[begin:n] -end - -function arguments_diff(t1::Tuple, t2::Tuple) - n1 = length(t1) - n2 = length(t2) - return n1 < n2 ? 
t2[(n1 + 1):end] : t1[(n2 + 1):end] -end - -function shared_arguments_fusion_rule(shared1::T, shared2::T) where {T<:Tuple} - return mapreduce( - to_gradedrange ∘ fusion_rule, - ×, - shared1, - shared2; - init=to_gradedrange(SectorProduct(())), - ) -end - -function arguments_maybe_insert_unspecified(t1::Tuple, t2::Tuple) - n1 = length(t1) - return (t1..., trivial.(t2[(n1 + 1):end])...) -end - -# =========================== Dictionary-like implementation ============================= -function SectorProduct(nt::NamedTuple) - arguments = sort_keys(nt) - return _SectorProduct(arguments) -end - -SectorProduct(; kws...) = SectorProduct((; kws...)) - -function SectorProduct(pairs::Pair...) - keys = Symbol.(first.(pairs)) - vals = last.(pairs) - return SectorProduct(NamedTuple{keys}(vals)) -end - -function arguments_symmetrystyle(NT::Type{<:NamedTuple}) - return mapreduce(SymmetryStyle, combine_styles, fieldtypes(NT); init=AbelianStyle()) -end - -function arguments_maybe_insert_unspecified(nt1::NamedTuple, nt2::NamedTuple) - diff1 = arguments_trivial(typeof(setdiff_keys(nt2, nt1))) - return sort_keys(union_keys(nt1, diff1)) -end - -function arguments_product(l1::NamedTuple, l2::NamedTuple) - if length(intersect_keys(l1, l2)) > 0 - throw(ArgumentError("Cannot define product of shared keys")) - end - return union_keys(l1, l2) -end - -function arguments_trivial(NT::Type{<:NamedTuple{Keys}}) where {Keys} - return NamedTuple{Keys}(trivial.(fieldtypes(NT))) -end - -function arguments_common(nt1::NamedTuple, nt2::NamedTuple) - # SectorProduct(nt::NamedTuple) sorts keys at init - @assert issorted(keys(nt1)) - @assert issorted(keys(nt2)) - return intersect_keys(nt1, nt2), intersect_keys(nt2, nt1) -end - -arguments_diff(nt1::NamedTuple, nt2::NamedTuple) = symdiff_keys(nt1, nt2) - -function map_blocklabels(f, r::AbstractUnitRange) - return gradedrange(labelled.(unlabel.(blocklengths(r)), f.(blocklabels(r)))) -end - -function shared_arguments_fusion_rule(shared1::NT, shared2::NT) where {NT<:NamedTuple} - tuple_fused = shared_arguments_fusion_rule(values(shared1), values(shared2)) - return map_blocklabels(SectorProduct ∘ NT ∘ arguments ∘ SectorProduct, tuple_fused) -end diff --git a/NDTensors/src/lib/SymmetrySectors/src/symmetry_style.jl b/NDTensors/src/lib/SymmetrySectors/src/symmetry_style.jl deleted file mode 100644 index f3e443dce0..0000000000 --- a/NDTensors/src/lib/SymmetrySectors/src/symmetry_style.jl +++ /dev/null @@ -1,17 +0,0 @@ -# This file defines SymmetryStyle, a trait to distinguish abelian groups, non-abelian groups -# and non-group fusion categories. 
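For concreteness, here is how the trait defined below resolves for sector types appearing in this patch (a usage sketch, not additional definitions):

```julia
# The trait is defined per type and falls back through `typeof`:
SymmetryStyle(U1(1))      # AbelianStyle()
SymmetryStyle(SU2(1//2))  # NotAbelianStyle()
# Combining styles is only abelian when every factor is abelian:
combine_styles(AbelianStyle(), AbelianStyle())     # AbelianStyle()
combine_styles(AbelianStyle(), NotAbelianStyle())  # NotAbelianStyle()
```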
- -using ..LabelledNumbers: LabelledInteger, label_type - -abstract type SymmetryStyle end - -struct AbelianStyle <: SymmetryStyle end -struct NotAbelianStyle <: SymmetryStyle end - -SymmetryStyle(x) = SymmetryStyle(typeof(x)) -SymmetryStyle(T::Type) = error("method `SymmetryStyle` not defined for type $(T)") -SymmetryStyle(L::Type{<:LabelledInteger}) = SymmetryStyle(label_type(L)) -SymmetryStyle(G::Type{<:AbstractUnitRange}) = SymmetryStyle(eltype(G)) - -combine_styles(::AbelianStyle, ::AbelianStyle) = AbelianStyle() -combine_styles(::SymmetryStyle, ::SymmetryStyle) = NotAbelianStyle() diff --git a/NDTensors/src/lib/SymmetrySectors/test/runtests.jl b/NDTensors/src/lib/SymmetrySectors/test/runtests.jl deleted file mode 100644 index 5bf73eb20f..0000000000 --- a/NDTensors/src/lib/SymmetrySectors/test/runtests.jl +++ /dev/null @@ -1,12 +0,0 @@ -@eval module $(gensym()) -using Test: @test, @testset -@testset "$(@__DIR__)" begin - filenames = filter(readdir(@__DIR__)) do f - startswith("test_")(f) && endswith(".jl")(f) - end - @testset "Test $(@__DIR__)/$filename" for filename in filenames - println("Running $(@__DIR__)/$filename") - @time include(filename) - end -end -end diff --git a/NDTensors/src/lib/SymmetrySectors/test/test_fusion_rules.jl b/NDTensors/src/lib/SymmetrySectors/test/test_fusion_rules.jl deleted file mode 100644 index bd00abef86..0000000000 --- a/NDTensors/src/lib/SymmetrySectors/test/test_fusion_rules.jl +++ /dev/null @@ -1,287 +0,0 @@ -@eval module $(gensym()) -using NDTensors.GradedAxes: - dual, fusion_product, space_isequal, gradedrange, flip, tensor_product -using NDTensors.SymmetrySectors: - ⊗, - Fib, - Ising, - O2, - SU, - SU2, - TrivialSector, - U1, - Z, - block_dimensions, - quantum_dimension, - trivial -using Test: @inferred, @test, @testset, @test_throws - -@testset "Simple SymmetrySector fusion rules" begin - @testset "Z{2} fusion rules" begin - z0 = Z{2}(0) - z1 = Z{2}(1) - - @test z0 ⊗ z0 == z0 - @test z0 ⊗ z1 == z1 - @test z1 ⊗ z1 == z0 - @test (@inferred z0 ⊗ z0) == z0 # no better way, see Julia PR 23426 - - q = TrivialSector() - @test (@inferred q ⊗ q) == q - @test (@inferred q ⊗ z0) == z0 - @test (@inferred z1 ⊗ q) == z1 - - # using GradedAxes interface - @test space_isequal(fusion_product(z0, z0), gradedrange([z0 => 1])) - @test space_isequal(fusion_product(z0, z1), gradedrange([z1 => 1])) - - # test different input number - @test space_isequal(fusion_product(z0), gradedrange([z0 => 1])) - @test space_isequal(fusion_product(z0, z0, z0), gradedrange([z0 => 1])) - @test space_isequal(fusion_product(z0, z0, z0, z0), gradedrange([z0 => 1])) - @test (@inferred block_dimensions(gradedrange([z1 => 1]))) == [1] - end - @testset "U(1) fusion rules" begin - q1 = U1(1) - q2 = U1(2) - q3 = U1(3) - - @test q1 ⊗ q1 == U1(2) - @test q1 ⊗ q2 == U1(3) - @test q2 ⊗ q1 == U1(3) - @test (@inferred q1 ⊗ q2) == q3 # no better way, see Julia PR 23426 - end - - @testset "O2 fusion rules" begin - s0e = O2(0) - s0o = O2(-1) - s12 = O2(1//2) - s1 = O2(1) - - q = TrivialSector() - @test space_isequal((@inferred s0e ⊗ q), gradedrange([s0e => 1])) - @test space_isequal((@inferred q ⊗ s0o), gradedrange([s0o => 1])) - - @test space_isequal((@inferred s0e ⊗ s0e), gradedrange([s0e => 1])) - @test space_isequal((@inferred s0o ⊗ s0e), gradedrange([s0o => 1])) - @test space_isequal((@inferred s0o ⊗ s0e), gradedrange([s0o => 1])) - @test space_isequal((@inferred s0o ⊗ s0o), gradedrange([s0e => 1])) - - @test space_isequal((@inferred s0e ⊗ s12), gradedrange([s12 => 1])) - @test 
space_isequal((@inferred s0o ⊗ s12), gradedrange([s12 => 1])) - @test space_isequal((@inferred s12 ⊗ s0e), gradedrange([s12 => 1])) - @test space_isequal((@inferred s12 ⊗ s0o), gradedrange([s12 => 1])) - @test space_isequal((@inferred s12 ⊗ s1), gradedrange([s12 => 1, O2(3//2) => 1])) - @test space_isequal((@inferred s12 ⊗ s12), gradedrange([s0o => 1, s0e => 1, s1 => 1])) - - @test (@inferred quantum_dimension(s0o ⊗ s1)) == 2 - @test (@inferred block_dimensions(s0o ⊗ s1)) == [2] - end - - @testset "SU2 fusion rules" begin - j1 = SU2(0) - j2 = SU2(1//2) - j3 = SU2(1) - j4 = SU2(3//2) - j5 = SU2(2) - - @test space_isequal(j1 ⊗ j2, gradedrange([j2 => 1])) - @test space_isequal(j2 ⊗ j2, gradedrange([j1 => 1, j3 => 1])) - @test space_isequal(j2 ⊗ j3, gradedrange([j2 => 1, j4 => 1])) - @test space_isequal(j3 ⊗ j3, gradedrange([j1 => 1, j3 => 1, j5 => 1])) - @test space_isequal((@inferred j1 ⊗ j2), gradedrange([j2 => 1])) - @test (@inferred quantum_dimension(j1 ⊗ j2)) == 2 - @test (@inferred block_dimensions(j1 ⊗ j2)) == [2] - - @test space_isequal(fusion_product(j2), gradedrange([j2 => 1])) - @test space_isequal(fusion_product(j2, j1), gradedrange([j2 => 1])) - @test space_isequal(fusion_product(j2, j1, j1), gradedrange([j2 => 1])) - end - - @testset "Fibonacci fusion rules" begin - ı = Fib("1") - τ = Fib("τ") - - @test space_isequal(ı ⊗ ı, gradedrange([ı => 1])) - @test space_isequal(ı ⊗ τ, gradedrange([τ => 1])) - @test space_isequal(τ ⊗ ı, gradedrange([τ => 1])) - @test space_isequal((@inferred τ ⊗ τ), gradedrange([ı => 1, τ => 1])) - @test (@inferred quantum_dimension(gradedrange([ı => 1, ı => 1]))) == 2.0 - end - - @testset "Ising fusion rules" begin - ı = Ising("1") - σ = Ising("σ") - ψ = Ising("ψ") - - @test space_isequal(ı ⊗ ı, gradedrange([ı => 1])) - @test space_isequal(ı ⊗ σ, gradedrange([σ => 1])) - @test space_isequal(σ ⊗ ı, gradedrange([σ => 1])) - @test space_isequal(ı ⊗ ψ, gradedrange([ψ => 1])) - @test space_isequal(ψ ⊗ ı, gradedrange([ψ => 1])) - @test space_isequal(σ ⊗ σ, gradedrange([ı => 1, ψ => 1])) - @test space_isequal(σ ⊗ ψ, gradedrange([σ => 1])) - @test space_isequal(ψ ⊗ σ, gradedrange([σ => 1])) - @test space_isequal(ψ ⊗ ψ, gradedrange([ı => 1])) - @test space_isequal((@inferred ψ ⊗ ψ), gradedrange([ı => 1])) - @test (@inferred quantum_dimension(σ ⊗ σ)) == 2.0 - end -end -@testset "Gradedrange fusion rules" begin - @testset "Trivial GradedUnitRange" begin - g1 = gradedrange([U1(0) => 1]) - g2 = gradedrange([SU2(0) => 1]) - @test space_isequal(trivial(g1), g1) - @test space_isequal(trivial(dual(g1)), g1) # trivial returns nondual - @test space_isequal(trivial(typeof(g2)), g2) - end - @testset "GradedUnitRange abelian tensor/fusion product" begin - g1 = gradedrange([U1(-1) => 1, U1(0) => 1, U1(1) => 2]) - g2 = gradedrange([U1(-2) => 2, U1(0) => 1, U1(1) => 2]) - - @test space_isequal(flip(dual(g1)), gradedrange([U1(1) => 1, U1(0) => 1, U1(-1) => 2])) - @test (@inferred block_dimensions(g1)) == [1, 1, 2] - - gt = gradedrange([ - U1(-3) => 2, - U1(-2) => 2, - U1(-1) => 4, - U1(-1) => 1, - U1(0) => 1, - U1(1) => 2, - U1(0) => 2, - U1(1) => 2, - U1(2) => 4, - ]) - gf = gradedrange([ - U1(-3) => 2, U1(-2) => 2, U1(-1) => 5, U1(0) => 3, U1(1) => 4, U1(2) => 4 - ]) - @test space_isequal((@inferred tensor_product(g1, g2)), gt) - @test space_isequal((@inferred fusion_product(g1, g2)), gf) - - gtd1 = gradedrange([ - U1(-1) => 2, - U1(-2) => 2, - U1(-3) => 4, - U1(1) => 1, - U1(0) => 1, - U1(-1) => 2, - U1(2) => 2, - U1(1) => 2, - U1(0) => 4, - ]) - gfd1 = gradedrange([ - U1(-3) 
=> 4, U1(-2) => 2, U1(-1) => 4, U1(0) => 5, U1(1) => 3, U1(2) => 2 - ]) - @test space_isequal((@inferred tensor_product(dual(g1), g2)), gtd1) - @test space_isequal((@inferred fusion_product(dual(g1), g2)), gfd1) - - gtd2 = gradedrange([ - U1(1) => 2, - U1(2) => 2, - U1(3) => 4, - U1(-1) => 1, - U1(0) => 1, - U1(1) => 2, - U1(-2) => 2, - U1(-1) => 2, - U1(0) => 4, - ]) - gfd2 = gradedrange([ - U1(-2) => 2, U1(-1) => 3, U1(0) => 5, U1(1) => 4, U1(2) => 2, U1(3) => 4 - ]) - @test space_isequal((@inferred tensor_product(g1, dual(g2))), gtd2) - @test space_isequal((@inferred fusion_product(g1, dual(g2))), gfd2) - - gtd = gradedrange([ - U1(3) => 2, - U1(2) => 2, - U1(1) => 4, - U1(1) => 1, - U1(0) => 1, - U1(-1) => 2, - U1(0) => 2, - U1(-1) => 2, - U1(-2) => 4, - ]) - gfd = gradedrange([ - U1(-2) => 4, U1(-1) => 4, U1(0) => 3, U1(1) => 5, U1(2) => 2, U1(3) => 2 - ]) - @test space_isequal((@inferred tensor_product(dual(g1), dual(g2))), gtd) - @test space_isequal((@inferred fusion_product(dual(g1), dual(g2))), gfd) - - # test different (non-product) sectors cannot be fused - @test_throws MethodError fusion_product(gradedrange([Z{2}(0) => 1]), g1) - @test_throws MethodError tensor_product(gradedrange([Z{2}(0) => 1]), g2) - end - - @testset "GradedUnitRange non-abelian fusion rules" begin - g3 = gradedrange([SU2(0) => 1, SU2(1//2) => 2, SU2(1) => 1]) - g4 = gradedrange([SU2(1//2) => 1, SU2(1) => 2]) - g34 = gradedrange([ - SU2(1//2) => 1, - SU2(0) => 2, - SU2(1) => 2, - SU2(1//2) => 1, - SU2(3//2) => 1, - SU2(1) => 2, - SU2(1//2) => 4, - SU2(3//2) => 4, - SU2(0) => 2, - SU2(1) => 2, - SU2(2) => 2, - ]) - - @test space_isequal(tensor_product(g3, g4), g34) - - @test space_isequal(dual(flip(g3)), g3) # trivial for SU(2) - @test space_isequal( - (@inferred fusion_product(g3, g4)), - gradedrange([SU2(0) => 4, SU2(1//2) => 6, SU2(1) => 6, SU2(3//2) => 5, SU2(2) => 2]), - ) - @test (@inferred block_dimensions(g3)) == [1, 4, 3] - - # test dual on non self-conjugate non-abelian representations - s1 = SU{3}((0, 0)) - f3 = SU{3}((1, 0)) - c3 = SU{3}((1, 1)) - ad8 = SU{3}((2, 1)) - - g5 = gradedrange([s1 => 1, f3 => 1]) - g6 = gradedrange([s1 => 1, c3 => 1]) - @test space_isequal(dual(flip(g5)), g6) - @test space_isequal( - fusion_product(g5, g6), gradedrange([s1 => 2, f3 => 1, c3 => 1, ad8 => 1]) - ) - @test space_isequal( - fusion_product(dual(g5), g6), - gradedrange([s1 => 1, f3 => 1, c3 => 2, SU{3}((2, 2)) => 1]), - ) - @test space_isequal( - fusion_product(g5, dual(g6)), - gradedrange([s1 => 1, f3 => 2, c3 => 1, SU{3}((2, 0)) => 1]), - ) - @test space_isequal( - fusion_product(dual(g5), dual(g6)), gradedrange([s1 => 2, f3 => 1, c3 => 1, ad8 => 1]) - ) - end - - @testset "Mixed GradedUnitRange - Sector fusion rules" begin - g1 = gradedrange([U1(1) => 1, U1(2) => 2]) - g2 = gradedrange([U1(2) => 1, U1(3) => 2]) - @test space_isequal((@inferred fusion_product(g1, U1(1))), g2) - @test space_isequal((@inferred fusion_product(U1(1), g1)), g2) - - g3 = gradedrange([SU2(0) => 1, SU2(1//2) => 2]) - g4 = gradedrange([SU2(0) => 2, SU2(1//2) => 1, SU2(1) => 2]) - @test space_isequal((@inferred fusion_product(g3, SU2(1//2))), g4) - @test space_isequal((@inferred fusion_product(SU2(1//2), g3)), g4) - - # test different simple sectors cannot be fused - @test_throws MethodError Z{2}(0) ⊗ U1(1) - @test_throws MethodError SU2(1) ⊗ U1(1) - @test_throws MethodError fusion_product(g1, SU2(1)) - @test_throws MethodError fusion_product(U1(1), g3) - end -end -end diff --git 
a/NDTensors/src/lib/SymmetrySectors/test/test_sector_product.jl b/NDTensors/src/lib/SymmetrySectors/test/test_sector_product.jl deleted file mode 100644 index de046fc307..0000000000 --- a/NDTensors/src/lib/SymmetrySectors/test/test_sector_product.jl +++ /dev/null @@ -1,622 +0,0 @@ -@eval module $(gensym()) -using NDTensors.SymmetrySectors: - ×, - ⊗, - Fib, - Ising, - SectorProduct, - SU, - SU2, - TrivialSector, - U1, - Z, - block_dimensions, - quantum_dimension, - arguments, - trivial -using NDTensors.GradedAxes: dual, fusion_product, space_isequal, gradedrange -using Test: @inferred, @test, @testset, @test_throws - -@testset "Test Ordered Products" begin - @testset "Ordered Constructor" begin - s = SectorProduct(U1(1)) - @test length(arguments(s)) == 1 - @test (@inferred quantum_dimension(s)) == 1 - @test (@inferred dual(s)) == SectorProduct(U1(-1)) - @test arguments(s)[1] == U1(1) - @test (@inferred trivial(s)) == SectorProduct(U1(0)) - - s = SectorProduct(U1(1), U1(2)) - @test length(arguments(s)) == 2 - @test (@inferred quantum_dimension(s)) == 1 - @test (@inferred dual(s)) == SectorProduct(U1(-1), U1(-2)) - @test arguments(s)[1] == U1(1) - @test arguments(s)[2] == U1(2) - @test (@inferred trivial(s)) == SectorProduct(U1(0), U1(0)) - - s = U1(1) × SU2(1//2) × U1(3) - @test length(arguments(s)) == 3 - @test (@inferred quantum_dimension(s)) == 2 - @test (@inferred dual(s)) == U1(-1) × SU2(1//2) × U1(-3) - @test arguments(s)[1] == U1(1) - @test arguments(s)[2] == SU2(1//2) - @test arguments(s)[3] == U1(3) - @test (@inferred trivial(s)) == SectorProduct(U1(0), SU2(0), U1(0)) - - s = U1(3) × SU2(1//2) × Fib("τ") - @test length(arguments(s)) == 3 - @test (@inferred quantum_dimension(s)) == 1.0 + √5 - @test dual(s) == U1(-3) × SU2(1//2) × Fib("τ") - @test arguments(s)[1] == U1(3) - @test arguments(s)[2] == SU2(1//2) - @test arguments(s)[3] == Fib("τ") - @test (@inferred trivial(s)) == SectorProduct(U1(0), SU2(0), Fib("1")) - - s = TrivialSector() × U1(3) × SU2(1 / 2) - @test length(arguments(s)) == 3 - @test (@inferred quantum_dimension(s)) == 2 - @test dual(s) == TrivialSector() × U1(-3) × SU2(1//2) - @test (@inferred trivial(s)) == SectorProduct(TrivialSector(), U1(0), SU2(0)) - @test s > trivial(s) - end - - @testset "Ordered comparisons" begin - # convention: missing arguments are filled with singlets - @test SectorProduct(U1(1), SU2(1)) == SectorProduct(U1(1), SU2(1)) - @test SectorProduct(U1(1), SU2(0)) != SectorProduct(U1(1), SU2(1)) - @test SectorProduct(U1(0), SU2(1)) != SectorProduct(U1(1), SU2(1)) - @test SectorProduct(U1(1)) != U1(1) - @test SectorProduct(U1(1)) == SectorProduct(U1(1), U1(0)) - @test SectorProduct(U1(1)) != SectorProduct(U1(1), U1(1)) - @test SectorProduct(U1(0), SU2(0)) == TrivialSector() - @test SectorProduct(U1(0), SU2(0)) == SectorProduct(TrivialSector(), SU2(0)) - @test SectorProduct(U1(0), SU2(0)) == SectorProduct(U1(0), TrivialSector()) - @test SectorProduct(U1(0), SU2(0)) == SectorProduct(TrivialSector(), TrivialSector()) - - @test SectorProduct(U1(0)) < SectorProduct((U1(1))) - @test SectorProduct(U1(0), U1(2)) < SectorProduct((U1(1)), U1(0)) - @test SectorProduct(U1(0)) < SectorProduct(U1(0), U1(1)) - @test SectorProduct(U1(0)) > SectorProduct(U1(0), U1(-1)) - end - - @testset "Quantum dimension and GradedUnitRange" begin - g = gradedrange([(U1(0) × Z{2}(0)) => 1, (U1(1) × Z{2}(0)) => 2]) # abelian - @test (@inferred quantum_dimension(g)) == 3 - - g = gradedrange([ # non-abelian - (SU2(0) × SU2(0)) => 1, - (SU2(1) × SU2(0)) => 1, - (SU2(0) × SU2(1)) => 
1, - (SU2(1) × SU2(1)) => 1, - ]) - @test (@inferred quantum_dimension(g)) == 16 - @test (@inferred block_dimensions(g)) == [1, 3, 3, 9] - - # mixed group - g = gradedrange([(U1(2) × SU2(0) × Z{2}(0)) => 1, (U1(2) × SU2(1) × Z{2}(0)) => 1]) - @test (@inferred quantum_dimension(g)) == 4 - @test (@inferred block_dimensions(g)) == [1, 3] - g = gradedrange([(SU2(0) × U1(0) × SU2(1//2)) => 1, (SU2(0) × U1(1) × SU2(1//2)) => 1]) - @test (@inferred quantum_dimension(g)) == 4 - @test (@inferred block_dimensions(g)) == [2, 2] - - # NonGroupCategory - g_fib = gradedrange([(Fib("1") × Fib("1")) => 1]) - g_ising = gradedrange([(Ising("1") × Ising("1")) => 1]) - @test (@inferred quantum_dimension((Fib("1") × Fib("1")))) == 1.0 - @test (@inferred quantum_dimension(g_fib)) == 1.0 - @test (@inferred quantum_dimension(g_ising)) == 1.0 - @test (@inferred quantum_dimension((Ising("1") × Ising("1")))) == 1.0 - @test (@inferred block_dimensions(g_fib)) == [1.0] - @test (@inferred block_dimensions(g_ising)) == [1.0] - - @test (@inferred quantum_dimension(U1(1) × Fib("1"))) == 1.0 - @test (@inferred quantum_dimension(gradedrange([U1(1) × Fib("1") => 1]))) == 1.0 - - # mixed product Abelian / NonAbelian / NonGroup - g = gradedrange([ - (U1(2) × SU2(0) × Ising(1)) => 1, - (U1(2) × SU2(1) × Ising(1)) => 1, - (U1(2) × SU2(0) × Ising("ψ")) => 1, - (U1(2) × SU2(1) × Ising("ψ")) => 1, - ]) - @test (@inferred quantum_dimension(g)) == 8.0 - @test (@inferred block_dimensions(g)) == [1.0, 3.0, 1.0, 3.0] - - ϕ = (1 + √5) / 2 - g = gradedrange([ - (Fib("1") × SU2(0) × U1(2)) => 1, - (Fib("1") × SU2(1) × U1(2)) => 1, - (Fib("τ") × SU2(0) × U1(2)) => 1, - (Fib("τ") × SU2(1) × U1(2)) => 1, - ]) - @test (@inferred quantum_dimension(g)) == 4.0 + 4.0ϕ - @test (@inferred block_dimensions(g)) == [1.0, 3.0, 1.0ϕ, 3.0ϕ] - end - - @testset "Fusion of Abelian products" begin - p1 = SectorProduct(U1(1)) - p2 = SectorProduct(U1(2)) - @test (@inferred p1 ⊗ TrivialSector()) == p1 - @test (@inferred TrivialSector() ⊗ p2) == p2 - @test (@inferred p1 ⊗ p2) == SectorProduct(U1(3)) - - p11 = U1(1) × U1(1) - @test p11 ⊗ p11 == U1(2) × U1(2) - - p123 = U1(1) × U1(2) × U1(3) - @test p123 ⊗ p123 == U1(2) × U1(4) × U1(6) - - s1 = SectorProduct(U1(1), Z{2}(1)) - s2 = SectorProduct(U1(0), Z{2}(0)) - @test s1 ⊗ s2 == U1(1) × Z{2}(1) - end - - @testset "Fusion of NonAbelian products" begin - p0 = SectorProduct(SU2(0)) - ph = SectorProduct(SU2(1//2)) - @test space_isequal( - (@inferred p0 ⊗ TrivialSector()), gradedrange([SectorProduct(SU2(0)) => 1]) - ) - @test space_isequal( - (@inferred TrivialSector() ⊗ ph), gradedrange([SectorProduct(SU2(1//2)) => 1]) - ) - - phh = SU2(1//2) × SU2(1//2) - @test space_isequal( - phh ⊗ phh, - gradedrange([ - (SU2(0) × SU2(0)) => 1, - (SU2(1) × SU2(0)) => 1, - (SU2(0) × SU2(1)) => 1, - (SU2(1) × SU2(1)) => 1, - ]), - ) - @test space_isequal( - phh ⊗ phh, - gradedrange([ - (SU2(0) × SU2(0)) => 1, - (SU2(1) × SU2(0)) => 1, - (SU2(0) × SU2(1)) => 1, - (SU2(1) × SU2(1)) => 1, - ]), - ) - end - - @testset "Fusion of NonGroupCategory products" begin - ı = Fib("1") - τ = Fib("τ") - s = ı × ı - @test space_isequal(s ⊗ s, gradedrange([s => 1])) - - s = τ × τ - @test space_isequal( - s ⊗ s, gradedrange([(ı × ı) => 1, (τ × ı) => 1, (ı × τ) => 1, (τ × τ) => 1]) - ) - - σ = Ising("σ") - ψ = Ising("ψ") - s = τ × σ - g = gradedrange([ - (ı × Ising("1")) => 1, (τ × Ising("1")) => 1, (ı × ψ) => 1, (τ × ψ) => 1 - ]) - @test space_isequal(s ⊗ s, g) - end - - @testset "Fusion of mixed Abelian and NonAbelian products" begin - p2h = U1(2) × 
SU2(1//2) - p1h = U1(1) × SU2(1//2) - @test space_isequal( - p2h ⊗ p1h, gradedrange([(U1(3) × SU2(0)) => 1, (U1(3) × SU2(1)) => 1]) - ) - - p1h1 = U1(1) × SU2(1//2) × Z{2}(1) - @test space_isequal( - p1h1 ⊗ p1h1, - gradedrange([(U1(2) × SU2(0) × Z{2}(0)) => 1, (U1(2) × SU2(1) × Z{2}(0)) => 1]), - ) - end - - @testset "Fusion of fully mixed products" begin - s = U1(1) × SU2(1//2) × Ising("σ") - @test space_isequal( - s ⊗ s, - gradedrange([ - (U1(2) × SU2(0) × Ising("1")) => 1, - (U1(2) × SU2(1) × Ising("1")) => 1, - (U1(2) × SU2(0) × Ising("ψ")) => 1, - (U1(2) × SU2(1) × Ising("ψ")) => 1, - ]), - ) - - ı = Fib("1") - τ = Fib("τ") - s = SU2(1//2) × U1(1) × τ - @test space_isequal( - s ⊗ s, - gradedrange([ - (SU2(0) × U1(2) × ı) => 1, - (SU2(1) × U1(2) × ı) => 1, - (SU2(0) × U1(2) × τ) => 1, - (SU2(1) × U1(2) × τ) => 1, - ]), - ) - - s = U1(1) × ı × τ - @test space_isequal(s ⊗ s, gradedrange([(U1(2) × ı × ı) => 1, (U1(2) × ı × τ) => 1])) - end - - @testset "Fusion of different length Categories" begin - @test SectorProduct(U1(1) × U1(0)) ⊗ SectorProduct(U1(1)) == - SectorProduct(U1(2) × U1(0)) - @test space_isequal( - (@inferred SectorProduct(SU2(0) × SU2(0)) ⊗ SectorProduct(SU2(1))), - gradedrange([SectorProduct(SU2(1) × SU2(0)) => 1]), - ) - - @test space_isequal( - (@inferred SectorProduct(SU2(1) × U1(1)) ⊗ SectorProduct(SU2(0))), - gradedrange([SectorProduct(SU2(1) × U1(1)) => 1]), - ) - @test space_isequal( - (@inferred SectorProduct(U1(1) × SU2(1)) ⊗ SectorProduct(U1(2))), - gradedrange([SectorProduct(U1(3) × SU2(1)) => 1]), - ) - - # check incompatible sectors - p12 = Z{2}(1) × U1(2) - z12 = Z{2}(1) × Z{2}(1) - @test_throws MethodError p12 ⊗ z12 - end - - @testset "GradedUnitRange fusion rules" begin - s1 = U1(1) × SU2(1//2) × Ising("σ") - s2 = U1(0) × SU2(1//2) × Ising("1") - g1 = gradedrange([s1 => 2]) - g2 = gradedrange([s2 => 1]) - @test space_isequal( - fusion_product(g1, g2), - gradedrange([U1(1) × SU2(0) × Ising("σ") => 2, U1(1) × SU2(1) × Ising("σ") => 2]), - ) - end -end - -@testset "Test Named Sector Products" begin - @testset "Construct from × of NamedTuples" begin - s = (A=U1(1),) × (B=Z{2}(0),) - @test length(arguments(s)) == 2 - @test arguments(s)[:A] == U1(1) - @test arguments(s)[:B] == Z{2}(0) - @test (@inferred quantum_dimension(s)) == 1 - @test (@inferred dual(s)) == (A=U1(-1),) × (B=Z{2}(0),) - @test (@inferred trivial(s)) == (A=U1(0),) × (B=Z{2}(0),) - - s = (A=U1(1),) × (B=SU2(2),) - @test length(arguments(s)) == 2 - @test arguments(s)[:A] == U1(1) - @test arguments(s)[:B] == SU2(2) - @test (@inferred quantum_dimension(s)) == 5 - @test (@inferred dual(s)) == (A=U1(-1),) × (B=SU2(2),) - @test (@inferred trivial(s)) == (A=U1(0),) × (B=SU2(0),) - @test s == (B=SU2(2),) × (A=U1(1),) - - s = s × (C=Ising("ψ"),) - @test length(arguments(s)) == 3 - @test arguments(s)[:C] == Ising("ψ") - @test (@inferred quantum_dimension(s)) == 5.0 - @test (@inferred dual(s)) == (A=U1(-1),) × (B=SU2(2),) × (C=Ising("ψ"),) - - s1 = (A=U1(1),) × (B=Z{2}(0),) - s2 = (A=U1(1),) × (C=Z{2}(0),) - @test_throws ArgumentError s1 × s2 - end - - @testset "Construct from Pairs" begin - s = SectorProduct("A" => U1(2)) - @test length(arguments(s)) == 1 - @test arguments(s)[:A] == U1(2) - @test s == SectorProduct(; A=U1(2)) - @test (@inferred quantum_dimension(s)) == 1 - @test (@inferred dual(s)) == SectorProduct("A" => U1(-2)) - @test (@inferred trivial(s)) == SectorProduct(; A=U1(0)) - - s = SectorProduct("B" => Ising("ψ"), :C => Z{2}(1)) - @test length(arguments(s)) == 2 - @test arguments(s)[:B] == 
Ising("ψ") - @test arguments(s)[:C] == Z{2}(1) - @test (@inferred quantum_dimension(s)) == 1.0 - end - - @testset "Comparisons with unspecified labels" begin - # convention: arguments evaluate as equal if unmatched labels are trivial - # this is different from ordered tuple convention - q2 = SectorProduct(; N=U1(2)) - q20 = (N=U1(2),) × (J=SU2(0),) - @test q20 == q2 - @test !(q20 < q2) - @test !(q2 < q20) - - q21 = (N=U1(2),) × (J=SU2(1),) - @test q21 != q2 - @test q20 < q21 - @test q2 < q21 - - a = (A=U1(0),) × (B=U1(2),) - b = (B=U1(2),) × (C=U1(0),) - @test a == b - c = (B=U1(2),) × (C=U1(1),) - @test a != c - end - - @testset "Quantum dimension and GradedUnitRange" begin - g = gradedrange([ - SectorProduct(; A=U1(0), B=Z{2}(0)) => 1, SectorProduct(; A=U1(1), B=Z{2}(0)) => 2 - ]) # abelian - @test (@inferred quantum_dimension(g)) == 3 - - g = gradedrange([ # non-abelian - SectorProduct(; A=SU2(0), B=SU2(0)) => 1, - SectorProduct(; A=SU2(1), B=SU2(0)) => 1, - SectorProduct(; A=SU2(0), B=SU2(1)) => 1, - SectorProduct(; A=SU2(1), B=SU2(1)) => 1, - ]) - @test (@inferred quantum_dimension(g)) == 16 - - # mixed group - g = gradedrange([ - SectorProduct(; A=U1(2), B=SU2(0), C=Z{2}(0)) => 1, - SectorProduct(; A=U1(2), B=SU2(1), C=Z{2}(0)) => 1, - ]) - @test (@inferred quantum_dimension(g)) == 4 - g = gradedrange([ - SectorProduct(; A=SU2(0), B=Z{2}(0), C=SU2(1//2)) => 1, - SectorProduct(; A=SU2(0), B=Z{2}(1), C=SU2(1//2)) => 1, - ]) - @test (@inferred quantum_dimension(g)) == 4 - - # non group sectors - g_fib = gradedrange([SectorProduct(; A=Fib("1"), B=Fib("1")) => 1]) - g_ising = gradedrange([SectorProduct(; A=Ising("1"), B=Ising("1")) => 1]) - @test (@inferred quantum_dimension(g_fib)) == 1.0 - @test (@inferred quantum_dimension(g_ising)) == 1.0 - - # mixed product Abelian / NonAbelian / NonGroup - g = gradedrange([ - SectorProduct(; A=U1(2), B=SU2(0), C=Ising(1)) => 1, - SectorProduct(; A=U1(2), B=SU2(1), C=Ising(1)) => 1, - SectorProduct(; A=U1(2), B=SU2(0), C=Ising("ψ")) => 1, - SectorProduct(; A=U1(2), B=SU2(1), C=Ising("ψ")) => 1, - ]) - @test (@inferred quantum_dimension(g)) == 8.0 - - g = gradedrange([ - SectorProduct(; A=Fib("1"), B=SU2(0), C=U1(2)) => 1, - SectorProduct(; A=Fib("1"), B=SU2(1), C=U1(2)) => 1, - SectorProduct(; A=Fib("τ"), B=SU2(0), C=U1(2)) => 1, - SectorProduct(; A=Fib("τ"), B=SU2(1), C=U1(2)) => 1, - ]) - @test (@inferred quantum_dimension(g)) == 4.0 + 4.0quantum_dimension(Fib("τ")) - end - - @testset "Fusion of Abelian products" begin - q00 = SectorProduct(;) - q10 = SectorProduct(; A=U1(1)) - q01 = SectorProduct(; B=U1(1)) - q11 = SectorProduct(; A=U1(1), B=U1(1)) - - @test (@inferred q10 ⊗ q10) == SectorProduct(; A=U1(2)) - @test (@inferred q01 ⊗ q00) == q01 - @test (@inferred q00 ⊗ q01) == q01 - @test (@inferred q10 ⊗ q01) == q11 - @test q11 ⊗ q11 == SectorProduct(; A=U1(2), B=U1(2)) - - s11 = SectorProduct(; A=U1(1), B=Z{2}(1)) - s10 = SectorProduct(; A=U1(1)) - s01 = SectorProduct(; B=Z{2}(1)) - @test (@inferred s01 ⊗ q00) == s01 - @test (@inferred q00 ⊗ s01) == s01 - @test (@inferred s10 ⊗ s01) == s11 - @test s11 ⊗ s11 == SectorProduct(; A=U1(2), B=Z{2}(0)) - end - - @testset "Fusion of NonAbelian products" begin - p0 = SectorProduct(;) - pha = SectorProduct(; A=SU2(1//2)) - phb = SectorProduct(; B=SU2(1//2)) - phab = SectorProduct(; A=SU2(1//2), B=SU2(1//2)) - - @test space_isequal( - (@inferred pha ⊗ pha), - gradedrange([SectorProduct(; A=SU2(0)) => 1, SectorProduct(; A=SU2(1)) => 1]), - ) - @test space_isequal((@inferred pha ⊗ p0), gradedrange([pha => 
1])) - @test space_isequal((@inferred p0 ⊗ phb), gradedrange([phb => 1])) - @test space_isequal((@inferred pha ⊗ phb), gradedrange([phab => 1])) - - @test space_isequal( - phab ⊗ phab, - gradedrange([ - SectorProduct(; A=SU2(0), B=SU2(0)) => 1, - SectorProduct(; A=SU2(1), B=SU2(0)) => 1, - SectorProduct(; A=SU2(0), B=SU2(1)) => 1, - SectorProduct(; A=SU2(1), B=SU2(1)) => 1, - ]), - ) - end - - @testset "Fusion of NonGroupCategory products" begin - ı = Fib("1") - τ = Fib("τ") - s = SectorProduct(; A=ı, B=ı) - @test space_isequal(s ⊗ s, gradedrange([s => 1])) - - s = SectorProduct(; A=τ, B=τ) - @test space_isequal( - s ⊗ s, - gradedrange([ - SectorProduct(; A=ı, B=ı) => 1, - SectorProduct(; A=τ, B=ı) => 1, - SectorProduct(; A=ı, B=τ) => 1, - SectorProduct(; A=τ, B=τ) => 1, - ]), - ) - - σ = Ising("σ") - ψ = Ising("ψ") - s = SectorProduct(; A=τ, B=σ) - g = gradedrange([ - SectorProduct(; A=ı, B=Ising("1")) => 1, - SectorProduct(; A=τ, B=Ising("1")) => 1, - SectorProduct(; A=ı, B=ψ) => 1, - SectorProduct(; A=τ, B=ψ) => 1, - ]) - @test space_isequal(s ⊗ s, g) - end - - @testset "Fusion of mixed Abelian and NonAbelian products" begin - q0h = SectorProduct(; J=SU2(1//2)) - q10 = (N=U1(1),) × (J=SU2(0),) - # Put names in reverse order sometimes: - q1h = (J=SU2(1//2),) × (N=U1(1),) - q11 = (N=U1(1),) × (J=SU2(1),) - q20 = (N=U1(2),) × (J=SU2(0),) # julia 1.6 does not accept gradedrange without J - q2h = (N=U1(2),) × (J=SU2(1//2),) - q21 = (N=U1(2),) × (J=SU2(1),) - q22 = (N=U1(2),) × (J=SU2(2),) - - @test space_isequal(q1h ⊗ q1h, gradedrange([q20 => 1, q21 => 1])) - @test space_isequal(q10 ⊗ q1h, gradedrange([q2h => 1])) - @test space_isequal((@inferred q0h ⊗ q1h), gradedrange([q10 => 1, q11 => 1])) - @test space_isequal(q11 ⊗ q11, gradedrange([q20 => 1, q21 => 1, q22 => 1])) - end - - @testset "Fusion of fully mixed products" begin - s = SectorProduct(; A=U1(1), B=SU2(1//2), C=Ising("σ")) - @test space_isequal( - s ⊗ s, - gradedrange([ - SectorProduct(; A=U1(2), B=SU2(0), C=Ising("1")) => 1, - SectorProduct(; A=U1(2), B=SU2(1), C=Ising("1")) => 1, - SectorProduct(; A=U1(2), B=SU2(0), C=Ising("ψ")) => 1, - SectorProduct(; A=U1(2), B=SU2(1), C=Ising("ψ")) => 1, - ]), - ) - - ı = Fib("1") - τ = Fib("τ") - s = SectorProduct(; A=SU2(1//2), B=U1(1), C=τ) - @test space_isequal( - s ⊗ s, - gradedrange([ - SectorProduct(; A=SU2(0), B=U1(2), C=ı) => 1, - SectorProduct(; A=SU2(1), B=U1(2), C=ı) => 1, - SectorProduct(; A=SU2(0), B=U1(2), C=τ) => 1, - SectorProduct(; A=SU2(1), B=U1(2), C=τ) => 1, - ]), - ) - - s = SectorProduct(; A=τ, B=U1(1), C=ı) - @test space_isequal( - s ⊗ s, - gradedrange([ - SectorProduct(; B=U1(2), A=ı, C=ı) => 1, SectorProduct(; B=U1(2), A=τ, C=ı) => 1 - ]), - ) - end - @testset "GradedUnitRange fusion rules" begin - s1 = SectorProduct(; A=U1(1), B=SU2(1//2), C=Ising("σ")) - s2 = SectorProduct(; A=U1(0), B=SU2(1//2), C=Ising("1")) - g1 = gradedrange([s1 => 2]) - g2 = gradedrange([s2 => 1]) - s3 = SectorProduct(; A=U1(1), B=SU2(0), C=Ising("σ")) - s4 = SectorProduct(; A=U1(1), B=SU2(1), C=Ising("σ")) - @test space_isequal(fusion_product(g1, g2), gradedrange([s3 => 2, s4 => 2])) - - sA = SectorProduct(; A=U1(1)) - sB = SectorProduct(; B=SU2(1//2)) - sAB = SectorProduct(; A=U1(1), B=SU2(1//2)) - gA = gradedrange([sA => 2]) - gB = gradedrange([sB => 1]) - @test space_isequal(fusion_product(gA, gB), gradedrange([sAB => 2])) - end -end - -@testset "Mixing implementations" begin - st1 = SectorProduct(U1(1)) - sA1 = SectorProduct(; A=U1(1)) - - @test sA1 != st1 - @test_throws MethodError sA1 < 
st1 - @test_throws MethodError st1 < sA1 - @test_throws MethodError st1 ⊗ sA1 - @test_throws MethodError sA1 ⊗ st1 - @test_throws ArgumentError st1 × sA1 - @test_throws ArgumentError sA1 × st1 -end - -@testset "Empty SymmetrySector" begin - st1 = SectorProduct(U1(1)) - sA1 = SectorProduct(; A=U1(1)) - - for s in (SectorProduct(()), SectorProduct((;))) - @test s == TrivialSector() - @test s == SectorProduct(()) - @test s == SectorProduct((;)) - - @test !(s < SectorProduct()) - @test !(s < SectorProduct(;)) - - @test (@inferred s × SectorProduct(())) == s - @test (@inferred s × SectorProduct((;))) == s - @test (@inferred s ⊗ SectorProduct(())) == s - @test (@inferred s ⊗ SectorProduct((;))) == s - - @test (@inferred dual(s)) == s - @test (@inferred trivial(s)) == s - @test (@inferred quantum_dimension(s)) == 1 - - g0 = gradedrange([s => 2]) - @test space_isequal((@inferred fusion_product(g0, g0)), gradedrange([s => 4])) - - @test (@inferred s × U1(1)) == st1 - @test (@inferred U1(1) × s) == st1 - @test (@inferred s × st1) == st1 - @test (@inferred st1 × s) == st1 - @test (@inferred s × sA1) == sA1 - @test (@inferred sA1 × s) == sA1 - - @test (@inferred U1(1) ⊗ s) == st1 - @test (@inferred s ⊗ U1(1)) == st1 - @test (@inferred SU2(0) ⊗ s) == gradedrange([SectorProduct(SU2(0)) => 1]) - @test (@inferred s ⊗ SU2(0)) == gradedrange([SectorProduct(SU2(0)) => 1]) - @test (@inferred Fib("τ") ⊗ s) == gradedrange([SectorProduct(Fib("τ")) => 1]) - @test (@inferred s ⊗ Fib("τ")) == gradedrange([SectorProduct(Fib("τ")) => 1]) - - @test (@inferred st1 ⊗ s) == st1 - @test (@inferred SectorProduct(SU2(0)) ⊗ s) == gradedrange([SectorProduct(SU2(0)) => 1]) - @test (@inferred SectorProduct(Fib("τ"), SU2(1), U1(2)) ⊗ s) == - gradedrange([SectorProduct(Fib("τ"), SU2(1), U1(2)) => 1]) - - @test (@inferred sA1 ⊗ s) == sA1 - @test (@inferred SectorProduct(; A=SU2(0)) ⊗ s) == - gradedrange([SectorProduct(; A=SU2(0)) => 1]) - @test (@inferred SectorProduct(; A=Fib("τ"), B=SU2(1), C=U1(2)) ⊗ s) == - gradedrange([SectorProduct(; A=Fib("τ"), B=SU2(1), C=U1(2)) => 1]) - - # Empty behaves as empty NamedTuple - @test s != U1(0) - @test s == SectorProduct(U1(0)) - @test s == SectorProduct(; A=U1(0)) - @test SectorProduct(; A=U1(0)) == s - @test s != sA1 - @test s != st1 - - @test s < st1 - @test SectorProduct(U1(-1)) < s - @test s < sA1 - @test s > SectorProduct(; A=U1(-1)) - @test !(s < SectorProduct(; A=U1(0))) - @test !(s > SectorProduct(; A=U1(0))) - end -end -end diff --git a/NDTensors/src/lib/SymmetrySectors/test/test_simple_sectors.jl b/NDTensors/src/lib/SymmetrySectors/test/test_simple_sectors.jl deleted file mode 100644 index a2b243c725..0000000000 --- a/NDTensors/src/lib/SymmetrySectors/test/test_simple_sectors.jl +++ /dev/null @@ -1,215 +0,0 @@ -@eval module $(gensym()) -using NDTensors.GradedAxes: dual -using NDTensors.SymmetrySectors: - Fib, - Ising, - O2, - SU, - SU2, - TrivialSector, - U1, - Z, - quantum_dimension, - fundamental, - istrivial, - trivial -using Test: @inferred, @test, @testset, @test_throws -@testset "Test SymmetrySectors Types" begin - @testset "TrivialSector" begin - q = TrivialSector() - - @test (@inferred quantum_dimension(q)) == 1 - @test q == q - @test trivial(q) == q - @test istrivial(q) - - @test dual(q) == q - @test !isless(q, q) - end - - @testset "U(1)" begin - q1 = U1(1) - q2 = U1(2) - q3 = U1(3) - - @test quantum_dimension(q1) == 1 - @test quantum_dimension(q2) == 1 - @test (@inferred quantum_dimension(q1)) == 1 - - @test trivial(q1) == U1(0) - @test trivial(U1) == U1(0) - @test 
istrivial(U1(0)) - - @test dual(U1(2)) == U1(-2) - @test isless(U1(1), U1(2)) - @test !isless(U1(2), U1(1)) - @test U1(Int8(1)) == U1(1) - @test U1(UInt32(1)) == U1(1) - - @test U1(0) == TrivialSector() - @test TrivialSector() == U1(0) - @test U1(-1) < TrivialSector() - @test TrivialSector() < U1(1) - @test U1(Int8(1)) < U1(Int32(2)) - end - - @testset "Z₂" begin - z0 = Z{2}(0) - z1 = Z{2}(1) - - @test trivial(Z{2}) == Z{2}(0) - @test istrivial(Z{2}(0)) - - @test quantum_dimension(z0) == 1 - @test quantum_dimension(z1) == 1 - @test (@inferred quantum_dimension(z0)) == 1 - - @test dual(z0) == z0 - @test dual(z1) == z1 - - @test dual(Z{2}(1)) == Z{2}(1) - @test isless(Z{2}(0), Z{2}(1)) - @test !isless(Z{2}(1), Z{2}(0)) - @test Z{2}(0) == z0 - @test Z{2}(-3) == z1 - - @test Z{2}(0) == TrivialSector() - @test TrivialSector() < Z{2}(1) - @test_throws MethodError U1(0) < Z{2}(1) - @test Z{2}(0) != Z{2}(1) - @test Z{2}(0) != Z{3}(0) - @test Z{2}(0) != U1(0) - end - - @testset "O(2)" begin - s0e = O2(0) - s0o = O2(-1) - s12 = O2(1//2) - s1 = O2(1) - - @test trivial(O2) == s0e - @test istrivial(s0e) - - @test (@inferred quantum_dimension(s0e)) == 1 - @test (@inferred quantum_dimension(s0o)) == 1 - @test (@inferred quantum_dimension(s12)) == 2 - @test (@inferred quantum_dimension(s1)) == 2 - - @test (@inferred dual(s0e)) == s0e - @test (@inferred dual(s0o)) == s0o - @test (@inferred dual(s12)) == s12 - @test (@inferred dual(s1)) == s1 - - @test s0o < s0e < s12 < s1 - @test s0e == TrivialSector() - @test s0o < TrivialSector() - @test TrivialSector() < s12 - end - - @testset "SU(2)" begin - j1 = SU2(0) - j2 = SU2(1//2) # Rational will be cast to HalfInteger - j3 = SU2(1) - j4 = SU2(3//2) - - # alternative constructors - @test j2 == SU{2}((1,)) # tuple SU(N)-like constructor - @test j2 == SU{2,1}((1,)) # tuple constructor with explicit {N,N-1} - @test j2 == SU((1,)) # infer N from tuple length - @test j2 == SU{2}((Int8(1),)) # any Integer type accepted - @test j2 == SU{2}((UInt32(1),)) # any Integer type accepted - @test j2 == SU2(1 / 2) # Float will be cast to HalfInteger - @test_throws MethodError SU2((1,)) # avoid confusion between tuple and half-integer interfaces - @test_throws MethodError SU{2,1}(1) # avoid confusion - - @test trivial(SU{2}) == SU2(0) - @test istrivial(SU2(0)) - @test fundamental(SU{2}) == SU2(1//2) - - @test quantum_dimension(j1) == 1 - @test quantum_dimension(j2) == 2 - @test quantum_dimension(j3) == 3 - @test quantum_dimension(j4) == 4 - @test (@inferred quantum_dimension(j1)) == 1 - - @test dual(j1) == j1 - @test dual(j2) == j2 - @test dual(j3) == j3 - @test dual(j4) == j4 - - @test j1 < j2 < j3 < j4 - @test SU2(0) == TrivialSector() - @test !(j2 < TrivialSector()) - @test TrivialSector() < j2 - end - - @testset "SU(N)" begin - f3 = SU{3}((1, 0)) - f4 = SU{4}((1, 0, 0)) - ad3 = SU{3}((2, 1)) - ad4 = SU{4}((2, 1, 1)) - - @test trivial(SU{3}) == SU{3}((0, 0)) - @test istrivial(SU{3}((0, 0))) - @test trivial(SU{4}) == SU{4}((0, 0, 0)) - @test istrivial(SU{4}((0, 0, 0))) - @test SU{3}((0, 0)) == TrivialSector() - @test SU{4}((0, 0, 0)) == TrivialSector() - - @test fundamental(SU{3}) == f3 - @test fundamental(SU{4}) == f4 - - @test dual(f3) == SU{3}((1, 1)) - @test dual(f4) == SU{4}((1, 1, 1)) - @test dual(ad3) == ad3 - @test dual(ad4) == ad4 - - @test quantum_dimension(f3) == 3 - @test quantum_dimension(f4) == 4 - @test quantum_dimension(ad3) == 8 - @test quantum_dimension(ad4) == 15 - @test quantum_dimension(SU{3}((4, 2))) == 27 - @test quantum_dimension(SU{3}((3, 3))) == 10 - 
@test quantum_dimension(SU{3}((3, 0))) == 10 - @test quantum_dimension(SU{3}((0, 0))) == 1 - @test (@inferred quantum_dimension(f3)) == 3 - end - - @testset "Fibonacci" begin - ı = Fib("1") - τ = Fib("τ") - - @test trivial(Fib) == ı - @test istrivial(ı) - @test ı == TrivialSector() - - @test dual(ı) == ı - @test dual(τ) == τ - - @test (@inferred quantum_dimension(ı)) == 1.0 - @test (@inferred quantum_dimension(τ)) == ((1 + √5) / 2) - - @test ı < τ - end - - @testset "Ising" begin - ı = Ising("1") - σ = Ising("σ") - ψ = Ising("ψ") - - @test trivial(Ising) == ı - @test istrivial(ı) - @test ı == TrivialSector() - - @test dual(ı) == ı - @test dual(σ) == σ - @test dual(ψ) == ψ - - @test (@inferred quantum_dimension(ı)) == 1.0 - @test (@inferred quantum_dimension(σ)) == √2 - @test (@inferred quantum_dimension(ψ)) == 1.0 - - @test ı < σ < ψ - end -end -end diff --git a/NDTensors/src/lib/TagSets/.JuliaFormatter.toml b/NDTensors/src/lib/TagSets/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/TagSets/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/TagSets/README.md b/NDTensors/src/lib/TagSets/README.md deleted file mode 100644 index 346026463b..0000000000 --- a/NDTensors/src/lib/TagSets/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# TagSets.jl - -A sorted collection of unique tags of type `T`. - -# TODO - -- Add `skipchars` (see `skipmissing`) and `delim` for delimiter. -- https://docs.julialang.org/en/v1/base/strings/#Base.strip -- https://docs.julialang.org/en/v1/stdlib/DelimitedFiles/#Delimited-Files -- Add a `Bool` param for bounds checking/ignoring overflow/spillover? -- Make `S` a first argument, hardcode `SmallVector` storage? -- https://juliacollections.github.io/DataStructures.jl/v0.9/sorted_containers.html -- https://github.com/JeffreySarnoff/SortingNetworks.jl -- https://github.com/vvjn/MergeSorted.jl -- https://bkamins.github.io/julialang/2023/08/25/infiltrate.html -- https://github.com/Jutho/TensorKit.jl/blob/master/src/auxiliary/dicts.jl -- https://github.com/tpapp/SortedVectors.jl -- https://discourse.julialang.org/t/special-purpose-subtypes-of-arrays/20327 -- https://discourse.julialang.org/t/all-the-ways-to-group-reduce-sorted-vectors-ideas/45239 -- https://discourse.julialang.org/t/sorting-a-vector-of-fixed-size/71766 diff --git a/NDTensors/src/lib/TagSets/examples/benchmark.jl b/NDTensors/src/lib/TagSets/examples/benchmark.jl deleted file mode 100644 index 98a40e5f46..0000000000 --- a/NDTensors/src/lib/TagSets/examples/benchmark.jl +++ /dev/null @@ -1,47 +0,0 @@ -using NDTensors.TagSets -using NDTensors.InlineStrings -using NDTensors.SmallVectors -using NDTensors.SortedSets -using NDTensors.TagSets - -using BenchmarkTools -using Cthulhu -using Profile -using PProf - -function main(; profile=false) - TS = SmallTagSet{10,String31} - ts1 = TS(["a", "b"]) - ts2 = TS(["b", "c", "d"]) - - @btime $TS($("x,y")) - - @show union(ts1, ts2) - @show intersect(ts1, ts2) - @show setdiff(ts1, ts2) - @show symdiff(ts1, ts2) - - @btime union($ts1, $ts2) - @btime intersect($ts1, $ts2) - @btime setdiff($ts1, $ts2) - @btime symdiff($ts1, $ts2) - - @show addtags(ts1, ts2) - @show commontags(ts1, ts2) - @show removetags(ts1, ts2) - @show noncommontags(ts1, ts2) - @show replacetags(ts1, ["b"], ["c", "d"]) - - @btime addtags($ts1, $ts2) - @btime commontags($ts1, $ts2) - @btime removetags($ts1, $ts2) - @btime noncommontags($ts1, $ts2) - @btime replacetags($ts1, $(["b"]), $(["c", "d"])) - - if 
profile
-    Profile.clear()
-    # `TS` is the tag-set type defined at the top of `main`; the old
-    # `data_type=set_type` keyword referenced an undefined variable.
-    @profile foreach(_ -> TS("x,y"), 1:1_000_000)
-    return pprof()
-  end
-  return nothing
-end
diff --git a/NDTensors/src/lib/TagSets/src/TagSets.jl b/NDTensors/src/lib/TagSets/src/TagSets.jl
deleted file mode 100644
index c4d9f1013f..0000000000
--- a/NDTensors/src/lib/TagSets/src/TagSets.jl
+++ /dev/null
@@ -1,89 +0,0 @@
-module TagSets
-using Dictionaries
-using ..SmallVectors
-using ..SortedSets
-
-using Base: @propagate_inbounds
-
-export TagSet,
-  SmallTagSet, MSmallTagSet, addtags, removetags, replacetags, commontags, noncommontags
-
-# A sorted collection of unique tags of type `T`.
-struct TagSet{T,D<:AbstractIndices{T}} <: AbstractWrappedSet{T,D}
-  data::D
-end
-
-TagSet{T}(data::D) where {T,D<:AbstractIndices{T}} = TagSet{T,D}(data)
-
-TagSet{T,D}(vec::AbstractVector) where {T,D<:AbstractIndices{T}} = TagSet{T,D}(D(vec))
-TagSet{T,D}() where {T,D<:AbstractIndices{T}} = TagSet{T,D}(D())
-
-# Defaults to Indices if unspecified.
-default_data_type() = Indices{String}
-TagSet(vec::AbstractVector) = TagSet(default_data_type()(vec))
-
-# Constructor from string.
-default_delim() = ','
-# Forward the delimiter so a non-default `delim` is not silently dropped.
-@inline function TagSet(str::AbstractString; delim=default_delim())
-  return TagSet(default_data_type(), str; delim=delim)
-end
-@inline function TagSet(
-  ::Type{D}, str::AbstractString; delim=default_delim()
-) where {T,D<:AbstractIndices{T}}
-  return TagSet{T,D}(str; delim=delim)
-end
-@inline function TagSet{T,D}(
-  str::AbstractString; delim=default_delim()
-) where {T,D<:AbstractIndices{T}}
-  return TagSet{T,D}(split(str, delim))
-end
-
-for (SetTyp, TagSetTyp) in ((:SmallSet, :SmallTagSet), (:MSmallSet, :MSmallTagSet))
-  @eval begin
-    const $TagSetTyp{S,T,Order} = TagSet{T,$SetTyp{S,T,Order}}
-    @propagate_inbounds function $TagSetTyp{S,I}(a::AbstractArray; kwargs...) where {S,I}
-      return TagSet($SetTyp{S,I}(a; kwargs...))
-    end
-    @propagate_inbounds $TagSetTyp{S}(; kwargs...) where {S} = $TagSetTyp{S}([]; kwargs...)
-    @propagate_inbounds $TagSetTyp{S}(iter; kwargs...) where {S} =
-      $TagSetTyp{S}(collect(iter); kwargs...)
-    @propagate_inbounds $TagSetTyp{S}(a::AbstractArray{I}; kwargs...) where {S,I} =
-      $TagSetTyp{S,I}(a; kwargs...)
-    # Strings get split by a delimiter; keyword arguments must be
-    # forwarded with `;`, not `,`.
-    function $TagSetTyp{S}(str::T; kwargs...) where {S,T<:AbstractString}
-      return $TagSetTyp{S,T}(str; kwargs...)
-    end
-    # Strings get split by a delimiter.
-    function $TagSetTyp{S,T}(
-      str::AbstractString; delim=default_delim(), kwargs...
-    ) where {S,T}
-      # TODO: Optimize for `SmallSet`.
-      return $TagSetTyp{S,T}(split(str, delim); kwargs...)
-    end
-  end
-end
-
-# Field accessors
-Base.parent(set::TagSet) = getfield(set, :data)
-
-# AbstractWrappedSet interface.
-# Specialized version when they are the same data type is faster.
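# A quick usage sketch (illustrative only, not part of the original file; it
# relies on the constructors and wrapped-set operations defined in this module):
#
#   julia> ts1 = TagSet("a,b")        # split on the default ',' delimiter
#
#   julia> ts2 = TagSet(["b", "c"])   # from a vector, stored as `Indices{String}`
#
#   julia> union(ts1, ts2)            # tags "a", "b", "c"
#
#   julia> setdiff(ts1, ts2)          # tag "a"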
-@inline SortedSets.rewrap(::TagSet{T,D}, data::D) where {T,D<:AbstractIndices{T}} = - TagSet{T,D}(data) -@inline SortedSets.rewrap(::TagSet, data) = TagSet(data) - -# TagSet interface -addtags(set::TagSet, items) = union(set, items) -removetags(set::TagSet, items) = setdiff(set, items) -commontags(set::TagSet, items) = intersect(set, items) -noncommontags(set::TagSet, items) = symdiff(set, items) -function replacetags(set::TagSet, rem, add) - remtags = setdiff(set, rem) - if length(set) ≠ length(remtags) + length(rem) - # Not all are removed, no replacement - return set - end - return union(remtags, add) -end - -end diff --git a/NDTensors/src/lib/TagSets/test/runtests.jl b/NDTensors/src/lib/TagSets/test/runtests.jl deleted file mode 100644 index ee5c72a199..0000000000 --- a/NDTensors/src/lib/TagSets/test/runtests.jl +++ /dev/null @@ -1,35 +0,0 @@ -@eval module $(gensym()) -using Test: @test, @testset -using NDTensors.TagSets -using NDTensors.SortedSets -using NDTensors.SmallVectors -using NDTensors.InlineStrings -using NDTensors.Dictionaries - -@testset "Test NDTensors.TagSets" begin - for data_type in (Vector,) # SmallVector{10}) - d1 = data_type{String31}(["1", "3", "5"]) - d2 = data_type{String31}(["2", "3", "6"]) - for set_type in (Indices, SortedSet) - s1 = TagSet(set_type(d1)) - s2 = TagSet(set_type(d2)) - - @test issetequal(union(s1, s2), ["1", "2", "3", "5", "6"]) - @test issetequal(setdiff(s1, s2), ["1", "5"]) - @test issetequal(symdiff(s1, s2), ["1", "2", "5", "6"]) - @test issetequal(intersect(s1, s2), ["3"]) - - # TagSet interface - @test issetequal(addtags(s1, ["4"]), ["1", "3", "4", "5"]) - @test issetequal(removetags(s1, ["3"]), ["1", "5"]) - @test issetequal(replacetags(s1, ["3"], ["6", "7"]), ["1", "5", "6", "7"]) - @test issetequal(replacetags(s1, ["3", "4"], ["6, 7"]), ["1", "3", "5"]) - - # Only test if `isinsertable`. Make sure that is false - # for `SmallVector`. 
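# `replacetags` is all-or-nothing: tags are replaced only when every tag in
# `rem` was present, as the two `replacetags` tests above demonstrate. A sketch
# (not part of the original file):
#
#   julia> s = TagSet("1,3,5");
#
#   julia> replacetags(s, ["3"], ["6", "7"])   # tags "1", "5", "6", "7"
#
#   julia> replacetags(s, ["3", "4"], ["6"])   # "4" is absent, so `s` is returned unchanged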
- ## @test issetequal(insert!(copy(s1), "4"), ["1", "3", "4", "5"]) - ## @test issetequal(delete!(copy(s1), "3"), ["1", "5"]) - end - end -end -end diff --git a/NDTensors/src/lib/TensorAlgebra/.JuliaFormatter.toml b/NDTensors/src/lib/TensorAlgebra/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/TensorAlgebra/Project.toml b/NDTensors/src/lib/TensorAlgebra/Project.toml deleted file mode 100644 index 659e1f0606..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/Project.toml +++ /dev/null @@ -1,3 +0,0 @@ -[deps] -EllipsisNotation = "da5c29d0-fa7d-589e-88eb-ea29b0a81949" -LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" diff --git a/NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/Project.toml b/NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/Project.toml deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/src/TensorAlgebraGradedAxesExt.jl b/NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/src/TensorAlgebraGradedAxesExt.jl deleted file mode 100644 index 2b66b688cb..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/src/TensorAlgebraGradedAxesExt.jl +++ /dev/null @@ -1,8 +0,0 @@ -module TensorAlgebraGradedAxesExt -using ...GradedAxes: GradedUnitRange, tensor_product -using ..TensorAlgebra: TensorAlgebra - -function TensorAlgebra.:⊗(a1::GradedUnitRange, a2::GradedUnitRange) - return tensor_product(a1, a2) -end -end diff --git a/NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/test/Project.toml b/NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/test/Project.toml deleted file mode 100644 index c959701978..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/test/Project.toml +++ /dev/null @@ -1,7 +0,0 @@ -[deps] -BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" -Compat = "34da2185-b29b-5c13-b0c7-acf172513d20" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" -OffsetArrays = "6fe1bfb0-de20-5000-8ca7-80f57d26f881" -Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/test/runtests.jl b/NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/test/runtests.jl deleted file mode 100644 index b8b27153b2..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/test/runtests.jl +++ /dev/null @@ -1,7 +0,0 @@ -@eval module $(gensym()) -using Test: @testset -@testset "TensorAlgebraGradedAxesExt" begin - include("test_basics.jl") - include("test_contract.jl") -end -end diff --git a/NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/test/test_basics.jl b/NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/test/test_basics.jl deleted file mode 100644 index 1ee0a45fbe..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/test/test_basics.jl +++ /dev/null @@ -1,30 +0,0 @@ -@eval module $(gensym()) -using BlockArrays: Block -using NDTensors.TensorAlgebra: ⊗ -using NDTensors.GradedAxes: GradedAxes, gradedrange, label -using Test: @test, @testset - -struct U1 - dim::Int -end -Base.isless(l1::U1, l2::U1) = isless(l1.dim, l2.dim) -GradedAxes.fuse_labels(l1::U1, l2::U1) = U1(l1.dim + l2.dim) - -## TODO: This should need to get implemented, 
but `dual` -## isn't being used right now in `GradedAxes`. -## GradedAxes.dual(l::U1) = U1(-l.dim) - -@testset "TensorAlgebraGradedAxesExt" begin - a1 = gradedrange([U1(0) => 2, U1(1) => 3]) - a2 = gradedrange([U1(2) => 3, U1(3) => 4]) - a = a1 ⊗ a2 - @test label(a[Block(1)]) == U1(2) - @test label(a[Block(2)]) == U1(3) - @test label(a[Block(3)]) == U1(3) - @test label(a[Block(4)]) == U1(4) - @test a[Block(1)] == 1:6 - @test a[Block(2)] == 7:15 - @test a[Block(3)] == 16:23 - @test a[Block(4)] == 24:35 -end -end diff --git a/NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/test/test_contract.jl b/NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/test/test_contract.jl deleted file mode 100644 index 636900303c..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/ext/TensorAlgebraGradedAxesExt/test/test_contract.jl +++ /dev/null @@ -1,34 +0,0 @@ -@eval module $(gensym()) -using BlockArrays: Block, blocksize -using Compat: Returns -using NDTensors.BlockSparseArrays: BlockSparseArray -using NDTensors.GradedAxes: gradedrange -using NDTensors.SparseArraysBase: densearray -using NDTensors.SymmetrySectors: U1 -using NDTensors.TensorAlgebra: contract -using Random: randn! -using Test: @test, @testset - -function randn_blockdiagonal(elt::Type, axes::Tuple) - a = BlockSparseArray{elt}(axes) - blockdiaglength = minimum(blocksize(a)) - for i in 1:blockdiaglength - b = Block(ntuple(Returns(i), ndims(a))) - a[b] = randn!(a[b]) - end - return a -end - -const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) -@testset "`contract` `BlockSparseArray` (eltype=$elt)" for elt in elts - d = gradedrange([U1(0) => 2, U1(1) => 3]) - a1 = randn_blockdiagonal(elt, (d, d, d)) - a2 = randn_blockdiagonal(elt, (d, d, d)) - a_dest, dimnames_dest = contract(a1, (-1, 1, -2), a2, (-1, -2, 2)) - a1_dense = densearray(a1) - a2_dense = densearray(a2) - a_dest_dense, dimnames_dest_dense = contract(a1_dense, (-1, 1, -2), a2_dense, (-1, -2, 2)) - @test dimnames_dest == dimnames_dest_dense - @test a_dest ≈ a_dest_dense -end -end diff --git a/NDTensors/src/lib/TensorAlgebra/src/BaseExtensions/BaseExtensions.jl b/NDTensors/src/lib/TensorAlgebra/src/BaseExtensions/BaseExtensions.jl deleted file mode 100644 index c994fd81cd..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/src/BaseExtensions/BaseExtensions.jl +++ /dev/null @@ -1,4 +0,0 @@ -module BaseExtensions -include("indexin.jl") -include("permutedims.jl") -end diff --git a/NDTensors/src/lib/TensorAlgebra/src/BaseExtensions/indexin.jl b/NDTensors/src/lib/TensorAlgebra/src/BaseExtensions/indexin.jl deleted file mode 100644 index 80a6f58eaf..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/src/BaseExtensions/indexin.jl +++ /dev/null @@ -1,5 +0,0 @@ -# `Base.indexin` doesn't handle tuples -indexin(x, y) = Base.indexin(x, y) -indexin(x, y::Tuple) = Base.indexin(x, collect(y)) -indexin(x::Tuple, y) = Tuple{Vararg{Any,length(x)}}(Base.indexin(x, y)) -indexin(x::Tuple, y::Tuple) = Tuple{Vararg{Any,length(x)}}(Base.indexin(x, collect(y))) diff --git a/NDTensors/src/lib/TensorAlgebra/src/BaseExtensions/permutedims.jl b/NDTensors/src/lib/TensorAlgebra/src/BaseExtensions/permutedims.jl deleted file mode 100644 index c80e07db1e..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/src/BaseExtensions/permutedims.jl +++ /dev/null @@ -1,20 +0,0 @@ -# Workaround for https://github.com/JuliaLang/julia/issues/52615. -# Fixed by https://github.com/JuliaLang/julia/pull/52623. 
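# The zero-dimensional methods below are the workaround: on affected Julia
# versions, `permutedims` on a 0-dimensional array with an empty permutation
# hits the linked bug, so `_permutedims` falls back to `copy` and
# `_permutedims!` to a scalar assignment. A sketch of the intended behavior
# (not part of the original file):
#
#   julia> a = fill(1.0);             # 0-dimensional array
#
#   julia> _permutedims(a, ())[] == a[]
#   true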
-function _permutedims!( - a_dest::AbstractArray{<:Any,N}, a_src::AbstractArray{<:Any,N}, perm::Tuple{Vararg{Int,N}} -) where {N} - permutedims!(a_dest, a_src, perm) - return a_dest -end -function _permutedims!( - a_dest::AbstractArray{<:Any,0}, a_src::AbstractArray{<:Any,0}, perm::Tuple{} -) - a_dest[] = a_src[] - return a_dest -end -function _permutedims(a::AbstractArray{<:Any,N}, perm::Tuple{Vararg{Int,N}}) where {N} - return permutedims(a, perm) -end -function _permutedims(a::AbstractArray{<:Any,0}, perm::Tuple{}) - return copy(a) -end diff --git a/NDTensors/src/lib/TensorAlgebra/src/LinearAlgebraExtensions/LinearAlgebraExtensions.jl b/NDTensors/src/lib/TensorAlgebra/src/LinearAlgebraExtensions/LinearAlgebraExtensions.jl deleted file mode 100644 index 471f2bd30a..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/src/LinearAlgebraExtensions/LinearAlgebraExtensions.jl +++ /dev/null @@ -1,3 +0,0 @@ -module LinearAlgebraExtensions -include("qr.jl") -end diff --git a/NDTensors/src/lib/TensorAlgebra/src/LinearAlgebraExtensions/qr.jl b/NDTensors/src/lib/TensorAlgebra/src/LinearAlgebraExtensions/qr.jl deleted file mode 100644 index 7c0d3f0ebb..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/src/LinearAlgebraExtensions/qr.jl +++ /dev/null @@ -1,68 +0,0 @@ -using ArrayLayouts: LayoutMatrix -using LinearAlgebra: LinearAlgebra, qr -using ..TensorAlgebra: - TensorAlgebra, - BlockedPermutation, - blockedperm, - blockedperm_indexin, - blockpermute, - fusedims, - splitdims - -# TODO: Define as `tensor_qr`. -# TODO: This look generic but doesn't work for `BlockSparseArrays`. -function _qr(a::AbstractArray, biperm::BlockedPermutation{2}) - a_matricized = fusedims(a, biperm) - - # TODO: Make this more generic, allow choosing thin or full, - # make sure this works on GPU. - q_matricized, r_matricized = qr(a_matricized) - q_matricized_thin = typeof(a_matricized)(q_matricized) - - axes_codomain, axes_domain = blockpermute(axes(a), biperm) - axes_q = (axes_codomain..., axes(q_matricized_thin, 2)) - # TODO: Use `tuple_oneto(n) = ntuple(identity, n)`, currently in `BlockSparseArrays`. - biperm_q = blockedperm( - ntuple(identity, length(axes_codomain)), (length(axes_codomain) + 1,) - ) - axes_r = (axes(r_matricized, 1), axes_domain...) - biperm_r = blockedperm((1,), ntuple(identity, length(axes_domain)) .+ 1) - q = splitdims(q_matricized_thin, axes_q) - r = splitdims(r_matricized, axes_r) - return q, r -end - -function LinearAlgebra.qr(a::AbstractArray, biperm::BlockedPermutation{2}) - return _qr(a, biperm) -end - -# Fix ambiguity error with `LinearAlgebra`. -function LinearAlgebra.qr(a::AbstractMatrix, biperm::BlockedPermutation{2}) - return _qr(a, biperm) -end - -# Fix ambiguity error with `ArrayLayouts`. -function LinearAlgebra.qr(a::LayoutMatrix, biperm::BlockedPermutation{2}) - return _qr(a, biperm) -end - -# TODO: Define in terms of an inner function `_qr` or `tensor_qr`. -function LinearAlgebra.qr( - a::AbstractArray, labels_a::Tuple, labels_q::Tuple, labels_r::Tuple -) - return qr(a, blockedperm_indexin(labels_a, labels_q, labels_r)) -end - -# Fix ambiguity error with `LinearAlgebra`. -function LinearAlgebra.qr( - a::AbstractMatrix, labels_a::Tuple, labels_q::Tuple, labels_r::Tuple -) - return qr(a, blockedperm_indexin(labels_a, labels_q, labels_r)) -end - -# Fix ambiguity error with `ArrayLayouts`. 
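# Label-based usage sketch (hypothetical labels; relies on the overloads
# defined above, which matricize the array, run a thin QR, and split the
# dimensions back out):
#
#   julia> a = randn(2, 3, 4, 5);
#
#   julia> q, r = qr(a, (:i, :j, :k, :l), (:i, :k), (:j, :l));
#
#   julia> size(q), size(r)   # ((2, 4, 8), (8, 3, 5)), sharing the new QR bond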
-function LinearAlgebra.qr( - a::LayoutMatrix, labels_a::Tuple, labels_q::Tuple, labels_r::Tuple -) - return qr(a, blockedperm_indexin(labels_a, labels_q, labels_r)) -end diff --git a/NDTensors/src/lib/TensorAlgebra/src/TensorAlgebra.jl b/NDTensors/src/lib/TensorAlgebra/src/TensorAlgebra.jl deleted file mode 100644 index f0b5f07568..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/src/TensorAlgebra.jl +++ /dev/null @@ -1,14 +0,0 @@ -module TensorAlgebra -include("blockedpermutation.jl") -include("BaseExtensions/BaseExtensions.jl") -include("fusedims.jl") -include("splitdims.jl") -include("contract/contract.jl") -include("contract/output_labels.jl") -include("contract/blockedperms.jl") -include("contract/allocate_output.jl") -include("contract/contract_matricize/contract.jl") -# TODO: Rename to `TensorAlgebraLinearAlgebraExt`. -include("LinearAlgebraExtensions/LinearAlgebraExtensions.jl") -include("../ext/TensorAlgebraGradedAxesExt/src/TensorAlgebraGradedAxesExt.jl") -end diff --git a/NDTensors/src/lib/TensorAlgebra/src/blockedpermutation.jl b/NDTensors/src/lib/TensorAlgebra/src/blockedpermutation.jl deleted file mode 100644 index c13b68a642..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/src/blockedpermutation.jl +++ /dev/null @@ -1,192 +0,0 @@ -using BlockArrays: - BlockArrays, Block, blockfirsts, blocklasts, blocklength, blocklengths, blocks -using EllipsisNotation: Ellipsis, var".." -using TupleTools: TupleTools - -value(::Val{N}) where {N} = N - -_flatten_tuples(t::Tuple) = t -function _flatten_tuples(t1::Tuple, t2::Tuple, trest::Tuple...) - return _flatten_tuples((t1..., t2...), trest...) -end -_flatten_tuples() = () -flatten_tuples(ts::Tuple) = _flatten_tuples(ts...) - -_blocklength(blocklengths::Tuple{Vararg{Int}}) = length(blocklengths) -function _blockfirsts(blocklengths::Tuple{Vararg{Int}}) - return ntuple(_blocklength(blocklengths)) do i - prev_blocklast = - isone(i) ? zero(eltype(blocklengths)) : _blocklasts(blocklengths)[i - 1] - return prev_blocklast + 1 - end -end -_blocklasts(blocklengths::Tuple{Vararg{Int}}) = cumsum(blocklengths) - -collect_tuple(x) = (x,) -collect_tuple(x::Ellipsis) = x -collect_tuple(t::Tuple) = t - -const TupleOfTuples{N} = Tuple{Vararg{Tuple{Vararg{Int}},N}} - -abstract type AbstractBlockedPermutation{BlockLength,Length} end - -BlockArrays.blocks(blockedperm::AbstractBlockedPermutation) = error("Not implemented") - -function Base.Tuple(blockedperm::AbstractBlockedPermutation) - return flatten_tuples(blocks(blockedperm)) -end - -function BlockArrays.blocklengths(blockedperm::AbstractBlockedPermutation) - return length.(blocks(blockedperm)) -end - -function BlockArrays.blockfirsts(blockedperm::AbstractBlockedPermutation) - return _blockfirsts(blocklengths(blockedperm)) -end - -function BlockArrays.blocklasts(blockedperm::AbstractBlockedPermutation) - return _blocklasts(blocklengths(blockedperm)) -end - -Base.iterate(permblocks::AbstractBlockedPermutation) = iterate(Tuple(permblocks)) -function Base.iterate(permblocks::AbstractBlockedPermutation, state) - return iterate(Tuple(permblocks), state) -end - -# Block a permutation based on the specified lengths. 
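# For reference, a `blockedperm` usage sketch (taken from the package tests,
# not part of the original file):
#
#   julia> p = blockedperm((3, 4, 5), (2, 1));
#
#   julia> Tuple(p), blocks(p)
#   ((3, 4, 5, 2, 1), ((3, 4, 5), (2, 1)))
#
#   julia> blocklengths(p), blockfirsts(p), blocklasts(p)
#   ((3, 2), (1, 4), (3, 5))
#
#   julia> invperm(p) == blockedperm((5, 4, 1), (2, 3))
#   true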
-# blockperm((4, 3, 2, 1), (2, 2)) == blockedperm((4, 3), (2, 1)) -# TODO: Optimize with StaticNumbers.jl or generated functions, see: -# https://discourse.julialang.org/t/avoiding-type-instability-when-slicing-a-tuple/38567 -function blockperm(perm::Tuple{Vararg{Int}}, blocklengths::Tuple{Vararg{Int}}) - starts = _blockfirsts(blocklengths) - stops = _blocklasts(blocklengths) - return blockedperm(ntuple(i -> perm[starts[i]:stops[i]], length(blocklengths))...) -end - -function Base.invperm(blockedperm::AbstractBlockedPermutation) - return blockperm(invperm(Tuple(blockedperm)), blocklengths(blockedperm)) -end - -Base.length(blockedperm::AbstractBlockedPermutation) = length(Tuple(blockedperm)) -function BlockArrays.blocklength(blockedperm::AbstractBlockedPermutation) - return length(blocks(blockedperm)) -end - -function Base.getindex(blockedperm::AbstractBlockedPermutation, i::Int) - return Tuple(blockedperm)[i] -end - -function Base.getindex(blockedperm::AbstractBlockedPermutation, I::AbstractUnitRange) - perm = Tuple(blockedperm) - return [perm[i] for i in I] -end - -function Base.getindex(blockedperm::AbstractBlockedPermutation, b::Block) - return blocks(blockedperm)[Int(b)] -end - -# Like `BlockRange`. -function blockeachindex(blockedperm::AbstractBlockedPermutation) - return ntuple(i -> Block(i), blocklength(blockedperm)) -end - -# -# Constructors -# - -# Bipartition a vector according to the -# bipartitioned permutation. -# Like `Base.permute!` block out-of-place and blocked. -function blockpermute(v, blockedperm::AbstractBlockedPermutation) - return map(blockperm -> map(i -> v[i], blockperm), blocks(blockedperm)) -end - -# blockedperm((4, 3), (2, 1)) -function blockedperm(permblocks::Tuple{Vararg{Int}}...; length::Union{Val,Nothing}=nothing) - return blockedperm(length, permblocks...) -end - -function blockedperm(length::Nothing, permblocks::Tuple{Vararg{Int}}...) - return blockedperm(Val(sum(Base.length, permblocks; init=zero(Bool))), permblocks...) -end - -# blockedperm((3, 2), 1) == blockedperm((3, 2), (1,)) -function blockedperm(permblocks::Union{Tuple{Vararg{Int}},Int}...; kwargs...) - return blockedperm(collect_tuple.(permblocks)...; kwargs...) -end - -function blockedperm(permblocks::Union{Tuple{Vararg{Int}},Int,Ellipsis}...; kwargs...) - return blockedperm(collect_tuple.(permblocks)...; kwargs...) -end - -function _blockedperm_length(::Nothing, specified_perm::Tuple{Vararg{Int}}) - return maximum(specified_perm) -end - -function _blockedperm_length(vallength::Val, specified_perm::Tuple{Vararg{Int}}) - return value(vallength) -end - -# blockedperm((4, 3), .., 1) == blockedperm((4, 3), 2, 1) -# blockedperm((4, 3), .., 1; length=Val(5)) == blockedperm((4, 3), 2, 5, 1) -function blockedperm( - permblocks::Union{Tuple{Vararg{Int}},Ellipsis}...; length::Union{Val,Nothing}=nothing -) - # Check there is only one `Ellipsis`. - @assert isone(count(x -> x isa Ellipsis, permblocks)) - specified_permblocks = filter(x -> !(x isa Ellipsis), permblocks) - unspecified_dim = findfirst(x -> x isa Ellipsis, permblocks) - specified_perm = flatten_tuples(specified_permblocks) - len = _blockedperm_length(length, specified_perm) - unspecified_dims = Tuple(setdiff(Base.OneTo(len), flatten_tuples(specified_permblocks))) - permblocks_specified = TupleTools.insertat(permblocks, unspecified_dim, unspecified_dims) - return blockedperm(permblocks_specified...) -end - -# Version of `indexin` that outputs a `blockedperm`. -function blockedperm_indexin(collection, subs...) 
- return blockedperm(map(sub -> BaseExtensions.indexin(sub, collection), subs)...) -end - -struct BlockedPermutation{BlockLength,Length,Blocks<:TupleOfTuples{BlockLength}} <: - AbstractBlockedPermutation{BlockLength,Length} - blocks::Blocks - global function _BlockedPermutation(blocks::TupleOfTuples) - len = sum(length, blocks; init=zero(Bool)) - blocklength = length(blocks) - return new{blocklength,len,typeof(blocks)}(blocks) - end -end - -BlockArrays.blocks(blockedperm::BlockedPermutation) = getfield(blockedperm, :blocks) - -function blockedperm(length::Val, permblocks::Tuple{Vararg{Int}}...) - @assert value(length) == sum(Base.length, permblocks; init=zero(Bool)) - blockedperm = _BlockedPermutation(permblocks) - @assert isperm(blockedperm) - return blockedperm -end - -trivialperm(length::Union{Integer,Val}) = ntuple(identity, length) - -struct BlockedTrivialPermutation{BlockLength,Length,Blocks<:TupleOfTuples{BlockLength}} <: - AbstractBlockedPermutation{BlockLength,Length} - blocks::Blocks - global function _BlockedTrivialPermutation(blocklengths::Tuple{Vararg{Int}}) - len = sum(blocklengths; init=zero(Bool)) - blocklength = length(blocklengths) - permblocks = blocks(blockperm(trivialperm(len), blocklengths)) - return new{blocklength,len,typeof(permblocks)}(permblocks) - end -end - -BlockArrays.blocks(blockedperm::BlockedTrivialPermutation) = getfield(blockedperm, :blocks) - -function blockedtrivialperm(blocklengths::Tuple{Vararg{Int}}) - return _BlockedTrivialPermutation(blocklengths) -end - -function trivialperm(blockedperm::AbstractBlockedPermutation) - return blockedtrivialperm(blocklengths(blockedperm)) -end -Base.invperm(blockedperm::BlockedTrivialPermutation) = blockedperm diff --git a/NDTensors/src/lib/TensorAlgebra/src/contract/allocate_output.jl b/NDTensors/src/lib/TensorAlgebra/src/contract/allocate_output.jl deleted file mode 100644 index 2beff4c5bc..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/src/contract/allocate_output.jl +++ /dev/null @@ -1,83 +0,0 @@ -using Base.PermutedDimsArrays: genperm - -# TODO: Use `ArrayLayouts`-like `MulAdd` object, -# i.e. `ContractAdd`? -function output_axes( - ::typeof(contract), - biperm_dest::BlockedPermutation{2}, - a1::AbstractArray, - biperm1::BlockedPermutation{2}, - a2::AbstractArray, - biperm2::BlockedPermutation{2}, - α::Number=true, -) - axes_codomain, axes_contracted = blockpermute(axes(a1), biperm1) - axes_contracted2, axes_domain = blockpermute(axes(a2), biperm2) - @assert axes_contracted == axes_contracted2 - return genperm((axes_codomain..., axes_domain...), invperm(Tuple(biperm_dest))) -end - -# Inner-product contraction. -# TODO: Use `ArrayLayouts`-like `MulAdd` object, -# i.e. `ContractAdd`? -function output_axes( - ::typeof(contract), - perm_dest::BlockedPermutation{0}, - a1::AbstractArray, - perm1::BlockedPermutation{1}, - a2::AbstractArray, - perm2::BlockedPermutation{1}, - α::Number=true, -) - axes_contracted = blockpermute(axes(a1), perm1) - axes_contracted′ = blockpermute(axes(a2), perm2) - @assert axes_contracted == axes_contracted′ - return () -end - -# Vec-mat. -function output_axes( - ::typeof(contract), - perm_dest::BlockedPermutation{1}, - a1::AbstractArray, - perm1::BlockedPermutation{1}, - a2::AbstractArray, - biperm2::BlockedPermutation{2}, - α::Number=true, -) - (axes_contracted,) = blockpermute(axes(a1), perm1) - axes_contracted′, axes_dest = blockpermute(axes(a2), biperm2) - @assert axes_contracted == axes_contracted′ - return genperm((axes_dest...,), invperm(Tuple(perm_dest))) -end - -# Mat-vec. 
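# For the matrix-like case at the top of this file: with `a1` of size (2, 3)
# contracted over its second axis against `a2` of size (3, 4), the destination
# axes are the codomain axes of `a1` followed by the domain axes of `a2`,
# permuted according to `biperm_dest`. A sketch (not part of the original file):
#
#   julia> output_axes(contract, blockedperm((1,), (2,)),
#                      randn(2, 3), blockedperm((1,), (2,)),
#                      randn(3, 4), blockedperm((1,), (2,)))
#   (Base.OneTo(2), Base.OneTo(4))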
-function output_axes( - ::typeof(contract), - perm_dest::BlockedPermutation{1}, - a1::AbstractArray, - perm1::BlockedPermutation{2}, - a2::AbstractArray, - biperm2::BlockedPermutation{1}, - α::Number=true, -) - axes_dest, axes_contracted = blockpermute(axes(a1), perm1) - (axes_contracted′,) = blockpermute(axes(a2), biperm2) - @assert axes_contracted == axes_contracted′ - return genperm((axes_dest...,), invperm(Tuple(perm_dest))) -end - -# TODO: Use `ArrayLayouts`-like `MulAdd` object, -# i.e. `ContractAdd`? -function allocate_output( - ::typeof(contract), - biperm_dest::BlockedPermutation, - a1::AbstractArray, - biperm1::BlockedPermutation, - a2::AbstractArray, - biperm2::BlockedPermutation, - α::Number=true, -) - axes_dest = output_axes(contract, biperm_dest, a1, biperm1, a2, biperm2, α) - return similar(a1, promote_type(eltype(a1), eltype(a2), typeof(α)), axes_dest) -end diff --git a/NDTensors/src/lib/TensorAlgebra/src/contract/blockedperms.jl b/NDTensors/src/lib/TensorAlgebra/src/contract/blockedperms.jl deleted file mode 100644 index 60009de9ef..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/src/contract/blockedperms.jl +++ /dev/null @@ -1,32 +0,0 @@ -using ..BackendSelection: Algorithm -using .BaseExtensions: BaseExtensions - -function blockedperms( - f::typeof(contract), alg::Algorithm, dimnames_dest, dimnames1, dimnames2 -) - return blockedperms(f, dimnames_dest, dimnames1, dimnames2) -end - -# codomain <-- domain -function blockedperms(::typeof(contract), dimnames_dest, dimnames1, dimnames2) - codomain = Tuple(setdiff(dimnames1, dimnames2)) - contracted = Tuple(intersect(dimnames1, dimnames2)) - domain = Tuple(setdiff(dimnames2, dimnames1)) - - perm_codomain_dest = BaseExtensions.indexin(codomain, dimnames_dest) - perm_domain_dest = BaseExtensions.indexin(domain, dimnames_dest) - - perm_codomain1 = BaseExtensions.indexin(codomain, dimnames1) - perm_domain1 = BaseExtensions.indexin(contracted, dimnames1) - - perm_codomain2 = BaseExtensions.indexin(contracted, dimnames2) - perm_domain2 = BaseExtensions.indexin(domain, dimnames2) - - permblocks_dest = (perm_codomain_dest, perm_domain_dest) - biperm_dest = blockedperm(filter(!isempty, permblocks_dest)...) - permblocks1 = (perm_codomain1, perm_domain1) - biperm1 = blockedperm(filter(!isempty, permblocks1)...) - permblocks2 = (perm_codomain2, perm_domain2) - biperm2 = blockedperm(filter(!isempty, permblocks2)...) - return biperm_dest, biperm1, biperm2 -end diff --git a/NDTensors/src/lib/TensorAlgebra/src/contract/contract.jl b/NDTensors/src/lib/TensorAlgebra/src/contract/contract.jl deleted file mode 100644 index 328be79387..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/src/contract/contract.jl +++ /dev/null @@ -1,120 +0,0 @@ -using ..BackendSelection: Algorithm, @Algorithm_str - -# TODO: Add `contract!!` definitions as pass-throughs to `contract!`. - -default_contract_alg() = Algorithm"matricize"() - -# Required interface if not using -# matricized contraction. -function contract!( - alg::Algorithm, - a_dest::AbstractArray, - biperm_dest::BlockedPermutation, - a1::AbstractArray, - biperm1::BlockedPermutation, - a2::AbstractArray, - biperm2::BlockedPermutation, - α::Number, - β::Number, -) - return error("Not implemented") -end - -function contract( - a1::AbstractArray, - labels1::Tuple, - a2::AbstractArray, - labels2::Tuple, - α::Number=true; - alg=default_contract_alg(), - kwargs..., -) - return contract(Algorithm(alg), a1, labels1, a2, labels2, α; kwargs...) 
-end - -function contract( - alg::Algorithm, - a1::AbstractArray, - labels1::Tuple, - a2::AbstractArray, - labels2::Tuple, - α::Number=true; - kwargs..., -) - labels_dest = output_labels(contract, alg, a1, labels1, a2, labels2, α; kwargs...) - return contract(alg, labels_dest, a1, labels1, a2, labels2, α; kwargs...), labels_dest -end - -function contract( - labels_dest::Tuple, - a1::AbstractArray, - labels1::Tuple, - a2::AbstractArray, - labels2::Tuple, - α::Number=true; - alg=default_contract_alg(), - kwargs..., -) - return contract(Algorithm(alg), labels_dest, a1, labels1, a2, labels2, α; kwargs...) -end - -function contract!( - a_dest::AbstractArray, - labels_dest::Tuple, - a1::AbstractArray, - labels1::Tuple, - a2::AbstractArray, - labels2::Tuple, - α::Number=true, - β::Number=false; - alg=default_contract_alg(), - kwargs..., -) - contract!(Algorithm(alg), a_dest, labels_dest, a1, labels1, a2, labels2, α, β; kwargs...) - return a_dest -end - -function contract( - alg::Algorithm, - labels_dest::Tuple, - a1::AbstractArray, - labels1::Tuple, - a2::AbstractArray, - labels2::Tuple, - α::Number=true; - kwargs..., -) - biperm_dest, biperm1, biperm2 = blockedperms(contract, labels_dest, labels1, labels2) - return contract(alg, biperm_dest, a1, biperm1, a2, biperm2, α; kwargs...) -end - -function contract!( - alg::Algorithm, - a_dest::AbstractArray, - labels_dest::Tuple, - a1::AbstractArray, - labels1::Tuple, - a2::AbstractArray, - labels2::Tuple, - α::Number, - β::Number; - kwargs..., -) - biperm_dest, biperm1, biperm2 = blockedperms(contract, labels_dest, labels1, labels2) - return contract!(alg, a_dest, biperm_dest, a1, biperm1, a2, biperm2, α, β; kwargs...) -end - -function contract( - alg::Algorithm, - biperm_dest::BlockedPermutation, - a1::AbstractArray, - biperm1::BlockedPermutation, - a2::AbstractArray, - biperm2::BlockedPermutation, - α::Number; - kwargs..., -) - a_dest = allocate_output(contract, biperm_dest, a1, biperm1, a2, biperm2, α) - contract!(alg, a_dest, biperm_dest, a1, biperm1, a2, biperm2, α, false; kwargs...) - return a_dest -end diff --git a/NDTensors/src/lib/TensorAlgebra/src/contract/contract_matricize/contract.jl b/NDTensors/src/lib/TensorAlgebra/src/contract/contract_matricize/contract.jl deleted file mode 100644 index beb70104bb..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/src/contract/contract_matricize/contract.jl +++ /dev/null @@ -1,57 +0,0 @@ -using ..BackendSelection: @Algorithm_str -using LinearAlgebra: mul! - -function contract!( - alg::Algorithm"matricize", - a_dest::AbstractArray, - biperm_dest::BlockedPermutation, - a1::AbstractArray, - biperm1::BlockedPermutation, - a2::AbstractArray, - biperm2::BlockedPermutation, - α::Number, - β::Number, -) - a_dest_mat = fusedims(a_dest, biperm_dest) - a1_mat = fusedims(a1, biperm1) - a2_mat = fusedims(a2, biperm2) - _mul!(a_dest_mat, a1_mat, a2_mat, α, β) - splitdims!(a_dest, a_dest_mat, biperm_dest) - return a_dest -end - -# Matrix multiplication. -function _mul!( - a_dest::AbstractMatrix, a1::AbstractMatrix, a2::AbstractMatrix, α::Number, β::Number -) - mul!(a_dest, a1, a2, α, β) - return a_dest -end - -# Inner product. -function _mul!( - a_dest::AbstractArray{<:Any,0}, - a1::AbstractVector, - a2::AbstractVector, - α::Number, - β::Number, -) - a_dest[] = transpose(a1) * a2 * α + a_dest[] * β - return a_dest -end - -# Vec-mat. 
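# End-to-end sketch of this "matricize" backend (illustrative, using the
# label-based entry point from contract.jl):
#
#   julia> a1 = randn(2, 3, 4); a2 = randn(4, 3, 5);
#
#   julia> a_dest, labels_dest = contract(a1, (:i, :j, :k), a2, (:k, :j, :l));
#
#   julia> labels_dest
#   (:i, :l)
#
# Internally both arrays are fused into 2×12 and 12×5 matrices, multiplied
# with `mul!`, and the result is split back into a 2×5 array.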
-function _mul!( - a_dest::AbstractVector, a1::AbstractVector, a2::AbstractMatrix, α::Number, β::Number -) - mul!(transpose(a_dest), transpose(a1), a2, α, β) - return a_dest -end - -# Mat-vec. -function _mul!( - a_dest::AbstractVector, a1::AbstractMatrix, a2::AbstractVector, α::Number, β::Number -) - mul!(a_dest, a1, a2, α, β) - return a_dest -end diff --git a/NDTensors/src/lib/TensorAlgebra/src/contract/output_labels.jl b/NDTensors/src/lib/TensorAlgebra/src/contract/output_labels.jl deleted file mode 100644 index 1de28dd765..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/src/contract/output_labels.jl +++ /dev/null @@ -1,21 +0,0 @@ -using ..BackendSelection: Algorithm - -function output_labels( - f::typeof(contract), - alg::Algorithm, - a1::AbstractArray, - labels1, - a2::AbstractArray, - labels2, - α, -) - return output_labels(f, alg, labels1, labels2) -end - -function output_labels(f::typeof(contract), alg::Algorithm, labels1, labels2) - return output_labels(f, labels1, labels2) -end - -function output_labels(::typeof(contract), labels1, labels2) - return Tuple(symdiff(labels1, labels2)) -end diff --git a/NDTensors/src/lib/TensorAlgebra/src/fusedims.jl b/NDTensors/src/lib/TensorAlgebra/src/fusedims.jl deleted file mode 100644 index c118dc6ab2..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/src/fusedims.jl +++ /dev/null @@ -1,69 +0,0 @@ -using .BaseExtensions: _permutedims, _permutedims! - -abstract type FusionStyle end - -struct ReshapeFusion <: FusionStyle end -struct BlockReshapeFusion <: FusionStyle end -struct SectorFusion <: FusionStyle end - -# Defaults to a simple reshape -combine_fusion_styles(style1::Style, style2::Style) where {Style<:FusionStyle} = Style() -combine_fusion_styles(style1::FusionStyle, style2::FusionStyle) = ReshapeFusion() -combine_fusion_styles(styles::FusionStyle...) = foldl(combine_fusion_styles, styles) -FusionStyle(axis::AbstractUnitRange) = ReshapeFusion() -function FusionStyle(axes::Tuple{Vararg{AbstractUnitRange}}) - return combine_fusion_styles(FusionStyle.(axes)...) -end -FusionStyle(a::AbstractArray) = FusionStyle(axes(a)) - -# Overload this version for most arrays -function fusedims(::ReshapeFusion, a::AbstractArray, axes::AbstractUnitRange...) - return reshape(a, axes) -end - -⊗(a::AbstractUnitRange) = a -function ⊗(a1::AbstractUnitRange, a2::AbstractUnitRange, as::AbstractUnitRange...) - return ⊗(a1, ⊗(a2, as...)) -end -⊗(a1::AbstractUnitRange, a2::AbstractUnitRange) = Base.OneTo(length(a1) * length(a2)) -⊗() = Base.OneTo(1) - -# Overload this version for most arrays -function fusedims(a::AbstractArray, axes::AbstractUnitRange...) - return fusedims(FusionStyle(a), a, axes...) -end - -# Overload this version for fusion tensors, array maps, etc. -function fusedims(a::AbstractArray, axesblocks::Tuple{Vararg{AbstractUnitRange}}...) - return fusedims(a, flatten_tuples(axesblocks)...) -end - -# Fix ambiguity issue -fusedims(a::AbstractArray{<:Any,0}, ::Vararg{Tuple{}}) = a - -# TODO: Is this needed? Maybe delete. -function fusedims(a::AbstractArray, permblocks...) 
- return fusedims(a, blockedperm(permblocks...; length=Val(ndims(a)))) -end - -function fuseaxes( - axes::Tuple{Vararg{AbstractUnitRange}}, blockedperm::AbstractBlockedPermutation -) - axesblocks = blockpermute(axes, blockedperm) - return map(block -> ⊗(block...), axesblocks) -end - -function fuseaxes(a::AbstractArray, blockedperm::AbstractBlockedPermutation) - return fuseaxes(axes(a), blockedperm) -end - -# Fuse adjacent dimensions -function fusedims(a::AbstractArray, blockedperm::BlockedTrivialPermutation) - axes_fused = fuseaxes(a, blockedperm) - return fusedims(a, axes_fused) -end - -function fusedims(a::AbstractArray, blockedperm::BlockedPermutation) - a_perm = _permutedims(a, Tuple(blockedperm)) - return fusedims(a_perm, trivialperm(blockedperm)) -end diff --git a/NDTensors/src/lib/TensorAlgebra/src/splitdims.jl b/NDTensors/src/lib/TensorAlgebra/src/splitdims.jl deleted file mode 100644 index 0554c613ba..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/src/splitdims.jl +++ /dev/null @@ -1,68 +0,0 @@ -using .BaseExtensions: _permutedims, _permutedims! - -to_axis(a::AbstractUnitRange) = a -to_axis(n::Integer) = Base.OneTo(n) - -function blockedaxes(a::AbstractArray, sizeblocks::Pair...) - axes_a = axes(a) - axes_split = tuple.(axes(a)) - for (dim, sizeblock) in sizeblocks - # TODO: Handle conversion from length to range! - axes_split = Base.setindex(axes_split, to_axis.(sizeblock), dim) - end - return axes_split -end - -# splitdims(randn(4, 4), 1:2, 1:2, 1:2, 1:2) -function splitdims(::ReshapeFusion, a::AbstractArray, axes::AbstractUnitRange...) - # TODO: Add `uncanonicalizedims`. - # TODO: Need `length` since `reshape` doesn't accept `axes`, - # maybe make a `reshape_axes` function. - return reshape(a, length.(axes)...) -end - -# splitdims(randn(4, 4), 1:2, 1:2, 1:2, 1:2) -function splitdims(a::AbstractArray, axes::AbstractUnitRange...) - return splitdims(FusionStyle(a), a, axes...) -end - -# splitdims(randn(4, 4), (1:2, 1:2), (1:2, 1:2)) -function splitdims(a::AbstractArray, axesblocks::Tuple{Vararg{AbstractUnitRange}}...) - # TODO: Add `uncanonicalizedims`. - return splitdims(a, flatten_tuples(axesblocks)...) -end - -# Fix ambiguity issue -splitdims(a::AbstractArray) = a - -# splitdims(randn(4, 4), (2, 2), (2, 2)) -function splitdims(a::AbstractArray, sizeblocks::Tuple{Vararg{Integer}}...) - return splitdims(a, map(x -> Base.OneTo.(x), sizeblocks)...) -end - -# splitdims(randn(4, 4), 2 => (1:2, 1:2)) -function splitdims(a::AbstractArray, sizeblocks::Pair...) - return splitdims(a, blockedaxes(a, sizeblocks...)...) -end - -# TODO: Is this needed? -function splitdims( - a::AbstractArray, - axes_dest::Tuple{Vararg{AbstractUnitRange}}, - blockedperm::BlockedPermutation, -) - # TODO: Pass grouped axes. - a_dest_perm = splitdims(a, axes_dest...) - a_dest = _permutedims(a_dest_perm, invperm(Tuple(blockedperm))) - return a_dest -end - -function splitdims!( - a_dest::AbstractArray, a::AbstractArray, blockedperm::BlockedPermutation -) - axes_dest = map(i -> axes(a_dest, i), Tuple(blockedperm)) - # TODO: Pass grouped axes. - a_dest_perm = splitdims(a, axes_dest...) 
- _permutedims!(a_dest, a_dest_perm, invperm(Tuple(blockedperm))) - return a_dest -end diff --git a/NDTensors/src/lib/TensorAlgebra/test/Project.toml b/NDTensors/src/lib/TensorAlgebra/test/Project.toml deleted file mode 100644 index 661098f08c..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/test/Project.toml +++ /dev/null @@ -1,9 +0,0 @@ -[deps] -BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" -Combinatorics = "861a8166-3701-5b0c-9a16-15d98fcdc6aa" -EllipsisNotation = "da5c29d0-fa7d-589e-88eb-ea29b0a81949" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" -TensorOperations = "6aa20fa7-93e2-5fca-9bc0-fbd0db3c71a2" - -[compat] -TensorOperations = "4.1.1" \ No newline at end of file diff --git a/NDTensors/src/lib/TensorAlgebra/test/runtests.jl b/NDTensors/src/lib/TensorAlgebra/test/runtests.jl deleted file mode 100644 index c26bbcdc4b..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/test/runtests.jl +++ /dev/null @@ -1,4 +0,0 @@ -@eval module $(gensym()) -include("test_basics.jl") -include("../ext/TensorAlgebraGradedAxesExt/test/runtests.jl") -end diff --git a/NDTensors/src/lib/TensorAlgebra/test/test_basics.jl b/NDTensors/src/lib/TensorAlgebra/test/test_basics.jl deleted file mode 100644 index 95576a8bf5..0000000000 --- a/NDTensors/src/lib/TensorAlgebra/test/test_basics.jl +++ /dev/null @@ -1,199 +0,0 @@ -@eval module $(gensym()) -using BlockArrays: blockfirsts, blocklasts, blocklength, blocklengths, blocks -using Combinatorics: permutations -using EllipsisNotation: var".." -using LinearAlgebra: norm, qr -using NDTensors.TensorAlgebra: - TensorAlgebra, blockedperm, blockedperm_indexin, fusedims, splitdims -using NDTensors: NDTensors -include(joinpath(pkgdir(NDTensors), "test", "NDTensorsTestUtils", "NDTensorsTestUtils.jl")) -using .NDTensorsTestUtils: default_rtol -using Test: @test, @test_broken, @testset -const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) -@testset "BlockedPermutation" begin - p = blockedperm((3, 4, 5), (2, 1)) - @test Tuple(p) === (3, 4, 5, 2, 1) - @test isperm(p) - @test length(p) == 5 - @test blocks(p) == ((3, 4, 5), (2, 1)) - @test blocklength(p) == 2 - @test blocklengths(p) == (3, 2) - @test blockfirsts(p) == (1, 4) - @test blocklasts(p) == (3, 5) - @test invperm(p) == blockedperm((5, 4, 1), (2, 3)) - - # Empty block. - p = blockedperm((3, 2), (), (1,)) - @test Tuple(p) === (3, 2, 1) - @test isperm(p) - @test length(p) == 3 - @test blocks(p) == ((3, 2), (), (1,)) - @test blocklength(p) == 3 - @test blocklengths(p) == (2, 0, 1) - @test blockfirsts(p) == (1, 3, 3) - @test blocklasts(p) == (2, 2, 3) - @test invperm(p) == blockedperm((3, 2), (), (1,)) - - # Split collection into `BlockedPermutation`. - p = blockedperm_indexin(("a", "b", "c", "d"), ("c", "a"), ("b", "d")) - @test p == blockedperm((3, 1), (2, 4)) - - # Singleton dimensions. - p = blockedperm((2, 3), 1) - @test p == blockedperm((2, 3), (1,)) - - # First dimensions are unspecified. - p = blockedperm(.., (4, 3)) - @test p == blockedperm(1, 2, (4, 3)) - # Specify length - p = blockedperm(.., (4, 3); length=Val(6)) - @test p == blockedperm(1, 2, 5, 6, (4, 3)) - - # Last dimensions are unspecified. - p = blockedperm((4, 3), ..) - @test p == blockedperm((4, 3), 1, 2) - # Specify length - p = blockedperm((4, 3), ..; length=Val(6)) - @test p == blockedperm((4, 3), 1, 2, 5, 6) - - # Middle dimensions are unspecified. 
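# `..` (EllipsisNotation) stands in for every dimension not listed explicitly;
# the missing dimensions are filled in increasing order at the position of the
# ellipsis, optionally up to a total `length`. For instance (mirroring the
# tests below):
#
#   blockedperm((4, 3), .., 1)                == blockedperm((4, 3), 2, 1)
#   blockedperm((4, 3), .., 1; length=Val(6)) == blockedperm((4, 3), 2, 5, 6, 1)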
- p = blockedperm((4, 3), .., 1) - @test p == blockedperm((4, 3), 2, 1) - # Specify length - p = blockedperm((4, 3), .., 1; length=Val(6)) - @test p == blockedperm((4, 3), 2, 5, 6, 1) - - # No dimensions are unspecified. - p = blockedperm((3, 2), .., 1) - @test p == blockedperm((3, 2), 1) -end -@testset "TensorAlgebra" begin - @testset "fusedims (eltype=$elt)" for elt in elts - a = randn(elt, 2, 3, 4, 5) - a_fused = fusedims(a, (1, 2), (3, 4)) - @test eltype(a_fused) === elt - @test a_fused ≈ reshape(a, 6, 20) - a_fused = fusedims(a, (3, 1), (2, 4)) - @test eltype(a_fused) === elt - @test a_fused ≈ reshape(permutedims(a, (3, 1, 2, 4)), (8, 15)) - a_fused = fusedims(a, (3, 1, 2), 4) - @test eltype(a_fused) === elt - @test a_fused ≈ reshape(permutedims(a, (3, 1, 2, 4)), (24, 5)) - a_fused = fusedims(a, .., (3, 1)) - @test eltype(a_fused) === elt - @test a_fused ≈ reshape(permutedims(a, (2, 4, 3, 1)), (3, 5, 8)) - a_fused = fusedims(a, (3, 1), ..) - @test eltype(a_fused) === elt - @test a_fused ≈ reshape(permutedims(a, (3, 1, 2, 4)), (8, 3, 5)) - a_fused = fusedims(a, .., (3, 1), 2) - @test eltype(a_fused) === elt - @test a_fused ≈ reshape(permutedims(a, (4, 3, 1, 2)), (5, 8, 3)) - a_fused = fusedims(a, (3, 1), .., 2) - @test eltype(a_fused) === elt - @test a_fused ≈ reshape(permutedims(a, (3, 1, 4, 2)), (8, 5, 3)) - a_fused = fusedims(a, (3, 1), ..) - @test eltype(a_fused) === elt - @test a_fused ≈ reshape(permutedims(a, (3, 1, 2, 4)), (8, 3, 5)) - end - @testset "splitdims (eltype=$elt)" for elt in elts - a = randn(elt, 6, 20) - a_split = splitdims(a, (2, 3), (5, 4)) - @test eltype(a_split) === elt - @test a_split ≈ reshape(a, (2, 3, 5, 4)) - a_split = splitdims(a, (1:2, 1:3), (1:5, 1:4)) - @test eltype(a_split) === elt - @test a_split ≈ reshape(a, (2, 3, 5, 4)) - a_split = splitdims(a, 2 => (5, 4), 1 => (2, 3)) - @test eltype(a_split) === elt - @test a_split ≈ reshape(a, (2, 3, 5, 4)) - a_split = splitdims(a, 2 => (1:5, 1:4), 1 => (1:2, 1:3)) - @test eltype(a_split) === elt - @test a_split ≈ reshape(a, (2, 3, 5, 4)) - a_split = splitdims(a, 2 => (5, 4)) - @test eltype(a_split) === elt - @test a_split ≈ reshape(a, (6, 5, 4)) - a_split = splitdims(a, 2 => (1:5, 1:4)) - @test eltype(a_split) === elt - @test a_split ≈ reshape(a, (6, 5, 4)) - a_split = splitdims(a, 1 => (2, 3)) - @test eltype(a_split) === elt - @test a_split ≈ reshape(a, (2, 3, 20)) - a_split = splitdims(a, 1 => (1:2, 1:3)) - @test eltype(a_split) === elt - @test a_split ≈ reshape(a, (2, 3, 20)) - end - using TensorOperations: TensorOperations - @testset "contract (eltype1=$elt1, eltype2=$elt2)" for elt1 in elts, elt2 in elts - dims = (2, 3, 4, 5, 6, 7, 8, 9, 10) - labels = (:a, :b, :c, :d, :e, :f, :g, :h, :i) - for (d1s, d2s, d_dests) in ( - ((1, 2), (1, 2), ()), - ((1, 2), (2, 1), ()), - ((1, 2), (2, 1, 3), (3,)), - ((1, 2, 3), (2, 1), (3,)), - ((1, 2), (2, 3), (1, 3)), - ((1, 2), (2, 3), (3, 1)), - ((2, 1), (2, 3), (3, 1)), - ((1, 2, 3), (2, 3, 4), (1, 4)), - ((1, 2, 3), (2, 3, 4), (4, 1)), - ((3, 2, 1), (4, 2, 3), (4, 1)), - ((1, 2, 3), (3, 4), (1, 2, 4)), - ((1, 2, 3), (3, 4), (4, 1, 2)), - ((1, 2, 3), (3, 4), (2, 4, 1)), - ((3, 1, 2), (3, 4), (2, 4, 1)), - ((3, 2, 1), (4, 3), (2, 4, 1)), - ((1, 2, 3, 4, 5, 6), (4, 5, 6, 7, 8, 9), (1, 2, 3, 7, 8, 9)), - ((2, 4, 5, 1, 6, 3), (6, 4, 9, 8, 5, 7), (1, 7, 2, 8, 3, 9)), - ) - a1 = randn(elt1, map(i -> dims[i], d1s)) - labels1 = map(i -> labels[i], d1s) - a2 = randn(elt2, map(i -> dims[i], d2s)) - labels2 = map(i -> labels[i], d2s) - labels_dest = map(i -> labels[i], d_dests) - - 
# Don't specify destination labels - a_dest, labels_dest′ = TensorAlgebra.contract(a1, labels1, a2, labels2) - a_dest_tensoroperations = TensorOperations.tensorcontract( - labels_dest′, a1, labels1, a2, labels2 - ) - @test a_dest ≈ a_dest_tensoroperations - - # Specify destination labels - a_dest = TensorAlgebra.contract(labels_dest, a1, labels1, a2, labels2) - a_dest_tensoroperations = TensorOperations.tensorcontract( - labels_dest, a1, labels1, a2, labels2 - ) - @test a_dest ≈ a_dest_tensoroperations - - # Specify α and β - elt_dest = promote_type(elt1, elt2) - # TODO: Using random `α`, `β` causes - # random test failures; investigate why. - α = elt_dest(1.2) # randn(elt_dest) - β = elt_dest(2.4) # randn(elt_dest) - a_dest_init = randn(elt_dest, map(i -> dims[i], d_dests)) - a_dest = copy(a_dest_init) - TensorAlgebra.contract!(a_dest, labels_dest, a1, labels1, a2, labels2, α, β) - a_dest_tensoroperations = TensorOperations.tensorcontract( - labels_dest, a1, labels1, a2, labels2 - ) - ## Here we loosened the tolerance because of some floating point roundoff issues - ## with Float32 numbers. - @test a_dest ≈ α * a_dest_tensoroperations + β * a_dest_init rtol = - 50 * default_rtol(elt_dest) - end - end -end -@testset "qr (eltype=$elt)" for elt in elts - a = randn(elt, 5, 4, 3, 2) - labels_a = (:a, :b, :c, :d) - labels_q = (:b, :a) - labels_r = (:d, :c) - q, r = qr(a, labels_a, labels_q, labels_r) - label_qr = :qr - a′ = TensorAlgebra.contract( - labels_a, q, (labels_q..., label_qr), r, (label_qr, labels_r...) - ) - @test a ≈ a′ -end -end diff --git a/NDTensors/src/lib/TypeParameterAccessors/.JuliaFormatter.toml b/NDTensors/src/lib/TypeParameterAccessors/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/TypeParameterAccessors/src/TypeParameterAccessors.jl b/NDTensors/src/lib/TypeParameterAccessors/src/TypeParameterAccessors.jl deleted file mode 100644 index 3333cd417b..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/src/TypeParameterAccessors.jl +++ /dev/null @@ -1,21 +0,0 @@ -module TypeParameterAccessors -include("interface.jl") -include("to_unionall.jl") -include("parameters.jl") -include("abstractposition.jl") -include("abstracttypeparameter.jl") -include("type_parameters.jl") -include("position.jl") -include("parameter.jl") -include("is_parameter_specified.jl") -include("unspecify_parameters.jl") -include("set_parameters.jl") -include("specify_parameters.jl") -include("default_parameters.jl") -include("ndims.jl") -include("base/abstractarray.jl") -include("base/similartype.jl") -include("base/array.jl") -include("base/linearalgebra.jl") -include("base/stridedviews.jl") -end diff --git a/NDTensors/src/lib/TypeParameterAccessors/src/abstractposition.jl b/NDTensors/src/lib/TypeParameterAccessors/src/abstractposition.jl deleted file mode 100644 index 4508c180ae..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/src/abstractposition.jl +++ /dev/null @@ -1,3 +0,0 @@ -struct Position{Pos} end -Position(pos) = Position{pos}() -Base.Int(pos::Position) = Int(parameter(typeof(pos))) diff --git a/NDTensors/src/lib/TypeParameterAccessors/src/abstracttypeparameter.jl b/NDTensors/src/lib/TypeParameterAccessors/src/abstracttypeparameter.jl deleted file mode 100644 index 65f07c9cff..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/src/abstracttypeparameter.jl +++ /dev/null @@ -1,12 +0,0 @@
-abstract type AbstractTypeParameter end -AbstractTypeParameter(param::AbstractTypeParameter) = param -wrapped_type_parameter(param) = AbstractTypeParameter(param) -wrapped_type_parameter(type::Type, pos) = AbstractTypeParameter(parameter(type, pos)) - -struct TypeParameter{Param} <: AbstractTypeParameter end -TypeParameter(param) = TypeParameter{param}() -TypeParameter(param::TypeParameter) = param -AbstractTypeParameter(param) = TypeParameter(param) - -struct UnspecifiedTypeParameter <: AbstractTypeParameter end -AbstractTypeParameter(param::TypeVar) = UnspecifiedTypeParameter() diff --git a/NDTensors/src/lib/TypeParameterAccessors/src/base/abstractarray.jl b/NDTensors/src/lib/TypeParameterAccessors/src/base/abstractarray.jl deleted file mode 100644 index 89e38d3b2f..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/src/base/abstractarray.jl +++ /dev/null @@ -1,95 +0,0 @@ -struct Self end - -parameter(type::Type, pos::Self) = type -function set_type_parameter(type::Type, pos::Self, param) - return error("Can't set the parent type of an unwrapped array type.") -end - -function set_eltype(array::AbstractArray, param) - return convert(set_eltype(typeof(array), param), array) -end - -## This will fail if position of `ndims` is not defined for `type` -function set_ndims(type::Type{<:AbstractArray}, param) - return set_type_parameter(type, ndims, param) -end -function set_ndims(type::Type{<:AbstractArray}, param::NDims) - return set_type_parameter(type, ndims, ndims(param)) -end - -using SimpleTraits: SimpleTraits, @traitdef, @traitimpl - -# Trait indicating if the AbstractArray type is an array wrapper. -# Assumes that it implements `NDTensors.parenttype`. -@traitdef IsWrappedArray{ArrayType} - -#! format: off -@traitimpl IsWrappedArray{ArrayType} <- is_wrapped_array(ArrayType) -#! format: on - -parenttype(type::Type{<:AbstractArray}) = parameter(type, parenttype) -parenttype(object::AbstractArray) = parenttype(typeof(object)) -position(::Type{<:AbstractArray}, ::typeof(parenttype)) = Self() - -is_wrapped_array(arraytype::Type{<:AbstractArray}) = (parenttype(arraytype) ≠ arraytype) -@inline is_wrapped_array(array::AbstractArray) = is_wrapped_array(typeof(array)) -@inline is_wrapped_array(object) = false - -using SimpleTraits: Not, @traitfn - -@traitfn function unwrap_array_type( - arraytype::Type{ArrayType} -) where {ArrayType; IsWrappedArray{ArrayType}} - return unwrap_array_type(parenttype(arraytype)) -end - -@traitfn function unwrap_array_type( - arraytype::Type{ArrayType} -) where {ArrayType; !IsWrappedArray{ArrayType}} - return arraytype -end - -# For working with instances. -unwrap_array_type(array::AbstractArray) = unwrap_array_type(typeof(array)) - -function set_parenttype(t::Type, param) - return set_type_parameter(t, parenttype, param) -end - -@traitfn function set_eltype( - type::Type{ArrayType}, param -) where {ArrayType <: AbstractArray; IsWrappedArray{ArrayType}} - new_parenttype = set_eltype(parenttype(type), param) - # Need to set both in one `set_type_parameters` call to avoid - # conflicts in type parameter constraints of certain wrapper types. - return set_type_parameters(type, (eltype, parenttype), (param, new_parenttype)) -end - -@traitfn function set_eltype( - type::Type{ArrayType}, param -) where {ArrayType <: AbstractArray; !IsWrappedArray{ArrayType}} - return set_type_parameter(type, eltype, param) -end - -# These are generic fallback definitions. 
By convention, -# this is very commonly true of `AbstractArray` subtypes; -# it may not be correct for every array type, but it is very convenient -# to define this so that more operations "just work" -# on most AbstractArrays. -position(type::Type{<:AbstractArray}, ::typeof(eltype)) = Position(1) -position(type::Type{<:AbstractArray}, ::typeof(ndims)) = Position(2) - -default_type_parameters(::Type{<:AbstractArray}) = (Float64, 1) - -for wrapper in [:PermutedDimsArray, :(Base.ReshapedArray), :SubArray] - @eval begin - position(type::Type{<:$wrapper}, ::typeof(eltype)) = Position(1) - position(type::Type{<:$wrapper}, ::typeof(ndims)) = Position(2) - end -end -for wrapper in [:(Base.ReshapedArray), :SubArray] - @eval position(type::Type{<:$wrapper}, ::typeof(parenttype)) = Position(3) -end -for wrapper in [:PermutedDimsArray] - @eval position(type::Type{<:$wrapper}, ::typeof(parenttype)) = Position(5) -end diff --git a/NDTensors/src/lib/TypeParameterAccessors/src/base/array.jl b/NDTensors/src/lib/TypeParameterAccessors/src/base/array.jl deleted file mode 100644 index 247c73ba1f..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/src/base/array.jl +++ /dev/null @@ -1,2 +0,0 @@ -position(::Type{<:Array}, ::typeof(eltype)) = Position(1) -position(::Type{<:Array}, ::typeof(ndims)) = Position(2) diff --git a/NDTensors/src/lib/TypeParameterAccessors/src/base/linearalgebra.jl b/NDTensors/src/lib/TypeParameterAccessors/src/base/linearalgebra.jl deleted file mode 100644 index 3d818cab16..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/src/base/linearalgebra.jl +++ /dev/null @@ -1,25 +0,0 @@ -using LinearAlgebra: - Adjoint, - Diagonal, - Hermitian, - LowerTriangular, - Symmetric, - Transpose, - UnitLowerTriangular, - UnitUpperTriangular, - UpperTriangular - -for wrapper in [ - :Transpose, - :Adjoint, - :Symmetric, - :Hermitian, - :UpperTriangular, - :LowerTriangular, - :UnitUpperTriangular, - :UnitLowerTriangular, - :Diagonal, -] - @eval position(::Type{<:$wrapper}, ::typeof(eltype)) = Position(1) - @eval position(::Type{<:$wrapper}, ::typeof(parenttype)) = Position(2) -end diff --git a/NDTensors/src/lib/TypeParameterAccessors/src/base/similartype.jl b/NDTensors/src/lib/TypeParameterAccessors/src/base/similartype.jl deleted file mode 100644 index f6fed09885..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/src/base/similartype.jl +++ /dev/null @@ -1,88 +0,0 @@ -""" -`set_indstype` should be overloaded for -types with structured dimensions, -like `OffsetArrays` or named indices -(such as ITensors).
-""" -function set_indstype(arraytype::Type{<:AbstractArray}, dims::Tuple) - return set_ndims(arraytype, NDims(length(dims))) -end - -function similartype(arraytype::Type{<:AbstractArray}, eltype::Type, ndims::NDims) - return similartype(similartype(arraytype, eltype), ndims) -end - -function similartype(arraytype::Type{<:AbstractArray}, eltype::Type, dims::Tuple) - return similartype(similartype(arraytype, eltype), dims) -end - -@traitfn function similartype( - arraytype::Type{ArrayT} -) where {ArrayT; !IsWrappedArray{ArrayT}} - return arraytype -end - -@traitfn function similartype( - arraytype::Type{ArrayT}, eltype::Type -) where {ArrayT; !IsWrappedArray{ArrayT}} - return set_eltype(arraytype, eltype) -end - -@traitfn function similartype( - arraytype::Type{ArrayT}, dims::Tuple -) where {ArrayT; !IsWrappedArray{ArrayT}} - return set_indstype(arraytype, dims) -end - -@traitfn function similartype( - arraytype::Type{ArrayT}, ndims::NDims -) where {ArrayT; !IsWrappedArray{ArrayT}} - return set_ndims(arraytype, ndims) -end - -function similartype( - arraytype::Type{<:AbstractArray}, dim1::Base.DimOrInd, dim_rest::Base.DimOrInd... -) - return similartype(arraytype, (dim1, dim_rest...)) -end - -## Wrapped arrays -@traitfn function similartype( - arraytype::Type{ArrayT} -) where {ArrayT; IsWrappedArray{ArrayT}} - return similartype(unwrap_array_type(arraytype), NDims(arraytype)) -end - -@traitfn function similartype( - arraytype::Type{ArrayT}, eltype::Type -) where {ArrayT; IsWrappedArray{ArrayT}} - return similartype(unwrap_array_type(arraytype), eltype, NDims(arraytype)) -end - -@traitfn function similartype( - arraytype::Type{ArrayT}, dims::Tuple -) where {ArrayT; IsWrappedArray{ArrayT}} - return similartype(unwrap_array_type(arraytype), dims) -end - -@traitfn function similartype( - arraytype::Type{ArrayT}, ndims::NDims -) where {ArrayT; IsWrappedArray{ArrayT}} - return similartype(unwrap_array_type(arraytype), ndims) -end - -# This is for uniform `Diag` storage which uses -# a Number as the data type. -# TODO: Delete this when we change to using a -# `FillArray` instead. This is a stand-in -# to make things work with the current design. -function similartype(numbertype::Type{<:Number}) - return numbertype -end - -# Instances -function similartype(array::AbstractArray, eltype::Type, dims...) - return similartype(typeof(array), eltype, dims...) -end -similartype(array::AbstractArray, eltype::Type) = similartype(typeof(array), eltype) -similartype(array::AbstractArray, dims...) = similartype(typeof(array), dims...) 
diff --git a/NDTensors/src/lib/TypeParameterAccessors/src/base/stridedviews.jl b/NDTensors/src/lib/TypeParameterAccessors/src/base/stridedviews.jl deleted file mode 100644 index f74ae5c465..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/src/base/stridedviews.jl +++ /dev/null @@ -1,3 +0,0 @@ -using StridedViews: StridedView - -@eval position(type::Type{<:StridedView}, ::typeof(parenttype)) = Position(3) diff --git a/NDTensors/src/lib/TypeParameterAccessors/src/default_parameters.jl b/NDTensors/src/lib/TypeParameterAccessors/src/default_parameters.jl deleted file mode 100644 index 79f7a96dfe..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/src/default_parameters.jl +++ /dev/null @@ -1,62 +0,0 @@ -default_type_parameters(type::Type) = error("Not implemented") -function default_type_parameters(type::Type, positions::Tuple) - return default_type_parameter.(type, positions) -end -default_type_parameters(object) = default_type_parameters(typeof(object)) -function default_type_parameters(object, positions::Tuple) - return default_type_parameters(typeof(object), positions) -end -function default_type_parameter(type::Type, pos::Position) - return default_type_parameters(type)[Int(pos)] -end -function default_type_parameter(type::Type, pos) - return default_type_parameter(type, position(type, pos)) -end -function default_type_parameter(object, pos) - return default_type_parameter(typeof(object), pos) -end - -# Wrapping type parameters to improve type stability. -function wrapped_default_type_parameters(type::Type) - return wrapped_type_parameter.(default_type_parameters(type)) -end -function wrapped_default_type_parameters(type::Type, positions::Tuple) - return wrapped_default_type_parameter.(type, positions) -end -wrapped_default_type_parameters(object) = wrapped_default_type_parameters(typeof(object)) -function wrapped_default_type_parameters(object, positions::Tuple) - return wrapped_default_type_parameters(typeof(object), positions) -end -function wrapped_default_type_parameter(type::Type, pos::Position) - return wrapped_default_type_parameters(type)[Int(pos)] -end -function wrapped_default_type_parameter(type::Type, pos) - return wrapped_default_type_parameter(type, position(type, pos)) -end -function wrapped_default_type_parameter(object, pos) - return wrapped_default_type_parameter(typeof(object), pos) -end - -function set_default_type_parameter(type::Type, pos) - return set_type_parameter(type, pos, wrapped_default_type_parameter(type, pos)) -end -function set_default_type_parameters(type::Type) - return set_type_parameters(type, wrapped_default_type_parameters(type)) -end -function set_default_type_parameters(type::Type, positions::Tuple) - return set_type_parameters( - type, positions, wrapped_default_type_parameters(type, positions) - ) -end - -function specify_default_type_parameter(type::Type, pos) - return specify_type_parameter(type, pos, wrapped_default_type_parameter(type, pos)) -end -function specify_default_type_parameters(type::Type) - return specify_type_parameters(type, wrapped_default_type_parameters(type)) -end -function specify_default_type_parameters(type::Type, positions::Tuple) - return specify_type_parameters( - type, positions, wrapped_default_type_parameters(type, positions) - ) -end diff --git a/NDTensors/src/lib/TypeParameterAccessors/src/interface.jl b/NDTensors/src/lib/TypeParameterAccessors/src/interface.jl deleted file mode 100644 index 7eaa1391f1..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/src/interface.jl +++ /dev/null @@ 
-1,22 +0,0 @@ -""" - position(type::Type, position_name)::Position - -An optional interface function. Defining this allows accessing a parameter -at the defined position using the `position_name`. - -For example, defining `TypeParameterAccessors.position(::Type{<:MyType}, ::typeof(eltype)) = Position(1)` -allows accessing the first type parameter with `type_parameter(MyType(...), eltype)`, -in addition to the standard `type_parameter(MyType(...), 1)` or `type_parameter(MyType(...), Position(1))`. -""" -function position end - -""" - default_type_parameters(type::Type)::Tuple - -An optional interface function. Defining this allows filling type parameters -of the specified type with default values. - -This function should output a Tuple of the default values, with exactly -one for each type parameter slot of the type. -""" -function default_type_parameters end diff --git a/NDTensors/src/lib/TypeParameterAccessors/src/is_parameter_specified.jl b/NDTensors/src/lib/TypeParameterAccessors/src/is_parameter_specified.jl deleted file mode 100644 index 014e5089eb..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/src/is_parameter_specified.jl +++ /dev/null @@ -1,5 +0,0 @@ -is_specified_parameter(param::TypeParameter) = true -is_specified_parameter(param::UnspecifiedTypeParameter) = false -function is_parameter_specified(type::Type, pos) - return is_specified_parameter(wrapped_type_parameter(type, pos)) -end diff --git a/NDTensors/src/lib/TypeParameterAccessors/src/ndims.jl b/NDTensors/src/lib/TypeParameterAccessors/src/ndims.jl deleted file mode 100644 index f640763583..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/src/ndims.jl +++ /dev/null @@ -1,6 +0,0 @@ -struct NDims{ndims} end -Base.ndims(::NDims{ndims}) where {ndims} = ndims - -NDims(ndims::Integer) = NDims{ndims}() -NDims(arraytype::Type{<:AbstractArray}) = NDims(ndims(arraytype)) -NDims(array::AbstractArray) = NDims(typeof(array)) diff --git a/NDTensors/src/lib/TypeParameterAccessors/src/parameter.jl b/NDTensors/src/lib/TypeParameterAccessors/src/parameter.jl deleted file mode 100644 index 5b3554fcf7..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/src/parameter.jl +++ /dev/null @@ -1,3 +0,0 @@ -parameter(type::Type, pos) = parameter(type, position(type, pos)) -parameter(type::Type, pos::Position) = parameters(type)[Int(pos)] -parameter(type::Type) = only(parameters(type)) diff --git a/NDTensors/src/lib/TypeParameterAccessors/src/parameters.jl b/NDTensors/src/lib/TypeParameterAccessors/src/parameters.jl deleted file mode 100644 index f6059fdf02..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/src/parameters.jl +++ /dev/null @@ -1,18 +0,0 @@ -# JULIA_INTERNALS: Warning! This code relies on undocumented -# internal details of the Julia language. -# It could break from version to version of Julia. - -# The signature `parameters(::Type{type}) where {type}` -# doesn't work if `type` is a `DataType` with `TypeVar`s.
-function _parameters(type::Type) - return Tuple(Base.unwrap_unionall(type).parameters) -end -@generated function parameters(type_type::Type) - type = only(type_type.parameters) - return _parameters(type) -end -parameters(object) = parameters(typeof(object)) - -nparameters(type_or_object) = length(parameters(type_or_object)) - -eachposition(type_or_object) = ntuple(Position, Val(nparameters(type_or_object))) diff --git a/NDTensors/src/lib/TypeParameterAccessors/src/position.jl b/NDTensors/src/lib/TypeParameterAccessors/src/position.jl deleted file mode 100644 index aff80a8197..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/src/position.jl +++ /dev/null @@ -1,11 +0,0 @@ -# These definitions help with generic code, where -# we don't know what kind of position will be passed -# but we want to canonicalize to `Position` positions. -position(type::Type, pos::Position) = pos -position(type::Type, pos::Int) = Position(pos) -# Used for named positions. -function position(type::Type, pos) - return error( - "Type parameter position not defined for type `$(type)` and position name `$(pos)`." - ) -end diff --git a/NDTensors/src/lib/TypeParameterAccessors/src/set_parameters.jl b/NDTensors/src/lib/TypeParameterAccessors/src/set_parameters.jl deleted file mode 100644 index 6ab25d2d9d..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/src/set_parameters.jl +++ /dev/null @@ -1,46 +0,0 @@ -function _set_type_parameter(type::Type, pos::Int, param) - params = Base.setindex(parameters(type), param, pos) - return new_parameters(type, params) -end -@generated function set_type_parameter( - type_type::Type, pos_type::Position, param_type::TypeParameter -) - type = parameter(type_type) - pos = parameter(pos_type) - param = parameter(param_type) - return _set_type_parameter(type, pos, param) -end -function set_type_parameter(type::Type, pos, param) - return set_type_parameter(type, position(type, pos), param) -end -function set_type_parameter(type::Type, pos::Position, param) - return set_type_parameter(type, pos, TypeParameter(param)) -end -function set_type_parameter(type::Type, pos::Position, param::UnspecifiedTypeParameter) - return unspecify_type_parameter(type, pos) -end - -function _set_type_parameters(type::Type, positions::Tuple{Vararg{Int}}, params::Tuple) - @assert length(positions) == length(params) - new_params = parameters(type) - for i in 1:length(positions) - new_params = Base.setindex(new_params, params[i], positions[i]) - end - return new_parameters(type, new_params) -end -@generated function set_type_parameters( - type_type::Type, - positions_type::Tuple{Vararg{Position}}, - params_type::Tuple{Vararg{TypeParameter}}, -) - type = parameter(type_type) - positions = parameter.(parameters(positions_type)) - params = parameter.(parameters(params_type)) - return _set_type_parameters(type, positions, params) -end -function set_type_parameters(type::Type, positions::Tuple, params::Tuple) - return set_type_parameters(type, position.(type, positions), TypeParameter.(params)) -end -function set_type_parameters(type::Type, params::Tuple) - return set_type_parameters(type, eachposition(type), params) -end diff --git a/NDTensors/src/lib/TypeParameterAccessors/src/specify_parameters.jl b/NDTensors/src/lib/TypeParameterAccessors/src/specify_parameters.jl deleted file mode 100644 index 8c424c286e..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/src/specify_parameters.jl +++ /dev/null @@ -1,30 +0,0 @@ -function specify_type_parameter(type::Type, pos, param) - 
is_parameter_specified(type, pos) && return type - return set_type_parameter(type, pos, param) -end - -function _specify_type_parameters(type::Type, positions::Tuple{Vararg{Int}}, params::Tuple) - new_params = parameters(type) - for i in 1:length(positions) - if !is_parameter_specified(type, positions[i]) - new_params = Base.setindex(new_params, params[i], positions[i]) - end - end - return new_parameters(type, new_params) -end -@generated function specify_type_parameters( - type_type::Type, - positions_type::Tuple{Vararg{Position}}, - params_type::Tuple{Vararg{TypeParameter}}, -) - type = parameter(type_type) - positions = parameter.(parameters(positions_type)) - params = parameter.(parameters(params_type)) - return _specify_type_parameters(type, positions, params) -end -function specify_type_parameters(type::Type, positions::Tuple, params::Tuple) - return specify_type_parameters(type, position.(type, positions), TypeParameter.(params)) -end -function specify_type_parameters(type::Type, params::Tuple) - return specify_type_parameters(type, eachposition(type), params) -end diff --git a/NDTensors/src/lib/TypeParameterAccessors/src/to_unionall.jl b/NDTensors/src/lib/TypeParameterAccessors/src/to_unionall.jl deleted file mode 100644 index 449715ef8e..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/src/to_unionall.jl +++ /dev/null @@ -1,17 +0,0 @@ -# JULIA_INTERNALS: Warning! This code relies on undocumented -# internal details of the Julia language. -# It could break from version to version of Julia. - -# Similar to `Base.rewrap_unionall` but handles -# more general cases of `TypeVar` parameters. -@generated function to_unionall(type_type::Type) - type = only(type_type.parameters) - params = Base.unwrap_unionall(type).parameters - for i in reverse(eachindex(params)) - param = params[i] - if param isa TypeVar - type = UnionAll(param, type) - end - end - return type -end diff --git a/NDTensors/src/lib/TypeParameterAccessors/src/type_parameters.jl b/NDTensors/src/lib/TypeParameterAccessors/src/type_parameters.jl deleted file mode 100644 index bbdb7375d1..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/src/type_parameters.jl +++ /dev/null @@ -1,17 +0,0 @@ -type_parameter(param::TypeParameter) = parameter(typeof(param)) -function type_parameter(param::UnspecifiedTypeParameter) - return error("The requested type parameter isn't specified.") -end -function type_parameter(type::Type, pos) - return type_parameter(wrapped_type_parameter(type, pos)) -end -function type_parameter(object, pos) - return type_parameter(typeof(object), pos) -end -function type_parameter(type_or_object) - return only(type_parameters(type_or_object)) -end - -function type_parameters(type_or_object, positions=eachposition(type_or_object)) - return map(pos -> type_parameter(type_or_object, pos), positions) -end diff --git a/NDTensors/src/lib/TypeParameterAccessors/src/unspecify_parameters.jl b/NDTensors/src/lib/TypeParameterAccessors/src/unspecify_parameters.jl deleted file mode 100644 index be0851db86..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/src/unspecify_parameters.jl +++ /dev/null @@ -1,45 +0,0 @@ -function _unspecify_type_parameters(type::Type) - return Base.typename(type).wrapper -end -@generated function unspecify_type_parameters(type_type::Type) - type = parameter(type_type) - return _unspecify_type_parameters(type) -end - -# Like `set_type_parameters` but less strict, i.e. it allows -# setting with `TypeVar` while `set_type_parameters` would error.
-function new_parameters(type::Type, params) - return to_unionall(unspecify_type_parameters(type){params...}) -end - -function _unspecify_type_parameter(type::Type, pos::Int) - !is_parameter_specified(type, pos) && return type - unspecified_param = parameter(unspecify_type_parameters(type), pos) - params = Base.setindex(parameters(type), unspecified_param, pos) - return new_parameters(type, params) -end -@generated function unspecify_type_parameter(type_type::Type, pos_type::Position) - type = parameter(type_type) - pos = parameter(pos_type) - return _unspecify_type_parameter(type, pos) -end -function unspecify_type_parameter(type::Type, pos) - return unspecify_type_parameter(type, position(type, pos)) -end - -function _unspecify_type_parameters(type::Type, positions::Tuple{Vararg{Int}}) - for pos in positions - type = unspecify_type_parameter(type, pos) - end - return type -end -@generated function unspecify_type_parameters( - type_type::Type, positions_type::Tuple{Vararg{Position}} -) - type = parameter(type_type) - positions = parameter.(parameters(positions_type)) - return _unspecify_type_parameters(type, positions) -end -function unspecify_type_parameters(type::Type, positions::Tuple) - return unspecify_type_parameters(type, position.(type, positions)) -end diff --git a/NDTensors/src/lib/TypeParameterAccessors/test/Project.toml b/NDTensors/src/lib/TypeParameterAccessors/test/Project.toml deleted file mode 100644 index 372840c75d..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/test/Project.toml +++ /dev/null @@ -1,4 +0,0 @@ -[deps] -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" -StridedViews = "4db3bf67-4bd7-4b4e-b153-31dc3fb37143" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/NDTensors/src/lib/TypeParameterAccessors/test/runtests.jl b/NDTensors/src/lib/TypeParameterAccessors/test/runtests.jl deleted file mode 100644 index 7c5ffc416d..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/test/runtests.jl +++ /dev/null @@ -1,10 +0,0 @@ -@eval module $(gensym()) -using Test: @testset -@testset "TypeParameterAccessors.jl" begin - include("test_basics.jl") - include("test_defaults.jl") - include("test_custom_types.jl") - include("test_wrappers.jl") - include("test_similartype.jl") -end -end diff --git a/NDTensors/src/lib/TypeParameterAccessors/test/test_basics.jl b/NDTensors/src/lib/TypeParameterAccessors/test/test_basics.jl deleted file mode 100644 index 7df288f3dd..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/test/test_basics.jl +++ /dev/null @@ -1,85 +0,0 @@ -@eval module $(gensym()) -using Test: @test_throws, @testset -using NDTensors.TypeParameterAccessors: - TypeParameterAccessors, - Position, - TypeParameter, - set_type_parameter, - set_type_parameters, - specify_type_parameter, - specify_type_parameters, - type_parameter, - type_parameters, - unspecify_type_parameter, - unspecify_type_parameters -include("utils/test_inferred.jl") -@testset "TypeParameterAccessors basics" begin - @testset "Get parameters" begin - @test_inferred type_parameter(AbstractArray{Float64}, 1) == Float64 wrapped = true - @test_inferred type_parameter(AbstractArray{Float64}, Position(1)) == Float64 - @test_inferred type_parameter(AbstractArray{Float64}, eltype) == Float64 - @test_inferred type_parameter(AbstractMatrix{Float64}, ndims) == 2 - - @test_inferred type_parameter(Array{Float64}, 1) == Float64 wrapped = true - @test_inferred type_parameter(Array{Float64}, Position(1)) == Float64 - @test_inferred type_parameter(Val{3}) == 3 - @test_throws ErrorException 
type_parameter(Array, 1) - @test_inferred type_parameter(Array{Float64}, eltype) == Float64 - @test_inferred type_parameter(Matrix{Float64}, ndims) == 2 - @test_throws ErrorException type_parameter(Array{Float64}, ndims) == 2 - @test_inferred type_parameters(Matrix{Float64}, (2, eltype)) == (2, Float64) wrapped = - true - @test_inferred type_parameters(Matrix{Float64}, (Position(2), eltype)) == (2, Float64) - end - @testset "Set parameters" begin - @test_inferred set_type_parameter(Array, 1, Float64) == Array{Float64} wrapped = true - @test_inferred set_type_parameter(Array, Position(1), Float64) == Array{Float64} - @test_inferred set_type_parameter(Array, 2, 2) == Matrix wrapped = true - @test_inferred set_type_parameter(Array, eltype, Float32) == Array{Float32} - @test_inferred set_type_parameters( - Array, (eltype, Position(2)), (TypeParameter(Float32), TypeParameter(3)) - ) == Array{Float32,3} - @test_inferred set_type_parameters(Array, (eltype, 2), (Float32, 3)) == Array{Float32,3} wrapped = - true - - # TODO: This should infer without wrapping but doesn't. - @test_inferred set_type_parameters( - Array, (eltype, Position(2)), (Float32, TypeParameter(3)) - ) == Array{Float32,3} wrapped = true - end - @testset "Specify parameters" begin - @test_inferred specify_type_parameter(Array, 1, Float64) == Array{Float64} wrapped = - true - @test_inferred specify_type_parameter(Array, Position(1), Float64) == Array{Float64} - @test_inferred specify_type_parameters(Matrix, (2, 1), (4, Float32)) == Matrix{Float32} wrapped = - true - @test_inferred specify_type_parameters(Array, (Float64, 2)) == Matrix{Float64} wrapped = - true - @test_inferred specify_type_parameter(Array, eltype, Float32) == Array{Float32} - @test_inferred specify_type_parameters(Array, (eltype, 2), (Float32, 3)) == - Array{Float32,3} wrapped = true - end - @testset "Unspecify parameters" begin - @test_inferred unspecify_type_parameter(Vector, 2) == Array wrapped = true - @test_inferred unspecify_type_parameter(Vector, Position(2)) == Array - @test_inferred unspecify_type_parameter(Vector{Float64}, eltype) == Vector - @test_inferred unspecify_type_parameters(Vector{Float64}) == Array - @test_inferred unspecify_type_parameters(Vector{Float64}, (eltype, 2)) == Array wrapped = - true - @test_inferred unspecify_type_parameters(Vector{Float64}, (eltype, Position(2))) == - Array - end - @testset "On objects" begin - @test_inferred type_parameter(Val{3}()) == 3 - @test_inferred type_parameter(Val{Float32}()) == Float32 - a = randn(Float32, (2, 2, 2)) - @test_inferred type_parameter(a, 1) == Float32 wrapped = true - @test_inferred type_parameter(a, eltype) == Float32 - @test_inferred type_parameter(a, Position(1)) == Float32 - @test_inferred type_parameter(a, 2) == 3 wrapped = true - @test_inferred type_parameter(a, ndims) == 3 - @test_inferred type_parameters(a) == (Float32, 3) - @test_inferred type_parameters(a, (2, eltype)) == (3, Float32) wrapped = true - end -end -end diff --git a/NDTensors/src/lib/TypeParameterAccessors/test/test_custom_types.jl b/NDTensors/src/lib/TypeParameterAccessors/test/test_custom_types.jl deleted file mode 100644 index 86d99cd7d1..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/test/test_custom_types.jl +++ /dev/null @@ -1,78 +0,0 @@ -@eval module $(gensym()) -using Test: @testset -@testset "TypeParameterAccessors custom types" begin - @eval module $(gensym()) - using Test: @testset - using NDTensors.TypeParameterAccessors: - TypeParameterAccessors, - Position, - default_type_parameter, - 
default_type_parameters, - set_default_type_parameter, - set_default_type_parameters, - set_type_parameter, - set_type_parameters, - specify_default_type_parameter, - specify_default_type_parameters, - specify_type_parameter, - specify_type_parameters, - type_parameter, - type_parameters, - unspecify_type_parameter, - unspecify_type_parameters - include("utils/test_inferred.jl") - @testset "TypeParameterAccessors, named positions and defaults" begin - struct MyType{P1,P2} end - TypeParameterAccessors.default_type_parameters(::Type{<:MyType}) = (:P1, :P2) - - @test_inferred default_type_parameter(MyType, 1) == :P1 wrapped = true - @test_inferred default_type_parameter(MyType, Position(1)) == :P1 - @test_inferred default_type_parameter(MyType, 2) == :P2 wrapped = true - @test_inferred default_type_parameter(MyType, Position(2)) == :P2 - @test_inferred default_type_parameter(MyType{<:Any,2}, Position(1)) == :P1 - @test_inferred default_type_parameter(MyType{<:Any,2}, Position(2)) == :P2 - @test_inferred default_type_parameters(MyType{<:Any,2}) == (:P1, :P2) - @test_inferred default_type_parameters(MyType) == (:P1, :P2) - # TODO: These don't infer, need to investigate. - @test_inferred default_type_parameter(MyType{<:Any,2}, 1) == :P1 inferred = false - @test_inferred default_type_parameter(MyType{<:Any,2}, 2) == :P2 inferred = false - - @test_inferred set_default_type_parameter(MyType{1,2}, 1) == MyType{:P1,2} wrapped = - true - @test_inferred set_default_type_parameter(MyType{1,2}, Position(1)) == MyType{:P1,2} - @test_inferred set_default_type_parameter(MyType{<:Any,2}, Position(1)) == - MyType{:P1,2} - @test_inferred set_default_type_parameter(MyType{<:Any,2}, Position(2)) == - MyType{<:Any,:P2} - @test_inferred set_default_type_parameters(MyType{<:Any,2}) == MyType{:P1,:P2} - # TODO: These don't infer, need to investigate. - @test_inferred set_default_type_parameter(MyType{<:Any,2}, 1) == MyType{:P1,2} inferred = - false - @test_inferred set_default_type_parameter(MyType{<:Any,2}, 2) == MyType{<:Any,:P2} inferred = - false - - @test_inferred specify_default_type_parameter(MyType{<:Any,2}, Position(1)) == - MyType{:P1,2} - @test_inferred specify_default_type_parameters(MyType{<:Any,2}) == MyType{:P1,2} - @test_inferred specify_default_type_parameter(MyType{<:Any,2}, Position(2)) == - MyType{<:Any,2} - @test_inferred specify_default_type_parameters(MyType) == MyType{:P1,:P2} - # TODO: These don't infer, need to investigate. 
- @test_inferred specify_default_type_parameter(MyType{<:Any,2}, 2) == MyType{<:Any,2} inferred = - false - - # Named positions - function p1 end - function p2 end - ## TODO remove TypeParameterAccessors when SetParameters is removed - TypeParameterAccessors.position(::Type{<:MyType}, ::typeof(p1)) = Position(1) - TypeParameterAccessors.position(::Type{<:MyType}, ::typeof(p2)) = Position(2) - - @test_inferred type_parameter(MyType{:p1}, p1) == :p1 - @test_inferred type_parameter(MyType{<:Any,:p2}, p2) == :p2 - @test_inferred default_type_parameter(MyType, p1) == :P1 - @test_inferred default_type_parameter(MyType, p2) == :P2 - end - end -end -end diff --git a/NDTensors/src/lib/TypeParameterAccessors/test/test_defaults.jl b/NDTensors/src/lib/TypeParameterAccessors/test/test_defaults.jl deleted file mode 100644 index c10d360762..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/test/test_defaults.jl +++ /dev/null @@ -1,75 +0,0 @@ -@eval module $(gensym()) -using Test: @test_throws, @testset -using NDTensors.TypeParameterAccessors: - TypeParameterAccessors, - Position, - default_type_parameter, - default_type_parameters, - set_default_type_parameter, - set_default_type_parameters, - specify_default_type_parameter, - specify_default_type_parameters -include("utils/test_inferred.jl") -@testset "TypeParameterAccessors defaults" begin - @testset "Erroneously requires wrapping to infer" begin end - @testset "Get defaults" begin - @test_inferred default_type_parameter(Array, 1) == Float64 wrapped = true - @test_inferred default_type_parameter(Array, Position(1)) == Float64 - @test_inferred default_type_parameter(Array, 2) == 1 wrapped = true - @test_inferred default_type_parameter(Array, Position(2)) == 1 - @test_inferred default_type_parameters(Array) == (Float64, 1) - @test_inferred default_type_parameters(Array, (2, 1)) == (1, Float64) wrapped = true - @test_inferred default_type_parameters(Array, (Position(2), Position(1))) == - (1, Float64) - @test_inferred default_type_parameters(Array, (ndims, eltype)) == (1, Float64) - end - @testset "Set defaults" begin - @test_inferred set_default_type_parameter(Array{Float32}, 1) == Array{Float64} wrapped = - true - @test_inferred set_default_type_parameter(Array{Float32}, Position(1)) == Array{Float64} - @test_inferred set_default_type_parameter(Array{Float32}, eltype) == Array{Float64} - @test_inferred set_default_type_parameters(Array{Float32}) == Vector{Float64} - @test_inferred set_default_type_parameters(Array{Float32}, (1, 2)) == Vector{Float64} wrapped = - true - @test_inferred set_default_type_parameters( - Array{Float32}, (Position(1), Position(2)) - ) == Vector{Float64} - @test_inferred set_default_type_parameters(Array{Float32}, (eltype, ndims)) == - Vector{Float64} - @test_inferred set_default_type_parameters(Array) == Vector{Float64} wrapped = true - @test_inferred set_default_type_parameters(Array, (Position(1),)) == Array{Float64} - @test_inferred set_default_type_parameters(Array, (Position(1), Position(2))) == - Vector{Float64} - end - @testset "Specify defaults" begin - @test_inferred specify_default_type_parameter(Array, 1) == Array{Float64} wrapped = true - @test_inferred specify_default_type_parameter(Array, Position(1)) == Array{Float64} - @test_inferred specify_default_type_parameter(Array, eltype) == Array{Float64} - @test_inferred specify_default_type_parameter(Array, 2) == Vector wrapped = true - @test_inferred specify_default_type_parameter(Array, Position(2)) == Vector - @test_inferred 
specify_default_type_parameter(Array, ndims) == Vector - @test_inferred specify_default_type_parameters(Array) == Vector{Float64} - @test_inferred specify_default_type_parameters(Array, (1,)) == Array{Float64} wrapped = - true - @test_inferred specify_default_type_parameters(Array, (Position(1),)) == Array{Float64} - @test_inferred specify_default_type_parameters(Array, (eltype,)) == Array{Float64} - @test_inferred specify_default_type_parameters(Array, (2,)) == Vector wrapped = true - @test_inferred specify_default_type_parameters(Array, (Position(2),)) == Vector - @test_inferred specify_default_type_parameters(Array, (ndims,)) == Vector - @test_inferred specify_default_type_parameters(Array, (1, 2)) == Vector{Float64} wrapped = - true - @test_inferred specify_default_type_parameters(Array, (Position(1), Position(2))) == - Vector{Float64} - @test_inferred specify_default_type_parameters(Array, (eltype, ndims)) == - Vector{Float64} - end - @testset "On objects" begin - a = randn(Float32, (2, 2, 2)) - @test_inferred default_type_parameter(a, 1) == Float64 wrapped = true - @test_inferred default_type_parameter(a, eltype) == Float64 - @test_inferred default_type_parameter(a, 2) == 1 wrapped = true - @test_inferred default_type_parameter(a, ndims) == 1 - @test_inferred default_type_parameters(a) == (Float64, 1) - end -end -end diff --git a/NDTensors/src/lib/TypeParameterAccessors/test/test_similartype.jl b/NDTensors/src/lib/TypeParameterAccessors/test/test_similartype.jl deleted file mode 100644 index 0813fc917b..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/test/test_similartype.jl +++ /dev/null @@ -1,26 +0,0 @@ -@eval module $(gensym()) -using Test: @test, @test_broken, @testset -using LinearAlgebra: Adjoint, Diagonal -using NDTensors.TypeParameterAccessors: NDims, similartype -@testset "TypeParameterAccessors similartype" begin - @test similartype(Array, Float64, (2, 2)) == Matrix{Float64} - @test similartype(Array) == Array - @test similartype(Array, Float64) == Array{Float64} - @test similartype(Array, (2, 2)) == Matrix - @test similartype(Array, NDims(2)) == Matrix - @test similartype(Array, Float64, (2, 2)) == Matrix{Float64} - @test similartype(Array, Float64, NDims(2)) == Matrix{Float64} - @test similartype(Adjoint{Float32,Matrix{Float32}}, Float64, (2, 2, 2)) == - Array{Float64,3} - @test similartype(Adjoint{Float32,Matrix{Float32}}, Float64, NDims(3)) == Array{Float64,3} - @test similartype(Adjoint{Float32,Matrix{Float32}}, Float64) == Matrix{Float64} - @test similartype(Diagonal{Float32,Vector{Float32}}) == Matrix{Float32} - @test similartype(Diagonal{Float32,Vector{Float32}}, Float64) == Matrix{Float64} - @test similartype(Diagonal{Float32,Vector{Float32}}, (2, 2, 2)) == Array{Float32,3} - @test similartype(Diagonal{Float32,Vector{Float32}}, NDims(3)) == Array{Float32,3} - @test similartype(Diagonal{Float32,Vector{Float32}}, Float64, (2, 2, 2)) == - Array{Float64,3} - @test similartype(Diagonal{Float32,Vector{Float32}}, Float64, NDims(3)) == - Array{Float64,3} -end -end diff --git a/NDTensors/src/lib/TypeParameterAccessors/test/test_wrappers.jl b/NDTensors/src/lib/TypeParameterAccessors/test/test_wrappers.jl deleted file mode 100644 index ed8e223922..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/test/test_wrappers.jl +++ /dev/null @@ -1,121 +0,0 @@ -@eval module $(gensym()) -using Test: @test_broken, @testset -using LinearAlgebra: - Adjoint, - Diagonal, - Hermitian, - LowerTriangular, - Symmetric, - Transpose, - UnitLowerTriangular, - UnitUpperTriangular, - 
UpperTriangular -using NDTensors.TypeParameterAccessors: - NDims, - TypeParameter, - is_wrapped_array, - parenttype, - set_eltype, - set_ndims, - set_parenttype, - type_parameter, - unspecify_type_parameters, - unwrap_array_type -using StridedViews: StridedView -include("utils/test_inferred.jl") -@testset "TypeParameterAccessors wrapper types" begin - @testset "Array" begin - array = randn(2, 2) - array_type = typeof(array) - @test_inferred parenttype(Matrix) == Matrix - @test_inferred is_wrapped_array(array) == false - @test_inferred parenttype(array) == array_type - @test_inferred set_eltype(array, Float32) isa Matrix{Float32} - @test_inferred set_eltype(array, Float32) ≈ array - @test_inferred set_eltype(Array{<:Any,2}, Float64) == Matrix{Float64} - @test_inferred set_ndims(Array{Float64}, 2) == Matrix{Float64} wrapped = true - @test_inferred set_ndims(Array{Float64}, NDims(2)) == Matrix{Float64} wrapped = true - @test_inferred set_ndims(Array{Float64}, TypeParameter(2)) == Matrix{Float64} - @test_inferred unwrap_array_type(array_type) == array_type - end - @testset "Base AbstractArray wrappers" begin - array = randn(2, 2) - for wrapped_array in ( - Base.ReshapedArray(array, (2, 2), ()), - SubArray(randn(2, 2), (:, :)), - PermutedDimsArray(randn(2, 2), (1, 2)), - ) - wrapped_array_type = typeof(wrapped_array) - @test_inferred parenttype(wrapped_array) == Matrix{Float64} - @test_inferred type_parameter(wrapped_array, eltype) == Float64 - @test_inferred is_wrapped_array(wrapped_array) == true - @test_inferred unwrap_array_type(wrapped_array_type) == Matrix{Float64} - @test_inferred set_eltype(wrapped_array_type, Float32) <: - unspecify_type_parameters(wrapped_array_type){Float32} - # Julia doesn't have the necessary conversions defined for this to work. - @test_broken set_eltype(wrapped_array, Float32) isa - unspecify_type_parameters(wrapped_array_type){Float32} - end - end - @testset "LinearAlgebra wrappers" begin - for wrapper in ( - Transpose, - Adjoint, - Symmetric, - Hermitian, - UpperTriangular, - LowerTriangular, - UnitUpperTriangular, - UnitLowerTriangular, - ) - array = randn(2, 2) - wrapped_array = wrapper(array) - wrapped_array_type = typeof(wrapped_array) - @test_inferred is_wrapped_array(wrapped_array) == true - @test_inferred parenttype(wrapped_array) == Matrix{Float64} - @test_inferred unwrap_array_type(wrapped_array_type) == Matrix{Float64} - @test_inferred set_parenttype(wrapped_array_type, wrapped_array_type) == - wrapper{eltype(wrapped_array_type),wrapped_array_type} - @test_inferred set_eltype(wrapped_array_type, Float32) == - wrapper{Float32,Matrix{Float32}} - if wrapper ∉ (UnitUpperTriangular, UnitLowerTriangular) - @test_inferred set_eltype(wrapped_array, Float32) isa - wrapper{Float32,Matrix{Float32}} - end - end - end - @testset "LinearAlgebra Diagonal wrapper" begin - array = randn(2, 2) - wrapped_array = Diagonal(array) - wrapped_array_type = typeof(wrapped_array) - @test_inferred is_wrapped_array(wrapped_array) == true - @test_inferred parenttype(wrapped_array) == Vector{Float64} - @test_inferred unwrap_array_type(wrapped_array_type) == Vector{Float64} - @test_inferred set_eltype(wrapped_array_type, Float32) == - Diagonal{Float32,Vector{Float32}} - if VERSION ≥ v"1.8" - # `Diagonal{T,Vector{T}}(diag::Diagonal)` not defined in Julia 1.7 - # and below.
- @test_inferred set_eltype(wrapped_array, Float32) isa - Diagonal{Float32,Vector{Float32}} - end - end - @testset "LinearAlgebra nested wrappers" begin - array = randn(2, 2) - wrapped_array = view(reshape(transpose(array), 4), 1:2) - wrapped_array_type = typeof(wrapped_array) - @test_inferred is_wrapped_array(wrapped_array) == true - @test_inferred parenttype(wrapped_array) <: - Base.ReshapedArray{Float64,1,Transpose{Float64,Matrix{Float64}}} - @test_inferred unwrap_array_type(array) == Matrix{Float64} - end - @testset "StridedView" begin - array = randn(2, 2) - wrapped_array = StridedView(randn(2, 2)) - wrapped_array_type = typeof(wrapped_array) - @test_inferred is_wrapped_array(wrapped_array) == true - @test_inferred parenttype(wrapped_array) == Matrix{Float64} - @test_inferred unwrap_array_type(wrapped_array_type) == Matrix{Float64} - end -end -end diff --git a/NDTensors/src/lib/TypeParameterAccessors/test/utils/test_inferred.jl b/NDTensors/src/lib/TypeParameterAccessors/test/utils/test_inferred.jl deleted file mode 100644 index 755c31eb98..0000000000 --- a/NDTensors/src/lib/TypeParameterAccessors/test/utils/test_inferred.jl +++ /dev/null @@ -1,28 +0,0 @@ -using Test: @inferred, @test - -macro test_inferred(ex, kws...) - @assert ex.head in [:call, :(<:)] - first_arg = ex.head === :(<:) ? 1 : 2 - @assert length(ex.args[first_arg:end]) == 2 - # Collect the `inferred`/`wrapped` keywords and remove them from the rest of the keywords - @assert all(kw -> kw.head === :(=), kws) - inferreds = [kw.args[2] for kw in kws if kw.args[1] === :inferred] - inferred = isempty(inferreds) ? true : only(inferreds) - wrappeds = [kw.args[2] for kw in kws if kw.args[1] === :wrapped] - wrapped = isempty(wrappeds) ? false : only(wrappeds) - kws = filter(kw -> kw.args[1] ∉ (:inferred, :wrapped), kws) - arg1 = ex.args[first_arg] - arg1 = quote - if $inferred - if $wrapped - @inferred((() -> $arg1)()) - else - @inferred($arg1) - end - else - $arg1 - end - end - ex.args[first_arg] = arg1 - return Expr(:macrocall, Symbol("@test"), :(), esc(ex), kws...) -end diff --git a/NDTensors/src/lib/UnallocatedArrays/.JuliaFormatter.toml b/NDTensors/src/lib/UnallocatedArrays/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/UnallocatedArrays/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/UnallocatedArrays/README.md b/NDTensors/src/lib/UnallocatedArrays/README.md deleted file mode 100644 index 360efad10c..0000000000 --- a/NDTensors/src/lib/UnallocatedArrays/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# UnallocatedArrays - -A module defining a set of unallocated, immutable, lazy arrays which are used to quickly construct -tensors while allocating as little data as possible.
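# A minimal usage sketch of the `UnallocatedArrays` interface described in the
# README above, assuming the `UnallocatedZeros`, `set_alloctype`, `alloctype`,
# and `allocate` definitions in the files below: a lazy `FillArrays.Zeros` is
# tagged with the array type to use once data must actually be materialized.
using FillArrays: Zeros
using NDTensors.UnallocatedArrays: allocate, alloctype, set_alloctype
z = set_alloctype(Zeros{Float64}(2, 3), Matrix{Float64})
@assert alloctype(z) == Matrix{Float64}
a = allocate(z)  # only here is a 2×3 Matrix{Float64} of zeros actually allocated
@assert a == zeros(2, 3)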
diff --git a/NDTensors/src/lib/UnallocatedArrays/src/UnallocatedArrays.jl b/NDTensors/src/lib/UnallocatedArrays/src/UnallocatedArrays.jl deleted file mode 100644 index fb95f4ec21..0000000000 --- a/NDTensors/src/lib/UnallocatedArrays/src/UnallocatedArrays.jl +++ /dev/null @@ -1,11 +0,0 @@ -module UnallocatedArrays -include("abstractfill/abstractfill.jl") - -include("unallocatedfill.jl") -include("unallocatedzeros.jl") -include("broadcast.jl") -include("abstractunallocatedarray.jl") -include("set_types.jl") - -export UnallocatedFill, UnallocatedZeros, alloctype, set_alloctype, allocate -end diff --git a/NDTensors/src/lib/UnallocatedArrays/src/abstractfill/abstractfill.jl b/NDTensors/src/lib/UnallocatedArrays/src/abstractfill/abstractfill.jl deleted file mode 100644 index 6356478641..0000000000 --- a/NDTensors/src/lib/UnallocatedArrays/src/abstractfill/abstractfill.jl +++ /dev/null @@ -1,22 +0,0 @@ -using FillArrays: AbstractFill -using NDTensors.TypeParameterAccessors: TypeParameterAccessors, Position, set_type_parameter, type_parameter -## Here are functions specifically defined for UnallocatedArrays -## not implemented by FillArrays -## TODO: this might need a more generic name, maybe something like "compute unit" -function alloctype(A::AbstractFill) - return A.alloc -end - -## TODO: this fails if the parameter is a type -function alloctype(Atype::Type{<:AbstractFill}) - return type_parameter(Atype, alloctype) -end - -axestype(T::Type{<:AbstractArray}) = type_parameter(T, axestype) -set_axestype(T::Type{<:AbstractFill}, ax::Type) = set_type_parameter(T, axestype, ax) - -TypeParameterAccessors.position(::Type{<:AbstractFill}, ::typeof(alloctype)) = Position(4) -TypeParameterAccessors.position(::Type{<:AbstractFill}, ::typeof(axestype)) = Position(3) -function TypeParameterAccessors.default_type_parameters(::Type{<:AbstractFill}) - return (Float64, 0, Tuple{}) -end diff --git a/NDTensors/src/lib/UnallocatedArrays/src/abstractunallocatedarray.jl b/NDTensors/src/lib/UnallocatedArrays/src/abstractunallocatedarray.jl deleted file mode 100644 index 7b07cc27ff..0000000000 --- a/NDTensors/src/lib/UnallocatedArrays/src/abstractunallocatedarray.jl +++ /dev/null @@ -1,62 +0,0 @@ -using FillArrays: FillArrays, getindex_value -using NDTensors.TypeParameterAccessors: set_eltype, set_ndims -using Adapt: adapt - -const UnallocatedArray{ElT,N,AxesT,AllocT} = Union{ - UnallocatedFill{ElT,N,AxesT,AllocT},UnallocatedZeros{ElT,N,AxesT,AllocT} -} - -@inline Base.axes(A::UnallocatedArray) = axes(parent(A)) -Base.size(A::UnallocatedArray) = size(parent(A)) -function FillArrays.getindex_value(A::UnallocatedArray) - return getindex_value(parent(A)) -end - -function Base.complex(A::UnallocatedArray) - return complex(eltype(A)).(A) -end - -function Base.transpose(a::UnallocatedArray) - return set_alloctype(transpose(parent(a)), alloctype(a)) -end - -function Base.adjoint(a::UnallocatedArray) - return set_alloctype(adjoint(parent(a)), alloctype(a)) -end - -using NDTensors.TypeParameterAccessors: set_type_parameter -function set_alloctype(T::Type{<:UnallocatedArray}, alloc::Type{<:AbstractArray}) - return set_type_parameter(T, alloctype, alloc) -end - -## This overloads the definitions in `FillArrays.jl` -for STYPE in (:AbstractArray, :AbstractFill) - @eval begin - @inline $STYPE{T}(F::UnallocatedArray{T}) where {T} = F - @inline $STYPE{T,N}(F::UnallocatedArray{T,N}) where {T,N} = F - end -end - -function allocate(f::UnallocatedArray) - a = similar(f) - fill!(a, getindex_value(f)) - return a -end - -function allocate(arraytype::Type{<:AbstractArray}, elt::Type, axes) 
- ArrayT = set_ndims(set_eltype(arraytype, elt), length(axes)) - return similar(ArrayT, axes) -end - -function Base.similar(f::UnallocatedArray, elt::Type, axes::Tuple{Int64,Vararg{Int64}}) - return allocate(alloctype(f), elt, axes) -end - -## TODO fix this because reshape loses alloctype -#FillArrays.reshape(a::Union{<:UnallocatedFill, <:UnallocatedZeros}, dims) = set_alloctype(reshape(parent(a), dims), allocate(a)) - -# function Adapt.adapt_storage(to::Type{<:AbstractArray}, x::Union{<:UnallocatedFill, <:UnallocatedZeros}) -# return set_alloctype(parent(x), to) -# end - -# function Adapt.adapt_storage(to::Type{<:Number}, x::) diff --git a/NDTensors/src/lib/UnallocatedArrays/src/broadcast.jl b/NDTensors/src/lib/UnallocatedArrays/src/broadcast.jl deleted file mode 100644 index c5e98e71d4..0000000000 --- a/NDTensors/src/lib/UnallocatedArrays/src/broadcast.jl +++ /dev/null @@ -1,28 +0,0 @@ -using FillArrays: broadcasted_fill, broadcasted_zeros, getindex_value - -abstract type ZeroPreserving end -struct IsZeroPreserving <: ZeroPreserving end -struct NotZeroPreserving <: ZeroPreserving end - -# Assume operations don't preserve zeros for safety -ZeroPreserving(x) = NotZeroPreserving() -ZeroPreserving(::typeof(Complex)) = IsZeroPreserving() -ZeroPreserving(::typeof(Real)) = IsZeroPreserving() - -function Broadcast.broadcasted(style::Broadcast.DefaultArrayStyle, f, a::UnallocatedZeros) - return _broadcasted(style, f, ZeroPreserving(f), a) -end - -function _broadcasted( - style::Broadcast.DefaultArrayStyle, f, ::IsZeroPreserving, a::UnallocatedZeros -) - z = f.(parent(a)) - return broadcasted_zeros(f, a, eltype(z), axes(z)) -end - -function _broadcasted( - style::Broadcast.DefaultArrayStyle, f, ::NotZeroPreserving, a::UnallocatedZeros -) - z = f.(parent(a)) - return broadcasted_fill(f, a, getindex_value(z), axes(z)) -end diff --git a/NDTensors/src/lib/UnallocatedArrays/src/set_types.jl b/NDTensors/src/lib/UnallocatedArrays/src/set_types.jl deleted file mode 100644 index 4d169c5511..0000000000 --- a/NDTensors/src/lib/UnallocatedArrays/src/set_types.jl +++ /dev/null @@ -1,14 +0,0 @@ -using .TypeParameterAccessors: TypeParameterAccessors -using NDTensors.UnspecifiedTypes: UnspecifiedArray, UnspecifiedNumber, UnspecifiedZero - -function TypeParameterAccessors.default_type_parameters(::Type{<:UnallocatedArray}) - return ( - UnspecifiedNumber{UnspecifiedZero}, - 0, - Tuple{}, - UnspecifiedArray{UnspecifiedNumber{UnspecifiedZero},0}, - ) -end - -unspecify_parameters(::Type{<:UnallocatedFill}) = UnallocatedFill -unspecify_parameters(::Type{<:UnallocatedZeros}) = UnallocatedZeros diff --git a/NDTensors/src/lib/UnallocatedArrays/src/unallocatedfill.jl b/NDTensors/src/lib/UnallocatedArrays/src/unallocatedfill.jl deleted file mode 100644 index c84d2e0d7e..0000000000 --- a/NDTensors/src/lib/UnallocatedArrays/src/unallocatedfill.jl +++ /dev/null @@ -1,67 +0,0 @@ -using FillArrays: - FillArrays, AbstractFill, Fill, broadcasted_fill, getindex_value, kron_fill, mult_fill - -struct UnallocatedFill{ElT,N,Axes,Alloc} <: AbstractFill{ElT,N,Axes} - f::Fill{ElT,N,Axes} - alloc::Alloc -end - -function UnallocatedFill{ElT,N,Axes}(f::Fill, alloc::Type) where {ElT,N,Axes} - return UnallocatedFill{ElT,N,Axes,Type{alloc}}(f, alloc) -end - -function UnallocatedFill{ElT,N}(f::Fill, alloc) where {ElT,N} - return UnallocatedFill{ElT,N,typeof(axes(f))}(f, alloc) -end - -function UnallocatedFill{ElT}(f::Fill, alloc) where {ElT} - return UnallocatedFill{ElT,ndims(f)}(f, alloc) -end - -set_alloctype(f::Fill, alloc::Type) = 
UnallocatedFill(f, alloc) - -Base.parent(F::UnallocatedFill) = F.f - -Base.convert(::Type{<:UnallocatedFill}, A::UnallocatedFill) = A - -############################################# -# Arithmetic - -# mult_fill(a, b, val, ax) = Fill(val, ax) -function FillArrays.mult_fill(a::UnallocatedFill, b, val, ax) - return UnallocatedFill(Fill(val, ax), alloctype(a)) -end -FillArrays.mult_fill(a, b::UnallocatedFill, val, ax) = mult_fill(b, a, val, ax) -function FillArrays.mult_fill(a::UnallocatedFill, b::UnallocatedFill, val, ax) - @assert alloctype(a) == alloctype(b) - return UnallocatedFill(Fill(val, ax), alloctype(a)) -end - -function FillArrays.broadcasted_fill(f, a::UnallocatedFill, val, ax) - return UnallocatedFill(Fill(val, ax), alloctype(a)) -end -function FillArrays.broadcasted_fill(f, a::UnallocatedFill, b::UnallocatedFill, val, ax) - @assert alloctype(a) == alloctype(b) - return UnallocatedFill(Fill(val, ax), alloctype(a)) -end - -function FillArrays.broadcasted_fill(f, a::UnallocatedFill, b, val, ax) - return UnallocatedFill(Fill(val, ax), alloctype(a)) -end -function FillArrays.broadcasted_fill(f, a, b::UnallocatedFill, val, ax) - return broadcasted_fill(f, b, a, val, ax) -end - -function FillArrays.kron_fill(a::UnallocatedFill, b::UnallocatedFill, val, ax) - @assert alloctype(a) == alloctype(b) - return UnallocatedFill(Fill(val, ax), alloctype(a)) -end - -Base.:+(A::UnallocatedFill, B::UnallocatedFill) = A .+ B - -function Base.Broadcast.broadcasted( - ::Base.Broadcast.DefaultArrayStyle, op, r::UnallocatedFill -) - f = op.(parent(r)) - return broadcasted_fill(op, r, getindex_value(f), axes(f)) -end diff --git a/NDTensors/src/lib/UnallocatedArrays/src/unallocatedzeros.jl b/NDTensors/src/lib/UnallocatedArrays/src/unallocatedzeros.jl deleted file mode 100644 index 39752e206e..0000000000 --- a/NDTensors/src/lib/UnallocatedArrays/src/unallocatedzeros.jl +++ /dev/null @@ -1,88 +0,0 @@ -using FillArrays: - FillArrays, - AbstractZeros, - Fill, - Zeros, - broadcasted_fill, - broadcasted_zeros, - kron_fill, - kron_zeros, - mult_zeros - -## TODO: Should `Alloc` also be parametrized by `ElT` and `N`, or should there be -## more freedom there?
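A minimal usage sketch of the pattern implemented by `UnallocatedFill` above and `UnallocatedZeros` below (names as exported from this module; the `dev` wrapper used in the test suite is omitted here): the wrapper pairs a lazy `FillArrays` array with the array type that should eventually back it, `allocate` materializes it, and zero-preserving arithmetic stays lazy.

using FillArrays: Fill, Zeros
using NDTensors.UnallocatedArrays: UnallocatedFill, UnallocatedZeros, allocate, alloctype

F = UnallocatedFill(Fill(3.0, (2, 3)), Matrix{Float64})  # lazy: no 2x3 buffer exists yet
alloctype(F)   # Matrix{Float64}, the eventual backing array type
Fa = allocate(F)   # a real Matrix{Float64} filled with 3.0

Z = UnallocatedZeros(Zeros{Float64}((2, 3)), Matrix{Float64})
2.0 .* Z   # still an UnallocatedZeros: scaling preserves zeros
Z .+ 1.0   # becomes an UnallocatedFill with value 1.0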
-struct UnallocatedZeros{ElT,N,Axes,Alloc} <: AbstractZeros{ElT,N,Axes} - z::Zeros{ElT,N,Axes} - alloc::Alloc -end - -function UnallocatedZeros{ElT,N,Axes}(z::Zeros, alloc::Type) where {ElT,N,Axes} - return UnallocatedZeros{ElT,N,Axes,Type{alloc}}(z, alloc) -end - -function UnallocatedZeros{ElT,N}(z::Zeros, alloc) where {ElT,N} - return UnallocatedZeros{ElT,N,typeof(axes(z))}(z, alloc) -end - -function UnallocatedZeros{ElT}(z::Zeros, alloc) where {ElT} - return UnallocatedZeros{ElT,ndims(z)}(z, alloc) -end - -set_alloctype(f::Zeros, alloc::Type) = UnallocatedZeros(f, alloc) - -Base.parent(Z::UnallocatedZeros) = Z.z - -Base.convert(::Type{<:UnallocatedZeros}, A::UnallocatedZeros) = A - -############################################# -# Arithmetic - -function FillArrays.mult_zeros(a::UnallocatedZeros, b, elt, ax) - return UnallocatedZeros(Zeros{elt}(ax), alloctype(a)) -end -FillArrays.mult_zeros(a, b::UnallocatedZeros, elt, ax) = mult_zeros(b, a, elt, ax) -function FillArrays.mult_zeros(a::UnallocatedZeros, b::UnallocatedZeros, elt, ax) - @assert alloctype(a) == alloctype(b) - return UnallocatedZeros(Zeros{elt}(ax), alloctype(a)) -end - -function FillArrays.broadcasted_zeros(f, a::UnallocatedZeros, elt, ax) - return UnallocatedZeros(Zeros{elt}(ax), alloctype(a)) -end -function FillArrays.broadcasted_zeros(f, a::UnallocatedZeros, b::UnallocatedZeros, elt, ax) - @assert alloctype(a) == alloctype(b) - return UnallocatedZeros(Zeros{elt}(ax), alloctype(a)) -end - -function FillArrays.broadcasted_zeros(f, a::UnallocatedZeros, b, elt, ax) - return UnallocatedZeros(Zeros{elt}(ax), alloctype(a)) -end -function FillArrays.broadcasted_zeros(f, a, b::UnallocatedZeros, elt, ax) - return broadcasted_zeros(f, b, a, elt, ax) -end - -function FillArrays.broadcasted_fill(f, a::UnallocatedZeros, val, ax) - return UnallocatedFill(Fill(val, ax), alloctype(a)) -end -function FillArrays.broadcasted_fill(f, a::UnallocatedZeros, b, val, ax) - return UnallocatedFill(Fill(val, ax), alloctype(a)) -end - -function FillArrays.broadcasted_fill(f, a, b::UnallocatedZeros, val, ax) - return broadcasted_fill(f, b, a, val, ax) -end - -function FillArrays.kron_zeros(a::UnallocatedZeros, b::UnallocatedZeros, elt, ax) - @assert alloctype(a) == alloctype(b) - return UnallocatedZeros(Zeros{elt}(ax), alloctype(a)) -end - -function FillArrays.kron_fill(a::UnallocatedZeros, b::UnallocatedFill, val, ax) - @assert alloctype(a) == alloctype(b) - elt = typeof(val) - return UnallocatedZeros(Zeros{elt}(ax), alloctype(a)) -end - -function FillArrays.kron_fill(a::UnallocatedFill, b::UnallocatedZeros, val, ax) - return kron_fill(b, a, val, ax) -end diff --git a/NDTensors/src/lib/UnallocatedArrays/test/Project.toml b/NDTensors/src/lib/UnallocatedArrays/test/Project.toml deleted file mode 100644 index 88851e5c59..0000000000 --- a/NDTensors/src/lib/UnallocatedArrays/test/Project.toml +++ /dev/null @@ -1,5 +0,0 @@ -[deps] -FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b" -LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/NDTensors/src/lib/UnallocatedArrays/test/runtests.jl b/NDTensors/src/lib/UnallocatedArrays/test/runtests.jl deleted file mode 100644 index 4dd8b8b521..0000000000 --- a/NDTensors/src/lib/UnallocatedArrays/test/runtests.jl +++ /dev/null @@ -1,326 +0,0 @@ -@eval module $(gensym()) -using FillArrays: FillArrays, AbstractFill, Fill, Zeros -using NDTensors: NDTensors -using NDTensors.UnallocatedArrays: -
UnallocatedFill, UnallocatedZeros, allocate, alloctype, set_alloctype -using LinearAlgebra: norm -using Test: @test, @test_broken, @testset - -include(joinpath(pkgdir(NDTensors), "test", "NDTensorsTestUtils", "NDTensorsTestUtils.jl")) -using .NDTensorsTestUtils: devices_list - -@testset "Testing UnallocatedArrays on $dev with eltype $elt" for dev in devices_list(ARGS), - elt in (Float64, Float32, ComplexF64, ComplexF32) - - @testset "Basic functionality" begin - z = Zeros{elt}((2, 3)) - Z = UnallocatedZeros(z, dev(Matrix{elt})) - - @test Z isa AbstractFill - @test size(Z) == (2, 3) - @test length(Z) == 6 - @test iszero(sum(Z)) - @test iszero(norm(Z)) - @test iszero(Z[2, 3]) - @test allocate(Z) isa dev(Matrix{elt}) - Zp = UnallocatedZeros{elt}(Zeros(2, 3), dev(Matrix{elt})) - @test Zp == Z - Zp = set_alloctype(z, dev(Matrix{elt})) - @test Zp == Z - Zc = copy(Z) - @test Zc == Z - Zc = complex(Z) - @test eltype(Zc) == complex(eltype(z)) - @test iszero(Zc[1, 2]) - @test Zc isa UnallocatedZeros - @test alloctype(Zc) == alloctype(Z) - - Zs = similar(Z) - @test Zs isa alloctype(Z) - - Z = UnallocatedZeros(z, dev(Array)) - Za = allocate(Z) - @test Za isa dev(Array{elt,2}) - @test Za[1, 3] == zero(elt) - - ######################################### - # UnallocatedFill - f = Fill{elt}(3, (2, 3, 4)) - F = UnallocatedFill(f, Array{elt,ndims(f)}) - - @test F isa AbstractFill - @test size(F) == (2, 3, 4) - @test length(F) == 24 - @test sum(F) ≈ elt(3) * 24 - @test norm(F) ≈ sqrt(elt(3)^2 * 24) - @test F[2, 3, 1] == elt(3) - @test allocate(F) isa Array{elt,3} - Fp = UnallocatedFill{elt}(Fill(3, (2, 3, 4)), Array{elt,ndims(f)}) - @test Fp == F - Fp = allocate(F) - @test norm(Fp) ≈ norm(F) - Fs = similar(F) - @test Fs isa alloctype(F) - @test length(Fs) == 2 * 3 * 4 - Fs[1, 1, 1] = elt(10) - @test Fs[1, 1, 1] == elt(10) - - Fp = set_alloctype(f, dev(Array{elt,ndims(f)})) - @test allocate(Fp) isa dev(Array{elt,ndims(f)}) - @test Fp == F - Fc = copy(F) - @test Fc == F - Fc = allocate(complex(F)) - @test eltype(Fc) == complex(eltype(F)) - ## Here we no longer require the eltype of the alloctype to - ## be the same as the eltype of the `UnallocatedArray`.
It will be - ## replaced when the array is allocated - # @test_broken typeof(Fc) == alloctype(complex(F)) - Fc[2, 3, 4] = elt(0) - @test iszero(Fc[2, 3, 4]) - - F = UnallocatedFill(f, dev(Array)) - Fa = allocate(F) - @test Fa isa dev(Array{elt,3}) - @test Fa[2, 1, 4] == elt(3) - - F = UnallocatedFill(f, dev(Vector)) - Fa = allocate(F) - @test ndims(Fa) == 3 - @test Fa isa dev(Array) - end - - @testset "Multiplication" begin - z = Zeros{elt}((2, 3)) - Z = UnallocatedZeros(z, dev(Matrix{elt})) - - R = Z * Z' - @test R isa UnallocatedZeros - @test alloctype(R) == alloctype(Z) - @test size(R) == (2, 2) - M = rand(elt, (3, 4)) - R = Z * M - @test R isa UnallocatedZeros - @test alloctype(R) == alloctype(Z) - @test size(R) == (2, 4) - R = M' * Z' - @test R isa UnallocatedZeros - @test alloctype(R) == alloctype(Z) - @test size(R) == (4, 2) - R = transpose(M) * transpose(Z) - @test R isa UnallocatedZeros - @test alloctype(R) == alloctype(Z) - @test size(R) == (4, 2) - - ################################### - ## UnallocatedFill - f = Fill{elt}(3, (2, 12)) - F = UnallocatedFill(f, dev(Matrix{elt})) - p = Fill{elt}(4, (12, 5)) - P = UnallocatedFill(p, dev(Array{elt,ndims(p)})) - R = F * P - @test F isa UnallocatedFill - @test R[1, 1] == elt(144) - @test alloctype(R) == alloctype(F) - @test size(R) == (2, 5) - - R = F * F' - @test R isa UnallocatedFill - @test R[1, 2] == elt(108) - @test alloctype(R) == alloctype(F) - @test size(R) == (2, 2) - - R = transpose(F) * F - @test R isa UnallocatedFill - @test R[12, 3] == elt(18) - @test alloctype(R) == alloctype(F) - @test size(R) == (12, 12) - - R = transpose(Z) * F - @test R isa UnallocatedZeros - @test alloctype(R) == alloctype(Z) - @test size(R) == (3, 12) - end - - @testset "Broadcast" begin - z = Zeros{elt}((2, 3)) - Z = UnallocatedZeros(z, dev(Matrix{elt})) - R = elt(2) .* Z - @test R isa UnallocatedZeros - @test alloctype(R) == alloctype(Z) - R = Z .* elt(2) - @test R isa UnallocatedZeros - @test alloctype(R) == alloctype(Z) - - R = Z .* Z - @test R isa UnallocatedZeros - @test alloctype(R) == alloctype(Z) - - Z = UnallocatedZeros(Zeros{elt}((2, 3)), dev(Matrix{elt})) - R = Z + Z - @test R isa UnallocatedZeros - @test alloctype(R) == alloctype(Z) - - R = Z .+ elt(2) - @test R isa UnallocatedFill - @test alloctype(R) == alloctype(Z) - - R = (x -> x .+ 1).(Z) - @test R isa UnallocatedFill - @test alloctype(R) == alloctype(Z) - @test R[1, 1] == elt(1) - - Z .*= 1.0 - @test Z isa UnallocatedZeros - @test alloctype(R) == dev(Matrix{elt}) - @test Z[2, 1] == zero(elt) - ######################## - # UnallocatedFill - f = Fill(elt(3), (2, 3, 4)) - F = UnallocatedFill(f, Array{elt,ndims(f)}) - F2 = F .* 2 - @test F2 isa UnallocatedFill - @test F2[1, 1, 1] == elt(6) - @test alloctype(F2) == alloctype(F) - - F2 = F2 .+ elt(2) - @test F2 isa UnallocatedFill - @test F2[1, 1, 1] == elt(8) - @test alloctype(F2) == alloctype(F) - - F = UnallocatedFill(Fill(elt(2), (2, 3)), dev(Matrix{elt})) - R = Z + F - @test R isa UnallocatedFill - @test alloctype(R) == alloctype(Z) - - R = F + Z - @test R isa UnallocatedFill - @test alloctype(R) == alloctype(Z) - - F = UnallocatedFill(Fill(elt(3), (2, 12)), dev(Matrix{elt})) - R = F .* F - @test R isa UnallocatedFill - @test R[2, 9] == elt(9) - @test alloctype(R) == alloctype(F) - @test size(R) == (2, 12) - - P = UnallocatedFill(Fill(elt(4), (2, 3)), dev(Matrix{elt})) - R = Z .* P - @test R isa UnallocatedZeros - @test alloctype(R) == alloctype(P) - @test size(R) == (2, 3) - - F = UnallocatedFill(Fill(elt(2), (2, 3)), 
dev(Matrix{elt})) - R = F + F - @test R isa UnallocatedFill - @test R[1, 3] == elt(4) - - R = (x -> x .+ 1).(F) - @test R isa UnallocatedFill - @test R[2, 1] == elt(3) - @test alloctype(R) == alloctype(F) - end - - ## TODO make other kron tests - @testset "Kron" begin - A = UnallocatedZeros(Zeros{elt}(2), dev(Vector{elt})) - B = UnallocatedZeros(Zeros{elt}(2), dev(Vector{elt})) - C = kron(A, B) - @test C isa UnallocatedZeros - @test alloctype(C) == alloctype(B) - - B = UnallocatedFill(Fill(elt(2), (2)), dev(Vector{elt})) - C = kron(A, B) - @test C isa UnallocatedZeros - @test alloctype(C) == alloctype(B) - - C = kron(B, A) - @test C isa UnallocatedZeros - @test alloctype(C) == alloctype(B) - - A = UnallocatedFill(Fill(elt(3), (2)), dev(Vector{elt})) - C = kron(A, B) - @test C isa UnallocatedFill - @test alloctype(C) == alloctype(B) - @test C[1] == elt(6) - end -end -end - -using FillArrays: Fill, Zeros -using NDTensors.UnallocatedArrays: UnallocatedFill, UnallocatedZeros -using NDTensors.TypeParameterAccessors: - Position, default_type_parameter, nparameters, set_type_parameter, type_parameter -using Test: @test, @testset - -@testset "SetParameters" begin - @testset "Testing $typ" for (typ) in (:Fill, :Zeros) - @eval begin - t1 = default_type_parameter($typ, Position{1}()) - t2 = default_type_parameter($typ, Position{2}()) - t3 = default_type_parameter($typ, Position{3}()) - t4 = Any - ft1 = $typ{t1} - ft2 = $typ{t1,t2} - ft3 = $typ{t1,t2,t3} - - ## check 1 parameter specified - ftn1 = set_type_parameter(ft1, Position{1}(), t4) - ftn2 = set_type_parameter(ft1, Position{2}(), t4) - ftn3 = set_type_parameter(ft1, Position{3}(), t4) - @test ftn1 == $typ{t4} - @test ftn2 == $typ{t1,t4} - @test ftn3 == $typ{t1,<:Any,t4} - - ## check 2 parameters specified - ftn1 = set_type_parameter(ft2, Position{1}(), t4) - ftn2 = set_type_parameter(ft2, Position{2}(), t4) - ftn3 = set_type_parameter(ft2, Position{3}(), t4) - @test ftn1 == $typ{t4,t2} - @test ftn2 == $typ{t1,t4} - @test ftn3 == $typ{t1,t2,t4} - - ## check 3 parameters specified - ftn1 = set_type_parameter(ft3, Position{1}(), t4) - ftn2 = set_type_parameter(ft3, Position{2}(), t4) - ftn3 = set_type_parameter(ft3, Position{3}(), t4) - @test ftn1 == $typ{t4,t2,t3} - @test ftn2 == $typ{t1,t4,t3} - @test ftn3 == $typ{t1,t2,t4} - - @test type_parameter(ft3, Position{1}()) == t1 - @test type_parameter(ft3, Position{2}()) == t2 - @test type_parameter(ft3, Position{3}()) == t3 - - @test nparameters(ft3) == 3 - end - end - - @testset "Testing $typ" for (typ) in (:UnallocatedFill, :UnallocatedZeros) - @eval begin - t1 = default_type_parameter($typ, Position{1}()) - t2 = default_type_parameter($typ, Position{2}()) - t3 = default_type_parameter($typ, Position{3}()) - t4 = default_type_parameter($typ, Position{4}()) - t5 = Any - ft = $typ{t1,t2,t3,t4} - - ## check 4 parameters specified - ftn1 = set_type_parameter(ft, Position{1}(), t5) - ftn2 = set_type_parameter(ft, Position{2}(), t5) - ftn3 = set_type_parameter(ft, Position{3}(), t5) - ftn4 = set_type_parameter(ft, Position{4}(), t5) - @test ftn1 == $typ{t5,t2,t3,t4} - @test ftn2 == $typ{t1,t5,t3,t4} - @test ftn3 == $typ{t1,t2,t5,t4} - @test ftn4 == $typ{t1,t2,t3,t5} - - @test type_parameter(ft, Position{1}()) == t1 - @test type_parameter(ft, Position{2}()) == t2 - @test type_parameter(ft, Position{3}()) == t3 - @test type_parameter(ft, Position{4}()) == t4 - - @test nparameters(ft) == 4 - end - end -end diff --git a/NDTensors/src/lib/UnspecifiedTypes/.JuliaFormatter.toml
b/NDTensors/src/lib/UnspecifiedTypes/.JuliaFormatter.toml deleted file mode 100644 index 08f664cdb9..0000000000 --- a/NDTensors/src/lib/UnspecifiedTypes/.JuliaFormatter.toml +++ /dev/null @@ -1,2 +0,0 @@ -style = "blue" -indent = 2 diff --git a/NDTensors/src/lib/UnspecifiedTypes/README.md b/NDTensors/src/lib/UnspecifiedTypes/README.md deleted file mode 100644 index 15ad85367d..0000000000 --- a/NDTensors/src/lib/UnspecifiedTypes/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# UnspecifiedTypes - -A module defining a set of basic types that serve as placeholders for allocated, bit-wise representable types. diff --git a/NDTensors/src/lib/UnspecifiedTypes/TODO.md b/NDTensors/src/lib/UnspecifiedTypes/TODO.md deleted file mode 100644 index 06955df78d..0000000000 --- a/NDTensors/src/lib/UnspecifiedTypes/TODO.md +++ /dev/null @@ -1,6 +0,0 @@ -## TODO -Currently this code is largely unimplemented. Things we need to do are: -1. Fully implement all of the types -2. Make sure these types work properly with infrastructure like `UnallocatedArrays`... -3. Make sure type promotion works as expected for each `UnallocatedType` -4. Make a test suite for the module \ No newline at end of file diff --git a/NDTensors/src/lib/UnspecifiedTypes/src/UnspecifiedTypes.jl b/NDTensors/src/lib/UnspecifiedTypes/src/UnspecifiedTypes.jl deleted file mode 100644 index f94a63a14c..0000000000 --- a/NDTensors/src/lib/UnspecifiedTypes/src/UnspecifiedTypes.jl +++ /dev/null @@ -1,11 +0,0 @@ -module UnspecifiedTypes - -using LinearAlgebra - -include("unspecifiednumber.jl") -include("unspecifiedzero.jl") - -include("unspecifiedarray.jl") - -export UnspecifiedArray, UnspecifiedNumber, UnspecifiedZero -end diff --git a/NDTensors/src/lib/UnspecifiedTypes/src/unspecifiedarray.jl b/NDTensors/src/lib/UnspecifiedTypes/src/unspecifiedarray.jl deleted file mode 100644 index e47e9832e5..0000000000 --- a/NDTensors/src/lib/UnspecifiedTypes/src/unspecifiedarray.jl +++ /dev/null @@ -1 +0,0 @@ -struct UnspecifiedArray{ElT,N} <: AbstractArray{ElT,N} end diff --git a/NDTensors/src/lib/UnspecifiedTypes/src/unspecifiednumber.jl b/NDTensors/src/lib/UnspecifiedTypes/src/unspecifiednumber.jl deleted file mode 100644 index c44587b376..0000000000 --- a/NDTensors/src/lib/UnspecifiedTypes/src/unspecifiednumber.jl +++ /dev/null @@ -1,5 +0,0 @@ -abstract type AbstractUnspecifiedNumber <: Number end - -struct UnspecifiedNumber{T} <: AbstractUnspecifiedNumber - value::T -end diff --git a/NDTensors/src/lib/UnspecifiedTypes/src/unspecifiedzero.jl b/NDTensors/src/lib/UnspecifiedTypes/src/unspecifiedzero.jl deleted file mode 100644 index 4ca1858218..0000000000 --- a/NDTensors/src/lib/UnspecifiedTypes/src/unspecifiedzero.jl +++ /dev/null @@ -1,39 +0,0 @@ -struct UnspecifiedZero <: AbstractUnspecifiedNumber end - -# Base.Complex{UnspecifiedZero}() = complex(UnspecifiedZero()) -# function Base.Complex{UnspecifiedZero}(z::Real) -# return (iszero(z) ? complex(UnspecifiedZero()) : throw(ErrorException)) -# end - -zero(::Type{UnspecifiedZero}) = UnspecifiedZero() -zero(n::UnspecifiedZero) = zero(typeof(n)) - -# This helps handle a lot of basic algebra, like: -# UnspecifiedZero() + 2.3 == 2.3 -convert(::Type{T}, x::UnspecifiedZero) where {T<:Number} = T(zero(T)) - -#convert(::Type{Complex{UnspecifiedZero}}, x::UnspecifiedZero) = complex(x) - -# TODO: Should this be implemented? -#Complex(x::Real, ::UnspecifiedZero) = x - -# This is to help define `float(::UnspecifiedZero) = 0.0`. -# This helps with defining `norm` of `UnallocatedZeros{UnspecifiedZero}`.
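A small sketch of the intended semantics (an assumption on my part: as written, `zero`, `convert`, and the arithmetic below are declared without a `Base.` prefix, so this sketch assumes they extend the corresponding `Base` functions): `UnspecifiedZero` is a placeholder zero whose concrete element type is decided later, so promotion always collapses to the other, concrete type.

using NDTensors.UnspecifiedTypes: UnspecifiedZero
z = UnspecifiedZero()
z + z                                    # UnspecifiedZero(): algebra stays in the placeholder type
promote_type(Float32, UnspecifiedZero)   # Float32: the concrete type wins
convert(Float64, z)                      # 0.0, via convert(::Type{T}, ::UnspecifiedZero)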
-AbstractFloat(::UnspecifiedZero) = zero(AbstractFloat) - -# Basic arithmetic -(::UnspecifiedZero + ::UnspecifiedZero) = UnspecifiedZero() -(::UnspecifiedZero - ::UnspecifiedZero) = UnspecifiedZero() -(::Number * ::UnspecifiedZero) = UnspecifiedZero() -(::UnspecifiedZero * ::Number) = UnspecifiedZero() -(::UnspecifiedZero * ::UnspecifiedZero) = UnspecifiedZero() -(::UnspecifiedZero / ::Number) = UnspecifiedZero() -(::Number / ::UnspecifiedZero) = throw(DivideError()) -(::UnspecifiedZero / ::UnspecifiedZero) = throw(DivideError()) --(::UnspecifiedZero) = UnspecifiedZero() - -Base.promote_type(z::Type{<:UnspecifiedZero}, ElT::Type) = Base.promote_type(ElT, z) - -Base.promote_type(ElT::Type, ::Type{<:UnspecifiedZero}) = ElT -Base.promote_type(::Type{<:UnspecifiedZero}, ::Type{<:UnspecifiedZero}) = UnspecifiedZero -Base.promote_type(ElT::Type, ::Type{<:Complex{<:UnspecifiedZero}}) = Complex{real(ElT)} diff --git a/NDTensors/src/lib/UnspecifiedTypes/test/runtests.jl b/NDTensors/src/lib/UnspecifiedTypes/test/runtests.jl deleted file mode 100644 index e9649c3c50..0000000000 --- a/NDTensors/src/lib/UnspecifiedTypes/test/runtests.jl +++ /dev/null @@ -1,6 +0,0 @@ -@eval module $(gensym()) -using NDTensors.UnspecifiedTypes -using Test: @test, @testset - -@testset "Testing UnspecifiedTypes" begin end -end diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl deleted file mode 100644 index c372667cde..0000000000 --- a/NDTensors/src/linearalgebra/linearalgebra.jl +++ /dev/null @@ -1,483 +0,0 @@ -using .RankFactorization: Spectrum - -# -# Linear Algebra of order 2 NDTensors -# -# Even though DenseTensor{_,2} is strided -# and passable to BLAS/LAPACK, it cannot -# be made <: StridedArray -import .Expose: qr_positive, ql, ql_positive - -# TODO: Generalize this to any `Tensor` type using: -# ```julia -# contract(T1, (1, -1), T2, (-1, 2)) -# ``` -function Base.:*(T1::Tensor{<:Any,2,<:Dense}, T2::Tensor{<:Any,2,<:Dense}) - RM = matrix(T1) * matrix(T2) - indsR = (ind(T1, 1), ind(T2, 2)) - return tensor(Dense(vec(RM)), indsR) -end - -function LinearAlgebra.dot(x::Tensor, y::Tensor) - size(x) == size(y) || throw( - DimensionMismatch( - "dimensions must match in `dot(x::Tensor, y::Tensor)`: `x` has size `$(size(x))` while `y` has size `$(size(y))`.", - ), - ) - labels = ntuple(dim -> -dim, ndims(x)) - return contract(conj(x), labels, y, labels)[] -end - -function LinearAlgebra.exp(T::DenseTensor{ElT,2}) where {ElT<:Union{Real,Complex}} - expTM = exp(matrix(T)) - return tensor(Dense(vec(expTM)), inds(T)) -end - -function LinearAlgebra.exp( - T::Hermitian{ElT,<:DenseTensor{ElT,2}} -) where {ElT<:Union{Real,Complex}} - # exp(::Hermitian/Symmetric) returns Hermitian/Symmetric, - # so extract the parent matrix - expTM = parent(exp(matrix(T))) - return tensor(Dense(vec(expTM)), inds(T)) -end - -function svd_catch_error(A; kwargs...) - USV = try - svd(expose(A); kwargs...) - catch - return nothing - end - return USV -end - -function lapack_svd_error_message(alg) - return "The SVD algorithm `\"$alg\"` has thrown an error,\n" * - "likely because of a convergence failure. You can try\n" * - "other SVD algorithms that may converge better using the\n" * - "`alg` (or `svd_alg` if called through `factorize` or MPS/MPO functionality) keyword argument:\n\n" * - " - \"divide_and_conquer\" is a divide-and-conquer algorithm\n" * - " (LAPACK's `gesdd`).
It is fast, but may lead to some inaccurate\n" * - " singular values for very ill-conditioned matrices.\n" * - " It also may sometimes fail to converge, leading to errors\n" * - " (in which case `\"qr_iteration\"` or `\"recursive\"` can be tried).\n\n" * - " - `\"qr_iteration\"` (LAPACK's `gesvd`) is typically slower \n" * - " than \"divide_and_conquer\", especially for large matrices,\n" * - " but is more accurate for very ill-conditioned matrices \n" * - " compared to `\"divide_and_conquer\"`.\n\n" * - " - `\"recursive\"` is ITensor's custom SVD algorithm. It is very\n" * - " reliable, but may be slow if high precision is needed.\n" * - " To get an `svd` of a matrix `A`, an eigendecomposition of\n" * - " ``A^{\\dagger} A`` is used to compute `U` and then a `qr` of\n" * - " ``A^{\\dagger} U`` is used to compute `V`. This is performed\n" * - " recursively to compute small singular values.\n" * - " - `\"qr_algorithm\"` is a CUDA.jl implemented SVD algorithm using QR.\n" * - " - `\"jacobi_algorithm\"` is a CUDA.jl implemented SVD algorithm.\n\n" * - "Returning `nothing`. For an output `F = svd(A, ...)` you can check if\n" * - "`isnothing(F)` in your code and try a different algorithm.\n\n" * - "To suppress this message in the future, you can wrap the `svd` call in the\n" * - "`@suppress` macro from the `Suppressor` package.\n" -end - -""" - svd(T::DenseTensor{<:Number,2}; kwargs...) - -svd of an order-2 DenseTensor -""" -function svd( - T::DenseTensor{ElT,2,IndsT}; - mindim=nothing, - maxdim=nothing, - cutoff=nothing, - use_absolute_cutoff=nothing, - use_relative_cutoff=nothing, - alg=nothing, - # Only used by BlockSparse svd - min_blockdim=nothing, -) where {ElT,IndsT} - alg = replace_nothing(alg, default_svd_alg(T)) - if alg == "divide_and_conquer" - MUSV = svd_catch_error(matrix(T); alg=LinearAlgebra.DivideAndConquer()) - if isnothing(MUSV) - # If "divide_and_conquer" fails, try "qr_iteration" - alg = "qr_iteration" - MUSV = svd_catch_error(matrix(T); alg=LinearAlgebra.QRIteration()) - if isnothing(MUSV) - # If "qr_iteration" fails, try "recursive" - alg = "recursive" - MUSV = svd_recursive(matrix(T)) - end - end - elseif alg == "qr_iteration" - MUSV = svd_catch_error(matrix(T); alg=LinearAlgebra.QRIteration()) - if isnothing(MUSV) - # If "qr_iteration" fails, try "recursive" - alg = "recursive" - MUSV = svd_recursive(matrix(T)) - end - elseif alg == "recursive" - MUSV = svd_recursive(matrix(T)) - elseif alg == "qr_algorithm" || alg == "jacobi_algorithm" - MUSV = svd_catch_error(matrix(T); alg) - else - error( - "svd algorithm $alg is not currently supported. Please see the documentation for currently supported algorithms.", - ) - end - if isnothing(MUSV) - if any(isnan, expose(T)) - println("SVD failed, the matrix you were trying to SVD contains NaNs.") - else - println(lapack_svd_error_message(alg)) - end - return nothing - end - MU, MS, MV = MUSV - conj!(MV) - #end # @timeit_debug - - P = MS .^ 2 - if any(!isnothing, (maxdim, cutoff)) - P, truncerr, _ = truncate!!( - P; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff - ) - else - truncerr = 0.0 - end - spec = Spectrum(P, truncerr) - dS = length(P) - if dS < length(MS) - MU = expose(MU)[:, 1:dS] - # Fails on some GPU backends like Metal.
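Condensed, the algorithm-selection logic above amounts to the following fallback chain (a sketch, not the exact code path; it assumes `svd_catch_error` and `svd_recursive` from this file are in scope, and note that `svd_recursive` returns a plain `(U, S, V)` tuple rather than an `SVD` factorization object):

using LinearAlgebra: LinearAlgebra
function svd_with_fallbacks(M::AbstractMatrix)
    # Fast but occasionally non-convergent LAPACK gesdd:
    USV = svd_catch_error(M; alg=LinearAlgebra.DivideAndConquer())
    # Slower but more robust LAPACK gesvd:
    isnothing(USV) && (USV = svd_catch_error(M; alg=LinearAlgebra.QRIteration()))
    # Custom recursive algorithm as a last resort:
    isnothing(USV) && (USV = svd_recursive(M))
    return USV
end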
- # resize!(MS, dS) - MS = MS[1:dS] - MV = expose(MV)[:, 1:dS] - end - - # Make the new indices to go onto U and V - u = eltype(IndsT)(dS) - v = eltype(IndsT)(dS) - Uinds = IndsT((ind(T, 1), u)) - Sinds = IndsT((u, v)) - Vinds = IndsT((ind(T, 2), v)) - U = tensor(Dense(vec(MU)), Uinds) - S = tensor(Diag(MS), Sinds) - V = tensor(Dense(vec(MV)), Vinds) - return U, S, V, spec -end - -function LinearAlgebra.eigen( - T::Hermitian{ElT,<:DenseTensor{ElT,2,IndsT}}; - mindim=nothing, - maxdim=nothing, - cutoff=nothing, - use_absolute_cutoff=nothing, - use_relative_cutoff=nothing, -) where {ElT<:Union{Real,Complex},IndsT} - matrixT = matrix(T) - ## TODO Here I am calling parent to ensure that the correct `any` function - ## is invoked for non-cpu matrices - ## TODO use expose here - if any(!isfinite, parent(matrixT)) - throw( - ArgumentError( - "Trying to perform the eigendecomposition of a matrix containing NaNs or Infs" - ), - ) - end - - ### What do we do if DM is full of NaN or Inf? - DM, VM = eigen(expose(matrixT)) - - # Sort by largest to smallest eigenvalues - # TODO: Replace `cpu` with `unwrap_array_type` dispatch. - p = sortperm(cpu(DM); rev=true, by=abs) - DM = DM[p] - VM = VM[:, p] - - if any(!isnothing, (maxdim, cutoff)) - DM, truncerr, _ = truncate!!( - DM; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff - ) - dD = length(DM) - if dD < size(VM, 2) - VM = VM[:, 1:dD] - end - else - dD = length(DM) - truncerr = 0.0 - end - spec = Spectrum(DM, truncerr) - - # Make the new indices to go onto V - l = eltype(IndsT)(dD) - r = eltype(IndsT)(dD) - Vinds = IndsT((dag(ind(T, 2)), dag(r))) - Dinds = IndsT((l, dag(r))) - V = tensor(Dense(vec(VM)), Vinds) - D = tensor(Diag(DM), Dinds) - return D, V, spec -end - -""" - random_unitary(n::Int,m::Int)::Matrix{ComplexF64} - random_unitary(::Type{ElT},n::Int,m::Int)::Matrix{ElT} - -Return a random matrix U of dimensions (n,m) -such that if n >= m, U'*U is the identity, or if -m > n U*U' is the identity. Optionally can pass a numeric -type as the first argument to obtain a matrix of that type. - -Sampling is based on https://arxiv.org/abs/math-ph/0609050 -such that in the case `n==m`, the unitary matrix will be sampled -according to the Haar measure. -""" -function random_unitary(::Type{ElT}, n::Int, m::Int) where {ElT<:Number} - return random_unitary(Random.default_rng(), ElT, n, m) -end - -function random_unitary(rng::AbstractRNG, DataT::Type{<:AbstractArray}, n::Int, m::Int) - ElT = eltype(DataT) - if n < m - return DataT(random_unitary(rng, ElT, m, n)') - end - F = qr(randn(rng, ElT, n, m)) - Q = DataT(F.Q) - # The upper triangle of F.factors - # contains the elements of R. - # Multiply cols of Q by the signs - # that would make the diagonal of R - # non-negative: - for c in 1:size(Q, 2) - Q[:, c] .*= sign(F.factors[c, c]) - end - return Q -end - -function random_unitary(rng::AbstractRNG, ::Type{ElT}, n::Int, m::Int) where {ElT<:Number} - return random_unitary(rng, set_ndims(default_datatype(ElT), 2), n, m) -end - -random_unitary(n::Int, m::Int) = random_unitary(ComplexF64, n, m) - -""" - random_orthog(n::Int,m::Int)::Matrix{Float64} - random_orthog(::Type{ElT},n::Int,m::Int)::Matrix{ElT} - -Return a random, real matrix O of dimensions (n,m) -such that if n >= m, transpose(O)*O is the -identity, or if m > n O*transpose(O) is the -identity. Optionally can pass a real number type -as the first argument to obtain a matrix of that type.
-""" -random_orthog(::Type{ElT}, n::Int, m::Int) where {ElT<:Real} = random_unitary(ElT, n, m) - -random_orthog(n::Int, m::Int) = random_orthog(Float64, n, m) - -function LinearAlgebra.eigen( - T::DenseTensor{ElT,2,IndsT}; - mindim=nothing, - maxdim=nothing, - cutoff=nothing, - use_absolute_cutoff=nothing, - use_relative_cutoff=nothing, -) where {ElT<:Union{Real,Complex},IndsT} - matrixT = matrix(T) - if any(!isfinite, matrixT) - throw( - ArgumentError( - "Trying to perform the eigendecomposition of a matrix containing NaNs or Infs" - ), - ) - end - - DM, VM = eigen(expose(matrixT)) - - # Sort by largest to smallest eigenvalues - #p = sortperm(DM; rev = true) - #DM = DM[p] - #VM = VM[:,p] - - if any(!isnothing, (maxdim, cutoff)) - DM, truncerr, _ = truncate!!( - DM; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff - ) - dD = length(DM) - if dD < size(VM, 2) - VM = VM[:, 1:dD] - end - else - dD = length(DM) - truncerr = 0.0 - end - spec = Spectrum(abs.(DM), truncerr) - - i1, i2 = inds(T) - - # Make the new indices to go onto D and V - l = typeof(i1)(dD) - r = dag(sim(l)) - Dinds = (l, r) - Vinds = (dag(i2), r) - D = complex(tensor(Diag(DM), Dinds)) - V = complex(tensor(Dense(vec(VM)), Vinds)) - return D, V, spec -end - -# LinearAlgebra.qr -function qr(T::DenseTensor{<:Any,2}; positive=false) - qxf = positive ? qr_positive : qr - return qx(qxf, T) -end - -# NDTensors.Expose.ql -function ql(T::DenseTensor{<:Any,2}; positive=false) - qxf = positive ? ql_positive : ql - return qx(qxf, T) -end - -# -# Generic function for qr and ql decomposition of dense matrix. -# The X tensor = R or L. -# -function qx(qx::Function, T::DenseTensor{<:Any,2}) - QM, XM = qx(expose(matrix(T))) - # Be aware that if positive==false, then typeof(QM)=LinearAlgebra.QRCompactWYQ, not Matrix - # It gets converted to matrix below. - # Make the new indices to go onto Q and R - q, r = inds(T) - q = dim(q) < dim(r) ? sim(q) : sim(r) - IndsT = indstype(T) #get the index type - Qinds = IndsT((ind(T, 1), q)) - Xinds = IndsT((q, ind(T, 2))) - QM = convert(typeof(XM), QM) - ## Here I convert QM twice because of an issue in CUDA where convert does not take QM to be a UnifiedBuffer array - QM = convert(typeof(XM), QM) - Q = tensor(Dense(vec(QM)), Qinds) #Q was strided - X = tensor(Dense(vec(XM)), Xinds) - return Q, X -end - -# Version of `sign` that returns one -# if `x == 0`. -function nonzero_sign(x) - iszero(x) && return one(x) - return sign(x) -end - -# -# Just flip signs between Q and R to get all the diagonals of R >=0. -# For rectangular M the indexing for "diagonal" is non-trivial. -# NDTensors.Expose.qr_positive and # NDTensors.Expose.ql_positive -# -""" - qr_positive(M::AbstractMatrix) - -Compute the QR decomposition of a matrix M -such that the diagonal elements of R are -non-negative. Such a QR decomposition of a -matrix is unique. Returns a tuple (Q,R). -""" -function qr_positive(M::AbstractMatrix) - sparseQ, R = qr(M) - Q = convert(typeof(R), sparseQ) - signs = nonzero_sign.(diag(R)) - Q = Q * Diagonal(signs) - R = Diagonal(conj.(signs)) * R - return (Q, R) -end - -using .TypeParameterAccessors: unwrap_array_type -""" - ql_positive(M::AbstractMatrix) - -Compute the QL decomposition of a matrix M -such that the diagonal elements of L are -non-negative. Such a QL decomposition of a -matrix is unique. Returns a tuple (Q,L). -""" -function ql_positive(M::AbstractMatrix) - # TODO: Change to `isgpu`, or better yet rewrite - # in terms of broadcasting and linear algebra - # like `qr_positive`. 
- sparseQ, L = ql(M) - Q = convert(typeof(L), sparseQ) - nr, nc = size(L) - dc = nc > nr ? nc - nr : 0 #diag is shifted over by dc if nc>nr - for c in 1:(nc - dc) - if L[c, c + dc] != 0.0 #sign(0.0)==0.0 so we don't want to zero out a column of Q. - sign_Lc = sign(L[c, c + dc]) - if c <= nr && !isone(sign_Lc) - L[c, 1:(c + dc)] *= sign_Lc #only flip the non-zero portion of the column. - Q[:, c] *= conj(sign_Lc) - end - end - end - return (Q, L) -end - -# -# LAPACK replaces A with Q & R carefully packed together. So here we just copy A -# before letting LAPACK overwrite it. -# -function ql(A::AbstractMatrix) - Base.require_one_based_indexing(A) - T = eltype(A) - AA = similar(A, LinearAlgebra._qreltype(T), size(A)) - copyto!(expose(AA), expose(A)) - Q, L = ql!(AA) - return (Q, L) -end -# -# This is where the low-level call to LAPACK actually occurs. Most of the work is -# about unpacking Q and L from the A matrix. -# -function ql!(A::StridedMatrix{<:LAPACK.BlasFloat}) - ## TODO: is this really necessary here? We could create an Expose function if - ## we need this function on CUDA/GPU - if iscu(A) - throw("Error: ql is not implemented in CUDA.jl") - end - tau = Base.similar(A, min(size(A)...)) - x = LAPACK.geqlf!(A, tau) - #save L from the lower portion of A, before orgql! mangles it! - nr, nc = size(A) - mn = min(nr, nc) - L = similar(A, (mn, nc)) - for r in 1:mn - for c in 1:(r + nc - mn) - L[r, c] = A[r + nr - mn, c] - end - for c in (r + 1 + nc - mn):nc - L[r, c] = 0.0 - end - end - # Now we need to shift the orthogonal vectors from the right side of Q over to the left side, before calling orgql! - if (mn < nc) - for r in 1:nr - for c in 1:mn - A[r, c] = A[r, c + nc - mn] - end - end - for r in 1:nr - A = A[:, 1:mn] #whack the extra columns in A. - end - end - LAPACK.orgql!(A, tau) - return A, L -end - -# TODO: support alg keyword argument to choose the svd algorithm -function polar(T::DenseTensor{ElT,2,IndsT}) where {ElT,IndsT} - QM, RM = polar(matrix(T)) - dim = size(QM, 2) - # Make the new indices to go onto Q and R - q = eltype(IndsT)(dim) - # TODO: use push/pushfirst instead of a constructor - # call here - Qinds = IndsT((ind(T, 1), q)) - Rinds = IndsT((q, ind(T, 2))) - Q = tensor(Dense(vec(QM)), Qinds) - R = tensor(Dense(vec(RM)), Rinds) - return Q, R -end diff --git a/NDTensors/src/linearalgebra/svd.jl b/NDTensors/src/linearalgebra/svd.jl deleted file mode 100644 index 5798c46657..0000000000 --- a/NDTensors/src/linearalgebra/svd.jl +++ /dev/null @@ -1,71 +0,0 @@ -using .TypeParameterAccessors: unwrap_array_type -# The state of the `svd_recursive` algorithm. -function svd_recursive_state(S::AbstractArray, thresh::Float64) - return svd_recursive_state(unwrap_array_type(S), S, thresh) -end - -# CPU version. -function svd_recursive_state(::Type{<:Array}, S::AbstractArray, thresh::Float64) - N = length(S) - (N <= 1 || thresh < 0.0) && return (true, 1) - S1t = S[1] * thresh - start = 2 - while start <= N - (S[start] < S1t) && break - start += 1 - end - if start >= N - return (true, N) - end - return (false, start) -end - -# Convert to CPU to avoid slow scalar indexing -# on GPU.
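The `polar` functions (the tensor method above and the matrix method `polar(M::AbstractMatrix)` defined at the end of this file) factor a matrix as M = Q * P, with Q having orthonormal columns and P positive semidefinite. A usage sketch for the matrix method:

using LinearAlgebra: I
M = randn(5, 5)
Q, P = polar(M)
Q * P ≈ M      # the reconstruction holds
Q' * Q ≈ I(5)  # Q has orthonormal columns
P ≈ P'         # P is (numerically) symmetric positive semidefinite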
-function svd_recursive_state(::Type{<:AbstractArray}, S::AbstractArray, thresh::Float64) - return svd_recursive_state(Array, cpu(S), thresh) -end - -function svd_recursive(M::AbstractMatrix; thresh::Float64=1E-3, north_pass::Int=2) - Mr, Mc = size(M) - if Mr > Mc - V, S, U = svd_recursive(transpose(M)) - conj!(U) - conj!(V) - return U, S, V - end - - #rho = BLAS.gemm('N','T',-1.0,M,M) #negative to sort eigenvalues greatest to smallest - rho = -M * M' #negative to sort eigenvalues in decreasing order - D, U = eigen(expose(Hermitian(rho))) - - Nd = length(D) - - V = M' * U - - V, R = qr_positive(expose(V)) - D[1:Nd] = diag(R)[1:Nd] - - (done, start) = svd_recursive_state(D, thresh) - - done && return U, D, V - - u = view(U, :, start:Nd) - v = view(V, :, start:Nd) - - b = u' * (M * v) - bu, bd, bv = svd_recursive(b; thresh=thresh, north_pass=north_pass) - - u .= u * bu - v .= v * bv - view(D, start:Nd) .= bd - - return U, D, V -end - -# TODO: maybe move to another location? -# Include options for other svd algorithms -function polar(M::AbstractMatrix) - U, S, V = svd(expose(M)) # calls LinearAlgebra.svd(_) - return U * V', V * Diagonal(S) * V' -end diff --git a/NDTensors/src/linearalgebra/symmetric.jl b/NDTensors/src/linearalgebra/symmetric.jl deleted file mode 100644 index 51ddb7b854..0000000000 --- a/NDTensors/src/linearalgebra/symmetric.jl +++ /dev/null @@ -1,30 +0,0 @@ - -dims(H::Hermitian{<:Number,<:Tensor}) = dims(parent(H)) - -blockdims(H::Hermitian{<:Number,<:Tensor}, b) = blockdims(parent(H), b) - -dim(H::Hermitian{<:Number,<:Tensor}, i::Int) = dim(parent(H), i) - -matrix(H::Hermitian{<:Number,<:Tensor}) = Hermitian(matrix(parent(H))) - -inds(H::Hermitian{<:Number,<:Tensor}) = inds(parent(H)) - -ind(H::Hermitian{<:Number,<:Tensor}, i::Int) = ind(parent(H), i) - -nnzblocks(H::Hermitian{<:Number,<:Tensor}) = nnzblocks(parent(H)) - -nzblocks(H::Hermitian{<:Number,<:Tensor}) = nzblocks(parent(H)) - -eachnzblock(H::Hermitian{<:Number,<:Tensor}) = eachnzblock(parent(H)) - -eachblock(H::Hermitian{<:Number,<:Tensor}) = eachblock(parent(H)) - -eachdiagblock(H::Hermitian{<:Number,<:Tensor}) = eachdiagblock(parent(H)) - -nblocks(H::Hermitian{<:Number,<:Tensor}) = nblocks(parent(H)) - -function blockview(H::Hermitian{<:Number,<:Tensor}, block) - return _blockview(H, blockview(parent(H), block)) -end -_blockview(::Hermitian{<:Number,<:Tensor}, blockviewH) = Hermitian(blockviewH) -_blockview(::Hermitian{<:Number,<:Tensor}, ::Nothing) = nothing diff --git a/NDTensors/src/nodata.jl b/NDTensors/src/nodata.jl deleted file mode 100644 index 324fdf8811..0000000000 --- a/NDTensors/src/nodata.jl +++ /dev/null @@ -1,6 +0,0 @@ -# Denotes when a storage type has no data -struct NoData end - -size(::NoData) = (0,) -length(::NoData) = 0 -fill!(::NoData, ::EmptyNumber) = NoData() diff --git a/NDTensors/src/tensor/set_types.jl b/NDTensors/src/tensor/set_types.jl deleted file mode 100644 index e17683ed17..0000000000 --- a/NDTensors/src/tensor/set_types.jl +++ /dev/null @@ -1,31 +0,0 @@ -using .TypeParameterAccessors: TypeParameterAccessors, Position, parenttype -function TypeParameterAccessors.set_ndims(arraytype::Type{<:Tensor}, ndims) - # TODO: Implement something like: - # ```julia - # return set_storagetype(arraytype, set_ndims(storagetype(arraytype), ndims)) - # ``` - # However, we will also need to define `set_ndims(indstype(arraytype), ndims)` - # and use `set_indstype(arraytype, set_ndims(indstype(arraytype), ndims))`. 
- return error( - "Setting the number of dimensions of the array type `$arraytype` (to `$ndims`) is not currently defined.", - ) -end - -function set_storagetype(tensortype::Type{<:Tensor}, storagetype) - return Tensor{eltype(tensortype),ndims(tensortype),storagetype,indstype(tensortype)} -end - -# TODO: Modify the `storagetype` according to `inds`, such as the dimensions? -# TODO: Make a version that accepts `indstype::Type`? -function TypeParameterAccessors.set_indstype(tensortype::Type{<:Tensor}, inds::Tuple) - return Tensor{eltype(tensortype),length(inds),storagetype(tensortype),typeof(inds)} -end - -TypeParameterAccessors.parenttype(tensortype::Type{<:Tensor}) = storagetype(tensortype) -function TypeParameterAccessors.parenttype(storagetype::Type{<:TensorStorage}) - return datatype(storagetype) -end - -function TypeParameterAccessors.position(::Type{<:Tensor}, ::typeof(parenttype)) - return Position(3) -end diff --git a/NDTensors/src/tensor/similar.jl b/NDTensors/src/tensor/similar.jl deleted file mode 100644 index 266500cf17..0000000000 --- a/NDTensors/src/tensor/similar.jl +++ /dev/null @@ -1,71 +0,0 @@ -using .TypeParameterAccessors: TypeParameterAccessors, set_indstype, similartype - -# NDTensors.similar -similar(tensor::Tensor) = setstorage(tensor, similar(storage(tensor))) - -# NDTensors.similar -similar(tensor::Tensor, eltype::Type) = setstorage(tensor, similar(storage(tensor), eltype)) - -# NDTensors.similar -function similar(tensor::Tensor, dims::Tuple) - return setinds(setstorage(tensor, similar(storage(tensor), dims)), dims) -end - -# NDTensors.similar -function similar(tensor::Tensor, dims::Dims) - return setinds(setstorage(tensor, similar(storage(tensor), dims)), dims) -end - -# NDTensors.similar -function similar(tensortype::Type{<:Tensor}, dims::Tuple) - # TODO: Is there a better constructor pattern for this? - # Maybe use `setstorage(::Type{<:Tensor}, ...)` and - # `setinds(::Type{<:Tensor}, ...)`? - return similartype(tensortype, dims)( - AllowAlias(), similar(storagetype(tensortype), dims), dims - ) -end - -# NDTensors.similar -function similar(tensortype::Type{<:Tensor}, dims::Dims) - # TODO: Is there a better constructor pattern for this? - # Maybe use `setstorage(::Type{<:Tensor}, ...)` and - # `setinds(::Type{<:Tensor}, ...)`?
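In practice, the `similar`/`similartype` machinery above lets one allocate a compatible tensor directly from an existing one. A small sketch (assuming the dense `Dense` storage type and the `tensor` constructor from this package):

using NDTensors: NDTensors, Dense, tensor
t = tensor(Dense(randn(6)), (2, 3))
t2 = NDTensors.similar(t, (4, 5))   # new uninitialized Dense storage, inds (4, 5)
size(t2) == (4, 5)                  # true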
- return similartype(tensortype, dims)( - AllowAlias(), similar(storagetype(tensortype), dims), dims - ) -end - -# NDTensors.similar -function similar(tensor::Tensor, eltype::Type, dims::Tuple) - return setinds(setstorage(tensor, similar(storage(tensor), eltype, dims)), dims) -end - -# NDTensors.similar -function similar(tensor::Tensor, eltype::Type, dims::Dims) - return setinds(setstorage(tensor, similar(storage(tensor), eltype, dims)), dims) -end - -# Base overloads -Base.similar(tensor::Tensor) = NDTensors.similar(tensor) -Base.similar(tensor::Tensor, eltype::Type) = NDTensors.similar(tensor, eltype) -Base.similar(tensor::Tensor, dims::Tuple) = NDTensors.similar(tensor, dims) -Base.similar(tensor::Tensor, dims::Dims) = NDTensors.similar(tensor, dims) -function Base.similar(tensor::Tensor, eltype::Type, dims::Tuple) - return NDTensors.similar(tensor, eltype, dims) -end -function Base.similar(tensor::Tensor, eltype::Type, dims::Dims) - return NDTensors.similar(tensor, eltype, dims) -end - -function TypeParameterAccessors.similartype(tensortype::Type{<:Tensor}, eltype::Type) - return set_storagetype(tensortype, similartype(storagetype(tensortype), eltype)) -end - -function TypeParameterAccessors.similartype(tensortype::Type{<:Tensor}, dims::Tuple) - tensortype_new_inds = set_indstype(tensortype, dims) - # Need to pass `dims` in case that information is needed to make a storage type, - # for example `BlockSparse` needs the number of dimensions. - storagetype_new_inds = similartype(storagetype(tensortype_new_inds), dims) - return set_storagetype(tensortype_new_inds, storagetype_new_inds) -end diff --git a/NDTensors/src/tensor/tensor.jl b/NDTensors/src/tensor/tensor.jl deleted file mode 100644 index 45dd96fa43..0000000000 --- a/NDTensors/src/tensor/tensor.jl +++ /dev/null @@ -1,497 +0,0 @@ -using SparseArrays: SparseArrays, nnz - -""" -Tensor{StoreT,IndsT} - -A plain old tensor (with order independent -interface and no assumption of labels) -""" -struct Tensor{ElT,N,StoreT,IndsT} <: AbstractArray{ElT,N} - storage::StoreT - inds::IndsT - - """ - Tensor{ElT,N,StoreT,IndsT}(inds, store::StorageType) - - Internal constructor for creating a Tensor from the - storage and indices. - - The Tensor is a view of the tensor storage. - - For normal usage, use the Tensor(store::TensorStorage, inds) - and tensor(store::TensorStorage, inds) constructors. 
- """ - function Tensor{ElT,N,StoreT,IndsT}( - ::AllowAlias, storage, inds::Tuple - ) where {ElT,N,StoreT,IndsT} - @assert ElT == eltype(StoreT) - @assert length(inds) == N - return new{ElT,N,StoreT,IndsT}(storage, inds) - end -end - -## Tensor constructors - -function Tensor{ElT,N,StoreT,IndsT}( - ::NeverAlias, storage::TensorStorage, inds -) where {ElT,N,StoreT<:TensorStorage,IndsT} - return Tensor{ElT,N,StoreT,IndsT}(AllowAlias(), copy(storage), inds) -end - -# Constructs with undef -function Tensor{ElT,N,StoreT,IndsT}( - ::UndefInitializer, inds::Tuple -) where {ElT,N,StoreT<:TensorStorage,IndsT} - return Tensor{ElT,N,StoreT,IndsT}(AllowAlias(), similar(StoreT, inds), inds) -end - -# constructs with the value x -function Tensor{ElT,N,StoreT,IndsT}( - x::S, inds::Tuple -) where {S,ElT,N,StoreT<:TensorStorage,IndsT} - return Tensor{ElT,N,StoreT,IndsT}(AllowAlias(), fill!(similar(StoreT, inds), x), inds) -end - -# constructs with zeros -function Tensor{ElT,N,StoreT,IndsT}(inds::Tuple) where {ElT,N,StoreT<:TensorStorage,IndsT} - return Tensor{ElT,N,StoreT,IndsT}(AllowAlias(), StoreT(dim(inds)), inds) -end - -""" - Tensor(storage::TensorStorage, inds) - -Construct a Tensor from a tensor storage and indices. -The Tensor holds a copy of the storage data. - -The indices `inds` will be converted to a `Tuple`. -""" -function Tensor(as::AliasStyle, storage::TensorStorage, inds::Tuple) - return Tensor{eltype(storage),length(inds),typeof(storage),typeof(inds)}( - as, storage, inds - ) -end - -function Tensor(as::NeverAlias, storage::TensorStorage, inds::Tuple) - return Tensor(AllowAlias(), copy(storage), inds) -end - -# Automatically convert to Tuple if the indices are not a Tuple -# already (like a Vector). In the future this may be lifted -# to allow for very large tensor orders in which case Tuple -# operations may become too slow. -function Tensor(as::AliasStyle, storage, inds) - return Tensor(as, storage, Tuple(inds)) -end - -tensor(args...; kwargs...) = Tensor(AllowAlias(), args...; kwargs...) -Tensor(storage::TensorStorage, inds::Tuple) = Tensor(NeverAlias(), storage, inds) - -function Tensor(eltype::Type, inds::Tuple) - return Tensor(AllowAlias(), default_storagetype(eltype, inds)(dim(inds)), inds) -end - -Tensor(inds::Tuple) = Tensor(default_eltype(), inds) - -function Tensor(eltype::Type, ::UndefInitializer, inds::Tuple) - return Tensor( - AllowAlias(), default_storagetype(default_datatype(eltype), inds)(undef, inds), inds - ) -end - -Tensor(::UndefInitializer, inds::Tuple) = Tensor(default_eltype(), undef, inds) - -function Tensor(data::AbstractArray{<:Any,1}, inds::Tuple) - return Tensor(AllowAlias(), default_storagetype(typeof(data), inds)(data), inds) -end - -function Tensor(data::AbstractArray{<:Any,N}, inds::Tuple) where {N} - return Tensor(vec(data), inds) -end - -function Tensor(datatype::Type{<:AbstractArray}, inds::Tuple) - return Tensor(generic_zeros(datatype, dim(inds)), inds) -end - -## End Tensor constructors - -## Random Tensor - -## TODO make something like this work. 
-# function randomTensor(storeT::Type{<:TensorStorage}, inds::Tuple) -# return tensor(generic_randn(storeT, dim(inds)), inds) -# end - -function randomTensor(::Type{ElT}, inds::Tuple) where {ElT} - return tensor(generic_randn(default_storagetype(default_datatype(ElT)), dim(inds)), inds) -end - -randomTensor(inds::Tuple) = randomDenseTensor(default_eltype(), inds) - -function randomTensor(DataT::Type{<:AbstractArray}, inds::Tuple) - return tensor(generic_randn(default_storagetype(DataT), dim(inds)), inds) -end - -function randomTensor(StoreT::Type{<:TensorStorage}, inds::Tuple) - return tensor(generic_randn(StoreT, dim(inds)), inds) -end -## End Random Tensor - -Base.ndims(::Type{<:Tensor{<:Any,N}}) where {N} = N - -# Like `Base.to_shape` but more general, can return -# `Index`, etc. Customize for an array/tensor -# with custom index types. -# NDTensors.to_shape -function to_shape(arraytype::Type{<:Tensor}, shape::Tuple) - return shape -end - -# Allow the storage and indices to be input in opposite ordering -function (tensortype::Type{<:Tensor})(as::AliasStyle, inds, storage::TensorStorage) - return tensortype(as, storage, inds) -end - -storage(T::Tensor) = T.storage - -# TODO: deprecate -store(T::Tensor) = storage(T) - -data(T::Tensor) = data(storage(T)) - -datatype(T::Tensor) = datatype(storage(T)) -datatype(tensortype::Type{<:Tensor}) = datatype(storagetype(tensortype)) - -indstype(::Type{<:Tensor{<:Any,<:Any,<:Any,IndsT}}) where {IndsT} = IndsT -indstype(T::Tensor) = indstype(typeof(T)) - -storagetype(::Type{<:Tensor{<:Any,<:Any,StoreT}}) where {StoreT} = StoreT -storagetype(T::Tensor) = storagetype(typeof(T)) - -# TODO: deprecate -storetype(args...) = storagetype(args...) - -inds(T::Tensor) = T.inds - -ind(T::Tensor, j::Integer) = inds(T)[j] - -eachindex(T::Tensor) = CartesianIndices(dims(inds(T))) - -eachblock(T::Tensor) = eachblock(inds(T)) - -eachdiagblock(T::Tensor) = eachdiagblock(inds(T)) - -eltype(::Tensor{ElT}) where {ElT} = ElT -scalartype(T::Tensor) = eltype(T) - -strides(T::Tensor) = dim_to_strides(inds(T)) - -setstorage(T, nstore) = tensor(nstore, inds(T)) - -setinds(T, ninds) = tensor(storage(T), ninds) - -# -# Generic Tensor functions -# - -size(T::Tensor) = dims(T) -size(T::Tensor, i::Int) = dim(T, i) - -# Needed for passing Tensor{T,2} to BLAS/LAPACK -# TODO: maybe this should only be for DenseTensor? -function unsafe_convert(::Type{Ptr{ElT}}, T::Tensor{ElT}) where {ElT} - return unsafe_convert(Ptr{ElT}, storage(T)) -end - -copy(T::Tensor) = setstorage(T, copy(storage(T))) - -copyto!(R::Tensor, T::Tensor) = (copyto!(storage(R), storage(T)); R) - -complex(T::Tensor) = setstorage(T, complex(storage(T))) - -real(T::Tensor) = setstorage(T, real(storage(T))) - -imag(T::Tensor) = setstorage(T, imag(storage(T))) - -function Base.map(f, t1::Tensor, t_tail::Tensor...; kwargs...) - elt = mapreduce(eltype, promote_type, (t1, t_tail...)) - if !iszero(f(zero(elt))) - # TODO: Do a better job of preserving the storage type, if possible. - return tensor(Dense(map(f, array(t1), array.(t_tail)...; kwargs...)), inds(t1)) - end - return setstorage(t1, map(f, storage(t1), storage.(t_tail)...; kwargs...)) -end - -function Base.mapreduce(f, op, t1::Tensor, t_tail::Tensor...; kwargs...) - elt = mapreduce(eltype, promote_type, (t1, t_tail...)) - if !iszero(f(zero(elt))) - return mapreduce(f, op, array(t1), array.(t_tail)...; kwargs...) - end - return mapreduce(f, op, storage(t1), storage.(t_tail)...; kwargs...) 
-end - -# -# Necessary to overload since the generic fallbacks are -# slow -# - -norm(T::Tensor) = norm(storage(T)) - -conj(as::AliasStyle, T::Tensor) = setstorage(T, conj(as, storage(T))) -conj(T::Tensor) = conj(AllowAlias(), T) - -randn!!(T::Tensor) = randn!!(Random.default_rng(), T) -randn!!(rng::AbstractRNG, T::Tensor) = (randn!(rng, T); T) -Random.randn!(T::Tensor) = randn!(Random.default_rng(), T) -Random.randn!(rng::AbstractRNG, T::Tensor) = (randn!(rng, storage(T)); T) - -LinearAlgebra.rmul!(T::Tensor, α::Number) = (rmul!(storage(T), α); T) -scale!(T::Tensor, α::Number) = rmul!(storage(T), α) - -fill!!(T::Tensor, α::Number) = fill!(T, α) -fill!(T::Tensor, α::Number) = (fill!(storage(T), α); T) - --(T::Tensor) = setstorage(T, -storage(T)) - -function convert( - ::Type{<:Tensor{<:Number,N,StoreR,Inds}}, T::Tensor{<:Number,N,<:Any,Inds} -) where {N,Inds,StoreR} - return setstorage(T, convert(StoreR, storage(T))) -end - -function zeros(TensorT::Type{<:Tensor{ElT,N,StoreT}}, inds) where {ElT,N,StoreT} - return error("zeros(::Type{$TensorT}, inds) not implemented yet") -end - -function promote_rule( - ::Type{<:Tensor{ElT1,N1,StoreT1,IndsT1}}, ::Type{<:Tensor{ElT2,N2,StoreT2,IndsT2}} -) where {ElT1,ElT2,N1,N2,StoreT1,StoreT2,IndsT1,IndsT2} - StoreR = promote_type(StoreT1, StoreT2) - ElR = eltype(StoreR) - return Tensor{ElR,N3,StoreR,IndsR} where {N3,IndsR} -end - -function promote_rule( - ::Type{<:Tensor{ElT1,N,StoreT1,Inds}}, ::Type{<:Tensor{ElT2,N,StoreT2,Inds}} -) where {ElT1,ElT2,N,StoreT1,StoreT2,Inds} - StoreR = promote_type(StoreT1, StoreT2) - ElR = eltype(StoreR) - return Tensor{ElR,N,StoreR,Inds} -end - -# Convert the tensor type to the closest dense -# type -function dense(::Type{<:Tensor{ElT,NT,StoreT,IndsT}}) where {ElT,NT,StoreT,IndsT} - return Tensor{ElT,NT,dense(StoreT),IndsT} -end - -dense(T::Tensor) = setstorage(T, dense(storage(T))) - -# Convert to Array, avoiding copying if possible -array(T::Tensor) = array(dense(T)) -matrix(T::Tensor{<:Number,2}) = array(T) -vector(T::Tensor{<:Number,1}) = array(T) - -array(T::Transpose{<:Any,<:Tensor}) = transpose(array(transpose(T))) -matrix(T::Transpose{<:Any,<:Tensor}) = transpose(array(transpose(T))) - -# -# Helper functions for BlockSparse-type storage -# - -""" -nzblocks(T::Tensor) - -Return a vector of the non-zero blocks of the BlockSparseTensor. -""" -nzblocks(T::Tensor) = nzblocks(storage(T)) - -eachnzblock(T::Tensor) = eachnzblock(storage(T)) - -blockoffsets(T::Tensor) = blockoffsets(storage(T)) -nnzblocks(T::Tensor) = nnzblocks(storage(T)) -SparseArrays.nnz(T::Tensor) = nnz(storage(T)) -nblocks(T::Tensor) = nblocks(inds(T)) -blockdims(T::Tensor, block) = blockdims(inds(T), block) -blockdim(T::Tensor, block) = blockdim(inds(T), block) - -""" -offset(T::Tensor, block::Block) - -Get the linear offset in the data storage for the specified block. -If the specified block is not non-zero structurally, return nothing. - -offset(T::Tensor,pos::Int) - -Get the offset of the block at position pos -in the block-offsets list. 
-""" -offset(T::Tensor, block) = offset(storage(T), block) - -""" -isblocknz(T::Tensor, - block::Block) - -Check if the specified block is non-zero -""" -isblocknz(T::Tensor, block) = isblocknz(storage(T), block) - -function blockstart(T::Tensor{<:Number,N}, block) where {N} - start_index = @MVector ones(Int, N) - for j in 1:N - ind_j = ind(T, j) - for block_j in 1:(block[j] - 1) - start_index[j] += blockdim(ind_j, block_j) - end - end - return Tuple(start_index) -end - -function blockend(T::Tensor{<:Number,N}, block) where {N} - end_index = @MVector zeros(Int, N) - for j in 1:N - ind_j = ind(T, j) - for block_j in 1:block[j] - end_index[j] += blockdim(ind_j, block_j) - end - end - return Tuple(end_index) -end - -# -# Some generic getindex and setindex! functionality -# - -@propagate_inbounds @inline setindex!!(T::Tensor, x, I...) = setindex!(T, x, I...) - -insertblock!!(T::Tensor, block) = insertblock!(T, block) - -function tensor_isequal(x, y) - # TODO: Use a reduction to avoid intermediates. - # This doesn't work right now because `mapreduce` - # on `Tensor`s is limited to functions that preserve - # zeros. - # return mapreduce(==, ==, x, y) - - # TODO: Use `x - y` instead of `map(-, x, y)`. - # `x - y` calls `x .- y` and broadcasting isn't - # defined properly for sparse Tensor storage - # like `Diag` and `BlockSparse`. - return iszero(norm(map(-, x, y))) -end - -function Base.:(==)(x::Tensor, y::Tensor) - return tensor_isequal(x, y) -end - -function Base.:(==)(x::AbstractArray, y::Tensor) - return array(x) == array(y) -end -function Base.:(==)(x::Tensor, y::AbstractArray) - return array(x) == array(y) -end - -function Base.isequal(x::Tensor, y::Tensor) - return tensor_isequal(x, y) -end - -function Base.isequal(x::AbstractArray, y::Tensor) - return isequal(array(x), array(y)) -end -function Base.isequal(x::Tensor, y::AbstractArray) - return isequal(array(x), array(y)) -end - -""" -getdiagindex - -Get the specified value on the diagonal -""" -function getdiagindex(T::Tensor{<:Number,N}, ind::Int) where {N} - return getindex(T, CartesianIndex(ntuple(_ -> ind, Val(N)))) -end - -using .Expose: Exposed, expose, unexpose -# TODO: add support for off-diagonals, return -# block sparse vector instead of dense. -diag(tensor::Tensor) = diag(expose(tensor)) - -function diag(ETensor::Exposed) - tensor = unexpose(ETensor) - ## d = NDTensors.similar(T, ElT, (diaglength(T),)) - tensordiag = NDTensors.similar( - dense(typeof(tensor)), eltype(tensor), (diaglength(tensor),) - ) - array(tensordiag) .= diagview(tensor) - return tensordiag -end - -""" -setdiagindex! 
- -Set the specified value on the diagonal -""" -function setdiagindex!(T::Tensor{<:Number,N}, val, ind::Int) where {N} - setindex!(T, val, CartesianIndex(ntuple(_ -> ind, Val(N)))) - return T -end - -function map_diag!(f::Function, t_dest::Tensor, t_src::Tensor) - map_diag!(f, expose(t_dest), expose(t_src)) - return t_dest -end -function map_diag!(f::Function, exposed_t_dest::Exposed, exposed_t_src::Exposed) - diagview(unexpose(exposed_t_dest)) .= f.(diagview(unexpose(exposed_t_src))) - return unexpose(exposed_t_dest) -end - -map_diag(f::Function, t::Tensor) = map_diag(f, expose(t)) -function map_diag(f::Function, exposed_t::Exposed) - t_dest = copy(exposed_t) - map_diag!(f, expose(t_dest), exposed_t) - return t_dest -end - -# -# Some generic contraction functionality -# - -function zero_contraction_output( - T1::TensorT1, T2::TensorT2, indsR::IndsR -) where {TensorT1<:Tensor,TensorT2<:Tensor,IndsR} - return zeros(contraction_output_type(TensorT1, TensorT2, indsR), indsR) -end - -# -# Broadcasting -# - -BroadcastStyle(::Type{T}) where {T<:Tensor} = Broadcast.ArrayStyle{T}() - -function Base.similar( - bc::Broadcast.Broadcasted{Broadcast.ArrayStyle{T}}, ::Type{ElT} -) where {T<:Tensor,ElT} - A = find_tensor(bc) - return NDTensors.similar(A, ElT) -end - -"`A = find_tensor(As)` returns the first Tensor among the arguments." -find_tensor(bc::Broadcast.Broadcasted) = find_tensor(bc.args) -find_tensor(args::Tuple) = find_tensor(find_tensor(args[1]), Base.tail(args)) -find_tensor(x) = x -find_tensor(a::Tensor, rest) = a -find_tensor(::Any, rest) = find_tensor(rest) - -function summary(io::IO, T::Tensor) - for (dim, ind) in enumerate(inds(T)) - println(io, "Dim $dim: ", ind) - end - println(io, typeof(storage(T))) - return println(io, " ", Base.dims2string(dims(T))) -end - -# -# Printing -# - -print_tensor(io::IO, T::Tensor) = Base.print_array(io, expose(T)) -print_tensor(io::IO, T::Tensor{<:Number,1}) = Base.print_array(io, reshape(T, (dim(T), 1))) diff --git a/NDTensors/src/tensoroperations/contraction_logic.jl b/NDTensors/src/tensoroperations/contraction_logic.jl deleted file mode 100644 index 0844d3813d..0000000000 --- a/NDTensors/src/tensoroperations/contraction_logic.jl +++ /dev/null @@ -1,648 +0,0 @@ - -const Labels{N} = NTuple{N,Int} - -# Automatically determine the output labels given -# input labels of a contraction -function contract_labels(T1labels::Labels{N1}, T2labels::Labels{N2}) where {N1,N2} - ncont = 0 - for i in T1labels - i < 0 && (ncont += 1) - end - NR = N1 + N2 - 2 * ncont - ValNR = Val{NR} - return contract_labels(ValNR, T1labels, T2labels) -end - -function contract_labels( - ::Type{Val{NR}}, T1labels::Labels{N1}, T2labels::Labels{N2} -) where {NR,N1,N2} - Rlabels = MVector{NR,Int}(undef) - u = 1 - # TODO: use Rlabels, don't assume ncon convention - for i in 1:N1 - if T1labels[i] > 0 - @inbounds Rlabels[u] = T1labels[i] - u += 1 - end - end - for i in 1:N2 - if T2labels[i] > 0 - @inbounds Rlabels[u] = T2labels[i] - u += 1 - end - end - return Labels{NR}(Rlabels) -end - -function _contract_inds!( - Ris, T1is, T1labels::Labels{N1}, T2is, T2labels::Labels{N2}, Rlabels::Labels{NR} -) where {N1,N2,NR} - for n in 1:NR - Rlabel = @inbounds Rlabels[n] - found = false - for n1 in 1:N1 - if Rlabel == @inbounds T1labels[n1] - @inbounds Ris[n] = @inbounds T1is[n1] - found = true - break - end - end - if !found - for n2 in 1:N2 - if Rlabel == @inbounds T2labels[n2] - @inbounds Ris[n] = @inbounds T2is[n2] - break - end - end - end - end - return nothing -end - -# Old version that 
doesn't take into account Rlabels
-#function _contract_inds!(Ris,
-# T1is,
-# T1labels::Labels{N1},
-# T2is,
-# T2labels::Labels{N2},
-# Rlabels::Labels{NR}) where {N1,N2,NR}
-# ncont = 0
-# for i in T1labels
-# i < 0 && (ncont += 1)
-# end
-# IndT = promote_type(eltype(T1is), eltype(T2is))
-# u = 1
-# # TODO: use Rlabels, don't assume ncon convention
-# for i1 ∈ 1:N1
-# if T1labels[i1] > 0
-# Ris[u] = T1is[i1]
-# u += 1
-# else
-# # This is to check that T1is and T2is
-# # can contract
-# i2 = findfirst(==(T1labels[i1]),T2labels)
-# dir(T1is[i1]) == -dir(T2is[i2]) || error("Attempting to contract index:\n\n$(T1is[i1])\nwith index:\n\n$(T2is[i2])\nIndices must have opposite directions to contract.")
-# end
-# end
-# for i2 ∈ 1:N2
-# if T2labels[i2] > 0
-# Ris[u] = T2is[i2]
-# u += 1
-# end
-# end
-# return nothing
-#end
-
-function contract_inds(T1is, T1labels::Labels{0}, T2is, T2labels::Labels{0}, Rlabels)
- return ()
-end
-
-# isbitstype that returns a Val for dispatch
-isbitsval(T) = Val(isbitstype(T))
-
-function contract_inds(T1is, T1labels, T2is, T2labels, Rlabels)
- IndT = promote_type(eltype(T1is), eltype(T2is))
- return _contract_inds(isbitsval(IndT), IndT, T1is, T1labels, T2is, T2labels, Rlabels)
-end
-
-# isbits
-function _contract_inds(::Val{true}, IndT, T1is, T1labels, T2is, T2labels, Rlabels)
- Ris = MVector{length(Rlabels),IndT}(undef)
- _contract_inds!(Ris, T1is, T1labels, T2is, T2labels, Rlabels)
- return Tuple(Ris)
-end
-
-# !isbits
-function _contract_inds(::Val{false}, IndT, T1is, T1labels, T2is, T2labels, Rlabels)
- Ris = SizedVector{length(Rlabels),IndT}(undef)
- _contract_inds!(Ris, T1is, T1labels, T2is, T2labels, Rlabels)
- return Tuple(Ris)
-end
-
-mutable struct ContractionProperties{NA,NB,NC}
- ai::NTuple{NA,Int}
- bi::NTuple{NB,Int}
- ci::NTuple{NC,Int}
- nactiveA::Int
- nactiveB::Int
- nactiveC::Int
- AtoB::NTuple{NA,Int}
- AtoC::NTuple{NA,Int}
- BtoC::NTuple{NB,Int}
- permuteA::Bool
- permuteB::Bool
- permuteC::Bool
- dleft::Int
- dmid::Int
- dright::Int
- ncont::Int
- Acstart::Int
- Bcstart::Int
- Austart::Int
- Bustart::Int
- PA::NTuple{NA,Int}
- PB::NTuple{NB,Int}
- PC::NTuple{NC,Int}
- ctrans::Bool
- newArange::NTuple{NA,Int}
- newBrange::NTuple{NB,Int}
- newCrange::NTuple{NC,Int}
- function ContractionProperties(
- ai::NTuple{NA,Int}, bi::NTuple{NB,Int}, ci::NTuple{NC,Int}
- ) where {NA,NB,NC}
- return new{NA,NB,NC}(
- ai,
- bi,
- ci,
- 0,
- 0,
- 0,
- ntuple(_ -> 0, Val(NA)),
- ntuple(_ -> 0, Val(NA)),
- ntuple(_ -> 0, Val(NB)),
- false,
- false,
- false,
- 1,
- 1,
- 1,
- 0,
- NA,
- NB,
- NA,
- NB,
- ntuple(i -> i, Val(NA)),
- ntuple(i -> i, Val(NB)),
- ntuple(i -> i, Val(NC)),
- false,
- ntuple(_ -> 0, Val(NA)),
- ntuple(_ -> 0, Val(NB)),
- ntuple(_ -> 0, Val(NC)),
- )
- end
-end
-
-function compute_perms!(props::ContractionProperties{NA,NB,NC}) where {NA,NB,NC}
- #length(props.AtoB)!=0 && return
-
- # Access the fields before the loop
- # since getting fields from the mutable struct
- # takes nontrivial time
- ai = props.ai
- bi = props.bi
- ci = props.ci
-
- ncont = props.ncont
- AtoB = props.AtoB
- Acstart = props.Acstart
- Bcstart = props.Bcstart
- for i in 1:NA
- for j in 1:NB
- if @inbounds ai[i] == @inbounds bi[j]
- ncont += 1
- #TODO: check this if this should be i,j or i-1,j-1 (0-index or 1-index)
- i <= Acstart && (Acstart = i)
- j <= Bcstart && (Bcstart = j)
- #AtoB[i] = j
- AtoB = setindex(AtoB, j, i)
- break
- end
- end
- end
- props.ncont = ncont
- props.AtoB = AtoB
- props.Acstart = Acstart
- props.Bcstart = Bcstart
-
- Austart =
props.Austart - AtoC = props.AtoC - for i in 1:NA - for k in 1:NC - if @inbounds ai[i] == @inbounds ci[k] - #TODO: check this if this should be i,j or i-1,j-1 (0-index or 1-index) - i <= Austart && (Austart = i) - #AtoC[i] = k - AtoC = setindex(AtoC, k, i) - break - end - end - end - props.Austart = Austart - props.AtoC = AtoC - - Bustart = props.Bustart - BtoC = props.BtoC - for j in 1:NB - for k in 1:NC - if bi[j] == ci[k] - #TODO: check this if this should be i,j or i-1,j-1 (0-index or 1-index) - j <= Bustart && (Bustart = j) - #BtoC[j] = k - BtoC = setindex(BtoC, k, j) - break - end - end - end - props.Bustart = Bustart - props.BtoC = BtoC - - return nothing -end - -function checkACsameord(props::ContractionProperties)::Bool - AtoC = props.AtoC - - props.Austart >= length(props.ai) && return true - aCind = props.AtoC[props.Austart] - for i in 1:length(props.ai) - if !contractedA(props, i) - AtoC[i] != aCind && return false - aCind += 1 - end - end - return true -end - -function checkBCsameord(props::ContractionProperties)::Bool - props.Bustart >= length(props.bi) && return true - bCind = props.BtoC[props.Bustart] - for i in 1:length(props.bi) - if !contractedB(props, i) - props.BtoC[i] != bCind && return false - bCind += 1 - end - end - return true -end - -contractedA(props::ContractionProperties, i::Int) = (props.AtoC[i] < 1) -contractedB(props::ContractionProperties, i::Int) = (props.BtoC[i] < 1) -Atrans(props::ContractionProperties) = contractedA(props, 1) -Btrans(props::ContractionProperties) = !contractedB(props, 1) -Ctrans(props::ContractionProperties) = props.ctrans - -function compute_contraction_properties!( - props::ContractionProperties{NA,NB,NC}, A, B, C -) where {NA,NB,NC} - compute_perms!(props) - - #Use props.PC.size() as a check to see if we've already run this - #length(props.PC)!=0 && return - - #ra = NA #length(props.ai) - #rb = NB #length(props.bi) - #rc = NC #length(props.ci) - - #props.PC = fill(0,rc) - - PC = props.PC - AtoC = props.AtoC - BtoC = props.BtoC - - dleft = props.dleft - dmid = props.dmid - dright = props.dright - - dleft = 1 - dmid = 1 - dright = 1 - c = 1 - for i in 1:NA - #if !contractedA(props,i) - if !(AtoC[i] < 1) - dleft *= size(A, i) - #props.PC[props.AtoC[i]] = c - PC = setindex(PC, c, AtoC[i]) - c += 1 - else - dmid *= size(A, i) - end - end - for j in 1:NB - #if !contractedB(props,j) - if !(BtoC[j] < 1) - dright *= size(B, j) - #props.PC[props.BtoC[j]] = c - PC = setindex(PC, c, BtoC[j]) - c += 1 - end - end - props.PC = PC - props.dleft = dleft - props.dmid = dmid - props.dright = dright - - if !is_trivial_permutation(props.PC) - props.permuteC = true - if checkBCsameord(props) && checkACsameord(props) - #Can avoid permuting C by - #computing Bt*At = Ct - props.ctrans = true - props.permuteC = false - end - end - - #Check if A can be treated as a matrix without permuting - props.permuteA = false - if !(contractedA(props, 1) || contractedA(props, NA)) - #If contracted indices are not all at front or back, - #will have to permute A - props.permuteA = true - else - #Contracted ind start at front or back, check if contiguous - #TODO: check that the limits are correct (1-indexed vs. 
0-indexed) - for i in 1:(props.ncont) - if !contractedA(props, props.Acstart + i - 1) - #Contracted indices not contiguous, must permute - props.permuteA = true - break - end - end - end - - #Check if B is matrix-like - props.permuteB = false - if !(contractedB(props, 1) || contractedB(props, NB)) - #If contracted indices are not all at front or back, - #will have to permute B - props.permuteB = true - else - #TODO: check that the limits are correct (1-indexed vs. 0-indexed) - for i in 1:(props.ncont) - if !contractedB(props, props.Bcstart + i - 1) - #Contracted inds not contiguous, permute - props.permuteB = true - break - end - end - end - - if !props.permuteA && !props.permuteB - #Check if contracted inds. in same order - #TODO: check these limits are correct - for i in 1:(props.ncont) - if props.AtoB[props.Acstart + i - 1] != (props.Bcstart + i - 1) - #If not in same order, - #must permute one of A or B - #so permute the smaller one - props.dleft < props.dright ? (props.permuteA = true) : (props.permuteB = true) - break - end - end - end - - if props.permuteC && !(props.permuteA && props.permuteB) - PCost(d::Real) = d * d - #Could avoid permuting C if - #permute both A and B, worth it? - pCcost = PCost(props.dleft * props.dright) - extra_pABcost = 0 - !props.permuteA && (extra_pABcost += PCost(props.dleft * props.dmid)) - !props.permuteB && (extra_pABcost += PCost(props.dmid * props.dright)) - if extra_pABcost < pCcost - props.permuteA = true - props.permuteB = true - props.permuteC = false - end - end - - if props.permuteA - #props.PA = fill(0,ra) - #Permute contracted indices to the front, - #in the same order as on B - - AtoC = props.AtoC - BtoC = props.BtoC - ai = props.ai - bi = props.bi - PA = props.PA - - newi = 0 - bind = props.Bcstart - for i in 1:(props.ncont) - while !(BtoC[bind] < 1) - bind += 1 - end - j = findfirst(==(bi[bind]), ai) - #props.PA[newi + 1] = j - PA = setindex(PA, j, newi + 1) - bind += 1 - newi += 1 - end - #Reset p.AtoC: - #fill!(props.AtoC,0) - AtoC = ntuple(_ -> 0, Val(NA)) - #Permute uncontracted indices to - #appear in same order as on C - #TODO: check this is correct for 1-indexing - for k in 1:NC - j = findfirst(==(props.ci[k]), props.ai) - if !isnothing(j) - #props.AtoC[newi+1] = k - AtoC = setindex(AtoC, k, newi + 1) - #props.PA[newi+1] = j - PA = setindex(PA, j, newi + 1) - newi += 1 - end - newi == NA && break - end - props.PA = PA - props.AtoC = AtoC - end - - ##Also update props.Austart,props.Acstart - - Acstart = props.Acstart - Austart = props.Austart - newArange = props.newArange - PA = props.PA - - Acstart = NA + 1 - Austart = NA + 1 - #TODO: check this is correct for 1-indexing - for i in 1:NA - #if contractedA(props,i) - if @inbounds AtoC[i] < 1 - Acstart = min(i, Acstart) - else - Austart = min(i, Austart) - end - #props.newArange = permute_extents([size(A)...],props.PA) - newArange = permute(size(A), PA) #[size(A)...][props.PA] - end - props.Acstart = Acstart - props.Austart = Austart - props.newArange = newArange - - if (props.permuteB) - PB = props.PB - AtoC = props.AtoC - BtoC = props.BtoC - ai = props.ai - bi = props.bi - ci = props.ci - Bcstart = props.Bcstart - Bustart = props.Bustart - - #props.PB = fill(0,rb) - #TODO: check this is correct for 1-indexing - newi = 0 #1 - - if (props.permuteA) - #A's contracted indices already set to - #be in same order as B above, so just - #permute contracted indices to the front - #keeping relative order - - i = props.Bcstart - while newi < props.ncont - while !(BtoC[i] < 1) - i += 1 - end - 
#props.PB[newi+1] = i - PB = setindex(PB, i, newi + 1) - i += 1 - newi += 1 - end - else - #Permute contracted indices to the - #front and in same order as on A - - aind = props.Acstart - for i in 0:(props.ncont - 1) - while !(AtoC[aind] < 1) - aind += 1 - end - j = findfirst(==(ai[aind]), bi) - #props.PB[newi + 1] = j - PB = setindex(PB, j, newi + 1) - aind += 1 - newi += 1 - end - end - - #Reset p.BtoC: - #fill!(props.BtoC,0) - BtoC = ntuple(_ -> 0, Val(NB)) - - #Permute uncontracted indices to - #appear in same order as on C - for k in 1:NC - j = findfirst(==(ci[k]), bi) - if !isnothing(j) - #props.BtoC[newi + 1] = k - BtoC = setindex(BtoC, k, newi + 1) - #props.PB[newi + 1] = j - PB = setindex(PB, j, newi + 1) - newi += 1 - end - newi == NB && break - end - Bcstart = NB - Bustart = NB - for i in 1:NB - if BtoC[i] < 1 - Bcstart = min(i, Bcstart) - else - Bustart = min(i, Bustart) - end - end - #props.newBrange = permute_extents([size(B)...],props.PB) - #props.newBrange = [size(B)...][props.PB] - props.newBrange = permute(size(B), PB) - - props.BtoC = BtoC - props.PB = PB - props.Bcstart = Bcstart - props.Bustart = Bustart - end - - if props.permuteA || props.permuteB - AtoC = props.AtoC - BtoC = props.BtoC - PC = props.PC - - #Recompute props.PC - c = 1 - #TODO: check this is correct for 1-indexing - for i in 1:NA - AtoC_i = AtoC[i] - if !(AtoC_i < 1) - #props.PC[props.AtoC[i]] = c - PC = setindex(PC, c, AtoC_i) - c += 1 - end - end - #TODO: check this is correct for 1-indexing - for j in 1:NB - BtoC_j = BtoC[j] - if !(BtoC_j < 1) - #props.PC[props.BtoC[j]] = c - PC = setindex(PC, c, BtoC_j) - c += 1 - end - end - props.PC = PC - - props.ctrans = false - if (is_trivial_permutation(PC)) - props.permuteC = false - else - props.permuteC = true - #Here we already know since pc_triv = false that - #at best indices from B precede those from A (on result C) - #so if both sets remain in same order on C - #just need to transpose C, not permute it - if checkBCsameord(props) && checkACsameord(props) - props.ctrans = true - props.permuteC = false - end - end - end - - if props.permuteC - Rb = MVector{NC,Int}(undef) #Int[] - k = 1 - AtoC = props.AtoC - BtoC = props.BtoC - if !props.permuteA - #TODO: check this is correct for 1-indexing - for i in 1:NA - if !(AtoC[i] < 1) - #push!(Rb,size(A,i)) - Rb[k] = size(A, i) - k = k + 1 - end - end - else - #TODO: check this is correct for 1-indexing - for i in 1:NA - if !(AtoC[i] < 1) - #push!(Rb,size(props.newArange,i)) - Rb[k] = props.newArange[i] - k = k + 1 - end - end - end - if !props.permuteB - #TODO: check this is correct for 1-indexing - for j in 1:NB - if !(BtoC[j] < 1) - #push!(Rb,size(B,j)) - Rb[k] = size(B, j) - k = k + 1 - end - end - else - #TODO: check this is correct for 1-indexing - for j in 1:NB - if !(BtoC[j] < 1) - #push!(Rb,size(props.newBrange,j)) - Rb[k] = props.newBrange[j] - k = k + 1 - end - end - end - props.newCrange = Tuple(Rb) - end -end diff --git a/NDTensors/src/tensoroperations/generic_tensor_operations.jl b/NDTensors/src/tensoroperations/generic_tensor_operations.jl deleted file mode 100644 index 3df58f3a9e..0000000000 --- a/NDTensors/src/tensoroperations/generic_tensor_operations.jl +++ /dev/null @@ -1,234 +0,0 @@ -function permutedims(tensor::Tensor, perm) - (ndims(tensor) == length(perm) && isperm(perm)) || - throw(ArgumentError("no valid permutation of dimensions")) - output_tensor = NDTensors.similar(tensor, permute(inds(tensor), perm)) - return permutedims!!(output_tensor, tensor, perm) -end - -# Version that may 
overwrite the result or allocate
-# and return the result of the permutation.
-# Similar to `BangBang.jl` notation:
-# https://juliafolds.github.io/BangBang.jl/stable/.
-function permutedims!!(output_tensor::Tensor, tensor::Tensor, perm, f::Function)
- Base.checkdims_perm(output_tensor, tensor, perm)
- permutedims!(output_tensor, tensor, perm, f)
- return output_tensor
-end
-
-# Equivalent to `permutedims!!(output_tensor, tensor, perm, (r, t) -> t)`
-function permutedims!!(output_tensor::Tensor, tensor::Tensor, perm)
- Base.checkdims_perm(output_tensor, tensor, perm)
- permutedims!(output_tensor, tensor, perm)
- return output_tensor
-end
-
-function permutedims!(output_tensor::Tensor, tensor::Tensor, perm, f::Function)
- Base.checkdims_perm(output_tensor, tensor, perm)
- error(
- "`permutedims!(output_tensor::Tensor, tensor::Tensor, perm, f::Function)` not implemented for `typeof(output_tensor) = $(typeof(output_tensor))`, `typeof(tensor) = $(typeof(tensor))`, `perm = $perm`, and `f = $f`.",
- )
- return output_tensor
-end
-
-function permutedims!(output_tensor::Tensor, tensor::Tensor, perm)
- Base.checkdims_perm(output_tensor, tensor, perm)
- error(
- "`permutedims!(output_tensor::Tensor, tensor::Tensor, perm)` not implemented for `typeof(output_tensor) = $(typeof(output_tensor))`, `typeof(tensor) = $(typeof(tensor))`, and `perm = $perm`.",
- )
- return output_tensor
-end
-
-function (x::Number * tensor::Tensor)
- return NDTensors.tensor(x * storage(tensor), inds(tensor))
-end
-(tensor::Tensor * x::Number) = x * tensor
-
-function (tensor::Tensor / x::Number)
- return NDTensors.tensor(storage(tensor) / x, inds(tensor))
-end
-
-function contraction_output_type(
- tensortype1::Type{<:Tensor}, tensortype2::Type{<:Tensor}, inds
-)
- return similartype(promote_type(tensortype1, tensortype2), inds)
-end
-
-function contraction_output(
- tensor1::Tensor, labelstensor1, tensor2::Tensor, labelstensor2, labelsoutput_tensor
-)
- indsoutput_tensor = contract_inds(
- inds(tensor1), labelstensor1, inds(tensor2), labelstensor2, labelsoutput_tensor
- )
- output_tensor = contraction_output(tensor1, tensor2, indsoutput_tensor)
- return output_tensor
-end
-
-# Trait returning true if the two tensors or storage types can
-# contract with each other.
-@traitdef CanContract{X,Y}
-#! format: off
-@traitimpl CanContract{X,Y} <- can_contract(X, Y)
-#!
format: on
-
-# Assume storage types can contract with each other
-can_contract(tensor1::Type, tensor2::Type) = true
-function can_contract(tensor1::Type{<:Tensor}, tensor2::Type{<:Tensor})
- return can_contract(storagetype(tensor1), storagetype(tensor2))
-end
-
-function can_contract(tensor1::TensorStorage, tensor2::TensorStorage)
- return can_contract(typeof(tensor1), typeof(tensor2))
-end
-function can_contract(tensor1::Tensor, tensor2::Tensor)
- return can_contract(typeof(tensor1), typeof(tensor2))
-end
-
-# Version where output labels aren't supplied
-@traitfn function contract(
- tensor1::TensorT1, labels_tensor1, tensor2::TensorT2, labels_tensor2
-) where {TensorT1<:Tensor,TensorT2<:Tensor;CanContract{TensorT1,TensorT2}}
- labelsoutput_tensor = contract_labels(labels_tensor1, labels_tensor2)
- return contract(tensor1, labels_tensor1, tensor2, labels_tensor2, labelsoutput_tensor)
-end
-
-@traitfn function contract(
- tensor1::TensorT1, labels_tensor1, tensor2::TensorT2, labels_tensor2
-) where {TensorT1<:Tensor,TensorT2<:Tensor;!CanContract{TensorT1,TensorT2}}
- return error(
- "Can't contract tensor of storage type $(storagetype(tensor1)) with tensor of storage type $(storagetype(tensor2)).",
- )
-end
-
-function contract(
- tensor1::Tensor, labelstensor1, tensor2::Tensor, labelstensor2, labelsoutput_tensor
-)
- # TODO: put the contract_inds logic into contraction_output,
- # call like output_tensor = contraction_output(tensor1,labelstensor1,tensor2,labelstensor2)
- #indsoutput_tensor = contract_inds(inds(tensor1),labelstensor1,inds(tensor2),labelstensor2,labelsoutput_tensor)
- output_tensor = contraction_output(
- tensor1, labelstensor1, tensor2, labelstensor2, labelsoutput_tensor
- )
- # contract!! version here since the output output_tensor may not
- # be mutable (like UniformDiag)
- output_tensor = contract!!(
- output_tensor, labelsoutput_tensor, tensor1, labelstensor1, tensor2, labelstensor2
- )
- return output_tensor
-end
-
-using NDTensors.Expose: Exposed, expose, unexpose
-# Overload this function for immutable storage types
-function _contract!!(
- output_tensor::Tensor,
- labelsoutput_tensor,
- tensor1::Tensor,
- labelstensor1,
- tensor2::Tensor,
- labelstensor2,
- α::Number=1,
- β::Number=0,
-)
- if α ≠ 1 || β ≠ 0
- contract!(
- expose(output_tensor),
- labelsoutput_tensor,
- expose(tensor1),
- labelstensor1,
- expose(tensor2),
- labelstensor2,
- α,
- β,
- )
- else
- contract!(
- expose(output_tensor),
- labelsoutput_tensor,
- expose(tensor1),
- labelstensor1,
- expose(tensor2),
- labelstensor2,
- )
- end
- return output_tensor
-end
-
-function contract!(
- output_tensor::Exposed,
- labelsoutput_tensor,
- tensor1::Exposed,
- labelstensor1,
- tensor2::Exposed,
- labelstensor2,
- α::Number=one(Bool),
- β::Number=zero(Bool),
-)
- return contract!(
- unexpose(output_tensor),
- labelsoutput_tensor,
- unexpose(tensor1),
- labelstensor1,
- unexpose(tensor2),
- labelstensor2,
- α,
- β,
- )
-end
-
-# Is this generic for all storage types?
-function contract!!(
- output_tensor::Tensor,
- labelsoutput_tensor,
- tensor1::Tensor,
- labelstensor1,
- tensor2::Tensor,
- labelstensor2,
- α::Number=1,
- β::Number=0,
-)
- Noutput_tensor = ndims(output_tensor)
- N1 = ndims(tensor1)
- N2 = ndims(tensor2)
- if (N1 ≠ 0) && (N2 ≠ 0) && (N1 + N2 == Noutput_tensor)
- # Outer product
- (α ≠ 1 || β ≠ 0) && error(
- "contract!!
not yet implemented for outer product tensor contraction with non-trivial α and β",
- )
- # TODO: permute tensor1 and tensor2 appropriately first (can be more efficient
- # than permuting the result of tensor1⊗tensor2)
- # TODO: implement the in-place version directly
- output_tensor = outer!!(output_tensor, tensor1, tensor2)
- labelsoutput_tensorp = (labelstensor1..., labelstensor2...)
- perm = getperm(labelsoutput_tensor, labelsoutput_tensorp)
- if !is_trivial_permutation(perm)
- output_tensorp = reshape(output_tensor, (inds(tensor1)..., inds(tensor2)...))
- output_tensor = permutedims!!(output_tensor, copy(output_tensorp), perm)
- end
- else
- if α ≠ 1 || β ≠ 0
- output_tensor = _contract!!(
- output_tensor,
- labelsoutput_tensor,
- tensor1,
- labelstensor1,
- tensor2,
- labelstensor2,
- α,
- β,
- )
- else
- output_tensor = _contract!!(
- output_tensor, labelsoutput_tensor, tensor1, labelstensor1, tensor2, labelstensor2
- )
- end
- end
- return output_tensor
-end
-
-function outer!!(output_tensor::Tensor, tensor1::Tensor, tensor2::Tensor)
- outer!(output_tensor, tensor1, tensor2)
- return output_tensor
-end
-
-function outer end
-
-const ⊗ = outer
diff --git a/NDTensors/src/tensorstorage/default_storage.jl b/NDTensors/src/tensorstorage/default_storage.jl
deleted file mode 100644
index 9e6c813a96..0000000000
--- a/NDTensors/src/tensorstorage/default_storage.jl
+++ /dev/null
@@ -1,21 +0,0 @@
-## This is a file that specifies the default storage type given some set of parameters
-## The parameters are the element type and storage type
-default_datatype(eltype::Type=default_eltype()) = Vector{eltype}
-default_eltype() = Float64
-
-using .TypeParameterAccessors: specify_default_type_parameters
-## TODO use multiple dispatch to make this pick between dense and blocksparse
-function default_storagetype(datatype::Type{<:AbstractArray}, inds::Tuple)
- datatype = specify_default_type_parameters(datatype)
- return Dense{eltype(datatype),datatype}
-end
-
-function default_storagetype(datatype::Type{<:AbstractArray})
- return default_storagetype(datatype, ())
-end
-
-default_storagetype(eltype::Type) = default_storagetype(default_datatype(eltype))
-function default_storagetype(eltype::Type, inds::Tuple)
- return default_storagetype(default_datatype(eltype), inds)
-end
-default_storagetype() = default_storagetype(default_eltype())
diff --git a/NDTensors/src/tensorstorage/set_types.jl b/NDTensors/src/tensorstorage/set_types.jl
deleted file mode 100644
index 7b27d4d245..0000000000
--- a/NDTensors/src/tensorstorage/set_types.jl
+++ /dev/null
@@ -1,7 +0,0 @@
-using .TypeParameterAccessors: TypeParameterAccessors
-function TypeParameterAccessors.set_ndims(arraytype::Type{<:TensorStorage}, ndims::Int)
- # TODO: Change to this once `TensorStorage` types support wrapping
- # non-AbstractVector types.
- # return set_datatype(arraytype, set_ndims(datatype(arraytype), ndims)) - return arraytype -end diff --git a/NDTensors/src/tensorstorage/similar.jl b/NDTensors/src/tensorstorage/similar.jl deleted file mode 100644 index 374a3235f3..0000000000 --- a/NDTensors/src/tensorstorage/similar.jl +++ /dev/null @@ -1,83 +0,0 @@ -using .TypeParameterAccessors: TypeParameterAccessors, set_ndims, similartype - -# NDTensors.similar -similar(storage::TensorStorage) = setdata(storage, NDTensors.similar(data(storage))) - -# NDTensors.similar -function similar(storage::TensorStorage, eltype::Type) - return setdata(storage, NDTensors.similar(data(storage), eltype)) -end - -# NDTensors.similar -function similar(storage::TensorStorage, dims::Tuple) - # TODO: Don't convert to an `AbstractVector` with `vec`, once we support - # more general data types. - # return setdata(storage, NDTensors.similar(data(storage), dims)) - return setdata(storage, vec(NDTensors.similar(data(storage), dims))) -end - -# NDTensors.similar -function similar(storage::TensorStorage, eltype::Type, dims::Tuple) - # TODO: Don't convert to an `AbstractVector` with `vec`, once we support - # more general data types. - # return setdata(storage, NDTensors.similar(data(storage), eltype, dims)) - return setdata(storage, vec(NDTensors.similar(data(storage), eltype, dims))) -end - -# NDTensors.similar -function similar(storagetype::Type{<:TensorStorage}, eltype::Type, dims::Tuple) - return similar(similartype(storagetype, eltype), dims) -end - -# NDTensors.similar -function similar(storagetype::Type{<:TensorStorage}, eltype::Type) - return error("Must specify dimensions.") -end - -# NDTensors.similar -function similar(storagetype::Type{<:TensorStorage}, dims::Tuple) - # TODO: Don't convert to an `AbstractVector` with `vec`, once we support - # more general data types. - # return setdata(storagetype, NDTensors.similar(datatype(storagetype), dims)) - return setdata(storagetype, vec(NDTensors.similar(datatype(storagetype), dims))) -end - -# NDTensors.similar -function similar(storagetype::Type{<:TensorStorage}, dims::Dims) - # TODO: Don't convert to an `AbstractVector` with `prod`, once we support - # more general data types. - # return setdata(storagetype, NDTensors.similar(datatype(storagetype), dims)) - return setdata(storagetype, NDTensors.similar(datatype(storagetype), prod(dims))) -end - -# NDTensors.similar -function similar(storagetype::Type{<:TensorStorage}, dims::DimOrInd...) - return similar(storagetype, NDTensors.to_shape(dims)) -end - -# Define Base.similar in terms of NDTensors.similar -Base.similar(storage::TensorStorage) = NDTensors.similar(storage) -Base.similar(storage::TensorStorage, eltype::Type) = NDTensors.similar(storage, eltype) -## TODO: Are these methods needed? -## Base.similar(storage::TensorStorage, dims::Tuple) = NDTensors.similar(storage, dims) -## Base.similar(storage::TensorStorage, dims::Dims...) = NDTensors.similar(storage, dims...) -## Base.similar(storage::TensorStorage, dims::DimOrInd...) = NDTensors.similar(storage, dims...) - -function TypeParameterAccessors.similartype( - storagetype::Type{<:TensorStorage}, eltype::Type -) - # TODO: Don't convert to an `AbstractVector` with `set_ndims(datatype, 1)`, once we support - # more general data types. 
- # return set_datatype(storagetype, NDTensors.similartype(datatype(storagetype), eltype)) - return set_datatype(storagetype, set_ndims(similartype(datatype(storagetype), eltype), 1)) -end - -function TypeParameterAccessors.similartype(storagetype::Type{<:TensorStorage}, dims::Tuple) - # TODO: In the future, set the dimensions of the data type based on `dims`, once - # more general data types beyond `AbstractVector` are supported. - # `similartype` unwraps any wrapped data. - return set_ndims( - set_datatype(storagetype, set_ndims(similartype(datatype(storagetype)), 1)), - length(dims), - ) -end diff --git a/NDTensors/src/tensorstorage/tensorstorage.jl b/NDTensors/src/tensorstorage/tensorstorage.jl deleted file mode 100644 index 92d7b50418..0000000000 --- a/NDTensors/src/tensorstorage/tensorstorage.jl +++ /dev/null @@ -1,105 +0,0 @@ -using SparseArrays: SparseArrays - -abstract type TensorStorage{ElT} <: AbstractVector{ElT} end - -data(S::TensorStorage) = S.data - -datatype(::Type{<:TensorStorage}) = error("Not implemented") - -datatype(S::TensorStorage) = typeof(data(S)) - -Base.eltype(::TensorStorage{ElT}) where {ElT} = ElT -scalartype(T::TensorStorage) = eltype(T) - -Base.eltype(::Type{<:TensorStorage{ElT}}) where {ElT} = ElT - -Base.iterate(S::TensorStorage, args...) = iterate(data(S), args...) - -dense(S::TensorStorage) = S - -# This is necessary since for some reason inference doesn't work -# with the more general definition (eltype(Nothing) === Any) -Base.eltype(::TensorStorage{Nothing}) = Nothing - -Base.length(S::TensorStorage) = length(data(S)) - -Base.size(S::TensorStorage) = size(data(S)) - -Base.@propagate_inbounds Base.getindex(S::TensorStorage, i::Integer) = data(S)[i] -Base.@propagate_inbounds function Base.setindex!(S::TensorStorage, v, i::Integer) - return (setindex!(data(S), v, i); S) -end - -(S::TensorStorage * x::Number) = setdata(S, x * data(S)) -(x::Number * S::TensorStorage) = S * x -(S::TensorStorage / x::Number) = setdata(S, data(S) / x) - --(S::TensorStorage) = setdata(S, -data(S)) - -# Needed for passing Tensor{T,2} to BLAS/LAPACK -function Base.unsafe_convert(::Type{Ptr{ElT}}, T::TensorStorage{ElT}) where {ElT} - return Base.unsafe_convert(Ptr{ElT}, data(T)) -end - -# This may need to be overloaded, since storage types -# often have other fields besides data - -Base.conj!(S::TensorStorage) = (conj!(data(S)); return S) - -Base.conj(S::TensorStorage) = conj(AllowAlias(), S) - -function Base.conj(::AllowAlias, S::TensorStorage) - return setdata(S, conj(data(S))) -end - -function Base.conj(::NeverAlias, S::TensorStorage) - return conj!(copy(S)) -end - -Base.complex(S::TensorStorage) = setdata(S, complex(data(S))) - -Base.real(S::TensorStorage) = setdata(S, real(data(S))) - -Base.imag(S::TensorStorage) = setdata(S, imag(data(S))) - -function copyto!(S1::TensorStorage, S2::TensorStorage) - copyto!(expose(data(S1)), expose(data(S2))) - return S1 -end - -Random.randn!(S::TensorStorage) = randn!(Random.default_rng(), S) -Random.randn!(rng::AbstractRNG, S::TensorStorage) = (randn!(rng, data(S)); S) - -function Base.map(f, t1::TensorStorage, t_tail::TensorStorage...; kwargs...) - return setdata(t1, map(f, data(t1), data.(t_tail)...; kwargs...)) -end - -function Base.mapreduce(f, op, t1::TensorStorage, t_tail::TensorStorage...; kwargs...) - return mapreduce(f, op, data(t1), data.(t_tail)...; kwargs...) 
-end - -Base.fill!(S::TensorStorage, v) = (fill!(data(S), v); S) - -LinearAlgebra.rmul!(S::TensorStorage, v::Number) = (rmul!(data(S), v); S) - -scale!(S::TensorStorage, v::Number) = rmul!(S, v) - -norm(S::TensorStorage) = norm(data(S)) - -Base.convert(::Type{T}, S::T) where {T<:TensorStorage} = S - -blockoffsets(S::TensorStorage) = S.blockoffsets - -""" -nzblocks(T::TensorStorage) - -Return a vector of the non-zero blocks of the BlockSparse storage. -""" -nzblocks(T::TensorStorage) = nzblocks(blockoffsets(T)) - -eachnzblock(T::TensorStorage) = eachnzblock(blockoffsets(T)) - -nnzblocks(S::TensorStorage) = length(blockoffsets(S)) -SparseArrays.nnz(S::TensorStorage) = length(S) - -offset(S::TensorStorage, block) = offset(blockoffsets(S), block) diff --git a/NDTensors/src/truncate.jl b/NDTensors/src/truncate.jl deleted file mode 100644 index d548a3dcbf..0000000000 --- a/NDTensors/src/truncate.jl +++ /dev/null @@ -1,105 +0,0 @@ -using .TypeParameterAccessors: unwrap_array_type -## TODO write Exposed version of truncate -function truncate!!(P::AbstractArray; kwargs...) - return truncate!!(unwrap_array_type(P), P; kwargs...) -end - -# CPU version. -function truncate!!(::Type{<:Array}, P::AbstractArray; kwargs...) - truncerr, docut = truncate!(P; kwargs...) - return P, truncerr, docut -end - -using .TypeParameterAccessors: unwrap_array_type -# GPU fallback version, convert to CPU. -function truncate!!(::Type{<:AbstractArray}, P::AbstractArray; kwargs...) - P_cpu = cpu(P) - truncerr, docut = truncate!(P_cpu; kwargs...) - P = adapt(unwrap_array_type(P), P_cpu) - return P, truncerr, docut -end - -# CPU implementation. -function truncate!( - P::AbstractVector; - mindim=nothing, - maxdim=nothing, - cutoff=nothing, - use_absolute_cutoff=nothing, - use_relative_cutoff=nothing, -) - mindim = replace_nothing(mindim, default_mindim(P)) - maxdim = replace_nothing(maxdim, length(P)) - cutoff = replace_nothing(cutoff, typemin(eltype(P))) - use_absolute_cutoff = replace_nothing(use_absolute_cutoff, default_use_absolute_cutoff(P)) - use_relative_cutoff = replace_nothing(use_relative_cutoff, default_use_relative_cutoff(P)) - - origm = length(P) - docut = zero(eltype(P)) - - #if P[1] <= 0.0 - # P[1] = 0.0 - # resize!(P, 1) - # return 0.0, 0.0 - #end - - if origm == 1 - docut = abs(P[1]) / 2 - return zero(eltype(P)), docut - end - - s = sign(P[1]) - s < 0 && (P .*= s) - - #Zero out any negative weight - for n in origm:-1:1 - (P[n] >= zero(eltype(P))) && break - P[n] = zero(eltype(P)) - end - - n = origm - truncerr = zero(eltype(P)) - while n > maxdim - truncerr += P[n] - n -= 1 - end - - if use_absolute_cutoff - #Test if individual prob. 
weights fall below cutoff
-#rather than using *sum* of discarded weights
- while P[n] <= cutoff && n > mindim
- truncerr += P[n]
- n -= 1
- end
- else
- scale = one(eltype(P))
- if use_relative_cutoff
- scale = sum(P)
- (scale == zero(eltype(P))) && (scale = one(eltype(P)))
- end
-
- #Continue truncating until the *sum* of discarded probability
- #weight reaches the cutoff (or n == mindim)
- while (truncerr + P[n] <= cutoff * scale) && (n > mindim)
- truncerr += P[n]
- n -= 1
- end
-
- truncerr /= scale
- end
-
- if n < 1
- n = 1
- end
-
- if n < origm
- docut = (P[n] + P[n + 1]) / 2
- if abs(P[n] - P[n + 1]) < eltype(P)(1e-3) * P[n]
- docut += eltype(P)(1e-3) * P[n]
- end
- end
-
- s < 0 && (P .*= s)
- resize!(P, n)
- return truncerr, docut
-end
diff --git a/NDTensors/src/tupletools.jl b/NDTensors/src/tupletools.jl
deleted file mode 100644
index 132b219af5..0000000000
--- a/NDTensors/src/tupletools.jl
+++ /dev/null
@@ -1,272 +0,0 @@
-
-# This is a cache of [Val(1), Val(2), ...]
-# Hard-coded for now to only handle tensors up to order 100
-const ValCache = Val[Val(n) for n in 0:100]
-# Faster conversions of collection to tuple than `Tuple(::AbstractVector)`
-_NTuple(::Val{N}, v::Vector{T}) where {N,T} = ntuple(n -> v[n], Val(N))
-_Tuple(v::Vector{T}) where {T} = _NTuple(ValCache[length(v) + 1], v)
-_Tuple(t::Tuple) = t
-
-"""
- ValLength(::Type{NTuple{N}}) = Val{N}
-"""
-ValLength(::Type{NTuple{N,T}}) where {N,T} = Val{N}
-
-"""
- ValLength(::NTuple{N}) = Val(N)
-"""
-ValLength(::NTuple{N}) where {N} = Val(N)
-
-# Only to help with backwards compatibility, this
-# is not type stable and therefore not efficient.
-ValLength(v::Vector) = Val(length(v))
-
-ValLength(::Tuple{Vararg{Any,N}}) where {N} = Val(N)
-
-ValLength(::Type{<:Tuple{Vararg{Any,N}}}) where {N} = Val{N}
-
-ValLength(::CartesianIndex{N}) where {N} = Val(N)
-ValLength(::Type{CartesianIndex{N}}) where {N} = Val{N}
-
-push(s::Tuple, val) = (s..., val)
-
-pushfirst(s::Tuple, val) = (val, s...)
-
-pop(s::NTuple{N}) where {N} = ntuple(i -> s[i], Val(N - 1))
-
-popfirst(s::NTuple{N}) where {N} = ntuple(i -> s[i + 1], Val(N - 1))
-
-# Permute some other type by perm
-# (for example, tuple, MVector, etc.)
-# as long as the constructor accepts a tuple
-@inline function _permute(s, perm)
- return ntuple(i -> s[perm[i]], ValLength(perm))
-end
-
-permute(s::Tuple, perm) = _permute(s, perm)
-
-# TODO: is this needed?
-function permute(s::T, perm) where {T<:NTuple}
- return T(_permute(s, perm))
-end
-
-function permute(s::T, perm) where {T}
- return T(_permute(Tuple(s), perm))
-end
-
-# TODO: This is to handle Vector, is this correct?
-permute(s::AbstractVector, perm) = _permute(s, perm)
-
-sim(s::NTuple) = s
-
-# type stable findfirst
-@inline _findfirst(args...) = (i = findfirst(args...); i === nothing ? 0 : i)
-
-"""
- getperm(col1,col2)

-Get the permutation that takes collection 2 to collection 1,
-such that col2[p].==col1
-"""
-@inline function getperm(s1, s2)
- return ntuple(i -> _findfirst(==(@inbounds s1[i]), s2), length(s1))
-end
-
-"""
- getperms(col1,col2,col3)
-
-Get the permutations that take collections 2 and 3 to collection 1.
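-
-For example (an illustrative call): `getperms((1, 2, 3), (2,), (3, 1))`
-returns `((2,), (3, 1))`.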
-""" -function getperms(s, s1, s2) - N = length(s) - N1 = length(s1) - N2 = length(s2) - N1 + N2 ≠ N && error("Size of partial sets don't match with total set") - perm1 = ntuple(i -> findfirst(==(s1[i]), s), Val(N1)) - perm2 = ntuple(i -> findfirst(==(s2[i]), s), Val(N2)) - isperm((perm1..., perm2...)) || - error("Combined permutations are $((perm1...,perm2...)), not a valid permutation") - return perm1, perm2 -end - -function invperm!(permres, perm) - for i in 1:length(perm) - permres[perm[i]] = i - end - return permres -end - -function invperm(perm::NTuple{N,Int}) where {N} - mpermres = MVector{N,Int}(undef) - invperm!(mpermres, perm) - return Tuple(mpermres) -end - -function invperm(perm) - permres = similar(perm) - invperm!(permres, perm) - return permres -end - -# Override TupleTools.isperm to speed up -# Strided.permutedims a bit (see: -# https://github.com/Jutho/Strided.jl/issues/15) -function isperm(p::NTuple{N}) where {N} - N < 6 && return Base.isperm(p) - used = @MVector zeros(Bool, N) - for a in p - (0 < a <= N) && (used[a] ⊻= true) || return false - end - return true -end - -""" - is_trivial_permutation(P) - -Determine if P is a trivial permutation. -""" -function is_trivial_permutation(P) - #isperm(P) || error("Input is not a permutation") - # TODO: use `all(n->P[n]==n,1:length(P))`? - N = length(P) - for n in 1:N - @inbounds P[n] != n && return false - end - return true -end - -# Combine a bunch of tuples -@inline flatten(x) = x -@inline flatten(x, y) = (x..., y...) -@inline flatten(x, y, z...) = (x..., flatten(y, z...)...) - -function _deleteat(t, pos, i) - i < pos && return t[i] - return t[i + 1] -end - -function deleteat(t::Tuple, pos::Integer) - return ntuple(i -> _deleteat(t, pos, i), Val(length(t) - 1)) -end - -deleteat(t::Tuple, I::Tuple{Int}) = deleteat(t, I[1]) -function deleteat(t::Tuple, I::Tuple{Int,Int,Vararg{Int}}) - return deleteat_sorted(t, TupleTools.sort(I; rev=true)) -end - -deleteat_sorted(t::Tuple, pos::Int64) = deleteat(t, pos[1]) -deleteat_sorted(t::Tuple, pos::Tuple{Int}) = deleteat(t, pos[1]) -function deleteat_sorted(t::Tuple, pos::NTuple{N,Int}) where {N} - return deleteat_sorted(deleteat_sorted(t, pos[1]), Base.tail(pos)) -end - -# Make a slice of the block on the specified dimensions -# Make this a generic tupletools function (TupleTools.jl calls it getindices) -function getindices(t::Tuple, I::NTuple{N,Int}) where {N} - return ntuple(i -> t[I[i]], Val(N)) -end - -function _insertat(t, pos, n_insert, val, i) - if i < pos - return t[i] - elseif i > pos + n_insert - 1 - return t[i - n_insert + 1] - end - return val[i - pos + 1] -end - -""" - insertat - -Remove the value at pos and insert the elements in val -""" -function insertat(t::Tuple, val::Tuple, pos::Integer) - N, M = length(t), length(val) - @boundscheck checkbounds(Base.OneTo(N), pos) - return ntuple(i -> _insertat(t, pos, M, val, i), Val(N + M - 1)) -end - -insertat(t::Tuple, val, pos::Integer) = insertat(t, tuple(val), pos) - -function _insertafter(t, pos, n_insert, val, i) - if i <= pos - return t[i] - elseif i > pos + n_insert - return t[i - n_insert] - end - return val[i - pos] -end - -""" - insertafter(t, val, pos) - -Insert the elements in val after the position pos -""" -function insertafter(t::NTuple{N}, val::NTuple{M}, pos::Integer) where {N,M} - return ntuple(i -> _insertafter(t, pos, M, val, i), Val(N + M)) -end - -function insertafter(t::NTuple{N}, val, pos::Integer) where {N} - return insertafter(t, tuple(val), pos) -end - -""" - isdisjoint(s1, s2) - -Determine if s1 and s2 
have no overlapping elements.
-"""
-function isdisjoint(s1, s2)
- for i1 in 1:length(s1)
- for i2 in 1:length(s2)
- s1[i1] == s2[i2] && return false
- end
- end
- return true
-end
-
-"""
- diff(t::Tuple)
-
-For a tuple of length N, return a tuple of length N-1
-where element i is t[i+1] - t[i].
-"""
-diff(t::NTuple{N}) where {N} = ntuple(i -> t[i + 1] - t[i], Val(N - 1))
-
-function count_unique(labelsT1, labelsT2)
- count = 0
- for l1 in labelsT1
- l1 ∉ labelsT2 && (count += 1)
- end
- return count
-end
-
-function count_common(labelsT1, labelsT2)
- count = 0
- for l1 in labelsT1
- l1 ∈ labelsT2 && (count += 1)
- end
- return count
-end
-
-function intersect_positions(labelsT1, labelsT2)
- for i1 in 1:length(labelsT1)
- for i2 in 1:length(labelsT2)
- if labelsT1[i1] == labelsT2[i2]
- return i1, i2
- end
- end
- end
- return nothing
-end
-
-function is_replacement(labelsT1, labelsT2)
- return count_unique(labelsT1, labelsT2) == 1 && count_common(labelsT1, labelsT2) == 1
-end
-
-function is_combiner(labelsT1, labelsT2)
- return count_unique(labelsT1, labelsT2) == 1 && count_common(labelsT1, labelsT2) > 1
-end
-
-function is_uncombiner(labelsT1, labelsT2)
- return count_unique(labelsT1, labelsT2) > 1 && count_common(labelsT1, labelsT2) == 1
-end
diff --git a/NDTensors/test/NDTensorsTestUtils/NDTensorsTestUtils.jl b/NDTensors/test/NDTensorsTestUtils/NDTensorsTestUtils.jl
deleted file mode 100644
index 945c6653eb..0000000000
--- a/NDTensors/test/NDTensorsTestUtils/NDTensorsTestUtils.jl
+++ /dev/null
@@ -1,10 +0,0 @@
-module NDTensorsTestUtils
-
-using NDTensors
-
-include("device_list.jl")
-include("is_supported_eltype.jl")
-
-default_rtol(elt::Type) = 10^(0.75 * log10(eps(real(elt))))
-
-end
diff --git a/NDTensors/test/NDTensorsTestUtils/device_list.jl b/NDTensors/test/NDTensorsTestUtils/device_list.jl
deleted file mode 100644
index 9294956fc2..0000000000
--- a/NDTensors/test/NDTensorsTestUtils/device_list.jl
+++ /dev/null
@@ -1,59 +0,0 @@
-using Pkg: Pkg
-using NDTensors: NDTensors
-
-if "cuda" in ARGS || "all" in ARGS
- ## Right now adding CUDA during Pkg.test results in
- ## compat issues. I am adding it back to test/Project.toml
- Pkg.add("CUDA")
- using CUDA: CUDA
-end
-if "rocm" in ARGS || "all" in ARGS
- ## Warning AMDGPU does not work in Julia versions below 1.8
- Pkg.add("AMDGPU")
- using AMDGPU: AMDGPU
-end
-if "metal" in ARGS || "all" in ARGS
- ## Warning Metal does not work in Julia versions below 1.8
- Pkg.add("Metal")
- using Metal: Metal
-end
-if "cutensor" in ARGS || "all" in ARGS
- Pkg.add("CUDA")
- Pkg.add("cuTENSOR")
- using CUDA: CUDA
- using cuTENSOR: cuTENSOR
-end
-
-using JLArrays: JLArrays, jl
-
-function devices_list(test_args)
- devs = Vector{Function}(undef, 0)
- if isempty(test_args) || "base" in test_args
- push!(devs, NDTensors.cpu)
- ## Skip jl on lower versions of Julia for now
- ## all linear algebra is failing on Julia 1.6 with JLArrays
- if VERSION > v"1.7"
- push!(devs, jl)
- end
- end
-
- if "cuda" in test_args || "cutensor" in test_args || "all" in test_args
- if CUDA.functional()
- push!(devs, NDTensors.CUDAExtensions.cu)
- else
- println(
- "Warning: CUDA.jl is not functional on this architecture and tests will be skipped."
- ) - end - end - - if "rocm" in test_args || "all" in test_args - push!(devs, NDTensors.AMDGPUExtensions.roc) - end - - if "metal" in test_args || "all" in test_args - push!(devs, NDTensors.MetalExtensions.mtl) - end - - return devs -end diff --git a/NDTensors/test/NDTensorsTestUtils/is_supported_eltype.jl b/NDTensors/test/NDTensorsTestUtils/is_supported_eltype.jl deleted file mode 100644 index 553c87954c..0000000000 --- a/NDTensors/test/NDTensorsTestUtils/is_supported_eltype.jl +++ /dev/null @@ -1,6 +0,0 @@ -using NDTensors.MetalExtensions: mtl -is_supported_eltype(dev, elt::Type) = true -is_supported_eltype(dev::typeof(mtl), elt::Type{Float64}) = false -function is_supported_eltype(dev::typeof(mtl), elt::Type{<:Complex}) - return is_supported_eltype(dev, real(elt)) -end diff --git a/NDTensors/test/Project.toml b/NDTensors/test/Project.toml deleted file mode 100644 index 79a4eabfd4..0000000000 --- a/NDTensors/test/Project.toml +++ /dev/null @@ -1,34 +0,0 @@ -[deps] -Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" -ArrayLayouts = "4c555306-a7a7-4459-81d9-ec55ddd5c99a" -BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" -Combinatorics = "861a8166-3701-5b0c-9a16-15d98fcdc6aa" -Compat = "34da2185-b29b-5c13-b0c7-acf172513d20" -Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4" -EllipsisNotation = "da5c29d0-fa7d-589e-88eb-ea29b0a81949" -FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b" -GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" -JLArrays = "27aeb0d3-9eb9-45fb-866b-73c2ecf80fcb" -LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -MappedArrays = "dbb5928d-eab1-5f90-85c2-b9b0edb7c900" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" -Octavian = "6fd5a793-0b7e-452c-907f-f8bfe9c57db4" -Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" -Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" -SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f" -SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" -StridedViews = "4db3bf67-4bd7-4b4e-b153-31dc3fb37143" -TensorOperations = "6aa20fa7-93e2-5fca-9bc0-fbd0db3c71a2" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" -Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" - -[compat] -Metal = "1.1.0" -cuTENSOR = "2.0" - -[extras] -AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e" -CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" -Metal = "dde4c033-4e86-420c-a63e-0dd931031962" -TBLIS = "48530278-0828-4a49-9772-0f3830dfa1e9" -cuTENSOR = "011b41b2-24ef-40a8-b3eb-fa098493e9e1" diff --git a/NDTensors/test/backup/arraytensor/Project.toml b/NDTensors/test/backup/arraytensor/Project.toml deleted file mode 100644 index 2238172387..0000000000 --- a/NDTensors/test/backup/arraytensor/Project.toml +++ /dev/null @@ -1,3 +0,0 @@ -[deps] -BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" diff --git a/NDTensors/test/backup/arraytensor/array.jl b/NDTensors/test/backup/arraytensor/array.jl deleted file mode 100644 index 95c44925a3..0000000000 --- a/NDTensors/test/backup/arraytensor/array.jl +++ /dev/null @@ -1,51 +0,0 @@ -@eval module $(gensym()) -using LinearAlgebra: svd -using NDTensors: array, contract, inds, storage, storagetype, tensor -using Random: randn! 
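-# A minimal sketch of the wrapping pattern these tests exercise (illustrative
-# only, using just the names imported above):
-#   D = randn(2, 3)         # plain Array holding the data
-#   T = tensor(D, (2, 3))   # wrap it as a Tensor with explicit inds
-#   T[1, 1] == D[1, 1]      # indexing passes through to the wrapped Array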
-using Test: @test, @testset, @test_broken -@testset "Tensor wrapping Array" begin - is1 = (2, 3) - D1 = randn(is1) - - is2 = (3, 4) - D2 = randn(is2) - - T1 = tensor(D1, is1) - T2 = tensor(D2, is2) - - @test T1[1, 1] == D1[1, 1] - - x = rand() - T1[1, 1] = x - - @test T1[1, 1] == x - @test array(T1) == D1 - @test storagetype(T1) <: Matrix{Float64} - @test storage(T1) == D1 - @test eltype(T1) == eltype(D1) - @test inds(T1) == is1 - - R = T1 * T2 - @test storagetype(R) <: Matrix{Float64} - @test Array(R) ≈ Array(T1) * Array(T2) - - T1r = randn!(similar(T1)) - @test Array(T1r + T1) ≈ Array(T1r) + Array(T1) - @test Array(permutedims(T1, (2, 1))) ≈ permutedims(Array(T1), (2, 1)) - - U, S, V = svd(T1) - - # TODO: Should this work? Currently broken. - ## I was able to fix this test but labels have to match up - ## If you do U * S * V it fails because (U * S) is (2,2) and V is (3,2) - @test U * S * V' ≈ T1 - # TODO: Should this require labels, or use existing labels? - @test_broken contract(contract(U, (1, -1), S, (-1, 2)), (1, -1), V, (2, -1)) ≈ T1 - - T12 = contract(T1, (1, -1), T2, (-1, 2)) - @test T12 ≈ T1 * T2 - - D12 = contract(D1, (1, -1), D2, (-1, 2)) - @test D12 ≈ Array(T12) -end -end diff --git a/NDTensors/test/backup/arraytensor/blocksparsearray.jl b/NDTensors/test/backup/arraytensor/blocksparsearray.jl deleted file mode 100644 index 5d508608a5..0000000000 --- a/NDTensors/test/backup/arraytensor/blocksparsearray.jl +++ /dev/null @@ -1,52 +0,0 @@ -using NDTensors -using NDTensors.BlockSparseArrays -using BlockArrays: BlockArrays -using LinearAlgebra -using Test - -using NDTensors: storage, storagetype - -@testset "Tensor wrapping BlockSparseArray" begin - is1 = ([1, 1], [1, 2]) - D1 = BlockSparseArray( - [BlockArrays.Block(1, 1), BlockArrays.Block(2, 2)], [randn(1, 1), randn(1, 2)], is1 - ) - - is2 = ([1, 2], [2, 2]) - D2 = BlockSparseArray( - [BlockArrays.Block(1, 1), BlockArrays.Block(2, 2)], [randn(1, 2), randn(2, 2)], is2 - ) - - T1 = tensor(D1, is1) - T2 = tensor(D2, is2) - - @test T1[1, 1] == D1[1, 1] - - x = rand() - T1[1, 1] = x - - @test T1[1, 1] == x - @test array(T1) == D1 - @test storagetype(T1) <: BlockSparseArray{Float64,2} - @test storage(T1) == D1 - @test eltype(T1) == eltype(D1) - @test inds(T1) == is1 - - @test_broken R = T1 * T2 - @test_broken storagetype(R) <: Matrix{Float64} - @test_broken Array(R) ≈ Array(T1) * Array(T2) - - @test_broken T1r = randn!(similar(T1)) - @test_broken Array(T1r + T1) ≈ Array(T1r) + Array(T1) - @test_broken Array(permutedims(T1, (2, 1))) ≈ permutedims(Array(T1), (2, 1)) - - # TODO: Not implemented yet. 
- ## U, S, V = svd(T1) - ## @test U * S * V ≈ T1 - - @test_broken T12 = contract(T1, (1, -1), T2, (-1, 2)) - @test_broken T12 ≈ T1 * T2 - - @test_broken D12 = contract(D1, (1, -1), D2, (-1, 2)) - @test_broken D12 ≈ Array(T12) -end diff --git a/NDTensors/test/backup/arraytensor/diagonalarray.jl b/NDTensors/test/backup/arraytensor/diagonalarray.jl deleted file mode 100644 index 1e80d30fd1..0000000000 --- a/NDTensors/test/backup/arraytensor/diagonalarray.jl +++ /dev/null @@ -1,25 +0,0 @@ -@eval module $(gensym()) -using NDTensors: contract, tensor -using NDTensors.SparseArraysBase: densearray -using NDTensors.DiagonalArrays: DiagonalArray -using Test: @test, @testset -@testset "Tensor wrapping DiagonalArray" begin - D = DiagonalArray(randn(3), 3, 4, 5) - Dᵈ = densearray(D) - A = randn(3, 4, 5) - - for convert_to_dense in (true, false) - @test contract(D, (-1, -2, -3), A, (-1, -2, -3); convert_to_dense) ≈ - contract(Dᵈ, (-1, -2, -3), A, (-1, -2, -3)) - @test contract(D, (-1, -2, 1), A, (-1, -2, 2); convert_to_dense) ≈ - contract(Dᵈ, (-1, -2, 1), A, (-1, -2, 2)) - end - - # Tensor tests - Dᵗ = tensor(D, size(D)) - Dᵈᵗ = tensor(Dᵈ, size(D)) - Aᵗ = tensor(A, size(A)) - @test contract(Dᵗ, (-1, -2, -3), Aᵗ, (-1, -2, -3)) ≈ - contract(Dᵈᵗ, (-1, -2, -3), Aᵗ, (-1, -2, -3)) -end -end diff --git a/NDTensors/test/backup/arraytensor/runtests.jl b/NDTensors/test/backup/arraytensor/runtests.jl deleted file mode 100644 index a423524d05..0000000000 --- a/NDTensors/test/backup/arraytensor/runtests.jl +++ /dev/null @@ -1,8 +0,0 @@ -@eval module $(gensym()) -using Test: @testset -@testset "Tensor wrapping AbstractArrays $(f)" for f in [ - "array.jl", "blocksparsearray.jl", "diagonalarray.jl" -] - include(f) -end -end diff --git a/NDTensors/test/broken/readwrite.jl b/NDTensors/test/broken/readwrite.jl deleted file mode 100644 index 50576d3aa5..0000000000 --- a/NDTensors/test/broken/readwrite.jl +++ /dev/null @@ -1,75 +0,0 @@ -## TODO this file was not included in the previous testing -## and appears to be out of date with current code. 
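-## Each test below is a write/read roundtrip through HDF5. A sketch of the
-## pattern (assuming the HDF5.jl API as used in this file):
-##   fo = h5open("data.h5", "w"); write(fo, "D", D.store); close(fo)
-##   fi = h5open("data.h5", "r"); rD = read(fi, "D", Dense{Float64}); close(fi)
-##   rD ≈ D.store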
-using NDTensors, Test -using HDF5 - -@testset "Write to Disk and Read from Disk" begin - @testset "HDF5 readwrite Dense storage" begin - # Real case - - D = randomTensor(3, 4) - - fo = h5open("data.h5", "w") - write(fo, "D", D.store) - close(fo) - - fi = h5open("data.h5", "r") - rDstore = read(fi, "D", Dense{Float64}) - close(fi) - @test rDstore ≈ D.store - - # Complex case - - D = randomTensor(ComplexF64, 3, 4) - - fo = h5open("data.h5", "w") - write(fo, "D", D.store) - close(fo) - - fi = h5open("data.h5", "r") - rDstore = read(fi, "D", Dense{ComplexF64}) - close(fi) - @test rDstore ≈ D.store - end - - @testset "HDF5 readwrite BlockSparse storage" begin - # Indices - indsA = ([2, 3], [4, 5]) - - # Locations of non-zero blocks - locs = [(1, 2), (2, 1)] - - # Real case - - B = randomBlockSparseTensor(locs, indsA) - - fo = h5open("data.h5", "w") - write(fo, "B", B.store) - close(fo) - - fi = h5open("data.h5", "r") - rBstore = read(fi, "B", BlockSparse{Float64}) - close(fi) - @test rBstore ≈ B.store - - # Complex case - - B = randomBlockSparseTensor(ComplexF64, locs, indsA) - - fo = h5open("data.h5", "w") - write(fo, "B", B.store) - close(fo) - - fi = h5open("data.h5", "r") - rBstore = read(fi, "B", BlockSparse{ComplexF64}) - close(fi) - @test rBstore ≈ B.store - end - - # - # Clean up the test hdf5 file - # - rm("data.h5"; force=true) -end - -nothing diff --git a/NDTensors/test/lib/Project.toml b/NDTensors/test/lib/Project.toml deleted file mode 100644 index fe5a5dbc71..0000000000 --- a/NDTensors/test/lib/Project.toml +++ /dev/null @@ -1,11 +0,0 @@ -[deps] -Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" -BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" -Combinatorics = "861a8166-3701-5b0c-9a16-15d98fcdc6aa" -Compat = "34da2185-b29b-5c13-b0c7-acf172513d20" -EllipsisNotation = "da5c29d0-fa7d-589e-88eb-ea29b0a81949" -FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b" -GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" -SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f" -TensorOperations = "6aa20fa7-93e2-5fca-9bc0-fbd0db3c71a2" diff --git a/NDTensors/test/lib/runtests.jl b/NDTensors/test/lib/runtests.jl deleted file mode 100644 index d15a163960..0000000000 --- a/NDTensors/test/lib/runtests.jl +++ /dev/null @@ -1,32 +0,0 @@ -@eval module $(gensym()) -using NDTensors: NDTensors -using Test: @testset -@testset "Test NDTensors lib $lib" for lib in [ - "AllocateData", - "AMDGPUExtensions", - "BackendSelection", - "BaseExtensions", - "BlockSparseArrays", - "BroadcastMapConversion", - "CUDAExtensions", - "DiagonalArrays", - "GradedAxes", - "GPUArraysCoreExtensions", - "LabelledNumbers", - "MetalExtensions", - "NamedDimsArrays", - "NestedPermutedDimsArrays", - "SmallVectors", - "SortedSets", - "SparseArraysBase", - "SymmetrySectors", - "TagSets", - "TensorAlgebra", - "TypeParameterAccessors", - "UnallocatedArrays", - "UnspecifiedTypes", - "Expose", -] - include(joinpath(pkgdir(NDTensors), "src", "lib", lib, "test", "runtests.jl")) -end -end diff --git a/NDTensors/test/runtests.jl b/NDTensors/test/runtests.jl deleted file mode 100644 index b073a05e2d..0000000000 --- a/NDTensors/test/runtests.jl +++ /dev/null @@ -1,20 +0,0 @@ -using SafeTestsets: @safetestset - -@safetestset "NDTensors" begin - using Test: @testset - using NDTensors: NDTensors - @testset "$(@__DIR__)" begin - filenames = filter(readdir(@__DIR__)) do f - startswith("test_")(f) && endswith(".jl")(f) - end - for dir in ["lib"] - push!(filenames, joinpath(dir, "runtests.jl")) - 
end - @testset "Test $(@__DIR__)/$filename" for filename in filenames - println("Running $(@__DIR__)/$filename") - include(filename) - end - end -end - -nothing diff --git a/NDTensors/test/test_blocksparse.jl b/NDTensors/test/test_blocksparse.jl deleted file mode 100644 index 30a0e82bf5..0000000000 --- a/NDTensors/test/test_blocksparse.jl +++ /dev/null @@ -1,356 +0,0 @@ -@eval module $(gensym()) -using GPUArraysCore: @allowscalar -using LinearAlgebra: Hermitian, exp, norm, svd -using NDTensors: - NDTensors, - BlockSparseTensor, - array, - blockdims, - blockoffsets, - blockview, - data, - dense, - diag, - diaglength, - dims, - eachnzblock, - inds, - isblocknz, - nnz, - nnzblocks, - randomBlockSparseTensor, - store, - storage -include("NDTensorsTestUtils/NDTensorsTestUtils.jl") -using .NDTensorsTestUtils: default_rtol, devices_list, is_supported_eltype -using Random: randn! -using Test: @test, @test_throws, @testset - -@testset "BlockSparseTensor basic functionality" begin - C = nothing - - @testset "test device: $dev, eltype: $elt" for dev in devices_list(copy(ARGS)), - elt in (Float32, Float64) - - if !is_supported_eltype(dev, elt) - continue - end - # Indices - indsA = ([2, 3], [4, 5]) - - # Locations of non-zero blocks - locs = [(1, 2), (2, 1)] - - A = dev(BlockSparseTensor{elt}(locs, indsA...)) - randn!(A) - - @test blockdims(A, (1, 2)) == (2, 5) - @test blockdims(A, (2, 1)) == (3, 4) - @test !isempty(A) - @test nnzblocks(A) == 2 - @test nnz(A) == 2 * 5 + 3 * 4 - @test inds(A) == ([2, 3], [4, 5]) - @test isblocknz(A, (2, 1)) - @test isblocknz(A, (1, 2)) - @test !isblocknz(A, (1, 1)) - @test !isblocknz(A, (2, 2)) - dA = diag(A) - @test @allowscalar dA ≈ diag(dense(A)) - @test sum(A) ≈ sum(array(A)) - @test prod(A) ≈ prod(array(A)) - - # Test different ways of getting nnz - @test nnz(blockoffsets(A), inds(A)) == nnz(A) - - B = 2 * A - @test B[1, 1] == 2 * A[1, 1] - @test nnz(A) == 2 * 5 + 3 * 4 - @test nnz(B) == 2 * 5 + 3 * 4 - @test nnzblocks(A) == 2 - @test nnzblocks(B) == 2 - - B = A / 2 - @test B[1, 1] == A[1, 1] / 2 - @test nnz(A) == 2 * 5 + 3 * 4 - @test nnz(B) == 2 * 5 + 3 * 4 - @test nnzblocks(A) == 2 - @test nnzblocks(B) == 2 - - @allowscalar begin - A[1, 5] = 15 - A[2, 5] = 25 - - @test A[1, 1] == 0 - @test A[1, 5] == 15 - @test A[2, 5] == 25 - end - D = dense(A) - - @allowscalar begin - @test D == A - - for I in eachindex(A) - @test D[I] == A[I] - end - end - - A12 = blockview(A, (1, 2)) - - @test dims(A12) == (2, 5) - - @allowscalar for I in eachindex(A12) - @test A12[I] == A[I + CartesianIndex(0, 4)] - end - - B = dev(BlockSparseTensor(elt, undef, locs, indsA)) - randn!(B) - - C = A + B - - @allowscalar for I in eachindex(C) - @test C[I] == A[I] + B[I] - end - Cp = NDTensors.map_diag(i -> 2 * i, C) - @allowscalar for i in 1:diaglength(Cp) - @test Cp[i, i] == 2 * C[i, i] - end - - Ap = permutedims(A, (2, 1)) - - @test blockdims(Ap, (1, 2)) == (4, 3) - @test blockdims(Ap, (2, 1)) == (5, 2) - @test nnz(A) == nnz(Ap) - @test nnzblocks(A) == nnzblocks(Ap) - - @allowscalar for I in eachindex(C) - @test A[I] == Ap[NDTensors.permute(I, (2, 1))] - end - - A = dev(BlockSparseTensor(complex(elt), locs, indsA)) - randn!(A) - @test conj(data(store(A))) == data(store(conj(A))) - @test typeof(conj(A)) <: BlockSparseTensor - - @testset "No blocks" begin - T = dev(BlockSparseTensor{elt}(Tuple{Int,Int}[], [2, 2], [2, 2])) - @test nnzblocks(T) == 0 - @test size(T) == (4, 4) - @test length(T) == 16 - @test !isempty(T) - @test isempty(storage(T)) - @test nnz(T) == 0 - @test eltype(T) == elt - 
@test norm(T) == 0 - end - - @testset "Empty" begin - T = dev(BlockSparseTensor{elt}(Tuple{Int,Int}[], Int[], Int[])) - @test nnzblocks(T) == 0 - @test size(T) == (0, 0) - @test length(T) == 0 - @test isempty(T) - @test isempty(storage(T)) - @test nnz(T) == 0 - @test eltype(T) == elt - @test norm(T) == 0 - end - - @testset "Random constructor" begin - T = dev(randomBlockSparseTensor(elt, [(1, 1), (2, 2)], ([2, 2], [2, 2]))) - @test nnzblocks(T) == 2 - @test nnz(T) == 8 - @test eltype(T) == elt - @test norm(T) ≉ 0 - - Tc = dev(randomBlockSparseTensor(complex(elt), [(1, 1), (2, 2)], ([2, 2], [2, 2]))) - @test nnzblocks(Tc) == 2 - @test nnz(Tc) == 8 - @test eltype(Tc) == complex(elt) - @test norm(Tc) ≉ 0 - end - - @testset "Complex Valued Operations" begin - T = dev(randomBlockSparseTensor(complex(elt), [(1, 1), (2, 2)], ([2, 2], [2, 2]))) - rT = real(T) - @test eltype(rT) == elt - @test nnzblocks(rT) == nnzblocks(T) - iT = imag(T) - @test eltype(iT) == elt - @test nnzblocks(iT) == nnzblocks(T) - @test norm(rT)^2 + norm(iT)^2 ≈ norm(T)^2 - - cT = conj(T) - @test eltype(cT) == complex(elt) - @test nnzblocks(cT) == nnzblocks(T) - end - - @testset "similartype regression test" begin - # Regression test for issue seen in: - # https://github.com/ITensor/ITensorInfiniteMPS.jl/pull/77 - # Previously, `similartype` wasn't using information about the dimensions - # properly and was returning a `BlockSparse` storage of the dimensions - # of the input tensor. - T = dev(BlockSparseTensor(elt, [(1, 1)], ([2], [2]))) - @test NDTensors.ndims( - NDTensors.storagetype(NDTensors.similartype(typeof(T), ([2], [2], [2]))) - ) == 3 - end - - @testset "Random constructor" begin - T = dev(randomBlockSparseTensor(elt, [(1, 1), (2, 2)], ([2, 2], [2, 2]))) - @test nnzblocks(T) == 2 - @test nnz(T) == 8 - @test eltype(T) == elt - @test norm(T) ≉ 0 - - Tc = dev(randomBlockSparseTensor(complex(elt), [(1, 1), (2, 2)], ([2, 2], [2, 2]))) - @test nnzblocks(Tc) == 2 - @test nnz(Tc) == 8 - @test eltype(Tc) == complex(elt) - @test norm(Tc) ≉ 0 - end - - @testset "permute_combine" begin - indsA = ([2, 3], [4, 5], [6, 7, 8]) - locsA = [(2, 1, 1), (1, 2, 1), (2, 2, 3)] - A = dev(BlockSparseTensor{elt}(locsA, indsA...)) - randn!(A) - - B = NDTensors.permute_combine(A, 3, (2, 1)) - @test nnzblocks(A) == nnzblocks(B) - @test nnz(A) == nnz(B) - - Ap = NDTensors.permutedims(A, (3, 2, 1)) - - @allowscalar for (bAp, bB) in zip(eachnzblock(Ap), eachnzblock(B)) - blockAp = blockview(Ap, bAp) - blockB = blockview(B, bB) - @test reshape(blockAp, size(blockB)) == blockB - end - end - end - - @testset "BlockSparseTensor setindex! 
add block" begin - T = BlockSparseTensor([2, 3], [4, 5]) - - @allowscalar for I in eachindex(T) - @test T[I] == 0.0 - end - @test nnz(T) == 0 - @test nnzblocks(T) == 0 - @test !isblocknz(T, (1, 1)) - @test !isblocknz(T, (2, 1)) - @test !isblocknz(T, (1, 2)) - @test !isblocknz(T, (2, 2)) - - T[1, 1] = 1.0 - - @test T[1, 1] == 1.0 - @test nnz(T) == 8 - @test nnzblocks(T) == 1 - @test isblocknz(T, (1, 1)) - @test !isblocknz(T, (2, 1)) - @test !isblocknz(T, (1, 2)) - @test !isblocknz(T, (2, 2)) - - T[4, 8] = 2.0 - - @test T[4, 8] == 2.0 - @test nnz(T) == 8 + 15 - @test nnzblocks(T) == 2 - @test isblocknz(T, (1, 1)) - @test !isblocknz(T, (2, 1)) - @test !isblocknz(T, (1, 2)) - @test isblocknz(T, (2, 2)) - - T[1, 6] = 3.0 - - @test T[1, 6] == 3.0 - @test nnz(T) == 8 + 15 + 10 - @test nnzblocks(T) == 3 - @test isblocknz(T, (1, 1)) - @test !isblocknz(T, (2, 1)) - @test isblocknz(T, (1, 2)) - @test isblocknz(T, (2, 2)) - - T[4, 2] = 4.0 - - @test T[4, 2] == 4.0 - @test nnz(T) == 8 + 15 + 10 + 12 - @test nnzblocks(T) == 4 - @test isblocknz(T, (1, 1)) - @test isblocknz(T, (2, 1)) - @test isblocknz(T, (1, 2)) - @test isblocknz(T, (2, 2)) - end - - @testset "svd on $dev, eltype: $elt" for dev in devices_list(copy(ARGS)), - elt in (Float32, Float64) - - if !is_supported_eltype(dev, elt) - continue - end - @testset "svd example 1" begin - A = dev(BlockSparseTensor{elt}([(2, 1), (1, 2)], [2, 2], [2, 2])) - randn!(A) - U, S, V = svd(A) - @test @allowscalar array(U) * array(S) * array(V)' ≈ array(A) - atol = default_rtol(elt) - end - - @testset "svd example 2" begin - A = dev(BlockSparseTensor{elt}([(1, 2), (2, 3)], [2, 2], [3, 2, 3])) - randn!(A) - U, S, V = svd(A) - @test @allowscalar array(U) * array(S) * array(V)' ≈ array(A) - atol = default_rtol(elt) - end - - @testset "svd example 3" begin - A = dev(BlockSparseTensor{elt}([(2, 1), (3, 2)], [3, 2, 3], [2, 2])) - randn!(A) - U, S, V = svd(A) - @test @allowscalar array(U) * array(S) * array(V)' ≈ array(A) - atol = default_rtol(elt) - end - - @testset "svd example 4" begin - A = dev(BlockSparseTensor{elt}([(2, 1), (3, 2)], [2, 3, 4], [5, 6])) - randn!(A) - U, S, V = svd(A) - @test @allowscalar array(U) * array(S) * array(V)' ≈ array(A) - atol = default_rtol(elt) - end - - @testset "svd example 5" begin - A = dev(BlockSparseTensor{elt}([(1, 2), (2, 3)], [5, 6], [2, 3, 4])) - randn!(A) - U, S, V = svd(A) - @test @allowscalar array(U) * array(S) * array(V)' ≈ array(A) - atol = default_rtol(elt) - end - end - - @testset "exp, eltype: $elt" for elt in (Float32, Float64) - A = BlockSparseTensor{elt}([(1, 1), (2, 2)], [2, 4], [2, 4]) - randn!(A) - expT = exp(A) - @test array(expT) ≈ exp(array(A)) - atol = default_rtol(elt) - - # Hermitian case - A = BlockSparseTensor(complex(elt), [(1, 1), (2, 2)], ([2, 2], [2, 2])) - randn!(A) - Ah = BlockSparseTensor(complex(elt), undef, [(1, 1), (2, 2)], ([2, 2], [2, 2])) - for bA in eachnzblock(A) - b = blockview(A, bA) - blockview(Ah, bA) .= b + b' - end - expTh = exp(Hermitian(Ah)) - @test array(expTh) ≈ exp(Hermitian(array(Ah))) rtol = default_rtol(eltype(Ah)) - - A = BlockSparseTensor{elt}([(2, 1), (1, 2)], [2, 2], [2, 2]) - @test_throws ErrorException exp(A) - end -end -end diff --git a/NDTensors/test/test_combiner.jl b/NDTensors/test/test_combiner.jl deleted file mode 100644 index b9e6d3e77c..0000000000 --- a/NDTensors/test/test_combiner.jl +++ /dev/null @@ -1,122 +0,0 @@ -@eval module $(gensym()) -using GPUArraysCore: @allowscalar -using NDTensors: - NDTensors, - Block, - BlockOffsets, - BlockSparse, - 
BlockSparseTensor, - Combiner, - Dense, - DenseTensor, - contract, - dim, - dims, - tensor -include("NDTensorsTestUtils/NDTensorsTestUtils.jl") -using .NDTensorsTestUtils: devices_list, is_supported_eltype -using Test: @testset, @test, @test_throws - -# Testing generic block indices -struct Index{Space} - space::Space -end -NDTensors.dim(i::Index) = sum(b -> last(b), i.space) -NDTensors.nblocks(i::Index) = length(i.space) -NDTensors.blockdim(i::Index, block::Integer) = last(i.space[block]) -function NDTensors.outer(i1::Index, i2::Index) - return Index(vec( - map(Iterators.product(i1.space, i2.space)) do (b1, b2) - return first(b1) + first(b2) => last(b1) * last(b2) - end, - )) -end -NDTensors.permuteblocks(i::Index, perm::Vector{Int}) = Index(i.space[perm]) - -struct QN end -Base.:+(q1::QN, q2::QN) = QN() - -@testset "CombinerTensor basic functionality" begin - @testset "test device: $dev, eltype: $elt" for dev in devices_list(copy(ARGS)), - elt in (Float64, Float32) - - if !is_supported_eltype(dev, elt) - continue - end - @testset "Dense * Combiner" begin - d = 2 - input_tensor_inds = (d, d, d) - combiner_tensor_inds = (d^2, d, d) - output_tensor_inds = (d, d^2) - - input_tensor = dev(tensor(Dense(randn(elt, input_tensor_inds)), input_tensor_inds)) - combiner_tensor = dev(tensor(Combiner([1], [1]), combiner_tensor_inds)) - - output_tensor = contract(input_tensor, (1, -1, -2), combiner_tensor, (2, -1, -2)) - @test output_tensor isa DenseTensor - @test dims(output_tensor) == output_tensor_inds - @allowscalar for i in 1:length(input_tensor) - @test input_tensor[i] == output_tensor[i] - end - - # Test uncombining - new_input_tensor = contract(output_tensor, (1, -1), combiner_tensor, (-1, 2, 3)) - @test NDTensors.cpu(new_input_tensor) == NDTensors.cpu(input_tensor) - - # Catch invalid combining - input_tensor_inds = (d,) - input_tensor = dev(tensor(Dense(randn(elt, input_tensor_inds)), input_tensor_inds)) - combiner_tensor = dev(tensor(Combiner([1], [1]), combiner_tensor_inds)) - @test_throws Any contract(input_tensor, (-1,), combiner_tensor, (1, -1, -2)) - end - - ind_constructors = (dim -> [dim], dim -> Index([QN() => dim])) - #TODO cu doesn't work with blocksparse yet - @testset "BlockSparse * Combiner" for ind_constructor in ind_constructors - d = 2 - i, j, k = map(ind_constructor, (d, d, d)) - c = ind_constructor(d^2) - - input_tensor_inds = (i, j, k) - combiner_tensor_inds = (c, j, k) - output_tensor_inds = (c, i) - - input_tensor = dev( - tensor( - BlockSparse( - randn(elt, dim(input_tensor_inds)), BlockOffsets{3}([Block(1, 1, 1)], [0]) - ), - input_tensor_inds, - ), - ) - combiner_tensor = tensor(Combiner([1], [1]), combiner_tensor_inds) - - output_tensor = contract(input_tensor, (1, -1, -2), combiner_tensor, (2, -1, -2)) - @test output_tensor isa BlockSparseTensor - @test dims(output_tensor) == dims(output_tensor_inds) - output_tensor = permutedims(output_tensor, (2, 1)) - @allowscalar for i in 1:length(input_tensor) - @test input_tensor[i] == output_tensor[i] - end - - # Test uncombining. Broken for inds that are not `Index`. 
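- # (Uncombining contracts the combined index back against the same combiner;
- # the uncombined result can come back with its indices permuted, which is why
- # it is permuted below before comparing to the original tensor.)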
- new_input_tensor = contract(output_tensor, (1, -1), combiner_tensor, (-1, 2, 3))
- new_input_tensor = permutedims(new_input_tensor, (3, 1, 2))
- @test NDTensors.cpu(new_input_tensor) == NDTensors.cpu(input_tensor)
-
- # Catch invalid combining
- invalid_input_tensor_inds = (k,)
- invalid_input_tensor = dev(
- tensor(
- BlockSparse(
- randn(elt, dim(invalid_input_tensor_inds)), BlockOffsets{1}([Block(1)], [0])
- ),
- invalid_input_tensor_inds,
- ),
- )
- combiner_tensor = tensor(Combiner([1], [1]), combiner_tensor_inds)
- @test_throws Any contract(invalid_input_tensor, (-1,), combiner_tensor, (1, 2, -1))
- end
- end
-end
-end
diff --git a/NDTensors/test/test_dense.jl b/NDTensors/test/test_dense.jl
deleted file mode 100644
index c2b327811a..0000000000
--- a/NDTensors/test/test_dense.jl
+++ /dev/null
@@ -1,327 +0,0 @@
-@eval module $(gensym())
-using NDTensors
-using NDTensors.MetalExtensions: mtl
-using Test: @testset, @test, @test_throws, @test_broken
-using GPUArraysCore: @allowscalar
-include("NDTensorsTestUtils/NDTensorsTestUtils.jl")
-using .NDTensorsTestUtils: devices_list
-
-struct MyInd
- dim::Int
-end
-NDTensors.dim(i::MyInd) = i.dim
-
-@testset "Dense Tensors" begin
- @testset "test device: $dev" for dev in devices_list(copy(ARGS))
- elt = dev == mtl ? Float32 : Float64
- # Testing with GPU and CPU backends
- @testset "DenseTensor basic functionality" begin
- A = dev(Tensor(elt, (3, 4)))
- @allowscalar for I in eachindex(A)
- @test A[I] == 0
- end
-
- @test @allowscalar A[2, 1] isa elt
- @test dims(A[1:2, 1]) == (2,)
- @test dims(A[1:2, 2]) == (2,)
- @test dims(A[2:3, 2]) == (2,)
- @test dims(A[2, 2:4]) == (3,)
- @test dims(A[2:3, 2:4]) == (2, 3)
- @test dims(A[2:3, 2:end]) == (2, 3)
- @test dims(A[3, 2:end]) == (3,)
-
- randn!(A)
-
- @test ndims(A) == 2
- @test dims(A) == (3, 4)
- @test inds(A) == (3, 4)
-
- Aview = A[2:3, 2:3]
- @test dims(Aview) == (2, 2)
- ## Added for issue 1431: create a tensor from
- ## a sliced view of another tensor
- Acopy = Tensor(NDTensors.storage(Aview), (1, 4))
- @test NDTensors.cpu(data(Acopy)) == NDTensors.cpu(data(Aview))
- @test dims(Acopy) == (1, 4)
-
- B = dev(Tensor(elt, undef, (3, 4)))
- randn!(B)
- C = copy(A)
- C = permutedims!!(C, B, (1, 2), +)
- Cp = NDTensors.map_diag(i -> 2 * i, C)
- @allowscalar for i in 1:diaglength(Cp)
- @test Cp[i, i] == 2 * C[i, i]
- end
-
- Ap = permutedims(A, (2, 1))
- @allowscalar begin
- for I in eachindex(A)
- @test A[I] != 0
- end
-
- ## TODO Currently this fails with scalar indexing on CUDA
- ## Because A + B calls
- ## +(A::DenseTensor{Float64, 2, Tuple{Int64, Int64}, Dense{Float64, CuArray{Float64, 1, CUDA.Mem.DeviceBuffer}}}, B::DenseTensor{Float64, 2, Tuple{Int64, Int64}, Dense{Float64, CuArray{Float64, 1, CUDA.Mem.DeviceBuffer}}})
- ## @ Base ./arraymath.jl:8
- #C = A + B
-
- for I in eachindex(C)
- @test C[I] == A[I] + B[I]
- end
-
- for I in eachindex(A)
- @test A[I] == Ap[NDTensors.permute(I, (2, 1))]
- end
-
- A[1, 1] = 11
- @test A[1, 1] == 11
-
- @test A[2, 2] == Aview[1, 1]
- end
-
- ## Testing A .= α .* B .+ β .* A
- C = copy(A)
- @allowscalar fill!(B, zero(elt))
- β = elt(2)
- α = elt(1)
- permutedims!!(A, B, (1, 2), (a, b) -> +(*(β, a), *(α, b)))
- @test @allowscalar 2 .* C == A
- randn!(B)
- C = copy(A)
- A = permutedims!!(A, B, (1, 2), (a, b) -> +(*(β, a), *(α, b)))
- @allowscalar for i in 1:3, j in 1:4
- @test A[i, j] == α * B[i, j] + β * C[i, j]
- end
-
- ## add elt around 2.0 to preserve the eltype of A.
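- ## (A bare literal like 2.0 is a Float64, so multiplying by it would promote a Float32 tensor to Float64.)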
- @test data(A * elt(2.0)) == data(elt(2.0) * A) - - Asim = similar(data(A), 10) - @test eltype(Asim) == elt - @test length(Asim) == 10 - - t = dev(Tensor(complex(elt), (100, 100))) - randn!(t) - @test conj(data(store(t))) == data(store(conj(t))) - @test typeof(conj(t)) <: DenseTensor - - @test Dense(complex(elt)) == Dense{complex(elt)}() - @test Dense(complex(elt)) == complex(Dense(elt)) - - D = dev(Tensor(complex(elt), (100, 100))) - @test eltype(D) == complex(elt) - @test ndims(D) == 2 - @test dim(D) == 100^2 - - E = dev(Tensor(complex(elt), undef, (100, 100))) - @test eltype(E) == complex(elt) - @test ndims(E) == 2 - @test dim(E) == 100^2 - - F = dev(Tensor(elt, (100, 100))) - @test eltype(F) == elt - @test ndims(F) == 2 - @test dim(F) == 100^2 - - G = dev(Tensor(elt, undef, (100, 100))) - @test eltype(G) == elt - @test ndims(G) == 2 - @test dim(G) == 100^2 - - H = dev(Tensor(complex(elt), undef, (100, 100))) - @test eltype(H) == complex(elt) - @test ndims(H) == 2 - @test dim(H) == 100^2 - - I_arr = dev(rand(elt, 10, 10, 10)) - I = dev(Tensor(I_arr, (10, 10, 10))) - @test eltype(I) == elt - @test dim(I) == 1000 - @test Array(I) == I_arr - - J = dev(Tensor(elt, (2, 2))) - K = dev(Tensor(elt, (2, 2))) - @test Array(J * K) ≈ Array(J) * Array(K) - end - - @testset "Random constructor" begin - T = dev(randomTensor(elt, (2, 2))) - @test dims(T) == (2, 2) - @test eltype(T) == elt - @test @allowscalar T[1, 1] ≉ 0 - @test norm(T) ≉ 0 - - Tc = dev(randomTensor(complex(elt), (2, 2))) - @test dims(Tc) == (2, 2) - @test eltype(Tc) == complex(elt) - @test @allowscalar Tc[1, 1] ≉ 0 - @test norm(Tc) ≉ 0 - end - - @testset "Complex Valued Tensors" begin - d1, d2, d3 = 2, 3, 4 - T = dev(randomTensor(complex(elt), (d1, d2, d3))) - - rT = real(T) - iT = imag(T) - cT = conj(T) - - @allowscalar for n1 in 1:d1, n2 in 1:d2, n3 in 1:d3 - @test rT[n1, n2, n3] ≈ real(T[n1, n2, n3]) - @test iT[n1, n2, n3] ≈ imag(T[n1, n2, n3]) - @test cT[n1, n2, n3] ≈ conj(T[n1, n2, n3]) - end - end - - @testset "Custom inds types" begin - T = dev(Tensor(elt, (MyInd(2), MyInd(3), MyInd(4)))) - @test store(T) isa Dense - @test eltype(T) == elt - @test norm(T) == 0 - @test dims(T) == (2, 3, 4) - @test ndims(T) == 3 - @test inds(T) == (MyInd(2), MyInd(3), MyInd(4)) - @allowscalar begin - T[2, 1, 2] = 1.21 - @test T[2, 1, 2] == elt(1.21) - end - @test norm(T) == elt(1.21) - - T = dev(randomTensor(complex(elt), (MyInd(4), MyInd(3)))) - @test store(T) isa Dense - @test eltype(T) == complex(elt) - @test norm(T) > 0 - @test dims(T) == (4, 3) - @test ndims(T) == 2 - @test inds(T) == (MyInd(4), MyInd(3)) - - T2 = 2 * T - @test eltype(T2) == complex(elt) - @test store(T2) isa Dense - @test norm(T2) > 0 - @test norm(T2) / norm(T) ≈ 2 - @test dims(T2) == (4, 3) - @test ndims(T2) == 2 - @test inds(T2) == (MyInd(4), MyInd(3)) - end - - @testset "generic contraction" begin - # correctness of _gemm! 
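- # `_gemm!` mirrors the BLAS `gemm!` convention, C = alpha * op(A) * op(B) + beta * C,
- # where tA and tB select 'N' (as-is) or 'T' (transposed). The loop below checks the
- # BLAS-backed Float64 path against the generic fallback by repeating each call with
- # BigFloat copies, an element type that has no BLAS support.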
- for alpha in [0.0, 1.0, 2.0]
- for beta in [0.0, 1.0, 2.0]
- for tA in ['N', 'T']
- for tB in ['N', 'T']
- A = randn(4, 4)
- B = randn(4, 4)
- C = randn(4, 4)
- A2 = BigFloat.(A)
- B2 = BigFloat.(B)
- C2 = BigFloat.(C)
- NDTensors._gemm!(tA, tB, alpha, A, B, beta, C)
- NDTensors._gemm!(tA, tB, alpha, A2, B2, beta, C2)
- @test C ≈ C2
- end
- end
- end
- end
- end
-
- @testset "Contraction with size 1 block and NaN" begin
- @testset "No permutation" begin
- R = dev(Tensor(complex(elt), (2, 2, 1)))
- fill!(R, NaN)
- @test @allowscalar any(isnan, R)
- T1 = dev(randomTensor(elt, (2, 2, 1)))
- T2 = dev(randomTensor(complex(elt), (1, 1)))
- NDTensors.contract!(R, (1, 2, 3), T1, (1, 2, -1), T2, (-1, 1))
- @test @allowscalar !any(isnan, R)
- @test convert(Array, R) ≈ convert(Array, T1) * T2[]
- end
-
- @testset "Permutation" begin
- R = dev(Tensor(complex(elt), (2, 2, 1)))
- fill!(R, NaN)
- @test @allowscalar any(isnan, R)
- T1 = dev(randomTensor(elt, (2, 2, 1)))
- T2 = dev(randomTensor(complex(elt), (1, 1)))
- NDTensors.contract!(R, (2, 1, 3), T1, (1, 2, -1), T2, (-1, 1))
- @test @allowscalar !any(isnan, R)
- @test convert(Array, R) ≈ permutedims(convert(Array, T1), (2, 1, 3)) * T2[]
- end
- end
- end
-
- # Only CPU backend testing
- @testset "Contract with exotic types" begin
- # BigFloat is not supported on GPU
- ## randn(BigFloat, ...) is not defined in Julia 1.6
- a = BigFloat.(randn(Float64, 2, 3))
- t = Tensor(a, (1, 2, 3))
- m = Tensor(a, (2, 3))
- v = Tensor([one(BigFloat)], (1,))
-
- @test m ≈ contract(t, (-1, 2, 3), v, (-1,))
- tp = similar(t)
- NDTensors.contract!(tp, (1, 2, 3), t, (1, 2, 3), v, (1,), false, false)
- @test iszero(tp)
-
- fill!(tp, one(BigFloat))
- NDTensors.contract!(tp, (1, 2, 3), t, (1, 2, 3), v, (1,), false, true)
- for i in tp
- @test i == one(BigFloat)
- end
-
- rand_factor = BigFloat(randn(Float64))
- NDTensors.contract!(tp, (1, 2, 3), t, (1, 2, 3), v, (1,), false, rand_factor)
- for i in tp
- @test i == rand_factor
- end
- end
-
- @testset "change backends" begin
- a, b, c = [randn(5, 5) for i in 1:3]
- backend_auto()
- @test NDTensors.gemm_backend[] == :Auto
- @test NDTensors.auto_select_backend(typeof.((a, b, c))...) ==
- NDTensors.GemmBackend(:BLAS)
- res1 = NDTensors._gemm!('N', 'N', 2.0, a, b, 0.2, copy(c))
- backend_blas()
- @test NDTensors.gemm_backend[] == :BLAS
- res2 = NDTensors._gemm!('N', 'N', 2.0, a, b, 0.2, copy(c))
- backend_generic()
- @test NDTensors.gemm_backend[] == :Generic
- res3 = NDTensors._gemm!('N', 'N', 2.0, a, b, 0.2, copy(c))
- @test res1 == res2
- @test res1 ≈ res3
- backend_auto()
- end
-
- @testset "change backends" begin
- a, b, c = [randn(5, 5) for i in 1:3]
- backend_auto()
- @test NDTensors.gemm_backend[] == :Auto
- @test NDTensors.auto_select_backend(typeof.((a, b, c))...) ==
- NDTensors.GemmBackend(:BLAS)
- res1 = NDTensors._gemm!('N', 'N', 2.0, a, b, 0.2, copy(c))
- @test_throws UndefVarError backend_octavian()
- if VERSION >= v"1.5"
- # Octavian only supports Julia 1.5 and above
- # Need to install it here instead of
- # putting it as a dependency in the Project.toml
- # since otherwise it fails for older Julia versions.
- using Octavian
- NDTensors.backend_octavian()
- @test NDTensors.gemm_backend[] == :Octavian
- res4 = NDTensors._gemm!('N', 'N', 2.0, a, b, 0.2, copy(c))
- @test res1 ≈ res4
- backend_auto()
- end
- end
-end
-
-nothing
-end
diff --git a/NDTensors/test/test_diag.jl b/NDTensors/test/test_diag.jl
deleted file mode 100644
index cb8adc367a..0000000000
--- a/NDTensors/test/test_diag.jl
+++ /dev/null
@@ -1,118 +0,0 @@
-@eval module $(gensym())
-using Adapt: adapt
-using GPUArraysCore: @allowscalar
-using LinearAlgebra: diagm, dot, norm
-using NDTensors:
- NDTensors,
- Dense,
- Diag,
- DiagTensor,
- Tensor,
- array,
- contract,
- data,
- dense,
- diaglength,
- matrix,
- randomTensor,
- tensor
-using Test: @testset, @test, @test_throws
-include("NDTensorsTestUtils/NDTensorsTestUtils.jl")
-using .NDTensorsTestUtils: devices_list, is_supported_eltype
-
-@testset "DiagTensor basic functionality" begin
- @testset "test device: $dev" for dev in devices_list(copy(ARGS)),
- elt in (Float32, ComplexF32, Float64, ComplexF64)
-
- if !is_supported_eltype(dev, elt)
- # Metal doesn't support double precision
- continue
- end
- t = dev(tensor(Diag(rand(elt, 100)), (100, 100)))
- @test conj(data(t)) == data(conj(t))
- @test typeof(conj(t)) <: DiagTensor
-
- d = rand(real(elt), 10)
- D = dev(Diag{elt}(d))
- @test eltype(D) == elt
- @test @allowscalar dev(Array(dense(D))) == convert.(elt, d)
- simD = similar(D)
- @test length(simD) == length(D)
- @test eltype(simD) == eltype(D)
- D = dev(Diag(one(elt)))
- @test eltype(D) == elt
- @test complex(D) == Diag(one(complex(elt)))
- @test similar(D) == Diag(0.0)
-
- D = Tensor(Diag(1), (2, 2))
- @test norm(D) == √2
- d = 3
- ## TODO this fails because uniform diag tensors are immutable
- #S = NDTensors.map_diag((i->i * 2), dev(D))
- # @allowscalar for i in 1:diaglength(S)
- # @test S[i,i] == 2.0 * D[i,i]
- # end
-
- vr = rand(elt, d)
- D = dev(tensor(Diag(vr), (d, d)))
- Da = array(D)
- Dm = matrix(D)
- Dp = permutedims(D, (2, 1))
- for x in (Da, Dm, Dp)
- @test x == dev(diagm(0 => vr))
- @test x == D
- end
- @test sqrt(contract(D, (-1, -2), conj(D), (-1, -2))[]) ≈ norm(D)
- # This if statement corresponds to the reported bug:
- # https://github.com/JuliaGPU/Metal.jl/issues/364
- if !(dev == NDTensors.mtl && elt === ComplexF32)
- S = permutedims(dev(D), (1, 2), sqrt)
- @allowscalar begin
- for i in 1:diaglength(S)
- @test S[i, i] ≈ sqrt(D[i, i])
- end
- end
- end
- S = NDTensors.map_diag(i -> 2 * i, dev(D))
- @allowscalar for i in 1:diaglength(S)
- @test S[i, i] == 2 * D[i, i]
- end
-
- # Regression test for https://github.com/ITensor/ITensors.jl/issues/1199
- S = dev(tensor(Diag(randn(elt, 2)), (2, 2)))
- ## This was creating a `Dense{ReshapedArray{Adjoint{Matrix}}}` which, in mul!, was
- ## becoming a Transpose{ReshapedArray{Adjoint{Matrix}}} which was causing issues on
- ## dispatching GPU mul!
- V = dev(tensor(Dense(randn(elt, 12, 2)'), (3, 4, 2)))
- S1 = contract(S, (2, -1), V, (3, 4, -1))
- S2 = contract(dense(S), (2, -1), copy(V), (3, 4, -1))
- @test @allowscalar S1 ≈ S2
- end
-end
-@testset "DiagTensor contractions" for dev in devices_list(copy(ARGS))
- ## TODO add more GPU tests
- elt = (dev == NDTensors.mtl ?
Float32 : Float64) - t = dev(tensor(Diag(elt[1.0, 1.0, 1.0]), (3, 3))) - A = dev(randomTensor(Dense{elt}, (3, 3))) - - @test sum(t) ≈ sum(array(t)) - @test sum(A) ≈ sum(array(A)) - @test prod(t) ≈ prod(array(t)) - @test prod(A) ≈ prod(array(A)) - - @test contract(t, (1, -2), t, (-2, 3)) == t - @test contract(A, (1, -2), t, (-2, 3)) == A - @test contract(A, (-2, 1), t, (-2, 3)) == transpose(A) - - ## Testing sparse contractions on GPU - t = dev(tensor(Diag(one(elt)), (3, 3))) - @test contract(t, (-1, -2), A, (-1, -2))[] ≈ dot(dev(array(t)), array(A)) rtol = sqrt( - eps(elt) - ) - - ## Test dot on GPU - @test dot(t, A) ≈ dot(dev(array(t)), array(A)) rtol = sqrt(eps(elt)) -end -nothing -end diff --git a/NDTensors/test/test_diagblocksparse.jl b/NDTensors/test/test_diagblocksparse.jl deleted file mode 100644 index 19fd7449ef..0000000000 --- a/NDTensors/test/test_diagblocksparse.jl +++ /dev/null @@ -1,88 +0,0 @@ -@eval module $(gensym()) -using Dictionaries: Dictionary -using GPUArraysCore: @allowscalar -using NDTensors: - NDTensors, - Block, - BlockSparseTensor, - Diag, - DiagBlockSparse, - Tensor, - blockoffsets, - contract, - dense, - inds, - nzblocks -using Random: randn! -using Test: @test, @test_broken, @test_throws, @testset -@testset "UniformDiagBlockSparseTensor basic functionality" begin - NeverAlias = NDTensors.NeverAlias - AllowAlias = NDTensors.AllowAlias - - storage = DiagBlockSparse(1.0, Dictionary([Block(1, 1), Block(2, 2)], [0, 1])) - tensor = Tensor(storage, ([1, 1], [1, 1])) - - @test conj(tensor) == tensor - @test conj(NeverAlias(), tensor) == tensor - @test conj(AllowAlias(), tensor) == tensor - - c = 1 + 2im - tensor *= c - - @test tensor[1, 1] == c - @test conj(tensor) ≠ tensor - @test conj(NeverAlias(), tensor) ≠ tensor - @test conj(AllowAlias(), tensor) ≠ tensor - @test conj(tensor)[1, 1] == conj(c) - @test conj(NeverAlias(), tensor)[1, 1] == conj(c) - @test conj(AllowAlias(), tensor)[1, 1] == conj(c) -end -@testset "DiagBlockSparse off-diagonal (eltype=$elt)" for elt in ( - Float32, Float64, Complex{Float32}, Complex{Float64} -) - inds1 = ([1, 1], [1, 1]) - inds2 = ([1, 1], [1, 1]) - blocks = [(1, 2), (2, 1)] - a1 = BlockSparseTensor{elt}(blocks, inds1...) - for b in nzblocks(a1) - randn!(a1[b]) - end - a2 = Tensor(DiagBlockSparse(one(elt), blockoffsets(a1)), inds2) - for (labels1, labels2) in (((1, -1), (-1, 2)), ((-1, -2), (-1, -2))) - @test_throws ErrorException contract(a1, labels1, a2, labels2) - end -end - -include("NDTensorsTestUtils/NDTensorsTestUtils.jl") -using .NDTensorsTestUtils: devices_list -@testset "DiagBlockSparse contract" for dev in devices_list(copy(ARGS)) - elt = dev == NDTensors.mtl ? 
Float32 : Float64 - A = dev(BlockSparseTensor{elt}([(1, 1), (2, 2)], [2, 2], [2, 2])) - randn!(A) - t = Tensor(DiagBlockSparse(one(elt), blockoffsets(A)), inds(A)) - tdense = Tensor(Diag(one(elt)), inds(A)) - - a = dense(contract(A, (1, -2), t, (3, -2))) - b = contract(dense(A), (1, -2), tdense, (3, -2)) - @test @allowscalar a ≈ b - - a = dense(contract(A, (-2, 1), t, (-2, 3))) - b = contract(dense(A), (-2, 1), tdense, (-2, 3)) - @test @allowscalar a ≈ b - - a = contract(A, (-1, -2), t, (-1, -2))[] - b = contract(dense(A), (-1, -2), tdense, (-1, -2))[] - @test @allowscalar a ≈ b - - ## TODO fix these kinds of contractions - A = BlockSparseTensor{elt}([(1, 1), (2, 2)], [3, 2, 3], [2, 2]) - randn!(A) - t = Tensor(DiagBlockSparse(one(elt), blockoffsets(A)), inds(A)) - @test_broken dense(contract(A, (1, -2), (t), (3, -2))) ≈ - contract(dense(A), (1, -2), dense(t), (3, -2)) - @test_broken dense(contract(A, (-2, 1), t, (-2, 3))) ≈ - contract(dense(A), (-2, 1), dense(t), (-2, 3)) - @test_broken contract(dev(A), (-1, -2), dev(t), (-1, -2))[] ≈ - contract(dense(A), (-1, -2), dense(t), (-1, -2))[] -end -end diff --git a/NDTensors/test/test_emptynumber.jl b/NDTensors/test/test_emptynumber.jl deleted file mode 100644 index 73d82117f5..0000000000 --- a/NDTensors/test/test_emptynumber.jl +++ /dev/null @@ -1,39 +0,0 @@ -@eval module $(gensym()) -using LinearAlgebra: norm -using NDTensors: EmptyNumber -using Test: @testset, @test, @test_throws - -const 𝟎 = EmptyNumber() - -@testset "NDTensors.EmptyNumber" begin - x = 2.3 - - @test complex(𝟎) == 𝟎 - @test complex(EmptyNumber) == Complex{EmptyNumber} - - # Promotion - for T in (Bool, Float32, Float64, Complex{Float32}, Complex{Float64}) - @test promote_type(EmptyNumber, T) === T - @test promote_type(T, EmptyNumber) === T - end - - # Basic arithmetic - @test 𝟎 + 𝟎 == 𝟎 - @test 𝟎 + x == x - @test x + 𝟎 == x - @test -𝟎 == 𝟎 - @test 𝟎 - 𝟎 == 𝟎 - @test x - 𝟎 == x - @test 𝟎 * 𝟎 == 𝟎 - @test x * 𝟎 == 𝟎 - @test 𝟎 * x == 𝟎 - @test 𝟎 / x == 𝟎 - @test_throws DivideError() x / 𝟎 == 𝟎 - @test_throws DivideError() 𝟎 / 𝟎 == 𝟎 - - @test float(𝟎) == 0.0 - @test float(𝟎) isa Float64 - @test norm(𝟎) == 0.0 - @test norm(𝟎) isa Float64 -end -end diff --git a/NDTensors/test/test_emptystorage.jl b/NDTensors/test/test_emptystorage.jl deleted file mode 100644 index 1f82ae2a57..0000000000 --- a/NDTensors/test/test_emptystorage.jl +++ /dev/null @@ -1,35 +0,0 @@ -@eval module $(gensym()) -using NDTensors -using Test: @testset, @test -include("NDTensorsTestUtils/NDTensorsTestUtils.jl") -using .NDTensorsTestUtils: devices_list - -@testset "EmptyStorage test" begin - @testset "test device: $dev" for dev in devices_list(copy(ARGS)) - T = dev(Tensor(EmptyStorage(NDTensors.EmptyNumber), (2, 2))) - @test size(T) == (2, 2) - @test eltype(T) == NDTensors.EmptyNumber - @test T[1, 1] == NDTensors.EmptyNumber() - @test T[1, 2] == NDTensors.EmptyNumber() - # TODO: This should fail with an out of bounds error! 
- #@test T[1, 3] == NDTensors.EmptyNumber()
-
- Tc = complex(T)
- @test size(Tc) == (2, 2)
- @test eltype(Tc) == Complex{NDTensors.EmptyNumber}
- @test Tc[1, 1] == Complex(NDTensors.EmptyNumber(), NDTensors.EmptyNumber())
- @test Tc[1, 2] == Complex(NDTensors.EmptyNumber(), NDTensors.EmptyNumber())
-
- T = dev(EmptyTensor(Float64, (2, 2)))
- @test blockoffsets(T) == BlockOffsets{2}()
- T = dev(EmptyBlockSparseTensor(Float64, ([1, 1], [1, 1])))
- @test blockoffsets(T) == BlockOffsets{2}()
-
- T = dev(EmptyStorage(NDTensors.EmptyNumber))
- @test zero(T) isa typeof(T)
-
- T = dev(EmptyTensor(NDTensors.EmptyNumber, (2, 2)))
- @test zero(T) isa typeof(T)
- end
-end
-end
diff --git a/NDTensors/test/test_linearalgebra.jl b/NDTensors/test/test_linearalgebra.jl
deleted file mode 100644
index 96bde8efdf..0000000000
--- a/NDTensors/test/test_linearalgebra.jl
+++ /dev/null
@@ -1,97 +0,0 @@
-@eval module $(gensym())
-using NDTensors
-using NDTensors: cpu
-using LinearAlgebra: Diagonal, norm, qr, diag
-using Test: @testset, @test
-using GPUArraysCore: @allowscalar
-include("NDTensorsTestUtils/NDTensorsTestUtils.jl")
-using .NDTensorsTestUtils: devices_list, is_supported_eltype
-
-@testset "random_orthog" begin
- n, m = 10, 4
- O1 = random_orthog(n, m)
- @test eltype(O1) == Float64
- @test norm(transpose(O1) * O1 - Diagonal(fill(1.0, m))) < 1E-14
- O2 = random_orthog(m, n)
- @test norm(O2 * transpose(O2) - Diagonal(fill(1.0, m))) < 1E-14
-end
-
-@testset "random_unitary" begin
- n, m = 10, 4
- U1 = random_unitary(n, m)
- @test eltype(U1) == ComplexF64
- @test norm(U1' * U1 - Diagonal(fill(1.0, m))) < 1E-14
- U2 = random_unitary(m, n)
- @test norm(U2 * U2' - Diagonal(fill(1.0, m))) < 1E-14
-end
-
-@testset "QX testing" begin
- @testset "Dense $qx decomposition, elt=$elt, positive=$positive, singular=$singular, device=$dev" for qx in
- [
- qr, ql
- ],
- elt in (Float64, ComplexF64, Float32, ComplexF32),
- positive in [false, true],
- singular in [false, true],
- dev in devices_list(copy(ARGS))
-
- ## Skip Float64 on Metal
- if !is_supported_eltype(dev, elt)
- continue
- end
- ## Looks like AMDGPU has an issue with QR when A is singular
- ## TODO potentially make an is_broken function?
- if dev == NDTensors.AMDGPUExtensions.roc && singular
- continue
- end
- eps = Base.eps(real(elt)) * 100 # This is set rather tight, so if you increase/change m, n you may have to open up the tolerance on eps.
- n, m = 4, 8
- Id = Diagonal(fill(1.0, min(n, m)))
- #
- # Wide matrix (more columns than rows)
- #
- A = dev(randomTensor(elt, (n, m)))
- # We want to test 0.0 on the diagonal. We need to make all rows equal to guarantee this with numerical roundoff.
- @allowscalar if singular
- for i in 2:n
- A[i, :] = A[1, :]
- end
- end
- Q, X = qx(A; positive=positive) # X is R or L.
- Ap = Q * X
- @test cpu(A) ≈ cpu(Ap) atol = eps
- @test cpu(array(Q)' * array(Q)) ≈ Id atol = eps
- @test cpu(array(Q) * array(Q)') ≈ Id atol = eps
- @allowscalar if positive
- nr, nc = size(X)
- dr = qx == ql ? Base.max(0, nc - nr) : 0
- diagX = diag(X[:, (1 + dr):end]) # The location of diag(L) is shifted dr columns to the right.
- @test all(real(diagX) .>= 0.0)
- @test all(imag(diagX) .== 0.0)
- end
- #
- # Tall matrix (more rows than cols)
- #
- A = dev(randomTensor(elt, (m, n))) # Tall array
- # We want to test 0.0 on the diagonal. We need to make all rows equal to guarantee this with numerical roundoff.
- @allowscalar if singular
- for i in 2:m
- A[i, :] = A[1, :]
- end
- end
- Q, X = qx(A; positive=positive)
- Ap = Q * X
- @test cpu(A) ≈ cpu(Ap) atol = eps
- @test cpu(array(Q)' * array(Q)) ≈ Id atol = eps
- @allowscalar if positive
- nr, nc = size(X)
- dr = qx == ql ? Base.max(0, nc - nr) : 0
- diagX = diag(X[:, (1 + dr):end]) # The location of diag(L) is shifted dr columns to the right.
- @test all(real(diagX) .>= 0.0)
- @test all(imag(diagX) .== 0.0)
- end
- end
-end
-
-nothing
-end
diff --git a/NDTensors/test/test_tupletools.jl b/NDTensors/test/test_tupletools.jl
deleted file mode 100644
index 959a127d5f..0000000000
--- a/NDTensors/test/test_tupletools.jl
+++ /dev/null
@@ -1,34 +0,0 @@
-@eval module $(gensym())
-using Test: @testset, @test
-using NDTensors: NDTensors
-
-@testset "Test non-exported tuple tools" begin
- @test NDTensors.diff((1, 3, 6, 4)) == (2, 3, -2)
- @test NDTensors.diff((1, 2, 3)) == (1, 1)
-end
-
-@testset "Test deleteat" begin
- t = (1, 2, 3, 4)
- t = NDTensors.deleteat(t, 2)
- @test t == (1, 3, 4)
-
- # deleteat with mixed-type Tuple
- t = ('a', 2, 'c', 4)
- t = NDTensors.deleteat(t, 2)
- @test t == ('a', 'c', 4)
- t = NDTensors.deleteat(t, 2)
- @test t == ('a', 4)
-end
-
-@testset "Test insertat" begin
- t = (1, 2)
- t = NDTensors.insertat(t, (3, 4), 2)
- @test t == (1, 3, 4)
-
- # insertat with mixed-type Tuple
- t = (1, 'b')
- t = NDTensors.insertat(t, ('c',), 2)
- @test t == (1, 'c')
-end
-
-end
diff --git a/Project.toml b/Project.toml
index ff794c3122..e5befc8145 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,72 +1,7 @@
 name = "ITensors"
 uuid = "9136182c-28ba-11e9-034c-db9fb085ebd5"
-authors = ["Matthew Fishman <mfishman@flatironinstitute.org>", "Miles Stoudenmire <mstoudenmire@flatironinstitute.org>"]
-version = "0.7.11"
-
-[deps]
-Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
-BitIntegers = "c3b6d118-76ef-56ca-8cc7-ebb389d030a1"
-ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
-Compat = "34da2185-b29b-5c13-b0c7-acf172513d20"
-Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4"
-DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
-Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
-IsApprox = "28f27b66-4bd8-47e7-9110-e2746eb8bed7"
-LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
-NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
-Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
-Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
-Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
-Requires = "ae029012-a4dd-5104-9daa-d747884805df"
-SerializedElementArrays = "d3ce8812-9567-47e9-a7b5-65a6d70a3065"
-SimpleTraits = "699a6c99-e7fa-54fc-8d76-47d257e15c1d"
-SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
-StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
-Strided = "5e0ebb24-38b0-5f93-81fe-25c709ecae67"
-TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
-TupleTools = "9d95972d-f1c8-5527-a6e0-b4b365fa01f6"
-Zeros = "bd1ec220-6eb4-527a-9b49-e79c3db6233b"
-
-[weakdeps]
-HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"
-VectorInterface = "409d34a3-91d5-4945-b6ec-7529ddf182d8"
-ZygoteRules = "700de1a5-db45-46bc-99cf-38207098b444"
-
-[extensions]
-ITensorsHDF5Ext = "HDF5"
-ITensorsVectorInterfaceExt = "VectorInterface"
-ITensorsZygoteRulesExt = "ZygoteRules"
+authors = ["ITensor developers and contributors"]
+version = "0.8.0"

[compat]
-Adapt = "3.5, 4"
-BitIntegers = "0.2, 0.3"
-ChainRulesCore = "1.10"
-Compat = "2.1, 3, 4"
-Dictionaries = "0.4"
-DocStringExtensions = "0.9.3"
-Functors = "0.2, 0.3, 0.4, 0.5"
-HDF5 = "0.14, 0.15, 0.16, 0.17"
-IsApprox = "0.1, 1, 2"
-LinearAlgebra = "1.10"
-NDTensors = "0.3.34"
-Pkg = "1.10" -Printf = "1.10" -Random = "1.10" -Requires = "1.1" -SerializedElementArrays = "0.1" -SimpleTraits = "0.9.4" -SparseArrays = "<0.0.1, 1.10" -StaticArrays = "0.12, 1.0" -Strided = "1.1, 2" -TimerOutputs = "0.5.5" -TupleTools = "1.2" -VectorInterface = "0.4, 0.5" -Zeros = "0.3.0" -ZygoteRules = "0.2.2" julia = "1.10" - -[extras] -ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" -HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f" -VectorInterface = "409d34a3-91d5-4945-b6ec-7529ddf182d8" -ZygoteRules = "700de1a5-db45-46bc-99cf-38207098b444" diff --git a/README.md b/README.md deleted file mode 120000 index 8785db193e..0000000000 --- a/README.md +++ /dev/null @@ -1 +0,0 @@ -docs/src/index.md \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000000..f3db44263e --- /dev/null +++ b/README.md @@ -0,0 +1,43 @@ +# ITensors.jl + +[![Stable](https://img.shields.io/badge/docs-stable-blue.svg)](https://ITensor.github.io/ITensors.jl/stable/) +[![Dev](https://img.shields.io/badge/docs-dev-blue.svg)](https://ITensor.github.io/ITensors.jl/dev/) +[![Build Status](https://github.com/ITensor/ITensors.jl/actions/workflows/Tests.yml/badge.svg?branch=main)](https://github.com/ITensor/ITensors.jl/actions/workflows/Tests.yml?query=branch%3Amain) +[![Coverage](https://codecov.io/gh/ITensor/ITensors.jl/branch/main/graph/badge.svg)](https://codecov.io/gh/ITensor/ITensors.jl) +[![Code Style: Blue](https://img.shields.io/badge/code%20style-blue-4495d1.svg)](https://github.com/invenia/BlueStyle) +[![Aqua](https://raw.githubusercontent.com/JuliaTesting/Aqua.jl/master/badge.svg)](https://github.com/JuliaTesting/Aqua.jl) + +## Installation instructions + +This package resides in the `ITensor/ITensorRegistry` local registry. +In order to install, simply add that registry through your package manager. +This step is only required once. +```julia +julia> using Pkg: Pkg + +julia> Pkg.Registry.add(url="https://github.com/ITensor/ITensorRegistry") +``` +or: +```julia +julia> Pkg.Registry.add(url="git@github.com:ITensor/ITensorRegistry.git") +``` +if you want to use SSH credentials, which can make it so you don't have to enter your Github ursername and password when registering packages. + +Then, the package can be added as usual through the package manager: + +```julia +julia> Pkg.add("ITensors") +``` + +## Examples + +````julia +using ITensors: ITensors +```` + +Examples go here. + +--- + +*This page was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).* + diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl new file mode 100644 index 0000000000..274f15f442 --- /dev/null +++ b/benchmark/benchmarks.jl @@ -0,0 +1,7 @@ +using ITensors +using BenchmarkTools + +SUITE = BenchmarkGroup() +SUITE["rand"] = @benchmarkable rand(10) + +# Write your benchmarks here. 
diff --git a/docs/Project.toml b/docs/Project.toml
index 763805b88e..d0900102a4 100644
--- a/docs/Project.toml
+++ b/docs/Project.toml
@@ -1,10 +1,4 @@
[deps]
+ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5"
Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
-HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"
-ITensorMPS = "0d1a4710-d33b-49a5-8f18-73bdf49b47e2"
-ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5"
-LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
-Strided = "5e0ebb24-38b0-5f93-81fe-25c709ecae67"
-
-[compat]
-Documenter = "0.27"
+Literate = "98b081ad-f1c9-55d3-8b20-4c87d4299306"
diff --git a/docs/make.jl b/docs/make.jl
index a9121b55d9..29a1240267 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -1,19 +1,18 @@
-include("settings.jl")
+using ITensors: ITensors
+using Documenter: Documenter, DocMeta, deploydocs, makedocs
-makedocs(; sitename=sitename, settings...)
+DocMeta.setdocmeta!(ITensors, :DocTestSetup, :(using ITensors); recursive=true)
-# If ENV["GITHUB_EVENT_NAME"] == "workflow_dispatch"
-# it indicates the Documenter build was launched manually,
-# by a GitHub action run through the GitHub website.
-# As of Dec 2022, Documenter does not build the dev branch
-# in this case, so change the value to "push" to fix:
-if get(ENV, "GITHUB_EVENT_NAME", nothing) == "workflow_dispatch"
- ENV["GITHUB_EVENT_NAME"] = "push"
-end
+include("make_index.jl")
-deploydocs(;
- repo="github.com/ITensor/ITensors.jl.git",
- devbranch="main",
- push_preview=true,
- deploy_config=Documenter.GitHubActions(),
+makedocs(;
+ modules=[ITensors],
+ authors="ITensor developers and contributors",
+ sitename="ITensors.jl",
+ format=Documenter.HTML(;
+ canonical="https://ITensor.github.io/ITensors.jl", edit_link="main", assets=String[]
+ ),
+ pages=["Home" => "index.md"],
)
+
+deploydocs(; repo="github.com/ITensor/ITensors.jl", devbranch="main", push_preview=true)
diff --git a/docs/make_index.jl b/docs/make_index.jl
new file mode 100644
index 0000000000..c0d699b2eb
--- /dev/null
+++ b/docs/make_index.jl
@@ -0,0 +1,9 @@
+using Literate: Literate
+using ITensors: ITensors
+
+Literate.markdown(
+ joinpath(pkgdir(ITensors), "examples", "README.jl"),
+ joinpath(pkgdir(ITensors), "docs", "src");
+ flavor=Literate.DocumenterFlavor(),
+ name="index",
+)
diff --git a/docs/make_local_notest.jl b/docs/make_local_notest.jl
deleted file mode 100644
index 85b8b4d451..0000000000
--- a/docs/make_local_notest.jl
+++ /dev/null
@@ -1,5 +0,0 @@
-include("settings.jl")
-
-settings[:doctest] = false
-
-makedocs(; sitename=sitename, settings...)
diff --git a/docs/make_local_test.jl b/docs/make_local_test.jl
deleted file mode 100644
index 4dcaf08680..0000000000
--- a/docs/make_local_test.jl
+++ /dev/null
@@ -1,3 +0,0 @@
-include("settings.jl")
-
-makedocs(; sitename=sitename, settings...)
diff --git a/docs/make_readme.jl b/docs/make_readme.jl new file mode 100644 index 0000000000..f4013c20d8 --- /dev/null +++ b/docs/make_readme.jl @@ -0,0 +1,9 @@ +using Literate: Literate +using ITensors: ITensors + +Literate.markdown( + joinpath(pkgdir(ITensors), "examples", "README.jl"), + joinpath(pkgdir(ITensors)); + flavor=Literate.CommonMarkFlavor(), + name="README", +) diff --git a/docs/settings.jl b/docs/settings.jl deleted file mode 100644 index 0514c1b62d..0000000000 --- a/docs/settings.jl +++ /dev/null @@ -1,78 +0,0 @@ -using Documenter -using ITensors -using ITensorMPS - -# Allows using ITensorMPS.jl docstrings in ITensors.jl documentation: -# https://github.com/JuliaDocs/Documenter.jl/issues/1734 -DocMeta.setdocmeta!(ITensors, :DocTestSetup, :(using ITensors); recursive=true) -DocMeta.setdocmeta!(ITensorMPS, :DocTestSetup, :(using ITensorMPS); recursive=true) - -sitename = "ITensors.jl" - -settings = Dict( - # Allows using ITensorMPS.jl docstrings in ITensors.jl documentation: - # https://github.com/JuliaDocs/Documenter.jl/issues/1734 - :modules => [ITensors, ITensorMPS], - :pages => [ - "Introduction" => "index.md", - "Getting Started with ITensor" => [ - "Installing Julia and ITensor" => "getting_started/Installing.md", - "Running ITensor and Julia Codes" => "getting_started/RunningCodes.md", - "Enabling Debug Checks" => "getting_started/DebugChecks.md", - "Next Steps" => "getting_started/NextSteps.md", - ], - "Tutorials" => [ - "DMRG" => "tutorials/DMRG.md", - "Quantum Number Conserving DMRG" => "tutorials/QN_DMRG.md", - "MPS Time Evolution" => "tutorials/MPSTimeEvolution.md", - ], - "Code Examples" => [ - "ITensor Examples" => "examples/ITensor.md", - "MPS and MPO Examples" => "examples/MPSandMPO.md", - "DMRG Examples" => "examples/DMRG.md", - "Physics (SiteType) System Examples" => "examples/Physics.md", - ], - "Documentation" => [ - "Index" => "IndexType.md", - "Index collections" => "IndexSetType.md", - "ITensor" => "ITensorType.md", - "MPS and MPO" => "MPSandMPO.md", - "QN" => "QN.md", - "SiteType and op, state, val functions" => "SiteType.md", - "SiteTypes Included with ITensor" => "IncludedSiteTypes.md", - "DMRG" => [ - "DMRG.md", - "Sweeps.md", - "ProjMPO.md", - "ProjMPOSum.md", - "Observer.md", - "DMRGObserver.md", - ], - "OpSum" => "OpSum.md", - ], - "Frequently Asked Questions" => [ - "Programming Language (Julia, C++, ...) 
FAQs" => "faq/JuliaAndCpp.md", - "DMRG FAQs" => "faq/DMRG.md", - "Quantum Number (QN) FAQs" => "faq/QN.md", - "ITensor Development FAQs" => "faq/Development.md", - "Relationship of ITensor to other tensor libraries FAQs" => "faq/RelationshipToOtherLibraries.md", - "Julia Package Manager FAQs" => "faq/JuliaPkg.md", - "High-Performance Computing FAQs" => "faq/HPC.md", - ], - "Upgrade guides" => ["Upgrading from 0.1 to 0.2" => "UpgradeGuide_0.1_to_0.2.md"], - "ITensor indices and Einstein notation" => "Einsum.md", - "Advanced Usage Guide" => [ - "Advanced Usage Guide" => "AdvancedUsageGuide.md", - "Multithreading" => "Multithreading.md", - "Running on GPUs" => "RunningOnGPUs.md", - "Symmetric (QN conserving) tensors: background and usage" => "QNTricks.md", - "Timing and profiling" => "CodeTiming.md", - "Contraction sequence optimization" => "ContractionSequenceOptimization.md", - "HDF5 File Formats" => "HDF5FileFormats.md", - ], - "Developer Guide" => "DeveloperGuide.md", - ], - :format => Documenter.HTML(; assets=["assets/favicon.ico"], prettyurls=false), - :doctest => true, - :checkdocs => :none, -) diff --git a/docs/src/AdvancedUsageGuide.md b/docs/src/AdvancedUsageGuide.md deleted file mode 100644 index 54d3da6660..0000000000 --- a/docs/src/AdvancedUsageGuide.md +++ /dev/null @@ -1,1124 +0,0 @@ -# [Advanced ITensor Usage Guide](@id advanced_usage_guide) - -## Installing and updating ITensors.jl - -The ITensors package can be installed with the Julia package manager. -Assuming you have already downloaded Julia, which you can get -[here](https://julialang.org/downloads/), from the Julia REPL, -type `]` to enter the Pkg REPL mode and run: -``` -$ julia -``` -```julia -julia> ] - -pkg> add ITensors -``` - -Or, equivalently, via the `Pkg` API: - -```julia -julia> import Pkg; Pkg.add("ITensors") -``` - -We recommend using ITensors.jl with Intel MKL in order to get the -best possible performance. If you have not done so already, you can -replace the current BLAS and LAPACK implementation used by Julia with -MKL by using the MKL.jl package. Please follow the instructions -[here](https://github.com/JuliaComputing/MKL.jl). - -To use the latest registered (stable) version of ITensors.jl, use `update ITensors` -in `Pkg` mode or `import Pkg; Pkg.update("ITensors")`. -We will commonly release new patch versions (such as updating from `v0.1.12` to -`v0.1.13`) with bug fixes and improvements. However, make sure to double check before -updating between minor versions (such as from `v0.1.41` to `v0.2.0`) because new minor -releases may be breaking. - -Remember that if you are compiling system images of ITensors.jl, such as with the -`ITensors.compile()` command, you will need to rerurn this command to compile -the new version of ITensor after an update. - -To try the "development branch" of ITensors.jl (for example, if -there is a feature or fix we added that hasn't been released yet), -you can do `add ITensors#main`. You can switch back to the latest -released version with `add ITensors`. Using the development/main -branch is generally not encouraged unless you know what you are doing. - -## Using ITensors.jl in the REPL - -There are many ways you can write code based on ITensors.jl, ranging -from using it in the REPL to writing a small script to making a -package that depends on it. - -For example, you can just start the REPL from your command line like: -``` -$ julia -``` -assuming you have an available version of Julia with the ITensors.jl -package installed. 
Then just type: -```julia -julia> using ITensors -``` -and start typing ITensor commands. For example: -```julia -julia> i = Index(2, "i") -(dim=2|id=355|"i") - -julia> A = random_itensor(i, i') -ITensor ord=2 (dim=2|id=355|"i") (dim=2|id=355|"i")' -NDTensors.Dense{Float64,Array{Float64,1}} - -julia> @show A; -A = ITensor ord=2 -Dim 1: (dim=2|id=355|"i") -Dim 2: (dim=2|id=355|"i")' -NDTensors.Dense{Float64,Array{Float64,1}} - 2×2 - 1.2320011464276275 1.8504245734277216 - 1.0763652402177477 0.030353720156277037 - -julia> (A*dag(A))[] -3.9627443142240617 -``` - -Note that there are some "gotchas" with working in the REPL like this. -Technically, all commands in the REPL are in the "global scope". -The global scope might not work as you would expect, for example: -```julia -julia> for _ in 1:3 - A *= 2 - end -ERROR: UndefVarError: A not defined -Stacktrace: - [1] top-level scope at ./REPL[12]:2 - [2] eval(::Module, ::Any) at ./boot.jl:331 - [3] eval_user_input(::Any, ::REPL.REPLBackend) at /home/mfishman/software/julia-1.4.0/share/julia/stdlib/v1.4/REPL/src/REPL.jl:86 - [4] run_backend(::REPL.REPLBackend) at /home/mfishman/.julia/packages/Revise/AMRie/src/Revise.jl:1023 - [5] top-level scope at none:0 -``` -since the `A` inside the for-loop introduces a new local variable. -Some alternatives are to wrap that part of the code in a let-block -or a function: -```julia -julia> function f(A) - for _ in 1:3 - A *= 2 - end - A - end -f (generic function with 1 method) - -julia> A = f(A) -ITensor ord=2 (dim=2|id=355|"i") (dim=2|id=355|"i")' -NDTensors.Dense{Float64,Array{Float64,1}} - -julia> @show A; -A = ITensor ord=2 -Dim 1: (dim=2|id=355|"i") -Dim 2: (dim=2|id=355|"i")' -NDTensors.Dense{Float64,Array{Float64,1}} - 2×2 - 9.85600917142102 14.803396587421773 - 8.610921921741982 0.2428297612502163 -``` -In this particular case, you can alternatively modify the ITensor -in-place: -```julia -julia> for _ in 1:3 - A ./= 2 - end - -julia> @show A; -A = ITensor ord=2 -Dim 1: (dim=2|id=355|"i") -Dim 2: (dim=2|id=355|"i")' -NDTensors.Dense{Float64,Array{Float64,1}} - 2×2 - 1.2320011464276275 1.8504245734277216 - 1.0763652402177477 0.030353720156277037 -``` - -A common place you might accidentally come across this is when -you are creating a Hamiltonian with `OpSum`: -```julia -julia> using ITensors, ITensorMPS - -julia> N = 4; - -julia> sites = siteinds("S=1/2",N); - -julia> os = OpSum(); - -julia> for j=1:N-1 - os += "Sz", j, "Sz", j+1 - end -ERROR: UndefVarError: os not defined -Stacktrace: - [1] top-level scope at ./REPL[16]:2 - [2] eval(::Module, ::Any) at ./boot.jl:331 - [3] eval_user_input(::Any, ::REPL.REPLBackend) at /home/mfishman/software/julia-1.4.0/share/julia/stdlib/v1.4/REPL/src/REPL.jl:86 - [4] run_backend(::REPL.REPLBackend) at /home/mfishman/.julia/packages/Revise/AMRie/src/Revise.jl:1023 - [5] top-level scope at none:0 -``` -In this case, you can use `os .+= ("Sz", j, "Sz", j+1)`, -`add!(os, "Sz", j, "Sz", j+1)`, or wrap your code in a let-block -or function. - -Take a look at Julia's documentation [here](https://docs.julialang.org/en/v1/manual/variables-and-scoping/) -for rules on scoping. Also note that this behavior is particular -to Julia v1.4 and below, and is expected to change in v1.5. - -Note that the REPL is very useful for prototyping code quickly, -but working directly in the REPL and outside of functions can -cause sub-optimal performance. See Julia's [performance tips](https://docs.julialang.org/en/v1/manual/performance-tips/index.html) -for more information. 
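-
-As an aside to the scoping discussion above, the let-block alternative that was
-mentioned would look like the following sketch, which captures the global `A`
-in a local variable before running the loop:
-```julia
-julia> A = let A = A
-           for _ in 1:3
-             A *= 2
-           end
-           A
-         end;
-```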
-
-We recommend the package [OhMyREPL](https://kristofferc.github.io/OhMyREPL.jl/latest/) which adds syntax highlighting to the Julia REPL.
-
-## Finding documentation interactively
-
-Julia provides many tools for searching for documentation interactively at the REPL. Say that you want to learn more about how to use an ITensor from the command line. You can start by typing `?` followed by `ITensor`:
-```julia
-julia> using ITensors
-
-julia> ?ITensor
-search: ITensor ITensors itensor random_itensor
-
- An ITensor is a tensor whose interface is independent of its
- memory layout. Therefore it is not necessary to know the ordering
- of an ITensor's indices, only which indices an ITensor has.
- Operations like contraction and addition of ITensors automatically
- handle any memory permutations.
-
- Examples
- ≡≡≡≡≡≡≡≡≡≡
-
- julia> i = Index(2, "i")
- (dim=2|id=287|"i")
-
- julia> A = random_itensor(i', i)
- ITensor ord=2 (dim=2|id=287|"i")' (dim=2|id=287|"i")
- NDTensors.Dense{Float64,Array{Float64,1}}
-
- julia> @show A;
- A = ITensor ord=2
- Dim 1: (dim=2|id=287|"i")'
- Dim 2: (dim=2|id=287|"i")
- NDTensors.Dense{Float64,Array{Float64,1}}
- 2×2
- 0.28358594718392427 1.4342219756446355
- 1.6620103556283987 -0.40952231269251566
-
- julia> @show inds(A);
- inds(A) = IndexSet{2} (dim=2|id=287|"i")' (dim=2|id=287|"i")
-[...]
-```
-(the specific output may be different for different versions of ITensors.jl as we update the docs). You can use the help prompt (which you get by typing `?` at the REPL) to print out documentation for types and methods.
-
-Another way to get information about types is with the function `fieldnames`:
-```julia
-julia> fieldnames(ITensor)
-(:store, :inds)
-```
-which shows the fields of a type. Note that in general the specific names of the fields and structures of types may change (we consider those to be internal details); however, we often make functions to access the fields of a type that have the same name as the field, so it is a good place to get started. For example, you can access the storage and indices of an ITensor `A` with the functions `store(A)` and `inds(A)`.
-
-Another helpful function is `apropos`, which searches through all documentation for a string (ignoring case) and prints a list of all types and methods with documentation that contain the string.
-
-Based on the `apropos` function, we can make some helper functions that may be useful. For example:
-```julia
-using ITensors
-
-function finddocs(s)
- io = IOBuffer()
- apropos(io, s)
- v = chomp(String(take!(io)))
- return split(v, "\n")
-end
-
-function finddocs(s...)
- intersect(finddocs.(s)...)
-end
-
-found_methods = finddocs("indices", "set difference")
-display(found_methods)
-```
-returns:
-```julia
-3-element Array{SubString{String},1}:
- "ITensors.noncommoninds"
- "Base.setdiff"
- "ITensors.uniqueinds"
-```
-which are the functions that have docs that contain the strings `"indices"` and `"set difference"`. We can print the docs for `uniqueinds` to find:
-```julia
-help?> uniqueinds
-search: uniqueinds unique_siteinds uniqueind uniqueindex
-
- uniqueinds(A, B; kwargs...)
- uniqueinds(::Order{N}, A, B; kwargs...)
-
-
- Return an IndexSet with indices that are unique to the set of
- indices of A and not in B (the set difference).
-
- Optionally, specify the desired number of indices as Order(N),
- which adds a check and can be a bit more efficient.
-```
-
-We can also filter the results to only show functions from certain modules, for example:
-```julia
-julia> filter(x -> startswith(x, "ITensors"), finddocs("indices", "set difference"))
-2-element Array{SubString{String},1}:
- "ITensors.noncommoninds"
- "ITensors.uniqueinds"
-
-julia> filter(x -> !startswith(x, "ITensors"), finddocs("indices", "set difference"))
-1-element Array{SubString{String},1}:
- "Base.setdiff"
-```
-Ideally we could have `apropos` do a "smart" Google-like search of the appropriate docstrings, but this is a pretty good start.
-
-Additionally, the `names` function can be useful, which prints the names of all functions and types that are exported by a module. For example:
-```julia
-julia> names(ITensors)
-264-element Array{Symbol,1}:
- Symbol("@OpName_str")
- Symbol("@SiteType_str")
- Symbol("@StateName_str")
- Symbol("@TagType_str")
- Symbol("@disable_warn_order")
- Symbol("@reset_warn_order")
- Symbol("@set_warn_order")
- Symbol("@ts_str")
- :AbstractObserver
- :OpSum
- :DMRGObserver
- :ITensor
- :ITensors
- :Index
-[...]
-```
-Of course this is a very long list (and the methods are returned as `Symbol`s, which are like strings but not as easy to work with). However, we can convert the list to strings and filter the strings to find functions we are interested in, for example:
-```julia
-julia> filter(x -> contains(x, "common") && contains(x, "ind"), String.(names(ITensors)))
-8-element Array{String,1}:
- "common_siteind"
- "common_siteinds"
- "commonind"
- "commonindex"
- "commoninds"
- "hascommoninds"
- "noncommonind"
- "noncommoninds"
-```
-
-Julia types do not have member functions, so people coming from object oriented programming languages may find that at first it is more difficult to find methods that are applicable to a certain type. However, Julia has many fantastic tools for introspection that we can use to make this task easier.
-
-## Make a small project based on ITensors.jl
-
-Once you start to have longer code, you will want to put your
-code into one or more files. For example, you may have a short script
-with one or more functions based on ITensors.jl:
-```julia
-# my_itensor_script.jl
-using ITensors
-
-function norm2(A::ITensor)
- return (A*dag(A))[]
-end
-```
-Then, in the same directory as your script `my_itensor_script.jl`,
-just type:
-```julia
-julia> include("my_itensor_script.jl");
-
-julia> i = Index(2; tags="i");
-
-julia> A = random_itensor(i', i);
-
-julia> norm2(A)
-[...]
-```
-
-As your code gets longer, you can split it into multiple files
-and `include` these files into one main project file, for example
-if you have two files with functions in them:
-```julia
-# file1.jl
-
-function norm2(A::ITensor)
- return (A*dag(A))[]
-end
-```
-and
-```julia
-# file2.jl
-
-function square(A::ITensor)
- return A .^ 2
-end
-```
-
-```julia
-# my_itensor_project.jl
-
-using ITensors
-
-include("file1.jl")
-
-include("file2.jl")
-```
-Then, as before, you can use your functions at the Julia REPL
-by just including the file `my_itensor_project.jl`:
-```julia
-julia> include("my_itensor_project.jl");
-
-julia> i = Index(2; tags="i");
-
-julia> A = random_itensor(i', i);
-
-julia> norm2(A)
-[...]
-
-julia> square(A)
-[...]
-```
-
-As your code gets more complicated and has more files, it is helpful
-to organize it into a package. That will be covered in the
-next section.
-
-## Make a Julia package based on ITensors.jl
-
-In this section, we will describe how to make a Julia package based on
-ITensors.jl. This is useful to do when your project gets longer,
-since it helps with:
- - Code organization.
- - Adding dependencies that will get automatically installed through Julia's package system.
- - Versioning.
- - Automated testing.
- - Code sharing and easier package installation.
- - Officially registering your package with Julia.
-and many more features that we will mention later.
-
-Start up Julia and install [PkgTemplates](https://invenia.github.io/PkgTemplates.jl/stable/)
-```julia
-$ julia
-
-julia> ]
-
-pkg> add PkgTemplates
-```
-then press backspace and type:
-```
-julia> using PkgTemplates
-
-julia> t = Template(; user="your_github_username", plugins=[Git(; ssh=true),])
-
-julia> t("MyITensorsPkg")
-```
-You should put your GitHub account name instead of `"your_github_username"`
-if you want to use GitHub to host your package.
-The option `plugins=[Git(; ssh=true),]` sets the GitHub authentication to use
-ssh, which is generally more convenient. You can switch to https (where you
-have to type your username and password to push changes) by setting `ssh=false`
-or leaving off `plugins=[...]`. By default, the package will be located in
-the directory `~/.julia/dev`; you can change this with the keyword argument
-`dir=[...]`. However, `~/.julia/dev` is recommended since that is the directory
-where Julia's package manager (and other packages like `Revise`) will look for
-development packages. Please see the `PkgTemplates` documentation for more
-customization options.
-
-Then, we want to tell Julia about our new package. We do this as
-follows:
-```julia
-julia> ]
-
-pkg> dev ~/.julia/dev/MyITensorsPkg
-```
-then you can do:
-```julia
-julia> using MyITensorsPkg
-```
-from any directory to use your new package. However, it doesn't
-have any functions available yet. Additionally, there should be
-an empty test file already set up here:
-```
-~/.julia/dev/MyITensorsPkg/test/runtests.jl
-```
-which you can run from any directory like:
-```julia
-julia> ]
-
-pkg> test MyITensorsPkg
-```
-It should show something like:
-```julia
-[...]
-Test Summary: |
-MyITensorsPkg.jl | No tests
- Testing MyITensorsPkg tests passed
-```
-since there are no tests yet.
-
-First we want to add ITensors as a dependency of our package.
-We do this by "activating" our package environment and then
-adding ITensors:
-```julia
-julia> ]
-
-pkg> activate MyITensorsPkg
-
-(MyITensorsPkg) pkg> add ITensors
-```
-This will edit the file `~/.julia/dev/MyITensorsPkg/Project.toml`
-and add the line
-```
-[deps]
-ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5"
-```
-Because your package is under development, back in the main
-Pkg environment you should type `resolve`:
-```julia
-(MyITensorsPkg) pkg> activate
-
-pkg> resolve
-```
-Now, if you or someone else uses the package, it will automatically
-install ITensors.jl for you.
-
-Now your package is set up to develop! Try editing the file
-`~/.julia/dev/MyITensorsPkg/src/MyITensorsPkg.jl` and adding the
-`norm2` function, which calculates the squared norm of an ITensor:
-```julia
-module MyITensorsPkg
-
-using ITensors
-
-export norm2
-
-norm2(A::ITensor) = (A*dag(A))[]
-
-end
-```
-The `export` command makes `norm2` available in the namespace without
-needing to type `MyITensorsPkg.norm2` when you do
-`using MyITensorsPkg`. Now in a new Julia session you can do:
-Now in a new Julia session you can do:
-```julia
-julia> using ITensors
-
-julia> i = Index(2)
-(dim=2|id=263)
-
-julia> A = random_itensor(i)
-ITensor ord=1 (dim=2|id=263)
-NDTensors.Dense{Float64,Array{Float64,1}}
-
-julia> norm(A)^2
-6.884457016011188
-
-julia> norm2(A)
-ERROR: UndefVarError: norm2 not defined
-[...]
-
-julia> using MyITensorsPkg
-
-julia> norm2(A)
-6.884457016011188
-```
-Unfortunately, if you continue to edit the file `MyITensorsPkg.jl`
-in the same Julia session, the changes will not be reflected even
-if you type `using MyITensorsPkg` again, and you will have to
-restart your Julia session. The
-[Revise](https://timholy.github.io/Revise.jl/stable/) package
-will allow you to edit your package files and have the changes
-reflected in real time in your current Julia session, so you
-don't have to restart the session.
-
-Now, we can add some tests for our new functionality. Edit
-the file `~/.julia/dev/MyITensorsPkg/test/runtests.jl` to
-look like:
-```julia
-using MyITensorsPkg
-using ITensors
-using Test
-
-@testset "MyITensorsPkg.jl" begin
-  i = Index(2)
-  A = random_itensor(i)
-  @test isapprox(norm2(A), norm(A)^2)
-end
-```
-Now when you test your package you should see:
-```julia
-pkg> test MyITensorsPkg
-[...]
-Test Summary:    | Pass  Total
-MyITensorsPkg.jl |    1      1
-    Testing MyITensorsPkg tests passed
-```
-
-Your package should already be set up as a git repository by
-the `PkgTemplates` commands we started with.
-We recommend using Github or similar version control systems
-for your packages, especially if you plan to make them public
-and officially register them as Julia packages.
-
-You can set up your local package as a Github repository by
-following the steps [here](https://help.github.com/en/github/importing-your-projects-to-github/adding-an-existing-project-to-github-using-the-command-line). Many of the steps may be unnecessary since they
-were already set up by `PkgTemplates`. You should be able to
-go to the website [here](https://github.com/new), create a new
-Github repository with the name `MyITensorsPkg.jl`, and then follow
-the instructions under "push an existing repository from the command line".
-
-You may also want to switch between HTTPS and SSH authentication
-as described [here](https://help.github.com/en/github/using-git/changing-a-remotes-url),
-if you didn't choose your preferred authentication protocol with
-PkgTemplates.
-
-There are many more features you can add to your package through
-various Julia packages and Github, for example:
- - Control of precompilation with tools like [SnoopCompile](https://timholy.github.io/SnoopCompile.jl/stable/).
- - Automatic testing of your package at every pull request/commit with Github Actions, Travis, or similar services.
- - Automated benchmarking of your package at every pull request with [BenchmarkTools](https://github.com/JuliaCI/BenchmarkTools.jl), [PkgBenchmark](https://juliaci.github.io/PkgBenchmark.jl/stable/) and [BenchmarkCI](https://github.com/tkf/BenchmarkCI.jl) (a minimal benchmark-suite sketch is shown just after this list).
- - Automated building of your documentation with [Documenter](https://juliadocs.github.io/Documenter.jl/stable/).
- - Compiling your package with [PackageCompiler](https://julialang.github.io/PackageCompiler.jl/dev/).
- - Automatically check which parts of your code your tests cover with code coverage tools.
- - Officially register your Julia package so that others can easily install it and follow along with updated versions using the [Registrator](https://juliaregistries.github.io/Registrator.jl/stable/).
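-
-As an illustration of the benchmarking item above, here is a minimal sketch of what a benchmark suite for the `norm2` function from our example package might look like. The file location `benchmark/benchmarks.jl` and the top-level `SUITE` variable follow the conventions documented by `PkgBenchmark`; the rest is just a hypothetical illustration, not something generated by `PkgTemplates`:
-```julia
-# benchmark/benchmarks.jl
-using BenchmarkTools
-using ITensors
-using MyITensorsPkg
-
-# PkgBenchmark runs a top-level `BenchmarkGroup` named `SUITE`.
-const SUITE = BenchmarkGroup()
-
-i = Index(100)
-# Benchmark `norm2`, generating a fresh random ITensor for each sample.
-SUITE["norm2"] = @benchmarkable norm2(A) setup = (A = random_itensor($i, $(i')))
-```
-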
-You can take a look at the [ITensors](https://github.com/ITensor/ITensors.jl)
-Github page for inspiration on setting up some of these services
-and ideas for organizing your package.
-
-## Developing ITensors.jl
-
-This section is for someone who is interested in modifying the source code of ITensors.jl,
-and then possibly contributing your changes to the official ITensors.jl package.
-
-This should not be necessary for most people. If for whatever reason you think that the functionality
-of ITensors.jl needs to be modified, oftentimes you can add new functions outside of ITensors.jl
-or directly overload a function of ITensors.jl (for example with the [import](https://docs.julialang.org/en/v1/manual/modules/#using-and-import-with-specific-identifiers,-and-adding-methods) keyword).
-
-However, if you would like to only modify parts of the internals of an ITensors.jl function,
-and/or plan to contribute changes like bug fixes or new features to the official ITensors.jl
-package, this section is for you.
-
-If you install a package like ITensors with the package manager using the standard `Pkg.add` command:
-```julia
-julia> using Pkg
-
-julia> Pkg.add("ITensors")
-```
-it will automatically clone the latest registered/tagged version of `ITensors` into a randomly
-generated directory inside `~/.julia/packages`. You can find out what version you are using with `Pkg.status`:
-```julia
-julia> Pkg.status("ITensors")
-      Status `~/.julia/environments/v1.7/Project.toml`
-  [9136182c] ITensors v0.2.16
-```
-and you can use [`pkgdir`](https://docs.julialang.org/en/v1/base/base/#Base.pkgdir-Tuple{Module})
-to find out the directory of the source code of a package that you have loaded:
-```julia
-julia> using ITensors
-
-julia> pkgdir(ITensors)
-"/home/mfishman/.julia/packages/ITensors/cu9Bo"
-```
-The source code of a package loaded in this way is read-only, so you won't be able to modify it.
-
-If you want to modify the source code of `ITensors.jl`, you should check out the packages
-`NDTensors.jl` and `ITensors.jl` in development mode with `Pkg.develop`:
-```julia
-julia> Pkg.develop(["NDTensors", "ITensors"])
-Path `/home/mfishman/.julia/dev/ITensors` exists and looks like the correct repo. Using existing path.
-   Resolving package versions...
-    Updating `~/.julia/environments/v1.7/Project.toml`
-  [9136182c] ~ ITensors v0.2.16 ⇒ v0.2.16 `~/.julia/dev/ITensors`
-  [23ae76d9] ~ NDTensors v0.1.35 ⇒ v0.1.35 `~/.julia/dev/ITensors/NDTensors`
-    Updating `~/.julia/environments/v1.7/Manifest.toml`
-  [9136182c] ~ ITensors v0.2.16 ⇒ v0.2.16 `~/.julia/dev/ITensors`
-  [23ae76d9] ~ NDTensors v0.1.35 ⇒ v0.1.35 `~/.julia/dev/ITensors/NDTensors`
-
-julia> Pkg.status(["NDTensors", "ITensors"])
-      Status `~/.julia/environments/v1.7/Project.toml`
-  [9136182c] ITensors v0.2.16 `~/.julia/dev/ITensors`
-  [23ae76d9] NDTensors v0.1.35 `~/.julia/dev/ITensors/NDTensors`
-```
-Then, Julia will use the version of `ITensors.jl` living in the directory `~/.julia/dev/ITensors`
-and the version of `NDTensors.jl` living in the directory `~/.julia/dev/ITensors/NDTensors`,
-though you may need to restart Julia for this to take effect.
-
-We recommend checking out the development versions of both `NDTensors.jl` and `ITensors.jl`
-since we often develop both packages in tandem, so the development branch
-of `ITensors.jl` may rely on changes we make in `NDTensors.jl`.
-
-By default, when you modify code in `~/.julia/dev/ITensors` or `~/.julia/dev/ITensors/NDTensors`
-you will need to restart Julia for the changes to take effect.
-A way around this issue is the [Revise](https://timholy.github.io/Revise.jl/stable/) package,
-which we highly recommend using when you are developing packages.
-It automatically detects changes you make to a package you have checked out
-for development, so you can edit code without having to restart your Julia session.
-In short, if you have `Revise.jl` loaded, you can edit the code in `~/.julia/dev/ITensors`
-or `~/.julia/dev/ITensors/NDTensors` and the changes you make will be reflected on the fly as
-you use the package (there are some limitations, for example you will need to restart Julia
-if you change the definitions of types).
-
-Note that the code in `~/.julia/dev/ITensors` is just a git repository cloned from
-the repository https://github.com/ITensor/ITensors.jl, so you can do anything that
-you would with any other git repository (use forks of the project, check out branches,
-push and pull changes, etc.).
-
-The standard procedure for submitting a bug fix or new feature to ITensors.jl would then
-be to first [fork the ITensors.jl repository](https://docs.github.com/en/get-started/quickstart/fork-a-repo).
-Then, check out your fork for development with:
-```julia
-julia> using Pkg
-
-julia> Pkg.develop(url="https://github.com/mtfishman/ITensors.jl")
-```
-where you would replace `mtfishman` with your own Github username.
-Make the changes to the code in `~/.julia/dev/ITensors`, push the changes to your fork, and then
-[make a pull request](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request) to the [ITensors.jl Github repository](https://github.com/ITensor/ITensors.jl/compare).
-
-To go back to the official version of the `NDTensors.jl` and `ITensors.jl` packages, you can use the command `Pkg.free(["NDTensors", "ITensors"])`:
-```julia
-julia> Pkg.free(["NDTensors", "ITensors"])
-   Resolving package versions...
-    Updating `~/.julia/environments/v1.7/Project.toml`
-  [9136182c] ~ ITensors v0.2.16 `~/.julia/dev/ITensors` ⇒ v0.2.16
-  [23ae76d9] ~ NDTensors v0.1.35 `~/.julia/dev/ITensors/NDTensors` ⇒ v0.1.35
-    Updating `~/.julia/environments/v1.7/Manifest.toml`
-  [9136182c] ~ ITensors v0.2.16 `~/.julia/dev/ITensors` ⇒ v0.2.16
-  [23ae76d9] ~ NDTensors v0.1.35 `~/.julia/dev/ITensors/NDTensors` ⇒ v0.1.35
-
-julia> Pkg.status(["NDTensors", "ITensors"])
-      Status `~/.julia/environments/v1.7/Project.toml`
-  [9136182c] ITensors v0.2.16
-  [23ae76d9] NDTensors v0.1.35
-```
-so it returns to the version of the package you would have just after installing with `Pkg.add`.
-
-Some of the Julia package development workflow definitely takes some getting used to,
-but once you figure out the "flow" and have a picture of what is going on there are
-only a small set of commands you really need to use.
-
-A small note is that we follow the [Blue style guide](https://github.com/invenia/BlueStyle)
-for formatting the source code in ITensors.jl.
-To make this more automated, we use the wonderful package
-[JuliaFormatter.jl](https://github.com/domluna/JuliaFormatter.jl).
-To format your developed version of ITensors.jl, all you have to do is change your directory
-to `~/.julia/dev/ITensors` and run the command `format(".")` after loading the `JuliaFormatter`
-package:
-```julia
-julia> using Pkg
-
-julia> Pkg.status("ITensors")
-      Status `~/.julia/environments/v1.7/Project.toml`
-  [9136182c] ITensors v0.2.16 `~/.julia/dev/ITensors`
-
-julia> using ITensors
-
-julia> pkgdir(ITensors)
-"/home/mfishman/.julia/dev/ITensors"
-
-julia> cd(pkgdir(ITensors))
-
-julia> using JuliaFormatter
-
-julia> format(".")
-false
-
-julia> format(".") # Check the formatting succeeded
-true
-```
-This will automatically change the style of the code according to the `Blue` style guide.
-The `format` command returns `false` if the code was not already formatted (and therefore
-if the command made changes to the source code to follow the style guide), and
-returns `true` otherwise.
-
-If you make changes to ITensors that you think will be useful to others, such as fixing bugs
-or adding new features, please consider making a [pull request](https://github.com/ITensor/ITensors.jl/compare).
-However, please ask us first before doing so -- either by raising an [issue on Github](https://github.com/ITensor/ITensors.jl/issues) or asking a question on the [ITensor support forum](http://itensor.org/support/) --
-to make sure it is a change or addition that we will want to include or to check that it is not something
-we are currently working on. Coordinating with us in that way will help save your time and energy as well as ours!
-
-[Here](https://www.youtube.com/watch?v=QVmU29rCjaA) is a great introduction to Julia package development,
-as well as making pull requests to existing Julia packages, by the irreplaceable Chris Rackauckas.
-
-## Compiling ITensors.jl
-
-You might notice that the time to load ITensors.jl (with `using
-ITensors`) and the time to run your first few ITensor commands is
-slow. This is due to Julia's just-in-time (JIT) compilation.
-Julia is compiling special versions of each function that is
-being called based on the inputs that it gets at runtime. This
-allows it to have fast code, often nearly as fast as fully compiled
-languages like C++, while still being a dynamic language.
-
-However, the long startup time can still be annoying. In this section,
-we will discuss some strategies that can be used to minimize this
-annoyance, for example:
- - Precompilation.
- - Staying in the same Julia session with [Revise](https://timholy.github.io/Revise.jl/stable/).
- - Using [PackageCompiler](https://julialang.github.io/PackageCompiler.jl/dev/) to compile ITensors.jl ahead of time.
-
-Precompilation is performed automatically when you first install
-ITensors.jl or update to a new version and then run the command `using ITensors`
-for the first time. For example, when you first use ITensors after
-installation or updating, you will see:
-```julia
-julia> using ITensors
-[ Info: Precompiling ITensors [9136182c-28ba-11e9-034c-db9fb085ebd5]
-```
-The process is done automatically, and
-puts some compiled binaries in your `~/.julia` directory. The
-goal is to decrease the time it takes when you first type
-`using ITensors` in your next Julia session, and also the time
-it takes for you to first run ITensor functions in a new
-Julia session. This helps the startup time, but currently doesn't
-help enough.
-This is something both ITensors.jl and the Julia language will try
-to improve over time.
-
-To avoid this time, it is recommended that you work as much as you
-can in a single Julia session.
-You should not need to restart your
-Julia session very often. For example, if you are writing code in
-a script, just `include` the file again, which will pull in the new
-changes to the script (the exception is if you change the definition
-of a type you made, which would require restarting the REPL).
-
-If you are working on a project, we highly recommend using the
-[Revise](https://timholy.github.io/Revise.jl/stable/) package
-which automatically detects changes you are making in your
-packages and reflects them in real time in your current REPL session.
-Using these strategies should minimize the number of times you
-need to restart your REPL session.
-
-If you plan to use ITensors.jl directly from the command line
-(i.e. not from the REPL), and the startup time is an issue,
-you can try compiling ITensors.jl using [PackageCompiler](https://julialang.github.io/PackageCompiler.jl/dev/).
-
-Before using PackageCompiler to compile ITensors, when we first start using ITensors.jl we might see:
-```julia
-julia> @time using ITensors
-  3.845253 seconds (10.96 M allocations: 618.071 MiB, 3.95% gc time)
-
-julia> @time i = Index(2);
-  0.000684 seconds (23 allocations: 20.328 KiB)
-
-julia> @time A = random_itensor(i', i);
-  0.071022 seconds (183.24 k allocations: 9.715 MiB)
-
-julia> @time svd(A, i');
-  5.802053 seconds (24.56 M allocations: 1.200 GiB, 7.83% gc time)
-
-julia> @time svd(A, i');
-  0.000177 seconds (450 allocations: 36.609 KiB)
-```
-ITensors provides the command `ITensors.compile()` to create what is
-called a "custom system image", a custom version of Julia that
-includes a compiled version of ITensors (see the [PackageCompiler documentation](https://julialang.github.io/PackageCompiler.jl/dev/) for more details).
-
-!!! compat "ITensors 0.7"
-    As of ITensors 0.7, you must now install and load both the
-    [ITensorMPS.jl](https://github.com/ITensor/ITensorMPS.jl) package
-    and the [PackageCompiler.jl](https://github.com/JuliaLang/PackageCompiler.jl)
-    package in order to use `ITensors.compile()`, since it relies on running MPS/MPO
-    functionality as example code for Julia to compile, and is implemented in a package
-    extension in order to make `PackageCompiler.jl` an optional dependency.
-
-Just run the commands:
-```
-julia> using ITensors, ITensorMPS
-
-julia> using PackageCompiler
-
-julia> ITensors.compile()
-[...]
-```
-By default, this will create the file `sys_itensors.so` in the directory
-`~/.julia/sysimages`.
-Then if we start julia with:
-```
-$ julia --sysimage ~/.julia/sysimages/sys_itensors.so
-```
-then you should see something like:
-```julia
-julia> @time using ITensors
-  0.330587 seconds (977.61 k allocations: 45.807 MiB, 1.89% gc time)
-
-julia> @time i = Index(2);
-  0.000656 seconds (23 allocations: 20.328 KiB)
-
-julia> @time A = random_itensor(i', i);
-  0.000007 seconds (7 allocations: 576 bytes)
-
-julia> @time svd(A, i');
-  0.263526 seconds (290.02 k allocations: 14.220 MiB)
-
-julia> @time svd(A, i');
-  0.000135 seconds (350 allocations: 29.984 KiB)
-```
-which is much better.
-
-Note that you will have to recompile ITensors with the command
-`ITensors.compile()` any time that you update the version of ITensors
-in order to keep the system image updated. We hope to make this
-process more automated in the future.
-
-## Benchmarking and profiling
-
-Julia has great built-in tools for benchmarking and profiling.
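-As an example of the built-in tooling, here is a minimal sketch of
-profiling a function with the `Profile` standard library (the function
-`work` is just a hypothetical stand-in for your own code):
-```julia
-julia> using Profile, LinearAlgebra
-
-julia> work() = sum(svd(randn(200, 200)).S);
-
-julia> work();  # run once first so we profile runtime, not compilation
-
-julia> @profile foreach(_ -> work(), 1:100)
-
-julia> Profile.print(maxdepth = 5)
-[...]
-```
-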
-For benchmarking fast code at the command line, you can use
-[BenchmarkTools](https://github.com/JuliaCI/BenchmarkTools.jl/blob/main/doc/manual.md):
-```julia
-julia> using ITensors;
-
-julia> using BenchmarkTools;
-
-julia> i = Index(100, "i");
-
-julia> A = random_itensor(i, i');
-
-julia> @btime 2*$A;
-  4.279 μs (8 allocations: 78.73 KiB)
-```
-
-We recommend packages like [ProfileView](https://github.com/timholy/ProfileView.jl)
-to get detailed profiles of your code, in order to pinpoint functions
-or lines of code that are slower than they should be.
-
-## ITensor type design and writing performant code
-
-Advanced users might notice something strange about the definition
-of the ITensor type: it is often not "type stable". Some of
-this is by design. The definition for ITensor is:
-```julia
-mutable struct ITensor
-  inds::IndexSet
-  store::TensorStorage
-end
-```
-These are both abstract types, which is something that is generally
-discouraged for performance.
-
-This has a few disadvantages. Some code that you might expect to be
-type stable, like `getindex`, is not, for example:
-```julia
-julia> i = Index(2, "i");
-
-julia> A = random_itensor(i, i');
-
-julia> @code_warntype A[i=>1, i'=>2]
-Variables
-  #self#::Core.Compiler.Const(getindex, false)
-  T::ITensor
-  ivs::Tuple{Pair{Index{Int64},Int64}}
-  p::Tuple{Union{Nothing, Int64}}
-  vals::Tuple{Any}
-
-Body::Number
-1 ─ %1  = NDTensors.getperm::Core.Compiler.Const(NDTensors.getperm, false)
-│   %2  = ITensors.inds(T)::IndexSet{1,IndexT,DataT} where DataT<:Tuple where IndexT<:Index
-│   %3  = Base.broadcasted(ITensors.ind, ivs)::Base.Broadcast.Broadcasted{Base.Broadcast.Style{Tuple},Nothing,typeof(ind),Tuple{Tuple{Pair{Index{Int64},Int64}}}}
-│   %4  = Base.materialize(%3)::Tuple{Index{Int64}}
-│         (p = (%1)(%2, %4))
-│   %6  = NDTensors.permute::Core.Compiler.Const(NDTensors.permute, false)
-│   %7  = Base.broadcasted(ITensors.val, ivs)::Base.Broadcast.Broadcasted{Base.Broadcast.Style{Tuple},Nothing,typeof(val),Tuple{Tuple{Pair{Index{Int64},Int64}}}}
-│   %8  = Base.materialize(%7)::Tuple{Int64}
-│         (vals = (%6)(%8, p))
-│   %10 = Core.tuple(T)::Tuple{ITensor}
-│   %11 = Core._apply_iterate(Base.iterate, Base.getindex, %10, vals)::Number
-│   %12 = Core.typeassert(%11, ITensors.Number)::Number
-└──       return %12
-
-julia> typeof(A[i=>1, i'=>2])
-Float64
-```
-Uh oh, that doesn't look good! Julia can't know ahead of time, based on
-the inputs, what the type of the output is, besides that it will be a
-`Number` (though at runtime, the output has a concrete type, `Float64`).
-
-So why is it designed this way? The main reason is to allow more
-generic and dynamic code than traditional, statically-typed Arrays.
-This allows us to have code like:
-```julia
-julia> i = Index(2, "i")
-(dim=2|id=811|"i")
-
-julia> A = ITensor(i', i);
-
-julia> @show A;
-A = ITensor ord=2
-Dim 1: (dim=2|id=811|"i")'
-Dim 2: (dim=2|id=811|"i")
-NDTensors.Empty{Float64,NDTensors.Dense{Float64,Array{Float64,1}}}
- 2×2
-
-
-julia> A[i' => 1, i => 2] = 1.2;
-
-julia> @show A;
-A = ITensor ord=2
-Dim 1: (dim=2|id=811|"i")'
-Dim 2: (dim=2|id=811|"i")
-NDTensors.Dense{Float64,Array{Float64,1}}
- 2×2
- 0.0  1.2
- 0.0  0.0
-```
-Here, the type of the storage of `A` is changed in-place. It starts as an `Empty` storage, a special trivial storage. When we set an element, we then allocate the appropriate storage. Allocations are performed only when needed, so if another element is set then no allocation is performed.
-More generally, this allows ITensors to have more generic in-place
-functionality, so you can write code where you don't know what the
-storage is until runtime.
-
-This can lead to certain types of code having performance problems,
-for example, looping through ITensors with many elements can be slow:
-```julia
-julia> function myscale!(A::ITensor, x::Number)
-         for n in 1:dim(A)
-           A[n] = x * A[n]
-         end
-       end;
-
-julia> d = 10_000;
-
-julia> i = Index(d);
-
-julia> @btime myscale!(A, 2) setup = (A = random_itensor(i));
-  2.169 ms (117958 allocations: 3.48 MiB)
-```
-However, this is fast:
-```julia
-julia> function myscale!(A::Array, x::Number)
-         for n in 1:length(A)
-           A[n] = x * A[n]
-         end
-       end;
-
-julia> @btime myscale!(A, 2) setup = (A = randn(d));
-  3.451 μs (0 allocations: 0 bytes)
-
-julia> myscale2!(A::ITensor, x::Number) = myscale!(array(A), x)
-myscale2! (generic function with 1 method)
-
-julia> @btime myscale2!(A, 2) setup = (A = random_itensor(i));
-  3.571 μs (2 allocations: 112 bytes)
-```
-How does this work? It relies on a "function barrier" technique.
-Julia compiles functions "just-in-time", so that calls to an inner
-function written in terms of a type-stable type are still fast.
-That inner function is compiled to very fast code.
-The main overhead is that Julia has to determine which function
-to call at runtime.
-
-Therefore, users should keep this in mind when they are writing
-ITensors.jl code, and we warn that explicitly looping over large
-ITensors by individual elements should be done with caution in
-performance-critical sections of your code.
-However, be sure to benchmark and profile your code before
-prematurely optimizing, since you may be surprised by which
-parts of your code are fast and which are slow.
-
-Some strategies for avoiding ITensor loops are:
- - Use broadcasting and other built-in ITensor functionality that makes use of function barriers.
- - Convert ITensors to type-stable collections like the Tensor type of NDTensors.jl and write functions in terms of the Tensor type (i.e. the function barrier technique that is used throughout ITensors.jl).
- - When initializing very large ITensors elementwise, use built-in ITensor constructors, or first construct an equivalent tensor as an Array or Tensor and then convert it to an ITensor.
-
-## ITensor in-place operations
-
-In-place operations can help with optimizing code, when the
-memory of the output tensor of an operation is preallocated.
-
-The main way to access this in ITensor is through broadcasting.
-For example:
-```julia
-A = random_itensor(i, i')
-B = random_itensor(i', i)
-A .+= 2 .* B
-```
-Internally, this is rewritten by Julia as a call to `broadcast!`.
-ITensors.jl overloads this call (or more specifically, a lower
-level function `copyto!` written in terms of a special lazy type
-that saves all of the objects and operations). Then, this call is
-rewritten as
-```julia
-map!((x,y) -> x+2*y, A, A, B)
-```
-This is mostly an optimization to use when you can preallocate
-storage that can be used multiple times.
-
-Additionally, ITensors makes the unique choice that:
-```julia
-C .= A .* B
-```
-is interpreted as an in-place tensor contraction. What this means
-is that it calls the function:
-```julia
-mul!(C, A, B)
-```
-(likely to be given an alternative name `contract!`) which contracts
-`A` and `B` into the pre-allocated memory `C`.
-
-Because of the design of the ITensor type (see the section above),
-there is some flexibility we take in allocating memory for users.
-For example, if the storage type is more narrow than the result,
-for convenience we might expand it in-place. If you are worried
-about memory allocations, we recommend using benchmarking and
-profiling to pinpoint slow parts of your code (oftentimes, you
-may be surprised by what is actually slow).
-
-## NDTensors and ITensors
-
-ITensors.jl is built on top of another, more traditional tensor
-library called NDTensors. NDTensors implements AbstractArrays with
-a variety of sparse storage types, with more to come in the future.
-
-NDTensors implements functionality like permutation of dimensions,
-fast getting and setting of elements, broadcasting, and tensor contraction (where
-labels of the dimensions must be specified).
-
-For example:
-```julia
-using ITensors
-using NDTensors
-
-T = Tensor(2,2,2)
-T[1,2,1] = 1.3 # Conventional element setting
-
-i = Index(2)
-T = Tensor((i,i',i')) # The identifiers are ignored, just interpreted as above
-T[1,2,1] = 1.3
-```
-To make performant ITensor code (refer to the previous section
-on type stability and function barriers), ITensor storage data and
-indices are passed by reference into Tensors, where the performance-critical
-operations are performed.
-
-An example of a function barrier using NDTensors is the following:
-```julia
-julia> using ITensors, NDTensors, BenchmarkTools
-
-julia> d = 10_000;
-
-julia> i = Index(d);
-
-julia> function myscale!(A::Tensor, x::Number)
-         for n in 1:dim(A)
-           A[n] = x * A[n]
-         end
-       end;
-
-julia> @btime myscale!(A, 2) setup = (A = Tensor(d));
-  3.530 μs (0 allocations: 0 bytes)
-
-julia> myscale2!(A::ITensor, x::Number) = myscale!(tensor(A), x)
-myscale2! (generic function with 1 method)
-
-julia> @btime myscale2!(A, 2) setup = (A = random_itensor(i));
-  3.549 μs (2 allocations: 112 bytes)
-```
-A very efficient function is written for the Tensor type. Then,
-the ITensor version just wraps the Tensor function by calling it
-after converting the ITensor to a Tensor (without any copying)
-with the `tensor` function.
-This is the basis for the design of all performance-critical ITensors.jl functions.
diff --git a/docs/src/CodeTiming.md b/docs/src/CodeTiming.md
deleted file mode 100644
index 3fefa2a0a3..0000000000
--- a/docs/src/CodeTiming.md
+++ /dev/null
@@ -1,10 +0,0 @@
-
-# Timing and Profiling your code
-
-It is very important to time and profile your code to make sure it is running as fast as possible. Here are some tips on timing and profiling your code.
-
-If you are concerned about the performance of your code, a good place to start is Julia's [performance tips](https://docs.julialang.org/en/v1/manual/performance-tips/).
-
-## Timing and benchmarking
-
-Julia has many nice timing tools available. Tools like [@time](https://docs.julialang.org/en/v1/base/base/#Base.@time) and [TimerOutputs](https://github.com/KristofferC/TimerOutputs.jl) can be used to measure the time of specific lines of code. For microbenchmarking, we recommend the [BenchmarkTools](https://github.com/JuliaCI/BenchmarkTools.jl) package. For profiling your code, see the Julia documentation on [profiling](https://docs.julialang.org/en/v1/manual/profile/).
diff --git a/docs/src/ContractionSequenceOptimization.md b/docs/src/ContractionSequenceOptimization.md
deleted file mode 100644
index 5991946e80..0000000000
--- a/docs/src/ContractionSequenceOptimization.md
+++ /dev/null
@@ -1,130 +0,0 @@
-# Contraction sequence optimization
-
-When contracting a tensor network, the sequence of contraction makes a big difference in the computational cost.
-However, the complexity of determining the optimal sequence grows exponentially with the number of tensors. Still, there are many heuristic algorithms available for computing optimal sequences for small networks[^1][^2][^3][^4][^5][^6]. ITensors.jl provides some functionality for helping you find the optimal contraction sequence for small tensor networks, as we will show below.
-
-The algorithm in ITensors.jl currently uses a modified version of[^1] with simplifications for outer product contractions similar to those used in [TensorOperations.jl](https://github.com/Jutho/TensorOperations.jl).
-
-[^1]: [Faster identification of optimal contraction sequences for tensor networks](https://arxiv.org/abs/1304.6112)
-[^2]: [Improving the efficiency of variational tensor network algorithms](https://arxiv.org/abs/1310.8023)
-[^3]: [Simulating quantum computation by contracting tensor networks](https://arxiv.org/abs/quant-ph/0511069)
-[^4]: [Towards a polynomial algorithm for optimal contraction sequence of tensor networks from trees](https://journals.aps.org/pre/abstract/10.1103/PhysRevE.100.043309)
-[^5]: [Algorithms for Tensor Network Contraction Ordering](https://arxiv.org/abs/2001.08063)
-[^6]: [Hyper-optimized tensor network contraction](https://arxiv.org/abs/2002.01935)
-
-## Functions
-
-```@docs
-ITensors.optimal_contraction_sequence
-ITensors.contraction_cost
-contract
-```
-
-## Examples
-
-In the following example, we show how to compute the contraction costs of two different contraction sequences for a small tensor network, with the index dimensions specified symbolically:
-```julia
-using ITensors
-using Symbolics
-
-using ITensors: contraction_cost
-
-@variables m, k, d
-
-l = Index(m, "l")
-r = Index(m, "r")
-h₁ = Index(k, "h₁")
-h₂ = Index(k, "h₂")
-h₃ = Index(k, "h₃")
-s₁ = Index(d, "s₁")
-s₂ = Index(d, "s₂")
-
-H₁ = ITensor(dag(s₁), s₁', dag(h₁), h₂)
-H₂ = ITensor(dag(s₂), s₂', dag(h₂), h₃)
-L = ITensor(dag(l), l', h₁)
-R = ITensor(dag(r), r', h₃)
-ψ = ITensor(l, s₁, s₂, r)
-
-TN = [ψ, L, H₁, H₂, R]
-sequence1 = Any[2, Any[3, Any[4, Any[1, 5]]]]
-sequence2 = Any[Any[4, 5], Any[1, Any[2, 3]]]
-cost1 = contraction_cost(TN; sequence = sequence1)
-cost2 = contraction_cost(TN; sequence = sequence2)
-
-println("First sequence")
-display(sequence1)
-display(cost1)
-@show sum(cost1)
-@show substitute(sum(cost1), Dict(d => 4))
-
-println("\nSecond sequence")
-display(sequence2)
-display(cost2)
-@show sum(cost2)
-@show substitute(sum(cost2), Dict(d => 4))
-```
-This example helps us learn that in the limit of large MPS bond dimension `m`, the first contraction sequence is faster, while in the limit of large MPO bond dimension `k`, the second sequence is faster. This has practical implications for writing an efficient DMRG algorithm in both limits, which we plan to incorporate into ITensors.jl.
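-
-You can also ask for a good contraction sequence directly and pass it to `contract`. Here is a minimal sketch (the index dimensions are arbitrary and chosen only for illustration):
-```julia
-using ITensors
-using ITensors: optimal_contraction_sequence
-
-i, j, k, l = Index.((10, 20, 30, 40))
-A = random_itensor(i, j)
-B = random_itensor(j, k)
-C = random_itensor(k, l)
-TN = [A, B, C]
-
-# Compute a good contraction sequence once, then reuse it
-# for contracting networks with the same structure.
-sequence = optimal_contraction_sequence(TN)
-D = contract(TN; sequence = sequence)
-```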
-
-Here is a more systematic example of searching through the parameter space to find optimal contraction sequences:
-```julia
-using ITensors
-using Symbolics
-
-using ITensors: contraction_cost, optimal_contraction_sequence
-
-function tensor_network(; m, k, d)
-  l = Index(m, "l")
-  r = Index(m, "r")
-  h₁ = Index(k, "h₁")
-  h₂ = Index(k, "h₂")
-  h₃ = Index(k, "h₃")
-  s₁ = Index(d, "s₁")
-  s₂ = Index(d, "s₂")
-
-  ψ = ITensor(l, s₁, s₂, r)
-  L = ITensor(dag(l), l', h₁)
-  H₁ = ITensor(dag(s₁), s₁', dag(h₁), h₂)
-  H₂ = ITensor(dag(s₂), s₂', dag(h₂), h₃)
-  R = ITensor(dag(r), r', h₃)
-  return [ψ, L, H₁, H₂, R]
-end
-
-function main()
-  mrange = 50:10:80
-  krange = 50:10:80
-  sequence_costs = Matrix{Any}(undef, length(mrange), length(krange))
-  for iₘ in eachindex(mrange), iₖ in eachindex(krange)
-    m_val = mrange[iₘ]
-    k_val = krange[iₖ]
-    d_val = 4
-
-    TN = tensor_network(; m = m_val, k = k_val, d = d_val)
-    sequence = optimal_contraction_sequence(TN)
-    cost = contraction_cost(TN; sequence = sequence)
-
-    @variables m, k, d
-    TN_symbolic = tensor_network(; m = m, k = k, d = d)
-    cost_symbolic = contraction_cost(TN_symbolic; sequence = sequence)
-    sequence_cost = (dims = (m = m_val, k = k_val, d = d_val), sequence = sequence, cost = cost, symbolic_cost = cost_symbolic)
-    sequence_costs[iₘ, iₖ] = sequence_cost
-  end
-  return sequence_costs
-end
-
-sequence_costs = main()
-
-# Analyze the results.
-println("Index dimensions")
-display(getindex.(sequence_costs, :dims))
-
-println("\nContraction sequences")
-display(getindex.(sequence_costs, :sequence))
-
-println("\nSymbolic contraction cost with d = 4")
-# Fix d to a certain value (such as 4 for a Hubbard site)
-@variables d
-var_sub = Dict(d => 4)
-display(substitute.(sum.(getindex.(sequence_costs, :symbolic_cost)), (var_sub,)))
-```
-
-A future direction will be to allow optimizing over contraction sequences with the dimensions specified symbolically, so that the optimal sequence in certain limits of the dimensions can be found. In addition, we plan to implement more algorithms that work for larger networks, as well as algorithms like[^2] which take an optimal sequence for a closed network and generate optimal sequences for environments of each tensor in the network, which is helpful for computing gradients of tensor networks.
-
diff --git a/docs/src/DMRG.md b/docs/src/DMRG.md
deleted file mode 100644
index cc1580877e..0000000000
--- a/docs/src/DMRG.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# DMRG
-
-```@docs
-dmrg
-```
diff --git a/docs/src/DMRGObserver.md b/docs/src/DMRGObserver.md
deleted file mode 100644
index 8a56cfca48..0000000000
--- a/docs/src/DMRGObserver.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# DMRGObserver
-
-A DMRGObserver is a type of [observer](@ref observer) which
-offers certain useful, general purpose capabilities
-for DMRG calculations such as measuring custom
-local observables at each step and stopping DMRG
-early if certain energy convergence conditions are met.
-
-## Sample Usage
-
-In the following example, we have already made a Hamiltonian MPO `H`,
-an initial MPS `psi0`, and sweep parameters `sweeps` for a system of
-`N` spins whose site indices `sites` have an associated "Sz" operator
-defined. We construct a
-`DMRGObserver` which measures "Sz" on each site at each
-step of DMRG, and also stops the calculation early if
-the energy no longer changes to a relative precision of 1E-7.
-
-```
-Sz_observer = DMRGObserver(["Sz"],sites,energy_tol=1E-7)
-
-energy, psi = dmrg(H,psi0,sweeps,observer=Sz_observer)
-
-for (sw,Szs) in enumerate(measurements(Sz_observer)["Sz"])
-  println("Total Sz after sweep $sw = ", sum(Szs)/N)
-end
-```
-
-
-## Constructors
-
-```@docs
-DMRGObserver(;energy_tol::Float64,minsweeps::Int)
-DMRGObserver(ops::Vector{String},sites::Vector{<:Index};energy_tol::Float64,minsweeps::Int)
-```
-
-## Methods
-
-```@docs
-measurements(::DMRGObserver)
-DMRGMeasurement
-energies(::DMRGObserver)
-```
-
diff --git a/docs/src/DeveloperGuide.md b/docs/src/DeveloperGuide.md
deleted file mode 100644
index c30269e69e..0000000000
--- a/docs/src/DeveloperGuide.md
+++ /dev/null
@@ -1,100 +0,0 @@
-# Developer Guide
-
-
-## Keyword Argument Best Practices
-
-Keyword arguments such as `f(x,y; a=1, b=2)` are a powerful Julia feature, but it is easy to
-misuse them in library code. Below are the "best practices" for using keyword arguments
-when developing ITensor library code.
-
-A particular challenge is how to properly use keyword argument "forwarding", where the notation
-`f(; a, b, kwargs...)` allows any number of keyword arguments to be passed.
-If a keyword argument is misspelled, then forwarding keywords with `kwargs...` will
-silently allow the misspelling, whereas ideally there would be an error message.
-
-Best practices:
-
-1. **Popping Terminal Keyword Arguments**:
-   When passing keyword arguments downward through a stack of function calls, if a certain keyword
-   argument will not be used in any functions further down the stack, then these arguments should
-   be *listed explicitly* to remove them from the keyword arguments.
-
-   For example, in a call stack `fA -> fB -> fC` if a keyword argument
-   such as `cutoff` is used in the body of `fB` but not in `fC`, then use the following pattern:
-
-   ```
-   function fA(...; kwargs...)
-     ...
-     fB(...; kwargs...)
-     ...
-   end
-
-   function fB(...; cutoff, kwargs...) # <- explicitly list cutoff here
-     ...
-     truncate!(psi; cutoff) # <- fB uses cutoff
-     fC(...; kwargs...) # fC does not get passed cutoff
-   end
-
-   function fC(...; maxdim, outputlevel) # fC does not use or need the `cutoff` kwarg
-     ...
-   end
-   ```
-
-2. **Leaf Functions Should Not Take `kwargs...`**:
-   Functions which are the last in the call stack to take any keyword arguments
-   should not take keyword arguments by the `kwargs...` pattern. They should only take an explicit
-   list of keyword arguments, so as to ensure that an error is thrown if a keyword argument
-   is misspelled or missing (if it has no default value).
-
-   Example: `fC` above is a leaf function and does not have `kwargs...` in its signature.
-
-3. **Use Functions to Set Defaults**:
-   Keyword arguments can be made optional by providing default values. To avoid having explicit and
-   possibly inconsistent defaults spread all over the library code, use globally defined functions to
-   provide these defaults.
-
-   For example:
-   ```
-   function sum(A::MPS, B::MPS; cutoff=default_cutoff(), kwargs...)
-     ...
-   end
-
-   function inner(A::MPS, B::MPS; cutoff=default_cutoff(), kwargs...)
-     ...
-   end
-   ```
-   where above the default value for the `cutoff` keyword is provided by a function `default_cutoff()`
-   that is defined for the whole library.
-
-4. **Use Named Tuples to "Tunnel" Keywords to Leaf Functions**:
-   This is a more advanced pattern.
-   In certain situations, there might be multiple leaf
-   functions depending on the execution pathway of the code or in cases where the leaf function
-   is a "callback" passed into the code from the upper-level calling code.
-
-   In such cases, different leaf function implementations may expect different sets of keyword arguments.
-
-   To avoid requiring all leaf functions to take all possible keyword arguments (or to use the `kwargs...`
-   pattern as a workaround, breaking rule #2 above), use the following pattern:
-
-   ```
-   function fA(callback, psi; callback_args, kwargs...)
-     ...
-     callback(psi; callback_args...)
-     ...
-   end
-
-   my_callback(psi; a, b) = ... # define custom callback function
-
-   # Call fA like this:
-   fA(my_callback, psi; callback_args = (; a, b))
-
-   ```
-
-5. **External (non-ITensor) Functions**:
-   Though it requires judgment in each case, if the keyword arguments an external
-   (non-ITensor) function accepts are small in number, not expected to change,
-   and known ahead of time, try to list them explicitly if possible (rather than forwarding
-   with `kwargs...`). Possible exceptions could be if you want to make use of defaults
-   defined for keyword arguments of an external function.
-
-
diff --git a/docs/src/Einsum.md b/docs/src/Einsum.md
deleted file mode 100644
index 873ac3239d..0000000000
--- a/docs/src/Einsum.md
+++ /dev/null
@@ -1,192 +0,0 @@
-# ITensor Index identity: dimension labels and Einstein notation
-
-Many tensor contraction libraries use [Einstein notation](https://en.wikipedia.org/wiki/Einstein_notation),
-such as [NumPy's einsum function](https://numpy.org/doc/stable/reference/generated/numpy.einsum.html), [ncon](https://arxiv.org/abs/1402.0939), and various Julia packages such as [TensorOperations.jl](https://github.com/Jutho/TensorOperations.jl), [Tullio.jl](https://github.com/mcabbott/Tullio.jl), [OMEinsum.jl](https://github.com/under-Peter/OMEinsum.jl), and [Einsum.jl](https://github.com/ahwillia/Einsum.jl), among others.
-
-ITensor also uses Einstein notation; however, the labels are stored inside the tensors and carried around with them during various operations. In addition, the labels that determine whether tensor indices match with each other, and therefore automatically contract when doing `*` or match when adding or subtracting, are more sophisticated than simple characters or strings. ITensor indices are given a unique random ID number when they are constructed, and users can additionally add information like prime levels and tags, which together uniquely determine an Index. This is in contrast to simpler implementations of the same idea, such as the [NamedDims.jl](https://github.com/invenia/NamedDims.jl) package, which only allow symbols as the metadata for uniquely identifying a tensor/array dimension.
-
-```@setup itensor
-using ITensors
-using Random
-Random.seed!(1)
-```
-
-## Index identity
-
-Here is an illustration of how the different types of Index metadata (random ID, prime level, and tags) work for Index identity:
-```@repl itensor
-i = Index(2)
-j = Index(2)
-i == j
-id(i)
-id(j)
-ip = i'
-ip == i
-plev(i) == 0
-plev(ip) == 1
-noprime(ip) == i
-ix = addtags(i, "x")
-ix == i
-removetags(ix, "x") == i
-ixyz = addtags(ix, "y,z")
-ixyz == addtags(i, "z,y,x")
-```
The random ID is particularly useful in the case when a new Index needs to be generated internally by ITensor, such as when performing a matrix factorization. In the case of a matrix factorization, we want to make sure that the new Index will not accidentally clash with an existing one, for example: -```@repl itensor -i = Index(2, "i") -j = Index(2, "j") -A = random_itensor(i, j) -U, S, V = svd(A, i; lefttags="i", righttags="j"); -inds(U) -inds(S) -inds(V) -norm(U * S * V - A) -``` -You can see that it would have been a problem here if there wasn't a new ID assigned to the Index, since it would have clashed with the original index. In this case, it could be avoided by giving the new indices different tags (with the keyword arguments `lefttags` and `righttags`), but in more complicated examples where it is not practical to do that (such as a case where many new indices are being introduced, for example for a tensor train (TT)/matrix product state (MPS)), it is convenient to not force users to come up with unique prime levels or tags themselves. It can also help to avoid accidental contractions in more complicated tensor network algorithms where there are many indices that can potentially have the same prime levels or tags. - -In contrast, using multiple indices with the same Index ID but different prime levels and tags can be useful in situations where there is a more fundamental relationship between the spaces. For example, in the case of an ITensor corresponding to a Hermitian operator, it is helpful to make the bra space and ket spaces the same up to a prime level: -```repl itensor -i = Index(2, "i") -j = Index(3, "j") -A = random_itensor(i', j', dag(i), dag(j)) -H = 0.5 * (A + swapprime(dag(A), 0 => 1)) -v = random_itensor(i, j) -Hv = noprime(H * v) -vH = dag(v)' * H -norm(Hv - dag(vH)) -``` -Note that we have added `dag` in a few places, which is superfluous in this case since the tensors are real and dense but become important when the tensors are complex and/or have symmetries. -You can see that in this case, it is very useful to relate the bra and ket spaces by prime levels, since it makes it much easier to perform operations that map from one space to another. We could have created `A` from 4 entirely different indices with different ID numbers, but it would make the operations a bit more cumbersome, as shown below: -```@repl itensor -i = Index(2, "i") -j = Index(3, "j") -ip = Index(2, "i") -jp = Index(3, "jp") -A = random_itensor(ip, jp, dag(i), dag(j)) -H = 0.5 * (A + swapinds(dag(A), (i, j), (ip, jp))) -v = random_itensor(i, j) -Hv = replaceinds(H * v, (ip, jp) => (i, j)) -vH = replaceinds(dag(v), (i, j) => (ip, jp)) * H -norm(Hv - dag(vH)) -``` - -## Relationship to other Einstein notation-based libraries - -Here we show examples of different ways to perform the contraction -`"ab,bc,cd->ad"` in ITensor. - -```@repl itensor -da, dc = 2, 3; -db, dd = da, dc; -tags = ("a", "b", "c", "d"); -dims = (da, db, dc, dd); -a, b, c, d = Index.(dims, tags); -Aab = random_itensor(a, b) -Bbc = random_itensor(b, c) -Ccd = random_itensor(c, d) - -# "ab,bc,cd->ad" -out1 = Aab * Bbc * Ccd -@show hassameinds(out1, (a, d)) - -# -# Using replaceinds (most general way) -# - -# "ba,bc,dc->ad" -Aba = replaceinds(Aab, (a, b) => (b, a)) -Cdc = replaceinds(Ccd, (c, d) => (d, c)) -out2 = Aba * Bbc * Cdc -@show hassameinds(out2, (a, d)) - -# -# Using setinds -# - -# This is a bit lower level -# since it doesn't check if the indices -# are compatible in dimension, -# so is not recommended in general. 
-using ITensors: setinds
-
-Aba = setinds(Aab, (b, a))
-Cdc = setinds(Ccd, (d, c))
-out2 = Aba * Bbc * Cdc
-@show hassameinds(out2, (a, d))
-
-#
-# Using prime levels (assuming
-# the indices were made with these
-# prime levels in the first place)
-#
-
-a = Index(da, "a")
-c = Index(dc, "c")
-b, d = a', c'
-Aab = random_itensor(a, b)
-Bbc = random_itensor(b, c)
-Ccd = random_itensor(c, d)
-out1 = Aab * Bbc * Ccd
-@show hassameinds(out1, (a, d))
-
-Aba = swapprime(Aab, 0 => 1)
-Cdc = swapprime(Ccd, 0 => 1)
-out2 = Aba * Bbc * Cdc
-@show hassameinds(out2, (a, d))
-
-#
-# Using tags (assuming
-# the indices were made with these
-# tags in the first place)
-#
-
-a = Index(da, "a")
-c = Index(dc, "c")
-b, d = settags(a, "b"), settags(c, "d")
-Aab = random_itensor(a, b)
-Bbc = random_itensor(b, c)
-Ccd = random_itensor(c, d)
-out1 = Aab * Bbc * Ccd
-@show hassameinds(out1, (a, d))
-
-Aba = swaptags(Aab, "a", "b")
-Cdc = swaptags(Ccd, "c", "d")
-out2 = Aba * Bbc * Cdc
-@show hassameinds(out2, (a, d))
-
-#
-# Using Julia Arrays
-#
-
-A = randn(da, db)
-B = randn(db, dc)
-C = randn(dc, dd)
-
-tags = ("a", "b", "c", "d")
-dims = (da, db, dc, dd)
-a, b, c, d = Index.(dims, tags)
-
-Aab = itensor(A, a, b)
-Bbc = itensor(B, b, c)
-Ccd = itensor(C, c, d)
-out1 = Aab * Bbc * Ccd
-@show hassameinds(out1, (a, d))
-
-Aba = itensor(A, b, a)
-Cdc = itensor(C, d, c)
-out2 = Aba * Bbc * Cdc
-@show hassameinds(out2, (a, d))
-
-#
-# Note that we may start allowing
-# this notation in the future:
-# (https://github.com/ITensor/ITensors.jl/issues/673)
-#
-#out1 = A[a, b] * B[b, c] * C[c, d]
-#@show hassameinds(out1, (a, d))
-#
-#out2 = A[b, a] * B[b, c] * C[d, c]
-#@show hassameinds(out2, (a, d))
-```
-
diff --git a/docs/src/HDF5FileFormats.md b/docs/src/HDF5FileFormats.md
deleted file mode 100644
index 47cf26ea8f..0000000000
--- a/docs/src/HDF5FileFormats.md
+++ /dev/null
@@ -1,187 +0,0 @@
-# HDF5 File Formats
-
-This page lists the formats for the HDF5 representations of
-various types in the `ITensors` module.
-
-HDF5 is a portable file format which has a directory structure similar
-to a file system. In addition to containing "groups" (= directories)
-and "datasets" (= files), groups can have "attributes"
-appended to them, which are similar to 'tags' or 'keywords'.
-Unless explicitly stated otherwise, integers are 64 bit and signed
-(H5T\_STD\_I64LE). (For example, the "id"
-field of the `Index` type is stored as an unsigned 64 bit integer
-(H5T\_STD\_U64LE).)
-
-Each type in ITensor which is writeable to HDF5 is written
-to its own group, with the name of the group either specified
-by the user or set to some default value when it is
-a subgroup of another ITensor type (for example, the `Index`
-type saves its `TagSet` in a subgroup named "tags").
-
-Each group corresponding to an ITensors type always carries
-the following attributes:
-* "type" --- a string such as `Index` or `TagSet` specifying the information
-  necessary to determine the type of the object saved to the HDF5 group
-* "version" --- an integer specifying the file format version used to
-  store the data. This version is in general different from the release
-  version of ITensors.jl. The purpose of the version number is to aid
- -The C++ version of ITensor uses exactly the same file formats listed below, -for the purpose of interoperability with the Julia version of ITensor, -even though conventions such as the "type" field values are Julia-centric. - - -## [TagSet](@id tagset_hdf5) - -HDF5 file format for the `ITensors.TagSet` type. - -Attributes: -* "version" = 1 -* "type" = "TagSet" - -Datasets and Subgroups: -* "tags" [string] = a comma separated string of the tags in the `TagSet` - - -## [QN](@id qn_hdf5) - -HDF5 file format for the `ITensors.QN` type. - -Attributes: -* "version" = 1 -* "type" = "QN" - -Datasets and Subgroups: -* "names" [group] = array of strings (length 4) of names of quantum numbers -* "vals" [group] = array of integers (length 4) of quantum number values -* "mods" [group] = array of integers (length 4) of moduli of quantum numbers - - -## [QNBlocks](@id qnblocks_hdf5) - -HDF5 file format for the `ITensors.QNBlocks` type. -(Note: `QNBlocks` is equivalent to `Vector{Pair{QN, Int64}}`.) - -Attributes: -* "version" = 1 -* "type" = "QNBlocks" - -Datasets and Subgroups: -* "length" [integer] = the number of blocks (length of Vector) -* "dims" [group] = array of (integer) dimensions of each block -* "QN[n]" [group] = these groups "QN[1]", "QN[2]", etc. - correspond to the [QN](@ref qn_hdf5) of each block - - -## [Index](@id index_hdf5) - -HDF5 file format for the `ITensors.Index` type. - -Attributes: -* "version" = 1 -* "type" = "Index" -* "space_type" = "Int" if the Index is a regular, dense Index or "QNBlocks" if the Index - is a QNIndex (carries QN subspace information) - -Datasets and Subgroups: -* "id" [unsigned integer] = id number of the Index -* "dim" [integer] = dimension of the Index -* "dir" [integer] = arrow direction of the Index, +1 for `ITensors.Out` and -1 for `ITensors.In` -* "plev" [integer] = prime level of the Index -* "tags" [group] = the [TagSet](@ref tagset_hdf5) of the Index - -Optional Datasets and Subgroups: -* "space" [group] = if the `"space_type"` attribute is "QNBlocks", this group - is present and represents a [QNBlocks](@ref qnblocks_hdf5) object - - - -## [IndexSet](@id indexset_hdf5) - -HDF5 file format for types in the Union type `ITensors.Indices` -which includes `IndexSet` and tuples of Index objects. - -Attributes: -* "version" = 1 -* "type" = "IndexSet" - -Datasets and Subgroups: -* "length" [integer] = number of indices -* "index_n" [group] = for n=1 to n=length each of these groups contains an Index - - -## [ITensor](@id itensor_hdf5) - -HDF5 file format for the `ITensors.ITensor` type. - -Attributes: -* "version" = 1 -* "type" = "ITensor" - -Datasets and Subgroups: -* "inds" [group] = indices of the ITensor -* "storage" [group] = storage of the ITensor - (note that some earlier versions of ITensors.jl may call this group "store") - - -## [NDTensors.Dense](@id dense_hdf5) - -HDF5 file format for objects which are subtypes of `ITensors.NDTensors.Dense`. - -Attributes: -* "version" = 1 -* "type" = "Dense{Float64}" or "Dense{ComplexF64}" - -Datasets and Subgroups: -* "data" = array of either real or complex values (in the same dataset format used - by the HDF5.jl library for storing `Vector{Float64}` or `Vector{ComplexF64}`) - - -## [NDTensors.BlockSparse](@id blocksparse_hdf5) - -HDF5 file format for objects which are subtypes of `ITensors.NDTensors.BlockSparse`. 
- -Attributes: -* "version" = 1 -* "type" = "BlockSparse{Float64}" or "BlockSparse{ComplexF64}" - -Datasets and Subgroups: -* "ndims" [integer] = number of dimensions (order) of the tensor -* "offsets" = block offset data flattened into an array of integers -* "data" = array of either real or complex values (in the same dataset format used - by the HDF5.jl library for storing `Vector{Float64}` or `Vector{ComplexF64}`) - - -## [MPS](@id mps_hdf5) - -HDF5 file format for `ITensorMPS.MPS` - -Attributes: -* "version" = 1 -* "type" = "MPS" - -Datasets and Subgroups: -* "length" [integer] = number of tensors of the MPS -* "rlim" [integer] = right orthogonality limit -* "llim" [integer] = left orthogonality limit -* "MPS[n]" [group,ITensor] = each of these groups, where n=1,...,length, stores the nth ITensor of the MPS - - -## [MPO](@id mpo_hdf5) - -HDF5 file format for `ITensorMPS.MPO` - -Attributes: -* "version" = 1 -* "type" = "MPO" - -Datasets and Subgroups: -* "length" [integer] = number of tensors of the MPO -* "rlim" [integer] = right orthogonality limit -* "llim" [integer] = left orthogonality limit -* "MPO[n]" [group,ITensor] = each of these groups, where n=1,...,length, stores the nth ITensor of the MPO - - - diff --git a/docs/src/ITensorType.md b/docs/src/ITensorType.md deleted file mode 100644 index c90c448168..0000000000 --- a/docs/src/ITensorType.md +++ /dev/null @@ -1,139 +0,0 @@ -# ITensor - -## Description - -```@docs -ITensor -``` - -## Dense Constructors - -```@docs -ITensor(::Type{<:Number}, ::ITensors.Indices) -ITensor(::Type{<:Number}, ::UndefInitializer, ::ITensors.Indices) -ITensor(::Type{<:Number}, ::Number, ::ITensors.Indices) -ITensor(::ITensors.AliasStyle, ::Type{<:Number}, ::Array{<:Number}, ::ITensors.Indices{Index{Int}}; kwargs...) -random_itensor(::Type{<:Number}, ::ITensors.Indices) -onehot -``` - -## Dense View Constructors - -```@docs -itensor(::Array{<:Number}, ::ITensors.Indices) -``` - -## QN BlockSparse Constructors - -```@docs -ITensor(::Type{<:Number}, ::QN, ::ITensors.QNIndices) -ITensor(::ITensors.AliasStyle, ::Type{<:Number}, ::Array{<:Number}, ::ITensors.QNIndices; tol=0) -ITensor(::Type{<:Number}, ::UndefInitializer, ::QN, ::ITensors.Indices) -``` - -## Diagonal constructors - -```@docs -diag_itensor(::Type{<:Number}, ::ITensors.Indices) -diag_itensor(::ITensors.AliasStyle, ::Type{<:Number}, ::Vector{<:Number}, ::ITensors.Indices) -diag_itensor(::ITensors.AliasStyle, ::Type{<:Number}, ::Number, ::ITensors.Indices) -delta(::Type{<:Number}, ::ITensors.Indices) -``` - -## QN Diagonal constructors - -```@docs -diag_itensor(::Type{<:Number}, ::QN, ::ITensors.Indices) -delta(::Type{<:Number}, ::QN, ::ITensors.Indices) -``` - -## Convert to Array - -```@docs -Array{ElT, N}(::ITensor, ::ITensors.Indices) where {ElT, N} -array(::ITensor, ::Any...) -matrix(::ITensor, ::Any...) -vector(::ITensor, ::Any...) -array(::ITensor) -matrix(::ITensor) -vector(::ITensor) -``` - -## Getting and setting elements - -```@docs -getindex(::ITensor, ::Any...) -setindex!(::ITensor, ::Number, ::Int...) -``` - -## Properties - -```@docs -inds(::ITensor) -ind(::ITensor, ::Int) -dir(::ITensor, ::Index) -``` - -## [Priming and tagging](@id Priming_and_tagging_ITensor) - -```@docs -prime(::ITensor, ::Any...) -setprime(::ITensor, ::Any...) -noprime(::ITensor, ::Any...) -mapprime(::ITensor, ::Any...) -swapprime(::ITensor, ::Any...) -addtags(::ITensor, ::Any...) -removetags(::ITensor, ::Any...) -replacetags(::ITensor, ::Any...) -settags(::ITensor, ::Any...) -swaptags(::ITensor, ::Any...) 
-``` - -## Index collections set operations - -```@docs -commoninds -commonind -uniqueinds -uniqueind -noncommoninds -noncommonind -unioninds -unionind -hascommoninds -``` - -## Index Manipulations - -```@docs -replaceind(::ITensor, ::Any...) -replaceinds(::ITensor, ::Any...) -swapind(::ITensor, ::Any...) -swapinds(::ITensor, ::Any...) -``` - -## Math operations - -```@docs -*(::ITensor, ::ITensor) -dag(T::ITensor; kwargs...) -directsum(::Pair{ITensor},::Pair{ITensor},::Pair{ITensor},args...; kws...) -exp(::ITensor, ::Any, ::Any) -nullspace(::ITensor, ::Any...) -``` - -## Decompositions -```@docs -svd(::ITensor, ::Any...) -eigen(::ITensor, ::Any, ::Any) -factorize(::ITensor, ::Any...) -``` - -## Memory operations - -```@docs -permute(::ITensor, ::Any) -dense(::ITensor) -denseblocks(::ITensor) -``` - diff --git a/docs/src/IncludedSiteTypes.md b/docs/src/IncludedSiteTypes.md deleted file mode 100644 index 7a4f6e85f0..0000000000 --- a/docs/src/IncludedSiteTypes.md +++ /dev/null @@ -1,388 +0,0 @@ -# SiteTypes Included with ITensor - -## "S=1/2" SiteType - -Site indices with the "S=1/2" site type represent ``S=1/2`` spins with the states -``|\!\uparrow\rangle``, ``|\!\downarrow\rangle``. - -Making a single "S=1/2" site or collection of N "S=1/2" sites -``` -s = siteind("S=1/2") -sites = siteinds("S=1/2",N) -``` - -Available keyword arguments for enabling and customizing quantum numbers (QN) subspaces: -- `conserve_qns` (default: false): conserve total ``S^z`` -- `conserve_sz` (default: conserve_qns): conserve total ``S^z`` -- `conserve_szparity` (default: false): conserve total ``S^z`` modulo two -- `qnname_sz` (default: "Sz"): name of total ``S^z`` QN -- `qnname_szparity` (default: "SzParity"): name of total ``S^z`` modulo two QN -For example: -``` -sites = siteinds("S=1/2",N; conserve_szparity=true, qnname_szparity="SzP") -``` - -Operators associated with "S=1/2" sites can be made using the `op` function, -for example -``` -Sz = op("Sz",s) -Sz4 = op("Sz",sites[4]) -``` - -Available operators are exactly the same as those for the "Qubit" site type. Please -see the list of "Qubit" operators below. - -## "Qubit" SiteType - -Site indices with the "Qubit" site type represent qubits with the states -``|0\rangle``, ``|1\rangle``. 
-
-Making a single "Qubit" site or collection of N "Qubit" sites
-```
-s = siteind("Qubit")
-sites = siteinds("Qubit",N)
-```
-
-Available keyword arguments for enabling and customizing quantum numbers (QN) subspaces:
-- `conserve_qns` (default: false): conserve total qubit parity
-- `conserve_parity` (default: conserve_qns): conserve total qubit parity
-- `conserve_number` (default: false): conserve total qubit number
-- `qnname_parity` (default: "Parity"): name of total qubit parity QN
-- `qnname_number` (default: "Number"): name of total qubit number QN
-For example:
-```
-sites = siteinds("Qubit",N; conserve_parity=true)
-```
-
-#### "Qubit" and "S=1/2" States
-
-The available state names for "Qubit" sites are:
-- `"0"` (aliases: `"Z+"`, `"Zp"`, `"Up"`, `"↑"`) Qubit in the 0 state
-- `"1"` (aliases: `"Z-"`, `"Zm"`, `"Dn"`, `"↓"`) Qubit in the 1 state
-- `"+"` (aliases: `"X+"`, `"Xp"`) Qubit in the $|+\rangle$ state (+1 eigenvector of $\sigma_x$)
-- `"-"` (aliases: `"X-"`, `"Xm"`) Qubit in the $|-\rangle$ state (-1 eigenvector of $\sigma_x$)
-- `"i"` (aliases: `"Y+"`, `"Yp"`) Qubit in the $|i\rangle$ state (+1 eigenvector of $\sigma_y$)
-- `"-i"` (aliases: `"Y-"`, `"Ym"`) Qubit in the $|-i\rangle$ state (-1 eigenvector of $\sigma_y$)
-
-#### "Qubit" and "S=1/2" Operators
-
-Operators or gates associated with "Qubit" sites can be made using the `op` function,
-for example
-```
-H = op("H",s)
-H3 = op("H",sites[3])
-```
-
-Single-qubit operators:
-- `"X"` (aliases: `"σx"`, `"σ1"`) Pauli X operator
-- `"Y"` (aliases: `"σy"`, `"σ2"`) Pauli Y operator
-- `"iY"` (aliases: `"iσy"`, `"iσ2"`) Pauli Y operator times i
-- `"Z"` (aliases: `"σz"`, `"σ3"`) Pauli Z operator
-- `"√NOT"` (aliases: `"√X"`)
-- `"H"` Hadamard gate
-- `"Phase"` (takes optional argument: ϕ=π/2) (aliases: `"P"`, `"S"`)
-- `"π/8"` (aliases: `"T"`)
-- `"Rx"` (takes argument: θ) Rotation around x axis
-- `"Ry"` (takes argument: θ) Rotation around y axis
-- `"Rz"` (takes argument: θ) Rotation around z axis
-- `"Rn"` (takes arguments: θ, ϕ, λ) (aliases: `"Rn̂"`) Rotation about axis n=(θ, ϕ, λ)
-- `"Proj0"` (aliases: `"ProjUp"`, `"projUp"`) Operator $|0\rangle\langle 0|$
-- `"Proj1"` (aliases: `"ProjDn"`, `"projDn"`) Operator $|1\rangle\langle 1|$
-
-Spin operators:
-- `"Sz"` (aliases: `"Sᶻ"`) Spin z operator $S^z = \frac{1}{2} \sigma_z$
-- `"S+"` (aliases: `"S⁺"`, `"Splus"`) Raising operator $S^+ = S^x + iS^y$
-- `"S-"` (aliases: `"S⁻"`, `"Sminus"`) Lowering operator $S^- = S^x - iS^y$
-- `"Sx"` (aliases: `"Sˣ"`) Spin x operator $S^x = \frac{1}{2} \sigma_x$
-- `"iSy"` (aliases: `"iSʸ"`) i times spin y operator $iS^y = \frac{i}{2} \sigma_y$
-- `"Sy"` (aliases: `"Sʸ"`) Spin y operator $S^y = \frac{1}{2} \sigma_y$
-- `"S2"` (aliases: `"S²"`) Square of spin vector operator $S^2=\vec{S}\cdot\vec{S}=\frac{3}{4} I$
-- `"ProjUp"` (aliases: `"projUp"`, `"Proj0"`) Operator $|\!↑\rangle\langle ↑\!|$
-- `"ProjDn"` (aliases: `"projDn"`, `"Proj1"`) Operator $|\!↓\rangle\langle ↓\!|$
-
-Two-qubit gates:
-- `"CNOT"` (aliases: `"CX"`) Controlled NOT gate
-- `"CY"` Controlled Y gate
-- `"CZ"` Controlled Z gate
-- `"CPHASE"` (aliases: `"Cphase"`) Controlled Phase gate
-- `"CRx"` (aliases: `"CRX"`) (takes arguments: θ)
-- `"CRy"` (aliases: `"CRY"`) (takes arguments: θ)
-- `"CRz"` (aliases: `"CRZ"`) (takes arguments: θ)
-- `"CRn"` (aliases: `"CRn̂"`) (takes arguments: θ, ϕ, λ)
-- `"SWAP"` (aliases: `"Swap"`)
-- `"√SWAP"` (aliases: `"√Swap"`)
-- `"iSWAP"` (aliases: `"iSwap"`)
-- `"√iSWAP"` (aliases: `"√iSwap"`)
-- `"Rxx"` (aliases: `"RXX"`) (takes arguments: ϕ) Ising (XX) coupling gate
arguments: ϕ) Ising (XX) coupling gate -- `"Ryy"` (aliases: `"RYY"`) (takes arguments: ϕ) Ising (YY) coupling gate -- `"Rzz"` (aliases: `"RZZ"`) (takes arguments: ϕ) Ising (ZZ) coupling gate - -Three-qubit gates: -- `"Toffoli"` (aliases `"CCNOT"`, `"CCX"`, `"TOFF"`) -- `"Fredkin"` (aliases `"CSWAP"`, `"CSwap"`, `"CS"`) - -Four-qubit gates: -- `"CCCNOT"` - -## "S=1" SiteType - -Site indices with the "S=1" site type represent ``S=1`` spins with the states -``|\!\uparrow\rangle``, ``|0\rangle``, ``|\!\downarrow\rangle``. - -Making a single "S=1" site or collection of N "S=1" sites -``` -s = siteind("S=1") -sites = siteinds("S=1",N) -``` - -Available keyword arguments for enabling and customizing quantum numbers (QN) subspaces: -- `conserve_qns` (default: false): conserve total ``S^z`` -- `conserve_sz` (default: conserve_qns): conserve total ``S^z`` -- `qnname_sz` (default: "Sz"): name of total ``S^z`` QN -For example: -``` -sites = siteinds("S=1",N; conserve_sz=true, qnname_sz="TotalSz") -``` - -#### "S=1" States - -The available state names for "S=1" sites are: -- `"Up"` (aliases: `"Z+"`, `"↑"`) spin in the up state -- `"Z0"` (aliases: `"0"`) spin in the Sz=0 state -- `"Dn"` (aliases: `"Z-"`, `"↓"`) spin in the Sz=0 state - -#### "S=1" Operators - -Operators associated with "S=1" sites can be made using the `op` function, -for example -``` -Sz = op("Sz",s) -Sz4 = op("Sz",sites[4]) -``` - -Spin operators: -- `"Sz"` (aliases: `"Sᶻ"`) -- `"Sz2"` Square of `S^z` operator -- `"S+"` (alises: `"S⁺"`, `"Splus"`) -- `"S-"` (aliases: `"S⁻"`, `"Sminus"`) -- `"Sx"` (alises: `"Sˣ"`) -- `"Sx2"` Square of `S^x` operator -- `"iSy"` (aliases: `"iSʸ"`) -- `"Sy"` (aliases: `"Sʸ"`) -- `"Sy2"` Square of `S^y` operator -- `"S2"` (aliases: "S²"`) - -## "Boson" SiteType - -The "Boson" site type is an alias for the "Qudit" site type. Please -see more information about "Qudit" below: - -## "Qudit" SiteType - -Making a single "Qudit" site or collection of N "Qudit" sites -``` -s = siteind("Qudit") -sites = siteinds("Qudit",N) -``` - -Available keyword arguments for enabling and customizing quantum numbers (QN) subspaces: -- `dim` (default: 2): dimension of the index (number of qudit or boson values) -- `conserve_qns` (default: false): conserve total qudit or boson number -- `conserve_number` (default: conserve_qns): conserve total qudit or boson number -- `qnname_number` (default: "Number"): name of total qudit or boson number QN -For example: -``` -sites = siteinds("Qudit",N; conserve_number=true) -``` - -#### "Qudit" and "Boson" Operators - -Operators associated with "Qudit" sites can be made using the `op` function, -for example -``` -A = op("A",s) -A4 = op("A",sites[4]) -``` - -Single-qudit operators: -- `"A"` (aliases: `"a"`) -- `"Adag"` (aliases: `"adag"`, `"a†"`) -- `"N"` (aliases: `"n"`) - -Two-qudit operators: -- `"ab"` -- `"a†b"` -- `"ab†"` -- `"a†b†"` - -## "Fermion" SiteType - -Site indices with the "Fermion" SiteType represent -spinless fermion sites with the states -``|0\rangle``, ``|1\rangle``, corresponding to zero fermions or one fermion. 
- -Making a single "Fermion" site or collection of N "Fermion" sites -``` -s = siteind("Fermion") -sites = siteinds("Fermion",N) -``` - -Available keyword arguments for enabling and customizing quantum numbers (QN) subspaces: -- `conserve_qns` (default: false): conserve total number of fermions -- `conserve_nf` (default: conserve_qns): conserve total number of fermions -- `conserve_nfparity` (default: conserve_qns): conserve total fermion number parity -- `qnname_nf` (default: "Nf"): name of total fermion number QN -- `qnname_nfparity` (default: "NfParity"): name of total fermion number parity QN -For example: -``` -sites = siteinds("Fermion",N; conserve_nfparity=true) -``` - -#### "Fermion" States - -The available state names for "Fermion" sites are: -- `"0"` (aliases: `"Emp"`) unoccupied fermion site -- `"1"` (aliases: `"Occ"`) occupied fermion site - -#### "Fermion" Operators - -Operators associated with "Fermion" sites can be made using the `op` function, -for example -``` -C = op("C",s) -C4 = op("C",sites[4]) -``` - -Single-fermion operators: -- `"N"` (aliases: `"n"`) Density operator -- `"C"` (aliases: `"c"`) Fermion annihilation operator -- `"Cdag"` (aliases: `"cdag"`, `"c†"`) Fermion creation operator -- `"F"` Jordan-Wigner string operator - -## "Electron" SiteType - -The states of site indices with the "Electron" SiteType correspond to -``|0\rangle``, ``|\!\uparrow\rangle``, ``|\!\downarrow\rangle``, ``|\!\uparrow\downarrow\rangle``. - -Making a single "Electron" site or collection of N "Electron" sites -``` -s = siteind("Electron") -sites = siteinds("Electron",N) -``` - -Available keyword arguments for enabling and customizing quantum numbers (QN) subspaces: -- `conserve_qns` (default: false): conserve total number of electrons -- `conserve_sz` (default: conserve_qns): conserve total ``S^z`` -- `conserve_nf` (default: conserve_qns): conserve total number of electrons -- `conserve_nfparity` (default: conserve_qns): conserve total electron number parity -- `qnname_sz` (default: "Sz"): name of total ``S^z`` QN -- `qnname_nf` (default: "Nf"): name of total electron number QN -- `qnname_nfparity` (default: "NfParity"): name of total electron number parity QN -For example: -``` -sites = siteinds("Electron",N; conserve_nfparity=true) -``` - -#### "Electron" States - -The available state names for "Electron" sites are: -- `"Emp"` (aliases: `"0"`) unoccupied electron site -- `"Up"` (aliases: `"↑"`) electron site occupied with one up electron -- `"Dn"` (aliases: `"↓"`) electron site occupied with one down electron -- `"UpDn"` (aliases: `"↑↓"`) electron site occupied with two electrons (one up, one down) - -#### "Electron" Operators - -Operators associated with "Electron" sites can be made using the `op` function, -for example -``` -Cup = op("Cup",s) -Cup4 = op("Cup",sites[4]) -``` - -Single-fermion operators: -- `"Ntot"` (aliases: `"ntot"`) Total density operator -- `"Nup"` (aliases: `"n↑"`) Up density operator -- `"Ndn"` (aliases: `"n↓"`) Down density operator -- `"Cup"` (aliases: `"c↑"`) Up-spin annihilation operator -- `"Cdn"` (aliases: `"c↓"`) Down-spin annihilation operator -- `"Cdagup"` (aliases: `"c†↑"`) Up-spin creation operator -- `"Cdagdn"` (aliases: `"c†↓"`) Down-spin creation operator -- `"Sz"` (aliases: `"Sᶻ"`) -- `"Sx"` (aliases: `"Sˣ"`) -- `"S+"` (aliases: `"Sp"`, `"S⁺"`,`"Splus"`) -- `"S-"` (aliases: `"Sm"`, `"S⁻"`, `"Sminus"`) -- `"F"` Jordan-Wigner string operator -- `"Fup"` (aliases: `"F↑"`) Up-spin Jordan-Wigner string operator -- `"Fdn"` (aliases: `"F↓"`) Down-spin 
Jordan-Wigner string operator
-
-Non-fermionic single particle operators (these do not have Jordan-Wigner string attached,
-so will commute within systems such as OpSum or the `apply` function):
-- `"Aup"` (aliases: `"a↑"`) Up-spin annihilation operator
-- `"Adn"` (aliases: `"a↓"`) Down-spin annihilation operator
-- `"Adagup"` (aliases: `"a†↑"`) Up-spin creation operator
-- `"Adagdn"` (aliases: `"a†↓"`) Down-spin creation operator
-
-
-## "tJ" SiteType
-
-"tJ" sites are similar to electron sites, but cannot be doubly occupied.
-The states of site indices with the "tJ" SiteType correspond to
-``|0\rangle``, ``|\!\uparrow\rangle``, ``|\!\downarrow\rangle``.
-
-Making a single "tJ" site or collection of N "tJ" sites
-```
-s = siteind("tJ")
-sites = siteinds("tJ",N)
-```
-
-Available keyword arguments for enabling and customizing quantum numbers (QN) subspaces:
-- `conserve_qns` (default: false): conserve total number of fermions
-- `conserve_nf` (default: conserve_qns): conserve total number of fermions
-- `conserve_nfparity` (default: conserve_qns): conserve total fermion number parity
-- `qnname_nf` (default: "Nf"): name of total fermion number QN
-- `qnname_nfparity` (default: "NfParity"): name of total fermion number parity QN
-For example:
-```
-sites = siteinds("tJ",N; conserve_nfparity=true)
-```
-
-#### "tJ" States
-
-The available state names for "tJ" sites are:
-- `"Emp"` (aliases: `"0"`) unoccupied site
-- `"Up"` (aliases: `"↑"`) site occupied with one up electron
-- `"Dn"` (aliases: `"↓"`) site occupied with one down electron
-
-#### "tJ" Operators
-
-Operators associated with "tJ" sites can be made using the `op` function,
-for example
-```
-Cup = op("Cup",s)
-Cup4 = op("Cup",sites[4])
-```
-
-Single-fermion operators:
-- `"Ntot"` (aliases: `"ntot"`) Total density operator
-- `"Nup"` (aliases: `"n↑"`) Up density operator
-- `"Ndn"` (aliases: `"n↓"`) Down density operator
-- `"Cup"` (aliases: `"c↑"`) Up-spin annihilation operator
-- `"Cdn"` (aliases: `"c↓"`) Down-spin annihilation operator
-- `"Cdagup"` (aliases: `"c†↑"`) Up-spin creation operator
-- `"Cdagdn"` (aliases: `"c†↓"`) Down-spin creation operator
-- `"Sz"` (aliases: `"Sᶻ"`)
-- `"Sx"` (aliases: `"Sˣ"`)
-- `"S+"` (aliases: `"Sp"`, `"S⁺"`, `"Splus"`)
-- `"S-"` (aliases: `"Sm"`, `"S⁻"`, `"Sminus"`)
-- `"F"` Jordan-Wigner string operator
-- `"Fup"` (aliases: `"F↑"`) Up-spin Jordan-Wigner string operator
-- `"Fdn"` (aliases: `"F↓"`) Down-spin Jordan-Wigner string operator
-
-Non-fermionic single particle operators (these do not have Jordan-Wigner string attached,
-so will commute within systems such as OpSum or the `apply` function):
-- `"Aup"` (aliases: `"a↑"`) Up-spin annihilation operator
-- `"Adn"` (aliases: `"a↓"`) Down-spin annihilation operator
-- `"Adagup"` (aliases: `"a†↑"`) Up-spin creation operator
-- `"Adagdn"` (aliases: `"a†↓"`) Down-spin creation operator
-
diff --git a/docs/src/IndexSetType.md b/docs/src/IndexSetType.md
deleted file mode 100644
index ce69a81d2c..0000000000
--- a/docs/src/IndexSetType.md
+++ /dev/null
@@ -1,32 +0,0 @@
-# Index collections
-
-Collections of `Index` are used throughout ITensors.jl to represent the dimensions of tensors. In general, collections that are recognized and returned by ITensors.jl functions are either `Vector` of `Index` or `Tuple` of `Index`, depending on the context.
For example internally an `ITensor` has a static number of indices so stores a `Tuple` of `Index`, while set operations like `commoninds((i, j, k), (j, k, l))` will return a `Vector` `[j, k]` since the operation is inherently dynamic, i.e. the number of indices in the intersection can't in general be known before running the code. `Vector` of `Index` and `Tuple` of `Index` can usually be used interchangeably, but one or the other may be faster depending on the operation being performed. - -## [Priming and tagging](@id Priming_and_tagging_IndexSet) - -Documentation for priming and tagging collections of Index can be found in the ITensor [Priming and tagging](@ref Priming_and_tagging_ITensor) section. - -## Set operations - -Documentation for set operations involving Index collections can be found in the ITensor [Index collections set operations](@ref) section. - -## Subsets - -```@docs -getfirst(::Function, ::IndexSet) -getfirst(::IndexSet) -``` - -## Iterating - -```@docs -eachval(::Index...) -eachindval(::Index...) -``` - - -## Symmetry related properties - -```@docs -dir(::IndexSet, ::Index) -``` diff --git a/docs/src/IndexType.md b/docs/src/IndexType.md deleted file mode 100644 index 1fc95d4567..0000000000 --- a/docs/src/IndexType.md +++ /dev/null @@ -1,65 +0,0 @@ -# Index - -## Description - -```@docs -Index -ITensors.QNIndex -``` - -## Constructors - -```@docs -Index(::Int) -Index(::Int, ::Union{AbstractString, TagSet}) -Index(::Pair{QN, Int}...) -Index(::Vector{Pair{QN, Int}}) -Index(::Vector{Pair{QN, Int}}, ::Union{AbstractString, TagSet}) -``` - -## Properties - -```@docs -id(::Index) -hasid(::Index, ::ITensors.IDType) -tags(::Index) -ITensors.set_strict_tags!(::Bool) -ITensors.using_strict_tags() -hastags(::Index, ::Union{AbstractString,TagSet}) -plev(::Index) -hasplev(::Index, ::Int) -dim(::Index) -==(::Index, ::Index) -dir(::Index) -hasqns(::Index) -``` - -## Priming and tagging methods - -```@docs -prime(::Index, ::Int) -adjoint(::Index) -^(::Index, ::Int) -setprime(::Index, ::Int) -noprime(::Index) -settags(::Index, ::Any) -addtags(::Index, ::Any) -removetags(::Index, ::Any) -replacetags(::Index, ::Any, ::Any) -``` - -## Methods - -```@docs -sim(::Index) -dag(::Index) -removeqns(::Index) -``` - -## Iterating - -```@docs -eachval(::Index) -eachindval(::Index) -``` - diff --git a/docs/src/MPSandMPO.md b/docs/src/MPSandMPO.md deleted file mode 100644 index 5474bbc1ae..0000000000 --- a/docs/src/MPSandMPO.md +++ /dev/null @@ -1,162 +0,0 @@ -# MPS and MPO - -## Types - -```@docs -MPS -MPO -``` - -## MPS Constructors - -```@docs -MPS(::Int) -MPS(::Type{<:Number}, ::Vector{<:Index}) -random_mps(sites::Vector{<:Index}) -random_mps(::Type{<:Number}, sites::Vector{<:Index}) -random_mps(::Vector{<:Index}, ::Any) -MPS(::Vector{<:Index}, ::Any) -MPS(::Type{<:Number}, ::Vector{<:Index}, ::Any) -MPS(::Vector{<:Pair{<:Index}}) -MPS(::Type{<:Number}, ::Vector{<:Pair{<:Index}}) -``` - -## MPO Constructors - -```@docs -MPO(::Int) -MPO(::Type{<:Number}, ::Vector{<:Index}, ::Vector{String}) -MPO(::Type{<:Number}, ::Vector{<:Index}, ::String) -``` - -## Copying behavior - -```@docs -copy(::ITensorMPS.AbstractMPS) -deepcopy(::ITensorMPS.AbstractMPS) -``` - -## Properties - -```@docs -eltype(::ITensorMPS.AbstractMPS) -flux(::ITensorMPS.AbstractMPS) -hasqns(::ITensorMPS.AbstractMPS) -length(::ITensorMPS.AbstractMPS) -maxlinkdim(::ITensorMPS.AbstractMPS) -``` - -## Obtaining and finding indices - -```@docs -siteinds(::typeof(commoninds), ::ITensorMPS.AbstractMPS, ::ITensorMPS.AbstractMPS, ::Int) 
-siteinds(::typeof(uniqueinds), ::ITensorMPS.AbstractMPS, ::ITensorMPS.AbstractMPS, ::Int) -findsite -findsites -firstsiteinds -linkind(::ITensorMPS.AbstractMPS,::Int) -siteind(::MPS, ::Int) -siteind(::typeof(first), ::MPS, ::Int) -siteinds(::MPS) -siteind(::MPO, ::Int) -siteinds(::MPO) -siteinds(::ITensorMPS.AbstractMPS, ::Int) -``` - -## Priming and tagging - -```@docs -prime(::ITensorMPS.AbstractMPS) -prime(::typeof(siteinds), ::ITensorMPS.AbstractMPS) -prime(::typeof(linkinds), ::ITensorMPS.AbstractMPS) -prime(::typeof(siteinds), ::typeof(commoninds), ::ITensorMPS.AbstractMPS, ::ITensorMPS.AbstractMPS) -prime(::typeof(siteinds), ::typeof(uniqueinds), ::ITensorMPS.AbstractMPS, ::ITensorMPS.AbstractMPS) - -swapprime(::ITensorMPS.AbstractMPS, args...; kwargs...) - -setprime(::ITensorMPS.AbstractMPS) -setprime(::typeof(siteinds), ::ITensorMPS.AbstractMPS) -setprime(::typeof(linkinds), ::ITensorMPS.AbstractMPS) -setprime(::typeof(siteinds), ::typeof(commoninds), ::ITensorMPS.AbstractMPS, ::ITensorMPS.AbstractMPS) -setprime(::typeof(siteinds), ::typeof(uniqueinds), ::ITensorMPS.AbstractMPS, ::ITensorMPS.AbstractMPS) - -noprime(::ITensorMPS.AbstractMPS) -noprime(::typeof(siteinds), ::ITensorMPS.AbstractMPS) -noprime(::typeof(linkinds), ::ITensorMPS.AbstractMPS) -noprime(::typeof(siteinds), ::typeof(commoninds), ::ITensorMPS.AbstractMPS, ::ITensorMPS.AbstractMPS) -noprime(::typeof(siteinds), ::typeof(uniqueinds), ::ITensorMPS.AbstractMPS, ::ITensorMPS.AbstractMPS) - -addtags(::ITensorMPS.AbstractMPS) -addtags(::typeof(siteinds), ::ITensorMPS.AbstractMPS) -addtags(::typeof(linkinds), ::ITensorMPS.AbstractMPS) -addtags(::typeof(siteinds), ::typeof(commoninds), ::ITensorMPS.AbstractMPS, ::ITensorMPS.AbstractMPS) -addtags(::typeof(siteinds), ::typeof(uniqueinds), ::ITensorMPS.AbstractMPS, ::ITensorMPS.AbstractMPS) - -removetags(::ITensorMPS.AbstractMPS) -removetags(::typeof(siteinds), ::ITensorMPS.AbstractMPS) -removetags(::typeof(linkinds), ::ITensorMPS.AbstractMPS) -removetags(::typeof(siteinds), ::typeof(commoninds), ::ITensorMPS.AbstractMPS, ::ITensorMPS.AbstractMPS) -removetags(::typeof(siteinds), ::typeof(uniqueinds), ::ITensorMPS.AbstractMPS, ::ITensorMPS.AbstractMPS) - -replacetags(::ITensorMPS.AbstractMPS) -replacetags(::typeof(siteinds), ::ITensorMPS.AbstractMPS) -replacetags(::typeof(linkinds), ::ITensorMPS.AbstractMPS) -replacetags(::typeof(siteinds), ::typeof(commoninds), ::ITensorMPS.AbstractMPS, ::ITensorMPS.AbstractMPS) -replacetags(::typeof(siteinds), ::typeof(uniqueinds), ::ITensorMPS.AbstractMPS, ::ITensorMPS.AbstractMPS) - -settags(::ITensorMPS.AbstractMPS) -settags(::typeof(siteinds), ::ITensorMPS.AbstractMPS) -settags(::typeof(linkinds), ::ITensorMPS.AbstractMPS) -settags(::typeof(siteinds), ::typeof(commoninds), ::ITensorMPS.AbstractMPS, ::ITensorMPS.AbstractMPS) -settags(::typeof(siteinds), ::typeof(uniqueinds), ::ITensorMPS.AbstractMPS, ::ITensorMPS.AbstractMPS) -``` - -## Operations - -```@docs -expect(::MPS, ::Any) -correlation_matrix(::MPS, ::AbstractString, ::AbstractString) -dag(::ITensorMPS.AbstractMPS) -dense(::ITensorMPS.AbstractMPS) -movesite(::ITensorMPS.AbstractMPS, ::Pair{Int, Int};orthocenter::Int,kwargs...) -orthogonalize! -replacebond!(::MPS, ::Int, ::ITensor) -sample(::MPS) -sample!(::MPS) -sample(::MPO) -swapbondsites(::ITensorMPS.AbstractMPS, ::Int; kwargs...) -truncate! 
-``` - -## Gate evolution - -```@docs -product(::ITensor, ::ITensorMPS.AbstractMPS) -product(::Vector{ITensor}, ::ITensorMPS.AbstractMPS) -``` - -## Algebra Operations - -```@docs -inner(::MPST, ::MPST) where {MPST <: ITensorMPS.AbstractMPS} -dot(::MPST, ::MPST) where {MPST <: ITensorMPS.AbstractMPS} -loginner(::MPST, ::MPST) where {MPST <: ITensorMPS.AbstractMPS} -logdot(::MPST, ::MPST) where {MPST <: ITensorMPS.AbstractMPS} -inner(::MPS, ::MPO, ::MPS) -dot(::MPS, ::MPO, ::MPS) -inner(::MPO, ::MPS, ::MPO, ::MPS) -dot(::MPO, ::MPS, ::MPO, ::MPS) -norm(::ITensorMPS.AbstractMPS) -normalize(::ITensorMPS.AbstractMPS) -normalize!(::ITensorMPS.AbstractMPS) -lognorm(::ITensorMPS.AbstractMPS) -+(::ITensorMPS.AbstractMPS...) -contract(::MPO, ::MPS) -apply(::MPO, ::MPS) -contract(::MPO, ::MPO) -apply(::MPO, ::MPO) -error_contract(y::MPS, A::MPO, x::MPS) -outer(::MPS, ::MPS) -projector(::MPS) -``` - diff --git a/docs/src/Multithreading.md b/docs/src/Multithreading.md deleted file mode 100644 index 6e54bcf86a..0000000000 --- a/docs/src/Multithreading.md +++ /dev/null @@ -1,157 +0,0 @@ -# Multithreading - -Most modern computers, including laptops, have multiple cores (processing units) which can be used -to perform multiple tasks at the same time and therefore speed up computations. -Multithreading is a form of shared memory parallelism that makes use of these multiple cores that -you may have available. - -There are three primary sources of parallelization available to ITensors.jl. These are: - - BLAS/LAPACK multithreading (through whatever flavor you are using, i.e. OpenBLAS or MKL). - - The Strided.jl package, which implements efficient multithreaded dense array permutations. - - Block sparse multithreading (currently only for block sparse contractions) implemented in the NDTensors.jl package. - -First, you can obtain the number of threads that are available to you with: -```julia -julia> Sys.CPU_THREADS -6 -``` - -If your computations are dominated by large dense tensors, you likely want to make use of BLAS multithreading -in order to multithread dense matrix multiplications and other linear algebra methods like SVD -and QR decompositions. This will be on by default. The BLAS/LAPACK multithreading can be controlled -in the usual way with environment variables such as by starting Julia with: -``` -$ MKL_NUM_THREADS=4 julia # Set the number of MKL threads to 4 - -$ OPENBLAS_NUM_THREADS=4 julia # Set the number of OpenBLAS threads to 4 - -$ OMP_NUM_THREADS=4 julia # Set the number of OpenMP threads to 4, which will be used by MKL or OpenBLAS if they are not specifically set -``` -or at runtime from within Julia: -```julia -julia> using LinearAlgebra - -julia> BLAS.vendor() # Check which BLAS you are using -:mkl - -julia> BLAS.get_num_threads() -6 - -julia> BLAS.set_num_threads(4) - -julia> BLAS.get_num_threads() -4 -``` -Note that in Julia v1.6, you will be able to use the command `using LinearAlgebra; BLAS.get_num_threads()`. - -We would highly recommend using MKL (see the installation instructions for how to do that), especially if you -are using an Intel chip. How well BLAS multithreading will work depends on how much your -calculations are dominated by large dense matrix operations (which is not always the case, -especially if you are using QN conservation). - -Currently, ITensors.jl makes use of the package [Strided.jl](https://github.com/Jutho/Strided.jl) -for performant dense array permutations. It also provides multithreaded array permutations. 
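-
-As a rough illustration (a sketch, not a benchmark; the index dimensions here are made up), the kind of operation Strided.jl accelerates is a dense permutation like the following:
-```julia
-using ITensors
-
-i, j, k = Index.((50, 50, 50))
-A = random_itensor(i, j, k)
-
-# Dense permutations of ITensor data are dispatched to Strided.jl
-# internally, so this can run multithreaded if Julia was started
-# with multiple threads.
-B = permute(A, k, j, i)
-```
-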
-If you start Julia with multiple threads, Strided multithreading is on by default:
-```julia
-$ julia -t 4
-
-julia> Threads.nthreads()
-4
-
-julia> using Strided
-
-julia> Strided.get_num_threads()
-4
-```
-We find that this threading competes with BLAS threading as well as ITensors.jl's own block sparse
-multithreading, so if you are using Julia with multiple threads you may want to disable
-Strided.jl's threading with:
-```julia
-julia> Strided.disable_threads()
-1
-
-julia> Strided.get_num_threads()
-1
-```
-in favor of either BLAS threading or ITensors.jl's block sparse threading.
-
-Additionally, ITensors.jl, through the [NDTensors.jl](https://github.com/ITensor/NDTensors.jl) library,
-provides multithreaded block sparse operations. By default, this kind of threading is disabled.
-If your computations involve QN conserving tensors, you may want to consider enabling block sparse
-multithreading as described below.
-
-```@docs
-ITensors.enable_threaded_blocksparse
-```
-
-Here is a simple example of using block sparse multithreading to speed up a sparse
-tensor contraction:
-```julia
-using BenchmarkTools
-using ITensors, ITensorMPS
-using LinearAlgebra
-using Strided
-
-function main(; d = 20, order = 4)
-  BLAS.set_num_threads(1)
-  Strided.set_num_threads(1)
-
-  println("#################################################")
-  println("# order = ", order)
-  println("# d = ", d)
-  println("#################################################")
-  println()
-
-  # Report the threading configuration (matches the sample output below)
-  @show Threads.nthreads()
-  @show Sys.CPU_THREADS
-  @show BLAS.get_num_threads()
-  @show Strided.get_num_threads()
-  println()
-
-  i(n) = Index(QN(0) => d, QN(1) => d; tags = "i$n")
-  is = IndexSet(i, order ÷ 2)
-  A = random_itensor(is'..., dag(is)...)
-  B = random_itensor(is'..., dag(is)...)
-
-  ITensors.enable_threaded_blocksparse(false)
-
-  println("Serial contract:")
-  @disable_warn_order begin
-    C_contract = @btime $A' * $B samples = 5
-  end
-  println()
-
-  println("Threaded contract:")
-  @disable_warn_order begin
-    ITensors.enable_threaded_blocksparse(true)
-    C_threaded_contract = @btime $A' * $B samples = 5
-    ITensors.enable_threaded_blocksparse(false)
-  end
-  println()
-  @show C_contract ≈ C_threaded_contract
-  return nothing
-end
-
-main(d = 20, order = 4)
-```
-which outputs the following on a laptop with 6 threads, starting Julia with
-5 threads:
-```
-julia> main(d = 20, order = 4)
-#################################################
-# order = 4
-# d = 20
-#################################################
-
-Threads.nthreads() = 5
-Sys.CPU_THREADS = 6
-BLAS.get_num_threads() = 1
-Strided.get_num_threads() = 1
-
-Serial contract:
-  21.558 ms (131 allocations: 7.34 MiB)
-
-Threaded contract:
-  5.934 ms (446 allocations: 7.37 MiB)
-
-C_contract ≈ C_threaded_contract = true
-```
-
-In addition, we plan to add more threading to other parts of the
-code beyond contraction (such as SVD) and improve composability with
-other forms of threading like BLAS and Strided, so stay tuned!
-
diff --git a/docs/src/Observer.md b/docs/src/Observer.md
deleted file mode 100644
index 66affcfaa1..0000000000
--- a/docs/src/Observer.md
+++ /dev/null
@@ -1,200 +0,0 @@
-# [Observer System for DMRG](@id observer)
-
-An observer is an object which can be passed to the ITensor DMRG
-algorithm, to allow measurements to be performed throughout
-the DMRG calculation and to set conditions for early stopping
-of DMRG.
-
-The only requirement of an observer is that it is a subtype
-of `AbstractObserver`. But to do something interesting, it
-should also overload at least one of the methods `measure!`
-or `checkdone!`.
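-
-In other words, the minimal interface is quite small. Here is a sketch of a do-nothing observer (the type name `NullObserver` is just for illustration; the `DemoObserver` example below fills in real behavior):
-```julia
-using ITensors, ITensorMPS
-
-# An observer that measures nothing and never stops DMRG early:
-struct NullObserver <: AbstractObserver end
-
-ITensorMPS.measure!(::NullObserver; kwargs...) = nothing
-ITensorMPS.checkdone!(::NullObserver; kwargs...) = false
-```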
-
-A general purpose observer type called [`DMRGObserver`](@ref) is
-included with ITensors which already provides some
-quite useful features. It accepts a list of strings naming
-local operators to be measured at each step of DMRG, with
-the results saved for later analysis. It also accepts an
-optional energy precision, and stops a DMRG calculation early
-if the energy no longer changes to this precision. For more
-details about the [`DMRGObserver`](@ref) type, see
-the [DMRGObserver](@ref) documentation page.
-
-## Defining a Custom Observer
-
-To define a custom observer, just make a struct with
-any name and internal fields you would like, and make
-this struct a subtype of `AbstractObserver`.
-
-For example, let's make a type called `DemoObserver`
-as:
-
-```julia
-using ITensors, ITensorMPS
-
-mutable struct DemoObserver <: AbstractObserver
-  energy_tol::Float64
-  last_energy::Float64
-
-  DemoObserver(energy_tol=0.0) = new(energy_tol,1000.0)
-end
-
-```
-
-In this minimal example, our `DemoObserver`
-contains a field `energy_tol` which we can use to set
-an early-stopping condition for DMRG, and a field
-`last_energy` which our observer will use internally
-to keep track of changes to the energy after each sweep.
-
-Now to give our `DemoObserver` type a useful behavior,
-we need to define overloads of the methods `measure!`
-and `checkdone!`.
-
-### Overloading the `checkdone!` method
-
-Let's start with the `checkdone!` method. After
-each sweep of DMRG, the `checkdone!` method is
-passed the observer object, as well as a set of keyword
-arguments which currently include:
- - energy: the current energy
- - psi: the current wavefunction MPS
- - sweep: the number of the sweep that just finished
- - outputlevel: an integer stating the desired level of output
-
-If the `checkdone!` function returns `true`, then the DMRG
-routine stops (recall that `checkdone!` is called only at the
-end of a sweep).
-
-In our example, we will just compare the `energy` keyword
-argument to the `last_energy` variable held inside the `DemoObserver`:
-
-```julia
-function ITensorMPS.checkdone!(o::DemoObserver;kwargs...)
-  sw = kwargs[:sweep]
-  energy = kwargs[:energy]
-  if abs(energy-o.last_energy)/abs(energy) < o.energy_tol
-    println("Stopping DMRG after sweep $sw")
-    return true
-  end
-  # Otherwise, update last_energy and keep going
-  o.last_energy = energy
-  return false
-end
-```
-
-(Recall that in order to properly overload the default behavior,
-the `checkdone!` method has to be imported from the ITensorMPS module
-or preceded with `ITensorMPS.`)
-
-
-### Overloading the `measure!` method
-
-The other method that an observer can overload is `measure!`.
-This method is called at every step of DMRG, so at every
-site and for every sweep.
The `measure!` method is passed -the current observer object and a set of keyword arguments -which include: - - energy: the energy after the current step of DMRG - - psi: the current wavefunction MPS - - bond: the bond `b` that was just optimized, corresponding to sites `(b,b+1)` in the two-site DMRG algorithm - - sweep: the current sweep number - - sweep\_is\_done: true if at the end of the current sweep, otherwise false - - half_sweep: the half-sweep number, equal to 1 for a left-to-right, first half sweep, or 2 for the second, right-to-left half sweep - - spec: the Spectrum object returned from factorizing the local superblock wavefunction tensor in two-site DMRG - - outputlevel: an integer specifying the amount of output to show - - projected_operator: projection of the linear operator into the current MPS basis - -For our minimal `DemoObserver` example here, we will just make a `measure!` function -that prints out some of the information above, but in a more realistic setting one -could use the MPS `psi` to perform essentially arbitrary measurements. - -```julia -function ITensorMPS.measure!(o::DemoObserver; kwargs...) - energy = kwargs[:energy] - sweep = kwargs[:sweep] - bond = kwargs[:bond] - outputlevel = kwargs[:outputlevel] - - if outputlevel > 0 - println("Sweep $sweep at bond $bond, the energy is $energy") - end -end -``` - -## Calling DMRG with the Custom Observer - -After defining an observer type and overloading at least one of the -methods `checkdone!` or `measure!` for it, one can construct an -object of this type and pass it to the ITensor [`dmrg`](@ref) function -using the `observer` keyword argument. - -Continuing with our `DemoObserver` example above: - -```julia -obs = DemoObserver(1E-4) # use an energy tolerance of 1E-4 -energy, psi = dmrg(H,psi0,sweeps; observer=obs, outputlevel=1) -``` - -## Complete Sample Code - -```julia -using ITensors, ITensorMPS - -mutable struct DemoObserver <: AbstractObserver - energy_tol::Float64 - last_energy::Float64 - - DemoObserver(energy_tol=0.0) = new(energy_tol,1000.0) -end - -function ITensorMPS.checkdone!(o::DemoObserver;kwargs...) - sw = kwargs[:sweep] - energy = kwargs[:energy] - if abs(energy-o.last_energy)/abs(energy) < o.energy_tol - println("Stopping DMRG after sweep $sw") - return true - end - # Otherwise, update last_energy and keep going - o.last_energy = energy - return false -end - -function ITensorMPS.measure!(o::DemoObserver; kwargs...) - energy = kwargs[:energy] - sweep = kwargs[:sweep] - bond = kwargs[:bond] - outputlevel = kwargs[:outputlevel] - - if outputlevel > 0 - println("Sweep $sweep at bond $bond, the energy is $energy") - end -end - -let - N = 10 - etol = 1E-4 - - s = siteinds("S=1/2",N) - - a = OpSum() - for n=1:N-1 - a += "Sz",n,"Sz",n+1 - a += 0.5,"S+",n,"S-",n+1 - a += 0.5,"S-",n,"S+",n+1 - end - H = MPO(a,s) - psi0 = random_mps(s;linkdims=4) - - nsweeps = 5 - cutoff = 1E-8 - maxdim = [10,20,100] - - obs = DemoObserver(etol) - - println("Starting DMRG") - energy, psi = dmrg(H,psi0; nsweeps, cutoff, maxdim, observer=obs, outputlevel=1) - - return -end -``` diff --git a/docs/src/OpSum.md b/docs/src/OpSum.md deleted file mode 100644 index 89234e2b54..0000000000 --- a/docs/src/OpSum.md +++ /dev/null @@ -1,14 +0,0 @@ -# OpSum - -## Description - -```@docs -OpSum -``` - -## Methods - -```@docs -add! 
-MPO(::OpSum,::Vector{<:Index})
-```
diff --git a/docs/src/ProjMPO.md b/docs/src/ProjMPO.md
deleted file mode 100644
index 8010775b3f..0000000000
--- a/docs/src/ProjMPO.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# ProjMPO
-
-## Description
-
-```@docs
-ProjMPO
-```
-
-## Methods
-
-```@docs
-product(::ProjMPO,::ITensor)
-position!(::ProjMPO, ::MPS, ::Int)
-noiseterm(::ProjMPO,::ITensor,::String)
-```
-
-## Properties
-
-```@docs
-length(::ProjMPO)
-eltype(::ProjMPO)
-size(::ProjMPO)
-```
diff --git a/docs/src/ProjMPOSum.md b/docs/src/ProjMPOSum.md
deleted file mode 100644
index 8a713b269e..0000000000
--- a/docs/src/ProjMPOSum.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# ProjMPOSum
-
-## Description
-
-```@docs
-ProjMPOSum
-```
-
-## Methods
-
-```@docs
-product(::ProjMPOSum,::ITensor)
-position!(::ProjMPOSum, ::MPS, ::Int)
-noiseterm(::ProjMPOSum,::ITensor,::String)
-```
-
-## Properties
-
-```@docs
-eltype(::ProjMPOSum)
-size(::ProjMPOSum)
-```
diff --git a/docs/src/QN.md b/docs/src/QN.md
deleted file mode 100644
index 56feee6a2f..0000000000
--- a/docs/src/QN.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# QN
-
-## Description
-
-```@docs
-QN
-```
-
-## Constructors
-
-```@docs
-QN(qvs...)
-QN(name,val::Int,modulus::Int=1)
-QN(val::Int,modulus::Int=1)
-```
-
-## Properties
-
-```@docs
-val(q::QN,name)
-modulus(q::QN,name)
-```
-
-## Related Functions
-
-```@docs
-zero(q::QN)
-```
diff --git a/docs/src/QNTricks.md b/docs/src/QNTricks.md
deleted file mode 100644
index abf7a8239e..0000000000
--- a/docs/src/QNTricks.md
+++ /dev/null
@@ -1,81 +0,0 @@
-# Symmetric (QN Conserving) Tensors: Background and Usage
-
-Here is a collection of background material and example codes for understanding how symmetric tensors (tensors with conserved quantum numbers) work in ITensors.jl.
-
-## Combiners and Symmetric Tensors
-
-In ITensors.jl, combiners are special sparse tensors that represent the action of taking the tensor product of one or more indices. They generalize the idea of reshaping and permuting. For dense ITensors, a combiner is just the action of permuting and reshaping the data of the tensor. For symmetric tensors (quantum number conserving tensors represented as block sparse tensors), the combiner also fuses symmetry sectors together. They can be used for various purposes. Generally they are used internally in the library, for example in order to reshape a high order ITensor into an order 2 ITensor to perform a matrix decomposition like an SVD or eigendecomposition.
-
-For example:
-```@repl
-using ITensors

-# This is a short code showing how a combiner
-# can be used to combine two indices into one
-i = Index([QN(0) => 2, QN(1) => 3], "i")
-j = Index([QN(0) => 2, QN(1) => 3], "j")
-A = random_itensor(i, dag(j))
-C = combiner(i, dag(j); tags = "c", dir = dir(i))
-inds(A)
-inds(A * C)
-```
-You can see that the combiner reshapes the indices of `A` into a single Index that contains the tensor product of the two input spaces. The spaces have size `QN(-1) => 2 * 3`, `QN(0) => 2 * 2 + 3 * 3`, and `QN(1) => 3 * 2` (determined from all of the combinations of combining the sectors of the different indices, where the QNs are added and the block dimensions are multiplied). The ordering of the sectors is determined internally by ITensors.jl.
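-
-If you need to access the fused index itself, or undo the combination, here is a small sketch (reusing the `A` and `C` from the example above):
-```julia
-c = combinedind(C)  # the fused Index produced by the combiner
-A_fused = A * C     # combine: A_fused has the single fused index c
-A_back = A_fused * dag(C)  # uncombine: recovers the original indices of A
-```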
- -You can also use a combiner on a single Index, which can be helpful for changing the direction of an Index or combining multiple sectors of the same symmetry into a single sector: -```@repl -using ITensors - -# This is a short code showing how a combiner -# can be used to "flip" the direction of an Index -i = Index([QN(0) => 2, QN(1) => 3], "i") -j = dag(Index([QN(0) => 2, QN(1) => 3], "j")) -A = random_itensor(i, j) -C = combiner(j; tags = "jflip", dir = -dir(j)) -inds(A) -inds(A * C) -``` -Unless you are writing very specialized custom code with symmetric tensors, this is generally not needed. - -## Block Sparsity and Quantum Numbers - -In general, not all blocks that are allowed according to the flux will actually exist in the tensor (which helps in many cases for efficiency). Usually this would happen when the tensor is first constructed and not all blocks are explicitly set: -```@repl -using ITensors - -i = Index([QN(0) => 1, QN(1) => 1]) -A = ITensor(i', dag(i)); -A[2, 2] = 1.0; -@show A; -D, U = eigen(A; ishermitian=true); -@show D; -@show U; -``` -If we had set `A[1, 1] = 0.0` as well, then all of the allowed blocks (according to the flux `QN(0)` would exist and would be included in the eigendecomposition: -```@repl -using ITensors - -i = Index([QN(0) => 1, QN(1) => 1]) -A = ITensor(i', dag(i)); -A[2, 2] = 1.0; -A[1, 1] = 0.0; -@show A; -D, U = eigen(A; ishermitian=true); -@show D; -@show U; -``` -"Missing" blocks can also occur with tensor contractions, since the final blocks of the output tensor are made from combinations of contractions of blocks from the input tensors, and there is no guarantee that all flux-consistent blocks will end up in the result: -```@repl -using ITensors - -i = Index([QN(0) => 1, QN(1) => 1]) -j = Index([QN(0) => 1]) -A = ITensor(i, dag(j)); -A[2, 1] = 1.0; -@show A; -A2 = prime(A, i) * dag(A); -@show A2; -D, U = eigen(A2; ishermitian=true); -@show D; -@show U; -``` - diff --git a/docs/src/RunningOnGPUs.md b/docs/src/RunningOnGPUs.md deleted file mode 100644 index 25a1df4cfc..0000000000 --- a/docs/src/RunningOnGPUs.md +++ /dev/null @@ -1,75 +0,0 @@ -# Running on GPUs - -ITensor provides package extensions for running tensor operations on a variety of GPU backends. -You can activate a backend by loading the appropriate Julia GPU package alongside ITensors.jl -and moving your tensors and/or tensor networks to an available GPU using that package's provided conversion functions. - -For example, you can load CUDA.jl to perform tensor operations on NVIDIA GPUs or Metal.jl to perform tensor operations on Apple GPUs: - -```julia -using ITensors - -i, j, k = Index.((2, 2, 2)) -A = random_itensor(i, j) -B = random_itensor(j, k) - -# Perform tensor operations on CPU -A * B - -########################################### -using CUDA # This will trigger the loading of `NDTensorsCUDAExt` in the background - -# Move tensors to NVIDIA GPU -Acu = cu(A) -Bcu = cu(B) - -# Perform tensor operations on NVIDIA GPU -Acu * Bcu - -########################################### -using Metal # This will trigger the loading of `NDTensorsMetalExt` in the background - -# Move tensors to Apple GPU -Amtl = mtl(A) -Bmtl = mtl(B) - -# Perform tensor operations on Apple GPU -Amtl * Bmtl -``` - -Note that we highly recommend using these new package extensions as opposed to [ITensorGPU.jl](https://github.com/ITensor/ITensorGPU.jl), which is ITensor's previous CUDA backend. The package extensions are better integrated into the main library so are more reliable and better supported right now. 
We plan to deprecate `ITensorGPU.jl` in the future.
-
-## GPU backends
-
-ITensor currently provides
-package extensions for the following GPU backends:
-
-* [CUDA.jl](https://github.com/JuliaGPU/CUDA.jl) (NVIDIA GPUs)
-* [cuTENSOR.jl](https://github.com/JuliaGPU/CUDA.jl/tree/master/lib/cutensor) (`CUDA.jl` extension providing accelerated binary tensor contractions)
-* [Metal.jl](https://github.com/JuliaGPU/Metal.jl) (Apple GPUs)
-* [AMDGPU.jl](https://github.com/JuliaGPU/AMDGPU.jl) (AMD GPUs)
-
-Our goal is to support all GPU backends which are supported by the [JuliaGPU organization](https://juliagpu.org).
-
-Notice that `cuTENSOR.jl` is an extension of `CUDA.jl` that provides new functionality for accelerated binary tensor contractions. If the `cuTENSOR.jl` library is loaded then ITensors with `CuArray` data are contracted using `cuTENSOR`, and if the `cuTENSOR.jl` library is not loaded but `CUDA.jl` is loaded then binary tensor contractions are mapped to a matrix multiplication and performed using `cuBLAS`.
-
-Some important caveats to keep in mind related to the ITensor GPU backends are:
-* only dense tensor operations are well supported right now. Block sparse operations (which arise when QN conservation is enabled) are under active development and either may not work or may be slower than their CPU counterparts,
-* certain GPU backends do not have native support for certain matrix decompositions like `svd`, `eigen`, and `qr`, in which case we will perform those operations on CPU. If your calculation is dominated by those operations, there likely is no advantage to running it on GPU right now. CUDA generally has good support for native matrix decompositions, while Metal and AMD have more limited support right now, and
-* single precision (`Float32`) calculations are generally fastest on GPU.
-
-The table below summarizes each backend's current capabilities.
-
-| | CUDA | cuTENSOR | ROCm | Metal | oneAPI |
-|------------------------------|------|------------|--------|--------|--------|
-| Contractions (dense) | ✓ (cuBLAS) | ✓ | ✓ | ✓ | N/A[^oneapi] |
-| QR (dense) | ✓ (cuSOLVER) | ✓ (cuSOLVER) | On CPU[^linalg] | On CPU[^linalg] | N/A[^oneapi] |
-| SVD (dense) | ✓ (cuSOLVER) | ✓ (cuSOLVER) | On CPU[^linalg] | On CPU[^linalg] | N/A[^oneapi] |
-| Eigendecomposition (dense) | ✓ (cuSOLVER) | ✓ (cuSOLVER) | On CPU[^linalg] | On CPU[^linalg] | N/A[^oneapi] |
-| Double precision (`Float64`) | ✓ | ✓ | ✓ | N/A[^metal] | N/A[^oneapi] |
-| Block sparse | ✓[^blocksparse] | ✓[^blocksparse] | ✓[^blocksparse] | ✓[^blocksparse] | N/A[^oneapi] |
-
-[^linalg]: Some GPU vendors have not implemented certain matrix factorizations, or the ones they have implemented are not efficient compared to running on CPU, so as a workaround we perform those operations on CPU by transferring the data back and forth from GPU to CPU. We will add support for running those operations on GPU as they become available. If your algorithm's cost is dominated by those operations you won't see any speedup by trying to run it on those kinds of GPUs.
-[^blocksparse]: Support is experimental. Operations may not be fully optimized and could have bugs.
-[^oneapi]: We plan to add Intel GPU support through Julia's oneAPI.jl interface but don't have any Intel GPUs to test on right now.
-[^metal]: Apple doesn't support double precision floating point operations on their GPUs, see Section 2.1 of the [Metal Shading Language Specification](https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf). Until it does, we can't support double precision operations on Apple GPUs.
diff --git a/docs/src/SiteType.md b/docs/src/SiteType.md
deleted file mode 100644
index 42f4d116e1..0000000000
--- a/docs/src/SiteType.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# SiteType and op, state, val functions
-
-## Description
-
-```@docs
-SiteType
-```
-
-## Methods
-
-```@docs
-op
-state
-val
-space
-```
-
diff --git a/docs/src/Sweeps.md b/docs/src/Sweeps.md
deleted file mode 100644
index 889caa7a2e..0000000000
--- a/docs/src/Sweeps.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# Sweeps
-
-```@docs
-Sweeps
-Sweeps(nsw::Int, d::AbstractMatrix)
-```
-
-## Modifying Sweeps Objects
-
-```@docs
-setmaxdim!
-setcutoff!
-setnoise!
-setmindim!
-```
-
-## Getting Sweeps Object Data
-
-```@docs
-nsweep(sw::Sweeps)
-maxdim(sw::Sweeps,n::Int)
-cutoff(sw::Sweeps,n::Int)
-noise(sw::Sweeps,n::Int)
-mindim(sw::Sweeps,n::Int)
-```
-
diff --git a/docs/src/UpgradeGuide_0.1_to_0.2.md b/docs/src/UpgradeGuide_0.1_to_0.2.md
deleted file mode 100644
index 2433db7786..0000000000
--- a/docs/src/UpgradeGuide_0.1_to_0.2.md
+++ /dev/null
@@ -1,365 +0,0 @@
-# Upgrade guide
-
-## Upgrading from ITensors.jl 0.1 to 0.2
-
-The main breaking changes in ITensors.jl v0.2 involve changes to the `ITensor`, `IndexSet`, and `IndexVal` types. Most user code should be fine, but see below for more details.
-
-In addition, we have moved development of NDTensors.jl into ITensors.jl to simplify the development process until NDTensors is more stable and can be a standalone package. Again, see below for more details.
-
-For a more comprehensive list of changes, see the [commit history on Github](https://github.com/ITensor/ITensors.jl/commits/main).
-
-If you have issues upgrading, please reach out by [raising an issue on Github](https://github.com/ITensor/ITensors.jl/issues/new) or asking a question on the [ITensor support forum](http://itensor.org/support/).
-
-Also make sure to run your code with `julia --depwarn=yes` to see warnings about function names and interfaces
-that have been deprecated and will be removed in v0.3 of ITensors.jl (these are not listed here).
-
-## Major design changes: changes to the `ITensor`, `IndexSet`, and `IndexVal` types
-
-### Changes to the ITensor type
-
-#### Removal of tensor order type parameter
-
-The tensor order type parameter has been removed from the `ITensor` type, so you can no longer write `ITensor{3}` to specify an order 3 ITensor ([PR #591](https://github.com/ITensor/ITensors.jl/pull/591)). Code that uses the ITensor order type parameter will now lead to the following error:
-```julia
-julia> i = Index(2)
-(dim=2|id=588)
-
-julia> ITensor{2}(i', i)
-ERROR: TypeError: in Type{...} expression, expected UnionAll, got Type{ITensor}
-Stacktrace:
- [1] top-level scope
-   @ REPL[27]:1
-```
-Simply remove the type parameter:
-```julia
-julia> ITensor(i', i)
-ITensor ord=2 (dim=2|id=913)' (dim=2|id=913)
-ITensors.NDTensors.EmptyStorage{ITensors.NDTensors.EmptyNumber, ITensors.NDTensors.Dense{ITensors.NDTensors.EmptyNumber, Vector{ITensors.NDTensors.EmptyNumber}}}
-```
-Pro tip: from the command line, you can replace all examples like that with:
-```bash
-find . -type f -iname "*.jl" -exec sed -i 's/ITensor{.*}/ITensor/g' "{}" +
-```
-Of course, make sure to back up your code before running this!
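-
-If some of your code only used the type parameter to assert the order of a tensor, a runtime check is a direct replacement (a sketch):
-```julia
-using ITensors
-
-i = Index(2)
-A = ITensor(i', i)
-
-# Instead of relying on dispatch to ITensor{2}, check the order at runtime:
-@assert order(A) == 2
-```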
- -Additionally, a common code pattern may be using the type parameter for dispatch: -```julia -using ITensors - -function mynorm(A::ITensor{N}) where {N} - return norm(A)^N -end - -function mynorm(A::ITensor{1}) - return norm(A) -end - -function mynorm(A::ITensor{2}) - return norm(A)^2 -end -``` - Instead, you can use an if-statement: -```julia -function mynormN(A::ITensor) - return norm(A)^order(A) -end - -function mynorm1(A::ITensor) - return norm(A) -end - -function mynorm2(A::ITensor) - return norm(A)^2 -end - -function mynorm(A::ITensor) - return if order(A) == 1 - mynorm1(A) - elseif order(A) == 2 - mynorm2(A) - else - return mynormN(A) - end -end -``` -Alternatively, you can use the `Order` type to dispatch on the -ITensor order as follows: -```julia -function mynorm(::Order{N}, A::ITensor) where {N} - return norm(A)^N -end - -function mynorm(::Order{1}, A::ITensor) - return norm(A) -end - -function mynorm(::Order{2}, A::ITensor) - return norm(A)^2 -end - -function mynorm(A::ITensor) - return mynorm(Order(A), A) -end -``` -`Order(A::ITensor)` returns the order of the ITensor (like `order(A::ITensor)`), however -as a type that can be dispatched on. Note that it is not type stable, so there will -be a small runtime overhead for doing this. - -#### Change to storage type of Index collection in ITensor - -ITensors now store a `Tuple` of `Index` instead of an `IndexSet` ([PR #626](https://github.com/ITensor/ITensors.jl/pull/626)). Therefore, calling `inds` on -an ITensor will now just return a `Tuple`: -```julia -julia> i = Index(2) -(dim=2|id=770) - -julia> j = Index(3) -(dim=3|id=272) - -julia> A = random_itensor(i, j) -ITensor ord=2 (dim=2|id=770) (dim=3|id=272) -ITensors.NDTensors.Dense{Float64, Vector{Float64}} - -julia> inds(A) -((dim=2|id=770), (dim=3|id=272)) -``` -while before it returned an `IndexSet` (in fact, the `IndexSet` type has been removed, see below for details). In general, this should not affect user code, since a `Tuple` of `Index` should have all of the same functions defined for it that `IndexSet` did. If you find this is not the case, please [raise an issue on Github](https://github.com/ITensor/ITensors.jl/issues/new) or on the [ITensor support forum](http://itensor.org/support/). - -#### ITensor type now directly wraps a Tensor - -The ITensor type no longer has separate field `inds` and `store`, just a single field `tensor` (PR #626). In general you should not be accessing the fields directly, instead you should be using the functions `inds(A::ITensor)` and `storage(A::ITensor)`, so this should not affect most code. However, in case you have code like: -```julia -i = Index(2) -A = random_itensor(i) -A.inds -``` -this will error in v0.2 with: -```julia -julia> A.inds -ERROR: type ITensor has no field inds -Stacktrace: - [1] getproperty(x::ITensor, f::Symbol) - @ Base ./Base.jl:33 - [2] top-level scope - @ REPL[43]:1 -``` -and you should change it to: -```julia -inds(A) -``` - -### Changes to the ITensor constructors - -#### Plain ITensor constructors now return ITensors with `EmptyStorage` storage - -`ITensor` constructors from collections of `Index`, such as `ITensor(i, j, k)`, now return an `ITensor` with `EmptyStorage` (previously called `Empty`) storage instead of `Dense` or `BlockSparse` storage filled with 0 values. Most operations should still work that worked previously, but please contact us if there are issues ([PR #641](https://github.com/ITensor/ITensors.jl/pull/641)). 
- -For example: -```julia -julia> i = Index(2) -(dim=2|id=346) - -julia> A = ITensor(i', dag(i)) -ITensor ord=2 (dim=2|id=346)' (dim=2|id=346) -ITensors.NDTensors.EmptyStorage{ITensors.NDTensors.EmptyNumber, ITensors.NDTensors.Dense{ITensors.NDTensors.EmptyNumber, Vector{ITensors.NDTensors.EmptyNumber}}} - -julia> A' * A -ITensor ord=2 (dim=2|id=346)'' (dim=2|id=346) -ITensors.NDTensors.EmptyStorage{ITensors.NDTensors.EmptyNumber, ITensors.NDTensors.Dense{ITensors.NDTensors.EmptyNumber, Vector{ITensors.NDTensors.EmptyNumber}}} -``` -so now contracting two `EmptyStorage` ITensors returns another `EmptyStorage` ITensor. You can allocate the storage by setting elements of the ITensor: -```julia -julia> A[i' => 1, i => 1] = 0.0 -0.0 - -julia> @show A; -A = ITensor ord=2 -Dim 1: (dim=2|id=346)' -Dim 2: (dim=2|id=346) -ITensors.NDTensors.Dense{Float64, Vector{Float64}} - 2×2 - 0.0 0.0 - 0.0 0.0 -``` -Additionally, it will take on the element type of the first value set: -```julia -julia> A = ITensor(i', dag(i)) -ITensor ord=2 (dim=2|id=346)' (dim=2|id=346) -ITensors.NDTensors.EmptyStorage{ITensors.NDTensors.EmptyNumber, ITensors.NDTensors.Dense{ITensors.NDTensors.EmptyNumber, Vector{ITensors.NDTensors.EmptyNumber}}} - -julia> A[i' => 1, i => 1] = 1.0 + 0.0im -1.0 + 0.0im - -julia> @show A; -A = ITensor ord=2 -Dim 1: (dim=2|id=346)' -Dim 2: (dim=2|id=346) -ITensors.NDTensors.Dense{ComplexF64, Vector{ComplexF64}} - 2×2 - 1.0 + 0.0im 0.0 + 0.0im - 0.0 + 0.0im 0.0 + 0.0im -``` -If you have issues upgrading, please let us know. - -#### Slight change to automatic conversion of element type when constructing ITensor from Array - -`ITensor` constructors from `Array` now only convert to floating point for `Array{Int}` and `Array{Complex{Int}}`. That same conversion is added for QN ITensor constructors to be consistent with non-QN versions ([PR #620](https://github.com/ITensor/ITensors.jl/pull/620)). Previously it tried to convert arrays of any element type to the closest floating point type with Julia's `float` function. This should not affect most user code. - -### Changes to the IndexSet type - -The `IndexSet` type has been removed in favor of Julia's `Tuple` and `Vector` types ([PR #626](https://github.com/ITensor/ITensors.jl/pull/626)). `ITensor`s now contain a `Tuple` of `Index`, while set operations like `commoninds` that used to return `IndexSet` now return a `Vector` of `Index`: -```julia -julia> i = Index(2) -(dim=2|id=320) - -julia> A = random_itensor(i', i) -ITensor ord=2 (dim=2|id=320)' (dim=2|id=320) -ITensors.NDTensors.Dense{Float64, Vector{Float64}} - -julia> inds(A) # Previously returned IndexSet, now returns Tuple -((dim=2|id=320)', (dim=2|id=320)) - -julia> commoninds(A', A) # Previously returned IndexSet, now returns Vector -1-element Vector{Index{Int64}}: - (dim=2|id=320)' -``` - -To help with upgrading code, `IndexSet{IndexT}` has been redefined as a type alias for `Vector{IndexT<:Index}` (which is subject to change to some other collection of indices, and likely will be removed in ITensors v0.3). Therefore it no longer has a type parameter for the number of indices, similar to the change to the `ITensor` type. If you were using the plain `IndexSet` type, code should generally still work properly. 
However, if you were using the type parameters of `IndexSet`, such as:
-```julia
-function myorder2(is::IndexSet{N}) where {N}
-  return N^2
-end
-```
-then you will need to remove the type parameter and rewrite your code generically to accept `Tuple` or `Vector`, such
-as:
-```julia
-function myorder2(is)
-  return length(is)^2
-end
-```
-In general you should be able to just remove usages of `IndexSet` in your code, and can just use `Tuple` or `Vector` of `Index` instead, such as change `is = IndexSet(i, j, k)` to `is = (i, j, k)` or `is = [i, j, k]`. Priming, tagging, and set operations now work generically on those types. If you see issues with upgrading your code, please let us know.
-
-### Changes to the IndexVal type
-
-Similar to the removal of `IndexSet`, we have also removed the `IndexVal` type ([PR #665](https://github.com/ITensor/ITensors.jl/pull/665)). Now, all use cases of `IndexVal` can be replaced by using Julia's `Pair` type, for example instead of:
-```julia
-i = Index(2)
-IndexVal(i, 2)
-```
-use:
-```julia
-i = Index(2)
-i => 2
-# Or:
-Pair(i, 2)
-```
-Note that we have made `IndexVal{IndexT}` an alias for `Pair{IndexT,Int}`, so code using `IndexVal` such as `IndexVal(i, 2)` should generally still work. However, we encourage users to change from `IndexVal(i, 2)` to `i => 2`.
-
-## NDTensors.jl package now being developed internally within ITensors.jl
-
-The `NDTensors` module has been moved into the `ITensors` package, so `ITensors` no longer depends on the standalone `NDTensors` package. This should only affect users who were using both `NDTensors` and `ITensors` separately. If you want to use the latest `NDTensors` library, you should do `using ITensors.NDTensors` instead of `using NDTensors`, and will need to install `ITensors` with `using Pkg; Pkg.add("ITensors")` in order to use the latest versions of `NDTensors`. Note that the current `NDTensors.jl` package will still exist, but for now development of `NDTensors` will occur within `ITensors.jl` ([PR #650](https://github.com/ITensor/ITensors.jl/pull/650)).
-
-## Miscellaneous breaking changes
-
-### `state` function renamed `val`, `state` given a new more general definition
-
-Rename the `state` functions currently defined for various site types to `val` for mapping a string name for an index to an index value (used in ITensor indexing and MPS construction). `state` functions now return single-index ITensors representing various single-site states ([PR #664](https://github.com/ITensor/ITensors.jl/pull/664)).
So now to get an Index value from a string, you use: -```julia -N = 10 -s = siteinds("S=1/2", N) -val(s[1], "Up") == 1 -val(s[1], "Dn") == 2 -``` -`state` now returns an ITensor corresponding to the state with that value as the only nonzero element: -```julia -julia> @show state(s[1], "Up"); -state(s[1], "Up") = ITensor ord=1 -Dim 1: (dim=2|id=597|"S=1/2,Site,n=1") -ITensors.NDTensors.Dense{Float64, Vector{Float64}} - 2-element - 1.0 - 0.0 - -julia> @show state(s[1], "Dn"); -state(s[1], "Dn") = ITensor ord=1 -Dim 1: (dim=2|id=597|"S=1/2,Site,n=1") -ITensors.NDTensors.Dense{Float64, Vector{Float64}} - 2-element - 0.0 - 1.0 -``` -which allows for more general states to be defined, such as: -```julia -julia> @show state(s[1], "X+"); -state(s[1], "X+") = ITensor ord=1 -Dim 1: (dim=2|id=597|"S=1/2,Site,n=1") -ITensors.NDTensors.Dense{Float64, Vector{Float64}} - 2-element - 0.7071067811865475 - 0.7071067811865475 - -julia> @show state(s[1], "X-"); -state(s[1], "X-") = ITensor ord=1 -Dim 1: (dim=2|id=597|"S=1/2,Site,n=1") -ITensors.NDTensors.Dense{Float64, Vector{Float64}} - 2-element - 0.7071067811865475 - -0.7071067811865475 -``` -which will be used for making more general MPS product states. - -This should not affect end users in general, besides ones who had customized the previous `state` function, such as with overloads like: -```julia -ITensors.state(::SiteType"My_S=1/2", ::StateName"Up") = 1 -ITensors.state(::SiteType"My_S=1/2", ::StateName"Dn") = 2 -``` -which should be changed now to: -```julia -ITensors.val(::SiteType"My_S=1/2", ::StateName"Up") = 1 -ITensors.val(::SiteType"My_S=1/2", ::StateName"Dn") = 2 -``` - -### `"Qubit"` site type QN convention change - -The QN convention of the `"Qubit"` site type is changed to track the total number of 1 bits instead of the net number of 1 bits vs 0 bits (i.e. change the QN from +1/-1 to 0/1) ([PR #676](https://github.com/ITensor/ITensors.jl/pull/676)). -```julia -julia> s = siteinds("Qubit", 4; conserve_number=true) -4-element Vector{Index{Vector{Pair{QN, Int64}}}}: - (dim=2|id=925|"Qubit,Site,n=1") - 1: QN("Number",0) => 1 - 2: QN("Number",1) => 1 - (dim=2|id=799|"Qubit,Site,n=2") - 1: QN("Number",0) => 1 - 2: QN("Number",1) => 1 - (dim=2|id=8|"Qubit,Site,n=3") - 1: QN("Number",0) => 1 - 2: QN("Number",1) => 1 - (dim=2|id=385|"Qubit,Site,n=4") - 1: QN("Number",0) => 1 - 2: QN("Number",1) => 1 -``` -Before it was +1/-1 like `"S=1/2"`: -```julia -julia> s = siteinds("S=1/2", 4; conserve_sz=true) -4-element Vector{Index{Vector{Pair{QN, Int64}}}}: - (dim=2|id=364|"S=1/2,Site,n=1") - 1: QN("Sz",1) => 1 - 2: QN("Sz",-1) => 1 - (dim=2|id=823|"S=1/2,Site,n=2") - 1: QN("Sz",1) => 1 - 2: QN("Sz",-1) => 1 - (dim=2|id=295|"S=1/2,Site,n=3") - 1: QN("Sz",1) => 1 - 2: QN("Sz",-1) => 1 - (dim=2|id=810|"S=1/2,Site,n=4") - 1: QN("Sz",1) => 1 - 2: QN("Sz",-1) => 1 -``` -This shouldn't affect end users in general. The new convention is a bit more intuitive since the quantum number -can be thought of as counting the total number of 1 bits in the state, though the conventions can be mapped -to each other with a constant. - -### `maxlinkdim` for MPS/MPO with no indices - -`maxlinkdim(::MPS/MPO)` returns a minimum of `1` (previously it returned 0 for MPS/MPO without and link indices) ([PR #663](https://github.com/ITensor/ITensors.jl/pull/663)). 
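-
-For example, here is a sketch of the new behavior (the site count of 4 is arbitrary; `MPS(s, "Up")` builds a product state whose links, if present, are dimension 1):
-```julia
-using ITensors, ITensorMPS
-
-s = siteinds("S=1/2", 4)
-psi = MPS(s, "Up")  # product state with trivial link dimensions
-
-# In v0.2, maxlinkdim returns at least 1:
-@show maxlinkdim(psi)
-```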
-
diff --git a/docs/src/assets/favicon.ico b/docs/src/assets/favicon.ico
deleted file mode 100644
index c7b942c3e75bf2f95f7299827e76c54d8aef03df..0000000000000000000000000000000000000000
GIT binary patch
[binary image data for the deleted favicon omitted]

diff --git a/docs/src/examples/DMRG.md b/docs/src/examples/DMRG.md
deleted file mode 100644
index 48d51feb11..0000000000
--- a/docs/src/examples/DMRG.md
+++ /dev/null
@@ -1,552 +0,0 @@
-# DMRG Code Examples
-
-## Perform a basic DMRG calculation
-
-Because tensor indices in ITensor have unique identities, before we can make a Hamiltonian
-or a wavefunction we need to construct a "site set" which will hold the site indices defining
-the physical Hilbert space:
-
-```julia
-using ITensors, ITensorMPS
-N = 100
-sites = siteinds("S=1",N)
-```
-
-Here we have chosen to create a Hilbert space of N spin-1 sites. The string "S=1"
-denotes a special Index tag which hooks into a system that knows "S=1" indices have
-a dimension of 3 and how to create common physics operators like "Sz" for them.
-
-Next we'll make our Hamiltonian matrix product operator (MPO). A very
-convenient way to do this is to use the OpSum helper type, which lets
-us input a Hamiltonian (or any sum of local operators) in a notation close to
-how we would write it with pencil and paper:
-
-```julia
-os = OpSum()
-for j=1:N-1
-  os += 0.5,"S+",j,"S-",j+1
-  os += 0.5,"S-",j,"S+",j+1
-  os += "Sz",j,"Sz",j+1
-end
-H = MPO(os,sites)
-```
-
-In the last line above we convert the OpSum helper object to an actual MPO.
-
-Before beginning the calculation, we need to specify how many DMRG sweeps to do and
-what schedule we would like for the parameters controlling the accuracy.
-These parameters can be specified as follows:
-
-```julia
-nsweeps = 5 # number of sweeps is 5
-maxdim = [10,20,100,100,200] # gradually increase states kept
-cutoff = [1E-10] # desired truncation error
-```
-
-The random starting wavefunction `psi0` must be defined in the same Hilbert space
-as the Hamiltonian, so we construct it using the same collection of site indices:
-
-```julia
-psi0 = random_mps(sites;linkdims=2)
-```
-
-Here we have made a random MPS of bond dimension 2. We could have used a random product
-state instead, but choosing a slightly larger bond dimension can help DMRG avoid getting
-stuck in local minima. We could also set psi to some specific initial state using the
-`MPS` constructor, which is actually required if we are conserving QNs.
-
-Finally, we are ready to call DMRG:
-
-```julia
-energy,psi = dmrg(H,psi0;nsweeps,maxdim,cutoff)
-```
-
-When the algorithm is done, it returns the ground state energy as the variable `energy` and an MPS
-approximation to the ground state as the variable `psi`.
-
-Below you can find complete working code that includes all of these steps:
-
-```julia
-using ITensors, ITensorMPS
-
-let
-  N = 100
-  sites = siteinds("S=1",N)
-
-  os = OpSum()
-  for j=1:N-1
-    os += 0.5,"S+",j,"S-",j+1
-    os += 0.5,"S-",j,"S+",j+1
-    os += "Sz",j,"Sz",j+1
-  end
-  H = MPO(os,sites)
-
-  nsweeps = 5 # number of sweeps is 5
-  maxdim = [10,20,100,100,200] # gradually increase states kept
-  cutoff = [1E-10] # desired truncation error
-
-  psi0 = random_mps(sites;linkdims=2)
-
-  energy,psi = dmrg(H,psi0;nsweeps,maxdim,cutoff)
-
-  return
-end
-```
-
-## Using a Custom Observer for DMRG
-
-An Observer is any object which can be used to perform custom measurements throughout
-a DMRG calculation and to stop a calculation early. Because an Observer has
-access to the entire wavefunction at every step, a wide range of customization is
-possible.
-
-For detailed examples of making custom Observers, see the [Observer](@ref observer)
-section of the documentation.
-
-
-## DMRG Calculation with Mixed Local Hilbert Space Types
-
-The following fully-working example shows how to set up a calculation
-mixing S=1/2 and S=1 spins on every other site of a 1D system. The
-Hamiltonian involves Heisenberg spin interactions with adjustable
-couplings between sites of the same spin or of different spin.
-
-Note that the only difference from a regular ITensor DMRG calculation
-is that the `sites` array has Index objects which alternate in dimension
-and in the physical tag they carry, whether `"S=1/2"` or `"S=1"`.
-(Try printing out the sites array to see!)
-These tags tell the OpSum system which local operators to use for these
-sites when building the Hamiltonian MPO.
-
-```julia
-using ITensors, ITensorMPS
-
-let
-  N = 100
-
-  # Make an array of N Index objects with alternating
-  # "S=1/2" and "S=1" tags on odd versus even sites
-  # (The first argument n->isodd(n) ... is an
-  # on-the-fly function mapping integers to strings)
-  sites = siteinds(n->isodd(n) ? "S=1/2" : "S=1",N)
-
-  # Couplings between spin-half and
-  # spin-one sites:
-  Jho = 1.0 # half-one coupling
-  Jhh = 0.5 # half-half coupling
-  Joo = 0.5 # one-one coupling
-
-  os = OpSum()
-  for j=1:N-1
-    os += 0.5*Jho,"S+",j,"S-",j+1
-    os += 0.5*Jho,"S-",j,"S+",j+1
-    os += Jho,"Sz",j,"Sz",j+1
-  end
-  for j=1:2:N-2
-    os += 0.5*Jhh,"S+",j,"S-",j+2
-    os += 0.5*Jhh,"S-",j,"S+",j+2
-    os += Jhh,"Sz",j,"Sz",j+2
-  end
-  for j=2:2:N-2
-    os += 0.5*Joo,"S+",j,"S-",j+2
-    os += 0.5*Joo,"S-",j,"S+",j+2
-    os += Joo,"Sz",j,"Sz",j+2
-  end
-  H = MPO(os,sites)
-
-  nsweeps = 10
-  maxdim = [10,10,20,40,80,100,140,180,200]
-  cutoff = [1E-8]
-
-  psi0 = random_mps(sites;linkdims=4)
-
-  energy,psi = dmrg(H,psi0;nsweeps,maxdim,cutoff)
-
-  return
-end
-```
-
-## Use a Sum of MPOs in DMRG
-
-One version of the ITensor `dmrg` function accepts an array of MPOs
-`[H1,H2,H3]` (or any number of MPOs you want). This version of DMRG
-will find the ground state of `H1+H2+H3`. Internally it does not
-actually sum these MPOs, but loops over them during each step of
-the "eigensolver" at the core of the DMRG algorithm, so it is usually
-more efficient than if the MPOs had been summed together into a single MPO.
-
-To use this version of DMRG, say you have MPOs `H1`, `H2`, and `H3`.
-Then call DMRG like this:
-```julia
-energy,psi = dmrg([H1,H2,H3],psi0;nsweeps,maxdim,cutoff)
-```
-
-## Make a 2D Hamiltonian for DMRG
-
-You can use the OpSum system to make 2D Hamiltonians
-in much the same way you make 1D Hamiltonians: by looping over
-all of the bonds and adding the interactions on these bonds to
-the OpSum.
-
-To help with the logic of 2D lattices, ITensor pre-defines
-some helper functions which
-return an array of bonds. Each bond object has an
-"s1" field and an "s2" field, which are the integers numbering
-the two sites the bond connects.
-(You can view the source for these functions at [this link](https://github.com/ITensor/ITensors.jl/blob/main/src/lib/ITensorMPS/src/lattices/lattices.jl).)
-
-The two functions currently provided are `square_lattice` and
-`triangular_lattice`. It is not hard to write your own similar lattice
-functions, as all they have to do is define an array of `ITensors.LatticeBond`
-structs (or even of a custom struct type of your own); see the sketch below. We welcome any
-user contributions of other lattices that ITensor does not currently offer.
-
-Each lattice function takes an optional named argument
-"yperiodic" which lets you request that the lattice should
-have periodic boundary conditions around the y direction, making
-the geometry a cylinder.
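-As a rough illustration of how such a custom lattice function might look, here is a
-minimal sketch for nearest-neighbor bonds on a 1D chain, optionally closed into a
-ring. The function name `chain_lattice` is made up for this example, and we assume
-only the two-integer `LatticeBond(s1, s2)` convenience constructor:
-
-```julia
-using ITensors, ITensorMPS
-
-function chain_lattice(N::Int; periodic=false)
-  # One bond between each pair of neighboring sites j and j+1
-  bonds = [ITensors.LatticeBond(j, j + 1) for j in 1:(N - 1)]
-  # Optionally close the chain into a ring
-  periodic && push!(bonds, ITensors.LatticeBond(N, 1))
-  return bonds
-end
-
-lattice = chain_lattice(6; periodic=true)
-```
-
-A function like this can be used in place of `square_lattice` in the example below,
-since the OpSum loop only reads the `s1` and `s2` fields of each bond.
-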
-
-**Full example code:**
-
-```julia
-using ITensors, ITensorMPS
-
-let
-  Ny = 6
-  Nx = 12
-
-  N = Nx*Ny
-
-  sites = siteinds("S=1/2", N; conserve_qns = true)
-
-  # Obtain an array of LatticeBond structs
-  # which define nearest-neighbor site pairs
-  # on the 2D square lattice (open in y here;
-  # set yperiodic = true to wrap it on a cylinder)
-  lattice = square_lattice(Nx, Ny; yperiodic = false)
-
-  # Define the Heisenberg spin Hamiltonian on this lattice
-  os = OpSum()
-  for b in lattice
-    os += 0.5, "S+", b.s1, "S-", b.s2
-    os += 0.5, "S-", b.s1, "S+", b.s2
-    os += "Sz", b.s1, "Sz", b.s2
-  end
-  H = MPO(os,sites)
-
-  state = [isodd(n) ? "Up" : "Dn" for n=1:N]
-  # Initialize wavefunction to a random MPS
-  # of bond dimension 20 with the same quantum
-  # numbers as `state`
-  psi0 = random_mps(sites,state;linkdims=20)
-
-  nsweeps = 10
-  maxdim = [20,60,100,100,200,400,800]
-  cutoff = [1E-8]
-
-  energy,psi = dmrg(H,psi0;nsweeps,maxdim,cutoff)
-
-  return
-end
-```
-
-## Compute excited states with DMRG
-
-ITensor DMRG accepts additional MPS wavefunctions as an optional, extra argument.
-These additional 'penalty states' are provided as an array of MPS just
-after the Hamiltonian, like this:
-
-```julia
-energy,psi3 = dmrg(H,[psi0,psi1,psi2],psi3_init;nsweeps,maxdim,cutoff)
-```
-
-Here the penalty states are `[psi0,psi1,psi2]`.
-When these are provided, the DMRG code minimizes the
-energy of the current MPS while also reducing its overlap
-(inner product) with the previously provided MPS. If these overlaps become sufficiently small,
-then the computed MPS is an excited state. So by first finding the ground
-state and then providing it to DMRG as a "penalty state" or previous state,
-one can compute the first excited state. Providing both of these, one can then
-get the second excited state, and so on.
-
-A keyword argument called `weight` can also be provided to
-the `dmrg` function when penalizing overlaps to previous states. The
-`weight` parameter is multiplied by the overlap with the previous states,
-and so sets the size of the penalty. It should be chosen at least as large
-as the (estimated) gap between the ground and first excited states.
-Beyond that, the optimal value of the weight parameter is not so obvious,
-and it is best to try various weights during initial test calculations.
-
-Note that when the system has conserved quantum numbers, a superior way
-to find excited states can be to find ground states of quantum number (or symmetry)
-sectors other than the one containing the absolute ground state. In that
-context, the penalty method used below is a way to find higher excited states
-within the same quantum number sector.
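-Schematically, with penalty states ``|\psi_i\rangle`` and weight ``w``, the
-optimization described above behaves as if DMRG were minimizing
-
-```math
-\langle\psi|H|\psi\rangle + w \sum_i |\langle\psi|\psi_i\rangle|^2
-```
-
-over normalized ``|\psi\rangle``. (This is a heuristic way to picture the penalty
-term, not a literal description of the implementation.) It also makes clear why
-`weight` should be at least as large as the gap: collapsing onto a penalized state
-must cost more energy than it saves.
-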
- -**Full Example code:** - -```julia -using ITensors, ITensorMPS - -let - N = 20 - - sites = siteinds("S=1/2",N) - - h = 4.0 - - weight = 20*h # use a large weight - # since gap is expected to be large - - - # - # Use the OpSum feature to create the - # transverse field Ising model - # - # Factors of 4 and 2 are to rescale - # spin operators into Pauli matrices - # - os = OpSum() - for j=1:N-1 - os -= 4,"Sz",j,"Sz",j+1 - end - for j=1:N - os -= 2*h,"Sx",j; - end - H = MPO(os,sites) - - - # - # Make sure to do lots of sweeps - # when finding excited states - # - nsweeps = 30 - maxdim = [10,10,10,20,20,40,80,100,200,200] - cutoff = [1E-8] - noise = [1E-6] - - # - # Compute the ground state psi0 - # - psi0_init = random_mps(sites;linkdims=2) - energy0,psi0 = dmrg(H,psi0_init;nsweeps,maxdim,cutoff,noise) - - println() - - # - # Compute the first excited state psi1 - # - psi1_init = random_mps(sites;linkdims=2) - energy1,psi1 = dmrg(H,[psi0],psi1_init;nsweeps,maxdim,cutoff,noise,weight) - - # Check psi1 is orthogonal to psi0 - @show inner(psi1,psi0) - - - # - # The expected gap of the transverse field Ising - # model is given by Eg = 2*|h-1| - # - # (The DMRG gap will have finite-size corrections) - # - println("DMRG energy gap = ",energy1-energy0); - println("Theoretical gap = ",2*abs(h-1)); - - println() - - # - # Compute the second excited state psi2 - # - psi2_init = random_mps(sites;linkdims=2) - energy2,psi2 = dmrg(H,[psi0,psi1],psi2_init;nsweeps,maxdim,cutoff,noise,weight) - - # Check psi2 is orthogonal to psi0 and psi1 - @show inner(psi2,psi0) - @show inner(psi2,psi1) - - return -end -``` - -## Printing the Entanglement Entropy at Each Step - -To obtain the entanglement entropy of an MPS at each step during a DMRG calculation, -you can use the [Observer](@ref observer) system to make a custom observer object that prints out -this information. - -First we define our custom observer type, `EntanglementObserver`, and overload the `measure!` function -for it: - -```julia -using ITensors, ITensorMPS - -mutable struct EntanglementObserver <: AbstractObserver -end - -function ITensorMPS.measure!(o::EntanglementObserver; bond, psi, half_sweep, kwargs...) - wf_center, other = half_sweep==1 ? (psi[bond+1],psi[bond]) : (psi[bond],psi[bond+1]) - U,S,V = svd(wf_center, uniqueinds(wf_center,other)) - SvN = 0.0 - for n=1:dim(S, 1) - p = S[n,n]^2 - SvN -= p * log(p) - end - println(" Entanglement across bond $bond = $SvN") -end -``` - -The `measure!` function grabs certain helpful keywords passed to it by DMRG, such as what bond DMRG -has just finished optimizing. - -Here is a complete sample code including constructing the observer and passing it to DMRG: - -```julia -using ITensors, ITensorMPS - -mutable struct EntanglementObserver <: AbstractObserver -end - -function ITensorMPS.measure!(o::EntanglementObserver; bond, psi, half_sweep, kwargs...) - wf_center, other = half_sweep==1 ? 
(psi[bond+1],psi[bond]) : (psi[bond],psi[bond+1])
-  U,S,V = svd(wf_center, uniqueinds(wf_center,other))
-  SvN = 0.0
-  for n=1:dim(S, 1)
-    p = S[n,n]^2
-    SvN -= p * log(p)
-  end
-  println("  Entanglement across bond $bond = $SvN")
-end
-
-let
-  N = 100
-
-  s = siteinds("S=1/2",N)
-
-  a = OpSum()
-  for n=1:N-1
-    a += "Sz",n,"Sz",n+1
-    a += 0.5,"S+",n,"S-",n+1
-    a += 0.5,"S-",n,"S+",n+1
-  end
-  H = MPO(a,s)
-  psi0 = random_mps(s;linkdims=4)
-
-  nsweeps = 5
-  maxdim = [10,20,80,160]
-  cutoff = 1E-8
-
-  observer = EntanglementObserver()
-
-  energy,psi = dmrg(H,psi0;nsweeps,maxdim,cutoff,observer,outputlevel=2)
-
-  return
-end
-```
-
-Example output:
-```
-...
-Sweep 2, half 2, bond (35,36) energy=-44.08644657103751
-  Truncated using cutoff=1.0E-08 maxdim=20 mindim=1
-  Trunc. err=2.54E-07, bond dimension 20
-  Entanglement across bond 35 = 0.7775882479059774
-Sweep 2, half 2, bond (34,35) energy=-44.086696891668424
-  Truncated using cutoff=1.0E-08 maxdim=20 mindim=1
-  Trunc. err=2.12E-07, bond dimension 20
-  Entanglement across bond 34 = 0.7103532704635472
-Sweep 2, half 2, bond (33,34) energy=-44.08696190368391
-  Truncated using cutoff=1.0E-08 maxdim=20 mindim=1
-  Trunc. err=1.29E-07, bond dimension 20
-  Entanglement across bond 33 = 0.7798362911744212
-...
-```
-
-If you only want to see the maximum entanglement during each sweep, you can add a field
-to the EntanglementObserver object that saves the maximum value encountered so far.
-Keep overwriting this field whenever a larger value is seen, and print the most recently
-observed maximum at the end of each sweep.
-
-
-## Monitoring the Memory Usage of DMRG
-
-To monitor how much memory (RAM) a DMRG calculation is using while it is running,
-you can use the [Observer](@ref observer) system to make a custom observer object that prints out
-this information. The `Base.summarysize` function, which returns the size
-in bytes of any Julia object, is also very helpful here.
-
-First we define our custom observer type, `SizeObserver`, and overload the `measure!` function
-for it:
-
-```julia
-using ITensors, ITensorMPS
-
-mutable struct SizeObserver <: AbstractObserver
-end
-
-function ITensorMPS.measure!(o::SizeObserver; bond, half_sweep, psi, projected_operator, kwargs...)
-  if bond==1 && half_sweep==2
-    psi_size = Base.format_bytes(Base.summarysize(psi))
-    PH_size = Base.format_bytes(Base.summarysize(projected_operator))
-    println("|psi| = $psi_size, |PH| = $PH_size")
-  end
-end
-```
-
-The `measure!` function grabs certain helpful keywords passed to it by DMRG, checking
-`if bond==1 && half_sweep==2` so that it only runs at the end of a full sweep.
-
-When it runs, it calls `Base.summarysize` on the wavefunction `psi` object and the `projected_operator` object. The `projected_operator`, which is the Hamiltonian projected into the current MPS basis, is usually the largest object in a DMRG calculation. The code also uses `Base.format_bytes` to turn an integer representing bytes into a human-readable string.
-
-Here is a complete sample code including constructing the observer and passing it to DMRG:
-
-```julia
-using ITensors, ITensorMPS
-
-mutable struct SizeObserver <: AbstractObserver
-end
-
-function ITensorMPS.measure!(o::SizeObserver; bond, sweep, half_sweep, psi, projected_operator, kwargs...)
-  if bond==1 && half_sweep==2
-    psi_size = Base.format_bytes(Base.summarysize(psi))
-    PH_size = Base.format_bytes(Base.summarysize(projected_operator))
-    println("After sweep $sweep, |psi| = $psi_size, |PH| = $PH_size")
-  end
-end
-
-let
-  N = 100
-
-  s = siteinds("S=1/2",N)
-
-  a = OpSum()
-  for n=1:N-1
-    a += "Sz",n,"Sz",n+1
-    a += 0.5,"S+",n,"S-",n+1
-    a += 0.5,"S-",n,"S+",n+1
-  end
-  H = MPO(a,s)
-  psi0 = random_mps(s;linkdims=4)
-
-  nsweeps = 5
-  maxdim = [10,20,80,160]
-  cutoff = 1E-8
-
-  obs = SizeObserver()
-
-  energy,psi = dmrg(H,psi0;nsweeps,maxdim,cutoff,observer=obs)
-
-  return
-end
-```
-
-Example output:
-```
-After sweep 1, |psi| = 211.312 KiB, |PH| = 593.984 KiB
-After sweep 1 energy=-43.95323393592883 maxlinkdim=10 maxerr=8.26E-06 time=0.098
-After sweep 2, |psi| = 641.000 KiB, |PH| = 1.632 MiB
-After sweep 2 energy=-44.10791340895817 maxlinkdim=20 maxerr=7.39E-07 time=0.132
-After sweep 3, |psi| = 1.980 MiB, |PH| = 5.066 MiB
-After sweep 3 energy=-44.12593605906466 maxlinkdim=44 maxerr=9.96E-09 time=0.256
-After sweep 4, |psi| = 2.863 MiB, |PH| = 7.246 MiB
-After sweep 4 energy=-44.127710946536645 maxlinkdim=56 maxerr=9.99E-09 time=0.445
-After sweep 5, |psi| = 3.108 MiB, |PH| = 7.845 MiB
-After sweep 5 energy=-44.127736798226536 maxlinkdim=57 maxerr=9.98E-09 time=0.564
-```
diff --git a/docs/src/examples/ITensor.md b/docs/src/examples/ITensor.md
deleted file mode 100644
index aec5384f6f..0000000000
--- a/docs/src/examples/ITensor.md
+++ /dev/null
@@ -1,558 +0,0 @@
-# [ITensor Code Examples](@id itensor_examples)
-
-
-## Print Indices of an ITensor
-
-Sometimes the printout of an ITensor can be rather large, whereas you
-might only want to see its indices. For these cases, just wrap the
-ITensor in the function `inds` like this:
-
-```julia
-@show inds(T)
-```
-
-or this:
-
-```julia
-println("T inds = ",inds(T))
-```
-
-## Getting and Setting Elements of an ITensor
-
-Say we have an ITensor constructed as:
-
-```julia
-i = Index(3,"index_i")
-j = Index(2,"index_j")
-k = Index(4,"index_k")
-
-T = ITensor(i,j,k)
-```
-
-An ITensor constructed this way starts with all of its elements
-equal to zero. (Technically it allocates no storage at all, but this is
-an implementation detail.)
-
-**Setting Elements**
-
-To set an element of this ITensor, such as the element where `(i,j,k) = (2,1,3)`,
-you can do the following:
-
-```julia
-T[i=>2,j=>1,k=>3] = -3.2
-```
-
-In the Julia language, `a=>b` is built-in notation for making a `Pair(a,b)`
-object.
-
-Because the Index objects are passed to `T` along with their values, passing them in a different order has exactly the same effect:
-
-```julia
-# Both of these lines of code do the same thing:
-T[j=>1,i=>2,k=>3] = -3.2
-T[j=>1,k=>3,i=>2] = -3.2
-```
-
-**Getting Elements**
-
-You can retrieve individual elements of an ITensor by accessing them through the same notation used to set elements:
-
-```julia
-el = T[j=>1,i=>2,k=>3]
-println("The (i,j,k) = (2,1,3) element of T is ",el)
-```
-
-## Making ITensors from Arrays
-
-To initialize all of the elements of an ITensor at once, you
-can pass a Julia array into the ITensor constructor.
-
-For example, if we want to construct an ITensor `A` with indices
-`i,j` we can initialize it from a matrix as follows:
-
-```julia
-M = [1.0 2.0;
-     3.0 4.0]
-
-i = Index(2,"i")
-j = Index(2,"j")
-
-A = ITensor(M,i,j)
-```
-
-More generally we can use an nth-order (n-dimensional) Julia array to initialize an ITensor:
-
-```julia
-T = randn(4,7,2)
-
-k = Index(4,"index_k")
-l = Index(7,"index_l")
-m = Index(2,"index_m")
-
-B = ITensor(T,k,l,m)
-```
-
-## Making Arrays from ITensors
-
-Not only can we make an ITensor from a Julia array, but we can also convert
-an ITensor back into a Julia array.
-
-Say we have made an ITensor with two indices:
-
-```@example from_array
-using ITensors # hide
-k = Index(4,"index_k")
-m = Index(2,"index_m")
-
-T = random_itensor(k,m)
-@show T
-display(T) # hide
-```
-
-Here we used the `random_itensor` constructor to fill T with random elements,
-but we could make an ITensor some other way too.
-
-Now to convert `T` into a regular Julia array `A`, use the [`Array`](@ref) constructor
-and pass the indices of `T` in the order that you want:
-
-```@example from_array
-A = Array(T,k,m)
-@show A
-```
-
-The reason you have to pass the indices is that the ordering of ITensor indices
-is an implementation detail and not part of the user interface. So when leaving the
-ITensor system and converting to a regular array, you must say what ordering of the
-indices you want. Making the array as `A = Array(T,m,k)` would give the transpose
-of the array in the code above.
-
-Note that for efficiency reasons, the array returned by this conversion (or by the
-related lowercase `array` function) will sometimes be a *view* of the ITensor, such
-that changing an element of `A` would also change the corresponding element of `T`.
-This is not always the case though: for example, if the indices are passed in a
-different order from how the internal ITensor storage is arranged, or if `T` is a
-block-sparse ITensor, a copy must be made, since the (not stored) zero blocks
-need to be filled in.
-
-
-## Arithmetic With ITensors
-
-ITensors can be added and subtracted and multiplied by scalars just like plain tensors can. But ITensors have the additional feature that you can add and subtract them even if their indices are in a different order from each other, as long as they have the same collection of indices.
-
-For example, say we have ITensors `A`, `B`, and `C`:
-```julia
-i = Index(3,"i")
-j = Index(2,"j")
-k = Index(4,"k")
-
-A = random_itensor(i,j,k)
-B = random_itensor(i,j,k)
-C = random_itensor(k,i,j)
-```
-Above we have initialized these ITensors to have random elements, just for the sake of this example.
-
-We can then add or subtract these ITensors
-
-```julia
-R1 = A + B
-R2 = A - B
-R3 = A + B - C
-```
-
-or do more complicated operations involving real and complex scalars too:
-
-```julia
-R4 = 2.0*A - B + C/(1+1im)
-```
-
-## Elementwise Operations on ITensors
-
-[*Note: currently elementwise operations are only defined for dense ITensors, not for block-sparse QN ITensors.*]
-
-ITensors support Julia broadcasting operations, making it quite easy to carry out element-wise operations on them in a very similar way as for regular Julia arrays. As a concrete example, consider the following ITensor initialized with random elements:
-
-```julia
-i = Index(2,"i")
-j = Index(3,"j")
-
-A = random_itensor(i,j)
-```
-
-Here are some examples of basic element-wise operations we can do using Julia's dotted operator broadcasting syntax:
-
-```julia
-# Multiply every element of `A` by 2.0:
-A .*= 2.0
-```
-
-```julia
-# Add 1.5 to every element of A
-A .+= 1.5
-```
-
-The dotted notation works for functions too:
-
-```julia
-# Replace every element in A by its absolute value:
-A .= abs.(A)
-```
-
-```julia
-# Replace every element in A by the number 1.0
-A .= one.(A)
-```
-
-If we have another ITensor `B = ITensor(j,i)`, which has the same set of indices
-though possibly in a different order, then we can also do element-wise operations
-involving both ITensors:
-
-```julia
-# Add elements of A and B element-wise
-A .= A .+ B
-# Add elements of A and B element-wise with coefficients included
-A .= (2.0 .* A) .+ (-3.0 .* B)
-```
-
-Last but not least, it is possible to make custom functions yourself and broadcast them across all elements of an ITensor:
-
-```julia
-myf(x) = 1.0/(1.0+exp(-x))
-A .= myf.(A)
-```
-
-## Making an ITensor with a Single Non-Zero Element
-
-It is often useful to make ITensors with all elements zero
-except for a specific element that is equal to 1.0.
-Use cases can include making product-state quantum wavefunctions
-or contracting single-element ITensors with other ITensors to
-set their indices to a fixed value.
-
-To make such an ITensor, use the [`onehot`](@ref) function. Borrowing terminology from engineering,
-a "one hot" vector or tensor has a single element equal to 1.0 and
-the rest zero. (In previous versions of ITensor this function was called `setelt`.)
-
-The ITensor function [`onehot`](@ref) takes one or more
-Index-value Pairs such as `i=>2` and `j=>1` and returns an ITensor
-with a 1.0 in the location specified by the Index values:
-
-```@example onehot_1
-using ITensors # hide
-i = Index(2)
-O1 = onehot(i=>1)
-println(O1)
-```
-
-```@example onehot_2
-using ITensors # hide
-i = Index(2) # hide
-O2 = onehot(i=>2)
-println(O2)
-```
-
-```@example onehot_3
-using ITensors # hide
-i = Index(2) # hide
-j = Index(3)
-T = onehot(i=>2,j=>3)
-println(T)
-```
-
-## Tracing an ITensor
-
-An important operation involving a single tensor is tracing out certain
-pairs of indices. Say we have an ITensor `A` with indices `i,j,l`:
-
-```julia
-i = Index(4,"i")
-j = Index(3,"j")
-l = Index(4,"l")
-
-A = random_itensor(i,j,l)
-```
-
-and we want to trace `A` by summing over the indices `i` and `l` locked together,
-in other words: ``\sum_{i} A^{iji}``.
-
-To do this in ITensor, we can use a `delta` tensor, which you can think of as
-an identity operator or more generally a Kronecker delta or "hyper-edge":
-
-![](itensor_trace_figures/delta_itensor.png)
-
-Viewed as an array, a delta tensor has all diagonal elements equal to 1.0 and
-is zero otherwise.
-
-Now we can compute the trace by contracting `A` with the delta tensor:
-
-```julia
-trA = A * delta(i,l)
-```
-
-![](itensor_trace_figures/trace_A.png)
-
-## Factoring ITensors (SVD, QR, etc.)
-
-The ITensor approach to tensor factorizations emphasizes the structure
-of the factorization, and does not require knowing the index ordering.
-
-ITensor offers various tensor factorizations, such as the
-singular value decomposition (SVD) and the QR factorization.
-These are extended to the case of tensors by treating some of the indices
-as the "row" indices and the rest of the indices as the "column" indices,
-reshaping the tensor into a matrix to carry out the factorization, then
-restoring the tensor structure at the end. All of these steps are done
-for you by the ITensor system as we will see below.
-
-#### Singular Value Decomposition
-
-The singular value decomposition (SVD) is a matrix factorization
-that is also extremely useful for general tensors.
-
-As a brief review, the SVD is a factorization of a matrix M into the product
-```math
-M = U S V^\dagger
-```
-with U and V having the property ``U^\dagger U = 1`` and ``V^\dagger V = 1``.
-The matrix S is diagonal and has real, non-negative entries known as the singular
-values, which are typically ordered from largest to smallest.
-The SVD is well-defined for any matrix, including rectangular matrices. It also
-leads to a controlled approximation, where the error due to discarding columns of U and V
-is small if the corresponding singular values discarded are small.
-
-To compute the SVD of an ITensor, you only need to specify which indices are (collectively)
-the "row" indices (thinking of the ITensor as a matrix), with the rest assumed to be the "column"
-indices.
-
-Say we have an ITensor with indices i,j, and k
-
-```julia
-T = ITensor(i,j,k)
-```
-
-and we want to treat i and k as the "row" indices for the purpose of the SVD.
-
-To perform this SVD, we can call the function `svd` as follows:
-
-```julia
-U,S,V = svd(T,(i,k))
-```
-
-Diagrammatically the SVD operation above looks like:
-
-![](itensor_factorization_figures/SVD_Ex1.png)
-
-The guarantee of the `svd` function is that the ITensor
-product `U*S*V` gives us back an ITensor identical to T:
-
-```julia
-@show norm(U*S*V - T) # typical output: norm(U*S*V-T) = 1E-14
-```
-
-*Full working example:*
-
-```julia
-i = Index(3,"i")
-j = Index(4,"j")
-k = Index(5,"k")
-
-T = random_itensor(i,j,k)
-
-U,S,V = svd(T,(i,k))
-
-@show norm(U*S*V-T)
-```
-
-**Truncated SVD**
-
-An important use of the SVD is approximating a higher-rank tensor
-by a product of lower-rank tensors whose indices range over only
-a modest set of values.
-
-To obtain an approximate SVD in ITensor, pass one or more of
-the following accuracy parameters as named arguments:
-
-* `cutoff` --- real number ``\epsilon``. Discard the smallest singular values
-  ``\lambda_n`` such that the truncation error is less than ``\epsilon``:
-  ```math
-  \frac{\sum_{n\in\text{discarded}} \lambda^2_n}{\sum_{n} \lambda^2_n} < \epsilon.
-  ```
-  Using a cutoff allows the SVD algorithm to truncate as many states as possible while still
-  ensuring a certain accuracy.
-
-* `maxdim` --- integer M. If the number of singular values exceeds M, only the largest M will be retained.
-
-* `mindim` --- integer m. At least m singular values will be retained, even if some fall below the cutoff.
-
-Let us revisit the example above, but also provide some of these accuracy parameters:
-
-```julia
-i = Index(10,"i")
-j = Index(40,"j")
-k = Index(20,"k")
-T = random_itensor(i,j,k)
-
-U,S,V = svd(T,(i,k),cutoff=1E-2)
-```
-
-Note that we have also made the indices larger so that the truncation performed will be
-non-trivial.
-In the code above, we specified that a cutoff of ``\epsilon=10^{-2}`` be used. We can check that the resulting factorization is now approximate by computing the squared relative error:
-
-```julia
-truncerr = (norm(U*S*V - T)/norm(T))^2
-@show truncerr
-# typical output: truncerr = 8.24E-03
-```
-
-Note how the computed error is below the cutoff ``\epsilon`` we requested.
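-You can also relate the truncation error directly to the discarded part of the
-singular-value spectrum. The following minimal sketch (the variable names are our
-own, and we assume the singular values in `S` are ordered largest to smallest, as
-stated above) compares the discarded weight with the squared relative error:
-
-```julia
-using ITensors
-
-i = Index(10,"i")
-j = Index(40,"j")
-k = Index(20,"k")
-T = random_itensor(i,j,k)
-
-U,S,V = svd(T,(i,k))                # full, untruncated SVD
-Ut,St,Vt = svd(T,(i,k),cutoff=1E-2) # truncated SVD
-
-# Discarded weight of the singular-value spectrum:
-lambdas = [S[n,n] for n=1:dim(S,1)]
-kept = dim(St,1)
-discarded = sum(abs2, lambdas[(kept+1):end]; init=0.0) / sum(abs2, lambdas)
-
-# This should closely match the squared relative error of the truncation:
-@show discarded
-@show (norm(Ut*St*Vt - T)/norm(T))^2
-```
-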
-
-*Full working example including truncation:*
-
-```julia
-i = Index(10,"i");
-j = Index(40,"j");
-k = Index(20,"k");
-
-T = random_itensor(i,j,k)
-
-U,S,V = svd(T,(i,k),cutoff=1E-2)
-
-@show norm(U*S*V-T)
-@show (norm(U*S*V - T)/norm(T))^2
-```
-
-#### QR Factorization
-
-Computing the QR factorization of an ITensor works in a similar way to the SVD.
-In addition to passing the ITensor you want to factorize, you must also pass
-the indices you want to end up on the tensor `Q`, in other words the indices to be treated
-as the "row" indices for the purpose of defining the QR factorization.
-
-Say we want to compute the QR factorization of an ITensor `T` with indices `i,j,k`,
-putting the indices `i` and `k` onto `Q` and the remaining indices onto `R`. We
-can do this as follows:
-
-![](itensor_factorization_figures/QR_Ex1.png)
-
-```julia
-T = random_itensor(i,j,k)
-Q,R = qr(T,(i,k);positive=true)
-```
-
-Note the use of the optional `positive=true` keyword argument, which ensures that
-the diagonal elements of `R` are non-negative. With this option, the QR factorization
-is *unique*, which can be useful in certain cases.
-
-## Combining Multiple Indices into One Index
-
-It can be very useful to combine or merge multiple indices of an ITensor into a
-single Index. Say we have an ITensor with indices `i,j,k` and we want to combine
-Index `i` and Index `k` into a new Index. This new Index (call it `c`) will have
-a dimension equal to the dimension of `i` times the dimension of `k`.
-
-To carry out this procedure we can make a special kind of ITensor: a combiner.
-To make a combiner, call the function `combiner`, passing the indices you
-want to combine:
-```@example combiner
-using ITensors # hide
-i = Index(4,"i") # hide
-j = Index(3,"j") # hide
-k = Index(2,"k") # hide
-C = combiner(i,k; tags="c")
-nothing # hide
-```
-
-Then if we have an ITensor
-```@example combiner
-T = random_itensor(i,j,k)
-@show inds(T)
-```
-we can combine indices `i` and `k` by contracting with the combiner:
-```@example combiner
-CT = C * T
-nothing # hide
-```
-
-Printing out the indices of the new ITensor `CT` we can see that it
-has only two indices:
-```@example combiner
-@show inds(CT)
-```
-The first is the newly made combined Index, which was made for us by
-the `combiner` function, and the second is the `j` Index of `T`,
-which was not part of the combining process. To access the combined
-Index you can call the `combinedind` function on the combiner:
-```@example combiner
-ci = combinedind(C)
-```
-
-We can visualize all of the steps above as follows:
-![](combiner_itensor.png)
-
-Combining is not limited to two indices: you can
-combine any number of indices, in any order, using a combiner.
-
-To undo the combining process and uncombine the Index `c` back into `i,k`,
-just contract with the conjugate of the combiner ITensor, `dag(C)`:
-```@example combiner
-UT = dag(C) * CT
-@show inds(UT)
-```
-
-
-## Write and Read an ITensor to Disk with HDF5
-
-
-
-!!! info
-    Make sure to install the HDF5 package to use this feature. (Run `julia> ] add HDF5` in the Julia REPL console.)
-
-Saving ITensors to disk can be very useful. For example, you
-might encounter a bug in your own code, and by reading the
-ITensors involved from disk you can shortcut the process of
-running a lengthy algorithm over and over to reproduce the bug.
-Or you can save the output of an expensive calculation, such as
-a DMRG calculation, and use it as a starting point for multiple
-follow-up calculations such as computing time-dependent properties.
-
-ITensors can be written to files using the HDF5 format. HDF5 offers
-many benefits, such as portability across different machine types and
-a standard interface across various libraries and languages.
-
-**Writing an ITensor to an HDF5 File**
-
-Let's say you have an ITensor `T` which you have made or obtained
-from a calculation. To write it to an HDF5 file named "myfile.h5"
-you can use the following pattern:
-
-```julia
-using HDF5
-f = h5open("myfile.h5","w")
-write(f,"T",T)
-close(f)
-```
-
-Above, the string "T" can actually be any string you want, such as "ITensor T"
-or "Result Tensor", and doesn't have to have the same name as the reference `T`.
-Closing the file `f` is optional and you can also write other objects to the same
-file before closing it.
-
-**Reading an ITensor from an HDF5 File**
-
-Say you have an HDF5 file "myfile.h5" which contains an ITensor stored as a dataset with the
-name "T". (Which would be the situation if you wrote it as in the example above.)
-To read this ITensor back from the HDF5 file, use the following pattern:
-
-```julia
-using HDF5
-f = h5open("myfile.h5","r")
-T = read(f,"T",ITensor)
-close(f)
-```
-
-Note the `ITensor` argument to the read function, which tells Julia which read function
-to call and how to interpret the data stored in the HDF5 dataset named "T". In the
-future we might lift the requirement of providing the type and have it be detected
-automatically from the data stored in the file.
-
-
-
diff --git a/docs/src/examples/MPSandMPO.md b/docs/src/examples/MPSandMPO.md
deleted file mode 100644
index c8ee559261..0000000000
--- a/docs/src/examples/MPSandMPO.md
+++ /dev/null
@@ -1,454 +0,0 @@
-# MPS and MPO Examples
-
-The following examples demonstrate operations available in ITensor
-to work with [matrix product state (MPS)](http://tensornetwork.org/mps/)
-(or tensor train) and matrix product operator (MPO) tensor networks.
-
-## Creating an MPS from a Tensor
-
-![](mps_from_tensor.png)
-
-A matrix product state (MPS) made of N tensors, each with
-one site or physical index, is a way of representing a single
-tensor with N indices. One way of obtaining the MPS form of an
-N-index tensor `T` is by repeatedly factorizing `T` into N
-separate tensors using a factorization such as the [Singular Value Decomposition](@ref) (SVD).
-This algorithm for obtaining an MPS is known in the mathematics
-literature as the "tensor train SVD" or "TT-SVD" algorithm.
-
-To turn an N-index (order-N) tensor T into an MPS, you can just
-construct an MPS by passing T as the first argument, along with
-keyword arguments that control the approximations used in factorizing
-T. Let's look at a few specific cases.
-
-#### ITensor to MPS Example
-
-If you have a tensor `T` which is an ITensor and has indices `i,j,k,l,m`,
-you can create an MPS approximation of `T` where the MPS has site indices
-`i,j,k,l,m` as follows:
-
-```julia
-cutoff = 1E-8
-maxdim = 10
-T = random_itensor(i,j,k,l,m)
-M = MPS(T,(i,j,k,l,m);cutoff=cutoff,maxdim=maxdim)
-```
-
-Here we used a random ITensor for illustrative purposes, but it could be any ITensor, and
-typically tensors with additional structure are better approximated by an MPS.
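-For a self-contained version of the snippet above, one can also check how well the
-MPS reproduces `T` by contracting its tensors back together. (The use of `prod` to
-contract the MPS and the small index dimensions are our own choices for this sketch;
-contracting back into a single tensor is only feasible for small examples like this.)
-
-```julia
-using ITensors, ITensorMPS
-
-i,j,k,l,m = Index.((2,2,2,2,2),("i","j","k","l","m"))
-T = random_itensor(i,j,k,l,m)
-
-cutoff = 1E-8
-maxdim = 10
-M = MPS(T,(i,j,k,l,m);cutoff=cutoff,maxdim=maxdim)
-
-# Contract all MPS tensors back into one ITensor and compare with T:
-T_approx = prod(M)
-@show norm(T_approx - T)/norm(T)
-```
-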
-
-#### Julia Tensor to MPS Example
-
-Another situation could be where you have a Julia array or Julia tensor of
-dimension ``d^N`` and want to approximate it as an MPS with ``N`` site indices,
-each of dimension ``d``. For example, we could have the following random Julia
-array of dimension ``2\times 2\times 2 \times 2 \times 2``:
-
-```julia
-d = 2
-N = 5
-A = randn(d,d,d,d,d)
-```
-
-Alternatively, the array could be just a one-dimensional array of length ``d^N``:
-
-```julia
-A = randn(d^N)
-```
-
-To convert this array to an MPS, we will first need a collection of Index objects
-to use as the site indices of the MPS. We can conveniently construct an array of
-five indices of dimension 2 as follows:
-
-```julia
-sites = siteinds(d,N)
-```
-
-Finally, we can pass our array `A` and our `sites` to the MPS constructor along with
-parameters controlling the truncation level of the factorizations used:
-
-```julia
-cutoff = 1E-8
-maxdim = 10
-M = MPS(A,sites;cutoff=cutoff,maxdim=maxdim)
-```
-
-## Obtaining Elements of a Tensor Represented by an MPS
-
-A matrix product state (MPS) or tensor train (TT) is a format for representing a large tensor having N indices in terms of N smaller tensors. Given an MPS representing a tensor T,
-we can obtain a particular element ``T^{s_1 s_2 s_3 \cdots s_N}``
-of that tensor using code similar to the following.
-
-In the example code below we will obtain the element ``T^{1,2,1,1,2,1,2,2,2,1}`` of the tensor T
-which is (implicitly) defined by the MPS psi:
-
-```@example mps_element
-using ITensors, ITensorMPS
-let # hide
-N = 10
-s = siteinds(2,N)
-chi = 4
-psi = random_mps(s;linkdims=chi)
-
-# Make an array of integers of the element we
-# want to obtain
-el = [1,2,1,1,2,1,2,2,2,1]
-
-V = ITensor(1.)
-for j=1:N
-  V *= (psi[j]*state(s[j],el[j]))
-end
-v = scalar(V)
-
-# v is the element we wanted to obtain:
-@show v
-end # hide
-```
-
-The call to `state(s[j],el[j])` in the code above makes a single-index ITensor
-with the Index `s[j]` and the entry at location `el[j]` set to 1.0, with all other
-entries set to 0.0. Contracting this tensor with the MPS tensor at site `j`
-can be viewed as "clamping" or "fixing" the index to a set value. The resulting
-tensors are contracted sequentially, overwriting the ITensor `V`, and the final
-scalar value of `V` is the tensor element we seek.
-
-See below for a visual depiction of what the above code is doing:
-
-![](mps_element.png)
-
-## Expected Value of Local Operators
-
-When using an MPS to represent a quantum wavefunction ``|\psi\rangle``,
-a common operation is computing the expected value ``\langle\psi|\hat{A}_j|\psi\rangle``
-of a local operator ``\hat{A}_j`` acting on site ``j``. This can be accomplished
-efficiently and conveniently using the [`expect`](@ref) function as:
-
-```julia
-Avals = expect(psi,"A")
-```
-
-where `"A"` must be the name of an operator associated with the physical site type, or site tags, of
-the sites of the MPS `psi`. For example, the operator name could be
-`"Sz"` for spin sites or `"Ntot"` for electron sites.
-(For more information about defining such operators yourself,
-see the section on [Extending op Function Definitions](@ref custom_op).)
-
-As a concrete example, consider computing the expectation value of ``S^z_j`` on
-every site of an MPS representing a system of N spins of size ``S=1/2``. In the
-following example we will use a random MPS of bond dimension ``\chi=4``, but the
-MPS could be obtained in other ways, such as through a DMRG calculation.
-
-```@example expect
-using ITensors, ITensorMPS
-N = 10
-chi = 4
-sites = siteinds("S=1/2",N)
-psi = random_mps(sites;linkdims=chi)
-magz = expect(psi,"Sz")
-for (j,mz) in enumerate(magz)
-  println("$j $mz")
-end
-```
-
-![](mps_expect.png)
-
-## Expected Values of MPO Operators
-
-When using an MPS to represent a quantum wavefunction ``|\psi\rangle``,
-another common operation is computing the expected value ``\langle\psi|W|\psi\rangle``
-of an operator ``W`` which is represented as a matrix product operator (MPO) tensor network.
-A key example could be the Hamiltonian defining a quantum system.
-
-Given an MPO `W` and an MPS `psi`, you can compute ``\langle\psi|W|\psi\rangle``
-by using the function `inner` as follows:
-```julia
-ex_W = inner(psi',W,psi)
-```
-which will return a scalar that may be either real or complex, depending on the properties of
-`psi` and `W`.
-
-## Computing Correlation Functions
-
-In addition to the expected values of local operators
-discussed above, other observables that are very important
-in physics studies are correlation functions of the form
-
-```math
-C_{ij} = \langle\psi| A_i B_j |\psi\rangle
-```
-
-These can be computed efficiently for an MPS `psi` in ITensor
-using the [`correlation_matrix`](@ref) function:
-
-```julia
-C = correlation_matrix(psi,"A","B")
-```
-
-where `"A"` and `"B"` must be operator names associated with the physical site type,
-or site tags, of the sites of the MPS `psi`. For example, these strings could be
-`"Sz"`, `"S+"`, or `"S-"` for spin sites, or `"Cdagup"` and `"Cup"` for electron sites.
-(For more information about defining such operators yourself,
-see the section on [Extending op Function Definitions](@ref custom_op).)
-
-As a concrete example, say we have an MPS `psi` for a system of spins and
-want to compute the correlator ``\langle\psi|S^z_i S^z_j|\psi\rangle``.
-We can compute this as:
-
-```julia
-zzcorr = correlation_matrix(psi,"Sz","Sz")
-```
-
-![](mps_zz_correlation.png)
-
-See the [`correlation_matrix`](@ref) docs for more details about additional arguments you can pass
-to this function.
-
-## Applying a Single-site Operator to an MPS
-
-In many applications one needs to modify a matrix product
-state (MPS) by multiplying it with an operator that acts
-only on a single site. This is actually a very straightforward
-operation, and this formula shows you how to do it in ITensor.
-
-Say we have an operator ``G^{s'_3}_{s_3}`` which acts non-trivially on site 3 of our MPS `psi`,
-as in the following diagram:
-
-![](mps_onesite_figures/operator_app_mps.png)
-
-To carry out this operation, contract the operator G with the MPS tensor for site 3,
-removing the prime from the ``s'_3`` index afterward:
-
-![](mps_onesite_figures/operator_contract.png)
-
-```julia
-newA = G * psi[3]
-newA = noprime(newA)
-```
-
-Finally, put the new tensor back into MPS `psi` to update its third MPS tensor:
-
-```julia
-psi[3] = newA
-```
-
-Afterward, we can visualize the modified MPS as:
-
-![](mps_onesite_figures/updated_mps.png)
-
-As a technical note, if you are working in a context where gauge or orthogonality
-properties of the MPS are important, such as in time evolution using two-site gates,
-then you may want to call `psi = orthogonalize(psi, 3)`
-before modifying the tensor at site 3, which will ensure that the MPS remains in a
-well-defined orthogonal gauge centered on site 3. Modifying a tensor which is left- or right-orthogonal
-(i.e.
-(i.e. not the "center" tensor of the gauge) will destroy the gauge condition and
-require extra operations to restore it. (Calling the `orthogonalize` function afterward
-will automatically fix this, but it will have to do extra work to do so.)
-
-
-## Applying a Two-site Operator to an MPS
-
-A very common operation with matrix product states (MPS) is
-multiplication by a two-site operator or "gate" which modifies
-the MPS. This procedure can be carried out in an efficient,
-controlled way which is adaptive in the MPS bond dimension.
-
-Say we have an operator ``G^{s'_3 s'_4}_{s_3 s_4}`` which
-is our gate and which acts on physical sites 3 and 4 of our MPS `psi`,
-as in the following diagram:
-
-![](twosite_figures/gate_app_mps.png)
-
-To apply this gate in a controlled manner, first 'gauge' the MPS `psi` such
-that either site 3 or 4 is the *orthogonality center*. Here we make site 3
-the center:
-
-```julia
-psi = orthogonalize(psi, 3)
-```
-
-![](twosite_figures/gate_gauge.png)
-
-The other MPS tensors are now either left-orthogonal or right-orthogonal and can be
-left out of further steps without producing incorrect results.
-
-Next, contract the gate tensor G with the MPS tensors for sites 3 and 4:
-
-![](twosite_figures/gate_contract.png)
-
-```julia
-wf = (psi[3] * psi[4]) * G
-wf = noprime(wf)
-```
-
-Finally, use the singular value decomposition (SVD) to factorize the
-resulting tensor, multiplying the singular values into either U or V.
-Assign these two tensors back into the MPS to update it.
-
-![](twosite_figures/gate_svd.png)
-
-```julia
-inds3 = uniqueinds(psi[3],psi[4])
-U,S,V = svd(wf,inds3;cutoff=1E-8)
-psi[3] = U
-psi[4] = S*V
-```
-
-The call to `uniqueinds(psi[3],psi[4])` analyzes the indices of `psi[3]` and `psi[4]`
-and finds any which are unique to just `psi[3]`, saving this collection of indices as `inds3`.
-Passing this collection of indices to the `svd` function tells it to treat any indices
-that are unique to `psi[3]` as the indices which should go onto the `U` tensor afterward.
-We also set a truncation error cutoff of 1E-8 in the call to `svd` to truncate
-the smallest singular values and control the size of the resulting MPS.
-Other cutoff values can be used, depending on the desired accuracy,
-as well as limits on the maximum bond dimension (`maxdim` keyword argument).
-
-**Complete code example**
-
-```julia
-using ITensors, ITensorMPS
-
-# Here `psi` is an existing MPS and `G` is a two-site gate
-# carrying the (primed and unprimed) site indices of sites 3 and 4
-
-psi = orthogonalize(psi, 3)
-
-wf = (psi[3] * psi[4]) * G
-wf = noprime(wf)
-
-inds3 = uniqueinds(psi[3], psi[4])
-U, S, V = svd(wf, inds3; cutoff=1E-8)
-psi[3] = U
-psi[4] = S * V
-```
-
-## Computing the Entanglement Entropy of an MPS
-
-A key advantage of using the matrix product state (MPS) format to represent quantum wavefunctions is that it allows one to efficiently compute the entanglement entropy of any left-right bipartition of the system in one dimension, or, for a two-dimensional system, any "cut" along the MPS path.
-
-Say that we have obtained an MPS `psi` of length N and we wish to compute the entanglement entropy of a bipartition of the system into a region "A" consisting of sites 1,2,...,b and a region "B" consisting of sites b+1,b+2,...,N.
-
-Then the following code formula can be used to accomplish this task:
-
-```julia
-# assumes 1 < b <= N, so that psi[b] has a link index to its left
-psi = orthogonalize(psi, b)
-U,S,V = svd(psi[b], (linkinds(psi, b-1)..., siteinds(psi, b)...))
-SvN = 0.0
-for n=1:dim(S, 1)
-  p = S[n,n]^2
-  # skip any singular values that are exactly zero
-  if p > 0.0
-    SvN -= p * log(p)
-  end
-end
-```
-
-As a brief explanation of the code above, the call to `psi = orthogonalize(psi, b)`
-shifts the orthogonality center to site `b` of the MPS.
-
-The call to the `svd` routine says to treat the link (virtual or bond) Index connecting `psi[b-1]` and `psi[b]`, together with the b'th physical (site) Index, as "row" indices for the purposes of the SVD (these indices will end up on `U`, along with the Index connecting `U` to `S`).
-
-The code in the `for` loop iterates over the diagonal elements of the `S` tensor (which are the singular values from the SVD), computes their squares to obtain the probabilities of observing the various states in the Schmidt basis (i.e. eigenvectors of the reduced density matrices of the left-right bipartition), and puts them into the von Neumann entanglement entropy formula ``S_\text{vN} = - \sum_{n} p_{n} \log{p_{n}}``.
-
-## Sampling from an MPS
-
-A matrix product state (MPS) can be viewed as defining a probability distribution
-through the Born rule, as is the case when the MPS represents a quantum wavefunction.
-To sample from the distribution defined by an MPS, you can use the function `sample`
-provided in ITensor. For an MPS `psi`, a call to `sample(psi)` returns a random
-sample from the distribution defined by `psi`. (Note that each sample is drawn anew,
-not from a Markov chain seeded by a previous sample; this is possible because
-the algorithm for sampling an MPS is a "perfect" sampling algorithm with no autocorrelation.)
-
-In more detail, say we have a set of `N` site indices `s` and define a random MPS
-with these sites:
-```@example sample_mps; continued=true
-using ITensors, ITensorMPS
-
-N = 10 # number of sites
-d = 3 # dimension of each site
-chi = 16 # bond dimension of the MPS
-s = siteinds(d,N)
-psi = random_mps(s;linkdims=chi)
-```
-
-We can now draw some samples from this MPS as
-
-```@example sample_mps
-v1 = sample(psi)
-v2 = sample(psi)
-v3 = sample(psi)
-println(v1)
-println(v2)
-println(v3)
-```
-
-The integers in each of the samples represent settings of each of the MPS indices
-in the "computational basis".
-
-For reasons of efficiency, the `sample` function requires the MPS to be in orthogonal
-form, orthogonalized to the first site. If it is not already in this form, it
-can be brought into orthogonal form by calling `psi = orthogonalize(psi, 1)`.
-
-
-## Write and Read an MPS or MPO to Disk with HDF5
-
-!!! info
-    Make sure to install the HDF5 package to use this feature. (Run `julia> ] add HDF5` in the Julia REPL.)
-
-**Writing an MPS to an HDF5 File**
-
-Let's say you have an MPS `psi` which you have made or obtained
-from a calculation. To write it to an HDF5 file named "myfile.h5"
-you can use the following pattern:
-
-```julia
-using HDF5
-f = h5open("myfile.h5","w")
-write(f,"psi",psi)
-close(f)
-```
-
-Above, the string "psi" can actually be any string you want, such as "MPS psi"
-or "Result MPS", and doesn't have to match the name of the variable `psi`.
-Closing the file `f` is optional, and you can also write other objects to the same
-file before closing it.
-
-**Reading an MPS from an HDF5 File**
-
-Say you have an HDF5 file "myfile.h5" which contains an MPS stored as a dataset with the
-name "psi". (This would be the situation if you wrote it as in the example above.)
-To read this MPS back from the HDF5 file, use the following pattern:
-
-```julia
-using HDF5
-f = h5open("myfile.h5","r")
-psi = read(f,"psi",MPS)
-close(f)
-```
-
-Many functions which involve an MPS, such as the `dmrg` function or the `OpSum` system,
-require that you use an array of site indices which match the MPS. So when reading in
-an MPS from disk, do not construct a new array of site indices.
-Instead, you can
-obtain them like this: `sites = siteinds(psi)`.
-
-So for example, to create an MPO from an OpSum which has the same site indices
-as your MPS `psi`, do the following:
-
-```julia
-using ITensors, ITensorMPS
-
-os = OpSum()
-# Then put operators into os...
-
-sites = siteinds(psi) # Get site indices from your MPS
-H = MPO(os,sites)
-
-# Compute the expectation value of H in the state psi
-energy_psi = inner(psi',H,psi)
-```
-
-Note the `MPS` argument to the `read` function, which tells Julia which read function
-to call and how to interpret the data stored in the HDF5 dataset named "psi". In the
-future we might lift the requirement of providing the type and have it be detected
-automatically from the data stored in the file.
-
-
-**Writing and Reading MPOs**
-
-To write or read MPOs to or from HDF5 files, just follow the examples above, but use
-the type `MPO` when reading an MPO from the file instead of the type `MPS`.
-
diff --git a/docs/src/examples/Physics.md b/docs/src/examples/Physics.md
deleted file mode 100644
index 751e76e4f4..0000000000
--- a/docs/src/examples/Physics.md
+++ /dev/null
@@ -1,549 +0,0 @@
-# Physics (SiteType) System Examples
-
-## Obtaining a Predefined Operator
-
-Given an Index carrying a "physical" tag such as "Qubit", "S=1/2", "Boson", etc.,
-there is a set of pre-defined operators for each tag. The entire set of operators
-can be found in the section [SiteTypes Included with ITensor](@ref).
-
-If you have an Index `s` carrying a "S=1/2" tag, for example, you can obtain the "Sz"
-operator like this:
-```julia
-using ITensors, ITensorMPS
-
-op("Sz",s)
-```
-
-Usually indices with physical tags come from an array of indices returned from the `siteinds` function:
-```julia
-using ITensors, ITensorMPS
-
-N = 10
-sites = siteinds("S=1/2",N)
-```
-in which case one might want the "Sz" operator on site 4:
-```julia
-using ITensors, ITensorMPS
-Sz4 = op("Sz",sites[4])
-```
-
-## Make a Custom Operator from a Matrix
-
-The `op` function can be passed any matrix, as long as it has the correct dimensions,
-and it will turn this matrix into an ITensor representing the operator with the corresponding
-matrix elements.
-
-For example, if we have a two-dimensional Index `s`, we could make the "Sz" operator ourselves from
-the matrix
-```julia
-M = [1/2 0 ; 0 -1/2]
-```
-by calling
-```julia
-using ITensors, ITensorMPS
-Sz = op(M,s)
-```
-
-
-## [Making a Custom op Definition](@id custom_op)
-
-The function `op` is used to obtain operators defined for a
-given "site type". ITensor includes pre-defined site types such
-as "S=1/2", "S=1", "Electron", and others. Or you can define your own site type,
-as discussed in detail in the code examples further below.
-
-**Extending op Function Definitions**
-
-Perhaps the most common part of the site type system one wishes to extend
-is the set of `op` or `op!` function overloads, which allow code like
-
-```julia
-using ITensors, ITensorMPS
-s = siteind("S=1/2")
-Sz = op("Sz",s)
-```
-
-to automatically create the ``S^z`` operator for an Index `s` based on the
-`"S=1/2"` tag it carries. A major reason to define such `op` overloads
-is to allow the OpSum system to recognize new operator names, as
-discussed more below.
-
-Let's see how to introduce a new operator name into the ITensor site type
-system for the existing site type `"S=1/2"`. The operator we will
-introduce is the projector onto the up spin state, ``P_\uparrow``, which
-we will denote with the string `"Pup"`.
-
-As a matrix acting on the space ``\{ |\!\uparrow\rangle, |\!\downarrow\rangle \}``,
-the ``P_\uparrow`` operator is given by
-
-```math
-P_\uparrow =
-\begin{bmatrix}
- 1 & 0 \\
- 0 & 0 \\
-\end{bmatrix}
-```
-
-To add this operator to the ITensor `op` system, we just need to introduce the following
-code:
-
-```julia
-using ITensors, ITensorMPS
-ITensors.op(::OpName"Pup",::SiteType"S=1/2") =
-  [1 0
-   0 0]
-```
-
-This code can be defined anywhere, such as in your own personal application code, and does
-not have to be put into the ITensor library source code.
-
-Note that we have to name the function `ITensors.op` and not just `op`, so that it adds
-a method to the existing `op` function inside the ITensors module rather than defining
-an unrelated function of the same name.
-
-Having defined the above code, we can now do things like
-
-```julia
-using ITensors, ITensorMPS
-s = siteind("S=1/2")
-Pup = op("Pup",s)
-```
-
-to obtain the `"Pup"` operator for our `"S=1/2"` Index `s`. Or we can do a similar
-thing for an array of site indices:
-
-```julia
-using ITensors, ITensorMPS
-N = 40
-s = siteinds("S=1/2",N)
-Pup1 = op("Pup",s[1])
-Pup3 = op("Pup",s[3])
-```
-
-Note that for the `"Qudit"`/`"Boson"` site types, you have to define your overload
-of `op` with the dimension of the local Hilbert space as an extra argument, for example:
-```julia
-using ITensors, ITensorMPS
-function ITensors.op(::OpName"P1", ::SiteType"Boson", d::Int)
-  o = zeros(d, d)
-  o[1, 1] = 1
-  return o
-end
-```
-Alternatively you could use Julia's [array comprehension](https://docs.julialang.org/en/v1/manual/arrays/#man-comprehensions) syntax:
-```julia
-using ITensors, ITensorMPS
-ITensors.op(::OpName"P1", ::SiteType"Boson", d::Int) =
-  [(i == j == 1) ? 1.0 : 0.0 for i in 1:d, j in 1:d]
-```
-
-**Using Custom Operators in OpSum**
-
-A key use of these `op` system extensions is allowing additional operator names to
-be recognized by the OpSum system for constructing matrix product operator (MPO)
-tensor networks. With the code above defining the `"Pup"` operator, we are now
-allowed to use this operator name in any OpSum code involving `"S=1/2"` site
-indices.
-
-For example, we could now make an OpSum involving our custom operator such as:
-
-```julia
-using ITensors, ITensorMPS
-N = 100
-sites = siteinds("S=1/2",N)
-os = OpSum()
-for n=1:N
-  os += "Pup",n
-end
-P = MPO(os,sites)
-```
-
-This code makes an MPO `P` which is just the sum of a spin-up projection operator
-acting on every site.
-
-
-## Making a Custom state Definition
-
-The function `state` is used to define states (single-site wavefunctions)
-that sites can be in. For example, the "Qubit" site type includes
-definitions for the "0" and "1" states as well as the "+" state (an eigenstate
-of the X operator). The "S=1/2" site type includes definitions for the "Up" and "Dn" (down) states.
-
-Say we want to define a new state for the "Electron" site type called "+", which has
-the meaning of one electron with its spin in the +X direction. First let's review
-the existing state definitions:
-```julia
-using ITensors, ITensorMPS
-ITensors.state(::StateName"Emp", ::SiteType"Electron") = [1.0, 0, 0, 0]
-ITensors.state(::StateName"Up", ::SiteType"Electron") = [0.0, 1, 0, 0]
-ITensors.state(::StateName"Dn", ::SiteType"Electron") = [0.0, 0, 1, 0]
-ITensors.state(::StateName"UpDn", ::SiteType"Electron") = [0.0, 0, 0, 1]
-```
-As we can see, the four settings of an "Electron" index correspond to the states
-``|0\rangle, |\uparrow\rangle, |\downarrow\rangle, |\uparrow\downarrow\rangle``.
-
-So we can define our new state "+" as follows:
-```julia
-ITensors.state(::StateName"+", ::SiteType"Electron") = [0, 1/sqrt(2), 1/sqrt(2), 0]
-```
-which makes the state
-```math
-|+\rangle = \frac{1}{\sqrt{2}} |\uparrow\rangle + \frac{1}{\sqrt{2}} |\downarrow\rangle
-```
-
-Having defined this overload of `state`, if we have an Index of type "Electron"
-we can obtain our new state for it by doing
-```julia
-using ITensors, ITensorMPS
-s = siteind("Electron")
-plus = state("+",s)
-```
-We can also use this new state definition in other ITensor features such as
-the MPS constructor taking an array of state names.
-
-
-## Make a Custom Local Hilbert Space / Physical Degree of Freedom
-
-ITensor provides support for a range of common local Hilbert space types,
-or physical degrees of freedom, such as S=1/2 and S=1 spins; spinless and spinful
-fermions; and more.
-
-However, there can be many cases where you need to make custom
-degrees of freedom. You might be working with an
-exotic system, such as ``Z_N`` parafermions, or need
-to customize other defaults provided by ITensor.
-
-In ITensor, such a customization is done by overloading functions
-on specially designated Index tags.
-Below we give a brief introduction, by example, of how to make
-such custom Index site types in ITensor.
-Other code formulas following this one explain how to build on this
-example to expand the capabilities of your custom site type, such as
-adding support for quantum number (QN) conservation and defining
-custom mappings of strings to states.
-
-Throughout we will focus on the example of ``S=3/2`` spins. These
-are spins taking the ``S^z`` values of ``+3/2,+1/2,-1/2,-3/2``.
-So as tensor indices, they are indices of dimension 4.
-
-The key operators we will make for this example are ``S^z``, ``S^+``,
-and ``S^-``, which are defined as:
-
-```math
-\begin{aligned}
-S^z &=
-\begin{bmatrix}
- 3/2 &  0  &   0  &   0 \\
-  0  & 1/2 &   0  &   0 \\
-  0  &  0  & -1/2 &   0 \\
-  0  &  0  &   0  & -3/2 \\
-\end{bmatrix} \\
-S^+ &=
-\begin{bmatrix}
- 0 & \sqrt{3} & 0 & 0 \\
- 0 & 0 & 2 & 0 \\
- 0 & 0 & 0 & \sqrt{3} \\
- 0 & 0 & 0 & 0 \\
-\end{bmatrix} \\
-S^- &=
-\begin{bmatrix}
- 0 & 0 & 0 & 0 \\
- \sqrt{3} & 0 & 0 & 0 \\
- 0 & 2 & 0 & 0 \\
- 0 & 0 & \sqrt{3} & 0 \\
-\end{bmatrix}
-\end{aligned}
-```
-
-**Code Preview**
-
-First let's see the minimal code needed to define and use this new
-``S=3/2`` site type, then we will discuss what each part of
-the code is doing.
-
-```julia
-using ITensors, ITensorMPS
-
-ITensors.space(::SiteType"S=3/2") = 4
-
-ITensors.op(::OpName"Sz",::SiteType"S=3/2") =
-  [+3/2   0    0    0
-     0  +1/2   0    0
-     0    0  -1/2   0
-     0    0    0  -3/2]
-
-ITensors.op(::OpName"S+",::SiteType"S=3/2") =
-  [0 √3  0  0
-   0  0  2  0
-   0  0  0 √3
-   0  0  0  0]
-
-ITensors.op(::OpName"S-",::SiteType"S=3/2") =
-  [ 0  0  0  0
-   √3  0  0  0
-    0  2  0  0
-    0  0 √3  0]
-```
-
-Now let's look at each part of the code above.
-
-**The SiteType**
-
-The most important aspect of this code is a special type, known as a `SiteType`,
-which is a type made from a string. The string of interest here will be an Index
-tag. In the code above, the `SiteType` we are using is
-
-```julia
-SiteType"S=3/2"
-```
-
-What is the purpose of a `SiteType`? The answer is that we would like to be
-able to select different functions to call on an ITensor Index based on what tags
-it has, but that is not directly possible in Julia, or indeed in most languages.
-However, if we can map a tag
-to a type in the Julia type system, we can create function overloads for that type.
-ITensor does this for certain functions for you, and we will discuss a few of these
-functions below. So if the code encounters an Index such as `Index(4,"S=3/2")`, it can
-call these functions, which are specialized for indices carrying the `"S=3/2"` tag.
-
-**The space Function**
-
-One of the overloadable `SiteType` functions is `space`, whose job is to
-describe the vector space corresponding to that site type. For our
-`SiteType"S=3/2"` overload of `space`, which gets called for any Index
-carrying the `"S=3/2"` tag, the definition is
-
-```julia
-using ITensors, ITensorMPS
-ITensors.space(::SiteType"S=3/2") = 4
-```
-
-Note that the function name is prepended with `ITensors.` before `space`.
-This prefix makes sure the function is overloading other versions of the `space`
-function inside the `ITensors` module.
-
-The only information needed about the vector space of a `"S=3/2"` Index in
-this example is that it is of dimension four. So the `space` function returns
-the integer `4`. We will see in more advanced examples that the returned value
-can instead be an array which specifies not only the dimension of a `"S=3/2"`
-Index, but also additional subspace structure it has corresponding to quantum
-numbers.
-
-After defining this `space` function, you can just write code like:
-
-```julia
-using ITensors, ITensorMPS
-s = siteind("S=3/2")
-```
-
-to obtain a single `"S=3/2"` Index, or write code like
-
-```julia
-using ITensors, ITensorMPS
-N = 100
-sites = siteinds("S=3/2",N)
-```
-
-to obtain an array of N `"S=3/2"` indices. The custom `space` function
-will be used to determine the dimension of these indices, and the `siteind`
-or `siteinds` functions provided by ITensor will take care of extra details, such as
-adding other Index tags that are conventional for site indices.
-
-**The op Function**
-
-The `op` function lets you define custom local operators associated
-with the physical degrees of freedom of your `SiteType`. Then, for example,
-you can use indices carrying your custom tag with OpSum, and the
-OpSum system will know how to automatically convert names of operators
-such as `"Sz"` or `"S+"` into ITensors so that it can make an actual MPO.
-
-In our example above, we defined this function for the case of the `"Sz"`
-operator as:
-
-```@example S32
-using ITensors, ITensorMPS
-ITensors.op(::OpName"Sz",::SiteType"S=3/2") =
-  [+3/2   0    0    0
-     0  +1/2   0    0
-     0    0  -1/2   0
-     0    0    0  -3/2]
-```
-
-As you can see, the function is passed two objects: an `OpName` and a `SiteType`.
-The strings `"Sz"` and `"S=3/2"` are part of the types of these objects, and
-carry the meaning of which operator name we are defining and which site type these
-operators are defined for.
-
-The body of this overload of `ITensors.op` constructs and returns a Julia matrix
-which gives the matrix elements of the operator we are defining.
-
-Once this function is defined, and if you have an Index such as
-
-```@example S32; continued = true
-s = Index(4,"S=3/2")
-```
-
-then, for example, you can get the `"Sz"` operator for this Index
-and print it out by doing:
-
-```@example S32
-using ITensors, ITensorMPS
-Sz = op("Sz",s)
-println(Sz)
-```
-
-Again, through the magic of the `SiteType`
-system, the ITensor library takes your Index, reads off its tags,
-notices that one of them is `"S=3/2"`, and converts this into the type
-`SiteType"S=3/2"` in order to call the specialized function `ITensors.op` defined above.
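-
-To make this dispatch concrete, here is a brief, hedged sketch (an
-illustration of usage, not code from the examples above) showing that the
-same overload can also be called directly with explicit `OpName` and
-`SiteType` instances:
-
-```julia
-using ITensors, ITensorMPS
-
-# Normal usage: `op` reads the "S=3/2" tag off of the Index and
-# dispatches to the `ITensors.op` overload defined above.
-s = Index(4, "S=3/2")
-Sz = op("Sz", s)
-
-# Roughly what happens internally: the operator name and tag are turned
-# into instances of the types `OpName"Sz"` and `SiteType"S=3/2"`,
-# which select our overload and return its matrix of matrix elements.
-Sz_matrix = ITensors.op(OpName("Sz"), SiteType("S=3/2"))
-```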
-
-You can use the `op` function yourself with a set of site indices created from
-the `siteinds` function like this:
-
-```julia
-using ITensors, ITensorMPS
-N = 100
-sites = siteinds("S=3/2",N)
-Sz1 = op("Sz",sites[1])
-Sp3 = op("S+",sites[3])
-```
-
-Alternatively, you can write the lines of code above in the style
-of `Sz1 = op("Sz",sites,1)`.
-
-This same `op` function is used inside of OpSum (formerly called AutoMPO)
-when it converts its input into
-an actual MPO. So by defining custom operator names you can pass any of these
-operator names into OpSum and it will know how to use these operators.
-
-**Further Steps**
-
-See how the built-in site types are defined inside the ITensor library:
-* [S=1/2 sites](https://github.com/ITensor/ITensors.jl/blob/main/src/lib/SiteTypes/src/sitetypes/spinhalf.jl) - Dimension 2 local Hilbert space. Similar to the `"Qubit"` site type; shares many of the same operator definitions.
-* [Qubit sites](https://github.com/ITensor/ITensors.jl/blob/main/src/lib/SiteTypes/src/sitetypes/qubit.jl) - Dimension 2 local Hilbert space. Similar to the `"S=1/2"` site type; shares many of the same operator definitions.
-* [S=1 sites](https://github.com/ITensor/ITensors.jl/blob/main/src/lib/SiteTypes/src/sitetypes/spinone.jl) - Dimension 3 local Hilbert space.
-* [Fermion sites](https://github.com/ITensor/ITensors.jl/blob/main/src/lib/SiteTypes/src/sitetypes/fermion.jl) - Dimension 2 local Hilbert space. Spinless fermion site type.
-* [Electron sites](https://github.com/ITensor/ITensors.jl/blob/main/src/lib/SiteTypes/src/sitetypes/electron.jl) - Dimension 4 local Hilbert space. Spinful fermion site type.
-* [tJ sites](https://github.com/ITensor/ITensors.jl/blob/main/src/lib/SiteTypes/src/sitetypes/tj.jl) - Dimension 3 local Hilbert space. Spinful fermion site type, but without a doubly occupied state in the Hilbert space.
-* [Boson sites](https://github.com/ITensor/ITensors.jl/blob/main/src/lib/SiteTypes/src/sitetypes/boson.jl) - General d-dimensional local Hilbert space. Shares the same operator definitions as the `"Qudit"` site type.
-* [Qudit sites](https://github.com/ITensor/ITensors.jl/blob/main/src/lib/SiteTypes/src/sitetypes/qudit.jl) - General d-dimensional local Hilbert space. Generalization of the `"Qubit"` site type; shares the same operator definitions as the `"Boson"` site type.
-
-
-## Make a Custom Local Hilbert Space with QNs
-
-In the previous example, we discussed the basic,
-minimal code needed to define a custom local Hilbert space, using the example
-of a ``S=3/2`` spin Hilbert space. In that example, the `space` function
-defining the vector space of a ``S=3/2`` spin only provides the dimension of
-the space. But the Hilbert space of a ``S=3/2`` spin has additional structure: each of
-its four subspaces (each of dimension 1) can be labeled by
-a different ``S^z`` quantum number.
-
-In this code formula we will include this extra quantum number information in the
-definition of the space of a ``S=3/2`` spin.
-
-**Code Preview**
-
-First let's see the minimal code needed to add the option for including
-quantum numbers of our ``S=3/2`` site type, then we will discuss what each part of
-the code is doing.
-
-```julia
-using ITensors, ITensorMPS
-
-function ITensors.space(::SiteType"S=3/2";
-                        conserve_qns=false)
-  if conserve_qns
-    return [QN("Sz",3)=>1,QN("Sz",1)=>1,
-            QN("Sz",-1)=>1,QN("Sz",-3)=>1]
-  end
-  return 4
-end
-
-ITensors.op(::OpName"Sz",::SiteType"S=3/2") =
-  [+3/2   0    0    0
-     0  +1/2   0    0
-     0    0  -1/2   0
-     0    0    0  -3/2]
-
-ITensors.op(::OpName"S+",::SiteType"S=3/2") =
-  [0 √3  0  0
-   0  0  2  0
-   0  0  0 √3
-   0  0  0  0]
-
-ITensors.op(::OpName"S-",::SiteType"S=3/2") =
-  [ 0  0  0  0
-   √3  0  0  0
-    0  2  0  0
-    0  0 √3  0]
-```
-
-Now let's look at each part of the code above.
-
-**The space function**
-
-In the previous code example, we discussed
-that the function `space` tells the ITensor library the basic information about how
-to construct an Index associated with a special Index tag, in this case the tag `"S=3/2"`.
-As in that code formula, if the user does not request that quantum numbers be included
-(the case `conserve_qns=false`), then all that the `space` function returns is the number
-4, indicating that a `"S=3/2"` Index should be of dimension 4.
-
-But if the `conserve_qns` keyword argument gets set to `true`, the `space` function we
-defined above returns an array of `QN=>Int` pairs. (The notation `a=>b` in Julia constructs
-a `Pair` object.) Each pair in the array denotes a subspace.
-The `QN` part of each pair says what quantum number the subspace has, and the integer following
-it indicates the dimension of the subspace.
-
-After defining the `space` function this way, you can write code like:
-
-```julia
-using ITensors, ITensorMPS
-s = siteind("S=3/2"; conserve_qns=true)
-```
-
-to obtain a single `"S=3/2"` Index which carries quantum number information.
-The `siteind` function built into ITensor relies on your custom `space` function
-to ask how to construct a `"S=3/2"` Index, but also includes some other Index tags
-which are conventional for all site indices.
-
-You can now also call code like:
-
-```julia
-using ITensors, ITensorMPS
-N = 100
-sites = siteinds("S=3/2",N; conserve_qns=true)
-```
-
-to obtain an array of N `"S=3/2"` indices which carry quantum numbers.
-
-**The op Function in the Quantum Number Case**
-
-Note that the `op` function overloads are exactly the same as for the
-more basic case of defining an `"S=3/2"` Index type that does not carry
-quantum numbers. There is no need to upgrade any of the `op` functions
-for the QN-conserving case.
-The reason is that all QN, block-sparse information
-about an ITensor is deduced from the indices of the tensor, and setting elements
-of such tensors does not require any other special code.
-
-However, only operators which have a well-defined QN flux, meaning they always
-change the quantum number of a state they act on by a fixed amount, can be used
-in practice in the case of QN conservation. Attempting to build an operator, or any ITensor,
-without a well-defined QN flux out of QN-conserving indices will result in a run-time error.
-An example of an operator that would lead to such an error is the "Sx" spin operator,
-since it alternately increases or decreases ``S^z`` depending on the state it acts
-on, and thus does not have a well-defined QN flux. But it is perfectly fine to define an
-`op` overload for the "Sx" operator and to make this operator when working with dense,
-non-QN-conserving ITensors or when ``S^z`` is not conserved.
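-
-As a final check, here is a brief sketch of how these QN-enabled definitions
-can be exercised (the Heisenberg-style couplings below are just an
-illustration, not part of the definitions above):
-
-```julia
-using ITensors, ITensorMPS
-
-N = 4
-sites = siteinds("S=3/2", N; conserve_qns=true)
-
-# "S+" raises S^z by one unit; in the convention above (S^z measured in
-# units of 1/2), its flux should be QN("Sz",2).
-Sp1 = op("S+", sites[1])
-@show flux(Sp1)
-
-# Operators with well-defined fluxes combine into a block-sparse,
-# QN-conserving MPO as usual:
-os = OpSum()
-for j in 1:(N - 1)
-  os += 1/2, "S+", j, "S-", j + 1
-  os += 1/2, "S-", j, "S+", j + 1
-  os += "Sz", j, "Sz", j + 1
-end
-H = MPO(os, sites)
-```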
-
-
diff --git a/docs/src/examples/combiner_itensor.png b/docs/src/examples/combiner_itensor.png
deleted file mode 100644
index b52357410e64c24b59d5a72beb3047e7c531c74a..0000000000000000000000000000000000000000
Binary files a/docs/src/examples/combiner_itensor.png and /dev/null differ
diff --git a/docs/src/examples/itensor_factorization_figures/QR_Ex1.png b/docs/src/examples/itensor_factorization_figures/QR_Ex1.png
deleted file mode 100644
index f3572503a11f4f4b962a6b2bcf981d114a7c164d..0000000000000000000000000000000000000000
Binary files a/docs/src/examples/itensor_factorization_figures/QR_Ex1.png and /dev/null differ
z?mvp7;n<+wBtjPCwYGREc@8-2xnHMSP1wjDMd`d77r4IKnb5m5wYL|(_{G;6FVAh* zZBX$rVHm1o#-cdmnFa)!7I`3k^eI4k8XQzMDmg3xAoK$${ zkbV(3FZym}uW95@Rhd3Q@LRyWiAsh4M_|1GeD6+4a>V^y0StF+hUjGjCq1XDYm!@@ z^R?V*A)?>qdfBP(QdjcU7nAUj0K4Srla{~O(*xqWC;hJX;{Y*A2Y5NVpT_d{PH&gA zf+KLhe`bJhFW7m9px2elVxzV&?YRFqy;buar7X^*$IxHrL<~3q)M_RWwEyV1ND(lz z_8h4P-h=?)(EIT=d2B|}Jvh2O&jGOq8)F3QqIuMX{XcdJ3)rcDg1IkgMeUtz6J;~+ z*9mH=Kl)vqmu1vmcNY#%5qT+0criSrW}XyTty>ADPXDnjt-!WivmHa-i`$0$2j5^x zAI}UuZ!I+P?zV&X4qYaGm-(!I^QHgKqYZL9&s+W0gO=m1@YbO*r_N&wi#14%!VXi} z9nXcoE4|}Dwn`h18Kc1!oaS+KrncwPKk68qfZ&2zB4~3OXe62Q{9aj>_`U!7y!vX7 zK_T65Y40KT{l6DxR&n3h0g;sZ=4&U>clLrM+RD5w~%hH4-?GtWf-`RkS4BzL6#At zLp+hw8lQAS>E@CqaSyJ0Z|4elyO|U0^!tWghx{+Rg*|sAfqoWje$Yam90+4*atFck^KMDD)gE^bQ2%H@~)D89dg`;~7EwTcib8|N3?} zXm20oM2`Rf2yh_}wA+*Z74Y8{VhSiGrG(1uzs&ou0w2Tak=nEVf2P3S)J#lF72hYE z^(uTWf4^w%4}Ty86!4oa(jS6fK777gFe;V75Rn=E(|bq{zV=^oL);Vw1pz|{&-DLE zP~HI{Kxr)ol6d+RE2NLYZX#NTix9?ivqW1<;O>A&B z2hk7%>hRuQkLquxdfx(r0*KeDT;w2Xu!_ZW)&Do=%OVFvk{XLad~D>)fHPPr6$4cr zv{9L^Y{7LU?`0sC6)Z;=r$>F^4T7L#m*%g&CYXbgn!Qj5V7_eq+HRNuP2dRgC(&`k>9Su^U zlwi4vGTSOBp}&tAoPZzTw2Hmo`~WPdC#^by*m3tA;Lrzm-Y6l0Gt67kjH+1toUXCx2u8A4Y7a8vcI=L$*oqNBmSQpOla#=B5KZB7GR4Km z#~<-(2Awz&qf%6O9IeOd6^`tc>bLTzJN0uJA^6O>Oymcm@N*J)Zf0-occ2iR;}Q}Q z-gOfcK;S*cbv8$;~!yWbW$ zx!_Y_Ik;@>V$x=}n5f}KQnjtO{%?n4NlD)-L*<1!ixo?k50e}tlaQbad$!)%>KzQM zJ#}TMuXH<2J*~2LVf!5HITm<-rihuy6)oG z&enlD@pig({S~gV`dJjc$xl z_*}06<&lw)d|5SZY-m{iS-T*f&h4U{>0&wd0ou&WEPmz-j8vN@l(2}1$^zZ08{&EM z^2N!HsYm(#;o-1nd68l^!~$Ie+h%DJl%zlCQTooGfZ$r4yK)x`G_NLj03tSnL2n}c zWQz|EgI<$)B0MKW1ON&l;Jcx05m*2m7z_*y(qiuE{|uxI4+RZvxHJB-`1?{v$dBbo z4O|2c4h~lHL9m}QwGuO*ZDM)cZ1YK*4T!-_FymMw|B?e5rVfK}1AUZ9YVL z(ww7JW!jZUpQ?MkrLVD%#2Rld3C~rfNfAv?HwJTe6KyPu8syc9hTqF5=1>Sw70jHjITW@qWt06VT|kcXO)eaI`L0HIo|+{_EE-^RW+V+g?HVVi9|o7lXNp z4)nTV$ub~lh-1g>iSj|zreSAK?N}GiLM`y&~9jIBk3rzT0h!2Poq=f6b zG-A-IBzGCU?2e~OE)u#HOG``JpZD4`JlkI?(ysY%fm8tGVIaw1$YOv$+-JU^M$|5VBkq6(f^#wDYn34WP zfF8ub!GY#y>u09@X`vF)gj#kIBgdtpp-t-*-EqklW89g z6by`M%Tqc!8ao|Uw_h*CSt=_X*v#BkPZ2zJ$^mEp)UnRD;1(@L;6w5YBNzm123f!X zJ}5vCf?d=<$`=pU+V#PB`h*q%(Bg2~6PM|?+QzN_-2J(f(t%gT2eGzUXbY4+i>rI_xp-po4kXKX%;ZL$Hp+xspMr{ zo^Wz;CFe+e7J8W5KoH{e*IY*oi3l;zgEi5@^Np-H14C!bMDV0h@J|Cb_;$GE&&?#Jn4`?SUlpzigQipOSjMjP* z#CNiD!|8Ytu>uS299H`_P*8{j>by2CGg$<}KX!`0AvxR<5taNN{=Ig8dFaPzG+3pk zP4G9s&xRBEo`Kg`j`b*ZApYH)ZIYOG@7|mrZ5SP{c8ent{G#SUm%(-t${%K+R+)wV zcAc(NTESA;IBoV!)L50-c2?o*4=?ySZ!*FS$Q65$3%+lXH+~8V3P5IN} zIJPc<;3A!z{ABd$)i(rlR@@b5y_d91;dG=GP8&*gGA`V^0C z8~Lpk<~wA+EoyQC`=1?&!bs1-kRA0`w%DYUi?{=hABVxG5V zYlvb;OuY|9sLz#w`&&Q;vqin`WxbX*&w1{F7+9v(vLlf^1jfn(iarQw3s z*4EqO>fu=Q%rCziZ}|E^FG=e;^w6Z(;Ltq*`$fU$nic++p?IrNl^N22 zn+!rG;9dlLbnw3%8hjoo?O8HV)?UXYqjyTg(1xX7i(M{Ug3n+rMw%(#_8mMQK4o0-qq3Yq-w z=fhw+#+V7_>WOKWEs0WWY)!Y&-M__W56ojbGuk3($-ehhS;5RQ)|^i&Nd&J6*JUe$V8`$WsR1UkuZ9_xQK)@+D{MF6n-sW@-T~1CVmcnJa|W2$UjVPE7~FIJd^Yy99g9WBJ2y_FTnsV_QJrgA^U-Az@XS}wNfya58BwNrKV zQ&wuLh?489xl%nrTiPm)T*lb!R$v${ovnmcqwL9ftHt?|&UYS-ar275zyX8T^4Ld|m3BTk^$2q}B7 zpB|o^o__C7=TZ7>tySZi;QC<6V`!Rn_9WsFI037xeRjPnpX*pUk@m$SrN@^R73^Y8 z@uS*zO$_T!sgD3`ZHm!pl!OAm%pKSj6_u-0Euc(AR`|fYA-2q$guU;2TUL{ zh_mQ$!h9ZlNPp4RTP|9#Q<3-jUlrS_ z@<9I9{{Ds0u}y|2A_cMC^;m%%9zMVhj`+IBFh4SKogtdGAjr@bhsKxkkq~U;i^OyF z^6~Okxa_F20y8;bR$%-C#Tb8DxWS?FFvBdg2OHyp%HZ<~HvN&Zv^!Om%sf5x4ao%8 zO-wL>GDhV0j5Y#FL*NmQujpAkPx5v{Q{nphkJOxA=<$dvMFZ)sEtj0N9MjSCY$ zfBxi0D#!P&kwTUU3aVmF`mG{ruJez#I=BSlDTT`8{9ABC_2loC0Air;cZ~_89azIE ze}fW0rLy<|qFZ7oYmNq5Xi;KYJe9&=orBRGIqT_c`bL+nJQL8hZkT_-3SDeY4%`0G zsFiHL9BGieH`R7KDJ?zylkFTgkd-L4zE%aXPB8eUSV3jwr=GueLjhjeq?9xycKcfX zJqrU0Z9;cq>{ru6l#H-1W=gFeAi$pCWt1Br`A 
z76hDRGE`jLFd+F_p02U-)L2|xji^oeEi9;J!5+0G0Kpizx6-#B^w#}iv6Xj7e^sg; z$kDR_x0NGSMD`HI+#$g|DEy)c`6_&`FF1;rTSoKMkRymTkvKGFbiQSxCG5X7mQ5oH z(+$aCiEDb{x7ro^p2zJM9$S;S2ULcDUlfqut^k?y1>PPpW-%E7dqbE2nL&!#aDwhI zT1qVGiZ?7Q?A2i}Y52>F+ywO(q^uq0Y)aQVwLScQt_!6r%Cg0kM42ALLZ6;WuJ7AK$Epjjmh|X9JUI zXlMY|3|nJ0@%hnYK&eCX`JJ3l~bz0R?1spuRs^;`wZ?(Xg} zgH;K1hCp3Z{EitJ38gY46hA*dkjq1bVbJ7gaE1N#m;PemScaQ}*`B#Y*PrRId}C1v zr=ES#$C$6GAV1E;iIACC5ev`TUfq!L=*c3WgfPX8Q6qtg>t_w@1-xoK;3KUluN%u9Jhgxa3pB>{-8>g?j!t@itZ0VB|T8V>OIMER3mC2N6Z+#gC`a=tq^QD&n_Pu|6jKL^M0n))F zXMn)X86;4J6LPHdmC#R`j*`O@`F;nS&PwVv48SSKtpz93MZ z0EYj>4JZ4a34EkbVPXL1e4r$Db&;`9|HX$}3Be=fi+VZk_G2Tya`B7y*;lEP(^eZHTF z2xdUl@JpF)qk-K(LJ&fcdopMQdm!UhEp$*-eW%9?PgVfKEW}A=BNhFWdS`)e3#Ei|%4=(ORMA zD4~CQqCB&K?|on(FRJX{sD%c<@o%HpdG4NcTNagbHw`bW# zWaOe$MN~UdpO&|H1!vZZ8IwzK=+6sW0c1QFD10oaW)qH61wU(yfcIe}<3%Pu}@$aq|NW9LiH7L=Ti10v^{awG#CnB*lk>nq4|e zCW*mw50k$#6C1{@Fn(dMwr|x=4+vxYptMow@c z)Q$&MLUoTD#v@#Vxpqr^%2!wxBkGFv8JYqS+^NF6!Vy5qpIFh!MgaFy?grB#t#IB_ z_C6kx@N`LDx;SaiPtw$z5^!~;{`_l*G2#~BVndbr7&GZoHBfiK4~S52XUwRibY$dj za+bx4FsQOZlLDdzVCu0i4ez680krNaIy# z?q~ZXJqdIn0LTz#6dMe&-<3`XX(|}5!;8M*ZOdNEB%|S1uZ!DjxkMHTeu6fx@=#0~ zmnG)ndgih{`UR+E1T|{y*l0Y@j3tUfcD9EX>CI!JWQW!b77$;g=jyD}j1vj#&Hk1= zDn?9AO?}c#`uw@F&^77(Me;o$w_i6DXlU0kKvKvY|sAO=v<5|u{+ zxj;>}fV)Zu5cAu8o@;l(r?wmgNw|6~F=LoU3U*bISK}SSWgB+4W7^Etp#cWi0&LL= zdW=BZixR(4UE!Rc**KkD(9EcRZJWu5CiLoXRr>pag%w_1J z$HvFR4G0|>^&aU#=mvOzNMt#04abOBhmixdg(3k1%1W;_^jH-m0|;CTw%bsbDpbdn zQhR{k+KGoC&%P4E!2#8K)g`iY8(o2-f1`aXmUjmB-nbj~xQe+Z1RZ;j(Z%UN5FG)p zI};YYHhF2`AScu+ip465)9Sgg0AWS^rF{Mpy!mn-w_hUiCj+YeqoamEVTPo4LPJ5$gVO8@0M=#w-vJle%FKz}is6 zbRZ-0g;6Km4$Z@kSrNy?#k~efW=qT8!iDD!diUNgBwdg+mNds?kglM?A;}2$q7W{N zz0rJf3n;{JqD;4wP?-&?3QWD1fd918YQlw!NHz7#Z9jQf1{z!Y9^AW$*hv zy3WlsQ&HrwrKZ+yA-J}-35^#0fsMcf0M(yG;7k$q*KV*KLY6d_2Cz$=6ctB;7{PMz zXttuR$GymYlq2W>UqDg+s7*0maYZ$*<9K|eAQzDYubR@DC(iJeZFOlWK&RH0M1fG8 z9|%8~pz#6t;zR?&b{h4jfH<7s7!=^h<8bFy=Z|#Bn~!enF}zC(-oC>{N0WRT*?F@k z4i~b2dpx4>!f9O=UF*9k$ZxhaK|MuBS88&1=t7GSRZenqGT4?RLZe}$B;Q{;qvEtz zwoY(QpjETKIJQ97{$5}V0Yr1aU4{+d>QP!TtirVokksv&5M~g7xg)MIH4|gm>)Yms^!?+ z905?>UisilqK$E%c4%@3{o{X;4Do5d7V zXb%B74}Cex2iIl_AtfbMd>PamMIFm3r`^4vr9Cu|Cl9euhR{)4q7U~BC{nSdaXJV~ z$|#M)&o+6W18Wm^bgXnVagEC>T;1G|SMDk8%Nw+g!}X;eKLaWVOkTgOU%XIzOyR^y z01Bs6QK<}?R*@qBMc@jxH;ECOxl*zQi+Z85JSLI!2-W@I3=8>&8q0x1qwJ;o@g^D& zL~IF-rf5N!dhg!`WQRi^bvUvUrJ*=iPKkUB?HIjCl1C+p*%_cCJ^9*3HX!?U`6FlA)Z`rOtCuSM% zOdevV6@_B6r%M{IhjNiuui2_Z^9NgD?XW%xgB2n)903m1E`K0TF*sWP0es7>un3&D z?Vp^80p(vuim13%$nK#50X7qLQ+0D!DVUd-EX%2OEng}OJHCEZ1*N!uJOfS|U}i89 zJqQnGHV2$M{sc~AFqkXv@~n}f@H_23efz}c4d8ykao4V`WD;nDfap9q<*=6HArblU zz8HZM=)b#|L~u=7EKGxKt+z@?n%~(66&J^5{`}*%A4Xc9d1zoDr0qfrlAoWS7(H|{ z)jq(1jDW1@6=oAXz=4c~A`+6-QtZ3^6Na;|v;%+&&T?lYp0X&07mLo1^}o5CWE=Zk zMLl(;U+->ae<65Y&nx_1^_!p#!`QCm4y2#jt%0-&;keaE>ZUjlG&D4DaByvbuqYUG z&i4Jmres(SY^|g;CveAFW4FLbV>XcFZ@|}Ji>QwIS%|;*MP0S#r&o1O8&QssFMpKxNtz(| zsGsaj*B}D-7F4uQ%#r``Ym+cQ?65IPo07T12NP}p%f_!PrQY&=W?`O}E2E4J2JMr` z;#NtRW>Iax`NLqI6nb{Mm{qlK{d)78YK-DQ!LxWT&I(a$HMKj#_dKz5!LjX7|N5Qk z)t17g!_Q75;1DfGs^idnHVZ#3eeIdUoxwVxE>=Zy72%crC@P{}?M*@%8X5{qNTgkk z03r^}Hnk#3z*lmlZP(`L!3txs+|`)F)4hcaX(KV^WORIPXE-2zL8p5ur2S(-QC2#E z8_qmK%Ju5GDUeL)0;L+y|6#K*cWIZ9LnZUw5-a#1#r0rmnTn}|f;i!G|-oe71a?PMeG zeZ3=mz(7Vt42Dve`fj&^VuXN`d~x7FH>et8OCrl|p+&RaF<&#AKQ4Dc1z4LWoH@J9 zXKL8jBA>0N_E)>nS9=m<_&P_Pg3S4DMQ8wLnduU&O1*Pb22UVfyg1nJnn9$Y*|M8I zU2%gPY_%gZV*SC*A|$9cwZ5_vI60~LZTMWKInYw~JN0%S#r|)5v6ErwA7i}8FPow_ z>qM(+ZVQ3~%QD)Fk(}nq&-YSzxn=-4Z~M=~E#PHS4iO9b-kB-7jI`HZGdUYL%^~&y z82}S4IR6X|X1Nrt-DnZo>Q9SzaT@Yo`16Y>*k)c7#oOgPW{fbkvyn^7dbr389*dj1 
zk?&SH$vfvI;+)C9&Pw!EHA~9B+&190nbOuP} z4wgcQ!a|E5T-hI_?f9R;q7)MOWE(YFPFB0VL(?%E19oc*ZLvzuQ}sF39&ojz z*zhtw-w!cCx%JfX)m7H4H1mmGZB>XwOVs{zUR3$An=n^P`!V>&(lmo!{Ac68fg z3^?o*I6NrSb_2xaOuiS##npUK@zagQO24}YqJeT8W>tO7>Lw#4C0n7TO|;9Vc72JE zEL=Ez$AQ=PgCl{GPd|s*$;qj3^7K?Ra$;LI^eG7-7Mxh^=JEY*&I^m>jJ?!_{b!Q3 zx&Wi4Jp6R{=iotWVYcVk@|`hl!8`Aa%bD+ewI<9k zXFv3soGNh((n;N>7_2R>o+?ZpM(rLV%2SqTjaWVWmFciEX&2_a%9NU&oo(;trZK8) z;11!EBHJEoG%@_K&ZJ6uvVN_~xqQlUy-gvJlvDH^63c3x#nc^d^n=oHN@G!2%fXa5 zW=9MSSt6r8LsQOiv`Gs!+G1R>YoBh{SQV}#b|#eBY2VeUAQ{(HE%EWi#$cXi!I9Fs z-=_1Ei%2h0R*cFcC0AkQXHow6XqJF?>A`G}j=KSDP)N(tHi(4ay8M)alNkq+-S^Ex z8MQ!atDZc4t5uYAVw&=2@tgPJH$}lqmkC-**>ZrkERGhRfjsP{3_xvsLY3~X(qxG( z?ulp&HvMEJCnvX9bK6nHv+}zTrHiIoCyROi9?s6rPE?$>T$oY41Zm!Ky2HB@p;}hr zHkCvS$es1W0lgpZjR7SP1PM>n4>6>M5G(xp;aW$-(IMO(?U!e-nlz!1mtg2$YUb@K z&NiQpwtL009hu2*MJY%ilx6j4tFY^)46rBiTz%B8X$NW+m^_NW5z%MH1C`NYrI=hn zv$J}vFllxf*h78l?GVT=F+ zP}lQ;v*%&)F;|iS6+jvQx;kOfj(_jf_~6qT?Xh<_2*2sWGLqHS!fMx+z81PpZJ(c) zA4s9*zzPrvLCywFiiF1|F3q`l5kK&>P!e4Ch}7FH*^BZaDr59`S@d{WGAC!WjSMCg z4H=OBnnfD2q zI;LOq%_T*-f)Q<@=v0ci;z%zXsJe&r6=f}L6Fw1KJKiGC^S`BB;1E|1`z>*}{;N^> z5|_(8u#H2g+4H3RgW79-Xlk~e9d;1_L{oi=Rk`;jCw^B#k8P{e2KjtN^nd|@TYQy5 zc{SEqbiKulMi!_iAFUj{5b2|B+~Y8^M=9vcQ- z!7mi8A0kS(>cIb{e=QYHNrU~4$YizR8td?j z$J-JVC=i8R`5WMZ)Ot;I03ChGbXQZ_+T@sM{_aX2*!DFP{e{gmtC&&v8JFQek8l__ z8+_WF%$M38)5)pEJS%nMef2kUH@H#<4Kt~noE{_^*}h#%Gl;*DZMPDm;ilgXTD=;0 zvZZcN&<9ka=eO)p(3EJxu%7ER;RM1UDUT{;$z}n!2ROaYDk>||R#)vxpR0tEl1a4A zXli_=_-0~WkbX=Nm>Q4g?R1_aR*YqSt&TTJr@a^1fBU(2X0ev8)kL#!n!$Yawz?Fv zY8yD9qSpCbdRHZixiiaon0wff$FKp!!YqdspKdxVBAXUB|^j~szX5T zpXZ)fjiAEjNBuxG*KXQ}&&-9>h(aepwmIL-1C*+b#AQC_TAcd``6AXTjpJV*DyChS5Wa>;Ame(1!vrRi_+05=bBOgR*yd(oZl)hBc?=qGLT|jtUZ zaVeA%3%lL#+K%zyoMXWGBAz`LxQ15;9AHmdXY5EV9&e4@-Idg&q49sM%l@JmS2fuA zxc9m{nVu>Ni6?5hp&)%g(;X`w8b*+#wEDs3~I3JFsWyih>Yn}T=NSdRJ@ng2xiz9{I z=0i~~u@S_f<~|-8687I>gnm}sJ8#H8HVv-Gc=!f zqW{^9bF{mUn(xlB&17gu)?9HQTWU9DXb=Sii;L?1qAjskkx>)yM4a32qJT!PD{VM{ z?Z}Zz3-U^tC1#LD3;0GkN44DjqM}?@lM?Hk8PSWhHZe=!65}k@RSjcunCC?k9-FMB z2=Q($>SA5T{hvowVyYXlV`L)MA5vwr)kpbW=|&Ph@E9ajE9=u~0&(@kEK(ojZ1S?A`pYv}KN7Z{6$~O|Z{0;6R}8it z_vCknyfLHYKkuf`SET`I48$-wqE=x0i@e^B-;BgSo&?zFFBBdsx0xeCo*7}l&Q{18 zua2kSG3Gyrh2^p!x-e52-hn$&{VOCsBC9$B<{ixhAveeG9UV-L=>z>|ySogzxfQBD zECT1tWdgh3Oi=GDJ0f=~k%9iMS>CsThYj~hn%^V$Kz>FD%?OONPo^NXE*R(Gbu%?? z)eN0pD2@uJmp1B}Yfsl3;c;0h%0G!TiKlEaNsxW5VwnaYaD4#I1{{0ry|X&Bh*eR_ zLH04IM%*oNk?M#VC+Nr;xfF8PKkC^7(05eH3`dIj9=L%D9(S2ia z3bF>2xP`W84df|)zVxGyDOu64Fk_}P?{r&^40t(iybc5YN3(GX^n>-_46L*{Gr%NBGAVB4v4Zt#s@lKaKvVQ zXQ!mFI+A*jm)a5kSnD;&&DX*Q(Vg^W@V2cCUOkA#&H1THgWe{??}VE~m=Qdv5>YVy>F#XMJvQxPi{r4X&?Plm^=z^sl56yWd2(4zzbu22xS*%~ z!s^y)${Nq(LezFRoD;0St#MokE?GEH)pejMX@1@H5ct)K_RuWp7FDEE3j@Z%c5BHH zT|fy&Jj+oQsH=P2Xs79SGLW+r+o@FApdPCnVPLakqxM$$?jqq~K{avjM4(w!nsi88 zk~khhFo#0Xdt*VgM!Zm-+m9Hu6ifBR1YI)=RSG+w-|@#JeG1alD7PI3{<91E?&0jm z*%`2|93T3mno9Lpz3kMRd!_LoDOM|oVAD!s7?i&PvE-*?N3zg$Pvx-? 
zcx1W0L>72#@osw{B_q=gFCeQfwwrQRhwYksxdqN0!s2KQ0AW4sTNGo~W!9`%Rk1;o zA->0qI_Z^|hQhC5-4-J&~DW%?{PL&0~3-A;Y6!l%>ED5Bn z+m(q|rC0geA1mCXi(Y@ifT4u;ISzDncYSs$QwT3m^h#oUlx!CKF}FbA%&g~Aki1wH zj}u-`MNN&Qjt&W!J^)zHDmI+u>UtdYW}hjmH6jDj6d~m?7QDRb;LVa(@w*9R&$# zzL(VHID~--sa5a!;!pl}A!L}k@7+$stG;ZG=JAX|4?n9vQUD+R7l-;310T>4a9F2T zSDT(g^D3s#%vJf&GRuyFxPD3H@?xv%r_0$conQmM924k%fw&_*b=y5drERE1>C;{O z-QR0bTzY<+tFfd|8EMba@D!K0L%PQv=k|cjAy%jFvNmd&$*hvl`Z5ICi}%F9qvW2z z%*;C@lt~urg2vhf~!CKGmzul)icHDUtLLiS?1u z{6#M&smfDCNlM0-yn2%KaTNYrvENQ|aG-V^IvMP{3dL}1RlU#S-JG8pmiz}qUL)lLMh zXyUcZl~r;Q5d!nfNzEc%q%Tr6!`iOVioWB7A<#UQoY(G)djeO13IUQ(wtWgg9-Wmh zlbl@pHIKT!at_bl3#{xT*hGhNQRT3Rf5zqj)BZB>FP@o`lQ$yFP3NdZWSpFH6+km=4NeC5S8cwv2BU$N0| zAm`Nmfm*ZhOza#&7|1oV5X{rjAY+{N?n%YZ2m}6`nz26x9N{^q!Sy_o&lqZ)D~Djy z=ZLYNze?h%6<0nm?6RtIL}syM#IZa%KXK>8WoJv)Y8Qs@zSAEi{V+5k;#+frPULaA_t;}#;B5B?@|`tzRKo4SKSJ%+0P*x zoj00(5tXEgEcQT*Xfpjb#+T>_2ex60w4+Jm5GOp9fu$$mc~TXm*K0JV2B-sf2;;>c z;wUY({`wCac~4>!n~nQ%pLOm85a!rM51Vy|K(FS4pBb)FMF)L!?Rng>L_}9L>?YFB zF-4=lZjw-$c~%%1^XYYO$MH$4pM1% zY*{1f7UN1tNjpW+ImeN38qz$WkIT-P~LSBo~k3Z#cC$Yy|w^3YIkWS#A zuo8A0;3_9WvP~l8*LM zHLa=Te++eaqoDzFOpKgsCz^gPzVh{(%CKBCO9leuGx2*g6EfY3fC2pi7#8%CD%(TcvyI+Izb1w!p9T{Dhhsf`%`=$ru6&AI^+B1 z_@WCk)wLZDVO7p|DW7f^7*Y?L`|;EE@#SUBUiidJu>M&GP0!6bbhQTN`ktyEy5SKa z^tFoH3wWOByJS*#tS*_*PvR~fM&Z;HGLxje_wc^VruS(j8g_MVtNAIJ@UZh>j4diq3Y@)vJJsE(zfLILi zy@P7hKiNsrfjv-2}B`&%0()c`%=jG=AJREg5C3!6~Qv_{eQmI zZytS+wE7?L*dhGAPi~>V&er;Auity~1KwfHEl)kl8ZT3EwVWm$g1;$0oq0z-{EpP*%N65hz9c@uY#CM3k(7#jPUk<*riAKm`FLf)D{k)Ytf zT{QL@xX1pb;XnuQG2pQeF0BRY`t{(pl+xo8pa-jbWRgGW0vTuy0SDDjjF;aS7oGw6 zntuLCA)v4Sw`P$^hJ3hyz*xr)Nof3q$?B4}tKV9;3vVpBe?gX@e?NTML2ET=*1! zu>8;Jj3{_fdMX(ZvpuV&F=DTMcB4rH$?1YY_c-6Qkb>Qk$HtSS5R{}1Ur=S)!Mud8!lhi4S;o%A~* zExnqzuSf`O6<5v!#|h{;Xl}7B>aOclEZyxni8vx!EL|$jn{~8nEQmu#|F3V4oq}IN zX#xI(>J2MHVv4DX6baTfc~QZLU;fw!WR)rrk!6bo#Q_;KPhtQy<=6atYHn7G4H93T z5noK)GN!3&j;(4_@q_A&m zo43Kh-{&?x@)8i>?ykO>`T4I@D)12H2 zL-^{!L{zEUwrE>gcQg}$ZliNrZbnNfeC;ojK>~SK48Hc%xAg*!o8vxU_?eOCWkv*( zXL3f9?W?k|cqGu3qgOS6$Zv#yo!aHHsg(CL<|KKw-vWuT`gqI0T`^3PSfZPI>)@8t zBU;9;zmUP73^M4E&_lq#9D!$$eoLz*$vi4oE1K^PO)B$67=_U#vM)w`eQ(_opn_`w zAkS-7I#`&Md=78lLs>4f2aRZZE-DN9$n*s~gl3#3%C9Lm>rWK{4)n;R9sBzGk_YpK zr{<4UPd5N2$o{$&NunEF!}NVX5sNvSdYgRKgYox)JmK21H2H|dDD-T`a`dB1`Sp1)k$w}!I|(TW)zCIJF3 z@WJ9?16diTCXplt2R(UUd~>|X?Ib~>(aZ+Ecs^xyDMU?LfU7W5ahcxv`t@sPiU{OQ zLa%&Fv`C=nY<@eoG8~p&*Hr0N4W>mq2~m}lc~h(IX{^u~5cJ;UmzY2sS>oHE?{RMM z+7i_*J}93wYRkda_C1OG^z5u|(JQ5@SH!$l@G-1t%c$m@vW;9&MNt-nKmlewKi@Ad zFcTmbB(Z1OhK2he)2?TbgNv6Y&`YDb9!#X1g3dG?t89TFoyvdL_PGIPIY8D8{aIlk zeVxaB2H47o2QhrY7~qp?TpSG=!dyhzBnx_Flz12x09m=3H`ck@AZQC;n}Yxz$(|(W z3Hi~qNV9y~H?*YIM@0n9F(iqhk+g80U`GcB33vC#HyQq;cu##ABRb{~Bb){XI+sft zj^tOXK7T@;)DL?EhwO`Wt18zcL?~LZ(=h=` z2|k-)#)mkLgbKYUBk&;2_Jr#3Yu()Rc=*IV^y&i70XI{DoSGWxY=8L$?9JpfMMTA1 zt1i>sU#lggip2iH!ih1+*pWC&w$IU~F&c90dLlTu8mP@n7_|jm_6^|^h=1B!dWi0H z3q=4~`tv)UPE^wC_e2@>b!qv`JMz~$uXi^G@omR3qoYS-Ojn)H~>*$g;&*%#}kUR$7thE>T}p7KcI9FQ?jd{%^h>ysghp~ zyRuz{Rx)fJlQh;ss4c7W`*K)4{h|}}7$^DcR~11ARZ&S&tR2aAr8jt`FU157S{E#> z5?s>Gt39xTaed6t+y0j9<2F$H%phsIP_PR7is$91B$w%vG)<;uLAitiH&*L|T`=Cc zp{tLHW+>{t^Nh&*HP4M>Km~mqy<6Bc*E>aoGjHhJZZT||^*s0?TyP_uA8+ce=&u?F zaXCi(N-Q`y8;QM8X7}9st+Fz%q_i%YCZKKpeK+1EOLSqj&A1^}bT6oBCEBi~Bb=6o z9zmV7SZ=qP5LX3#tR=l6^ya^upE;i>o44!U6F!gn5&%lO)kCKBwG`kR%jBDF{E~z& z%`fkO%5GAEi=o!5kTqiAL)C$r=cvoLL1GNk+YUA2ip)}9y^z3=roXtq z{Ye^3VRt>&4`dLn1vuTOru33tMc!I`mwOMAyArAbE=>QIw* zmEge{=3P+?252+s!Hg6jc1GVt>bZV_N~J*X)LtqbcnkL(iud8J(}WpN?`GXRCd!S7 zUP0OBAhzPFX68+iDb=g1UXE;7lAd2K=Lk&>0~>4LqcoMu#!Z4to@ICDaiq3eX(;Jh4h^{`7SS$t*Ix(G7 
zku_bFuAgeTWFQB&8pAob#7fp#%~91+8;gwK&h1}kUBHQ=YVO2b2e4Ldm*)#kNQ15YB*uT6tG>_3wT?;fl;9ybiGFNbt}#ky`m4#W-`%=Uo0k@W&kHdbjy%Sf@N zn(N}5AvU7O6dE!lcxZhoQoVmgt~jv%zJF?8v> zP1E^8ViUdl+yufCH-aQv+o)UoTHZ8S-Mk9}R{c~Gk$_bWrgAWyPAau2=<%~a``?no&MA*cUZSjNohXR4EqRKsrSSWdD?BbOr=__kG|A7Q}ThdrC0@lG-V>%ja& zyZF~SzN7>G=Xjl6PWlAt7Z?5GaX~n-U_UDUcBuQ-AU6Z+0%Ec*(dX~u7i+p+(F{*c z4m4gO1E*3_Ug?Q^U6NGpGylpw87C(Z!YO#W?u~G})bIy3@MzYw%F^9xPuffwK4n>SFhIy1O9e&T2(Lu8* z5D*d=;JMfs>hAwuPHIzwfZR=Xlr5L=1_bc{(mfVE6wo6q%&mClQ^bFsBExhRD$R5* zF$l2MFg875K#WJG1)0dZRCu?P&9jD{>C7VIXXhX;kV0zDN%72o>-fkT1ps{5 zp4bvN-Ww0jrpaP^G?;nCdy?_vBzR~k|3vjbs(ed>{o;|J^DU5cz^SPJEl08N6ZJ{I z4fr>{U(dVyuaLzVbOsXn?cg#w5=|=An{U}288%4=Q&80lMcd#elLGS~+D@9y;G+cc zE#KM#yMA5aF{P8MAjxPv^fBjsWFCE$>LkvrEvN`+Wgrm-<1G6-@G2%VFQguDzWl*- z76wtpTEqL?mjVf4>MWE5p*5LUQ1dL4%1xpKYL_1jO%zS)<=;(jcwBLn!QCYs)|< zO)1oCxa;xR)&++t2xK)%wP2@hnP-x@bf#~iSdmJde3|6nE{|V{Dq;khPhdhbn>q76 z(jW-aBKNS8|8fr=!MS0#@w0Axvwj{Dt4^(V03H*1yk5l$(h`HZn3TCRB^c)Dey!%z zL*#v$Ytu&Y@Omx+(6L(hkkF$^KKh3yo2v=CuQtSq6x4v`$m(kBj!(Qw{o+u#rOOk5 zK4prFLTeNpm;C(vvhCEKY++ovyCwl`lFnQl9s!six8x>3SOR- z7(FuFLu<2TZXJpjQ(B-d_}R}KU8oFI`398K%4~i;<#zPp$%FIY8Og}MNdC#|Qq6$d zEpT;y`xjX!^z#-xdsV9cb$CQph8sVB_*V4PJ>>|^X$Pj#gj_{u$+Q^oer|la zZH}R-dJ5I>=!&VJ+>g0J4w-P*GcsB?*$j*Xa33LqG@@IPN&cNh{D43Ov7?i13dZOB z7BDbw#%?~!y!g=2pz7k{LL76TI4;v0+pz~vEgy@pdjOWLc2yM994Jx@O$&gU7=C@blYxAfPh{k4b!0{!DIa{F7aEUJ-PLA(~GeHEy$tsa&_|}0W^?I1U zO5&DgT9Boj@+#5?HxUi^&zD1tD)V8R85PbZo_K54N@pMN><|VwBej|{r|aRZ_Gfqm z1RJjYs@wc{e&j7%Kktf#J6vO=gtFZBN zA}y|>%t-4EqP}mG?^cTAR64XY&+reQLfw-`Cxwm!k;-MsX=#zcXnU9!W7e_U^xhF) z_iP357VH493}4#y(m-E<8LwzPJ(PPUbcGWxMjVbyR`^l!Xv{4HvvtpI^1-`7&dISc zUWetnq$E$WXPFuV2hfxjpv$LJQ#{}B9+$6cy9mZsFgo|UDk1cjgTHw^0X~jjW8^+L zKaD1n-dyB5bmX1I_?~@WbfOX>uef!#ThDZS-W=i;xn_8(g)|6AaCHiJff$B~U}X}6 ziEDf_X5vi_Ht&v@(%%{87HrZ1w_#yf1JOYwF+aD0T8xKIa(rq-e!u-Y zot82$u4R*(>wVVCW?3aV3^*J#T2yDNgE~t5EUk}HvFw~sH6P1+jyKfp!#<3yt(`X2 zy=tJ=L3?UqVO~Ep=Op%P_s|ur?K1efGL35i_8tl}Cm7aWC2rXq5_m0j{YuSMYY=tZ zH|f?yyi8G+xi0+U9SY5v~OXC2;@d#_Y#s(=pSxB9WULjc!KF;**m?^@w zLKL%VGJVE=bJmGM?R4n=hk+c)D_(+#Vo#29I_68L0_m#)eCx#;xs>%K&JWis4{EW7 zT>0@>PNII6^jh`xc~wLw<0lC{3C}saTh!dqBpBFXB4E0#dHxsHPO}055gCw}@CTJM z$$Y@jqJD28{~KR{7rjtKFab3zE=q-xtPFilqN}UxyuXB&n8czVihMMmMFYd0cQkIq zyT*Q{$jw0{>3b*=Lwhr>97t&cy>wB{r^d&guFZu8~6kJpP%^d|W#g*o;J8$10ay`B}yWz#5`d`SHRFINht3# z<-~nS@5wtOF8B|kgC>)9sA%O~6b=AQbFtMI=J6?~clKqq8kj2k3ZIZ0{k#Cmzv#Fq zMhC3^PBBiN&l4OswY02kv>FcB0Cl0p&FjG7EdyH3C4)X`nty{SGh%s$p5rMK?GraH z=qWMKep883PQ^s@k5yYUcZ!kYmPC&fjP*%oJgU>1Np5ztZDE;f#Bkl+UrxtBMb_rmA=?x9kSx4m3l?7 zfRAXZ1?u;5a7E+5dQdy3#3ITUTdT#TfIaJCZM-8CQvShg>_jm$!g_W6=+LOLgeiWCfIllQe%= zezgK2DA!vQjDnXLbYN1v$PPXfTX8BfQ1eRWu=^EWV(E7!FCFsy%^}NL{nE5k7n)l~ zqK(aAm^|t30u=k>OmgIFO*_4Q6K+_l*oRW*cV8^;{+vNzvJe6CQ@-eU*prOn^~>JL zgGIkBeaQ<8Uz`{I22BE?di%9EMo8nN9%%5nfxq(hl7Uw0JO8QTG^uytLwQOW&!1ME0`)1_`coUbDuTfR&84OF`;dId3r{zV zrk3K5X ztDz%+7dL4@?#*pY^3*yiyjfOtF_1jNN%?#N0s(lbU5xP67LcerUJuK*ya!5D)?yzd zRBnb3TNII=K7ERXg>}ot0V0(@&=3voe4Hd`S33_=3FA+dIDxd+GP2s;RAPo3)k?&pHp1(us;y8pL zkze{j6dpyTKru|l_hJ$fUC<)o9ItG**sqSXt;aT;*|`2(w`XUe)nne*+vsUjMEcjX z3?DwbZA`0a_x+S12;k`A?so@!x4QJFn@TvlNxzPWzm5q(GWq~|2hzFGip0J0j;D6M zWS1CxXlo;^wtW|@?lSVxPY(iNZa90+EKf_k(&~$oAP@adL_C=-BdLrh>+Dt(%ic(U z8V{LbcP%I7&GF{M8;2irLx*9BSMEhvi$+hQe+LbZQ)Qi7rUN3L^z(rYp5>2>eC7LyD1*lbE&|~C1Ci2NN zyf=yI{&B0 zsXtfon)H$cJO*qm%sZ&4$U0)XCAdZ~^iP9oVHK$K#{u^oPnb8+@hx)6(?5B8OMbzX z2!m>fn%&C!C=Yb)!USM$DXj~vVJEY)w49MULMt+Hbw>w5y$N#-Cn0XMV%zm2myFIs z_f-00S{jBFxSsY2wtRMlHn8u(zh<)snZcmBE(M7u{(eK}z zzp_Z|hHst36B~k|DnpQX7XIe#P)n0}m#FDmKP`)ofP^L5B94T5=+^10Pl!tHeG3sa 
z%3svcv6`BWt{b#JxRPgC>uaT%ZZkh&dYU5FDYzqR$TGuP8q{)+(E%d;RlUf7XJ71jOj*0f~=SC<>X>Rr*7M+ZR>^u`!IL zL_X@d=8MWTmJ93JU&xvbjzt?NPXn))?CjG5S_PRvtp>DiEzBxSd&HL`z0?T+M540H zu;l7<`5wE@Zk1x9sc$;wDm*;A-0j3FPb)epN5_l%XH70yB_W65r!#c33r~@%V3E8_ zB$6Dpxgk}aGf_Dp6KV)FnJ-VuE?J3$sNg#c-)1i?@_rZ!CiMNE??u~i`t@8l4$P0# zbFw3}91n6XbfdZ)9>z1uQ;ebG;w=FkiazPyy${(BN2kV%wQG87+p`%JQLp!%3>Ube z%tq+>Mz+cEt~Z~HGx5?D6N`7xdrKqe-d5)k$ ziprBzU!d<5u{MZnpXM!&aw4K}_Ob5y(Km$xjgpwkN-jG*df%G|MkQDL4X#`3PK?8Z zpEVNJuDcIV!*~IPP@j46o6!0``!405W~rC)_uov4 z*w@xM|9n`~Ees-x{Wc1 zr4Y!m&qxJ7Yap%HC{aa&vBn@eT z+U@Qh-t^EmIBp8m>f=hDN&k6~;D7jV5Rmul{r+x$Z86?XsH@RdAPtHIJ>V~oY=UVm zm(P? z9rRT^CD6g+gp6dT=Q<4~4W4?PpRxv^{AAwPWh>v$h|R2hJa_mOh@%^u`Xd8hcjHmm z@H@M@A^;%U_4NaaTwl7HOs*`Ew%pVAS90LdxqUe6+HCv2`5T@Q_@-;!0MJ*MQ^rIZNf@YqtINu|!{7jGAW%nz}^3};)%?Mfa zwMoP6x=Y<#)NA5=@MbZ(bi1t>Y@K?rbBz~^R_^8^c_%p{+WF$I?GQ~u$hh8L`mWga zs3~TF$uV*uI+yX$cTHSD`r56K+vK%?!f1I^+d4WTBI1Q!^LiiYVC(XhM!t+9d3SIL zy3t}!#fpsKe5Ak85az*sF29c-3B{mMQMklH%5AR@OPbH~8xIFxjMkeMyuV8PuHW<1 z(6oa=O%dg+FTOA}fCe~eusPI}l&$VGGL(OP3u>*Lw|oKevc_mz)k%QK zx7VgEJyvAQW0lQ4%B1LeugG~1a?$r8^k;RN8f?x#8h-uyRSwVz)4|LiG$s2~H%F?& zxC_)-x|vEEZP#98L8q%cJ_dOSD)e4mA9N-ViR3XKhSJgYy9yLf;UtUc*JNB zVS=&hZ+ghC45SaLc={96N<~Jr@MQE}wS*VVrljpp6vQ?h&Vb=4;CO@IHBM!f9B}?? zLMy0Pt0#%(U!`u`4WW-gM+ni1*&}}2?}Ks5_0x1?*$!2z{;jbDo&4bxBJP4(y(@{$ z?mkS70${*#VC8<~&8g0JAqw1@s4~|8Ez-Scj90&tSQ*D*lXjmAHz7g6Be91vP>MdX zaEFAVJ7+r$JHqjTF6ATJ^m_7$bn=N|U|>b+`E$Rj2e~YlSHtR1>Xkbr=ZA)Gv50A{3%-p+`G|Svqy~RWXZ|DJ07k7h2O2KmDc-wnkO%l5@6o?yxmHta=6{1^ALz=c07?B4;h(>2E}e^_J_4 zFX+cLdjSlto8+W)!~M;w{C$&H{&$HecbZO;t7M|CV<_CW+cezr@vMm)qPTv7S*phl z>#9V2HmS=9dPDUUTxtauSC?mq(~PraLJ5Z72o|&G=URWo=SKnzzZ+B}Mb$W%rIR$v z*UQf>MFeD+SP)`m9II!?Cf&^j=%3A!s>m%EoWhD-s^(;jNm#h5H|8oJ{mLT`xy=YW zaMnbmXa7+64aOM!zrD8HWyp7y#M@n|ZvB$LxW$(W-K*-tPbul|sAU0-`Sz|7XQL`i-6=GxB}}o~fyvBQkC!=|1w$Cu ze^P#2Mn+niUA;y^LP9g|x#rEsDI$VFqeyTufz<1?susH~GoSshAYRRAedYUtJKnh1 zc$dgQl|z9bp>R*Ttg}-}v(A>r#8+GSps^KXWZ|GV(il>% zm5E79U!dqK1uAJ4c6N5W=n$8GvIAWj(bTyDR)P-_f6|&%ccjCKAeC&{vS}C9=r4ok zvvkz?whrL&m3<@9K|W?vzByIWR zuU#mKyk?dV!H5A-kC~5(4f0MI9lfY%6%!LP2Hb~3)IbnDc*ok!$9opr6C`P)k`Ht8 z?Nz=)Wkw6sGaq7J-R(h-=G9KAnq?6u3l5LUOerM4-eve!JAVL%V_ho~^x#kY^|8f= zyPnx5x9*?uoGhW#kNH%Ucig4SxYYeEBA_qG*)~2FrtEz&B%Qs`+lfAY3UKM*5lhgN z%)(ptxY>Vt3H^odpE-aWud>W5=oZCEdzZoe?=SSd=aw;4=Qa9PVhItTnt`^=OsiC% zjeo{b2)Q1X>cpZaGbpAz7Q^K(SBoxLazxb@dGIzNy|z8*N?+5w+78vPB#RVyDJaC6 zVNkcaZ2_C24W-+E(*}AG5W$vB6#8y5#Axr?-s9dUJ5hHq@W&rIZ4F~a3@!_!p`%r( z%@Ht>PG2@zB*VgSmsd2Cb+crmml}4OBf*70FaJG;+Qt?U=-z#o-|;4_Fk^-i9&;>8 zm@ALi`miAr3`13W=wNvdLKfMCQb-z@x^Z!K_M?N$iy5e1L;6X)&OeEUPxj;k6sBGN zUUl}c;>dc7Iy82Y-!TuuqQVaw)abb)At0ce^SmhHG9Ue^b4TVs4kZX2N@yMKU8L*P z^$~RT^~Jhv*L57n+Wsm-Gjo6`=Gn>_^rcu3g*+DF)#hP4Rm8N`HZO?%{{5L|x#8!` z%*^q81)7@!sV#4Tr;~X($@R8A1&uiP>vRhD^Qb-QB?2K{ZLenZUM_{5h={OQ%L}nc zxH7#9lrC9H5p>}Y^9;0_Rc%a76$N@hQ~*+{=G~-MlPlRSu`H=C8MaMS;XG3}B3-Qs z&sWW(oO3@223jA}&n@$>GdH+>Mb+H$f$ldW9)?~gHnC`{czob)iqe{y-#d0iGndAu zgLx1n{|)O^M)|=N=r-(KoyCRm7u;kKbMpmoB}4?!)sne7Ke8Oj!M<^?QLMY*U^{`{ z(=!-kO5SYt@w0Y8Es2LZ#BXxyHO9^$MJr3bWAdHbSECwx8=rwHGI0tgD3`ZKFz+_o zBX|go-@Y$M4#7mQ^?N(Dbmi?(d%vzOFAh)on5tF$2+{bkAYX6~_RMY;rJo+xd+QrISB6f1!g*S+s zT))&DLK5jCygb4f>|%dUK5_!2n4gLKDuAS7*l>^0HP+iR&KRgh?zag@+K%=dIsV59d0R9p%g1cO_!nxtO%~ zF*dinQ>D#D3mE{DA{L}Z=W$p=>{O|@uR?{-b5&08urplim5i$PqEqfCC;s9q`dzKH zY#*H@1(MT9e!JgNfIWy)q=@&|3y8Mhz>DKnHT}CcexM9KTb>0_KilKNrlM6gSs})> z7_PT-;&(2GzW2>tK#)Z21jQXaM6@w2)#lnKrjJkF2hR>3g63g#YHBJ5+aBGG+0{yu z3kkilf?PQQd7V%dq4ti`?$ic@tQ}aplws|O*C>C|tHSpenj8-D)rmqTA}u9K+SL{e zdLBp7t3}_X@igW)N+2=99_QdLoe(=z0Z)$)T>|qvR}iy>{yw526_?nq-m82f$x^^) 
z{=r8nUiJLxpI`$2W7igVH(gHzBH`};9UA0Ot zmNx4!o;~Y&c$(Z(?$qPX-|SRjhn4RrCG`nDT?gHdh49Vb-@AE5y*+$3H!a|q0=R6f z&yxx=7IgO{GWk`yk+;?_y@vV|q3^L!^Je!;qv|pV2q)7in<(Z(E~#194HmR}_nLOG zz0~!6lAbup0A2HEb9#J9>8cn#YF6opP~GE_*uBJEPwwLs)!Cf#NNJJBg-!;<0I7>) zQ6l~Kz#`!XHaMcs{P)1TbzL$vlKZ}Jchgj$qN0bd2MmP?9Cm@3r?omkMh z)E#=ZQXH?6YL=bc6|7pkJTZ0t!{RD)*N^N#u1V z3GPC^O19@F#JGMmVe(y&>veU8Q_=A9ju};;1?Dl25Lfe5ABC2v%~4e!_jL~R8dhe0$AN(Zg@yz1Qv*KQ4A_e z;gXNSIZb+Xn!S@WsMKgk4;9qw{7jIpg6E-D+>bm2Iw_)G#es&e5eWi50l`YEx+(g< z2dDx+z^-V0lIsIJ4Qm0nF1>L_Xm+gK8%fNHcOROmRa5eB^|witcO z#3z-cIY%uE9p;d=di#hrI6|D7Pl^S`wrDR5dFDAA&1TM<6sZPtlIO7$VQZr)-r^!B z7fp6uXK-1L)_7)3BmXp&%LioHL@Qk-I(6lh>7#S^mDJ2&$!h(23o|&UPKhcWq)KNk zJNznFYYUlVgrkygKtVp%#xg{5V`89Yrb7b-PiDc_mp26ug-=8THr3m!X}i8UN4#Dy z!aKgtrF4P~KNo+Y9#AurQGF_h7AhAO_GuMHZ@OL5=6feyV`IhmQ4D9t{fGP;{kr^a z>$!Ev7BTBH4K+bzBD!FJ!m?VF`zB7<)WC~VBhY(|NKV!@eznm8!930vb zZ1c}P)F{T(&>m+{QY=lFq(m1M7Akj7v8LXJsi_qg+Gfq!1))Rz!FU?AWO#liFBM-= zIp*&aRn3%rH9r-_qCnj?5ikdkr(X00%7pf>QTPK4=VG_b$n55=eh=@*2Nzu))rD=# zB8Yv(#($;x{u`CV4q_@9P%ylU4e=H=K=F=Ppd6^!Aak<= zW~HG)0(8o%q34-tz+dN;gSND?(me+~=7mEbi6T2+4mNszvOgB3F0)Pj-hnKoQwez{ zi7X{mnxe(9LwDXpiD$@6Z!6WTm6wvFLaAvYL(N$r6=H-R==x~Ll0iUV^5s{NH{VeQ zn9ik9$MN`F4|B^R+rg~E7SHPv-PHW-n#(Z+m}2zNZG>|@qokt>o;Jpfxje+^BQHdI z;evAAx}A1aa6gm*o)-`goE~R-^>0!al>^U&hI6d1kCQR11>6C-1o(fk0F~27)zovz z2Y0@4fGb1b?Rf1m-T2Q=H-`1l2WZ?7GKDx`DVK;AuQEJ-toX=%SB$Dt4`i+Zyy_lQXM|s`Op;xmFDxu{8QrxH_>;EXLNuZS=@#t-Ou8T*Rv(R6uA zN(#_86n`H2h~?fxuHE@?4lZkg{wID-Ko+(f1Sjj1fTBHre}7_0|J97aL-Ftb=21QY zbfT${g;#dQL4Q78v<@RG_v#JgbO8fAPmsk_bmpcG6k`<+3eeyPP#?d7 z(<4%*e;|^zx-~siXa5NlR!V`jKXZPpI$`SAM*qwLv4H}^PHrWa{Z#sw^eb^w= z)ExUTaBb>}ukReHLqUNO0j5fR0Bo{pl7gOI3NG;BzUI+E>hwwX*WYx(Cx9g^J|5if zA{!1G0vF#yo`YUO1R#j$0f|^T*A+@^#AM2_9Wi!IRA_P$F@FyQoZubHjVo zbN#&CkPbb3XN6?vG{7Z2dVrg;m>iO{DKV!}X*DxhTe3P;Q$kHkD=Mb!eK>0S=> zy%msAEqC0~f+v;QV&Jt48sT4ay6|`*6G#CNyU^&oIU38Lz%_K4K z@Ek9AwDs`LG4swkQ=Q;#H{Lq@r)hBd36!2>bz!pCg|$xrz0LlTw81iH(N6_cSRH-1 zd`{bmtNIXVVFl2W zngF;txP(RFkW~^KdzH{fAlSNCArCHg1xo@J(tPUT?@wr1p*#?Hhl7(7W}{bTKE@h5 zqcL3k>H@OO1h9cV4_^YsvAt(X#Qy~Ne=3uI zQ{BH`)xe=VCf=lfjNrdK@_ZsNx0TD>;Q#Mu{2xE*tqr_TLv%v#zxC_?{*{e9@CbIE z!~a60e;@DvU1v)X*tVH2Ig|hK-~BI7#0Nsx9KVR!KhWQQ`Js7rux(b$*_{9H|M~y! zUm^}Tk3_Pne+>J-{E$rn*tQzmp}%h7zaDfii2F{xp_BjRLI1I)TD5r?f{B6>YFb03 R$}R9qT0;I=@l*Zx{|6xe_l5uf diff --git a/docs/src/examples/itensor_factorization_figures/SVD_Ex1.png b/docs/src/examples/itensor_factorization_figures/SVD_Ex1.png deleted file mode 100644 index 6310500fefe73e2a6b12cdce2a06b0a5ae5d3cec..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 51458 zcmeFZby!tf*e?nQN=t))bV-ABE7Fb94N}sb5-Qy-jkJV-NOyO4cO%`k?pz3N-RC*y zJNM7~+|9#UYdY5)W4!Tua}4y8l@>vQ$AyQ0fIt!xeI*Y8@#qEu0#X$AA#g`UOc5CZ z0)EI?P*7G(P>@L0+QPutR38FD)GsRLk!-{SmPa2b%jXjWWIDPtdOEs*_&Ov;`p4`~ zWCB#g-l^J3SW1=(D5wqJKjf0Ye}f4vA;QX|V%x@hD<_v6P&qE^#J>kyb-7x@zS})r zp(WkRyVA@1eC<(+%OP2~bpxU)ko<9GiY}|-!=92DS{-y3!ePO+ZntsJF;F>wIOnW|L#LfT_iFDm z=tWGN$B=_i+M$fWV{K@}2erdR8;G>)Wr)7#MDLD}Ztdyvjoupiq+S7P{EsOTTRO|XhD=u4-nAm3Dm8< z5Jcvv&I}~r(^H3ksAE0aVkQotS)G#9pO~7Qf($&_K6~zn%`_U|d(k1L!8R(meX5)s zDYY3+1EJ~<$+9D}8Cdp!(H|m?`^;(Z%4)`l8xk_R9zvlXnwJsErm*$E`k}+#Lw8tj zM&A#PpAlR|KhlOaLMbECq~M6_JTU$6ZXD9C5F5Fk*vNCq61TTWzZ|)20kS=XP_vP! 
z+D8Zag(&P(9g;X-KHU$>#3!(L`>(FTWXYiXM8ZSgyb4?<4hT~aeI_6LhLk(ZdD(Co z#~Qc7*Ov4&RP!x_Q-G_m6%~?#ke%FXTii}zP9s=cuih802hNR%RX=?b%Hg*|^(2)w zCX9aR;zCkRh5Oy%H!+h~CDcd%=9(tcg%JPm0!7(cwfJ-xgb8~-b~I~mtK%Wl~#G4VbvM* zjF9CAYc=a9Tyk6~+%i@^lkY6otj}4xSkzbujTcI2OAL(Z`X|5HNpD4!=9E^kX;rBL^lL}Crz837Hy9juw>l?6w`$JNJ!$J9sCn@H==k9m)UE-){$ zjoACrj(D7DkXe+OE2Sox9B=&Dst3LYx0@htsHdiTEq*L+?mLlc zAssVqsfv2pE4oD5AzBmK0+la{y^18cW{NYf;a+QG`Dcwv|18Q>vB^8k$IEju8#a4s zra1XyygJE%hfYUz;zzIXoN1!zb2EntzQWf<2&HPqV+Nh2399 z7J|~E()z8cmNY0chY+(yq@PM_C2AyO^ak}lpl_uYqgT^#9?#jfJW)M?QTmV@vc){M z^euThVa9p}x#D`q{AbcaZQu2|ZSh9qMpfpi6OQv9kG*p{k4lnQ62Eh2HDa}+W{IXu zbx<|TY*kIN6)nd`!gazJUn!q_avC3@>x^s6S?T4%71Xu(Rpph!RoZp#HQ^(ihv^Sl zpb;N(!O+8-J|=8ab$*E9n$bp|K;W5!9 z4;MovsW)l_&TGO$;!%cD9z`s6I&-n<6~}x>Iad>?XT%gb4vqb$pow^cQ%Wv!EXvtf zJvPIIgaDh8RfAq$rVyqO_FYRf;%98eT7%e{I0ez{k=;_WaXjs(D;BGD7>)?k&<|cd z)~VQ2@fQ(y>O6cCG92{vfw87>lySkU+SGRg@kc2~Pd5me8<|hu2EJ`IVABsP4*wqP zr_lUlv3SCH0-?ByTrphN&$M>xO6u@Y2Jzqwr)Y0N!l=4}DMw|vVbZCFrbVrxo77Dk z3=;w}!y8R1O~;J-@7hug#f=_~$yf{6``C80Y|7QTR=a4u;fWGq866Vb(NE}TR7dN0 zEYD{#hcTbfeEar8L#6g0M4>2m(%&{98vFP?_L+uYk#5nCa>J6#&8Krqb=dFcN?LV# zqFLv}p~+*!%_L|f>he|!YK}h6JXYWj%#%k zBqAAX%G=4KIvB`;&GNW3y`)`rNv$dbhH%Ts#)A7IpohUF7!SXI^NYRiwb*7yY6NK( zybMx_#H3cDH1Et9Q^8V0Z29xD-d?ULJNo5muS3YSK3i)p8I8Si%d6C>7QLA97*%?1 zE%Vw%+m&^NtXz?*gsQlL3U!m39p{>S7w6LsJWY1#xuYtl8?4jOm8s_z4s(9BHf5AW zBKAs_r{^`@E*3j$bA)b%SHufC9i$l|Q6Y8&V>j>iO(MEg>{G`IPCFyoDIZ^)y=QRm zI{zu*KJfZtsMGMGZ!>v{pPvtf(ek=w8~d7poWVy66C`*}sHPQP$9GV5x~ueX>EY#^ z=v-j6#-;S(OcFmnXdMTh(D{mQ>w3>8JS2|r)J@lY^9uj+d}8kdLppt*soSu4cxwvK$pufH1)()KK-#?q?rRasQUm2SLG6UA&V`gMwx9tJS$e~99joNw;}g64zgVfd{yK6vQHSZqL# z_{Ohf-T%=q#ALC-6cWf~@7LzK50P&)qTKK&Jlm{fwL^RH^ZKZbtz+S4nDQ-?4X zL%CJ?E3F42wqiM{h)AN-Hh=|Tp8`~xWM{wLx9(s=);1)={V?O!nXPilYIM#_I!`$HOFTmJu@ zh2~>AF0{l99b780Bw4JE2OiaWU9x_wx_2zT^eJ_In#AX*f62-723Gm%v~A`q*qO-+ z%s+DAi{x`WQo^IGIgOc(+PPdS_$vX01&!o>1|+XcxHMuI1V*q z{-QL+93XV&ID+?Ii$wv86})XB2M?%4zzIC9fkwNNB{Rah1@lLEk=q#Zn?Y3)?ftp% zm(Gabd5z$40@9qvAg{?r;RC z8Z4c^wXsYP$X|3YVS7k-ny%q6L)EJigMs z*uv|Rp0eN{EnkDW1564wU^AN8{$hWFauXZKaO%91Ztt=Y7nXe7R|DCrTr_uR$t*3I z;8EhAhJ73ga=-CnVJnqdO8RYrnsDnD&g3-oe_KViTK!_(&P7KOmQ z*~*L0g9C)84G%T+B+&i*1KyBRox9E0y4Kaz{>CDC@}_g2(7i_C0FA=w8~&YjdR_n; z-O7y(yq(4$N{tz3_iF4qnR`oZ<9n|8F7EXQ29WB-OfmoDaZ|oeb=gjA1498E-mi3Nk{HkB*OOrglsc7@3(9ZniszhO%U?1pZK_ z8|=Ep#g`q)zp;j*RlmsZ7W7sWEd3#j@oDWVcTH#Vr>8{$gNY9O(;PajfdfZBP26bS zAARx)a-qMM(*=1--9}8c4Ol?`z)ipufbRHl*i$fMOug#ge%yDs-sT>Xqg4OwcD74{ zOW7S)Vph4rZJ0s&m_PduaPR}%UJpC4{f#wW033v6Y|Nx!Zu2q5hqbS&#u8)QcCU8R z2|;V_4!jG`v)*%P*mtCUJ5>kE1U&xewSfDO%zMLsL8Bb7cPkUp=U@qpi-Y41JGT95 z_mhop?i>tHuac6drX^n+Obca;}8LshdjVfXW?0`sX(JN;`uu*2cnB$EKo_t_h)wW1=kL8&o!;-@Z04aTmn zW?b98HbdW6LhN~0!UOmcdwG^V{~_>5jQ+XEVSB89F)9KunxxwCOIWhtB*9^u@%eR6 zPr%USwjMsry4B>I?80(QlU!3h?KsW2Fa`}-vu zg`~YwUD1F~`o!RNb7s3AE@M}-Mskmu7;tph^q2a_T*0$#Vge4O%$^wxe9s4n%mBucJ3Dwwu6*&bAR}{|J&H$NVz>&E@rNdy!TuyFul!%gGzXAbxkc%6i_BNrCXbd9`{$|;N{=dvYe$YFIbFJ? 
zyLM`+esUVxk6qH=Cui0`a%LUi|1Z}$2{Z)eg1MaG!IQ0%gz5pDx^#Yx$uhA}o?Q79 ziOhMnHk9b@uDN-|4{;`RC#L_qI8*U(2?(V_IwA@2(RA)Vw!TujEf(5dj~LqiqX42T zAT7yZ|nKe$Ai+hYim%xMkcxhsJNN)^>63>x(}&W3lJ#J7sRpN)V(a4U?T- zj=B89B>-M)F@M3o^*ITEckY|BGcK4*#{eL$`IibTI9Dj^o)QpGXem3P>+ffF$8j&NM;v zzb5$i+>;c5N=p5m3I2zLai{>ads3JAfD4Ylh4j~yo~1xaQI?;Y^e?~lzcBSsynfL= zrSqrGy*K&aFrH!tV9+c#d+lGT%fBE0PlW$lt^9vXgyVVj{r&w0T0>3^-bi?vM_ddf zcbI#H*1z>C$6Nhx@C!Tvpvl}Tee0wIpu876{fY02XQC<(O-k;s|EjKtssST9LlX-H z1KAn@$9oM6^De9LB}*CedxKQ*H(WiL0SxLVB)$Yg-xC_K=>eu7^)CPl*hAJx+si}# zjbZ^ufTTV0a3Ziyma!NS`{<|%e(48AOrQY$E3Wmw;H(eWYZnK%7nmd{I2`Xs)ZTCI ze8V=BbK-V7)W1B&9zfE91lbF)-L}(d<0SpA)ri6YrsY|eZ>jyqOge$qN{p2~JeUk< zG)Bb7ZxFxUu{i;A#5ExXbDt#K-CY36m8KZc58x?6)#@2}%Ca5rm?i_5PJh>aOYJ|( zh(s+sV_4e80Fwkurj0|8jU#`@=68g3yYCV3x2gO;BsC%^U->YEL4$=yROX@M_`7`d z6a|YrQfmn7Q27zEhXlyPX8h46Hs41)*^ue z3lAueoA#zt7w=k5VbDihVL8OuU~2zyVY1*XITpve+Kbig!nH){FgHQ zeCb`Ry1fF@uXt_D1j%^%q=PKrlQzp9}LpE<6BPz&vQC$v$5YsICo|vwAPnc&}+C?GNu`S=6roTHWc{eIYn*hjQtdg%gMH1 zpUPTs=~|M@MTIWP97mFqyl1cB>PA*_(wWQl*Q}(tGf6u)Z}*$?wV|`Kv(~=Ry}do) zy*u_frJLfe>o+d`;@z6lS#)=wpQTgg5lJf)cg{KGC%C39C|m;)#d-)h+lLQMXI(5R zK{uB}f{%Gw_K(|1LnILwtr-J@gQXGCkvZl)hR?b7>K_b!fI%3XuCTHBg#8hmVFFny zX^P46d;g!v=61Z%Koy@s|{4}$McDs}L`JdYM zzxX=z=v+u!7FyGB!Rs5pQl3gN1;UFpLUf0lqMj-DZ=cyMG^5Wxp!iZ3>bCg%^^HDw z%u~2;c+y7TLYuEq#U{~NY|tZ~*f$WLoZmaAg@8>jS+!GX0v>XgNv9%Q5dvp;K%l0i zG)4#r0SNE`p6$d(t{5$st;t8yP{hL+&QY^21%oe~8A$vG&j((76<5H>5S8<{eI+Z9 zewuoOOBDGjT->?O{q*XiI(^3uxzfo_(td=DxnWYQ(;7GS#Eir0(_9u}Pd{i;5Ad&K zXnj`fm9f3#XMLV8X9ym@d&Zo zfxy_vS43=VSd`KU^~vrxey_8n7XUDdN=mYn`$hoA%XFiIJMk~M$vDC+%+ms%0M_DC=1NAuNk+?#?4 zc>QhGdSAGDvkOzpr9y8GfBk6C6ZiGzDb1VgP^NC}WHJG$yWbQCC`H?=twbhnB1kmg zx1ZQESE)dE?3>0Jz?#+1N!GOL&G?3RwyV)L4O*`=vTD zeGXGAlr3LB_{`=v z-_$v6*ZUzgs_cc1H%Fq3NX$rX=>}8pX^NV%#IH84{7X;uq0j_8jvh#OY}$vbJt+L~ z^42ysf|n;hnJni8Ms}b~OiW%gCzpobGlAnF@ah$Gsp-TAKr<>Or1E~q?cI#y z1joB`S}OupR#qL7ovvt%T%IB!1U2u!mAiv#2YwE;LMSOI{V*AmK2|3dezIia#defE zLJ7G)N6W<3b8);?WY|aG3(Aa@KR7-X>PzBfGU_LaeD=CEQZg(54^_PI2zEyz>;9z( zStv-Wtol;3sls<7)V@_Sk{d9N23M}Gu5TDN`7YK;8GmRSM1GdJ(_^%tfNi8J==zje zM9kH-y5q6Btt6n0r-bfTMVm$t5rim@w6UO@*ZiMy+h-@y1QYU`WA$NrAz-(9Kt5p9 zZH9Gpafxr?u|mldnIFkj%Bjn>+ZtV1X!6Nq_duk@3faTsvZcB>+Gv8sd$Om4^;_)+ zaQ*uQSDdL{U^cYnpqmd_-Om;tjY#aR^(DuzjQ`x;?$1|a40+{${mt+b zbO82mgqSWawhD;7vDwGo)VaM6#AOw?!}q|!^zeg5lp z-S7d%@nk#p9GPXN9WXktudlOm>}9MW1-Lk>hY=vHun$s&0*JV-^mKKP27S=*%m&l_ z07%QF35&CYu*E`pO2p7F1TfUrThv_}JWLB?hko?v5#keO=m-j_4>%1Wzn27jgfxJJ zj6Rf~q+sHoQ=$+M1XtLsq*u&(yR`v9b1MgIup;3XT_ zXW&yB&QYLwk6Iu`qz3&>tL~e%D-b7kCvq&1qX18&AOhhVaB{fPWr!@ei1W)40`J|C z5Kln{Tt)b|prA_ha2#EAn0ue)3`c;4m1%2(q?>$(R_(Amn}Z!)TU+~ZBO>w6XF$LK z=UCRsVi|Ry%x5Yy>ZB#}^!GofwFF>4pA7Tsi4X6=Or~j0Yvco6+4wG8$KH?=(SMKS zcjxoRc=nQvER+t4L9@F4Xk)0ze1^+6VEi)#J3G5%63-L8)x6nN!U8xg zfA;0YElzKsgP(GHhwxp%0(7yqYvL8z@!NS7U1IYrE;7;&Sx7o+Wb2B-rWI7&R)A=2W8|9{kS$O->#mAwMv^VL*e8flv zY4RwjQU0NO7(DK=f%P$BupyEo#9n#vn?qTTqiIzd6C+ggbiYD-Mo`M+^e)uCzbySE z&)&lJ^0{W?oIK=$1$A;FCQqKHI-8;;rW=C$JCyIAe6c+C486$~D5cLL{3S7OW}%af zvqR6-&+uHIVKl?*dyDFRK6v&zGuz%IfIB)?Q~()rprg+9+&XHApNmUXItt&!H2>$q z&gVxJ_&f`n^N^Hk*c_7;^|P(0Y>K6YMWruPRdMA9!^6VkK75MgXr^o-qJM@P2B(vg zoc+l%J@biy=ZEqX;puiDf=uCuBqIHx7<#Y+k%%)5xHU^638+Qng#D#9z3Yo(l}&4T zqpkT*(_RupWe#HH*n1!2V;-yXB3x}v_lY&VoGEW-d{yJ@FSLWf606xnXbcgL|xSbS+UgXz&0MsT5=>*>rld+rx&kLN|;`w@pEhDefPs!oPic}PiC-1!o)dq@_n=nXdp&W^5 z>zzJ-_Ey_BE=NtyQN;*tGI*Y^nJM{X#6}1>>hT;*@hd2R2SvI}RQD~KiitJhFX}C_ zj7q4E6^^6@HpI(l2{)WlUyZ-QhPZfzeio?qck*5f3qip+hK1`W;xywLx*&AjgRH%| zFu|ZzW_*o=@Y`BNfuOj4#!hB-A_R6BSd!YHo9mpIN(Qak;b$;3I%RH}b>H`>HBrw$ 
z5uRSTHLhMK)SX3}q~o9xoh{f^J6j?ZTosTIua->be79Yc*elX&)7_b_KmoS2!|vlK zRYZM_%V~ds7QcGA<%_k4(KI?C7bA&sb#dr9HEUGci|o@i(t(Bg=ZhsA36=Oh5!ibB z^J3w>ZDXlI2qTUCFTLP5s?FGp+@O&Oczj=ce7JCaxW@G~p*+6MKLYa|ml^`UA{9Bu zWxn0~atxTXd0kU#de{YsUW7V9yJ{;H*Kati?;@$M_yr@o5;405{1C=gyJORwcQE+3 zm^mjZQ$=^Islr9h_Nj8wDeKfPo2ej)i(bIO)@58c;k$`ovI>~L=d9#$I+(xQtrI!j znOSN7O8h&^fUH4FS<)(SoZz^Pf&!9~h^;|*S(7@cG`PA(H&^7jsj}yjRI)kL+xUrg zBeublV(6t!Uc5CvDQasdj)!@hpFQwd&!ku2F{!neT7$Bcix#;nu0-!$cnUPsB7!?p zj+|-UjyH+$qsPVk^pZev?oWT9`XCBo?UmvNuNJ$!V=tk}<(uLKK7 zI29ZMCN55X27trI&d!cXY(4XDkB8Pz1IcxB>>m>Z&JI2F(aT*<=_~Dj`p2_dY(jr> z@g&*Hoy3^M+-#biP@pv1%tf_A^}vCd-{%+EzdC_Qhg(BgW%1*-X~s_c_~>!3@$0<% z9EU9B0hFAaT#kE;!}j=>kE5T8n-0fQ)J)iI%~rfgW;^*qE>Au}Z}Z@I&G@A5af!39 zLAo9dh~yM@@Pw#SYCS79Zl0SX)Tp#=INlnwW6nf-*?x1fGivnH0Lj6NP4E~d$;dW0_x*b^p+UdT&pY!v`ZeI8q*5gS3gaoik6h)nOZ2*b^DSB#tUhnP6^js zPv7>$u@=3*v_HGJc&GQU#6VmdNK0`zEQvJhdMCnE7qt`xbsXFuKgN55*?1nWix%== z{G1D>N(;d-cFk3T=^-Lc3cTFT^731M4kH_6STb^QoyBH9WYzPsoR2~1)R?9o zIbQLNGXu}tem+U_OlcP>$9KeW_#_JP)tK{g2@RB_n5X>Z0u;^E+Q|32Jfe*U(urtZ z5I!&1y4DjsaH6eS1E1s^LfQPO?kfaBlrq9D+vh*rwk+6f zV;G+Z-nwl8B4T35j}MU=qv_PFStehun54NL&RTZ7CKCBcRbnkJPePEy?@>i*Ps~Yt zX$X0Fey#)fc`B{iYIz^=s=I0Hw_$0uYVm<=>4muXNsZZx#H8M!rOfbGT>@lKAjNrq zhmmUu1tMhMcS1;kK=U7N*Um@l1Egv6pAB1~(a_PEOvf3oj)r77arO0dd_ZkZW(y`1 z$e<%ev`;V)++HvcTaEIe5kqX?nxz*6SVHFRhcN@;c_2SGnkqF5P8}InnM%u)p;^cx z-!|)RT1a+_BVB5;WY&IcUd=;^k&(d={eGtQz01fYDmFIugdG*xZzt@T08ZyAkCbC> z^M-jkNTwr$D*K<7K&@NmH0icSr4EHTh4_jfS$OeyBGb3^CH)R81Xdq$bvR(H`*cPT=()JqAWXs-u z`-Iy}D=S?mfBtHYAk2UJIs?+&+0tLJ(~H*LJ2 zj{6HmDa*MWjTHLcv@oP4%@;gU+dn$4F@5|ZFvn`X9@00=^bddt%mDxyW7jFY^_Exy zL}X;p`Kl$2KtCoie_)b}BTfBKlS;J)rWpkk9Zm9qV6k&n!PoJIQB4rL4+UuSP)`0Z zj>x~rdv$Lzp{$<6!*5*A7JSZt9#%1yfjsra*`;l}mz5yT!m6&*DHpL$$LGuURBqg8 ziEzXl+r~qgIw#u`gDY(_P@c zeg-kal%anw6P##hfSf9`@F|yw?Wl%L;Bk^Kk>v-vZ)y*^w$x$s#g9Lhx%iomupf($ zdG&)t2uwv>ttCsn<$;t5pU)KssNs4Q-z)yp27s&pL=hw5&vol=JrUt}+;8xE;@R8s z)ykM@V>-LFzQgC(Ucm;NS7f9cXe3c^U|jWWH|3Mo5z&#WY!yvTP3Zv*gZ0Uh+)+Cf z&s%^2ZkEUfs-vT0eXM}c?R+JsTSz7H470Q;RqR|gBf-gAuvd2-SE!AlS@$4S$q_TI zMh{Imgdj1^7XkMnlc3yfwFz_^=Yej(#P{6H`Qzm1IjnQj9{xCCMGtwebuu-tL=erV zy5e-xGmx2?S&`L}FhNlm=38_cZrgPfE{C1Kapj*OvBe))H_u9hH0Dlq^vq0jNyqXl zip1nNY~9g!9%}U}HVFk_wP?9t!|b!8-xukC8w~)Q>IF%cJ=o3GL&5qIL*cQR`T~s= zBVJWJZkOt@@BIFwreEzeY`*PXwCJhNR^W#fBpLBa?vu{|rBZ*9o=E=Icc}XrJt%bchTZ&N2W0dulWu>6?=Wci=lve^(s?)a!?zeBdkUvLa!~jWw9k-#a1W? 
z4iy#&NeD=Jfc6+tem+*}#Zm-lZp8vl#HbzSNOt_1|4;~ihw|O-by-nS1JKK=$X2E| zh1m+m=lpOw=S~Qm?o(BZoq9$%8TI7D%L;#Ejj;rpX-6k4pIwU=FU1o^n~n|-KjOS^ zXloPMs?7V{;}U?f2FgDMA2ilqy$4)q5)zXBbTM=w1rkb=q;481MtOIFAEJI+m^M+qXSaaJ1dQ!SuBImJ^!sGg8l zsQr@fhw(CXnSaX$+wS|Ii8(JKu}cAw&rEs`!8L4vp24>VSb`w|Yn=gWAtv#z+aBjI zW^#=U2f_WJP1H=RND?1j7g91S1glGt3S&$a8$J#}f1ReQif!1JB-9m6hx#=Cnu$ig zIwwtP1G2D8=SN&1@F$-1I8_#IAxca#Nv3*LN{=dfCYJ^+80~&sOz!otqRIsHL`$zR}(TG&g(YeJ4na!=kaMX9iQfCEWXG%ZD+JH{ve4 zpm1^tMuTpgVtu`556~Tm1A@h@9_5vO{Sts_0RQD$!<3f$M;*TTrI#q4R*m?J57F_U z#DoMQZ@&Vqy4nQuY&jIj{dPnquH?;_;U6uy8gs(I3m z&uA3$vaN5bs;W#hVle*+m_#&SeuaGW?^WJBV=+@{*Yr)JilsB1S)6tI!*?kc4`PlC z(Q74wi)H#=i*tCtA~}$L=h^bgiWktd{G!a{tsm(bfOeO*$&&8PILzrRw}-uRvZmiu zvS~bsg_&P%oN2Rm^ot8Ee2W-=0yVZv(Vm|==yw{ih$;f)ngclmEuF+fd`d@~BNCYM znrgWhE2XV%No=m)`k|(S&fZAEiVHjXV?{xK&B~%|hM6Omh#vk%G9pa6cXe(9G-MED zlHZl#X-v<|eVvBI=y~01R2r|_ww6rLT;#+3gRREtaP@2{grA6DqV2cq7J#ZG5ooLX zCi?v;nD#;xD0E8c=0-1s65RZ#5~Lhk9OOCbppqPs{W@sfM$P%|)^lID_unwXne{v8 zflY<>MD8-hW2*b0C_%9Y6hdi&KFFeC1HIg%hu7+#S*aOJD=w9{3S<)IL-Dymm^VC_ zX$l%v><2(eC|hhGmA@&MhX@7qw_B>64u3GW3d_oL8-5JlS)`;egM%i|&@t6Ug9`tV z`;6;aXsxas)7Myb=f@4nmC-_iKD21Avgj)5ofyWunP)q|EZHnT9lF|~v9)cy=ls5`ss-%gC zDbNhv;y#WsKn)|1eN+8mM)dlbH#ICa08{Q zLy3x<9O*UIBfYg3KU48BsbnGCLD%f?ShSxbc2vgdq1h~E1Nq)x7>(p6q71m3L#naZ zuDs7zoFDYlLU1=pgNm>c=_&^W;jt?|r8LlM62jgHrQVbe>@Bldlg>Bo{258eOB3X^ zer4Fn=Shj`P;}U8O_phd-C(dWn1RFVY^{(@jtv-$8XxCQwcqvIqrT9rMAJ0z5ztjf>@`pD*)u#X$)VrLjC*bvtZ4U-mPv`Y zE?W|pQV+G_S5g+G|KshON@Ah$!D%!sfx4`y-ie8R`a`%l@HV3i&2j+lGsq1K>S`pS0 zX!7Y@o$aftknZg5%30B>Z`&9vge(TaK@0IUmTq;0hT_K;5q(r=0{$l`i_d2kPyUEz z%ixY9Cwcn?*tE4F!OmP3*x2K7+!Hu4$~XHdS2l;SOxJh_sV^8oEa@N8vVl~Lic02A z-~8MXDCPs~e@=F+d)`}hp(GTq%gOLX-hR=?!}%pX+l#FWyysi-+NM_u+-o4fc^AIz zK+}kCJh7D|qNWx#)^~-&keG8w{bOBVuB3> zY}&L(;)r7N&3@=cqj^t&)|0-&)`mJt4ULRPabo;77lt^xBLSv|);D@A`LFH$+JVzWfCk2@;~Dg3ART!gs81y-&yBV-$6*)yK^BxckbN?c8fPTxOZbZT`p&zr7L?5(kGHy zm%Hf&s>8=4HeO|AWo#O9xgt;t3k%t@U!b5KJkUQ~Tv+$wi=u2CX%Mu33crHxcyhs4 zs;TC^Jp5`RO10#j7yr%KgT==86#sy^ogQc#6iWmjgME1YOIXv%A8JQ2TPI9JS8yi<&;|->7OuIV|TrOnL9)oHiVEv#)RzwZb6vwqr=C`DbqW zWb%2YNu1ZvKFcD{HRsnk&lkd562o1;oYSfqg=c zV#B^p0)v`&(q~p~FUBV=MUqU7CNNrCGg8;q1*Q*T>h$ChU(kD#7|LULrlb%7EkGHMgf@WnDj0^|GqZ!B*_vN|mUc~k{?bU${K-+Qulw#Ly(LaSjS=n#^a>|4U};^GIjGV_M9F{R)H+&f4xnm_>?CN1Md zdio(niad!mzM+9Lr?|@FU)nYT8@NnpFTLQKk9e}rhaCL8-7jo&b5sTSH?o~qQ_QdQdWBEN5aNGy~o@*UWGDt~{;Ha#5#!AcjUgbeCGj<%XupbdnFb|rON1n))Hr;A1G0o!b~HPIKcHB(fMO4*d;dKCn4 zWUayr1-q@p#9S{G8HD^Xsc8aZc`NO*bvvnqOqd%i4`P+) z&#}7bnKK_kH{U24rOwgUf0!r<;EK+9e^oO;T6?yyH^S)&lvnT0V>K|a@NZb5+xR2) z;i{`akAaMVVJfts_5vQALNfFZALQw=&iGY610KkzDNH!{8abCP6))>sI#i>RNUnkN z3KGIo#4@M^&lpzS0bE%E9gFKH&C$hVC^$I2@)_d9Y>KnGK@{HIO|#IT2RFa5zKnqzaphoyj1vY0TLU7S66E+ z1F~%cGBqTlr&VZhzF+3LqG%p5HF_toNWkvT#S!1Eq%eP`pkPJfeefw1^z5J#+~dSz z(4>`-R49e>6*=$|IqVSA^#y%lHEbH0)c~CP15`&0dxWWP3{f*v8$(%gW%P4Z?GJ_T za~)+40GM=F2=xo#&4cBa~}MM%=%50cfEjifu@TJ{yPp8oeFlrtF90F+5-dYZzSF58J6pci=Pwl zIyY;8oD)c`%G?K1g;4QnEy^6b!r#kg$m;Q`HfI(~%$H5&T5XNMsEnwUn)(6z7oCIh ze^h0*SBKoc%JCEuaCuP;9LD-?zoM)%1xpMRI3vYG8_y!^I|vPljyj&IjQXuv`L9_s z-=z>9oamy`(u#@f^jw+C=@md$M$j1SPN9!>W*~@Dd;Uy4+N$K;NF#7g;H~kqPEOCu z)7>=FiSMFfVYAfcbhc$;u=`rWNyIlc(wh8qt_QSmF-yyI-@&`@wHKQ-*Eo%eMMeW8 zzJV)b4A>TkIc!fA6&JOhL=3{dB7dbl;`LQjS}Ib1a-!H!uFBp_MfD$Z`@3adP0k3^ zW>-K>YEe*pA)BX&_r{c)iUzJLP?8APrfpN|2xy;QSnvdT8V$gnRl8!b%U#6pC-elY zGzHj_|IS6FFidc%=+rbB*DI=0A4udMAR|InhoexBE?X z-oiCU9TDz`ZFELPyCZbzHG$u=fEBOm%%Nt+Lkt1}WD5(6=GInm`*o_k+zEwdWGB;# zdSSHaFqq^z(N;pauix{4U6v=pLAc}+5@D`TleC&NaaBB(O89hjR@6Z+iI~VCY5HV@ zP30rsf`ACKcX}!kMj}L3g7M2=0Pp{12&Ji7CZ0_SD@Y!xDn-3G2`s-Dfj4{4ua0Lp 
zh@_m|rDK`(Ew@JLh%v~ts&O4g$GLAKrUxIaFS2_Du{BMURekSn^|8*wsbMD1GnCo0SmLHYktX|1w{knlY{ReKE6d07Xu)13KZKB!os$@{4172Bm%3TdPgin#9 zBXB20h=zKlb}FK~iRM&wlO-7oWz|3v;Rc%y7a%1ON#*JDm{8n)o+yuXrf23RJRy?o zGdXP`Cy~t<2xvI;1BY$MV=V^38?i77Id8&PA2LPBYa+gh4thI{?SU}T*4pDk0-8{#_HW!|*b#ME9MXy@V*J4ZH zg#u_04#1*&t3a=r^_2R@4g&xiX4qFekL!yFmw#4ng8SJ(YN(H6-k?ZEMy7LIS!cPX zNwmFfI*@8C#9|>2d2%w1YS^HKfIQO_@T5QD_| zbEyBUOnRD$?<%l$fUaxgOfiFk5+Z%J8gKEIbu@R-@yt0AFcr}!(13yZL_pScG8^0{ z{C@;Rpo+J>Qd_OI8750nf-1q4YVeY{d2(WP+Brt0@Lk(mzncZw6F4lR3!Ix8Ohh1_ zR|`}{j+s7BFL^`dU*AzujB;@noh}6&-vJ^r%=Gj$U@+@Gz}`O8CR?Jn2|GcqU4Nbx z%vG%mb0X;78^;$=-lPC7nua*5pf55IR=>;36HLPnH6QabJR~Pd- z@SV}b_Y7J(p-EI679*6=0xgEzK{kHwvq^-hn^;NzdP7gKauk~PfuDg>OG*6h$UtQs zft!pLBqb^NYN62!6%CEVI4LH8{h?Cwv#)6aD$u&nA^FnzuX^g7GS zIXX}=XE|eMZv6Y{sl{!1l2u{{pJM`Lm0tZ@8vFwiJKm zsJC!>%8Nb-vd(f$y-%bXL!h#nvolAa^MIoxNIha#+8EwwFVDOZgMg5hC=vZpq6N3` zu=`OJk7l8jo!~oNC^RTS(wqGUWIQJ#c_1^`$L*bjA&3YvLRq?3HpXk|#uL_DJo4E;Erl0pt z#XK}2DtG6EmHSeJd*0f6cHPUR?(QT!h=_-WHJ43__{XXWy-we1dFHgJsqvK;VGpsHw?Pvb^#2cGZxvPr z7j1zGf^>Ix2uOD;N{1j_n?|IirBy<@q*F?|q*EH{21zOD?%H>4{LeX0_udB|_&(X2 zRddZb<``qi1~Vs9`y0Q>oK8M!gHf;{ct8P zsN922J{*(Hmnup2cTEe?c64O&w?p?<9aCe|)$-kz^P1f#%8F${+$A@1)=e$V+Vo&> zv$uwP>E`jo4_;?<*%#&LOTOR?fy&R9`GaVlQEDBt-jJisIX@<`jwRRb=OuZij#eus z!Bw=kiM7VZH1py&FX<$hNcJR3K7BCgx8|*IURG{Gk0PRx&aR~7frm)6b+e+GoxzB+ z=w$qgpxtSXz3k}oD-lWeIjpdYe*x%~3Q1NGu?Xn0neu60>fS%2?w}l|Obh+Zr(mJo zt=NILxzJc!Tt!5&@IHHT=D^szWS+>G;$1>}X8c!ltRtxB?MsJSq^A={9Lesh%fp8f zCn@w)Tx^yO4xIW11`Hs0q)UDx;KS)kBPRh0LIh}piidvn%yV zk$3^_Z2OR0%X^sX!1h#0+jkEN@}^3+)cclhvfL1sY2%orRSB+cs$uq~$RE60T^MRP zO0!44D<*7DR6eltz0|1R48u>0QEZ^2o)$`)dvaOwox&zy;W#h8*ky3zGJ5tE-Pyl_cblwJPaRP|E@c?wH%%1 zDFbg~()JZY`SYKz_+o6&p#4P3-eltV@tx5Yl+oxFo04=tp0k{~s4wty~C^W%t zu13j~9$5!NjXtFejxv z>9C9tgC*$G-)1FDIMRv_fH7c}7NFOXBj$7WvnM!S-YpGGxTq0gJ0*Lxon&Bp90hbm z)%5IxsI)`kDSR-XF06urI_mjnTp<88lA6p|2@Um8Qce-CnzQEXyt10#5s-=A>R;C8 z{Tn*-W_1<&azPZK1cYy7kxiv%V#BbO5nHK=H3; zRwchknWs9sreEH#yL7MFHh;!I+(z?VF~#L-9G6ZDxZ%)e;Se{Nlrv3~@wrC|@@XoL zE{hARQt|2eBGbq)h(k0Pj1vw(fkk-wK1}A3r*M>i^WUV}ggt-~k}v?$T=Lsz1X)Q> zc&NVj=Vx=utG!2Ly*pch=e9+ro#pZ6$`^k5-u=Z{tnfghAPLwJW_;6|9Ui>~b)VTv zDqcF@D#S`OdGUUe`bZzJqLaFkl&ToWbjWhax6PXr7!*0-_5uG{43^f=vzP1wOdVvo z%amJ}NPxOBPZW2-A81-qln$69FzG^Pr)-cfl>j}K^yyGZQR&&3vPA^694Qq0URyxf z=8fxZ=!m5^URZdmwEQuLS`IQOzUaZ-N9hmi%qgb{S9Y>C+p6}-)vI=LlYX{Lv16_{ zwM53MA|bF8;c%Z;aThcf{OojGxe!>va*jg|#R8-hwE|!0{Y<&>r?{~7Bv zoZOc0Ec^NXWzP8v?IcNh45`mI_Ch(r$>-yuPaI~K4cw17cO@K9Z}2HEP>a|VznV{$ zwcdwdQV92sshR1+iw+If-OfIw#M&@i<(cD@kOiz%!2VxE#|*|UF6s8>DB!8d)J$rB zYgqqX(?Xp?4>Ul!MZFvAhJ%imMOofr;#e=Jc6m{5ivj_p&VWJ8Sy&Q3 ze|19iXXeKl{`s9FNnhX7?|MbNj4GR9^OWrIVGJD=<3}nzzJU;QiI zTWAj`eqT@Y7{*U&(+c0`_GR2LVcvBsaJFh;#xcl#o%I449bJ8sInG{l{nuB= zvuzDIsUrM$Vet$gcZ{R{K3-2Q95Hs=IAUH0<=Sba^~E{s^7^R)F?DR+lb4Lj#jxfW zeBlswx-`%mO;_P{b<8$rlEsaDC@5h*@VMjw%u;~0JsAf1d*oqp{s0@>Z9TZrDMxKI zxkPpA?xz}eEE}{5LZ{A_tTK2m_$N4I5iKWe_1#y+HQI;TR4Lkg4`cu^SOiw#wrPVT zJAZXadr9ovniL^VDJ2k;*Yc!#13j2QQ#bP4Vb4IC_2G${+Su!Cg-5j6xo)q^v@762 zNg8DrhbjjTv?rZ?u}lmuY43GR)qKWLPVD!VgHdnp1G6EH;T2yn?=vSu54{F)gR^dU z6e3!>UOUVPuI+H~O35WVD7Jsi%cW+g=FD~Gpdq-+=+!HB0K|zn4#;9$U_P=Qr1?Vt zJR&M`{&QIw%}qmN-jEG0jBY!RR8Dgl`{sa~;hoipBMEED$h=cdsC|nw6V=|&B&M4s zeT(ZxpyL4msB+#q+iOf%kVvi1WPpKQMo(d7MO@N`APl5lrho^aVByJfOia$OMe(F3 zEoyw}y|V4e)my6wC@)Re<^crbm74yKHF|&o5&}Y6%!H~Wd^Ze!qkwkb-dEVK-{RMyt2bq5VWI?y{Vr8F=CQ^}#l#D#PVvxvf}tIBL1jomyC+;Nlv?T-IUxv43R%Rrz1ri;<3NkXC*m+9zrVi+ zouo=ehohBO6U7BvW(tsg`CdWo0nB=vPL;f97K$d!o^QFlVcwiR)GyBMMtc)}Ik0Gw zuGdh0D)FTJdU}@B>R{<0BB`Bf9b(lTYDOLsQ)8g7$i~iMF&6P3EItKr2KR;@X>D>dG 
zPAKSQJstS4o-$tNXi6^OqdzwCBBJhOtbMBOAQHg0gr}1 zgYZ2aU;xhil8bp`VQpRXo59N7UYLfC0qK?hET|kaB76;e3Owwy6_UH{tpRO@Sk%2d zhlb&9_pnD7BA14X;C$Pkg2;v7QLPLY92W0NP9f@!4Zyp&X-*9iTx>TdK;jYasC46L ztkW|!5ubCb!oL$z*wW` zLii{}>$o-(Ui=C-*QdqsyFpdX=U>MmTj2J9BrJUZm2gbn(Ht%v*%zFMoG~QZc~0M^ z53)uD!rd(3e~4fGjQ!}aVv=eW0O&tP;2H*pmz#+?@QSG8P9c}%%65c5Av{izYaQ{4 z_mkE>G`?Mr!`3Y1*}E#~76ywP)KHsb3E3Am$)}qf1`VIzO%}?b6}-lAzdn8H<-N`( zU)8<|;@r=v(}<0tBOfd0v=K?uZYSJ*-`;k9W*sSSF8`+iSH6!|jb|`yTJO(N`W1ac zm!f+jsVsGe9{PR{muo)=>#gQPA~H(wLX2$*t3uLme>~~ru{Y4ykBXx9w$QJNR`|1# zd%tYM+i^Bh0XEsIupUii>6asq<-VDy2OBnjklSYNO>V1*6&TGq`>PX5FYoDA7OAw8 z)3GVfYwFtN>C+gT&Gf$Jd;3Jz4|_d|!4!4QGh^m~S{4SdhWri&LsRr69-lbG8Kd8$~4t{|J*6yC%c5rDrMao@uTpzC(0)vr*H z)~O1Px>sIwFtu6ErwSz4`-V2&bs0bvp}3caR=A~I3ZC}@HZy)eZ;F?q2*1(_SphBd zzK&6}4(p_iQLmwTa!%V??ENL_J1E*L#>K(fyGSd~>-KKV=4Ltx$j>Y)4!JCKbq_@o z`W7(}7#+_mUlg6(PE|2C-FOaLL#p6kt^e~h0U%HevQod;JIq+7TOJy>8pT>Hf(r_W@%*h;OatJwuK>*)Y=bc# zuJz$vUR{~%Un-Yg@;VlOrq~^$YPyS%wxVioIF+eDEaDpoY4&_PJ?HfInv4_8kkM2n z=d^+pC>GlRNo7fW)gI-4y7Ua{l5)epHjfvXVuW*1pZHFa=Dz!2otTj30M$rNkc_kn z=ljiF4HA(jhi6-(!733RPDxw#Tr2WG#>?}}67GMRuzu?x04*JFq|!0MrC}#>d9qq? z%oJ$op`Vozus45!Uhi=5+Ar!?-sxX?jh^D2EDo9!L_lxyFIG0P!ef@;9JM0qM1r2e z$Y8@H5>t$DQaH=a;aRdftyEkM(Vsp&?p#vax*eiDJmn036#?p*!42Q_+FQI4 z&9yD8lJw}*%6YY~q17h%TwKY-v7+$VGLhZW5QU%*WqWgk;UTW^)8FAWCgtq95?F6i#XOj!$b(*f;8?7wFqG922BBw{TePpYP3$0_m(A1!4n3+v=@z ze{9LWp2TK62>cD}eN(bv8__f6FaN6cL+`m^UM6U`&?{7x?rD93n{8GM5;7C*HwEhD ziWn2ye9z@A#@6GvFR>cD;fQ>q4JSrnmy_6siJZUMDBa^nWuJ92G3cq4{4V&%!W3W8 zxDi!WWfPR{aagc)z*9hv^I2FJ2{eR7V~UMzZNmWoH#3WpZpB!F0(e3=fFcoFtTjE0 zs(_k8Xv%f1GszQaoFY=zeW_;h&7sO#tW&J1NY%oMB>&qRu6vKlwQ>>zDctA>Y4}W) zWu&g5G?A@5hGf^rGaeT1FQOXm_X{uFeMHUs{@uF8;^iCfQlS)#^RIFGr-v z^@QYboj}{~=%}4ZGdZs_@_W;3n(dW7^!{fI;}SFk-WjqY_sz}C zN2gnF6n@66I%*baVgOcRt(k}zOT??_d2HNe|JMA;({?4Gk90zYWOj%CqBuFkM8#@w$lWon}N4LDQ!)l$?Sx)K{)g#o} zBu!9Q_`4FJLB*=rIF0|$eb*{{r|IKGr9AQt2zgKS-~7WKu`SaJ_pt89D!aO2=UcJ} zKF@z0zL9jWsebdA!X#cO@PR%;cIM^`#yFu>Qkn1mMjw{UbBbsv4L-=aW|jlROu233f`9ATpJwF`v<>z9^X&v^Oi2+PpCf$tHky|% zsZ;NmlpE#McLLhty!bbl?LI{K;sj13eR^%PkEQco&I>2_uQlQ7WKFefcx}^xe)G*2 z*}fQTw6j-6>dj?;-tH8aZD5R*ehL-KJJ@=xCvpar>BYDJZ}tQfGy>+vg6iO6Hd)#7 za27QvD%&KktqT?%Pkmu)n zC*4eaZwZXR_w9E?Mf%uO?4FS9eE}&qx?Tx&x+Qh)^yc%PJXT0@NPS58c+~K|=#UD| z8)dqG{Y7rT3-V<&N(8*1DDQ{m#l&iu%`a(9#t0k;%(u6?bIJ6Je+Ho0x3jXHfvsw7 zY|L{AQZ1tHQXeCr0`!+D2ys#miz_qanRfcp*-7l3q@CAqiAXzA9!XZP$&oyM#~9sK zN~rx?U$+2T)xlY33Mg&dzg&e}$7?Jm7RSeo%FtFPVA?Vw(L1h$67GvT8^s=kP$Xmx z58LfJROEh;M#IjKTr8h&lsF{S0s0?CUNni}aZVa~ZKu`GQym(WNPX*Wl~<5ttL_nDP(XdXMF>@Y z4#x>T3#sfQ=D@mClT?Mk0kD?^KCq=$E;8*xc&eDp2}oW~Gv^N;KdaJz7^_P1e`DQ7Rzct1 z$V%x(AAxP)=Z1$;$Wt~T!xwpXty*?b@rp!d-0e2h*(TNCkJvlL!AP*10VF7xqp563r;oBX0}!KkJ*dR4fir*obZ+^ z)C)+Rx}jG?VXQR8!*H3oY0#H4ZZf)O5~GFSOhp?l?ZQ8xWE@R;9ESYEfLlQXpXPwn zD8m!VwT*D}Tyg7501XRWLfc@!o#Lmwtfjo*PJjZ(XKL=Ge>); zy|Iha^cT$(?jst!NU^#xUcMN?lPf7e4ir;rKXVtp#}I>1Mi>$sTBm0( zBq{Oz9%h;M?WdU>`2iuQ7T&-H;rO-linW)UYt!!;_h`O{k=l;>=6ZEQBM~M3OOrA{ zNWQilppwE#61jbcXQLQGyE|$?$lw9lY`Ttwkkn&YuPPf~l zpQi=h*ylrClgG4_WiK~B1%OEDzy+B(g3`I&vYHeQSoQdR$gJ7-h${Pw4S+m+%*9t@ zM3(MpxM!r17Cs>(-g-Q20@VZ0+asWJ^GEiFR{iTyGT_&9f8I=L1-VAlBv#~ebS@p_ z6}R6->MRjlg?^`+b#flpAlLUkT3}Q>b~0-!UxkXg-CFZ_bi2DISW{&jt;c+e__w@7 zu;oR3RRi*mOzHpD7o&BEozz>9svfPV^>R`N3>Bbxjp;A?nzAia0@c-J`gkFit2$s8(~r5?FMm9+@yss5?= zo=E~4-46Sdo4^xW$ovt<{;f&|H|j;kbPvU%)h{=+J(yU(*0H*jj0dJXiZ`f;T% z@YG5-e;&Tv7IlB{FQCta0KjJ_fkTf8E|lJAGhx+>q%@;U=k8|)T^U5@j<+;Q^C2p4 zF$ISsYxXKKPotYQi7qR^t@}{DO4ja3?LdTtoC++S?y4$;y z#U3npT$-0RmfW0hNMuZ5rOVuLaRZBfmXrq z%bS+ko>xxQM@O{Q@L*Yoe}99@Uxh)b(0u+&FeCsBcs86U7UkE4!+F~8*n&PitW~U8 
zhQh3!7wAK(C0i7~d$|Ol2|yah|NlO?F0IP-oF{KM{x2j4CvhD9Eg;@wQ7s{YcS4)_ zmXxwCc)uk1!_iUdXO^j|!y&V_NPSbE=1m$dS$L6BKlRfAXQH(luc+W&^=jjzc1Z+A zUVRj6)|0EbN?49A=52IfmM_f-MoQ&74cD@MQm{4fFw%USi}I_&Eg1yW6lyUD@#?I> ze$-_cM*_afzs#b68lVj9%nxFDKKO1Qp;8NU4qoKbooOsG0lSG_l?>I~Rpd(z+Zge= z;?if6M;V|A-V8a7NmUpfjf;yzAS20=XCTX#>JYq>gT$~aU z6H}flhIZG<%}vHE=Twy;W6A&?7%aS+DQ2bnpk)71evmhk+)H5Ix3p;}v8h?$bB{*# z>0tQ+asyRiyk#NL#YbrOzTT2TGR%g@6v!>XUlpd{H+KD_uK^v&@a7Pefe2tIa{`{V4bmI_q-DRFNPGeEN^iZYw-#q*olK4+4lB1sgKqjkc z!ATaF7O-cfYs@kXtKak!O?v#!C6V!LU48bh3eq37mXPjR&?tJ6V2%IsoykY(q%ofM z*zu8#Xy;U40Pb~ma>57AU$JQx6T|v@PpS0137hm$6}A=SUYDj*T@O#{hn?BgL3Js9 zeLI=?@U8G`kxPn!35E~67kMA z$v&e=G|2k6ME;x_)hj7D^dms_tb6%v7@vL7`Ud+5#VRIG-18hg04*h9Rq_3ZFfN%b&Z`r9Qs>aylE*+Cq^L@?0gBVGo~T z7I2lLCR7hO{Dob%mjPQkMzEpzf+WOKGio9!rfq8I$@pgnXaxKDJBwDb|H1k0pmRO^ zkS>3XWTd_g? zhkORfjLt|ycQp2|Yg=Bxzc!))*RGdR2 z|KMb;FZS(Bd6pR>5KxSm#M!w<%DGz5`-39!fO6@}oJLCm4Rl+zHtJ#`nEDOUQ}%7>3g1b%l$Fir-tuX|Nx?;_6VtAHGSt zr8vEnTjFV_-gx;UdvKXz=g8rgL%3J2<-mvNu*a$eF&0C~d}utD+ZO&YW|kJzmmtU= z0TPr#M9$m&<4r?Vo{yi+UIn1p>=tv`xpJb2?loRkgEImV$sA`Q~F-MCx%_BMzkOh3PgYTmPBtPk8t^3|Eg+z ze?OwPS5{7n%=eT*6qWGnL{ed!8my=jz*H?umsg$xj+UbZZ88M0Hu~g4l+ld6G?5 zDdI(2-?pqm?4nDNCf7oMwLfMr#7?e3A2}2?kdCqQN9wAR=4Ye3p=nZ6xeS^>8Nv>F z9`g*z#U>9NQc{-Lwn*_B=7aB)$$FJBB&hnoG`c-KhpfYPyBh2d-R}`#^9tzRpBque zE=*P`uz~-T|5#8q@eOb~#5{6GC|G-U;IFcs`1fj`N)gD=N)+U1UVr6)I$*&c1oVz2 zf82EOLiQ1q&>2$))IA{1X#4qGOfAyv6)L(i2#8N)hA)>2gIQnLJNeqX|QH~Ch5_Y?u(8lMB6wac;@k7ewlp@@V!Sa-tAh=&(g+U45ZPMpd zbts!b*43>0Bf$3l07YsH9r=OzCp$|17-0sK)R3>ze}?lqe4Qa_NngwUa^l~u=!KuB zvnPjuRvqv0*VJfcwicUfYLq_iFrb@c1e!G_DY-AQFq08r-dSlBR$N8QeqL{8r6i|b zm?r?xf8DXQkxW^(-DV{9NIx{l-v^!D z?Ck7-w9fe?1{8%Jc#!R3#YOlU{oED5T$+hE&QeW!1`yajLjG*cYoOs8DbnJQBST%U z>iY6*o95HQ_S7-q7WE2xtBrkRhVkN#=9Y8BNDoTL7TCON>b_HztDb9pM7h-|bn1>mb_XnuV}%G;avF z^V)o29sEW8XNxXvMsqvJpm*WaJG^Ee(>5{~F9)m5G{ zV#Nk8auhpqJWAf+mv2~%N0DrwV(EPM6zLncGk>SjVz;Uky+mKfZxP}%5op6XP~VYi zYpvaxijK97DO`*Vo)(57;hqQeWj7s2rjG!`j>~BU@6`I6;WOUgrTOnY5&xN{y}0hn z51{>4X^v{W7D|G#^Hr+j+MI{YG;bfJSifFvYM)Vn!=9I{faF-?f{-Nt6==FIiPR(2 zV%o6j%f)sKemO^^v!TRnT&^g+YwdQbMx;JW-!rDu46}2rBfD*1f0%6T3Ft(+ zTyNT+ZEq(=sG}H!D9M9_Xjgh`FGq(;3xqyT#AiuPXj z5)t?`B9IBVg=Ym&%!l7(oKL`Vgt|=n)QCQJp0>ss-|MY>7_WbEaK(}Yp>y?Jr>S+0 z8eAcp1x=1Hn$X{7?;3>wLs{`l&vd>Fokn--I!Mo8wlO^G9q{Ye%RDXb zH$KM04b;@%{K6wpnbmTcLzum&GvGt~jNlM)Ug6{8Y-NslB5m<~#)seXj)e|f-7_av zR`KbjI~ml+q2K&40q26#; z9zw$Q(?F8sXPN%Tj6ChN1@dwb{PB14%fD@{w^gs)=%F>Ewr18il|>Amq8p(kWk_?1 z7x1)@S4P!bT{Qx$=gOUFdH1m1O#d!Asx$~Uw)E`sw!vmlomgJVMrc+ zh(d*L6{wv=#p5e%(vp}vP&S&j(o+7hEkcE6WiN7Bv*2>qjj6F#^16U7;1B@cBw}G-2o0_ZU+ig2oe4Re%0$m+*{D!KbFBO*lgr z?%I6JRty(z6a;U{jpxa4;qpD{yy z?FY}#*5>a^*5Y9!jQC5#_nrnR1{bn_cBfM(E)i(O%e)a3u z`^K*XQ0^We>ZUpWx@PPF=fg~GKjiO=dD}0c=PYwO!f zfR%Q6&Fqdf`jusBs@Uswf5`HN3#7ukf3WW3=c?E!4@;Gb$k5m!_7-L$er*5fj<2Yp zn4N_9Ycv}URAvlREESm6Uer9SR7&X$A5--gvZFz#2gZ%HuiZszZsQa(Si zs}9+V7R$R@^70YF?6G03X?4q3BP4aR)`kvyMcUt81V77pgve~90#ANfo8Q>i^1AIK z{MQ3d5&_eL#N80ZGkb3Ntq>ra4gk)pJ7I2ThthwiiQ08c@2JMMb_ZJF$!(XRo4;Sx zy*C-X=T~`{ z!~`X-X;+wxKErb+w2;K{7joJ#0&aD$hM+f}6S!+Fr1-Szo))>6Tp@1@J<`Y@3X;8o zY-bgJAYwhMDm8eaJixccuoNh}b)b_SalNj_UOut%EmeE3OldOhS=L0zujQY!C?49; zab+jJfEAU*P%4*=Cqmj+5lKma5W$=j)5JGpK&bdIYa zan$ow^hu46$7vO zW!@O|RKfT#MTKX`?t37|3LO;4LHhVkiNob^DT+OZ$wqqjOI9HMvlH&JW}+ZSZSIzh zjJi60=Tf_pO{&aV)tOx#!Kluw-JcZzpjmGGog5>>o>7n-6&O(TdT zmMV$4K2BP{_ugZ!Fc98iQHhhEY-}m(+3lbIes>3CSsg%Y@65lXv>JgU-{4DRNfE2z zJZ0t$r%by5EnGZAIc&;zJWt$!TKqC+6XU*1_lZC4KWK$U@Z0{5JLMeKOL~NKU$Prm zE`*=?2ys{6?=jTX=hLhhEUYk~1Y>nbNl8skDl>H9r(@Yq5Qb+|4r2MC-MpT6lOerq zby??iWlz^9mx^`*9HW?dl}cIFbWVTCdilpuF&_>VJ7XD7JAE0&EI{&7=Yf 
[base85 binary patch data omitted: continuation of the preceding deleted image]
diff --git a/docs/src/examples/itensor_trace_figures/delta_itensor.png b/docs/src/examples/itensor_trace_figures/delta_itensor.png
deleted file mode 100644
index daf3dabe4ddab77585e2d52ce32082b5cf41dee3..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 25198
[base85 binary patch data omitted: deleted image delta_itensor.png, 25198 bytes]

diff --git a/docs/src/examples/mps_element.png b/docs/src/examples/mps_element.png
deleted file mode 100644
index 648069ffd92f2d97f7dffb0ca8575e7643d796ff..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 93910
[base85 binary patch data omitted: deleted image mps_element.png, 93910 bytes]
zE0M~^SxyWXao!;vQx5_|9WBGXFi~NODe1vBDe_TyG6WHxKs7z~pId(ve%53D7)jpt zjMeG_eV6BVWp_h(BP6g_cyrEY&s>@t{ruF7y>E(2-G_AKWL zo_EmV=Bnv;+um3f8zYKtVjk#76p}FU!${h+CrZ^6D4E-1=?RWqxk_%KHX%*J7+(B% zv3bb+5gcbyBf+4*!$r&Oa8M2e?V-(Dk{hwh59_ue;2T0fyNI+asgZ5S;lKNOkT=|K z=V_t8{bKxW2wV{SloAN=ck{mTgUBRxs1XSDwIiw98AP)6Kz@qYP% zbt^2@{yS!rfA>Y%tzLE;e)z_4|78SMp;FFzn^qOTHUIPCo1tu}e>t9?9^t(c5;Iq6 zpB??XFTxvq`kUv`vi=sfpQd(wwtfrSPc!mc*#5dV45ZAO2Zk>6(Y zuNMEe&-U|-{Px-Y>WBXJ*?yXl-vPp3LyF%4!cQ~uI~x6Kc=tOR{dq=yr#}9gBKV#9 z_-RIdr#^lP^na&5{+j9go%;AW&`0*o@6^Z7Gx9t2@l&9`u1WsCpZZwwM4?bbV*Z{T zmr*EL6w<`{y_4**(dSi}?%jva#>>XPZkF}w<$EgI*sJj^?*f~~VOeikzimgmdg+SE z9;C>!oqAx{xQAEs-~;;A$pvX*oek#Nb*IUX*ddp9pNymyqy(c*M%RnqD04z{9Q~`t z`Pp|_kAh^icI>1@nT^hCx%@17KiRNNZ#ZZ>1!|G?SO3}k3ViUUr-l1@E6h^--Lqv; zWjFq69{#%C6uDkrDA^rC&lLaVKYtwv2@1iK{q^cUUx_CpB}Ey;?~?l4dJrvYYX9Fp zl9!bg)j;Auyz6fs3-91n|8r8}pGWm$63n)2+2TcSb|&!W>HL=kLvBNgLa>Mb^YCPM zP*9+9W!YUx|88AbQQgqI`8UHekd;NnlO1Se|C`66wroMg{c_Mh54eGZgapOgaF6+K z%T*w9(3AT=5AV0g{bKcgi`+lU{cn-`#Wwo?tCnl9A=|cdpT^8rx2KA*J5*yTEP+-<{p}E8}n#9xVM2i@K@{+88HNLcY-t^V8ZkJVJEq*9_(d8!8h*;{5jYjl;Qry09EBAYot()I==YiiB ztWBu>>LpF#B@c{^SZsb<^)P(C-`$W13b3|u5;_vW}GFT%=yHPl$$yz8aO~u_-=}C!0}|`!$1}cQiY{<{Oaca zh4LcOMzJ$}wk`O+W@mKZI=G|r97o|F3U#~^fsf4xM-gxp)KBl(^l6;{# z@RfbTup6B32C#=Z0#$Z}0|IXmoB_vh`GCNcGY6zG&XJe?*nUf2G3?OKll0?#;+r{9_%oML!d!!bia^ zNd%rBb~MoFW#Z3{Jm7h87O+N!a0m;;{eiWcAA89*f<(vrwVeKVNJ>6-wu`KXFw zcO=aMHEpth>4!u(*-M;i-*MFTpWbEl49s4fP;TW$v?f$kmVLRr30CIYNaf?#XbIx5 zZM$^`y4mVgUF*n@IO}3pq?k zd<3a9Y=!Sm^Hss@&5A7Up&tU6qTrO{H^zl-^<9Gqxh4HJ%a)7J*xi9?(57V|&6w+Y zzBr(Lnn4hvdMB%NboDt5laq_yzGk$3&6e3o|H4vR9H0kH06Xe>Ui(HZO7^34E*wL4 zVv`-uLEK+UtmwHiADwY0J3Gj=PVqPfNQu$du3ZBqXK6g>B;w8;2Q$+ju)@9TH$!q;+GvN zC~eBQxd;xFH0)-3;0{>V(!d_(CjTBeJ*pk3I3DCXEYu|gN=MS`Uky%6sXWF6CtqOF zhF%%Y^*a6>Iix`Fj}} zZ+4mxBdNvvgJ!{IwPc)Lp;>55P`ok5-*&P=jilrV?O1;0Qa2puWHkRZ`tsfVuG15Q zlFwsVrLnOdOC!e~=Dyv=*kAC>UHcWB$|H@~yb7x$VKv8IeS8D}$s!>*Un@B?zdfyq zO*Jo56Sc0X5glh8$9_Hb{=g0HFyS^MLHQdVHsPT4xW+;4$ro77p=6IS+JiE`E+|k8 zM?uQhcdaD{Z}Ccrb>+^?so@@h!CIAKzSbjfzl+X2KAi?6&%R#~`DrqO7{R)J6Y_bm zd2J76OXQ2!Q?JAH435SCo$Q;XxHf^BTlFSH3HQ~xHXj{cHz_#v{YFeCuoGPSD$PhS z8(tQ-Hk)D(r(#`a`X=C7fUHVRENO(bL`W`1R3we zkMD;P+pCBa+l*WW!cxQD9Y&-XCZ8X~scA})+h z((A|Hde!SEj-B%71C=?h``#etN}M~O^wm69`dCy%7djRMdSxVt z<>{IP2SZwJ5(8vICv~b8W$xdK0uCle1K7b@=H#=$ixvh9TrX!!Cf&32Iuc4rQO``J zu9H1zT#PyDPmRjO%>W?$?7Gp0BmX)?_pU~R}5=ct5opPRtBD6i+YM0%Cr`9F9LTk2Fud-&tZ{AceB+Ko?wN zpG@d0;Yms%L7n#n^(5LG1d=o0w+X2mrXJmJ zPE06(f>+8Mm`2hG8I9)5F+>3-r&yJ9No(mQtK9yXITN?sIz`*f671m!wY<`~5ZNrs zk0KpbBX9;MPvHuDY+k<{gThs{LLhGnS0w-U0Fa0qK=fNO8xwhncM-nN{<8Wlr9;QN{B>q-X{$fv)(ig4X3-HXuC)XoW zv(np!76Q-rRUWiEr!TdQ`rzaBQ>)E7@W(QZNKvCUox0Sh_d?6Cm(`bG7HZ?K%s*RP z$;fLp&_CM^s+C6~Ib|#A<|_&=g{rYXcJLi3bXsN-Hz8?SPU zr2gd0xut4#k4LNZBNj9Z3Hj%V0tV6&bx}zO3*bn1zOa|&UQ55Wrk7bMq{s%eDx7Z` zgPHzn4uI=po!@M_;#@~lkG5v>27I6__p6!;nT zcC{r8sOP0hhTN>k`7C2w?ZK>37x!Gw%z#F`)VY{s~~r3pXiOa<&@Um*3mPuKDo~jSJVG zTno=hqN7|`h-l-e=aCC__Xm$pDi{&Qb*(3*tM^FiuTbJ~sf zTILyn_@i8vA=f<1joo7ccGF0xmfF!m?Q@pI205C&=&^KB0awvvhB$Dc-0~=2iYx-t ziGgE1Mes==EiwmEX`YU@C`RBnBwpi`l9GFfl3mSgAKl{hiDJ$H#X1U-!GB%Q6*)-i znvV2HVn`Esr5E!%xBV1;;vu;lL=@4K`*BUSl=d|&Kr=WG#xwf$k_+DurSP)X=z_5$sHI6_U7wmfdeMNlHP6O5)UqJZdVy&WdlBOj0#6?gvyuZPYm4jViA~NUt z<(&9HC{?<#F$rp#DOHG3PLJ6hLYLu?Qav)1W{n(*RhkD}7LRrqTCKE*fZ+L+)jc?l zpPlnTNUNK@nAbGiSc6*1??5Pdw3R7K7df%sUe3^0T`&STc(|E#Ph>jx%$%vzvA-l- zdjchJWfcMh9?TtPccz$K(|7J z%++vNJmg3Y!2JKP_ulbT_y7O+A(fU=D$$9!C_9mgjLIl`&rn3R;-G9-%hs@Egkz8F z5E@q5#|oiB9GmR~_-|hDP&((Dtuh(-t#{F@B+#e7Lj(uX2sH(g* 
z1b5hcV_ArQC*mLA`eA`R4c{yDdtktqGVaHmq}SU@;QVUD2F-4Op&c%B?xhQ*XNrH(icJF!;`BI4#Ymxq`hWmF zAwIrUXBd8CFthU#sCW3MYqxHpI>(5$t>smlA2h9k^dpGrRih4GYYT=~Zk`~+(~YOD z$&>9X-mmhWC%5rZ9u<9=HE0kFXHj={@4=MQEKg|#*p1RF8JMlXWX;&-5O4q4sBreO zD2$lw2pYkj7|IsglMRAH31B7idunbDO5HUDt1lnzwsx;+cKBXf>@K@W*5Jpqf;f|u zT={$36V<<9Wmw85GRC>nl}vMUF=7(KDfD%iCz(euu`dk-JVBi}Ev*h|6YyJnew97; zZ!x<*#Oy2ie&HJhxFtU^*$^yqL~77b{Ngg!LA9$}wabuV>`oPfeqMbISHU$p9cD(w z*>aK3$1#n(j=0du&LLhGNBn+f?z3NRf8cMwW^2UEX-Kycx`Vkb(G9d2I3k}}`pni^ z15S~qk~@5H-AyWO?5O#Ld2ymVhEZ}YGpmyBOe!Cw_=za?@0cdncbQdlgajdnItA|1 z#nzu7CsY;>wx?`(?LC%vmz3|e2`@so{1okC*(dhYcv_-fa`}}nc5PZGR{KwgUj269 z5X$}L&Yv6$wH7T?!aB6JQICh%$VgsexCtV3jyc^eVo!T?6ZZ9N#DSMAba#a;n)8&F}glKbRgoW~~ zRw5W=8s^G~mw+RFk%p`0umo;E(f)d^@f~=SL`r8>_9il0u(OHa3uEClHUIblT$1<3#5a=SG0ONRSPY$;~XAvAXt^=-<(C_SbV=9Ef${9d-hnDADEE=Z6tnBgZt2 zQx7H6T=F);6lKrIT01}bvI)@i7={bqYB@?W0YfYvmR)ULf>UnjW;7~5O#DIk7u%XzvMBsi%6njX0@RVtUyZuu5xj-sIBg}=8`s}FnaKSsTc z4SP?hj=%Y44H+k-PiL&@qx~rNl@oU22av{^BZPM{XB~(xzB{?c#Ct&j?$aeQ-6E3{ zRQ`@tzT5FEB0^caD*>@lFG?L<1zal_L6g~9rW8)%}?f3N01i&|Om@m3*PBhBNF`SrkcESUjnvt*V2x)(c-Xh>vvRJzXHQ^5b zFO=m)1H6QiT%@*p=GL2&!XiCYjsv_UF1S#s#9L`+MQ&Chap}2W4CG9j=Pdq* zMA8a>gh!Uh+Fx^SWBQBQ2rv|Rg$q;S0QV@)m8Py!V}?>R7pIId)q}e+3@$TD;dd&J zs8D>~SpyR&BiA5EJY+RZs;~pC^XM*0O#kaUs(u@JkUoXM1GDtm`pn|F&PO==9~FEF z&83-^!$dO{L2?cAT{}+JaqVkv%>;=rqrKG4&#WPhFzOQO+!t3oZyvFy4+K!99)cg5NY95X32^~bY2)^z`T87f&(>j{RcEiU zr~<@wxlq|TWNBU=5h`KWBypIlSe4Uf|Ba^^fl!6vTLoFrE4;%cuaJy2G1iBh{Ywk6 zy*+<*&2RFmsn@_E1nv>)_PDq~-6j!cx(4dHD1=xLH62 zcVT=Jhvs&XxjREra#2IeCwL)wo= zWtjM5;V%jpVL!<5l;FJCh(Gnd3#qc5{0XtzP3AYnI_sK3u7s;6xP(?A6_|L6zhXeV z59&>EL#|mA)c`d%C9{qD$Xhts0#u(EnjZ72T(vLjk%Pl0q})q&Ve)OPilOP>gpR}B zR3?oP$go!scHvnPW&+qg7HczEQ0LUO(DwsfBgJ`fiX5}J2P3|aM6fx*`}5P+clT@P zoBz$@08ad*m{sZc#zk8{;8`C}up{g`xCYQPTSC88kd30wc3t0;bals+xU9}qi-{v1 zntUalCkMFlKQpZ&F*2uk$C5MappzSxQMxo7TT?#NpiLGOdw+RS&etZBpCBrD?=WDu zR_&uOIwR@x$5xehFQoZm#JAhko@rk@Ty z_ip{VlyS4Eb-~owKx$b)A$B7{jqJqTRH1+3%HOReJa>^&uF>@L>F+*YP?-;(c|Q9oE@7mb1*BB)5+ zYAVd)d=Imwz4K=)=^-rJhDlkaDump%844sF^bC9Ff0r`FV&oxm6C{fy@s$+I#Bli% zi8^`H^Z8UIM~p*Xc8(`>S?zF7FweHqSb)%6rs`B2_pCU3#m)Q3lJ@?AO8uwcAmaB%0F9lWe`)t6c@y!aN zCAU=qN_qgiBtR9&To2X#Tsy%Z5g@A%H`w756?G5_P{y`6vqX%T>lO@kinmxDpn4iD z!C9k{oMkUJAuLbaPy}1>ly0NkrmgoCltpi#!eolbpnhgCH!@F2MP(iKqpzFg&$Me! 
zj9=+CwjJV`P0x?A!4C$U5_^DYgIGSdUT`G-$gLMygd2Oh^2_(j!~-PxYRIvqqfiIe zmwy(hRcx3 zeD1DMzrd5Q*bCA8r#T7An5b!hi~=LqkWw-BOa96gjT-pX5O~j<<1v)vFFy-{-l;NsX_!wL8oTR*4xlMMNL_l%w%tR7#mMt_*A@pAet|eI zQTc~>@Xzlg*P-P{2RmDA~`b&-OW!IWBATU; zF91}?IR>|B&mQtxuyRB+<1nvS+V0^Tn>ZhrrNP+zz0 z5`S#y`ygH2pntHbgufe?C&+K+gj=(E-6W~Ta~5LCcpw}=nss69Gk8A&yj+jDns-Ar zz>JP#uEKZXc*<5z>w?IqR-&~mTW{-rl?Z*P2C=nWuoe#kOp5?UR2P?w$P{C+-4b1N zi>$`t=K!V4ssHRKwXBh%Q9|Z^PSjZMBo`zQrrs=cbXo#XFu(x=;mq_iwanIT`#nCV z*f@Dp)}G05vwdkX&otDY=IbS@Q0U{_3Zu&#tl(rwhkUK>vjdZXJw8 zDHw}OoT~-ZFrn%Rk~JScTg11C**J6ttc6~jJ@ZD4>S0EX2qnn+q>Rb)HC(VQO1D(C z;t2HV60GwVSsF?S*dM2AxO3Y0WR^Tiu|AutSw6&w9d83W7Kyy%XakC(UZ?&Tfsrs= zh%>O~h~Kf1IH@Z88(!4JtWhzAAlSR}BX82}R)}$-@Q3}igb+|qsIw~xNG8gJJfb~)^GaPWb$7`(s2l_uXv2X7 zF=ByTku*_+?GYpGSK!u6QahJ{slnqXe&s6flYBBWPG^AiOgM| zxAnpm3}YFY`@$++uit`}CX;g;dk*hGk&t#09zvn51ZMc4PAlIAIuI`V6~I}Urev*D z-_qGOet>`41moG}Du$Yc*4E}z>_F7sC8sG_ZEtlG6`6bG8A;hZX9(&OLa9W_8Jh~V zQq!P>!IqE`4b?z%Y}>ceb7i^DVFZdoLooZ$WWUwXeIAd|WX6LHFH94{lyL&q5Q3Qk z_LX{9mRJcRbrZ4-s}5OaoW&?68=khBY}L@md$EQm4e8C6dWG$iP8vgdTh;1b0cutYT1gDGFrn zZxAOx?Bx-8^{}_1%v2AISL_L2F-6W&x5dsB1;fYq$a<;__$!v zA|!f|#`#d{Tsgz~cbXwb#QXOeWMGJ{gC| z+%vp`7W$przumSynP(TqlO@vUMu~APOq#qF=u0G8DSZr z@-#98?0Iu*fhmRw6`{j~vms3l{ zhx)yIRc^sMC3Gs-PiYfLsk#hE5o=vn_H3A?)k|N2PoV&JNxtqD*6t&VDa2nH_hh3S ztloTN{S_IuvRCGgqmhBFtM)Sr9<)yxnu9`ui3t(hB>)fOBZOAh5KZd-wms}ZrBPIi z***s?G{HzWLv2gx6$_qWbx4W`go}yk>h;&v>wtmt0}CgF>xLq%2ucq>JleyNa_fy3 zUiw!Z3DzU&(0k8iRBxHRR&-+!RaWFsg{vc*rfj);g7EGPx!j&qu`S#*GBV{$19&5- zh{_e@bMZ8{GGQ9kvZnjCqGW>N3vQwe3xoXs&?<55&xZjv|KsQ#JU^9ViU2HYsw zrsL}A;@cOAiIM7-4jBcaP{cRw0{v*MDtJMmF<|PT8d4#JLHRObP(9jxL|LPP!!D|7 zYuteg3BD6nUTpI~LmG)~?&G74u8<6-q<4l1R5Ea*6u{K)a^Q+! z{dmR{ZxlXVpaPoLccb}KhYv6a^o4YYqi4)JbkQF1oNw$ds^nkfc{6#biO6gF4knqZ z=BA(MDZkQ>@ZeoexcMbsn|qLU%Vh1Axq*s>zf$Yh$tsx~c26Xqumr!Zwp|1%V(mkh zd?p$p#&;~~gp_VKrkrq!ulCes@2?6syo_!z`6g@%ZU3=MLx;)HT-C?e01cQ+gbJes z+%X;N9J#vs?4feMJC@XPGNFG-A%x8!soAsrG--epRdSzq8 zgv~1yp!z|Ch=V7k3sDF3U>I?%C0Nlj9USRc7Zi3N7D-J=fPBi3Zzg}7BrH}aw`-iA z0DG!R?zhQrAAVUNNy0kRrnrh_Wk2YeI(2xIyn5NWpNo2DD1bapY1@!IeJ4=b5M@s- zORmVKZ{ZpeFPBI}$Q7GN#3ooZn#`S}Yt1k3YL|rso~6_Wa_BomNY!0q+_-uCq7bB7+Z@Exe|6{+V zWnL=*kN_22OVvzYkYuXE4=;lUuD;cyUj9qck+DjNLz)w^jX%J**GZ;S&1b87jZ_Tv z5%x0|2b}*AN~pt@W*$700ztQVS5?a--Uy1WR{;Q1ApY>40EB)K8Bm%Uew?a7+jEFM zN-S`WoYuD|O6C*%EmCzw6Gr@54t*D#4Te|aO^5QhQOJ3~-kKp#*3U1Yrbm>d8GwFI zm4M09I>nwaJTbm|H?^v#)^2GT3Ud^0pl1MY@g(WPmL(?4zTRF+=W^M@JB)IA z8?fs6kEucd32%m~tR4=9?;I#v1&s7(9Ri%J0EUm%sqDR>)PTZJlj5_#{XJ#Bh8P3| zmT?@{6$QzaztqyvN z%7Xw7M#c(`f%9M!lsy4Nj@U75iI@CzD0^A1_=&rmyow4(pmsqA)!_~ngF?R{N)!y# z%;j05(Gl4!9N+nlMPAMeX7V&N*x^pt-<)05LX3=D{0@d)4(yan;tLyTBEGUL^DJf( zNDRiR}K7S|1*E-m$Y^V++MiB($kQ|z_kX<#u zBDQj;5}8~Yy&R6rpNAl7_x$c)L(Dz}8|B)un_u^yd=)8nfb!Hb*vJy=aevbGoS{HL z+zfI>mDFq_VkJeE(nGW*FXm5MM7t>aUQ1UqxM^uxC7#|lR#WgcMBg>cz;X&GML}x( zg0hBmi=l1K5HGWJ>A(I4k-1D0?=?$fPbjDHaOJfqKTO0e&H_6&+)h(N@Qx2x{uPK* zY?@Ba(VnNDNBkLJrFQeL<<`sYE$M$IlYm_4%K0&#Hp^Yr=J2 zxCKZ!z#C!Hc@~!|Lfzx(Uo#}jIr_lge+scFM-~F8N0dszZzckJFx~i6?CKJAjA!yp zUyh146KKnOok)N<>?Ka4V;L3BwW`)34H0qeMTv4=D=-7jyXrXTl@ES{f*Y4m9-pPY zz4CVY@8=C>)&a~`x4-#79l>iTY&e{EvBSfX-hj;qBISGoD=KUh=3iO>)kYAAUdenj zEiIsO8Et4yZ!UBxIfC(QtcF=L+F1s?eC1&415u3Nz3gOT*h7v%Kx)NpwUrEg zNXQ+Zz0ccZo=i|<;{A)d{{KR5#bR7NLS2O<8QR&K&hw|Fe@IV_OE%ZdX<9}a-ng4x zOFengzOP(iz*2VJcXRA5{hyRL*7E(4ADVaztC@UmsyvrEq3UZD;NJj@H8zRFVTgjB z8}!@Zvs=}5@IKr{NrVh;OP_`wi21Z`$wB5MhXBFufmoPv<|NQlYc;?yi{+8 zG}d+Ouy+TKf$BOcf|Z)CG`t=J3lQ~vnI^7Kh~1A8GYVr)I)*0A=DppkU}+;;PYCWc zY;%XKIAe5@R`+OJgOG|r^QSQ)uQxKa^RW=I(jzyCfT_ 
zubtoPx{UaQ!B*phWwYoJ%~-}@Ytx%vVFv`YwHYNXtpS7TsAZd4^}9W04LZh!@wHUk`hI)z@^dtveYcDCMm+$5P znzVjiw{X6&0%S8ic zQ*nn9g`dsQYy40m(2a}|D2rRJn5jpcSgFUYUl`)Pp+KJh!)`uRVQ%BSnx$hx^n8@6 z9DouV{^AUN9|TA6v@TY$NYXyfwyWSQSoDj&L)W>-cAJ+;-EylO-KLx6_ZIM}_bGAv zk3Spw;v2@q{%~(+!`_--;L4X49aLGf*fUEQLG#V7PMtSX^_q}#c1!W=%$OHJd;)YGG$s|uM3G~7KU4#@p&>b0oWHLR>Mz&+gBt^s z_9qi_NS?WoeG0L+cQ!< zMjW(R&Dc?1JqZc)Ir^sFJib&0EcR8_HSx5no}~TF@l}*-q4#2hSlU8*A%kp2^L4&u zZ@Uwq86qP1d4vtZ*gM`$@3S5`;U*6TOHyFIJKlP6N3t{L!11W}PKQhVspion1w>SR z7$M}0ZJxJ=+$3@*84j04TS6cA)$oBcvovl7)u4@_=3X0moXObN+U`-QM z$gJeDJIEj<`aCAH6N8JeTJb75i1B3n1|>k!_6}a z2~cglhSQEC$J}W`6nehQGcI?V>biQwb+9m{OY4NILt>pP6t$Io-}o)J({<}RPRz|W z382Dno520;d#!q}%{PQL{RJ+XWv?{Q3r3Y~J01}FgqB4_B6xM(C{7O(<8@)GPxizO z3dLcrIy%e(OoUKvv!{OLS-;T3jTUzBS5X;i)!MVvh5~+2oGu<-i|Ty(81ohpLAfrf z(#3n<@;5RzC_J*6+42K=N=-@yR18T+k$M3*_hAO*w)ivYGSmh;d!Nf+;H_A%o=CN6 zg5tTR% zwQh75<{d-h-GaD*M`B^QY!>3nHh>uCIGC@AbGmsCXQmG%M6IN65Nagbr%rVlKCr?S zRn{_2>00vykz>{&gCzij)!}-jTNJS4kGXXMUGcyx${o&Q3d{nu(-`ZASTrBrU%4~! zKlL9>q}CzE;Om;jmg<4G!>vmfQ`!omuvstO0?$m{-s_ZS$2N6sQ+&3I-{*ai>o9~n zI$KdZ_s*@5c2B!H!+cNp3u!mLg&FU`)QT*R8eNPBPcO7flRwt4$i}Z{HwH1_I%*G2 zb)>7lhe3#X$+@Xg1!_An*C2W{1s6Zow0ubKf^l*oa3OY{HOdUpHcQ4q_$n|eq+-8? zo@6*-x}g#vX~zca?vRi)$F1yk`&Em7@Hfjow+r55sdMYi*&38$Q;GCHp=lnnPY~a3 zyb>^bp;ogoY)LR7i(KDr)vjoLZ8?W}l6QCp=BP>kLSUP|^J+UmMN$s&Z?sFtY>zVA zI~kO_rJwM={T@CQZeZ$;%8v9N$0FDG>iAW(A;QlQ^-EL1{SaB5X(qxva1A<`I$})S zJ1|u1PnAuZg^g4~#OZXi`Gry_WpeMbMl5$NY$DWxXjE=WVdK&rPdn3;8NUDV@UUL> zi|>N~hlIH3#ae!EK)!Hm0$qndtD>RmB&zS^3+;{-hOZfijrcMNUKGJn#?cWAR29*H zpNcL=()C6!93b;t7{%qTIXMisrjfZPCxoY1oPUT2c^$q7yYD0ozL0h+kW%aeyeV@q zTdl#bu?e!OoQ})VdlA}CfQU=AsIaQHYNGS~5s~i`?h7RZ9hNjeKa}i?0pL7^(6$av zStB9K&4nR0L8kKQ7w}fgtVHu8#e2&)u2@%!;0l#A^Tp_vDWQT?vW)^@;U? zv{^?Z*PCvjSy?ehwlcli|1EGqOWSqf1A}o{A5(c#3SJp=9oRp$?|HlL{Rn+jmRMpS z;I;LTW=xu6RRLG@0X|7wB4F`+M8K;?>yx!8hk-ZAEm&}33uf$W3jc6v=B0gOcEdi= zD0&y@lfgSx$x0&6jn~cJKo{Y3HU0%ruN*-!Wdj))njnX`-$f7{iBR1s)J2!xDR?th zg;nytK(=_*I@S`#8-cBx0W)p4MBo*igI&3L`O$9T%y}vxN{b!aYH;2M#pe>tpg zQZvSWcrGWPwVV*Vq{W6&Y}kUbF5M-k<7gl?WuRR?@F%kk60Lll1E?pv*-)=Q{-}gQ zeL7CwngSyovd1e-LJvS%3)TQ>JF^PQDeGj)NSb<)#3b?p#s8f-~6c_X9NO8Cn2xPn8MR8>o810vk0Hfc%gsSLblZv(S1@oCAlOY zU(}GUmE6~yf+%IW=j*aiCY=73TI%T2*yem?Ex5bQaOB%9!v2}7p+Wq$L`{9 zih$aj0G5z{O)2k4kU@tpSmaZ3P+gUqQB9Hl-5Mx{8t0|ie{W#FGNWYbSeHQF`c+Gn zafifm$`v5Qu;)*7t7m^W#&Eo6I_+_-rQ9M0HxgCeD^Fg8!WI6n0+M` za4Zr^B2ja4uHZOhSSVt*%mRF!xxkqv`xZEu&y1~?b-SeTnZPl1xbE8)a7rtX_G1Y! 
z6pb^@R|kRhZYKz_Zw-~#fDLL#%N~pDZnPkC-)LRvW3c{v2W$13YrrXq)DS>6&qYwP zT{<81>4j~5*jkT3j?#+iCL?F!inh-Bvcd+cIpjco;>GPtf?wUY5K#YVd(K|kQ;c=M zbu>*$PLRIe8N566l_mRES~h^Nq7#sna?)+z(X36xcz{aszONa7p~ZgWrI%&?gaV9XE;CL1@MaEo3*-C+*|9(| z1;$F*2ZPn6Zyz2bR2R20H+NXybdU0q((4>240o!_`Y|^asnOk zKTM`kDMaF~Ipkg@iHTH=_rnKjh(*2$0980_L8>9Ccy%5mkkbl%==oi9fmX`PB{77@4$W zBD(NyZ+87UfD?Jcy#Ln2<&40{c4ISAbMNH()*iR@)m(^d;-LpaP=l2>_9q|>@(?^3 zXAI*{!+H#{d1RKY2_is}2n&|x=mP`O0V!xkw5nNJA@*J6Ewd=IF9?3gR${{qymVoFtZ(8n2 z%|=d!X9*Vf@^9C4(j^v&QPeQa9IEf;YFczd#Tu|31;rsI;Xl9g(dICr)@2DhfB7i{ z2b$<`l%7Y7FGYD`jvQbqM2E6v-TXl1TJ`wN$fYB!X?lA?l;?&I;@Jw=RjNf%A4IbW zD620qJJlq901OucP$O1jRqz^jx_vPo2?y>%=^=-z?Y*KpPOvg?1coJFZx&XFVs}PO z+NcDqE)NsQDZuW7(RH!gO}n$C_pY4l0>*) zXX|*YjtV>S9EN&_7~c*Qr*}~H4c+#qs$LL2LdJ!6^+3i23q%hh&;W)`QSRewPJ>_% z!gPD-)U<5r)w9;w9OY#@3{1UgXBpmkEnJvOJdJvR?MFZVLJ^Nw0DGFNVn5~Pa(+7M zE$$o}A#NOqRc*$$@KR_~X@+Pn3|j8n_xYd}am}+1s7JJHp@N6-*K=9IlaXIKIMBK6 zbt38tDmLcxi><7w(w*4%&Korr@NrxKH`w<9gV-(M&_UCTP~lY7^6{;AMmz>sdD5Zd2&^Z-3-G$njKPy_`f6OZdmb z{p;gNQgC^W2@h^$y8qiMdAxvryZ-p&s+%~un_uDg@3k;9ViW_SY&Qi_e|$ju>I_v-IYEk~z zS^qB%{g=J|Z|wMgU9ElpzlmEM4K3+UwHeNtd>-smlaSu9ht5wXCND`8u z6#~C0k)R_O z{^>0Khn)P34&)TBT{FivgcdeMp<*)dkhwQ=98YdIjDM<|kscAMTt1LY=X6F$S~($4hlc_1Kwo`2F6c;y)^c`oRjAq$Q@3){7s>!W}phoI?6* zo8GqZCH&9F;#8!--9!yuQ{7UlVT1fl`ab|Hx}hg&u%B-sy^JMnfXFt|f1)2I*E@@M zohtOk-FW*@<_Y624@P>%*Tqh+e6~B>xElrsRPz#9 zk>UZ#!=%ng6LEMJzt85KBus4c895LCbKqtRX&3oR;9U|X4EFwE(_>^nF?L9~dy`VG z4UqEt&F*gEsc$@`yI35c2PQMeT3kuT&mR5D*-e7Aq!pmtmvkAP#O^a&I4QqxJP+F_ zSkYSv+i3V{AHU> z0>DGc@DD|*4o;dI@3LuQ<Gv-$(ui3g?8wij>blLVqX%dGGpv|E{02OoXyQ2 zkARbu>s9lP^mKI~@EjB7*qpQJHj;jU5%boG^er(paP5Y&btm4DUgY<}^W)Je8dcv* zy040WySPgS9=%NZg5N)g#lcIO1V#}^3X&e-a8i=a|5;7@)FIe9UkT>|(o517!}e5W zlogYX@utUD1QUtLo5h{k*gB7GV9p!GBJs(j*WUC)E<3D%8q;>tH}I#zlN*oy+4h*D zMF?LVPbMwG8nlbQ?fU(4r2ByJFh+k@wq%o@b|+eHxocFUHzA#Guxs`3nl1OXn;zRZ zYhwS{-OV5W2_MIm77CFJY71I&K_X?{rcaTcD~1)8%FsZll0^n@F>^zWd7m9`%G}~X}_NPoeCQd*JuU* zGWNeHYX9?^3rd0Y96VP{^3K1PoQymY;HMbd7u_V(ewV{eVx&d1 zfj#*2G3ifV9Yc*iZIEe7(l-~AgUj0Tpy`hynMoj?jyzY?5&Ycq}~6gQ~JH1`%I976WA=O9R|lstA53WAtNaKmomV$rQW06VGbijWZY=5M zzfl9&73v_QB9RBoiGq|`h`B52?8(-ci1}jLHl*1qN`-w^4@1YL+lPlei z+4w3C<9!Rzuh0yAd(R$GVSh=pqK=apsGa98m!&r0D{Bz_p zFQXk&bWk8IYxqti3)R870vy)tgm%l3iXyv_VE=R9JrQ>&9;g$q(i4TE{;)-443I1K21y#WBx? zH&p(CSrG*s@n2e>P@!rcfQnYnUDLPjWXW#pYrpoOM!*aeOgA0i|1rdMEO5{YmztY5 z_Mqs&TPW^_g7k6b43L--hX+xusagDhT;TUxyIaT&0dF&t%zq;`ReDPrj!U}FAh{9PygQ(b33`|GHf+&g=4b;RHr}^!`v)e(KeeELNR=4}{f5X?ft)N-it=9o+E?ttr$<|xQ0G1YNt zHGW77l*g8!u45S_FG&2$!_%%qhmI6GipAKYVC5E3nsk}4jQf&<3%=OY(8yqN)hij? 
zR&tS8onDD!p1q0O{5sIEI!4<Q%OaBx z=QBzFqaDLP%YB!i@LTwUxTaGwwSH_L+0LgsVs6_T75-eFpNK}?4mQQD5aPzXr1)OU zLwJWoP>i}tkiZl)OW;S>Bs-MYK`MK;6|PSol|DJ6t0*X;-e>Vzo*Rom?Nv7VGC@cR zus8&B@?cAQtqkS@@K8ErS1A1#fxV++Acw3$ClDr7YI`PnN}PPE@^gPe^NSp+=5}0W zlBr@#eDN&h-b8T8>&=GJ7ZP-gVNh=!$(w!JfoMude^X{k>SvpDz%gM0durOVx>)nE z&;C6O7kBa2w#FkxkA39Q?RZ#nIPmX#vZw2rOiA&6> zl1`sMEL$1w9a5=Potr`O8oF?y#*k#L#Mq`wbv+Go7TOWr7&6i+#ZzcYYrJrBjxX@T z6@r7%vzpa(!4mC}YO4BujPZ**l@_n;px(`pI%fAXgKH8x^Mt$M^*S&PwLQnMZxD%q z=KdqyEOEwiP}&O#do%rsg9sgOA8Aq3Jx$lA^&RC-tUV_NjMu8>e?mWRL^xY$JoOd~ z6*Mm+Df0D^*Do}X{+;)``Fc*>;~2)j9|-_KI;D(nX?7%J+a9{WN)Qtla8Pn~z=9+b z5vcoeK)%rV_>);ke1$61<`bLDxCSZW0*XW*cvX`(BFzRH#-n-H8?f8I`C0E0lBbz_&%jzXVeIU4uv)2&h;vT-wTEp7f1C47bArdWKKI8_IEsnD15> z@BEHpkcz&_AN8oQ1BoDW3ETi?+7%=2Dmw_q;F&>8=`}A`pLso}8jx~~GT8APsa*H7psvwhin7o*Q}iFJ;g42>$icN~ zH9+OzFD_^Ugw=?D=l5>iah=&NUK!b@W*`iI{{BKK%mrcXAMkPP(OVtX7aUh?;1E50;oNOGbzTtDrYpHxXtxu^8Lf& zaN}a7EtuitE#YS{jII+>`})8rUn|FUoaTmxPkx}Q9Uk-XzS4yHPvb$Ak7*Uepd_lq z+v2LZThetvYCA|BIu?1LeRD~r2x!~VQP~iM;9-{%`bGY2Ga@<1#dqoBXmIFxJj1 z;}~I?&t@h`@w4{Shusphy6H^De$hC>Rfz!X5iP_Ew3xGPs%K4sB*^8KfH_9&rn+ZL z3ZftFMJl+p!F#>ZMAL(iAw=N}5rQKQLH=#yecG*ol**ornb4FK-*0qNknLmaMg#*_ z_AkL zdQi%G;l>`a5DF%s{qSL{uJLkOPQ7%NtBaLaOE=t6TW|NsjB8AqY{7n%KJRT{Z}G0H zO9$1@cIRWcR2`^l zi3pAPG#8Txf(^9(*6d^^Ke9PQqf>z-+i)&IeFNg-FLj0|UC@a6O_xFoyd>l_X@)o0GmxB~Ub*T%=?@Iy{j zsu3ZKVSK=}!1H%KhR$pBdGPL)0lR}0yjjteGD9u}3hTngG-S5rX99-@frO*MM9q{@`xOoCJOy`Wc z3WZBC8b1&L0Su(ytdJ!xzT(@lz}q#w*KOglcgK$s$JtLJ2T-?EIIuXoehKr!J<*w7 z(KKIryJatD=R8u>j&-s^3aD)^nfc{n9pD=xpg-IVkMaV_P_OGTV7-~>8~r>6;3W?l z7@IjCo`G)Tl&;DW1I7~7BcKSU^j4lz0n{v4@Gal&**z5yBH-Fn#095-AXyt`iO6j*h9)pd5Lbfb=Me0skZQDTv#(eyE{@xy!%4O#Ep3ZkSfhWwM@!&4eM zf5dxUhN7|DiCN?_G^J>0gE|#o1W3!jF|JDu!MHE@27an+4Biep?^?}Br!-i_%*H|k zw&f6ePcMGk60;>r8TH2os$S@GH!_|+P%)9Xz`1#t!#}(lfBIWM0D=v=r)-Qqu_ekP z@_S+T9;J22NrPnbgo>tkZy(&E>6g7*tX6X!jI-p~vJY;R*Tq-bt6ADS{9C32gy%FX z0;n&Nair%e3qt(ab>`I@&^(MUTOG4L*qh07Z{d@%upKm;#fs9VQbp`m=e@STnK>F?9 zM}Flm@P?@i3;9Pi<-kW2e}=fIi+@u8b3W}+p}`ln1b?A%&DW`|W6&4FBNNv<>|uE! 
zrW4~%J1T_O?m(Pw{q7)9Xk1|{q5gOwt>yG}R$oNieA87qx?X*X7rx?0t3$cu%{tF{ z8Ry_m9j4niGfU<^ky9cy|2aKY8R9XmBQ`Qz&Dn!`1^9Myj5fh^#f2KP?;S_h7bz>R z`5rhpkveL#Bh>;Z?m6f(C@%ILu$SIb9$spfU;jodJXitoYy1mK(H*LYVUuiCZ>@U}}(ncmB zYq#WXjD(^rixEiAjlDY3IkpGm30l2+JV!sL^xu=l9!Oq9{r}3v(rp#bwqS$U%fjIs zM#@kaP)(y`=`~RcjhtDXAlF+(VELc+&MJnV85(stKXj9V{4|dlK&~r`Z)39_Xn9s_ znEYU2+-!nChc}klg*kauH(z<1dp5tJJ;W=!I$~Brz1Ll)KolntXyLi9iiXFkx7`7K zjf1*_KnIrrXn`X|g9O@wt6qhQLQ-#O)9H=qI)vI2*TI`pcr6U#1f4JX<8K_(7z8Xd zy+wOMIFQ&XaIPPJ8p}v^B*UaTNbN=t4}zl2=6 zlX^?S!{j*IE>Re(k`T(Mwb8=v0>;uSxf%8Dnr=xM9^A+Ky7pd}DMhNRh4O@`Bd?40GxD; zwnCoUug9*_#%LT?iE&(@3D&&S2U2>{NU8Rt#ZJVAb_5(rg(bQB!#s%halEMr9Rnap z&;{~H!~8ypTys#hsZHg`<3(B6yIuIXJk|CxxAkWoqD=U8;K4sLI_GJAIGDD-2mE~Y zb@%r!O=qXpv}``I>~hF6bpk#)w_`L2XY+?CFm7qd7$*I<`WD?fcmQVMO6`<+VK(aAXaL+r6Nc%3FK3n z*(`nD6+yF2%IJ0dMChPAi&}cJ@mvN^xC_d^+VB1}2QXwoQUA!@=yHrPr`x`qHmMMB z$lNfKCwG{C?uK@#U>V=viYP?Sg~&NEw43363zAjH7jR;EM*%k6QlPo41+xZ8Q#jSR zukKB@3uij{94h82iw5v*rXt{Mb!zV_ePZ9{!c^k2JG3eW#C>}JW>@$BOxF(2ez$N=r7}<5FM-I6K~Z8n7UnS z-_~)Vo-KjuC>@Q|_N>1yeM{?^tL^E9)R6%FAsQ`dX3DctlFyXGws&UtUsAfyo_6vq zZAa9_#=DskPs9Te;#{#b3@CBxE0)E_J22JrCJn*6F$1kguhEFwYhw9JHjIWa(&Dfg zF;X6S4>3RRKW>db+|^NY;UE8?~)Wd|Uuo+8ouWf2n#Ji>O&kQd2}n1Dtd|RIIsY zm-pF|lkVw2{HyZplhmAq;QhTENH@(}YogEy6nN!AbnUgQ)G9Jj=Pc(!{Nw{os~eyt z$(EKuoNJO|h(vR~aditW)LJ__wGHx@A+L?ZpVJ4y$WF(lP%yr}FHPom!EqygU*-rd zI5~^`qO~qC)SRe*#zs!l(OwLNluS=Aa&r&EFe;a>&iMszuz-qu#a3=C0%Uq}`3?B0 zbVsqw(Tb|~{!oL8sSu5?@*RS}=O$-vEtfglZE)v!P}xZxdX|-NFFwJ%jsc>p2 zk7NUYh=+>YiHU5G`31B>Q^|Yz&4KmeJ4oBOTK$QR!PvOwhhkwP7|uw9F$#4;XUMa6 z=J51m732Jntp}Cghb+oVQqt4;!Ib%)7JRMSY#`N6b)K3c$9T&bw7F2MS4?w1WN{6F zvK;ndD>fVZP6{6^s$!PHv~B&pQJGWzYD!gXEa$k4)W2 zW;$vO1loQ~EKI07TcL?P&w6DfwztPHDX&_?a19BK6l1ohu|CP0pwpPkP>;ZE76Vpg zf)NzVW0r-4R&oqC05!P)?*{iN_KJ~1km-TJOUEk&%=~ydb|d>ED~H{c$L&FM)n7mk zS~+c^hms}xgd+=E3KyS{ihFFzj z(11x3#?-0uNjIlt49&h#aXv8+OnDu37~7-@_JblaRdn?y#S~5u#v@iU2h=XH1X~WJ zDG%HJ1duy%>)~s!a>5%kmeKeMu)KPogw6&_&|bKETTl0v1tn%dbb!*>yLSuTcmb6` zwIqWd9?~9Kmmm77F;IG$8^ALpCxDym*44QQ_-ZSE$j$m213&JA2-~ITK4ahhiH7XXQSm`SO z!fgstN%zKlI#N-pjF#w=k=0!|_|2fr(;h67=V)>BAQBFp_VzA9c{Ngy?$31OxcSY6*tuU|8F53}uS96NFe3_dV!J6TmP?^K8Bdj})eWK!VQA3Vn-RMoU#)ul){CbAnlkF$`ZPmi%?E$c zV9p`UFLIf0TcP3YVVKY$J0)1~1;F1hT`OH3y%3p~46}Jr59v+U(D0Vn9}@e-&KRLN z28j$1n*NfXOtDTkfDl?C%i^jPvA4`k@Y{m}aH>Y_c1ud`LmCL$O~Ozv49|rE@U7^V zv`G`5NEMiU*!$4l{g@{NSlM8Mk3rSPlr#-6BIMOf7#oW~2r~pFZeOxOl4B|>A}Sln z6Rb=Us3>D*d!@8DD10W49xw+{0E%0NM$xtqdUJ(6D<0ipSIXSB!1W^UMOQyGZeOT) zn?^WF&Z~&MKa5%mBaWTi=FY_TYlI(7v9fpoSKKcx*b@6YC z{J-1<0Se{X2W4P#Ro=`amEaUB4NJ^h*adBygmrMEVK9;=Kc!Rsu1$Pqz{R}(*WQ&s zLcO-{vE-;II>?fUR|m~hqbNiw`$$5zF$mFE+ANW5b##y=#h5xNEkkx@NR+K?F|vft zjHOa4TUzYv_kO&GI_Leq=RJSG>BoK;pLyo_JkNb!*L7dlGx;IOM=&E_h03!lFDiNe z6&`Z#nWCWRnmw4jNBf9%Kt108pI~_CJ1KpW%P{%0wT4n1ZCn_chB+Yb!0IR`*9ZcF zS+9bD2!?D`_80*CH=r?jk`YL54LRW6 zxE=cB6aYGDuwzP6LNU;58G?n9araP%y27JRV+pX;M27!{`)ImBJs4V1VObm0`U|rje(; z4_A*k;f9f{UgdgHM#-$W$l=RGM~hiRmJr*>IkLD(kOo{qwk{)};J!auD)KX09YQ#Pf>y9XLCE0veyyiGCK}P>AR0^pGT1o0E8Z=U1RFTjy5|vs25g7jad06 z->E}59DnzH?^)5KvEI{hx1=@a@m3P^RZ6^ZI`hZ*Qy(l}B2GV~0}9 zV&?wsFpSKD08b7OE(E^ZLLWgck>&$K3a~fsP=bvu`7q(MU<=!mRpyElo9G=-nQD;Z zbVQ#1BEFLNc~<;p^u$%7%mh|R<$IX173@(%riNv4i(-)NB@2c?jwTEmy;)a+X4P2K zT#ro3(TqMl_9OstE)28!^(K1ipmodMCe!u>a6hVyWDs{qK4ndC!XXQTxbz^nD>Bjr zPX@h~ZUk;R33te(Ev)|fI*!%gn@oOMvS7kl@ecXZqU-{>ScS}}ihLlqKwIcs9h3R)}rp1H?*k*qPn zOLT;QeH7o+5huKn{zQlZjYPpl!cif+7b=T|9vn&q=ynS*vs_#(fup}@F^zBq>py-V z;GhH|Ln9zn!n(9ub~eCSU4Gq(hCr&@4_mx)+M08gVZ5VGaC6y$@h59=On)sY8z#P$ z$uKs)sqkUj6u5IMRGoPqCdU&1*ul%k{pp3scNekaxAR+0zJ~wPAexnCZKEUKk`dUP 
zZws>6jZM((U4=rb?2&tQHIPu$jX<2;BV$U~g2TA4X;N{$mJ^XhgM04Kz%$k<^w4vgS$2M4nIA zguFz2W{S9_ar*(ALQ}^PJ-KqujPFP0wUGw&O?ga zxwSIkw{mwO%LAd8| z3(OTT$#O|`V?H!Y2(K=z4=K{Y(f`T<6>%an^%N6dbGNqo5>bX|bQWNA{P!*bqey)| zH?6;Nv5g?@eFdkp0rIQ3*eK3*1KlqdYg)fP13NxkRVM3{TuU)BX6yCM7{1Mkm)+fx zu$Dqqoq{(>*uwJBEW}FR6%A3uw(-@jwcB)By@iNEm5L6LZ}`76K#b7Cc1IM=d}U*j zcgyW4aX5%T#~_wwUl!y)6cufieizYaqiyE#E0OCk{8VkW#V+rGc`8f;{#mEq6oGcA zGRk2Es(sm;m#>{^Z)eqRdVKELA*tacKe^BJW0P1X`QZB_oDw5-zb7Uo+ z;q$?cV;0Ft0CxKn2)rE!Hb+h1Xr93=BO%ork zvZ3&*pumw9@EiNGnYjOcD2guxBU%gp7Z;=5xc=R$zK|)wy|#3#0qE?61o%-wEryF2MxwxP0A1Qh`bC8% zg7-qo>s^kNK7hMS?|0$DCH8PeU!E%)JpUVB_Z29Xp7LHadJ2+M4p=2@pbS+boHK+xq5#YZD@EY&nsbzC|tfN5wi^^O`*zaK;gTDR>1! z3j;X2bc#U^WjJO)JhpCl0Ms2n4acL*mZ|{8(kZlp0|OpNCy<+Zuo zVQve&3>grfEpCQqMRtJQ>+LhbO9&)4Zz~dTO%1NilKH)S@-_%X{$n@@V#8@pqjpD# zzyax|rg?yx1{Fo*K7na*BL}#Vm~_obt-!ldTa<|1 zS)P6Zf28iOjt+r9%DWf%$F-}ur0ZQcM=`YB;vxlaC@T=`OVHcVWtwYJz=6rTA9(k| z7AzRsc$@*#(G?klV|wgi)E87K@n`A|c-KlcwQ=0KtFPT}eW#yB^fVl$WZTyV#&J#ns9+q&Sz;UEa6x|` z>1l|2k^8-zEukQKlgk1PjDIv6dL&amDoX11AIVx1dV;oQ!R6pTDJj-p|jYI$w42Wlil&=4R2 z#pN}v+QU);^9foH7q0hCo2g%Wpd>y3dt|@y{<(Uq%df$fv0=&|WR6=@r+3Up&6kaW zm7yvUE9pE00KXXPcN8LH71=LLtDrrmhfXF~*eFOa2imiUgkK7ax7^soHl>Q}Vk3{N zR5!Xw{KH9bJZS+mPQ&Fi_7{N)nYhSDI1Yz6oFg3RIPun~l{aC7li@6r@`^RI2Ncpg zedc767~ms@$UHf6xVMuIM|UdqbGP994OEh}0H_=n@NjT?`#Av$*a1G8I>QRbYJX0$ z70u6uf=u{Ga$2H_`w!n$B{YC$;(ZS;4K#o#PnTM+MKIK-kYNTdNrOBB;&%r zj|$4G;E`}<uqPQ$B&( z2mYCws8RX2thaza>f@H_MwK38W=ES(eePDk3hq3KG%O(VEjnS@JO<6kP5v1`%-WpB zkkrZAem^8HIrX!@%5<+rgido>!bo7VL6&)?SEW&(X{ecC{wZMPEaI1Mq|kxJl{Dlj zG6E=*b=u3>?dT$frM?Z+c)EbeXZGmM6mC@7hs2o5v*VTUcOk?zVCeRA)wD{fdImV_ zu6Y>-?6i$?qJsZNxJoUR9w?R$5Ht=vy?Ae_UW99KGhBnMIy;VXFaCmB*;;pB#;|H| z2~_;L;7i9mn{sLH-Xo<{16#N9ZR4f*CsR=5J#CL{%o;B~xlD1nV?@PkQ7SU7VsP9O z{W+2~g#R=NHDIc!`Rsx>=R2P?I!cj6QKYTeDs5E(#{rq#6kyD!S@YfloO%q9v3H!s z+=4bq^PI2EG?OC!Nh0u8ZIB>i%97;M)_lk*s9FQW0fQ)foknfwvPpv)F|%2c@uWV1 z_fxphznX7|?uu%h0;Qy#w;ywJ4C{SDyw!V6N`(CNK=rB4t>Bh97vFUnW!K;NVdM@! zI5h^31w}~zte<3WwJ0tVu_z=z?3`Ak1c&z$-K-tEM%pPN;np31fRiYya+Y3T*9xINB`@c%*>VFO3SLfhWL-s`p4>?e05)#Lq=h4i&X~ zO|!KxB%EKRcCYiJN?XnQ)1Z4X%H!5LYz<9WhT*K#&7oRpffuo;W|-)#95;MDd>v?a zWgY4Ldd`#gj!cslaR@Y(aTXAp$@TY_PR4#M>A4f@&l|#CBV5etV=-JK&3i{De(4jf z4TK)4Zk1+qP%aWpgSH}AqNLCD3U8&yb83mbo9^j?*ar|tcDELLkj$QG|G2g<34#3W ztK3fpGcU}8o}dN3pZ(Z_+V#0$)#fb5T_@RkM=y3e02e6}lOXMI*+z2mJxesiabkMD zN^mj5&L8$9>#dVu@v|KzIJgA41uU`>cx56g8h|cWoNySKJnRpLP^#Zcb0Wp|)Kf4p zemnpg3x0)ey#mG0MG=h$imW@@x?Ldvi%B=m&#%-Wj05L?+4#t`+9r%N{e-s7G{unH zvCU0OL-Pq>MgD}sj5GGQ4)tPu+~K$2hb>;B`tChS1X%^T4}6E+;^YJ@Sd%EU2jX{b z<}b?KntHkGvLZ;lf(%VyIatLjI41y0GG*@a^c2`)no$NQo3cbLE#USohef! 
z#(D~0o=9{9MEb>o>H;?Ol_M^-B|)?tr}9~ZDg;iAq>aQ6KG1(z1_*Ed)a=2hI!{HO|rcBL2T@uzz zkRVuj`G>G0up^J3Wc~d3&8DuHahJOTu^~}zJYT&P<;PIrW{;fO>=IOy z?Bkkf%GE0gZ}6@cK=dVLgFccTE6UoAOlaxT9rQb`UIo1~hA~-3Fy)r^feAPT6H$HP zp>VCMR>yR|U)|4Wb?t7F@!wYW5ujiSF1r$Ko)-&jaM9KiK!#~H4>F$Lcj`MwO|X0# z(gooLU5ER!)Q3X5?m321!_**r!W&>z*S_gZk|r_ZKFn;?v}M?+7hk@h%N_-;YYPc_ zWfps^_gO^z7f{$9Q+s=9A6tX5OH&r*5i^o6z}HLZ%&Km!;p-Chj%G^7&7JX95XO80 zb9Z$lAHB0WZR$OeoFRjEB>AP3Sa2VyOt$6Q{t@ouESJ6V0k_Uco*YJ0myXTncBRP5 zv^?%7a4RROB4}lqWk!Pw_NQDB=ZTQ8d@>#7pxL#_&k+&EPs}4R`^w00&1`S~XsZ{y zcoyb0%rjnHMJo?hqLG|&NEV1Kzbz8jN^y@Dmmj`|K@Rns>um*xyG-S5QHYS~L$>W& zF&mlkG+6=;8UsQD*t1)3Qzn zY*hdSX()aGW{w!08zfDo27cZRTT>^kw!RC?v-QpDfo40}!15Vr&{4upqf2>B=5s5h z9+($uq~wLO*hs%N^+sx*YGHLaOlX_0yYf-%G;i*oI^mfVC_EEzy3#`^q9Zp^dK!v} zn6<1|sdD&;@INRzqetMAT}9B~j0#mUt=FmAmroa=PzjoQe_6NV?8GOm-y7(^ne3e) z(WmLhNqR<=Mp{=&9zr5W<~uZi0&d2ziQ{@^*987Jj{~mLy4)N$oznTWGV|4@><71x zo)*;(u5Q6fT9*Qif_-lI=y=aIq4_{yAXul}&Ia|A@rHzt8@msb6{aRdi(|Gz8%JVCBXNpGSI`V`% zdlTIN#lq_}sKZyFM+xevE8Yvw+=1p>mg$K|t=VG&*1=CZn&UVyca95jKu)s3F)`vE zfg;ZjgD+4`Px(x14Lksq916=$VAGbO9^gjiL8>%qN!ry#swsPOF3iTz;B9%}c!7#r z$7PNeRet)fE9Phhth#(2)rgK-axSkeY#e<(oL78+;XGREPV|{R3Ig@k9+03dm$(aQ zpXs5mZGy+!S8AtWLfqJB?6072e6O=6I2Oxi(%(Jm#pN13`d$&QIvqgb?Lh&E9a9LOWJ>C5Hv~~B;>=l_lA+ zoM}nTm*si!+5v`lmSME3&jH7}WB4E%;8K}?C^O8-pN~|tG*Il{_RytSsk}kD5@1LC zxik_|mL}SP#X$qs3c@~1T{)~RqYWj5pyAcRj6l4TQu|;`#RN54D)?;-$E8zpJTpl2 z<`F$+m8!vG$2OChcm+&>F+6QRfbr>7u;2PEq|~SUgHgIV(LWN_hrlkDr3r4Yj7-G8_Tal?}$e7 z3}7ZReTwxE=3de0-}VabADvkoGS9VU#sNEb-JMZVfmqDh3Jv;dd03gFDxnDQ&4Mgz z@`t8da#Efb#9Z_fZBH2>D{}Z=m35-}r9h~=wT$o@2Ego%+4#pUVo5hZIF}YGtQsBN zU#-eY(IhL#=hmC;o~i=m@A|@oDhy6Jc^~R-|B?DIa&O*6&fLAnPyaEYuzf8*L<9X(P#hIkOeUkpIb!pOpYtuU}W{kS{ z!nyp2$Y?X>u2~Qv^nYn~ba)rSwW`*1NcTi2U{|3o!r?++QL%WD@$@uQ^gm_cNLkwg70BwDH=}+jyUbu^yl^4G|*n!q#g)OYc{w7*%@3JW}Nj{?Ufm z5B-=qjrr8yA#1Hy!OKcv?j#?Y{XFh4v(gPqX)nOP3Chyw8D9k1nVjKJR( zl%y~w5@$C9mz8nex{EC;q57n<1;T_UnZ1UvO}l>@W&g1WZL^bj-coptXo%1;BFf4S zqedDse}1DDyPOqdi6P@#daOvjfi0-Z$WUl11Vnv5xwkz9K&|S*9a$R}Il)@N@a@%k z)t`Y?_i|I8KZ2FZg`31x;cCOFp|i+<*kN!*%? 
zXl4uU!xw(^14Sf*qrEdZQ`9>O+=p@bCQmdDXedtrHAg@Z5)XqKtD-)^Go})lGqIWy zFm_@Cw8?W6w2-p$05BY_Ae_W^G|cqCaX~;rFt`Cbs5OIdxJQIqjn97tfc(SovC+%7MV+r$D~3N(mGk z%6T6EoQm}ELg)0z?)k5~$$;CWKlLG@iSq_|}*hqPzxOD061om=mhpMJq`xwj> zfZDUE4k5@u00N5TM*G_~O)t0)FF1tPDQ4WQ)oxBO+G+b6UcrB9$?^vygf=uup!>f~ZvWa0>bspOa8DPmPUXL0 z7VIMfJHuFQ?>9%XKYZD5kEx{$$6Wf~FkSvncam>E#Z|MtTC{%ZcSCI5Vq{PE2C=al^8 zdi?oy`{$JW>wDy%i~G+z=1&LGKNt7!VvPTC4*gS0{+ESWeDBf)CxUymVpj}bePV-w Oe+D|n+Sz*@&ix;wWqASs diff --git a/docs/src/examples/mps_expect.png b/docs/src/examples/mps_expect.png deleted file mode 100644 index f36ecdb85584be3015b451ccb0d9a8d992ae8ae2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 131704 zcmeFZXIN9+)-DVxf+(OOAVolWM?pk-2LS=;QUe0gdxy{j3(~uEl_EXTdq72sC`bn> zK?uDEDWT=ez@zT{p8f9UJ>NM$z8{k>Xuh z4ICW&BOIJF(FA9~H(NAwOE@@$1J*J!_Z4Mi=^gy=Jum7brIdSAj-GFd>+@Yn(`sbEqcNALW@_m2r~FH@VV zs}Fs8OqAf)tTsH3Ilu%Q3C(5fH+s8T&Q7?_rF-MtZy07!KU+i~nR~;4=F+4}RG8YU z>8ChnCvnd`IPH2{f#KoPCr@x>bLW~m_eXJ3UOGNd+1=aO!Dz?Vik`#AVY~Fhyd)$p z)Cr%@fBJdL2+pf*MxWrEkvHey&)>bcs(A4SK53fpCq~jV(GIJh*(zEgBp()WCOhn^ z?vUY>U4Qd-&Pn5vggGR{KEyESqA8Nn%W2n+=YztfsxM!@q?k~C$bH^OfI~=YDvyFd zbC13Tl#NNGrgO~Y`>7C4=+a-Zc;|}g&O#$%ldq>onucHCgV0>Pd6RL@lU0oKLDvT) z`gI@Ir0RDyJ_yxSv9C0tY9DU7j=A@`_rm6quZ?GaBd^(;aeevi~Ip8%nLE^`EQH6G(MoZ-kr&MVLc;si5KCU zi{#xWdxx|wN2>>+8p|zFbLW!z#hIN^V>P0XCdyQ++D5if16t+yODxrM7H8VGV$<%T zzuO^Qx{8S6GEJ#O*z8iI=m}1{MdK_7y54piX}N|cuJ^{Uls+L-#gak0HLE;?##Xs5 zJtCbOhg3ksooGV--XKG^hmCyFlm3g-Vj8TU`0J1wIQr*t?l%c$@sFLx5ugi9RE^Y1 zQ*4}4V>I&!j8^!HMqQ!_>U#oo}bzYAWNA2-&3I0DKOY-Ribn@sUq2NdxMOnRF+GiJkX7{A?DLdT zABAAj6(BEREl$QBvV*-AF)F-1J7 zI$bjT)QN6`sexyLcr8k;`TWMxy2b9<1NH;=1I>f0QZXj7PRt>fYeEb^kbm%(7WqMC z`uv8x7DLL#@lf@aTT}WEg-S!cGIj%Z6J;3|;tcQpV8XQ~%1?89?iO=Pa!*!=B{j~k zJ=C1to4+=3ObV{-ul)LcIJ*C>^0Y}7n#n##MZ(xJny#ja@`rxD!CoMZX< zj)j?pghh!9+@%twHzP&$;*sR$YiFw^>wW7vYZ;CQWTa$?N-0V)pQRFL6E106XyXrD z9k3`!C}7pLuwfH06G+s5`klP;qfPaIcz18cr?gM0mCibSMN8%4dEGiY8guy+1%ibL z4NQ^L(0bq9A+|xQ0k6E!!CONYh71d86*e1YJ4ZOm#~-;qqUaJA-OnH|vM3>YU1?aT zUua;fWuPD;X^`EhxnH!cd`;jQZ~CotjdV@nk9D_i|L=iIb~<1(ap-)3e- z&081$b*9`0?ht{3zWa`fDK?bpZ>hE(4wN6-2;V+{6$Te1A&z(?$5V`mvokNT`$ZrePPe& z6PQbl$WnRw)jxH=>T{J;)so$)Jz_X{)Ond>#o@c=^5Kf^X!S^w$2+GAS9J zb2aZv`!)*W%HxLfJFJICz6o`BC%H<|yW>{$X3)c(&0$O0(_8V@t@q4m zP2i}i4RT+)&7;Zb#q!>_A1P+2Ck^IDr{6A9tmyvOX@?*r2AX;Tn3`{zcuOkKv)s7z z?#{c+oUc>w7WEjVx}-RRyYS+I8G|_=(>;6n?77VMx%ZooXYEsWOyE7x9|wc?FMcK* zC*QmAg~?8FnD@u<`n&bd>#UcmE^#u{F<8fhF;X)i6ucGeL+j}5skkFn!(X>Dwf46` zpKnEQhbDab8frl4$!yGTd=ZftsQg?pPw}eJWVOluwF1Cb-c(I*SUoCjNF>H@!6uv z?rP(S!e4ffNgEJC=ozxdL)JmozJ21^&vJ)yqbyq<-LjnGkK>CvpYUYigR7pfUT4{! 
zEcu&LF?OL#u`9~*E$^qO&N1n+S$FD4rDQqe=l>L0kc?DNwdiWgxv3n*rK!(iIGApm zQGavCIwN30t+S_7s8d9DzW71?Llb7#8MyYh0;G0&A(74;j8>oYU9JPSv;E%p!X>-G zgd*Q+aSuOVxW`e-!TYsSa9ghXY!@khIy#NM+wuLm^LWko_IJmz+6{y2)$1-XD(ZSY zNh)b7NdfN;UmrZ%_{yH@TXn~Nq@1i?yJpLy)Y91~R726YW3tM7R9|T!=Sj|574L+9 z{+HKwYs6E;3&eWfcfChdKc*Xa`sYvTjureYv75-Lw4E*?sjsbvt!;eotI+jz?3w76 zAoOo2=QV*SCpc^C2~-Kc-Rn4dd1xeY)4u@LQ^a0?*5@or7?A5vFZ3*dIexPl?j;aZ zqJBogL_04w@B6N}qre(cuJ77&-RZ8+Ji1#^0wGQVt=S})zT2msl~o_v6G>Jj$z99M zSbU(PX8<)oY`mIIUvXGz^DjNh*$$Zq{Tx@oTAjv_@mWH^UwG}O)w8_mndE-{;Pfz& zwaf?+-%Z2a+KkQjbG*juO-@x|Bkp@|_CJv(8S-9h`cgCI^Jzgzx!P=}v@lXPYU|Ec zg`q2aaB!)2bV9Itt>3+S$SLLLepG?Kz<#CPER%@Pq{z3!JE)CAsjkT|1XGhA<>vrtZ;frO9qU8> zdP~-)htGU7dA-DcO80#(>*#k+;x%D~ zkMeYl-B5|`*XvoWZFOqlYDSr65)#)8_V9Lc4hlzxwjJLiXu~Zdqr!$CZkuxkN}o-O zJ=L75pUoPHA2@g($N9aMni%`H{(xqtAWVy<-%j=&-23AslKn0?(j}+fm7lUUyM)m~ zLg>R^%#Y*PIr7qBE-Suh9FONMW9>~5rJ)P3CB!MY8VLI|J#98HJq;<0F<(3VEk^uWMAnC-x^A{9^Ap`8*x_6bB!i zT?W5iuTK5C8vp3k>0ifZuvg+pY04-nf=^8|7YhpqS1U)i55DLv@WnZ&yLzrTIONRO zUtC2Ergbp>u(j3$w+AZsgv}i7c}&e6A6oEu**js!!4dTm28Z?*Zl?5J_I3`g!d_ww z$5#l0W9-+w4D`pBxY>#^JW#n$FXQN9K`+2_i{}P|I3Ybfy{L=%BVi3$`M+)lXJQOi zZf;J(yu6;Co;;rXJdQ4wyf=k}gm`c8@$&I;gDbdQy&c?4y|^7*8BYfJHIA%>tC@?n zlbf}p13h+J(}#}kZek1!SVF&lPM*`k%lZ!`2iL!*1t!Rg{SWU=o*TTs#|F2GV!su> zZ|!Aa_dwR#9`Fp%5a+vb>$d3e4gcG}{*XMmRom6VMaIz{-03F%hwFdc{Ez?quQwjk z)cZs8=FQu8{*mTC{Ohk2F#7mgz-e(pQQqIHCQf)^nsN>-B#pJKnilv3 zVut-Wbr<|$JNX31xR>uXE6q~l;7H;q%1UW@;VzHkr(Kcz-nP+(mzZ`%@v_#N=chR6 zPq}csc=;`ENA$w;Kp#(&=5WFnG-t0!T@sUPp}}E%^Z4AQ`)B03aGnQVzQV-H%)Rdr zQN0|pnlX{Qno*De2@s#?a?{q8I6PeY;-c9%j*q0^`tTU%6ajtU-~Ev%q-kzxim6q< z^*_1#5-!UTT22y#kQe{vPqPWQl!!R0_tM{7h#kvhDiiVdLxL+Wkef^)CfC`n{ZFn2 zk9zHYKwR=akNO|3{zq2-qgMYhR{uY;tD={Os~t1#zE>z#IU;D$j@=nz8n30FvL!+1box}VKJ5s>K|hiftMQ93$4YXJ}rILy8$m6K{+B?HQxXng@#w zEC!48_{>@_pjYeH%sXP4%j`z@MjCyqS6&L(4#vwzP$^EgM&#I!R`upY zh6d3&BEyOT-qUG*+aoSNo=|)aWzAp%SdORaO&HxNdNJ+^T|zXBt{^=0c%G7rr|uoRb!bcc0}1r#qcbykHFNL9#4sv+ z{<+X6XH?P)n~9bojuH3UYuDB_Vjd{a-n11Ar)HLft6#kiy$ip7L0~buU67D5grq|- zGikoD{6r#JfZWEc=^7if>UO#dnwusH=!TIdfwAA-DdM-;|hQh>qm_MC&@ zq5{KOm)?3$$J&`h16e=Hz)!xosI3XDx0gC}-`;9~J%Kg~4p_^g2H1(#Y_CJsRNSdz zpzcZ^_x0O{d^P1|k5NdoEx;L#qBGOC=ZIgzFvY6`2z)h9n z%%re%2(V&fs+wdbb4(Tchb^%=@|uWwb097X8K?H5E+mIXIZHmm{9t#Z7w+e4q{LT7 zj1u6xJtP+gv*lnEiG{_K*7Br4jWj7oVxSS>U)_6>%M|?ytdgbk zE{*VSFeVw({21>%^6`0|=ucr36vh4X>Y`>p_&Q&W+Es7#27XKC4R%*UpdZuu(6KNB+=B=+A~P>}Sq3Y}7w#BqcP#AP(L-K$1r3)N(09s?w(zBjdH&>k8+OebF_ zyhG=<%1ck9pNhDnm?4~iKgC)<4ceA;o9;Cf zJ%={-_d9Z-26#z%IT;|l0+i>Ev2D;7@HCl}p4%_}*eTN>+|y^<^VG8pb=BX+ZPbG3@~2edJE^Rn#p}LH)p;IV++=y3M@-i7Yn`CWPD1pNK^0RySKZVTa%~ zu`0P^U6Z5(8ot%>Sm_V_Qy0Bv5_Gc_qG;dnSR~tZ07>ntUFH$08`tZo^Z5>UpVm+Q znMsX&D+j5esy1tjMD-}pDFUNzv_;~|iXw8YV*qmT1jq%2(#y@bA()U}js`nA_syfO z+elkgoqR1Ncz#|f(TQC65Z>5;S@LK-`Mna8!Ogun$~oGFI%U8iWM>T(4QUVDn2G~500l}j5cEO^KBX7h11L}KN>W;Uyp8lc! zx_NcHKF4o=o6m8gAv=q9qA2}m&Fm$1qbliMxWDJF&nz{nHLm(I+|N6*UU)L_v^iD| zYh5kd{7vnd5bs#_=bU4B(d2~dM}};sU24Wnv*rm(Impx(DMF*sznRf``(*v+xj}?D zfxdt3kESY|3dSc%d@b{gAeIqY3%a1dw|t1KNJj{R`_Lk%Op+EsF_|MRU@^oZ>M_(9bM9XHf*r7Cm4J$g|JXz zM10Hc?R66=diqUA2!3Aw@iA`uye9sCwe0*G584HLnC{uO3*NJ4p*@dgNeQtgLO*3! 
ziNrfmK?XoNjLR$JH|H;E1i5yL=}tg5Rob#d@@7q4X3|M>?g{yICt`lCup$r7G15e3 zY2|C6z6v7l#<8f->_5o|Q~p2mHIlFH0*t_Lp7#3h=so1V34kZPuVwJt7&+C;wBBc9 znDX^t`DRV`1ZU^wIyV_hqM0%2KOQLUtd3huwS-3cedxUlH&yK2yVE}OotewFLy${R zpAH|bUms$@jeg3kxHL@m2J687>1%w=!wcEa7=~U614cJR*5chaxR;z31{6zFATKIB^ z7X~qIYVda_okZvcuzc9(U+Zp=SlV8d75mGeQ2>M1NGA3tHF1G-9?{ekPRZX|x+q@V z#I{9Ew=}F!Ex#rAv{dGZr7-busH%d`;&c1=O4}Tb9HlBikN*FL-sRzXPgsA6kpV!S zmIm&@vnw;bsTpPVL$#(I0haBqB!;UGrue%>8O40J-ImR47|}1HkEi7Wl=ce(GLh*W zFt|C+?yY^W{~`dgfr*N3Qk@Svmejy5fFhRCh%5idPd3*L_eCwC!=HWEs@L2*`&_)r zJtFIuQ^}NFzgozxN>u zoEDZ#-o{`U(_LTR#v-nu$ulo^NxD+R{?IzZ_G;Bx!ejZtsF+TnUyY&PT7A_D%dIc_ zUXL3D{G;vjnUxdciKuUfpjJiDx#j$4f&$@ZNZn!VpeP{sNe- ztYd^16o{oqkTU)!vvr2^Tec!8`o}XmJ5L`JdZ-;zci34q9`Y1X?U?vOw=&7k+tV1w6kSj_%R((3xVM_;M(wF?u?xOEXb8Cr$zFU{yksWPdZYq|7pe5~qEb%g7! zHtraYdXQ4LSF;HKD|ch#5)87`?-fwxanqE0ty_+LkegMDgJR-%f>yC8OC)F z+OqHZ(569xHRWZ+Q$V=l!|suxo15r=XMUyGZrBaqs6E^swU1iP^RVo|9J+?s*DDeg zCiyW&A*?aX&uj(?I_lh)?g?jSSH?PCx`Zazd%633$nl@Zcm?oaIRYLhf8!|4>R~Sx zc${9s>3!Fpo(%%51*RXxd%4@ajlhYdJ2uGR55a3mm~kUA*)pS4tc*v@A#vNtceEefDA z*s(;y8;-7wR5&EqkGRftB}_+(bXAR4oJB>bghS25?)kPZ3EsXrk$HT$?`rq3L88s) z<#zba+;9t^xpiZO5)ci2vT`W2J@0kw=cB)30|D1P8j;_Nt`Wo-c}JR@Ti@K*?Qk`N zOJMONp%>Qz9LHdk!=Q0R8)yGDsN+H2j%cE3yycpp2}Otaq(SDNE~wS$pf<>#Xd zyGsgw3<)Gflqp#O-aiLWhhzD#g)-boLZE$Q{l31a0g+6%LPmre-qu70bgPSXe6d~7 zNUE`#71Fq=SY2n==?R=uZ=>&y+sH8O$)c_RjUM8;On5AB$FTdZ?kgZcW!{^QMorI( z-_`|Di2104{zqH2F$PFCbiAU>uP_Pfmo@Ov1U0~L7V&}}g0QN*`Vw0C{Czb;`;eeM z_gNMQ_rvw#=YSwK9xK%4MTk;fJti1*?;Ox5cJJ-5-+p~ERWgA?+q^T8yJ{M-m+7sM zYbZDB(MR=iH|lr^KWv9XKPFl&Z$vv5DGX=CI1XQq5Tiv!Y!Qs|qlgUNl(xm$qs6!S zHC;w5l0Gr4UpOIj5gSlEz9@9&cWfH;AV}DKv3+r%&`eietlqT$LcW_xM4rGS38V(5 z6b+W_Uk@(>FQqm0m4}-u-`e&n`#GwYo3LWgh+LvBCO;hqBfq(;a|XR9U-+pRStxe_P9_D1 z_t@OLfws96>?mh$0AGXem!$dcEx^6`Zkz~X@@1eFny6)pll23WYZeS5RlOr6zQ4>r zy1`qw>TKPM;yqT3x*u92yQ74CpSO#RQCkr@VL=_bVOOD?kN?}{E8|88xz65Ay=@QO zG&ecn93L~7V=i)p_m?&V20ESso-uwd!?(-hppTb^jWulo82#+04GoQa72Q2$j*tvR zi+z1g^xo&xjrSdb)0cMjasJyA$*P%p|3e?4`B%YE0Sl+Us9pzDS^7s<|4@bficm*a zvn-yu&cJlXEJqzQKEfn;lEv2-*yEz{D$x zXZMz1N%*v0%~j(qXHncr5zu3gvNR+t0Gyi9y$7B9uIfs_sbL+!j^#f&zb6}qwe#~Q z@H%=0JM(=3`W_!(y{;DGhFp(Ti^<}?fDG3YSH;t>G zkR4;srRwzolL-)@V&T{hwc@7}I0himS|!``Kfp1@^{pX@a-mK!%~Jm8yPTibewNWV zHRDDey4f6fbcyhpe!*#r;k~!;_S*i-iMwzlfPG`SH&0>(@54ow<|f$Dy^tNt zV{E-L9_n*qQJr|f{CqimP7<3~oxIBSEk~Im`L=zE+06LffkEWT&j5g?t;f*&x4q3L zlgr%u^haa+ldi(iv((rS#(qB}CkXm6HharC8djiJ`3sP5M=#Y+8WBHMkh zHxd*|V%FeZ-KFv6UBb#NY8lJJtXvUD>O`jEs95<=xre z)}7w`)Z28Alm^Vvex!nSxp7}RGsaap$$dGgqg9BmfnGDCv9mPfGP6=UfThd|bbb|P zO*aH$|6Nw~pJHzpt`kAJ0yG3-{+{uVw~Qv_GOW3;Utx9KcYE1c^n*ONYW#Va-#w*x zwuNC{(ulCR050Chy(Hw^n(LTflm8(yMuFCOnY2pG=k+iVvcWVGvU7qkm)r6obP z`h=`dqMvfBN8+D2YYBig=LB*8%$oCzL(Pw~ls6yB_L|xEz*Eya(PFW2nhe++2Udzj zeFNx#4N98kDby}xkvLcn#wNH*Ay=t81jF;muaN>n9I|l<{q*L*8#4w3L)Z;*2E>hA zeYbl^1M^F*3P|c!Eg=@{WP|KLKq@gBaQcq_uesAJp8;VliS>A0C3I)pQ!#4yxtWOa zPJlRDj*Bwu%97`^ClG&nQo2T-h3uvRkmi_(24)&<6q`652TLsV(z$~6k5|b zWsu3iGt_N_E~0HH!z|@uV3zUv#IkVxR%!JU#%E*Qt3K2&{FmvaH;HYoM)-5uf3LW- zbzQeC$9URo=J8Qh_QQn5j=H_Uqmh0~WFy6zijQ}Iwj}20(BkKO4}9R^hi0U$v{L@i zU3mUz@;hd9O0>oz5QIe!7CS{aOLn|Z1TwWQ{hUE8>>Rr~=a2pSAQnVFp&)k|}Do1`TF1d{>=~+Iv>I68nz)v$P8IKVtKnU%I=K(3xH@^Dqpz{O|RJRX?$HUWcHvS%p+@{69IL4vObq!$%z=P@sbU`LcWn%B{}S#L2_ z$ie{m1+BaAh51tM69()7ln7HJ*ZR}1aq*Sp+C-xT0InrFFAeI_Xb?GXX#(`s^6xLu z4~^C?+kV9i}cE|DVUP#j>JeL^=dXStXX&((I&Pg)n;Dzr# zEZsIN(<-4EU!}3`Iikr&WLF}Q$Sbz{^IZmlB?*qR_hNY8s>N?L@q{@j?bY~fE%Jec zgY`(6MK6407}&maVNe@L0NzB0W3PC6s}RCstRo9IwpkP;M=vIYCXvO!l2XD?Qmm4X zKuFLXF5-KV0LHG;H%ZBK>wv?pq0aUgM$Lj;Ac(!M(C1vN-c#T#-mu;~5oCUuY90C5+0RE((@e1(h 
z?SN9l5ML}OE%A^`ZyjdaSbm@In4OgECYwOD;)uE&|F@Xpe#Mgyzj|E|0WC+)%;ab< z;NsKJClD90TXgCs_F8;aRt=3U^Br0%vz;RzDLc{~Dc_H(Xw{{zw(gUk@LA%{2P^g6 z%%T(Luaz1nSU*~pXWtcX@DCOwafw1wXM3#gGax3v29GhK&*p!HpSYuS)xgkzH7?3? zzgMc+Bq*8RQYng7%p4T#^l6`@RMf53Eg^yU#(-RvwO&>iQ2MTgYzQ?v*D-IC09oiN zyf7pWU!YzTJ%wIOQ{AhKaol?<93=@Cek*eVR|p;eC+oQKLI3xJgSf5?*EnYxde0>A zS@$v(N5VZ;zVm@RkI-~De?>uO#l!ZYik26;AhF!tSuugM)qnU2glS=O7W$4T0q)YN z0cH57O}=75bV}mYt&?JGJ>crd5q0y%374_Xm5Jb9>TQ)0_?`_2V4S55qB&h$U1z@A z!V&-)RD426_IS)aZ$}RPI#Rbi2^xHH@^FOC!noD5(oqg4Z!g zT|3d~J6OQ5EW`N+p(GM6kCZFW`Yzw}o(#e_pZS~+#1YS>r@>P*d5Tih>)kC-3j5|~ z+1b@DIHnlKi{4{y-YQ(0L0Fpbi1Zx8M zyxsR4x+PrL00x{JFAl-z5MNyB&a@BVYehU|f<$~+e|-hDg{z%)y+b$a29)gy)K7B2 z>Mm-4?E)RB_0ip@ynM3yQ9x042x3kD#C{)m*AI6PongFiHd2;*(ID66<6X<>Lvy0ttkMIX>zoN7`!We}X0a+mlxE6JGAJ12~jf7k*r@#4Z^9 zMg1U93F!dYQwEG5a8@zAr;3c~2RFPrw9b!o{dAwK)1vq5{>q>f@*%-o!1?v3rmP#m<+hrAVf zWTE}7j4Sf+j2kv01ZW%T2-YVtFxK$z#M1EZ^3#f@$fa|P!6$HSQXi=4ke1s?mHODx zH0uSDRHcE;F*Br()U6~Fn*KM(*(Y$I4=l1E_szwBu!cHdq>6~vxDV|^r1JLo*CF;< z8`(EdTOsPBQti56`1`mJMieaQkO!8%m^AENRqFjCIn@+hk+I2u;fa()Ag14E;QlzQhXoyW{-hJYn-Ku$(H&;XZ$csvrX+ z{XLH=uZ2J>f3fxKi4Hec0+lYwpMU-v90dlJUrS=Sp{M{MT|6i zF8y|#IJlO$;=?Z6Hyab6Dr(QEK~PLfvi@v=>8SV2t z|Hpk~p(z_}qAZ;Dq0OBuXq&4qo-xP3p1q_ZmV;A0l~z2qbFZ;wy8aCpr$1KDn6SvW zQQz1fZIOJ#^f45q5@M5*lD5|uJ8>*C{i+PTemrrR3T0JNRaO0N#uq4Y3hJ~FRmhVS z3*%AT)x<~dO&4_LAq$l+AN5PY|4V2Qe$tT@m=0{M2Vc?eD%nNJSCZIvvD|yf42%kq z@e6%#JK=u&P*)*jv`n^Irnt+1wyt8O%>Zk0I>!4-Iq`EZ`M9RP2N z+W3HIe1j50p@s4K>M@s2?l1eq1Vgl=VURMPaAmj;a#EKJxs+5yI~@z7 z724It1K~|!TMe>MfmrLfg&WwV| z&(tlI#Ep#^26%kPE5X~H>=j6exz;h%K5R�x!uAU%m^MZ=J2PLbR^~`d4_qLs< z=a}=eG4HwbjEFW#0{Y4H=5McMEY>IA=Mz1sGjYMJyD&UUpvc$N$=0`yA!IIY`>wf; z7Z_15H+ThFYl0?@$-%=Eb9!Zb=xy+JO%`@901Yl8Pl`vFv~k62?)`0RD*FIOhoH7l z)k%vjy#Rn~nyzM?03^{@0C~hva+6|SACk>AUmWJewk{v;?<9e2vl~WOmxNGtD(MlK zoIY8aGos@0-Huy4^-Ugc+aGf?Y~BvD8(*^7d6Ev|Y~H?HmAKXjYKoGnx7}E!=XZMg zj1_SKe{yrRW5{yy0*M-&lDK(pJPy{~Oi7yo<);bku4jd022q|=tAp-i=ftX;I zamTq_OXK&xBL|N=Fbhj^gY`{>lIgK&e6R&OcB&VBMFn zQDw0mEZX>~fdMj~2$Cor*@|%wSq`;{_P1B!+-xGmBiS``v!VHzd0xgVksUcl=b3jy z18Glvd`6N4x>!d1ZmJM=ElbqTW+tILo3uB43wUw9e5Xgf7mgXnt3K;gddqh3-lMKG zc8~>d?@8rZAYj_3HrqPN>^d60{Qwt5G$=CP7~}YD>iudIGdEFE9l+4HpD;tRswTSeu0utHp6dQucNcG ze5^K!6q6|8REz3h&5Q?a^P;(eq?nsDqEhr(r(g}J7a%Fk_@h%MQErCZuoVNVl+`Cs zTcAUhSwyvS7Rn|buG+w^iFm|=-rl=PiChZ>66qSGlfj++WA{d0KvfhuDup`)1q3y# z7$BOlO2w)um9Jmpi`$3dvo#9ipnr)bi0n1UfdBbRZlyV1jkaKb?pqAMGezxAM|G}_ zcO^AFnv%$Fil-qzMN7zKRHvn1X~PT*g0i?xF&_g3g`qTJsdJANv6(5hO_p|uF@2wm zS(KpY{M9(x6UUPe{GvsH%Og7l_#B6rJaXJ)Z-7Ve}8Okt&=$A|@==c*xksOgB=tTjAzmB(}3J!#Cj z?b^+yq72YI%j`n4*m=5+3+j~rFA>;Da`%VYHRRGS(02d0cqv(hZJOs+bWH>94 zo}gPypIGeYeRazOTCd3<7f}8UOIg;H0HtCop%=gzvZC;w?8-d+#SJx;*(ZMLmOR~= z5)#YLDC?#*Ei(#3t=n>1a2XB>Sq^QJoR$Wy zVWG)z+T`;`b2LbCQWf)agJwTt*T^H638Oe7ZuZ*m=LBE62) z#|N;VNx-H*8ry@@pM11&o)ojekQ}hOH?fe+D^N`Ma#t;k*Nob8X;1}Qd6+xQ)Yi=Y zDAff@*5hzpb7T~Q2+EH0b4N!Ewo@DIVX@n1vaLBlb2{D?Icsp4Xpqc2MN(g7(3Nx$x(`&p;l^Jm4U5kuWewZ>~ zu}|l}Yud?cet5LM)|;ge^^*5NUI2d{CtjWb$rYYBR<*>5M&C}b3n@qX3E`7MMhS%P zT4OtzG2sqrUtR~PcuhYOLFQ+ANT!JZWg=AI+=sV(?L({&RGux3^tRaQSW2GIH@l1XNe32tYe%aojE<`BCG(+@nhuysYE47;1uUAvHN}4{Ga^E z>7q%$ujhf-EGh9i-Lp^{eIC=wLC$_=44n~A8H!|-Z=eZxu*8THH1(COm<_x${dhJX z(gX*(Xcn*u-~>0o))7J?0Rht(T2Q=G{P>h;1~kjQC{hpLZ`z#-d+88xu%VF6;;x)D zFXtzfWmNAGgXu?@6&5zi?sWH?iPwCIhu+v{Mwq1WS5lz9{+kog{o}NKyAD=v1-wlc zAAkDwE`Ugp_)`eKo%CCd6QZ-j#2`#;*K@adh$(HO4YLmOf3y1f2*7gr)z>Af#S<`y zgg1y--_*B=^5~WrB3+ijLq+JV&4#92yq~n=H=At9pnJ)wJYrE3xSL~VL|S0O$2cyR zo*sa8`B*3q%#PwS&A5w#V@@*WC>_7+lM)zoR9=Un0Gh5T4`Y_q#8Ic7K`7xwX 
zX`|g)H?|xV8X&0DUXCk~q%CUVH5a6XT=(e_UqjD5LXAd^~c_butSS4IV068)XQF?&HLc_Am>9qz>!=QbcMY|8;Fl|!EagwopW z-^{#s;H#=%gqys}OP-{jp1do_QPPeOcTb>d%W?FJd0Lv3=D7?rn9ie<8(c0cE>KS5 zdBgJ>l4b$o&KTZWImayc1*cX@OIcVMwCHF%zwmS()@Q^tRVH4;RBIgiPFD1(#yK_} z-ga>h{7B4Mv-i05Je}cx*{U@yWsO|IebXuv{a<%?#UJ+*UsM9(Cqv1JpHO^ChZOUv zw7sJvG))DPwvSubi7GI_XENw#KIoRfK+!+@)Z*OT2TE9~B+y7wp~bu`!>y>*3b%#c z_MH6zZoC7-39o6Qg~8&~8J?HB)#lEPsC3VXOYRnrcP$WbzjM_^&(}y>Hto`Obs^+? zlffj9C%?{%_m512fWF;|XnCwLu&%8!1zKf9+(lLkkFhVmLs(fQL=+8MA&<^TH4d_DTjwXcdv-mW0nRnMelH`CrgQ`ZA zR@1@C*p6R<|3Q~&5W`8hf!$`qMVKl>)FfFJiUGD#fdw(ZyvYjOX+c2QOj##S9Icp7 zJBh@>MIi%--z&N%kW~_PVV~LE>gmSp4K;lR3H^n-Y8*O6Y@l~F zmW)%U9cGvCkatzYE2XeetSC)FD*$V#E8|288p2UbszC_ zw@90}06C9+-XC<06N{xOySf4O0~E{sWG>6(?laU|JL?N?GZU408~aOAb(YYR8ozM! zfQ=+-fP=U@>^kIMLhV;qvA8;j3$&9+oR^-xSgz1)Jq1z81NO9SpEnA&s*C{pw)E;f zin#$e4MvvKuYN}yjk$K~O|b5QO;hopmD4gnq7Z+&Eh-84P1ala<@m7LS*nHp{J3~t zh*RcFjRmOI$Lya*0JD}Dw}#CE^kgZt*;`<=#%(|`pZ4zjH?ZrhUGMcX8kf7$qX+0s zM$*fRs81VVbxfrx@9u20kWQ!4WES$X&V5Oq9%j{>22FJ|LSg)H$U<)pqpptCc%3^m zu5b<%N*}r`x3n}dl0*=kVfeM|YIi1l?MJ7n zi;jr33UyS-d5yV1XRHZyCuDB1({*w2>IQ`N`D&hngcE?_a&RrLZm}I%LF>bp00l*| zu1vKH+DjMX(?g1<73=SyR8sFR;kN58dG+!{Li1D$*L4hBWRjx)}z|9;xDF?B&)Mi zTvc~DXfO6j-FL}?uz`@MI+XMeu~f}#L!B!%{Y}`8AIUp-#(rDc*Z|@1y*3BCi!JHC zN1VR65ij4TqcG^=i(8*+2fDDk(W69BcOM7t)n}E^MlU;N>>1=Aef@FqG|Na%bK`e>`)pp>ilJtdu_zn8g4Zr6cSq(nBShx0Qt3UE%B8O8ukAz8 zuQa$oARoeS&W{pcV*7ktt0#y6ynP)Sds<48A!!wCu=GClr0xrR&qT~0unf|&c)@c( zEj?la>@nF@L4r6(?T)6AQNhk)Axw3ix=C6k!*hA~rVIJ2l%Kp^hffC{sEKZbV4G$( zKFY)IeS52x8M3v(S=6-+(<^_}^7Sia_@eQrMO52H+=^qv##6VYL2iFfxEQ3)&6&J{OsnW7(a70@+E~;!^)S z2m4oDeBs8}c%gPc@~80IHXF#I^7)=l?&>iHDWlv6bXxUldHc-4YsqS4a}lnh{ZYn7d=eywFUt$OvYI)v-K?vXl#N=qn# zU9;7izxey^|CLF!nQD>^y&MyI2=eC21n#-@DHom__ft+UesM1RWHW^URxuVN>h#=q zpy_pXt_K6>RfT5a+e19d(!uL5RHvG5t)u5M6}m+4RoR>Fzr1Od9e_D{qQ)qJ_29|3 ztTny}OJppMR`a6pBC98SI8Ap!8Uh=v8IGgqO{Z(f?j5uRWv|DFVy=7}K7$*D*FUpj z`8CBt!lt{9;VZ{Tb?PAB0e(z`uU%mVNcYo#yl~aZ2GJto%HsB&de5~;#}}e>;?SBr z&V?(M>bfiHx@)1v{r2V)zH4G&Pu$?qz~Z3jViRzlcY#ISfern@1vQB1#{mM-uz)Jz z@OyCK>`4d_aw)Tzm@y7U%)YC88og&(_^};XC_~tPT-u0M!PX!&zth;x;}x+Sk%;e_ zpuuY#?4rhPrXcA(0bN3K4;-?1tmz~+Xpk*U!ZHEiO$;|H^FuEPy#7&PH_@P1pi`Xk z&0=Y=Sfljer;sfV&I#Z#+DBgWtT=4bjY|x)Efx@ZBK$l*%vy9NCV&1J~=)L;4 zB^=p(AMlfebAI@7BbOTURT*Pps@7hR^JpK(g1+a-LQSfPA!+zT>f$X1^kV$k%44u= z9t`wuB$}`7r2rPrMNkVSf|o;#Qh|i8NIG8TLMR4XVgP8&Eb^`C$ERiBeI_s6H`Mx9 zE)=aYiUu4(v5lEf&(S%ZeM~3hhyk0jS-EX|>$s4y(}Wp!VRK&!0zG8IW!wn~K%d@_ z*AtOqN@+!9z!nu7lo#kT+^(k`evs|LzVFeep8;V<0XI{?3f>2?SRnMtXtidVSRJxn zuJ7XFf&?w3u}z07jyixJ{RlO8ou<0W!Fy#BpdZfSb3FV0{?FBM1FcL@k5C3VaE7G; zGLL?Wvu^r8$Nv5`AnEc>$c5?w!AFd$n@Q~Pu6=rJK}f03T1KS*fIYbQn(>cV({z=T;-i+I?5pg8wXxCPjT z+v1=X`=%{#-8_T$5qi#S&>Jl~(^G2J*3L~m)dt?X<^{TMr*+?MrG;WDb{VMoH!|VZ z8R?@-0_#49)qM|JaZ%8WDPy)vjj(-LqmNYHpS}h*m`soCtG4-K-LMaMd1Zfb$b|pC zy%5+$R-AHeDBs}bn9ou%cp**rT?u!0+%0CoEj+xfCt_PUb);&dJF=nB7M=}!x+bR6H;pt&|q>Bj{qcfk>@vGM#QU6BXrn8-k-gs0eNTcrSC#& zU2;G=`W7nFr$~GmQE97-*Da?urbxVkGI%;x}z z8M9B2#2$Cx(%qTFOX1Et1@guUB-att#y}K#vF6pYY=$hgG^RD(Es!d3+#n!_j-ff(!LCtnAWq~1swWdcT~61jzR_^ zWx>K&x+MjQTM%bdc(=*22vF~b;dAH)@iM?BZis#Y5(_2Y8F%f)e25ZzCc@2wD?DnW z0QFuZo2-_2;NzBF4k`h|08M6g8QVm$?MsbQV}g zKxe7?)rNTCq_g3#Y}xD1LYn;N){(z&_Yb0DM{hPm9;kN4UuNU=olnT+xadj!NG_zs zk5I*radtR4UR)TrfQRP6%ePETG*vJ zp~W*ai(q-Zg{H;?*GN4*iL{z<=umYp#=UzBg{ZL86hX^n)gkx2PwZWC-KWgS-+nu? 
zA@$LcwdmZMYb}Sz1t0B|wGHq9P@o?&&jLHQ#%zl!wxO<~Tr_O8i0)>C#S?db&#`6s ze|_8Oa$NCdT=BU{C@S~%p7w987%Rtd>6)BfNK4Or)pZQPfWSxu#pYeeGL<(dyngAj zV;>ZCH_s;dKn!*uRh8Sw%U}l<<&Xe0S{bc?W>+KxF8`ADF~9}pGb`!=2W>b&e78#A zX?~6AjUuI!$8QWiy>}!lgc$Y)lD-`YUUQG-RkA#GsR?y}hjpZFdc;~{m#!E28P)jW zfn7=GT`=Tl0a+nHIplQPa~Sbw*n9j?w4nB+ckZ3iOG0bJlyRPD&$^&;D$Vcv{@ z17}zRYETxH`|DwVR87(@bYI;Or`()W?=JT}%HqrRO27xHbj!2Seu z=&J2K=dgE~qn@LQY7Cd%#YtRz7ek0OQtKXm`$({HzZtg7g^PukuT)lfyS9yG5FSaO zqdDnLvRcel1NW*R-Xw&N(Q{+q3~Z%?2$~S&ae=f~IvwjcYS`Ct%a|o&j zZm$~S&`mBb#mP*!jwpNOJOxPeS$#^NnY!%Q5zR>Jdcaap#2K(^Lr#am^gqqq4ILde zhy7eqMW3aJK5rAL(%DGr+?oGTMPi9~uljwRt!k!WURpms{2eQj`yeI$SjXD^chWui#POm!;##B?mX$O(?5exHX~dsVOLg`loIdo8ztn0I;I$FoFMwXg{0 z1hkXH9m9_7@a3>Jv+zPwH98U1~^HbM5}S=d2IB>^N}w_$00FEnr&`g z^DMGgeJS0pd3#A}Yz78!sOr98MG+7U@)LO*S_O7Bz*xbce;Y1EXy=DGS^Xx$13Xk0 zhFB06_pkRWJCW- zucFrplT*+-(|UQ+2vl{$n?NlkXQ#(UwFf8JK0GkB_}*u!e;&B~?oiTW_k)C(Ynxt35V&6) z;RwEi=1_b0`tGlCGK33sH0vD1KKR>VDH~TuG8C&VH9Qs4|t&fAb{- zO@uGxdwd?(F1eX`LyR(p6n5%z5R?qid+e1Mexb(;8fM{sv>8?xULTw*8V}R1oEIOu?a;XEvjt z$C`X+70wzs99{-6$x=-&i~>{_waVg5q>i&N)tABD++mocoN09pZ-2ZlGUlWJ)!Znx zIQrw_?1&wTQyfuI>nuQBCLiDdY~FHbXdVB# zd;33@TkOQ23_X;w6aR0fT%d~1V!n6_teU{{lMmJwZvJG~OY;}Xd78vULQ^TqUh9;+4Bm~=xdv)d-J2V`$F>c($RK&fYfxva z4HvIUU>C9IW$Wf3vYI>>V?K!$<9X_N9Q!oj$oHGoQg?SdYg9`)KdR)}K&rex`J;6K z55@ZVxG3Ho8)9$TgR`~8xU^uFUO5kGA$7(+eOU#w+^Y|u5P7z#VFBDb#@ibUihs2* zZ}aS$PmeQ^ph;Mn^1BC;NbV}wcp>N6*Pag!h-%--ZkwacLc7aZ^_sR)| z!svMRCZFF~MtjPPtXp30kCEhzM&ZJ0(IUj`#ah|kxS29H-h+y8&HFp&(g1SavCQEB(3oe*=4b;m{Y@VIi z;^Gx5RP|BK+H?hb2nX8lSRb|+sBaVB4LPZ3LAPrRLd6_z$>1h!QUaeeH*r8z zRHF$4%!HZIQBR5OF17oU?-v(1E-)uUtj;%4J5z(oNaLi|5|8 zQCP5nddar$tjF}_u*R2f^m1CPAJOBO8fI|!)xJ>Rt*ZgnR4v*ai*lMy=u(=xi_0n(SMzVr zGJii0Bn3LHQ#)B5$~iB+i{)isUFRwBOX+Rf9CT$~+eA~z%jd1tyDDoV0cwVhe?gER zt@p-f?$wG+SnA?PHKzs7yk^$LFJ>l0+*dDrR6ow>q^WWLg>j_cQbUpQ+Kzu#O5Nlt z25MeAmBHAhs#_HAsQZny{VTTqJV zX7p^c{yIBotz9&hDGm3peJ*fFzthnc+7RN4XJ>{+b_hid%>vAwV3$Kb2DGpXI1LZFwj9g1-Lm=k*Vtp2J*g^5(li*jO~2mTv2Y z^x=2r7g?R>KbzjUAK{BU5*L~P*gz^~Y~nr4v40ppSR{lt^flmSvGs@7R*kV$3b_2! zHe~U>I4QP57t8o+Uqydg;bIpxG}G|Jqi7vP=(X>QnNu_ajsk$u?zHJaW(Y(d#@H9{ zC%PF^;&k=XkjS2Zf=2&H z45cNVrncW!t1&r4C5&S=_(dhADYX4LMC@S`<#wXI)` zV^mn0Jb|Q4)00W->u1V}FZM2e{ZdbHzUFl)sand2S4cB-42TG>qvBU;H)~Rwo@E+q z7dsC{bD9bCGSmT5H zfylhs1#gk8td_Y%EuqLij>{Le=LWQZbNYoM2dzi?@OlQ*^|7uURz#>|$g&%xe@KX2 zYWV0d9$C}UAn9L9O1EGdb|eqKT3nVPfUsqsqimc|R_S3!0gSqO5E_=9?%D0Q=ag+hD&s6~O6$()&q!HescdU5A(CCIFueEa$raFW`rgr&9*A3980 z!7~Gw`{r+k!=BMQ@bbi)^GLSEnJn{OLoI*nz&Yx>ZAeuRDH!gWXO0J{mXw;9K_vYw zzJoN+E34J2h%)n|vs1`B()~gp2~?@C#234y1K~`YNL(+HY>Zq&;j!{x<^b?l<~tR< zk#ZsKSZX~L_+twNp$<3&@7|Q_bsz~@RNQ_15(ejecxoc@VBfBX#W#!3c>(L*eB2Us z)G&1YF8L9^uLINxb_#YBu}T`RJzX?A!;Tv{L)>5WKfu*I+$L=R?`1w4SN{C^A$(Za~X~D ztT}TtC(jhvcM~8wJAmdQrH;4CUiJw?_N>pzMc_qrl%HuB>XrDZL^7Eo?cIBNOr!hwF|~7h$%^6L ztuHFU4X%aKh|(0(Pvr=Zq0rqz!kswjcqDqsOz%2NY_hk=fAU)w)(4VceyJ^*Ll|D@ceDTU2sA_v?pTj^*%o4m!wi z3AdPmv#p|J^2}5XeM)pV{>`vrK|rxac5&Eb$X-xme6~or6hI=ZZD5Wx!{cQYBDXC8+ugF@UR2Ur8 zDoVO*w4>`()reFV$#I$6zX;6MEHeQeXxQaL!ZTUN?-nyB9Wo5_c3Ga(OF4i2%)pT? 
zF554=)buSVcYk>T%%!RH2k!O~Q(2i2PedNY)qY9V;J0g#bCKb5N&4NT^R7+=v^P{E z%xK9nfY}HTl?579!T7E<{R(?7T+*)N{^BO3Fa4ix%}=qg2+)MW&vrByu!&3EKW7j+ zL!?^MhFv9x(_~~sm&F&S-Z~dzn4v}e*A@j@JO`2lwQ)t?W0%876oVTM^A6_cA?p2HnY1*6IY{z}hsA-}~ zd8-M*pdDB!Dm6-Lp9^1f(PisARu{Q}Wc2n5EoL)q{egUoVylI+ufsANn2iR7AJtGt z0meb7<{h-LOTBpASm)Y@tN71Z{%3S)bXgR}hX%vnkOj)kw@oh&etLS?ee!EmpB!yz znxnotXy_}k24O(1VP*>)G6BsYBZH?&rMvcLe9sdT9P%a2Rh z8W<1)xu^*gVD%IGuRd~X^&hQv(#2o8$Ghe6j6MeAYzE#}{^@9DFw&i&_!r$7-|8b1 z>9z4s8sK&)hoA0Z+%S3JE7;3OsDM-i8m8&&e=O@}UmKaF?<)+se~=>8-2mt9^`Q-T zXdzR5RaGy=ht_e>FfS^~ac6LOP5w&O43Id8^pIWbgHDszuux@BKR;g6)mI8^>aVRO$Bfk#niGGWl*0V6I9o(Jm zHqlc(QrcGFz5m9|`^5}O>9^+d-`DyPHp^+aw7)t*!L5t_HT|_6K`Gv)t)K z54wGtMjF^f#Sf^g@-bfzaKAFbe})XXpz9UH&tIVKK*G$8Gj(L9VYkvm*+Z1v_iWgC zl8-@PhvPA&r*fM!bWGRx_$PgqW%%uF{thMv%I+M|HRmM$B&mImWaEOfa%(s(AW6S0`oH8j#EiPxzMcaHN#UxC`3)RF6 z-kUESke{k|!-_9@bG=bAgUH*t3tFihpjE444ZGZ-PkX2QG|;;;i@ry%?u^}|baR!@ z3}Z{_oamV4hp3G(<6MRgG4PQlh+0~dT4h^sseDV~E*0!t=xQ#nvV{&6T5gt#T;6CN zKksx$Rx*ZrszY(k44=j8)+Gk%-d5SM*Nuxk9nB+lOK#!#ufA&+E_!Tu@*$=_=EhCm zFj4%mn}<;msPC)Uw6JLA)wQJt*vHH%Xa>f$8>-dcRAO8o?T=dJ^K>nmRA&~X;We!7 z+f)4QE_Wuyp$60+An{Wv2NKU_$od{^HFbYFerl2CZRv%V{xmPzFV!)ahoKAwtXj>$ z_-a$(%oo+Zm$~)+keub|yWth7_|ZJnj?U{R%LiMrtDj7Gu8P3r=hB94DAeIRH5%uM zwtiZfKUB;U3diz0pG^q166UV-DHfD_q?_`e)0ah&Y706Tn&abUh49uaI-agAsJv+j zS>(}AYrvy>JWjd4%7<;8@%6V%Vou*b%t^WYmNK!MpL81CFJ$KTK+1c}%xiHMGU5lB zfHRGal5w#Tw%)AYXfo$##kTEP;W6F>TLvz-KB!RdhR2XWrR3 zOxCP3TX?s%H<6!mZ6{^uHZn-XLfKGA%~Ra@y^YQGaFwTSo&Fr*F(-|o>&RZT_h zmIlK0vvfy%hpYuf%t(4hPdajQz59*qe5@Pl7c*S3S5Mu9uNqp{wxA)(!MSnA`CD&* zpVapv#@=6cqEs9NNpfbn96)Y4$PGvC7V}!1S$M;&u)&wI_iCApn;1OB+C4iD0V_os z>T2fS-rX_Duxv`vbO1o95{gLrehCLG`f%weL0|S=huRW`T3%YwFwJ78*G0?j+kt26 z75J2g=2-jGcW-rpZ{a#;VLapU!HesNV>>c4*KL4Nd9#W^lFF>);J?bJXVb!L7KRt- zD<=_&A4a&sqPQhTq);pdIp{V;@1+hilt*KXc~;q+KP-gIf}0;A;1%T$WSdv` z7sB85g{X@70lT(X=MEe+Q7Kl%lRJmegiIt%t7@EZQIaZ~nm*ymgg(@rd9h=@PHGI@ zk)e(qTmQlfhi?yOdb06;`ld=|``iYVc7PYDn@@flE$({nR zduyAMmwt?0z5!^-2*Y!31Q+f_VS1c^Xx8_^BCDRbItA6ka{H38xsujYe!v-LZ+E0? zV8=*F-UVoFjg+!QLT9|$uv?wfpD&#DLhgB9Vl_8SSG5U^+#i`zMj3(0+yu%&pl%dL z0Vk>>VT*RT642luyl0a=2*(m|o#Jsg8OSGJeBVBFw9hGlY`ipUMw^|mCXc(}xF@_O zJTdb!vumKCCjPwVY)yeYu4y6yHM}lQKLOo2txB;c{c<5S7`bhlz{NhCwO=&*kFmI^ zw4VXOeSgpKBFnKXq&;p-is7JXY2i?t7mej{0d_Xy^L!r}>TP&w$o$M<-3MX$$ISY( zKqRIL7&e;(^8 ztPHt6BRad8bMDJ;;V7#nlkKOmCYMC*&XXcky2E`4oqu(~ZU_C*vi06n;-naUe$gtQ zTkuV~8!kWV25I_>>kp`6nT`o`JV0}>zIgJAVDwiPR1E0r(E!$C_=A@6mGxM=W%8|F zdvAY`Al|X8j!zAHB_L8Z9$hyp;(3G=A#6JCP84n}8U0K!%6Bj7t0^!>CDP3zd{56B z`nICa*!oTIHOC(Mx$efF^xdCwkhF#I@ii$pD<$eyZ3ybGE^NC(pZP1u%u5<_?EX3P z`;4H2{y#m$#H&-GsGm>$K!yGYwo9JuKX=*M4f{^R*Zjv(`O|QPDI?z#&Ix9t{hC_f z@@9BRQ5%{2gEYaT^?td)N ze=^+v-*IQ542V$Y2WVU!FB*tM4-Sn(fX^emjv%`)c7sTWI(+5qC?r!3pN=tS z$lH5_R%WiPE)W_`Vm*NWl2UT`A-d$9X~z?y@PlHZH8sCnus=`tRG*OaO_5D>CDd4Z zWQ#lWpY@KI(L8`9BIP~l%U=#P1TM2Kb5*?58uZ`^e*a*vQH!bg`BK5V?EF2waUw#7 zN?7H97tIOi#@fU|Yl6}1Ylf5>sSG*)OgOCKPZv_C^jj%;LT=JI?IiUxTvDoFg5Zz? 
z*vY4$t6B&@R3$4?hL8^Hs>;s}YD!p%fCGFr1^%fm-}m67 zl{te6Min|XP9dm|&-_mjGtATSj@A@h{kEJ~Jcr0yG4Ev4i64>cj)%9E@W0Vov2%Ha zKo4o5?xP(J=(v{nzn?n+n)a|Jc7Kv4 zTKLxsv#04_?Y{*Alh;395^>|NwjzYmR%$vzyGTiQ+74vYSP1AHpmKs^Pyy zqQbwtOb9@asHHipV0F*tdW#S zdHMZ?QAb_Cyhd8;ZGi*A#FMS0XsjYKAeEwgnXaJo`$pyqzT$LnR?##?Fry zXKZ4YQ0-R|@^Ja>R`YG7sC56 z$|W#sotcQ8jWHEa*4u;MCDAydiXFP5by=Axtfg&6AC)JvFiRzU_Bt!iBf^4|?;PNq zj6CCt`y08-dnPE?Mlo$OZRk2XBWzYUi++ajMnxVQ$0I674$!kV@cby*Y0Cla z$QauayPVnA#!BzD)=GW5zJ8w?mT8AT7f_?vl5c(?7_BmYXHxykg;Q3`&!P;ejqvfJ zf5rbY3{c^8h;?Z(5Z1Ux0!0DrfmUsuf2T~$aIGbVPZY!Cj95R%*iv33Xv%Wd7VwtMOoXErx94d;MR$I7 z5~Xjt)2G00${3{3!6d|@Jz?}gq5HVSfNnQP){OA=dcY+v8MIVBU_MV#kZw!5laDLB z?9jC^Z&@z7WywgDf+crP1R=F|l^~w>FSEMX(^y?~spD9p*NHdNBvwB()rRstwE6sj z@QL$H#G+GJ(q)OTv;DH0CFbb~l+O`J%90-kq^+24nH`hU2{gGmfRNn*C;n~BtJk%p z90+;=IV8PKRu4Ntkqb~SLQk{^nUY;@sCENEt=-`*)7IC{cDkY=DBVymFH;x+ZWsa7 zX~MMGh*ZMMyIQH}d(R>ovc*_<4&BD5uza^bGi3m%EENv9 z=-P$-j~_u89$^f<3?(580wtX#snjjR+2a|Lo0gL z=@F{zOn7~D!C9-824(E5fUeAL{DCAZk>%Bo;6y)N8?yGEG4N9-@Abvtm##Lp+le{c zTWG>}ZLp9$J8xEpCpVY{p&H6w56MZkUG#MaS$%V0o7j7pgmd+SOBos1h4%Aqkyb;koDuhqlb97RUAe*hKTVGpn;0&7_ zJBe*nD?FE&aW>@pF}hP2hQ;0*Bfa9k&+qgBe&+#z{PJIj%Z&C7lzZRf>70qy;fgFP z+#{k$zJFovOUT{5tl*Qj9K5gr7!&sHYPwtmqwqb<@%PZouBo*KB)z>gJ_9@PJ|=07 zdRW~vj$dz7yIqV-iw#HVh8$t1&+Bhnuhhzbid5LE#T-7y*6#Z3@`eT>(&r5NH`;J(o}70t@x z?+@g{kCU~A;Ow$7X@T)Au!2S$51-iHA%xY0Q@IzbokN zsDYjh;fRiKhxYkOpz}nVhT0^ecN1*jHdgE1708rU zXS{T}{_faOscJBkEvQVJw#7Ve3N>bA_(ArM(KL7xN%IbK6tPCIDjy=^LM`p~tlVf5 z(;}6BKG(9rUb^@88QIGHW(#`n=XkiVwC*B@&e&JwWQC_H*wv8!7nK zt5N!P`9=%7?6l%%k(X54a=KRy${?+%UX)@#9VI&hGp2e=^7oY)D`J2;fD;0^vocfO z2QOVIoln7CYl}be9%V|^HC`H8V?LV}&RT~Qse1wGMzzJ;_@d1;)Uica-`y|G8Z2Rb zeAIk1f%B%#4h-<+g;hIfxB&@iuIL%{;{2mce-@n1Q#kos#NUlT@t=nV7!?6>~1(O*a`>0sU>Y2a*kp1ng~Y7Z5A^$ zK28_)>`4}nZ^(NsmteEyDBlf**5vObV^m1v_J#pNLtV5&>(v2zH1A;ozBdcE=0gHJ z2eP2fAMD}#hQ{Wo=k0c$l z2}eCR4G9EK@Jsw6yx@qMqf8&kSha4t=<1`sM*~h?*6|Iq0!QPfU?_Ae2Au3%gOFYM zq!qTw{G#lerJpe|=Voc>kLv|Vdpqge6EEU9nRz73kQ=A#W!`V##d*Rx4#k(+k)j>6 z@k!V=WE{g%I6_gF;8;@w+^iM6WemZ&a=Hj(7~epVcl78d87D*xIF+4Ix0FjDC+p@7 zIMLc~&_T~KBpX=fxbSjFG=H{!%!e`;Ph5;&HQ5g|Eo)9(QgbD64y`eC#@nO0Mcgj8s z?kuSH@9*3*mtRl;q6l2|_gER6Uf610FxSM6{Y%E6_T*F$2ovxSdg9FH|+Tx9T z(PTjZJaWHR-eke=q%QpK0)2R2j%oDx#G_6jcosjnJ@RPqy5(te=y65aEc%E?A|vj& z_?^DoVX{+F-aywecfb}2>efkd5prvr?X+L`V@fN|{p`}{(~<+?39f>>@UBdaSTn5E zK%>12(N6!+7`Z>jRbfm|@Q)YDA|_a+x;7vyw#0T^knZ7XkZPXDCZ=Wh90vNe&>`=o zg@xuU@8;~7+${7yNuClbWHjxmM9bc9Wc!+o(V%7_e9&is&pGJO`%f*Ds|ze%pK(5j z-(x;_VIy&Do82bfIw`+xGI}So`eiNaW04gj^F7sqK8>G*!4`IhNQtaijVeZ@nt`Lv zW^vD9>Or-q()RTkegZaDSF3m(FFo6I5s z1*mt=oSO8fZxbCQ*fmVYk1+jMJ*#}E44t;QYI4&b#3>q?)5C#v<9=#Vc{LV{^sYw+Sk0#2X46M5lnnrptCqx&%A zzSM*_KttNZvQ@-((1CcztvmwdZG*f`W|_Ok(o*uYlTeApZ0+9-*=@9}O3cYBqS~&z zL7bm$NZunxcz+-|y|###5undl_onPAyzq@add`FlqVu_nz|Rf_xrQf~X#9$lv^Em& zgdMEH_g5^JG!K&cF$6u=s^M@{)wR&U3)nN|pK^8)I>kEn$V8wvi8;SqOL7gA^b6{b zIr%%U(6d&e7v%325GoHvB?r;By;1>q6*twMLb@|U%pWgDl%$;@8css7Yok5omzAAU zoGAo9CGpZQ?{K}M_dF8ss$X$?tw(YVD;SBZD;U}6{Cd^jez<8`HxEtW!NU1%&k|qA zEHBOO4e+9@E*AfKQJt?4=mrCMTau@bm)TpIx=yTxKZjUA)RfkSTNUD7}~;tCS8e;3m?gDWdRtR4FYa@9hMq8Ij34JeSBbwP7B z7$Vz;C}9Z*p&jL3d5>sMqcf#yh62Hl2saEwD+iqd6PvnX|047_6B_c`kJ3p3rd#1v zML2$IJHGKc+mso5TijZ9D)JgJjwTy)jkn zMq-|~lSwN?cq8+8V|~C2Q6eEf-a5k)nQl-lG*GaCY+rXrDM2D=iiwV$yNFDQ2 z(YMqpzQChf)ol!W;TV^`(1fMSjW&*u3*JT}->kN}!4gOFQ1DbpCOLDJ9>8FU140-@ zE_7VsExKKM4qj2Q-??ce4gD1H2B6YHSEttof|$AX81pY|B@!ijYVvJF7Wk(EnAjy= z+5ydC3K*k$#HuBnm=&qEb?jr zMWTi}n3c_6vsMl|R&pRrmdvp1EWqwoxY5&U0g|C=S*od1$?VjLD9;KjHB}i%8~TLI zNN3^oP+?i<3_Myt=f^ZzGf>g99=qZjke^ifxF0~JA+isO!-ZBh;Ni{EAV6&n7>=5d 
diff --git a/docs/src/examples/mps_from_tensor.png b/docs/src/examples/mps_from_tensor.png
deleted file mode 100644
index 5298f8689b5a0a2cc1f0961d3e13cec0db7cd1dd..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 48581
z48v3b0m-dlbAdaVVy=13&fBc}>HBD`Bd^%B?^g6hrWG9wpaB1QR`N5Bf}z@&hwfsa z>3q4dzH+$#_8onnKDJFo5fc=4C(-CO+nRpu{ zH7EO*K4Wm~Pb)RxJBN9Pnb5DU#mA{{5!gQ&$c~GWE8-gD|0(hK=S4IBqUEdgI6K5S zd+OOdx_4eO0@dQi%8)XK;xkyJr@GL7sh=^Phb8tz|JToBk~mDwt!}^O$aI-dB$%kr z`6m(lzY$qN?6l_}7))yIy1C`!mj zIbS!L-c}h^PKn3r>5IT&b24_Oz=KCAj%>vwG9~mTt_yvr^u?J5fM?5V%ITmpz))8D zXmzp?^3!5>h3YQVEUzI6J)O{dYQlPpDqW_S^!SKa#b5MaRTwR;-|26?gL~1@KI=R< z?eU?HTgx-Y)iIt`O#>=|9GP>22fNiTZZdRNm)KObkx|RZFX~dje)exN5TN%1G>*xk zR1vCkq`VJ(i_2Jk!{u#>Tyum9YxeDOm=sz>HiU`?51TG~-Bi}ZfkiQ7-O5oinv*wl z+Bd9Akh01lZ&OnZO&^@`3dEz6GKCJ1leHm9W4`>oa9g);s0PFVWhw|_B(~POK28oF zOD`DJ_<=aGg!AaZxV5^?#YI`h?=cixXFp*5X_a+19$ko~3I5i(^vZF!+o(o4AB8qK z?mt^6C2CF^8iV}w-R^D|{u~DPAb6Aa9UbS#RH8o1R33@sE=Mne0ykSXAbvtg)2O%E zX!FIZn-#C4afT2Y^6=%S|7J_ThmXU|AuP8W4*uypVt2d_zft;}Piqwl)$2qLmdXWe zdH))r&i4LILeKerek9I-h0@JSHO`-w%>kcRgzOcKIM|B!HhWKENTkT{HoM8HQ{dp= zKuap>@YYSew+a<#X zPyqTVF9oCNzn-;~Y-f7k1BgorxN!&g6vPbW%Hd@x1r3WUG72Sb@EH=!n|0kp_t0!* z3nkUq#fjlcMTFcoTc@~v%onowg~G6gvAA0B_&lUIPlK#Dm~eLe^Ld_PJULiKxXY#hxas(JuAc<$MMc5i=}J5C)2MRXm6!fn%%IV# z%y51SNy20e(JsZz9Vbq?2WMZm!I{e#;AS?1*4E8ID1ja z+8kaOb84#`iDr!E7X47{AdZP-L;%~1Tld7q+WC08K^37BWjUxL%8}fVKO$N-W4fDm z-Fxb4m~3n1kO{$SAoOZ-(h$Q|khkQxa1u&cSYCv}^2$&K9r;w33jn={9tz5x@dn(s zo}nEbp0_c6(cYNq*hV1(giT!<%*N6lCed=UWj%xiY9xp&i_@LtH8=&aJbe)S9N9r< z8S2e2@3KycU*KECMSG227UqJA!!KKzBJV2x|IF0tHj^BTzYsg)2*;1V%g|m2@F$b%n3?!3E6b8RaDU!a5!<4t$ne};^21W zD}5wY^N)e`W5mA?SXdU|_ai>(@6ncG>*YxVtnwK&*9l{qS_ZO!_LLI+9OKlymcC^Q z)`-Y!*wN}jFd4qh(4%{@(JX7iK!1EWt#Yhg9v(qj$K_z|2W9^N}$wCrS24`T|{vTZrMStboy1SdU%!1_|26o`0R6OcJ}Kx zX=tZ~?=v9N3FuBCWO>{ENk8bIrWT9@3t`(O zf4gfeP)0k&j*$ipM|$`!x-}mCniqBGUR9VOY=RK~x>*jAiNlur7U{vm2=$1bxrjim zfcOI^kC+r9+<7oqrDZs3PljkkUv`n}niKR){Zmh>%eoLMIL2)4iu(J=f2yw^Z4gG; zd%96+`1e@L2<~(T6X#RgH8Hp~*4^s~GN@c$P>YuVcixa9wrZdiE;l-%-6SQTg3-=Z z#*l#Fr@bLPd>$PUFJP<`^8S>18FdV7yHRokG)Yg*O#kucOD2B1*;FBi=HMjM{-cE^ z+uK7abIe!oE<3PD#{gwA@0}bqpam0Rf3QC*R(PoUr~cM0aWrNZfc*VmKAF5CxbH>6 zPOW(!;~{*mh*1nO2gJU3VV++*@?;}QfV5vY&Q|Oac*-K=hgBc4TD|*Mt1|?4s#?RD zN#skz_?Zo z4ZvNNB7G6#2m5q*MoIhtNx^Fc8E7md1mo=iOgeftN?)d;6KG{@ij0Z1@Gz zVCW*%wf=rc5os{0&gUT;3@UmEfaoEp5x7;EHvQ^@*UVf>gB~|SO5aX|>ksNV3jf8Z z_^{oW@3i5it-af(^^||#_^%P_P!8&7J!*dw;tqOIM@5vJ-!#6|_IPUTN;;w2w~2jE zn9cR(ZS=l@q+x=oFm8mnn$9igALc!phWoAaTs%k;?M?Q3O{L4{y} z=d7LbFVucJ)5&WiZ^dS=j{3;lB%Fh0o(6ZFRU?4Enl%Mv+k4?Zn0Pw(|m zvR39$8hOVmiyt;9e71-=Txcw|-x|fMAW&WePmJ2zD*2dMPQ1UY=YUZZ$N)T`;wtj#yt?bhS=Hp9<*`N(b-MnND>be0 zkHYNoskiZTP>dE_0_5#xdogag9P4X^@?kVY_9C>litN5h(eae0Y*(v=(~B`jb84t_ zXrkpOdncXq?Pk;O4!pn97$8sNi*S7Od;>}_BIpj^OrDHak?bFa!NMDBdgy4aJC;X_ zAAoo7S|^~nl8&g7s=tr9=Z%dH2YG`N&h^^Pr$H|B=E>tmC>W)kf)!duXIlhGJIFZa zh*5{iz3n+QwPS#0D76#_Ybq~$+h@C5{0$eRTrft`;=o^8#?x-xbd!R{s#^b~WoQqZ zzJwQjA2n=WNh5eAgnI8>x_AgwGmM34%L~N4!CnlX&i^V5s*z+OdnNfhnT^n`Z%I4@ zHQ(~^^cG6G9%h!lYpnMG-fJhS(UZED>XM#fw2FN}!%Chnj2Kg*AAQq4*;INO6_=O7 z>rM5=dO7pH+tgRDP&3z5)mmmJ&-W1HpT>k)e`zcT$2Snj_v}UG4V31asBPrb8n(0; zER<=tvT?`?Y1_Czv6E&!-lFHS z>ny)H#*i6!DJ2PbLa)hu2ooaK2|rS*8KGW5esmf=EKAA?z16Nd-C(V*+iX)vLa|~c zf2~{0BR^S)MNd+k>ifcPG(_R^Re?ktbe^3NA=JX@^Q5w10%R9#l;#>5KKG z2O*rYWg|VL?S?$vZc|}8YUQe(cZ&C2k#yQn*jsLO7f`|dDb)_#`TPf*?|cioF;Ku0A&Y+g zpICIwI))pd4f!!```2XtCjC3zzCHK*;ldt@5&E3hD3eI5@Lc>X$KXQpwdWE;`{w~} zdswT!cGG&3A)ynINf_408n5uWc}2H&2h}*4mst;DG$pl|vsGLh^Rq9uAIA`H=2BI4 zBtE6adkO)6@vb>jX9duGEicYk9@Tszi`BK13gz+_@QJO9HMnbmyy|lk^5pV z4PO#zgM)Z&whh!jn)#s?uF~h%tM^L&+v8xuun;EIMzMd6S1|tXXQnWMJ!5`ap*!zH z;W$%>osUnebE0?#JWs+8<>#vsmKBI^u-v?Zjp&{5D8(BmODpDru!1Rf$psRfQc;RBA!8)Uy-r_E9-&-044;aw39Aa7zNC z#IB!ycHT=jX~4*jHJaG3Z(KmO?=hpr{%o{e+licKx^p>>P9B3Q?j3qCg5ULUMw2bR 
z)MNhZ`sAOhR}DiTlbPuTaXW1vI(gBN%tf}uyo07a@nW%Z9BP4DHi-B^*MW5y{~>IW7A?PmM}oxAM(G=#=AX!4&72%q5`B$&vsGW?>|On@ zMA}_LkfleTM*9M~zT|KmN56cuX5xyvLlL5Vbn z7ui5w$DO;6@=RS`SZW`0%pc~0-_6}(&NzlG>i}xP3%)J`T!XO8 zbI`9Y+Q%hsCz=|)AM34Bcj*rZzxdTEnt+qQ*N-a=u3{Y&dYvd`Rj~f@=<^n7KsLn> z#t^fA@NGL-^1)20jL-F1t;(5~>QJ=+w(^Tr>@*SUIA!+csS>0d3x2i_T*Boz*0NRb zE5U);N-626b&B<$*6Vk^8(lTwOk2kH$3>J0w;11U!)P?+AJKv(Abh7dZR3{uiPS-+ zE9K9vI^Q}Y(`TxS@a!bwfn_vn5urCF*=V1OdCMt44ffE*4OV_}@+wWl@xz=z=&!JQ zvG`d&@MRZZJ<89GAi<{awQM_hwGo;^!ejqYgAH75S0}b;2RWd$J$5i(Z=LOP_2&Mg zKg!0)%%9i$2L2u_*9V;1?eZB+f-dZtm@da&9}*lK<_(B?662gu5W20`vcCQmxJOdO zVqTVh*6-=GS{^#P8z#}jcBFKGjlo2!p~n=&kj=L<(4%quT98%1QR6WXP1*(M`C&q! z1XKR#$oMyj0AT+&jGi~WW?lGqML5hn9z6htP+yB0;fxH`{6e)0{c7Y@Y_UYh*P zHX~MVPX)UE`{mC;N>&NqDE=T3zu%rSifWw5Uqm+OABlRe!YEpBUT zQwWd6+~w!k#?i(_#uYY{Gey8ku8H5&n8sBPYk1=OBR_WaBGqccA5E=})$7en8Ao99 zHMnJ@|ANcmvT2E9y^zyVpi4nNLLG+9PcwQt+P7ZR#on)5HptHMq#Rr$3#p-~1eu^B zPV`=DA)i*FEa~VJvVHh;L_`C`z+OBad!@xdWAkG}{n-L|JyLk5$*`CGfL)i+%4VuU z33){KMoGKf!BDOCTwWAok7ch4!!1uKsi#=*XL>9$y`xRxax{Y2PRCU_?>{|0{x}2O z`Xu^VP#)p3gBdsPg2UFI#qut6C}#=YCTmewmIyNR-QDD43}7FJV#9-nr%^_#^)a|h zNSej%?~IqU*5JW7VEqj4MDbCv2Oo<7$}Aoz$kZ1W`Ts>l%3cE8Rym;}XYb!eiA(Bt zG$}H+irym=N}5Pq>mV*mxeNriO6JLaunSig{YfFfN`kZ@*DeY-;tTMfS@bBxDhb{y zS$x*Dee$(H(XVr#YUs)4ZgA+A+-a*B)3jNcbQ*1#}%(^;zyAz|LhC!}k9H9K4{R(TzPC*XO3?mbox!DvR zi6sxSZ^0{x2EV3yE}i0c1*d*(p}C%&f=pf&{~1R}BNg1u`4i*Q<>`F{BQW9bWZu&s z09KyGs*-hZJaK{RY%+^@Sb1pyM?c&lXLtl>Vwl@9i9Vq=I z>gI5pLSRJ;Io;S^Sld1rQSG0BGqk0hVP6YeSa zaYnuYjk>XH)Azy3zb3l3cA?U&+wi9V_}i2Z<-aIOSYB&v(F9o`3cE3n>}4@3-q2&u zt}63LwY3l;5!Q`EiaVN?Xn={3NFn6@P9+%9Qqv!3d$kC@z7PF-h3EY`wuaL;ot-e5 z%rRpwZDf}oqi9}@4x7!?A=gUIRJXVJ%&^>YPlrXJxCiNLC;g$VnU0soQY@QKG_8E~ z2;aEEZP^Dp_8Z$CmDx*n%-(dKK!DTD=Pj*A6^q;=n}mD<#uRVCtQqiS^S-*RUB-_v zxr6z337?Z5N{xKE$R`Wkq$=Ix7omDMkFiG4U{+dB@(qdpVPAYnHs*BdR#57Ztx)OdEb{^F7*8e z;M~_vCOktBiNNOSudA`w+rEg063-F34*S?5YI0Jr;WJ1P2)U~QCd{kz174Ok^LieN zf^lUFl3RgdbPm{qm#cGH9s?1ejaX&4Ogor%B|AJq^TN@@o0^<{BLy=uF`RcE47 zo1{`!jAwy%yTRuPpq6Yw84`|-&NNKcP!tyjF^o_&-oGP9x3P4Jqv<$e-uY72l#6D*WMrz^T8ugLMNYsYkMZGzq4W(g>6{$dtd-bwwGs^#&X*dNrmTrad)6wYs`VlcA zsr3H@KfjL%Ufxx2Awe^2@` z3UE`JkbSVX&YE}jQem8az8tHOX2Ct060(fhZM$WaV&`fE@g^*p`-$=NlPz*Nk`r|J zUbe=Qs7O~UK|L%Nz(n=!e^ZvF%+?oH4?cs)M+;x4Ha%_p8`Lv(J?6MA!H--lL&B|5Z*`wrmg(M`qo*R< zzC)F|<7&&BCMb#^RvtC|&r+g}kuZ$(v>PKsl8f2wMn&0E6_RY+v~-!XzJrjIfk$v7 z9)azm(996N{pPl-LZpud^P>S?X<5;+3^aBdp`ok2Sui zqtdfzGT}Xj8cwdaq;K$!hHunT;00gbe9VBJOTwG#yFzPQbND z3-5Su{kRJhS#aN_PSVjsN$iV>yPM_QCnp~GH(#HPgbML~wCcP3EkjZ?3cAss@e%`_ z_Wd-O+wL&k?UDrNApFyA7cG`y6oriFqKRkGCEfw8E{)uV9=R3CN1a^I(R`TZ&pYhm zjCR?z<+rw*%VscjbrkeD0L26AnZ=JYm_ofx`KJn+6>)_E`(20vZI%8f@Pi5Fq1^%& zIR2fX^F3_tXh2o|DojfLMn5Seym>?>o?i&e-qQP(Z$0Aqpp5(=YsGVO?)79yHQW1XS zTtQN*QZjwVc=u<}xBMp=B)!M}#P(lnz2$^|K^Mc1!VU_Fa;RTrG=hk!|GciZ1i!{? 
zr!07EjhU2zuBe{ri;OSL#rl{uO|YotvB^4e2;cgXm`lFVT`IZrJ>jJL-Z478x9Mn4 z?lb(&T?V9Xf2U%I@T?h%E!N=ohs}i%)D`TflN8lq zjObVGjWBW1dvf<@p%CUqp4Cr0J2C1&|j);g@rY(r8TQLywg)OchA?NEYDK_@cg%{AS!2=n}D}LV8 zf)A1J?n`LF&qil~WnI}q@JFA|kGP;PUD-s9&-*kaK$~u3WP8DnW(Tbx4ZC@Mc?W zKEClnRBlg-T%Q0uNEmY2YzHLczS+?jHo85Cr1jzSIT_dMqMO>rKlj%LKpbk!VnRgl zXTM|1Q}eT(-ct7?PAr{YH7{>P;Y2@|UFG3|u3U~Aav7P$oQDjB=-9y=a8eQ}_wOM> zN;23Q@D1h$c+svO-%rlPHjYzfx3OR5;V5cWy{*DBz}l=62k>X>t)Wy_qhBA+cE(*a zk5;}aJ;CWz_{o$K^pc}uF^0z&x%?5GHPjuZ;Lj<8Hm2lY!hE^0xn6rqq|fBCxrd4_ zWM7>;v-^5cb3?D5wK}_gDGX)kYF>dBKfBX2=|D1FI{W4P(s0k?)?gzc#*;A`z3?1* zn#2o^WODFU&qccX40{?3OXBg(Ur|e${AZObaCwF?_gzTF^3i{w&ugQ}Y`GT#jCTU{ zoNf{uFH@NyH4KL{=WjWz#{6fO6n=w59eV!|*_S)sBQkMRun5Uv3ZfJga&S0~uDlm` z!b;p!!>`p96go<9HB`#yz>&fG*i{!P6$NzoF86(kzPXwx-zmR@-fLSoT2R z$N75n)&yBUOCPfF)8u;+g7oJfAF_+IDkdHsQDG;A25`L${Ed$io!UHB-(gO&_wf~{ znwUJH7u^sy>56s@L4?Y%Y(%#|`?x@KcAc`?U~wMp^v3)gBC+sC`e?*E`E7gdT;4V3 z2ZH#FiYIVYtrnWk0Q}P#sgVsONwON{g(Vwy@k`2^Z;W2wjGPKO4#{vCc@tM3pzx4A zGGa>Ub)Wdz@RvSQFK&B*x0y@cYvvM|b2zzoR@r9fLe3uxntcD%2+`Hwh4rGkLyo@Y z&-M~q-VDK9&)LG2^3NB(SN02&WtX6m%+Y+P`K$EMoS^@lY8@?L#f)Ci86W=bsB*~h z@1L;Y0jt3E;fEdC{Np%0lTJLp?ur;!*UJmrjIH7iu$Sa}aX9Tn_lThwA1zf1S0?k$($C{EZl;@Cj-MY++vwTRyjI!pm; zCLqtD`rh=(b%62tG$b$bqt$dC zY)gQ%dGi^QGvFaISe<9Ah`T!bS@=Fqh&sJ!rx6p%YVb zRFyngdkgz$w5*h$%Aj^0dw7BOBEUtm&|~`{XOUW8eq8B-_jlmR*^*g_UQJE@83q7BkYzhX)TbSs56dy_`qPF z(XjjTaOJ!Bz3-wadxwrsHfJgZt!bjCs~u{$70$25V$l7m>1Ny7S57C^JS63gil))| z;qbNZ>d(LGSJ44~W3xG`ch5bguDC;d)bxM>vz^z`sE-fHxo|~+JcH^qSajlP09&l> z4{9067ITyy+yq#7;fZ&bjs5U)$80p7F4G(=PWihS6p&;h$>!?Ji3~pP7M>kE8|_oo zN<+GLON~uDe$m)N{d<{2Jo92MeY;TW z_v8D`x4G_XuK#kbbI$$0&-tC-4_*Uplm0`?PWxQZPRiL7ePfSLv_B8CxsFsWFj)PO zuYJ4dqFUbs%O}^0aEzY_fV= zNjBJo(=;hoM8}^Q``Zmu13bi0j|r@y6}@!WBjPJOMEGLPt1=hFS+&BKw(gPUQhFz5 zj=ym&k2Q~62x5)CIb@3!Lf;VM=$)dFpOQyWt*nS9A>qxYr(;DznY`>3(hy$n$Y&i5 ztZ^zdFK$|AWFxGN1qPBtR?(a;V{1T)h>3#zm`Kmt%mm|@1L+C<5&=TEZ0 z)mig4_q0vy0Eoo}Tcoq!_$5%8V|U3-X196S8?Uj41FZ=69JKFj1$oH9l_&43->2{6YwF_82?+6l`PQFLnH#QpCng7geMBn)6H3;WC)i~vDYs)! 
ze$n9+O^dS?&-yaVpE{$5P{B99WYXwjcPnBQb`x9+23;Bh)FW}zU}wEmOD1~(VV*90 zF=qRT{q`>^oxJ?d03G%xmRBQ)CP}Uy2dm+p7u2qQyZr#s7-!>M7Aam>!#QqM@)RDl z<8cguGv4;zNNhksg$%obrC;yk)G76%`OH|-o^Q`pov%274@(D%SKlP11J#!9sQT8E zVpcmvCQP2M$m|~6?0D>h1|^(hw?l#Py+GK%SC5X0I^^?gWuzaD;ND+K zwCFF^9-6v7Vm_XtH7EMEcX9IulLre22B%;+4=y)(lEAzp-NTQ~*lz9LY*Tz7h}$~^ zOk_WU3(YyW=Hia>{I>iF{x^01V?o}yc24WVPs3l8+u4-Gm2tf-)VMa5_+>vEj)?dnv8Ka^%=PIm8M(Pp`fUqs+hIkUOa7%3ZtxGb6 zw=eApzVht~cY}q{AWUsYP5n4ODmZy&K;UOa)^w0zH#5G!Q3sy|YBFs(kn7ejqO2|!-dR;O% zwb*#Z!6cW$%Lv#Dl^h9&t$f!gy+*|@wCym9W2X>wO#zb<6if_HUkFIoCC)VTkFij% zo04?Lp25*NUG_!MaHgh0+B+pE^z%KX5%p_K>HcC((EBY}uUYKI>40;QcEb-_({vDn zQ!)ph^3u1tC2W>M*xm6Ebd`Rd_Zhc|aT&-_}EvfO}bX_&{%dbd816pfJm*6g^u zxutqSHuUdUi4K3BbdacvT3`O14_Ar_Q#bohDtSPyOF$OKh2l1}19;`BmG+SBL=aa+ zWN{C`^lt{}guOnXr}kU-M1QXk$ANKA24bBkt*uf0%p`ypU+xWnu>#4Fs~30hOsne> z_9@dEA5hre$Fc)>iI(lrF^dNpkgec#uoYtuv}t zN%=ru%s3)8B`#fOYNZoszuEv=9TZASj{zUnSFr^NzqMb$obKZt#Q1|sF-fP)I);5@ zDKzk6;8sX^FP2M3=|KkbuNp$~f^8MgyRX3*DQos+!bOS(bfq63bPoPigu5jed#<$T z9QX+=2;Q(UluoVa(kWE8QxmSXB8NV1Z{G@z-M;ojJ# zCA8@)xu1BZxTYk5g+eyZI28T}2ms);wlwauIQniG#fRhHe2_GA=u&28?`443@bGvU zLSJXoQql9N_!4bK$A zCM0E#-rA$>TB~;nY-A{+QWDBdK|y1B0QM|nW#Eh36Zpou4ER5LI`2^h&%6m0af6wz z&j**5(`baUlpnaz;jpRfDj9+<{<7J|Xt+`>3Mdt8JC0I5V3VmX{HZBQB`*_R0x5Qn z2Y{MRdw!9+UKQD|0u%$|A2<#E+N?Vz2W$ORxl970z+FN-55}*5+i!S)bLlJj_)c*V zn?7Y|3`uj>9# zVE2cWl#GOpl=8xFVCuPn8RO+qbxMN>NpEACF@F+fe=KD_xZe%Kt%kz%OyP0kJw^5M zk5;<4xo(wi-BWfATzq$TekBw4hPTX|!qRIslfeSjtm!RhEvYn07nO65P8}e_R_I=f zrAmB5-!G{EcQ6r7q66a*GIL$cfty*Z9KuTDr-2Y>0_f-zi?_5)Ex=Nk_le6}&PM9G zVN!KmYm<2?vqQYP(0X!BPuN$9IC>cEhB<#OiClSupth17<=dqQ!NQ;ExHUXoKK5iTUuBH5Pnb_jCl}Sv6CXg`kyIWBP(VWPAs;OoY6$(n$>ckYe?4`2B5e{P?$tR$rvU9@kV{)s=Ka zCt@gr>eZD&Io;lQy{_R~O~9wls(|;!9MqALkHSpkO7-kO!OASDkXD{KRO1UCLc`7j zoRnt`3=Dk8*hn8jjv1bTdG59gu+4_(5becfDzV_%vKA;qdz>a;!?6lsey;EWe<}C8 z-}WK|iH#kaopI3tBf*g*V7eI~3`zzdz^}1jr$nE*;{N5nJS%*;3*<>U>7SQaIYYl& zInZNKh5uj!hvWxVuhb!W7SvPOlL4G`*@drlL@xbf+IaRtQ9Y0{*gZbJ$pW3lyQ1F9 z!kP~P0fEsUf{}nP{?{MJZI0*R3RF)EL?!>%f7Y+}ziqQ_zyIzy>;AVMZvqRf$H#hn ztnVM|`^Rq!tmlLEe6XGmen-Q9`oemA{HIO+KmNiIKFf#r6esQd6VM0WaORZl$A|oR^BzN=1Z8EaG zlVoJ%Z};s1pImTZyg)`q)oCRqr6MOK#jN58H@C7eBO|*R5EZjmCGsnsZyP@C@jqnb z$uu4`$uyGkljO&fAEy6vUh=}(M{n+E(P`Oh969>&{e#SNR5_I4h0JtW7p}}O+*MUg z2rBDSafdDJE64n(6q;`yEHzSU6(4UEzdW%w(R2BH#4Yvx%nET_A^TR0Yiiiv-NgFt zwN=?=khriRd`!vB?ZO@YJiNv*iu|r~m6iC^GzyQ;eRJ-}j|KAZ8^Mv3!dn-E2l7@K zjLx-Cewh8VB6GlzH3>m(s=*P>Tzgnn*d=bStgw6V;s#%A`aYvzL9%**Gs-WiBrn&l zWFfpce?0#2I4@h9`Iu=ic8{|6DP`ZqqrqwBW@cgU51KkP8&XKJn>H)qBHOyEHk6|= z6N|UU#SD9&hhuLDN8B>}h<6vn>%He+W;1zi5){pLDDcEk9;Q6@DdRZ{ho$Cs`|0-A z&eUDy=DpzY041wC31j7z96lI*qU2A8jsMZ<=z`OPHoP zQmMA3=}Gi-=Rf+T_)i7tefwMKYntP}#Rb=g%P|<08os1I+h%ludgVnh19Pr8*M#zF zV98Q`sg>%rrGEEF^nscf!) 
zjT2+wsru|}Xuq*r>c~I44jlV%M&Hr@fGq05hPeV&@|&&#eY(BV{AYu>ehl0)`#R7+ zKpwm{vvJvtp06k9$!3F`!Id7VnRV@iXNpr1Tx7a|;IEZ*39 zs&bwpKsF*={YLQE*`TKyH#u&Hs6AK1VCB6r^vBmor*OreyT=?Q9;Vt3K;mwUa zRYi{qh6Pmx%-0+qsX98joACkd$B+-K)kkmdKd`UnANfyqpHx2yf3m#}`!4?UraJq} zqXti|RlAvpI2?80c3{X0=u?_uTRZyr=J^P=hQP6FR4j`(OOs7H=sLVRI64m9hW%vG z@yk*!V&5B1Q*W{)@5t)Tgg%engFfm0{LKx%*Cnc-E>Osmn~9szr-V3$bSR%pI6(S`O(c9%?8a*&E6Vs-oD_}5;Q1OU-i8gGnM!<@MV&2aR2qf zQ-w75_+8@4GDNh*)x0V<&#sD2C8SL%eZO^7N#}BkLt_XBlR$xBh2TG@SWhXQDi)Nm zelLI(ye!ZppeMj&^}X<7p}7@zdw-6L(sWeO+oJL-M&&m!;cH%u>UFo-uUkA-deut% z>a2nIkfcGt9Np06vfP!f)r!@QRn8TqsY8>OSH)MQH)%K1R*c?tX`j|)%M5gh7PL$> zJ#shP;$3HA)1AKd=1cwdDaMiCSy=4EF~W%ERA?@QEsX5Dw({1=OK^owtBzq;6Y z(fVSp&TGw9&2yQynnP;))eO=C(|VL<^HO!3vX-+MvOI0OY_HpD_80V3#G8w88|!{8 zXthGwyt27$>-tsVom$?3B0Z~K^YxzLj(E6{sZW#N+>vkZnqPN+4@rtjYIi6fHDFKe zJebz4#HeKS${;SKHKcVHPc4rekDh@?-`g4cHQhBztp}N5)BL@oISGStLykk#rPw*U z+4%3sHZ0mX|L3Ql<*DoLCp;F!?t9dW>BP&$!#o-*4pz7s78)v7gj7%tmsch@T)g@- z4jb1iQ6zCYAxVM>F@%WOD8hW-qQJ^;m2GKkC1EqMOnXo4N!}x{``{iCN**ek!{>JU z?~bBewRb5+*onJd>SJ}2TljiJ_vYQ3F7=TrO)6HF0|%XD7J@@ApJ^3%!4d!U*dF7- zcOk}MpISamJjU|}UFAY(wq&Ipy?-=_MTF(W)8%vP(#Wu-09~e4=6*2|-ZGX)MYz6~ksB@atXXmRrC9y?IQ&P|akm4UyLVkbY+Sma6DTY1 z-ngtD))kVm%gWFy$}0B;^3A<3u`7uyj6a$9Kk={K4Zd4ze#Pu*e#HCG0FCOCBl%xF zz8=UgXVr`_39vy9Y$-19O*z{!Bz&_qF0Myk!{%yPghl+ifgv1e;jM_PqvSh4&8u#B z!O$(`!}~jmAM-!?eoCPGPQOU+a`B3Gg^9!bvDS!J3QtoS6hxy>a&zhSREgQ6hiSWL zPjcnt6d33rm%=piGW!FagQDqIz34X#r1DJi3Q8;rF;k4F(JFc`RAH@gOSB+Lem853 zysZM4LRHpyZsp3up~D)m;4B55-ojyb?AoE~LnDV=8!H;uqPb$|qt|WY^72X_XsF&^ ztV?x|<>BqJYY=w$E;qbxN z_^6QbAu6?CArI@n!`<>yB^e}cTVnU_yGM`hfy+3TROo-H-lPuJwq8z=L)~RZ-SMZYIqaLl18O`s8FHb z=#`TAP%mHZ=*Jf&my26lMFw1W#s>YD$tT*J9YvH47E0{5-VD^3#`MML@`xJQAxE6Y zCpFSCWy|BrU*?wTTUX9`RAzg6tT!+i3Mrvh%H45v>pkNGm*K9c0Hjkfd!FolE&FwJ zWwR%IZUV*R{ch{*cjE?@6xpaSmovS%dyCeQ%{uqr^yaQNM%J?*-rVrw^=U%SD)@X+ z+w5$#*le3h7=Xbfj$E?G*38gjd0BZM8`0vW&`f$pu~iaFT@u&v!4bp7C{O^cC?$q@JO+4_8UX(__;{5s@QzU~@q~{6pBnWMqb4>-LAM6bYMr41E z@FHtByd=>S7Wn#{Z)E3%A?r7vQD%r%yjqA4%2;5R>;?v>{PHIq!>)P`W8mcmnrX|K zD=Lz4g6n-`yU9Q>gm3SiOx#7dCVvb5Ms`h2N=^>^Rx@=pGqZEDggeW&u;qXc_S@go zaUvt5V}*XmEN--Vl)@wnOAL+g-+0&t<9e9~?r6cwCoC+?dx@WypPw6igWJj7&e_>T-;4aSM=gvVJ5&!)@rd7;1X^6^~aC9Vw~ z6@xw%QL%C}v(dg`Weacy_K@Je#4jNB>w&-iMB4KAqZ&?Tj#6-2@T9W@3G06z-u}n` z`QWcTbx3;(@L$=!<+i{4c~XoQ0(~1Qe&hMqrvT9sRARhDqDfG(9X`7V=y=-dhO#>N z4Y=&rzi#k<5QLFIzroMdQ1Nqtp5V8x+>LAMZsapV?$6Z6UkNQR={*dj2n-DIz4PHh zZMv#Hvy}M_hk#>A0cS@)ijOkL>DO03y}&nm;WCo5_PLvWz#?suWi4y^H^*SAz;aa; zI@sy{&d05*B80FKKeiGOHM)If%;Et zF{@CJlY8wC=f3uDVF1hQUP12qmoWa>CUG|f#pW?)y$AmeAJBLAO?vJ9SNI|vS#m$v zlU1^4H{pYSwdj#{4E28`!ZEO?g0Ic~e@&;~+b}1tGyX@!vK~3o9z*u^*nh%@q~yHl zf1>$rd1mHBUh?;>{|O(yzQeBniAejNNlHo@QQTGdPxt_gHvg~iNd*7DjP`%B-hUa5 z#H0VOj7H0H$z`gmWU-dPbg0aQ&u*}!&ooS&(;R-~`d`c)?gezyny2O7ZE`lYn~q!e zxae$wT9!_J>bJakB&POqo7m?^e{-|sJ)l4`to*N&PJN{BxZiRc490`p7w0WmWA(ZqxBU7 z9EmpX>gu|YYgjq*Vb|e{Vnq^0ePu4wBTG%ZVc$PLt~=v7lV_Oi;^N|u4x-}@{7fx+ zamlq)^zW_qCF@8^Cj6L74Kl{ssw&(Ij>CubHf|w)Fw{vIbXT%F*>}BZUo-A8SCpSVaup` zn1~fWD*CHyeWs&1<#YG?%pOxSj+Bgs2AvW2sGiQPI&)ZJ;|?U}#$xyp6dsEjeQ2Ws zP7h&m)ilc0#mLoc?Fu&sY{7E@tJ}e+h2MYo!UmTD)>nDw%IA?+r_z(AP__AO5_)cN zUZ3~xq&x1afOCIL<#<6lHy!1YYxM+^DgNiE1q`DT%pf$^nRd!>!sns)kLDNt>k2VE zAwU;g33lH}oP&TIUc9f^LgHjy)kGk-(z%9OT3XIxRtryM&jc8Mev~aFW>Wp61T$CO zE2@S6F#*H7$GP79Nw2fT6}cCTDU3#~3MyLL<%?Zl0D4Zvcn)Ne_0D|z4mSggu%ym7K4MIlSK z@Lpx$Vd1gx=OQjs9urIOrDfNCtF)_|lS!j5t#hmd=5}xx{j&D&o6PUwTRldklBJnn z57p$d+2J!{SIwKBf3e)FrSBje=e71Vq*ZhWBql}xbr991=y{gF%*^c+6xk=+@woM> zFqSZ}_o`8|1I1yFnY^z)qGs&3Y~IP&j5^1hnEOnre=muF=#E*o_(~X!^di>^H$F4r 
z+SB`Iiw1ZM-M;%ze$BT?;$z=|wbdU0^>(bx^6)m2sw5Wf+O&vwXIn%0;L|fRt<_h( zF|+hxFq|7u6#}Q6c5urjdnk9W=*%Z>lIU9SPSSes3fd9K2g7Jx7=EzPXR|Y*)}P*R zqtsN1gg_!D>)!*RZW>Y(CBdY5HSpuJSma)S7mBHMwIxw7ezXKG65necTIp@`&c1kAvDpj?`?b+T4s z3dQD@!5j?Tpgi*ZF%ys2eF>|&y-Wyu<5r70l*e@TVWsbmO zP+>SMrVg0Kfei}mlHB!MML)ouD7GQ;-@As3M6b1q%=_wkUl$OdYg~If1zFd zxq+5eZ~+dtN#ay zD%@-_>I`JbPh|Q$_0h*c&VhD;r7AYVZLx;_Kv)O->i3;YJUM%F#7BRrhm|PH;kW&wsU{&7sbj)#)YZexy}y6* zS5;GsEE$!e7CwK*WAgQ*#krw!SzIVunaOK&CBSDh%_lH`hTVJ9IjI*dI)4~AgpZbc zjeNY~Jf<=Z%~uj7gmj_816_YcCbeC8X5MDcWW6jTEscXjD${q z-rC%cMx`WNvDL*6d_rj%y0dPs%`&_abv2ydvOqOI=VRG(_{^Za$J)%-yy=|sD;4WI ziFhecT;mbNJS6daMpbYFsAukt?Bd}vuoxY2qmza zucSYf0Ihj%ceh~aGQFYOS>#ewZk7-)z7Op=SDLm^kJ~5+#0Igo* z(reJW;4Nhc)IR4P>eJdAZDfq1_k&+twvfTiAxK)31e*ZlHMUa<5>ImE=2J<9-79F( zZ(+hep39U^0i}A{ee5aUoe#dB<~#5|gsK8uQC`Ky!>Y23w2^Fzf!sxXZ@%k08D(kb zy|M50!WGXfS9$sj*cR7Z+soKB7#1tLgEVzR(xhZVJP0|(*JG9ezPLcHoF{yywa2|N z*p1P!u;O74uGCvEjsd4^Q$KhsX>vK?qy%DPB2>s_3A>JhT+4Iu-kU!^b@|{Xd_oiO zTN8OLR|W)DRY!NwAo6TLgPbh%C#sZvccl_pDK>$^xU__C6`qI#>caJNt&#h9l-peC zgvrtE#%i>y%H z6;EtylEBR5L-EU=vCoHLVt^ew7KV33Hk4tR3$=#uwR zpFRQE(7^5}nZLMF1&ZVDO}-Z=S+G7bKl%NW%EIH%N`9usQi(EMvC$S=X_sP1yu(R6 zC-K5Bn*K<8aYY6fX@U9z$&}qIz|An%cj?yfH{ANR7iQ`9>gSIO@kGc87tEbt|69e% zizEPa4HItz3pd{iTllbxGSNDxluZeSU5T!9l-_~c7(i|eMRrk*q+rw^O0w==$<)ja z9Io`NgCbI3dGqdkeDDc%Z!%#Upah+ptOXp(;o>m-J8tW*fKG(@t~&&3&m==R`A z{`20zA&`yXG3dd>3GDzvBP#$Qrox4U)MRz$5OORCAKxe^NaWdn|0Ht{T{krGd6QKk z`grXY3WLZtD8C;9ygxqR zD4*KP=&W)YSXckLZ)wl8bpBpi<09~xM}7>5kZ{sQbJJQ9|5JDQ`_|HUQldUK zM)|P2;$Jc#$y^ASu&YSMn6z>uOSr!;QjA}dWdDhdz&k7Ob;adnD-t`>vV#MZME4&d z9)NHmDGx-P!|ES?5<4VI1AdEEN`FY?H^N0?2NXf{sA5PF#Jd1!v)wBWq|Jz@+zFRr#;e6KhAgaZ%p4w0?f;xII&G`IMslTf8kZg_4hHB z4glW|wTqUd{%=NsKtf|y z#0+8A#4NxY@0awpMckVJ8$YK50!fTWSep}yVorJ$Y%?(2z-@r@$p3d&Szr3#t@Z2=();5ij2#nXdGzqmtWVefeQ}j5v zXpn^B1rREWA{VN^`SWHe5Q_0bx3{?>x4~8l+;-xm$N#Xt-@T&%a)q?nNCLpVl9vH| zE&uTU%|yN*_5f&vjx;-y)`{W)nr?-9fQdj0(mFdu5g_x`f5MEE`69r2*AP$}P1jF+ zn#?a609c%rmihOWkE8=~^(t(T#Kcz;h}nD)5nrT>L>Iv3*`rf$NwUt*1*l{*eO!q| zra#+;I|2r6WJnex!IuI=Dn@tEgY-ColHE{ZCR%BLod~&~IY3YBezQ}=Ply+Ohd_EB z&iwo*X&p`|EAf-D2u9kBKomzP$Yq5akp98}kdaov*Z8;m)mH#&FLM_Vw<4$D1^~i! 
zl(kq$Xdr;90ys1`W?>sQ(^6eDLW)6=I+XTleksTWdD6L6I1kkTkW{^a$6-xB_>$+L zyfHN(e|;b~$h)3-+h#<7&L*z263BUkS;dS3$|9q1YsC&AZBq?uC``cb8oKL;u=E*w zDqHJXg&Ep-?6dLMa-e9I&8U--fLG#zyZ5QT1XiX)TU7jqabw>zyusjDE}B;nx08eY1QOZ)e#7 z+p%say$Zv8Z5ydrZVduyC%Z8X1j*ec3Zdi`pbEau#J(lja)Dz|C2lnMa_bXi33T$8 zzC8))4gM3c#X)4?y@J!Ju+L|>$#MnAU#|I@?Ma?&229jLt+jhkl}{_Ev{@P z%ab6HrsOG`d~-s`PXCK=z(Yo-vxuq|mQA~(Up(`l%rUZv6yvvkW;$KNVlvMSH(=R) zDY?!K*JJe}Jgbh@fpqy9wKSy=kW3HRC^ zrjcNrbx+Ws;$q%F^ij|8xjT51i6?8Z3nl0pE*3*Wy2Zi0N1J*DtkIitP(H`1GquK- zeAkpRdYdXDU@y@GAdhxHmw_mT>3jSja^_SlHU5AL;!8HYoL+BfmCM?wb#Ao@TKwoQ zdqVGY7Hc{$Q!%hrKIJ;-PzRL@SEjl%g)^U%d=r8&OETvJv(Q|m^qolUZXWxeDKWLB z6Y&FxM+T0rjZ9xh=fmE?acNf&<`U@%IK%4C3OL-rYD>u{yYg8&K*PJv(>>WI^A-Pg z<(R`2A@X2S?-9u3$4Os;T^leRtAF~`r`ju0R&j1|6*YL)q;2rka*9}Vc4�adj+{ zpmldHK8Z8laAb)K*U5%J^{IyAt9Wt&bBEnvEbpU|)y3n)|OZh(YBY*F89 zwd%!E*}CGD(V197^!(+XaonztETy79m+>zh;y`i0p@B6CjnQ=f+ytvS!A{=A8DM~M$#f4OXs#l=c$7j>` zZiFU;na&L9Cc#ckqHf~U1Cc6a-hf?sO=A>S5WHai$5Hn6fXVPT z_DJMNjH7D@{)#KR7foFi&|S4DF^2us7eAA5KwRfN2;tf^tJH~%3Xp*USW{E|L)C2Y_T>KxmTGcK@oxHOkAsNtC|jWtM5oYv&DP2Fi^aH` zomKTKA0FM=b>whlsYV4 zura4ROS9EiFX%8x&}k%OlKEu;pF6*U!-Xgu>M4>q zp+4X_Y>rjJ0a;RgmfYxM;@x2*u0C>E1)x5&IP5v^-t6@zoFFu$b$43s15f$PF%YRM z4r4wJNS`jSh1QeMrCDThI@iNWFKX8K>?&x+h-5#plkOr19JJ?U@(m&fO&xcec;#yH znDPDvB%06YNneraqOi`1R~wquaaF9E9w6IU3#CxmLK%o+PPzyOt#)2?Wyvzc_1+#Y z0V(tm(2W8Ps5#40I~O4qpYoJ_IOO^Jt9c`5r5=GQvDnkAcH6K<09=v|;>F2`Q`>oQ zB#1W@>NRnn_q-pl(Hhz?s&h(b!=btx&Zar|r=U7gLRf-?JD^EH+Rx&(!abrSrOW$) zz4@SG`&cDYo>#S2IeVCe)m>YIYIHt}qfbO=ib9M`Z;nCaqgQ`N5X|8o0N5$LWJw~h z_h{>3JwU8~HViOzbD0!pN7UtCar`;>WL=A06FZaNRtr*;kvqfM4-BEzeE24j72j&! z={0Z-0fi57cf7^y=`TrYOKv?fb{h##&|Jqdy+Pi~bYtOr!01j%;AE)YyGETsjMWnF zQG&Vy?E!k8Tf=zwUFXW2Z=PxEug80ej&zR)Xeh^v35SZW^;J9UBxfBUeo^*(+(k^# zs}xdzHlrZWN5Jfkhha^s&1WG{`Q25kmm4szt~!{+xs3$q3`BVlNk@chA5=e`F!MMu?orSE5Gp!% zB8YB$j#F7gY(zq?)}26?ZO>v7>&*w8zQ|h+Ypq8IT;(CWgy3oT z;L;}Dhs72op)A_}9?R5X3m@F3GSVW}eN5){1FOs+qT%K%o~&!KYKjA|_k@6@%FYaG z1tPYzN~tN5FQn_Inj}-hT|5|3fzMrka^k(>l3TP)Pmi?zB~cRt$1k8w%g!cW(SJtB zJywQ@*)CSSh$Cg4t0w(-y4(L07k%s!i#uR6U1V;wMBkS@7mL|ejay%~Qjo1y9 z)nMkU5_V?Mox~9!{R!kXyU!1a+=vrpb{A7%=|lyU%)hD*gerHCZ?-eEJBg$J0eTtG zdlNX1t;4buWl|u z56;HhCPZ1Y;7paOUk=WG~doQ(#(54%))pKopLl1AE#|KP-Hs_TCYO3=G%8{!GIcq#^*9#5p+`G2qo0I zk~)7H5A$N#w23ze%0xZUF5%C^`E1qs1cSP>!2jB+e0Zyo`39u2^PH{bc$Sk*d%0PZ zCSM7>oLh!xJt@KY1ol8aRb56}ysmTTH3B~nKm$kmn;iH2=Dp@UpB~N#I+KKvo+;7! 
z!jW;h;{LVj2Vut)GBYv)fM;1ZI!oh!kic&w4!kP|fK9Q>h#~^ZNonZuGo{Zu7f`p- zl_#zZHicA-9a-)W*sL=<8c9c&J^2rf(i0>6X79i7c4&dR)f7olA{^fDE!_zTgxjsy zE7>I<`3jF>?sGxT4p*;?J|D~k{jxf+e+-xZkt?_2*si_QxBj>8j^?*NT|2ZWpT~nk z%dkOVlY!8hi4^0No?;e59m9eMs8PxR<+V=x$~2q+Ajy91;6mws3zwn}cznhqz2qbH zD;zEz^RlRBBFbqcW1VFVt?-4aAH&Q3{vKoCmoV-Y-Us7nFR~p zDkVBY-P{YHVsn9~YHKO==-Sk=#q9W_wra;as!Gp|q!yp+Ij%fW?mB-4xEvusT?kex zUnY^A$Ow0D0{Xu;=OEJG_d^|%&uP#B1qa!aOsil&JsE^W-4kjKw?S;y;Cu_J=v{-o zL{$b52vM2f6JJ(v%_P`Kq43}7cc@Bt@RY!XC1bD1I;B`ygb;lPy!&-SM)sBO4eRGp zD>}zaXP2jc#jcA%Mg_VYJ9Q-{{#E4sAVW21|M?J9E*G5d)GG+uxIgExUOqOL&O5&N z<>iwGpU`^z9O80l<@&HF=yW;?3g|YqHflQ|Lm42WGCt!u0qGzUcLy|_Lgkwu)-x|K z2AiLY;n?z@(~ebq_^rK14_^$`&MVScT?L&_M_Sh>za;4_E0RnW2^~<9ovu99oFEe9 z6nHNCr3Uu8>HwP9dDTda_~u8w37jp>mr}1~SZ~}5^d0Z}b9QLim>U;$zs4ya|6x-; zTf_%kmPye1nh!HQw@oAb;fNjb_8Ufig4R9N``P1;fWm(NrN}kgb4#vW^WB1P;B&51 z13!l0GkG>H)l4zcV)+6l4Q~&XHQpbvrH4FeaiDb{N>~s>LLyGccG~Efq`_S4Dk$_< z|FfS)!sY%hL%SE{kw~A3s=cd~LP340Y;U~glrYJr?mXc>6Om7TCptVaqfZ7~mKi)& zrh9o)6=Uf@Gvo3j?=A!hPegWq0XzbW_qKuCl1kd&pmt9X{R;Ku*bdJlg^-`{@eQb3 zs1zyS=$sSz@n937S;ZOi!6%}8Zf~i*_o__tVuFrMd*U?*P{*!a8-U|q9EI=T`l#oC z43?v(wFPl|jSCA<%PQy@HU*6_8G~LRs2+)PY7JN%t&@FBU5elRZ3j6}kp@!Bd9Ns& z6jwa{01P)1eu)=<4CrNxw^#J_^cbM7nbY1I-!tAS@DaBpUC2Wv%cueu63;%T#nFL*(dH2}_Wjv(X7*Tuf(6so@j{^AC!9dAXEWmrR zbo@VfFOJkm=XPYMrEPqoN2<8lV4=1_smIhIxrlMDr#{I$;gQVmE)ePWAxBAIXmGRx z7Jh>fTJ5_}|XZv+KZ-q%yN6ZCD=;NF>#An^dME&`L5DL~Sqb#h8$eQh)?(zZ#NF zR9wUhl~-WTshM&E(w;YgK(cq7*n#Wc$$>8`jw~CKkP`*fkSpT*j}mq#suM{OaEzam z4>?K9oh$?N@Jr1L;B6wL6@E!W;wSKB+8&hwQjGjA1KNysWhdMvfC2#?L2p5VP(2$! zcux;LN)j%@zS*G84mZ0KLy9271p%>oL{bg^$0>Yw$QK;B@D!|kqdl5*G6M3_MZhxM zg@cbs$2blm2>Nxt9qjxaQxIP=a7{Ro4#=qvi26FDko)grya(Y`(=Y`kiQYFsYoCMx zR^`zDM4hkXIAA2Mw3z!OK}nPb=;Z!1d5*Lh0UNtd?ceeNgNAtgxNkEx8JQ#*G-WtD zv_H;l^c4&TpD(b}y{P#3`4PPh>!yQw)k`dABku6CU1w_vXFh9kh`aw1ThZAu>2qr* z_Fp@{&qwi?=Vg-s!J>d$4-&MmU$mX=@hRRaZ&d8Aba^{9TsGm}T5gnEu{rD%zZ^He zj?I|(CXw;>=<+a@21kO8S@Ij69!i|49l-DZ@~lqoFNBa&9HNxzON3jFlDeS%h2U!b=yoq?vS}g@VpeE=XO^-Kz~_VuIQL(-OQzK=rKG3xNxI zNG)<96o%VC!?8reK|mt2B(oeqqg-vOC5axhG?o*4u)yVFn9eRXtN@KhLx*l9NQhIz zwidMS_)(T!nY932=hNlm%AozJ&ue|CJa=)YhM+)-E0b!sw)gJ$UubVVwAmk)T?LZ& z<(VkNa?7Yz)uzTe2F-gfcBPQX4~B08jg$1CJ?QIJG45}&GlxUeId7`At?6NHe}VNw z+-k2;7-(s&9V~S!55gDwV2ZDfJYgwAwfQVgW#-lbcY~=-d|kiyH4OVxIdA5DQ!8v1 zPJ)_9KRLj1%KTTPN!rpfL9+bHq|b6J${Fj1S=Tbe7_5NliYd%tmnvLOF_IP2>rY!c%icX+*)z17+Go%BY%P{i zLo+szzmNqu(522A?v0%`s(Z$vW&*meXI)zVQe(g-WI%`H{y(M}@+C!uCc{Lg($l@_ zwoo`7yVCIyFn1CN3YIf)Wp`zwOF+C}W&nr#dLv_-1xSf)2jz@sTkP7r5%06@Zb-(*|?Y)g+pt5KZCcZYAKLYhohl2iF zi8eve4ZudfYBsB)bL_XCx7(y|Q}#q@6=4I?YPw=aEci%**k^V)F#)tE+q`dJ;{yqF z3Gy7|=2YLk%3l%CJZ7uh;U?3Be8Z>(pSMeDHL|5JIp6?ZicQ$kN9HR(cx z8*J6NzXV1^wqL~l0?&uY9y?npuNJ}HV9>DqT5Fvv>)my+jc*U3w3Ok*y}iS3qahpp zy-2!We&}$8yH{uiqrT^CVN%i8s<8-#E2p=qVba>)kH5PHi|e10Ceec2?>o@Zd{7%= z9%!y!<#2?8BO&Bsh(w#$PX(huChVlF!33SI4Ngg&M!IpmMr~cqF8d$l(AWhulb5+B z*rS~^E!&Hq4Vd>{FaI@P3C0dbpRj~ZZYpi|vw6+7f!cbvpw{eqU)zhzX_yWMcm>}T zfQq?w4IjQZwB}G54YsM6X6m!6GMYNUg?;WkN-6y?N_<=t(KC1B56lbcmz`Q-FA=(ba@XwzPAnoGgoCOEv$6rQg)fC z4kiY!6KSPruw)xzs$=n#c$1_<;5}hr*4oZWi~q8`e#fXb$lm_*y5QY;bkULgf7A*& zQE;zmJqL)>pk4qdl<9ca>C`IB4r0e*!wHb=oYS_>E-VX!w8kPR9hFYYj>7X>t_;=T z*J^;BVu)l{o|C8m<0rDVQmBOW-MUe{ZH`p+#c`ztZ#mIL^6%r8NjDv0e^}LT1(;Cq zS}E#Z2bJWYM38Af6nuuzlhJ7m(2bf`5}iE`c-0t6&n!M<+DbIe&8=M1y;!-@u?VED zH>4DFVVbUXYt2g(0b6?UFk8uMo+Pe;+{DL5_mlJ?Nsg zqJx*gDv-4X73v~w+rsV{?)hQ5IZK&kkbYAyY8D5h4$>_4@IdQ&9U&If~v(6~l)v#It+=P6IvU3PF>asYD0yJkRLuO0&;#p71vorcvR?Oh{_fS01#)%vuE+E7 zON%fdz~plQjQ9J_-ow}V z_9yg*6(oQ}{|p!gP{nwy;a2v6lf{3n;EeS}G)3REo_-3q7H_%|OS)+x^c>#Q0a~>2 
z=byCrGi^XhR_Zj$l!ju;C#>;rXwlHv#xDmnT{Wo;8H5FjDzFN-ntP}`XJ}c$X!CZj zYX?x+9f#^F@zrs{P9wmh3I((4mm%L|jwua~;{&?P`QUEz+7 z7E3%E&5k-t5xwO|x+x=cfQCN>>NNOM7u7E-b0HF_)5d2q)7DSn(~3<3rd&&}gq_U0 zkb1a+PNkqWuQ?~xfm;b+7`^w=rh)5FccG3EhH5BOy{zRG|DAI2jJkBeENIt2k=@(lyt~D;Z;$AF=RSDB}&Or{1t8JT%9ZGoR`MG?zz%fe&^y z56Hrrh+)mHj4{0$8{IGY2))WQm-#N0O*!gKeLE`BpaWMT1&5{rC6fz~@!MIco#w*Hb zEF2!SJe})`USof@=8jH%W#rS{R1AAv(AFe0yQNNW$ShL;qBQTl4El|@Nje~jLekN^ zW3gsgLyF@O^IaKgqtF`zrmVrRi2;ntXJzuskeBAFG!I%+5LD-*;Xc=`5@O)}=vA#s z1cpW)Q-SDgRbC~S3{9Y#;VG%5zcrGd9XXSvi`rfEb)-&h|ByNl8X6Rb@~U%4DEiR` zQ>W!9t^eOvHv~nzMSotz>&#r%Xzg?HfCh7CGPlmfATr~O^M}U=Zn0y54uyR-XRI%N zp|m+M7VG^;gy6Uo$ODnfJm2!CS*mM4Q0Ad5JRN0^92?f5hBt%zUsE!RLxhH} zLP27gMCK+NSp6kPD3Z{L($*k^Js9i4j4~Jb znGgxzJUK;h>e@L0J7z}+{F!xEVgI%(T_kj7xL&CI=Jct?vnXh`38L=Dy-dNttXzV| zHtISQcnx|jG*EzjH1fGRiV;BK3M>|4^3) zP(*qwi zZqh=!9~c{YwaSI6pmZkI&@UN%*n>sv_xTeNanml4n<@w^2)#fk(90=ZbhM<38FYH zIvN2N$hR!Du%edCIvNAVh&OM}5eStAY`Uxbu-C8rk>rnGf_(=FHVp+=cEN2i(0k-k zYzQKrH*1Vbnb}3UP~f)+%1!*xgy?f2xFG@*RDvM~4Fp?yu8Q3#?J9R4%KHCTc-iCZ zub*~qzozKPD0ojDu@spQ49)HJ09Bf5n(6UA3y5Zz<~lb@Gbq|x0v*j2rm4k3ygA%W z$moc0fVd-abMALafgh6n+V9ALmx%EJ4nJVwF7p6$u`$YoKhXxmBYIuyc=M`{@wqRJ zX#@Re3f`j;B2A~0tTxW-U^Fch@O+O!GGeb{Ott@#;io)Y-Q z82BTwi_LbwQ($i)o~*bWlZ#lUt1lOw-j0u@xz^1VQKKP3)_c#nI}g2?b!-3Rbp0XU z^EqdO-;yGK(I@Gq;!7G4yT)Vh+V}@W!;Rmb-`gnE&__kjjryc6NBs;Qk;n1!k7;P9 z19D+&EXeU&p+YC9+A{{R9JFN=e3-)B)dJfn1ox{b&-Y$#)gq{3KR+nj^dvg(kGvWK zW~WV+cT7Y?24~xUS>nDGH71E>JE`~m&A8r9M(_jwhY~gW+d+ z@=xpEafoSl!Z$6CE^?=%5tCGsdO)g*{kxNYqZ25DLoYswwJYN|17?7sDhBjsk2$BAt%nNSh3Z))BY}Ax@?16G zgQ1R)1xhv{_V|yD1t}Pcma46!5M#Ed32t6p-WA< zM%jBQvSec<=vM**c%9UxQ~jw}@E)PB&>KrkMni-`pR&kOyDQNgRFXtKbW#}SM5jh{ zeVckV|84MgsXQ{{ykLpQxaiel^_&Per`(|&B{X$wiw41|-~=H?uE~_g??S`Ftt`taoJfqK{ae^TNlktv}T z^)(Iu_J)lR8>rs~Hk{S8_jd{7#B)ho7F-<+=WrSn$_-$jyzgiowq1c|E)8dn3s5$Lc4_1FN6;tjO?}@&3TP+YVG-a^_8{KtJIg z2bXCc9n07LzcQ8J4Gu8iVh)rAa7jcWmT?roo<5fPuv{wM*aP>}Z{p8HG)3I-oC+u2 z{3Ak075xNQR2CAA`sHT(75yswg9eSNtJ7KeZ`P6+nwBh}tUT21aRoH|ynpuWF5~~T z^Q9sWoUfE%Y1Ln}OU$PL1L<(kOF#|1tHHS_e2*&=@MwR%9s|ULCxjjEK2)U^Iu;IR zc_9|sBoua$Sgu^4zZ)+b}3soplwy@8@sw(Vs3L??7-EXdSK7gICbOk$j@&Ra-G<+YDmw1aTKV zQQ#>(4|*TEBG7Ly1t6A{z-zX-IlrOx)$eW8>QsHU7zz+=_@9a9a>4{3GaQ`2eIoOB zqs1j=zobwRPN@7wZgAOWp~O{a6mJ*hQjRNbrn0U!QSiCScCn&wtJB4&3fmLAIFj!Z zXc3PPz^#859XWg$b*2rEs++Do?v)QZ)anF#^=Z^nZ*h3YnbkE2%}F@y*!LB3GY`iE$ezpsn5qgdFTSApr#E+61T%u?hA@YZpn*_$N&9}JfV;HF7us- z5)Y3Q{;sS(I%mJNSXT$aV|}+k+BCbU19N7oWTy9bcw86eI20XK;%qol3eY}As4z%#0G(c_g%MqI=j z@m^x1%5f-Pd!ymRugEhzAw`z#IXOOxPI%OGN9#u~?_Fqf1fq2=cZG^s%2?w%F9j?mL`O z^LM&BNc{`>*T6SX738YJ!B9$7!#XFaEVZL~bHW*)WDi|99KVfbH*e z>Hp5T9HVyw^=p&QhlGMbJerSEipEcF=8Xy|g*Y5gnsl`(-#h5kWajWy=W(s*e6y`b z=1~@z>R(k@P&VvzD2;yx&uKvR#vsN;jXo^9BX5ZK=v5JbE_BX$3K!8ilzJOYEz=_S z$DEK;JfsCdsu5Q5CVWoTLQ|9M~f4#^8lytF&nx-lc`GNUbmzQRxBf81a=Cl^M=3@gv=QDRL zwQ#l?w$vBX%W3W%i`9sGNobcje-`k5!1Lq3D+Rjj??4%_28?QJv})eDX3I^j;gNE*2Zw^fMXZB%klfyEJPX(Rmd80&~A%JRRXAcbkdeGZcW}Gc79S-fxo{z8iX38E}NSUbZe~@SZs} z4KH=-S{hC|R{w^7~Dfh8Fo8X;fdEcMwE7c3doX5;bA-&4S9M%8b z8Kwx+zTwdzdM;SH&HanKCkj4*d8y-2%BTyuT=69#C0b8#O>D_Rb)ZXWybTV=>1=H^ z|5R)m0%`D3@YXZ92-|8TcyHBe>>Ov2cec@V?6r!YjaQls{ zbVuALUeR}^Dq4%Mbw@6Rgy*hWW24nREPsXhh$o0AG@+fRsyOl9`E;jR*_ab?l0+b9CZ3??J6|INI!%PrGZ|t&AcQ=DER=27rcU8~aj}yPH`oc+-f#7_BHh|$? 
z@?Ur9zLK-9?-IbP=?;k+AR3qQ+VZr7#1cLQh~`#Z&AI9-^Z7QIEMW9OHyOR(;#>a( z(1877%+@~uuFVN}1HDz`QWiut+xL?4-i@0rp4jNECS`Ym>S0(x{vJ}2q}{Cv^sO7H z@iG)``rv*tZqoB_QF@HEKSA8Jc;yN(pkXeh2bSEJPf zM`gJGf9+jqRFg^8wzXUA_8=;P%4)X*2ne_!0VBb2XWtZ15EOTI0a+x3raw`%H7Gl> zL|Ya`Aus{~m4snm22f;Cf&?LeG=wF{5+G!5C4{zn#5vzN-;eKyhhN31q~28By7$(t z=XqY-1S`DQyuLRnau+FJvjx7J)8^}xR3G>}*`sl^Kq*CrRImHaUx+!_M23yPKKo!B ze??!e4mQ*%)XW1}fU90=FFr7}ho9fVl;3DwoVahwd z=Zq&Sf-7C)aFkYPIf@^24R2TlgXhc-j@?n;+pefAJAxn7SFPyvGWE3=))K2#;gs^? z?VKi4&b|($r6}X`=A>xvR8TX|XlB&f)Wr{%B5tZn2udLA>*8%M20EP_f*u*DjoCiN z2ZS=|r|xvB_EXSrEHt***~w{(i0<_Y+kb-$kC4{GX`}xwWn)sk?uG72{9s?5P=RK* zKUDo=Fw#+i=n}mQKuqcvfq(;K@fuk%qNrZ3bIGW3Vl1|oH97IFIn;l&{esOnk{tt3 zn3EqaSDc+;YD68AQ!ac;2uOoz77R3Y!5`NWko>W3w7~b6@R{sLR&MpZQ-d#rDNdv% zI0YRqa|uCwiQCPpkgNlu!FxQGHO;j-!>W}|GG&$Oi@3;X;3iI&{9_38a3`l}yy>gG zme?N|Q>9GXp*yYEehO}DeedP4#&#H0sfS~S{s|+M?jrT;S_JfR>_|3W9=QlCR|)~= zN;u;?r`B%ukBgE*LyLvc736}3^RAw$2iQ+un`h1c`N%Ni(x$k#lh|r8ZBDrjbBST@ z03k8ygHqanIPH1YU6UB|yVErkP4k?Tm7KhgT+N8;1shgs40KFDrrR*7Dn!hheFw?^ zkwZtr(9Sly5Da;o=Im4pO-nPD&?=kVv1d6LaqD$b;>AOdz|>o+K6f3frnu&$#M)3( zLhrlouUxQkSdD?sU~sJwljnbTo+@#V0voDbkto27Joy%3wh=t1BxwcrpNSB&!c63!iZS;ETe!wVw(>usH{nE%BLh8Xc$~1iVHqywXd%fHvB8h&uiR7rR(SkHASy;^Ih7{hR-DU1>{@71^ z?g(sqx_2hmWah}`XZuFC8Y^w8s##BI6b*MR6@kxH6Sp}T zBGCz=!tUA(B_HiAxY<6kCdwKx_CY&{i~w%z=(w$DP2mi?<`h6?FZkh_#MyxwI-Jxg z37M1J%Q#zFv-^$3vz&esZI-K37By{O3)iLnn0I6&LABV`KtVYOW%aMvsF5_t|U7mBGvSrHn(s39G z(uLG#mpAgWlw3m1X0vV{PQ^>zNfVeKLZ3^J3&Af(w1&?;+eniodnpwK6-Lg-I^)hI zt#_DZ$HWu$$CB|Ina+^@bgvo4kJme(#Ml#;I7P0eIdCRad4Y|*om=Oz&L=$3omy{? z3g2vRClmqKv_1Fw+b6@iVJ|dO@qS}x_Zs*QCOqINpkj^|A)=}v!{^odiuuUIgJ=I1r!(zY{_$h0g_{-=3~wosY`1J zr)wwKH%XPjEiUv zg)yPMPvOt&ZIa3F!m||OlflA z>NFFc_lud%?V^V1T#_jNOb19gDPmRk6*xBiV+hBjQw-G)cE=lr2TuIRpdRgybIe?ON!*-(R# zwZ0m$HgW2f-!Nu-d^9_*9JF!oo!ZA`V#D zrwfP!dnG3U5U?HzD#zD7ZMzpfJMS z?fUQ6=B~2M2w+Uz^@l?Ep66coVi7sN6G%{iTZg?=;@Txo^E84t38XW!OLlYw$x}DZ z_Urlns^d<3`!SjiP)8s;K)Gtk#&p3Ck99INWwWv?N22gvM!9K9oN*iGqybeYwe@ub zI73go0#q}agj}F9MnaGrcLtkvnm5zY|h#-yBJIP#bxjPCSVoS?K-L7X&>OHti6W|O|O}1RRr20 zvS|sP26QL=la8F!+YKGgaNEGL$OkZ4vgV{zH`ed@n#@S|ACyI@2RJL*)k9Rfd+c33?-x- zLjKOK#h&3*PjI1(Jk+v@=iE=3SudKhhqGcod8Va$zVQi1WkMAH3tqAYIPj0RCkaoH(Kw({ll9W_di7Oj&bH21<`XmVD~*e*aF<| zbp&`el?_|Y)g`=vssegb$2v30gNmECb~G~-Q{D27H&fVprNAE*@#ap8&}NFaU*xf@ z%Z_90R@dusC;1g^kbk;~h?va)KtvVopP}e~oXTby!WD`}6kGNwp(aLORW;mQbw>>* z20!1Z6&`Ch3{zlN@T^J&+zvDI;8x#Ed5@xR&|Q?M!HLdfg;OC-m{3}P?*l@dL~6N+ zqQ=@>(ccaP*%LUlNyDk7VB6 z>@Jo`AQTX;9{a|2H?`2%bLUTtgoc54zv2Vpncn-TIw^0TujPzuRrQ6=R|Grf)k%t8 zg5ikO);ICJ)&|31gdavE2|wT*7`}CI9R+&KMA#{!jv~-iEvuz!>>}|s33V@>BZgjz z*6jM9FE)s+!Z6f_!jQ&x3BiTd$yER?T#MKsp`_W5X(gVt-!O4dtxbSxPBx^RYYR*B zao;lOA_fPpG2_i+0|A(eGuyWFZ;YG80}muP&xJ|1batRXvGgOiT^gmHc7m|fHAn%S|^|JM3@fBio`0LtkD1m+&O9Cc-Vzevj z$lh@`xiCJse||Ek1Z&HDf z{<@hUdGz$1Sa>6V3>bAb9L{sc>=lDdcT@d?6adebv3eNq7Ik{W{|TehN|_kxe&dk@ z`6#t4!$Jljb}hFHf6yrUK_tL|qwu{ud@<-Z4t2tw4TZR*jWD6nAG2QWSzeZg8+-Hy zaNb?7NweM{iI%A_diQk4VzzAlQ3LBrOiIpD`^R_@x9A25wy|u-D+i^B?{=ya=U5`Yx2!OQb?-#)((|XK1Z_5@Qx);wBod!|y)IM2M0G+Hnh-(-- z)!zbClQ*X$j2#m%HD7NHGa65`Tg+ComdnUIK+TX;Yr%iHkuFTdB8_3}x2xygJ5;%^ zBg)$TI^wbnSvv&o08Jh7+@af~EhqOF-PKf%1|TdVSt`D>B8$TM72v!3ISGR;Kaa(` z`RrGTabK0I5EYja^vM+PbiC_K=xJ>+- diff --git a/docs/src/examples/mps_onesite_figures/operator_contract.png b/docs/src/examples/mps_onesite_figures/operator_contract.png deleted file mode 100644 index 45d222b5e58fe53845772c1e19560e3d95383d53..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 43428 zcmeFZcRbbo{|AheXh?-3BM}m^_h{G|WzUmMR`w>kN>)S>S!ISovS$O?tFp=7dym8Y 
z`kYf;eSg>0_wl&zfA8=0c(~|{&-uLH@7H`jpRePuq#$*Kh=K?Q2j_^4^tGEfI0P#= zIJgl955OZPsFG(mI7IE{SFb9`T)j%GWM^YyZfT5zBklh*l0YfEi|lUmUaH?;IJikC z+)gB&5SN|6J(+Yb?Js(9rn3)T-cl!1zoUAL z?o@IWTR2#Un2xSQ1yRywiLi_-t_Bn? zw@|BBe2%yYA%GWrfFYPqn}Yz5>d2kKS@HXp;QJBMS0 zDcLGMI_TZmz`uIzuYHG(S5oWP-9IEXz_ei^N0jvP%R3!1f@#jPfh=P^*NwY+x_fYg z)@C-iT*x`T1wP!WmC@z>c6DZ5BmRl}R2U16Rsb&doZ?ha!M;lYIMG5Iu5CNEeP%+q zxM`I*sx9~;m+OVKKS6$LtmF?hdiY3T@3wzr*W7i;gqcYK1<)*ow$zK ziW>N8oLxIexp-~ou@XI= zzf@SL%C(@8vw@FQrI~L&QaLB|*lomg!Er^IIe*nlD14oR3#j5C_7Np zNeY-9q`2R7S-Ex7?5x&bZ?0yvXgP>=2gOOUCl=f|y>*=G1FiyY>Lad7&EZadl+GzO z+(_T>p@<>bJ5LL>bk4mw(&1m?A3mrzC_3oxaz5^UL3PRSy@gg6Q7=(1W-r!qiVE_9 zAhEy-|M@E7p$d`-d@5hk_raA6pGa;J9y(a{m+S}Y4>vvte6YGAHYoB~T7@y1MEBw4 zPc8<6wj{Q!wv;*koeDGO*GT-N>BG*~28>)LI=3iYlw{CG*5=j5+;-%q*yK4)pG?Jk zMuN~2pAD8|?HF8XHt9DHY@T*~_VOCXi^3ZpnDAtAjYW*flY{Mo+Z0cyTu^RIqyEBl zThMWTUO>0Bh?J+aXKF&it+emym*g%dW$V0<9ZdMJ|GwURMQ+k)hQ`PU>H5UiiL)>A z+UHvPTPGcqi}^%+;^a_LS*h_a#8SLfj4pID*^%xKs6T0O@;RvuQ@M%Ksq=@A8nPvZ z-l*koD6Do#zIKCCbxC!InN>CYhG5E3=b#_%~Dn4~WVemSMf+kn8ZCx-k4fi{qQl7sk7%1c^3V1{<-g2XP zxVY=NwYh1`2lFoGnV7S-biZ*_n0}g{o?pU?D!H~Dy5>oxQgf5>is@s8*e250v$`UE z;=2BGWPM!4Su0;wOIO=gFRUm`9hu--6xD})oL zn55*SWXNk@kAH6d%(juJk)nY*y1lWiVf=YV^uSwMt!!4#i}{*51=m<(FScK_xR|B+ zLaj-SF2hQ#Pnl3zH#H#jo5E~Pil%+$@@vXWcdIW}SFF^!-*uM8nFzD$YjwSAG9R#v zwdAsL?h?&b&N-B?ZQfzB{;j_)&IV=ZUGFn@?0a^@i?4&hiBA(-Y)gi98B^L1r+!tS zQb5J(#w0fdH|=AqW|Lvl)^+PlpSiQ9wYFc~HzQ=4vt#&8d~ZyjUEk3n^qlo<++cY# zdeb3y^220F%DU?*w*_G*x6i_waWZjYZgr)HOI`Hx^b|{jOZWGel*QX#dx#3}f$9(p@zVn9`%e}7WChLrX3}lS`QHH#xgE4{jd1EF`A{-$c zA$;?9j-O@bHAl6P>z&Gq;Cs>_-ybdfd41Gotm350p$qu?t`O@NEocTv$-34ptAuuotap)+!iXM}mOLFDg zTk`L7KivHgPc}%tNbY!%SEJOxcK&!%SghRR0^5)Gu zUCr{P5Y?QF?f{3t2=Y}=@(taqIR-iJ3QhC2r>F*oE66jqb zDl$j2%2w|65vz&?Wy)!G0iSP@)77!^Q}W3KgSYzMlZ!E;J;s$jzj*X<=3}tb^+%Q>b;4S0 zLEPlr#Q8~ipS6ayOG5V3Oq*6{nNLau_QCcz#@wvpcZ%edps4t9cyx(kJOc3$>W8Af(SYOx8StjmWSMkiEL zGo(slN}{uhbS%o|+{#|NyRFw!>hUQItdzL!lC6Im?cuU<9`G->FJR1(a#FvuzFF4b zZZkJNK;xCYb9PX__FS^m(-24Mj@{df7U2z=PA@yM*6YGQGZJrYc(QxfZ_dhjw<>S7 z*O_iLPsR6$iHRP&bO&8ELyl%=VE03j?p@ua(MCP55M3%+pI1LHd|-P(dLXD&cUxh( zFHY>t-oz;)8n+$MY4n0$SV%O@x|f0X)Xtgh&8`Js_9S*y6l;0D=b_EDJMS~P!v3=WQ2TgCeIg3-B8JR|fg%8~ln+qJfAACMUQnEj4UuYdpc|>r~;f~-SkJlf= zJaJm|E{WEM1iYZT8{W>;XYummfU!r@o^r7FfIjy=sg3Pj#pp>g%6*@i^`XiQFxHSU zk(bB00Dm9E!N;Y**$;o=!jC8}<*&abahY)TVIIfB!SOT4!T<3Z1^A8p7YaYfd$7Ov zg+0I_fPWo_AD0BYpRXoZN!a)EZ`=rY4(GD+RT&xht!!v#Y;0|BX5%0ey>Sj6AiN{3 zX^(?L#(@0b%G{)1fa`adt7tfA$X^#Uw6S8-H?lD>W^=K+gIotk*hLWjv@&+kr**Ni zw6+&?5uwApLJoZ}hu zYl|fTUp@Sv34V~@@FQiI5|vsX&iAK8SKY^;cBae1oY7adXL9ZuX;&*Xy z@$e5G`_mtRB|;7 zpQu33+8DN{$a5HcdN|RUV;IbJ>DIs3fzlOs?|N;GOA`}fN=Y9orfi4FwocP{PlJyS zjzncpJAS#bIF*q#S1D9@@(-n7CkGzhmQZ!8Dz+T;5hf|zn#vg7n$Buo%eNh_&T(6v zn#no*hj&W{!X0RtToL>6ZdnqujyJj&wcatgXC;P+3Z^K=$B9Io{YQ?#9<(`BuZs28 zCkU`tR~p2#A8YWxcT_N~)3ZA0^a=F#I=kA~fB4F8W|MSTTwLr(ZUQm(G4bSsy)MJS z93C4BGiBWcHZ=34%L-p!DKl!Hzxszuw9(Nf#79K65M#F&5ZbWWRg~-K(;3g|M^u#P zt<`$!`ae|ZKHP){CsNIMSMFnL`Ls(u#`O^r^%1Pd*+sdZ@mP4j_)XhJ;wmb1d?|5pZ}*iLIqdTz!Ifp}{gnF-9R9qvo>M~|*k9&u8e;s1UO~JYAAhBt_uSTx z6{uI$dUNOWBu z%Q0z;zD~m^AAM59eeJrHw`0&B`W1SxopOR7`=3Yt?2`VK7>}jeC|jrbevvru-Obx# zCj03B_4Ivq$d9!&?+I|@VK@I43m&|R&dG*fb@-XaSAQ-O1^tac;&kP^cS|-NF6q~oF6plt{5B+sQ z;o&1HA+nP0vBu|Ivyp7N6pXPBH|aB08UEeRXhVr%p_`ZFwxltqbUQ)(_`{05-EG5- zg~7-8!v)v#ZhsBfM=X4)*kSVcA+n1PVJT;uMAmyZ(DUA?%t=a{QW7a@S0nmals9^n ze`n!i;8&slIE>%?%bXOvV!C019P?P(AYN*`mz(g?=X09(W1YI0n}nAe%vxVcG(6{H zKd%sTijvpz^Ebw*c=U|vVtD*cl1A6O*Ln#*SpQ3#f%3(LkHq8|_J7z!{I-kW0b^aQ ze3*qRUELqH-p4p!#(sTux+7$=J(XNrcW)?=z1q2AyH6wEQd2+ol#r9<%5(?2yUsJ$ 
z&C$q2`RBZ?>8cqY=;D8K&EI{7b`|#SRXiOP)(%uzLM>HMYO?v&1Ff&XV;0vd_C(yf zeD)Dkr`PuxwU$)Hm=-Z4F&Z%dYIc*b$gey5f({u=mat|RB4Za02E{>rlJ+Da_O z=2d*fjR{ubJ=eM>S`(#m9z2~oSxBLB2z zM~IqPY+UANFe^y##lIkwP~kM&ZPu0~lc`rUTPu?% zockQr{oVHeY27Gf-Hu}`&#~(cL)IOx?>O7d13_HR;ID#yH;cokIg_px{qWd-tW}8$ zE|D>uNpS*m8l66Psw6GmQ?F8oo+v9Ya|9GSPFFu+R(|QeKF2V1@^>5g?NUBd=fSc2 zOuo2a)?ay)EG^)!7w2DwAtJnbE`#@vCw9S~Lydd`L29qHo~9}P$+XkMAvHNh%Nt^| z4=JsZDlS?b<;jEi=yN_BHjebBSfLuo5Q%zaZvX9uAWNWhP1^Xehqs?lMO9qvbkoA% z$Kh&9n~RP{V(3Z5;b@CD%y|lDfAiztH#+SoIKfwavS1(V?k0$<9$4YC>^#<2;*9uN z#woKGMS27>xdg+#hA*pdGguN=k@*~KAZkh-NcfWHo4&@q4^+fn=+mUhfjwsYe7&6wC*c@ zn&v(FRQ;v>cvIKXXnmw9-i}NKHIxDqaM1a0E`?hTcrUyssDXHND9~W6Au8%MI(Pq{ zVo^c>vbipJy>Xa3Owvtwxo=CR67A0z_c=4kd*rcH-N=u6D^25hRUo+(^`4eTi^3n$Sz zsYgMV?;RLD$$WU1oOhxt*YtWc_p60V%K{ePGR=DOt+=gw3pF+)2-;H>L%@ej0>{B? zs01B8|G!!1;R(k1w*!tHy196)+WA3HPDnx}oojqQYIAwK<}v}*NvJZ|F9t}_cydEb z4HxN;XqYV+puYOxaBYlTB`(3InXX(z3&XyMX^S5f-9O@7zB*Fz?T^JSGrg$tn{O|~ zqun+d2zuGHG~dv|4fPw_y5vG-{)VRG-q-LXJJuD?S0RSeBXXwOMV0FHocgC@Tms zzyViiyfu>Pg?zNwDn(=56YDnhiof+!RSN%--b-kMe#y=shJ6sbIk}MY)5%AP9 zI6lVy10mxRt%|%4A!57y3Zk1MF5p$1@)hf4^Ho&Ny(A^G`90qhRIyw5&qR4aS6sZJ zt+p=|<2NIb;t~f|7@=0Gt*B=!x!C>AJp6tXkA+(4?$&gU-Dq9Ig`J~Rf=0f(DBgu~ zkIki1B5n?2`gdQlpu;KW-qy1{&TO0inxVe7`zJ@GJp!g{{%pS|HZn4krG19Tad;b& zf!x5 z7dH`-QBym`-UKRH6C1@XgzmcF|L$KOM&4JTPmagE?lEojOA0( zBJjS7BzR9IM^?u=_eVvQ$gvgCKhKH?H&^<1rwCPtjdbPF6~u;!3#f!*f))4GDN;o- zw8Me5HzjichuBNAmmc>${zGbdg485L0d&}t`hLh~>P!m?!ofT9_VY;P09ARgL?D^5 zXvL|rGrvcH-+U!a5Nd?$=rC)(l;lLTzx-yXfs2^!fnJ5en{y%S}S;u(D#aZ<(5e zn|%E)=iF8%NaDS=%+Gjj3@$>KVM2|^)T)I0r`x52+r8lHtc|^0cD&7#EsFU~HqZ)j zbn80z@jM`=p(fXLxWR4Nd1-es#QRgB?Jy_HGT;%EeZ`J}sc~)>qPfj)&Itc#8UL}h z3F%0>bvw`-k4?8awDEXb&v~r^pqmtmxTtya3Qw1p7~vc{s$5vPA+o+NQuO?Nz+k-iff9^^!bl_Pz z$~NDz%k%(m9kp)@`1d0?IAaEeNyR$lhJAH3;6m!Tggj^deN#SD!mCT8E>K}(i|RRW zWS+S>*?->@*fLLoA9;Zj&Z>Uo#-Do33)f-)KA7n~!`!9{F?`GvM~LJfc6*Z%ssV5N zlp5?!--K(kJnd=xEj;{I<^Ub3DTiO2XOxT0%?^NVZTUiW{zSmO>2V> zeHtlJ9zzdM9V1|G@ZNVn2b9sA+XF8p0?h`>J!c9AyjqLw$0q>nI*%HYx$uWW$MJa$ zvESyYG&VyimFbgu+5_0I@EePlTbg|;6@npX5BHh%^Qe;JgD&c2-O0IdjD z1kW-|sg4nt!>0YZ%}V z`O#;PicUaGv^q@5dlAktwoY^kgJq8tR6-YJ2+)lG5lUX819o3-NL6KIl}8WUYja1g zm6n4?8&AvAP)FOSAX-O$;Tn+T7)T~*_JPQqdV|F-o4#3jf;reH+TV+(-_&PzGp!R! 
zP<~@S*j-?8vejw)A@7lM(H$XXIt6Bd>`T*!@V05P0ynb1zwnW;pxf12uyK9A7%5Fa z<)B2bO;=#qwImladlGvN#o{GSvj&h{yXyWgkiSWHr^`693i`;1*||9PxzkYnJpw4z zw0?JYy`qLWHRiZY>7r!*3^WkIqMlnuzv0(Qau7)>#^h`;E-oI02c6HOK=yL}-F5H1 zf(9ps3x~-1YH(5h{F%4z?{rW~ed&8NJ0JNv*u}#5({#LK=GOXJHN#wFBCVz_r+Hga zr%eUg?wt}AU9V%mWZD!XO?`?b`X_y$MgOClWDnhW(*ezkM$y~v&ad~x7;tyBt&5Tu zE^P|{>A*kq@En~0qfD+zfMEBBsX?ML( zW=?dz*4u1mQLGQ%?=2jydF+e#ZsNm`x{BN0P}$aH##llH;yU;82qwOKX1+5V%vTKn zQtm2%DIOG$4iKoa86)acuxF_nMr`Pk3*&$G?PNioo+` zo!Ph6d45F&SPVYijR>yT3yeTL7NYHzln}4)Y^|N840ZX zcrW(h>luoU?{uV*vg?-4%&c#c2|-P%+3`n_TK{*jy}2x&eR~g&&oZ z1~_f;mo0<)D63<(3h&j{gxuWG27JuJx0K)CMw=c|n_}Fnd;H)CB^h7uovQ*`x36H5 zdBCAw_xUo8Ta|Y$2jTO`%G}p~VlF-&$EB&2EFez!4{QrMOdR7fZ3^02ohGMjst%@1 z1l&IA?(X;Z_X58P{0icq*FVUFzDpKOdE#Fo_j6~@wJ^~hYK^Kqkh~ewJLPd2d6ETD+gruK4fCGV=WA+eRO&!JsS>lM}z_sD)RR`=%mN@|vy#X_)- zuX4!8zFCBC4oIHkhOgAK^sdJW@(4Q5>AY0<)t`uaA>_mQ&ZG$9)n9J;v?~a)QUh%B zc7Jq7U}2%|wRq+a>VuNp<=F;e%wFJZ-mp-yTa=r<$^to!Q9uf=MS+#~YcWTkx4 z_a@!q&~E5SL!$itc+?W{O^Fhxt;y6H=%xb5YLRP+^PJSE@bUxz;?h`iJk5OBx*k2t zQSAb2s+u$Q*G(wVMxT+c3BoIb0{`G&2#3vapI-d&J5;fQd5lAw%;U^&Saw6)@wl`a zpirxFa(kQcd#Bg?oKfbgU2l;}?!Xa>lK^Rbjv#IP@m~_n-*alu3s7kj>sQqZXTyos7 ztz?4W!Ik*|vHYFW8vJiYa2G}{7q+!2D~gcT&WG22wAGda+QCr=_!n)9lwT=4!568y zPj!xBT(a^Jnag;rGbe5^{la4|hK!}~*J~E!nQhIHD_e{t#CxcyGW2_5uh70LMGX%y z^)c-{epsvqwd=@Z%Zl)h*Yw)E`DU$j%iS>_YKOy^h8LL8K?e)4uaodA0k9_DYzB z?s<>RXd>)K*IVNpz~@FR6Mc~^wxmewI^em~zx_d_!VCRO)YC2J>neNshJnX=$w1@I z879l0xoJ>;Z5f=qsdSsqH8L9ufG*wm-mWMXOf0r@3E|bGsqI);8H8FL=6Z`h-QFHq zPZ`-688>hJ0-#(&a|d|7*m8mcf)l9K08fQ*REu>iSw{6z!4#zX2#z-Vh9aw!gBy)L zFFA*Cqm4Ga+PY6jG6KNg9+il0BH6cAs~bNS?W`^pq^V;<9B##6U&j8n;RFUd5x>1Z zEZ)j{Z+jpYEr&-SgQH)+G7_#>{VFvsn$-(oJTp%Gqy!S85AF<+S3K1J_@D}yvmw9) z>IKqAOviwXl>5>gFILexT^II!?}ii=>A>FZx*>E1QR?ka_+KR?Wb5kpX`0585(_Vg z557>uRCNeob^+kPEIfw-Y<5PeeUCDY^9QHNnQ3S56dJM!uD1>!>+T_!^w`LX5fL8z z>jYhwlP1zv`%Ik;ohc2o%s_GLy3ghz`V=S+J>IG=1fAyMtcmwnlHs=M=GJEX(US*? 
zzqOZtdrw|d>_m9Zd7o`Sf+{{`hsQsS6?CkNV9Z#Gcb_YICgSeIZ5^|{UNv%dNivwC z3eq6g_<5m0dZ8g{OfiBaQ&nhJQI2bK*g7v?lh)O52O?egc4^cAldT9Qk zR(3UW>#k8F?a!nvYMoBra;K06_p`DOg1Z9VbR%;e((t1iYWGYH&B6Bwlnhh|QoKf% z8Oje5iemF4H4#0r9V4;B`b-xWFc2zZ2agJjgt~0c76j&PP9%mH6!baON0e=jJ#Wp{ z|2Wa|;%a{8y9TI)IA_CV*TcE(Mn0e7Hd8W}fBH-8tr7-C!*IC-TLN~HfYAgR?U!u& zVjuEw*r{1FmYg)+=HbR>3CVN2b*G(;#nueEw zV6RO2DRnYmeox5ue0x)@7F#YTWUvUqn(8hwIXar7c9| zAvlC)Y8yF7BLcQ9mkEp&y3A8!=>)6?M4M?S2FP%%FgpNS{-4TLCi(A3)0E}4nvtyP zd4qSwf~2HlZ-^gdZ%DMeu`*{W;EoVql1mCQn4)wEb!)aeuQf>~oJogJga0Kx|KrT- zy_^iQ>s_Yt6Ts2*0?DGSCWKAIuI$;+@HFJF*EW~Zqc>{A9)V(lW4sVUM`Yi+oG-e0 z8rgS~kOGEf)ZQv;@%g~cd3)Iu{jvZ!bN zCZEB^ZqfAetFfvOx@z|Nu3fOQQe=|cPru*&jYZse;0Tg|nqd%iiT=uIKE-PO$57*4uZyYGwp%#8(E zzF!SRLLJ2L3=bj)EH3p#F~NIpcX4}QPtCJw{}IdcS&} z9TI9jbNL`OHioe;Q}883s4Xa8(yzs0)dQdvz>r7VGr4kyUxQ8C0xc?a{0<7pCwg48 z5sznBnCgzYuA{1n`>s~r?Wg<&?|esk5jXbQU5=03I31&7GxGWI;~nY?fBV>L@UgF3 zB|@0M%v2!$ws8k)AiDyw0>@9dB_nT|3!#{OFdrik1*?gAi6 zBG3q3mDgJPo#%Q5QWRn-YI#5W0Qu+eYD=d5pvVUx`L>SRWO5YgMy)k(?His9wbyk= z#RwM)ta^B%$*?g}>#(W+Xd^@G{k_sB2TIeKRoQw(*tB?$W+&m@1>;{~?J00_D47;( zY{1M{Pq^xL6oJNiBTtvwTnjt^d;HpDWq^+CO>Nh-{|UxwLdKZTpY|GG2^}QY{50$9 z@mjUY4jp@WbJaR*5uJA&F;n`zu#Q%GmeE7)(3>Er8^U#6cYS*k%wg;P*PaytAWTk) zd3ksM#1%zwAZ(aebR*#t4^#sYMdN^dsr_EBCty%vRrW#}9jop&I7oc2=fIj!a`r!B*~E|7VeY zpp9JO4a*x}?Ls}Ca0V}soc+;NVi9g&9m9{mhJs^g<7*ZSOA9l17TOx_Y;{yTg3jDX zjV z=A4B+*lw1cr3H*M1~;IACyV*u1Yeo}E*cs~KF}AGve!=vBklyO-P=R;r%hvSy!5Me zs;DK}8Hs(boiZX)nJ5*~|F-axEks+xFKIWBcJsI+5ylK~d>$h7DTb1t$!+F51MG_h zu*rDX9E98nDFRlLl=l3<+(|n@G-WW^=Bqkg6vn!4U(1B-y3a{M`izOJ;;Hm!oW`;y zlsh`Q)4;!{G)An>SL}Jg&8KMC+BF)jdI62hnL)%}Jn<5Nb;rso*(-MJOt!y|lzw=( zJy&8%eOS61o2u@L%lM3p-(FhIyTnjZ#REvre{2v!@zLdLV>f7)WCJ#xdcJ(ydQR?e z7oR~h#xceUyaP3|&zok2uM5Sw4EYO8t74KR3^JFkv=sv1G>q`fXRkgOnHhnbdNdk#^VwY?6I#|8t8l0X+W(}MIH29z~02fV($Ylwj{vu zVaE^CwF&F9342od7J4|I6n$j{T}T&Atywr|q2Fq33P|JUnLg0&-Nev3UO}>zFb6fxcCBU(_q**DA1%Wi0`= zdKlEaj}X0)ZMS){*n?~|JZ+5lF4O4#MjPoU=5#ueJl)1J_}nG%Kh4fO+L z#eQKFYZyopQiUXRv#1X%?euW{)$jI_Z#V@$v@XFUq2TA|yg2y`l;;$@S6ZbWm&#!a z!JXs$BcWG0FZ&qvY%|D3Tm#b9?&$Z8;gFEpa7NQfaoB++27I()WKedc6nQi!PwDG=C=Z_D<&rcVeKP`a5ofk*)#v!P;5`+gOFb*ii7J z=!vtTfBd0_Vc2U_#3PZprLWzN^voGsjEZqFu4dl>qj@;|NXS~7JrRQj9s6wIGs43K z*jN1{{YYIWf116G8V;9*?+pq92;x2Rz68OS!aCZJj;T6?h6aI7*3SIWM){;8Do+Y$ zXDr%>lKl<}wh%~FBckgWtLNCzT43^NuEXIEZTBK`Q=yA(!goaycWy8dCSjyjzAose zsAXf3Q5uY~Z}MLs@D@99Q3DW|S5yl5)rw65(Amkp9?%-Bq47Qgnp?Zlr8<_Vl!5L3 z^`S>Xa}WDKx@VD^j!nB?Wob1X_g2~!BTp$KtTMPp6v0$j{PWs*ZK%ndbfB7wS~||i zgcJvuJeTGGaHTAx^Ht<-XctEJ$AK{Xb=LL0tb7u8zO5y`S5sV4yF3Q9-7#ewfbpGf z7FpN%X;*qaQ0z!9&4czPkk_i)NJtPwUM}d>4#Uy9r%|dKGL>SR>&E-$5x@&%xu(f!h2n3FSn|;FnP2MNwIX#X77Fc<;UZ zy7ViAtAZLDRdwR~8s?E(+W2A4I0XQzh;Am`B6WZBWsrqrV2@5ZEJO1`{a*3;p}2~t zU3t*MGO`tZ_vP$KLt1K|XqG#TJRP5o@bw-8tyr^vXr`#gT9@f?r(sBAmn5duIF|MZ z4?)O`7`&D$wIO(NW+iUcsS|nmJ^2-%Z?XVE+x@DQ1G9fh#g)Wf3-_dbyNy!wt@+oHD64EN6pEmsP7olwxsro0#wSj8J`Tb(k znYjT^|HiX_T1t`(FhcUVFW9kfj=^J$xpM{4_RW0r)RsK+HFJj7{FQDA$Iv(5ui+WI zBQrN%`}&;-bG}FeLLY&iH0*VX@Ifa>0^#cXW0qt8fLg$RJ-C4&n4GnV(<2OjoKNA|HD$@AWzHTag!>Ao~;<1K_(?~fD4 z#WN->UF)_$^0#F;=pd^$7Kd|d{-F)?d527hSV^v6=PRBl|4fjPeOnnvcFR`1%DRmU zGU~b`#U8or%p0#IAg%q_a~sK}azkt*W*H(5(+LPbA0RcXzgN9n78obTnAM z_)4e~|G8HZ#Oq1GWUiikgbI5lA zn3`jGn_n&n6fz>CPPcVd$J)wA2nc&lAX5wbzN^+Q8cf&3c5rI`ZM)iYHH~|bYS)7H z?qu}LFs@hpR94@2th$ffcY+BaDD&BX?>T`mR{#`MgY2^E!+}LB6x=>+BjisHn;1BP7GS>_f3%M~kNi{b;BW z=_|lzkr`!@$kmkW_p4>;Q33lMAm~WsRsHmFSjQLotPGhpBshVtl_fFn!ar>3E7F{x z&rD!>j?S}qAN`0!0FKGo=@5J;a}B;2%`0zrtdYdVgJN;z?L!{Q`mYsGIGF%>0ApY3 z?!lN-8zVvxafClXI=vV`PHg)eJ0yT#mAzZKSWO90J7fd$$vIcEnJRKrgD}3*%NPF` 
zED#TX!&l}$U5q`MeD$AM@W~$M>TM~9zP#@P)+E8x5qqUe@2=x*+8Ty+HafMqC0s|LP{=$_DGQHo&^0s5CVE5RDeh09%1d`crGlUA8EdyC+MBOV4Y-9S@&V0v9{t`=-R!e&m z&_&)s)ICAa99m5qzx!?72@fC>bYv6qVIp^$D;|yaj_uBZpI8uNZGP2ziv7kOQ2t!Y z(kr|9l|A-Xz=hdNq`^J|^G1t6ud~{}=W60Vw+r~FiC$oi;an(69ayu?c#!b2yp5&C zhI7uJ@)GQh`yl#jIprIYn7SN`9v59CMPs#w1jd6%QWKA$?_Tq80cy0v_?K;rd-Qm1W5`I6 zwTWRE%T!_afkadTOff0aJwSs^@KmsHM7J6tDT7XN&>=XlY+1qczZ<^tN@V2hc*(Ug z$h5XF>7eFidh-q#0 zcWI-zCVaH?dwUkoHb#DabPY?tc79-S1a#MAP0-N|Gi?4!9{6jd$}Y zg)Hq|Zo)P^A8~2LU4iYQ`Z7M}^|`X5b?YcoFR{)j0aKGE*QN)24JGy|^}hGzG=7E- z349pcx|uYx=LF1`m{z*-5K=p5ycR+&BDg|fCE~hu z*?ddL3=%`Mh{T0>kj$^ZtVh?9opTdl|Lc8blk*=(wFev1pvcCv7JHUP5J+r zU^P<{zfz$|CWI}|d`j>B<)!!G2ZXC6hi5LhjL4EYMs~i8c?0KW2-1ZRB&2q|NgA<| zO-~)MDw5l>tvu^@CQR-vxzKrr>h|z1v5oh{vgPSa3_;2t1S^tgD3g|HAh&1JJ03C8fM(A|#wj(QcjFI6WhOEb4D|cEY$g5y>@9T#} zdG)sI1W;GqE-uNA-LkD?PCc0~_d03B?c>NM!V8Nw%>vzz znW0kCGEYvC2+ju?QndSRG>8~2-c@{&bo1M4_TF87sS3eA3sHR#jY0`x69W_F_h)ai zKs9RH3o22D@GbchThM@;u1vNwwpkt+WivnE3+;Y+*A!J$OEc=yo?-g~ZvtH?F_4MT zhG45`!n5e?!C=1r&osw3X&g>EacjU}le3-JO_z;9KgyY4gik}}o7=AA%10ujv&hg~ zN{M!_LN3jS={4!6?Lh|+v?Q?R)$gewe?R?7uE?$%R&A(t^78;Ut6I)r;}-+RnV$*9 zAz%FCUtYydZt-r>cA~75R_|^kC0WW|7Xx=HO1kYvAzG32s$2PH#h0aIcF$L>+s*CU zz3nH~p4*hWTDzggfH1e;QP(dszaoYTP3cfnj8An+POWVh z@LtP}XXR`cce}X<{bUnO{%6znzr3OaXysqWqarj6{B>xYI`u*7y9eW>{wpe*0JBA8 zELE|4akO0|sESxg^8Uy;u?NHJHJ}dAOe^PCoJb9~ES^lU1%bj?)B0w!x9Gaty$`kI zZ<5r{AY5*dI@1i4O$ansq!eRfp<$Kxm8fU)sg38fc9CtJ4MHCjrLI;$L>w>Yn#?b=B5iLC&G=VVNYs2`9c@cn#`*sDe$3}4gw14!CqGJrxWB)s?DowB z*Z@a5k;`#$l8TBcd^&odH<2bBTKVGXCF9>qs;H> zQWW%}rxE>YVrr4{keF^(kv(5zi=xbyLm4DvK4Hfs8d^;JHD914OQNAm`s8gd2V413 zkXh3kM6eQW-DQJeDJ_capHh>YWTkDOmOTaOqD0xJnDlfURJy$dyCsEBfYtb3cn%_wW^K_4-m8bc1+n(&2Uxa{o zPv-{3^2)vYE71?^BDr}fEweAb*n*}hW*b*R-;eZXxrVeN+~;Ut3>*&7#Ws_bsd#ZM zdtW|KV7e4qB_TX;mq|i25?J#=W^E$$I4(kA{&6$=r01e}z;23Mw6BQBBpPUw(SYdl zVb8xQ_CR=&nxk6ZdOv)1qVrPL(Nu6+CpnKbl*ev40C#{uPjenk;fVv-&4 z@a++qfPmk(VD#fo_VjWEbim-Ls#>j0HTdhZ=UQSF=R~Z;K$$MBIG1V+yB`eDrSQ6g zcez4)Gg28Nso=w$QH046CTE#kUhWtM+D6)JAc7*@3$fD~*qm#mB7Q|IabH#sW}6ap za0$_N$zOgH1?e(iPJ0M=lG`j=Vs@fkdD*;pb?4@c`4Xa%X?1T#rV#Ts;P#6}yUHP;D+dvM(VIy>o&*diA z^S<0%Y~X4JE&VyNR~3aq-CQpQRtUF|?~VXHzOEUP%e&3S?jt%}Bv4*)m0e3s^7fKZ z976K*X5$99A+;a1o0)z|(bLmhCfD51mzU(_ zPqxeVVWK!Gp4Tlk(vM2Y!Di85(}|I?THZ^wjKOh#v9F!x^DTHXa7pC%s`|KwlG3ST%*T*$Qq5|Yg26V1eoEsSKk0V4Q z@XI$Xi`O3HiLl9F{uWOXY?xnoH*nNk+j@V^OJgz-{EE3V%whB#9s#<5;B2gGLAnKj=@N&s$w~EO&?iT|E zHUkI(M}ZD~G072_+Xo9@nezFnjw)-3feWOFg_<$HsA%Nv`2-DuCjS701RV|byrp{7 zE@wZ`7CKK?ziG20Uo&y@E9KMVd3l%zGu++TEJ9C~gqL3>!<%KM)yq#!vH#gGrC){g zD_DJ*1yk*ceg?ZpGaMg7$115H#K5@VF$i-U_6~0 zg59|Ecq2P%t1XgFzE7`d3Hd$_`1Tv^G=$CJi!K%;1@2h%@p1H~F=+o96=LJro4r$< za}aQTv#~dPyixw^0QZadxS2h;|C{`8%GA>ZUc`qjf&k@{fujJrk`~>Pz$53@_yMpO zJHMpn2Z}QVW5QF%naU2|~WLAEr+}a8#*9=TB#UJQ~RT)T=$&{E9c!G`0{-cwT?A?)e(iqNrG%diofd z>1Kne=n3=lcfVakM$duQl!DUV06h7%fQl|azz-vhIbQvg9;Gou42h;-3&zqlvFu7m z`2K1;StPbu3(o4jjiKJ|Gmf{(uLtvXGv#+01omzlBf4xP`5^O8wlE0oAR57fpcUeN zgeq5sr?Q&_Z2b;8T)QGxCL~gJRit7DGK1~MBYY!#a$e==wI1ZVj;_RV_j!FxT~xjd zX{ku@j@UxwzQ_IJaj4yK?_EZu`P?xX%mrsdMDj>;RM+F49DQO7h>!p%vEqx^FmWh1 z9-aBM4{qM+mMMx)zcOujW)&mSJm~OD zJlWzh;q4iFrBXXQ+wmz~o zi4^1YM$60o?O!(6++dA1UY*(c`pk+U-QN7=;H|W#BQ1T6cMr7 zLhVbsmOOogO7LB_t|bFH?G`v2+sV}R(!K8lVoa9f>JWp8L9oXU>1nxYc2~2J5N(y; zb$$-GL!Ed;vF-x4-BY&L7<&obw+2L60Qaib7Oo!zltU0uSf@OG29&x((i7@lhu>in z$N$sbmxoimwr%gE61&J)WGG1)laMKPg(8{fX-8(Ul_|rLCS_VBLyg1bUeV+Y(&-)(VaeRO7{g?Jy>o?rPHJsOZows&@JdYEVED%FVQa=OE zqjOAZvCCo@=<-1jHhvFXgWAQrgKEQZ5U4&J@UlmFpC4gQ+GUw^&MJq6XWaCJ^T$;E zNU-6*9`l+XedxvMN$@(aqN3cEdfNvl2_gyC#2uPRE(M~B%`4;Pz}6rHsGoX=o=hR&!8Bx%nEtxwn>%_ 
diff --git a/docs/src/examples/mps_onesite_figures/updated_mps.png b/docs/src/examples/mps_onesite_figures/updated_mps.png
deleted file mode 100644
index 35f33fe5efc964f5e38989032b9fd018c93d024a..0000000000000000000000000000000000000000
Binary files a/docs/src/examples/mps_onesite_figures/updated_mps.png and /dev/null differ
diff --git a/docs/src/examples/mps_zz_correlation.png b/docs/src/examples/mps_zz_correlation.png
deleted file mode 100644
index ddc3ecd401c21cadf129d929556ffa71abd5cccf..0000000000000000000000000000000000000000
Binary files a/docs/src/examples/mps_zz_correlation.png and /dev/null differ
zk+WBRyfU|#>n})D(yQqdbzTrLPt&z`S&T-^<2^A?JLRv2q&IVP_3!)qzaO0VLR?{Y)Wd%BCaxiDxQjoGczezq=oEqb@Gr&v`d zOFK;5<;M+x-b_lBRY>t%oXV5qtWc|%v}{gv8pkx85i>g%9$Ea)L*i)k-E-rzxnQf8 zb7lVq4>w-aHnJctNY3Ryo=U(vsHXlHYDMRI8M@{VM^B8P5~HoI<5uPt#3$p~fd?y{ z6fL0*hv1zFgPt@l1n0e1ELSaB55e_uQYfhdE*-Zvt$^5H*d+*H01*5o3cqY@{>^0f z#Zl+2`BKld%`b%1v`6npu9jZPxdpK}=QMw$@IK*iuE~3y_t6&&3Kc{LEKZ~B?I5gB z?8J8Q`h_3c6MHWaHJ_S1!p$cgR?3yFL zx<1>m+r&XFXbP+pB(8rUj9he{g>txrl6xvjmRn*Fq+DkwtK+h07n8>T%ia z-jZe&7+jby4{Cl|3Vrn90^T{-mbTJ$n(Z{Vx~o8Gxer{iDP{%L=kjVLW+euJu$S^w zyn+zld#TazOza{ooCWt=XO#fvA+;!jbKhkE0sB%M#L=y@Uk`M&FOVCjasH-u+PKx8 z=i%%`=Vb?sPOeFgZcgm>(x>D8@e4`&9j#)-s(kjlqLNXTs%XTwg4e8*-#OFI__(H2!mO=Yf#j$~pAvF0sRnc@&G~(m^JR z`Pkmv5hfY$Dn{>(Z@CjCG@*-0&0%>tHFa$9HE?O7x(xEv*3@iowW2#j6Uz~6uUEtd zCiALmUgH>v5@fvr=J?x16P1XaoCacg!N{RJ-3ltmD14jVt~_W`RP>DWRgB&1b;A2x zlX>7Ihh4kXawkM-LSc$F7^Ay)$NAf#lRX=7H@AYj0!bZbd6)6V#H-Rt30mVT0|WKX zis90FX_22hCSRtGpC!&wHWRERbf$)sQRe5ALuGy6CVHrVt^UKqBCf}lCuZb_wEjXx zs_(&Wu3<<7GXrL@D40pI#Wt_{hoOTxDPA}2I2YC#>7#;4=@mhi@5G++qC zt1IGSBGO4NYIcU7CS4Q_mAB!!S0qgL?0sYfkas$Pp?3KfKs|D%jIcSIj@TvSDy_v- zheXySihV8~&hX3o(Ggym=Ty^_BKP#kL7r9kYwqY_F!y$m0&6PJvGuRH zyMVciqdsSd5a%BCw{!2-kTGEf8w15Y+m#yPdL=u?wp{I?t!`fUZ2PcsUyjl2;5>-l zlt(Han6%pSv!cIzZW8PMl;Iv9ggmrfGu2Gb^9g-cIn)K9wP`m5Q3!QJGy zPU?8A)F??*XJ1V~>()eePxk*Li+!IQox%lmXQ;VIRYV9@$PdnWgxZ$dAC!kk-H7%o zZ7LoTvTBb6Qy(cFE+NCV-3){aN!fWe;^SVQ1MM=GRD-DsT>{+`uTHw$GlNT8dGxOZ zcT9fy9KME+f8<&vPqa!rmO$lH#l3=m5vn)d)vgV}ATB??I7wBkz#gj8i&{?@IvCX* zc&r|SFM_(yuYLAEovs=q2vq!RDD0jYCT{xfkYqcUKbROUf1U}9z#j;JcomG`G#E_C$A|ZPDJHPr;PO6 zHT?Y_X`dN23JzaG)*p?nmchp+XcSblqsIRvk?5V>$VE@7+Zy7DG&VM@~I+k%&;PC&%R2?AyYc(Z(qXGpOn9!z*7uY z3TEVeo8;|rKT&%>Dwj0|j;@hYqw?nrNyt);^yC=nd2dd0qc-!(W(6{JJk3$gLr2eT zbnGo+_*yVLBKrQ1z>#6++bxzHH+Y9-htNEq}n{dhe*#{U}T3Cbc`ktF+ z6$CqCJp8T6Z%KY7_gI_bJub7IZ@cq6u}5P~9OsI1jyu|yZ6I)<1K5IG6LYOn`FVc) zvla%=2@HcLd5W74Tz(>`+8G-p<#$U`7MX%=Fcrr)Fw4DT0-92@OYqEu)_A|m(B98F z$ubf(B}F~%4^26GjPc5?L&wgI4PLJdZftUzJmxG=nmHENArmc`C()20_G0X!_uOF1QjJ6&+Q%j@ z+P(L-6tBfAfmqnh<41+y= z8kBbh@ehU-*f`=7x@*xc38`57(WMHB0-mtGc({QKn{y)w&VRuUUXP!Rr%&o= z7ZlB&SSe@Ylup{aI$7jm0|Md7{`_d*<08X1@WxKARoC`R-UFh^+Y$2_|CR4L#g{){ zGSsq-Uoi<;7+Z*GMk>Y68#>6>y+k9T>cFIX-|JSmxdS}zjO$ba(>IhwBDj>0C4@r( zjzowAP=swN2|!DW5QNOo!ykkJt9GmR^~~ucLYw&4)*ftWHqg$5s*#B)-(YO|21TMO-Uk%X5#^b zu$>*?|F{QVK-Yz`yt}*X5iTjPJ%M%}0aQbRh`{eiN`Yr-*rj8lx_c-4J#)l80r#MM zy*kkc>%AkG_XmrN`K88u4q(z_Oo4*^KZ7#BZSn@ui-ZH;SY9&d5p!Sy6R_wiAXa{- zu2IMW6C@>7dByBAh4^fC(bdv|;?O;422kKlK&0T;V~D7>ZQHd4%}+pnU*sq3-X^R( z5!e^oMFRkEBf9^Z=H6TtkHovQi)Yr|&Ql?{&1-c%jAN=6y;L|yU{DWx>T<7QE$4a= zec{99?+WTGQ17oGD2}h0uzKtzvyU?8C1I_e#N}(n8+nWNuSe`5GOwC2+&SXLV7*U7 zWHjm0zl;H?Ir*sPqSSz8jLFrlZr4gT=RTc~)Q3E6w)S(9yhty;0crB{gP+B!)ccX)pr?gA^YRciJZk4Bs{ z<-rPomm|0}*5)3egm!r=A8fv#6wNr3yD?8oH@ZlHe>89kB{;2ktms}_IFnm{-GjKa zGok++c(pgmA=?F&4OEfJ%3b&?O{f7*UfX6dUS(&-{Vj+A6ikxsNBUp$^d&!1wedZm ztK7w^gFVaCTwWYD*4=fdl_qiM^{tK1OEs-?V%#2X6HV+Pk&}s@?Jl6({}S*0FS>nl zgweU_JoY0cOFf+EduIYdCH`l6HWWZV(Mi=%Iww3Tg5gyk0uk`E#4-$VTT2k zK?wSPN4t`coduHC(_cFJ+x(@F0US^;SGCe!Xw%Er!EQ(7ow^j=ReG~}ua*X+IxbW~ zTHhJ%Y7n8{z&3s|3G(Ur@%3RYYta1%1)-4V0E7LV`(`}84?Yk;-W@Qq-$+E6!q{!V z^0Gx!XjNl{%ky04xdAa1z{{^VF!Uaxoct2`+(BAPH*fP(QS%`x`X=rqI2(X2E5m~R zp!d(({K5Xf9ENYagh#xxslNm)v*Iae80?f*&J`|fPX3MW{6t2C@$??>`ma#`78@CG zNtJU2jm#iTF;}Hjwd0VGKJ;ied`86P$B5`0m9+O(&E+4#s#xkxTbV?!HG}EFV%vU` z%Z1)pk;~Dw5{gLQ+w=w}@$_`6cM{mpwLC%DFW^CeocZcuusf00brq1;8?L?G-^M&D z!1uh_=%@Q7{8}zw;9hSHmck&LoO;M~gZCq8o{L77;}u4jA>j{L6t@)5X{C`!cMrt8 z?BZy(Wq|Bf zDVnChvX8F+YgB&2r?z^1l;0}Jt0a#-v)V~Yd?8-0%Vts?S#sPa745xqJCUwa;|Fpp 
zJde+P+jB*m6mABJxPjKj}Y+r89auLc1$`ltN&h{z2;*Au(Q}=FJ3zZWV8Bfc(a&^PsgMy z)e6*w5Sw)Sk`xEp4EF^c18?hi_lh|jcFAKdV2=jzAp*I$h=WqUd5{;klJ)XTFFRvy zpauHg85ph2bZMuzEW z3%Sl4Mb#`epd10gXHYKT<8$v4Y} zgq(ncJdP~k=h9Cj=q6#?bWa8jV5zjk2o=p@gkvA)Nb%1yRY&%R^aKz{h>eC1zgSt; zJ7u+@=jGEhlB7W{FZPW`McpBWCayS?CtV)n*St?P)AXsLb?acBLvEH~4TrEKDiov2Kj9v;XkJ@gU7= zj|c2q_nqH1p)js57a8z0u=~zb7+YtF`t=}qfS7RkA*_ga!uPG~$a;oY;{*8Ecxtcx zy--W^@K&^a`^-FX-jT_*UFoV50KP;4qu2E=6*gQ}5P0s=HENk{$J9+rBdaD<$>*aW zOr!Z>In;--H8Oj;8Uv?XDYUo677G-7+*CzAeKb%af**`Nx3d>V@cdTIreub1Vd(K^=lW=`kwLa?U+m~Q6kP+v=C*`>osS2sqjs?kRlP_B*$eYB-wI643 zKp%gkNa4pYkY&l~gy+fKee(4zmWs4DnXeg@eDT`UQT#I>m8gBiBPU1;!ITvi3ZuJgU+=NW_rRtQA^(Rz)hW3Vsv`alWiar>XDh|NQxC0{*ocp zgv$7EbdeuV<8ZH=8Dpzu_;fuAPAOJoeH>fNoAVKHI#1&Y{^E@90*}d(A^6}IKSaTy zd#Ce_B6j9|UIrmf%F}J;nh&RH^h0J?j$yGy<$Q^%8oCBR6P35>2^OUzZl@C@V(TxE zVnz5vl0D^-asjbAEcn-BwrbNfDhF6R)N zEYkNPz0naoJ%bu66FXY=Tu{yvcu){mXmA*eHLyYV8nWT~=^KqrVkfTi{gulp1xy+; zv;V{|=So5fBSrRO*9xj5;r1F|D|a!o^X8ts_`$?v_uXdHP~(~=wpdDTkr0tr8Zw`b zvGw%B?PWn~MFIM?R}Ccm-i_z@YiW&sb!pCj9%`O?B+VSdK#^Jy6`EJjN#1n{OZB9) zU!@7vf1hmn9RAS98=U(}vq9VqVrLnCx5hdq0Gb<>E{O@xn?K^Qo&>M0YYDf3?>-VQH*`xkq^o@WyL~Gl<__fb`d`dxckN$;d z{@aD!4A~n-9MFpr&g+Pu93zN8&yEEM{Qdg!)DO-+Y^!x3@ELaR>ApSt&xz8g61av zPUrtz*y%kI+{Yg1#n4Z$vFG(J>iR%R0{`Xy`*Z#NVKaoO>~HTSP*G9?;}iXz4w9`k z0?k$Zjn4nMu&g_Kq(#0O(2Jo{sWtmMi>NFON0;iO|K_3{pO9t4W!~SU5yZ)p5^v!CJywQ zjX?Bkcu>f@chupq+wi%+QcZQhSJB>Vp#I%+ItQ`7j1DBEk1E~0$%$n~3(-ymjLLqoABh~?>M!YJrndZh?rxSro z5*oah1ggCX{A*k56XxIS@;TPz1KR-ozdJ@R%p($oh&0JEsVu?G^r~*I&GUO)B2ir; zBi3Bbi(ujb{xBeOv&hJo(E1*$OEg}E>h739ww>ZyB6^8Z$WdBwAtw%cjuIa(C| z48~|hiD7u_yxnPFsA7sk|s#ClGyME@imnSI}5Jy;LR%Z z<%-t1p+zgaCGHrb?Bs#1h`a&(nITE6fbQ41oo3V=d}==tj0Kok!!(2OBpJnLo~2$r zD5$ekBb${&<(fpt@nfE7%b5j@#(-DPfqSX3p}4Pc@C&;ce$B|nY1jF!HU9e6tiWF zKnt&7c9IlEH*aGmdk?Or{!!K`w-wdWfHC4zKI|S3zr+-u?k^IS=buTA;>J?7cYTg+ zLVecun>~#`WMyrphOsr{!|j!_u{IM&ByJK6s&d$9^DV~^fGEa(l|SAL_>;oXcV1g- z&Y<8l2La`S&|J`G&8xz`1U2b0t7yPRypENw1aw|z-3L+o!5fXiOa~q%ih{}vtWmww zAe-ZIJ#A}F>F7*NrG))1v*qKREzuL1&`-PG-X+FG>X?zjx?Ns$0DKWaLy};5Zxc7& z^}$+8CMU8-6l0|6bl5o`%;SHE6#qi1uE&TbfHj3U4`F?3N>cLgCmk6!I@S@c!yfOo zc4UC@a;QI;hqC%2L)aM424)rBDeRw-uhrqkzI2*r=r?wo_k&fB1pzMt>zG3rs)F{3em?!-`LDUTOt>C@Qcm{43P1F&}GpL8-_@ zq?URZi#JbYTuFGq=rRIj;;x)$TFqx5KjCIRR^#k2_f)pTr`_!hZRa+&6*8b$zF3;g z>8vtvW>ZdMHfbn)CAQeoeYZy6ZL)?iX-+IoA9nmiC0%lJfeRBiY?pfFGV)dMHF2b9 zlbG}tk0BSDCvx2OF?_rzV~|#8BVKX#>$IXps%U{?q991Qx}_Z557=FT0!RgP7(B^Vd-vc4RWE5qT_}CCjJA z0<0lSTdhqf!m;XLA9%Iv9E2UZC4`e8!>)D})C9ukJ3`s_3074Q<5gncl!7w8GM^&{ zc3HeP?%JsA7MM5Eq(xzTcL$2B0;HYq;;|FrTtsI`JX}E}MFEExr435?L6wLXF-xX! zc%4?z_*GFzZvXw%Ace!@fDAKg#3t)qUSj#cvl{iL)`#?f1*ne4AFrNoVB;1!nW-z< zR>RvGUr2Y9d6R8%4Dq|X*0S-I86`eif9yP?AoS*QADdfI7~5Ot&bvN^%gc(qQ^AVx z1fHn4hCH84ug!_4KXb@rkCx&&LXsd| zKuuSz_nRLl9!6-todhamKr-yafL&3WbBk+zUJZc{7_KPEg|bgh89wR>63vmkT8!&d z9-IeugWl-7eEGJA4O?nM`0Febylu=~{46z5JwTFYz~&lOy^Mk{dBi~8Gjhgq7S1T1Z1 zA*)`j^itkqpHI)IT&iT*ZagoW9R#FvIw(MOa(}$Wf8O!SrN$ehuW!-!oR}WDmZsaQ z^5I0MpceA<>x~>Mb6_Q742te4&-Ud=m97}Qn=EaQTmt*E6;wF!Nu+ykzS%{BO5xkY zO5nqcjy~HP;~~VFq^2*gg(h|hi@Tb;Khj``kq4+|;L#J1Ez1u8>dmxJ$X2)Z9R7G!I7bvKB5-nZk7^Ee z1!@$Lc(D-_X@(vBEVYz(7>EYdB7!oGkZ@oMmBu!)Ywdz7rkd$3N$&Z~Ubs#yNFAh? 
z@a`t1QE$&KS1)~}bOyEkEg-3&_5A$-g2WQE$r+T|w}KR4Tv6*&T2mh4{%GG!)_X;i z-$RL4F*wCVWAF75_XzJ-{*uzwE5?U2q++rdUc^I1r_g25llKZ}bB(w!V=2QlyjSO5 zQhyAg6KZgYgFi)VFDu#TM#ec*D7&sjwp;+!s-Z>g68z?RZP$dB4RH>;M^1Cy01&EL zR5dCLX3Ew`W!H=B-m7AI8R zFPUKPfOU|)Ez44X}T*N>K)VOk(o1BZieX{)L{{;ZM$+EG&V>f@% zku5vIcbB5zfL#`dCN4V+Us3*};&D&&?Ie+$ z$G4|dfnX;Az~L4ZlfdH6>kO7XGEUz=_Z$=1d`M#7*l`W%zIC@{pdlLWBEgJ(3Ez$O z^bUS9ES3c^_4(JyHuYsMcJz0u%>Dlz!S@=`gDdc-49%<<*6j3c85GnrR*ce>bgN6^ zv#uqj=5+8Jwl8v_Wb5v8CN2qkhZ6y#)iFDg zsU1-;Q1Pn1>f!E+H}RpQ6+E-E2+I7TK%s%^%8xd!o$ku~5Q!O{0gz&JS?ee@Yl-A7 zG^>9Z*oIrr0VVEqv$(dc#Tv+Q>5b$siBgiiAssTiS1r>!PV^CGnfxV0u)sLRU%C;d z71nt45JAfE)Gc9WOjD1K&RWX-1@#)2fuXZ2iN3pLIKQD7-`%kbsacu8Sc-SN>agB| z4ZW~4k+drAcfZ?qlaM0&hVL0qWD~fiuCI^)0 z;;I~BKurok{x_Wv)XM}MTu2XEWtN}UGlf>gIthm!!6_bI0@=C69)yH#Po>7ke! zF0{p@QKE~s4BkwFZ~EcjdGy2KRGTPxYIyKlCTvinRF-bD*v}i*XNhPz*_DVSN9Is0 z<=%A01Cu4Wd7sA>lcc>AlBnWfouqT1YGraL`<*irE6R7oj3(iY8#sfRS!cKNeX!#V z0^>H_rAQ#G*R-ZQXP&W&sT!IRYTW|Sy9C9eRVO8>L)o`yZ$h9$HXuJ~V}r)IJ!lyP zrKJyc_^MCHCE)N?5+_1ooz@6&7Ro_~yXeAH2kxa3hc-&%`s zjK1BIR#Y*iD85N?29<8f6ed*aCLmgIX92>-Ub8g!#&oH#$V#KbzL2spge9qBMcQj; ztnTpy@K+kpi48-R1IY;36t=WxM{S{MMZIypOK;}sAKw^PSR0k)`Eq&(ZCr%5Z! zC4$DG!`_*$)VpeTEq{y!y_;qqvdbLLjFSb@tuWMw_xvBpc z6g6!1SD2lyey?wJY~wrSV&xX!nxl7mM~@e%+TfGgSQK8$>mCnb5K~u|#$$R4#7pO3 zIiuEj-A5*kh7S=|%_1lvRToz`NNR-G%8GTSR>J5u+#_x_cvRn?{36ux<;uD08wxCW zw?WxS;Hq5BB`<(m#|T^K-9uje4ZUyOL!f{pYFG`R7N?xDDx;RJ6pTZiDF2XdTX6Ou zOz`ZZO$Jo%j8&v*{?w#j1y%*OEGaPpkke!t_18%w9E@vcNRxI)OrIrny!ifg3H~~8 z=$D5{(hWB#{5BBIxq9wx+K;%?56SsA%p}M#=i*q==a$*;$HLiVgA0yegHFo$Wi*TZ zykQMO8ej)Fh<2d4U+B2RobN7JbOXW4xxz2knMM_z8h2YEH5Y)1tdXBxcpoib6utSz z=D(N8!C`QqPSY!>V3z@)uQ`b9;nf-bW7-x=Y`FKHR@4u1Y^^; zZN***!8Mmd^c?9oP~ha23;^nHe`wB%j<{-B_hrAHV%hOn{s>UURM~v_WFkMPnTjzt z*iD$ERxj&`*64HJWY??f{!M^%s~6 zN#qHUYe!A;I&UW%EVjOq-T5fBd(F?{i3wBJo_J?(rhHSzR?Pm>U zO;ga7tF5+QJ46z`ad?4%5xvQ~-Hnp0?yC_1oKl=eq&A>L5=51AK$XCdg4Oh+G2g=q zR@=O;=g~6t;jyrkn&WbI9kktYJh4WH@v(7gC#2ZXtY_MC9>d$(xLzcN!A`vV(I&I$B(l}4;Or~Pgh?=V8WDI5QG($Mm2B%H-kIx!f7YrG>r*)Y=kQms`cB2ImynFq)!+&d zY>?sA!L=qa!^L+-?C5v(srDi8pEu&y_Eu5F1O#Q8yHpfo8xLgNLH&=o$aD-+;$4or zu2R8N!Wxr=>j@^tR#O~)#e>w3nvb^diqv=s9KrO;8!VireRQ77CVmwKRox}*FeM4) ziAqe(uX0vUf1e$}d5Gz)y+K@^WqNqvFtuyKy=@Rig?ED^Mxd5*U~WWf5p0)-kFd?$ zOs&JZ5>(rg_U~y$r%d1;a(+Wt0IyU-n5FuTUyOHZpnKDuxn%&XSHjwSPo@^%fjq0j z5dC~sOHkJ{r+pTLdl5iuBy}b$f_)Ks^s|&WvN&F?3=78Rrf1&JKA%!#y9+tC{aoPY zj`HcRjVrFReQi7WqP$yiV=wy_CPbqE`_;6;nJd-Di-UU3i=eP`AWu-n`rw<{nuphL zC_yTGv6IVVIn|V-c%@y^N-xYK53w}|q9Ni~UgPw~>oS^r$iwak z&Y&kQtQ`%_s~*5li{m$M7rHZGi^U{mq`09nDr*k%$hCV)9ec$9b7w#?K>Kju^eXV* zii?+9OabJG+VnF>>SeBf1O1o)4rX5l^+fSMTxHeYHbZb^!xY!*S^WvA7(g?in7gV&}#8l76nxl(Z-IuV`J^0JE?5}Q-0E*Wkp6P(~;%hmV2 zsp1`M3HP?L-siRqTt95TYOn}QrIES;@~lV9_T%HyuZmuK8{}wBO>TV>v1%+lOXOz2 zX}t6`FA!TZ9>#UAcSC6Z4|{JO4`uuIkC&24X-{P>r6MFn$WoFL%9gTCkz}3h+c4B! 
z?vkxSWsML*_G~k4_9R=D!N|^F80*ZKncs2g*604*&;5Mv-}8LG|NMThUj1`%>RQh0 zxQ_EU-pBiRpM*S{{YYymZKTO2I#4;*0#(_NW*|}VjAQx>4$GnwacR{u6PetbhNf{d zq?S38L8v!~1J<-eyg zC=MZ&%>XYdzg@)JZjTiZO~p$lJ~X{Peb@AQvP)>D zh~A-dH3dDD@!nx%vKKU?)(Q=MtCYE^oiieY%Au?-lIi2geRX9fH1mqb%1$151xv-N zB^n^NA2ct&9RJo})tA0n0{eX#~k0`%wgSlr!LH`rESENYAVjJ-5 zev6lNxfw%y^*{nQzb9Fjwt9p2=QDa>$lB)`wRBca-{va~sc{*;GW&Z_e&pNjMh1uH zw}d9985gHx@mr5nM;#(K}~|d#1+SDGylg-zj?pXJ}HEBtKl*7b!G<^e!^g`RU+1 z4~!8CF04OcX^;iLcnySgf_FPiqD|CxlV^5j(;i;tTlU2ip>qNq^5(2HitK_vFJ8FU zk|3Bg<1=13s8bhOm(p?VuA)wNy_!Jn!Fq{W2+T*LW_&IUeO@i6a%wi-ZGm1&=(6cX zC9|5Xd=oBH)-T|pMIvR8L#;l7A`ONa@rE@Bh(PS~5kn=*Z0=HXmk+aEfEV4G-PwP>dELZqsj z5K*J*Wq+iHbA2fw`Aga;f1bg=5lvZYLe2b?1sMv2`EP-_ENe*ynsF6Uv!+GWAK31F z=C!HsVXV)%lpP@B%1}GgNt94N4lR4S^ke~lEfqSz>8D-i-fHb@e{=0)RPj_KkW8PO zmVgr(wCsqj4w^mL#!SEvjf zS}iO_d&*V1dY?D6-#hh<7n8M6`q`OyC=)a}H)-9uN3hKaMC2*5+ipavjsWoTvg2s@ zvgLdyzO~sJNBE?lmQEixv^9AzgnEE^D!)aS>Hq>hX8^Ih#QZi_mzIe$%qSk0s~B?; zep$iqrJ;5F`^pLmbbfZap++)%AXX)7wFDfR$unMF_aenM`c|D5Beul+@S>9Yv1SA1-G>NvX-i2J}R9v}WWE zXWVfND_!;-4E*f+aZ>b+;Q&+65C~DrgFY4cY=V7p|4}ytb%p9~Yz{7Do0ePEu!*l{ z!PD3>h|sVy;<$M;CHlU6h%~@JE*Az28IvC7T>WgY2VE>ct_&4Y zAa~7;6n#133{@yOd!2+c>pcJ;@9(~|!S&FbVM?&s+nxuaeEXX$9+-Z_rNjYtpTfBj0Hg@r0k^W- z?3^!QZI_KC<&?}k>(UmpPSs^tO2`dm-eHv;(n58Ut};6coxJr)kTzl~Ny@{JXZEKB z2}(Qi%meM*AyvY2zgZLF4g)v{U64q{?kb^ySvrb*JBclww**M?%fSFMcKCw>jb|RM z9Ql?ms>GOnOy6yyD?Ncqb6&*^pwdrTVj7lH5i)yy4iqv{uEZJ^*pFR~K_`A`c!_PP zeJZJfc<}mwG^F{HdUG#$N6r7WcVr7PNj^w~hGA9my*g9p?Y_`LxzEexCV3`Es$h^1 zyz7`cgj3pj1kcyB*KMQyfd4$wy`XRm`4gA59f*@m_9>-n<{Y?DFF$pxU^)?LJaB8F z?mXlM>ECc&G=FhO*O{-GovWBg4BPVlejGi-L|_H0ZO?28cB)oJRbW$=ZCiGf7HQoc zxgk8jiv3Si^Pp1s>ZR5GgKSV^Ij6}(_B^36LsswALu6-U0mknKB>t92IP|!iBwdIV zZ%$L2nqNt&efT0hslb^`2^-yQpXW z7?4T-!Bklk?(|@H7IX?$m_4gNRP^cvm8s+;|IE>;fgDoJoDcLfIDS3JaoK2_@{({E_C#WTOK|(1qrq8BvF?of_#8Zds4ndI*Ix zo<=K`o^7zIFP_c>-LB;LcP*i4VXFp8?TMnHuE{cI=WhD&iC>7 zdpg@aLN5(83NK<ED}SubJ>(3IsaUT(*>;=P-P3Xg)dx|#Cc^B z0F_FJk0Y`Ix`y$$zMq0}n^n1IvJAjo4jmR`=?N@g{mC&qN1Nu}v`9_}W9@!1;^%~baXv7xznix|PpF*TVoC2wHX>mWg$p(ZUIHm+07AS|26zpL z*)LP0o}FDJUt^$C^n*Ruwjo|9g>%d8qGjL13``xO)L9@Q_1Lu3vp-kh8z6XZPXUZj z9yO8-B}kfun`D6E=I?NuUs8G?V+^w^5G!7Bq^{@a1QtC1?}{ktYVh?~Xo-t=V^m~8 z80YWVPl%F1n~q>;P+xHpskhW1O*4&bvsAUN@QOkyr$yeIgNt_uhvrMWe4Caplf?M5 z@+Zx4Kl|M2`D)Uehc6RLJCYt9Bc0BmoxPs!l|18=;lsEZWWd3gYqyc8P)mU3WgRUg zhVxv|O1)FKXCdcmWTuAVy(+jH|5hpS&k~A%bh;-Z>?;sW7;NR(z<oi z_8>SXXlWaKQieFEw|!Csi*BXHk9}^QyC0Z_^c0O68ua~^ZXA#`mk$;~LnbL3!(QGvbn?3Mz%>=q^PV2&dx$u}~?!3_``%HyNf2N<8kZSy_%xu9n# z-n$Iu=-=Gb-}o;ruf_7-QoHJpY6d4H9oRfS~HXcj#}M_=#88}I3g1z86Y z>MV(db_I~4)QzfkKD5@_9c`{Mtb+O-GaX=a0SWaA0j=Pz!2@}zNl4QbXgN&@k9lPr zuOKkr;0Z%Df#q?*ch-9CCa(RycfiQOxgG~h&6}kO_JNp$?ZNp9l)!KDBan?+Pu?U5 z5tjy%6#x+ic^O0vu?f2(I|!BR*-~8K{8iSg&Axa{$IWHfIbQovj%D5U(mTTuC`Wvy z;^>=?ebC#yVT^5MY3o&xJdpL@+3z$uGwzl5P5yBscj-_M#G2gROm~a$vHUK3ew?(K>G{JhiLo4A0xtUXJnDh)n({iXD5cF9^shdeATHN+bWQF1QU z)~lXC!R>UEIPG)maZ)Wbpq`dEtAXNDn7;T7R!3UuSJTJj*xYlqGPx+;;0IK*_7(e= ztM*0uZdBm%!yr|^)3`OO>w7m=wk!?&A)Ath^zMjc+>%dQ@g8Yc(|CULVDgJV>GlII z+^@A(@RY^Zy|5$VN)cr~+1fnYwj(afZz~2!zgA6tr2rqiQ)-LSCCJkcdRAbz2AJNQ zfd2o!qlw;7XBu>bx~jt$*}+Pvm^e!F9kYHiJyb7bS=tDd$Y?#Lam71)ap0PL&+EXL z4nZrlw3ur8ydnADUPnO7ZYakJ){VaI0&W<26e^@vgvMEDhT_qKruHgT zgqqXkn6Y-<(yJ2?nqDx`7|N|jNSv0@E|*fzDhct(*pKEu>n%bOaMOp+aHl;&_fpkI z_OwEgf$6oa*#=T?b-5VVN*VsSSBCqDuU__A`D1z=f>ZS=fqxAAAsBhU1flcD(4&O# zquDjkw7y%GG_xTWVp^RbNFSoI4_`tWd8h3qy~|x^5$6H1QfJ5z9F+l^kL z$7#UqV2-+IPR5}USO@wM z@X$k_z4-;!rW8`nl(uN=mW(JR>Vv1@WNs8@H*GG-^kQ;snCJTpO-_^s;opfqX%o0d z=Tre&`$LzO^RPoa5u2#Wf0Z9lgh!-|Bi7 
zj~AttfD@5brR`YBhmeY_wVn~yMjuqRl&UmEmTI#6o*Y-8sdDZe6LVWgzSiea2(GV* zU6D4lw)B;ngyR|~=|E7#>u|Uf01#OfP_8M{(oC~P4rVWRJjd$3wXcl}br8yYO?X)6 zHgdi#KC99y-1PHgn`7uE&d#7id8U@sgZKoZ)yL@yp5aa=?QOE0%gAxN=Q-m@d63)G z{ThP?CqSBC51Mjs&M+zvpPB0b3Xc%&axzek^l6i-eMZ~y%c&!GQQRiZ+9Q(2wAMG% zsuC~x4qx@19&MBI&dbV#5Bs0n{r8M;Se*7&6KG#Ia zI(=*TK1WUmu7;kLZ2yxP`fvcEGB6W=P7DivOcyIZ%#P|mFS&wc$U!ZS%>44z1t6K4 zNBhoK3%M+$a5vqkH%vGGNB6B!2l2g=0R{a=BYr zf?Yuspme%8k-SwVoZ>m!LsbZVC`z(Ab%ddKI+mn20%xA|qy)d$w)#ZEsM2 z+7X`-Zsz_Nce9aGcOKqmmDUGra$a@&)b>@(&OCT0ok=C9$+I13Ji}Pv*^6M>Q&?4~ z1AYroM`In7dm2_cWUdhe#N*Ls_R^N+9?{u`mV>LEpq8a!GNP63+gSM6sid}g6~m>P zmECst^%ceWbJv^p)QykWY9Vw|k~GbIU+E5Q%c}=b)97?4$r@+inV$7(%Vottu-kg1 zCw;oYYlwKNsLM(?9;x1?)O7=y(ukJRN8(5-2v)XaPE8&7eTJuE`nE)p`R(9WS15t2<&thx z$_UT@{(K+t_c(Lwdzhtx=QnawM-)t#m{LX(z0`?Hx7-Xo-zZKMfEZ5zkafeh%a}<2$jdpYD%q z3?=nCoQN2)aHFFxOW!K>(5$O{cmOkfKJeu<)JanDWHAgSNS z5NA@NXV9xkf7c}o0!%eAM`SSzNM+G&1k$=Qa*wsCn?Jo%qK4btV6Kxwc{>N0?k%a6 zCR744JvM)QO=-85#EJT6_-gLbn|-Xd;w{GuQ{_3F9~W@%v8!)43aR<3s5`ep)%Mn!xyy6v|kmIV9ntXBy^ z4iu&KJG>Z@)irXrbUz&IGajhR6A_Ocrw_lW9$D^kI3^2}FLRZiKg6 z(Mg>Q#+FR0QiHum%_sXMupV)V>tEsV^8yW&9LSy$_rlc%A2=QnLn?9ky&a~MN!+t$;!*>D>lw$qMuP?z}-2t zUh^-5)kA>nULilS-@EM6n2?L&gx`jel7iVB=doTl9dGw*a?RHtq@2f<9Z!;bfYH@V zdRhd+-sY(Zl`=~MyYBv&Z_%sY@kS|YXLBZ$CNA1=hO;I%ur>lSrRCiQQ`b*vrZcI> zp;Ms;LKG`%VPW(t(>GU7^15|6++nKT$1@1{F{8NG%|}}O@3Ou*Zf6=14xv*@6`bwf zznTrNPlxBQ!_Bta7#-Vi6MvVr++kQFE}JdS>kE|l2ooN&3@z7@U7P4=UP;e3`DHI> z-;$sGj$SZ0?H{}|Q>^mac8a`$wLqR8{btw(pNTu(*hyA36VDh!(-pzL?`1xpjrDW9@&PR|Pztiv*14drSzf-3M=Sscw7!PZWu(ZZlnxcrAM~!3! z?-t7#j{!<@6@zqlxNpzFTtVvg>SrBSDVpMsPH{d$zlf?@FHBq-usRHexl!<|_PY;Q zwfGU&9bfLU;0-8Q$C66Rch#6`kOw8Bu#-wn_`MloB`u@4nRIknpiI|}Jhv4?)2+bF zrJL$yGW0v2+hnw*co~c#L~0@%6Dk|qfr^PCm|H7d$B)^e0}y~2;rix#7hB~{5DgCw z-eq0z9?=DMgJo`vv7Z$xt{d|2|3u;2eq;Y`WnZu{3C_#&*k@*ac884LzUjT~ zpe>SD_b{E&GBOEbW~c2=dLQS9Eil^SSnbTPnPx>?cM&OrL?3N{MNxGO52^GV!$NmV zh}}BYy9fgQX~x|S(O|I=k1;8?OsJsXxT!V)DS36;=~uKMFSR~aG8u>-rJtB-pNi*4 zIE0N7CmzDm^cBmHc~TVPQ$&>tGPq(9wnN6g8!#-3oWL;*nZ1nnR(v~@5Ibq z<$BHdpgxu$N~5nEX(Fz5KRm@ZBH1;9eRVr%|3+`6xe{tEI>?J$S|C+HieSWd5z#v& zrTq#Rc(=B>BJyX_+aXk>-k9ZW;C>WOEqFF1e7?(SpW8;AJXWJ?ZG&;M2#fV*&7jLjr#B>Yh%xenfc0Re{hpA~g zCXG1BjW@T0m$Wt806(k}`%3(PwR?l5Mqqa~J>{{>)h71L$8mk>jx)fqPf#AdaZ z(^#nr*64tRyi9Mqi;by__}@sGk-hK zQ0p=9C7K69FAEuy;t})jcv0c#)koJLf<+FMyZw)jk=-gr5`+}rj@q%3w@+y+Fk%w$ z(by|n9on#J9djDkbWOT7dBO?kTlwDpiAi@n-z1E}>w3~hxO3Ggijot{ux(OwOrD+Z z7*ym&g5!V~tI^pH&bwrv=-@@922)hrUL-6)<#soSLFbF`LZK7<~^O|57-H zW0TfvKr8D2m>*vASuX!=xqK0q+*dtc;bLG}nC;_Z`Lc(ic$*>!Ho zQp9!~{V`CDe71ho@YQBwJ_&;c+^w~j=gFpYs5|{`m)|DL@80bfvB40Qh+a25-Q;!Y zY&LuTTE#;P28Z=@s|~vWe0Ttz$gWEM4mDlzm}Hl5MuGjOUFJnf8Idu0>@1=Ht$&8bjsrZlI+gn~)|VuH^7;ImhX-PPJ9jJ92bRkMS+b=Ml&nw%(Kwt{^bl zn{sy(-b18ka7u{Z=y062DPzQ-0+MHLwEIV%RnEo|@*4&};4xki(}1K@xOb9Fxi2Wp z=m8WYgv1D4+oXJ@ysmjJyS&pi41Mp#`nc;R z#V0g`C;T>fFa|y!kF~}?=F;|xq_q>4NnkM(cLW}iy~$oRv9t&n{na|IUiAobOQ-1vIjm(dQ6R5~n7=NYlyPVwRQ1Ru@!&6v&`;~SAt7UwGTe4Eh z^O|SKt!KQ#M2*|Ku0Nyj?KKwmuabEjx9Um~hGR`c)ip}(-UAl!b@(bNb;eh_N<6&L=NN!}lwc;T)6%yC*T@`MLVP@8*TbJopXv@bvcC_ggoO26 z>Q;d~?%E(gosN&?HkysLTHuXCz9$%PvW|&R!&&UQ^!5{zpSGAo1Q8&>7*fjuvP%TE zfvv9l?l7C=7G8DSntM7|&r!rSO1*#47kTvnd-z`B(m?!P#9&_K`rc)!SJ*hh@!9j; zp=$j1;k(*r9qWSfYGSUP0JhAi7lD;4qQ8DQn~;7tv`0TgB4KZAO4OtG(7D+QyvN@G zT9Zo5US)he8C)aCCIc^Jvm4L!uD3D zsNnkFCLKw|=$?y@lIt@HvJpwBmRTCu6^1MUt8+KQ;-+5igWT)^RfzMLVAKXkgVSVl zrq3}h*}f2+FXk6vc%AB1(3*Dmv`Os~@Du!xd=3fbld(7E_2>(o9KL{+`*ztpaCdME zX}M?V1Rlern22kC#)BH)xdk$dfj0*W8293ig;uj%-g(5o0CVunSi}__F9@LXPCrOj z!gxe8i;%je^FvB9ES$&-VNA!7446%$G!lBXa=QT2Y{6-wj0^jH%rPot6O}>-`Gbq 
z!6lK;UB&Q`V3*3*R!$RCX)}~axAaYXUVK8;mCE~?>?QG|Goj+T5z_cyJFUabMVO;-jluc3#5WU#YFLlOnb>P`!9}o8We_pHADwgEb((Sk9)h?G>ZU zam=n33PcUC7;)T_GbT>v)t$oDd39|8{0CJsS4s|q3UXnHgD!6$vV>zGKK3ZgZ71N6 z^Z`rt$du_Lc(Wn>nEYg@{q%sI!Wc8oLcs>LA}=`O?Bw^m!&`L^W(ZTaTR!VKOVN}J z>#=@>b_hQhw3oOvV72%?34RTP56b?@^pEL9ZgXO-A4IQZnlhbDb3}*&33CCiHn%taL{#I=9eMnLsnr(v z1Os{L;8WmP`wDpw>0R7ln=`(_JQ*wG`axBr;k!tghdT5@t$DR({Xyby9}c|ON)7jY z{xyW6dEnt@^-vhNw|DW?+QAA?@V@>u@|qXl_f{@Iz`M7_rclfH9IF$4!K0ob!rQB-w zYiTPk4%7mFi@HK_6u%6yW4Tg57u1xs;#D z0NY}j{1XY{KOKi%KqkCx&E@$z@gwZWoGsqgOA$EFKU&bgnbw&f-+m_&DPK}o`eD1s z+4xqLylGy3VCn|-2LDxmNighhsfOWWulSzxrLNxN@t;(uqP+ z;XFEj2QB^Cg8qBV=^*2Wzd=s!ym8{gC62hyk-*ON<*SZB|I=~xet21aqu?$qxu4xe z=4|n{bhg2H{?UT|Y+7mwi~rZpp8e2*n`olIqHQ5(^Wy@~GqUW7M-E16!y6SUW$Yi0 zFH(^S4~YhkGP+0i@Y0EtQ{g=7f9He#&4PY8r~gknrz9G8{E`;HUp?3_NA+ia^xv+5 zzZ}({{n3BB2LAVsYTL>fP=$f;%N5!0%mLWuV?+@k3wR=* zjJhb{tpG*P^CP|58*P?TXXX4H>m-Pd-d+kt7_VsM!odi1A+D6)1XrpR*D+O7U`#X( z7AKnS?_OA3P6R)Ez-O{8ElbPUOxB)2;`~mskBm4(d{?6(}yD1pPzR{UHs&OAcY&wC-{yT4QF#wP@9KOcVRpdAf1+&sw( znT7)p5-LDt>AZZf9|^X_G17B5kJH!4Q`}GeCYJhkA5kR9F!nKqv|YdB5qg_$dfhpk z3#ZYKXFLW=X3D;J{-wwC+q=@5bAbr$CcQwRBzuXnz8|vAYrYh4hfD4NQt^JpIO3}| z36IrvG;vdZq|UCgmOaECP2FAExPyt>7_q+&1fE@7tsVf1i^_d9`SYo=xe;;#ojJk; zw$RY9@g<0(j{|wXXDA^!2$URVa>GTUjdA+wIUeE^JL!}Le(I%zwF8Bf1#K_Yz50w2 zk46seA>Mc_G8T#sYdFe{Jix1LSdc`LZR{7H4Y}@$&F)Azjg-}pE^lk}wP)4lU?GxV zF|E)`uXFHVja*-(g{=v<7%?Lyd-MSDkU6R7Ce>9l%tDQNW2aGzYE6Mn>LK}RnVQzE zn#ehYF2DQ0TFpgl`neR3umM770BD*R0JKKLXJ5Y!)hG0V=GA@Rul_`bT|Aqq29c`j`60j{dT%MrD1 z{TXQQ2xUXj+gp0xeB=rM_K`#7lt4lCNY==RO~qUNO9Ax>6`yW%GdD}%CX7$aO>a@* zFP44>9KY=#3w8ze;>Lwcp7|h1Nq4QLgrN)A#_KxO*Q1nL$P-uQdNe3wcWD#Ry10*) zodHr+1>7TF&Mm&Ud*YCj4&6Jm6fthByxa{em%zst6g@6VUG`|(!?MZmctOUmmmvy& zGr=}qHcxk<^VxjA&27$%Ws34gyI~i6zVJ{A4N>%ShQq+q}Fx|y*Dd-Ch@bM&o$~put}Q#HTswS&P+3#4{LJ9D^n8J` zOnc_Z6H=@TqS7uMUC>Tx2Ji~vIGji2H_KNRw5QY<;23M(D$ON7>QZb$dp_HGB@`3T zq=7y}O^P@dx~;0@qMOrMRP1`0j>E{@fJm1aX}HMuH!!>Jx7Hre z#IXwspZE>4g)7;&q0eZU*$0)%zYM5ZE!s@nd9WFiQ^RL|+{i}3i+p}kUychQl)Q}n zM=b~@W_24_y07JjkC_NaJq^l3Et{XaLVU=RaeV*wk_lr260Yve0z}AK^>DFqEPX z>HSFX;B!B`jt%nUIWuSh2xvFD;_CU($YQ3IBorHH=ZZlkXWY$KV)P|UG9(0e@Uc@< z8RgytLf3W>ees5}O;Q?De|#@dB$;P7iYm^13u`JKT8GadVzPH>BvSYu8Y+VBeeCO1 zx+xfrgn=E4UMwUDyueW>?QK7;V+|Xg-bprt_SFEWg(me5J(lh|!emZJl*y6oI_2IB; zx5L&{UZJ1&^4xsUxhWSx0@+x5#^R?FVqXioWP(8POP(*2@Tqrbs?BegoTXPdnf2?p z47C=Fa=CRmsv0|dzn^uzswY3$&?+%((Hfz(D{su8`3&{av9^eTegE%gARlHRcS@V@ z=M0==174;hqBF#&MxX>1mvJQ5U7pRS_R7ujWpFu>OC)hqnf_&u$^zPB94UQOl0ad- z40<1g*)+2ROxjaxZ>@h^SB~vRrhmsUJiZ#uz?Zs|WS;ST2YbP8HL}aohh|z%JIms# zCYds3Y|(-8AfrM%yy;Qqz_+wsI!ZCWH$`bC|8Nqcf*%D9hUtCOfeN!NKk@Wa|h;L@b+`9Ief5s^1g75xP0SevUW zNvr*+;%r?+nMhh9y@evf8cLgde7??FJ{Wp$%b_QO4;tg_2$|}E$Qf&$LeV+r!e;-| zEhH8G$ft^6wu|nHEgBz^O|4yvNDsLJPLwajqi|z1Ta`twco|q=OUP^qMEn#w+U4en ziO9(I@N5u(D2LkkxBLtGyZVj7_R?ehF?6?-YE$Z^eO1iC-9)BIYPJDR|6Df9qp|=S zkvbZN)_-(#!{Wuh9s)u`+oyjm-fPz&P52JLV9blI_fo`|jlQKXx1+yonD1!3P%gW_ zL?*^-pelSHXf%mHf6o@ELs_*Yi>#-lt3P7Yso)sJdKQ_Ov`5!jQz2-UxPF@`F?DYn z>qH*`e_nsKxLKs#0wn#f358wXN941fPLwL0hx=;5-j*nzMv|gH7XF zBY!{*5D}5P)Gz#;;Lj_e9gH8!F7|}f4T{5$TCWF;5oXV&XUztz&HVHVn_N z7D@XA6yjYr>3NrG6bof44RGK&GR;~tNi8!Znx^KvIs=J)J@3rSty4!;l%sN#ug08)Lw~S1gOx=l>cK4ALc@%1! 
zcJ#jX1H{$TB6un*aGG=`3Aw;PXNr@)4~{Yw#!?!=L=T-@^w2sG7sWctS^q@kcI3W8 z>Stb~S!#sd4JVkSthP(?`D2eAdka~)1}A9#g<|JBfYRLyptW6FjME*AP#=j_JG0A` zQRL&zs+}1Lyah)vp~NTbXDBRz#HfIF>rYQ|`cBr_?Y)Q?=@-C@9*1b!m%h-WbMgQ7z@bPfQ4{^YIaw@X-G83!8h7NihaGi%J#N7r$r)Xs<=|1bly2-h7r&`r-I<9&R- zOjdiR+-O1PWfC#?Bl=p%bgBXCr1ABUZ6PM+{< z?h2+2sf-fNA>4YHUcp`FLX<7Fuz;$sQwKD`h!LD@L(mGxAYymyaAZg)4=_7yA2ssN z4SE`m*SQ4n7Ief$>N;WVrK_xh=}rpiB3-ehoE%l;sfA1+96HNddl>u27oyD0t~gg% zR|XQMO^!FPdA;lWl$M#;P@rASpbAz-|+r^iBmix_wQUjjt^CB_~ zW_IzUA0%Xlq2;(IT#De0?=?%F<88`TNWNDVu)W-JLzi&4xJg6yD3FyZ<|liN@&~^O z63ajJ5r#q=rCQKV@UXSQvSSWddyN`KVZBhYCH%tb=N(C=r*_8jSPt$)X8GZD_)>*r zy`2z%4EcqgUfk?#AhfdB%NB3o^DwogmgBA3VvIgMG-nDAC`o5pv_S<`tz=jeZ4JM^ zgW=io7@eSIUuTRDybss#?3?|}D3p|(8$1)-5d*&vhPN;jN|2fxEdP}1inWi5wnR+p z@FPIbg8QaxqZcm=Pp$3OQabK}teW9D@}pA*6`nly_1Ovq_cCLBuAXow+lkd6+%R)! zD@Xs|zB1CBGEi&7;GU?6VxY5T(yWLkYxqbc)45c?E zImTL+c^}18eVyI@96lEtC>=a~is4a6oz=kUpU-trq}WZrYL}v3600w#Rux_Pr*RPdi~K}ZaRo*_QPFJ9y#AJ}JeRf7iN zeit8ysdg12>xZyaa{lyFpY~&uBkN5Gh?xY`9Y5gXRyX=Tnmguvg;J2hY-_^N3oCwL z2d3b2$zdf^KNFEB!V}*LaK|7C%n$Fy&JSgNYLY1_weYfEZz({}aHm{gg&DOaHj5G) zZa&^|8F%i}MZ--Lu|Vnje*KZB*ClS-{pSI&58&D=KntJ$e|%lgZz_P4?Tmu(75-Yk z7mPbxt(lG51-sa9P3AAr1iH@ghm%#KM{;bNhqjvYaPo=V#p);K+keS4CZQAPrH z)^v;T8vQ94KT{aKj|{B_4p7U^x2?-MiJNKaGb*TbsF@)|J*PY6oYV5^h$Vq*Ye76d z)?N#io3_2l3gdM%xvB2hjJhw|7oD6l;^Y{j?-rKehHYD)LSL_KZv7cMOV_3=N`Y%K zt%=EF%kCdFb)4OA{w34rTlRDFAX#B($NT($c~OgJLo@_W%y-`rvz*JYswgLPd7Z79 z<=tiK&TKnwP7BJq`Tcfyzo|!mSQcmof?voxiIdp3pJ6!%TVYM-*Lus=i=-)6V_(IbbxD*5mg{GyOkRu5BBD3F}WwH{U zIBD}BGrKMpF7-XfRru#Cy|(o&Z6r;enMhYtVnnyPSM&I1rJ7xs39B3B*`Va?R&$a# z8RYf}ssCpy46dS{sf3e~jv-+QghQlep6_sk#9jwEANH}7)MdHz7FWp$BotD#u|y&T zCb$vEImgy`qiP%dJr!6RbA`f=dXXgXpRwfjDLZ_;Lic++qR=3O)FIZQS z|Cl$TOL57MsQHiIB0<*zZjb(=%T|T(T*9!){patM2a8k~ZyjT)eXuv)pLsTtn%4npE$c% zy3MY zy8=bnoclkL1mrz#+dSV^=d&FV#UktsSf0~i8AxPXUCr3{OJCN%#?To4c z{lFoHMVL)dhh+LcWLN)oLjylDgy#@z$V9fP{9F$u{eX+uIr%tt;pNzCVY?Cwq7gIL z$fG%*;o0U+zkug!0o{AM^E(ix#E1d@0T3CJ?C>JTs{fC(>02<6t-NSJK05^6u2K2ksoxF|Q}?wW7obXe>Ww|a zq=nz?#X!iXoo7i&H;fOQ3WX@FIi{+>`u=l-=ev5N`?+@jzOVfGWp3Ji3q&T$uUJMm zJ7f^XbKMp)RkQ-M6VAAe${X|fC=CY9m*aVP1GeK@XP<1@uk{e6%~EG%s|xJ3%fzUcB%5hb;FMqWgagKrD*N{#?cq}^k)1}|ry_~G z(AclFD= z7xM-^L315kk2^!<>Js-C>W@@WzmutryHil}{Va7LD%=^Q< zSX}HT+lggctS`<)sjGBjX+-)*u?`*8X=Nxf4!bZ@JQsZIn_&T|w`lmX(M0T2T5K8K#KmV} zPG!y+!Ey~sNAx8@j`8Uc3-y+Yhfjh4cyFMDksko?E^7S}KSuirohD*mV;bvj9@tjx zcEB@e-=fpD0!;aW5QqL!=(u`a4gy2;_^#mk9Rzuk@gP z=CteUq^PS8txBzToRE1R)#ZNW1hay}z;@&<5OeWzAazv)#N4j0S>&^$f<)_NaTIlS ze>bxuR^5|sj32hpxvokP(~&C8>WefLR=u>B_;V@t1Y8#{GyX+=$q$A2DvwEw1`}m>d!q4`xMqshw~p)|q#a z#~&K5U*U}i%SggjSgnu(b9fH@J|aQ1t`I{}^S9i>+9x;CrCZQssRKD|)bJDnJ*%HU zUswHF8D}xFD{|31o=Ct=VazR;aQBU8jk`gL_(5-wyf}oLCtj)>0GN8&Ci>iBk)%59 zHGjgH;QQp(YXN zOLvk{|46KukzW}2=mB3)^6W(HdE7^|VwB|*@-1t5?nR$<03rN1grE7j%0s1OiS0U2 z8yV4e$U5K>^x!%prYz|?+Pc?9qwoMVDeB8to5$p@>vj20;tnQDO}F>SJAZtl7J{xG zURNqj0J)U^9pXbzGZ4$$*lo($wyAfFz)H`NrT7=aPx zLj%zEad$V+`c&B zqc1UA)+YR>(`UqHeM+TT=EY(41FC-+NZtLzhn@r<%6a?fx<#V^%T%6W_d%Gfuu0iw zP%5y3YG*2hWuZDrAA;a+w#DNG?Or%(=5ga0Pb=`CYA}+$o%F;;{+Ce)(HD zQ9&VSEvxkKi_<9tS7udy?tHzJovc*f=sxhwGCqGEMV|wvX&~qvsg6m| zTEIkhDEVMsLrZT~gSu$gts5#_{=OK0l|r|{5HxwzzxOkRkE_2cS_M~{BM>B2<97Us zmm3##_KVN)FNcWVFS>Fqn*2q8oKOXCekKS@G4)R$@W;V_p(+0L50=7X7qG&mdiaua z`KJ@{pV|J7U;fAi}r{^e$VIqJVI zG5_+Zzdn+`y3&7Je15IP|27@}vYTIP@qcSKzt-YkYw?fd`qxMD|D%uO-rhGL_4-r^ z>vIBj3if7+L8B_tG>c<>gS|B`1M=g46nDozbX*pP=QgJkh?7skx|WsXWHAUwtL2 zaSkVXRqvKAZreJSy9cY>*w;laKANB;Wa{_X<9UBVUq}B7S^f1dvidKw`Y*EjFS7bC zvidKw`hRGG`bAd%fvWr>tN$Xa|01jZBCG!*tN$Xa{}0IO4qj2QOIiT`Ys&mD;`%S* 
z`Y+=8FXH+y;`%S*I>M9sMO^nK}|3zH?zeimE&axzHbZ16Ul$g;C@n7%hY|_5= ze6NW4!H37_&vzIJm&tmK58knT@nqv#i;z8IGk9VJYmOvd zH`OuNg>2yE;X|JC>~|wy!3}zK#d`VRNz@os+6;-?YVUe!+dXPAiW=?YuTnbRx?w)d zVUC?%gZqwdap6p#ClKbY-e z+vvM&rQp?P%lxo4?}oX4{J3oSYUMb}N@PPQ6@RY4(X+`!{ylU)w@}-rmmYo6WqQWN zuIVLH)xDI3k3!e0f{AKJvYIa$d7Naye&DB5QCmb_ptJSQpr$qVzatMi?;6M7U>0R+ zALNfmADr$$2jIqS^d9&R8Ld@lKecS}k{!F1yNP^a6D$991Ml|u;k@b9ue1~P%v~-p zUD0O2dVD>Cq4j9JwDtBvlP3>$O~g`XIVqI4guXOcfzwt!&21pIavd~y8Zu0ZEA6b+ zRi`!c8-@aK!z-R zDMnaW;S}oe(axN^xN$492BGO==e8Ivz7^092NhJ*&hhEB@Irn}lwkL=<(*baSKlq$ zDeHJ$yUVuKUAL{MHzV7^dbu(w-7O@kmhk(knA;6KcR8=W?+EUjx}t0G#UEW+a-nP6 z%!{v0`F2LIk=<=u(1N)uovuAoFVp6jAVIsgse8_8M|5?BBvv%@q^yE6(Xk=D#BMsh zWL5w4GVhvm+=M^wiLwRm$)|i8F_NWwI_?L%o0_iT;ysXNkSSi(h=QJgZ0*MF**+i7 z=w$k+r09DoT*+Fnyn?g6e~30~bEVMC>fCIeLxFwgbs&2;0HgSkUGvrzLy@+L_1xYG z8{uZQs{cRiy?H#8efvKwiG+|!2!&EvBZO?(2}zcSQOJd)WXUilNed#oMD~znN=4a5 zixAnDtfN9R_GN^@%)TY~wgYCybQYV4RL?7>k|aqr^A0II$4Ni&4;K5M;9jB^h6@#v0KWqyv9{9QfsKdanH^?gOE) z2}mQynO%_01m(0g5OjNuh~ovb$v^G!&R984&}tBatuDWLX65y<*a!HLFJqzRWN+=e zb4x*0zh5w~BBvcz>n6NJ!KT}%{`7x%4eUog#(5&5SD+&32)+0g2MbCE(QW0v96 zE*rGrELMsRroh>bgR?Ec4VyS2Y(lBLk>D*Jy|e`rlRH*+IDJ}ar+z?4mHz3t!|Sg} zy=^ncl1WlVIMbib_JIwU>IKm7{s;=+h-$5zt8$+eOqmR4#TZ9#*vP2&P~N>b%n!v# zPqbmo-TFWab^HB+V6p%EMdHn$54TGQj0uNcwHb8aRI(;RMTEXx#Nc#lIwK zgTlqnClO^Z;C71K?bV_ICR9b(=kTec8^{vLXOgYcJd!&O^;`a5FL)$=?Y$8Z^&xz+ z*E6<1w#a=e`uWjMG5Rt4{b;}{G9|%a1*$K^YvbKGDMVY|HvqdhHIVJx{HPdItM3Z% z8+nG&QOdG!yLhy8A2BVRNXOZr6MrybjJUiT1pfZP5Zn5T>^K2i)9vw8_%BP2oP;!1 zhQNhngl zNs0S#$ARd^kojDvrTdIa_|aQ``#@Od*Gfe(b1X%Mhlc*?J2^IsRtFbeR=pynO?V}? zYf6oBYyPhPLCOc)H_oyOn?Y}nezs>=1xBqnx|G#q%-V88*EDrp7 zQ4m?ylS%DPKPGx)fOXb;M}XbS-&&7DY$iI{SDFCY&4$g7R4Thw&Feky)3HnPZ_n0C zNW^R32)iOX>JeTOHl#$+xxW3|AtIl{tkA27D$YGMueMM=SRFfi8(G#Mt8I`a81`KL zb8NYdp=x!`(2tmY+Yz*u=DKV=>c=|s4Sh7ZHzKqAo}g2L=&xp36k6}=d#~=dd1k2i z*je{8PSjqZ$qQZ4M^_}ri zztTJ;aNM=$sn4V}s7*OL4%bD=T=}X6TF&=2S@4r9MdKe4YHN!6c^+;X48hI=qzjnT#mq-e$&bqk*7~}?v zR)D|GqNDE?8{bDz2_FG%p!;Rs;}#v2PqOykoah@^&QxU)_NtD zXIt(gS)oaGr32N%I%w`kWYx%sjmtRE#%0sTJ~c{w zf=6~?rbqUUH0=6|$^~ZF)$SuTJW_u=(sijIm-ff{(}R^xKzmy?64z8>8DNipH9S`O%{*;5W?a z(*Nq&2DiimH{s;4q&wjx=eq^Qk1oDnz4siwjL{{mhJQV~5%aCUE|Acwpq6>GTb*z; z|Mv9yJK)fTtn6DZuc{^;ebcrjP$Xi4H%=f2)TN$u$O=u>yhza?Z-AXR)tC5o+UzVxNc zOS++z9YXVFVJxFrJLxddltp~UiqNUr2!5y4rP;P(rv@SG_AD#zf_9dW`t6Uf5;2MZqQXLIe{=)=y-TpR(V`^uxyW4O0Zzz;xO1C8iCV--kjz zWL57paE6}N71h3%;W#tAO$*>&+5H;WI5fvcwm+7iZTP_#I+56W1wY9sG#+<(f_lSA zsD0<%isj(CujY!jAx#e_z{sdScj|7EExMUXPNT{YgG0z_cQS=xP0?M_HLNw$-dh5(u)&7c> zv^O9Y{_z_R|pl}R2yeat)zxYq+&x`H1V)XMAF z&qvFBrzI_Iz2hcI34!N2h2kWvo~C7GQoYlUqrc|Tym%{ash?v&grYYxZ4DpH)pT{H z?ad2ON5^6nV(HXMkQ@ z#W&!q@r)5D5E=`W=4>vkef=4EODQ_p7nbhJRxUIzf-_ zy(^iqJXo zeb>@7f`<_$NiCK7AQ~)C0}$h-E?UtJqvPO;awh8kFuG zLGCU_fe;|87E;I;njE`@(w(1zzx3LzVlB8X6BRnFxQr4^GES6!Lz&L;WGC!fIhI(I z%-cx#_{_whxsi}{fTXsSyzo6b_AJ>bG@ANN9fg(Duh)hT>W{92J9{J%G)DX2D(r^y z8r31yG$z4-A$}4K2sU=U7l(UYnJPCiR$X>P2_O+lW`vRfxVimC59Q-P!nePC>p3u9 zbHFOGT;kICB<{fX5BDbd*Nlb;mRn@@p5zEZ}a|Z$BSDP_8w>aR;k2yaU$lSLGtA zlNGNzg0NZw++;{3CZACV(|4!i$^jy3WVSpng~p!kL;bGLZR-`qDyVwQr0{E~CB#sv zfs{NT`Bs;QQUYRww6Uoda)8SO${c1{;8fax@-h6pN)ZO4F&@7_5Aw}8*Z_UqRVWOf z-?3LOG!#qOM?EBC&U+@}@mp$2eB8&-J5F6>o>@^WY18)HjS)T<>b{SBuh1)Mw=O|1 zF_+;PaJQN$c^>uM~2 zNCnl>X!xZz!qOM6q|d(Uwg+#Ls%rNX1QC3qTQhaTrhq3KMC~+tfX8hFjBr!#lk&B= zdCE90H40UvP{c%)+kCCvNI}0%Fv$Zbd_2L|#2Q|hh>xoNa$8W+&*XT|tD-tZckMCd z%N1CTu)8!azp005r=;-4DN2L&`}mc-HWzZgJo&x!QS3~cp;UdXAXXebj7GQNr4Y5f z-57FfF+4*(yV~lWoJZ&BqE?Qs0>_;Z`3V`metU(6T zOO`E`c7EBbpzkr8NE=naM+LL1eqJdBZd3@2@Duhbz@|3>e57+@r`JSDL0JAA#(W@2 
zW>y4t9IcHGrtK-dgCu{M>r+-Lqlpd~NeA;DjR#@bzPdrP=T67eBdovEWT4)CLY zYAa>G{-^$DX|T~mM$$Z@I~w^}D5Elae_{S7f!vb%vxcO8Pk0EN0jPqeBHDa&pcZQk z(`qazHdogn0=Lfl69bAXt-69v9bKDtn)(!8IC5z2-Z-GLzEa$ZftE>}OTOB7->xEp zwn*vd%e}3a_7?j;pHV7`rM@t59Ih2Iv|vJKu~c4YNIwtWt26ITzuC7lT@M9q>jA zyZ4pEm3fe_n+@FAtaJ z|120RZELaZCb2JL#Lm=p2-hzZwIBBh{C>3|q#CO#_%gW~yWQY{H)1tk1E;%;jEUmN zb{;v`(>ivq2RW9cxO?7CY+I5FPhfeuP!@rZ9)9UQNtDE@e8pQVtu;%A=k(rI;0`^O zp$?Kh@_mbT{k^{(jShw~AWc*vQueJ1fjU#&Q(FJkWOH;|<4*Vd<9NyDjed#;1^bo3)onZciRba1qsc3T^LI8%P!kSAh_b-W|bOoS^^ zjrxknKQdwfyoV2;`Jb2jZS@16$2H}A=f=dn^WU2nsSQknNTs+mWwb z1>c_UyCLvLk8=hMWPe0mz`H|~`eEKA(pGKwzNX_lHibr)R4E70cQ3x!QylI}@pJ1b zcG7uN^+4V|X>~r877KpYKQI7NDgm)dO{=wT{=4WSa}b-_kk8Z6(-c2*#eDGZ{_LY_r$fxa;2YHdJ747T6Ev4K&s$Jm(|MJsLMd{BO*!l~}avJ;K);WJ(Ur@yKBk zCnYJ1^hkp2ZNe2JkPaVw{$;?1AthE*%Y5J6a{EoqU?1g1cf6$5Nn^Acj*)(DpURI#IF1QGA1Z_D9P_kQ zA1bv{R~$EeV@?8R@M91U#MkC$($%~-yu(u$q!6E?Ilc_+nTwgx;T3As*~mfXBbXz> zOziP{HE(4($4$xw(TdY0>ZF5cH!ho36h9ivV0lB{5?kF-lwRjz!?;t~R(f1F$`p08 zESF`_Sc#O?qSO=8d~80vbY4{jyEOB~vsIfr+j+UF%Z-k_a%=xur5_Ybm%9v`J3<)2 zJ#wq;9RksIk2fqk+T7z-ynE%)+X2JdJ3kyhRQmmkWBO$mI#RZ$GQ5@xwfj+w@?Q0&)WNO;cY%MPbKopn+(giKv_ zzvYnxl^EWcZYG*3J+()bcxCLP;>W4?Y1LR7b#aAWX4C3e(LMAI@E!?NKopOCyO{y`{T9Bv)p)j#%PV_kQd6T0^8gG)!4uw?{hZ?udaaF!U%46 zx=y}Dq1MJ*17+S}C;e@As4!ZZu-0u=Ez0Mdj2p~b`5Gp*lYe!QWp#XZ35}a(7EKJIuWwC<+YYD%g4t01#vXB#tA+4|H(=|DhaEe3S|pzy-MPslKx%HO zJE&;vxV-JtT}~Rdah#Qbx{-Y*CQ7w(>ZwvB-~DTs^7Ltp<8n&p^yS*jnC-58+0+9J zwC|oMnSKa1^p}yw)|5XETXvh0h4iAfP1}$-o^fb2;5h24xb2IZRgzE6H;IzvYPT8w z3VA`~a#c4yc1CGr>nd#lhmAuNt)#U1l&L5*XvgIh;#)J4!n%0N-PXWv3hHe|CHF57e;yZTMA%8B$qBM3W?hjVDJ zVbgGdsBnkv^=ARgp3{=&J3mR07B*6b6kUa81mjKzjD1RyGlYb)JNb1t_y#}V#x7uc zzfg^bl#U^YelwA9w9?>!^kjKn-LyQjc!c(w=6tQ9NBfHRf9}gIU&t3HlGHsaW_3Dk zroRK7-YLvYtWHof-`b_u7e4V$AIgK~k)@8MedE(uS~>Bc3b3`}h?vu%En zyQ>j(HC1sGrx-73eH_JYe)u~VCCP!5v5)L+y5p*9I(%UD%cyDrkfpXI`(`(fl2bE7u5f0%I(B0Bkuw1haoK;79_BSL%Xz;9LiHgv)!ab2i|)(`p`pC5WD(}m=TeCw;Fr;)NF3fn;!^K zw3jm$-PWAPSTi^2Xyjow=0;D;-j;cYGIm9=OFz zZtSI@IpbC(Vp{UFYMK2>?&EUqy)T|v51Fhf7vwlNN(tzBq}3*j`xb;rmg$kpe>=O4 zU2^h3&8ltfgXQuF?L1r}{%?kO;8JAOrE#+-zC@ct8ohwyA!lWaFvKs?b{#+VrfnF2 zbmn_3Q4=-5mam-eE;8_}5~9nyjJ3rvgK9Vyjw4oT&mgYL7nt*!b28a8!2~Kd-d`BkM5eyhQ;+?OU8x zgwognKXR)srs0On8a*x0+xuMb)YtZbHVv2AuQkx_lsZQD4ibk$i~UzkPA-J6fL^8jP&^7aPrlrSy}Ex-61s&qk=6*hNREG-`qNon{Scr z(|#fQxZqo7IjJ#;AX>)L)?7xemT|ub(A~DD=gY3*Vjbf+E07ayS+&FOf<{^``X&`D z`ZlXawwIxe5zWH=e14&9@0#j1a!TftD#^f(O@Rpgsky}mleMY&J_6O^2M`@k^JWLI zZF7WP5$hO3q?MB`hvv*pvM8dK8eXwUfm@EF(@KE!vLvrvZN2qUiADP z73fS8kn}Ze#Z&Ii(swmlPv1SWkZNGUcrQkA zBNYOO6DGI-+VgZwAROVgXAZj4j`X&h)0B2ZZ(ij-oS_GUUaO zz?}<;uK27V9EeEmTu9)hMmdfVCILjB`?!t6q&BIpKuFgEALCOXDWrv8zq+wU~J1utzHUPKkr|rX9Lu(b`0~ zrF?=#A2N^y6+d;~9|OSp5oI(2{#b}!Wv~v%FMdq(EQQA@1*kRu+zS(?AR&4Jp%&vXmEQ-04U+d6kV4}L5wfo9@&Q4`g z`${EbgTwE+Oinb1lC6Ew<3(3h9?Pqcrk0?VbfKR;)$Y;s0+5wUr6_qvL$uwcoS}xo zu-#mlBz7x@c_##>mk$T7&g%A{#|y9etmUPEGuPg_`Q|37LjPq=zsZ-~2w2suP!ZiQ zzlqw}3?PBZ+$#g015N-8F^B%RxEyK=>cKPmF^V=Vi4!zK4E;ihvEb=cdOo z^4vXvnbluE?_j0r4X?xnT^e6ZIBtBt&CR3CizFmwQ84o|FGtnZt5v(oN#lWY@|9a4 zn8eRhL#ZSJy)f6>C9^p4VhXiA|2k9{E#oHNBWZS2(DK9j2k?{m*|(8!zP=`tpYlO7 z=X|Bs-!EE!0jMW;L*1?BN`7l0>;@)MSK&kMYfX&LK;-Kq#4b`H6GcrWrgi?6nikV{ zI_WO^w57ZTM6a)_Xw0PqU7CstpxW3VaMx8(xVJsy00Zqvz}Nh-RWZb*;_KG8+98>yEO?aM0Ao{*eKn0!|>&O!Z65v9j zoKZu(M2w+SWyhj&SSp(hf(&XV{iiGWtk8=o0B2X}`1U5VA}fO4An}&gboz@Za)g!b z^%+vmYv0}}5R8)vTwTiG-b3Uc>{V4E9e#n-WnK<^L!Z6~t&vxKzPnY=LfCPTl6wVT z{UX9luz!5x>Vbn0J)~}^6dvfBs(@v5U7C2_tiu!0suUQofZnXK=}mT4w--?;mh43# zY~4tKO)te>$^_9)F}GDc=4=^y_UHmv zqNIydTduMFm5D7=sbX^)Q~D>&zqM!^BK+B%eRXQMS6{ch9($4>Fg~uY$F91Rwi^`F 
zszyi;d0h(MV_v9~_tp-8!cbD?^9GZA#sS-g2F_+R0+L2lK z<|68##si|+xZ+@KdBe?7UZNS3T0;c+*|~)rjT{?~N^`Oj z9exvC**6roU}urL=w-bje1Nrs;sXY7DH9Sz=G_CRQ$aL}&!|Tl`+k-=2CCsQHBD62 zMV$&T=)%G)Kvo9xOaR!Pk!ViH%Mm9nmVqs{MQk_e^Rvvefy*ONIGvBq1V=@7NuxH_ zlA6^|j_+1K4Vz9Vk9s^C{@S_~^DW)%nO(-;FMfj(FH&4T5CJLj*H@jdEYK&GK@#F% z8K0^$=^&l3PIz6DWS{@Qm7QMrHBcVT1~7l$LZL%v`Gx20ka3D4`UyThJ%i(%uMk@J z2+Y9RW9!&xFXyVU_($Yxfoxtn%<(ImsHMHGVgxttwiW3$$PKU^rn;LLMNh2UcaN;b z%115XySX-yJ8l62c+^=sQsqbDZa}9Mmq+S;78g&*1VK9DU;BY6A}W)*v2|%jfz`9) zTUD3r{lNXNg*pLn5O0aXn2&Ynw=6KL3XiQq-I zzVWkrxYdqB)pP_zQc{TT@+Z8$gm9ykuRfn=!Kgt9q6m<*XA()}1%hhSOIdz^qNMn{ zTWk%7)!ciJkO1RE4kMB|PUmeXGqe+YF=`kXM1y{ePG$GKD4UoOjUbxAAoGbKLe|@< z&lJ!L1N}~)sG2a@>1N5{(C|~bnnU>0H(uae7)n3_XC~%i6Tv#mZ*AEA^@?m%k0E{S zJXp$)24M!E%$^J9H@t=72GQUB4PxML3!r~fyfx}J56S(^)UIa9 zw~H7!;dMA4HQ;_kKT&gpT~)DN_+t_JXE|K;kc?QZA0OWnV=>J;QTtp*z88LXxM` z%^ivO%ZImBF>thOa-pqFV>cR@_2guYPL}X7Ce#)V;5Ic13!=-P@wxzrSLP?72hDev!o zB?D=TXofXFc_pVS=8cWxO5FRzs_Qs-Ou{9I0llCqMA}_U|nlS5sC2JPYHT|6C?8VWK2f5w=dd1$fXDXe8V=zn?g)DWbhB;0 z=poYa?Y8%~C6*o8saRref2CkcmGvRdZ#hAd3g893asp4HK~yLUw66xe4K%!q=cnP4w}QtSWx)wz2%i0AzS9ENa@=*aH9e4y95nWtkIXYVjo{cG zUGr{$eBz~0j|Nhv{po2{Ruw1cX>Tq@pZr?7an=M&i9z~hoCF~WEf$XG*bqe1OH9tq zB5-m2bCDgk&_Z9r!d+{*NZM4u%P#WYkSsh-w zb;m=}qaU_2FEdC&L`A0*#>5m=NO4Rds)^0=$W%@}$P%VR73!{zo6=0T1<~vc7^p}m zj24z@EF<|(0jPO=H&i%pytK~*R6@t%86!o(A5wkxXYN)^Y0S%b(v=wbHfR~u_Id3(n$<2o-e>D|7;#?^P+MOjCqNx80etcIezQPO9j^!HwWFQ`*5$;nbqRLh z6)`jcF_OHqn1xQ#87DuZA-uR1i@d=Q-rjMCdse`W|%>jx}U+XpB6B~1=Pm!{b z@LUj&GJhQ=l7DgBHwiSoUyQkbXs(YDZ4mIE*vsesB5Fy3(wLjPZ~@vLGFY~?w}X@0 z0}9+1(H0qY^H3E?ZX}?r{$CdbT=n=>zOHvI%zmr=9)1m~oHG%xyB+M^MJjKXfENIv z#?)ai$kUWPHryE0wkgWe?GOl6Nt%LCeY~k^#nlEHB4%F4K`X zH4GuLY|hxPm`QOtkOw9Rb62AQa!WMoH@wjNNP^PSY>ZR}lL55o>G?q5T2hF3aUd9y z|F;&;bO7~;gTQ&yBL0j@l$tx&S%KTpr8`59&6%g=l-bX~zFgSJMKLb}&oBbT`e4f} z>^tj{izLfW3e!mTY#aXI(M8GV6Vu&YZjd$$NxWU%;SnV}-`(#VvHZ7E$)9T||C**W zh2s@@JnvS=YminSfqbNyr!Y{%A3(^FI^oF1gJQZLpczlgH>?sqR~+@;@D^r3CE>m> z#t-Vl<=ndNOvz9n9`S6Pf+RJg{M$tl{(ON4;|b(fv*uVtS zs(>6We)Q(N&S;bQ7%GUkIY6ltGtsB{D7NE}d&~ujD*VPFNImwd@qT%MMo1@QIZo-#m5U_U!kRKzwYiUTE=G-WuPZ`KO}z7a4wjZ zLY947j40Qk6>!kAmf!b|ohZjXZ&I5!AWtdDPta^P7->)mPBGgVn=ZPE=)%}T0tlIz z!1#-*U$Ry> zm+{fP&RU-yDkR|WRO`95$cx7Bgk+$Q#eG;LNj+_X!J@!wFEmx&4`DGY{~Fxp11)8{ z+iP~GigUi6f1VcQw~YR1(istm4<&W;kuG5?r}7|?2{gYt2jPMm61V5ik9QS{LGHz7 ztCt)0l)Tc9qRw%*x}`wN;93Sw>dDmSuVDfBY3~3LS%yS>9%-_AwKD{>wz9Zbbf(?y zOIwU&EcBu`REETnmW!%j#QaG(55A?FRoAjG4VV8hah3)1>9SYn7E0L@*1+|KQuR~N z8*TEWcvHBcZyaR4W@O{#i7e%_wTQ2fePC-5u}G@@bfK?1ZNS{pHZH}sWD#933fotV z^0YxJPmR)I9ocd(8g>d6si7QeDtkUTl7NbMVYmb(%mk>rww8viHxNVPqD$|KJP$wf z$9CT?L+9TeNvk^JG5l0NsU=<0$8-ohY|w8NFHr*uMtwb-dLJL5UG75 zH4t7&Tne7Tug9$_Y4x<)-GMb|rBQ)=_Eqar#yXEg{Aq}Pd1l>U0bSoE`{ z`yTX1;dSbEAFG2<;UkOcctqz!)|&x^!E2erbjge&`fSb+}d~0>~S3!S&6c8 zQDuIZGnj*x!uX1OALz*#+&Cjq+AmYiLRh3NH)AW@n7mj6^&=OlP#%*6{_A|yvxJ=5 z>*K*s?{bM==xF7?Q$`f}7}ZLtHuBxQl8waL7nOV_h?W!g7C}~bekbpec}vY^G`+@N zz%->Ld{&vB=N)PtxUYgHZ?2Hh#8Iw6-$?$GgjKWeJxI2!L~C!~sywj=x$p0W=)g|5 zLR?PZZ~S=vWW;qfK9Ciie*5r{e>PQQ1_i~W?$h*vS4D5v?95BU3oNH9*&dK5RcZ@m zeh(?j4@*X1vuY)DWwfMD2ba~8Xw}4WkeD7$ax3Dj+)^iHlys!trXnt|@^&D1TL8ZO zG&yOz?a3K9RNoVYXHtW2h97}iihuiWj#c7|tI%ZQQ8C`>^(B*P*FsBZulay>J@umr zyKsVA3w5Pt#{*-f9}(5T4mWoBFk#p>dN=N-DDA&<&bOso`ebcmI?mz7(AM>Oi^wB@ z2;Lj+KBms*>M~=ru{}^lb=nQE#9dWz@|$G6dSL+=eb~Q|YSe z;IF1!H^8`b6+9>v>+9xBJ%L|wd!FyI=d7>PxJm*%%&ZS145{?>^*mM!K9#D9q2EX$ zxyOs25h`;#8~J|05G{gJ?YHy?zf$?z8V!=TLA3$W?iYr-+$_Ye5ewZ?7ZZeouxdL3 z>xE|o8y`Q>ii~RVeS1@~Y}v+0{$ZEep!&7@MPA4$PMSedUkBYTH#zB~2A>+jQm{7il_U&9`; z6@gZz@2p@TrXNeUp75W&H^0;Nq8O0_P*mBJD2dke0u5E&_PuGo1Wv*0-Qw~ykhhP 
zUyK)B7Fg$Zayl$FGcroz!<`m#mqTF$O7!;&G(@(b?0vL zTP$=f<_ziWznVccG<`)hZmJZHUN@{WQj=vRB=_x!! z%18r75sOOiDbXON?;?lCI4Ha|q~yb7Z{uy5Thno-$xS?(sP7_g_Wm40ItuH?%zgCY zwLstJw>qU>ii^*HTI=@)HiiC^uG~?*++0g27(xo)t#}FqyH6owC0T)Q50NC5D$CR( z8^lFOtx3Gq9RqdQA_Gx5VOz;JZ=6SR2v;OlJ{?D#dWvY7n|oleW38)FmE>rTGlw6b z>2KNbWYl)XCwbj|$oTq3M73mR;7RZn8jf|h)sN6aO(yjiAh=)R6!K{eQ8aJ2knVg( z5*E+xC$kv(*bBldjEV&wdc;UQinxWc4C>lS1CMj^m&swAd49ZaZ#_ivmD(FAmG7in zRM?hC-gK1mm-n-3spBsD%)#L8U=F;+X-^$(JNSJSNJ4BzAcl*FRW*fXD_nmQP3=2u z&NwKIJ}x^;Ly@4KT@*Oo%R=8k3a_G4|Pv#^?NfDM)EycMHO=ftpjgs7}*wb2ZA;-?W{cc z6~fI}q#-1uYWu^@t4uM!AN1Q@*P{e@_+!ad1SsZ6=Dx}esLc0);`$(GXIWxSOBRba z58=TB(7U66mC?fUf511cS-^QN?)#=;Z=`c>A8?vU%=te<;c7|^%k#T~o zM-9-&$+qU3N2vU;@~Fl7ticq(weC&CDU`|;J?^bz=jqq@?TB(3<|WLlmQh2_d%+#P ze(_NeyC4D)5jQ#~c;rWgq%PiuWWLATrTY2?ne*@OJSlBj6PX!G7SZ89j>W)De|vBgr}BV&4ZDWjxz+>?O>%p|(Lc8|GSO0k z_Jw)5JS)YZ+o9>DZHFn=Z_Yk5ZYe!aEc6PmvXHhc;3609LMz=`A5Y{pfT)&jhuv+c z;(n~y;9z!*++?-J9UCuUv!C~5X%z2~-%gS*tz4v$DK{X$^Y^uBdZ=JRCIIwMnIeVG zR)+?dVVLlk@u_nvm0Pfz?#Dn=Y{l(W-V>ev@^MaKJ0-L*L?Br5O)aZZh=VjaCwUXQ z_gKe4^djnDpoCqrf9(}OR$ibjEdG?#dVyZth$EqkzYN1*S? zzN3dMYSmoYL|S8x0h%n^%3nw+*Yo``KJ^kNCF>!yNm1hE4X;re9W0lDUPZ5nWxzv`gc7)_>VXJp0KYn9aHoY|Gwas)f_b-x-?%dz-PMts} z&0McTxVf1}nw88(rz!udJLCk^)DoD*FYE8|pn=;cYGE9|?OOlY{Zk315yI!H%#}RAO z_M=Y(2&$_xSi605b(TVcRH9{4OpfR(Lc*0UDn4@Li8^BtZHu~zRYD`mm!T3WiNm2{ zMKin0Mw8qe0lS?#`u!Vo5R>2m=8wk-40lPg+sO+-(P`pjqx*@fni?p<1CGCj-##Mr zD?DEilCE4oq(=C78SsRWn_R|qzh&hJ6IH78PXIU_1tI#I&n=r*`84mQ^XjQ@JdKcc zsm?Xs(?ZKh(WbB}GAo6GT90h^6gzs0&`O{bBSrL>vrg6H=Qag{&)M=@wC45Yhr#dB zAClzUDyEMhjaiFVdtbOTNgWfKv0rmI|5B6uGPDovOmyRa-MO{3`_QHFlysc;u-8wf z6uD&BGrZId_Devbbq0)l=KR@`ix0e@9s@{+?LaQ{Y9LUGUZzeSoee!#5RD=98qW=$YRCvzk{nkbV7)oM_P*jt!^6^V9x$*x7W` z&kj-h`!Hb@EPZ$rRDjf{&vmCHJZ=LBMGVSc(oRhWl@YuSX{F{-MdmYr4{18QLZq@) zdn?%8Bl1~xm1xAL-s^eC!QjdpWS(zP505I2rs!$7RHE(@ndy|}clJC9(ktv(Ip3ZU zCp0aw&Cemdj%Kml(Fu_GIIvBI-5qLNg*TCFnvO&b>e5Pb&dg26~$JEv4zCXPrLw~ z{s@uj*@gI2#4{IgSUv1@!LdXgxn_R4SNi2Vet;1#PWSmdChCH>c9&raD@W|k9Qw8S zgs{dlLUa0Kg#K)yyie4S$B;dJyvfYv2=#x-7cSc4TuZnE)c4*^t1qQUALGQFpz)1+ zUKuZZ^+)NVNIu#nGf+yFV0RRMNYAWPO-2=<8;f z#o#N3>RChdzP#(*G||cg3KW<)RK`*fpP9Cep$Jb!^BldLR$y0I_F~F=9-FBIh3K7m z3D$IJZgdt|LMshsK3Kg?YnD4XN1Eqy0p3G8hg=3L{4DOScrdS)<4kv}I8uAfCSSB0 z$G|wb?pC$c%ShWD%tv%gmnDkOvmfSsqNmVWs(1OY%dM)vU2IYK$w_n=&Z?-60V6%! 
zS2YG9ftruVqC#TuB*h&H&L@(OnwG&ZdrB<}5@*Kkk@lN&M5Y{x(}%cYRF9Mo-C%Jf zwxplRF?PM8!3u|ESLjZz1fcZdHoz{j@*dHH^i$9eyiqo6y^-k-dv*o^*0)|Q+bK#1 zLNyXvy7!!5GD^n z9I>X*zR?0HoyY17L4c7U+jR6@U%hqGzUPI1<~J)i*U+_j)rQgTU6}(l-ed-e&O9M# z|0r2LA%rC1S%CG7j2E1UXFe9oTp)a*wVQMA$6-cF{9i8+`wWXjN527I+)@DhD~;nSfv1|%Z{MuM}E0F8Sm zUbc4hQ2Ad1}B0#<@LR2vwDxRSi8n&@<6i9 z4K`%jGl)0qpp`?!L}ek?mtWn!X3GwaYq=cP3DZBr(i=220s( zMdCU7^R2Jb3~Z<8RYx*Mq``j1&v_$-!k% zC(DL-#zn3r?F5%l7hO_P;W$kJU*p{H zf;kTG1wqAVy;TL_q@PE>uH00-^FkI1VW|1@AOA56<(XjA10pG&(4R0 z^DA2ztF3vRsknA!;Uoe;gLCn(aCo0D%O41&3%6?yw#bcoNIwPR)@*bCITTQb3{>|a z)hQU8ISc$2BWBji;-elTatCslh&IM$ai#aF;=0q41=2heNfv|h6brkJ$JUwsf|9f$ zX86?hu=N+Q=K5KVQmC-^;Ic$4{<8*Dv zYCvzHZF$!wh1qmH`ekzA!Fk`s0pA2b-?pEa1-`y`WMp#4r-XO7Jz?3+@a`Nr_+R9L z|HL#Hlyn^K$H+A4%AfBi1qj+aIi=1NYe5rBZU2peWGL2!J-RdGc^s9{d2m31ycY5W z8lW8^eBQKi0LqTqILRw=>3;)t|4sC|Zdlg68`sdAc<*5gWi3?xt3uyfRb5dwlN^}B zuO)*#6l>i}|1-Ez`t`~;2Vqqi);GgXGsPjTb)CuPmb#KFBZhy+hx;}}x6N6Z7h1Ya z&#}G^srttcF0-r~wQYv55a+Xa& z8@0a!dyqBAtAmxIbSQjpX-hYUp6;hKT;|(do-7!)fBIl(=lY$9#3M+kK|}SQzV0Hj z!~S<>mVa10gcJh#0*vysTG5n$y){Kn*+W33y zAHq{!{D+P7zb@YI>@?S_)l&cX9=O9@Le;?%|8`UTWw*UT0%|(?yFi1X_N+}t8o5|s ze7X&N$W#8)M*5$$Ha{2d;oz_3W}q&!{_#B!U~y~^xXANwH`Ska+Z&1XJJCaM(YtZ+ zLB9e{zT6uL=9#{o?=EO;~J9 z_1Bs(Cuo$B0l4vjDy9r!IRG{Uo?Zme%z~!YwknK2X|S@K487m}{JnSHhH|L?bC_Yf8#v#i--7^Nxs1x03C~SFcDM_;X6`+iH6Y) zCd&vbAWPe-ILx64O?XIS{wWx}y(5u#A&HKXw0|sKh3sv}-aeRybIG{WqlqddOHZ=> z{e#cHTxKjntLw7*+n=eHFVjQcLmU*ybP%9~OVl_P8c<^0-vixPG_-LU0UOKN9vB~L z=wiRs&8hx(!j7MW)!qY%Ex!C=B2~u;B2UULogkJ+Y{LC=J4Mh(KAj}=D;He1r$Y=I z*k;sbsy8A-Ow$Lj4~^x1Yf-2Cm=r_WHHhcwqW$QJZjbT|ceZx#lWSbLQ8oIY%#jmg zd+J?<)Zafi&-v5X+b01Q)vopIS7$a76L9#`LkmV(>3AUYob2E?gqC6_C~|nd*~F~} zL%3pNhS5s81d+T4$~(!QoT9UKlZ~R2UOQ+I&hFR$sPXp?!heEZ(I;RLqdT20{>e3L z;TrUfw#?2%6@Q;b0C)9dt=u~I(;k-gVHU797oR&^EMB&F zM!og$$Zj`Mw57u=jwZZQBH&2iVcMf>p45Ta3EZRKKKrJ{AVo6-p+D@>mK9X?BcMDW ze@$W!YFVBO?rDEali#>Pig+)Gd-tU$H6_j&iH7-{79#lM^2V$$x0g=!2ds1QK*-O< z-=n_ga8MO?*7ZX;&uknn!0BY%yE9C9kRM&v0^@@3YB^LH$``A`?kXgk>l z7b;&(|L~|a48vAlXun>IH2F5BC@VY$+8=v@-NYV(YmOw`&NjH6tmr|-afQkg9}D!V zoLnC(#0RuDWHp8k+dO{!Y<*4CeHkvU`qmEEGXJhZUnYJTX3zc)d+#09RNA!3ZCIJ1R2>ip^%SQ($w@8=~`U%lhAWR-8Aj(j|Ix%QWS zGp9Utj=apX>7J0(&6>qT(l6gqT}W$H-uHYsM=L8A<4prLHvogRt)&zPpyF{7*PqOZ zOVj#qXmhz0GhXewKv9W2p#R(VgR#AO zfj6}D9$p2>?!K9Q<=(_15ZJ8~X;sDkUPRpUl$cD?^z6&&MwO^dq^BH8ND-!2fxH>P zt@>_LdLP3!7}-r0i4)ji!61rvFlOH$S0)*g7?-4jG^q;fGteoIQOf6eW6m;Gov4tW zo4&5SK!9V@^HOAf`nt?AMX(cTkkBiDJe^cWA0Ij0Y?cv6+$w;Yj9oC38J!P=W?hZ8 zl@@gJ4KZF7s9jEX@spa*k`vsXpbP>r_U5$U(LtW5s#LA+@kDyKuHmtGed4h*<-J=N zJ%J6UMS@39*P9;WzH{w{H)D7Q8IfH-bi=+gNb^J*{c=RZJ^p|nr?{7bG zGY>j8HX}gJ541ATJMX?aw-Smk;&XQw`I;rZv5?(@bL*v#CF2yG@=S1(@9=(-Rf;Zo zSc8G>WRaLJIC0uA>THZ8E%|I1t9K`vz1Ki~*UvY4=7jAUj||&(qyM~~5)t|KaW(c} zO73LKr!V%0of4-D$mW8!so^8KLxi<0CAe>Ep_yEEN2WV& zQQi-xh(^@RD0?~&Ws^JaV9wg08HK%+56_ksl}jj*(_cN_rCv147`FnaPJ2c_<|xrX z-5uUNGPb(!33V3{Qdw%1ST55k1A$ zR$0SOZ+(=`z}(462JuCG)zqDTGv7r}{@)02gNF!Q%BG=p=#BeUHcs58CCWM9N+m~( z=+szqXOFpOe~eS}#d{kjOEb<8*V5o2WU0l9cout*F~o@Isk7-wm}QjVQ)f9fd4tCx zLk&{;=qSoes7HiOd>p+s`J(!IN0xa$U{vc2`OG5*_>3TFV#(M-GZHJOSd|z)=ioU zbk7(&(<=uBA8t&3;bgHskbY^4*GX!wq9GE5I2b&oAWtnTwupz|+Z+v4-% zncJ6pLp@$z26|gR87pIo{0?0lPhK(8T|MphF2KKdTUN47yl~|8v*>;;i)$@WR=3c$ z@u1fpQy$Y>k4RVb7KzJ6(9tO|-3-%rC1WMy*-qD@s{G8Hj|?(Qwc;OE^|lOc{cDA% zf4T#BDcnxsm2>+)%WNf`kC^QeN}vCyzn;Pm7Gk5h4a&c7k@)q zW#qJN@{Fk_yFj(?EZ#Lpz`+}`w4LQC@P_5%*~nV-x^U_h@iXDw*NeupLku_It1`aG z?Szx)ZH}mbLGpqg-S2k(_4B;7>RZ=BtAjkKv5f4i0_y?okbEy}q1@Fr2C zl&i**5KLMD(Vljzi|r#6#*&siRni|!Z&eYi>SgbhiyT9z=)%L>cf}dOx2c1dM?I4Iixue!;$E!`&gr|3Z?XRphwM&TZY>FDEDQFumyA|ClqD#H~2wAUq9 
z-60kJgDXdg$tnAmD3koA z$|F0YJf>4(FOwlq1cc^{w0%|bQ6l|8DWQEA!Ke{lKB|hoEjyP6ulyN#Wdgi1{Q>N{ zSNCMuc(!s{f)akvx_1L|s3s6yVXi}No8RgUPZe)CA24m674FbIGG$>I?I&z_Wzq=E zs2dyia!B}$zN@g7vC8P+@E|qZO*ubVhnTuszaUIc|Xwxg%HsC2n zPD~}r!%!h*Q<)|{B-HTk2A5Wt|H1F`#`Lb}L1@1BfF9MbRg&Ysds#-(Wng0Xlaz*k`W`ig>vu5w1H|W6oFYHg05bV=6emy|mZF$vXEzMrQe%-=y*$10i43DQ$ z6`*D62+&Zr^w?bBD#L0qIKf|^xLa_qkgt%r+*d$W96KIPmJ;N2<37kXn!DeM9nO|+ zo=(+0MWk;t@Y+L8*ki0B#Ax)=9}Y$*hFoQ#yG=&VWEjzsjbGBo`!moI+1*d3n~RkV z13I|txaRduSjZK(udbNag>)VhyR;LE^m;wIaFd5?S2m&Wt^g{t=-G$Wn0g!r3nKcu zMU-JER>NeJ5Za@!rFNTS93Sl6MfMCx$bEupSeJ~~O{ES-vj@kg9wLH~06s8D{(RW- zdBu)fj&!G{UHJHW>^4aq(heC6#Uw1Vx6!fxnR*drbw&CAYVl^E-&6%?$~l=gwmjuZ z;&m<1xrFYop+EHBb9)}(O*U$CZ8Fm?=Jbv67iB7NoDkWsR2ownn}xuP@V3=372T>9 zUXVxA-g!53!;Zh*yxfpcAdM0P##66WMcz}~BmaI6yrcg{oJY@4Yql!m9ErqIYoYnO#mFp~v-DJd;k+2#1G+PDt!lo1~r*#{^3P91>Jwk~|Ga5mczs1hP z^)TAVyW=FIsxs0+$#0a~c0+mU&syJLjA0rcxeGZ4eXejOa@m3x*x%pQd&P96p(vj>dlkKn(FzLzp=&BSx>T?}U8!ja zgBTKlc44jZe}h%RBVL#K0uqPGYQp1POtVuP>{y0gte%<3cxU8g_vyhSpM@16=7syv zlWH9R)%BHg>5)pdeplLBlxC5YV&k=nS0$H~GxpU-xhcIK@nnIYSOCNEzuZ9VhA44R zw#n5VJRmHm!j-OuxGUfg#U7|G=iq{z#jhbjnRn%K(Bcsp3iI*N_0t{DktXy__^S$b zfI1etKgaD&f-}mslYA;5E(-mo|M@6(Du)tc;sAc1%Pp=u)bsu>;<=8JvQyfZaXr{% za?oN=1}ETbZ+A`y$`D3D2vvRVxmhf4duCwwrwTB^*4P?A;lG(}3)hve(V- zEL9)zmx~iJJlA!C-)y}QXS#UhOn1vZ@-l`osSy zaD6!!4V*j=>sQwQ+V6)7xNwfAc&p3jfr^@!N7-HJsNz@2ZuiC_;lT}vYe}DBUMm?j8YHv~%92J;Za_?g z>K8bOyma%@a56-NK5t)JA2AA6C*u`qRIJKfnM z2(&q48L61$H@YK!wBS^5;zYo`LxKQp?L2&{xmLmW7xo9@ z@37u~@t9go+AND^jo;Y=nN<-$;}DyH@?_Faj@pU?w@b}K~O%=k$DeuZ37W!kfqmXK$Jhu36v z#Hld7DYD1e-%F363%(Eo}3M!z15Hbg`R^aMbAqvaFebECAzn0iMOdT|>iPg1p;m zfAT&?h}yGmRhstUklKe&P&Pptt9Ji%W8(>~0`k5H>PToK-sgvFx5#;&2Mlts1gJ~^ zQ8s%C)3dJrYuBUAy+unXGJ>ydTU$oo_t$2Py_Mjd9%Ew`19QPHBCvoHl0&vG2|=ye z&zq~pSoKx>o+vf^nC7w6GsI(r@=i@g&!gIE+n~|vVdt!~M7dY5)VPaX{d0i*`eq(b zjci2-jI<=ryWbi^Eb4v8p|+moyp?B-5F`rRCBinZ#CmFJ9Ro~riW4-O78%>Ra`;vl zi)8slcDJgvExH?m;@Od&wuq6rR@VCkHC%O>{eh!I`vUC?R~dWP{3f9F2qoaetIgH* z@&~|`$nS7apLZoN-jFns1DMl9$jUEHRjUiU0Mehu%pi##O(>Wiib`2yB)m9}SN`QMN`rhQh)nFP z3?71%^1{jC!d0|nw*gu(Y8Rvt)kOLu-(i1bvafhrmCmQQzemy6)xn^+w|x!%VNkX& z@FwEZRXT6;bphk!=CjfQzO7Q3UoNJ4j3@d zc&Z>UVD`vHWh`!@;b3zr6!GLia|?hwQE@?HIGzn(stvx>= z8Izueoa_6}(FR7l8jQ^S$*?$S*Got5Nn^={h(C)E)u#dRHLq8a`DRsOgN`wgW`81Y zJbOb-{N?6%zn)aRtll42ulUcEc6VQ4ZzX@{n`tx%jQfUE(^ICq}hI8+)+n53^o)ciY z3ng%ja(@xX`^zJRw5QUxhA>YdBYJ8iIJ|W3C=DYOnNT}>)4_j}gl+&kW+ZlId@iSl z_?8CGM|o3V+=n8}e~8H9uxGRcXI3w51^P;ah|>q@PS0#}E?2Ree9y~6vtM}aw_|0p zFAlbxeu!?p8CLy>roGQsCTJ&F=J{T!S7^Q!(Y!Gw1m4$o=dzM}K!HmvuRZwPb}is( z;GYK6@;0DbI&bRQU@!(eGr@;Dk%=$B1y{d01&TNc z=e6{Ry$j=9e-W1h*H`Ux#5O~YSLmR&BJT&zr`|!X?)-~C_@P!`!*->fKQbR^{N0~^fxoK~KfL&d zTQGmjfBMDypD*}hX@6SEe^&5^?fl`{{8*@eWblXj`02F%bBce`F7xa2kDB>OyUd&0 zf7HxBYUaBz{BubDIV9f?!#|efA4~F&yEShl|N58z4x_H~TfYKC)15C`7vkoO^bJqthd!#7i1&I6sSmY>6AC4l)X(_^~x9wn`Kn-B0= z?*#Ix+Upa*6;=dUK}*0~!ws(j80?nA%<5mf<^A66!Kk&#m^Pn%E za#z6NzDuQ?ume#1^vz_IxnXjl4D1UNSw)oT@w$c3)8FDMKuOuLoh*G&#A6?M#ohNs zLESk^)E2L%ntVLn^z;e3Dera13$*B_fh~GZMz?HQ#Z8B+T0-)RZ{=~hdGWPbHevq_ z2dDtxem-JFS7*4tO@J!5tUo}|rrUaz`Qd%@j2JvLF|t(5T59;&E!3u+WSX|#mNUft zF=w!1Ki}{>IhV}a2+6$6*JJZa^DTcCY?2YDo&^IP>l(6?U>>Ve+5Wf!n9VB$Gna?2 zi5a}lMkfYc;bfN(NE*SFB?L{INxTnwCS1SY_vag7D-mhYN2+web<(mw!XNLW%Rd0? 
zpc8@@M$+yWp^MMTnh;T`jF3Yxqj6`?hPalgJYUr4Ttl;eCQ|NC zt2mq);uC@@*FWzTgbGR9ABI>zPbAN_`W|Xve$}M^1)e^9={NPC0qU>cDr!DPZIk28 z&}6JK5Dysa4p-MXmVB0YEV!H^&**tvcRDd>^fZ;0r9-^Y{YsPj0&ij==OQh=bv{Kh z@#-gh`s_dWbjV@>K0R2}2yly%U7{9@vgaZKhb%AI5nAy@I$nZX_1s%z)dDsva5MRX zacr?|9|pag$irm+(2X3=ir57K0EaGrs^IPb=}^}^Vs7(MNK~6aqI!1+qwVCLT$e_a z=d+;mkJ07zr%(qNv7s8zHk1-hXg~Y9C;%CQwD7ai1@T zHHI%VA5}Ok(|+B#yeXsBiSagOEHARDaF{$3=P}Z6k?veNpo{=z5KzOR8_wDEbW_z& zg4A%wBYT4OlGm-_+x8oA`8~bgY-nN&4t4J6{^>@v6jxxXUxL7Fd~tmq9R1oq52l`O zMzDD0d|_|va=LB?uwCsAk5!0SNCx?1a0@ytda=8(*DG#7XG5sT7E7Qk3xL7iyP()d zKFcxh+YMO#0~U;w{T+-Cv1-iXMig^1Z`^N;Rq7HfTk3qp#YK#(txqCV4f*r0Z~q9H zo-gD9Hv7w4fgqx0>P*N&jGviT?vXw#e1PB-nxkr_Wse(UoIn7|hoI_MEao>;=}we% z$fkyDQu4g6#Y$|`AR6e%{zjx>%<{$=t`E@I4D382n_;?(jDEXCGlnMkFwB;#TU5h9 zrPX~tVK9FbpDXnMF4=nJvRX-4zTI}>+(93^e>nKX6DYj)=OQ6V-HBV@{Q)QU-2O%mKSHeO4h*8 zryHO&_97(F&CoUV-zn>#z5tvcX~1{AzS6E9Mh42TNE?ur2zXoB6w~a(Y{rmPR~r7S z;W>mYi~_VN;zN4DY7E0YrA6b9P*$M{5y2>fCw7Xf88UA1W#@msAvK3@uC{=qhU2rI zx2Ph!9f38Z7_7C?k?1MRM0c|bDSze6tJ-OrPgU^^$<@9{G1o=|`lWmsDTyi znjSsNL)1JBGL%SSzSa}>5sMo!Y!=QaB9clRYixx4@954Z+1S7cEjM6d+9v}v_^}vg z)T2~%G8t`@#)WS;qOS3<(RAaTQB}xKyi#QS92&$Jf!)3j{saBCdHDg8%maI&X9JlE zSC>Hea&h#?EXTAqU5R?P80%(&rX)qL)Ith06sU9oP~JA03tuuO*+5l#hSh{XZ7SmG z@(dW`!73IEPPBK^gKgYb+kP2dZN3)`aeQ~IM4GnJpnk@0jw|RTZbUj+F5-g(syKtU zENdkArNo>_tVtgM)Q2=1uS#B;-y^{H3~oX2JtR)gd(T}--@*PH`=}fhO*wvUiBq)~ zX`rfXiJX@d`F+(>s5T3j<)uA(*nJ6E4wdoRT7sZ`6@lO{cs2{i z#lhRZpXNio;XY|E@#M;~cL=D6aKU?b`y~WuwwFB|r-;~~zDHn!RvpdZo%i0Y7b62u zJb;-gynRBK-DFWHBs5zZJ1i$Pi=Z8z;ae084_Gv&w?Q+(j#WoGZUPEbrerFkyE}Z) z-SIYkkYNXiGr1MhEP#WtXCo+OnB*&5v=n*tR50QjsXhapUtTY@asHX>cz_o6x*d_x zb0-7M^cArB)MEnO124t|tc3&vlpIa!&g#Oz;OW{>B4Fu?moOU727U)>{~3y2`U9v< z0id=(PPbzt0JYEl2cR|*4n0-Ic-B3$KZ1RCWK1Xw6(XB^@Z;SZWYTVj_p`pS!NY*6 za{@~X*zvTZ#Eg3A^8pw)o+=88j=vWc|tGyc1sKMjx$|06qr zS+zD+vURjN&~kK2@JqT<++AvUNKtP<%OS%Q)0*_5LLYvlQ4#^?ZYMJf08ae4P87iL z#`EBKJUl#CT?eq?GjY@(U{1P?!7#&AMIjXt;pd?UqR5%qqw`%Uynd}pE|vwl$khHJ zz)z(XxaF{IxL{nlPF<@p$(7+Dm@1{I0%#nVL=es;1m!~jY6ciMGx@Kv;fJbvJpzRMZFHW7q?6?>MUZS6k9pBhk+`LrcniEuYH(&@P3 z*f%pk#sd?Y6`WdIKy%+h5Ap>9G|M41310p$;qc?=l&O;8;9eJurGb}nQ&puX_fR3a zA~%AV{UZ_(5dj{u?sHjh7`l0Q#}QJ2nt*Q7=b_YTS{$W!s!jhH<5%~cG{KY$EluUJ z!|1yvk?4D(PbcX#Z3TR%p)k2~MJy&4T~}$H)j&5U!l)cq=%~PC*`>^3{!5`Yt1G^4 z@jQg{SECEF(_>@MTO=iTr3XMK@`Lu!BSW}5I$PJ<#a1AOn|!ec5QKfSw&exJ-nGYC zA3j30UJcxIj;K*kow>8SRi9FTUhWRj7feP}Pp_!Uo73|qaDsF8j1sw5 z$iCY1ZIX2`1{k$$5SPUcad^}(l#DH$f*g$~`%7<6B-Of+)h7j{LhdKLa z#QPtxy2dK?KSAX2=H*T5tft;kE(l&9@qu06O7U@BazwFn)-^-o!V<>vYAaodz#g+> z77?VNU!k*BGQhW3C@l1UwRlrNm5v=Eao{*h%~j+3LO|y_l;%jx>%Z1T2mx@`U`kGy zKc|Kw;cM^CrXaE(;nn5U#jxP0J6t9tn=>-~WFB}^!;k-JFZ$cxZXR?n0G3C=jf|Q4 zw~+XIf^O%~6an&8c{W?<*=Da{+;If94VjXS?+^`;bS_Hoi+!>I(VjZY?4S*MF0vO> z{o9M@O*A0qx3&t@ma$2U|2UVgkVQ<{v9C(=V7?ZSgW%U9Se81kp$}C{DaVQVBnjkHvg4jqZgJLflrM$D4gqr}-6lOXIL5-Uf2(@}ma{5ryi(!6-GFp1%kko= zD8+2YE5(tl{7sy@UCMVB?ymLtl8yIeOnq%j4@1B<$N&!bLNXPK^(FKD=pAhe{caH#`eVGOh9yb z07qX~<%rU~h)tu4-}(@9{(&|_E_x<8>3dl#R7woE(^CCzt&)Fj{8w)s;YkpB2f`jN9PmIpa>r;m4%d#3NoU^n}VV*@EQ>m{N!s8&N5PEdaG1FQ;hhxeZ@1ptcWu zB0bxttHiiaca+Q;aW4jXOz;8!uUZ6wzCi=nB@bnkJV7bp|`?0KQN$8 z&_zcj=}iGlz}}|}6GQp`?>ZmfEQ0Y;PaDFcPu!X)STDc!}hBxM`KImck6yQdPl9y24kwMmo>AhoL+WGRmllnHdTNG&cq;~<&4xr$C~@WhRrlRD6nd6&%$-BZ8knW5&_bB2 zDYh0f#uxEz0#s3GE+88qt{y<*!^3W>SmcJ>?|lb9F4uI0>2qMEMtBs#A?A-}k-G@T zx&@F65Z=&k1u%1(?a3*TmMTz&$=ua zsAmQ24|gQDNV-K%mwzEYSRRftYnHfEBw96-=rLU^yD@|YLl9Je?3DRQ*}w)u`M(eA z{v7XVjL+2>S%{}2>-0yjrzOuQX4H$aRAu^TXVPBCV>AdcRNRtXSIoG+8ygYAN;*t4 zdsk|0lD?*oOs+`-yyV8o#Iw-+{J+YhuK5{2^ZIojKz7$D45#ZIFngcwcvsKO(+J%y 
z$zSmLO!Ueb*J!7Yy14bBku#-0s(GSw7{)?2AA#|BD`xbQ0Vx0P%=90PUeZEbjr$D% zUpVsec~3xTha>2kB53JU>M(V0ndz9utK+x9dEfE|6(>Jy$yOSpbG-Bqm@uzFfBmVa zYXX#+SAB(4i2sfL`ull6dOzb)tkAv|X3u@QDTy6)sIz2RurZLpXuOGO{o45$KYK8R zT{gDRfjQOSR3Y6|09_GnBTt@ewIFH~oi#o~6Z1P<>fIfF=|IeeJ>=Sbfm=`heB2)<-?BKGPAv8cbD06SN?+3DD>-KEyJlB`2y{0fJ@j1 z$T-u{+NsxrP#J;13iE$T^RWYxh<;o3kN!w|RA2PITpm)`c~1y|y?%KW5+kj5-Xqa6a9X`USWM_zQ2jlh>$Z(+4~zRq*HD+2m1RZk-gj`5n^BONUy0 z1iMX!>#3q-HGmcS>DMY)vwmc?D<+Db%%{7*N7Q}(V30?$azy9-OCBDKiVg15a^V6% z{7SyMCbNlPWA`rKe5}U+x8Q=0z1u@S+1A5MnW!*KE-2swOltJl=*p1?!a@wE1L#?H zU*g3#7BFwu6`_Se+=-9oEqW}f$#52Ysht3z-x)bVI8XXo^*nMz2_f?RS0NMP{6!IB zbr$dpBiS0PmHV8k1PMNA+5rtM{y@gEAAC;bBu#bSO=|@l6c06?M@65r9j04$BLqGT za|Ax58s(KKkT3+qidwto1Exy{9D(Mg0Kp=!)7*_LhvbbgTfqo1jaxSe$ic>KT~$htYrals&=@Y2OdGv#!Fu<}|m zvyt#5j0J<4yI%T;KU}!#$oBa6&Z26>>^(ABo>YY+scoL+vV?*qMycqfy}>3ZMo&m{ z$3XCC#|P&@F+jBIal?my_5HZ!9Loc9zx-gRWF69Ci?xqbJmfaiJpI{PnGYCduO)$X zi~QgkGrEVYSu6m(2EvOJ@vYHztjuER;4{>4&2_`^$B4sD+L!eiu}^Nxb3-rHG!Q9E zq2xcbyvUpD&~^iK+FeQSDj&jhu~0z+qz14R21p%3_*LXxuc?oI6_pK~J4&s4pgJS| zr6sHJugFurC!`Y)FtYK>o&|orZ8^(1~gg7IRWE&sQjeRs>~WSfkGILvr#z0 z@i#lDKH!>%Tm=?&w=Su_eK!d8+r{)3>lv8~q+jfyhM(SKzwZRmURC?*NygswY64K# z6FAK~m!Z@I1#YqK)y4VnGC|_oIG&>jRNHT}V7fpTl&3{6e*&Iq^ABLjePDW`)GEbq zdFO1CGc-3hZNJ6;Xtblk`TcK1xftnbW2%D3yNHxWC>ci_?bmENWO=;vwWwjzeQ)OaRnOBBc@|z4CZ`lh4uzMyH{aG+y`B7uvf0gvCMa{U zc43KZzsFs(2=qPerxU->wB@(K1aOEj7K4}@9n{zR!z=gCQ$^V&n`XaNxRY(WAF9uH z4t+V=9d0KTvwko6?504$0W(;Z2sRVW>$bMuT2kfm_ZI+Z*BIi)K+ ztVF+D)!cuI7;|v1!hLk0Uu@8)Zj%*OZ*P^z{?(1>qjU0)kSv(Q);|8RYd7IU$Ywr- zTy-#NWckJ>39===&4e26^?M<`oB#=PLq<>31t%6wY_c@hfS4`(8jE;KCD7Q1`_X5D zeDe153s81@1hn_xDJ85gy0r8$>TONX*{5i&i>KVyF!uh{4XI^wf`k;$%k|~w>zHfy zZTwbb_PE4&JV z55t6fvg%vde$D?VJ%1Lft82KMUX3t(gv4K;Cpy}G`T$7fe>h`*Ua$miuG<+B26z+8 zfjwMlgfDs05Vrp>br)MAEdd!DGbe1!+)@W<1d2Q-L# zEZuJGtLhbBT79S0gk+{V+95`5Jvt=brb+y9VMmzrtP(-v&&VJedrnbTzx+#_V|!O{ zMGl>H`Q~bL$HKA40qEWB^c45PlE=Hk-5YBS7NAVsf=R=&p{Ujet)2_3dNXNzRkG;e z=;Qe!(2U)s#kOSZ6{>l&B53r@wCNnZ%@VN-6kPQs@%d->?cqYW5Q;LGFQ(JxIhdSg zS@S=Z6%qEYkJndOq!%W1DN@YV|U32vWE-z1331pGo zZ=$It_TJa3Dw)Y{#4FsM_kN_BCBe60J~-RGy_)cmQ-Q()#*yONAVejXK52yM=tqEN zkK%%#F`Zyt{>8#(8LX+_))s?1=+-?Z?5wCNYIu+GOZs2AWHp-HH7xsofpT;p4v;kj z4!wNzVl*xQLj}C7;@fd_<*qm#BO8{R@rQR)fhg--1&l0X{^IK#3rX|zl+yg(!ThB~ zFx-WDWF(8y)#W4MA0*=xck0bQhWq`3dB> zb(%*>lOLYa|4M^*ANf_+Yf&IJ^UwEL!qbRa7_=ht3F^)#yMr4TGOF>_`x%D&PU#_T zp7#L|L+iDHJD0C1IdOqOqbB0cP9O{s`}d3z$k;;|=ET{e!zZT}GucWl#DKw(nFkq% ziS6T2Qw8@_W;+kxJ-oy_ihp{*h_nTv$UtVh@OG%ESbtFUU6W(I=v|hD&cAKPFUyCl4%{X3` ztWKnQZkb#7Uw4D_Zr?Es=6};|?vVlLUg7qXeIuR-0!O;ZRIg3LR%RZ95YylVQ<{qq znvvQf$u-{QKS-5quhv=EHuSI?2i5E{%nQOBA3S|ySip=H2*|kJVDXCwR->RUk?s63 zFpXU1=EHfgT2^i4)8sD}-48u49JQU6BsI@)u=Pr%2$=EghH3J#{v@gsCn4 z%z5AmO3pvteIrLp>t0`7f{JHDK@=!ZRFgv;mFW*YAg84MQ zNJD#lyxvmR$jW0N1=KkvjQ&^Mj0l~L+4`2Oo+tIy1Hh2c=A5lb%-$`XE>BgEk~QKU zOYavefTklMVi4TEYSrzbejFs<)db^!5#R{NYa_xYfbKgc#2jr}WzLrcpx)?FaxjKv zdy7}*K8ZaD6TYd>u^D43`@|!cr<=_qFzGWB7(0%Fq5UMu<+p;pRwksB;HYvVwb|

KRKSoA9Kgy*DK=7FHIiHGfg7^3r z8E?!2_F4+U><}VjQ=G!w#93{>Ep0pjKQY?fgfe8^i5BHoQyj|)8Dah_KGbzf*iBjS zdp#lL2DD-9*{G_oRok0ZB#})6ZIc}snLRF-gthfkH4_V2)P({ZI{6cB${oj#WVTqP9M8)X(hi@%MQ_*i&7=nsNyo=) zwIp~=M|*75XGaZ$URX>PGgqT~n>>@w(d#&}smxLikz+g^Cg*YmJ?3L!_aaXG7=%&O zIb`?O3P2R)u(QcunH5TZj(6r#5Lf4-ipvo7N|@ApKp{3=Ra1bY0tG+{j8ON7(bU;7 zr+`K1={CUAHUk@hW$BY5PGu%^@CWbWky}2I+>dY|BU8z}%iFKy-p@C*XQoPD>j0Fg z6M5xYvZsjUOm$=@6>8>-gKQ5L1>h_Z3986ge2d8&r`y;=9AyF=rMrAk@JT_7D z)wo*6p+6k8pa*h{l3-^S5S3@XR22cDT23|fh4d7Q`Wop$L5Og}9p2kOVO|d0h~O_m zXfg8-M|vv4`4LDr!Vlw`j1lU^G#D5)MA}YR!iYO380rZ0rDr%g5#vRdg@bb3S2Ns- z+sCuFx>xnCz4Hj@Jej>N=*yxVj0pSdVjINCAy3}|8rd#d4<=nNT6PKajY?fdkoHXg zJb!JUwg94!4~zuu%cVd-(k<))qwPChrJ~{h5Ty6YGVEPg_Bg&cC1+$y zDv^0DvO|rvKCAkknB0*kW88PG@ zQ#oWo7;^g?ddN!12Uz@sl^LbFDQqR{3NH+Nvj7q!oOFI%KcC`6DN2q~CtGYgNi7!$ zjo)1pCEwk;t;t)Bj^1|i^^F{Ws1-4j&(Go}M;5UYB2l zwReUdtle33|LC(z0#e7N4t+Tmaw$mKcca$XU+-*rd3lKUvd_9<77ky>VULWqdsTj| zbSCxActsXl^|+5t*^x)bSqgFZaiv}_yV>JJnb#qfPmKEqcuNUO3aQo`oR0`zCgU!RxgXCO)MqQ2rs+aZ3I73_a%{G_MhiCv@RVd%h zPEBy1O|>AIzrWtU6AGm|zZZ-P3v!-GwLFN6ypgHP?54p;N0gUjvMpZuMv7qF`M3e9 z?h5nM?fv8=x5h$EWj*}4t>ZBzpDf-Qo>1t7y5}R;XQq<$y|1CtUIO)=-J*s;q-)Rb z+aLoKftcl0QvCU=d%x_(v~XfV&7Z&+VnF(Cnr^XZ_w@t@hEL3R7vrJ@6SK zDG$pg2O6}1vhUp3S9<;#?Gvl64#}5ieX?kL;5(aIQbvz5s1wKqK)M8w!i|~kZSqAX zq{F;J{M%+^d?oZ2#7LrXz#x68=H-dQ)|FdtAd?ee65;;xfl%o0(Uo@{&_Bb5BT#N= zV!sD?VV(mdn71~;K(xli-tzr$W@|+FePnj99H4&q3X5jDKgAq6bNlRC8t)zHK{YuQ zYwB1;DMK#s?*G}E+`P;K#R`q=5{rbM{zu(R@l%593iVAi)4*C_|LjL|Tj`xl-HTBr6Q8^oXA=+5Q z>sd+QvsBl3Q+1VH+22PMPLzeaRNezVLzP#a^v^~+W`P){Hcl5{!w0T4Bv5_VpJtHH z3);t8kgzXDCsK@3&C4c%g}&SObab!T*MJ9n{NmcD_GHOFMmdHs>>iAd=Q0jnK9J9l zyH&8iW7^d{EhY$UuTh&T*gZI6skNWc@?`fgBK&La`}`uvancYe@En*ihN0s#IhL#; zbNsJ^)h~{jhkJmYJwZORt+77C)@b>FQd)~m(y`wM0~?s;-}Y#Q9O0M{(b)(<_hsZ= zW{M*VPaJKm;&cC^xKZ4+?^mjVRnf0CI=z4wRTvd+V%ezQW9#1YQh@;N#Jvh$%i6f9 zTAcMnE^W9aqr7<*0E`A>>pOb!IB_P)0?Rl7jextvOfs(zxP#METB9x`hG^m&`6o*mmigDX4&|`P3Vn)|AGHr`fSNY8X)Q;E}z5vAe zf-La@40EYm(}BvR8)};2JqPypqOU{8+N+>JetEX!N%*7PFACOpFIc$b(g!Q$=xeyP zGnekkZ!T2h?|Z~?$LyswprRU*%yKgOX7GbesiWDxt-mT2_mgTs*(W_s=44ieNsA8p zc`?g^fE2KlA!}&V29T!1=6D+D5INUcoV%!Q%L0U8Nr6*GN^WxyfBgX+uk6zKR-aZg z6pD^^(iyrlF4@~#F#6jX6*=s!(^BpN-L?)gN9%pj^B48MBBwY{XMsF>_;bc{!~Vh! 
zOLA*imH+iVJB+MdiiX0Q)=c|w+d~%PbTdLr+Mspqj%z3)<4T;Qi9~F#WWP}M*Btjy z6&M}s3>ePtcbb)#VX}t^6xFgNZ#z`pDd(>iSm5wsOpQF;gc%0wS6NLAo=QA6sROLt zFHfS5pBBcbq`EEY4!hn=hMpKBe--w$sgL33CV^#b4jWHf3yK^$qy+qgl~aJdAU}AS zq(WbE7Z~&OCt>t@Qh9d8D+*b=aOf}wM}L0w)>)AowHm^u(_@qqICnToFUDp+o#sSI zect!trvJc8+28v`PbAWR9W?rRp~Ul#M-7Sq@<}p|Mx^b#{9)=KAvUUZ{Dl`71|o% z5M^H{oJ#Lcd=raSVAJDoVC0oPm+BUilsi-ND+f+DdV0Ta*0^`BX{tlU!L-fmHdI<| zv!O)V4PY3tgcY6_G4-#{RO)%0uz!x;L-6MS-0q|~j^%0{*wUHG&mQmprkk3-**c>0 zNUuu`Ig!GF#6SqJJj(A{(C7FOVmuA9cIBwN{O@j;%$*81ELL(oS&Y*$j^0=;0Mam7 z)~l){Lr14q$h20ES*RLcpXI+<`10{`#L=1Jb?;4I|7E{x38O@0Q-Qz3OlrxKhA5L- zfz__dSii*r^i&6_HwGC@T)DYT$@5iEHiiKtjv{KFTa-N0k?O01k3mrXNnN1R%0fd= zZ)M*v3M1!~nZJJomBhPB_-4Xm)GzmMIQ%~T?uhJN7i{G?|K5PYBiM1(hhsat1t{NW zuc2|$dp+euiEL5l8CMYMU%bz)s2&q4Z~i?QSgo^_GW1T9T@sJsmL39M*$u{G^|fWY z91X9#=;&H-Q-buFecd9(Z$QD-0z+?jslfRv&vx~gg=XS1o4ClHbTK2rw-rso!>&g< zO#_XJN$qydzyc?iy1Ke)-So2+7?s0<9Bg{DnbE!JCILejMfB_~1u_t)bd>1&alXQA zy{m7rH7loK?5=a;WxJ>Gx&an|N0yO_(M&&D)!D)rlCwA4d-#oC@6fr=9=+<1<49vk z3GF4ZvIlxxj+t5RVlo8<9Z0lS#aV9EyK2HyXNJhB_>6CnsjTWJZ7#(1PK&jRud;oD+Im)m=-+Mf!E9^e zZt{^=#|76SdfaXuI5DwOK6YBof*fQpOwqv>2Lr#Z2OMM^FER#<@YM^(G-Q6xu7(jO zc2$?JtDQgpOvM^PzRwA{v z>pf1;pY*gzG|P3?R@gga@J+wD-mN9PmjJuR&x*D{nNI`s(3D>Z9HDNQir^G!lwF-j zvA`=fX7*R`ng=RZB+FiaQQ<8&_|tlNWcmk}PJFsV7bLm%a+g`p*5-rv(Do;5a(KH3?XB{+(lU0OXu0_d9QrlF zpl~Tch9_MGW$d2z9ZW3LVP0wiN3Hw)d*YNCzN5^oMmFsw5hshE%G(>_J`*2V3Uici zwjQwdn1znaeAg(W7pq2b;8T`DT12AhM1oQ3$n_OdBVSu@Xlj!NU)k5}u%&~WUsaZW z->Cg`xQu-_@QS-)bTL$F`a3|)k9*c~AjS>`A4)4!-0T&kSYFZ_{a59tbpW$mIGR=} z94u=zc3z>AZqp0li%}4D{9`f&2dU`b7_Crwex@iY8b%yf7NP~0tD@eY+|+OT3s?1< zd&c*jT=GBzWeTi#kB1;Z^NY03m@S&o}SUp-6|W^C*=>@$wQ2U^0vjaU_@ zS>^^QFAPmuL5P&G%YyX#l~i5gmEL2+T=##74^9s8)GpLxw?2O$)3+jd))*;d&`SsQ z?ZsH;z>qs|NF#i|_B*zFF8tk1g#U1ME=4Ad=yobVP@(aQ^~=x0y+C6uW@P3XJm}Ta zpr(-f&g^)T4jL0?j$fjIp9rwV@StG0{7b`22H{%th}CkI1pB7c3G zt1t~QnM+c}xKZ4hXtmOt3l_3*D;cck6cv@Yv*IzuLG8B>L&O$ztxx+HjC9x9A5BrY zb4y}bfu^cOl?xy;J)5$z4DuqQbb6FfoE!=*LF+nOwMED=ZvhPwS00tRcqf==ATKK( z8#*|6cVcvHDoJi#`CvtW=Pi8u1*%LxSs6kc((H&k$;CW6XwdR`tBj^WNMZ^T^{XCw zafxi$&=D2kNI4Z@u~}@_G+p%SB(vMrO&M#ePQ)&*Fvp`SD(|)IKn_>4uxc?0^GvPf zWoQ09ALW=w$_*)It=V}=qt^l~YkiVnWXD55geqIPGwU-~1zPm+E@K{d%yAozSlQGs zz{(e5ZwF#!>!YLPz_4IK1)@x!aVesMuHQt%LIhvT?9n6I#?3;qE$D2l#q~{Qxt%Y; zk={Jswqhc9<$KPAhD7@y-y{q$)WlO6N_c(-4j=xyg1zOI9faV0`IFJJ~V70phkU9J(uOVs&nTQ*x8o`VVCK5{So~4`PoU0G{V;@;QRTS4&xgCv<0)*wJE~hUId*xNd<> zmm7JVusSqHtd8+Ct6*%L$|qPUhJuxBXld0+y<2CKIO<7;w9w%msz`NQw-8X1`xZ`EW~_RFA6(t! zyPdX8lQ`2!8lnT>N!8$YkUtJ?hw|Ka|+&)}?-o;!gb zngNIG@tF~WM_?aNJF7b}0hpuNd>CINA}D|T5%2~W*6UgDp;?%{T+2mdVy^;0{)9ou;HZi`J6dyR1nUv%NJ_s6bv8oe6GN^cx*4^V zRQUh2_ulbT_iy}oX(?$@l0<|M${rc1NV2lB8%DCSvuS7V9c9ZnMaVj|%#ghu<;W(- zJl6SM@1u0zeX9HWdHg=#zkc`KSgGT#u%}opqCIAadNc0j)`>|7ZQSq4_izEswf_q zs%Ma5xK(EzTv}L)XHuGHuRCHg4T!Hd8zSYu#5(oQ$^eAL?%w= z16A#!;YiITaP{{rcQT(CGKrdYTVQ(v>WInHXTI{DEaO$uj6GPI17=nCg~XF@3KCqv zZEsEpN0vC{y%8*MNZ7V4&auA8jj%%9kEa~0AQhIBTu4w@8d!YklI!b6yQ2PJz3o zJ4%qYSgDEW#ZE-WeOWF7>lu5SjPqP-I66>LFaOgFU7XEQ^3BOj9+$QbQuI5<#|_w6 zngk@>E3SQSt;PJ|6~Q`yz%0!pg9^0Ew4dk^EfyTHr^TO({V)@WcrW-qI>I2INMhF* zAN!+C(-!%lcXV#Fq_aYslj1>>NVN=2CIm|v!5n)ubAGaM1mkr^=hkQO!@KKZu{3h# z(Xq-Ph?5;D7k&EjF6G}!mqt%kq_$}WTD-8DyyWNH7eJA7hA0)JK`J<9c_%O}Y;^WJ zB&rYLYU;&K0EPO&7TN2faD_4m&8TJ}@$qS4Ba6#3bxfZ9pGp( zXC;@N_T`2I)X>K$MK%RER+(MkYKC~FGw3AY zO5!(c-oC5&N@>C)vM}bv-Of4{0=Ja&IILXdRX;674=ard_+qa1e|~WYU&fex)kFrp zCL5ae5S{izh|^xl9T>X(prGOk*w!kWAlPo%mz-

hsdP3LTa8R~{mV`unL<3b{KJj_c9Se=PkU%-q<8W-C;(&D#w zLpQceui%i7N;0mZ-e@Bos)~y#ZX$>`3f6~>5i6}uZ?LkwrMcof4-hPh2@GZ( z-A>Z0hf%%+@FIk4>sfZk#|+N@S6n~tj(vH0rIIDqW+q)LFOz3j8PFM>fdRHArO4@t0?-6n zkXd^Wm)hZ|Wii{5~blj_2Bwff>FDF1to!)k#-8Qrh zdD39woHH(Q0}@OYKrJG)GLIq$ClUB`Jfr1qI^#9HTr&lT-0)h5*NqB0x&GL z@6;=HL6orKzHZokC^NT>#Nkk09ks+t+PSb+{N{7^5_lN0m z3SgYA%f1ZMJ{~DbJ9$lN{idCw)BYnV>lfakbstvq<=ZaV4>i1ZxGmgTaH@80c%G|Y z#A9fFC-)UXCz374Bb@ru`uu~?TBC+5DKjDKh8hR!<01d2jP*SfMvJmmzHt7EyB^f9 z37QlRi4k=YK5e??6%GGAcRmsXC>0B=Vahlg*_4}9j4K^r@KSTk9YA6Mkh=fI+Et7P za`agj@?v|A4Dk;sFF;%A$DPMas$ShRmJ7}#Kdh%7T1o~=AP~gW%+Xnz@-^A7{37Sb zn%rW!@d$BuFZ+Q_Fsn~}UUph^Zo#}+BiqoxR4?``j4Br)*i%ovIXaK4=6X!C3k?po zcg@IR;mmEBT>|t-H`z=S$F1@F0c`j+-^w=oOPinvMhueBMjb^MjQ7JiUKG0a{`@8= z%He$w!19pu?;IKkfboYxC@vwm(8Y3Op6k8xEcl-ST{lnn&=#7HO({#2m(UeoZguJN z5s02rf37vt(}9FZ?rhmPt6Ms7uu)_sXW3jpX5KW{GHt}f3?gQBeZyz(ac`Qw{}PIm zv0F>UVLyzW?Jx_N-$#heJGKXZc&e=8%v+oN%e9Y1SkU{Hry#?_MoLN(l61@X5Aa^6 zhbsO#gK1%=k6{f>vZ6Z9qU*8Ma1$e@te;80zzDjonR}iYO!$55RDLrR0*8c@WDzWD zz0=0Q`8<|tMa{U0@K*bpWCh1l>Rd_ogC7sOs2#=Fk}g1wM=5oTh|3m`QC8*`NQXDy z1-;&FdvqD%91;sNBVp*pkLVGKN&v2_+t6Yj=~UHA7Z%RnMOtTHqaZ+|vqakw91Fem z1(01(u`hIRku^RVM8CAp0j7l}H&$lh71^ib=sHMaItM^Qx6yq?&1QfqWiU@qKe$hC z(!wlaH#)dr5ulNiz|ZzE&qB`u=o-Z*d~dK>Neoxe_K?wt87)f%q>1lo@71ZBC?SYl zOV~4!m|a&dPk?P#0AMNfMK98R9{MPMIGUH}P8ODXWTwS4%9H z)|O|lmMm^f%*TZSp1O`?mh|JKg+i!Dp8ovTPcaIpVJDP=7#>?XH= zpz@cdZT|Sd@Eryj3b|5zL(j*EAj3Fdv5+2BX{-C<9z-C*I&w^^L7rXYM>0Vi(T)5D zuieqNY&ADqZg>Ob@}f_u*aFlCz?(3tqj7Sg2AR8DlXa2r)91slDF`Pg=~^cCb#ll{ z5TriRY!OYC;yb%}VpK>(jmL0>9=EiojtzEv1@|S&!m+khDF{j|3Dm16P;w7pq>z|r z6`QEzv_BZsClWs-acRGL`7PWiv!4m@muLS42IG=<9hd{PUo*rBOV(w%Hf^TGtcYuX zNSSa!%eaG1qc5^7uih_2M1P|ucZ@)`v7rB)QSt=)qzhELXh>B~ey2WQR2L)K4;7Hkv9dQ)#msYs zoLrpQ^U+!Y7(PXY7HkArngEXKEG~)%`F_rZBocaC3`XAFlhq39ZsNrGFzeOU$$(8HLsk+i==$Ar_rr7q zp!7~Zd$t>H_n3YK+|_`Wr)sWkq;c-Xa;HWA+c~w!mo?x@h0Kn1 zso+J5i;=F{#jm2r@iNjy}E5I$cFi?3+migfs&aIA+E1|FRMK17> z*${>oy0TsPY*JN6&0|;rWf_mojI>haMTsfzlV9Yv{&Jz7W)HgpxEptoqD|lqEPS{p zoA&v`_3d~=|GR3Xcn1oOfq`CEh%?;D=Czc?np|aj-TcUT?V)Ka(GWKTL)=V0uQbFcf3)<{N2F8`y4*dRr=W0@S{U{rIt1XPivgIPeX@q63zZ;4i|k*SaRi7@U`54uSoc6|;gd;SuCm61AEN>VCt2G$u&~r_9|E7_a3KM3%F^VLiyPf##*)&+Icb$=oQ?mYR&Si4Cq{@18hM1^%)Zm z@ZAtpcs8O~f(Nk7iM@Vu!i=I^$YyCEHq!U@69Kb!F~sDAvU5(IQO0tM)67CBmTvX~ z4lBM493v{uKHU#Qsw@Jvi>k6a9vpH^K{%r^Q@~a@_V&R}`!6GM>pG~zCTxJ!Sb-!W z2EU)`zQ2+&0~IZV;zgQMDd^ldwUgeNbyMMdI4{=LF(k#rQj}~$g2y~n0qEh{krEk- z>d3D_eXVyX-=uB8-#0RO6I!|UP#sG2Mt0pq0A#tuZK0wN*S0F7HhPu2^eini5Oixn zEm{0m$m0wIY}UY3lc4Af6>6|hRqyd!WM0ovrNIQMS59-|932rU zD2fWB4k$gT^B|<9mHzsG8%VaPcJFL&U&<%()CmdzAsf!JFf^(G&`X}spbq}p73};g zyD`my&2%od_j;P*sVL(Q$T3Pcvl%u{>YaMMW#d)W=Y};0>UVe1@c;}S z>GZh^ke$s+Od-hBvw9=b07y|TFT{D$O3!>40?C6D&JZJGqY4}4p(QZI8P6Kj5!UTvO z+pgF((4hA~2yN?D*GT%(dqdZidzVas{3D8ooc~LR^_zT&Q0U=HusMvD<|xGr-I%;S zCylOmlr9#)`DjKj6OnfnTM&e4^A@FGc#heKxlPpwQ19u;Uj)T8KknWrZ$!)d9KuW* zvGTD()=3D~PvNq_=pqc=${%}?=dWD4pvSdbD3XwNJ_nJiE<+Im5SLc>Uq9wI)2!lN*cjJi6Hw=0pZwS^(i=iA2Fvfl0K*dE2RB%O zGI6PCHpZ%owbo}dE6)nStH=Av<0u4HcQToL2Nuj#U$HTVkEgacpt(=$%}fHLlW8B< zkOJ7cRduzyNzX`F90vl4@~oMeUe6Hf^h%f?CuP)-xVV?MR&-8*aAYH-?PBx2(Yi7g zQcb%r?Fm}Rux&R8s{~k{eu0fX&ln_jZH-ol-o#TkGUNQtbI`4+wd9P&(OY0aaPxzc zyLsy-Cd!H#%z*`g?aDVvqGRcB@4SbXM%6sP(iS9;N6)(?gJ^h^m9j}tT$8P?kg28+PP@bNY&_j)5S4gE{eZa z*;e>`Y1~<_XqL|Dc9{8$Hn#vTaj(t_w4JkB znVawPb1E3FA#t=W{}M$t8_Y;cFlRMoi|@k#U@QF~pqB5=-5wij*k>j+)^=pQrRscl zld3!c*%e1it&vO-`DnE9=m6^@6p(bcr}U5AAwmE%sIYc*?o{`5nPsFr4)`B*M%eZ97g z4QtIQX^RGR0VZ&2Adtv=%AKsSKDiX%-hJ}fl=VW-OvyFyFzr*9E`#Sfbl#ck-nxN- ziz(813OJwI0!k+a%*B*!h1In`hOE6j0SQwyDjnQ=D%Y0)QI<+T!5C6K(IsjkMr08P 
z4WNFxIDj}^kAxtM^mOWo(&xg?y3;#^&OrM$zC4Y9P%s!-1w>BMt9U@tlr{vVBVFh^ zTY7l0ZW=kG)ge~nTH>B3!|%3!VkhzDJ&*HhgS7v6p-3IRPjT>6h*iLmK-LvDt7J)}j&m;tp(~ zb5=x5+vF)X0RQ~~j4Mms50p;4;+|qSNrN@K?`)xRiASoWbAgfQ zryQoeh>HZuP?)!1ybLdd$_>`L$h|EA-#$3&qFX)Lts#Zm5OvSD0?~6{myh<4riiyq z#dVa-1DD6V#(}M)6;(s?e%_I^hj$gY$5_8)jK?jG>ezSW#+<{9oqZPOx9)Ps^yIbx zX*6Z!WaK0n=d048iaJ#nm*zmUWQ2%(!F7=Z+(fN6cF? z7dgeNoiN9K*=)=?$N25lP{xG^&WE^COvp+FzOcAq6HH40a%9G!Q}IYMdr9~>_gVbA5)8|u zoIahgGSJG6Npcjl4Fcsq&nx9kioT4Bg#DMK|VqJxn>ud4AU5@^gm;-g>glF5XJ!X;b{&H4B zs@yHOW0cI}>n5CQj$-m=1{O1xi#1Pb2@2|-C+F`~6pR+HXqn}NObMt%meiXq99MTj zRM(@7Enq8LG45e+T1(ej`bX&!dVO((j1&R35uZHA4Vf$Uj0#KA`__w8opztHK6LJc z?p?@!(>YSE=K7qbtd5NhFpK(NU02E z%spK@#EM-P7H(Gtu~dVHeHdC|+pXleE;|}GBs>~q-=vvq%8Ix_gA`U*D~HtFPIw*e7HRK7<VvZKkKH8FR}IGs%;-askWAE zb(|Rvxqi=5d}v;Pa}S$}42bcN;|wbmc%iFb&le(A zXEP4o2?2ysYd|Vu`q}Ve;!LVrXA+n+m5>gnsFFP`6%T~Eq;%PGw{2VWc2&qq=yf(i zD%6s>Yu8iXiq!tcgc*}BIb(|ZA}8-uc*$Drm_RLNb{SKg*CA;;N=sNzpY`bD##hH_ z518rFd|8U7Qhq`GJ1c`|DOY6W+*dW1sivnwMQ*gbCkMjlz zeF~zfOG=3d-u0J`dBT*id%h0O*X6{tI+vi5zIJ4?_CrWGC}F2`hJc7OuyQ?oK3qAk zm2(J!7XEf)9h=ku#@G+ja7G}CRYq~=s#axyNvO(1+A+9G90B8UU3zqWI;ZvPp2RS?A^!-fx3e?Db^N^aq+yKL!VE2uI$ngdu_t&Dox1 z8Ej%A1AFU$Y6;i{^+VG;aXDfRW3L|G1C2`Jqsl4_-eg-x6@RBGBjnoI-M_(k{rw)!3*~NGC);i1*K=MNIO1u#zP9d@(uf;s6jCU% zz%h*giuQgHTWWYY_XVn0&K$vf?z%*_Dj2VVVB}KsoguQbJoklz zej7YKL&I;H(pCmg#4&}FlV{%Biy>>EL~44>twGn~%mE=QeaphmxTQ)0{|tZky64MZ z7<{VF+(-};ipcuZ-%qieRr zUCe0^MMCZjF*SIhTQ~W}dWs97qeL_H7Xo10Hr-ok37N+zLlt5CTPGRY`0!Hh+HZ9H zEnw<=#1j=l2`B1&nS4B;10D@qfjBLKE5t0zt7RUW__{!l!N@K{0Wi-UGj(@i6N4o$ zzk9fw`_|XX8YQm9CEX)$3FJ3CO?7n=)bQl%9CwyR;$oJY`#YCYiyGa`b{8)h^9^_9 zXG6(JB`}!-`OBA35Z#JZQq4R}l~i*fcFOu@{@|b{P`)hh&(d-0KODGk%NqeHTOw1P zFO3-;@0ee0s9PxDY-IM;YuvLnR}4#&o@}hexc0!JMdSrR@+R2N`FF$p5b?SzycAjl zJwBZ31`Je(?f3!}%?jB16wTpE-=VEGpM| z0KfLoegIk4btL&9;p>>*ITM9U=|*T7#IS|d#(%JI-1eDnaRE~1y!JW4W9%+DV#A7U zbX}U17DA|~aAu}og!HPLwWLQt+hk40>A~GgWsF@6w1nMPv(s*GdUagqxJCl|P1o5v z+B4+@2r~311j*y{BHZ&zCuxqST!~`TdrPwQ70^w-KRgvCj;VzVU{{xSyu0A!9+#Df zofZEQehrJnEFtWvurN1ap|&ZN9LBH6dEB6iav-&06WPAtCdj3^zzMyO@&T|))Gc;o z%8=b9lH4awdNy}=C|Fo;1Ig_Qqhr11nxa^yrMk2mL5VXu2D!`Y6xdjrpOj5+EU-06 zRZUa-oOS{d1biMb1bg~h98E7X4R&68Fr_43r}YXTB}9$GiE46JXz4T6H0)R&CKN%o z({-Nfz;c%6l|l%iAM!gxp_rR0dSRFy)5)&Cf$v6>KN59;OlU?R*jgV>J5YR#mLOHG z+|OQqZ)kpL+fCOU0%?GA2@_TZb*Jy^&_1o6e$>}GeRrwZ&v_Xif#%0Y#`u>k1VD+{ z7Ab^TBJo5K#zVIEnggH%xns_N{%35&PX6Zp+m6{h7 z7@^-;(yK&YkrpDmMdHbR7=xG6HQK+7f%Pi@M3t{B`_5o2i=fVy#P;Eq40hmRC4fkl z1}3U8c2|`L?@MP!u#k$EW>9V!ZCyZ%V@7i6-`h8e-~80@ecmCCW^l%tKo8ywHD%oo zlJK=)WWin>A78}f;j`%1f?0c4-;CP}_?;eW=<1bQU7tYYsu>{I0|9`hfi6;{qS|>` zeODys1Yx<kJ zB{*UUFgfPrz7lin20( z{rOp)uPo!YHYe;jyUpJs;N}Sp4t2EZW;9bvYn&=Virju`# zm<|yRYbi}6I(Jt3%y9<>y&#b30=OBA>$rUt0t6d1lr9GxCu_A7byUnC;X-W1e|c8d zZ#=p2)${)K*JMuMHTNV)^Hj8MC=0xb%QubOE~E z%PCaoEmyS0(wQ(Knx9@V1%&fG&le|GMI&*84v}cL;oM&&ggbL=ZHTAFj{MUgc{d=- zNY4*8S~)Wt8)9@4_n7{2!8)qk3*TNDJ7f}s8rq6aqr)#u@wk}b_`ct^B$OOFi3N*t z#g5eZ<*Z`iM^#{lB;Q{;^kawC?}TYOlwP`c0Uf$=sD3+c=AQ-`xhkt@;F4aW*t_@a zs+fRpJg(zTruF@mb<;VOzuvvSE@gSg>ZB;y;5I(JdW@*fh->KX{s5+V^71iOWNv`g2it;bN1}&ULb?LK-K#u^dEQcZ%g@t zfAt*PgB%>al>ODKCyqGP-or4>@8?=H@)Y`?1}Vcsytr2xVew7l;^KZtk|eU5SE|3u z%fIP~zwX}OF2W@m;-T`8MC?Nq@BSaN-1T)AO!HTl?{|Ytpe7FG1u)6P`IVHER`&uK zY~!X^&kcTc_x`qff4d0jL} zl5+I>opg5}g8R+=tGoBN-TT{8{#NDwE5`m6V-NooV~BeG>*f2Mto{GJ7z-u?t5O?q z4A#bEg)m@%R6?F-0EKrnX1ne6h>1yc8tPXKWj0pi9T^Q63wCCV z=dASo^TX(>UF1OtN?%!U{`Q(N0;pmskdwvn1L6eMzH+}Xi0-Jjp!6n+Z^l>y5egt{ zO(aGmH2CBW)MvQikXd>46q&!p$2dbXKj)e%#=(w1KbU^ACfz`KFHU_|%ZMtd-o$U- zbrdpE;tm5VgFD^P8zGKr-5jNKRBWQsrcYBx&33P3b=VqA6_6*S+tH@#7T-_Tm`O|5&AF(?Nq9Y%Yr6qq6+phh 
zIdDlfwCk$2k>WcZbK?Uo&;aXAg?O3^`COt)c!Devis#Vm9|%Hqmylp((Yi3CV*L-S1M_?=UdL}p zqOYVxX3OE$9>6< z>ARe&l%4)$hMj9^jIhE^S2T0GXs%2->uP&mh*AGiSC)Zn1h;`FQej6ZEW?w`AJv^2 z#Dy=|dehZT`2@Q89Bfy}?Mgb^Gt!^kyI^Z#FdjwG??W{nL>KBDQ^!`zT@xh`2Gnk4 z&6lLPVh-x1y_+gqdvB7Y9KdgOzy6iE;*Sq0-(V^SFq>mpD7}8%gEq$F&hs~^Pu{xO zLw+N>{8U5VOZG&|Mp~S1a`>j1&F*dr4l24%vpY1hZy4xCwZXYHfF`&N(2}Dl6t#^k z0M*S3tD&|J;0%qoOLYfJz8GPX?_)=#>y~UQTbI0-n*5!Y2VZcmo{%pHg;suF(`u9n zxg+!>9>8mn1Vn*RIM^UFHQi1}=u7%Y;@cJJeofww_-Z$8HiaIZb7HXsdEHJ?IN$xv z>UtqS&cfyt)xi5}hCKbb@XQ+_kMqdqsh~aqF^#>-$~X=w4zUq`3+F7hq$I4C_kodb z-eVGMhv65=jMd#iyb#G<^AKW00!CR-4KiBGqQFH&sQ-;pmO;>%H789O3CLA{@y=LZuFEh+}RRW2kG<}O5E@1AjXwOT4;NxOREJo=|}uaWQp_a)f!9adv#rf@nll$=W5qFtbNEqV*6q)!!g=~M9A;P8_21r~(Xh{fMh2D8 zRx?2>&s-xc+m;#M5jmP<7w}&!K`ycc-xG|9>p-&S)t5*UeMmqN%4;#NGq=LI2lT)F z64tNH|7_E~4-w3M7*`L|OC_8moKbux%u@q(VH~+_3?o+jwMYI@rL3x*L^6AoAF!P7_qvVVYbMHCx$eFSo){AQRU`mEJjBHbpMDr)g zXszz`IpplW>?Ib*AS!V8L=q^akd9!p8d|NdhJ3Ahawrn)Xsu&I-TLk=>JQKF*638q zHk3nZN+bZy3f9{g*8BI`Cc^A6<^o03d1PqDWUE^&0~#S{-#5LADA(tTmjOn!0(ObM zna0ckzG{)>*x;njyy^n&R3r1QS4w5Rg6jL0jT~E^MMDQHZfK^MUfsqW^izeL1SgS- z*6#PA7jZdEGIMtRs6sD}M68Ky;7cNMvd-fIyqHJs=qaK?C%*XH6UZ0gw*34qokJM+ zl3kxLZlteM797DIMmTD}eTbD^eQ8i55mZv;5`HQXv|$Xq_s;+K_nzqmf@UZ*=#-n5 zaX_USSLYv7q z^NTl>8KPr_#I0TV$41Y%tZr(B9-Jp2s1j4xh%==(B!LV#e}IH}Ofu@wH=wBbamm z2+q;5av~y1bgTLWM+N%DjXiW#0U~dF7*Gl(N&*C~agcuK&{AN#m}ym}cW64=0_S~} z5W(r!(;;rM7dRzXhC1^sS`0o52RWauK;F}O0r1VG#cF)SRCa_>a#zmh3HBBA=Mq0F zI+kNZoXOq25-h){=pD8{P}rlImvPuiq%MxW4&g&JU7%Xwc&sgTkWr6A|B0;Wv`sOp zqexsRlDE>@HQLA@MT!TpG_(K9eE)nu2J`ArF~h-<)j!N+D*Xg8ZXv4S`^c5p^%z7$ zy+$_650ip)(=_c|ytLnJSD#vqX60FdVyuw(4j#i2eqCnY{EqBr2o4W(dtLQurBeN_ zvyiLkPS>|aHb$@2$WZEJWnK=*f!IJ~! zwwv+h=Y5^~7HE#WnS_;uWfkxxfklP1|K)?_jP{UK>N+Oo+P4@bQ$rLww;={5eoij; z9#IM)e_g(d^v~7@)44`7%*#CHh?ZI5HQ1=*Q}w%-4eGmwVpM5Q-g=I-&W)Ms6TIUb zlp0{sz~BqK_ip9BrP1*{4}hXu84NmzfrG+i=PHmvr9^#OI2=smU>c6bdKnvkhiw_ySJlIoXFgWnS;~j^GkK8bnvpo^810 z;7#`;=<@mV+j-#a(Z+j{_rTjX-asz=+$|)~brJ;_K+w*I4YB!`%7rI4e^yVpRMDUHay0E_h8@C&@&g z7ZguUdJh>-jPe*kaU&I^yrASZCfk;(Yrcrr7@a+^^s468 z;1{0$Mh6QAHK&FID2V1{4|@@H3Z42Wz~Y&77us?uiMb4!syR$G`8OX8JMls}w$8*G zmQn@cL%o06SmgT^yPS$-g6L0PonYzR(lcAY+m@Bemq8dv@xsPy1Z5ceVeb2{C2&k? 
ziQs-&6rm{Gil~qvbL5j&}GG@zf#f%Bqw)kA$*PvF%*n zXXq|*eJ#(`dQrWLN9#swJ0u@=8G->2xgVe8*f#6qY$nT_BZ`e0zS1dx%i*YRn)SDM z_P`8N(VBZkxl_3r=K}f4{m>@p{MlZpy@-gXeTeaT>LKs??^joXUiAsU4LD%JapgR3 z$UBuM7q+0qpQ^HF!f*RbY$VN{+Lh;}Db3=ZB1vQ~GaL&tvd; zkir)$v-N&R{f`@wnvqb2E_7g4p>SH)xN~wBauO{&pJ1xN7^Kd-@jy)vNAA<|kUR*r z`@~(ft-pPE^el7&*l-+B1#d@95c#(l5gXh_S*AMI?`9y_;S__Nd8m`X+K%ZI|J-!A zq5mNd+&tqlNHWwPY^O!+U(089DYi@)MF&~5C62>k<5M+v=hOWh__E48?C!go>v7~E zc~}}2OrZ3x8=K&OB3UWCK=4sf=JzE^dkI#EbaCDL?!Y4Yn^g5Ol6ehH^=^&F_pL9l znjGjE6%v={LE}GP%U>M6)~C|{apXMuDMgsg7Od9Am5=uFzkSdR!I?Fs1S|nf`Ds+v zhHoeIFq}}Inm`65mj}RKq6|Y>H)16GODfQUU4;k4rKfQb; z=D>iFeVAY{rjEY+9 zvYSSposW?%vE~S{75-QH-CF``ck2TQBxc6AAudDv=G!wwqhf`ou7W3?YcBmp&W&a) zq<~Yo_$8WA;K8LVx6t-8L5$CpWuK9HcCamoe@wC@R97Y#gRGyL6Bo1|!q9Y&`J7g9 zGon4H*b7NUb@+ruA$4)i&b)S>H=3IY)0_y7aZ3}~J5LHgUF8Q=kP$L0Bwou?LA06r z9JgB&Ehc!W?(XkKZ_p;lJfn+%$$DlYda~11*@}gQSf;uB|tv!S~rJ$Yj%Y8fm?&aM;;1 zzXk8Sa;&ZZ0J=)BrP2?65wE|FW731{_xfEWv8(3hq=-2MAJGh*mpe#4xmbyqgES?W zb2q$^+qvr{Xpd7~S)yx#X#KT~;43i#*|bVCcAtoCuyy82vMDt4(+J}+{_?dUaR_`T9WB6a(_CwzdF=WO~&FXkQa99 zL%qK#?Xpm6#OoR#qt~;i(mG$+8hWI=^AviVGdDO8#UWFWcgML~$CQ;xPb)BicJ)pA zh$m_+xphq(xSu6K72!N>Mp`&byl#Y=aSrTs(ai10Rc-0N5RQLG>lW%TaiK~xB;Hr} zJkpHsf8-Bd=!I;jFvuD;G*fLH>lA`4l;)0;BUXvg_}OZ|N-sa{*qm+6Y3i#*Zfyqr zZH1x?1OMP^N3`$5)$qPZy$kScQ0}Tp?~hp9#;-_3b&pG#;SA&DRu43Cs#ErmK*m!b6fnk`jvUf)soXbge?+5zf4h8|)gdZaxM3<>>zi>R|W^ny^xAag-4ncUHH5)|nk*z`_4mch_hT){gJsC2d*K}UjFvDPyG4&$jw%?0Bw}%Gp`aVDp9)( zfGPKlrj(DvdmfMn{ao1bTWvzYbQ!eexzu^#K#NH`%S%Ddeg1oFS6yRzH(2?pysI)l zE;AX@v)OeBQ{tw}k(zy$QD~(r%3wfrN399|$-ae|uvlzF{b~Kzi>NO-l|U*}?s1-y z10vZpBPCY#(=I9o%q~mvHeURJU6Kz~>Hql9&bxZIc7x?jcS5l6FV1su>0Wj1!+>~K z7c$V!ifir=@8?d%q{#G)P-gc6P~49o5~&JZY=DM@w=&yxSxP52mKZlCIdJIh-N&F; zsLZT~%Q#kw&(FG#j}>yQsjE;RuWa>;!H)!@@YML!!}x{zYY1}2`-8o2iIiHgq(UmmtR{fMv)xq-rH43m1Ark2`Y z**g9T)Ic_X^tDOK3l0ZM66@$c9PqaYFG$4j<3q*n95<0bUm4E&#&datpgPXiW>-N7 zWYsReDuqldeV~D5K|+HbUpJ)1C#;0Z5yL5JV+9v89LY7Z7t}S1ZzL`)EemAX)mWf& zQSA1RE{2qdgOumV;*4im>v1_E#yBR7GXT^B@s!Kl2TSUsr1Pb5J{Lt6TmmenzbK6d zIL9{`>aFUAjZkI~A?6WNz=>*@zKn#&@Pj{8TdV-?osa1~J53!Zf5UCsc05%hJ1M}H zH~%8m#7cN&L<+q^F4seXkBs&u6a=8$P6y>lpx20XSPp+w<&dJl>dEW^Aj01M=?}u! zlWp4&tJSKV4<(OQmxJDQOf= zWpE1WKWIBozPFlbR+lYyoQdnoX|6D()vwvPrvu*wn@PPT+nQ`Aghwu|EqBdqomp6OVaOlU}BsNWx zv;x@439Z9}g{;49EEt>i;k*7(VqtcS9Xa1nizKP;ixVjcmWJfnsi-4*cixabW&7g; zThup!N)G~c!)<+@ALhy|nKP6^;RXkXTHLkhnC8o){+poyiMsDe+_|B~WWVXrc4dHb z_g2`opZi3D)q157-9nEMOjNz4Q<--&TQK7k+VXtb*MdNcPllG7cbuVg>?d%q-qc+B zh9Mt^Q?gSz%Jt0&Ta#BK#9RudO2GFO5+H$Q?Yt&VkWtS8t=ofmoC!dwXNN?! 
z=c7=jBJQ*_PXv<$+OU7Ojah}jb*1cF=|4W4G9wDkXGokzmB(IlmAyo?4sVxw?qxqC zLYNtVa>oqBT>rQdASgFHzjC@A>V6nNWRL7hohkRW^*A5Ne49mjOhdc$3!^_iM7aFU zOj+C2HFuwQ9pSI5$Q>PJh=22ACw~9w2rQw3=GPzh`TMv0@bJHX@@PYxiNcDM<7<{7 zQGoyYum1=v{1ERyf1%|ev-HdZbrhU*zcqIkLmyA2M-az_=T7E zwjT%N8<6{pG5@C}`0o<`{xE!B_J5c7`ziVN6o0>Ye-jk`Y+s>lVRhS!YyC)4KI8>f z8Ah2gBViOyoq`I7fyXRJV~XKztBCN=vSIB6J_i`kE$wr{2?$e!Y~(j>qqpz24byh$ z56N=ITeT)xKt(lh2#yNPG%4#0C=+vweO9nMA}ebH%Mc4~vjUjqA}NbF*?%>LAO9B44n{+MyCM&q>QdFqqirvcZc&A` z)vAKfjs&PjCYkS7)0%$2wv4d0*C_X?BHVM8UZ@xo0<~N-k9tD|Iaw%qQxex=!1Vpn ztZnb#R+LTnTxz7E66CGxYu){8YQ>KUtbIBGm1V>Y zJ(Lhv2dbdTSYZRC_ia~FQ)4b!!g)0Td8^k}V-+j+Aeao8OTJ`#dwiWZBV(YHWIgE( zvkOg!wLhF|lx*~glDw@M?4R(elHZeg4{2Plp(5pLgKfLd?aJ`(c&Kzc?qSN%lZs$^ zQihcGuSnm9uV;#&D?v5e#|&uRvI0%gGcS&<4gAD0_N~jkFA@= zF(q^;NK0vZDE`Sq=1?XiWI#8|Fqt05)sCUW)@?t2`&+|OmGy7kl%uRINH(4)Gwmr7 zg2idddJVqVXW(npV=x$;z>c*(3|D>n}d|>y>_uExES!4{L;c0sRa2PvVyOacf;zfBmHruCS#mzDz$Fb^mgJ9`8u* z6W;ZE{_<%3W%~F{+X;A)Q2(D#&QD+Z@524O8~?8s&XcsO(4LD^4}CJ!4hrtoH#RCC z$q)bS5%cJPeePJ6RdAPhy9u0KN+{Ch4Mn=3(&p0Sg-A$>dN_kQg6& zx+2$kd?sOJWCR&Q*P5wgBJ&yi@<*tW72$${0!`tGU4J=)KaEJj8ir9bR3!%&=BvtO z$P>53RzUNYJlxyf-pi&Kba6<0?a$lsm+#;`2Sa$0r5yriI-X=FotQTy>6QZ+&_M!4 z;$6Fn9C#q;(9&YRgW?yr_1nR>$#wr!lD%jt0y4mxLJ*pyK?>Z}t|_%c+sVn_1D2sB zG`#%}b2triAZM-OL*{U%7a~$GpveJCN$v~)6fD=Ej*6K}-|;{Ez7a?&kwUsW=$Be5 z=mEZd8PrfM7q#er9l>LC_44H>|F8yUU=94^w1Z#`6gEN^u_&aWL6BbO!W2}mKVi{R zl#8|u|NW(Zk_~dtv?GTds(eprKZL^5eBj;}-a?AyI<@?M z@z;5L*}8uvsx^WJ4oGim>EM$RKnwRDI^?=AS|VNqbO_VZg|b zuoR%QMpPL7L1`U5av5^Ga#Q{=Y~)A&3ZS%5%8&l6+9aO%!!=QCkNbl}KsH!H3^oKU z&$QMp`_~uGLlT6Uq=DCJv+&=)8V~D=7Y=#&`}0dY8~-lc&zI}})xvGt23MSkQ~T8} zC`RX*nVAV)j+Bbv?$&p9YAK%9|LvKS;DCdXflg$SppPicF=_n_)n>h*G@FBVRy|=| zFZhIhuV25u^y=oPS95<7IoMrr_y?7?k!;+y?VyO=F-T*f0}jGm@DRj{XrSApFXWd0 zH>xY%t+Aw}L|h80RR>2uJdJ;%M(4tFTkTB+_Qu@ z_Wu3*=k+iPf4Fl`co3b@?xlt3jEf%2i*w~j>EfZqRNW=$x%t8wk4yEvCH;rhI}WS& zcAK>hjL;c2ZxZ5tx#acD&3or2Fn&l1&6tJDPloAVg~Z2v5FfQ;Wi{}R$Dfvd{dyCj z9Zo{kiLDrg*_*F#Z-PqjK9#yo|97h61{t`4%&>?(``*;YOJ9S~*FPxj0Yqt+FbaTe z4oVwZh`q=4SNDO@b70OqZVM2^J)x+J{B#x1AGY8=d8VN*o9zjFOa^S5{LbYk9ldem*M)BU%JXePr5?=s0 zvF)WX1;-zTwB|}kaCig+1nfU>z!Ly^X$y;qw%DAF39C)Hf2B{FD333-j_Ta_{dW^} z+BpXBqb?@w|J7IgMS>}UX{ncOpZZUJ)Fby#;xYkO|MHu^Ix`Y-aM)^k5}AMX!yk&p z;|QFoBWFEN{r=bzzkmLAJ`PUrz*8u)5IREEb cb!F3*jZUA*bObxr!GAJm<@l}$PCHKR>{at_TFS1d(W0VA}J$Ep=`1;lf9K}PImSl$2#}* z4jte6-1qPExbHvjZ;ywZVeO<5le61@$K~4f6mlPKUgW*d_irt05aKKB9K%C>? 
z*NgM7wP7&aZ|1jdDM;PAMXg{5H!-&~hQTBQqM}bIMD`NFl)61ow-B^15$ z6XX2bhpCU~L>VtVd84OFtZJitmf&;lgESi4OziM{YT|T8o>?*@Ma6`mPdy5*B8xaB z+Y@EH3(W(?x^k^TU$f3eqXnF>LLS?<7IXkh45iS-SiOsijCAdFljZBxO1-o*tYE>N1 z?sAd`hM_XkbL!eN5`2zvCnWe>gO@frVpDN+gSlb#T$J)LxT2i3tLYwI%oBbSemNOx z)aML?caO_^k;?ll5d^207#oM>o-(v=*1;5IFl?6Fi)`zx*pz?1J-KvuTu`U`Mfk25 zUxbX#XOt^9N;CJ`3cbM#gP`a1_<%@>;YSU#rz4|hkj5wFC2udo}thkJ>TY^$L;v!oiiOKQCH9q81@a>+_I6*SDiZ2d2Hk(+t z&OSPZcdn9B%kD9r1cGtXL>l+)o6e6~#3yF1T?%5E=$A3>?eFWy2wtDvAE$$p_cGWb~=IurV)t2H7`FMTzKqiAS^~;)Aj3)?Vy(;Ol9}(Q4QcBa zTN2xOsz8SsAQ2IMPb_%sQqWUn zNv6A@_h<#4x{aBRU9cl9_IIG&2-h)!xdwTN+cM%S-*!|~av*IK=QG10ecXCOsRL}a#pI3_FUK1y1<4J{UCDW`N$ocs3Hw5 z+D!cJfRcd7VVz;&;Q$x` zEd4rxpCnxbeM8{b4P4qK$>O&LUx~kZeP#NJe^+FRR^2yUzJTFGIAOiP@*TU&u2e`m zAFfK@c$=;D`p$4-74~DD$MRf6F_&ASpG!6+WhBkL&i^*wG1M{Tp!A7X$oG}B zu0&Q!!fTOa?|VkfeT;TQ*nv&wEzZXh!5Pa<6fV%8I&H|76t38Cy}78?<(-(~HRWaH z5hhmU4~qQBok?78o9`7UomOhStCLnLk9@OiPZSaOKCmKC^qu58x$)pTRD^Y023yyV z&C1Q%&GyY+%5OfrWLD+Y&cCROZ9^SMp8F zS=;+E9pz@C3O*E+@aUF^ZHKRWQrxS%%W%{5sa$+3QT!!sp+QmYfO+CU&QDpZool6Q zU)Pvd<)-m}a;^!j-QFVFN?Fy-?o=aJp-&5Re9mo_Xn58r+%)@JVv}A^d-K&kn|Jz= z!?d&?2>KaoQmazGn6;}RT%If$j2<7|raw<1IO@oO^|{m2&V3G|jtB zHoAtszJlu-dN=vbpZ$^D{QCQFNK#Z%yKTv+Hbe5aQz_r&DCBhGwd3BkhO{1It7Vg7 z)6{nB`7mpo zPHy#r>aV0;iMTbEo+@?G$=8uD4JpMQDk)2_y>fjjZa1!5xIp-BLXt3*$Dl{_X2JIG z4(9HioliT;J4w4~yHqDG9DjS93+vQzerz^e%QG}skFlb#*K8b%J*Zh{ZQ9}uCsWkw4|pH zI1vQV^3%S2xR4bRd*b4+zh`D~Zm9 ztmnMXnq`M#1nW1(;S&|-UGSK(j@>+?U%aRuC~?QNaphiEXUO|w<~rt4=2>FpZ|--* zt|qQhOi^8{y0&f(WxTy#6V|^woy84!qpdOLnggn*;AO`#l{-9hY~Nxq7mn^QwHR5{QRMmPj10 z@Tip<*e;xFjfj_i`mRA*;Q2*X7LBe7K^x={Q76$wmdwnL+Un)YVahpaeSr=^&q>xi zNj9}_ zRO8b0gjM&$w-4~{%}y?>@4cT)X8f!kSn(FP!{Gz3OOhPwCaG?V3=Li^$}8^rSdbg4 z7OlJCpxT_-n$RQieW9yQ`_uLeRUy~e0!NfqojP8lnU=z@_E+26i|EZjkCQ%Ys5|dS|Wl=WoR+i!J zw$VVQ!z+haEpgo=-sl?d=Y%^W0?O?R8FD0?RBbkpWzFvJ`AGzoSN6`OVf_Z$cM?%y zj+EVd_m?apo7J7(bZ2ceM%FW&+1m7E_ijSYNqcuFZGCGr-D;ao=ob+YK6}+>w`P`P zm;ExkpDq#V7LrO+H?~4}xnyHO_4w%VZG^VsudP*U7CE*}jAN?j<}6Akzl#vfuW#^!*U+EYX{djF#lb>| zMom_M`WD>In3|i7i|s0nFfKJUwV<7m3IAO&@q^33J0Th~2L~H|c6MiHXEtX}Hn^QB zI|m;hAN$p7?ANZbf^V?eyIMQwyRcf@)9&Bo@IGS3_J(%mHV)=+Yij7e`UY@E2O%07 z=s~~#?XT0=#T@-4Yx{$3feo@lpRjYVU1k4$Z*Zv~^jCfba~ESvH8FE5Kr`?R;cHj9 zt_%LU;IEI+kKDgh+1}Xh7TgM4=^%`z`oYCVKm7B;U(ZxWKa-P>>*ynod~$H5AUm}6 zBc#};^RK@GLJQ*xvj1k9Fz)t7_XOZ0xw)A9J@6Yy*{^>!;D1K&1N{a+{Cbhy79d39 zzak}eR7azwi%X`Z<&=+RlVvzaYn{$%~7wCf6s2 zJ#|9uTaj^1liPczyyM%DuVR$fq}6cFQj7Y)Ffg(HcQ1Uk@H)+4^^UuA^8d&0fm`5W zlEQW_{f{?fKHCmcz`o6e^}m1U{(2HIDiQyurTzL=DkcU#Oj$wf*8i{%3}|uq|6LrK z0{^AlKZwkKWezPM5P5w6ueDW~W%0+uGqo+e2y#b9N83H*WSw=%oJzi7MwhS9QZ*rm z*Un~-!#CN{^t{$U>%mXTGYTO=f2zAefzNTD+>_czOposs+$(C?)CCByZM7VO+n37P zy!Td?I+f&&Ih6U{;WWqcvTv==X{HaSDkd2W6xlLK^vfQ({GUyWGLy}G@nr40XdMl5 z&B@mI(Xx#p7HiFha?#~lRFtZYx{Sk*O5dfiNO|JIjQ^w%bVDxUc36AQ7vlY!|1zk` zH0>$!2`>bkt!=rs=S$!+uLO&WtS)b33c0TbBUdI#2!)Vik$4FXjPuzy|5{(Wr06kG z+S6o5%<{d5mFenvZ@f`^Ay4d@SPQEI7+&?5C4|i43a&JBa_Ck1h@Yf~qZsmF|6tU+ z*b*`H=GfNHp+!F2(6@=bLa;WJLX-S+sKES%HW8G^N2~sVqP9B)*leGEzA0L)!V8k3 zw5=xKupOx$DIogGVEZNms~Qv2a|^uz(N^q;sXbMc$_|C|06`sU=EC((4SFQ}Z!s-P5dTlN3+ z>C??O3EKmc2DQN#p9+cdmFmm8Ydwj@)5?-((7+ z$10CkJ6jyt2{ZfRN&-qyJcLxabkTETkVJyY)4{g?&N{Yr=~C?lP7|3+9bb1kipu)&%&rkk336Mp z0dLgqaTwV+$MJ1Zh6`fSt6%Y!8Qy+tjt z1KqMmHo3pB@6?GHiKHLQPN7xcgQy;Id7Q(vHSUFwyHn}Ta#KgS=l0LuJkwK@f<;#^ zdt9pq(owWEoi^Hj>rBX?Q-4NBs%nOY@^vpDA+|mGenb_Q5I?EOSee6R5Z?V(g*AC? 
z!kRBWp@5MJW-T1JwzaXK`{L=a&#}|~t?>eNz=Gf=m`e_=>v~E}m#lSZ){((W2~6W` z#kCy2E2sHo93kQXisGUL%f3jhe}otsD%7qv-rIA9bv-$Tw5~1{6L*!A2zR&Ef^IxK zUK7M#ej!X`&n3>L>g3}SEooE&Rv?|@zEDQXwO#am(`J@lp@F4Su7pR_Lx3AqoD7D;=u7g8Ra8t4v^A2Pq8Cf zVW#h!<+YY(+a|Pee&MdBCK=F5ONWIap}n0+k-7!1jS|Sja=c2{@>r^|9de!I?sJ~W zD6ExYh(G_u?@U<9LV1Z|vv?D(;MUJ14u_xZRj2S9;v;&=*ouBUxIXd15Es5BvRHZS z@tkqfb0R{%pNS7g&UkO$vK3I5A{7)5qomN?86g>+&13%|c{Y+k-gWBlRKng=!bpn9u6dH7 z;~#AqyUp0y*DRG;7}64(f99_aqbj5y+D1zu=A9 z4w)CU8>=q@N<=ni(IYv%(WMEm0S+y&1%}U2aYQB$>{25kh!S2UjT;5G5 zQG#2lP`kuwY4OXMy6{W4Vyb8BUx_aFKH1ydt_vh$iJ^0bY~)FXxOVWkzbO?(0$Aj# zq)K;S`+&CS31OYS{==v0-jcZ#N@zdlNymv;al03K9 zXIz#23JWSDE=wDP39b99>bSmZ$Z#I8t#euLHBGwZcUsoK>G0m@r`?kS#2LS$9q=FI zzUnmFQv|$Ax7#{4iPl*X9hYGZLnjkyz2&in$89|6b4sviEjy_=_xa)k#^J|!B(*P1 zU#Xj&E8ZLlN`fQG6Mpi`(V%Ivzk_=vfV20rvX{^ts`nLLu5_snzw}g4RL6b_2xM~% zH_O)QRGvv{$2tUfM(e`DUUHh&+P4Xj0OKA45K#4y`vQe7@B&HB3qv8Hg&o&0rD!F- zw&zTIeJ365GMC?N5Gl)}&Uw#H;N``U1BuVJ8FJz2Z}p|grhvdiqo8#f!|z0OT0czcxspJX9K|I%$Qx?;!p z6qT??hOGb#gMz9m3D7ee2ujGZ#dQ9l+8d^dnPtx(1=c1A?!Db#bBhaho+O=Y z!OU(NoH(L;WOsgKmdpnlhVmsh%mL`oBssJQ?=P^5>|siYJ2zSzVv+0JLIIp+K&Buh`XSlQGfj0RYdiMIM9h+omqCw-!RCRsxYP zsUj|tNA!}D663@0&v_j&?fXoO0rWAuZ*boWZKX+|xetHCHCMd|Amo5E!ZW0*A>~5EN|&)Zus^_E3Ue6N z28;Vyz|5-kAEJeKe}(@LJbZj6dM(8LTKqP5w-#Yaf|uL0RRnL-2ErRfzr>K>aSf=F z+CYU_$D5nCM1k{wytX9;s6eXPcnXW~ZpFU)0*U%_@@e zvuA`>6@o8v(pwh|Kk2k5i{Vre`2j_AESW_HYufK;5WDlvqaV^T1FZ@Nc_!KtbOq&( zi9W5uJwnW1VwV^I^n;%6sFJpy!)!RVh1$wOF_*sg1{md8>FoqzPlNe^;*e0lJ=Hg+ zZLexPqjlWOAzidxDBGw7ayUM+$E^xs_o^?`Pj*GNI>JT-5Jp`mZp1Ok(o=GDbC-Cbr72^Hm244ENl!vxO(yAd)!SVAJaU{g&XaekB( z7IiK;+tS3H&Zl2F<)2!Lv5KUb;okWHnErWH{G z87hq(7*}m_hxQw`Aeu`rFKKYQW?xypJ+C>CAfqedw)%6VAj*+M(nRwoFqR%WOLdPU z_eMqbXv^30voh8&ftgbyclqHPV!&Ryc#{qv>)msst|t)ay=w6V@`1pzTXC&CJnnPX z%>b?0zgAya3&n$mD7>p)XrP)wmyMi=tpk`&v#5WqTbBy4Gr_C0FwlOb-KO%AANe^b z+92b>s{r9l@6W*!X9vKDjy-N(dihu6zz6EuZa{XbWHwjYw!iTc9$ms+$>cG(niH_X zRSZNtgAU)GXxAVZ;vMb|xXonL%w>6uvvdqJpK16E!OmbR5n(xjxsO#0w6ENznebt^ zZ?pWdY?$v?0E6yq`yUBo6)-^XG7s_xL|L6l5@9bmP34BImPcAEGU&cFZY&@~p0n%f zoZ6lA-ZMQ##*o11&a{$$;rtlrnRQaPx-ZQf;o zyG3PQT)h+#LocnZtv&qN^Py`lJl3j!sd+DZ3n2=u&NxZM=4}wf<+dk2^|(p>(h4Al zm^%eWAj`fto&u}>Y9)3=a8ko7SEs%iW|ghqcgQ!=*n>#@SRRuLYY0&0IsntOuI&Ta zhiH=9GP8RDAn4l_Uc0z0$0_(7{6RP=eyzl3Jo4nua&$#-miO+AtehNWC&QYo@Itu9 z#4t!@FjI2c{HVOS8)3dH&1yf{8X~mbLn{n!E-Bm}&P7ZY1VrQhpP~Wttp%$kb?-Ui z!4k2tB6v6b&QJ}FH8#W!OHer88!lN~nQRLi!(Ze@c;vYZM3=9=D;fH2JWniL-D9ITR&vg4EWWFcF+5dd)m&rOX zTV4}HQb!;`uILge$hT>syR89vk(}}8nmq2A$G&1ihNH?77qSX{eSIUo4KjFAtdaCS zV@j}@nVpAJJJhWJUROcxPNVk^vsVATqNJD;|CjM9dm!J6cN=b3!FSiaG#Z&};Iv z8PjHS*pHWd-uXDzE3d7o3xX~-bO{76I1I&Cfe}+w0c^GHIYO6xVDjudKyeWgM__?( zDUKH8o@A&hJdn$s&mwu7P>X&d)fgwp!`*^KXHL>3nD+oxr+umZ{X4<}x-Yt!@d-D|l#gi1qe!AX!0w0#4N#acV2Y_2< z1sg%EMIKG@wBVG2c*eH=lHd8EE%^1-V9r+mw9HQ*^uI0OQ)kZ=^w$EX`34|DC^YUo zHPI4Faz=1bltb9ECwsHmR9I<17`3y4CyP}>@wZnY%K;8Cq-gVr+;0f1XGq|(HI-rr z3^3E_4S?EzX-$n4O9p#81pZLF8%^a)%`Z8VbSu1!b?dN?Se$*n|3g*^x_N0HXq{erpYkfu_t*(y`-E%8 zcj)r?{vi?i4_Ho_5z?DF;4&31^nw`fEPz(;s}xlb;asqLy0Rhuf2_%j4{V*)bfyu# zA{-+zz202ojx^OQ7Z8XU2C)|S7u!z~dzsP+nLmT-U6>Mo-iA8+31Bd{sO1r77Ea|0 z@}%rMy+)B{0OB^h%_`rTz77-uuTIOU*Vq=sGSPxMR|SBD&Q-#D`6qnt1p!(3E@{J2 zUCzgW@(Z@WKTFhwQjEThlnaz*E2jJ8G}kv)4`^P~{`%JFTp=P9hW!k4^~*qLH!=jKNQiK{^JGW*UWIC|+169X2KEeBZJ70_@4eJOf* zN7+r?56OTwRWCZ6{L8Zc`o6E@xpr8#oXqJX{`_#I|M~o0P{atI+o})!gXMqSH$4)_ zfTMxk**`J-pZBeYsyCM15>EfqZuH^-gia@neE83sMc-Gn09bjW65-rG>Ou&d?=d z=*7Pv7einC&-==S4153DQ;Pp-H);$(?`NvVsEN}`v-COehK7J_@~Xz zqgi0b{sY5yURuo#Hm&&|et(|;3It&FMEVW{N4D8WwCGc$_1Q8ZtNb7KOwnUO}5G;vZy2)Dffx 
zQbYMkv43CUJAmgR8n=D!Il0f)^D%-WX{i0D+!F&De*1zp=xIM(n6DkSAT^un)&9ve zc)<*lwQU;VyyQW$xH(XOcTs@q=qM2 zj7fPmn&rjZ%6Hy>5G(}>pt>KDLdDVPKk5x|#bo)T%O?`cFS}y>6+jRX00I+;lN_-a ziCP$E{F{iH*OAk)|Dd#~rvVqgD@r7xwKv=yTw$+U6>&@f5h3^hEfD)+>Kh0V7$+&{ zh?!By^f^&Ij%%{*vDWe*(sw{$KfuN5)FegpCG}j=6y=RS=<6eT8zS(d zj@w66gHg5e*oYl=o@e=%$vO-`qXqA`Kug zbJb6pBU^mN0))Q!L9ejxf7Rpn>=%K}YbxzfJ^ zX=E3WSRMiC&#=NFHWRCjL{g{Fn>UXNDWJ9-vCX!-9ao*<8MmY&cHD=TfUsQ_10eL5Z%%YLN z3mgVwQNi_B(JuS|F}wqC{@FUgWi%+(~L7Kpf!WXdRS2x<1C-@feWS{W~U5{L;jgYJQEq_hQJimVtK?RmrVW`mv}Vg77KhG%uM6)pdFl!FHq4%Dz#@E{P?6F79i_8Wm@w{0K-CR8zOIct+)Aivq8Zw}ZZPjh0B6@- z7-sP9H^Eec^Dv-+-M)vo2xS`QKb|)f>YFHUQ)s*Y(O=qI$Qrsj7Sdwnr{a{G!F2gUh4H* zG#?VVv2{GxzW=J;HAHOreQ04_V_&VSv?jPEqKr!RE|~}=W@xv=q)PX9aZ&W-tQ-Nu zi|0 z($`8=qy<$!N5f;@zmpS6-QI^ul^I-QrBhwH)pm?44=RR;2VdxDR;)tb;D9`{P%P*b zVuTK=62D^!?`@9u7)NVE#j$q_F9mm~$~?uauA#uP#j&4vd!Nu7(2cRegvvI8Am`Ze z`j%htX|bbOoPEu91Z@}D)^effY(cgn4pgqO{VK%}MQL$u4pr#1IN@!6xg#BijuS%* zbvG~JnB?BNL@4>osPu7kOmm<_n__^*!UVCAXbRm+snl_wBl?yiFHfblT!nfuL{P>f zB_jSvvGvtMAR`#=Ro52-x>$G&zBx_R`Jb)-P(?kDKlDg{kyQ_kBS-TuQuUq@-c7j4>fX z_yc6ALP0OpTSSh9RrSVn3aapIeTnGuMW4dxl+Y2V77z7|y|?|nr^6Sb&!OZ@F~1!; zIn#azlSJ3UR?Fry_LOtK{ffU~p=?;t+4`W$tVs#M6}kHbB-##&ao&5#e|D) zL%v;WKQI1~O8Iwnw?RkCkB-{}XUd<(%pG+7{_ER8#Vnb)>m0k*b=?)a74ExNV8ocN zB%29^wDVfw{DGhhd<}GRg_DvTV&vak{wJBCyXvy|;{`}RZw-pWNjeJc-LxKw>R?Tj zQgK-8+%IUTcq|o3|E~7$%)C03eH)*y5BV+WqAVv@LH*LCB__(N@Y|M{|+A%fZWsaqwmu<>Cc-OMpB!9}50MnJg#niQW zJWnZaZm7)tidNB8ZZ@=cKQIdQxj^u)s7hEL5j?3x%|h!OqlSpplK4WU>f|{{FP3Q* zKL`3t>qxc~SafIRxGary79Fk$?WLQ*;&QbpiqUB_v&+70+Ml3WVuDv;kW=W-cAovs zQe_1f{b+mN>oh;W2RdoW{`KwT;`wVpht0PIrv^5He5?J%WtXyzlum-Jg5Dhu&@b_? zr4FQ}!^WFtDnecr>?D?BB*q{2;iBBOr^|j*j`3q)^|%kHFG8(mp~vZz(H3c+T9pW( zW;=sbq=$M3^9@l+1Ucm#JLBEXpXN=*CE_37O9l1GQ;T4-sNRlS(Yjv_aA^n;i^|)7 zD|vKhgl49v5y5hrT(GwdJqf(NpSF(s;f7m9qfqO5Aw@uTukUz zoYaq^dH66?mhL!ypFfg3oizaFuWu}s)!tXH2#zC&&}3n0}(z&B60tS2ulMWVj)!` zPY>u|^yXB=pB96e9YEFP{Zrgp|`;elzDTZqZZ&evYn$CSlV$q2l@T})p>=0t}g zo}Qj2<=LhGQ-Q|rfG1@(URFo1mzB1?z^ec286oyGILs^wBhh$OJ6{=ldYTMYo6HLX z+RDAJ|&5I`91>|AAJjIXRirtlV0GGktxM5>OpYFoxvU7o8#sR^6d(i~M zKOu5Ce|h@)AV87Qdv{);Z=<1*2zv8!KoxmaU`H?7-|0|q-gvp8>=Wc$3JMB%p0jE` z|JNoA^gXD3axmW-WP1;XrGAHnpQei-^BrHqhfTiYg+X2AP=mw3DTz1eRUF`^??5in zGxg(tOKqh%^I_Tan}l~qD*;7c^1d?}jUph~;p6Uyn!tgPi2m2h%g`)T1AVt!gRT8c z5LozS5I6O1S6^m-Yto-@(G9hR-q-qVx(;828v`j*&kP+vqoj`!m7qnI0c+I(rA;K*_>Fkzrio0v`O#lK$MzcBwhcK>#9qNu%H!>)AoDQAHU`D&i{ z1B3V@$)74knr?Kuz@-zyNdwsRaqH!LJZVL^37(B)#kHF$d0q1rd$&OU-k3ZkL(pylp| zYTiY*qyPGLY~`CHpgkV%>q3F)Za=E7s|{1{Qn<6PZd;hkM0}?rPn5d{2#z}u1Ega^wO8nR>H2SK~o0#2uay_SpTVep*<0ceQON z`BUqAr3Kf$_qa|8QU4XST+RxQt$gRkF+!IOG9{WB{d%GZm5J6T1`o57k?&&)%ppqzV2h_pL|Hr)T~+( zrh>_YC%C3k%@p=MeetOTWdMU|IU3QUswqT>+|kX;D^ZZdgcq|sX5 zUfoZB_Rx%>%*UeBn9)1%>N$4R3_CC%21e23?^$*KX=2YxYFA?VbQ0b@g58gwoCR}X zOL+;q>07-|@B^>A%y+|@MLxPwv0~jNdLhjDW_bz4`N11+2DYT-En8dKRaiM+a=Wwa%#XEKdl+1rh+fqN#X4Pkb^f#7JUPyW@dLL81^y1P= z9j%EfyCrBA4u$3c+)l8f*K$BkOdK_sf=~D}w6983!~`8Q*0Z4LH($Q6KHJ|@#s32L zTPKkWth;Aj>>(D3mvSYUaDx_1cEgl-rw_dnw0|l;wIc@LyS$_)`hld3i$LD1nbXqG zg^37n6uz?%dLGMeQ0N_WK?NxZJA)a3+x%ji1y!*rQ7wY_p#EdIiJc1SqY1vi@y0~t zZ%OEcBthuX+J8s_5XMny-L1O|rWsc-d(8O~u-Z$WQMQ~--I+iYqh{AyP2EB@98{Z#_CSkpD z;T)+-aog=d-QkP1%$0&#$wEFfyP~f@xN$LbW0tJX=qH|6`T^#of}tcI=tUUV#$rZb zW)IO2#IjLxpxTGFXKqll`%ZDv?ycUe?u@3H@Y?c*hk-$=I-!jLLeLRClac21mzZ3_ zx&mVqlLl^<>=~35foBfc}ft?$^+RM01T*)?>ULy>Uq^4bs~GV}r4i+fUkdjeIT$iIQw+cIjNI*h#Ehg6p7# zaTFM)R+Fnq=;-GF+MPR>4WoX2^%=VQORel1DaZeQE)ur2;|O~%C#KEHs(|c4f!3N1 z_+H^E0yC1~Ki4r%oDEAHK*pdO5PX}l*veMFLd{pB9Z}Jo#DTgAMa6{2KWi;fOZj34$rpoNsahHQ;95%fzri+Uy4U*y`fRS7bm^ 
zSmZjGk0f9B;YjBtJq%F_v(xBmbNe({wS4HW%ou2GR7tstdW8L4*9&Ay)>r(#5JxZ`1;+2Bg8i-?iuM{fONKQbL;(ycHKg zuf8YHY??U(Wpua00Zyt{0Q@?|fq(lDH;E=HV<*eSlm2Qj1$H5fu%i+TIN&nqP8&J~?P0vA=DZ@r9JoFDL^IVU16Mb|HhG?XNjN zyI%ofgo@A3s96NHlhb|^uLewPk%B(60FbidQPFn%u4a*uZ}6B%C3W&yD4OmEg7WMR z*!JBT^J{2L3uIAn9WCrKY>nf~kIBExMvbUGc9-(E81KJOzy-$k4ndL>T5cK3eR%zY z67(}J))zhxxL!EweP_#+sY^j<|Cgl8mO(QEjn-l9y9UAY}h zkB$ctH!+8(*}R_Qn@}S(_>cIdgJtvr*AgC*q=+^TXD=jzWu3rzu{pdfD@Z+#mY97} zdUqD6$a#(!blm1Ug6HzEX!BxMjIW2qlo>kKJ0O`6Es$ZJQ1K(Q)bjz$qNLUv@oVB; z@_F{Ba6P*T5xLsz9D0L+>|p03rfL?)mowfap))gw@)H8&r}sMPk)}mo3|yCoE$LX=1UQbG~=W^@;PVsx_U#Q_M147 zam`k(mczzE1!+L`-%{dRb<)K*&F1M^njUgJD9t8*T(hWtOF7$PyGFUFb(vMmXqQ>7 ze0PLd%M?k>mg_}y1>t2oC39d77#{JepowY^RU)vnjSF zEVG(s&gDhGOT_rR8w=x#3)=3K&z&3JVM5qV+*dhZzOM$*gU+uV2XVl8xk@~($3f}3 z*iD7HT90Q2{5^C*FZkx?GhSzT2W)OZ4b2>$TdO=N*u-koUAo@8Jz&gjl$Hu9DHF@X zx<2TP=&{Hu`!!ZNd`0QX^(GIHmc3Z9J-9FeCO&=*B{CO7Gxs?lFX=6Eeiv+}u_YIYAw+Tn+{=&D{+vawU`c49Du9LMH83rz=Q4ho-!8`=Ft^&3Ue# z84=vbAybRF+;V{{XLu}Djrw9VEh6{^{-Lrl`hc}z<46VU(^(XOU0V6wIgMuh8Js|A z#=XUL!X2s%fqDbULg|W}#0@`kJ4teI0)e7yUQsUN576_xu-?B1Ro-TE>L@AddQx?% zgFN5>wDssc#DBAV5*7uGGWH3-ABNCge+nNostpw{YG!-$1aLDvXevQ*7qK)TE{v%@ z7AtQo-f~m4T%9pl;EBh#7W$yZvDn9NI1Wuy4e;-YYOe2}@2mXJ+klp3FrrfRa_lqN z(aS5%h08g6VEB42^#@_Xrq~V`d7O3`Dpyp;Qa}d!tk>mt>ySauD*J$vqZ7+k$;K31(Spc_GJhKsULw$D1$UoD5=6jy8!Fer3cCuFA`*_gBZQ z=ap_*t&M=Yy);sP>XFyUb$HxF;sqeA=V=*~56W1c=NB^KvXf6o3(5NJbnlW{)((npe9ZQ%}y5zI~lP+OUp3TNORNFQ* z13&sqN$#byYTY_0U=Y`l+sibAINXYg1)-@;aiTAksXcZS0N4AJ-q7J4Acz=v*LFEt z>zpo_O#3vOCRhv(-Z0F_@4DZwn;~ELaP~rYrRWMpdjmyWiI0apgZ1>uvHp8|1M+N$ zfq?i!ND&?fM6}`dn}aq_J!(vyAEf>|TXl`RyTQlr>gynN>|3~Vz&yDllND$T4ua`8KI9&4PcmW0}P^YLhE~f zgFp6v?g;?9{rA3!`=3qfRZltkBeKvl)jL)Z9~ z1c0mE;Cl!0+3GDInY)$Dda6j`s~^3d#i^D<~ zM&=5g1f~fun3YHCdbwKWe^=3#gN706K#MjNm`Br!-i=~N0ClZV(9Umvue?jYGgWc0 zZ?0ZNFA-^?8PbtU91ib#V;;hQ__$pEi4C3N5oy9x-2I5g*9!;ObSgFM zSrEoN=?aTjX?6xk#~%!RrAu=|#|QxFq;2yEk0nD-7Xl!IW~7HW_HYnxDtQp`IS4p)gV52ZzL z**AP(Mw(nUCqG=9Ed*EOR1Q(^7WEJ8OkrxGq96}EaviEWZJY?GlY+To3AHCN&RYkp zP^+3l$87{+%(WI63{51nU!y92DGEAHqUMa(;=_%F^-^E(Nq?nX{D%b0jy!C{n@ zve(0%>9nlSOShglSme&tA`QCd0pUaM@-F*@2g*>Adu~t6G@L^ms#~wS7ewL zJlcS4LbE-Se1?+~pHEPcL!)_pw6R+G}{7$r`4EvSK8~6H1af6kf0K%8v>1s z4xS~^FngWqF^dADpFJ5FA_A0P`~!GFUW1p$&!?7HPHb_iu&lK(BUG-jeAOD)eEV@r zH_c-kuJV?447}+)JgTH#9IVljJmesxqN4!}5_?ep{uFWoE4Dt{by5JDOEF6d8JUQM z&)e3jYC9@hmLn&G3R+55K*lr#ObXKjd+^EylzRnYnBf<|@b9`UG~)39kF6&MQR|MW zJ`UPs+Mp!h2$(%v5ZCPWuA^|S2BQ!q`D?}!<1^IIlmgT_0Kzs7FhSfI{bltf3-E(i zznu?<#}peWM|tE$$k>x3lV1jXIP?~-P$Zv{nR6ibPw)VT!MvH!YY(!zJY@)es-z!F0J>&>we7ugNdYz3n-i|*E4D$Ymk)tLNDO* z^B2y}O>onf3t*Dl01TM?6;N1Uh@x=vnhU*6yGjT! zm#rSzt+ZZ3O!V@OG=n#}_J`5HS15SR5WIJAde12ZU_y>z$?Vu5=_%V`O&0gcqqUvX zJ?8BbW^(!><%Rjm*D~O1Jv<8%ZNRqdKnFNofDViSzHAA|sS{uFR~2=cs{k6XZEGT> zx{xa61;d-8VGH7)>S=7~FB>(uB3&=tv1kUzz(R<#_*v zj|Z9f;4{L8LZ(@Kf%o9eoY)~EGXm-(Nss&ZVD;+2sIcJGIMjv1KlK_Zs?)9jkmWfF z9o$iv+pT68QV_Q8?fpgGYdNw2!A!+{|DMN!JHld{&Zm*ar*RZ~*@`S}`N=AnmJ&E! 
zHvELl;0r$Qtjo|enfjz=&JX|-BL)4|Gve+`H6(Rl&a=jPJ8cfMGO-u{>p1(>5<`?E zR42hG9-hU}`T8}WGNbbsd3wZ?NZ!>);7wuVp`nfdu?uy;h4TmS%;++Z=XAPnf@TOAXcr|wer{~L4<(enRT@xXg2g+lu97LIiEDQp(A2~*gl!n?LjSRwIO8P z>l1sxXY{To+e?fJl0`knfF`KwUNs`a_@`4tCo{@B5FXx3LAoDV(>E6Y-POQJJMLp8 zU|Qc03{8cA6LXx+MWDl2fLEW%&FYH;!~DXK_o=JcTg!UEZJ{E;iwEOp-1-P^w9S<& z*ML*c`;yU)Nvym$o1Qb(BQoITr9;L%ISJC4SMM_%w2Y8m0vi-(fs1YNq^5l z?UF_DM$`pg{Fbx*En&~$9`PxcQigVf2eiyOXwVEwq>q4_ww-cqo5uvFqB`Bxg`{Oi zd?+C>F{tdg4U)B%nsQMUyivE0%>}`(z*6sY8Q$_QI-9!LGu|GJFqo*#z=o7pFr6Qas2^2ZK3{qZXi(N7j>%y=#Jwk^=bJt*#uH&a}Bg6ida^q5h=hd{~q%OBm<^xIHdw6t;o26}v#J zX-S}}>*)v$_6vDw-zu2_2kdm34DOXK=Vh&fOsYnIHy;?P=eLCp-|<*!F~z7X4d3mV zhnMeO(CEg(8jBUh;d@SvAh#vFpizGEQR3m*ilPreD$eX`(YFI>L;|P7qKhyIV?&rQ zgfAfT8lAzs8r+@J86be5irqOlchHbgr0mGnOxD6rPJ?RyUltrBF(|gTJ}J76R^-AG zcbA|^Rbdpetx&`YnXud93|b~bp@UT*uMLe`CiN{94mxS319{cAn zKpzVn2LX~n6m8pi9Ine_n_r{LJDnuQWoH^UzNva84U7u;?u}`>=D7fz0~z8%3M~tc z8d=`K^JLaV`Xm#L4}6tXwey?GT@#vLaL@w((DS?ljJfr#Gr^*q@L&_l(+DT;e@$tT<{ zkKzqxfs+@a3YKcg;N(ziW>{1O2n=$7&!~ga!5v;;T)DPGf@*N7G>a@MivZ+Af3@KG zViWsAxpxlzsz>FNL+5t4A{T%}^{=m;n!8H1x9HbDluiW>l-SM;O@Kl`2whLP?195P zt|iO7yt_ttjG5vL-&pwd`Jzz+a9~gvG|M`w_RA2IbtHIf3l$xU0hy*`iEs;y<(8q-We3=jgrOVUgMG`<$PyufQ3l(hzm=`Z7Luw&6-NG_4cN;b! z6Vm5(%X^4`If#!r0qZI_EFp9dvI$VxArP3i!Z}R<+D1WaJ(KH_n6!ejx+hHZX8HS> zpewnOng|dihao2#`&U1Y_pjodKg`pb6@icd zpTYU&f$+%#;TwMs8u!NgcTO#*g8-DJ0;!G6nBNd#hM!vig&Hda0}7ZYPUzH>Ish>c z9af*QNrFLWaG5y>wzX0OMJ6C<4Qi}<*qF5Z6a^hv1RaP2HT@;LzJn+fK|%pp&hC1@ zGp9NjKzy^Dj}IJy%e`3^Jf1!(mW+>gp_D+*=i_7_7pB)&Ktrn4w$YPDgzu|z6@Z|u zjXZ}So(ZOg{=XRS+vUGvZ4k$D7}bZT?hx;NlNX`LDqX(weVJu)XIizQZ{2oLnl=C4 zcdr#5Q1Z>2GaPLLGu)7?#!$CF%qF!4=#WaF7}j zhwM2)V~c>FN31ErpVRc|0-)gF`qM?o+(PF&`8UR{K~>*=vUQNNHtjd?0lA$-kk^Nd z+-6UmFfoMWDv~!M6L#nz6s8mQJ`NQj%{G;IG~fP&Vp}v+^fyKqg0s2|L#FQgz=GkAgExr5+wQ(taJ%JJUIt1HC&21=X-I`d2s0Wvk(p?}H*1 zK$VX$t76X$@==ONMr7m0cU58hAH#lB^T`eX65JjynGvFidAR_UDb_nsQ3KFvO)+C7f#hQb%cYx(z)>&kaT4(+om6y3-_H#vaq%|svvQH&z3 zS1o^!3x@nl=C&sx1$9RHZ$Z*)Rds(fY|yew1!z&^ctvlU5j_ z$}u`*F;VcW$5Y_}KEFM*EdApKUPAGQhXxh*GFam4k-Jy+Q{L^dJ|snMgv?GQ9zpTC z@bA%=5CHYCLuZhLzE>$X`2o%%bFW)b88TBT7Xg506v&?CcB2pQ**s71$yB(XG^*l$ z6k!g+2J_a@>nSx_^;l25$ri4ifLw z7Xc?Ceq$q1V#osCx_&zX%*)8QA>7g`#f z^ICXa9rs^4K6x&l}y zB$>dm0}6-&>%@Jw0r|@FbX~3`l)!w(qL!Gw&w>^~(+m;TLa)_MDVGBue=%A7mIW|sPl2c5G ziVw54?SP~rm2s$q5DFQRnYNrYQD|r|iJ=@b(_k?Cu6xGx-KX8B-}m=={`$RMyZy1o z2lwZ`@9Vzq>+rtb7gG!h%%?@E7uj}!w}>0~e_QGCui`(ONaEp#j24gf;|Z&rCRsP= zdmSJaiby=vw>KBjnjZ9y&>52fEnvw7$1&grZFr66GH*n=#2!lR;En)`3<{3wCelBy zt9yVITelx_gn*&Rrbr(qao100tky7|9*q1G(^f8D z!-!99Tk@is)Uu)Ge=*sd12dFPJx!?^L3Q69Q3hnytZIA`;=@om;~PwL6dkA)oAE`p zCNU8Ha$-+G4jv8((ZW2aigdqK5UuNflwW;^)PYM2twoy&b| zpt~DZiy+dup5eRTrZ7!;n*)<^D|t!1$(O?z+_MEYQ(z$Ni#1toh0Q5}Ja6Q8x1SR) z*rozzI^P3F${cySzp=*|Q~Kysq9}3)sL-R9iBS?-874{AA=f;}>|;m3#FgOD)wYv) z#H(%W`IV;}BUxTGB<$8JT{-Tn%)>&W!a51i?D@N19TSub;C6vU#k`Cov%JnfgHXhZ zMFtZmZdj;8Hqe~Aa0zcUnRPv%8fs9W7oa%j=qZ5UhZd|L+Nk33FLLMox1ptBPuvcTbtUU!$ zfm)JTcUZQ*Rr1bPjl|p=3&->ZWjpDE#yYom7?LHIrL`dv=XCiS{ zj%!}^j;Ze4=P$0zc=Bn!&uwghs70$$HOc$Q_SH&^?WOnj2t_#+R{dzT`_F>Ux0^xJ zc?8VlCOVP1W1E6bayRLQVzufFESRYG1S2LGlDlu<$z-aO1DU#ChQY4Uc5+GvSGS3sX-My$iE%l^3+vU7BPFVNHuBkhDL{bSa0h3(ohz>@mE;G(RFYiq{h#g z&Irb#zr}ZTn8X}>-tHUtyrq>R6D%r?)SL{gpz}I|LbzSM^uE_jdfzOT9tC4_w6+M< zxe=)IgZqq3r?p`#OfutdhyQ26_)8@7&w~AT5|%Uz!r+gQ$Jzm>@)F?J4WvRw+i!02 z!cm=pbh2uNK?wE$Z0kqBS5yXh!iE9|woIg{=oP$0)2e`j+)bCBC@Wt3?uUdJq2ciF znU34B&yZ~obeD(i!Flol?N|;3tsee^7IrS?&w%M^B_LyV9EU4*0~9&vOQu;2!^PdE znIg(09+YUN$ujy(EhNmz$|+ZaRlj`@DFl)!JppsY=ztlIFEKzZ1`vv+z}M*#fAWnn zqA{(2M4caz^TtG75)f>j7@NqrR3>JpLSnBdv@j$mh`F>}!>_dHkB`Q5RjUi-85>jI 
z=_qmoa4|k6WS4!9{r0o^^B;bNh}0n*-+;pKw80oQ*Rt*76crz`6uF@E-4oim4-kS= zt+;Oq;;gh3n|=DIy3F^c^5;1=TomYr8KD~?_-j^fNfmI3#uy?*pBLpEB7WBP--OtC z`2ju%{2(qXBF^O2el^C%l;&VHR=@YbR6UleMOVZo8p zq)=ZhegVEvL|9jWtm^w%zRS9(p^l0Qo$mOhtByb1woZO|?oV8?jRpGRstkRL<+;h& zf|Yhj5wZBB^!d%^}0wU+1lK);u#x(W5W1qvLkqeM^ZX!d+4R@Hk1iI%dn zi}W`S4je7G@Z-uQDDJr0w{i^6+kg%?DaSM(@X_Uq&mr&Sb8^ z;upIwheIl)yckbeIrvmhFO@Q!^Fx%w%3n}Ys47~q0_cF_t4?SAd=aZj$igK3G2)%JLci*-9|q!ud*_S5j+a+c1+*W_aPRLYUk;bMy!mdn&ZV2< z$EGM*Prd0$F4nekVh$c$+h6V&1+DfhFCHwdsWlL-)#I2=IKS&38qH51`Y@&c)|t^3 zkPQjI4ZuCULO0+z-ocLSKxR&^mvQ1+FFirXR)~HTLc?!4)M*}uvCT|f_i>K7M3F7cEd)$t$MeQAAmIU=a zQ!Xcw*z5xaOGa7)javNc<%h;vKEx zX%HBAXzSYH<;+tJ2s4UZLF?`qd*k!=zj=_XDU{4mWJH_U?iJwH zT$XHZ|9$C)+h>33VaJpoX!=>7cm2hY*YZ+w zMJhqy);Jyc^6_b07Cq z*Ao|?nxGFwxsy2VEm6UJ&OU&>Er)#5cmw4nyg}DVIk2t+KwtyHK1`hCi793VuDg!c z^C;8VMJ}t#NZiY`i;8m=2B&9bfL~^2T0YI*DI*>V)zX@8ah_SooDPyCRJqt(RwuC2 z9#!6S3F;^Az5p??3J%4SETYb^V&h&EiZ@+lJ(U_tJ*dMy?q&%wi*s4!sUoTU1A)sx=HD8uMnk< zSghuzM|XTD-QB!gl^eUk%1>AptBDe+U9Puh3Xa2)naFN`@T4aLg^p?QgTCu|L4BhM zdCpCrMYYDYl84&t)S@B?ll_hdcnO1;;(icX$MNgwXR(@?V= zSTQliJ?h&Bal$Go8C5V!-&F6=;xGQ~{F$g32C6L2)S(#Nu-6}+XzN^|v#ynqNcj}5 z@gYh)!PZX-YThFtu@LV8-#{T2ayIC?_na^u!VEc7xvx?yn8m#W=V7OeM+nMsDAA{Gn@_i-adW4z~K9Fwbh3J`0gGjsmx{ncsdp zj9GWHuzA1M_`Nrqd5>>=h}ogPwBCu3?qQ5dN)W*vq|`Hql}%u&uN|#rU%}U^ximve zWomu%O-@(`tma56ME+Ld^j|I$G)6aalV9t@?A#YurnN@(Z z4DlnsbFN}qhm~BOaIkFCRIl#H(dMbC>9McGF$VyeA%vXF^t3O@}83tV5fCD8cM1}NW z3(WGZ?V-98U00pL)1-$-!p>4+u(|M5)u)#yg0;xlI=vpd(501 z?o+_E_sgAGhz8fW;&LR|e|J1u*FPX{>pS_r*@|xA zeR{NQRmW55e#g90umQA@*B%!f{!WQ87shg+xX&*%fQDzhW}XhU*kaI%JGg!-m^mfQ z2}$@UTu2!+J6Oqg`nc(I+7EO0-P$OR2=(LS`moRH9cMH5KWJ8DmRl^MuS*PK{gEN> z4VZvIurhUUSRxcl!6Is_u z&HS4*^!$7T6hn;_to6E&I2)`OmpWzDI4rB#;?rt0biwbZ99$2gK!l`}jWT79Az75= z&$BKWU5Y;eyTuzPjvC>Tx6*Q8IJ=IS#!ss_b_2cILzIx}X%7>(2WBQVpKd8w$mH4R zjGcxQO-AByhtq`h@uwW^i~{jH?Nn{Yu%+apZ7&nG@;r)=#fy#ACuP5LY?#9(--yQ| z#A32NZT14a1j6_NH6uq>r z$(B45(cvcsr&2p7Tine{RY$rLE&K=r%_(z(T9b4w)^+fT`~nJumk$+1IJhhC3(`I@ z2j8RE0vz$wo)$3)Jq0o^x(6R;qxbV4=ZQI6bW`n5V2#!Kz`|V)p+L*WB^MX6`uCZF z8E@3$%L_O_02hxiSPH8K@ks9fKm0x|DFc6=hD%1sRWS54_SvL&~ zUWUk$J0KtF-B=WS`h%-8JtxWbamk*A2Mhw7;wMXbA|xTZ`Up}f;^CSpj-J=?idVsH z2rp!g^(V%S`<;rG?e`%J?5p7w)!)5nH&EM(X6}9Pp58!hg8TKM$rc@5JK+iKS^&|| zi61Qmd7-_^43SN;XeCePU{^cdF=- zcfU?73Mp2AU2jZwt8Gd8PhVOWjQi1=&j>5w+_K?VQ8}jsg(YUVP%G<0@<7gD?H>{( z?RVzVp<`tk{YIXn%pu(h=Me_(aUUeVw`_vm59r%h@U+rSIvUWP;;nL;9AwHEFF(B&U zk+EWd>c-TLGq1I=*@m=POxUyIwVieaJBwfKCOha=5AP$pnB@0jXfuTm(i&Nu->-TZ7EwJ@>0Gh1t^yFk@@a2+?v9@D71{n@b-Q9@~O}37ahzslp zZ_m|y0~`G_^6@L4)HIVB)M#Z>2I#LuFsy`HX;v2;j`uk~2<*cv5zhd@APx8f&&EfT zji0{Qx>F-F3&gY9JMn~#j)9Y5cdsYdm9VIho5Dhw#GOADLG-@GEqaA5*|_wIo#>w* zyapjj*rN3oa4O&1vOb8jB7lpP-it2X)k}KuWIwoOv;c9darvEpBvgo4U#))i*!q9+ z%f70H@%>+3C;*1Sq4kusRn=TYmK)YEXIng@9 z7PuZd3s(C#m;*X`x20Z9oHWlKI~>&kvrKQlMbKZICOKIodp$lVi$zT+Ojm>m5!cV4 z=v&+?F>oe#({+Q9XwpG4qSgRtsPZWb#px2Bx%nCBHjR6Fh93tAjpsLpYXX3BMxsl5 zS?pzF19E{!&{YZVk}eyc#+;~Z9IG^JX$J(0(%(6j&fx~6Dlqm($a;gyK^INyy*)uK zNxJpEgn@je53%EZBMqQ|h|Aog#{FDzE>8_ky?XT(*31Q2)in6ma(?6{^q`>oi+4+5 zoF9Fv)S&ERndeBlj-hO1chW@U%>Y_brnTR*A4E!{LcSz}&N+Ss@9F`OAQ+~X)D6Rc z#rfP3Zj=~!0dxUOw)lB~_glQ|a1IQ<_q&6?ng8J7^Z7$f&HyT5D3Znlus)4quuQhU zpyNwK75g+c9D8+|&w~WC0fJqzY`bwQC=u{5M2$EFRz8&|w{35;eI4SuPWtRFjT3 z8jG@VJ%EGlC=++OR6SxJ+rNfykVOPKAmeY6r`prFs-16hI#84s_Uec~o(2MC zArH=z=~ckLTW;T(iD1Av6fT+GE?%13LGyNVIF#7;W$K+mV#owJV~2Ncl>+ivt^ZqV z>?yb*kD&8bIi$MnIZxMTafU{#C)2FF4ivQyiKLe6R1DbWl>(4Q10liwYde!~{dwW9 zy_GnYBc~qrs3ylT)8v|$t2CrSz#F&8@&e+jApl;*vO1>dimJ$brstghz&i=np97_= z*qgpSfd6H`&5ogvC;TlN#NP{_-m-s>`mJ4i0$OQRfFEv^ch^T6rxBn#@qC4*n;A7k 
zS!>d(hfraZHsF+Z4;oS_hd>PC?SJDSC_jX2s|*^tSmX_7o#JhKkoUR(xk01D#GDG{ z3z&jq5Vt5p-bzdZb&z=kO#`j^En??<*XQaxBgN`(PEiR$ZW>hxFu0NmO2EDA?mGUh zVYV(53O{QfeM%+1jjw;$&Mu=xjX9_kG>CW(o7@>9rD5v_DE&QgDHuemB4L}K9!tvA z{EL&NSG$9(R~+M#^Q~XzE+w>gG{M%}qakj^m)X!a^H8&#aD&OAD6Q_tow@5t!ONe0 z7G*c~(O#CP6UI#KxtW7BmBY|b$p>IlWfGnb%=4UXfybGuSzK#GZZ)mW-%WPen>TLX zNpHQkFR1gI2le*C%fuZH;uUMD3_r;SMKng z`#&g-;(R5f;Tk}#2kN;tpK9f_hCYjeUEEmJ*V*i=c$=tPy=Z*%lKw5y+yIUeUE|va zT0){f!!5|$jXt-R|1yT3W|IMxSAm*+_HI=?{?H!_8F6<0{C*8Tu0hyWT;I8Oe8!BQ ztPmG~UI^!fgLBuVQyz)Fs5Ala6iUEe;E?CE5@Ianmb%?*K^t%Mt|DDZ4|rw#x3P4O z{lew{cXJoF>peit!>2~0rY=HWf za>P)zj<(1NoeV}!|p3# zzfVWm!sR~$NLCfy!uOvxHPW4%m(#-fhjnzs>`#ZaJ?VNI(i`K~(Zi1F|P0IDP4Y;bHiP3l5JhuZ+?AizhX15IL~8IMYTikFuwt93Lenvgv&1mS<&0GcWArt z-{8t`&+c51l0b0$B$GqVA7Y2n)d`B=aHXO+rwKylr%o|SR ze(Ds$x*g!z-GMkCii+#z{5sBlD)9+l98zEFCbPWyluY0!sHpj(A+hHiOY&l(vp}G#kPOwV78Z3wrr=HFq$PyEi5u zu}lq;LoJmUp&xaoYp!GrZ=(!Oz&wL#m3w{xVipMi`1ochSQ+;pxbxzCg~U=)_`Fre zm)B%{kw(dg5J>}kH_b6nZGaf)(sG8^d3xwpMVD8Pe-I$)FQO6>U{h@NWLWpfFnrV0 zoVdelA@Uf#H*nmZB8+}(lsxME8WxWfCY0P9kPW6Cq#>}?jptg1uqRQ+P*nDv9f-uy z_ArOj!RS((Wqa)0JP5%u@3Q7tKCbXoHxRGY#kl5@=yh;P*be$$7#Z6c_};vCD~+uP0_xnmT2iQ6>=vGwgq=nPj~l! z8tRFtExmM#AyGGa{VXWXRuo!8O)yq|w$0l3D~L@#1ZKriGDS&?9Mwx2>WbW&jIBy< z9rdgF`k~)mD7U`IipkzeJSdxW@tK}-SnJ4Es5-}0)o2*vG~_ZdYwvG{Yf^j18YcbVd+A$r#cLlH7+T1zTjdAm70ZisRR{IqgzQ|(fl7BxLf8%&}VfNoDcP&MsMjb}ZAKqi9;IoW#X!QaIar+b7_?cHQ zTwJ#X(%SVuhb}5b{h&owpDKiHdd6n8(TXVB_kqu3r15*l>z_mYz18{uLpM%I)7ot^do_1P&Vc`n NjrQ-%-u}yP{|Cy6Bl!RT diff --git a/docs/src/examples/twosite_figures/gate_contract.png b/docs/src/examples/twosite_figures/gate_contract.png deleted file mode 100644 index 5242dc80c0b3ffc6b73cef01ff4b535e5cf2ed9d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 52837 zcmeFZbzD{3_C8Ff2pAwCost4dNGV7v-3S|{RYF3#Y)VQ%kWvX1q`NJoyIbjy?%Kbx z&~xv(_j^6(zJI@$&u3G6?X}jNbIdWG@r-9Ie>D|(5+Z6M3=9ksMFm+63=I4O3=B*& z0zCK+Cwqhp1_n`wwTz6KqKpiKnuDE%^=)$u3x!`4#r2XOmr!9?X}6sKjhv^XClhP4J}}x$ly3XPh+O8o)}o#tL7%P zL{PRnSs}dGI9P&GX_lC1mJpo6Px4r03BRI6$eb`vqok!R z(PJvEt{f%;IcV+S5ll0uDrlG8(ARaQ;j8(7OYvz@bo@2`FQei z&U-zEW2Qm-c&gsis=mwQL1`A|<{`O9OdT7Iv835e8&wV>S~{z?RHJsMmNh2CjeDMl z?#qgXUorlGb~}eQ%oSK=yYc)+U=$lkz^S2}-Le;9r5;fQH z;93d#WB!|7!e+`Zjd7!KLA9ZZG^i5eSW3ng6vxrEm>?R4ED5eD)%AelmAn#b^@}V0 zZV}tWU*Z%KMenQ(;#`tKE55KQsbJ}mr&A86;w_@Iz^h*u-427Jd&lC$X?UwXIT_n; z;mD9Zz$QLcO?S=VA+h`j$CiaMQSz(K{A(2Wa{`QkT$2M=%=-rV2QY&+=C=f0DbIEX zKG~^LG&TFOEHh%y{?1e42_PskpRu?ca-&A90x8g|f zV>xEmtT^DgEaCYOJmPi(2rPpN4SzsDP70DeeY&3rgJvs(P~C3zcG92 z=1=CuM~l8;)+W&#e`F|sd>!Ya0>SC)OtDWSZ`{*k+#sM?mfZ_eW5M#54-eIn4H{<* z4AWNN)Of1Jd?Czz+;aSs19iz0C+5viV>1l5Ku@_l935rHym}yT1tM$ivMNrpH@nS~bi})V(UCBPO z*x(v-CP|NfnSaDL<8PAR{9V}+9v0PZHUy^|k;t64vjYY-9>e0iYs(e|VIN70Wqm!!T)gehpT$C4X8x%k=jhS(kQJ3M!2 za{PN$=GivLA1kngv(*KRUnF8)Rwzln(N59s-Okxgq9HZItnZhhTF8zcdc5|=ijo7X z8~rxRHr_Us+w)hlXI~bpf8@YY!ZepKr%VZU2yR!UN&BnlF6IVcl^p1le4eARQi_m5%;0-Lsg-Zv8+weQ3?&O-@pFxvY=z3ZMbd5NwZW~ z!Y@G?C7+d+_);p>N6YL?KZnCf+<=A?w@$n`X~$7%p>~Sx$Wc??*P-flB8|m0t|_wW z0@^Fu-#K}-->HkGcD@!$ZqzE&JgV8OVVqvBy8UX!@nm?w+kmP7=@f+&m5CrF`r}`P zZtMtbH)=N;H99tWYrlFIb4K@^QGwRF@6G7hq}YJi*9JlTmkOv0j^7k;jw^jDrYoW8 zRk6dkeqlB-ZCd5q6>=4Q!IV4o!JPC$`RB^dJ)mZ#zDiwmPV!c+(Ed3=p#~vCA$sd? 
[... binary patch data omitted ...]
diff --git a/docs/src/examples/twosite_figures/gate_gauge.png b/docs/src/examples/twosite_figures/gate_gauge.png
deleted file mode 100644
index 6287c4ba958083bf63b3bde6127e37fab4e607a5..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 22874
[... binary patch data omitted ...]

diff --git a/docs/src/examples/twosite_figures/gate_svd.png b/docs/src/examples/twosite_figures/gate_svd.png
deleted file mode 100644
index dc592a34eda69fe952982842c5670b40b246d567..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 25784
[... binary patch data omitted ...]

diff --git a/docs/src/faq/DMRG.md b/docs/src/faq/DMRG.md
deleted file mode 100644
index fa32ae4892..0000000000
--- a/docs/src/faq/DMRG.md
+++ /dev/null
@@ -1,242 +0,0 @@
# Density Matrix Renormalization Group (DMRG) Frequently Asked Questions

## Ensuring a DMRG calculation is converged

While DMRG calculations can be extremely quick to converge in the best cases,
convergence can be slower for cases such as gapless systems or quasi-two-dimensional systems.
So it becomes important to know if a DMRG calculation is converged, i.e. has been run
long enough with enough resources (a large enough MPS bond dimension).

Unfortunately **there is no automatic or bulletproof check for DMRG convergence**.
However, there are a number of reliable heuristics you can use to check convergence.
We list some of these below, with the most fundamental and important ones first:

* Run your DMRG calculation on a **smaller system** and compare with another method, such
  as an exact diagonalization. If the agreement is good, then gradually try larger
  systems and see if the physical properties are roughly consistent and similar (i.e.
  the density profile has similar features).

* Make sure to check a **wide range of properties**, not just the energy. See if these
  look plausible by plotting and visually inspecting them. For example: if your system has
  left-right reflection symmetry, does the density or magnetization also have this symmetry?
  If the ground state of your system is expected to have a total ``S^z`` of zero, does your
  ground state have this property?

* Make sure to run your DMRG calculation for **different numbers of sweeps** to see if
  the results change. For example, if you run DMRG for 5 sweeps but are unsure of convergence,
  try running it for 10 sweeps: is the energy the same or has it significantly decreased?
  If 10 sweeps made a difference, try 20 sweeps.

* Try setting the `eigsolve_krylovdim` keyword argument to a higher value (the default is 3).
  This can be particularly helpful when the Hamiltonian is close to a critical point.
  This may make slowly-converging calculations converge in fewer sweeps, but setting it
  too high can make each sweep run slowly.

* Inspect the **DMRG output**.
  The ITensor DMRG code reports the maximum bond or link dimension and maximum truncation error
  after each sweep. (The maximums here mean over each DMRG substep making up one sweep.)
  Is the maximum dimension or "maxlinkdim" reported by the DMRG output quickly reaching
  and saturating the maxdim value you set for each sweep? Is the maximum truncation error
  "maxerr" consistently reaching large values, larger than 1E-5?
  Then you may need to raise the maxdim parameter for your later sweeps,
  so that DMRG is allowed to use a larger bond dimension and thus reach a better accuracy.

* Compute the **energy variance** of an MPS to check whether it is an eigenstate. To do this
  in ITensor, you can use the following code, where `H` is your Hamiltonian MPO
  and `psi` is the wavefunction you want to check:

  ```julia
  H2 = inner(H,psi,H,psi)
  E = inner(psi',H,psi)
  var = H2-E^2
  @show var
  ```

  Here `var` is the quantity ``\langle H^2 \rangle - \langle H \rangle^2``.
  The closer `var` is to zero, the more precisely `psi` is an eigenstate of `H`. Note
  that this check does not ensure that `psi` is the ground state, but only one of the
  eigenstates.


## Preventing DMRG from getting stuck in a local minimum

While DMRG has very robust convergence properties when the initial MPS is close to the global
minimum, if it is far from the global minimum then there is _no guarantee_ that DMRG will
be able to find the true ground state. This problem is exacerbated for quantum number conserving
DMRG, where the search space is more constrained.

Thus it is very important to perform a number of checks to ensure that the result you
get from DMRG is actually converged. To learn about these checks, see the previous question.

When DMRG is failing to converge, here are some of the steps you can take to improve things:

* _The most important and useful technique_ is to turn on the **noise term** feature of DMRG.
  To do this, just set the `noise` parameter of each sweep to a small, non-zero value, making
  this value very small (1E-11, say) or zero by the last sweep. (Experiment with different
  values on small systems to see which noise magnitudes help.) Here is an example of
  defining DMRG accuracy or sweep parameters with a non-zero noise set for the first three sweeps:

  ```julia
  nsweeps = 10
  maxdim = [100, 200, 400, 800, 1600]
  cutoff = [1E-6]
  noise = [1E-6, 1E-7, 1E-8, 0.0]
  ...
  energy, psi = dmrg(H,psi0; nsweeps, maxdim, cutoff, noise)
  ```

* Try using an initial MPS with properties close to the ground state you are looking for.
  For example, the ground state of a system of electrons typically has a density which is
  spread out over the whole system. So if your initial state has all of the electrons bunched
  up on the left-hand side only, it can take DMRG a very long time to converge. (A code
  sketch of building such initial states appears after this list.)

* Try using a random MPS with a modestly large bond dimension. ITensor offers a function
  called [`random_mps`](@ref) which can be used to make random MPS in both the quantum number (QN)
  conserving and non-QN conserving cases. Because random MPS have properties
  which are "typical" of most ground states, they can be good initial states for DMRG.

* Try DMRG on a closely related Hamiltonian for which convergence is easier to obtain
  (be creative here: it could be your Hamiltonian with interactions turned off, or
  with interactions only within, but not between, small local patches). Take the
  output of this first calculation and use it as input for DMRG with the full Hamiltonian.

* In stubborn cases, try other methods for finding the ground state which are slower, but
  have a better chance of succeeding. A key example is imaginary time evolution, which
  always reaches the ground state if (a) performed accurately on (b) a state which is
  not orthogonal to the ground state. After doing some amount of imaginary time evolution,
  use the resulting MPS as an initial state for DMRG to obtain a higher-accuracy solution.
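
As a concrete illustration of the two initial-state suggestions above, here is a minimal
sketch. It assumes spin-1/2 sites for definiteness, and `linkdims=10` is just an
illustrative choice of bond dimension:

```julia
using ITensors, ITensorMPS

N = 20
sites = siteinds("S=1/2", N)

# A product-state guess: a Neel pattern, which has total S^z = 0 and
# density/magnetization features spread over the whole chain.
psi0_neel = MPS(sites, n -> isodd(n) ? "Up" : "Dn")

# A random MPS with a modestly large bond dimension (no QN conservation):
psi0_rand = random_mps(sites; linkdims=10)

# With QN conservation, random_mps takes an initial product state that
# fixes the quantum number sector of the random state:
qn_sites = siteinds("S=1/2", N; conserve_qns=true)
psi0_qn = random_mps(qn_sites, n -> isodd(n) ? "Up" : "Dn"; linkdims=10)
```

Any of these states can then be passed as the starting state `psi0` in a call like
`dmrg(H, psi0; nsweeps, maxdim, cutoff, noise)`.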
Because random MPS have properties
-  which are "typical" of most ground states, they can be good initial states for DMRG.
-
-* Try DMRG on a closely related Hamiltonian for which convergence is easier to obtain
-  (be creative here: it could be your Hamiltonian with interactions turned off, or
-  with interactions only within, but not between, small local patches). Take the
-  output of this first calculation and use it as input for DMRG with the full Hamiltonian.
-
-* In stubborn cases, try other methods for finding the ground state which are slower, but
-  have a better chance of succeeding. A key example is imaginary time evolution, which
-  always reaches the ground state if (a) performed accurately on (b) a state which is
-  not orthogonal to the ground state. After doing some amount of imaginary time evolution,
-  use the resulting MPS as an initial state for DMRG to obtain a higher-accuracy solution.
-
-## How to do periodic boundary condition DMRG
-
-The short answer to how to do fully periodic boundary condition DMRG in ITensor is that
-you simply input a **periodic Hamiltonian** into our OpSum system and make the MPO
-form of your Hamiltonian in the usual way. For example, for a chain of N sites with nearest-neighbor
-interactions, you include a term that connects site 1 to site N. For a one-dimensional Ising model
-chain Hamiltonian this would look like:
-
-```julia
-sites = siteinds("S=1/2", N)
-
-hterms = OpSum()
-for j = 1:(N - 1)
-  hterms += "Sz", j, "Sz", j + 1
-end
-hterms += "Sz", 1, "Sz", N  # term 'wrapping' around the ring
-
-H = MPO(hterms, sites)
-```
-
-For two-dimensional DMRG calculations, where the most common approach is to use
-periodic boundary conditions in the y-direction only, and not in the x-direction,
-you do a similar step in making your OpSum input to ITensor DMRG: you include
-terms wrapping around the periodic cylinder in the y direction but not in the x direction.
-
-However, fully periodic boundary conditions are only recommended for small systems
-when absolutely needed, and in general are not recommended. For a longer discussion
-of alternatives to using fully periodic boundaries, see the next section below.
-
-The reason fully periodic boundary conditions (periodic in x in 1D, and periodic in both x
-and y in 2D) are not recommended in general is that the DMRG algorithm, as we are defining it
-here, optimizes an **open-boundary MPS**. So if you input a periodic-boundary Hamiltonian, there
-is a kind of "mismatch" that happens where you can still get the correct answer, but it
-requires many more resources (a larger bond dimension and more sweeps) to reach good
-accuracy. There has been some research into "truly" periodic DMRG,[^Pippan] that is, DMRG that
-optimizes an MPS with a ring-like topology, but it is not widely used, is still an
-open area of algorithm development, and is not currently available in ITensor.
-
-
-## What boundary conditions should I choose: open, periodic, or infinite?
-
-One of the weaknesses of the density matrix renormalization group (DMRG), and its time-dependent or finite-temperature extensions, is that it works poorly with periodic boundary conditions. This stems from the fact that conventional DMRG optimizes over open-boundary matrix product state (MPS) wavefunctions whether or not the Hamiltonian includes periodic interactions.
-
-This raises the question: when are periodic boundary conditions (PBC) really needed? DMRG offers
-some compelling alternatives to PBC:
-
-* Use open boundary conditions (OBC).
  Though this introduces edge effects, the number of states needed
-  to reach a given accuracy is _significantly_ lower than with PBC (see the next section below).
-  For gapped systems DMRG scales linearly with system size, meaning one can often study systems with many hundreds or even thousands of sites. Last but not least, open boundaries are often more natural. For studying systems which spontaneously break symmetry, adding "pinning" fields on the edge is often a very nice way to tip the balance toward a certain symmetry-broken state while leaving the bulk unmodified.
-
-* Use smooth boundary conditions. The basic idea is to use OBC but
-  send the Hamiltonian parameters smoothly to zero at the boundary so that the system cannot "feel"
-  the boundary. For certain systems this can significantly reduce edge effects.[^Smooth1][^Smooth2][^Smooth3]
-
-[^Smooth1]: [Smooth boundary conditions for quantum lattice systems](http://dx.doi.org/10.1103/PhysRevLett.71.4283), M. Vekic and Steven R. White, _Phys. Rev. Lett._ **71**, [4283](http://dx.doi.org/10.1103/PhysRevLett.71.4283) (1993) cond-mat/[9310053](http://arxiv.org/abs/cond-mat/9310053)
-
-[^Smooth2]: [Hubbard model with smooth boundary conditions](http://dx.doi.org/10.1103/PhysRevB.53.14552), M. Vekic and Steven R. White, _Phys. Rev. B_ **53**, [14552](http://dx.doi.org/10.1103/PhysRevB.53.14552) (1996) cond-mat/[9601009](http://arxiv.org/abs/cond-mat/9601009)
-
-[^Smooth3]: [Grand canonical finite-size numerical approaches: A route to measuring bulk properties in an applied field](http://link.aps.org/doi/10.1103/PhysRevB.86.041108), Chisa Hotta and Naokazu Shibata, _Phys. Rev. B_ **86**, [041108](http://link.aps.org/doi/10.1103/PhysRevB.86.041108) (2012)
-
-* Use "infinite boundary conditions", that is, use infinite DMRG in the form of an algorithm like iDMRG or VUMPS. This has a cost that can be even less than with OBC yet is completely free of finite-size effects.
-
-However, there are a handful of cases where PBC remains preferable despite the extra overhead. A few such cases are:
-
-* Benchmarking DMRG against another code that uses PBC, such as a Monte Carlo or exact diagonalization code.
-
-* Extracting the central charge of a critical one-dimensional system described by a CFT. In practice, using PBC can give an accurate central charge even for quite small systems by fitting the subsystem entanglement entropy to the CFT scaling form.
-
-* Checking for the presence or absence of topological effects. These could be edge effects (the Haldane
-  phase has a four-fold ground state degeneracy with OBC, but not with PBC), or could be related to some global topological sector that is ill-defined with PBC (e.g. periodic vs. antiperiodic boundary conditions for the transverse field Ising model).
-
-(Note that in the remaining discussion, by PBC I mean *fully periodic* boundary conditions in all directions.
-For the case of DMRG applied to quasi-two-dimensional systems, it remains a good practice to use
-periodic boundaries in the shorter direction, while still using open (or infinite) boundaries
-in the longer direction along the DMRG/MPS path.)
-
-Below I discuss more about the problems with using PBC, as well as some misconceptions about when PBC seems necessary even though there are better alternatives.
-
-#### Drawbacks of Periodic Boundary Conditions
-
-Periodic boundary conditions are straightforward to implement in conventional DMRG.
The simplest approach is to include a "long bond" directly connecting site 1 to site N in the Hamiltonian. However, this
-naive approach has a major drawback: if open-boundary DMRG achieves a given accuracy when keeping ``m`` states (bond dimension of size ``m``), then to reach the same accuracy with PBC one must keep closer to ``m^2`` states! The reason is that now every bond of the MPS not only carries local entanglement, as with OBC, but also the entanglement between the first and last sites. (There is an alternative DMRG algorithm[^Pippan] for periodic systems which may have better scaling than the above approach, but it has not been widely applied and tested, as far as I am aware, especially for 2D or critical systems.)
-
-[^Pippan]: [Efficient matrix-product state method for periodic boundary conditions](http://link.aps.org/doi/10.1103/PhysRevB.81.081103), P. Pippan, Steven R. White, and H.G. Evertz, _Phys. Rev. B_ **81**, [081103](http://link.aps.org/doi/10.1103/PhysRevB.81.081103)
-
-The change in scaling from ``m`` to ``m^2`` is a severe problem.
-For example, many gapped one-dimensional systems only require about ``m=100`` to reach good accuracy
-(truncation errors of less than 1E-9 or so). To reach the same accuracy with naive PBC would then
-require about 10,000 states, which can easily fill the RAM of a typical desktop computer for a large enough system, not to mention the extra time needed to work with larger matrices.
-
-But poor scaling is not the only drawback of PBC. Systems that exhibit spontaneous symmetry breaking
-are simple to work with under OBC, where one has the additional freedom of applying edge pinning terms
-to drive the bulk into a specific symmetry sector. Using edge pinning reduces the bulk entanglement and makes measuring order parameters straightforward. Similarly one can use infinite DMRG to directly observe symmetry breaking effects.
-
-But under PBC, order parameters remain equal to zero and can only be accessed through correlation functions. Though using correlation functions is often presented as the "standard" or "correct" approach, such reasoning pre-supposes that PBC is the best choice. Recent work in the quantum Monte Carlo community demonstrates that open boundaries with pinning fields can actually be a superior approach.[^Assaad]
-
-[^Assaad]: [Pinning the Order: The Nature of Quantum Criticality in the Hubbard Model on Honeycomb Lattice](http://dx.doi.org/10.1103/PhysRevX.3.031010), Fakher F. Assaad and Igor F. Herbut, _Phys. Rev. X_ **3**, [031010](http://dx.doi.org/10.1103/PhysRevX.3.031010)
-
-
-#### Cases Where Periodic BC Seems Necessary, But Open/Infinite BC Can be Better
-
-Below are some cases where periodic boundary conditions seem to be necessary at first glance.
-But in many of these cases, not only can open or infinite boundaries be just as successful, they
-can even be the better choice.
-
-* _Measuring asymptotic properties of correlation functions_: much of our understanding of gapless one-dimensional systems comes from field-theoretic approaches which make specific predictions about asymptotic decays of various correlators. To test these predictions numerically, one must work with large, translationally invariant systems with minimal edge effects. Using fully periodic boundary conditions satisfies these criteria.
  However, a superior choice is to use infinite DMRG, which combines the much better scaling of open-boundary DMRG with the ability to measure correlators at _arbitrarily long_ distances by repeating the unit cell of the MPS wavefunction. Although truncating to a finite number of states imposes an effective correlation length on the system, this correlation length can reach many thousands of sites for quite moderate MPS bond dimensions. Karrasch and Moore took advantage of this fact to convincingly check the predictions of Luttinger liquid theory for one-dimensional systems of gapless fermions.[^Karrasch]
-
-[^Karrasch]: [Luttinger liquid physics from the infinite-system density matrix renormalization group](http://dx.doi.org/10.1103/PhysRevB.86.155156), C. Karrasch and J.E. Moore, _Phys. Rev. B_ **86**, [155156](http://dx.doi.org/10.1103/PhysRevB.86.155156)
-
-* _Studying two-dimensional topological order_: a hallmark of intrinsic topological order is the presence of a robust ground state degeneracy when the system is put on a torus. Also many topological phases have gapless edge states which can cause problems for numerical calculations. Thus one might think that fully periodic BC are the best choice for studying topological phases. However, topological phases have the same ground-state degeneracy on an infinite cylinder as they do on a torus.[^Zhang] Cincio and Vidal exploited this fact to use infinite DMRG to study a variety of topological phases.[^Cincio] One part of their calculation did actually require obtaining ground states on a torus, but they accomplished this by taking a finite segment of an infinite MPS and connecting its ends. This approach does not give the true ground state of the torus but was sufficient for their calculation and was arguably closer to the true two-dimensional physics.
-
-[^Zhang]: [Quasiparticle statistics and braiding from ground-state entanglement](http://dx.doi.org/10.1103/PhysRevB.85.235151), Yi Zhang, Tarun Grover, Ari Turner, Masaki Oshikawa, and Ashvin Vishwanath, _Phys. Rev. B_ **85**, [235151](http://dx.doi.org/10.1103/PhysRevB.85.235151)
-
-[^Cincio]: [Characterizing Topological Order by Studying the Ground States on an Infinite Cylinder](http://link.aps.org/doi/10.1103/PhysRevLett.110.067208), L. Cincio and G. Vidal, _Phys. Rev. Lett._ **110**, [067208](http://link.aps.org/doi/10.1103/PhysRevLett.110.067208)
-
-* _Obtaining bulk gaps_:
-  DMRG has the ability to "target" low-lying excited states or to obtain such
-  states by constraining them to be orthogonal to the ground state. However, with OBC,
-  localized excitations can get stuck to the edges and not reveal the true bulk gap behavior.
-  Thus one may conclude that PBC is necessary. But using open or infinite boundaries remains
-  the better choice because they allow much higher accuracy.
-
-  To deal with the presence of edges in OBC, one can use "restricted sweeping". Here one sweeps across the full system to obtain the ground state. Then, to obtain the first excited state, one sweeps only through the interior of the system, avoiding the regions near the edges. This traps the excitation in a "soft box" which still lets its wavefunction mix with the basis that describes the ground state outside the restricted sweeping region.
-
-  Within infinite DMRG, boundary effects are rigorously absent if the calculation has converged.
  To compute bulk gaps one again uses a type of restricted sweeping known in the literature as "infinite boundary conditions". For more see the work by Phien, Vidal, and McCulloch.[^Phien]
-
-[^Phien]: [Infinite boundary conditions for matrix product state calculations](http://link.aps.org/doi/10.1103/PhysRevB.86.245107), Ho N. Phien, G. Vidal, and Ian P. McCulloch, _Phys. Rev. B_ **86**, [245107](http://link.aps.org/doi/10.1103/PhysRevB.86.245107)
-
-
-In conclusion, consider carefully whether you really need to use periodic boundary conditions, as they impose a steep computational cost within DMRG. Periodic BC can actually be worse for the very types of measurements where they are often presented as the best or "standard" choice. Many of the issues periodic boundaries circumvent can be avoided more elegantly by using infinite DMRG, or when that is not applicable, by using open boundary conditions with sufficient care.
-
diff --git a/docs/src/faq/Development.md b/docs/src/faq/Development.md
deleted file mode 100644
index 509d66b5b1..0000000000
--- a/docs/src/faq/Development.md
+++ /dev/null
@@ -1,35 +0,0 @@
-# ITensor Development Frequently Asked Questions
-
-## What are the steps to contribute code to ITensor?
-
-1. Please contact us (support at itensor.org) if you are
-   planning to submit a major
-   contribution (more than a few lines of code, say).
-   If so, we would like to discuss your plan and design
-   before you spend significant time on it, to increase
-   the chances we will merge your pull request.
-
-2. Fork the [ITensors.jl](https://github.com/ITensor/ITensors.jl) Github repo,
-   create a new branch and make changes (commits) on that branch. ITensor
-   imposes code formatting for contributions. Please run
-   `using JuliaFormatter; format(".")` in the project directory to ensure
-   formatting. As an alternative you may also use
-   [pre-commit](https://pre-commit.com/). Install `pre-commit` with e.g.
-   `pip install pre-commit`, then run `pre-commit install` in the project
-   directory in order for pre-commit to run automatically before any commit.
-
-3. Run the ITensor unit tests by going into the test/ folder and running
-   `julia runtests.jl`. To run individual test scripts, start a Julia
-   REPL (interactive terminal) session and include each script, such as
-   `include("itensor.jl")`.
-
-4. Push your new branch and changes to your forked repo.
-   Github will give you the option to make a
-   pull request (PR) out of your branch that will be submitted to us, and which
-   you can view under the list of ITensors.jl pull requests.
-   If your PR's tests pass and we approve your changes, we will merge it or
-   ask you to merge it. If you merge your PR, _please use the Squash and Merge_ option.
-   We may also ask you to make more changes to bring your PR in line with our
-   design goals or technical requirements.
-
diff --git a/docs/src/faq/HPC.md b/docs/src/faq/HPC.md
deleted file mode 100644
index 97f0eaa538..0000000000
--- a/docs/src/faq/HPC.md
+++ /dev/null
@@ -1,88 +0,0 @@
-# High Performance Computing (HPC) Frequently Asked Questions
-
-## My code is using a lot of RAM - what can I do about this?
-
-Tensor network algorithms can often use a large amount of RAM. On top
-of this essential fact, the Julia programming language is "garbage collected",
-which means that unused memory isn't given back to the operating system right away,
-but only when the Julia runtime dynamically reclaims it. When your code
-allocates memory very rapidly, this can lead to high memory usage overall.
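-As a minimal illustration, Julia's built-in `@time` macro reports the number and volume of allocations, which makes rapid allocation in a "hot" loop easy to spot. (The function names here are purely illustrative, not part of any library.)
-
-```julia
-using Random: randn!
-
-# Allocates a fresh vector on every iteration of a hot loop:
-function sum_many(n)
-  total = 0.0
-  for _ in 1:10_000
-    total += sum(randn(n))
-  end
-  return total
-end
-
-# Allocates one buffer up front and reuses it on every iteration:
-function sum_many_prealloc(n)
-  buf = Vector{Float64}(undef, n)
-  total = 0.0
-  for _ in 1:10_000
-    randn!(buf)
-    total += sum(buf)
-  end
-  return total
-end
-
-sum_many(1000); sum_many_prealloc(1000)  # run once to compile
-@time sum_many(1000)           # reports on the order of 10,000 allocations
-@time sum_many_prealloc(1000)  # reports only a handful of allocations
-```
-
-The first tip below applies exactly this preallocation pattern.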
-
-Fortunately there are various steps you can take to keep the memory usage of your code under control.
-
-### 1. Avoid Repeatedly Allocating, Especially in Fast or "Hot" Loops
-
-More memory gets used whenever your code "allocates", which happens most commonly
-when you use dynamic storage types like `Vector` and `Matrix`. If you have a code
-pattern where you allocate or resize an array or vector inside a "hot" loop,
-meaning a loop that iterates many times very quickly, the memory from the previous
-allocations may pile up very quickly before the next garbage collector run.
-
-To avoid this, allocate the array once before the loop begins if possible,
-then overwrite its contents during each iteration. More generally, try as much as
-possible to estimate the sizes of dynamic resources ahead of time. Or do one allocation
-that creates a large enough "workspace" that dynamic algorithms can reuse parts of without
-reallocating the whole workspace (i.e. make a large array once, then use portions of it
-when smaller arrays are needed).
-
-### 2. Use the `--heap-size-hint` Flag
-
-A simple step you can take to help with overall memory usage is to pass
-the `--heap-size-hint` flag to the Julia program when you start it. For example,
-you can call Julia as:
-```
-julia --heap-size-hint=60G
-```
-When you pass this heap size, Julia will try to keep the memory usage at or below this
-value if possible.
-
-In cases where this does not work, your code simply may be allocating too much memory.
-Be sure not to allocate over and over again inside of "hot" loops which execute many times.
-
-Another possibility is that you are simply working with a tensor network with large
-bond dimensions, which may fundamentally use a lot of memory. In those cases, you can
-try to use features such as the "write to disk mode" of the ITensor DMRG code or other related
-techniques. (See the `write_when_maxdim_exceeds` keyword of the ITensor `dmrg` function.)
-
-### 3. In Rare Cases, Force a Garbage Collection Run
-
-In some rare cases, such as when your code cannot be optimized to avoid any more allocations
-or when the `--heap-size-hint` provided above is not affecting the behavior of the Julia
-garbage collector, you can force the garbage collector (GC) to run at a specific point
-in your code by calling:
-```
-GC.gc()
-```
-Alternatively, you can call `GC.gc(true)` to force a "full run" rather than just collecting
-a "younger" subset of previous allocations.
-
-While this approach works well to reduce memory usage, it can have the unfortunate downside
-of slowing down your code each time the garbage collector runs, which can be especially
-harmful to multithreaded or parallel algorithms. Therefore, if this approach must be used,
-try calling `GC.gc()` as infrequently as possible and ideally only in the outermost functions
-and loops of your code (the highest levels of your code).
-
-
-## Can Julia Be Used to Perform Parallel, Distributed Calculations on Large Clusters?
-
-Yes. The Julia ecosystem offers multiple approaches to parallel computing across multiple
-machines, including on large HPC clusters and on GPU resources.
-
-For an overall view of some of these options, the [Julia on HPC Clusters](https://juliahpc.github.io/JuliaOnHPCClusters/) website is a good resource.
-
-Some of the leading approaches to parallelism in Julia are:
-* MPI, through the [MPI.jl](https://juliaparallel.org/MPI.jl/latest/) package.
  Has the advantage of optionally using an MPI backend that is optimized for a particular cluster, possibly using fast interconnects like InfiniBand.
-* [Dagger](https://juliaparallel.org/Dagger.jl/dev/), a framework for parallel computing across all kinds of resources, like CPUs and GPUs, and across multiple threads and multiple servers.
-* [Distributed](https://docs.julialang.org/en/v1/stdlib/Distributed/). Part of the Julia standard library, giving tools to perform calculations distributed across multiple machines.
-
-
-## Does My Cluster Admin Have to Install Julia for Me? What are the Best Practices for Installing Julia on Clusters?
-
-The most common approach to installing and using Julia on clusters is for users to install their own Julia binary and dependencies, which is quite easy to do. However, for certain libraries like MPI.jl, there may be MPI backends that are preferred by the cluster administrator. Fortunately, it is possible for admins to set global defaults for such backends and other library preferences.
-
-For more information on best practices for installing Julia on clusters, see the [Julia on HPC Clusters](https://juliahpc.github.io/JuliaOnHPCClusters/) website.
-
diff --git a/docs/src/faq/JuliaAndCpp.md b/docs/src/faq/JuliaAndCpp.md
deleted file mode 100644
index 8f43026379..0000000000
--- a/docs/src/faq/JuliaAndCpp.md
+++ /dev/null
@@ -1,68 +0,0 @@
-# Programming Language (Julia, C++) Frequently Asked Questions
-
-## Should I use the Julia or C++ version of ITensor?
-
-We recommend the Julia version of ITensor for most people, because:
-* Julia ITensor has more and newer features than C++ ITensor, and we are developing it more rapidly
-* Julia is a more productive language than C++ with more built-in features, such as linear algebra, iteration tools, etc.
-* Julia is a compiled language with performance rivaling C++ (see the next question below for a longer discussion)
-* Julia has a rich ecosystem with a package manager, many well-designed libraries, and helpful tutorials
-
-Even if Julia is not available by default on your computer cluster, it is easy to set up your own local install of Julia on a cluster.
-
-However, some good reasons to use the C++ version of ITensor are:
-* using ITensor within existing C++ codes
-* you already have expertise in C++ programming
-* multithreading support in C++, such as with OpenMP, offers certain sophisticated features compared to Julia multithreading (though Julia's support for multithreading has other benefits such as composability and is rapidly improving)
-* you need other specific features of C++, such as control over memory management or instant start-up times
-
-## Which is faster: Julia or C++ ?
-
-Julia and C++ offer about the same performance.
-
-Both languages compile to optimized assembly code and offer arrays and containers
-which can be efficiently stored and iterated over. Well-written Julia code can be even faster
-than comparable C++ code in many cases.
-
-The longer answer is of course that _it depends_:
-* Julia is a more productive language than C++, with many highly-optimized libraries for
-  numerical computing tasks, and excellent tools for profiling and benchmarking.
-  These features help significantly to tune Julia codes for optimal performance.
-* C++ offers much more fine-grained control over memory management, which can enhance
-  performance in certain applications and control memory usage.
-
-* Julia codes can slow down significantly during refactoring or when introducing new
-  code if certain [best practices](https://docs.julialang.org/en/v1/manual/performance-tips/)
-  are not followed. The most important of these is writing type-stable code. For more details
-  see the [Performance Tips](https://docs.julialang.org/en/v1/manual/performance-tips/) section
-  of the Julia documentation.
-* C++ applications start instantly, while Julia codes can be slow to start.
-  However, once this start-up time is subtracted, the remaining run time of a
-  Julia application is similar to that of C++.
-
-## Why did you choose Julia over Python for ITensor?
-
-Julia offers much better performance than Python,
-while still having nearly all of Python's benefits. One consequence is that
-ITensor can be written purely in Julia, whereas to write high-performance
-Python libraries it is necessary to implement many parts in C or C++
-(the "two-language problem").
-
-The main reasons Julia codes can easily outperform Python codes are:
-1. Julia is a (just-in-time) compiled language with functions specialized
-   for the types of the arguments passed to them
-2. Julia arrays and containers are specialized to the types they contain,
-   and perform similarly to C or C++ arrays when all elements have the same type
-3. Julia has sophisticated support for multithreading, while Python has significant
-   problems with multithreading
-
-Of course there are some drawbacks of Julia compared to Python, including
-a less mature ecosystem of libraries (though it is simple to call Python libraries
-from Julia using [PyCall](https://github.com/JuliaPy/PyCall.jl)), and less widespread
-adoption.
-
-## Is Julia ITensor a wrapper around the C++ version?
-
-No. The Julia version of ITensor is a complete, ground-up port
-of the ITensor library to the Julia language and is written
-100% in Julia.
-
diff --git a/docs/src/faq/JuliaPkg.md b/docs/src/faq/JuliaPkg.md
deleted file mode 100644
index c83008ba12..0000000000
--- a/docs/src/faq/JuliaPkg.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Julia Package Manager Frequently Asked Questions
-
-## What if I can't upgrade ITensors.jl to the latest version?
-
-Sometimes you may find that doing `] update ITensors` or equivalently doing `] up ITensors` within
-Julia package manager mode doesn't result in the ITensors package
-actually being upgraded. You may see that the version
-you have remains stuck at a version lower than the latest one, which you
-can [check here](https://github.com/ITensor/ITensors.jl).
-
-What is most likely going on is that you have other packages installed which
-are blocking ITensors from being updated.
-
-To get more information about which packages may be doing this, and what versions
-they are requiring, you can do the following. First [look up the latest version of ITensors.jl](https://github.com/ITensor/ITensors.jl). Let's say for this example that it is `v0.3.0`.
-
-Next, input the following command while in package manager mode:
-
-```
-julia> ]
-pkg> add ITensors@v0.3.0
-```
-
-If the package manager cannot update to this version, it will list all of the other packages that are blocking this from happening and give information about why. To go into a little more depth, each package has a compatibility or "compat" entry in its Project.toml file which says which versions of the ITensors package it is compatible with.
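-For example, a compat entry along these lines (with illustrative version numbers) would restrict that package to the ITensors 0.2 series:
-
-```
-[compat]
-ITensors = "0.2"
-```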
If these versions do not include the latest one, perhaps because the package has not been updated, then it can block the ITensors package from being updated on your system.
-
-Generally the solution is to just update each of these packages, then try again to update ITensors. If that does not work, then check the following:
-* Are any of the blocking packages in "dev mode", meaning you called `dev PackageName` on them in the past? If so, try doing `free PackageName` to bring them out of dev mode.
-* Are any of the blocking packages unregistered packages that were installed through a GitHub repo link? If so, you may need to do something like `add https://github.com/Org/PackageName#main` to force-update that package to the latest code available on its main branch.
-
-If you still can't get the ITensors package to update, feel free to [post a question](https://itensor.org/support) or [contact us](https://itensor.org/about.html#collaboration) for help.
-
diff --git a/docs/src/faq/QN.md b/docs/src/faq/QN.md
deleted file mode 100644
index 7d66f22261..0000000000
--- a/docs/src/faq/QN.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# Quantum Number Frequently Asked Questions
-
-## Can I mix different types of quantum numbers within the same system?
-
-Yes, you can freely mix quantum numbers (QNs) of different types. For example,
-you can make the sites of your system alternate between sites carrying
-spin "Sz" QNs and fermion sites carrying particle number "Nf" QNs. The QNs will
-not mix with each other and will each be conserved separately at the original value
-you set for your initial wavefunction.
-
-## How can I separately conserve QNs which have the same name?
-
-If you have two physically distinct types of sites, such as "Qudit" sites, but
-which carry identically named QNs called "Number", and you want the qudit number
-to be separately conserved within each type of site,
-you must make the QN names different for the two types of sites.
-
-For example, the following line of code will make an array of site indices with the qudit number QN having the name "Number\_odd" on odd sites and "Number\_even" on even sites:
-```
-sites = [isodd(n) ? siteind("Qudit", n; dim=10, conserve_qns=true, qnname_number="Number_odd")
-                  : siteind("Qudit", n; dim=2, conserve_qns=true, qnname_number="Number_even")
-         for n in 1:2*L]
-```
-(You may have to collapse the above code into a single line for it to run properly.)
diff --git a/docs/src/faq/RelationshipToOtherLibraries.md b/docs/src/faq/RelationshipToOtherLibraries.md
deleted file mode 100644
index 620c3a647e..0000000000
--- a/docs/src/faq/RelationshipToOtherLibraries.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# Relationship of ITensor to other tensor libraries
-
-Here we will describe the relationship of ITensor to more traditional Julia Arrays or deep learning libraries like TensorFlow and PyTorch. There are a few things that distinguish ITensor from those approaches:
-
-1. ITensors have dimensions with labels that get passed around, which makes it simple to perform certain operations like contraction, addition, and tensor decompositions with a high level interface, independent of memory layout.
   This is along the same lines as Julia packages like [NamedDims.jl](https://github.com/invenia/NamedDims.jl) and [AxisArrays.jl](https://github.com/JuliaArrays/AxisArrays.jl) and libraries in Python like [xarray](https://xarray.pydata.org/en/stable/index.html), however I would argue that the ITensor approach is a little more sophisticated (the dimensions have more metadata which makes them easier to manipulate in different situations, random ids to help avoid name clashes, etc.). This design was inspired by the needs of tensor network algorithms, where there are many tensor dimensions in the computation (many of which are dynamically created during the calculation), but it would be helpful for writing other algorithms too (a short sketch below illustrates this style).
-
-2. The ITensor type has a dynamic high level interface, where the type itself is mutable and the data can be swapped out. This allows for conveniently allocating the data of an ITensor on the fly "as needed", which makes for a nicer, more flexible interface (like initializing an empty ITensor before a loop, and filling it with the correct data type when the first value is set), at the expense of a small overhead for accessing data in the ITensor. We have found this tradeoff is worth it, since we expect ITensors to be used for medium to large scale calculations where operations on the tensors like contraction, addition, and tensor decomposition dominate the cost of the calculation, and code can be designed with function barriers to speed up operations when data is being accessed repeatedly.
-
-3. Another feature that ITensor has that goes beyond what is available in standard Julia, TensorFlow, and PyTorch is tensors which are [symmetric under a group action](https://arxiv.org/pdf/1008.4774.pdf). Physically, these tensors correspond to objects with a conserved quantity (like a quantum state with a conserved number of particles), so that feature is more physics-oriented, but it could have applications in other areas like machine learning as well. In practice, these tensors are block sparse, and have extra metadata on the dimensions labeling representations of the group.
-
-4. Based on the features above, the ITensor library provides high level implementations of tensor network algorithms (algebraic operations of very high dimensional tensors, such as addition, multiplication, and finding dominant eigenvectors). In general these algorithms can be (and have been) written on top of other libraries like standard Julia Arrays/AD, PyTorch, or TensorFlow, but they might have various downsides (a less convenient interface for dealing with tensor operations, no support for the types of symmetric tensors we often need, limited support for tensors with complex numbers in the case of libraries like PyTorch, though perhaps that has improved since I last checked, etc.).
-
-Although ITensor has primarily focused on quantum physics and quantum computing applications, there is work using ITensor for machine learning applications (so far focused on applications of tensor networks to machine learning, so no neural network calculations yet as far as I know). In general, these different libraries (ITensor, Flux, PyTorch, TensorFlow) are biased towards the specific methods and application areas they are used for the most: ITensor is more biased towards tensor network calculations and quantum physics/quantum computing applications, based on the available features and interface, while PyTorch and TensorFlow are more biased towards neural network calculations.
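-As a minimal sketch of the labeled-dimension style from point 1 above (using the `Index`, `random_itensor`, and `inds` functions of ITensors.jl):
-
-```julia
-using ITensors
-
-i = Index(2, "i")
-j = Index(3, "j")
-k = Index(4, "k")
-
-A = random_itensor(i, j)
-B = random_itensor(j, k)
-
-# The shared index j is identified by its label and unique id, so the
-# contraction needs no axis numbers or memory-layout information:
-C = A * B
-@show inds(C)  # (i, k)
-```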
However, our goal would be to provide more features to ITensor that would make it useful for neural network applications as well, such as better support for slicing operations. diff --git a/docs/src/getting_started/DebugChecks.md b/docs/src/getting_started/DebugChecks.md deleted file mode 100644 index 785a04b6e2..0000000000 --- a/docs/src/getting_started/DebugChecks.md +++ /dev/null @@ -1,59 +0,0 @@ -# Enabling Debug Checks - -ITensor provides some optional checks for common errors, which we call "debug checks". -These can be enabled with the command: -```julia -ITensors.enable_debug_checks() -``` -and disabled with the command: -```julia -ITensors.disable_debug_checks() -``` - -We recommend enabling debug checks when you are developing and testing your code, and then -disabling them when running in production to get the best performance. - -For example, when debug checks are turned on, ITensor checks that all indices of an ITensor -are unique (if they are not unique, it leads to undefined behavior in tensor operations -like contraction, addition, and decomposition): -```julia -julia> using ITensors - -julia> i = Index(2) -(dim=2|id=913) - -julia> A = random_itensor(i', i) -ITensor ord=2 (dim=2|id=913)' (dim=2|id=913) -NDTensors.Dense{Float64, Vector{Float64}} - -julia> noprime(A) -ITensor ord=2 (dim=2|id=913) (dim=2|id=913) -NDTensors.Dense{Float64, Vector{Float64}} - -julia> ITensors.enable_debug_checks() -using_debug_checks (generic function with 1 method) - -julia> noprime(A) -ERROR: Trying to create ITensors with collection of indices ((dim=2|id=913), (dim=2|id=913)). Indices must be unique. -Stacktrace: - [1] error(s::String) - @ Base ./error.jl:33 - [2] macro expansion - @ ~/.julia/packages/ITensors/cu9Bo/src/itensor.jl:85 [inlined] - [3] macro expansion - @ ~/.julia/packages/ITensors/cu9Bo/src/global_variables.jl:177 [inlined] - [4] ITensor - @ ~/.julia/packages/ITensors/cu9Bo/src/itensor.jl:82 [inlined] - [5] #itensor#123 - @ ~/.julia/packages/ITensors/cu9Bo/src/itensor.jl:123 [inlined] - [6] itensor(args::NDTensors.DenseTensor{Float64, 2, Tuple{Index{Int64}, Index{Int64}}, NDTensors.Dense{Float64, Vector{Float64}}}) - @ ITensors ~/.julia/packages/ITensors/cu9Bo/src/itensor.jl:123 - [7] noprime(::ITensor; kwargs::Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}) - @ ITensors ~/.julia/packages/ITensors/cu9Bo/src/itensor.jl:1211 - [8] noprime(::ITensor) - @ ITensors ~/.julia/packages/ITensors/cu9Bo/src/itensor.jl:1211 - [9] top-level scope - @ REPL[7]:1 -``` -You can track where debug checks are located in the code [here](https://github.com/ITensor/ITensors.jl/search?q=debug_check), -and add your own debug checks to your own code by wrapping your code with the macro `ITensors.@debug_check`. diff --git a/docs/src/getting_started/Installing.md b/docs/src/getting_started/Installing.md deleted file mode 100644 index 6b4433daf4..0000000000 --- a/docs/src/getting_started/Installing.md +++ /dev/null @@ -1,79 +0,0 @@ -# Installing Julia and ITensor - -## Installing Julia Locally and On a Cluster - -Because Julia is a new language, it is usually not pre-installed on -machines such as supercomputing clusters. But it is easy to install -yourself both on your own machine and in your supercomputing environment. -Here we will briefly cover installing Julia on your own machine, then -discuss setting it up yourself on a supercomputer. 
-
-**Installing Julia on Your Own Machine**
-
-To install the Julia language, visit [https://julialang.org/downloads/](https://julialang.org/downloads/) for downloads and installation instructions. Or consider using your system's package manager.
-
-
-**Cluster Install of Julia and ITensor**
-
-If you would like to use Julia on a remote cluster,
-such as at many labs or universities, but Julia is
-not available system-wide, you can still easily install your own
-local version of Julia. A local install will offer the same performance and
-features (package manager, etc.) as a system-wide install, and you can upgrade
-it at your own pace.
-
-Once you set up Julia in your cluster account, you can install ITensor
-in the same way as on your personal computer (see the next section on installing ITensor).
-
-To install Julia locally within your cluster account, follow these
-basic steps (details will vary depending on your setup):
-1. Download a binary version of Julia [here](https://julialang.org/downloads/). On a remote Unix or Linux cluster, you can use the program `wget` to download remote files. (Right-click the "Generic Linux on x86, 64-bit" download link on the Julia downloads page to copy the link to pass to the `wget` program.)
-2. Use the tar program to uncompress the .tar.gz file you have downloaded.
-3. Create a soft link somewhere in your PATH (such as in the bin/ subfolder of your home folder, which you might need to create) pointing to the file "bin/julia" inside of the uncompressed Julia folder you just created.
-
-For example, the set of commands might look like this (where these commands
-are assumed to be executed in your home directory):
-
-```
-$ cd
-$ mkdir -p bin
-$ wget https://julialang-s3.julialang.org/bin/linux/x64/1.7/julia-1.7.2-linux-x86_64.tar.gz
-$ tar xvzf julia-1.7.2-linux-x86_64.tar.gz
-$ ln -s julia-1.7.2/bin/julia bin/julia
-```
-If you want to install Julia 1.6.6, you would change `1.7` to `1.6` and `1.7.2` to `1.6.6`.
-In general we recommend using the current stable release of Julia, which you can find by
-going to [the Julia Downloads page](https://julialang.org/downloads/).
-We also don't recommend using versions of Julia below 1.6, which are no longer compatible
-with ITensors.jl as of ITensors 0.3.
-
-After these steps, you should be able to type `julia` from your terminal to run Julia
-in interactive mode. If that works, then you have the Julia language and can run it in
-all the usual ways. If it does not work, you may need to log out and back in, and check
-that the `bin` directory is in your program execution path (PATH environment variable).
-
-Explanation of the sample commands above:
-  - The first command `cd` goes to your home directory.
-  - The second command makes a new folder `bin/` under your home directory if it does not already exist.
-  - The third command downloads the Julia language as a compressed tar.gz file. (You may want to do this step and the following steps in a different folder of your choosing.)
-  - The fourth command uncompresses the tar.gz file into a folder called (in this example) `julia-1.7.2`.
-  - The last command makes a soft link called `julia` in your `bin` directory which points to the Julia binary inside the folder you just unpacked.
-
-## Installing ITensor (ITensors.jl Package)
-
-Installing the Julia version of ITensor is easy once you
-have the Julia language installed.
 For more information about
-installing Julia, please see [the Julia language downloads page](https://julialang.org/downloads/).
-
-Once you have installed Julia on your machine,
-
-1. Enter the command `julia` to launch an interactive Julia session (a.k.a. the Julia "[REPL](https://docs.julialang.org/en/v1/stdlib/REPL/)")
-2. Type `]` to enter the package manager (the `pkg>` prompt should now show)
-3. Enter the command `add ITensors`
-4. After installation completes, press backspace to return to the normal `julia>` prompt
-5. _Optional but Recommended_: Enter the command `julia> using ITensors; ITensors.compile()`
-   to compile a large fraction of the ITensor library code, and follow the instructions afterward to make an alias for loading a pre-built ITensor system image with Julia. This step can take up to 10 minutes to complete but only has to be done once for each version of ITensor. See the section on compiling ITensor for more information.
-
-Sample screenshot:
-
-![](install_screenshot.png)
diff --git a/docs/src/getting_started/NextSteps.md b/docs/src/getting_started/NextSteps.md
deleted file mode 100644
index a2eeaffb22..0000000000
--- a/docs/src/getting_started/NextSteps.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# Next Steps
-
-* Try one of the [Tutorials](@ref dmrg_tutorial) in the next section of the ITensor documentation
-
-* Browse the [Code Examples](@ref itensor_examples).
-
-* Read the [ITensor Paper](https://www.scipost.org/SciPostPhysCodeb.4) for a long-form introduction to the design and main features of the ITensor library
-
-* Read the [Advanced ITensor Usage Guide](@ref advanced_usage_guide)
-
-* More Julia language tutorials and resources
-  - [From zero to Julia!](https://techytok.com/from-zero-to-julia/)
-  - [Think Julia](https://benlauwens.github.io/ThinkJulia.jl/latest/book.html#_preface)
-  - [Official Julia Language Manual](https://docs.julialang.org/en)
-  - [List of Resources at julialang.org](https://julialang.org/learning/)
-
diff --git a/docs/src/getting_started/RunningCodes.md b/docs/src/getting_started/RunningCodes.md
deleted file mode 100644
index a3c1b7b4b4..0000000000
--- a/docs/src/getting_started/RunningCodes.md
+++ /dev/null
@@ -1,124 +0,0 @@
-# Running ITensor and Julia Codes
-
-## Basic Example Code Template
-
-The basic outline of a code which uses the ITensor library is as follows
-
-```julia
-using ITensors
-
-let
-  # ... your own code goes here ...
-  # For example:
-  i = Index(2, "i")
-  j = Index(3, "j")
-  T = random_itensor(i, j)
-  @show T
-end
-```
-
-The reason we recommend the `let...end` block is that code written
-in the Julia global scope can have some surprising behaviors.
-Putting your code into a `let` block avoids these issues.
-
-Alternatively, you can wrap your code in a function:
-```julia
-using ITensors
-
-function main(; d1 = 2, d2 = 3)
-  # ... your own code goes here ...
-  # For example:
-  i = Index(d1, "i")
-  j = Index(d2, "j")
-  T = random_itensor(i, j)
-  @show T
-end
-
-main(; d1 = 4, d2 = 5)
-```
-which can be useful in interactive mode, particularly if you might want to
-run your code with a variety of different arguments.
-
-## Running a Script
-
-Now say you put the above code into a file named `code.jl`. Then you can run
-this code on the command line as follows
-
-```
-$ julia code.jl
-```
-
-This script-like mode of running Julia is convenient for running longer jobs,
-such as on a cluster.
-
-## Running Interactively
-
-However, sometimes you want to do rapid development when first writing and
-testing a code.
 For this kind of work, the long startup and compilation times
-currently incurred by the Julia compiler can be a nuisance. Fortunately,
-a nice solution is to alternate between modifying your code and running it
-by loading it into an already running Julia session.
-
-To set up this kind of session, take the following steps:
-
-1. Enter the interactive mode of Julia, by inputting the command `julia` on the command line. You will now be in the Julia "REPL" (read-eval-print loop) with the prompt `julia>` on the left of your screen.
-
-2. To run a code such as the `code.jl` file discussed above, input the command
-   ```
-   julia> include("code.jl")
-   ```
-   Note that you must be in the same folder as `code.jl` for this to work; otherwise input the entire path to the `code.jl` file. The code will run and you will see its output in the REPL.
-
-3. Now say you want to modify and re-run the code. To do this, just edit the file in an editor in another window, without closing your Julia session. Now run the command
-   ```
-   julia> include("code.jl")
-   ```
-   again and your updated code will run, but this time skipping the compilation overhead incurred on previous runs.
-
-The above approach to running a code interactively has the big advantage that you only have to pay the startup time of compiling ITensor and other libraries you are using once. Further changes to your code only incur very small extra compilation times, facilitating rapid development.
-
-## Compiling an ITensor System Image
-
-The above strategy of running code in the Julia REPL (interactive mode) works well, but still incurs a large start-up penalty for the first run of your code. Fortunately there is a nice way around this issue too: compiling ITensors.jl and making a system image built by the [PackageCompiler.jl](https://github.com/JuliaLang/PackageCompiler.jl) library.
-
-To use this approach, we have provided a convenient one-line command:
-
-```julia
-julia> using ITensors; ITensors.compile()
-```
-
-Once ITensors.jl is installed, you can just run this command in an interactive Julia session. It can take a few minutes to run, but you only have to run it once for a given version of ITensors.jl. When it is done, it will create a file `sys_itensors.so` in the directory `~/.julia/sysimages/`.
-
-To use the compiled system image together with Julia, run the `julia` command (for interactive mode or scripts) in the following way:
-
-```
-$ julia --sysimage ~/.julia/sysimages/sys_itensors.so
-```
-
-A convenient thing to do is to make an alias in your shell for this command. To do this, edit your `.bashrc` or `.zshrc` or similar file for the shell you use by adding the following line:
-
-```
-alias julia_itensors="julia --sysimage ~/.julia/sysimages/sys_itensors.so -e \"using ITensors\" -i "
-```
-
-where of course you can use the command name you like when defining the alias. Now running commands like `julia_itensors code.jl` or `julia_itensors` to start an interactive session will have the ITensor system image pre-loaded and you will notice significantly faster startup times. The arguments `-e \"using ITensors\" -i` make it so that running `julia_itensors` also loads the ITensor library as soon as Julia starts up, so that you don't have to type `using ITensors` every time.
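-For example, with the alias above in place, typical usage looks like:
-
-```
-$ julia_itensors code.jl   # run a script with the sysimage pre-loaded
-$ julia_itensors           # start an interactive session with ITensors already loaded
-```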
-
-## Using a Compiled Sysimage in Jupyter or VS Code
-
-If you have compiled a sysimage for ITensor as shown above, you can use it in Jupyter by running the following code:
-```
-using IJulia
-installkernel("julia_ITensors","--sysimage=~/.julia/sysimages/sys_itensors.so")
-```
-in the Julia REPL (Julia console).
-
-
-To load the ITensor sysimage in VS Code, you can add
-```
-"--sysimage ~/.julia/sysimages/sys_itensors.so"
-```
-as an argument under the `julia.additionalArgs` setting in your Settings.json file.
-
-For more information on the above, see the following [Julia Discourse post](https://discourse.julialang.org/t/using-an-itensors-sysimage-when-starting-the-julia-repl-in-vs-code/98625/4).
-
diff --git a/docs/src/getting_started/install_screenshot.png b/docs/src/getting_started/install_screenshot.png
deleted file mode 100644
index 3e26156dc76309c17d8aef5f801650f88e4fd544..0000000000000000000000000000000000000000
GIT binary patch
z52hf$0jl@#iuLdfWBuUc6t*UObnBQ_l+w#)HQXGm2?RZs(q*jkoQDD< zyEtg876C~XQhBugW^wluzJ0k*SNsd(QEg1cbs}pz`7^F@K6E=?jS{XyY32`6KT1kN$PETup8O!78l6NGFIO43gGfEO#-s1e`l3duRK^DJ8p8A+d zZ|?ZF(W$Yw%proIEaqyWsXTx!7w|W-8wrvQLl2?p%kJmm7?!qT)6=${1SK-*zEf4g zpnKMXlX!n8ip1-VVYET+L)j=Qw`4_XkIcXh4cPdsmGBT+X2N^7d?oq3hof*8Cr2H} z9m(z_roVpGJUrtGMm!o@TSW@nt1U-gp;HHhPiB^8!l6)uCQYBy4%+UzH0JYO1Q5sR z)iKpaPC_#j(}iBXGjesU11x$$97tZo09%uyq0bp)l&$}NrXl~2)TI9vUA~`_hUwdF zj#*UEQ_8d28ebNu*Co#soa{0!J{HTj!y{_{o{8sHve6g5(w_HqP9=(PQM2!#DyMsk zpAZ+x!RrbkwL*1&a5dc#X3~t##N}@@IO_UCgNN^ww6y3l-G&L`HN9da5V61LqnLo< z~43pSJ}YR7FANyGny!(@G?4`fBoSUnQz;SC+WNp9p-q3`3qC|t7ep*0}@UqF_|`*0+1 zRYhRqvHNCWK5_r=e})_1A4iXh>CVp!W-V~#`IJlzSTASLQZwurP=#M81qlqCef#8F zO|$q=4&L(NAa)-fgRYe5D$DV9`SIr*1wrq%)$oys%ohMA;6Pd`Dvcrq82duWVIzJB4y>K8RBW3AL4B$q*YQwLa4w)AU{8KD^*f~|0%kTd&8u{M^0z?z4 z8XX!3#PPnY=PEjNJM*ivG`U#eZCZE6ktx-#;GW!H^jA0L(vNj%yz5eDCbz}lF*|U+ zI=Rvn^t34qx3x6t(Y))7!Jd!v1fqM@)%e|65$ka8YjvYfNXN1`z_-WO=%Xhom2sYY zEOK|kJ^k1t59F*+(U@Pf3T~_ilXI5BwP(I9E{PELhC8t1uDRdmW$UgsCJLJ}#M3?N zPyMRZ8Csmx#Y(sw=Y7$rL(03_O+W?fq2@O$Oqk!-g1~>Ow5vY8^%5uND6M7mDOw2x z5K0y6xn!f>kYZosnKg~y+Ev9zh6`%mu_>;SKacIW-6LTL9K^Y$FN zGZkgRRX4j5G-p?kWaV6#C}FXP6q>!TS zS{~&Gb%>9i%Y%Jelj%*=V*9A6_dzku#Q?NE4r9Zw##2XBwO;Db`=pR8jR`f^Ukp*O zF88$u;P8{j|31?~C}B+4Mv4RGKLIi^B+l~VXx^6CagkiSLHk}@T$KvmQBR|TxorEG zFA+WP!A?at17PF?X9D-@o6@!HS47;WPMX z1A@|lx`{oxH?-Q)azmSO_@|&eh=qBx-dQYvl1tfp_XmBed=z&*ns9jBO|0YmLusttcw{bH# zS`5%jxRsny&Q6?{|`@^cF+y z&_uFjw?M=oAEQq^k{6DixLCQCGbCb=mTmJvl_1}1^V-7iHyK^`d!}k__1Io6*&ZS1 zjjqKBlQ3^RA=(Hj~$)0lzCu zq|kUUU+Y7mdG|HyjrHEkc#4(WAMHRCN+$>^s5Oyj_n=8bai?TpeK|P=7Trdvs32 zxyL@i10UHMi|q%SL@RKs^h86H1y$7Zv<7>h?n#az6>+Duw?YmM!fqiTOD)2>xtZJ;r=_*A3vZY5sMR#C|f z)0q}EEgsHHcNAY{sbeXNzH5|M^>JjN_Yhu-Oy0dK--(iAwE68{eaL9niE(G|>f>gf zx-PSJzOn7)IqK;-IImIq*?K{sH+;DMW0IrGi1uzl?Fr7)oYhLLWe?+_UQ%X2(}ObD zot08{_QL?{KG3G=V{6)eG%Sktk{Abq9Q>+;+Ur-uccyTuav#WQX=x2iwjT4GP1LoL z;%2pI3knLZ6y~SLTq|Obu^*$+w_Kygy?`;h+#=VVIk85QmgG;!_vma zNiz&83aC)~El6*6vc+!PQSA|qRQJ`S4?u7BJUxTAP!!+&a<6CQT2O>Vzy3vpA#riK z*M47xym+;Q z&x7d`I%9cKv`d$Z?dnQR;gF>g)%Pb~CLiyNssg5$Qv%qCt`9x@NVys3 z$ssYS$FK}Qe#|cZLGAg2g#-h%ouK5A^JZINkvwVD%{nB^1;G~{ty#*)Y z;Q5$L2LRDdAT6^fiBX~}oqUgWXd_9Rwb4DK-bEWa)jGM@bL>P8BIc|QRZ3q>3-)r; ze9d}6xf9(sG}l!WFLa>VW_@no8`;syO1j|F%4%8AYJozQ_zK$9nt9vH7wCoiRS$h{ zUWyc4R?j$p)2DFL1UKD8MF`NdZW3s%_2-h|R3gWFF zlym7$FgYBq3lhqs-~6jzO_qbH?6Ai|s&LX?UILXmwQKAsucw#dn20?(0P3a@4y z1gnr2g$WX#d6WeIAkRlvbG;QrOzh%`@G^Lg5BC=xOL{Ox&9AqD`z~Jz#Q;}K(N~E+ zAg@*9--sJtRTxY8o^*ZNBJD}(KxQmW)L@Z|Mg?!vO5aN?ohV_$wB`ZT%DN*v_!m5x zr#k?0iMX4eoy7K|?#gpq=bv54xtFTjU;2g|qjZPZ*+uk@sm!k&0yco`3PEF8o(y6) zl{5jLmuCN`Oh!VfZPKtT7jJTaM4Onv=qg{`P>saKXn3X*z%n9~zP?+Ony-FlmR%Jr z$9Ju{i%=S5*u7uRkttQJ7a1Mh7g7#bekw9EFED?U?tI9xM3OvIX{PJTykc{Ju<*1= zIFYgTwG=-4_GH4W;x-JR^a3D!t$qi8tf$Vr)`Bjt>U99iIsYG>Dzmep&&O6W0ngG# zHuTj7quHE%hboR%JpgX*p?1}0aT=$9C9zOuZL)Z_bumXzOtoA~828I5&mE8jJp+U;vHu0aaRpnrS3*;|X?eyRu|q9m(6U>=@9#Byz%B?kX0#lwex)@_RQ)I>w+{+ z)q!981%K%L{6BKzJY&a+=A$R1k(T(_N%$Rm*1E^hacJry)ssDw@9}}R@6Z&|ds2Aq zs@~eydS@iuq0Gu6kh`^t*5xWX+4^z3A2JC>V4=Nzla=lyEV_HSB5LS91)Qs9>!be9 z=M(0t)>xPYoQ#)J^vtMam1265Q@w?$@Dt8w1r=KlqG6WijG;D=;-_0JgfLZwb=srW zUC(h6pupiW^5uXF=VJ7BdK9e_obEoZ$3}9{AzJGBvok7G3gP?X2*LF!Y3i{Yb)tQWAX$qC>k-_RCh>R7hBVNr&}Cb z_T^I2k17gUTK1~gg#a6TS+{^rA+OW&2KWakL(ake{_>Ac(b35#RC61WOJ3B^$smQCrORft@`*rnZsS-WlhSBurWNQImtG8G9o`@3U8mTZymC4vQnzHD=hd zU{o2SM=4JW<+5d@(B$#0Qj=FoM*0N_*|$FlvG5wCSb8rVyPrQsT0)vzvfO0}ZTsd^y>c?^+A={CE`TK1036Ax4sZ!OeP3i`*x5YFuFV0QL*mFjo`0{+--(;u9 ziw|twx!RWfsMTg&)j2a%FA?)F=}#lSBc${VSG|Hp`s%u7-`GKgs}SS>FOK5dFV*Rf zUVK8UfnF 
zsOYKJk65URmn0*i|g!fN;gsUVu-Ap2jjzmWY zK^^ySy0D&yu-=>n4)VD`oF0Y2LYdLF~q5 zrufgMpvdUjlO9)2SA@R%_wR_;!f(IdU>c7^lHTmI`;EyjMYrb!$C%{$$F20QZT7u| zUX@vb^6}u3?V;%{Y<>P*X-ya|~=TPn|>f$pxH+ko^4mw}4~j#RWn297V> znoq^8TSkcU>!Q?Zp77@~?`PN>lfvUFwR(>@%X}A^Qta{5)8rG|lzFt6PIje&MJt70 z;4h&ff?vrleY{zj{pGfGMAPBC|Ju6liUWQ2tN>jg_e4k(_hpXr7eoG}vNv9N)wt*G4I5Z1akZho8m z^Uo8KL^CRNYp5Gr`p$8KD!FPEkwVaOiJH1P`TgU_R2l7g8ds#|r{TD7T^)0iiJZql z$ZsaU-|RIVL~jvZYNlO&z&Ch4y#Qxdji9TYfEuavCfwn;{b5S)TAKR{7=ISBpA*_q zCG_d7wHwV_b*0*(TcXtYX6L840U)VW6F(8m&rqeQL7>D2JMi#YddMrionGS*nFmZ5<9L6|!X!(*ZoS z-xioe=*{AxkgH$ARP^46nU_o*8EI*@ipBck+ZZxl1T7v%ACCI*AE)EenhIx*UFhyDA@buCA+or>9s+X8BU=@ssv(*r z@f)qAum;N@k_`_?UM_2;dra@#8yo^bdTKIw#vN&BRXERG@&UDV*^ztM1ugiWrkH#8 zkLC5cyXHryiqUU2TQs=uO9+Hg_9=Rt%1n1`FHy@%2naddzze;xfd!?U*xeGUs7U;N^~sYTjC24QnPu;MYntS%kw3&JNGU|Sqe*@ zDyop_1RZyfPEdS&`re!Hmf=9F$DDCnZ-KN+j*b97508fY-7OJ<<_xV;U5MqazDuCr zuD6+b7s$xv{RqHf9>l@Haaw#exbP@j^4lcWgtuzhQCjq!zLYOt=phw&ictLq2|5}2 z>1|ecojve>I#;nr|rxw(tFG*xKQnqi&tvma!Pd8MN zcSh|mE~>m62%OgaAaD_+I&=rtR+busbxSF^2waT`}E$0tF<+mrGs zb)SPJWCi<*wPr*VF7{)EIJf;C=TKpVz(V;6>)mdbEHHnvqCP^iIujXoI}mAZXqiYE zaqTwATqX#FpkWWp8WstAuKX#TZx~^3)Q3*yrq)0nsf$0oZD&e8-e3LOd8R5v+`fnwwvH8tZm4TkiyP2LwSxltt7H%eT@xPz4U)0{p1 zg)`0&J~=B#qZxMX@{fjY3%6CAEiIY4Iy)_@LesDp1{gv4b>8m<|B5)iW1mJ{pa0G>O9@>;37wjH zF9L5LWfd=`G+WZ>ZtA(6g3^RQS|=X*2%G5~#M9~zvlsg^s%ND_kdj!w(#YswBbHEJ zgL)zxmAmMf+CdHCtC?{nr^_tIJB4=C*=6*Eut5o9rp!@ITV5v9uX`<&M6wc;)*`H& z!L*jdCM?K?^tRggNCUS)bss{z8SIUc#V&uh8ngV(<9o!;+S}qIIWfDtW@!E@#(-HW z`q{(Go&w2%PmPdQvj+Y3rwK+yH9xias`1+Qz#YlHQ)G3h9obwZC7Z z)%%g53I1@`JUD;9Kb|>Pv8G|i5UG+bBxgHadvq^Ei(BX$>IavIh#`Gzrt(MEUredk z8*y>>lezoO$|Fa;<~IL-te9n5HE->oF!PcF%>c`>r3QJ&vq2%as|Tirc`c0={PoQ! z%hNh|2UE7LjBLPtrEn$ExHOG(Vn@vxwj5!19*Tf&f#^N(>&ckqakc(U+U0KTI%}|T zbKkhG4UP$6Qj|?RV?l-NlKS+hQCLVc+FwBKPuT=Q2%f$1B%3t#d``Xj4TKmG?NhEs z{V%JdWvO$w@6E+?{aR|+mX=-m6%yGCo%>EL;Bm$0v~VsXveUCZR@P;)IeHu=J@R2T z33dMm)g03wVZ00*&p)a+@yTa~KTbM36*DdV>W3DBtYfm!u~k>TAyK3F#-uobYW7qI z*>q7!NltICEF`4&M$G_@WdDN?9ao)sN+}k-(ew%Q(a-O^wv7~nnj2n-H$ahrhjT5S z8=sz9*$99B{5i+BT=aCaa#Q>YF2Zt zDs_Mit_V@ej$4};e))C=WhQ^UbszY_8Fod1Cm|aBO7cs~zYPhu)3Dy&*O&}ep=5e2 zMy!k767bQOoDfs|Htz2V*RJ8RXP=FwZdQ<&XcLi=lH#`g7Q5-08HDdACj0T1x-1GQ z$e)rlCD>He#8uU_FoWz!YH(ajFo&DXc@BM|&!XH@NhBsFcABn?sQ+zBIbr+BhJ59| zVor?|RN+LY?;+flsHNvg6XO_Z_xYzh7R*>}|J^~F!-_2|`;&HeIgrNHVPt%cj){pWFl_O-PfM#3 zLhbT^+Ij4Cx?MtIqI#9toC$KGl!{fqpiL9_&&bVt9Ym~YGc&7*oIN&hUx{zb!nkwq z8wmSb`!jYblWu|ks_Ah9M-Bsf*yyP9w2K{3=#OeWUBoqr2cppU-oWRoQK*;-+!(px z;r9&o0A}5+McJB~o5wo*B-ej`69r0Z8IzgydAPW^&S#*^OM*u$qMx{CjWKyJB15~w zv=8L3VZeDwXxfY!*?fSX=5xMb))ar)+$?nMss=l8KUjS&B^5Nh4^IQNW11FVo&;w( zKO9wQK|C?I85oSK8253K&@UV=Ge) z3*0J#7yZs0HyOWUe!W5R_LkyDoEQJEf2bmNcWSA35@^_>t>LkpAX}SPpmOmk z=)b|kJHV3#j5?fCS|pZLLL&Y7ta!Eg@Sv( z>N~0&f7FmU3wYz85<8sg{Hr77}L$EIjvavEQ@91@z}xDQYUCt#N`GVk4F|<}p;1b4@y^@E)r@9i8qUs`h@hJZ^UKwy|2X`# z+FEg(hxBk1&SinT8gDc>TBIEL7+KZDq2zS-`xz82HDAIHV;21y9^5yJ6H89{cayT&Wi{o72wV~$9A6NwXpazL+ z_DewaEGxynRYyyaQMPF|y-bp*l4#JM8xfI}^+*Qs1xO-)HXBOaa85Aw=kCvJ9zO)q z=&v(GyoI_-@4~(=b9kRS4LsVZwVk#A-b9BJdiEpT5mdrENu@LIJMg%DF5PAkz{zG5 z2yThln%EJCX>;FG? 
zhM(>&aa>ph$ES7=R~=iW|*PeSu6_ciw$ zam5=0UV)Nb2z#P)jara31T4O22@MO2pCjv0u2jc6r&kIyv~!m33nbNM7X$r~Z*N;Z zY*td_J>@yMTcKGx200QjG8NRow2-|@AyXB!pi_GmLx*Zo!xPw%`2iW0BNlDdagOoC=5~)F_ z){30s=Rj{f&$_IkTYJn&P+MHLba%#aSR>G+I9A-s%E|(CA)P?w_}Q&i-6#j9l-@m1 z-Hp1Itu}exHmzmk%_IJEA}@@sF|_sS+}bw5+<;MbNhBTgIP=5RnU(jPirVzq|MQvu z*P8SH#=&Hj#dtU)C&|r|r!j5#EnY8!!%S~3`1Vtlml5Dyn$my$!2@BT9^aI*ntK~s zAuBe$>d%eyZrf~g=9PVEzGfL|Q>5($FHhmT*6fOo-S3xaAao$7@Xem+-riUaT$T8h zLHjeBmA0y_QWpEFN1KhE`FC+w_823|3u>TyvTG>V5x1ZaedRDn2(R;P$xT12yfV^! zA+GrNI{sg+!LL0lo2+bZq3-3#3Sqs zP;lUoYyaSL-`VCv+E}}h-WLACqSxyy?H|6SF+in-NsT3EVS-{Yml`Zs!Y4J*aAq7# z6Xg45hHOLoH15<+JN5LIaLs~CAQO8k)J$A}nK|skhY!uNWqNf(%KlT;J$FP= zC6A{>Z zVZ_FanRqvtCC-Ljs}BA+s-x<|Nw+&pnf{M4`M=L37K2|j=0%6}n7j}7EiUd~w67_NY0@t#7Y{y@m#9^pnTC6xec1xXs6 z{+$7sJ`saE^6FQ~ckSqeFT&C0A#}E6%nuRJg*$>VG3?@$4*d>Fx^k?P>CahmGUCa) zgu^_^`##g*oy;&D>B?|-aqHUQaZD=-fc*Z-@9@X~G)fxv|8cqXeSs8ID#4;81>MF8ektB1m{H&brnOV*{%c z|9@X4OKmP*dBj4Ot;&=<{d(FaSR)3W3Sqo$ei^mqU>|Ma&ZYDRStIYJVhi2jbaFaq zomNcChu(@QdE1TyLm9o7pLGj<2C7*<~ddTjkG80dt` zK$~wx-N%YaqQler11z)NgfR_(V;1~Jod>Qjf(t=Dk`oeo~ZMq zZ1&bu39UBv0UZPJzB@Aw!0lzC3X80!yu|srR2E$+wY8O1k@3rGr&bb1{azT*f^TND z0Sug-Ynvn2UN8iSO`>*7sXUUG=4lhwxCet=wB|)1;@{g_$3)n2(IVAd zm{w^%Wm@)x$dj#9Ly6`oWX-VEoxm~x0$M)X|31qsS8Ucw1XSa&^&Z(SXBRaIu6Lsk z6*PDtnD4v>($U#jG4o7MGD&lvxR7y}r{FSw_m+RSXpkG`rORQ?!=Ds+zw8IZ@&KXJ z4mp*>JZbGd?CIVrc#Ayg-u_sZtq~m=rj(}{F_am;%uwKy)o{C%NY=*Y5sz$Kk6mz= zwfi_5{Vh!h5us!->MualLB=5u(9 z0%+;l0cL`V$pt^LgcqMMPws&e?tzMQ4HQI##DAziIj^}0Tr_S$V%!IyCsUgxQPbvi z|HTD?5Xr-Ngo*qi(S?DYjFM6=;Zh>(E>WAi=N>!UAYlUJCajp}A)TGEOQ?kTO3&Iy zZXE(}7OBs4V46W?bS2TKY-<&`RM13UAX4laKhgM{es`CaGV|x|%!r`k+nkA${3czp zCI)MEKPb?G8a7Ir8h@SM*$}u-Pc_0O+u={9`NIlc3Bh$nB2cYbCn1gDPU5*PuB@)AA?I3ZbP^^2?)4P7eBUyeG?XToqWt#L z8}QlRV(wN^E6#txzuz&p83<$OW_g#0Hq_OF>!NU`Wp~S%rRz zO^D;Zie%%@#KtEfgk&Tn$niIFXN>1QM`B-$mTK64g_yD0h6Ohtk}8}bmFh-TXFaUHM^oLaxMk#j2;YIKDK9J~=)Q2QGJ0JTa%C@7N(in( z!-^wI-Ih|9Ea|Y|jzFqi-N~Wu$ZfX4%-dVT>WrTroKV!gqrk(pG||<3Vdy5^n;k4C zQT_Q7Ni6XMAG(<&`^RFi-XyA*afXYs||x1<_2(e zNb)!=I5NCR-bYyQCwbb+1B}w+kfm60lH^pN6ODW_%M7fa#(rnzhqJS=ydgMg2<&b4 zL3wPd{)Ec;x~zz*hq$mBw{5ZTcRTCX+nEln!q|vAp`8)qm6qCOHA(efz_{*0s55d43rclB42-~NnGU44g zb>&R(u9Z@9OFFX70ah7Vk*t@Vu{=u3nP!@D1EWXHr)}q(#oT5;AINQB=zz@2ZjYP# z1@dnQMUl?p7CjEEoJFW%3;Kqgt)m#@{WNN}f2l9gQx6(&k79=lnj@JU#j{NFJ1}gq zjd%9BNUf&q$#i^D@^9tNs#-G$Cf~w**|q|yMw6}OoUJ$|B8DoT=IIO%pL)0Io6Dy1 zS4KM&3Q9*ei;-Wo%{zJPSaM=XD%U%Q9D}S+_=kUw#lt=i9?lls+%)TC5#(-_uHR0; z#i*b3(e(`OnXN4uea|ZTZOMj+K?iw9ll5YFRnTIzY6f>w`wQ`eWNEfc-G?lJ})t*YUU9a=pnol(U(XUQ3-Mgksto*DVk;ah1$wcz`jY@(Ft(_F*QRSw5EmK z;iO#Rln_v2nuflI$~4j8`2aEswNfho>k#7D2SXwkHeI)y?Lthm!7jqYHPja@uMZBh zu3E08!+Yln`sL|g*9pHM8@JfnUlA}{uK&v9wuN*1|0!d1WBJuRt9dZK0#ML;L3X4X zU;GodIZz-t+{n+^+^?YmgOBQP&hFjduETnVrD~~`iCN;Djvu4e<*7gZu_Of6TTB*H zf-EN`?>OHi5R$_}UCt{kzAf-p2lxfEvjbpzWX-1@YA{#U<=y zOQ;NPX0eY1Grhis>gzt*L#;WbO*@5wgot2uUzP2G) zHoRkIcew?cSGcT3z9kUmp6m>h-a1Z{_Mi$^6; z)Qc<=HBWU2Ud(F(&8h6h$0bvf&f!j!y)L4!j3AOT!Bybzb$01?- zwK|yMd@+U>Isx*T`#cu!|GYHR)RYD#MZZV4iXqhsb+;|+p*@}WCdqvEG)hUFL3A_& z#Lc($9lA)JfFbyLr$t;Ew`EM?M`&p_s7HN{ukL@k!EIG}_J&A8rLR=UlX>SxNz{udQLgxLBO{oFm42mg`xRC&@_x$8BcW>~u1` zkP#x*3@*)$sqRJ!UX4m!K z=~FZ7Uuli>mky7>vnFjF(6mhc+{Wy~VCrwppPW!$ULHFk3M4o=0MZh(#5Fl@T~9OR z_@K@=_$l}g`pOaXIy99mc!MJFXD z=670zSffDgaYbK3_1YMmiJtz)R=trgDqdL><>XIgYqU{4s+@hDROw=#i0Cg}dmCC3 z_;ZzHLvXVAhr-&H6X>Yf(yzLQFOvSDmi(`aqFnj=9r5JA0mPSf=My)}V$XifU&C8N zWD|WCxfi8oyo#IjEEPuiou}QJ1RSh&|DEe@_)JBDkN_d1724%7nPPSD9_wD+_sqP`Q$>NCGmJP^mKOm^ z?^tvP>SXlT6wWG(e0H|gcDjYN$U)Wfpok7{7^mm1)|Hal zV5yy>YpaaHKJ6}dTEg!ykyfbByS2Z%W6dY;4%rqU>Pruw#DJG{0*DlODRy^RUi~=U 
zncncjqWN~+v=wr%H2)3W`+ez$n_7nMK|=ytEW;J8nh9(MMV(2Ak?fE}J>=_b>5&jx zpY^TxlhX%N6jZJ>EdJcl;&I(IX5oI@1Wyrlpcbp*;Lu_}FEYBgK$tT^p+6eh;RTa> znshukiLZY_m^i4Pm_m)awN|K}ue&etZQ^FwAvknKp9V@-&-5CZ{`KxRqA-x=|Mz## zy~KXCWt3t^sgp$!Ue<;E})65bhPyz0SD?>MNXsFw=L)t&PSEhYJO+Hf6 zzBKJC5fMXh1W)_#R+*Mh0R_i>nWrx43Vk?p2s!#?@n2O8Pda(-JNSxbj&;_;HWNq{ zzd1{(7v?oePtivBJJp%FzV7M%@PoqtBC1#=Z*dA8zkjd>hIykCC?; zvaASJ?u#%Qm-%ixYVTDPF2XNHOyo*fn~jU#2ej-+*(NwqPikb;j!Nlou)$EhZEW2}&qnNsDbNv+Gq5s&-6=^+8AHlYC+jO&eoD)+wVinxK(?|~YM*;s@@F`= zIAw=l4W}{wwYj{AOr3GpU8&AwURz3fMkHu+l#=A|*}_dJjo{i2ii-CPK=RSwCgOem zz?FHr{4&tu4*6mz7vAd+0QodlUe`;(T_w{fG!;C((|7*Xm8O%gz7v1EoeXt%T!PY} z^<814++C~64oIDDVq@TwV`|r#%tNigSXQFx2YdY)Ui)%gw`)qmc?n`4|6vqlD&(z2 z0j|a}zdQeB3XJMIoW5hD9z7}r2GNe1-tzMUPcje_P{ zLH*GL0@supaf&`^Lyt(28!T5TDRTRHA^>BFIsBGow*7qTXFcZ`U#rSZgdQvHPtJ!z z0UJ+gJN}BaSZ_{aTE20&2*Vi@K+VA6Jk}cHxe{Ii2BAI;$uqw+>7{w(Hfc^T)ni>PiB@oM zo}+~1NtcR?$_P0c$Zd5CSD6jQWl{0kd;_yxF#p@Qm))I5T6H#U|JrC; zG?-&=4cM(OKA!&AtSKBMFcs$DcwHf_74_q>|K{eexk4+0Zpc!1MEq%Yy(ZBbKLdlL zLr;X8Rv9&R;mB73SA^nqPnyRP_UbIeF}#naSPQDT=(PsQRvH@dwvu5H5xF-p z?x;?d859EWKi2H-=+EOWE`D=mdHGpIowrWn#a8GZWtSD76 z7H1{pKW*v$7kO_P6=mDKjT;~!(hVXdB_$2gA|c&9q;yNS(hUmIp*WOu4=pVqAPvLN z0@B^}o^wCX{rrCa|61>}?(gpxKg?Q#*Id`W_PO`5kK@=P_EPk%qa5|guOSlhIOt{; z5MpQ}KZPVExzVyzUBQ{QUW!kJc1u|_b~~Go$70rOitz^#%aw|5yO}!O_*zP4?H~W` zrTcgN@=EaT465~>Rk}0h@lWqxgo>$7Zr46@HSOcaT%%k~bO-3KqNuTK|LhFx5gKfz`pveerh+b6Mj&+Q7QEO8C{+y=y8IJ_Tf{-Ha*{L`v92K z!@}M&E^H*cP6r=F@^lVB)an24lO_iLXVa({BF<7du9CFBW}dVN}Sc z5;t{_M=Ip32Mk!$m*8o{OO$w0)Z{4Z)tySNuZl>R*eZgU8@cd$&eq&4@foCF$$#q) zblp7qKznoLxn4L`7m(>x=+C{)tWPsrX3@^;v%6&}1$)P5ljKm(bo*q#_*^!5t@8oq zh?2_6i~m~d{R2w+q2B zz$XFhy!6200=OjRM7DCc@?*t9cFd7pjA{gCp`DRv5Crqx;NOANJ_>?QiI{DyxQM{dFIz6SD;@+xkZ z=@fVO zOPW|Cs=078kmq@|oS1%RRzS*E+sIEe{LcH-(_C!?^mkqfy%jjC{b$_VuxII)!9ugVGe)W&`&{i9uNtVa zd_^j|Rj-5j9`-gtvYD3N1e?{2HUAD~1%uE)@~7@O-utP5*_79*HmBK;;-1V_qajmG z6euvuRLIEtdRqo70s9LyIFFD@+vt&GCpzH6Ee&dG?0)$eiKU@&MM1QaDZdB99qa^9 z_qxl>KS*%mPw|}~@v*Pa5sbDdITG=;eE95LC>b>01FN|W>b3qWg8eUM4z)K%IW=YT z+Td|k74`Ouw+_$y(7^}hjM0t#{~$XED7{0q?!lYPe4rCwDzx@dX6eygVrb@KU(G)W z$zl0-sOx`3JW5Y9P^~9FxCE(6gAbU5Oc0QNaOvNj@3G1X#lm)Yz6ZL_3MLEVTY^j9 zexzOc=^q4PxFPtq%x=9)^BSvcV{8URG#B!NX^iEae{$)u1jZ6@=T5hL@N+i@>wkX` zkOX(a685&_pFFlBd}E0hKzq3m)_55m z$BZDbm%}%e?z75HX1es!IfsHvA9PyL`zOc6A9@fcM_EFXK7)bPn?LH?nCQRy&wOq!?Lp&7#RuXip)4WUY8TorZiEPy9O3 zZM5#!|JfI_8Hj4l&!lg=3`knJGEcmM(|B#AQBhHkZZ3C6J8;D8wv9o`PZ57b@Z;xC zD3H2^?|eJm^wHsR zS%B}&6<(!>nAp(PN^ea_vN$Y&V}BlJQREi!9|FWD@Ux#`Bq#S~Zm7!Tzmof@^f|?Q ziDwLVd)`1I*S5dV)7m(Ry1PL|dAi{(yiTCp`-&#~sf71&$+<@5+tyZgDxS*(p{HtV zYkEvtzbhpi{d2=>1GbP= z?pKjEN7@UwXUBj{d$PMWe^$qk?LTD)rR3=n%PJV_Rp_^-@Pn+iGU1e`=x8y z(9G+uj*Uuky|)&nD~7qQN>HVthcs#*iL*Zx!C*;S51+AoL|Omzl}~*-^x$*1DhZB_ zw?AE9gYc}YRZ2#=`%mK0>!yw;Ap6gBaE|FYv|hWD^E+f{^8yVDccybUnc+Ed;39s6 zD{%0GZhgkFHMi%9Pc(ya)?*P$kAnQ`%TsLwH`}6H07qE;{_z1`T%Zetw5V+QwF>b0 zCnDa+`9SVvd+IdI*{q9J1c0|{{2A5pqW_AAa#sl{)i20+$Wf=?ewHo9Ox(k@?VUw) z)dgURSCd8oA`{T zvO2F$FPch@&T^)GtVUfXW>xY|l=86Sc#|i(0;>t5Y*R55mzZlT@p%QfQkSG2ZAIqD z!4G9Hw$rrje5leOQ{ibwrhwLw_E>8rBLn38>MGxo3FPQ&7-@l}c?B%&a)89kXGO;B zfkleP`bbEF`}Q~3(`-Q(3m)vC%Psu(4X({cJG1HjXNzcE7pHq2IQi}EFO!+1Ew}-$ zgMmfPBRg3S8u?+szRl^6O2X1{?;zJpLYf}vRawN?X-^&Qu{w-EBdV!J^Ax{6z#{iN z5oDI7dbBHsBikznX{3588NE>)w0Kc0zpKC8<^DWFY&oT)-oEE>E0{p(GZJ@1;#R*@ z779`Bog}dde-WW}GMM6c?{sar)Jk=gMb7?riT|A!JpIPdhFTiH@5o!LQy-r6vBkX9 z6|zEI-wX+Hb$FvE%fF?ctv5d&Y`PdZ8}II0m3)vj?eGK3uj+;-Vr(CyK3CXg{)J@c z2Y(ltKUmIKqR7*RAkJ1OPUJ))m5XukwI1p_#^@q7!|G`j8#i zGPaz&y!wM1Fp(QJPlgcJaB7ZMd30d$|2o@W9#=H?8rRgu)0f?h-|G%Q*;qO@y^_V; 
z%$ABGrc7MB0vM&HD6uUXPF)5Vkks(8Y)p&`1W*dOYg`zrsC*V~rKy!MdwX!dC|U;M zciY?jZ2+7^LGICRCe5XW*>3xOERAy;8|Mizycu3!M28$&&Mih?!}eH%BK*9(yb@EG z7;2(C*n~^QinNDpe}psp0k;>KH?T)#gKwqB2%HX1@o%qjkCV}G{@oC5;DYnB!-)&@ zm=8n%wRSZ*SH0>(3^d5~#R>0H0;7i<^ekE+=UGmr+lod@jT)R}uo{`&PPnmhr4>p+ zUS_nPZTN(AMv~LSbJ_Acoy|wzhGQSEkA@|27?lnzxiNgkEr^dbWZ0`ucR9kc4sW^fvbsi#9&%)P@`o%_baFAo)5Px@67367lN0jeF_l z(&dm{@eeW59W4{v^gAOu0#qYiI@>i#lud=;&9{;OMeEbF! ze%KutpmZlc0t77mxv3N`Wf9{~){oaea=T2cO{3>x+gDLT!jvq6u~&K-v!TeL%cM{z z3oA48@XG!Jf;;aUJe$R#JbHaV^O~Vr)e<0 z3*3qg>%%2Tz{g&!yKnA&!TQ{=mZ*5tEYwERKx$-kRdYA28$(FR$6(*ejU^f`#)lQ`b| z>bV-Ki1Fbhd)E9XY!i%yQIUg&{rJziCd6fUlZ>=%>LoI*rM%k* zd<)Rui~7Bw30-T82NQZpFtx06C;)|$m zAHAjcyCEwE?IE0;QD0u_8#QG*sid_GR0TfVt6|hh6DuE+n`V>KH>;`9R2}Wk#;TRX zSpl;PR6ef6ewe7FX0oNWo zouV0LM~pQd_~W)8#5tWgHR_8Z-ER#PPFTh6fk9GX0Emw zf7YeQhA0oQxf=%S3m3}x^hv6)urL@0$7%7ThbrI~V1g3bN)CN~sbdYu#YpK^L z0EWcQG+*gL8Y>G6w?$a>_R+C*jSI{WsLEWsWY!FNPW9Q*d4>CNL^bXbRbkFdv*G?YCHC4EjhZy$eK_`x{hz_@UbhPHvcBgAA12;cU zuGu%2kvQ)m8`IU~sDkMq(qm`U5B$qgmol!a*Qd8pFrAAq63{Ezhdy^(^2DFC>ELY4 zo%B8d)xD$~A#FA5ClalZgZ{%ejduNqjIw5-7sa|&uG?*FY2=3lFIi0k0Rz5161{}w zY+l;StDg4voO2H)(2z6e;#nJ-n$j*xqOzjXyh^7JuMOu8CA$E#rspiH+*ARb?GOau zZI*@4T7=NZ(XBEY4844pE#RC(`^~D~5dl-htFG~ayLEDJj^sx1$v0pxy6-Ktjrg^_ zdFk4+RoVXsIKLhC61Yj)U0rOJts{9ppu5ty!B)_#wp!FA4p2b*NS#L0UH$2I_i;p= zWb5@wy0!;PRp-aTyd0GxXW!g?um;Yg&KiY`hE@qfZf=DdqPm+Q85zGni-0aC-TDV41Xu`zm)-Nl3Bc z&q$IG5HWtj(1r^e0S@_113ORq&GqHA=$ha7+8Ek|Q}bFSAptPhY! zEopYa)Fd3&W@MqRwG$edd=_i>=O>65QfpleR~cL(lTW_pimb~yPA;->2f zTsZefO394q8*kf4WfI~>KKi1-BO0o3cas%+&P9U~*hqs@gbk5|x0b#&X1Vl(8S#sE z6(y97QWd_*THksVL|7w0n~{ib_ohgLgTaXPNtS$y=YqxoqwMn6Gc+pS&$j*unDbFP zCI{2fDo~-7`jQS4J)aieT4>y9=pQNexY|cew$%!G%@Ywvf-8XZz)X1ZqYCX2sW{-8 zWpJtM8@|lOy!*In!i0=X-WZM?TjFxacZ(UAo5!QkkUrj$1)$AW#w<@-m{U^mJ^F})L3X+Ld(TLhZ;DnFNx$Di!Ly1ig)KdtrD+@cTJ(K2 z#=YJ9$tkWdDoPr4?edelkL}LQI_t>^=6f0Szdy>mDlRM){?H0U~C3OVBtI>(`*yfCx#i! z{dD^J8!Bx3k-GeKGO^>R-fj#Ua$c!XdoLR(o`Nma5_>4(*K{*LCK_e}tD z?jQ$Z)4}=Qw10a6Fg6jL$uELyKki+>24ucGkipbX_lfL5xPR&sA-B79%0+T`H%r+9 z+}Mm-3@U!KsVU-q*)RTssIN-PWsL(l-?grVyoI^@bY)DxO{8l-T?Y3Vwj68lzENIWPY+q&%Gxwk;6a$SH?qnFsb zO8wTg70WRp{;@SO13Nl}C_agT5aUhhx640AjXm3h)7dtN8kJ--U zSS`vvz65I$nWHvNpVvi)%GQe+@i3KkLMLeF%BYeTZ77Nye&mn1%@K!|I5;YhNsf$} z8p`YRXxSCzJj5drVyBmO-rXu1u&3J1yDEGuUo^;3{Ta#^5u7qeLvKbJ-;aplRzd0v$X6>{JAF0%+ zX#Q9`fKb#7Br*u0>xPw~Y+g49Br=!7LT^8*6!`K!uIt#v6R_G(R z+WNwV_sPVcu7xGni2`#?lOdb)ghf#j7v>|;>B0PIIl?i11Hy`~08 ztC|J9308^f4>6dW?bR5ZW`4RsrYB+scl2IY;!rCYWaDn&-Y1!$wzufka3lUA{0H?mQU{Choj) z`rgkjFRghZxS!UkZN`)qG9*j7A! 
z;~Vw%>gCYDTIvbnT(9oX-)ZwYKQ;_`JXgtH`Q;gA>olDEIv@IfpF&)T#GMpFRkAHB z=OqE|OM8{M;QDs+su#r?J%u#ior~utNxaEQVItUrAUGs&jl!&we6_1&zwp}S(vR@< zt_MGgwNuh8$CsPy&sk!I|1%=IL4sx3AGYafHRDU+`id0U2!`O{kN3@pwPiZ|j|<1y zVwg+)OPq{R)SlZj61CLzrzpmR@qgY@O*K51ljc=jbah6cHKHdK$>O@FN5VP>I1U^Y zEf{1c33eK22z$s_q5TJa%(vr|)u%%v_hSlgi8^fDTddS!Jjvau#HRZPMSQCDxN{{> z!@o5v7(gzoV6|$kWn7tAHWJLUO(n*e0O2A``h9uG!X%;7Ue{;@li&S4i<7&%WWXWF zm~4#Ypa9cy8u+0$tnDL(*w}7>t;EQLZ*IOgKh|t2R;ZXu9$$Hr^^7Z}>-^L8d+RNn zS1};OHSnSrmo(F3g?0qGhzc*hJlXM$;dC880M#I)PCPm4Psf(?Q0cfQY%yXn-kcqe zxpQkUD!$>NjsLXg>r+a3-p1n2vhkADI=rtNrDC6Wud$!eNxUp*;U^+21xsXC=b82i~+-je_^W|zX zWYeA|FrB`f)5Y3Qz>kEesd02m1>$d@$9kMHES;S1V0Z0AV(-sFH21Re0~?F2L=$lH5B+EE4$rO&mtr@N^J%k{dp9Ci9qyJ)40)jFR|?bTas&FMIJVa?OZe+*oN7y!w)$^df>fFEtmit98lB8mP<{ z-wq~ARqIX6kL=)BSnb~4?B4!KF^k>jY4+GPTrtBDwK|he5x%26E*pLq)1;4tLf6mf zy7>BgAeXY(bv6E`zLw}O;Q%V}ttL^E_cqbFU~)K*AI!vpL6uQWz_@o;Qs{1*9Hm2I z+pAyz9c3H)-C?n0S-S$t@RPTrB47`48OGzAi`}E+CBVEpp%DAM)WW0GSpg^U^n8c= z(AMv%=a=uz50&w|Q8qmkBFpiy?+&?kJ`H7jXH0+y$aZSSXz1WP0qQR8WNi|@mv~T_ zXwomRzR9-*MZ_a;PMgV0&)Y9IPNGBA%N-4~r$OnvVmE#kt<(dpLtB@ZZJuRhb=#n1 z^b#vD!#F+8Igy0G_l13yA{Y~Y(b#03HU4D!{_S$hpuy_~K!#_Hqye^zjyk&S6)6Q+ zaj)PgyT)gYaMpT3(Hb0wmbfuKP%Z8xQO7vXoSz2ueMtTr(MoxToLlHGzp*WpC4^2! z59TkK&#{+}zh|sf_{^?esR*%g^sfiAz7`p_((qthOmk}_SGcPat5&b&`fN4P{Z)w& zKd%v_v_TcNWoSER)LE*fzEXOtP(J>s7oT_`9D~h8wZ+kytb}s;np00L#*_weJ*!m- zi6q5Rg%<*mQ}cd=ef6PFJw>_n^`?5dPnujq?a!v@CyA9?-^$@z0ZX0NYTviO=p`LRFVR zWWnAr+{uRdP#Co4XJWDW$Xbu8IU_vSL7u4}!rmM1!6{=IoW zJaaN1j-?R)z1Wv`@kXM_DPWQbLDP{dG#HV_pp;RdLsc~Mp*q;dxrlS)m!poxvSiMQ zUJ%i9gYSQ*y-{;X2Pft-nXDv zR%76|VT=#iXbTOz@P zSU=H>Bpqx~yO#V|uuPzvCv}A}l81INHGG3d6G*kr7D&;v`t@O;)BJHw(g>}SEmqyE z5)GC}p^id*+@H7s(lqH3ha~+j4pIB&o|4AmHg*n6 z`DLZ_`j5%uCfbHffv(g$k=Mr_0Ac}DbE@@7)D2qld-HCSkhQ}5rgHg8nNZ>^F8(^j zG#(n3q>hx5QX-cveB$jZCrTkg8=x~N%&o6AoPKe(d_RFNhKw6j z$E5?9?z59<@#5XqHP=W}AZJJ^8&2{B?167C(g?^HBS-NJ786J6M!$H|#$&)P-qq!}#>^-p{+ccgD@f5Ds70sCC{OIklm}^X!Z~E|Gl*snE;w zGx@#WA$!bljBEfU(nEj~{)pu4lWaIf-JFb#&llEh|JFt}*|5XiFcsKg#oy^BClmA| zCIUewcuYc`c3S}~|r9l_i!{v8`Jc79GgC-h2bZgj@F=W(-- zt#unUH;71W!`9TkxBY%6)t5XlyHn8spWA1-Zl~+BNqi!8H0F?0ni9uh6=%q~ti`gH zK~3v3gPO*aKq4rxpj33awb2eNa`?*YGT-9I%*fdDmH$J|X4d>gZ{aiB*WJh?H^BZR zs}v){QpXtn;iu@kDL=n&ep0|=HQ`%$xUY_kSw@S2KE)85!RKtxr3jlEH&cxMQ{br3 z%T25k?79sfud+Aq0-+&Cg<$>aw^Pq7DMXRVO4JF{QYpLF%nGMKfV4bbLk#csb`)88zR#d%=VJ*3P-uTPaUk`u%uZM54 z2EbIR0FCx1g>D^D6G(dp2f5_4>yl8W55<I82qd^^Zo)tPLr|WB(?LY!#}kw21+p z4`5VJ5o=M9rCHaL&$l&kDQnM>n47ulanu3i-S##^E|81MyJpLs|E`g!)j;7+8Olvy z9e8ws_RxB0m)`&Nn8u_^a6Mw0Tw=}Oyxp(l(&N_Ky(hB#)P%+_9r5zT+LExf zolA|2h^T|4Q@w+NlJcGBwPD-FsO#c4@kuqdKe_!A-{n%@e1G^9C{LV^jwijNFkvsa z8)Bp~%d++WcCulalZaoc=ynD9qK>)vducJh>~YP9RrbZ-|d}rr~60 zd`a=@S+)v_b!Skgi&Q$8rb6~c4<5PBfwKk)6?j>qcTO2IKiY!do^BkN{^_Yp#P-Q~0>;RDZ(J_#)j*F-z|we)2m8-y@lqFl z?k^+OP9r-x-b(dSu*{eB?@>cOvXSF4d42tXL00l{uh(HKa^eIM-DkT5Z^?Q6?qa#UK84s#7O2 zf~4+FyJA#;^Xm!}?a^)xbydlzp&z^!@^yY)FAe!i-+=+!Gm*hqg*p`L46_W|fZk%U zaI_fMFStf)zSHoH=eC{sc1!zuW5L2~CY*K1`vRkI%sX0G>%4C>nCP_ot+Fn^762k> zN@*TcWnws|a&sLKuK@-ZRzbwpmL0uP%?UNkpZj6X6iypH`%v}zeh(E=_-j?wG-2M} z1{yC44PSG!6%ZJs6W2h-c^0!0g>eU0qwOK_05GG3KMOQW?%H_JfmECSc~OJbC;Cpn z+0SMsQPFqL{DU8$k^KU5{k6h-#2Y~O@@2NMMA7x&F=;nZcy4(lP*MZe{fLN&J~5bg zcMl&7UlE#Le z7i^kSI$h2d!!D0#KPSaZ&|{8#$rQ|=3#c@#KNgmpQJTYXXhsgJxC41gac=w`JRD>c z%H{wwEWHp*1o->tW zm=g5}ignQvJIZ=D9l1*X=1Nj=de*d()ch8`(bCe!%wgf1dfxt3*2R~Ng`FXMgYQ0x z@(b{Vq-9LU8`l)04@qP-h@-5B_T;L}T&BO)*W-R+gnm2G&0g|dIWP5&U0ujgsTZyP z(JQ}4R=Sfflm>GyL_RjT1HU~|PMSLob0(18Pr*WGnsxShBbwgMf;8Mp6;pBY$F;Gg z&r0)h*X;Jb^{q!61EnT$90mViZ7C2%sHm#qnOJ+g%+N^vb^k6F`!8I6Jw+#@7_mKb 
z_7>GV;0YP5yP-EZ=X0{9`szhZtV&Wh4)zxq;n>OjoJFvFflZlq3haAOOy`T4^WJ2G z!-mr~1MGd^$Lh)=nuYS|{H}jgF2KC0MpvS&|E~k5?}cTx*1N_XNo>JjFc2xdc4qB< zcvnvm$*Y3+9ghP)4R+_Oy|zcR6`a`hpOnz(WQF_y`p%z>5%V4JwXIIQ6WoUhF=$E~Lwe zhpcftM$%>$0VL!(Q^xO;%;HTKwZytIUVkPJyR{2P>Ke`#eu8I5r-mb{w=o8r0k}(JA=bG zaA3w=yb~L-3~Ji51uOyIoUm>yE{PKWCXO7E;Rldj*V36=Sd0J`=~h8sQ7nZ6OajlW z***M`PD#7BW;)QB1G|^qe4t9TsUn)RLbkNY&%-J$*el$Kbf(HTgHmCt%mwXC!geSZbMVS>W`pAMvC(kYl1aL`^CoP@{wcAualMMsd zHfg(aYuMp+X)J|2F|>vUNhkeFx*|S{q>z&imMBYHc+oWY*^BI$!0~#={C!>4nHDU_ zO`m<#^?kFY@70V8bt%CZ!McR1G@||9w)b*Dv(KzZCx1-WC(KlbZzxpz?&R8Rz}{_Q zWqs_(w_dznuQQ(gLub5$e*`YC6swhxI(4Ays)~t(ze`@VSEqAmO{ID-8>Tf-cOytL zjjSYIm;C1JBXVfh)7xLC=7eh1>$x$PCe^u+MN|8Mvw8$TbZ_+e_xA5j^mSJ|%)T(w z5}$Dr!jjkE-X}3e>#5{%<&6UZxw&?-75JfPX4?S`c}L5}|nOKMBcl3?^#nDyWn>u$?}uv9YQaF34K ze9q`J>oz$ImMgfS;y!2-@RHR29BcREm3dKuGA{Qar~)TGfSHD?#0$g_cm6IK|6IV; z<<@um-qs=4I7WF2Fw{$WLHlsp?WWyL>__+d$*vb;mL-@3vxw|{zu&M3aLKqNHho}P z+F)`d`-TwO;e3Mt|8j~RBVc~5VVD26r!6p8@{1Mw`deo8%GG4B`I^FJQYO~rM1Jk* zYu2O?>}D&S5?SD&bz*K`ybU#_5z*0+ooBXICPW+=;1LbXeTDtlIeUY-{Uyr{<{jE^ z$-?ZT!NB`uPFzm|_#AhDBtqs8vnX$Z8WcJPT z0l!h@)64sZ|FRXlRL~MG5Dw1)KE}B)nBMpeaO6sdt?r`P?2 zS74i*!rs$6ZDbBR&bOw;0wl;8Z+`Dz>3~Fe&9lq>o~J(@;{AzkOM7K%E|`~KS%}+4@VYBv%Mh=`=V+y z4^P42AJ*V{dHNB~&ha;M?)hC0WUI4SDP%Ogv?vN& zPb&W$N?-G7Y~!Rmk@Kmd-WZcCU`Ul!lclrPsUVumQ}{%TUVUo(20fR7>$K*3ox`=VgA^m zPbpe8F|9pIkn}#c6r?r9#X=B{E3mh+0KG9_uovgcK0nP~Gp<-VLDLeN5BRVroB+Xb zpZ|9y=to=&_xZW;;#RauuCFO0b12zq4ri6Fcy_t-cxmkLkza@h%{KH8u16@eNwa;t&RtK#%sqEI->_4Gt}itBNi1o z9JHg-*lB9(U@tms1E>S5Ig%HPZWzzd`|a}8AQkaDXBxy^`CjuBTn@aZ{t|^wvxa*o zN^sj@qNd^}jR(LGs>W1jVtH<=+BzXkGe5Z!=R^zDdfH_RjGUgw zBrxAU2Oxn8FeVde46MHuMvEe<6Dx7c7HeF%xtQ`k)`|)bK-=X)N-;OI#^$)67GS_E%{G1(eTdCd%9mt{siq#phdC7Dm z_Jc%T{X)DpA0K~2($a>HJ`Gas^U>vP#0|I7_JJH9ea*=Z3C3N{wUHQ&iVlPt*|y}^ z&c3RCBMGn_aUQ+Gz5cu=OczqOKq0y-Uc^jQTQtbJs} z6Ri|V1u50%rGgo6%oCBcJf%!IrvNk)JSu$Lpoa2t6}F|RkwF{_8^*;JtTi2xA)A&~ z`r`4mp=@Pv!cG!NA*`!X5DkI1_6YYGdz4cjI7*ajc$NFpI?;D2$3%hL!It)cOhV>; zgnBhLL^KoCX-Jm3_vx(1_&~TGxfuFOm9P}_v}mCVXf2D`TXCHqyJ}<}NLnODMD#1dCU|xcHFJ(U}Bw;p5}d zUO#%HcJ|oOha;TFrUjGv_92nmkma58uEfhfTDz2?8L$TxW7pdggz3ThFS9|Jz^yF* zHHm*rFU-akeuQwxGxI6q>z_EbB7v$m(BpR-D|FlT!^A$;YJO4MWLKOKtGoH0*cBAG zasc!?(CyfgO&Llz!huxo9(Be}>&`pa+LebvsV_p26eQqF{U?GkOQnjHW09SB%_#jI zQ9@b6#l(Yd*~WA!eU z@VcQ^OPs3Oj6P*KIkjS)8cu}eK-vngM5p#885vo%=f3GIN~kdkXMP!pk=cS@8^QPE zz}{!@F|eX8D~f0kUfl_2{B7wP!bc}qBBY}KZ_h|#ZpCj^cazrkeGC}2a~>E8VljM6;Z5kg`p zi!OyH`~OJ={s_;1rGhedP*u{~&omAGXUqyiQs_^0Hud^QpP7oyvy!mOPhz}3TlEbT zGBqj$6+Ux`M`iVo?$_Cq`x((kfo0h+#Bd-lINHjv~)aEu&4{154w+S#v0e^@bzv#Be)2O7dbJ|I-PXVzYd72 zS7VMiC1cmEcL#4ioYdxl$^rx_l%_O!Skd(})l*}MUj?dk@QRKA#3@p8_?+8FF&wJ| zSpLoG!OzjT@l^C?zA-}wqG z*_&KBhsP@#qW_f&_IV%7kvzVuGt%MK%;yKjswnR#2F7Y^wY!SCLa`{CibH1q$S?)+ zTv$fiMl?wTJzzf9;g=ss=ht3pHB$@cLv^{ej`IMjKFDsXlh_cV>EF|-OH}I{zdS#< zK&!*#A*u*g&oyr8W>?ADG$u#W0`Fb2FX^F=$X=#onDgr5L1#q+kRGLR$|C#@O>ynl z{@Oh@2B1#pof38A{*MjAzwrRW67hG!znl3YZEeajY40ahryJtZA_CaUW6XWKo|PgX zQi939*Iy}S2UZvO-mX5>bLk;&ujmeLaZibtVumz9n7`WPI<;oHOiO<&9HUy7|2^B9 z8~B!r30f)K%!K7!-vM21$Et5)t3fTHj%muuuF0KxRqu8vhu1i*a1|=Qm*g}qqT?_S z(*UYOPKi4NsLMA*!u|mVNnm~XI7Za-00z` z>wuy%=IL1?Z@3M^qw+ot#D6xUqNLfS*=}C>ogT$khgSQO7R#jlxai%X-~PiBE5E(L zVM4kL##i~_KJfka%A>8Sp*$qxJDm*@_G!APtVK@5=R|{*bm1*ro9khOrluwef1wi* zn(F-=Q(av$iQlntaoRVFbhn`OFKB?@5Lm9EaT{2IW$n3O)e18@H7#8dcd+Dt%qY#b zY`k>7+bsJQowp<*O-)m|EjJ?HaihGQK7Sw53D$omPIUpNZ(1-xd+kx|+!2FA-OU#> zgLLTeMLGO5D6sq}Fuh(@@%;Q8#nzFR#!!IOaQ{P>cDvs`xORMoSW@-pfhho=tcYu( zd~w%P=(&=i0NA`>4b)0sv>m=YFbFDZK1n-JP>oua!Z|JO(z(Mp3bA*_sYZp|e0%RQ~h?0ros 
zK9Nj3YsMgRPV`eZv+*oOq>=PU{$Rh5b97r9LgkAMB5ig);DcXkGxK4XTCZATF>Q{x z(qyFTvpD(}hSv_kzdrtYjf1kKH1wkcs)~K@WR*n9=yq4!h~lJ3d3w?^MbE@`lkefk z_7h^_L7Cpv#FbB@4L%PGa%4Gb^Ia97T4v1nA5osN zTPOcyFJGTGG8@pE)Hrn=a@8K%X*xAq&)V2{XyoRmIjOzd+GyV@ll)W?SVT!@(!k^ddsk*!E9|}*#NHqEV$AmCf&l+bY%X$V zaqOT@99n7x@g1|C7FO^+xdMUV^qYrpl5`B?B@)r5MI0jJuF-TAlA!|MRssH+JG2Kb zLM@E)3MI8loLzI}wE%tp-mk(eOXC}y!qY=Cp~kisci!ZOv+P&*ye z%YWhu!#d%brgW|A=<-og0&?w%7ir}XXA;fhLq%Vqg1{Ksv#cqQXvXDF9%zFFu9eqI z)KJ9<<#!G?!jZPVB*$ZvibKh+<_pjNXG3QAle-MiuRT*DXi>|Qp4!UKW;^vMNMAks zv2;(~2j)f+MX#iVqV&7M?#d?3&9?F+jOwM{(|b8E6UhA)5?xf+gD13dORxj9evc!~ z3sm=42O}Sqo(2-=6r2aN`v79<6v1eA~xPy}g^4y8+^ zySrOz(jXuu-6-goV_4!+}k?ISKhm~)Q*827m27w^=bH?N>} zsk4D-9I}6kmla>qEUTemf!*S=U!nMum@N|qZRzRrj~+Wa@vHB3y@WY*Ts zNR_E`HvEFYNI%-&r_4%`uCmek1lL8Hl=*3GcO-z(3=|4gEYstcPi7FaUZm|m(;=F9 z4<^*P<~Bq#Z#8~?AjMrjU6I#l#3cm!cs2e~9NTX?gP~FfEptupoPdp%F-RLG4G?t* z;cAe(Bg2GVfy70=l~XAS*m2FqYhqJLmEVtN91_`dk%k06+2PK;A;x_KjVZPB%J##4 z*4e(2cYrb`&uqKV*KU6er`rWmVv*h$x)|?>0mz~b0q5t-k4&z4hUqUn`@(G@eN|4p*LNGsxag3yWnvZFSQo$P&0>nT zq`{ShXJ6=4+qmXS`x=TwZPlbQ_J?$OGo2Wf-*z6mXfV0aN^;Z1z3jNR#u(|sgVdJ< zkV+_Dl2U$+h~VBF(?COd>4bXJxM!A?ujJGX%w3jwdh7gR~nOi zFFw^xiSxbjkQ@A*%8)$@_hpo8JyO=Jz=p<8j|>3m%TH~Y2ok8xXsyBI!4O9 zps-*Q;2rCRI*Wj!C*$C&iZ*@KBlunC1yh?lrhA^C4BmEzhvU1xQ_QMGHZES((D3ce z(Y0GaF8Hh)@9k?>!b2v{cQnb(C!&F*b1JR;12#h7mHPw8WkC$d;b zM@vtd1aBU$7%%MR*M{nNqf!ynSvU#j>rf`uN|~9q&aoN~FW1CavGeYZ`LZTIkz)@( zA5sxzzsu6fg5za;{EN6itajvdOls`>=H}*6j?Ck3oeV_v_X!MK z^?+MWph;H2_`>*4<{f6? z^B8wd0q}d4GCOT}oucdMOe{SfEgU2x%@?7-*l54ihrUb5D5Q0-+{9UBjw%EUTJak$ zW=vNMM~2V5rh)OCnQi5E{E%-hXBh6$YIFdfHzrSR1M{o3Gk!*>(M~096=7j=g4z?A zQw4WT5GU+_(j~jsGS#dk=Nu-3IE1ekOlJqt<>G;xkHEaRxcENLdkd+Uu#S5fZn|_c z*FHi|kYC1ss9D1j7Jypfao_Hhg%szwHwQn#rIPn##aVDaqB0k-`#`d!N|0df)#U;6 zbPr9L+oiTHG<@_H{Bd*lX{cmH8fvK-usvV~R%q;RtzUP-{@y+wT)U+SdBB*w>O3v` z%qGThT&In}c5+99-1q}lqDk|!w&WS&n*NDXz^WVNQ9C9#wxzxggmntkjp%Hb?4Pp!r--gfQUSeMwFr&t)*&eDK}k9+%3M`|Tl!D} z#E?@=-+WaSX_UvBzEYeaBV#2;%$9)FT0!?KJ1L(l&Ubqnd~S4fR97PxFHnSysub@s z0u}DJ#%>f2nS?}C6OT4@Z)JYB>UlMu1miFtmQYt1i8kG87@G@BQ8;0H4d^*qFACs) z)0FlSYM?%X=RO>!ZtZv?U+kHh>h?OyTvk9GeV6YK!PYF{;Irho910v#w5GRY-2s0^t4XUlR1-|}we_wbbi#EeoB4O$d z(}Hsr#LG|5elQJK`E>KHsL;0w;P#-{7IU0#*t{||HAViMM=YS!NH^{TAXfYElv;{k z!uB9fs=d`}4jXTg8Vm15<_5FXgknP;w=zQ4NI1ZL+Wr-zVdJ9@ENjR?aVnj=%>m%L z_}tFr4Z}}n-cJ4Slzbzw^mMMjsr>t@{f`x=YvCsZ_F9Ds4vtSWSD}2QdR~cV ztTlYUm9CQUoV-(kB7w`F!LCqx_2<~xrF%7-$#AAb{P5MS>#!YIY`s`pnt<^05ej;T zJ0Y?lOckW+ea^Y3RaYI>)?O3Bhc`LGZjj(tp5q;GsjPNSdT<=NX}I#vlmme@^N%_g zFSdxD!w*vktr(HKJH~d9?%1HX+93!-);`2~v0^{~VbolaflSS@U1$Zok-_AMtU6@c zyok)N+)6;Pk zzF&!@Lsq-)Li)#sdfNNRRLYy^PV*K@m%#Z_@t=#Vp)-d{Vk8RUtTZZzvXf*;1iSdO zi%n?d9b|Nhu8NZH8*?Y(Hz(3o1x4CY=tKmJ&K>5;Xe;K^+fT|8E#nd`1cT2NlA}cr z#<3^Aj3d4`KKEgH&L|n9m{;8Tx*&IS!Fym==1)WGfnZhxdba0}lk}TvbyI=`T;X30s7NHm5tZ8$))nzp zny-Id%W0}xMhyLi(!@QuemD#G8c)31edz;?|A3T0aDmL8ccIO&{w!Pf0puxpMoG+I`S1Ig0{&Q zB<8bYnoL=$aG^wD8t3#ok{clAZjK#n^qwZ4f0s$Nz^Ta|<<{vqfv(e5_(=jC&(+e? z&W@kJG0^Yzwn!nwopE|_k0!qK(CuzsCA0X%B_u! zepfX)Ha4ZAd6X8wAn{{yJ1a%}Qt+B5sEk*y4H`#}oUuT<-^N%N*}8|GyMyuP6l?Rg zub^=#63S*qj{ZcC+ltDzkuimYXoI_{9ngJ&^1F|JB%l;x9V{#pa>!r2Uj6UPXs8rZ zOQ2T2$}-&ng0%fvFD9bH@7x7XWu3ybq;{^gqt_}OUIb{9ZKr0AtP zEH!(=F?G(rC4#;~_ zU>cEhmktXN3K+VvcG@FwdbMsCeyNRpVeP^-%cVWz~Lp(2U7?9&z0vH`;9;bSiUDHlMT-2=|JhIR1oH?oZM%iJe&dMe@PFgDUaVK(At@H z=u}N_03zKC4>_6br2luq@!CJ79m3K|MtO1*f6vNIM`;U zFXsDv>_V6wn0`~uk7)+e$05S*Iqru%)%6)Iv?Bv`1Q#s1_? zb_zr+57U*%9Vi&dDJVo&Be**pWp}iKfYpx;bLH))X>`@&H?a|j^S&I%bN1;~vEO?@ zq67niS`hKpNq7w5UV6SjI=Z8Q=W~twZy(3w?|(Iagq08>@aIZKRsO{BdX9Z^49Si? 
zmFS!!yNq>(qqpfYUlmv8JhNhEm#i2h*E_b#inOnSKiU%KWac?$u(`(v`9M8XEKC~$ zDw{DIBcPSX4G`yA)0(u6HSirYj`Hz9o@LPOs9xyAizkMq7er0z{>J*8=Ss^9-JVp{ ztOQ;gh6w*>>5(l-M$6_8zEgoEsC*Ta5-nq7M$ZmxLblu=Z#BtKsfal2QUMTSh9G_o z1q}_YyzM#X5UA=Y5l`8Hw|pj_$EM-CCo8@GK4Q)P4(uJ^u+4m2JJwTz_~@73zF?gu zLcdA;jRz~ptvX7X;o>5gb|a4_$7D`haN~qlP>_?M;Pe?@{Umo@;g?*kG3h#vD$Iwk z_p>TD`{~cp5n_-?5jM$4(IRF*XR87C53&1QIM3sq?afLMoe@B%Rw@wQnR z1SF7k;{1MgFmGDafTRZ}-pkK-frg^90~OkV3Tm^?ntcMZ!TL4{I+J_A=^uIRvd3Rh zQ9+9cSBOj=Mvi2+Hx&+gJ$&XfZ&dj~H$oce@ESNOk3gMN!#+Jv14e^y^eh_A6}+~! zaY6q8$?wVFpx5%Bl28 z_kUi?RrQrVn)BI&hau$W9d1gbB?D19BCBSYa--pNiHMckG9;MtZKgr4)w<90k0FvM z9HR@3*vsF+%3~0~9zDGW`+OMqy@V^gDo)#Oeu};pENYzA$;JT7#k*?wububbzmU{H zPgmUxdFco}DbcI)L@N;_y9e<&^V&Lj)B0$}7>Y*?(bL!J0%+qK%x1PxVM3-!fH}kr zgggo;)xsuR!3i-2UHfty{WHo5@5rE4gOFyhz!D%;$Kx00iIM>QGA#=^mfeIMp2h*N zF{b^WsC|T*1V_efTJ(RWWnnGSDvL0i&-O?L7L(rl3HFGA zRR}j8oq%(X@~KgOzWux2si`5OPUEPUcnOiXm%>_b%t&ih{TUw+t1<@MjSa882PWH( zo}1*GXRfCxj<&``Upr?t$f7`4Vvvnf;brHjlI(sK%-4@As9ux3wq682XWKyIsrX(H z2rVJ=)6l-~G_RVEK4`eqz~}l_Y>HO$Zn@_@K;P_`On!9r1m`&(4;It`3Fgi&tHEp+ zG*?-C%;y#CrpOYlkOu@gka;sl4Oe`;K)fSgZzfV%5%G4h!zCvzFg_??=oY<(^;y>`G z5!C^D#;ql2Q=fs6{1A{2SyR2P_azSlnrlkrogFg@ArbrKD}!q8U)VL=pD* z-HEjJ0TyVPQS6uYez=$#0`;)!Q)gg_;L`+k0ADKg^aU%E{s{z!VeQ^`O#f6ZPr!=& zEARV8-t1G(#^Wk4;DKz6jLM@Ao*X@C0gSKre?Ig7btC-sO_FR7T{yB6c$L=U!k+u# zbq;XDfwR@Xx{3e4zTQ9S=6{7M4>k~mT~^x^DZuYVH!sNw@j*8j{+Ey6|F@w1&oley zE!p6Mhi6-R?Jb{-ysh=kdT;I%IOKDtwuKD;{+R#cW&i&zfz^~!@_%Uo{Kpaa=e7UG z8vWnB9RF_#{>KCVpZgK~m;bj5{_k3XW@gf)Maod;#E}3o{}(daov`U#=^ZmdDr`+5 z(UAH;>O~dOs1Rv**lTmz)Q2#TO|3+RB48MW_}KYBCxU8_Aq*|kKsN?898?`<2MC~% zUqh{rfx7D+(kk%}KHJYEbabJG^(SJ^07z%t>Vq7v*kT8fkg?WdjyRoSdQI-z27~sgz;F7nM&a1J9$Vda1{*) zvKm4txHPFy`)rHqWdv+`>(@W{21thhnj|kTFED=r0QoqzOKCD28NzmafwAvX0Ma}^yWaxYUy&F+^BOxyn+?7)|YE$*$ z#s$+XEZ0Nap9I?=Tu-xvrq8OgSF)DIhr2nG1W{D^N2NgB_by;{Tt?sVjAD+|?Ujm?c+vE2`i`1hBl#Tjn2zT~ar=wn71Y*p+yJG7 z(FX3Z+frlxs+y)Em&*f5fEB(R=K%5M1f@`kY@E0-DZ~oYLt}mAl-D;|EKZI#{0@0&rufFK04BTng(K%X!h#`YIR5o@ zb6jPBY5%WsuhM|9WSa>t6(&=4m2;y2wkYCpPv^w3;R=(fM0&S&*U4R1QbM?R6jG=I z_-I!+c?9)6D9lEHzEkXr0T_^Jgz>=6TGY|r-!498LH)o03ojfZ@Y*pCZU9J_3Lz3& zd2|xF24EV!M%t_0Ev(G7s40KEnQYnu-Na~b0ZujmG};X0j?F8l{S5kI!)5Tb=jMKiE$S#bIa9!j|;{ga_LUm5dQhoy#M^DW496Z?2_0@9k4ZXloWwlpY)f5#)CIamV zJ8=53k&M||45p!a9S8NNQmxf3tw=t>e60PNt%a(Uza~c(CMhStCjRu-CT?>{#pFKg zhs?<3EvWL_9$s2bl_|%#_E5O|2EnmfaXL!Cn=h_jZswD=O8{?(oDL#J;vIV!1q6!uRw>zc&b=ibXgltOL!4X zC?yiiN(0QvT=%9*wBPg^)59u>we5zDF4pKMtoacu>-mu|&bN#luS)l>}qr7CKkV zhW7q5unb2#P+Wrm+f`mQWi#0&1hDgCs&Oeui8^mNWJ)lkfLJTV40yndQrjRLWP$oJ zd7?m9|0x}P!Yvp*vRM~U3~t({-GAmZXk++sjIO(=8_F#NgrPk1n86r7p*fj11;+~( zrZZfYER6OvCKBF!19Rn@rZD?M9CR~x|7$ZJlVxu5d=ZsDEIdp4G=Mc?sER+rRCb#2 z>t4z{XHZ}L9eUOjj`B%S6!YeS7zOibia7$_um`70EoJ#uN;L5{gScAJsjn4=5r_`Z ze(|B0qN&?q!{rPLtjRaZrnFxHzJ5T9l=5eb&LF_srd;(?{ooT9dO~S)r2WhZjB}xq zWX~FErv;W`4-T_VQ5u#%@=L=(kx<2Rq$uU+tx8W~Mqg}Zonl}6Ihi4^x>SNY!wfGR z7kdO2nMWv?FZQr3T1do2`w?`DFw4VyK8uPZxM~L7_&A{kg$cziO}c=2Z1#F@jK6X_ zZj^$pyGA`eIo`%8ZOwiC3~Pv~_78V*p&ul7E&75SUu9(YkN7#QSK93?HeFtNHY3F* zHo<*RW2$ReyPavjGSuN_OrkUp;2;`ftnZ8nYg{DDT+|i7$M~;nC!L!j2zk5EzcM2^ z+i@b=WuhR6Y&cBFU|qPpsv=Xh!a*ut;o-|CCQCIDdc;92TG)QyFob77^N18mF$a~B zf%ge`-Xmb#`?6Mg00@A^Sb&iZ;t>#!qb~6)e~R@m?1lM-H?X7_YRBxWvT8aliSlG9zYI4ht&x8We<9JX9jQT<`{9-%5;0-Xu zPD>{fe`nPEWcH`5wzl@y{$*KbKspNH> z)GY&P(sPqn3vE8-yiXF|lg{6_hm7rSJ)aR1+P}qr=oUx7mEq5JO?OUbVVW4O6||oz zwbHsm`W|Z4M7*#ZL!uOTlu`-Y2}aWegN#6J*UQUMQDuN7`_1G1;^Q#TzxwL} zQ=x$clNPlbA?}aDOI-;K5QZ8UofJdJ+IV2drrEpGmvl$gVir zat7$~PqyE31Uh^{HZEeohbRFanlzvrGPUe?v>+1ak8{oh(J<=24cqYd# zi31aH$V8bbe`dSsh#{aN2(RJ|CKY-=Ehjb_)A$Rh9xwd)iD?iY 
zz+81K&0DqxsgOWxnJ!8k#;n|P&{$p-|IMYMrrY>Wt&LheQc$iNYG+)%{TXmP*zTMe znnx+9Ex>{NbL(kr*oA)y9x3;EqLbXIi^EOWfr}Hn{ZxVESBhOlwjnzIL9u6>9?{V9 zzS}%N{lNdVv(ZW`XyE1mSHyrt*H)x*J2=)kt4}xJW+56^Yj9>UZ#bW*w1@5VWp>v? z@zkwtN*JK)-&mf7B^%0>rJ7B7!<^^S2gJ%Mr*u&A3JP#QGevR}Po+gy>g~VQV@dz- z^>_s{3SAHVGuvG*Yyo@`b0Bwm25w(jeoy@sNi3r;?jVkSMM|l(gG-N>F1MSjhDgCN z54boie6}A+5SQ%2#`IqYwN+|qmRrEOC z{vXG#VL9hOu2O!KEH7t2etOraCvK1T^U^R@sDubr6}7A2Az*h0plEE&VTE7$U>flw zX+7-#v&2}r3=r2mmzXF~yZ>t+yfpt~oGGN${vFc}yk$-@4Ilw|20D!&TU&3mIhu-l zh#Wa&&YR4fQUpEeut#K4dyIRKVj(FEq_i0+s+)0GBYfg+5DdR#Vii9y7XiBR-u5X;j$2dlNzZ*~qWcDPlRn>;4~`%wMyxVvN%kWsf)Ir|E0vSB4gx98G1? z`EskMUW0!0gPfS4o2o3tI5{acX|SC|?tDQkFS$Irl`Qi#P*p}Xh~&d;p^q5i3z^-E zXkLL`&5U*38i@W=`NzuWX!n|U1&HD&y>T|#SKr*FsVk0Te?eh2*7vY1Q`FzjE!D|c z^3BUbyD<|6;;Oyqz9!Y^Gsj-G^|jsGZ)X9zxXA`(RHG(9xaV_9xPW? zzR+40A04cj6i(@)?F3UXn>P0Fe!4j9WL62yzq2SU+$1L7{XUJV?V0z6dybqp_y$SI!Ph+~9)@-+q54iatD+ zUK|^G zsY_?AKpQqIn02NLVgV#}4XTJ;&;3aM%K6}gvTmN~`HuD*ETX)=D=RvxOr=1nrLsn&FV4ioWRFAV zAjqjWRysSS_l8rlkBYwIah=XR4J>{~A$o`W)epJZXU9;$q{{&MBunv29|lzG$pVzl z9bf>*R0QrD!E)l;FexFLgF^<_AT5hvC%8to*$Pv`I%!bkmNv;8(ex}PcC&~)NA?aA zYVt%#r$OhvqgS^dm$wNkvXxyFuAy&CNOtdFNr@rOr*B$ZJvhvX$$jvsm*A!Al8ut+ zYyyhM!BC3|;ZnmSd)$V*lim<|Uf!er$4%Zx_IKR8mw_KCXQkHyGPV%ukAn`Sy8s(^ z|J(}zxc@v5b+}uPOGm1YVSiSi+O?PYQI$TY(HSiwvf290^slsF``oNn{b_~nu(njo zlO8N0Sb!2cGM>Ag`|#PwS~-6rg-3_So^T?X<6t;ir9y$cE1Y&DFE0d_r|L(vYxPh3 z-NZ>QcjZLeO}Cz$+ioKBkrAdsd+Yv06%`%KQSQO5%;23)J;yk~$cv9P5t3$926 zwXKR7cc)Z_uPX#-G{`W=>SOUpmdHHekq-U1f|TIn@cC{P5tpcBc@ z^IjN8=+k%VvG%B(yssDMRXY!_EKb}i0e5#6pT;Kl67--F?xNc22bK}#9VAN~_Ejb! zyMiLRg$Vt8W;1Oog;Ga_P^RxbPf=ZRzcz{PF=QSLZ;&#}6F_P{V-BmPoySWQpu+=m zf@Y85b$JS%^dfb(7ElB35%9>Hw`cA4zg`7GB{52KQL8$xoIr*|zX}6ATb-|i^IoFgVFB~08g^28*jY-M|{>55dtY17Baz} z9uVwR+&hF}@7w3GbSyt**0TP{Mxt4);acxxTW{A=KY8SdnL+9fw0>m zk>}X!0U1p}^%;bgg0D3?b2jKSoP_KPC_<{Nf4BqAP{ZqYo9mZ^`N-HKkl|}6fR_SsWnSRln4;%VAAv6S6n4~E_>OWY8v_rWsDap*4lj{9z+18vr%3`JQYRREU zwHl99UC&tSvY|9l4BjPF^VZz@1W$C1B9zQlt9B+Xf)45K!Bbh_ zfj4Kle(~6j_p<_d+xotoosZzv6vDfwJFNFT*OeP}tTOM4_oM%eZI;@Y5Rp|noJ=Gf zY_ zEbf|q){0{b$EfQ_4RO$zl54m=m{0l$3Wo_pMTqXTDK#BA5lft>^<%DQ?1LOUyR%6? zb6dfAu6C}?<0z_W#|VCSkF*UWC#Em!!eeb*6IYcvfiyq_kHgrvokL01>2}sXVB9yG z7o+B`8;(!udQ3X526qn?NOF9EH*=En2!I=e%lz)um>)?#9(i(WHG4#gdVOYJ_QBo) zKHVa(CP7Lar#W{I(}-Qd-)_1ZQ0RF?!wvp`9k{r#W*BApA2n<3a(AbG8sF63YVX4& zkQJ+{Bkgz{(|xbuvbN0Aow`20ItZ>_Jf2^>MO~xoP*jErtU_rWYb1lG?su!ic3q_Y zZ&tK;{2nr}*#z)$pYZKz!*qn+?P@-jc1B3h7G8C~AN@$L8gPw?R6+tv2qe%=oPl_w z>mu&DD{R6+>hT*mlrkt}db19HM!{>8IznjWqehfM4mwi-zgRxgl{FBU!ha zd9_QmH}gLIKEUg~Iz^RxHD|k4aNx{EB6D_e#HWMp35_*8&KbLFM&{2PB;%%Q;d{3? zlTE)u4sD1e`*HU;Nbt{`mli!zE4_b}IS-!#HPiM#FYykX%N8W=HA9xrwr>rzTH z38;21o{~0LEei-A7l(LB?sgIE!b2LlOZ>lGHgxGFggrR5lMmWXgt?02j4*lk!Oi5EST3gc(@4;OV>`)m#LtJ{>-083cK zqMQ0EDP#`_TluJuS-^VXy}@>f%p!-}6$+4};E$K8x!<3&E)&SL5m8p89j6~OM~y}g zmhhoz_M9N5x^j=RJDjXMY>+@Tm;ik7LvG>FOK$}+O0S36v`?ci(XmrO13V!ZP2%e{ zP8h+|506W~7|_?DJPoSjT~cmN=dV5KXImUve7LA8XJgmWhJC4(%_RY^>kZ4gkb^I= z9X+_qV}va5{wpp9`_Shf0SMlLa(!kWf-}nvIZGOnw{=dt1?xJyxzR zIg1E8lG+#}pwGm;tWMcmF{xo{xe(npW{bTjOg0$J)fmLFa&N5jPh76+eh6`n-$U;j z96R-@zZ1z?C6+L2Gb^OIdd*mIJpcK&26LqN!QV+G%u+gRWNL5Cv8*`YCZLjOeo->G z3b0{>&l|8O?*6@{XI^QRui)7! 
z&12m{l>f!PC7!}mV{C+uirT)%tn-%y(u4S6l6R8|O0QI;b*zbd7cHMfER%)iZYq4~ z|DlJLiuU0^An9SR@#^{h)XblAIn(Pu19ay4362*%w#UDFb9Kr!Y|WwgsA@~hmTX%g zop*b!gQtN-ilh0ec-ayr%|zTZXzOINq2=VID58Xwe&yuS+lY5svr4s*Pi0~tde<#= zpEQN%P#Ezu%cV@-?;0IXt;<({=;1)%DPF~XF7%p@9pnd z=mhY6$hwST?W5s&Quzs6xbT@*go<4e3>s7T6-0k}JWUqF}CW+0KhHpk&*xvQNX zv1@CyM0}V89^V6KnVX!-u(a@7eKYpfsiMGrT_$6PttN4%l`M=yQ}5jcmHRgDA&jSM zMGyVXX}Q2PWAeK|tzT_bnGb&l5jGi{=7rlyH&rFOcx;5g%~V0&fthb5nusMAn?*YL z(pWV~eACqmifS7o<>L`%dSyX1qIH@e+`br{rx0_pbOof^aJ?tWAub$u2a(wJ$T9mF zZVvxzz0&W$vtF%Ry3Ps6N<`Bmu|+j|vVX&PvZM2+ODtAN_~UoOc}grU#nQ?YH3e&n zzY?Fq2an6A3sff^uuI&w8xmd!ly_~q8HnF?&XA3wwPCVSNu01wKzntT;L~T9YwjIf zJ1aT~iI&Bhm#cSKc+9VN-l{(3hkTP>;>P#&i6Ra?+*PH@herRsRPX0x_1^jnk|MoM ztAzJx1mmf+XfY@taq<2Qk5BWaRY08e87_%u!E6U>HIC;6eTPeIeFs^jMWXP0u~HzK zhlP#~&$5uns?#5?*Nq1yPQpo>L5#W@-(blw7k8kW2Kw>xYPtZzck;%pCVTierY1MI(&-GrDZ84KZ@DZMv@7- z@iJZkZpuqHBIoaEHQCPHe7IC^u0lBC6W!wz2Qw@%Mi&peM)lm;ris6>MGT^xoRY_d zxYr)qUKReSXEk9Mhj0ksuM+LNvyA^;R8(~@=*{I_L7b1bwP64Oh}*mrBdXE+`LS>B zcQ);Rm^`dR8OBbk+VUGGQTlOj8eE(L*F zP}}!6nwALucvz-tbLN;iobzL?E=F0evYv#~IGX#aa&=ew=4~zEScr4dIQzS~=SCzd zS6=pu;=M@mzcDcC)}u974mz_I`gesy@KiQx`Cn|l#~(4>$j=Duj2Y@VG?_4@e5lGB z-+%jFx;L|CVz7LilQM}TWX^~4EZccFMOtDBi3Qiwepc=Ln~zZ3t@&u`R2+gQI~SX5 zBndYEli!(iGiW?X_7B5zz}ra#cXL)h1+sYmG^ zbLSk9K_(2Z{HVi($}GiahXhj+@Q^e61?KCJX-1I9C!Y9&Lz#6vVfQB?N8P@Sv?yJa zW<$flnBV>3*T8+5km~yug@%xCmocXvP)^Fh)W*cJjq)9hjxYt7aUlX-fQj%spnoe0 zMTC*l@2IQ^m_|EZw>$6K9&D;d{Ns3ui$%@;F~jx4#p?s*Z7 zKFJb{0H^OQ)K!#|JdrE z+^Vl_JD}DzB^;pVD9#NA1)9HHP2pf`%W{?v=j&(tu?R}~$-yxWEnPx2-fjXqe(}2y6Yj5>#A{tX zb{ORXfdq)8Xtwk7v}N-au&wqpNAS_oTY6|;uS8#4NuyKmOsKeUe%}j-ey10w2ggvi z9gCJ@Zx|d{@kg*VD&Ug#9T^-DiVj;^CZ+@X|2}eGr*>>A}O$&ghw}_(SohEa(vYTKrHrRH>OFUiY=L z;U`{QalgTcs4dy#cZcY)nR#7fTmH3Br@M~APhtA8k;pK}6@U`q5>F!M7_6)Pgwodw zOOQ;cw*(pPO>zvG{KL!g-f&5>|%My2ha|Y`U3#eITXC6JQ(tVJAO8Tu9qiO z{5BXJRv8q=kJIfD1SLPJW3{wO@PhVA3Ej?yMKepQpVJbBS>-Lo`TAksvD6`hdv#{l z@;Wa99z~3v;_xCoeOB^WnKVgC@o}ppU!}JlUS{KWyvahIaQCo`n`P8}diz!WurrKF zP^Nd_nh_2mFUSwID&9M;Ub_Gz8T+bgzqtQ8Ksj5&^y=^62H|l(&Ee_`auM<`HD9v8 z@x*GUkwiJEa+aPDGyk41zg>Di@Y|AF6#fBOHk_nEpI_4xLH`Ao0)(rPtwKs^wXXYe z8jM5C5-QE};HMSXQs>tc8P7?BUA4{;66)*wh;|b;4mkQ#A{rgZ(qBVDg1G0J;<98KNw`#wRTz8HM;-O0eTsIPFjIzd}eP4quN8D zep=4<5Wrl5kd9Tnq7o|{v7b%e_*uR9gRiA%pP{{%OWIqQApFv14&n_L3!PF(I;!=V zBlc=RPRiu)+1;PtH2g*DJ>Pz>V4YuOZ%{#mOJ2Vld2R!SCrB!!fBJ<4oM(RFl;0?F zGkBTcom+r>c&>Lm^P*n(cg{&a4zdp)mKV)fH(4+0g)1J@6xo9xn|*evd3+yokd<|! 
zbFzM^ORbC3a-E9Ed?~M7v7pBW5}MVO&qUl?&GS?Ic%IgzuBrQB>#Y35xRN{`rby_gfcudFhF& ziUycw6SgFW+r8QR6Ss7_6xIw3*m)%5bKlT>zL($PWP}J&ztAhwV9(Y5p=(E@)&{Ji zlg<#a*f%lodd#>*j5tQmy%N#IQ(xt%z6>9|j9)6$XqaI|&YDnc)J$wUqn*K>ap!2s zd;}vh2O}gvS)Yy{R4SIBpl|RoMA=4ysreoXn^cWym3yZeRE~i9Qp@zdwJ(H*q^mer z04HpczCSr-@hn-0kVc|)O@SIlwD9K6hm`}@##7c7g9fWdr64XI-j>|z?h3ce%`+uORt(2t?iyrcIp=Pg25)mmW{J6cJ66PbAD4xE7G;$1lnF`J2H)DKi; z9<%};*Y)YxTvQqf=607?Xx{8q(lq8Dl(qQz=atAc2iLgzK-bvPB%rv%5e&tuslMPX ztJEB3x>KLv=}Ik+2QLlVB_5K;oT1X>2aE?7oX0x5txCqb-B*42JCljx0zUa4*a zC5Z$`rkqGpZemlDqM=b6+2rBUXl=Y445P8~e-dn@Jm#wsd*_!W`33pZ4VT%neehp7 z+TfX&MdYIH6!T6+x8R?r_~RrxYc)vuH(F&sq~ru@)6>biUvM9^Inp$WYB9jr+Seb- zp@@F>M=PF+^+$8!V7PaSL|JuV?fciE7z2l5aQCHwJKH?*mm;%o8hWm6_>W`3zE;-0 zo0MV<^N=p|#dr?(-Tbnj8|Efl^WTCkrK_oj1GaAh7~97eGhY97ePnIYY3L(6)8cig z5>d*8y4P|EDoyQ%lg*O|Q)YUn;(VPmBhS3YD&&?r{Jid8IhkH;AVn+5u4}QPlCm7V zv6I{#|NDrE>)3}~&hXmm;wP(jka2Sow^~LfS*E(>S#|MpFfMYVIU{o6L_axlLiOTW zz{1R1dQ-Hrd6@uF0q5!L=%!tnAhqAHlAEMO7o%8ry-iAzOqO;Fv&M^zGfS`y3U`CI zkID4Ag}qj*!Y;=5Qgv}ZCx2lo#1fFAJwo!X#|qW!Zbn?&EI!9Yis(O^wp4GuBeChl z=7C{I16v%uFHDb`o_p@MVv$6I=o~uZo2W&#BIFOa;__eV!oM|R$GW?MwjiM+JWI=O zlLbEASNmzBVMCZ!8Yo{vyzY;mZmvycqP$vG`sO$Iu?ZIKz5OO* z-(Ai_+fB6nC5_^Q}g@!i8jQu%%@37Y9 zb5zRq1IZ2omNJVw@md3{4uXNrv4yna5n(Gar|V=7Rz=FpaE9FRMqu5T;;qGaRXQR1>w*dJ!nV5bGh?LHM=%PAp`mb|129h%NyJ6ZMj8VuIruAd>- z?A6=N2+U?7wq=z%HauxqBNjXko=HAF&AbZ}(dqhOmZvG*3mztFJKy(vnS9%I#ljR`cfu|?xnET+`Q0~;}m@xhZ zf_Lpi)V6n+b?g0CPV*KqL_661<)D6yAtlXVtOYNdz>N6^^3@-?$Py1h(vp*HSD zydi3uA%I1Qf0LP=$Q?S-r?#ow+Mn#*eX(|-dhLeRU+wIjGNh$h!A{XNj#M1JSV~PXJ&UnamcXp#@_zYjF! zXXCLuT;LYvMA=B;E6#mWJ>#&&b_7F|2|02PF;v1e(%`?XulYVVAcHXiVHxnm!+oM3 z^wcN=uxu$XB4sWd)%nwV2JPH2>}4G{$q6a%OQxS5x}8yB!^+-%OW{wj28s!+{Aj_v z@+kEArF`TVA81nTN#StG)_h4ZUw1I<{S`{1Kqw_5B^tj)jsI=+lr+d#06&xZrbctv zSQK%7Y@Mwr^_atMOH1`4{FaHCxx^FI#}uQ*k5=icC;SUG26kvWiHi+N%cYJN6ZvGT z&axbw&`Ofo21%ii?-1tuyT_lwQORpHO^ezW-b!OEr?cWhBQmYM4V!iD%a>shA<2r# zOVnA6WPE0A)+}3q^pUX)(sgQJP1|#rq4sKVHQ8N=B?WKrI{`n=#ec-h&kcVi5v_!e zV~1KwCEf%KbS;E}^MqTt?i2aSaO+Q$C8|;gNQu^n0xRMH6f?5pXrSX`A@(R}Yj$M; z;iWYPRK|tcU!?wQpgK_OP_1dOdJWAXvLB*v-}PYmnSNnGFIohghBWNq3J^fun;b|B zuCh4v%?SwbjX@Zb6@8>+Dn5~{99;0VhqImb*4!i46iZa(D)GS$RZqU!w_nLB`Qeh4 zY#FzgTsg6O(L7mO6Lx`SN?_dD!sPaA(x8kC-zp|KEZP2)Oyy$QL~}b6yE`~m-SiqU z#kFUSPqY>NYzVaXFR#X5I*zJo-0$MYJLHbSIcGP}qHQReNF4_1g`Z za#B9P`Kd=dF{BQ2p9ZC(jI*je1r>| z?6^(6+~y9MUfxx1p8RxW8})rM>!xjT3A|QAx;x?3!T!%_ zDK@!qQeSA~9#xO-8-b+9RW}DFRnJ3m*Jq9ZYTk%@{=5+L6`b}WDZZ`xo1Ncq0&8HJ zG$LqRsm+1bS*#|z#fJ=?VkLZe{#X6@bodN7*fPtvaxh7X=srcl?YI}KvY7wbx2cQy zjG$r#H%WR45cYW{n%^wIn4<&(Sb_+_vO?%WbXi1o^VGU+agg|0vYK+SLvK5P9LZBY*pt9I=B6LeaC&7Z3h;&~c z=941bxJ^b33oF=>>;3<2Mz0$BC~p0?E8|QqMVs=+ETv zBX74_zyl^AW)vE2T5B@A+|Y-VlfaW4dCzh9rH-+iUb+GD*m#9sC6(b29;+<;{QLcN z6sL|Z`0|rF{CziVxOlS6;SQr+xfO-ie-Ss8XUr_(jG7vZtCC#Jmr;6+090Zp_6yp|HuOC>u{US4hJVrt!c-1gCtJoC5(Sml&9m(xq2x&&CH8Tb=9h|KclBr3)d zf3~STsTsgS+JQ6omVnRtuFdh5#?brd=6F5R3^vYYnK4 zxB4qY)$%_v&Wq6Yfzfo;gTv$+yjuDR&!e@BqR<7^$_?edh`ll<=J!js>3Ij8y~{BP zS0lo)+;OntdFB#^UFdd(^yhR@dp^}{$^$kMs7nz9D(3e|@aNnknsNr4elAss=uVe0kzb}58xc|KP)rvl* z;6;+}9it~eE@w=OKI4>V6WTJ-0nr}Td1$ln&@_Mi3g`U#+~!yxT=~zZM;6=u&O!U* z!UldFD_FGUbn>#(Rh<@3kk5}?K0~~n$PvQeK^wPvAbJivEhKVF;m9xI$X-o!&ZAn? 
z&2b4}LN}9QCuDf;b=6*ZRM(?s2ikE%A0}gmgx+7hv#lc%%iSg2M^@=&Jx(+@1p+&> zK+530aM-H#Yr4p7_TtM`!WxAfiMyBkx4Wowr0iCpE8a5{^pgen_pP z#q~|2^$Er1Ed|=0w?wyKpvX>xc~!$ku(H+lKDJq2@~yx8#M={S#%cd|#u);@3|{WK z%Ah*6IAC65%-!5sc~=NDQnGyd?X3gFq|#}2I?9WC44rwx&3)`%9G2P52$`Rf%-m$% z{+&|>-?*S2>YwrVy2a-wJflc^zB$vB3M}ITtnL$S@N*_ByQ1WM%kiOpi1X)jVT(GVt zu_n)4%}L4mlz7kGbF45nxNGG6up%FwI%#8EA5{|Id>3)r(_x$cjfoTpErD2=Y!m%U zl%rSlVw(hpbl$qCa_o}vzVpYC;J#pwR0DmS(%8Ch@d1@xriJ@%YQvtONDNS5X&5{- zyH2qvXkata!VsukeI4C#pf8!3kvKVKOUty%6wgcAa26rYd|UbF|7-87qoQp0wUr!1 zNdYCKL1_hP7`j6QM7o8cQ@TSM6zP23y)4(aZ5kMFzp`OZ1}to{GD4u80i zHS;|8JagyuyTWd)d`%bvqZzywE2xhguXHjb8>;Ipe1L6TQhwON{-c>zyCSCs_lrTf zG4YM6SI)0(6w!-;@F8Tq_tAaouLK#oUqV9%qS-(BM` z#HK2#bHJ3}lO<7^nP9|@o&c4Zv%^5IiP9|K!^mMdU0(XhKFhCNGfF$(`%cY4HzQf| zo)e_;gD^@?QRxCr=w|2z$cCqn8ZAo(Vedyk!rd+0(67k1&5_g_OWis<01oAh=qqj2 zG7C-a7kAM~LHakS-sN1nshlVL#_>pd^Fc`eHe;y94(oFW&;xZZ04fi%r~Oa#~lWG zT8j9RxQeW`z9bXtK#3=LpgU22em2noEPMXIuk6N)V6BbWeDuaEvH<$^ICRM+*x;%b z5Z6e@r}wl`b-PLhm*~zU{GgU8LS30zSv0oNFQ>>PFE=1C3oHL5n#RSrsEAB3dHpvW zX+x{0VxRDlxWG;J2o~M0zQ{DqwAb>Av=mhDqt$LPJ^bonkwp^T^d4P(FlT%MTEacs z!^kjm<^CU#jf6ZXijY+0Z6g3xiv%0b4Pys0yjXJI&uH2fkC?v$wd9t^3-*V+^+vh+BP}{y} zPe*rqy^j{KgQGq@d_qVZPE}FWRU&PR3+>G$q6|;T+MCBrbIMmyQJG8}W~&crcaHCS zMf+8n?AY_z=`FdKSX1UGx0Zac(lOw|@xMRN&J?5g&miySI;Q>8iRg!iC@b z9TiEf^0AO)Q?o@xF{zGmlSBvrjplGsF*^>}?|gdDR>g#X3NeUNg~cVkKN!E@G*w@W zu>RC!TTrlZ^F-B??$VDW7V_bl{Fz}uo(GP{0afvg$6y3Mpk!e1jeplh=;m4i3DotD z@9Z#TL_GzEVh@i{{ktq-h!UI}R|E&Thz9N5AtZfE$j_`>eI~U&C}{WG5inbM_o?ti zXXG&0f+{Q1Jq^};0#Qj5X9~JFHnFO?by1t1kSCJ#*6Ho_0Xrnpc5}2HBXb z$@g@rJL7j&UP5C)hq;!+8@3%))U*C4YX1jzkzUhaiKqmi%SO3hms@-kXYh}oJ!Ncl z?R?kmtexK$F=W5n7kc&epYTC#e@3Os)7`9Y{py> zEK{>qd8;24_60$c+1)d3icLf-ca_>sG(3RB=$Mrb7=lM#|_^7!KQA0p;+Vzrv7sypsD|O=FRmH)Nze2 zpFA)`l$S=rH9^@4uO7%9bRtWah?(4*p!qngNRpK{F<2k3z|jGRQ?ILR>RnO*uj1#C z6F01Ixig7aaYs0%ZqG7e2c`*u;qQK@*tP?5nHiEVZ33XgkOS0aN>@kxi924Y0q08@ z8>sNo#!iR$3^PYZ$0I;>pGnI6)0$6e({COKPpgyi=)=@#W{zpsOuAR zsV#S@9%mQWWY7NSVK#Y5h6}cu(lx_ZrB3QR2<-E{&{*MKe9@(ctFp1ohqpR=%9o{r zL3IZ;XN#fHSB$_}=Z{@n$xPWvVjc!dP$F;^q_y!zi!_gw${O4CR}CI1 zrxEr17m2BgMwpbAn;xa=1M@sFj|R}ugfU>t*FUKv2SK>$_R~N!`7T?f3;l~rsczofhWQoOo;9Vu zKj+r6qd zCC8-lj8x`TI>+5^A87mb-j+y?Gw%wGP{Q zT9QRzd7}Z%FltD7b6Xae^8l#Vc-DPVRbB{{pwb-RxwM)%tTk>{9}Q(F8spKsW>#6Fam;?J5dZ;3!ZoJDv|QLFcwoK6~IOTK*s z1(-=$KosY`Am*^(p71l)=}dR@C(h|5G;*g0caY~ zi3g&n=zwzrWa9zX>>%ea><7G*AQrKrB1^6CEv0X{Jq`Rg$yF!!v{cJC|7lN?VPgfd%XXsFSbut5P~_ypiRNyTZV*vi-N zqd&wH4gs%y2HBbkJIlIo*DKjdGC6psfBCbl7cM=>9=7 zuSTTsvC^cEKG<#k#cJ6?iT@ z`Z@pbeuIzxj#R7zhxN+k50oNuZ)Zfod>e9jad%#9Qk^M@1^WAX~A9RYxrIdxJsNBRS#J8ockk$uPu@klRop_tk~;nvGN+8ZZFRZbH)= z!%SUtVigDgdD+83$^b_*+@Jhz^#L#pCiO5U-84V?nFVP=A?(*ysk(Sg%O+i{(kz|96zeRCMsY!wQ@|Jx&}|3{Am zRH?tq(s7mmw~NRVztEYB;r%`SCh4+VmKlG#F^BVu_Gsoy%Y(?dX^z+{bK1V@N@>Qb znIT|mjenezRq%&Bkw2J2jsAY}dw9X^dd&RTVAw}YydgK;$pGG4+Jsqglz5sCFgwOU#-o?B zblYk?+100N8iGI6x`n@_ayg0AIE+wvB{deAzGqP7-^7CHbDqbz7%36tD^_sh5$ z=@NK|0Xnt4b^;WH$?z@)QGeP|zJ5T3#A%?X|N7`lR?=s0Af#o(hy*xavP9UUFKxZy zX>=H*Tw?O4r(X});p8Z(Y;L?jjGX0^Q?=u$LKJ|@rO9<0M#2>#6x&kC59v+>8(Bl%p#PjLH(NB;1{lB@~rED z#{zG(v@A3qf1E)Y2(4)zE7(clb<6-3g;U>}`?frC`UPH}M-jSgm1lhmCYTPAjb*|e z`V>)P9saa2@|CyNqkmL5u%q$-Qj#|DyznN1e{miACs*+r?dCrAO$ohxqF9k8UrYz0 zs^n`POR@L?fyd=wzyNf-ZJ5#6`Ft+q!Uedm>(+ID>aWmxWUZ=|@#J>&s%)DJfB1QY zRIUf}`~cB68}>SKI!QF**tf@=*3L}opx%+nij0~0#Y$d|ek;S05I=>eX?1s#=g zYM&T8x^#5yQ#19iG^$O4JR z4Reb;St}?JGk&+C3UOD4DKteYT3TBEZonCHO^9EhQe>hNzEpeJN^;}};fv>USYN2F z^1E_CAx&L@-kuLgC+Y^3$ke{yhLGzFYQ&-raHZO-{+P9s9Ztyj5SV4Qsa#LgDE#mW zOoxtVF1}s5QCfqa`o*71FI4?9{{zBp+}z<0=%Ci(Zpx+OxbOE9=#@opPrYvAs!*?q 
z#xdR$TO`KOvRrUa83ElC*WK^$`VNk<_C98jp(bh|Y!s+@nW&pYME;&3D|Gi&9uL0ti zvqeAMA?6in_>VoK|Mm6YaHppB48U794tjIg=&}sW9^gdzrR|z0<%|>DO)R%3FPEyK zlg*7V)oBRXik%o~TUz6ml2;U&mT}px(@1>Z<=#@CpI%4;ANG^sQsB%Et8$Dz-wvkl z&*|V@OVOd(oj$MNIGheBe-{z`8Lhf*$P&)XzI;(9hV_CnytHqAUD8E-3quSMCrWx(7#lyjnvDlH4c*O!wt6?lta7hd!J4*Adp6Cxre1TtaNZoQpo6wqTX*DqzAcR8Iiv_@@Shp1V`}6 zT~k|pLW5Gbi2*ms*`@dXj`tmH-GQ1r6NJaKY{!PR$gdzMqwns4lHax9NCJWXI6WZ+ zplrtfL>GQ$je?6^K;*L|A(fdXy!E)@3mj#MsTUp>!_%lfc>C-0KEvvu!)KjPr3175Es#Z+^jxv$pYu;+8ysAEctq^newdp7 z7}|A%vvz1;aD|+T5R)K$VT8E5kzNqTKDf4Up9jwP7_k=19J8G8Bn#*W!qZ+II;4|x z0X*&hpyOsYcw?ucvQMnx)6ARu1I+Ul7@(-#5xf3A%vj>mc}DRKBrHbz=`N5A_*9^L9;vcd24(_6yNw zUC!&lxUF+Ljrx)uIhI0DRWw&ifBH*CWaj;39e%JnsR-Lx4kVHu}vxs&|-UVq#MN+jMbMYH&-)ByYu8NlYiRF z%9I*Vvy3nL<}0oZA#`3Sc#svRz|wGLpcam@Y}<@VL<4(iaCnEqP-reDE{>iSb7YF5 z4UV#XpBbhk!83=5)8?9YXZM+nLTw|s>d!k}@VkR~JURNYzyL9fEdCxwz!;Q%60?~h z2WYx0)10E1d3|6!N6n1IRV~62)GVBbUr96^U|;pE91-%idJGzT``YzGp#My1ExSvY z2{^RY6_37LaV7vBXW(;GYc>4*Wm-@)Vnq_IZ8~beu^_G5soDDN zlFLs&WAVN3cVAE`9tCCt^J{hCPfJU)MquMUe$82t&Uk=z=qBeO@e!S=*juXkQkKgYo^OnM0(65eG61`@>Bv|OF9X&KT4YpRnEunX`Fr1!6#X5{SdLWCM9y^{mePuUy&Ah z2UrLMKBN~fNVQ1Fs+4Ohcs)ot|6*(|N$-gullsx7T_W^d@bf;HaH;`%9s>p8w6AZfbw#^xOV5YpU3U^IqyR*H8r)F z(Bc|Y*|uh+dxRX{#pUE}#67-ApsJyOsu|}Su zy`I~b!aIhO0R-QLoLH$3j)-uODC+hP9&uxW^%3-Mz*CVshJMC0LN45|!ua`qY7yd2f=ra6Cl?sG3jCn9Sm;~iJR-LnE_Z%N zdmZyV+%DjPx={za+{vD&FArNSSvI8d{455#lbaOCVV&1O67-RFlq8W z&y`3kw490U^#1n&ec4wc#FjkkUvz#1y3`maxamf(RwYZ!d})uBw$Dv_bM6{-n7CmM z+jQw_V;c?vE)WfSOX2eO^}EXyi{yzjKXJkQ6Fx}w0CO!;IEwTv$q@tzODs7~@8jJp zV>%>}SCu0p)`{`ytsm5Rm%0(LI;<#{eTJ+H9!ODw*oonw5cL5Q}rF4lFk;8moCj$#u3Np)V;4(pR3z0!EdEa;4p)epL}-4=h6c2KZYdt=-8zH2anH?+N!fzcgw1&7S(@V1&P@qZZD0ksdt^u7bb- zRonEJD6$-#Q9WB+iE$ZEi^{F2%UJjwJF;P(57h|c@u9;v(esA9_<5-TvbN7cPE zWfB&?`6aMcp)@IW*)A@H48;dW=?}w1g#yRsTMs`&9oEGQQ8?y z3ikFI02q})kTg7lCcva_w z_mNrUT*0ShvD7@cg3SlfgwBl)YZepJmd4a{Q^b;+3A~d6P@pi^_U9>JC-N&|7sSr# zy(1=Ray@Zy;-Y~%AtL3sb*}+?p|Y>0dqPiK%tS(yHtYF2k;{pq-7Z*&ec1Nfg*IKf z#aNS^Yi(xypg;gE^^Ubw6YE}ekUqW%0MctKk%TL$&wqLwaEG!G^o8m-Cb6dvg{%?h z#Iob!Ut_O?0++F)(?^A-sK<=D>gv+KYJxc?Hgl``tZojVn z8UjwbkCt1`QXd>D>XDteKY@wJc>!|Y?v{D=je59Uz0k7Zfy;A&n_%7$YEDx9!`jVM z5iKXamJrIHr9kQ5HCMh)sE8GL;Xz1G8SW~f4fV<1xUn+(FY{Y9+gvwy_nK0;m)(;o z=YD?&!U~zB=JwI8UImpGrP6+F-<`)R&%1BO27TA!ULVs4_&nP=1r~*o>M|*(jEBZ8 zHJ*;hedP5<_48{+;uasZ4~ATad9SO(x+5(V#p~-{?)@@x+2881j2=VY88=5BXn<2? 
zLC5i#jAvqB4UF(9+mi3Pi&1XHj*ql>v3$2c<@o~@n2^5ieA06?pDiK1#N7~}=S)-9AsXWiY8pv6 z@1^Jb)7huAXDg%7<|d-kl}hfp3lF%Jhm<}SBCw!%YMLeq9Hw4PwDP{%N4wzc{O)T) zX9jpb`h?p6l`V%Jh(9j1LGDv=RMMEqlvj#le-r^6|iU=et+w%Nkr5br&72QrjGBpt7DPzetvaR+U@`fh|!ksxO)LP z@APr1a{OTDYXhJe+t2&5wK?2C>_!Fh&}k zZ>th4I|NF({ZKWahJ1@|(hslU%+TIT1%T4d%%&$1pxLsnK76z$ES77yO__g=!ZEC0 ztR0lpWp&nwD$Hm!TV?x0-DF>rbSywTrOOJ8BPC9YPh`&x{NXwP(Zp+(?nJoJBeKI= zk}2;8*W7*yOE`NvI`({qBTM}8^AAnmgTxhBeT=Dl?|!p=H0^%CcklF8Z0wd@FX{hU*!n z&u|XF+e&S+AR9Pt2;HPnkds+Wl5?gRw70eN{cEk+<=41`O3UXV#R_(hwAx6oZnxp` z7NlxqclL<*6b$IIV9}4J%l#u4kB!N54idshE2 zuX7lg=&P&f;xK-G%a{5rJlwot5eyookFrUh{NDtp@V*zcIBC=;+jSkUJ?SkzUE=z* zk}~DOzI#2_-!OGPPp~T--nw+P)Qn365-s$$cl5E@Zx5>+XU4zC7Wkyfv zk*1rbIq_kU>22A`h;$Ft#rLP(%Dd>!^u~2@hcBvN6!3C48gV_xGt8}$$UMz^IQ`lw zjN#XONjb93J;9A~*}>Gx32=LpDJFubh<$fzsYi;(b64B)u#t%mvP(?Zu{Fs$(W}KV zC+eG@E~6V2b0e|ja};lQwF$zyrqwcyx2(uyD&0*CU7%Ysh6bI2#oEctyM~Jc$}7cH zYn!J?bEb!fyVzcy+Fdp@mKPltuW{V9iAkS4)jbMk>C3F$e{{=RW8Yv`+;6yYsM&o= z<(oronB8Om)7o&p=WJudrkzyt21`Vj|NS#Q&j7QN@w!H$@-M#bbzUN1FMd&LuBq%5GyqLoktJZSgL_f{^>|+qd)U z4zhJXzHYg4XJ^g0tULSFxR=GsG71MlqHAmNOy{sli=0`vurK0Hh3*A?sY}e_b6hCt2K*z-HIgMVkS-f@g@;VFpmN1p^ESvpLJH@f13pwM zp?kJpn5NUY$wO^}mR5)M_xB5ps^f~r_v84M$f|rG#O>DYXMH%GnzInX#(F9?+_gOy zSJjR3MZ)vTuk$mfW8Ak|XD$pE?iu)=jjT+%d^$uf{2blZjDoBM@EhY8o?_YGfcW5k zgSQqq^uWnIO``Tu^OUpp=xF~1Bf~(?lmcTxd zrj*lb%W^i>K9nqyZBMI{i+W6Rv_(Ths#jq`v+lS{VG|1MfHLuz`-F zYUIp>t-8bOQyDIYhc>yQtK%iCg<9!akD@#Q<)gcReS$-^fCrtVo`mP;XsROX{u#@+D= z5vpw%V7NR?yW{h@d{+<^<&Cb;W#xq<1v-ibb9%jHh0U6ku27BrR?c9uxY|_Z4R>u% zQ8)f@`1_{-(ENGHh&#Fey$Ckr#KkI;I973*5o1Q^e(^zb*u>MpzHeCN$<~$iym_DY z6{b@x82ry~2;Udfe=4?QxD47kOa-`cx5ath+We8PP&Crz=`L#|Nu|xPGagj1 zfDY-JZo%19JG?U>n~d-MXE#+-xQ>-Y^%(d7^Q9*tcYMb0m0w&IR#aCyBEf0XJ$8#P zKF!P#4&EHUzh=<(!ZfwMth_1%*8Iw zb!f;s7aRDaSi9WS0rj@T&xDYf)m8Y7P|bUb@%3r?8j27rwK(?hh=?4p{VFm`o zVmk0>O4n-OzCVY|BLBXaE6{B-7JR-<13^K5sBOT2T(xJb zZx)ceSDxn@)-41mJNR4-%d#?SEx+_J#)LF6C&~FfA++H?vRNV2;@Ea;PUn>T(^y_a zWADaLVE$?BA!uxB$zl!a!>D4}5l!nbL9NZQOIz*tr*Dc*Bw*w7oSz%6W@h4;dMiS| zZw%)pIP7n4aM)j%X|P8PPh0xI4vRl>9K<-n^h9C8DX`6DIo|492bu4VZ=TxNz7gWB z85MLrGF{b)AmfY0A{R_XWYrx3I65adIQZd`qV-f6t>t`!0Nm^L`dLT#9DuVkjy|4% zDO2|Qcgv@f8TFcjvPhsT@4yQTLRCLHOli04=R~5AVtc4e@r34k3}8Iv0p9=wXI<#- zwj27J5GGs-A#b4p3{z{jVwohCC~t=m6?lDyw}aD!Zpm81+~j~ukAVG!4DWF$_Q=>_ z_D$mqn=xvZVP%CObgi#D7@vN)!Q*mx>buFIm5eKr^qu|>-c1kgcD6piEFs1;ohKnt z_d|T=5Qil1llmDmIZq563ZZlo)bdLBTl4cnCW@5#QpO(?6mWvE(TRlFi>BOeZKsmQ zAtl*YBb{D}a^zR5OB{S-4fH`qFWN;dQ>qW0r4y5qK z=>XHa1LvX6NOEhsU`GfC^B&;)XM;`X53_3lw-cJQ3I;rO52^*{_2>|ms`%+sUt!CL zzaF4BXgM;2Fd-N;Lg{PZ0Sf50t0Fq$;yOIRG1GkHr`kmB1HKQ#(@AI}vko%sySmcK z^zVjMU>0-l=wt1hXGXl0>1a%=Xt3~B}K8ZLvAZi~cg zBNpwOJu{fahAeFHr%KILT7mesr}zx)VU$kQkf}vyN5?Rr_9sY4O7 z_2&(U;rQQwQNe&nsoBP>%Ydom8QCuGQ-Lah(_5?Ui8i%@;6D%Xf4&F&MM!AcBpr75 z?nX{t;bbln(y)ideu(^^(f==QGD?60#{N>c26%%8?jcG(DoR#?Krdn({p(wxWO<+^ z65>QzC3c5+Fo2$(OHeZTubPi&oRBuSYz@cUSHxWio-3wn2np3px03!DwEyZB6zmk? z0ss2O|JnFuaJ7EMrvm@$RsL$a(LIQiN!eSe|K)@H<9aUO6Cd7_QT#VAO8`$Yfr$|B zU$y!_-zua7K5(7yF#f-JS&p~216^pq|MuMeegaWyLPESH=|}(WWzQ%mQo0N%|IPFG uzsH7HD*p3}iTc0C_FtdJ{|~+G7J2;fPg^l1ERQ?jM_OD#tXM?P_dft8SedW@ diff --git a/docs/src/index.md b/docs/src/index.md deleted file mode 100644 index 1cdbd7efab..0000000000 --- a/docs/src/index.md +++ /dev/null @@ -1,327 +0,0 @@ -# ITensors.jl - -ITensor is a library for rapidly creating correct and efficient -tensor network algorithms. 
| **Documentation**|**Citation**|
|:----------------:|:----------:|
|[![docs](https://img.shields.io/badge/docs-latest-blue.svg)](https://itensor.github.io/ITensors.jl/dev/)|[![SciPost](https://img.shields.io/badge/SciPost-10.21468-blue.svg)](https://scipost.org/SciPostPhysCodeb.4) [![arXiv](https://img.shields.io/badge/arXiv-2007.14822-b31b1b.svg)](https://arxiv.org/abs/2007.14822)|

|**Version**|**Download Statistics**|**Style Guide**|**License**|
|:---------:|:---------------------:|:-------------:|:---------:|
|[![version](https://juliahub.com/docs/ITensors/version.svg)](https://juliahub.com/ui/Packages/ITensors/P3pqL)|[![ITensor Downloads](https://img.shields.io/badge/dynamic/json?url=http%3A%2F%2Fjuliapkgstats.com%2Fapi%2Fv1%2Fmonthly_downloads%2FITensors&query=total_requests&suffix=%2Fmonth&label=Downloads)](http://juliapkgstats.com/pkg/ITensors)|[![Code Style: Blue](https://img.shields.io/badge/code%20style-blue-4495d1.svg)](https://github.com/invenia/BlueStyle)|[![license](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://github.com/ITensor/ITensors.jl/blob/main/LICENSE)|

The source code for ITensor can be found [on GitHub](https://github.com/ITensor/ITensors.jl).

Additional documentation can be found on the ITensor website [itensor.org](https://itensor.org/).

An ITensor is a tensor whose interface is independent of its memory
layout. ITensor indices are objects which carry extra information and
which 'recognize' each other, i.e., compare equal to each other.
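As a small illustration of this index behavior, here is a minimal sketch (the indices `i` and `j` are hypothetical and not part of the code samples later in this document):

```julia
using ITensors

i = Index(2) # an Index of dimension 2 carrying a unique internal id
j = Index(2) # same dimension, but a brand-new id

@show i == j           # false: distinct indices, even though the dimensions match
@show i == i'          # false: priming produces a related but distinct index
@show noprime(i') == i # true: removing the prime recovers the original index
```

Contraction (the `*` operation used in the code samples below) sums only over indices that compare equal in this sense.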
## Installation

The ITensors package can be installed with the Julia package manager.
From the Julia REPL, type `]` to enter the Pkg REPL mode and run:

```
~ julia
```

```julia
julia> ]

pkg> add ITensors
```

Or, equivalently, via the `Pkg` API:

```julia
julia> import Pkg; Pkg.add("ITensors")
```

Please note that ITensors.jl currently requires Julia v1.3 or later, since it relies on a feature that was introduced in Julia v1.3.

We recommend using ITensors.jl with Intel MKL in order to get the best possible performance. If you have not done so already, you can replace your current BLAS and LAPACK implementation with MKL by using the MKL.jl package.
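As a rough sketch of that setup (assuming the MKL.jl package; see the linked instructions below for the authoritative steps):

```julia
# One-time install of MKL.jl, which swaps the default BLAS/LAPACK for MKL:
using Pkg: Pkg
Pkg.add("MKL")

# In your code, load MKL alongside ITensors before doing linear algebra:
using MKL
using ITensors
```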
Please follow the instructions [here](https://github.com/JuliaComputing/MKL.jl).

## Documentation

- [**LATEST**](https://itensor.github.io/ITensors.jl/dev/) -- *documentation of the latest version.*

## Citation

If you use ITensor in your work, please cite the [ITensor Paper](https://www.scipost.org/SciPostPhysCodeb.4):

```bib
@article{ITensor,
  title={{The ITensor Software Library for Tensor Network Calculations}},
  author={Matthew Fishman and Steven R. White and E. Miles Stoudenmire},
  journal={SciPost Phys. Codebases},
  pages={4},
  year={2022},
  publisher={SciPost},
  doi={10.21468/SciPostPhysCodeb.4},
  url={https://scipost.org/10.21468/SciPostPhysCodeb.4},
}
```

and the associated "Codebase Release" for the version you have used. The current one is:

```bib
@article{ITensor-r0.3,
  title={{Codebase release 0.3 for ITensor}},
  author={Matthew Fishman and Steven R. White and E. Miles Stoudenmire},
  journal={SciPost Phys. Codebases},
  pages={4-r0.3},
  year={2022},
  publisher={SciPost},
  doi={10.21468/SciPostPhysCodeb.4-r0.3},
  url={https://scipost.org/10.21468/SciPostPhysCodeb.4-r0.3},
}
```

## ITensor Code Samples

### Basic Overview

ITensor construction, setting of elements, contraction, and addition.
Before constructing an ITensor, one constructs Index objects
representing tensor indices.

```julia
using ITensors
let
  i = Index(3)
  j = Index(5)
  k = Index(2)
  l = Index(7)

  A = ITensor(i,j,k)
  B = ITensor(j,l)

  # Set elements of A
  A[i=>1,j=>1,k=>1] = 11.1
  A[i=>2,j=>1,k=>2] = -21.2
  A[k=>1,i=>3,j=>1] = 31.1 # can provide Index values in any order
  # ...

  # Contract over shared index j
  C = A * B

  @show hasinds(C,i,k,l) # = true

  D = random_itensor(k,j,i) # ITensor with random elements

  # Add two ITensors
  # must have same set of indices
  # but can be in any order
  R = A + D

  nothing
end

# output

hasinds(C, i, k, l) = true
```

### Singular Value Decomposition (SVD) of a Matrix

In this example, we create a random 10x20 matrix
and compute its SVD. The resulting factors can
simply be multiplied back together using the
ITensor `*` operation, which automatically recognizes
the matching indices between U and S, and between S and V,
and contracts (sums over) them.

```julia
using ITensors
let
  i = Index(10)           # index of dimension 10
  j = Index(20)           # index of dimension 20
  M = random_itensor(i,j) # random matrix, indices i,j
  U,S,V = svd(M,i)        # compute SVD with i as row index
  @show M ≈ U*S*V         # = true

  nothing
end

# output

M ≈ U * S * V = true
```
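The SVD can also be truncated. Here is a brief sketch (the `maxdim` and `cutoff` keyword arguments play the same role as the DMRG truncation parameters of the same names used elsewhere in this documentation):

```julia
using ITensors
let
  i = Index(10)
  j = Index(20)
  M = random_itensor(i,j)

  # Keep at most 5 singular values, discarding weight below the cutoff
  U,S,V = svd(M,i; maxdim=5, cutoff=1E-8)
  @show dim(commonind(U,S)) # at most 5

  nothing
end
```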
### Singular Value Decomposition (SVD) of a Tensor

In this example, we create a random 4x4x4x4 tensor
and compute its SVD, temporarily treating the indices i and k
together as the "row" index and j and l as the "column" index
for the purposes of the SVD. The resulting factors can
simply be multiplied back together using the
ITensor `*` operation, which automatically recognizes
the matching indices between U and S, and between S and V,
and contracts (sums over) them.

![](svd_tensor.png)

```julia
using ITensors
let
  i = Index(4,"i")
  j = Index(4,"j")
  k = Index(4,"k")
  l = Index(4,"l")
  T = random_itensor(i,j,k,l)
  U,S,V = svd(T,i,k) # compute SVD with (i,k) as row indices (indices of U)
  @show hasinds(U,i,k) # = true
  @show hasinds(V,j,l) # = true
  @show T ≈ U*S*V # = true

  nothing
end

# output

hasinds(U, i, k) = true
hasinds(V, j, l) = true
T ≈ U * S * V = true
```

### Tensor Indices: Tags and Prime Levels

Before making an ITensor, you have to define its indices.
Tensor Index objects carry extra information beyond just their dimension.

Every Index object carries a permanent, immutable id number, determined
when it is constructed, which allows it to be matched (compare equal)
with copies of itself.

Additionally, an Index can have up to four tag strings and an
integer prime level. If two Index objects have different tags or
different prime levels, they do not compare equal even if they
have the same id.

Tags are also useful for identifying Index objects when printing
tensors, and for performing certain Index manipulations (e.g.
priming indices having certain sets of tags).

```julia
using ITensors
let
  i = Index(3) # Index of dimension 3
  @show dim(i) # = 3
  @show id(i) # = 0x5d28aa559dd13001 or similar

  ci = copy(i)
  @show ci == i # = true

  j = Index(5,"j") # Index with a tag "j"

  @show j == i # = false

  s = Index(2,"n=1,Site") # Index with two tags,
                          # "Site" and "n=1"
  @show hastags(s,"Site") # = true
  @show hastags(s,"n=1") # = true

  i1 = prime(i) # i1 has a "prime level" of 1
                # but otherwise same properties as i
  @show i1 == i # = false, prime levels do not match

  nothing
end

# output

dim(i) = 3
id(i) = 0x5d28aa559dd13001
ci == i = true
j == i = false
hastags(s, "Site") = true
hastags(s, "n=1") = true
i1 == i = false
```

### DMRG Calculation

DMRG is an iterative algorithm for finding the dominant
eigenvector of an exponentially large, Hermitian matrix.
It originates in physics with the purpose of finding
eigenvectors of Hamiltonian (energy) matrices which model
the behavior of quantum systems.
```julia
using ITensors, ITensorMPS
let
  # Create 100 spin-one indices
  N = 100
  sites = siteinds("S=1",N)

  # Input operator terms which define
  # a Hamiltonian matrix, and convert
  # these terms to an MPO tensor network
  # (here we make the 1D Heisenberg model)
  os = OpSum()
  for j=1:N-1
    os += "Sz",j,"Sz",j+1
    os += 0.5,"S+",j,"S-",j+1
    os += 0.5,"S-",j,"S+",j+1
  end
  H = MPO(os,sites)

  # Create an initial random matrix product state
  psi0 = random_mps(sites)

  # Plan to do 5 passes or 'sweeps' of DMRG,
  # setting maximum MPS internal dimensions
  # for each sweep and maximum truncation cutoff
  # used when adapting internal dimensions:
  nsweeps = 5
  maxdim = [10,20,100,100,200]
  cutoff = 1E-10

  # Run the DMRG algorithm, returning energy
  # (dominant eigenvalue) and optimized MPS
  energy, psi = dmrg(H,psi0; nsweeps, maxdim, cutoff)
  println("Final energy = $energy")

  nothing
end

# output

After sweep 1 energy=-137.954199761732 maxlinkdim=9 maxerr=2.43E-16 time=9.356
After sweep 2 energy=-138.935058943878 maxlinkdim=20 maxerr=4.97E-06 time=0.671
After sweep 3 energy=-138.940080155429 maxlinkdim=92 maxerr=1.00E-10 time=4.522
After sweep 4 energy=-138.940086009318 maxlinkdim=100 maxerr=1.05E-10 time=11.644
After sweep 5 energy=-138.940086058840 maxlinkdim=96 maxerr=1.00E-10 time=12.771
Final energy = -138.94008605883985
```

You can find more examples of running `dmrg` and related algorithms [here](https://github.com/ITensor/ITensorMPS.jl/tree/main/examples).

diff --git a/docs/src/svd_tensor.png b/docs/src/svd_tensor.png
deleted file mode 100644
index 31e1b95b5ebbfc401a257ab7319e95585670be05..0000000000000000000000000000000000000000
GIT binary patch
(binary image data omitted)
zfS~i#p_9ewU5G{v-NH?BG^FXY3Azh1IZ`K@9NX@zh`p}wJFd(pX&4e_O1rH_V1lMR3z00CNjIRWc)^90NDQT% z8=-~`eAF8|)pO@0L@-rGS4`x}@9GsbBj3>K_#q#tPFF8|p(J z#&u}H#F_IQl*(O5j1&>Na1+DE39!HlU!o+v#0S7zlZ)vA0aysiq)NpSZ_s#IJ-O#6 zZh%7SP}>2z+9JIJUdh(@HVK|KW6-e#jhb&jrXg5=uRUuv_46PGG0NK|=jj?G>@vd- z8Gn(-%YlUql0ZXA-k|2I(kd~kHmM#M7SYutMQ^hVo zoQQ`oC9Sb?ZTRm8hr={DT@cDiGt0@|EFph$e3=r@C&05gEO{)1e!E<(k>g~1$<(qP zZm{U3o#y7oaD6Q-ck=8UWYNQ&ml(#$>GD!FbX^`4lj|AJgUWLF9!!4$39c%fo|BR- zf95#7Vz$Xc-;E`xu1u>+7TgTHM>^gh;XQH_N7wj=601BuPAo#8>^W})(4D{pmA^(<;>8{ zR-A(G8{>_3&8d`;LS=6gD7WC7SiD!{voAkkUT@E^xBGTF(3>^DiqfCnBCvc93$Yvl zIUbw}3t=SyuEo`iesfDNj91IOAU|@?Hy3peRFL3{FQ!&%LpHnzjC7z!Mhd8y4(qh( zG`55;PC>pB(f@EaK9wBaQrdGPlrT#!8OFF?7XZ}q;rjH7Fx@>_<2qf#Rwz1mNIcjF zySbEDig0VzP;joN=&K$OEg}%+X2M>9C957B4D)9tOf=f*_a)Wn4d)SNtoD}xkWenh zzbVtpb09@Hfc^=m{v^QfQbfUY-sy-Z(`_J{u=>$ZbR(>S#}XH;e{yv+sF`<#{G`EA zU*xrB$`9$bp^q!#P7hEEs9-xYSf){w=M4U~0TJs}Pez{e2XQ9GKFI}LSVx2e1LOHq&Fh4jB3-u%R_}#62P>WqB}gVGb+6ss)!bO6@2+HV#JvyKOUQFeu5dAKU=yNV%Xf~a*$9R zqQflq++K9Ujxbwa{%#DecdJkA8o7XdF*UMuKfS2mLIxY)&7K@h{E!9B8MTeNNaYnE z*>Vb%jIfZ!#IV{Mp+}_~cOi%=n=CRXWvV9ICm^GJli)ukFSq!Cx_TV>6te;^K(;{D zlYki9b0X{=?NnX4#ZBQkNb`iX^N3%lH=?s)-5Z~-et0Y3mxijAM;`Am%H2yX8;Eo^ zVv}gzsWO#Q@FFmK*Er;T2yZH{gW+&OzVMm|P`*>1_l^Yb-i`$WsbP50iF5Y4+6&mAds9C6 zd^P}+Yb4j=+X)fQ0b=-xDST5{#q!qiyD>oe#={Ap2YE-5hP$8!&FO}#ZFlfwx$}>@ z4ch|Z{;Zu3E15OI-q7FoBO$;j@=t#fmcZD=eqrK+>RxXs2DwXb__jtaJVr|!p1SCW z(NGAyJa4NH0Oi&^9e4zb>jHdANuApVQFKZkBdO3Ti!vZeeC4~q?ji4P`Qg0 zW52o1PAiAMck)`WV5teBS@1R1Kpno_BVPoSb&c^g69sktN!7pPg{y4ZrSBioW%?lr zdpKv&ipuD0$cQndr$^*t$`Di`;!poFh^^JIfJ8p)3rCzrqJlK1OSu&vlx*GZJVjzt ze&dme)GsXf5aHpxkQ6Ua*npWGD&Ybz<%O|Yz-Aw?K-NfzF-3!zjR4qUxUYQ zf>EI%#9jJ8IEqPbS4H--ASeZgWftd&>=%_BCn52Rc}Qu;zr*`8L$tnWl~ys_KZ=vz zfs?03Rj-V=uaE+hkB9iH=dhg_8wSAkWm9a1G<5a9bfE^;_&o6%St-$+uUNYs`j~$j z=);CiYn^UGTW&c6Kc(f5w&=v-$UWs)fw$YawB8=W)yusOc28H`Bpj0`x9{$WcjhjaRDh35{(1#9BBQW!vt|)5Z1wSFN z3iGMw{_9^q6g=RFTWTfwi*jFcvUfKGEKeD2(f{Fod=9i)!Agyae{~LP`}==>Ph}o4 zepBT)ugn(?c*OfHLBG&Yzp3)KrkMBRH&x~lW6mW0EkXaBwe!gHn^)!$<2SGT@2xnB aYqas>eFZsXt_$Gbp##VE$Np(>{r>^-liFnf diff --git a/docs/src/tutorials/DMRG.md b/docs/src/tutorials/DMRG.md deleted file mode 100644 index 9ff95b823d..0000000000 --- a/docs/src/tutorials/DMRG.md +++ /dev/null @@ -1,127 +0,0 @@ -# [DMRG Tutorial](@id dmrg_tutorial) - -The [density matrix renormalization group (DMRG)](https://tensornetwork.org/mps/algorithms/dmrg/) -is an algorithm for computing eigenstates -of Hamiltonians (or extremal eigenvectors of large, Hermitian matrices). -It computes these eigenstates in the -[matrix product state (MPS)](https://tensornetwork.org/mps/) format. - -Let's see how to set up and run a DMRG calculation using the ITensor library. -We will be interested in finding the ground state of the quantum Hamiltonian -``H`` given by: - -```math -H = \sum_{j=1}^{N-1} \mathbf{S}_{j} \cdot \mathbf{S}_{j+1} = \sum_{j=1}^{N-1} S^z_{j} S^z_{j+1} + \frac{1}{2} S^+_{j} S^-_{j+1} + \frac{1}{2} S^-_{j} S^+_{j+1} -``` - -This Hamiltonian is known as the one-dimensional Heisenberg model and we will -take the spins to be ``S=1`` spins (spin-one spins). We will consider -the case of ``N=100`` and plan to do five sweeps of DMRG (five passes over the system). - -**ITensor DMRG Code** - -Let's look at an entire, working ITensor code that will do this calculation then -discuss the main steps. If you need help running the code below, see the getting -started page on [Running ITensor and Julia Codes](@ref). 
```julia
using ITensors, ITensorMPS
let
  N = 100
  sites = siteinds("S=1",N)

  os = OpSum()
  for j=1:N-1
    os += "Sz",j,"Sz",j+1
    os += 1/2,"S+",j,"S-",j+1
    os += 1/2,"S-",j,"S+",j+1
  end
  H = MPO(os,sites)

  psi0 = random_mps(sites;linkdims=10)

  nsweeps = 5
  maxdim = [10,20,100,100,200]
  cutoff = [1E-10]

  energy,psi = dmrg(H,psi0;nsweeps,maxdim,cutoff)

  return
end
```

**Steps of The Code**

The first lines

```@example siteinds; continued=true
using ITensors, ITensorMPS
N = 100
sites = siteinds("S=1",N)
```

tell the function `siteinds` to make an array of ITensor [Index](https://itensor.github.io/ITensors.jl/stable/IndexType.html) objects which
have the properties of ``S=1`` spins. This means their dimension will be 3 and
they will carry the `"S=1"` tag, which will enable the next part of the code to know
how to make appropriate operators for them.

Try printing out some of these indices to verify their properties:

```@example siteinds
@show sites[1]
```

The next part of the code builds the Hamiltonian:

```julia
os = OpSum()
for j=1:N-1
  os += "Sz",j,"Sz",j+1
  os += 1/2,"S+",j,"S-",j+1
  os += 1/2,"S-",j,"S+",j+1
end
H = MPO(os,sites)
```

An `OpSum` is an object which accumulates Hamiltonian terms such as `"Sz",1,"Sz",2`
so that they can be summed afterward into a matrix product operator (MPO) tensor network.
The line of code `H = MPO(os,sites)` constructs the Hamiltonian in the MPO format, with
physical indices given by the array `sites`.

The line

```julia
psi0 = random_mps(sites;linkdims=10)
```

constructs an MPS `psi0` which has the physical indices `sites` and a bond dimension of 10.
It is made from a random quantum circuit that is reshaped into an MPS, so that its
properties are as generic and unbiased as those of an MPS of that size can be.
This choice can help prevent the DMRG calculation from getting stuck in a local minimum.

The lines

```julia
nsweeps = 5
maxdim = [10,20,100,100,200]
cutoff = [1E-10]
```

define the number of DMRG sweeps (five) we will instruct the code to do, as well as the
parameters that will control the speed and accuracy of the DMRG algorithm within
each sweep. The array `maxdim` limits the maximum MPS bond dimension allowed during
each sweep and `cutoff` defines the truncation error goal of each sweep (if fewer values are
specified than sweeps, the last value is used for all remaining sweeps).

Finally, the call

```julia
energy,psi = dmrg(H,psi0;nsweeps,maxdim,cutoff)
```

runs the DMRG algorithm included in ITensor, using `psi0` as an
initial guess for the ground state wavefunction. The optimized MPS `psi` and
its eigenvalue `energy` are returned.

After the `dmrg` function returns, you can take the returned MPS `psi` and do further calculations with it, such as measuring local operators or computing entanglement entropy.
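For example, here is a minimal sketch of such a follow-up measurement (reusing `psi` from the code above; `expect` is the same ITensorMPS function used in the time evolution tutorial below):

```julia
# Measure the local <Sz> expectation value on every site of `psi`
Sz = expect(psi, "Sz")
println("Total Sz = ", sum(Sz))
```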
diff --git a/docs/src/tutorials/MPSTimeEvolution.md b/docs/src/tutorials/MPSTimeEvolution.md
deleted file mode 100644
index 58886a6c6b..0000000000
--- a/docs/src/tutorials/MPSTimeEvolution.md
+++ /dev/null
@@ -1,189 +0,0 @@
# MPS Time Evolution

An important application of [matrix product state (MPS)](https://tensornetwork.org/mps/)
tensor networks in physics is computing the time evolution of a quantum state under the dynamics
of a Hamiltonian ``H``. An accurate, efficient, and simple way to time evolve a matrix product
state (MPS) is by using a Trotter decomposition of the time evolution operator ``U(t) = e^{-i H t}``.

The technique we will use is "time evolving block decimation" (TEBD).
More simply, it is just the idea of decomposing the time-evolution operator into a circuit of
quantum 'gates' (two-site unitaries) using the Trotter-Suzuki approximation and applying these gates in
a controlled way to an MPS.

Let's see how to set up and run a TEBD calculation using ITensor.

The Hamiltonian ``H`` we will use is the one-dimensional Heisenberg model,
which is given by:

```math
\begin{aligned}
H & = \sum_{j=1}^{N-1} \mathbf{S}_{j} \cdot \mathbf{S}_{j+1} \\
& = \sum_{j=1}^{N-1} S^z_{j} S^z_{j+1} + \frac{1}{2} S^+_{j} S^-_{j+1} + \frac{1}{2} S^-_{j} S^+_{j+1}
\end{aligned}
```

**The TEBD Method**

When the Hamiltonian, like the one above, is a sum of local terms,

```math
H = \sum_j h_{j,j+1}
```

where ``h_{j,j+1}`` acts on sites ``j`` and ``j+1``,
then a Trotter decomposition that is particularly well suited for use
with MPS techniques is

```math
e^{-i \tau H} \approx e^{-i h_{1,2} \tau/2} e^{-i h_{2,3} \tau/2} \cdots e^{-i h_{N-1,N} \tau/2}
e^{-i h_{N-1,N} \tau/2} e^{-i h_{N-2,N-1} \tau/2} \cdots e^{-i h_{1,2} \tau/2} + O(\tau^3)
```

Note the factor of two dividing ``\tau`` in each exponential. Each factored exponential is known as a
Trotter "gate".

We can visualize the resulting circuit that will be applied to the MPS as follows:

![](trotter_tevol.png)

The error in the above decomposition is of order ``\tau^3``, so that will be the error
accumulated _per time step_. Because of the time-step error, one takes ``\tau`` to be
small and then applies the above set of operators to an MPS as a single sweep, then
does a number ``(t/\tau)`` of sweeps to evolve for a total time ``t``. (For example,
the code below uses ``\tau = 0.1`` and a total time ``t = 5.0``, corresponding to 50 sweeps.)
The total error will therefore scale as ``\tau^2`` with this scheme, though other sources
of error may dominate for long times, or very small ``\tau``, such as truncation errors.

Let's take a look at the code to apply these Trotter gates to an MPS to
time evolve it. Then we will break down the steps of the code in more detail.

**ITensor TEBD Time Evolution Code**

Let's look at an entire, working ITensor code that will do this calculation and then
discuss the main steps. (If you need help running the code below, see the getting
started page on running ITensor codes.)

```julia
using ITensors, ITensorMPS

let
  N = 100
  cutoff = 1E-8
  tau = 0.1
  ttotal = 5.0

  # Make an array of 'site' indices
  s = siteinds("S=1/2", N; conserve_qns=true)

  # Make gates (1,2),(2,3),(3,4),...
  gates = ITensor[]
  for j in 1:(N - 1)
    s1 = s[j]
    s2 = s[j + 1]
    hj =
      op("Sz", s1) * op("Sz", s2) +
      1 / 2 * op("S+", s1) * op("S-", s2) +
      1 / 2 * op("S-", s1) * op("S+", s2)
    Gj = exp(-im * tau / 2 * hj)
    push!(gates, Gj)
  end
  # Include gates in reverse order too
  # (N,N-1),(N-1,N-2),...
  append!(gates, reverse(gates))

  # Initialize psi to be a product state (alternating up and down)
  psi = MPS(s, n -> isodd(n) ? "Up" : "Dn")

  c = div(N, 2) # center site

  # Compute and print <Sz> at each time step
  # then apply the gates to go to the next time
  for t in 0.0:tau:ttotal
    Sz = expect(psi, "Sz"; sites=c)
    println("$t $Sz")

    t ≈ ttotal && break

    psi = apply(gates, psi; cutoff)
    normalize!(psi)
  end

  return
end
```

**Steps of The Code**

First we set some parameters, like the system size N and the time step ``\tau`` to use.

The line `s = siteinds("S=1/2", N; conserve_qns=true)` defines an array of
spin 1/2 tensor indices (Index objects) which will be the site or physical
indices of the MPS.

Next we make an empty array `gates = ITensor[]` that will hold the ITensors
that will be our Trotter gates. Inside the `for j in 1:(N - 1)` loop that follows,
the lines

```julia
hj = op("Sz",s1) * op("Sz",s2) +
     1/2 * op("S+",s1) * op("S-",s2) +
     1/2 * op("S-",s1) * op("S+",s2)
```

call the `op` function, which reads the "S=1/2" tag on our site indices
(sites j and j+1) and then knows that we want the spin-1/2 version of the
"Sz", "S+", and "S-" operators.
The `op` function returns these operators as ITensors, and we
tensor product and add them together to compute the operator ``h_{j,j+1}``
defined as

```math
h_{j,j+1} = S^z_j S^z_{j+1} + \frac{1}{2} S^+_j S^-_{j+1} + \frac{1}{2} S^-_j S^+_{j+1}
```

which we call `hj` in the code.

To make the corresponding Trotter gate `Gj` we exponentiate `hj` times
a factor ``-i \tau/2`` and then append or push this onto the end of the
gate array `gates`.

```julia
Gj = exp(-im * tau/2 * hj)
push!(gates,Gj)
```

Having made the gates for bonds (1,2),(2,3),(3,4), etc., we still need
to append the gates in reverse order to complete the correct Trotter
formula. Here we can conveniently do that by just calling the Julia
`append!` function and supplying a reversed version of the array of
gates we have made so far. This can
be done in a single line of code: `append!(gates,reverse(gates))`.

The line of code `psi = MPS(s, n -> isodd(n) ? "Up" : "Dn")`
initializes our MPS `psi` as a product state of alternating
up and down spins.

To carry out the time evolution we loop over
the range of times from 0.0 to `ttotal` in steps of `tau`,
using the Julia range notation `0.0:tau:ttotal` to easily
set up this loop as `for t in 0.0:tau:ttotal`.

Inside the loop, we use the `expect` function to measure
the expected value of the `"Sz"` operator on the center
site.

To evolve the MPS to the next time, we call the function

```julia
psi = apply(gates, psi; cutoff)
```

which applies the array of ITensors called `gates` to our current
MPS `psi`, truncating the MPS at each step using the truncation
error threshold supplied as the variable `cutoff`.

The `apply` function is smart enough to determine which site indices
each gate has, and then figure out where to apply it to our
MPS. It automatically handles truncating the MPS and can
even handle non-nearest-neighbor gates, though that
feature is not used in this example.
diff --git a/docs/src/tutorials/QN_DMRG.md b/docs/src/tutorials/QN_DMRG.md
deleted file mode 100644
index 3d3f593372..0000000000
--- a/docs/src/tutorials/QN_DMRG.md
+++ /dev/null
@@ -1,182 +0,0 @@
# Quantum Number Conserving DMRG

An important technique in DMRG calculations of quantum Hamiltonians
is the conservation of _quantum numbers_. Examples of these are the
total number of particles in a model of fermions, or the total of all
``S^z`` components of a system of spins. Not only can conserving quantum
numbers make DMRG calculations run more quickly and use less memory, but
it can be important for simulating physical systems with conservation
laws and for obtaining ground states in different symmetry sectors.
Note that ITensor currently only supports Abelian quantum numbers.

#### Necessary Changes

Setting up a quantum-number conserving DMRG calculation in ITensor requires
only very small changes to a DMRG code. The main changes are:

1. using tensor indices (`Index` objects) which carry quantum number (QN) information to build your Hamiltonian and initial state
2. initializing your MPS to have well-defined total quantum numbers

Importantly, _the total QN of your state throughout the calculation will
remain the same as the initial state passed to DMRG_.
The total QN of your state is not set separately, but determined
implicitly from the initial QN of the state when it is first constructed.

Of course, your Hamiltonian should conserve all of the QNs that you would
like to use. If it doesn't, you will get an error when you try to construct
it out of the QN-enabled tensor indices.

#### Making the Changes

Let's see how to make these two changes to the
[DMRG Tutorial](@ref dmrg_tutorial) code from the previous section.
At the end, we will put together these changes for a complete, working code.

**Change 1: QN Site Indices**

To make change (1), we will change the line

```julia
sites = siteinds("S=1",N)
```

by setting the `conserve_qns` keyword argument to `true`:

```julia
sites = siteinds("S=1",N; conserve_qns=true)
```

Setting `conserve_qns=true` tells the `siteinds` function to conserve
every possible quantum number associated to the site
type (which is `"S=1"` in this example). For ``S=1`` spins, this will turn on
total-``S^z`` conservation.
(For other site types that conserve multiple QNs, there are specific keyword
arguments available to track just a subset of conservable QNs.)
We can check this by printing out some of the site indices, and seeing that the
subspaces of each `Index` are labeled by QN values:

```julia
@show sites[1]
@show sites[2]
```

Sample output:

```
 sites[1] = (dim=3|id=794|"S=1,Site,n=1")
 1: QN("Sz",2) => 1
 2: QN("Sz",0) => 1
 3: QN("Sz",-2) => 1
 sites[2] = (dim=3|id=806|"S=1,Site,n=2")
 1: QN("Sz",2) => 1
 2: QN("Sz",0) => 1
 3: QN("Sz",-2) => 1
```

In the sample output above, note that in addition to the dimension of these indices being 3, each of the three settings of the Index has a unique QN associated to it. The number after the QN on each line is the dimension of that subspace, which is 1 for each subspace of the Index objects above. Note also that `"Sz"` quantum numbers in ITensor are measured in units of ``1/2``, so `QN("Sz",2)` corresponds to ``S^z=1`` in conventional physics units.

**Change 2: Initial State**

To make change (2), instead of constructing the initial MPS `psi0` to be an arbitrary, random MPS, we will make it a specific state with a well-defined total ``S^z``.
So we will replace the line

```julia
psi0 = random_mps(sites;linkdims=10)
```

by the lines

```julia
state = [isodd(n) ? "Up" : "Dn" for n=1:N]
psi0 = MPS(sites,state)
```

The first line of the new code above makes an array of strings which
alternate between `"Up"` and `"Dn"` on odd and even numbered sites.
These names `"Up"` and `"Dn"` are special values associated to the `"S=1"`
site type which indicate up and down spin values. The second line takes
the array of site Index objects `sites` and the array of strings `state`
and returns an MPS which is a product state (a classical, unentangled state)
with each site's state given by the strings in the `state` array.
In this example, `psi0` will be a Neel state with alternating up and down
spins, so it will have a total ``S^z`` of zero. We could check this by
computing the quantum-number flux of `psi0`:

```julia
@show flux(psi0)
# Output: flux(psi0) = QN("Sz",0)
```

!!! info "Setting Other Total QN Values"

    The above example shows the case of setting a total "Sz" quantum
    number of zero, since the initial state alternates between "Up"
    and "Dn" on every site with an even number of sites.

    To obtain other total QN values, just set the initial state to
    be one which has the total QN you want. To be concrete,
    let's take the example of a system with `N=10` sites of
    ``S=1`` spins.

    For example, if you want a total "Sz" of +20 (= `QN("Sz",20)`) in ITensor units,
    or ``S^z=10`` in physical units, for a system with 10 sites,
    use the initial state:
    ```julia
    state = ["Up" for n=1:N]
    psi0 = MPS(sites,state)
    ```
    Or to initialize this 10-site system to have a total "Sz" of +16
    in ITensor units (``S^z=8`` in physical units):
    ```julia
    state = ["Dn","Up","Up","Up","Up","Up","Up","Up","Up","Up"]
    psi0 = MPS(sites,state)
    ```
    would work (as would any `state` with one "Dn" and nine "Up"s
    in any order).
    Or you could initialize to a total "Sz" of +18
    in ITensor units (``S^z=9`` in physical units) as
    ```julia
    state = ["Z0","Up","Up","Up","Up","Up","Up","Up","Up","Up"]
    psi0 = MPS(sites,state)
    ```
    where "Z0" refers to the ``S^z=0`` state of a spin-one spin.

    Finally, the same kind of logic as above applies to other
    physical site types, whether "S=1/2", "Electron", etc.

#### Putting it All Together

Let's take the [DMRG Tutorial](@ref dmrg_tutorial) code
from the previous section and make the changes discussed above,
to turn it into a code which conserves the total ``S^z`` quantum
number throughout the DMRG calculation. The resulting code is:

```julia
using ITensors, ITensorMPS
let
  N = 100
  sites = siteinds("S=1",N;conserve_qns=true)

  os = OpSum()
  for j=1:N-1
    os += "Sz",j,"Sz",j+1
    os += 1/2,"S+",j,"S-",j+1
    os += 1/2,"S-",j,"S+",j+1
  end
  H = MPO(os,sites)

  state = [isodd(n) ? "Up" : "Dn" for n=1:N]
  psi0 = MPS(sites,state)
  @show flux(psi0)

  nsweeps = 5
  maxdim = [10,20,100,100,200]
  cutoff = [1E-10]

  energy, psi = dmrg(H,psi0; nsweeps, maxdim, cutoff)

  return
end
```
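As a quick sanity check (a brief sketch reusing the variables from the code above), the optimized state returned by `dmrg` should retain the flux of the initial state, since the total QN is conserved throughout the calculation:

```julia
# The flux of the optimized MPS matches the initial Neel state
@show flux(psi) # expected: QN("Sz",0), the same as flux(psi0)
```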
diff --git a/docs/src/tutorials/tebd.jl b/docs/src/tutorials/tebd.jl
deleted file mode 100644
index 7dfb452b7d..0000000000
--- a/docs/src/tutorials/tebd.jl
+++ /dev/null
@@ -1,46 +0,0 @@
using ITensors, ITensorMPS

let
  N = 100
  cutoff = 1E-8
  tau = 0.1
  ttotal = 5.0

  # Make an array of 'site' indices
  s = siteinds("S=1/2", N; conserve_qns=true)

  # Make gates (1,2),(2,3),(3,4),...
  gates = ITensor[]
  for j in 1:(N - 1)
    s1 = s[j]
    s2 = s[j + 1]
    hj =
      op("Sz", s1) * op("Sz", s2) +
      1 / 2 * op("S+", s1) * op("S-", s2) +
      1 / 2 * op("S-", s1) * op("S+", s2)
    Gj = exp(-1.0im * tau / 2 * hj)
    push!(gates, Gj)
  end
  # Include gates in reverse order too
  # (N,N-1),(N-1,N-2),...
  append!(gates, reverse(gates))

  # Initialize psi to be a product state (alternating up and down)
  psi = MPS(s, n -> isodd(n) ? "Up" : "Dn")

  c = div(N, 2) # center site

  # Compute and print <Sz> at each time step
  # then apply the gates to go to the next time
  for t in 0.0:tau:ttotal
    Sz = expect(psi, "Sz"; sites=c)
    println("$t $Sz")

    t ≈ ttotal && break

    psi = apply(gates, psi; cutoff)
    normalize!(psi)
  end

  return nothing
end

diff --git a/docs/src/tutorials/trotter_tevol.png b/docs/src/tutorials/trotter_tevol.png
deleted file mode 100644
index bcf10ce93a3866da690e015a5d57207703c6b3e0..0000000000000000000000000000000000000000
GIT binary patch
(binary image data omitted)
zNXa=5e6_rOs^^3FmusmS!_OEl!Rl7{xk9_eSVCy!V(c^}u1-Amz}E zkTuFF8a-<6u)JVBI(K$Mwq(|!2ma>+m7iTar1!PNq4wQpxqzx0Rps(EA+h|$uXRiv zYcTESInV2y59hdg*ChG?yU*3zxzDed)_)j%K6)u%#G7M-xHej&;qd2T$Mc1%8vW|$ z)RUhSKe1C~{_Hv5;K9E(xJkY}_1gb+nH6iBZ2Q49RkJFK(-s)PXaQC7d$5TcSv)f@ z#;%l|b-s#LEAB9#2!HbU5&Y82L)84#1s2?FO&_&_f<`6kjl;cF%$sY?Pc??~YwPpr z_zHFLxNFR|OIDPoTu#+~rR|c{VNhf@HhiMyxliW9PxYA+U0<{7jRZ?O{bt^}R=G}0 zWxXsgG|Jr`xYNDgc1?H9dNH7UaVM)BS&r0ft@!9VGC22fXt83h$E?7r>#6wj^g~# zj85)|dc051U{Y1m-K4gK;e}3vy(RfG>*{Ya3pFORdUT##d6O~u-mZBz!CP|m^z^4* zz1ORY5u2=Yvx`Gr`nH+|mv6qT&+yEZvG7|}T}qj}E_6NQdIbw5QzT2MXt&n5rnbA% zVr!>Yp`p5rlPGpRj7?fmmyzeliqLamdS+rpcEyyP#3kpUoK(}K+KF05l|#wTu0Il{ z6P9f%YI@9DN^fP2vYxB2j83x6vb|WLv+Xo|Z#JW${X}{}K26_ZA4l_O(%~sew^GRj zFSlpT%Hx$qh0X;f?@GLyNs=l&-p;)%vQt=%53yrVHE=3&SrFKAYNV^>ZqjDX5;kxy z?CconZbuDFcunmeXS6L_^zO6w>X-Cx9Phl{_n}XFmZz}2k72;QKqkk?E6#bfr!3Sv zdv3g+l~rd0vY0ym@ZI}d#Rn13{e-sOT$<#RlKLv>vFtcHf;d8jz@2s!- zisrc0fMB0xy27S{oum;Cg}XgpjcdFyMCX_7Wxg<3#hUD=&m6|c?91yVg|%4JTP`3O z?5UVyqG0r1l3v`x1-y%xWIgEn$Zv9+X=7~ym&;$sKW22XBHxE%eZ?5zY;v}aI_Ie_ z5v{=RJzB^h6VeOg0eL0@f%7HKW~9R&Y#+Ffm{0L2o=c`ygj`=I3e+V+&I>`@wBWWp z)G0Sm6t^x>mT>#eOq-1uAyjhmlEUQ|kh;+UG&J@XA(eD{=HGajn3P(WV66AworXY| zi`vUJ)4ta486G~Vyn3X5I7MJ!e8)Uur^8rakN3Rl7DsahJ!|PaDR6lq!R9*378fr< zPJl5f1V(fOLJUSk;IA|hj&0=i&u%Q+r#!t7i5l=6oKu4)}Q>k{;qFaPw{E5ARgOUU-`5*uD z!Ef)p^3yv7giika&Y%A2$D@+`_`Ux`i|>T~_FF({X>v*aKTl1X{P5@tO>mG*R&uKN z?|{wlf1r-w3+MOm`0;1!6Y(Da^C2P1a%VI>h^7Y&LiNXdWafv;bAFcPlx97=^RiB5L@|O&5rSw$F&|m~>r+VV0S_D(5xDRe53XZR$%4_>k@$uzr z7%CPRDJ2}~4LA^VDzY`=;Q2>#5%=;r~}NgXV#)udnOQ zn)>>dj}=#YQPdR-uH6qOY!er#)ZpXMPm(CVS`iVO4q_Wc=1?=ce0Gv86`7?vQ8c9WfrMR#$lLxmull1Fo4aNSTk2G4DEa8R*;Q&Ntd~;l zWIj5sGGpN){1_WGK~Dln4?fOg@|p|^mf<7{zv+c$IP29OzT5g?4b8t^~PKUE2}T=DwCMgv2bPy z`u6trQ-Xptd3kw`n>g(KgakdDAS75j`&?QzxaHx@ zi&RSIjpqi-u(*+k+S!2;Qzs{Zm!B^t+^+ok^_orZYc65?K_+cOPjhS$&nao?j1*17i{*~(Fr=^pNu~cupSEjEt#4YQ>#813(9I5Fpa<)7z{iXNjE;Ees0S`>- z>IRz{4LUu=S3KXQhgw5JLnyxP5KP6;*v#yh+jKWomx230eafiPE|iH(1F1f42=8oZ5@YvR+H8#M5A}vNrw&+tv})rssMLI2e5cu`5wOFY|tjO18SicxcXdJwztN?~#Y)*SYP@>9t4vUvG^P zADOeWFMaFtc|332xjH2;W$VKS`3np(iV=!TT)qeVj$o%I0&k|sp-$#~oe`O}os%;> zoj^m7UyyJjB4Q`#(Zf;5U})H!Wcl{`l8=1@?fv3anbu=8`MJ!@FE z9!g~OJgeM?(?`BeDvpf2Nb#EF8Vb_CI7UjC>Pi;?xpa(c*duRLj*gn?yY-l#64JWo zq!oeLU!8N+wMwL1v$Wmg)#SA)m^g=nnJ{L&f~#TlJymr=OePB(8yj|^k-rwO|EGtY zWPhE)-_)o8;CQn~0-R_USXR-O2i_v`Ara5ZCvF4^Ro7~HQ$7o+e26i9>0V}l z`rx{(sAu0_cq-XvrKk9U-2uW2hqvYfSKjt4KhhtWS6Njh*Y7bB)>I*mU8D$!$QVRj zt(vHIX9P1RRB}|W9-BjPV_srLUUIXjN@C?gBOV zTYn#L@TVfBJ-HRB;c7?cyGwouc3E_y>q->s(dqP%vt$!m(IAjHS%R zm9^KhK2$?Hx>e8LE||Wce3n@m)f33NBVb}t?((i?2(8HK^V($qx{Hh}fedZp*>e`h zBK<2L2w9srIpw0-DvksdZnrday`+3rQmL`Sh&9r9q7fb*Uh_~k;9M6kVcvxNr9Xuj zL>-2aFZ2qsmhTO1U9TeM{F&4Bxmf%L?fA2rsU;a;}h(a7M+iV9{d#{PVI*?6)yhC~K;W}=vQ7hzFz2TEjs zMt~I!gXP-|9FcIF=1f_#FK5cZo*$dKOqzyxH7-K6vo_UcRRDB%Ux7WpLBZd-_b+oc z3ABBd_t`IOfc#G{CSc+%letoszF-DwAT;tY>um#<%6ODpgd z!nA0|ey3S>xWeWX)%Af}Ck~+FbrGzxLvEdm+EjBGE z?_4#gN;weseX^a4ikpqwZB~qa41*qzOO5%YKJLb{&w_xqI)3KRO=JUfJjKtqbYr$; zOs{+8O<>-g@7o+M5Tue>H=lStp5$&=y4Wo7WQv}!Mes-dB~p|+WnQiqE?$gL_3AQ` zdvS!vsNgJ|4$YQFC9|2*W~Hd0fSLdN{+~@Hy7)nGWF$g{FJ&pYBJ>ao=l8iLMyT9( z+$UaJCORshmOd)}v&nRdoXnXd++LvCSPf}Ubr_Y~-A0~(3OZA{ml=xB1k-V2zJ7fC z&!!TcdEIVe@f2s*om(rzK)jF&EefrjqoKafOFrs%f2)E`I1KH{=2oiB+WYXfupnNAF2okl`JQ6u6W0K z;7OiWn7B=N*Qvf>)9dCY2xvVSXJmB9hsqg{XX^Xt%tv@-|0nZn($~opi$2QcFY-Bj zzmk>gLyO|(h_&C+REyt7T**lhk=3<|_1!$UV;sPG@d@mqd6rUHS&5u#o8EqeoYjVR z@r%*$>KbQpMi#^{)NO5T;X?=ciWj@MUH(V|Ko3{tcT6y9`a?`iHU9vHF-C-G=8rrFh<2|{fE)dQJkhP!3Ir`0(gQL^w5Pg%2t;0W zt;SCK)RXoD`}bD^*IBpNZnvUhhXFXT)mtAF;Y8>dZta>J)7l1(t*e*d2e~2}(Jzj% 
zoz$SMa|e6lzWgHav0}vWJm#Cb87h8q&d3T*%n%u+&g)8`zYnweZ91-bu5OxF%_G-} z>7W^;7Z`@@QLBD&D&94REvtY**8FEn`;@6&+tH!9mgc@Ck2mkG+%R=2QFT-W_TWys zOK((Elqd@4@XriFv&_z>$LfGPFbIFsl-2G&J743Tv?l`(AKNT3_kT}|kA$_kQv7A= z{&udZv*0v~In1P?WTg6D6Hl;{nc1C|aU%zBfcp>o^H9(Hmgsu27$@6{lMd`&&TO;F{#PdX|EjZ4wmQh@KIG%RmKMh_C%s*Q zF{opKsWaeK3)l=dZ72AS_%X~DPi6hHZAz|GZ50%7jhw%&&Y5Oey{7^locowYK+-wH}LU z)XxsWPY2rpTygzzmk$c?qi7-P&MFW}tyTF6Q9Ev>!~kDG%^jV-ErO|ddM=%bnK?{0 z;6Tjoeg_xOs~Sr6p)hJnT(f&Qjm*}BY9j~pWC+d|d#W>YZ(~Hx+0-nkq6OO#RwF4v zBk|9?1^v_!((BiD7plk`Ahy1J?}HN19cA9DHCoyRpS;>C44JV7;|0<+C(z>Lo3#>D zEv$B8S4QS4W3&x>77m5{w85X<=bz+&Oy9AZF2{bTd?9Efo+@+NsN=9T$gN zIjDBrPMTz)UM){`)l%@6s`>iU?c$zB27={9rG_fQiP1?C?llGF%P%`0XR!q1W_ttM zNpx8)LC^t{7GuFi@1Y8?)Kyu;hfiOc68?uHNrMABaFkt;T1oD&DDIUAZo&<~M&+P=CdVNJS8IF^?@{ zU|t%=AWOwpGM#(>!Gq^}uc3mM?|h;f!l?t@JUPOACRGE^Nxs_&fn@<~!K&F=UZJ6( zRgL@=OphNw)(X(&*u|r&2V5E*)Q}2mqxM*zNFQ0B%8poDTZ?8yJ>$nrSu&U}C{+}>vIugQ!;AnwINPj*diWr3HJyqiXVT?{`A zU&+s?*@|POlb-V(m-ZgSIp^cV3^!4_rB6JHabmit0&Em!d_JA3A~{@C2c^GE zA{esQy1%brz^(65i}S7?eF^a2X363skeELcO+j<-a4=u~SGIlmg>&T_zB?2xyB&}M zuqC1IUeiHc={#_s0~iK_fTwI}&=qz+GoAPdtKJsKb$N|Dstyqg6U<7YH^B+7S=t1^ zt{=q*_`G`gRUMjGgFF@(HO-;Jhl7|U+;Gh!RZfDtIA}bENOY-(pd}^TAs8AR9i8L3 z_ywe9TDZz@w3E6R8IRP2jwvd*gE|x>c5{A8h;q1w-z0UBHQriZ zcAR=yP}3wb#Pt6C`+>qV>z@_UUwjWi_X3u&rn!BFbSdjw@&a4S8Zl7ft~g?vB}M9@ zmx{%9BnaCE+xspi@-KAXoM;xGdLcTV=#1%iZVAKNj%o%YrZ^*C&ld(I`32Zm z&(*MgU8yhQ<9fV$yA+7oK?0Zm=+ig^(s&I2h1 zM&zUX!8ms)>@KYulKpU>@eCp;vS&Z)yk8&#>iL2I{+j%d~EViLBM|UlP z`Z!Su~+`ax5ty zHI`7|KR`u@P`gz2(}Wf7*)w{m5DYmhsqtKWsH2ez{z?)`{%p)~u2XBQk9T6MC>#}6 zJD^2y`wj?T27)v<6K4ia&|OpEhSVAVKe;OXar1Vlf1!CxV{mNktl(sPQZMS1yoprpDHm>1oH< z7o^3Fi!T_kxN8&Meh?5H4s|R*Vcewwyv5M-Xm9M?qr$rqI_3597RH2*QGg!QSeUY2 zQhj9yhd5}}s9o+!(arql>VF>W0uZ@wDz}!89@iu^Jt{spwIG{_PaU_ms)0gZEl6RTe0r|s*m=dW8bAtYKoQ37#g)9{eC0Sy;pRf4=}hmN zq(hC?1XFPE^T7Xz&FQ5@57K}tgUBL2C)fH;^7i&_gli0_OJo!>>XBBJT!Vh+M^al0E-{E%1Bw#fX$Ay>fkdYQ%eJ!?53_ z{UXJjp<(kC{RD!B-xWBOU8m>-fd_L9%=xHZ-0dB{%<5Rn>^MLCHr(Jn1GIDnaishs&4Wx$m$Tgx(5od1|guvlf5TpO`EhOIWsgYuzCEZDKnfuGXlUmOY4xw5%$RdFAk%uSz)jS2p3p6wt^(!+|4#k}#5 z@>H6;4A)9a%CLJ^Z>O{*x6t`D`>qHIGMqBlWRhZWOv8z7T2*DIvmNlI#=iaVs9>nD zpp3@zqm2q7Zx>DjIP>_K-o0o;Q2%mYJ#Y;*j$Q6D@>^illhV9bX(S_I@*aft+$9n( zGMDo!`xMz$=!crN+DNDb!}b=hf5gqzYp6<%NknC4FcGR;Bkur$?UQ}O4lM&knOVb0 z5PLfIn8#IjXkKd2k_r}~t^J;$z^aVAeg60*&DTVHaX4k?DQEOSMOypI|0qL&ybRv* z0`vMXw2T}7mX$7j=Zn)ldFksn{tuA{F7hDarL$lF;N*7j4i$=>*=2mTv4+w9MS1t_ z>RL_v@{^b)Oi7p&J`ENP3Q6weblA&SY$sg=N3sV3#KYX-xfcj{1Q0X|NXnX-6$i{q z7rbQ+m!6(J@+gEa_$JB0{BoVgl7s6pfe&lOwE?anqw)e)tg~*WHNKaL;FMpV0ls})YM2N?+ku*p1iw!;opx2Z&qf1(hsS&DP{dmE zh|^JSSK4bRj)e-kx##?Yk@_n-6}~>D^Y19k8-4$Qw&?GfUY#9a)HZO{nhM!mo(!yz zs#rdCnXKc|u*_hP$F@Uby)QPzsOrvsM%~@gvtl46;H>nKg_M$%*vZoVhMrv!yYEU~ zN{o>i$W?6^h3E+B_=e&$F`@#D*byUz4qX zXQ%hSvS@m6KQWP~qRuExPJhP|Usdlg7|z%U!)0f*aVaV)jsP^$sVM9YgBU@x4XrWf)W6n*9bZ$X&n`@*iZP( z`!!T3$aXy`xqttDAfsL3!`&bkPM}3?Jo1qgC45*SQo`e4zxQ03BdE*78GT-m3Br|7 z{3u}$4TXDP3!bzOI=4t+Z?bNt{_hh4;tfl^a-Hhz&M{S7quc({NX1y8`Mp}~?Ms7A zso+a;YmDX1@!43}tuRoB$AicQuOR&aL*88d6O>T#uBo~tf}eZv_B0-R@uLD_pzl5N zCdYeq_McAoN!y6IMP>)JMwf4RC}%eGdr7N%$FXXq;9R=xrh&(7(fDpq4mb%oi341{ zdx!wT92ndaXXs^Bx;~jXrd_z4N#$3b`@PcR>v|evG{l*-a`!x65P%Rq`G=G{37|*z zQR!dPolwGO7$x*js=0*m0)NY~IE}a2-Lr6Zu9gsN zWC4pjT%mgLr}FsA9n0X36>CF+epDjZD+)?mfk48g0Dq4QWafr_gD-fHc#Jfm-JulsNXJFh1}*i|GNXwI9IwrB{thS6Bhk0iToykqX}2|eP4j6U%^yt8rSj>$1X zyC~)Y%I9gmH5I(|6#DsH4K!+UNh=?qa|>K8_Jg#V(7tLVLWa*p103{4+b#q$)fB45 zo(^5>n8>uDt4p_=`fvmk)~yc(M-j>?e|-wnQ$lZ+yg#xQ_WYa*SYf+%PfY}$9qJA) z!>BSIHin~M!ux;D#{Ie1SdjTDtx7cn`O7iT?&aRR^<5U?7brFt<*9CNeTSNz)I&)w%iv)^ 
zAe2IZ;?b^F@g_Li0NNRVh+w@s@YdUStRcp6x;tlqd-D}%WB_g581nL6s{35zqa^@w zL&tr&>f>X@=$`O11Rd_5kk@Z$<`0ZE%R7CEx2&mU_0#u7dv6Pvc%**U#ZN=#M9McR z&ZkR_gbHFFM@XO8rHYJZ0`8hv+O>l)QV$X*k~#Dr*ay1V^^icVo8PzVkcRUeVck}p zcn#pPj=1^H|J3fRji<1JDv}+rJ3hRa7Z1PN9h@!JVCWwK#UlSAA2EJ)IXO8Vi%lYp z+4Bdd5Zf+=-*>xhFOD%8JoVbA}W?H$CcW@8Xc zV?PMC+%x+8g#y915&fN_9uqeM4Ju9~fznQP7^oG+?dJ3SWzoKo;16s>#k(Ptn8`@o z54d8BJz{&Y>q)y^PcD1D{gC1RF2mnUTH)%T_sDF?9ACw|G2pfAWO*>;lAt??x3^{j z8T|B$Lu!U@m&_Jt{zJ#$Py8vJ!X3mcPWQ)=;NqxQKipH1ICY9jq~AeyH^jhy+O$ga zo*cV@;!bVce+7`p08XujYU?H_>yYBl_a4CY+ZWm8y!ivZyP5jmz<1#czGq@HQnu%j4_j{sPFmOd8GR_YVd@RT|N zfFti-@;VBlCA_x3vh@dQVjbaECXTUhvG)npg1XY;BHR(lGW|}u0JO}VU7EWa4*Dlq z976Zs_8t!A&I$xBZH!Syh0gj$PM}bF9INSVi`FgzZZMiQbP$X^ceYD1jQ5 zKl0BSav9Gsb6t|E-*|Hp_NBc~=JM^!2jx-jwp9~*UPk1dOFaNhVT!=jqXXXy@}_%E z=hT{jxQ;ghw0saS321$w)ryW;vBJCnMzDX4@&7V>ViXvhW_INbc7}>T7|>moGwf}Abec(;J9*Mm2iT3 z6)!Mn*FP?Fm3(<%ZPJn~HAaFeZ^MaBCGgEB)unjj@>qVtQrsE%fu&f!-$zSu>aTZ! z(LUeGC+m;21U^C1vaRnjNYE8d=Qf%EZ+5MV!*>uzJGDQPtsMIV=zI?88hbJqS-@>o zEq*jPs}6G5iBDP)0QTmwHWp{<;_`Y@ZHnNV>dt{EMk`TRHq{-xXrO5upW~X!u8;-} zx$5jY0GKzDDdCJ(MDTG+4}u1S0@xs5`G|)-Rz{+9=}gs!`ho1Qon*=74h_cv*DhD< zb6teF=C88$YXJ1OQ=IE)_WAlS0JG9(Z?su&$-rtW<>plt7r=Ho<#MT@C)~swE2v*; zu=W~bnaY_!5|XlwNyk}!ZmH2a(h_3Q@|cA9$j;+;;{$X3mMx5a&Ga(0At|aM1+=Q5 z-z8*DSXzCPY5O#$tDTA@BqDR$*y5vcnD<70pPk;4C&3Jq|4*hS*f7U6$D?iTo_P@` zT4JtLaJFJ+TRv&T80t#joo}nx58!03EFWzM67)av6D90hwOyXyu@*1Vc1#&7loeC4 zLy;t`s8)%H_Mi`Ggl2I8#pV8^I{fGTk>8GGc^mWT$WTzJd1LJT2Pev{J8vRQpun&L zH0FUJ&qN)&8bTG(R6)S%&4pf|fL#m_Tf_AD~ZORvPnBMe!^V5 z9QS`pu#W^82bjZPue%r&CJv5oHgqbZbn>UXtcKWt7_1n9j|&e<&Lcsmi~E=t-&ul! zt_cJ^Gqn;T>fizr%wx7vap2n~AxQ7m6;vabPY zn_?f&E`qoQq&lEk8b*MJ2&@3aC-Gie+rSQCF(Ldl*4btB0SB0afz9L}1RY+u`S%A$ z>Y~{2-r+USF=t_M+``gw1cx0tB_dKVZ&~JM^1zM)B=HqT<~&DQ&}cM(^oBls`0%zn zRkltJ;_^#fI?E*2%Zbj-VvZo1uK{f(81SNS0ECk4%rfYkaWu$QuJlVuI4Mcsd?P{P zLF`*cKJtzKjYqwlV{D@dPF|n#8VmjyTl@;7`YI317ynbc7MK97nwr_6>+P|leYc`M zYphxP7=Pya)z1bU9JF<0x6I_Au;;|Iw4W5XSdR|fYtKwJDF0(DDDlfIui4jxubSaB%l{aAocom_|7>)Fpsy1V%fGqQpjam| zw)EUG&i;?_2bEtQYsJG@3@;2MlbLa*rq-S8c*!;ExRC1i#<= zph>~W`8-Tr`fNE@y0%maD)YzK((RYW(1xHUvT8E9ase(2R(R*3u&r|W#Y2asW9&0;E(bAy!i{i=*lQ1amGkMBi9+BZySweSu7F^*6A<*`%- zpj6847faF9sUgoqU43>@@2)+K!X4GIgdgKO=>(J`HAo9`P~N2?qkKlMrl!gG1a%9g zp_1+8ws!f)xMlj+$G)Zgt2<`m@s)t!eX_Q_cTou^1yCo3{uuujvEL&0Tg1Mpy5A!9 zbCL4575jS}_}hy83>NsmXvNx<=KhJRc|SQhJU%}Deo|7{NNq%Ie}DhG6szghdr6en zHn;xUizsHQbPNeJw;@~b9-OcjBS&PM|BX}Gk`;xFYrdD#q<6~Q=LS7Cv9+e4QVK3b zy^`|wLYp8I!1r^k+MjlpdOGWSj>Smj*)tOGTwA$-51MXP20-ElbZRHvVoIhMqV2ot z^$cHO`7j10Zq;lNlGGb3GuECjw!`06cc&U+{%!LHcq^FZT3DL#Mxcdi5`zYY}1 z6VKmWIg?sTjKALsvhY&193SV`GP_s3y-6}9cu$!Wb>=7Z$j-Z9KLYDRrh=$gpWQ48 zf<6Xr%2KX!X^?GLy*Cj%T?OI@5ov@62=ULaJTMGIrvI-f5(g*X97Q`jfog*ggWAbX z$yDLmX!`_z+vM$~cGcQgUOjwB-=ys%j-I8!w?TlrMlhfuX^Joa9(e=rYsm@<5JCNT zQjIrFqZ54rO!@rce+4QT1-An<%a}?WYo&pb*OZ#Z&Ml03gI?eofHJR;WB}M_X39T8 zUSh*l{h`iC-^uxeEyCwp)zBf&5 z2R_7DzM6BF*{V*dmbp|axa98VyMp@2;Bs~WWMB$J28YY%7kd_qFfBbTz7}5)PeW23 z*RqAv$^^)S;g(&h-l`&`?_XJ3VO-63NAN#rBKy_hlR5_C)Q7KZW01{YXNm2$E}>yh zb2dMZ#5CHfm_NZL)dG0QR=uCivRYJ2S4(*JNovQp4H`vh&ZLPC{w=_y^)WaUmi>eI zWTVGZjX&^yl>=xDqr;31j{01;FQG@H&A)!E)hCWEa>I&Y!0 z7q;{tE&3TcHwoofwj@tdyyTO9zW$T~(MijyJ#!|N4;m00@dmW=U{7^@Vzn&;x2t%p z^jJ;_6p8&=NBL{ep#Zo)fpgzkf20I>f!orgUlyt)`s0uLEoN zm4hY>$B*(1)j&@5Ku(Uue(PM7+Q?!iz_Vx?KgBUk&DdX#tur+>#e$}v+WC2hiy|gG z1c)<$Ps$E$?&P?0IH=Qh?Ws*^0J* zL61B>`y3}?yM`cF*ncdT(!wOn zE|DMZpL&l%Fgu_e89OC~DK3pD5*g5}DO+lm3Br3ci(MuV?dhr_T6xSwFiIqt;cs8i ztz?nN0|s>yC!ucSk;bnDv!V>Ch5~Pi6o3jvG?3WSYi|!)^Eh448{JR~@n zgR4h*^p21GBwfPl!@WcmA&;1)ngbSeC-yNpg59gLwu{@_zMk58Hq`eQND$NA$cwzfuEe%rtqC 
z4kftCSNK()Pn+O?#!>(lkf^NcD#3v^BQ=GvBj(a?$x1?&x|{S>_e0N6-CuZFmljjg zdEn4Vq(V58)xVl@Wp+UCxJu$R>9<#!zmBLhbDEJu&oJCCQZSnqFy;4Jo@A#ve&OW5 z+BpEJ@@b^22&EVsb4R%?H!XL-DQVF~jB*Nm0{NGRC_ryia!xY&-+BHA5#Aw?P%)mU zrzxa5e8?LgXCX93bC^DW{fQT{j$MPr&G6=G8#qe|k;3GSuN2j>Erz|EX?=JT1$1PM z*NMML(H~?b4S|r2zLI<3i!d&NkSUbxxqvDQu?JmVW}n~P{z9YQr;?_e@#Ay`ETX8K z!(M(`eYd&x1(t^m()>m4@q%O=4PB)O=fyCzcmI((kXHS`QRwOy)u#K!>pCTAaOIbm z2w54C{CsfxPp0YSZ+|j3=#D+7wS5Z1e!W_cJ^G5Ux&B`K1KYeR>q&sa2Plp zdwsDJP}0#W{xH+zw~Huo%oMYSqhpTEz~qG)>l5z}QgZdyYc{&UZm_+2b8hr$Pt8i_ zbFK3yxRYG(auMD;*s)Wc5yXX+NDl^dCv|<*7Q8&jz$r&7+M+LCj7{+3ZCI}IotZN< z-CvGHl7NNE%gfjEKPA=mv*IVX`77j+$Y$#B^rQa%6=2GBZk8gu{n(~AUsY?*n{&I1 zvqid_z!p?Z`YB!@W1^Ae9}|(CzFHH~v;;-xEFTb^QGQ5qgg-Ad3f@#m_(SCLV~5B_Z`q)Glu%_9S|{|V>jrzAN1;Hk@j=AX z<^I#?0dXaL03vN7e()ivoQ6Tr>YzWLe&L9Wf;wtq*ZgA!Oeu zPXmrW6LM%q9)iC0ZTXQjxx^N1*kwM=44bj$J#ucn^(A{9%wP-bYFg}0BQ_&FOJ$%P zSNNYWJY0)96z25G2!hwd0NT&(*=AE1jpRe1rww4DPJ^&~F|yIOUtms`E=Ac46bsom zeU>B0NjNQkSgsM9YzeTNY(O#(I}+Pjf`h6H4hf0oXi|cE2ed>ja=-ed3?DL6flpP_ z#>B*|+0t0b5}+BfG5{Qz&mr0ew;<4XU@AWJ^C-1|Ba_brl^XR+m##{`otcpj4*8*u z&RKd*CX64_(^FSR0PB3}UupKK&X?RkaSEgu-v=8%iniJN==ez}oeI=5Pz~09Wnw4|0Bj9umf%owxMq-YGt$Z*_xHGedAr+uqkvw<#DNs5;n6L>+(fLmRs%$SZsh!kfy)@s@w*b>MIA0(M9sWjMD? ze&xA!R{CCaDk%>)gJ)GDeD;T>JTtL>hLHRrD+Pf7C4Y1>hEdj~L&$1S*T;ACtGXl> z!EKg}9T^&coZaWr419i&b9Qf3R7fb^S^~hY<$vpTG|7w4n0#O72YogHxA>elWnRm7 z$!L!MJ6i4`IdJdNV}*EChj+6GDL29I=kpZBEno$XXV2b!kx^`lNB-6EfFA#Zeazw= zs2k0|ITB2~R7VTD!S}<5^L{vY`>5=lf44}G2Z{p__y?+Lmk~}|srQ z{z6A55S8re5EJAffYpcwqRuY!HnmLtLD$9dgKtQ2SB$NS>K-pf=R3^y7q#Wv_U#%H z9KweLM;n%?(3GTjN3*V~s}Bcm=v0s8xnd$+f7Be|vvL~H(tn89P@iw%^L0~hz1P={ zhA{98@1{HMKuS=ikv})+_H?J-H7m-H##f0Pd8#x!EnL2ewJ1pu~AJP-akeBA3HJf~%#@oJ% z1b4Vb&&skBR80ijkX5YI*VLP4HKB*U(9nr#F%(c)73F!dUZqrmlhCHe)Vh-GHHU?UP&ig*9VMpBT z4W8ELUi0lh43nT)^MfVDRn4^I+d3t;mfG7Y{C0LdJA#DXz}XF!Ug-sQ^ij}@J{F3O zx$G*xH=myCTeT~E`4G?c#)`jm{sxGs z3MgNWCK5J-mw_3ymz8}F%peUggYnmh?Ts)A75A5?do-S2h`+mjOFU^gzUc~$*c)2! 
zQ4M65BumNZvDcq^(^#au>PG9M7kk8;Z~q6yzx{}W1G1AKE}RBiQYVWvzQ9g9?FFXB z@m|T56L+0FmZ_bs;-j8aU94vsW$I3op`GaEw}PzXMu){9?0vZfY-0iiyqV(Lf$5&IMA67Da4o+Dpl}qc8yRQH-<5)u4U#tf7vmg^`)ri&z~=; z=m3^Shb5Xh#Va{mdRdc0*Qj+SUpPuO6?I?zawIBv$uN6;aTHwcaIalOg4gKfs3bw6 z!Oj5}EA^ecDHXEv7zmJ#nbx{t5_UxKCz@(Pnl+#qphWx6K_;; zbA`HPp_A>n5iduN{`4ld)3(@7QGpkI+1#-e&ZD%g6m*E9A=|#WDcdKD_L+XWGD1dhg#==R+d*>|MFx(&4>-vG zpn7IVd?CE~nxx zKJ`v0Mv%>zwl(D_=rdtuiwHYQI0%}L1}`r^d}5L{P3s3i(vY?3G7YjGxm$=>xiTLAO;5ST|Mll6JEs)G45 zS8~mQmga_x0N1MHYFeSNHd-Ly0ii5Xy1K8gMST(M-v385`uEK$!55E!-*5x(UzbY3 zlp`=Z>i8TTi29;IAZbX?qZ7I6?0gp1Mt80KX;{C{>Ofn8{g6w_J70pNhrcrLhY9c# z!zf>!1w^B}dfM#k{shC5T{L@Jr_Vp3Y-mvk`WcuO{yW#%~}txlRF0P znu!I;p+=D*6x)#BP8qm*RR#2casvO8nz>B>(xs!oE6#PDy4;g*o8ZkD%F=c&8x|ZK zEH^anrI&e`my3@t_U>J3cUnh$2x?g7)%NPDY>#V~fq8_~N+d|;HxQ99!NGM>vA{Zd zec}mExC4<%X-Aqc{2E=sje$eBhyLV)5C}Ag7zdTe3E|ZX%I1x_(0>q<&|Mw-vZ^aq z>G&)O_Tu!r%LJaU1JB=NlIjhIKra!MSB9(Wcn08f4|iTm546Acu0f)ztKpS`ggojo zZU32H8Se!Ml5H+>PLM)B(+^YFl#HmNg0csoZmZ#|{Ma{o{(}U_y8_g9KL67tQnVU* zWa=x4>8dzfFSv|l&SQt#sD$_{^REYOoC3~c!{l59CFv6g8F|TjmEW)VO|;*f_FHa# zE1Tb{^S8bEZ9V_Layj099Vqo&90k2~cYE?I8!^=(V+r=uWQ+o75FjHLiZx5=VC|IEGUe}Wl9u4=n zZrktUMZzzl@i99HisaLao)TK+s0yI(ERPs9l$2J@AErr!U39pPNgx@8Cf~r(Qx1tO z$Ww_QX%yYbX{K2%O^VB1E1_wY)KU+5v+S30_F0H8j?3+uMA6{=cC1LaSB+~?|FvSGimW!r2!w&F+E zCnqei(@+p>XW(%U#w ztB;T5pBrQ-{u)wID#1au?EfL{J;S0(w{2lTP=bLVpdvw{ihxRz3@S;WkRV7_LCGMY zKr&?nk&Ggt0Ldaja+07T6eu|q86-$3C?rXSZ!Wca_da(&_nvdU{?Sjcf0WXky99Rd`6Zl?{Ds{ngwfNwR zF&^?>>KDoeW&Xm@9u_;Ly&(=?HMg*^GovE)pzNq45*&R z)b)R!pgp8rKt$1F`gR_ko)$bk_D?TelLH+3bT9)jIb$nsiWO4@hTfeI56Bxd(i+Av z*$P)(cC4vLWf(HY z`FFeMHweA#T(Dr}Wp-gwuUj|WG6Ik$v}SWi8sJJN*w4W$8-_BJ&fw<^iQv;DFp;nX zY)!q7?9eCJ0G7GqVtw+BN?^}{{iZ_WLp$U@WJ3+exYMJQgtL%a4kwwdE*?jF;l zJG&T&eF5S3Eo|LBp&O(7)w{!aJVYnb-2CT>iH*5Z{u{75`qi{IP% z>M|-;RQ(XRExqEWB(na3lPS;iJo0P#i)f?h^KV3(pVU9xKM{}8n^uge&i{*puqX&e zZ1;N{EeV+v%?S10!`HlBlt&54H}AI|86}Wde?9pZ^X7K3PK6O(Hg;((R!Nyg?Q`?GlRwSeOA?GJ} zK59A#8$h z8AK8iv#yMLSZu&SmS<|U%EId$^*j$3N*qq%ukYZuP3krDPO`2RPh4x4{5$Aiwy0t1 zUM+3`6R^W%YaeU&>aekduWg(<$}0SL&BL#JJMhI_DgO z>$y{BvOLqJ%A@w`=C5=0zyBRWl@)4Oi7u}PnCvTkU1Gn-Zr|-`j+Nnh47>&3g9z^z z>7~YpM8=_q#cb5(rl1z(UtR!iUeo`AqU3fUluZuD{pRX<-5Mdu3^}CY<$dJ9;h0Na z(sF}7IN;nif{z~XS8N@^DrXQz8T%>HFO3SU(GIqDAo?3GXps^6HJSQfzZzsL!YE(_ zoT#F?D>0FOe#fvKp)5HxAA^y`UXrSFKlXGfJ>{yBp?b1k{bUtuqP?s+EMK?- z{3)P8*$8TBng1RZ>)6sCyHMZL2lE_dclWaYpHJ!+S>jvPL7Ourl9c1O>2N%(#ru^L z+kt}tPIys~kvM>iNjp!*JljKB260Ol?v9#VS59n%=VOUtw^4oT!bSxIFBnpPPTkNKezA5J^YXj)FQQVr6kWwhnmrii zyWDnM4X;zAbU1ES%4H4*Y(1y=sp_}3PtvE5_3y(|;|qspn~B~4*f08!k>nXINj#_) z4qkhgZ+T{+iIl%7JtdzT6UW0yTJo;L^vY7%okU*Zlu~lLBv}g&VhDTMlXDW^kGeVKFfmJ3IznG`isWlp_ z{Mhba>5{@6DFHksie)f+DNvpJrVNe(_6Nvk+F=?7Zn70EC% zr+c@fGg52C!l?THRZ!62OyH0Gbd{nfA@f3F%HMb7R{>LTGbR6p4 zD7ot|a6tt@79Fb}VaV}|CcD%_`#Te?>w+IUFR8r(5K5tF-SF;N;o{n2-9<4A%-2+j0XHD*wMvh5A2}#wK`1*AGM{%d?dPeg zqIhT##9kHOcJ<-DgXW)}9Q;l{P+qNU#s4R_6af9HwIwJ!+qAkW%d~pv7b&wx5Gg3l z{!viA)4mA-H@`(@6W92HFBQolH$RYC!5T*_HdT*;4BB(2Jr5NlXytAy!7uM=t4-st z#iv;kdzeNml2vkwr`T@7fL)Z$9_PaW6p!CL*r4W(x=&T-ii@E)13mqn|5SCm)F|4;qMB3X*mMiLzL< z(B%N;9wk(yg9|)WwRn4yc@O-5PO#Yp-K+mG!$$m_VdY+t3#+_$oilK=N^+>))&(>` zvaKJ`r=dzYEsMDJ>P=zF^ZMz<6wF z@lhy}$=0d;h@3YAkv8lvx~pyPRp2_C!#;~U@N0oSU0i}QLI3#Rg*C0ahYob0xzs zVXE%M{JkGL%!llCa=M;ZI53a$;_i88)mT@qYl z3bS^#cq#oM_eqx3DibwL6BB`rD$BN{$HK5G6;2K}7_250+7B84HLyN%{@CdP=T}JJ z5Z_p4Aqhm9fux%}Q&bD;ez)6rfRTCb|3zi@cq~ayLiUmG*A>$E-IK9vlc>Mj`B_r^ z0!%;Zq|u|b0Zxkzy!CcVn|oRG*8y-U)z-^}X|{>yoQVGW`;JBhn=1rfMmKPrAudX@LLB0{{+krDfUCGG(HFD($?YMBlcBD_WF7?U?S zrr}4w=mQ1sj_c#xg&R8T*OW2QHdpEdgo+dOa!rt_{x5Fqzt?UUHFDUQeG&%zlZP}s 
z!6t2dwxDI`mS?D_k=r2p!tGlBQXU(+Im98Oziuzt*4acR1tag3E_-^|-ZE!0>j~|1 zdY3tian`W&0J1m}mTZ?N2cXCEgD(!jjZ1;mD@4n?#?7kqO%lxVz@r|>TOl=5zX9xr z_%@v51t~dpBo)5j9|HJIB2-#5tu?8=wxh8;I&yBnDJ8cbsli;Z?mV?biRv*#)cCGOvx!^Re{GY#?%=WVO^gvm`lt)Hb5`oR8{f$xxYnVEwqRYy+XnS{O zwK~}jPaD}cy!X&W_VOS}VB5(>l!F-Zg@(*}t=;!qPg!^%Soj#IcdsrF@B9>-9sPpDe~lB@esY+N*!&gY-&Wdap!z;u0Kc3FSY#Wc(b{{=+v|xCRWw9R@_5rT zi){vGg3FcX%yBgT`OAvh^>G2NTzdA#yBchrZbW@G{N_Pws)tf{vQevVlh67><~bD* zTxgPSPDx->QYamB!<^PmQwCw>;N6*dhsr7A^cSdfMDV13gpe`HqaIV5{0gm8h0?G) zBXN7Z+tA?!vZD<8keSlKXnwEzwf$`6ryeo_9=_2IPDKmc|DDI;Ce42@wh4 z@IpLw+YYf>Bcd*6YhO!0BayWBh#nORYS}W*Amqim&5i4{1a=-~7X7NuRo|?Zh?dCQ zg;@5Yy}ccmGr-|j;|j6={SnB{R7mLOcwUF1G35+mU;yg)x#n0g4OkZ!wMF*uC!>{# z*t!hx{wy%55IdVS`aYI9PhN#2_?xQO!w->V)TU$)Ks_K_8gTl;pQQ4xXA!msH$osk zQW3C8v8H>oH~$V%;xmjzT>9^W$ujQYVVPIDo^WdUK_MDnk(+~t5<*ZFd~y{mjw+`k zR2qP{u0;!3G#{!U6#kH45QQ){3Zbyx^9Kb98R?lCrb{FTjbfU3s;n-NyNH37Yh?NG z8EpI48;4HQPZy-;#ZZxX3H}&$3OhWO&Jk$m9LUITzA*YZFSZm){*y-#z(St)5Afz! zN~lEq)E42PDvDcoWlrR9e%Ywkq$92wjljQq3Fg8d>{4^|vTWE09uw0D1+Gc`xYEyU z5S8+Px%Er97pc;ZH-%cHI1wZLahf(mW9@as0ZZolAySGhh;z|;qzE1lRk7})wjRn+ zo0Pny2WuY8buc&3VTJvbODb1g>B>Gt*OkY+H?c28ypd0=A!X)MaM_pvf!k#PL4Q6$m0RfFh97LhN$w+XKtj zLZ-fI<)y-nCl`b~!P7Tkanbr~HNWM_}S z6`+22yn}m``Qfc^#_-Uup}pC$X{W8kv+{6nn!<^fxJj#y+`sb}*LhtfHf^3{JLv%o zJ%AMik9*-#~JnEi@cVQK5HJ zUz*W)C4sh{nU}g9UkDGk@fkz&aM<}&U`!DejvGCvefZIs>;!V25B#U|ym^k*rBxnC zWe1i5p2cMb_WKvAIn1#yVKU^|Lr4RCY22MKIMF5Xia(OadAWnoCMqI;#?zSJfQDQ- zDNJ98IxV6+7_gAKTQ(LfIeJQsz}7!Yv%ltH4F#wvt}j1$x-eE>VF%pSGD*5JNDx-5 z-yoMphRE(*wD-l|HP9Wrj|Tm$RWus@skvgUxmhWI5VWkO*eC}!3GfnU^M9Ds|o zhZ-71zlTwZ9RUu`bj#xz;HuCd4?LITPlw(>2Hh18+^z`l zT2=%p13M@y!T=cYN7?m5$JFaSIV%|6b!8T={T*0VmPC2Z+o(g!$mr~yJ9o6IeYOh^ zr%p=GWF(Kv?yhSri{}y8-eP|4!s3A!i;GTTw7Nic0}E_4{p}!TG1v6DF3p!m$f!=f zgQ~nuBv1^Bf%qNJp+`VI=1oo;@Y$Smg0ioi^YKc+h77)TA@a0#N@1aJw%N3&imw0y z0^9p~%g%TX)&**eN;K|;E(T!TlSeB!{h+sjDk#CR;rd&Dha9eL_YIZeB1Sb!j4IRfNzh%zYYsp*Oaa}OQw_;}Ww0_#wKJULkOa_S4JGH>O;|4S z-RvJ8haGS(zJgPt-fRTVRq*Qy`xQ%s@ATc@-d=%Ft=@WYr5@f*JJ(@p}s zSeNi+U@9Aw*t3X}l~9bU-|TsJ3j%aW>14-n3FL{Yt*uGfmZ_~_?^W#9x5K{oEc7~t zr#H4p_U^-d*v}UC(@{6qgkOT8*hlm^zem-usyxO(Bq)-upmq zKU%6^a5qWix_YtoN4>z{8bnom2kI%Skh7g!c|7I`-aqx*dToW}V+mTY@ZPQVU2r%m z{XJ~Cbb`93ey%TFgRKmHx-*UoTf|rIbgAF73D*yKqhh_%hBo2k3m(A_V>hbWonU!LK+e-RySCL7pOBRKb+s?mJ1c_Xu*uc!ejnJ~3TjBNbRO#O z^^@yCp3ydg+%D}qn9E0UV}xBEt{uO5wpYz-iAlL~)F|EUs=kW^K`6F>$LZ=RSKM2_ z9Cs;Kt46E0Me2Wsc9TW!aR;FD=1WZWhjxY!nyYt`?OY{Fla_$nB--$r_^eM54T8Tq zg;jF4TuJ01jT!H1D`tXb$qT$1+h1Saze*ZHkGlAQMFFvRS3F$(MWDjJ*&q+1sA5Sp zaVq0VrKJk~_|By4u2o-lT|e5ujz#)?zkNk2ItvUkRG{c%Et${t-L}z3I+p~QL2SH1 z`hFSkXdH}AcX?)l4MUnCM(od3paTcXtil>#*L0G*nM0>2-J}>_U35(Hq(}BS;xSm!Fwi^iL{ORxYb)fffx6Hmo3!)IGZHRw2HB{+Qvn9&-T~!hIZ({K4O#f znX?He3CiO=N!5BN+3n>Xb;@}{VL7!w& zrqr^#v(-|{gepbIF~S947a#FWZq-I+-2Kr2 z$9|_3IVDFz#y#U)U3vnn4psBao-``@;1cImtd4)(jep9shE^wDiaf|Hii;C>9$0yY zxJ$ZSUG*xtY)W0u>TbSG1iy$1&aI`vL)g9@SX$x~CH_phq*?78bZIR^hw~;!Wk}_b zTBtUo4G8u<@_uBUhq!d@*sF9OJ3nov8D>$I8osx?z4`^{8WHR6$Hlb`OM3OqSGB_G zim+h~MRAWm$N8o<-<#-bt~NmBm-Vdcjc+;cI6JJI^%z!})x30S#OniZ*uZ@8-0Dhk z`+7;&ddcTwTJL+;^@v$^@7vZ(tOIe4O&rv5)0uc3^8;zQb3g$(Gd8(E_B%k12;Z#n zTfW7QA2XBuyjreCfWnkQ=Ki~5AB9u?V`RYeC)U+)+=c`euX2X z$13}6$;{?_b(H+WePyb=mRO#C7m5CYW(mGqq&g8jNXs=*gg0?Myz)KPq49kE3GMB7 z;_gyb?J18M_*w2`a|WW%5X=?43$ybS@UGp|#qUo^G&`s%#dKp^w-lDbSF%Crhf1UD5ntbhTeL%E3cb%D!I4mhT3;+JN~$=SSq$Pbww zmY_1gQ{j8bmwi%%ZF{cSmwwyl#=n3QwWpO!jPYBd4uRl7L1t33-uB&#+HQww?a-6= zdmC=QMMvJG2Et2iOWC5>bW4+tU|%i&27TXpP#%$s?i@dHPSq_p6UIwm$z@<~0xhX9x%PYr@BYiIC>T~-V02FJa&u<>^IS{epx5m>! 
z>BF+XZ*+j|{s@U1SN_ti;(I|OUtyPnobMCFuJ*Z9&TCE$rNnEb0gBHUGWp|}Fh&$d ztUa2I3bAI7v=c!~k2NW7^`)OWH43WO`fQ=NQyi#E%`EHCG#W;qH+wR5>Z5Wrq=pJn zr}4a@8OsR>jl9#sjpZT1&$rfFe2F=BImsar47?C)Y0-^fFr4F4)MzO)=NXY-WIwNVZ$AI}-P>8qxru;U@VTgexbYn(lTZ4bX} zDW2fNWObCw|j@oxEEdI47;^+8eOeX z=gc*4RC$$M)c$!DAuxeEQvh*B?HhsTx|nSb&0t!CFbk#!^hZ<0F+)(Y2F-%|^QfBH z9P13=Va^dcBGUFrle`AiHzEck@`bSZw!IhH9`yoiG`YftV}7YcmdT=#I8|&k)D}oP zop)xMmSXf(jbRRNSTs5wLX&WktHqA7+O za6hz#SgR;f;fDeSrx-J~p3T#`QhCf%KER|kQ9xp&=PkZ^l4XqSmmP&u{0DJhJly95 z<{-0P^@$g28{Bk5g%8&}#$uW@(O2L6rTl8@a6d%qPKzkNB#ROYa`a4^>s26{tde?s z3-<{~HyuwqyRq$BQpZtG6`outMZt8Lz~=DR$qA*cr;8Xo zkDx}vlvu-nN!!cvE#@b=G^>c7(i?zA7W6~DvxwThLf|FGZ02(}{hpr?zhG~u&h~4c zwKfT1st(|QuH|=>3Aw(pgRs8I5$!>OAZN@aP0J&YgkAt{#+zfHWGZ1JQ*SpWPb6P_ zAtzd7WXdVgB2d#bI(^mhKy=sRl*aVwn<1sCv@6jcI zF!O*9`x$FJn^n+EXUvIYtq%;XD|k*S+5DWKCpX3)4hff0Hmtcgo* z=}HO9k@keLQL$uVv>Bi{E{PfKz~I}Rfez67M+aV>zjpxZvjMTq!KDHvn{6{dgnF#a zu7NnE#@408NAUlujS}bX?a26ok$XjtlUUg}`Sb)`<+;HxRzq)}UZwAfk?L;SG;n!I zDqWpJJ_M-XOvTbskh6k+28!g02&80~XX~6BjkMzna42^AaG!Ny#UXAvB=@{oU8&8! zY0LAVF=cGkG%P=aHIdIXP1O9Ai6r_JkoM6n<%N(sdRLc25`VQ)(33sX0TCWQH{r_1 zMGr|RxI-8DNMA^cT{B4TAi1!9N{4BEaFPrNqQ}8+u4P&xU3TloLzmf}^D##|-sGXB zzZn!c?N-bHY1kPr+wH9Qj3>AnE=pnZDr}xZ;9m2tGZx*C=I%gSz=Sop7}ar=(AOJN zgQpH-7Pe%UoS#hytfJ~IKF1rn^1!8XN-wZVY&nVC+tJR+paS*v^X?M}<5EA$75@Yk2 zI_Xx~Q5q!Tl}I_w8GMl=CZqdw39ovL&ZlG^pK_GAk@R#L)Abqu1(#<;$nKM$=Qd)8 zw-k3a@YQv8Uw&eEf<$hm%MR=27@gi&O+*b2gUh3vaA^*`amMJ=;>RMGtYPvvmv)s! zC^yT$;j0NQM=E!kChtIXurn4c6&$Cx4f5?v;;`Z?!lW}%Xmska$;N7C^T#ybw^}rE zBfJ0y%Ie=c3~Aylq=}jjs>#!}Vh2<9%wO`_d*Z!t49crRj_QX%tys;6n;3Fn$z_Eg&QhkCEO%eBS3J30Y|k-DY~G5jXJel}pW%oas%|bGzHayA5!^nBCNjTlDd0)b-^{L-ZO_nnU-; zf~Imd%C^~d*OUf4nk35g>E~k};c1hhbO|OV%mz23QV9ekrW5_|R_@UeJh201;6PiG z#KAHH^i1dJ-dj>{mQ?#c#>mBsJEytK4;eRp@r`?J28()}U@L_O9RVkgw2&2?A;UhK zc4D+y1eU|cg_Ti~bHb|}-*G+x_PG^AiDTz)Ka%V!GGU+pY)cx@{6!SJz@WQ46_x}O z(oh{g!FLghWL4p7`(z%eNXmKrmmd%}w=S|}>K%|6==2|7y0-s>>A(|_5Hsac<~@k9 zTmw0W%FB>B7<2%NAiW3sI)H*qVkY`An!PsI0(u^a-v`-IsGP@L%;%E8_B%EKo zgi%<@A`A-CB#cmm?atzNJfYBGTDy;6_79Z8NVA8>q8~D{K zSZ&*3vT1}j#@7~9ncg0D5cj_2bne&56aRI**g@oNdUMzO)|m9DjhT+`4E@4(!^b+$ zT6|r`PABi^W(4^v#J;i`C=~`y^G6WTH_XDc>2o!smfdwYu32LAbp6$~t*~uZn$|}i z9!@bVu@A`X0TPQom1Vn@AZi1qElEnPsaiuGgzQqX@SOcwlt}}~n4u+Ms@us*> zc4){3@m%H9Xj}1Lg>;Df6gM~%*%}iUWW0ML;$F;Al*Kb+_3<635&UgJcv@j^6BHgj zZ>rsT?=ZQ!Q(_-T=(nMxb4y35+C=PECVQdB9Yg7z&|ke_VQ>_5C7u-6{2NMp(5%m&w zt`Z9UZ<)-qOezg7M6=3Z%?b{a7DFZ}<_=%piwUJp**N#{IPQZFhGzI?z2J31fWw*B zH}Uj;|8Q~!NGBZk8n~lyaII+FOULXQP#j{jd|DwzldiS50_PD6kAam`WrY`cZ#G!( z|2;_gPKrRO(At%C@G8Y;C*MM6k0N6-9=GuY@X#H+@)C7Aw;@R_5OrPp{*RHV4i}e+ z#fdLt&Ggw>->?DnyyXGJo=;QC-+(1>##Q^{=EimukD!^-RXnV}G|s)~desWsz8QM? 
zkYoA|u_L|Zkd3mQY$@w%up}`QkT!ML+9bH%k+4(vNoORF(%gHeJymtk`z;$EK4iP6 z_#98@$|wfsbWnn!FlQ{A@W@jWnaYc~XE9kYFfcTbGa0c%*%@82uVmV5L!#|XcZqH9 zXmB(B^xz5y1;2;mV-s_E`8}BX5D*twrGVPzNA-i;Rc>$8-GTszm}?}1L2LVj?^%h) zuBA~jv<~_^$n8}spgra1mlKk~8TNdF#-dX@K{}{QhkeBJFDAB{aiGYBGha&S!GtX| ztOkMbsyDiFN8`1Xh56G#ngd}r%qxatzi!x zPU3S#j(0YJ)}}3xVfj)pZfe=6a-T3|R<2%U=*bHcg*>6?Z?k>95<;5?F|Eg#i^|nrB+&BlN|Es<#8PY&P2|s z?p)LP;G~WfTSDg>GxgyMbN;9hPh>r}v^ssN(h0{$73xaIhoA)hH9ScULGv77dGpFD zTGqD;3LVYHFWA-fpGn%{vTGv`!=8#)ba6!m9|$%UtAxY-YqDVS{6(fEsXqR~5Tsid zJ@ub;JSnC=>v!<-5g(A3FlhAzJu`2vkp5$5aNcKfN5*%#O=*w|J-g$9d5)UQ{%#N~ zF_U3=;k>J?Akni+ltn(9vs=urFVeAK7bXdTD21VNw2fF<^fZYJ)eofLp7!rEukY0S z*&?;1=EB5s(5qdASAlJEBbdiol|m z`rq+Qc-)69N7`*g7u4y`Bv#N4`|T5Gx=TMS{F=0!dEubth@~j0JLA#6Tr#^aOsW$8 zFE*KS93;l+&dqV+#Ep`e&~~zKU^zH&#TaO5DKHbwhl{C#sNyAeb~fh+PbZ+Q`@pBGEZ{p(6z)H*wHSAAu-{#)mu&o%yFOVO{N9ym4{+|wCWFIc%8E@_<0lYx%R zKq+MBVs#V*4O)xtiUeRSgL(n87qEY{3`=<3Zafw563@?G&9qBa3UM7{Re5%|zsfc~ zozrb)NxN8O7#hF*2ag{yPLo$!+XbK^0->{YpvRnD#us4xbQY%S!o&#m$2$kMp}x8z z*qI3YoA;p4xPO=xWN31TfkYxkwV=0FOg+PosOYu#c&Tu#x>v^|sww+gu|!OAFLa7> z>Bl++!-G%pP}gyNK~nTvH%A|zJc1j=J7<% zapBX)V{dd0qRH}6gPGdjtvO(yvf$S*{j$5UKd++fxA&;E9JB&B1<;N+B)?9;(7 zv7wE(>9%@Y zL|N(PU%a3k-dTZmGqFF+$i@>7ySClipi*E^>{)E;kGNE|qIHp)qhWN`t5oY4jG~p76 zs>~uBk%c!!bX<1GPg9R=gq1@BZ?e(PW+5MF0rD5so$*~~vWuEkSbTRPs`=~zR2wZ8 za;-~5`kQzH+FQLZ?R;z4a9#%{z2BrVRDL*iqudipt#)q%7g=vXHkjHII|*$;#|9cK zNcD%A!`^(e>B{7W33i%gz8GG?m#7flSteE7%#|oa1t=zPJ?U_gG4EjdO?pc10mr{_ z$@U&CJP-r&xf-gGoLVtI`Q!#D`V+x;{(|G^m8IwwCq*_lC&iQ6G!*4EUqv{~d{1Cr zWbpm5dE?5w%}h!xuY)nAqmn<+t9D-$+4!Dl%1I~@ew`t!ijK_7>3Cx^V13CQyigG; zpjh-9ZCZkI4YUK#OtO%nfNCb%&h8fE6l<5lV$`}Z%+_P>cZDUy;2Q6eS!M+Vr*&yo zO2p`=PjpcyLv2vnW9;N=s&t)TY2|%zN*G!5`M!eDKCNMfTlHmWHF!T+SGLN9yv=3C zvKr$3yH~88S2kL&OhZ^=V_6s|u+VS^gwyG+R>WS0OuG$}u5strX94o))+9=_8x21+ z5T4E^gc4d_8!=(Y<{(zw8*ln-H_17V^bl>T!*CuvN9<>NP4 zIkv6pHOH6;lB>{QIWX)`Ha*KT;G0;WXi~93`L@SjE|Khyw8q?9aO!x{5wc8bFW9q5 z+<5J~yE&{t?;8e$0&lmgGZ7jxz3LofZ=(G=j{uo~Ev%C`3Y{k=- znk?>bDS24P-z0~w!g|ZNESpT5{QwBn+iRIdypzbjCqbaKAS4CMfH=66GY%zV z8OsWRjAfi5MTaG@`8?fdk8vYLY|ah(mthxdd(OFK_uYMS@pZZswIU1lnXvd&4^%IiHLxlWVJrumpMc8N!MY-MTgy%3p6o$%{he5v;v|+52%HEgD@8P$)Pf=ynMP2tO^C{-GI+(xfXI(awR5m-JO z51x*~s}F z`zx~n^n1>hw(9dP()TJUlKy<{^{eaz;fZpSaudHKf)PQs(o)bVPa|a;cbhlu9IZ3o&D)T+#O}A4n;x;F-IbQ9m1T(q}A+0xo%PkbPT!iSIy$8dqX>9H)LwD>1O<#!)f05u+-+a*SIdTaX~4rt7)Gz5 z(A_gp$Y`WF-p7>vg9PLX1ttT9r=cW}_0Bf-{bMR3hT$H z276@$s~M7#%gq9-6kV3nP~T#$+#$ax&^sPy-hM9XzG&X|=mI&3AbGR&>Okq6cJx6a zOb70`T=i};^_(kO1e!_(r#ZSi97-Pr6WFZsJXqU%(x@R`` zDIQ&x!wYo^o({Tm!!s4Op3=*5XI#dkd1TPuZHWx( zIhH3llIN+~cA%1O4B(AFYAhsLd=T+jg|KHNOdF}P4h8PON;sMfjvYniCb2>**Q8SI z(5EU2bFRb&9MIw#lN166Ca0Hi65dW}xsu!N7oa>^hh?I7fY(7R#7SH-Rid#lbm}U$ zZAr)_%g;&9k=1sY@l-#-dzDpbBXKxrrp2zZE39#29$Z0RfI**vtIDP>UeX8VjDy>& zJ!Xfa0hpV*a1e&d?^NMN884)Wfe;PJtB0h2DaU&TC5NgJQ3xWQoTivE@Qk^4;blLT z%;`L(dGcX~542z{o{3nC#~kU@G>yDRZ3H|hgg!17K+pF&Hv|;rU!d_V=xI7VUkxvh zsM7_m`1gFfXHMN!e-AkS^y!2zhbTI)$VEH>e*Qfd(FYKN^}$#FM#SavGx26qn zUO4f_ZpPj9i%E9T!aSyw<7$18Z@r@gjAu+z7GWMz_O!F;MS|C2gAq&o7`*5Z@P^nt zbyOHiu6!nT`LIf3ns^njhcv0P{mNbiCDb2<0vu?PBE6Xd91u|$<{afz#;X+V!9%ig zDVFKy!dmqV$%}OZ288>2Bw1E6Ijc^6W4Q6|CI&GM0r5t`-j6WR{FZYvQ&vcuVLN&Q z7Soh>7~71l0h)ytM7833eti2hNE;WUoW_2Q9s_flFW%Vc!ujg$Ifv8@n;Ae#U+!nU zUnBi^$M~Gj*8TOBTwmh|?W?dh;|}n_i7_|{tzjLiHc@RuI=d2a%vS5=!woO=Or~88 zeZPZd`1&F7@z5&=Y=M&LhsaIt@3E3W$h^ODBi(<@jnzhK5?%b1<1R;gc_~QySD@KYX13w_0lPlGFz6YD{Wy9Uz{-yjl~Xqx!nyPBFWh$XxLJt>K~)7~ z(GVWE{D4Ku{PI6^@L6I;tO4oc9|lpR@Fm( zaB5H;Va#t4On1YfuT^wySaXF|7ahQFX2*!XZTs#+&Ij8d@LuS}Q%>+zL%NFcByT6%e>g=uRt$#bkgDcN75JjZhyW 
zGO#vZi3j2rSjux`5o(^>BTJyW*+kZ;b_x_f)*gZ0FcL7ImbPZX#z0to`S%4slD+1O z;|;_XVRcr}TkE>aOxp%lRUwlXH!1?43jV?NqTW6@xBmQTSoDA$;35ELg%etbyP{8j zOgIZS z_{>Bse;soKr85JrM_EO^iAcu`8+9yFOKZ70{??q zAlcMCGE7>^JBTqSmGE9pQHe;Zcn)u?+;wh-Cn&*V<}?BU58&^D+;{@P5Fu;Ko%@Vl zZa7UuBy(80b(nfOr{DL5@^uhjHf^d_$=u4HG028kV&uS^pNFqjHs5cp z^7ce>=^bbs>5U&^j~#wMC*2ny_Z=d*kl5HUP<33!=$Kwr*y&lk)nTP|ob>-PMH$y* z?`olWm%a$OWgCn;#f!4&pqMW%mJjc0JMIhScV@S%E5K7$Mf4rD zxhA-45%||RSTCUHr~-I+XL+j4o=#j}b|;~7GbDQ>bNAv-+Fa@!)pJJP#B1G{Q?eKK z1wPQGkLKAlBe>ietYga&pebNKdt9A{Cp?eMcH=tdUSp1_t4Sa7>OyaKe)fA?GibSI z&q;cfEAl$N7ik;TZtXjDI+B5=IFC_L)zIFsD#01gku1GBCKw!Y9u@FBO}(_*WMLoKMQE`BJ?l~@CXvDOVd2FRn2Dh8+K0Sh>40m+Zgz{zBE~( zBU1$G3>tDv_QSN#qk!&=C5=X1ZIde&DzSabF>ISU;Kj0D9&K2BJ;I?_%f2euYw&AO zCau3AdxtNdC3`)Vf^du>5n~&PpJ+(%-Er41vhGSW%*f5vFdpoc&Dg2v-F;gfmKD$s!z|Y ziy|3@w|BpbRnwyHEU>r@oSa!2?T{YL;x}fQuG2}r-lcD1LL9`Bl$V!lmEiScYr_|$ zO(eMLptANLIgaaAx`KhWF-R^=?5uETfo9>V$heJzsHTo$mFB5F$^GknvFb-&ggG2} z_7&r|OB%gnD8ILNHy881@8rSp_s={}#To7U!b% zY+pX@&mJD`;hqfVe46#|kEDwg8pK3r;?7>F62E1kNft+sTOOv;m(_ltdLyo%iJbEs zu%!kJ^wH)u9orfi^WQjI~=+%rsk_wJl` z$81?5#z<~&?_A^?8SYkQ;ZQvSGj66hOIlhQWws63>;>@ozQDo34g{9>xq&^qmbNx0 z`0GCt351Rq2P(z=S!Iw}*5@32B}d%j|F*GlIxV+XJZd8=Xb))^u9gELLt%08&t3G$ zyp7ChEnVRs?(y<9BAA~eOF;=cA;mL`i;JBvD*BktjTF^@j%i%gbfeu*8XB7GRX>Yz z58juVrB_#1Uj)k9_2bOUcc997=tFesDHH$8zGG3zZ=chSc@v8Ehk+k`k4Zh>xN4OT z^;)H5m2U&yfy5Om?p_M;pwVo`N~Zfx&rEZe(QP;?(%e%RxvgcsecrAJD(O%#d$z7W z_QNo-veH;)Awc86+VN{8#XeATarSP3Ol=&JXw>DWhXTs4dchK^Rny@=i&a!z3d^L@Ujh%oJ`6ajyrN-1F zYZve)p6c+TnC?mh|FLbxNHHa1;wDY^*s_tBl389`3m+fZWFP)u?B)K_%Ui|ihj-}w zh(MK;YE>0igAC0%zH<=K#J$$u`Ghf)1bDsIk!_|R&7p{!rg7ICW{-!ZR~ap76Jgb) zGgK~UhWzs5WxJX>I$Th|(XZ^?cZ)4k^F0_lbsi>U@P#1av)(+jr-*Oa7)#~HoCe1~ zRum&8dk&gIA?8Lg9t7uRJ2=J*c{$Z?^{ayG8~imy2;Iz>3SznH&)ME&-|6<#936_& z^~dCE7%+9BVNDKJ$m2mYWUg@}yW}}0l`+=KRm<(FY<;t!%HcuAlQPRjqP}JKzCGCb z@l^JgJtG|cD^1n+zFk840)BAo77i(p9|!X0FR!_C84#7^_3wj4zY%?+{5ON!m)}Nt zJ^en>_qG9FRQp6%aQ#rzmm3W%T5?QapK-?g zV-cHf%lczDK@31YN}boIfr434?1<)G1RBLSwdvY}0iZ*)_xJOqYb3mcu}~ur{E=u~ zUQwao-JIu<(W31GTLrHSl2#NJ>QGTpx?;i8%BA_gn8af6X(_{J;eVv6;&F zN<7)(7FitKMd*5Go|P9W2v)0`YgzS0o%8qWB2AaPA{wu3c-zb@8M<7NIh&(jWidN- zKi+7kn*(Pg$=)GbhF5PlA!p50ng7?zmBr(u4$1W}<>>5?i*HVPdlb7TP z@U>Szp(V3ru$*VqEXhUr+^hKZq0tEFkVsMa?qvMG?CRudB#??#;gURzp%o!<-0B<< zf8sURa7Hb4oLR96Ylg#Pvp(uvt-95DaUW*u$hDF?#_w**Z#4N9UmEA5@2GyFNoISb z$5Gc@L$$B5bG61oW+}Qb_ZH1RzLFuCC3}B38JgX%<{i9D>9aC~!#t)0&f~h6amoMk z!y!fXc{1YrB~)*l`n{rcFOlkQP%4n=-n;QAyD=g>}hG<3VCWt<`?C z|9E!)AA4^dRb{ue55pFekdjgX2`NcM1ObJOfPf-kAR?s#f)X2$ZbS@_?h>R7QVFP?IAd_0^PKzM_gZt!HRGDs#YYWmr@;~o8Ze*W zO-yyQAqf?2A&GBW6pKG~Yl(R|X)E$*#b6ogd=e-t)ZO7?UjeM%BbPN=;((#^a%MbA zTjD3FGNt=1{VK`^+4w78wjiu8=QK5=7LV-wMQiO!5P0&aMkLxM;)hu7Uk!kM!X4=G z!k|J|0a{0i`S}{XfJw85umXW?hAl4Ij7?4kr>6@jat1Z_^zcKR3kNuA^?2(EjT#>O zg^ixAl(;xF2`OpP)29l%9^CoPoeN5XFjt}v0k9BQ{4*duflIjxxg5uv;~u;R?#n_d z)h%Tx`p85^-cC8vbUZoOi?2wH{jq@Ju)A9lsr}fs>|ledwMK1e>T1*GS`Jm!^#(5vC7&X+I*Qy047xX}Jbr zT4QHtr2RQ-`iT`T5y+$_2P}y?ZZ7q7@!Zjoo3yyv zko6rC MTETM9mC$l@WYMf{%rK-BEX-AeP_53z{I~2YsfeS7uw!*gfbIfCU36WPh z)-7k)nEC0R?cSpmp~Zb}?>Y~;fcm~Y>pVLnYvtZ#-0oMOb$!;?!ZThkt>-7dkK`%y zh0&Rf#l;Pcjb5hpabg#3Vu1tGBretjv8LVo)^Fys;grJ^&YKsElUA~NJj;MgaIH&5 z(s$eUs}4L;rkvaMEPh4R9Vnumr`@(UXD+8og*&DB8C@a_oXK3iYdxz1!z0ZP)>1Pz z_qN_-eCW0Zf1%GGK&$1^PetEVrxNX^w*w>wg1A3iOis!|8CD9p%n-C=eT>Cge~}C+ zy$}9jf2tC&)LD==xOr(F>s_c2)tPyvzQ6C)2dPVb)QkBRhHAG^(V3!#)ip%nbp1Ob zl}Hb}GNRVe=NYR~e_8=$7_9uOM2#Kz1P@6X+=Qy4K37(Xj!^C9cXRT=k&EXCXx`<# zt$Fu(m#2GEKAESHP zQdVaaot=-zK43gcpyYw>MnSnUu(R`W2NX`bPeY;NF(EKLazjWcWRJr(+%jkRaR~6h 
z@i(Az`KiVM;HQcbjyv%}>=xyUIJMf-6P2Pgnom8NAFPPo8dfkiZV0ZPClcTe{c+fu zQufG~Mx!0`Ds+3vA}jgX-Vjzt>>KCFgdE0#O^TXkkmwR$rt_MOM!sc|@R1`LS{zA3 z1!(Y$g@Dz8B16y&%_Q@WyR1|O9MitLRiTs>@^Bnoo+uY%C-Trv1qIM@$u4-F!p4fL zU=OH@8sPN}p{!tUaMIB4x)BhnpRDfndantzBlqZ*lR}+H#~qwMBA5eCOnPyq4}*Dp zA=DOy&@aX8wV*TBpOqMtqNby{-H3C$#I&Rw&`ly1CoPn}_y17M*0$4Sq~2x z8Cm>v@*8jkgw0dWQc_ZefZQwuDlO$j$0mODhy`GT?Ufqfl9h-Wt_VNASs#rxR-xnM zVb!I6&iv@S_J<>Y8>z`dsnrf9<$19;Z=~3>TzQ5TdgZq;9JD8@V!SmTh){;c51!Ub zCDF^1h4#!^79@p|mmt(D9(9K3Cn&t}Y~kZ7y-F)ISGtY|@h_@(zxU z=S%Ks2Mw-Ma&vQ2E)<{N(7AMp8#svq71+;hDF+7!D`;p$Jlu2mB&32|NX4P5>J-wG zs)>G$f6J ziDsHb$Bv3sg_MuQ?9P!vYd^=C1^|}bBjk+*M)W2Ut?*-u#Ty9+iB8hFN=z|B{Zhj2L8lT<0{S>S2wx?}p z@?o3{!KqsG92Q+abm#=uu}QAV%WQXl2GLQLhF*T$`k?@K+y@h^BMv+H`lJ*Y~uvKL)s1~l`hAmV7Y zuJ4{LR|)gxTWu#(!Ko84?s~eFQBi!+^bHb(fH6%S-3Ap{W>jI{tb`$MK|&WPe|+<7 zlqF!v%e5N72fLHV{B0@TbGh?@8FE*5b9*|G#Vww(UthL}5HY>HN*LP5DfCQu!+5t` z>2>!eCN>6@#I(#8FV2E3MSh6}-TL4SEV?Cx?LVn=@lYS3xaQj$)B)ckkBX+OqHJ}lg zMrHqKji#b3p(;1vS8^N zS;^slWK%c$g1@KN^soMXdJPt|NjmHDZsuWS+l*7k&ox4?Ls&B?YA8oQd5{H8#kQw( zXB3O+Jg>&l;EuR;iW*p3=S+6zpD!sL=pP>xd!tryjM7}Y|7(e%;1z)O6CXd8{@;88 zu%m^+{Fr=6>Ax+pz<9K-+Y*C|p;EESNHtVvzB9BAKy`_+M2r`*W_*0*-Mi5_MyGH& z+&i}=`Dfra7auUJf5d-}g1J>Nb3i1(FKqm43Fj~qo8Y5{ixS4IeD}JMU&9qgWqZTLnuC(r}fL*rR54((s zXSCXnmEa~-`6Y>=ybL#rkvKV=>?nexSFijUXGfmaLkRPxyTvw8DNno~Dvo~r25-fH zh4Pmz%Al`N_vm;ElOZh0I(`4f-M&3R8GS2RzYu&cLwlzjQ|ARb?PxdZ?b}uPBLkiI zN#3CkV2$x|FjZb&BsWY=TLd$QgV+q@zSa`riZrofTSh7T&^ZkarTcP_`%Av~rSN~` zmz>Z0?mK?Dv$J!a^w`3IfPoVmNB0xGVryvGpS9T6c@xHOIH2L*=zI7-x6&Ioo>|%0 zjMLjW+*OE2e=QiYJIb{(wp>J1ouMfH#o3j*;`d^fV-BsX#t0VtOug^x!s4$Fr$XYj z9a((5miPmeZSe?L86MXOOz*zzW6F0|aEiJvuoNR7d(hM?wGIaeLW&V}X;%-hcUf$P! zhC7T9eomqlf?<6k2#;R%@2D73_s^&vGk_-OVFpiUIZjOtW-a$)w|V{Fty)IQq0~LL z<9M96X{b4<72W;vv(d?})GL@fUAlX*6E-dvesUm#!XBCan$Tl5SQge&Vh>6qa;6i- zB3Hp!q$!;=yS`5Qmc^$*t z@>7bmoeLUR#Y#_sw(SJmD=Y+&GWYy5WUhL0hd9Z^8Q3duY9jFlPDm8WAq4@{YHa@d z-IcaD8_{)V4^r`;E~b(e0!pqQH@h?V$dz;8aRiZ#lY7{C#y=yp%o`tfgkBGM)bM14 z*bex2Zn@P36201w{I0zf)B0kUlCTotB=tYPDRI@Y@Rg z)7v_tj~^aE!@ot|ps@?zmOyADN?}_^4Bw8FV5c4DKZ*TPVtESz2RWuZ01kBk1RQ+d zj`J7e)!H3lVz8kUK()VywzP)mJ%{e@an{_alu3aYqL+LW2l)7u`h*5DE}etszE8ON7$R2Q zk6T#rp9c~!*#-5H=ek#*B30!zKlqTP-wW+HyHcjIC9i1;T=FkpzJ${Ee;y0{{RVqu z=$s*yoMr>G3zmzM8vp)|ncc#}!`s@LJioE@T`FO+hcsNxps0MVrT;EE>-9wm2V_vW-Y^wncpuW4-+wRRf%i4s$T zd$^Kc3Z#KSwgimOG-NLhaH3{DTOh7bJT*|gHVpX)ARX0fs0Ki*=B7# zD$b-lENXGS3Zo5WHhUrebjIK8HJ8xCSxe}i{3SFlDwU-1PmnkC2F(u_ID)}_!ivW1 zee6kRH;er-D`yTHGXi4w7>pxFFfW0EjBbFRO|PTmEy~O@{Vhyvc@d=V5mX|~@YLm= z{iq9=(F8nKOfc19cK~$|+&vNYGlEK`i*qxvcCrFSav{hnDimI=!~n?S$YCB_twQkD z8ZlQi3j~@+GVD$;2W)EBPQ7Hg;$8ZDN%nfi;l+zfo-iJ;yXyv+#uDMOOMt?hK>QK7XAC>!Xw0v)^THHQDyXDG)?YQtE{_1;p-neoBwq7?rK z%1rJ4VluY+LOn*$X;J`&9m#EiC})~xL}^_sLgGKXuKOqQWN;Y+gfX1pC@T%MlWY`6-~Bkf&YdfFN&5g}{VXFx6oY+rGvKh) zHM1v#?un#)PG_){{AD_=&CL}s$m2654D(^)1c*6B|1)#S`M?9RTG#Sn4qmh7#^)?c z=75yKu1mJhjG9XQh%T{Y7AeYNbbDTvw|BrKZ53dOu9`dnvfW13(mn>W5v5|Yk?PT( zXCt0I&W*81OcQtmDdEDVMBTi7QL&hqWdEIXe+-PjFPO}b?dwy0*J*R;9ks&=o%;|@15|zDd#ZLc8ybz3f{B2BsZMcTw=Sr9i}e0(fCzm%_K92SLnOEj^IEo@N?pBJwK1Y5|g`&D6e!!KQm9|?WK zWjB_VC4I-%YtY$r2KO$A)Q~2F)1CN zKahgF|9Cq0muAQJ@3_Q|#_t$k9zBL4(kI|X8?bk;D;Jv$NA96rxz7G}3mE`J1QefEG1lV9J+ag@eJC|&P9j}e6C5yuEMiL-V68lvBi!ZXiQ?WvkyhH@VQT&Ve>2I z$TG>kZ@V-e_w4oS%a4Gl0J=8BmxFD7e+w9~J0ooQt+<1v_ym*GaivDO*FVgMJ9&T1 z!f4=pM6v;>jIM(@S@3)+R~gc~<+0)03a?q_CX(jF?ay^X+W~Liq^_hBGNu1zPsHGbst`Ch4=q;&!^dTkA8gX`QEN7sJnS$1z{1 zXm*K`vPrmZYbP6xaml<{NwYmCG{(L%joCb+8pw1|CYq-Wvr0yaWk@OXS z{37D;l&v*9KhvS_Pjj~-Rz)kT_dcY+#-B6$6LPJnOwZfQUM`T&m-sPj$&*Z&wrvzvOaQp9VyVuRaCqcc%FqLCfq&h6cYRX$Wj2!Y;Fn2g;_%d1f|I-h 
zGQJ-Yp7$TdCngM_bqNsg0m{6$&#AaGN})e$0At!v5j)>Yn=SIMPefk8G6dFiev`Cx zmdacX&K$4e@V*@@!!{NM0xO(cmC)>p5WW$Zs$C!=78B1!^pfgnT3QIOc%6lYep9k1 z^Pek-Pf*2*#?NQ;N`-dmHTpO_T1SG%Hf~NbLq5(!SO!OeH1URp1{UCA3j_XUwNOFb z-se8dl=shH8EtrM351?i(EZ^B43mY8r|Hl9@5ibt!!p>2>ukS=c7Su7A9DmU?k-d% zQ%{ybHMZArCIn{o9`p`J61zVooL_8CIO!7z;9=d94|h{740ixz^T(6E{pEiOJoU4O zG1B|{AK?+6ToN)BW5by~`_yZU%h})5lA>OX@NokeFnD32ts08oh(;5l!rD>nT!7Tk z=EsN1%1#30H4tEw2;yL?$Cs+HI+H-$YLw-5?8Fj)%TIkJ+gVkovK?<$d<`to@b89g zqd2At!vU0IAYit0_d)dCoqO2eCMHeMOr`&A`;q@z$^t$N@0|(w$J4M^+V-?vnmGQ8 zHR(f@ZRv$xQ~WD*yw}@sSYve^5Ze4>v$aQ%m@)fQ_A-IJHPSu7H?~epv zcAY<<8(Wd*6so92y~25jb`>~Se}BeH75E8j>vp0jB6eB8Ll6_QC7StT6f%i5_*XdD z@B6=&E@pkhsh@+5@@E-^TkZGFQauTqW$<2K4FbvQ4t@92DOH?RyDuI-v!eVLwv+EOcJ6U>XPm7*qB zTYnZre)tAiL3q8xtz5WhS&88F1kD6#flkykCyBIK`+f^>4MP6%{u+3{9=TgBKuhfb zf}y>yJTcwVDJW9OepRk`$U?-tg)ODhi#zKZ~tZRFSj)M8eGy`B74PwIOYRLQ4~@W6(4;A!+K6^HKF-QbKP!^_`v$B$>Z@)(a$pHwEHnusMc4}SfUh{X-^mDj<;y?U&DMQ5}kWr_uo@|P}E*kY)M zf43djgEt9sDI&mV&SY`%kchplG7xh_2pM`7Ba>OQ4H z5*{H*o*vhKKjpu>@?Qh{?^*fbRKA4(|Mg)1Z#d&NUndw@;|H3fhpAfm=YYvc5X5f2 zfXrdUSRdNgS+_=%Zx|W*|1I7;NMN;9=VU$FAT4e)$qmKda~c{4AdYSXE{HLp%tJ{1 z$bqgLhEZh_5zZ2X1x85!{`^Fn;6t+)btuB4lJrC$uKgY{KoeCCq7Hr%?{uhkXh76< zoq-<0jQj%vnz>5A_o;)>`U8NQD=%TbM#DWaa_=6fUL)g%h}_fU(ID}IzV(V{?z;Ta zH2EG8CBcO((Bo^%=7zqFAd!F6M|{=B)!zS_B={bgyGT&Qd(diBHGeXz8Ve(ca~V1U zGo2cm0{Fi5`4_@}R$9M5ov0nj`lr`NkE4j3poVCY=So{4Dz?(v(ozXCk@{1eIhA8H z$ED+Z(59wo-N2$K1iITI{Ogy@;E_Xg2F`yj26WeH{0HmZ_CYmk4-$fHe@~Ec+i8$O z9;1?$N2CKvXX`PKcM#EVAB2iQged0GsA+n{TdMwLMqggzKhiqTBcg$F)Dy|BlX&h~ zFhf;f68u1mYNd>!8nOeHuNh+Q`!1?nUo}9erXhAtj8TA;8*!Q z*oV@9n~CoAd7x6YL;}qnUNYGgqQwUhDqn=^8oKjb#mbom1f2)s%Kee?CIKKawMl=F zmq**b+|a8+H3Vp;b?oTCC}V79f_A9HL)IYLMdbOHC&mv!(!REsFn!6H(< zjch77Cv% z`_{ay#<&|h)}D{EoEvjioBW)SLEdXSkSaxk)H)*Em@D)|1qp)AFF2^l5V^f%^|Uf4?0C!Z!WhI-M3L0Or8QtilB2u~H7*vsfvHyUrW$XP= zC4C+$2M8Ypr1{h@xu@{KDz>>9o&3`(_`uXmt{Pc^66YdaKfz)zr$8-h-)Q48&>k@W zS*te;lb?E?uSV;z6%}=<9cJ~XabM^)O&h_?n%!b@GM<2TTNMn#ga$v-?4t0kZ#^kr z{ueVzfEay086=5-;^xXUJ(aRt9Eh2_o>Z8So~~Tx%fztWD%orLynKPGquw$15H(7X z+OE%08CPh{Hc{B6xkKq&C69nl?v^B+!xbx|zr@&+$hR1p&F{r0=tS}mvio{cftY1? 
z5nPs(0bYysxFH$qUr4w=V$B!-}PH zhOT@BYbZia3yM<4i%0{{G<`1)A2}?Ab{o8f zM_K!)zjE_LocB&2f&Ff!y)c5(+x0$449j2^i3m9XL%QVFNy(2g*oW_Mf-<})MH;(V z>wu)F&x?~lbQ%Uj@L27;W!+#=w+asar6!T70m)JA*hiSpsX+4S)1R=UI5N>zqz(@R zj0U|5Ly~OjOstJbZwq(w6S1oyrb{O1Ej)c_QFX~37(d>%D|=|T4Q6-TMMi6Xv)R&% zkmKZX%9nshLkJj?rp3{vZ7X*fZoqs=QgLx_VZh*Xxw`fH`7v|^fdZMcM*_I=b~|do z^y)-ghG=fn6}tXqlibIMl+|yK&QyaRo>uu0LqKW70TTun8S6IC#h>I}1C$$t3|%+L zv>Mgne6LqGc%cIcMJ6fpozL{{OTV0R3SlL)-LrQIo?VzneCzio!;88QNBM|l101vN z7e~qFkFi_nb=}&4`(^j%?az7TIPLQ?uAcw}y&|85){}$WP~HC4=<(Nttu&(_{sl?& z{XP>x(pRzLtG;+B4KPN-1JO>>#h*TiJFi*+rY_Qco6I7;SV0)F8YWb)Fdh|$XLdUR zT}xY)ZONLpo&;Gw-0>qeR|Wo(S!ctkjcQ7m0Yj&S;!x<@N_s4VPU`=($5z2#IwzJ# zdh5?4gdc^hM)W>sc|7-2*u!iZ5>VBLX`YxQa~$EyUHXad?M=*; zf7jmp4}SBv{_HTBR;M~PA?91(G(|$zOjB=i3TE@aVApe-{`n@tG#^mj;k@j(slz5d zt@_#rEbL*iY~sqYfY8&z&D9BL#9ES?JJn6S)%xS8 zpi8gAgGHXm_dz9_7tRWwXOLipXyGNt`B7G&fj@+~G9JLD%~SKWiM+@eFjJ&IX$qH; zNpE4tf3knZd1gIWKr0`{^miu6J{SMi*}dDt)}$l6mpA!;1npHLeaLV z(0zu9|6IqLugfef;;rQTroBr!M!XWLwGXF+v2ePhyFBxMw>XG~Yr&zKD*F{f4elbG zZVeJHT{zw^;1?A)wvQfXXcg8Tq4nqGW{I*3#{{5*)X~LdTIh6`CW28I4dFO_tB}E4 zLY8s#e?A*624^Mge2_JObWD)ZaUaP*vLK9I4TSSqj(N7mx=q5j+!XFPE2Y)Qm+o`# zag~XG$XWVdBaTNk-_8M%HaueVy$Re*@s#juXsYH+J)GWUQ)DH%F+MtO>?;lbd&fr)Usf9bYUMhT zKOLZZ18nL&Twu0{&iUorN+jmV)2T6Awj4YL4m}m?aq% zU!ilmFu(?eyyAVG#{YEikm&>J_9~}lat?DON=oMEJx4Z8D;+Sp{j-O z(1BVzcptke{959jNe{Kz`h>3KYP0~)IIoY7kLhsk^4#kLIUezs;cre8@X;FK%v79f zNz3ZgFJ7#IG1>pw-O1%>O31pS;C|ZA`{-alj{J} z&~kJz`{-X5L{$jnF(Ey z#YG^%3ITPgp8v@ALndAbWcYB!{yUV_KfkSj%$Oax{R{24g)P#q7#V*1(c|O+T{VOQ z?%Q@$f#8c_wb8C772o;?4{rPZZq*Ql89-XCC35N_w6-jea94R_Ht$UFr#-ReZ;7+4 z#?F1-&N)vt*4Og&R!MbG(O(q*a(QRzKX?&y_n*emf~^R<28?*>_m<`t$!?#)0JH{Q zli+KN^dj$n6(ijtrq+iMN(jF!Gb<_{4clCX0lq79nrwcBHzub*l-dAT68j zRERD@P+<1qQ#S=%kOZ2$**2T1rX~h(2$}D0x>5u9|aLX40H~ z&l;z`uU?b>(y`Wb{!5u?Kx9zM3&wPCmKz$z>$CiEDI?J;$};pegq>Hi;SMrY7JM~> zWiYvr&Y>*1qOSpk%h~aHo7R!}fryGX=lH0S+$2BSZQd>Ix@a7B(wFf7(1ugXs620 zQ?4kf1PIW+sm>-qT_rZb?6RNd4={+5D=bjoaR$nqh=R%~AmwR@f4PIm+=(179*S0f z>^%T~vOFJuGyow@O$Z4bqh1N$b4ob#*?q@8e?V=V2M>0>{VwkkmDn&|Mz|J#V|78% z5o**~Vw2fN7KJryneV*P*nB)UipS^lKfU!7a+n`|>M%BMb5v4-Nzmhus*`&#oD=06 zvpX5^+u)c?U5-yHmnlv=9=|*ms%aL$oBk?(`3zHkPv>hJsQ`}@S%=99?TGHPiP=kP zL)z(u8A~l6c}3LibeReTXIwXJd%TJlNgS09&@@o+=-(`N7?LaT6aA-2%CQ&!H^thru$e!>P`fQ* z=<3$ci?dJXxBJ#7b#MkvlgaUx-JF=QLr~q(LAGICeMyC>;bfbM5LQ z4NjB_xsQ+E1nvd$`eWn=M1AX7-(+3>%hM1r$f1F3IQra3#`_J3vi=NVlX+z^s1GI0 zIorGYSXASFMMCvUTuQwJ&h~@X3qS+@?hXB&;GvcE0Mm0r06fr0PJ>7Ifl$V9fx%8AAdE}Q&NzQ<^JA5^(nYBH?4d4rrkP>io*1iyhRSxiy2wY9jezRimWVW%Px z$M9)n$s&z7k?m#B7dk{@&gexIdj?AKybukg9bJa;9}_4Z=cZNCwqkJnHI=PN6>Y6p zk_pijx?%l^b68_z%Xx-xp@a}>)%_NK`($U%>Gb^Jy%E4isV{kF^NG}kqVlo1_cifO z2*lz9RsT^M5?)?8Ja-$&1I)NsFwoM{N8lBE#O|~}-}AJ{o(YaVeJtfg=#TJq=%za@C7nyM>qKq6w?ni`WSU2N7a(%pyo!l&dPjGC<69uBs*GR6 zu`uJZB!L8EsnD)Os8+U4=RJ09(@#HEXA*?6Jb4?se5!z&kr@iqKmG;EOo+|&)R016 z3E`7MxLz00O4&)~%Da`KyN26EP87fRD4%r|=W{2*=UbGBjvy!oO^a#}$e5z{UYH3d zXA)o5jS;sF1X@+#B63dQKZ`c|@P-23v>o1LfYzslH$8wFu0nVR5zO=UPH^?vZM^FhU%AU) zv-+)a&%)ylA#)hY@BPbf);^p=*7sqH`zmg4&p0ibK>E7LQEG9&j{pR^0$~~0lcaSi(Q*0^ zS%)dnk1_mb(68f-zF9A1iAOhMRvHb1twF($hleK-1kpcvP|=<|s_t|2S-CP&8(Z!6JHcEDg%YH&W2NnnOe*_&Wm3g{&~)YdY7Fb8MwEgudu|t0NoFlOdaoAI#1NEW#+d1H_4F zIAJ*DAXg|Ppm&fnC5Hb$&Fjvzu2zU@*f%hIyV$w4VPnT^3aP;y_Lo{bJ?5JVID@MF z@cgfTUPCOd2H!5Kt?RrWpyB8Qbr@X@;j;-zKzRHd2()uKL>VWW6C9PiHy9b56Not% z7{~nW95?9TlZZ~yV&PoFpT06*|Af;GeS~IxZTO}rq>qrWB)Sf4aurCq762V{Hh0qY ztKR-LN+f^MZ=d$t0{heckU4#6_qBb+!hJczPTc5E!!o2g&}uve`#fM_m;$|jO|bDV z-~%@Ur2iXVGKJb)9<9ZMAkh?ok%(=`LEktmXoMZnE2WaPQ?TT}&!GJWvQJ{sGw7V89WZ+d3Eq3^k 
zPqrGt3Ci$e_E|6qyZ_FSC$=Gsu|0Kkj00>wO(VdgT92{=^mxZu@1&1GpGqTuH&_u7hPY|>fHn)2CvG0);k zoBEEKf+eZB^qm`ROB6cvMbf=jirjR1(&=BV=`t6ZEf$M9&$hJlHTFACwex57taTQi z#@22R$eHmrG39euUM;8%sgTRw(^opzp)e@BZAF}FzsL8C5RN-p$#&SEJg~V})K}JE z1EEA|d74ZIi1>4mMHLZu9-adxUX5kb6y74X*+V<-KYU6z)*{wfmz85hYSPY{RpThK zgp{T(yOTefx61-{;YSY`mDCz`F~*W!%d%m`<%o!j0}SMgwU`occfZ8{Bo8i13oZ{V z)&%;PI3iVaoreqzwYTqw#U9Oi{(k~1)xrf zZOI7HJ&=E*LgU3^;kY=7FXjbgZ0r$2eLL>UVc<<%sG)}oa1hg~yzb5{dhwCtmGAG9XIU~ENR26jN8MB3I*HAW#@hk9rYCiR@3){G3|T3UOkDPQTIaiVZYiF zFWJwcV;?x>*{T)UN@v~JD{l4l7!39&UmyNjvi&;E<3IiVBEJX@^Gp4iG6>E_cz*{U zu-^cHAUeo10*cazLwk1yT9yWuAk8<@SHb0QKicBbmud9}D2if%EUR0+5K=A=3SE1D zAI6o#zapj7hXXTs6@|#ZZB(53yBRnb@A<(VrwM>KgyOs^8G5*02lM@pr8yXtZ#cSyXCuTHl%UV?A#B^C$)*%D8)N@h7bDtgv92IJOFPc zq3pk;jknADH$3B~BrA{=tA>MB!|*hnCIZ`29EqU3=0#dH8GI6+LSG}hPQ3a@qgX5A zba6U+<za3TPxaTfY$NUPHk4{{%<(+Y+N=N7h0zoRVnYBNX@h<$X<#2u?|<1}m&#@CGT)gH?sU9=j}B zK5jL{)M|8qbW^9(HI6#~2??2#;lmGFH|3c5wv1Itq<%byCO;jz z09E-!#Gd^e8{(vK>!0hRM9setrHFjRH)Kpx=9~Jw$oo!|@)yfuF>y~$=7a%xzY7;H z(!G3ER#tX9tSA50lR}^wZA{Y?9czAezHUJjfW(VaT}eQzDc1Eae9v%rPKhFu?e5Uj z>_QX}G}HEQS;cl2IpzT=flRN{=QH`kk;X{NAx*>v1Q07li|5G1M&mpZA!$J-G9pS` z5h1XP)n#)s?Q0t8hdM3<-+#vyw&yd_xU_hhS#q!Qb;s&0#&WhO-M+)l64ne99xlKj+C~DhZHo3Vq>Tki^ui13>N72mA|6j53Kocx z#5rBvnG8$?2%+1}UMZ;v(NOl*Lqrp>3vIe#fNm5zT#(wAus7RATyaocW)p^IlB#FM-_BaY352tbCti>gbLXYu9E9cdOBcH-c;vmG-87 zH@z&Sszs9%={N!hg6e%mKwNcDb++%Q$W}7@(52vJy`0*POCe-OyS&J`)xsflw*(5RmdX`8NImNA9FGQa4Y1W0JpNBQn;5sj`azI0MUGPHSB#@ zc|?h)rR4c3Z*_+(tqdExkr>g@DLGEqaclraQx&q)z6L}Lj-L=j(Ebu5t9RuD-kd)1g z_GqE7gBkWBOAs4f_8hM{!4>P2<+Rq-7m6sKiGz5R>m}@)9n8VR)SL>jlFpWNFm}Md z;CMl=b(7;pcapgYuhE*=LOV4zN5qVqfOtriV)2 zYN+0MeoN2qnJ|7)ZOkKc5#{rKG-Kzknk3`n8x!P;K5znLk?n%lwxY=U&yFH3Hv5iT z;T2AaLq*STF3)~)!9T5HGZG2c>QQ^pOt3t12owptC>u7W5U_dQ`miWc5a|{iAD>Ee zT$<)zbywmSxiL3}5Z9V^!?OCu522Ob6z|HN&_1866u~CXXmLl^yoLJy>slnG2`ipY%JmLgzZI<4s?2H7 z&EYXuCQCG0>D*Iqwbd-h&o-g-EE?9Qg+qM8W4lJvk}f$ZT4+0=2-$ZYc(&p zHP4vJ7F3QlLrNVq#~vMb^pb45G&A`bkq-l9Ts}}%iX8%p`T2~y(TDWzt`7;i2EYOA z>03Q3&%^(C>RUdOjQ@3Sj|D(fIgkKS%Owx zmybh@SKu9swNJCIM6&FZx=e!uP+ynMCnrjt8NLb=R7kw_!C(t~VoKF8iBH;Oz9Zt-{DIh`M z9@0u2{E78?O(`4?o2+&4^KYZFKHl5zNDx_w!W|h#Wl8oasVoZ7a+S{I9~JBh<gWyiF-4gxU1U@QM=M?@hI#oUG{EBDq&T?lyI#u7chebn{h6MP7t$6Ut; zQ}s#+?zR&oR-Ed8WOMpGxayWQb zUrCQy_0TD2rZ%_^Eqn_u2=?AXA3&X*Hgr%Y*l>$*AxdgM_Wp*}nJ@ntW!^GgZ``2wl+PZUVscq`|b?o>p^mJ6fP?3>LP-p=fOz24}2g7*5` z90|f%bC;O-Dt3t=2?;C5D)1(_Bey?0k3D|TkoNGY`_a4+V(7$ol44Qe58TvH1x1EQ zDjGK?vwCqa!l`-7Xp>TrThKiyLwiLBCL!WuW$Tqzoh}p=nX39m46Q&4okobL8tf4@ z(^_nubx1)pz!g9XNmDRb=nq z!$u3F;;Sw&Y7`}M2D#!xy>lk4wZN~Gi%r`+7F!>NTH6}4Nb8Af`DT982i4!mcw0&} z8Rky#Pv=mc8%XhO+EpHEvl`JNJgGzHUq(@AD(vE|Rz zC9ZQ8ZA__vs6dS$6u;fs2T@34_^xPaZ{yX1 z8m{DDIQ+gSI%6kh?9R)qRs6ji%P93kZ1x}i;A4LL9tYyO?f~J8pLG$Fe zGw1fMti{(~oN9XFw{;9#6Nrc#>*`mn*TCo#w-%aIfSOCWX44cAh~0W$LWh4U!U1lu z%L=WEZAtZN0#=s`Y|=(U4j^VP*OtIc{!y?fFhBZyU|H&**Uj!BVXGV3D#zyTY!zad znqYE@gI?Pz;Pg9hQ@*n^h7KdQ5sC4q@(ZJLW3UL%T*C;8dR!g{xg6E@PMlh~a=KYE z;>B3@P|g*aR@Z{j5+(-@SFxGh0b>Q#ePZOEK>}bX#?2>lnZ6hb9T-Y3yPOmlN(SV^ zpK&i!4pm$<+ufeN@cBByE)E1+UqIbT>`};&Q+Q}}_w8pr_(2)RSH?oh9SAM2H;%lX zm`Z_Z&qAMK%ev0!tA=Fpgv=)V74gtaK?;9hj6g@X>W2zME0pnEj zlj8Gx(pDcfpUViXNxx4|l&Fd(Er(dtWbq>%1iC?IqC_6J9872)OVQnU?t0qZIsL_S zlr-~nL-|eEVAt6U@m9m@k1Wde;6ReA+V$@BF3G};#V*|HL`HR-w8tp{KCY`Z;^|Mz zds+iyrD&ExPJ{PJj7_A};OmL>pabWVo!To>0to`MVhOD!)8p^(OG>-E3{!ysZH4c! 
z@inPxh47NDYcy>yI{DIZfk4PD|S-c^nuRvs$)g!gd))oVfn}`1L70#zBGioN3jNSG(dA+v`d8sA49|4vm@3Xl50;=rxA(10Os}n9UY=ZXKi33EgPhF z7>v-aoru*q27}eatLE}Za}nT_#ZVepC2YxX-I%aMf*c*f9-GF2xf$0jN4P7Zx4M;` z?;3*SM3=zDhI$KM4T;@tuSnl2j-8+}t73NQUekEI-z$ats<+BxU%oaMaM)9#CI#CU zvvb)Tw@khmXwipoz2B$q;wJ@fn+&bu5A={`Om-P~63GgDo9T#Z`D3Od7eG!DfsI|= zT*gyE{vTLPRV#XqSr~6YGT8@E&T44r?;Ai8 zvn!I&13Qcvgi%2!7V{2+uu9Xsp4KY8QaAifwMr=N+*Fj)FG^YmCv^bPInV*p%*(dJ z_do;tQt?VHX4Uf(Ooy%5(zQdSKo{NGrOpfiOJjmO#Ye+^6D}KmuIn-)X5HLK;Q?sV z#8yWW4yW*}Izk-aTP8Lg|M=Yeb`sGc3W>Un)bNk*)q*oWJlTCZ)D()%j~7>l97NYN z1VUOrn;i0*xYTpmJJtkPDoq@3&x1UQ6Oz3ydB<^ueWt60Ww}=$nLzt|fm52Zt=DF=#`b$~#;Xv{A$U`ID38!vXP&uG}AZ>EYTg@MqHl z?^LR?a?889(N%G=$5wZc_<(C#o4vZw1uJgnZH3hCiA2Koc~6Rm3H2~9?Xry91ip;C zav%K0XD3xz8zs7?egM+%f{wag^LJ#M*_U0B+S8XCs}Gn;m(>rAm#kgR)Oc~tv!qbx zg&kB{R_jlbS4kA$>#+2{Duwh)RBS9U$kHWq5*e-h^zL4D$wpU+Vu{Pf`i4rz>I`tX zxmvqE6Gkp@z;7OhILRmi8eNRHY7WKQ%GamS=ofxlKT_7>oz`NgjaShb^ z;zxIU>O73B5~h35%r9bMCdvH#=AFULdM_r$FRs#SL4T!@@F;nRnV|#MT7=^8yOrGC zAFNY1RlU>&AV#)fl`Dh342{3cIunR`W{;-LJ#6|mwUj_Xw)lN&DeKYw7T(X{UerW9 zN9XWk2aW|HX*z#E(c0&=zAYWewT}ASo4X}!QrAJ#av{T}*>L}adS9`#9L8N!O>KzY zujF&g%yM3StLV-DkG=PdiZboG1{K8s0xC&CvJxZ;f&>YQ2$BR;au6g(kt9V_8&Qxb zl93=FO3s-=5Xm`7DkNtMp-4sg&TZTC_S4<-&HMJu{Ft@oPp__~SJfS^>pJJ`v-dt@ z3U*9GZ-+*U<_}1MeX{qS_%g%(4~D&bpH&hQz<#S9#z>bhVh0C zIGz#tf>+Get=FtC_3sToLPo|LLo;K~R}!a8fFee5>wA57h<{D4ESxx%bO9aspk;8N zq+X9-lL#X>mb$}KUl>m*7FyzWx@ePPp>ZXJ7(&QM8%}v$;B|}dstvM{b`vSGbx(pB z*LA;FzzBi8vnkdA2p=*lu0^M0ojv5^x?bmfeG%?sH`qJAiH|;p0-VHoY4D!5{wFit z;OFXK-1xiboK;e-Qq3M}p-j<_GUK4h>EUg0A0swcCf3fRTJ|hKz(1hi4-B9Pj9?BR zM4S-YCon8!pK4Y+qo?0q8Fj7;ts(pB3=`fkJB`&Nb@}s4^4%E{>O7^T_xf=ebrV4z ziw{;l-1d-1kd00?ZPH6+fx13SgWpcLe^gyV}xu%A)A)MWf9=nqaP`&25D`- z{mnk2yCcO(wd6QktMGchk?d(R)tV#mM&epkj&Sg%3{APQZEH!yc@+-arA$syT!2uFH+bjcq*2!Pvi&aybbQ&nC}hur{=XSrO|({ zc^?ApioyPxfoCrNaYEzD@%1mXb~yhA)w-mh!VVj#q2rP?4p58DQSO4=eA(_&W;D=x3<0rllrU@ zL;AO9`AjwT_w-K&(m(E7owtD&j1V>|t}a>jyl!#5?WZJhJ=x8qyy#QRDY@UO#R8c7 z(bkK8`k5P$pMhcY@3LZ~A8=*h*8H0ns6wM=v6r&~frXDV+6DW}%41k7P1ZiVe;aMn zH&a@*lecvyN-voD!^ut9Jv<0xd07ax@639?>p>^t;$jwSe>1sqpv)D2G{E^m)Hm-zzgRgxjy@?(e^#ud&w=ADFHJ{M#gsqe5c^<{(}*Wn&A^+r!B>lnv(B|*KPo^lyJgPe(0Ehz|^&@dx) zQ7S2vy2Grn5)svH82n$0>JXe>$vG4u*6g3?39szYQE!hDkj0U*Y_aOy%noc(L=BBl z>+?ynN#yG?jgA8Jy#*B^JHBxj3_|ej`aF{!Q<|)8HWBakqnu_l{k%5HM{fZc%mj$% zQ3D|!{p6;S_|Bl6@}BckM8>-wP*dGDJ`zO&{EgQmn`eFA8&Gy!zQ z?|&A(;Cq$cefioJ{LNOE)q=$0oI@AC+;uPZ;ooAehi(Y$E(d#>PdBgxo!D_>=1Mug z{hOd^UpWHJRVp=!`0jFP3N+rlC9HmwIPXRm?rqWx$)}sJ!n>~6MDIFp4G^ia;wjV*M$!FW%27xl@-jg)?>qV9@l{!EgTku*| zLoY;7g#vSENwQwU|9k2j?daxnWHRw(K{jVkn)=wUd`7cpd}7Qy2HID|CSH%$uc-%0 zcb;#7ZSqPr=aj-%4#>dThGzw#OL__rDB58Dy~u@umznC4pUS)5dG)L31C@@GP`#3k z-z_e8!T#z*^VhTfG{O=(6za$IE{vY;V0Lc?^p+RcC(X9@fD(C*U1GoDQOhRrE!9srJ^AyBzATIZ9BdDFk1h3MmGY8qudJFQ~8Q_yA*gEp#%}w>Ul45A%1^n zrkVd-WzrZ7{-fkpw8K4Ubig`Tlk_#e**>57Wqe?Pwu?~NRIa_>ifQJoQuBQNEX9q^Q}*TErW%-EDT$6)3R(;@9kTP4!@U)f%>yx zK?sMpt3whaCI-}OBH_pgb41V{cPB(o^a1hv)!w@P1y*k{MiL*CeZy5%Nb!}@BeKo^3MLcnnLrKY8t;YkDm^}LA# z!U}Yd4?7=}yTFrF9Nsg-Nb~*On3c`8hXcpx>@q)!hm z2G@t@%$Y|gpMPBm_Z)17p+~Mzt)&>CJ)gw&z1D?+&22pIHDu=93e$80f5%P)r%%bA zW5SQcvS*7DTMcO+?jL%<;I^2%$DP_dmaM^J-l=a#nOR54-7&lRXl=b0$q;vxpnUdq zbX@2lE8LsDf(`*sVXth?`ofd%T}U3<{ESr(0sr7PZV0~NbdSM?4?M8)Hs+uTS(D7T zyG}WyIYwQz?33s5g1vq|ApdLotIM8;B(^-pEtR_M<*_99rWTT}mXeUVMv zu7TnRO!t4+h}FF*os1bikECr4WS+~moZG3ni!~v>Cy0zId)J?l zNodOV`*{XQBqaumc^Iq1vI}Y0rHewzSOGcJg+J49;jWm7p?Nmf@-)fQefo~M&RMz0=r*0c-BU|U`jvWl8(;>@E1nJm4$=0~>hMyZK z(LCjg{QH?Gr#MXJ`>0Z9C_$2NYfOX)0-+l@%^XmrKUTwL&V#j$DEt>fR zz8`KLyJOoqnaRst&*YKZ0tP6|$L|AjL%w^_(MXawyoUYjiwncRJzBIadUfK-U5J-r 
z6(tB?b$5HC9sNY1LPvXjKGy3jsB818k666Y6q*p1*{LIkc~F^SzvI_v|8vxa-SwBrc92IcByw zPd%!;fRuJ!U^n==Y36pV#rwFv?_}>OUh_)SnHPF#?K1l7XJ!YrYrtbPFRSLd~3HPs&6n#GCcTL5B_@eWrFZ6Y%FDHGueqDu}Qm2Q}TTEW&C}QTd+Spp@V(L5{i}f-q`7YpK3MF)@9p7{A zhxAQ!Q4?I1OgHBORtld7%p|(6jr1(3NC^p^9RwoI+2hLxop0)%&CY0N)h#er-CS6= zW#YeIzC6Wst=jPk8=At^Ov{{esK|yBrZS(46zp9`^S zoQ(U+wPcTgA~17#$0o3o)gWso(X=(Xacd@?7mszSdJj_t+r0KRn%;0ur9|k5N_gNW zD~P$N-a=exT$=4b@Wm!ns=b5WjXVO?udhNjZR;SD#D3tSyX8Vp9r7ibE#ff$M$d%mcH^S^{)%jgoD3 z(K)jSJ?ALJSm92z@K1D^H?k>nrc*y4XmiHzy20lpi&-Bo(Vp_Xq2zt%hfE3u8)?K= z_OU6qPrZBGCLx8ZP2vuPb$qZE2$6hTt9j+t zzHVKIP0oQQM#nQSj7hZ`5x!)j?`u~^pvbASOzhpB+JU(|7PPmkEMEHMJtY;%tgHN- z+@9KFHka!5JXgZs7G%~Zci%?ho;{!|;=W{Ya1tq&d}%8uLlsWvA3|IGP>ZXH z%KMDIbC1|PPRu17YUEG6VTY^u$OI-ZE*~g^3ul39hW+~7M`SkYnR~!?Vh`hT_+1{2v60eBu2(H$i}_NRy79uRH!yY|93f<=VS( z+=bC>FDq@{Y`!!lnA#$loYN^jNCGbh#AI%edCC=@;_NC-(aG|vuY@$r(_Xv%8tPp& z9LL!GxHRe$kC+rhuN8+!;O(g{w5naX__?&9a8)I2BcyfVrxDs9wC`>ml&Dr#qnPvs zQ;8;Ib`xr7yNE&rHPo|^LvfIt@GV|`vNeT6C$(M*N{~BQwKex)-GBP25EXd62qr0B zS9Fq`RO7vNAa8$((F=>jDUTInPzsK2FK=N(yU7HGitTs>Ok3nyvexwcS+p6ulQ1AX z*|nLa+(DT&L#)Gkok4XgU7<9d9Wz=i?;&astr+HbA3IK6jjNqDQby5tCCwoj($gKC zOzHEZsD(^V>Ug*BA|e1GJ(|ywII8O3k#E+%UJP)XwAbvo?tU8rzId^v2ht?er_q}Y zC>PYn)tu<U-62KZvTk%- zN$`d6;v!WsjtqUYBd+2UiJT@bp^4PxV$xBPWQlMP`Q0(t(9nQq{jOV>T26YD2YFKFIn z#%tivD!8ZeRoh);wM9V2cea;|m{el(J~|z@VLKbhpaJ z?SrM3M7I0Z&GrBX3-6QOqiemfPcmv-{+M3x-n@iQ$qShv_R3&H+r38IfMW6|)ySmI zy9(&^%D6u-{wUoHQ*pcaJvWW{daZl@Iq?c3X(wlx``e@qv2+^Kcch#z+`>PRdPxk& z{n+RML99H{@PlUlkVHBentl@97RAG_PH)=3YnXM#Z2C@d#H6_R%nDqK$|L`_6B`%9 zoy}~p8j1o8GM!{1y;gl;HOOW8t$3_u*NpO6%axRm@Yo z5x&9>z1z2cUyoO@V=e-{ld^x%t%y*Yj`eQ*`}F!`WgXknl-Z6Ks8^BvZ;axfEsCT* zyZf;{T{MGDT2rI=)<%p{eUyH3;=b*I9NECz?Jn0I-M}C@ruJ*VXi(B`NGgTHq1ENS z#ZH@jxP;j&A{U-Vb63Om;*2+FphRm6>Vt0BZCImvo5eNIOWrY~YKzB?WJXz9j>=8G zqPx-bsC<>Fmq+utR2#t*hP!@)`{b6pQj^kL?^n^!$r3st9Aa=z43rM6TwD?4{ov12 z)__*v@63a-(pnJNE{L9U`Zx!9Xqxkt91O{a_KL3ZDDrL~y6%i=IK3DK`y=t?vtP7z zzYi5>dlmm6zkQVnX7k)1pW1s^k3V6&8*`nllKc+#X=DH077yx`XP9Uh4rk>@@qGhq zX1{nzvaKh$2urSiebCF(CC8<(p~qW&2HcuoF-T$z?qL`E4TslkCmq|_ZWk}P^YGS5 z9Nn^4Jo@2oyzIf!GFetMM6SM?_YY-a_*`}Ni@sf@2sg)XoijU=Lj7Vx*LcB|-S6Gs z4^iCF-D(Bj0R=IOQ?3u)zTZ$Lt|O8;)gczdfWK<UBdc0d%OzAt+rv*w~Mn7!Jm%?>#`+wGctUPQ}074;XqIj|}cy1={ zTThJc?`L9fQ&ec%+oI-_u|5m%#9*Zo!tMMZzx{Ip5wJpfgxntd=;b&?i z8^7T?WK(?4DokQ<9HGrELKy3vevh958`E&~-dgrv&Ue(6{I_CY^W@%XcyKLi$C{OH z4+}ySoNvwduelEf{cj|Fubq^zVj$|pMoXOQ7L>8vR~}0(y453+%3!9tt(vyB{Ib~m zoyeee$=KDfUDXEra180Y$L%sjZy^JC{nrd&!i{tGXeAKLZlR^=+VGn~SII>xDXczo zB(z_pFBe|Q=^adlT&F>Sx01qaA$FL0#*FQOlCz)tYl@pY%aGx`qzWyl4voZ}qIe(s zNPfJ(1B-G=4lG;V-5A|oKwJ=hA|Er^=)FOPCcMEuKEmKe6-)p$h9^NX98S8O?K8HJ zjmC`X_r57e%cwP4=t;gsz6HeClzZvw_cR*kX55!_{O3d|oX3^t*aM=$CsZ+BVg!5p zYX|1qP$xk-CS}(Pdh*x&56%1-lV?=8X1YG#tAO#`w(%N5RdtJPtqs&^U+tTakEhL& zM*Av>T5sREEq(z(=qBL?*5Z}2hJ>*dt2bnczcRIUtXvxco5M^m>ViG-ton3P-t_kK zyD-0;32CwQ5zc+CQe4b0%Iw%x#!3E&BY1S3c}L7A=3$JZf}(g^!HB(Ha!Av$_{aW2 z(LR}8IuGgPozAD|f3XiAl#MW=z?L@jt*YQ%LPxA10T7eahCv>ZNaE{mM_75t0#c`7 zkU^!4eh4%gX)aG6@5rpU?RSSPF3R&k#uF)0g7hmdDspZZ&#Q&_;f6ZRuC>OB+;L31 zmpXh{SaN3C44>ZEpYdBk!riq%eLAWQXa#<_*Fa*m_ETplflotgwtId^WD##67fiD5VMAzd=?I@=Q$j;f2#x&4fHkRRTv$TH^ftQ>0D3Y zR38j9GfalNVcT89DQ`pZIqDgM6YiQ>tz51l?kaP#5}8kXx5KpP^mx&lm3%v6m)#Nl zECYD-$WbzuS|-~w3hxES8~~g0Gum5yC~ZIW0*|r?=sX;%gbCx*2x9vDSI;H86%;Ekb#v1e@ZUatN;YIgJ{znms&u$`3SJ<0C+#F0T_{-}`R7I_C*$k7vL zzImObvPd_HdASJw*{sDUEhb3pz^z%Az`wG5ITb}M)}qOyM!$45VdgckVbuafrXx$V z=Ww3Fl45k(2Gw%C(tEo~DX;fUQ1uUc%}5cjDv2Q@#cj_%=>q1E1^o^6h!sQNiLFa*jD zX%SE03%90Ynjt|sSIQaP$_s$dtG(8-+BXdqBAIE_%I7>c$M%FS-=PcDTkSeSh_;U)x76f}V 
zUWnKX>KFdzeQ2jVaxJo}^H70zrb~;}wmys<4Wo8V9?Z1x=qvitiDe|0;3HA{AQ4of zKL*$V^Z8x#vSs5D9ni|k_?)rD&&Lzz7R0_-ZF0|Lzaj4ojin|M0)VY#& zi4Wq731olrO)eEBQKf%n2uDIT!psO1?bp#;wBwjz-zlsmbBgj+;~qoxje7k@=feBTQn7& zTdqKDBzxKom*h$=+Ge$1>|;u_D_;^qa{vwM4G=y3BzPDDIRZUa$K=o5`cfVQ1nVuA zi3PfGa0<;Xbf|oAJLpQ~S1JEuRLl9dl37?A?s?nXI;y=1KAdLDRGYX>GXP+;`y$o1 zC3g!9ZYW=#g;M5+Z>>HWQ|{ds^N52nrpIhaL3g*$qc>JK)yl7DZ{$r)0DZY#0oUvk{~d87{lU0E`Me@b|JfO(Pal~ChnSDw7Js}$?>k4&pv3NQYg3S8;!qPA`; zePTba>e&&O>T;d@u5GfGu)Pm?hFY83R56>@IPnauN$L)9vKzyUSCI83<%25$cZR-u ztj%_^Y!yGfGHjc&VO{rCl%l>WyZpN-+N&)4`fV_{Q_@BeHeJVN85d(mUh2tgQ)R^qO3q6e`eQOAswTqS4#P$<8)PhEfItu!wY?wpp{+_zaSu^wDc_ zFFhO-kIq>~&rsTiV#TopEz6W7dP)1FuI=R6IG(mNNYS&0q4MOcH2lU{IrCKm#d7Q} ziu&nYRJ(XK?&eq;j{dmnoZeNvIm&2${oTgMIZzMG>nGm)3~U;Mh=+OYD}mb;5}5(~bDTNK5T9#}Bh{ z;ehf`&{3hlOD;*8z)aL6AFH^LxZvRADP&o}`mq|}p6DBDR*?O$93lMBF4I?UH=FFT zv7pz9*EOY1bzj?tylNxBksXuvT7q2PcSWNaQmTGcgK09=Ea|;?=Ru1Qg9i(AR}iMR zwUZUo7D0A^{hJznbQv+4h#Y#Ddp$)SP-w?_SxQeHWxGq}qNBe_}%|+Hdal*J77b z{)qO7&}`=F-G}4aV&AS7ygdIyA3iN`hsLTZzWZD?A^dY^ADNmZ15YlJa9bu7OQgEl z`Mg$pardDM47secJHai*m?Q-}ok^f?t@uW~+B~h^tm3`u@ce#wrgBv{p?dPPGR)^d zj5CI}L4x{C@{WiI%Qs(S00<)Q+pl$_BiaPA7df4uXg5cUhglh!o@5)DM$b6>c|B%s zgnNce@U2peYk)WbWE!MVBZ<>T|l+|sLN zK@EZP{n0PWgtX>AR`J!@IW6?9?SJXoc+C1b(}%WAI@XupHGZhf_Cu_{wK&-TzRGXV zmZ(8BMH9IDuc*EqvUV6h3v9dnYc)GttF0Ck+}9GGdwU*mYvwL)09$w~uCac1;)kAX z#c5hYEC%^2e1(D$`lt`XAgl6R#YxZB`BQJVKUeD2-(~ay5=`U0!D2h?C`p>ja}$1K zl^4ZpUdbZJqY}e!V0p7E*Q3!^y9t`{BOhD{lf~%CZKle_2=F@(c4Jt7ESlK8utM5M zBXjrVZu2h{Q`y1PO17xsCp&Q9Z@MIa{YVRX=yEbgp{Z4_R(wocqh7O+*Zs2lp9s)= z*)y|>L`#L=jA9dAO6RKvK_k#EsCe@#@s@(~h{c5hFhP|1HEjdI_L^gQMNyxldek4dHDY^<;6 zbZfu;`YP4PHiyB@)Odeu39{0StnE@+ieRszpHqM}AqO$VGk7WfI1arMXiBZ^OS|E5 zb3c|M#le$KO7d8zr2HDsvuK9XDXtv;`4oV0T!_8E%VMDL^x`9d!jh;>w{KvNfWtb= zZ(x4BmE}NfC>eDb3Obz`gmjSAR0jXHwTi{2_q%ez$!;1mP%vBNW$qY~37oxbmBwnvl z1^PNm!s;9ww&?{KUR3>ks0@&T;dYDiwqCo9sO5E5FSC^=7TSFknahxz-VOw3%})Q5 z1rIZ1z6se3CJ*nyw=$uWdsY2ED&s{f`3wQj$`l=X8DBZWuuc8`{P#%iH~x2$o=1EL z+S^T{9yEJ&su`+49zc`yKR*JBr>sr1bPnft;BTl zmQ=h+s`UH$)h5*1Jms8VMwlcZBwP8H?G+}ij4qlMewdDXimI<$9;@vZQsceR&7`_^ z>MTsKqS)2pY3k^|cO`bID&3Z4z8xMm7HapsRgtwe32}_0oL=RECK~5b*g39gq{hZw zghtR{Rgp`y4VvkHcmc#?ZHao$83{Fw?DEKc?!;cF>%O+c9L>Q z4e2g(xp%x>liq+di_Nvz%ZP3l>mThi^_f2gEeSfiJ#*@p5*5&-ObXgF96TTV;r2Ll z_~)EW)pJNnv(HRb`H-jEUsD@qT6M~B4brzstEx4&i^lpUiI}2dWrjZLSeaNF<4a$x z21j9U6E&DlUrz^90BK0F+D>N3r7xVkkAGE!NX%+{h<7>BYqkgIgshM%PT&ddqb{AY z;(ksinarAG9!~6z1@Wx!7%FzbWw%3DQm%jI_1p*@s`w45#YYXZV#{86?P6PQyfuI2 zgJck#?gcxhWNHj!96Wilyb?E*t9+=6ytW0g6tD111sKC^5Tb0gC-#tS6-K_3>8xBI z+_d@ncz4}yfgm)lpd)c#g1?kI!D|NW zH)^qy*N)Pi2TX(GC6npItV?3xT!PDaBuAa}dP{onBa6VSULnwO!f90OVvUaL2WfJXdc>sx zZmBBgnTV{l$r$B^?^BajE}r%Um~BSJt&OOSF^4HK$(`UIwxII-B99$UYyvpI1UKlu zC^k>}Gw+JWJnT|6s6ic@JpYZvOn*-AeDKvOoVX^@9o^l))ku0#M|1vq?xL?r6h>U~ z43)%Op>6n$X6GRrjX81k#7A&6hYYTQ1pTg?$GBYr6r}uJ3pUya@*ua~TP~c^RW8+- zI!#@X`SUi__}Ziiy@oa+qEn2B1Ws_FtXgyhNRf&w*0;qy{>O*%sM3y$z)w?Q{J~&n zI4UrFE}fS3WVn@4m0@d&$XTNEk4|jA9h=r1;d_#!OTJ2C#nalT!Wi=4>&6MtWQP-( zUo!)2{SC$emA+P8?qAYX&dIsOgoC!Fx{Yg?0Z!U#cX`n5i`}=QUwwvM!qzYJyWRx_E7C zZkZv0VJYi0ZJ%!R>qIH$`W)<8P=Rw(AoM!@Ig$w7Erc!GXh4OO(!PSf{ zx92eroN5nSPHU~Y{QjWQqRd)Svwf@5S2-Bq@BXsX^tnQeYRTLhR)dOg0jL@Oa0{BM^o}v z+iEYK*H7I^b5IFBPMvr71BEI5Orm*~J)K#n_Je6{`?GaUJmXi5r?(Y#3ZwSoH;$71 ztjP+!H$Is(tvh^FXne49eML|rDV9dAO614ceWU;SptWy=Ee#o`6#YzVI*9dmp$;GR z*ce%95om9KFl7R{1nN^>tS5eM4{I}6@N(D;`wS%l7{gR6 zYet+^sv>b`wJ%U@xFbo%f71I9X)wUrR|j^%H>x?mU4j$T8H#Ph+lHT>5{~O#zGw9V z>9qsjDNAs)?<_`!W?b*^^p(+aVHthY+y$L-wP5F;4>>sL1g%huBq?q*SkS8P#TAMy zFa4G8dAFmNfB=yPavcR|#qXf4aCUrlCC(PW7kEVH*`VtU$wPigV7__gC5OzCS46*F 
z$;GsL6v5Aa&R*;T9Ja_Q#H8o`O6!Z}vNw(WWexn6d|RfUO-7m~t@MmtZE`$2(&~bt zOJ6+YawjLi;nj>|^wK9e!4m=_4UCX-()pT_cB{qg)`cweTDhMOzX zhMtY=&QtFv1ehEj#0gMnfZs071B570jEBT^1JJCKf@b_$dZz9l96(n$=lnB0MF3-fpzy? zM5F$}mBK6K?A&al?DFeb(pBrG)G1Swv@(rLPQWD_^E#8w(BxLDi)W~@D;{yq142a; z^m(~Z{q{h;6u7zk{I~GN*KFGi-+etawbDGyhBfuQrCJCeI=}TQMt^tq=PnB7t=ha^ zVdhgm?6>JcZH8=biyh0w+>hQ=b|8un(?WYzdVRVB#GM`Nz5e6dTZpVn$F8z)(1?3} z0j-fW{O0V83{5X1_tpjF`0H<y&z=IS%cnkS?Ppb=b&7tCt^=xE6QVHEZL1IGR%VNHaA=K!HTKKNNRV1ZLm^WK(- z(Y})6Vl%(F0!M-=-h(T;$`62|_$L`6)SJrfVYr2{w;JON#^C9j;tW0BncM!GG8Ms2t=B7Zzw@x<1u&>sFN`4|Mb@oHffC(gqgJ7meP-)+8N;o z$|0~70O)-S<6e1s@r!s~)~$!9{1>oAE%fF-gcS$g7MrIbT<73)^El>(#=VjH81jYNU8icKK_P0m3V-M+x zp4dMWnqfXFU09(2(;3Nvr=eR726q&;X!3@i{b^KiMeEX+51rit&LMewDGD2 zE3J0=Ylib0@E~b}P|xGAs+>R_f>__-CILM=Vd@leBiCVIL{Cted4c1?$tRBcW8o!F z(Trkd?+B?GN1u{YI~|?Wxx4oi8|zN2tlL8uY^S<4AOf4=Vgdub1a!Cef=jifI?U(* zvHgN3UV>a=4J&Hfml5Sq$=mBGMxD$;*TwVzA>M;1Z=E=Okh^li3s~lRxti+gruS|a zN}}xjNR=}00J$fnqGFggaG{O^Oe`vOfz7d3#{m+WypMKU=3Ek6D#o;1RNt=U4727tLH6Mv`7e&QyS#)0O!AMGc; z>}l+6n17*CC>%AN?e>c%fc;u3>0aR(nf)3QvJLxHN9(FGZVCE`KX{d0VLZYv)8N{8 z7${Tm?OT2RcM2X>z*)wo&RdGz4AY(b;u5(U{Nog5@{?G9qMD|g`G_~#g>$^H1o98F zt323ty+U-oAWdf#B5QqhgO_I|v#_oX05 z6j&(f{`G^W=%SP$5;9FVneTkA;{atk@m+b3n)f}U2Tj-FSA(@&KM&Bf$=F-wXFI|> z?LM9hBXBcL0<^_x)KnHS86y}+$$Q@5FCUUZez}vKRQ-nZM1kj^8qTFPj!uFu@Sa|| z=w!Z4t-9y}vLs&W|h<}@ARH=bqP!M)63-ne%p1fLg;r8{aLM7d3=_;NSt#aN; zou6Y$7u2G0zYp(x(nS>st!o@~Nx?^RQ5sBA?m@@j>lRSwQ$VYk5+IEQJv-IGHtAfx zd~&ctL`=DuDUhEXK+ZR3iPEA#cL&PL+iR0nfB8^A`pXD7>3B=bmD_>B00PfmogN9XYx#Bw z8|ePvNRX?`B(1>XmU5QS$WH@wzlKdr>V1O+y5?Vxw3UFXh!FFpz)`{1x^k`W^m=g{ zz_TV>D^p!S2D9kP(1MArN8qOl4hxgLQODq@U;xxl8-pdVF$g0t+*gfwtZHInG7hVQ zU8;)edHA*0jaLqF5o=s9AeL@UXq=2WNR|F;ny9xu0YvxEcY&hv`he69l-R$Ebzf;j zY+DD6{JF2gI(Z2+6)Bho{YV6y7mNY_{<7wO1fHTeyu8Imzi?&%p8q@B2F|UMIrkDO zDs%%S_Z}m%M-#A7d-Ub3SpziSQlLX)QGOYj<_%cS6y%<-5s|4yzZoHNT+2be8BDYa zj%pNOR*-uBni%{?HaH;vlHdPFvj4;A*B)RG^$t{Xh9kNO&OTo4Y7w9(OhVLo130Lx zWrzgJ7Gg++2y8r0iNetg&54s~%+;=GL`Nhc9)`gE1-$aP3C^gguyo=+MCvVofrzWg zp!Q1ZU0?w=_GeY_+i-80Ui^apwfyft^~MNZHTrxE{n#Cf&ZeKHFzv$fLrAG;(fjdD zhF-ZkSbK25Dee{)kxL4TK}8AfQjpXW<|-ZnN;>TsA|*&BS_n4}h&c$v=wGW6N_pd| z(|F}CAN2q5iL0Y5fqH%NDKSql;xN*F3F?rAlxxq9gwWhmRUMOD)%0%z9GlX^*#e$C zOJ(>QWcB`bU?%am!!XOU+87=!>wD=Tc(i#381Boa<~@>GDdaC|2w)v;xT~HeSM^6` z=12L<9J%_y6C`i~7XNKjJfn30$X_WL8C)jK1?Cxb(_k^!4CDx7kfHCzHH$43yoR;K z#$nKW9bQaAD*SLX^A2z}Qz3QwPug-{real}Ot|W^#w#p53+k)}3(U=FpC)5YQFH#` z9(jro7ghJu#p!_Z<#(XPe#vmhv)}B^eAs4|}>;yAJRX%dcVx~kNGG(;L?YjV$SoENWxJvA{L04m_si5(<2peU_`jb z!+;{W+ROuIjQ!bBvbF`GfinchN;s=s8%C_>&Jtw7Lva?=!uv?QJg$=PQ0(E#1%qP3 z;qP?yyjx}5QlR}nb`)3)J^?DmjXhvWO&5LuYC));^T37dA>#+&GnI8i zU^*e<m zpRYlsGhN`N9~*Ok@XPRyqL<)S^jj6%C6>|QoyVZ&{L|9@xtH!I!TKh@lc;noEV3Gu zVN;MKhJ*CJ5x^z+j19;B!%rYBl?N9CV`#2-<(u6mkG`vBlW<_ldHCl+@Ux8iKiy|J z*QKz`^uSqvMI$JmdOO}$CdR_p;+P{Aqvr|g-$qb~Vn-`=X)1Yac{%u~`q3S3Y@^qS zsa>`wb#%Sf|8TPY(`8d!2LoHCy93}NolJokndsWrQSy83?Sw7lo_u0N1m)p10HvwU z3O~GmDMt15b}C$I%YWli`P8L+-}f;+eXkg0WKC;vhCq-IU_cf!aa`~$U%#Tysf23H zGa7e_Se3RZ)a9rmn|q^jKJMHRAW$5X_VZ=EbMy1@)X{5&$4X zX1xPh?SbxGsOoOQ0h7DuT`5k%VTe+IO7!m96~Y91b5DdEu;Et!cOk|M~`* zSvD1_z!L(l9Cy=mPaJ$lSRjXJY?{IL&)8AS#`+u~JMA5RjOj{%t;t06q}2nKr<<&z%C_n(#Ve|*pBGj zQL2#aLrh&rF7}sqp70#Bh%}5le&7vFUIK9Vj#!WY#%Ps4gfu{=$d<7yuS)K98FN5I z_7SHWO%>Uw7#sY@36G6EHcVV%Ow~Tl;fvTLVIT4A$P(&V`TkmWJ0)g=MAGAp#*2*A zsy=H9ttAg`J>Hci(U2`kKQ7+(l5IR@T6yx-z;y$ugBQcTyd<1lxbZ46j&W3lhU%}& z9M*at#L*%lQJO%=`|&Lk2M-;&!BAAz!byM~&`s7L4iO4y`F<|bLE-)nnvf5e{8BS6 zYL0#)NBk{f*4q+W=i6^6-=?%6AyNs6%+P!3o*BTSRtr-wkYwU-=eat>zoLsgH2&Un z1pZXqq2lXbrNg;b{T|J+VxJq$^{Pz*0aYwuAw?%<>KKLc{k0<@)ZvfoCR3O(txa>O 
zRNUGPEl~m~ApV|HH*uvoNTg!n&1Ili6@D}_b!#%_$cwr^ZHQmD>g@wdg!@DQE6e?| zY6Jj8$j75C;TVI5LX&z*7K}CFIAQ-a6%X%pt|Bund&QgMi5rm zDPt+TbrXE#FCaehH2<*seuIZdnqqhmy231orP`nbdRUQs zv(rs^MLOQ!{X2TrF;Qi)tkWG=7#>g6L;3U-S5ohoItXd|6EKt zcfg<_uC&wg@GA<;&0B%LduFZbt>L)KQe|tvO1nn`Z zvS9;2Rmhea;_5%V(X{mQ#=N|W(jOelSgVu-``SC&HhN<0Qd-nnH%VAWUzKF&M7d}B zzdG_O9sToKuz;_Bf58Yg%ZrO0UAo-^c1M5^@wz_k}DQET$0q{Pn%*`)}7d3pa%)vwY0 z+dGh|Hid&s=)LUFDL%PxK>c|Q6^dAe3=nd65?{-`MvX1x96or5P~GliscV*;tBE=B z+L;ol*`y=#8Fjr0?v?})D>U@`P}FWqRgDR8{lk!WYmkmwTRkeEJy?5qu+WMPwhED5 zFNXWZ5o_wI*bB!Zd6p+5n6X^MHYU(+Fo8jwiOpIk!5ix0@E4y8lMC1V#yl8DAJT^Z z!*2gIxppwX`x*g4N{MqG{<6jImqXATB2V5uc7|~on!X5=r&gycNpX_MZqpKx zTXz^{L`3+m8|`fVcm#Z-=R9WDat;ut-KYNDl>W%ZGxukDk$-sl|LF(50{l8KjWZV` zcz#URYj^F@h*Q5$aq18ibA91)gSOL$txr96&uws%{H@3toO2(6Z_~f~@>npTgGdy) z+_z(C6(XVI!)tb&py-&Q7W2wtabhMNQ1OOAFNHjDxTK3;5vBNoeCVfL#?kxK)4yPY zKd*Knkn9(Uhz|xrZ3SGJ*U)tEEuKD%{XQXHQ63z1GDq+Jmq#JF2EVN*L9>wGqAR&$ zit89v!U4*~Uy~TWc?R3%5Ig*RqaKWboir3nZzEfM@(Hw}ZiB6M>vCUgr@ry~)Ujr* zr(h!+pLMzvyV}t=pSuiZEx!NifiD&f*{%Rx!l!upDVXyej=bqc&hiiXZqP;vK10%L!N(FD8GE%mX4ia27TmFt(^PU9$WE@+%j%H z^6R=sGs#Prx_@5xfY}(~bO&|#CIOL(E&AY3!1sS&`J61ThD+u&@Y;3S^vs?=-<84T zE}{VQy1)583W=G1UgR8OQxpRB5EmUqh@TR&^Bwu0Z2KDtz>+|6m*qa)e1Bxi|Nf{! zEiuf;R#3>TT@VZa_rF_z)>T{6d6vB&m-|kP-b=nV@>7pY^~8x*CTm-`pb%jY!z9sv z_uZE*d*r|fg%%S{E9xKC`)l8@XP+!JbvC!66|!cBL0iW`>U+h0JjnEU{k8T|) z=cE4f0sDJk%F%*w)H>JceE7tvGy`;1ScF^qbCQ_7Ny^{_RVmU5!zvj9TCn2n9RvTt zJbt8GCPT;n?gK?aOGBbcCgbS!#^_wV6+xDx+J8Vaf7mOMG zC1eGeFJ9hczF*``)q3$_g_uAsD^Y_Xe@n4aLJcRQ0j0iF)}8mt^Zp#x3wDZ2DF-OY z9YdU@t7M)ASrkvwl3yih8YlyKwnwP#6aTk5Dl3?Y{ z7@=`tT8~pC|7kfF(odIJlqUeZnP6_Tde(3yJJ$Lif<7N4_Ial+bsRgqNF}#1mODJ$ z`<=84b@(IE&e*$;=w5u3kr{Vrfk0QBG@p`!IgV)3%JHhm6~Jy;jSv09o!i5m2l)SX zgz0{~R2f+i2jfr#2e&d4T;?((LLe!t4p&vNm zlt$tgX%EUC?(%r7cyJ&jVeSj`WExz(*pkJ3q1h+qnn8PhRI~KK=-I6=%59{7Kfs!1ZwFmORK&`niuA(NM>>ayN(v4ioOx)Ezl2E&h;au!1_XlidzJI?XsZVeU zQpZNpF@CE;?r18NhdIV(TH>&{qd`KR+GLYf;%?7_#H1wVpPT*X9Ud(ofRP{B!Q3k?_Zn)SwP;@!7lg1iYL>+6=FW=ER>o? 
+#=
+```julia
+julia> using Pkg: Pkg
+
+julia> Pkg.Registry.add(url="https://github.com/ITensor/ITensorRegistry")
+```
+=#
+# or:
+#=
+```julia
+julia> Pkg.Registry.add(url="git@github.com:ITensor/ITensorRegistry.git")
+```
+=#
+# if you want to use SSH credentials, which can make it so that you don't have to enter your GitHub username and password when registering packages.
+
+# Then, the package can be added as usual through the package manager:
+
+#=
+```julia
+julia> Pkg.add("ITensors")
+```
+=#
+
+# ## Examples
+
+using ITensors: ITensors
+# Examples go here.
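Since the examples section of the new README is left as a stub, here is a minimal sketch of what a first example could look like. It is only a sketch: it assumes the `Index`/`ITensor` constructors and `*` contraction used by the `basic_ops.jl` example removed below, which may change as the in-progress rewrite lands, and the index names and dimensions are purely illustrative.

```julia
using ITensors: ITensor, Index

# Two named indices of dimension 2 (names and sizes are illustrative).
i = Index(2, "i")
j = Index(2, "j")

# An ITensor over (i, j); unset elements default to zero.
A = ITensor(i, j)
A[i => 1, j => 1] = 1.0
A[i => 2, j => 2] = -1.0

# `*` contracts over all shared indices, here both i and j, so
# `A * A` is a scalar ITensor; `[]` extracts the number (here 2.0).
@show (A * A)[]
```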
diff --git a/examples/basic_ops/basic_ops.jl b/examples/basic_ops/basic_ops.jl deleted file mode 100644 index ff11b59579..0000000000 --- a/examples/basic_ops/basic_ops.jl +++ /dev/null @@ -1,64 +0,0 @@ -using ITensors - -# Define indices -a = Index(2, "a") -b = Index(2, "b") -c = Index(2, "c") - -# Note that Index tags don't have -# to match variable names -i = Index(3, "Red,Link") - -# Define 3 order 2 tensors (matrices) -Z = ITensor(a, b) -X = ITensor(b, c) -Y = ITensor(b, c) - -# Set some elements -Z[a => 1, b => 1] = 1.0 -Z[a => 2, b => 2] = -1.0 - -X[b => 1, c => 2] = 1.0 -X[b => 2, c => 1] = 1.0 - -Y[b => 1, c => 1] = 1.0 -Y[b => 2, c => 2] = 1.0 - -# Operations with tensors -R = Z * X - -S = Y + X - -T = Y - X - -# Print results -println("Z =\n", Z, "\n") -println("X =\n", X, "\n") -println("Y =\n", Y, "\n") -println("R = Z * X =\n", R, "\n") -println("S = Y + X =\n", S, "\n") -println("T = Y - X =\n", T, "\n") - -# Check that adding incompatible tensors causes an error -try - U = Z + X -catch - println("Cannot add Z and X") -end - -# Compare calculations to Julia arrays -jZ = [ - 1.0 0.0 - 0.0 -1.0 -] -jX = [ - 0.0 1.0 - 1.0 0.0 -] -jY = [ - 1.0 0.0 - 0.0 1.0 -] -@assert Array(R, a, c) == jZ * jX -@assert Array(S, b, c) == jY + jX -@assert Array(T, b, c) == jY - jX diff --git a/examples/basic_ops/qn_itensors.jl b/examples/basic_ops/qn_itensors.jl deleted file mode 100644 index 3afc2f761c..0000000000 --- a/examples/basic_ops/qn_itensors.jl +++ /dev/null @@ -1,31 +0,0 @@ -using ITensors - -d = 1 -i = Index([QN(0, 2) => d, QN(1, 2) => d], "i") - -# Parity conserving ITensors -# By default they have flux 0 -@show A = random_itensor(i', dag(i)) -println() -@show B = random_itensor(i', dag(i)) -println() -@show C = random_itensor(QN(1, 2), i', dag(i)) -println() - -# Add them -@show A + B -println() - -# Can't add QN ITensors with different flux -#A + C - -# Contract them -@show A' * B -println() -@show A' * C -println() - -# Combine the indices to turn into a vector -comb = combiner(i', dag(i)) -@show A * comb -println() diff --git a/examples/ctmrg/anisotropic/Project.toml b/examples/ctmrg/anisotropic/Project.toml deleted file mode 100644 index 9be1610d93..0000000000 --- a/examples/ctmrg/anisotropic/Project.toml +++ /dev/null @@ -1,4 +0,0 @@ -[deps] -ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" -LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -QuadGK = "1fd47b50-473d-5c70-9696-f719f8f3bcdc" diff --git a/examples/ctmrg/anisotropic/README.md b/examples/ctmrg/anisotropic/README.md deleted file mode 100644 index 9c739295bc..0000000000 --- a/examples/ctmrg/anisotropic/README.md +++ /dev/null @@ -1,7 +0,0 @@ - -Here is an initial version of a general CTMRG code for contracting an infinite 2D tensor network. - -For now, the code only works with a square lattice with a unit cell of 2x2 or larger. - -Run a test example of contracting the 2D classical Ising model partition function with `include("main.jl"); main()`.
- diff --git a/examples/ctmrg/anisotropic/run.jl b/examples/ctmrg/anisotropic/run.jl deleted file mode 100644 index c050bfe030..0000000000 --- a/examples/ctmrg/anisotropic/run.jl +++ /dev/null @@ -1,27 +0,0 @@ -using Pkg -Pkg.activate(".") - -using ITensors -include(joinpath("..", "..", "src", "2d_classical_ising.jl")) -include(joinpath("..", "..", "src", "ctmrg_anisotropic.jl")) - -# Unit cell size -ny, nx = 2, 2 - -# Make the site indices -sh, sv = site_inds(ny, nx, 2) - -# Make the Ising partition function -β = 1.1 * βc -T = ising_partition(sh, sv, β) - -# Make the initial environment -(Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad) = ctmrg_environment((sh, sv)) - -# Check the initial environment -check_environment(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad)) - -# Run ctmrg -@show κave = ctmrg(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad)) - -@assert isapprox(κave, exp(-β * ising_free_energy(β)); rtol=1e-10) diff --git a/examples/ctmrg/isotropic/Project.toml b/examples/ctmrg/isotropic/Project.toml deleted file mode 100644 index 8ea87e8dc4..0000000000 --- a/examples/ctmrg/isotropic/Project.toml +++ /dev/null @@ -1,3 +0,0 @@ -[deps] -ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" -QuadGK = "1fd47b50-473d-5c70-9696-f719f8f3bcdc" diff --git a/examples/ctmrg/isotropic/run.jl b/examples/ctmrg/isotropic/run.jl deleted file mode 100644 index 322f266fb7..0000000000 --- a/examples/ctmrg/isotropic/run.jl +++ /dev/null @@ -1,57 +0,0 @@ -using Pkg -Pkg.activate(".") - -src_dir = joinpath(@__DIR__, "..", "..", "src") -include(joinpath(src_dir, "ctmrg_isotropic.jl")) -include(joinpath(src_dir, "2d_classical_ising.jl")) - -function main() - # Make Ising model MPO - β = 1.1 * βc - χmax = 20 - cutoff = 1e-8 - nsteps = 100 - - d = 2 - s = Index(d, "Site") - sₕ = addtags(s, "horiz") - sᵥ = addtags(s, "vert") - - T = ising_mpo(sₕ, sᵥ, β) - - χ0 = 1 - l = Index(χ0, "Link") - lₕ = addtags(l, "horiz") - lᵥ = addtags(l, "vert") - - # Initial CTM - Cₗᵤ = ITensor(lᵥ, lₕ) - Cₗᵤ[1, 1] = 1.0 - - # Initial HRTM - Aₗ = ITensor(lᵥ, lᵥ', sₕ) - Aₗ[lᵥ => 1, lᵥ' => 1, sₕ => 1] = 1.0 - - Cₗᵤ, Aₗ = ctmrg(T, Cₗᵤ, Aₗ; χmax=χmax, cutoff=cutoff, nsteps=nsteps) - - lᵥ = commonind(Cₗᵤ, Aₗ) - lₕ = uniqueind(Cₗᵤ, Aₗ) - - Aᵤ = replaceinds(Aₗ, lᵥ => lₕ, lᵥ' => lₕ', sₕ => sᵥ) - - ACₗ = Aₗ * Cₗᵤ * dag(Cₗᵤ') - - ACTₗ = prime(ACₗ * dag(Aᵤ') * T * Aᵤ, -1) - - κ = (ACTₗ * dag(ACₗ))[] - - @show κ, exp(-β * ising_free_energy(β)) - - # Calculate magnetization - Tsz = ising_mpo(sₕ, sᵥ, β; sz=true) - ACTszₗ = prime(ACₗ * dag(Aᵤ') * Tsz * Aᵤ, -1) - m = (ACTszₗ * dag(ACₗ))[] / κ - @show m, ising_magnetization(β) -end - -main() diff --git a/examples/src/2d_classical_ising.jl b/examples/src/2d_classical_ising.jl deleted file mode 100644 index d4b70a237d..0000000000 --- a/examples/src/2d_classical_ising.jl +++ /dev/null @@ -1,94 +0,0 @@ -using ITensors -using LinearAlgebra -using QuadGK - -function ising_mpo( - pair_sₕ::Pair{<:Index,<:Index}, - pair_sᵥ::Pair{<:Index,<:Index}, - β::Real, - J::Real=1.0; - sz::Bool=false, -) - sₕ, sₕ′ = pair_sₕ - sᵥ, sᵥ′ = pair_sᵥ - @assert dim(sₕ) == dim(sᵥ) - d = dim(sₕ) - T = ITensor(sₕ, sₕ′, sᵥ, sᵥ′) - for i in 1:d - T[i, i, i, i] = 1.0 - end - if sz - T[1, 1, 1, 1] = -1.0 - end - s̃ₕ, s̃ₕ′, s̃ᵥ, s̃ᵥ′ = sim.((sₕ, sₕ′, sᵥ, sᵥ′)) - T̃ = T * δ(sₕ, s̃ₕ) * δ(sₕ′, s̃ₕ′) * δ(sᵥ, s̃ᵥ) * δ(sᵥ′, s̃ᵥ′) - - # Alternative method - #Q = [exp(β * J) exp(-β * J); exp(-β * J) exp(β * J)] - #X = √Q - - f(λ₊, λ₋) = [ - (λ₊ + λ₋)/2 (λ₊ - λ₋)/2 - (λ₊ - λ₋)/2 (λ₊ + λ₋)/2 - ] - λ₊ = √(exp(β * J) + exp(-β * J)) - λ₋ = √(exp(β * J) - exp(-β * J)) - 
X = f(λ₊, λ₋) - Xₕ = itensor(vec(X), s̃ₕ, sₕ) - Xₕ′ = itensor(vec(X), s̃ₕ′, sₕ′) - Xᵥ = itensor(vec(X), s̃ᵥ, sᵥ) - Xᵥ′ = itensor(vec(X), s̃ᵥ′, sᵥ′) - return T̃ * Xₕ′ * Xᵥ′ * Xₕ * Xᵥ -end - -function ising_mpo(sₕ::Index, sᵥ::Index, args...; kwargs...) - return ising_mpo(sₕ => sₕ', sᵥ => sᵥ', args...; kwargs...) -end - -function ising_mpo_dual( - sh::Tuple{Index,Index}, sv::Tuple{Index,Index}, β::Real, J::Real=1.0 -) - d = dim(sh[1]) - T = ITensor(sh[1], sh[2], sv[1], sv[2]) - sig(s) = 1.0 - 2.0 * (s - 1) - E0 = -4.0 - for s1 in 1:d, s2 in 1:d, s3 in 1:d, s4 in 1:d - E = sig(s1) * sig(s2) + sig(s2) * sig(s3) + sig(s3) * sig(s4) + sig(s4) * sig(s1) - val = exp(-β * (E - E0)) - T[sh[1] => s1, sv[2] => s2, sh[2] => s3, sv[1] => s4] = val - end - return T -end - -function ising_partition(sh, sv, β) - ny, nx = size(sh) - T = Matrix{ITensor}(undef, ny, nx) - for iy in 1:ny, ix in 1:nx - ixp = per(ix + 1, nx) - iyp = per(iy + 1, ny) - T[iy, ix] = ising_mpo(sh[iy, ix] => sh[iy, ixp], sv[iy, ix] => sv[iyp, ix], β) - end - return T -end - -# -# Exact results -# - -const βc = 0.5 * log(√2 + 1) - -function ising_free_energy(β::Real, J::Real=1.0) - k = β * J - c = cosh(2 * k) - s = sinh(2 * k) - xmin = 0.0 - xmax = π - integrand(x) = log(c^2 + √(s^4 + 1 - 2 * s^2 * cos(x))) - integral, err = quadgk(integrand, xmin, xmax)::Tuple{Float64,Float64} - return -(log(2) + integral / π) / (2 * β) -end - -function ising_magnetization(β::Real) - β > βc && return (1 - sinh(2 * β)^(-4))^(1 / 8) - return 0.0 -end diff --git a/examples/src/ctmrg_anisotropic.jl b/examples/src/ctmrg_anisotropic.jl deleted file mode 100644 index 05fefce08d..0000000000 --- a/examples/src/ctmrg_anisotropic.jl +++ /dev/null @@ -1,300 +0,0 @@ -using ITensors - -function site_inds(ny, nx, d=1) - sh = Matrix{Index}(undef, ny, nx) - sv = Matrix{Index}(undef, ny, nx) - for iy in 1:ny, ix in 1:nx - sh[iy, ix] = Index(d, "site,horiz,x=$ix,y=$iy") - sv[iy, ix] = Index(d, "site,vert,x=$ix,y=$iy") - end - return sh, sv -end - -function link_inds(ny, nx, d=1) - ll = Matrix{Index}(undef, ny, nx) - lr = Matrix{Index}(undef, ny, nx) - lu = Matrix{Index}(undef, ny, nx) - ld = Matrix{Index}(undef, ny, nx) - for iy in 1:ny, ix in 1:nx - ll[iy, ix] = Index(d, "link,left,x=$ix,y=$iy") - lr[iy, ix] = Index(d, "link,right,x=$ix,y=$iy") - lu[iy, ix] = Index(d, "link,up,x=$ix,y=$iy") - ld[iy, ix] = Index(d, "link,down,x=$ix,y=$iy") - end - return ll, lr, lu, ld -end - -per(n, N) = mod(n - 1, N) + 1 - -function ctmrg_environment((sh, sv)) - ny, nx = size(sh) - Clu = Matrix{ITensor}(undef, ny, nx) - Cru = Matrix{ITensor}(undef, ny, nx) - Cld = Matrix{ITensor}(undef, ny, nx) - Crd = Matrix{ITensor}(undef, ny, nx) - - Al = Matrix{ITensor}(undef, ny, nx) - Ar = Matrix{ITensor}(undef, ny, nx) - Au = Matrix{ITensor}(undef, ny, nx) - Ad = Matrix{ITensor}(undef, ny, nx) - - ll, lr, lu, ld = link_inds(ny, nx) - for iy in 1:ny, ix in 1:nx - Clu[iy, ix] = random_itensor(ll[iy, ix], lu[iy, ix]) - Cru[iy, ix] = random_itensor(lr[iy, ix], lu[iy, ix]) - Cld[iy, ix] = random_itensor(ll[iy, ix], ld[iy, ix]) - Crd[iy, ix] = random_itensor(lr[iy, ix], ld[iy, ix]) - iyp, ixp = per(iy + 1, ny), per(ix + 1, nx) - Al[iy, ix] = random_itensor(sh[iy, ix], ll[iy, ix], ll[iyp, ix]) - Ar[iy, ix] = random_itensor(sh[iy, ix], lr[iy, ix], lr[iyp, ix]) - Au[iy, ix] = random_itensor(sv[iy, ix], lu[iy, ix], lu[iy, ixp]) - Ad[iy, ix] = random_itensor(sv[iy, ix], ld[iy, ix], ld[iy, ixp]) - end - normalize!((Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad)) - return (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad) -end - 
-function calc_κ(iy, ix, T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir="left") - ny, nx = size(T) - iyp, ixp = per(iy + 1, ny), per(ix + 1, nx) - normC = scalar(Clu[iy, ix] * Cld[iy, ix] * Cru[iy, ix] * Crd[iy, ix]) - normAlr = scalar( - Clu[iy, ix] * Cru[iy, ix] * Al[iy, ix] * Ar[iy, ix] * Cld[iyp, ix] * Crd[iyp, ix] - ) - normAud = scalar( - Clu[iy, ix] * Cld[iy, ix] * Au[iy, ix] * Ad[iy, ix] * Cru[iy, ixp] * Crd[iy, ixp] - ) - normT = scalar( - Clu[iy, ix] * - Al[iy, ix] * - Cld[iyp, ix] * - Au[iy, ix] * - T[iy, ix] * - Ad[iyp, ix] * - Cru[iy, ixp] * - Ar[iy, ixp] * - Crd[iyp, ixp], - ) - return normT, normAlr, normAud, normC -end - -function calc_κ(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir="left") - ny, nx = size(T) - κ = Matrix{Float64}(undef, ny, nx) - for iy in 1:ny, ix in 1:nx - normT, normAlr, normAud, normC = calc_κ( - iy, ix, T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir=dir - ) - κ[iy, ix] = normT * normC / (normAlr * normAud) - end - return κ -end - -function normalize!((Clu, Cru, Cld, Crd)) - ny, nx = size(Clu) - for iy in 1:ny, ix in 1:nx - Clu[iy, ix] /= norm(Clu[iy, ix]) - Cld[iy, ix] /= norm(Cld[iy, ix]) - Cru[iy, ix] /= norm(Cru[iy, ix]) - Crd[iy, ix] /= norm(Crd[iy, ix]) - normC4 = scalar(Clu[iy, ix] * Cld[iy, ix] * Cru[iy, ix] * Crd[iy, ix]) - normC4 < 0 ? normClu = -abs(normC4)^(1 / 4) : normClu = normC4^(1 / 4) - Clu[iy, ix] /= normClu - Cld[iy, ix] /= abs(normClu) - Cru[iy, ix] /= abs(normClu) - Crd[iy, ix] /= abs(normClu) - end -end - -function normalize!((Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir="left") - normalize!((Clu, Cru, Cld, Crd)) - ny, nx = size(Clu) - for iy in 1:ny, ix in 1:nx - Al[iy, ix] /= norm(Al[iy, ix]) - Ar[iy, ix] /= norm(Ar[iy, ix]) - Au[iy, ix] /= norm(Au[iy, ix]) - Ad[iy, ix] /= norm(Ad[iy, ix]) - iyp, ixp = per(iy + 1, ny), per(ix + 1, nx) - normAlr = scalar( - Clu[iy, ix] * Cru[iy, ix] * Al[iy, ix] * Ar[iy, ix] * Cld[iyp, ix] * Crd[iyp, ix] - ) - normAlr < 0 ? normAl = -abs(normAlr)^(1 / 2) : normAl = normAlr^(1 / 2) - Al[iy, ix] /= normAl - Ar[iy, ix] /= abs(normAl) - normAud = scalar( - Clu[iy, ix] * Cld[iy, ix] * Au[iy, ix] * Ad[iy, ix] * Cru[iy, ixp] * Crd[iy, ixp] - ) - normAud < 0 ? 
normAu = -abs(normAud)^(1 / 2) : normAu = normAud^(1 / 2) - Au[iy, ix] /= normAu - Ad[iy, ix] /= abs(normAu) - end -end - -function leftright_move!(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir="left", maxdim=5) - ny, nx = size(T) - P = Vector{ITensor}(undef, ny) - P⁻ = Vector{ITensor}(undef, ny) - if dir == "left" || dir == "up" - xrange = 1:nx - elseif dir == "right" || dir == "down" - xrange = per.((nx - 1):-1:0, nx) - end - for ix in xrange - ixm = per(ix - 1, nx) - ixp = per(ix + 1, nx) - ixpp = per(ix + 2, nx) - for iy in 1:ny - iym = per(iy - 1, ny) - iyp = per(iy + 1, ny) - - Cu = - Al[iym, ix] * - Clu[iym, ix] * - Au[iym, ix] * - T[iym, ix] * - Au[iym, ixp] * - T[iym, ixp] * - Cru[iym, ixpp] * - Ar[iym, ixpp] - @assert order(Cu) == 4 - Cd = - Al[iy, ix] * - Cld[iyp, ix] * - Ad[iyp, ix] * - T[iy, ix] * - Ad[iyp, ixp] * - T[iy, ixp] * - Crd[iyp, ixpp] * - Ar[iy, ixpp] - @assert order(Cd) == 4 - if dir == "left" || dir == "up" - li = commonindex(Cru[iy, ixpp], Crd[iy, ixpp]) - si = commonindex(Au[iy, ixp], Ad[iy, ixp]) - elseif dir == "right" || dir == "down" - li = commonindex(Clu[iy, ix], Cld[iy, ix]) - si = commonindex(Au[iy, ix], Ad[iy, ix]) - end - Cup = prime(Cu, (li, si)) - ρ = Cd * Cup - if dir == "left" || dir == "right" - utags = "$dir,link,x=$ixp,y=$iy" - elseif dir == "up" || dir == "down" - utags = "$dir,link,x=$iy,y=$ixp" - end - U, S, Vh, spec, u, v = svd( - ρ, (li, si); utags=utags, vtags="tmp", maxdim=maxdim, cutoff=0.0 - ) - V = dag(Vh) - U *= δ(u, v) - invsqrtS = S - for i in 1:dim(u) - invsqrtS[i, i] = inv(sqrt(S[i, i])) - end - P[iy] = Cup * V * invsqrtS - P⁻[iy] = Cd * dag(U) * invsqrtS - end - for iy in 1:ny - iym = per(iy - 1, ny) - iyp = per(iy + 1, ny) - if dir == "left" || dir == "up" - Al[iy, ixp] = Al[iy, ix] * P[iy] * T[iy, ix] * P⁻[iyp] - Clu[iy, ixp] = Clu[iy, ix] * Au[iy, ix] * P⁻[iy] - Cld[iy, ixp] = Cld[iy, ix] * Ad[iy, ix] * P[iy] - elseif dir == "right" || dir == "down" - Ar[iy, ixp] = Ar[iy, ixpp] * P[iy] * T[iy, ixp] * P⁻[iyp] - Cru[iy, ixp] = Cru[iy, ixpp] * Au[iy, ixp] * P⁻[iy] - Crd[iy, ixp] = Crd[iy, ixpp] * Ad[iy, ixp] * P[iy] - end - end - end - return normalize!((Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad)) -end - -function swapdiag(M) - Mp = permutedims(M, [2, 1]) - ny, nx = size(Mp) - for iy in 1:ny, ix in 1:nx - Mp[iy, ix] = M[ix, iy] - end - return Mp -end -function rotate_environment(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad)) - return swapdiag(T), swapdiag.((Clu, Cld, Cru, Crd)), swapdiag.((Au, Ad, Al, Ar)) -end - -printdiv() = println("\n****************************************") -printstepdiv() = println("\n##################################################") - -function sweepsdims(stepsizes::Vector{Int}, dims::Vector{Int}) - nstep = length(stepsizes) - maxdims = zeros(Int, stepsizes[end]) - for i in 1:stepsizes[1] - maxdims[i] = dims[1] - end - for j in 2:nstep - for i in (stepsizes[j - 1] + 1):stepsizes[j] - maxdims[i] = dims[j] - end - end - return maxdims -end - -function check_environment(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad)) - ny, nx = size(T) - for iy in 1:ny - for ix in 1:nx - @assert order(Clu[iy, ix]) == 2 - @assert order(Cru[iy, ix]) == 2 - @assert order(Cld[iy, ix]) == 2 - @assert order(Crd[iy, ix]) == 2 - @assert order(Al[iy, ix]) == 3 - @assert order(Ar[iy, ix]) == 3 - @assert order(Au[iy, ix]) == 3 - @assert order(Ad[iy, ix]) == 3 - @assert order(T[iy, ix]) == 4 - @assert order(T[iy, ix]) == 4 - @assert order(T[iy, ix]) == 4 - @assert order(T[iy, ix]) == 4 - @assert length(commoninds(Clu[iy, ix], Cru[iy, ix])) == 1 - 
@assert length(commoninds(Clu[iy, ix], Cld[iy, ix])) == 1 - @assert length(commoninds(Cld[iy, ix], Crd[iy, ix])) == 1 - @assert length(commoninds(Cru[iy, ix], Crd[iy, ix])) == 1 - end - end -end - -function ctmrg(T::Matrix{ITensor}, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); verbose=false) - ny, nx = size(T) - - verbose && println("Original:") - verbose && @show calc_κ(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad)) - - nstep = 1000 - maxdim = 10 - dirs = ["left", "up", "right", "down"] - for ctmrg_step in 1:nstep - verbose && printstepdiv() - dir = dirs[per(ctmrg_step, length(dirs))] - verbose && @show ctmrg_step, dir - - if dir == "left" || dir == "right" - leftright_move!(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir=dir, maxdim=maxdim) - elseif dir == "up" || dir == "down" - T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad) = rotate_environment( - T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad) - ) - leftright_move!(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir=dir, maxdim=maxdim) - T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad) = rotate_environment( - T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad) - ) - end - - check_environment(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad)) - - verbose && @show Mκ = calc_κ(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir=dir) - verbose && @show abs(prod(vec(Mκ)))^(1 / (nx * ny)) - end - Mκ = calc_κ(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir=dir) - κave = abs(prod(vec(Mκ)))^(1 / (nx * ny)) - return κave -end diff --git a/examples/src/ctmrg_isotropic.jl b/examples/src/ctmrg_isotropic.jl deleted file mode 100644 index ccf43af56a..0000000000 --- a/examples/src/ctmrg_isotropic.jl +++ /dev/null @@ -1,49 +0,0 @@ -using ITensors - -function ctmrg(T::ITensor, Cₗᵤ::ITensor, Aₗ::ITensor; χmax::Int, cutoff=0.0, nsteps::Int) - sₕ = commonind(T, Aₗ) - sᵥ = uniqueind(T, Aₗ, Aₗ'; plev=0) - lᵥ = commonind(Cₗᵤ, Aₗ) - lₕ = uniqueind(Cₗᵤ, Aₗ) - Aᵤ = replaceinds(Aₗ, lᵥ => lₕ, lᵥ' => lₕ', sₕ => sᵥ) - Cₗᵤ = dense(Cₗᵤ) - for i in 1:nsteps - ## Get the grown corner transfer matrix (CTM) - Cₗᵤ⁽¹⁾ = Aₗ * Cₗᵤ * Aᵤ * T - - ## Diagonalize the grown CTM - # TODO: replace with - # eigen(Cₗᵤ⁽¹⁾, "horiz" => "vert"; tags = "horiz" => "vert", kwargs...) - Cₗᵤ, Uᵥ = eigen( - Cₗᵤ⁽¹⁾, - (lₕ', sₕ'), - (lᵥ', sᵥ'); - ishermitian=true, - cutoff, - maxdim=χmax, - lefttags=tags(lₕ), - righttags=tags(lᵥ), - ) - Cₗᵤ = dense(Cₗᵤ) - lᵥ = commonind(Cₗᵤ, Uᵥ) - lₕ = uniqueind(Cₗᵤ, Uᵥ) - - # The renormalized CTM is the diagonal matrix of eigenvalues - # Normalize the CTM - Cₗ = Cₗᵤ * prime(dag(Cₗᵤ), lₕ) - normC = (Cₗ * dag(Cₗ))[]^(1 / 4) - Cₗᵤ = Cₗᵤ / normC - - # Calculate the renormalized half row transfer matrix (HRTM) - Uᵥ = noprime(Uᵥ) - Aₗ = Aₗ * Uᵥ * T * dag(Uᵥ') - Aₗ = replaceinds(Aₗ, sₕ' => sₕ) - - # Normalize the HRTM - ACₗ = Aₗ * Cₗᵤ * prime(dag(Cₗᵤ)) - normA = √((ACₗ * dag(ACₗ))[]) - Aₗ = Aₗ / normA - Aᵤ = replaceinds(Aₗ, lᵥ => lₕ, lᵥ' => lₕ', sₕ => sᵥ) - end - return Cₗᵤ, Aₗ -end diff --git a/examples/src/trg.jl b/examples/src/trg.jl deleted file mode 100644 index 787bc0625c..0000000000 --- a/examples/src/trg.jl +++ /dev/null @@ -1,51 +0,0 @@ -using ITensors - -""" - trg(T::ITensor; χmax::Int, nsteps::Int) -> κ, T - -Perform the TRG algorithm on the partition function composed of the ITensor T. - -The indices of T must come in pairs `(sₕ => sₕ')` and `(sᵥ => sᵥ')`. - -χmax is the maximum renormalized bond dimension. - -nsteps is the number of renormalization steps performed. - -The outputs are κ, the partition function per site, and the final renormalized -ITensor T.
-""" -function trg(T::ITensor; χmax::Int, nsteps::Int, cutoff=0.0, svd_alg="divide_and_conquer") - sₕ, sᵥ = filterinds(T; plev=0) - @assert hassameinds((sₕ, sₕ', sᵥ, sᵥ'), T) - - # Keep track of the partition function per site - κ = 1.0 - for n in 1:nsteps - Fₕ, Fₕ′ = factorize( - T, (sₕ', sᵥ'); ortho="none", maxdim=χmax, cutoff, tags=tags(sₕ), svd_alg - ) - - s̃ₕ = commonind(Fₕ, Fₕ′) - Fₕ′ *= δ(dag(s̃ₕ), s̃ₕ') - - Fᵥ, Fᵥ′ = factorize( - T, (sₕ, sᵥ'); ortho="none", maxdim=χmax, cutoff, tags=tags(sᵥ), svd_alg - ) - - s̃ᵥ = commonind(Fᵥ, Fᵥ′) - Fᵥ′ *= δ(dag(s̃ᵥ), s̃ᵥ') - - T = - (Fₕ * δ(dag(sₕ'), sₕ)) * - (Fᵥ * δ(dag(sᵥ'), sᵥ)) * - (Fₕ′ * δ(dag(sₕ), sₕ')) * - (Fᵥ′ * δ(dag(sᵥ), sᵥ')) - - sₕ, sᵥ = s̃ₕ, s̃ᵥ - - trT = abs((T * δ(sₕ, sₕ') * δ(sᵥ, sᵥ'))[]) - T = T / trT - κ *= trT^(1 / 2^n) - end - return κ, T -end diff --git a/examples/trg/Project.toml b/examples/trg/Project.toml deleted file mode 100644 index 8ea87e8dc4..0000000000 --- a/examples/trg/Project.toml +++ /dev/null @@ -1,3 +0,0 @@ -[deps] -ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" -QuadGK = "1fd47b50-473d-5c70-9696-f719f8f3bcdc" diff --git a/examples/trg/run.jl b/examples/trg/run.jl deleted file mode 100644 index fe0b92fdc1..0000000000 --- a/examples/trg/run.jl +++ /dev/null @@ -1,21 +0,0 @@ -using Pkg -Pkg.activate(".") - -include(joinpath(@__DIR__, "..", "src", "trg.jl")) -include(joinpath(@__DIR__, "..", "src", "2d_classical_ising.jl")) - -# Make Ising model MPO -β = 1.1 * βc -d = 2 -s = Index(d) -sₕ = addtags(s, "horiz") -sᵥ = addtags(s, "vert") -T = ising_mpo(sₕ, sᵥ, β) - -χmax = 20 -nsteps = 20 -κ, T = trg(T; χmax=χmax, nsteps=nsteps, svd_alg="divide_and_conquer") - -κ_exact = exp(-β * ising_free_energy(β)) -@show κ, κ_exact -@show abs(κ - κ_exact) diff --git a/ext/ITensorsChainRulesCoreExt/ITensorsChainRulesCoreExt.jl b/ext/ITensorsChainRulesCoreExt/ITensorsChainRulesCoreExt.jl deleted file mode 100644 index b4ac680b46..0000000000 --- a/ext/ITensorsChainRulesCoreExt/ITensorsChainRulesCoreExt.jl +++ /dev/null @@ -1,19 +0,0 @@ -module ITensorsChainRulesCoreExt -using ChainRulesCore -import ChainRulesCore: rrule -using ITensors -using ITensors: Indices -using ITensors.Adapt -using ITensors.NDTensors -using ITensors.NDTensors: datatype -using ITensors.Ops -include("utils.jl") -include("projection.jl") -include("NDTensors/tensor.jl") -include("NDTensors/dense.jl") -include("indexset.jl") -include("itensor.jl") -include("LazyApply/LazyApply.jl") -include("non_differentiable.jl") -include("smallstrings.jl") -end diff --git a/ext/ITensorsChainRulesCoreExt/LazyApply/LazyApply.jl b/ext/ITensorsChainRulesCoreExt/LazyApply/LazyApply.jl deleted file mode 100644 index 6b3011af54..0000000000 --- a/ext/ITensorsChainRulesCoreExt/LazyApply/LazyApply.jl +++ /dev/null @@ -1,16 +0,0 @@ -function rrule(::Type{Applied}, x1, x2::Tuple, x3::NamedTuple) - y = Applied(x1, x2, x3) - function Applied_pullback(ȳ) - x̄1 = ȳ.f - x̄2 = ȳ.args - x̄3 = ȳ.kwargs - return (NoTangent(), x̄1, x̄2, x̄3) - end - function Applied_pullback(ȳ::Vector) - x̄1 = NoTangent() - x̄2 = (ȳ,) - x̄3 = NoTangent() - return (NoTangent(), x̄1, x̄2, x̄3) - end - return y, Applied_pullback -end diff --git a/ext/ITensorsChainRulesCoreExt/NDTensors/dense.jl b/ext/ITensorsChainRulesCoreExt/NDTensors/dense.jl deleted file mode 100644 index 4ff308ae1f..0000000000 --- a/ext/ITensorsChainRulesCoreExt/NDTensors/dense.jl +++ /dev/null @@ -1,8 +0,0 @@ -function rrule(f::Type{<:Dense}, x1::AbstractVector) - y = f(x1) - function Dense_pullback(ȳ) - x̄1 = ȳ.data - return (NoTangent(), x̄1) 
- end - return y, Dense_pullback -end diff --git a/ext/ITensorsChainRulesCoreExt/NDTensors/tensor.jl b/ext/ITensorsChainRulesCoreExt/NDTensors/tensor.jl deleted file mode 100644 index 06f9484bb4..0000000000 --- a/ext/ITensorsChainRulesCoreExt/NDTensors/tensor.jl +++ /dev/null @@ -1,34 +0,0 @@ -using ITensors.NDTensors - -using ITensors.NDTensors: AllowAlias - -function rrule(f::Type{<:Tensor}, x1::AllowAlias, x2::TensorStorage, x3::Tuple) - y = f(x1, x2, x3) - function Tensor_pullback(ȳ) - x̄1 = NoTangent() - x̄2 = ȳ.storage - x̄3 = NoTangent() - return (NoTangent(), x̄1, x̄2, x̄3) - end - return y, Tensor_pullback -end - -function rrule(::typeof(tensor), x1::TensorStorage, x2::Tuple) - y = tensor(x1, x2) - function tensor_pullback(ȳ) - x̄1 = storage(ȳ) - x̄2 = NoTangent() - return (NoTangent(), x̄1, x̄2) - end - return y, tensor_pullback -end - -function rrule(f::Type{<:Tensor}, x1::TensorStorage, x2::Tuple) - y = f(x1, x2) - function tensor_pullback(ȳ) - x̄1 = copy(storage(x1)) - x̄2 = NoTangent() - return (NoTangent(), x̄1, x̄2) - end - return y, tensor_pullback -end diff --git a/ext/ITensorsChainRulesCoreExt/indexset.jl b/ext/ITensorsChainRulesCoreExt/indexset.jl deleted file mode 100644 index 1ce970cf33..0000000000 --- a/ext/ITensorsChainRulesCoreExt/indexset.jl +++ /dev/null @@ -1,32 +0,0 @@ -for fname in ( - :prime, - :setprime, - :noprime, - :replaceprime, - :swapprime, - :addtags, - :removetags, - :replacetags, - :settags, - :swaptags, - :replaceind, - :replaceinds, - :swapind, - :swapinds, -) - @eval begin - function rrule(f::typeof($fname), x::ITensor, a...; kwargs...) - y = f(x, a...; kwargs...) - function f_pullback(ȳ) - x̄ = replaceinds(unthunk(ȳ), inds(y) => inds(x)) - ā = map_notangent(a) - return (NoTangent(), x̄, ā...) - end - return y, f_pullback - end - end -end - -rrule(::typeof(adjoint), x::ITensor) = rrule(prime, x) - -@non_differentiable permute(::Indices, ::Indices) diff --git a/ext/ITensorsChainRulesCoreExt/itensor.jl b/ext/ITensorsChainRulesCoreExt/itensor.jl deleted file mode 100644 index a830cc77bd..0000000000 --- a/ext/ITensorsChainRulesCoreExt/itensor.jl +++ /dev/null @@ -1,204 +0,0 @@ -function rrule(::typeof(getindex), x::ITensor, I...) - y = getindex(x, I...) - function getindex_pullback(ȳ) - # TODO: add definition `ITensor(::Tuple{}) = ITensor()` - # to ITensors.jl so no splatting is needed here. - x̄ = ITensor(inds(x)...) - x̄[I...] = unthunk(ȳ) - Ī = map_notangent(I) - return (NoTangent(), x̄, Ī...) - end - return y, getindex_pullback -end - -# Specialized version in order to avoid call to `setindex!` -# within the pullback, should be better for taking higher order -# derivatives in Zygote. 
-function rrule(::typeof(getindex), x::ITensor) - y = x[] - function getindex_pullback(ȳ) - x̄ = ITensor(unthunk(ȳ)) - return (NoTangent(), x̄) - end - return y, getindex_pullback -end - -function rrule(::Type{ITensor}, x1::AllowAlias, x2::TensorStorage, x3) - y = ITensor(x1, x2, x3) - function ITensor_pullback(ȳ) - x̄1 = NoTangent() - x̄2 = ȳ.tensor.storage - x̄3 = NoTangent() - return (NoTangent(), x̄1, x̄2, x̄3) - end - return y, ITensor_pullback -end - -function rrule(::Type{ITensor}, x1::AllowAlias, x2::Tensor) - y = ITensor(x1, x2) - function ITensor_pullback(ȳ) - x̄1 = NoTangent() - x̄2 = Tensor(x1, ȳ) - return (NoTangent(), x̄1, x̄2) - end - return y, ITensor_pullback -end - -function rrule(::Type{ITensor}, x1::Tensor) - y = ITensor(x1) - function ITensor_pullback(ȳ) - x̄1 = Tensor(ȳ) - return (NoTangent(), x̄1) - end - return y, ITensor_pullback -end - -function rrule(::typeof(itensor), x1::Tensor) - y = itensor(x1) - function itensor_pullback(ȳ) - x̄1 = tensor(ȳ) - return (NoTangent(), x̄1) - end - return y, itensor_pullback -end - -function rrule(f::Type{<:Tensor}, x1::AllowAlias, x2::ITensor) - y = f(x1, x2) - function Tensor_pullback(ȳ) - x̄1 = NoTangent() - x̄2 = ITensor(x1, ȳ) - return (NoTangent(), x̄1, x̄2) - end - return y, Tensor_pullback -end - -function rrule(f::Type{<:Tensor}, x1::ITensor) - y = f(x1) - function Tensor_pullback(ȳ) - x̄1 = ITensor(ȳ) - return (NoTangent(), x̄1) - end - return y, Tensor_pullback -end - -function rrule(::typeof(tensor), x1::ITensor) - y = tensor(x1) - function tensor_pullback(ȳ) - x̄1 = ITensor(typeof(storage(x1))(ȳ.storage.data), inds(x1)) - return (NoTangent(), x̄1) - end - return y, tensor_pullback -end - -# Special case for contracting a pair of ITensors -function rrule(::typeof(contract), x1::ITensor, x2::ITensor) - project_x1 = ProjectTo(x1) - project_x2 = ProjectTo(x2) - function contract_pullback(ȳ) - x̄1 = project_x1(ȳ * dag(x2)) - x̄2 = project_x2(dag(x1) * ȳ) - return (NoTangent(), x̄1, x̄2) - end - return x1 * x2, contract_pullback -end - -@non_differentiable ITensors.optimal_contraction_sequence(::Any) - -function rrule(::typeof(*), x1::Number, x2::ITensor) - project_x1 = ProjectTo(x1) - project_x2 = ProjectTo(x2) - function contract_pullback(ȳ) - x̄1 = project_x1((ȳ * dag(x2))[]) - x̄2 = project_x2(dag(x1) * ȳ) - return (NoTangent(), x̄1, x̄2) - end - return x1 * x2, contract_pullback -end - -function rrule(::typeof(*), x1::ITensor, x2::Number) - project_x1 = ProjectTo(x1) - project_x2 = ProjectTo(x2) - function contract_pullback(ȳ) - x̄1 = project_x1(ȳ * dag(x2)) - x̄2 = project_x2((dag(x1) * ȳ)[]) - return (NoTangent(), x̄1, x̄2) - end - return x1 * x2, contract_pullback -end - -function rrule(::typeof(+), x1::ITensor, x2::ITensor) - function add_pullback(ȳ) - return (NoTangent(), ȳ, ȳ) - end - return x1 + x2, add_pullback -end - -function rrule(::typeof(-), x1::ITensor, x2::ITensor) - function subtract_pullback(ȳ) - return (NoTangent(), ȳ, -ȳ) - end - return x1 - x2, subtract_pullback -end - -function rrule(::typeof(-), x::ITensor) - function minus_pullback(ȳ) - return (NoTangent(), -ȳ) - end - return -x, minus_pullback -end - -function rrule(::typeof(itensor), x::Array, a...) - function itensor_pullback(ȳ) - uȳ = permute(unthunk(ȳ), a...) - x̄ = reshape(array(uȳ), size(x)) - ā = map_notangent(a) - return (NoTangent(), x̄, ā...) - end - return itensor(x, a...), itensor_pullback -end - -function rrule(::Type{ITensor}, x::Array{<:Number}, a...) 
- function ITensor_pullback(ȳ) - # TODO: define `Array(::ITensor)` directly - uȳ = Array(unthunk(ȳ), a...) - x̄ = reshape(uȳ, size(x)) - ā = map_notangent(a) - return (NoTangent(), x̄, ā...) - end - return ITensor(x, a...), ITensor_pullback -end - -function rrule(::Type{ITensor}, x::Number) - function ITensor_pullback(ȳ) - x̄ = ȳ[] - return (NoTangent(), x̄) - end - return ITensor(x), ITensor_pullback -end - -function rrule(::typeof(dag), x::ITensor) - function dag_pullback(ȳ) - x̄ = dag(unthunk(ȳ)) - return (NoTangent(), x̄) - end - return dag(x), dag_pullback -end - -function rrule(::typeof(permute), x::ITensor, a...) - y = permute(x, a...) - function permute_pullback(ȳ) - x̄ = permute(unthunk(ȳ), inds(x)) - ā = map_notangent(a) - return (NoTangent(), x̄, ā...) - end - return y, permute_pullback -end - -# Needed because by default it was calling the generic -# `rrule` for `tr` inside ChainRules. -# TODO: Raise an issue with ChainRules. -function rrule(config::RuleConfig{>:HasReverseMode}, ::typeof(tr), x::ITensor; kwargs...) - return rrule_via_ad(config, ITensors._tr, x; kwargs...) -end - -@non_differentiable combiner(::Indices) diff --git a/ext/ITensorsChainRulesCoreExt/non_differentiable.jl b/ext/ITensorsChainRulesCoreExt/non_differentiable.jl deleted file mode 100644 index 42af2f63a9..0000000000 --- a/ext/ITensorsChainRulesCoreExt/non_differentiable.jl +++ /dev/null @@ -1,19 +0,0 @@ -using ChainRulesCore: @non_differentiable -using ITensors: - ITensors, Index, addtags, commoninds, dag, delta, inds, noncommoninds, onehot, uniqueinds -using ITensors.TagSets: TagSet - -@non_differentiable map_notangent(::Any) -@non_differentiable Index(::Any...) -@non_differentiable delta(::Any...) -@non_differentiable dag(::Index) -@non_differentiable inds(::Any...) -@non_differentiable commoninds(::Any...) -@non_differentiable noncommoninds(::Any...) -@non_differentiable uniqueinds(::Any...) -@non_differentiable addtags(::TagSet, ::Any) -@non_differentiable ITensors.filter_inds_set_function(::Function, ::Function, ::Any...) -@non_differentiable ITensors.filter_inds_set_function(::Function, ::Any...) -@non_differentiable ITensors.indpairs(::Any...) -@non_differentiable onehot(::Any...) -@non_differentiable Base.convert(::Type{TagSet}, str::String) diff --git a/ext/ITensorsChainRulesCoreExt/projection.jl b/ext/ITensorsChainRulesCoreExt/projection.jl deleted file mode 100644 index 443d6e5fcb..0000000000 --- a/ext/ITensorsChainRulesCoreExt/projection.jl +++ /dev/null @@ -1,10 +0,0 @@ -function ChainRulesCore.ProjectTo(x::ITensor) - return ProjectTo{ITensor}(; element=ProjectTo(zero(eltype(x)))) -end - -function (project::ProjectTo{ITensor})(dx::ITensor) - S = eltype(dx) - T = ChainRulesCore.project_type(project.element) - dy = S <: T ? 
dx : map(project.element, dx) - return dy -end diff --git a/ext/ITensorsChainRulesCoreExt/smallstrings.jl b/ext/ITensorsChainRulesCoreExt/smallstrings.jl deleted file mode 100644 index 7b17476f68..0000000000 --- a/ext/ITensorsChainRulesCoreExt/smallstrings.jl +++ /dev/null @@ -1,12 +0,0 @@ -using ITensors: ITensors -include( - joinpath( - pkgdir(ITensors), - "src", - "lib", - "SmallStrings", - "ext", - "SmallStringsChainRulesCoreExt", - "SmallStringsChainRulesCoreExt.jl", - ), -) diff --git a/ext/ITensorsChainRulesCoreExt/utils.jl b/ext/ITensorsChainRulesCoreExt/utils.jl deleted file mode 100644 index 1379341fca..0000000000 --- a/ext/ITensorsChainRulesCoreExt/utils.jl +++ /dev/null @@ -1,7 +0,0 @@ -using ChainRulesCore: AbstractZero, NoTangent -using Compat: Returns -using ITensors: ITensors - -ITensors.dag(z::AbstractZero) = z - -map_notangent(a) = map(Returns(NoTangent()), a) diff --git a/ext/ITensorsChainRulesCoreExt/zygoterules.jl b/ext/ITensorsChainRulesCoreExt/zygoterules.jl deleted file mode 100644 index 65e0996e9c..0000000000 --- a/ext/ITensorsChainRulesCoreExt/zygoterules.jl +++ /dev/null @@ -1,10 +0,0 @@ -using ZygoteRules: @adjoint - -# Needed for defining the rule for `adjoint(A::ITensor)` -# which currently doesn't work by overloading `ChainRulesCore.rrule` -# since it is defined in `Zygote`, which takes precedence. -@adjoint function Base.adjoint(x::ITensor) - y, adjoint_rrule_pullback = rrule(adjoint, x) - adjoint_pullback(ȳ) = Base.tail(adjoint_rrule_pullback(ȳ)) - return y, adjoint_pullback -end diff --git a/ext/ITensorsHDF5Ext/ITensorsHDF5Ext.jl b/ext/ITensorsHDF5Ext/ITensorsHDF5Ext.jl deleted file mode 100644 index fe8a399bb7..0000000000 --- a/ext/ITensorsHDF5Ext/ITensorsHDF5Ext.jl +++ /dev/null @@ -1,8 +0,0 @@ -module ITensorsHDF5Ext -include("index.jl") -include("itensor.jl") -include("qnindex.jl") -include("indexset.jl") -include("qn.jl") -include("tagset.jl") -end diff --git a/ext/ITensorsHDF5Ext/index.jl b/ext/ITensorsHDF5Ext/index.jl deleted file mode 100644 index 587f7c20ae..0000000000 --- a/ext/ITensorsHDF5Ext/index.jl +++ /dev/null @@ -1,43 +0,0 @@ -using HDF5: HDF5, attributes, create_group, open_group, read, write -using ITensors: Arrow, dim, dir, id, Index, plev, QNBlocks, space, tags, TagSet - -function HDF5.write(parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, I::Index) - g = create_group(parent, name) - attributes(g)["type"] = "Index" - attributes(g)["version"] = 1 - write(g, "id", id(I)) - write(g, "dim", dim(I)) - write(g, "dir", Int(dir(I))) - write(g, "tags", tags(I)) - write(g, "plev", plev(I)) - if typeof(space(I)) == Int - attributes(g)["space_type"] = "Int" - elseif typeof(space(I)) == QNBlocks - attributes(g)["space_type"] = "QNBlocks" - write(g, "space", space(I)) - else - error("Index space type not recognized") - end -end - -function HDF5.read(parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, ::Type{Index}) - g = open_group(parent, name) - if read(attributes(g)["type"]) != "Index" - error("HDF5 group or file does not contain Index data") - end - id = read(g, "id") - dim = read(g, "dim") - dir = Arrow(read(g, "dir")) - tags = read(g, "tags", TagSet) - plev = read(g, "plev") - space_type = "Int" - if haskey(attributes(g), "space_type") - space_type = read(attributes(g)["space_type"]) - end - if space_type == "Int" - space = dim - elseif space_type == "QNBlocks" - space = read(g, "space", QNBlocks) - end - return Index(id, space, dir, tags, plev) -end diff --git a/ext/ITensorsHDF5Ext/indexset.jl b/ext/ITensorsHDF5Ext/indexset.jl
deleted file mode 100644 index 22e9886b67..0000000000 --- a/ext/ITensorsHDF5Ext/indexset.jl +++ /dev/null @@ -1,24 +0,0 @@ -using HDF5: HDF5, attributes, create_group, open_group, read, write -using ITensors: Index, Indices - -function HDF5.write(parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, is::Indices) - g = create_group(parent, name) - attributes(g)["type"] = "IndexSet" - attributes(g)["version"] = 1 - N = length(is) - write(g, "length", N) - for n in 1:N - write(g, "index_$n", is[n]) - end -end - -function HDF5.read( - parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, T::Type{<:Indices} -) - g = open_group(parent, name) - if read(attributes(g)["type"]) != "IndexSet" - error("HDF5 group or file does not contain IndexSet data") - end - n = read(g, "length") - return T(Index[read(g, "index_$j", Index) for j in 1:n]) -end diff --git a/ext/ITensorsHDF5Ext/itensor.jl b/ext/ITensorsHDF5Ext/itensor.jl deleted file mode 100644 index bca520406a..0000000000 --- a/ext/ITensorsHDF5Ext/itensor.jl +++ /dev/null @@ -1,36 +0,0 @@ -using HDF5: HDF5, attributes, create_group, open_group, read, write -using ITensors: inds, itensor, ITensor, storage -using NDTensors: - NDTensors, BlockSparse, Combiner, Dense, Diag, DiagBlockSparse, EmptyStorage - -function HDF5.write(parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, T::ITensor) - g = create_group(parent, name) - attributes(g)["type"] = "ITensor" - attributes(g)["version"] = 1 - write(g, "inds", inds(T)) - return write(g, "storage", storage(T)) -end - -function HDF5.read( - parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, ::Type{ITensor} -) - g = open_group(parent, name) - if read(attributes(g)["type"]) != "ITensor" - error("HDF5 group or file does not contain ITensor data") - end - inds = read(g, "inds", Vector{<:Index}) - - # check input file for key name of ITensor data - # ITensors.jl <= v0.1.x uses `store` as key - # whereas ITensors.jl >= v0.2.x uses `storage` as key - for key in ["storage", "store"] - if haskey(g, key) - stypestr = read(attributes(open_group(g, key))["type"]) - stype = eval(Meta.parse(stypestr)) - storage = read(g, key, stype) - return itensor(storage, inds) - end - end - return error("HDF5 file: $(g) does not contain correct ITensor data.\nNeither key - `store` nor `storage` could be found.") -end diff --git a/ext/ITensorsHDF5Ext/qn.jl b/ext/ITensorsHDF5Ext/qn.jl deleted file mode 100644 index d7ae10d5c4..0000000000 --- a/ext/ITensorsHDF5Ext/qn.jl +++ /dev/null @@ -1,26 +0,0 @@ -using HDF5: HDF5, attributes, create_group, open_group, read, write -using ITensors: maxQNs, modulus, name, QN, QNVal, val - -function HDF5.write(parent::Union{HDF5.File,HDF5.Group}, gname::AbstractString, q::QN) - g = create_group(parent, gname) - attributes(g)["type"] = "QN" - attributes(g)["version"] = 1 - names = [String(name(q[n])) for n in 1:maxQNs] - vals = [val(q[n]) for n in 1:maxQNs] - mods = [modulus(q[n]) for n in 1:maxQNs] - write(g, "names", names) - write(g, "vals", vals) - return write(g, "mods", mods) -end - -function HDF5.read(parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, ::Type{QN}) - g = open_group(parent, name) - if read(attributes(g)["type"]) != "QN" - error("HDF5 group or file does not contain QN data") - end - names = read(g, "names") - vals = read(g, "vals") - mods = read(g, "mods") - mqn = ntuple(n -> QNVal(names[n], vals[n], mods[n]), maxQNs) - return QN(mqn) -end diff --git a/ext/ITensorsHDF5Ext/qnindex.jl b/ext/ITensorsHDF5Ext/qnindex.jl deleted file mode 100644 index 
996f2b4a17..0000000000 --- a/ext/ITensorsHDF5Ext/qnindex.jl +++ /dev/null @@ -1,30 +0,0 @@ -using HDF5: HDF5, attributes, create_group, open_group, read, write -using ITensors: dims, QNBlock, QNBlocks - -function HDF5.write(parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, B::QNBlocks) - g = create_group(parent, name) - attributes(g)["type"] = "QNBlocks" - attributes(g)["version"] = 1 - write(g, "length", length(B)) - dims = [block[2] for block in B] - write(g, "dims", dims) - for n in 1:length(B) - write(g, "QN[$n]", B[n][1]) - end -end - -function HDF5.read( - parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, ::Type{QNBlocks} -) - g = open_group(parent, name) - if read(attributes(g)["type"]) != "QNBlocks" - error("HDF5 group or file does not contain QNBlocks data") - end - N = read(g, "length") - dims = read(g, "dims") - B = QNBlocks(undef, N) - for n in 1:length(B) - B[n] = QNBlock(read(g, "QN[$n]", QN), dims[n]) - end - return B -end diff --git a/ext/ITensorsHDF5Ext/tagset.jl b/ext/ITensorsHDF5Ext/tagset.jl deleted file mode 100644 index d838d90b27..0000000000 --- a/ext/ITensorsHDF5Ext/tagset.jl +++ /dev/null @@ -1,20 +0,0 @@ -using HDF5: HDF5, attributes, create_group, open_group, read, write -using ITensors.TagSets: TagSet, tagstring - -function HDF5.write(parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, T::TagSet) - g = create_group(parent, name) - attributes(g)["type"] = "TagSet" - attributes(g)["version"] = 1 - return write(g, "tags", tagstring(T)) -end - -function HDF5.read( - parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, ::Type{TagSet} -) - g = open_group(parent, name) - if read(attributes(g)["type"]) != "TagSet" - error("HDF5 group '$name' does not contain TagSet data") - end - tstring = read(g, "tags") - return TagSet(tstring) -end diff --git a/ext/ITensorsVectorInterfaceExt/ITensorsVectorInterfaceExt.jl b/ext/ITensorsVectorInterfaceExt/ITensorsVectorInterfaceExt.jl deleted file mode 100644 index eb4026fdb1..0000000000 --- a/ext/ITensorsVectorInterfaceExt/ITensorsVectorInterfaceExt.jl +++ /dev/null @@ -1,107 +0,0 @@ -module ITensorsVectorInterfaceExt -using ITensors: ITensors, ITensor -using VectorInterface: VectorInterface - -function VectorInterface.add(a::ITensor, b::ITensor) - return a + b -end -function VectorInterface.add!(a::ITensor, b::ITensor) - a .= a .+ b - return a -end -function VectorInterface.add!!(a::ITensor, b::ITensor) - if promote_type(eltype(a), eltype(b)) <: eltype(a) - VectorInterface.add!(a, b) - else - a = VectorInterface.add(a, b) - end - return a -end - -function VectorInterface.add(a::ITensor, b::ITensor, α::Number) - return a + b * α -end -function VectorInterface.add!(a::ITensor, b::ITensor, α::Number) - a .= a .+ b .* α - return a -end -function VectorInterface.add!!(a::ITensor, b::ITensor, α::Number) - if promote_type(eltype(a), eltype(b), typeof(α)) <: eltype(a) - VectorInterface.add!(a, b, α) - else - a = VectorInterface.add(a, b, α) - end - return a -end - -function VectorInterface.add(a::ITensor, b::ITensor, α::Number, β::Number) - return a * β + b * α -end -function VectorInterface.add!(a::ITensor, b::ITensor, α::Number, β::Number) - a .= a .* β .+ b .* α - return a -end -function VectorInterface.add!!(a::ITensor, b::ITensor, α::Number, β::Number) - if promote_type(eltype(a), eltype(b), typeof(α), typeof(β)) <: eltype(a) - VectorInterface.add!(a, b, α, β) - else - a = VectorInterface.add(a, b, α, β) - end - return a -end - -function VectorInterface.inner(a::ITensor, b::ITensor) - return 
ITensors.inner(a, b) -end - -function VectorInterface.scalartype(a::ITensor) - return ITensors.scalartype(a) -end - -function VectorInterface.scale(a::ITensor, α::Number) - return a * α -end -function VectorInterface.scale!(a::ITensor, α::Number) - a .= a .* α - return a -end -function VectorInterface.scale!!(a::ITensor, α::Number) - if promote_type(eltype(a), typeof(α)) <: eltype(a) - VectorInterface.scale!(a, α) - else - a = VectorInterface.scale(a, α) - end - return a -end - -function VectorInterface.scale!(a_dest::ITensor, a_src::ITensor, α::Number) - a_dest .= a_src .* α - return a_dest -end -function VectorInterface.scale!!(a_dest::ITensor, a_src::ITensor, α::Number) - if promote_type(eltype(a_dest), eltype(a_src), typeof(α)) <: eltype(a_dest) - VectorInterface.scale!(a_dest, a_src, α) - else - a_dest = VectorInterface.scale(a_src, α) - end - return a_dest -end - -function VectorInterface.zerovector(a::ITensor, type::Type{<:Number}) - a′ = similar(a, type) - VectorInterface.zerovector!(a′) - return a′ -end -function VectorInterface.zerovector!(a::ITensor) - a .= zero(eltype(a)) - return a -end -function VectorInterface.zerovector!!(a::ITensor, type::Type{<:Number}) - if type === eltype(a) - VectorInterface.zerovector!(a) - else - a = VectorInterface.zerovector(a, type) - end - return a -end -end diff --git a/ext/ITensorsZygoteRulesExt/ITensorsZygoteRulesExt.jl b/ext/ITensorsZygoteRulesExt/ITensorsZygoteRulesExt.jl deleted file mode 100644 index 5da78e2c7e..0000000000 --- a/ext/ITensorsZygoteRulesExt/ITensorsZygoteRulesExt.jl +++ /dev/null @@ -1,3 +0,0 @@ -module ITensorsZygoteRulesExt -include("itensors.jl") -end diff --git a/ext/ITensorsZygoteRulesExt/itensors.jl b/ext/ITensorsZygoteRulesExt/itensors.jl deleted file mode 100644 index 58562a7cd0..0000000000 --- a/ext/ITensorsZygoteRulesExt/itensors.jl +++ /dev/null @@ -1,12 +0,0 @@ -using ChainRulesCore: ChainRulesCore -using ITensors: ITensor -using ZygoteRules: @adjoint - -# Needed for defining the rule for `adjoint(A::ITensor)` -# which currently doesn't work by overloading `ChainRulesCore.rrule` -# since it is defined in `Zygote`, which takes precedence.
-@adjoint function Base.adjoint(x::ITensor) - y, adjoint_rrule_pullback = ChainRulesCore.rrule(adjoint, x) - adjoint_pullback(ȳ) = Base.tail(adjoint_rrule_pullback(ȳ)) - return y, adjoint_pullback -end diff --git a/jenkins/Dockerfile b/jenkins/Dockerfile deleted file mode 100644 index e2367a8c01..0000000000 --- a/jenkins/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -FROM nvidia/cuda:12.0.0-devel-ubuntu20.04 - -# julia -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive \ - apt-get install --yes --no-install-recommends \ - # basic stuff - curl ca-certificates \ - libcutensor-dev \ - && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* - -ARG JULIA=1.6 -RUN curl -s -L https://julialang-s3.julialang.org/bin/linux/x64/${JULIA}/julia-${JULIA}-latest-linux-x86_64.tar.gz | \ - tar -C /usr/local -x -z --strip-components=1 -f - \ No newline at end of file diff --git a/jenkins/Jenkinsfile b/jenkins/Jenkinsfile deleted file mode 100644 index 64ee5d5d3e..0000000000 --- a/jenkins/Jenkinsfile +++ /dev/null @@ -1,151 +0,0 @@ -pipeline { - agent none - options { - disableConcurrentBuilds() - buildDiscarder(logRotator(numToKeepStr: '8', daysToKeepStr: '20')) - } - stages { - stage('GPU Testing') { - parallel { - stage('NDTensorsCUDAExt julia-lts') { - options { - timeout(time: 45, unit: 'MINUTES') - } - agent { - dockerfile { - label 'gpu&&v100' - filename 'Dockerfile' - dir 'jenkins' - additionalBuildArgs '--build-arg JULIA=1.10' - args '--gpus "device=1"' - } - } - environment { - HOME = pwd(tmp:true) - OMP_NUM_THREADS = 4 - JULIA_NUM_THREADS = 4 - } - steps { - sh ''' - julia -e 'using Pkg: Pkg; Pkg.activate(temp=true); Pkg.Registry.update(); Pkg.update(); Pkg.develop(path="./NDTensors"); Pkg.develop(path="."); Pkg.test("NDTensors"; test_args=["cuda"])' - ''' - } - } - stage('NDTensorsCUDAExt julia-1') { - options { - timeout(time: 45, unit: 'MINUTES') - } - agent { - dockerfile { - label 'gpu&&v100' - filename 'Dockerfile' - dir 'jenkins' - additionalBuildArgs '--build-arg JULIA=1.11' - args '--gpus "device=1"' - } - } - environment { - HOME = pwd(tmp:true) - OMP_NUM_THREADS = 4 - JULIA_NUM_THREADS = 4 - } - steps { - sh ''' - julia -e 'using Pkg: Pkg; Pkg.activate(temp=true); Pkg.Registry.update(); Pkg.update(); Pkg.develop(path="./NDTensors"); Pkg.develop(path="."); Pkg.test("NDTensors"; test_args=["cuda"])' - ''' - } - } - stage('NDTensorscuTENSORExt julia-lts') { - options { - timeout(time: 45, unit: 'MINUTES') - } - agent { - dockerfile { - label 'gpu&&v100' - filename 'Dockerfile' - dir 'jenkins' - additionalBuildArgs '--build-arg JULIA=1.10' - args '--gpus "device=1"' - } - } - environment { - HOME = pwd(tmp:true) - OMP_NUM_THREADS = 4 - JULIA_NUM_THREADS = 4 - } - steps { - sh ''' - julia -e 'using Pkg: Pkg; Pkg.activate(temp=true); Pkg.Registry.update(); Pkg.update(); Pkg.develop(path="./NDTensors"); Pkg.develop(path="."); Pkg.test("NDTensors"; test_args=["cutensor"])' - ''' - } - } - stage('NDTensorscuTENSORExt julia-1') { - options { - timeout(time: 45, unit: 'MINUTES') - } - agent { - dockerfile { - label 'gpu&&v100' - filename 'Dockerfile' - dir 'jenkins' - additionalBuildArgs '--build-arg JULIA=1.11' - args '--gpus "device=1"' - } - } - environment { - HOME = pwd(tmp:true) - OMP_NUM_THREADS = 4 - JULIA_NUM_THREADS = 4 - } - steps { - sh ''' - julia -e 'using Pkg: Pkg; Pkg.activate(temp=true); Pkg.Registry.update(); Pkg.update(); Pkg.develop(path="./NDTensors"); Pkg.develop(path="."); Pkg.test("NDTensors"; test_args=["cutensor"])' - ''' - } - } - stage('NDTensorsMetalExt Julia-lts'){ 
- options { - timeout(time: 45, unit: 'MINUTES') - } - agent { - label 'm1' - } - environment{ - PATH="${env.HOME}/.juliaup/bin:${env.PATH}" - PLATFORM = 'macos' - } - steps{ - sh ''' - juliaup add lts - juliaup default lts - ''' - sh ''' - julia -e 'using Pkg: Pkg; Pkg.activate(temp=true); Pkg.Registry.update(); Pkg.update(); Pkg.develop(path="./NDTensors"); Pkg.develop(path="."); Pkg.test("NDTensors"; test_args=["metal"])' - ''' - } - } - stage('NDTensorsMetalExt Julia-1'){ - options { - timeout(time: 45, unit: 'MINUTES') - } - agent { - label 'm1' - } - environment{ - PATH="${env.HOME}/.juliaup/bin:${env.PATH}" - PLATFORM = 'macos' - } - steps{ - sh ''' - juliaup update - juliaup default release - ''' - sh ''' - julia -e 'using Pkg: Pkg; Pkg.activate(temp=true); Pkg.Registry.update(); Pkg.update(); Pkg.develop(path="./NDTensors"); Pkg.develop(path="."); Pkg.test("NDTensors"; test_args=["metal"])' - ''' - } - } - } - } - } -} diff --git a/src/ITensors.jl b/src/ITensors.jl index 0c8496831d..ce4be0e50f 100644 --- a/src/ITensors.jl +++ b/src/ITensors.jl @@ -1,166 +1,5 @@ -""" -ITensor is a library for rapidly creating correct and efficient tensor network algorithms. - -An ITensor is a tensor whose interface is independent of its memory layout. -ITensor indices are 'intelligent' meaning they carry extra information and -'recognize' each other automatically when contracting or adding ITensors. - -The ITensor library includes composable and extensible algorithms for optimizing -and transforming tensor networks, such as matrix product state and matrix product operators. - -# Example Usage - -Define tensor indices i and j - - i = Index(2, "i") - j = Index(3, "j") - -Make an ITensor with these indices - - A = ITensor(i,j) - -Set the i==2,j==1 element to -2.6 - - A[j=>1,i=>2] = -2.6 - A[i=>2,j=>1] = -2.6 #this has the same effect - -Make an ITensor with random elements - - B = random_itensor(j,i) - -Add ITensors A and B together (ok that indices in different order) - - C = A + B - -# Other Features of ITensor - - - Tools for **tensor networks**, such as matrix product states (MPS) / tensor trains (TT) - - **Algorithms** for solving linear equations in MPS form (such as DMRG) or - for integrating differential equations ("time evolving MPS") - - ITensors can have **sparse data** internally, such as block sparsity or diagonal - sparsity, while having the same interface as dense ITensors - - ITensors can have **symmetry properties** (invariance or equivariance) under - group transformations of the indices. In physics terminology such ITensors conserve quantum numbers. - -# Documentation and Resources - -ITensor website: https://itensor.org/ - -Documentation: https://itensor.github.io/ITensors.jl/stable/ -""" module ITensors -include("usings.jl") -include("utils.jl") -include("lib/ContractionSequenceOptimization/src/ContractionSequenceOptimization.jl") -# TODO: `using .ContractionSequenceOptimization: ContractionSequenceOptimization, ...`. -using .ContractionSequenceOptimization -include("lib/LazyApply/src/LazyApply.jl") -# TODO: `using .LazyApply: LazyApply, ...`. -using .LazyApply -using .LazyApply: Prod, Scaled, Sum, coefficient -include("lib/Ops/src/Ops.jl") -# TODO: `using .Ops: Ops, ...`. -using .Ops -using .Ops: Ops, Op, Trotter -import .Ops: sites, name -include("exports.jl") -include("imports.jl") -include("global_variables.jl") -# TODO: Move to `lib/LastVals/src/LastVals.jl`. 
-include("lastval.jl") -include("lib/SmallStrings/src/SmallStrings.jl") -using .SmallStrings: SmallStrings, IntChar, SmallString, Tag, isint, isnull -include("readwrite.jl") -export readcpp -# TODO: Move to `lib/Nots/src/Nots.jl`. -include("not.jl") -export not -include("lib/TagSets/src/TagSets.jl") -using .TagSets: TagSets, set_strict_tags!, using_strict_tags -# TODO: Move to `lib/Names/src/Names.jl`. -include("name.jl") -# TODO: Move to `lib/Vals/src/Vals.jl`. -include("val.jl") -export val -include("lib/QuantumNumbers/src/QuantumNumbers.jl") -using .QuantumNumbers: - Arrow, - In, - Neither, - Out, - QN, - QNVal, - hasname, - have_same_mods, - have_same_qns, - isactive, - maxQNs, - modulus, - nactive -export QN, isactive, modulus -include("symmetrystyle.jl") -include("index.jl") -include("set_operations.jl") -include("indexset.jl") -include("itensor.jl") -include("qn/flux.jl") -include("oneitensor.jl") -include("tensor_operations/tensor_algebra.jl") -include("tensor_operations/matrix_algebra.jl") -include("tensor_operations/permutations.jl") -include("lib/SiteTypes/src/SiteTypes.jl") -using .SiteTypes: - SiteTypes, - OpName, - SiteType, - StateName, - TagType, - ValName, - @OpName_str, - @SiteType_str, - @StateName_str, - @TagType_str, - @ValName_str, - alias, - has_fermion_string, - op, - op!, - ops, - siteind, - siteinds, - state -include("lib/ITensorsSiteTypesExt/src/ITensorsSiteTypesExt.jl") -include("broadcast.jl") -include("tensor_operations/matrix_decomposition.jl") -include("adapt.jl") -include("set_types.jl") -include("tensor_operations/itensor_combiner.jl") -include("qn/qnindex.jl") -include("qn/qnindexset.jl") -include("qn/qnitensor.jl") -include("nullspace.jl") -include("lib/ITensorsOpsExt/src/ITensorsOpsExt.jl") -include("fermions/fermions.jl") -export fparity, isfermionic -include("lib/ITensorsNamedDimsArraysExt/src/ITensorsNamedDimsArraysExt.jl") -using .ITensorsNamedDimsArraysExt: ITensorsNamedDimsArraysExt -include("../ext/ITensorsChainRulesCoreExt/ITensorsChainRulesCoreExt.jl") -include("lib/ITensorVisualizationCore/src/ITensorVisualizationCore.jl") -# TODO: `using .ITensorVisualizationCore: ITensorVisualizationCore, ...`. -using .ITensorVisualizationCore -using .ITensorVisualizationCore: - @visualize, - @visualize!, - @visualize_noeval, - @visualize_noeval!, - @visualize_sequence, - @visualize_sequence_noeval -include("deprecated.jl") -include("argsdict/argsdict.jl") -include("packagecompile/compile.jl") -include("developer_tools.jl") -function __init__() - return resize!(empty!(INDEX_ID_RNGs), Threads.nthreads()) # ensures that we didn't save a bad object -end +# Write your package code here. + end diff --git a/src/adapt.jl b/src/adapt.jl deleted file mode 100644 index 53edb0987e..0000000000 --- a/src/adapt.jl +++ /dev/null @@ -1,5 +0,0 @@ -adapt_structure(to, x::ITensor) = itensor(adapt(to, tensor(x))) - -# @inline function NDTensors.cu(x::ITensor; unified::Bool=false) -# return itensor(NDTensors.cu(storage(x); unified=unified), inds(x)) -# end diff --git a/src/argsdict/argsdict.jl b/src/argsdict/argsdict.jl deleted file mode 100644 index 7bece157e5..0000000000 --- a/src/argsdict/argsdict.jl +++ /dev/null @@ -1,111 +0,0 @@ - -struct AutoType end - -""" - auto_parse(::Union{Type,AutoType}, val) - -Automatically parse the value into a Julia value. 
-""" -auto_parse(ValType::Type, val) = parse(ValType, val) - -auto_parse(::Type{AutoType}, val) = eval(Meta.parse(val)) - -auto_parse(::Type{String}, val) = String(strip(val)) - -""" - parse_type(valtype, default_type::Type = AutoType) - -Parse the type of `valtype`. If `valtype` has a type declaration, -like `parse_type("2::ComplexF64")`, it gets parsed as that type -declared, and returns `(ComplexF64, 2)`. - -If `val` doesn't have a type declaration, it gets parsed into -`default_type`, which defaults to `AutoType`, so `parse_type("2")` -returns `(AutoType, 2)` -""" -function parse_type(valtype, default_type::Type=AutoType) - # Check for a type decleration - valtype_vec = split(valtype, "::"; limit=2) - ValType = default_type - if length(valtype_vec) > 1 - # Type declaration - ValType = eval(Meta.parse(valtype_vec[2])) - end - val = valtype_vec[1] - return ValType, val -end - -""" - argsdict([args_list::Vector]; - first_arg::Int = 1, - delim = '=', - as_symbols::Bool = false, - default_named_type::Type = ITensors.AutoType, - save_positional::Bool = true, - default_positional_type::Type = String, - prefix::String = "_arg") - -Parse the command line arguments such as `julia N=2 X=1e-12` -and put them in a dictionary, where the keys are before -the delimiter and the values are after, so -`Dict("N" => "2", "X" => "1e-12")`. -""" -function argsdict( - args_list::Vector; - first_arg::Int=1, - delim='=', - as_symbols::Bool=false, - default_named_type::Type=AutoType, - save_positional::Bool=true, - default_positional_type::Type=String, - prefix::String="_arg", -) - KeyType = as_symbols ? Symbol : String - parsed = Dict{KeyType,Any}() - narg = 1 - for n in first_arg:length(args_list) - a = args_list[n] - - # Check if it is a command line flag - if startswith(a, "--") - flag = a[3:end] - if flag == "autotype" || flag == "a" - default_positional_type = AutoType - default_named_type = AutoType - elseif flag == "stringtype" || flag == "s" - default_positional_type = String - default_named_type = String - end - continue - end - - optval = split(a, delim) - if length(optval) == 1 - if save_positional - val = only(optval) - parsed[KeyType("$prefix$narg")] = auto_parse( - parse_type(val, default_positional_type)... - ) - narg += 1 - else - @warn "Ignoring argument $a since it does not have the delimiter \"$delim\"." - end - continue - elseif length(optval) == 2 - opt, val = optval - else - error( - "Argument $a has more than one delimiter \"$delim\", which is not well defined." - ) - end - ValType, key = parse_type(opt, default_named_type) - key = strip(key) - ' ' in key && error("Option \"$key\" contains spaces, which is not well defined") - typedkey = KeyType(key) - typedval = auto_parse(ValType, val) - parsed[typedkey] = typedval - end - return parsed -end - -argsdict(; kwargs...) = argsdict(ARGS; kwargs...) diff --git a/src/broadcast.jl b/src/broadcast.jl deleted file mode 100644 index 4f0848d580..0000000000 --- a/src/broadcast.jl +++ /dev/null @@ -1,545 +0,0 @@ - -# -# Broadcasting for IndexSets -# - -# TODO: delete -## # We're using a specialized type `IndexVector` since `IndexSet` has -## # a compile time check that all indices are unique, but `similar` -## # in general won't make a unique IndexSet. We need a specialized type -## # to dispatch on `copyto!`. 
This is like -## struct IndexVector{T} <: AbstractVector{T} -## data::Vector{T} -## end -## size(v::IndexVector) = size(v.data) -## -## struct IndexSetStyle <: Broadcast.BroadcastStyle end -## -## BroadcastStyle(::Type{<: IndexSet}) = IndexSetStyle() -## -## BroadcastStyle(::IndexSetStyle, ::BroadcastStyle) = IndexSetStyle() -## -## broadcastable(is::IndexSet) = is -## -## function _similar(bc::Broadcasted{IndexSetStyle}, ::Type{ElT}) where {ElT} -## return similar(Array{ElT}, axes(bc)) -## end -## -## # In the case when the output type is inferred to be `<: Index`, -## # then output an IndexSet (like `prime.(is)`) -## function similar(bc::Broadcasted{IndexSetStyle}, ::Type{ElT}) where {ElT <: Index} -## is = _similar(bc, ElT) -## # We're using a specialized type `IndexVector` since `IndexSet` has -## # a compile time check that all indices are unique, but `similar` -## # in general won't make a unique IndexSet. We need a specialized type -## # to dispatch on `copyto!` -## return IndexVector(_similar(bc, ElT)) -## end -## -## # In general, the output type will be a Vector{ElT} (like `dim.(is)`) -## function similar(bc::Broadcast.Broadcasted{IndexSetStyle}, ::Type{ElT}) where {ElT} -## return _similar(bc, ElT) -## end -## -## function copyto!(dest::IndexVector, bc::Broadcast.Broadcasted{IndexSetStyle}) -## copyto!(dest.data, bc) -## return IndexSet(dest.data) -## end - -# -# Broadcasting for ITensors -# - -# -# ITensorStyle -# - -struct ITensorStyle <: BroadcastStyle end - -BroadcastStyle(::Type{<:ITensor}) = ITensorStyle() - -broadcastable(T::ITensor) = T - -function Base.similar(bc::Broadcasted{ITensorStyle}, ::Type{ElT}) where {ElT<:Number} - A = find_type(ITensor, bc.args) - return similar(A, ElT) -end - -# -# ITensorOpScalarStyle -# Operating with a scalar -# - -struct ITensorOpScalarStyle <: BroadcastStyle end - -function Base.BroadcastStyle(::ITensorStyle, ::DefaultArrayStyle{0}) - return ITensorOpScalarStyle() -end - -Base.BroadcastStyle(::ITensorStyle, ::ITensorOpScalarStyle) = ITensorOpScalarStyle() - -instantiate(bc::Broadcasted{ITensorOpScalarStyle}) = bc - -function broadcasted(::typeof(Base.literal_pow), ::typeof(^), T::ITensor, x::Val) - return broadcasted(ITensorOpScalarStyle(), Base.literal_pow, Ref(^), T, Ref(x)) -end - -function Base.similar( - bc::Broadcasted{ITensorOpScalarStyle}, ::Type{ElT} -) where {ElT<:Number} - A = find_type(ITensor, bc.args) - return similar(A, ElT) -end - -# -# For arbitrary function chaining f.(g.(h.(x))) -# - -function instantiate(bc::Broadcasted{ITensorStyle,<:Any,<:Function,<:Tuple{Broadcasted}}) - return instantiate(broadcasted(bc.f ∘ bc.args[1].f, bc.args[1].args...)) -end - -function instantiate( - bc::Broadcasted{ - ITensorStyle, - <:Any, - <:Function, - <:Tuple{Broadcasted{ITensorStyle,<:Any,<:Function,<:Tuple{<:ITensor}}}, - }, -) - return broadcasted(bc.f ∘ bc.args[1].f, bc.args[1].args...) -end - -instantiate(bc::Broadcasted{ITensorStyle}) = bc - -# -# Some helper functionality to find certain -# inputs in the argument list -# - -""" -`A = find_type(::Type,As)` returns the first of type Type among the arguments. 
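-
-For example, `find_type(Number, (A, 2.0))` would return `2.0` when `A` is
-an ITensor, and `nothing` when no argument has the requested type.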
-""" -function find_type(::Type{T}, args::Tuple) where {T} - return find_type(T, find_type(T, args[1]), Base.tail(args)) -end -find_type(::Type{T}, x) where {T} = x -find_type(::Type{T}, a::T, rest) where {T} = a -find_type(::Type{T}, ::Any, rest) where {T} = find_type(T, rest) -# If not found, return nothing -find_type(::Type{T}, ::Tuple{}) where {T} = nothing - -# -# Generic fallback -# - -function Base.copyto!(T::ITensor, bc::Broadcasted) - return error( - "The broadcasting operation you are attempting is not yet implemented for ITensors, please raise an issue if you would like it to be supported.", - ) -end - -# -# B .= α .* A -# A .*= α -# - -function Base.copyto!( - T::ITensor, - bc::Broadcasted{ - ITensorOpScalarStyle, - <:Any, - typeof(*), - <:Tuple{<:Union{<:Number,<:ITensor},<:Union{<:Number,<:ITensor}}, - }, -) - α = find_type(Number, bc.args) - A = find_type(ITensor, bc.args) - map!((t, a) -> α * a, T, T, A) - return T -end - -# -# B .= A ./ α -# A ./= α -# - -function Base.copyto!( - T::ITensor, - bc::Broadcasted{ITensorOpScalarStyle,<:Any,typeof(/),<:Tuple{<:ITensor,<:Number}}, -) - α = find_type(Number, bc.args) - A = find_type(ITensor, bc.args) - ## GPU compilers can have a problem when map is - ## Given bc.f. map seems to make a closure with a - ## relatively complicated signature - f = bc.f - map!((t, a) -> f(a, α), T, T, A) - return T -end - -# -# C .= A ./ B -# - -function Base.copyto!( - R::ITensor, bc::Broadcasted{ITensorStyle,<:Any,typeof(/),<:Tuple{<:ITensor,<:ITensor}} -) - T1, T2 = bc.args - f = bc.f - if R === T1 - map!((t1, t2) -> f(t1, t2), R, T1, T2) - ## I tried this and it is numberically wrong - #map!(f, R, T1, T2) - elseif R === T2 - map!((t1, t2) -> f(t2, t1), R, T2, T1) - #map!(f, R, T2, T1) - else - error("When dividing two ITensors in-place, one must be the same as the output ITensor") - end - return R -end - -# -# C .= A .⊙ B -# - -function Base.copyto!( - R::ITensor, bc::Broadcasted{ITensorStyle,<:Any,typeof(⊙),<:Tuple{<:ITensor,<:ITensor}} -) - T1, T2 = bc.args - if R === T1 - map!((t1, t2) -> *(t1, t2), R, T1, T2) - elseif R === T2 - map!((t1, t2) -> *(t2, t1), R, T2, T1) - else - error( - "When Hadamard producting two ITensors in-place, one must be the same as the output ITensor", - ) - end - return R -end - -# -# B .= α ./ A -# - -function Base.copyto!( - T::ITensor, - bc::Broadcasted{ITensorOpScalarStyle,<:Any,typeof(/),<:Tuple{<:Number,<:ITensor}}, -) - α = find_type(Number, bc.args) - A = find_type(ITensor, bc.args) - f = bc.f - map!((t, a) -> f(α, a), T, T, A) - return T -end - -# -# For B .= A .^ 2.5 -# - -function Base.copyto!(R::ITensor, bc::Broadcasted{ITensorOpScalarStyle,<:Any,typeof(^)}) - α = find_type(Number, bc.args) - T = find_type(ITensor, bc.args) - map!((r, t) -> t^α, R, R, T) - return R -end - -# -# For B .= A .^ 2 -# - -function Base.copyto!( - R::ITensor, bc::Broadcasted{ITensorOpScalarStyle,<:Any,typeof(Base.literal_pow)} -) - α = find_type(Base.RefValue{<:Val}, bc.args).x - powf = find_type(Base.RefValue{<:Function}, bc.args).x - @assert !isnothing(powf) - T = find_type(ITensor, bc.args) - f = bc.f - map!((r, t) -> f(^, t, α), R, R, T) - return R -end - -# -# For A .= α -# - -function Base.copyto!( - T::ITensor, bc::Broadcasted{DefaultArrayStyle{0},<:Any,typeof(identity),<:Tuple{<:Number}} -) - fill!(T, bc.args[1]) - return T -end - -# -# For B .= A -# - -function Base.copyto!( - T::ITensor, bc::Broadcasted{ITensorStyle,<:Any,typeof(identity),<:Tuple{<:ITensor}} -) - A = bc.args[1] - map!((r, t) -> t, T, T, A) - return T 
-end - -function fmap(bc::Broadcasted{ITensorStyle,<:Any,typeof(+),<:Tuple{Vararg{ITensor}}}) - return + -end - -function fmap(bc::Broadcasted{ITensorStyle,<:Any,typeof(-),<:Tuple{Vararg{ITensor}}}) - return - -end - -# -# B .+= A -# - -function Base.copyto!( - T::ITensor, bc::Broadcasted{ITensorStyle,<:Any,typeof(+),<:Tuple{Vararg{ITensor}}} -) - if T === bc.args[1] - A = bc.args[2] - elseif T === bc.args[2] - A = bc.args[1] - else - error("When adding two ITensors in-place, one must be the same as the output ITensor") - end - map!(fmap(bc), T, T, A) - return T -end - -# -# B .-= A -# - -function Base.copyto!( - T::ITensor, bc::Broadcasted{ITensorStyle,<:Any,typeof(-),<:Tuple{Vararg{ITensor}}} -) - if T === bc.args[1] - A = bc.args[2] - elseif T === bc.args[2] - A = bc.args[1] - else - error( - "When subtracting two ITensors in-place, one must be the same as the output ITensor" - ) - end - map!(fmap(bc), T, T, A) - return T -end - -# -# C .+= α .* A -# C .-= α .* A -# -# C .+= α .* A .* B -# C .-= α .* A .* B -# -# B .+= A .^ 2.5 -# B .-= A .^ 2.5 -# -# B .+= A .^ 2 -# B .-= A .^ 2 -# - -# - -function Base.copyto!( - T::ITensor, bc::Broadcasted{ITensorOpScalarStyle,<:Any,<:Union{typeof(+),typeof(-)}} -) - C = find_type(ITensor, bc.args) - bc_bc = find_type(Broadcasted, bc.args) - - if T === C - A = find_type(ITensor, bc_bc.args) - α = find_type(Number, bc_bc.args) - - # Check if it is the case .^(::Int) - γ = find_type(Base.RefValue{<:Val}, bc_bc.args) - powf = find_type(Base.RefValue{<:Function}, bc_bc.args) - ## Putting fmap in the map call still doesn't actually grab the function and causes GPU to fail so just realize the function slightly earlier here - f1 = bc.f - f2 = bc_bc.f - - if !isnothing(α) && !isnothing(A) - if bc_bc.args[1] isa Number - map!((r, t) -> f1(r, f2(α, t)), T, T, A) - else - map!((r, t) -> f1(r, f2(t, α)), T, T, A) - end - elseif !isnothing(γ) && !isnothing(A) && !isnothing(powf) - map!((r, t) -> f1(r, f2(powf[], t, γ[])), T, T, A) - else - # In-place contraction: - # C .+= α .* A .* B - bc_bc_bc = find_type(Broadcasted, bc_bc.args) - if isnothing(α) - β = find_type(Number, bc_bc_bc.args) - B = find_type(ITensor, bc_bc_bc.args) - else - A, B = bc_bc_bc.args - end - mul!(T, A, B, β, f1(1)) - end - else - error("When adding two ITensors in-place, one must be the same as the output ITensor") - end - return T -end - -# -# C .= β .* C .+ α .* A -# C .= β .* C .+ α .* A .* B -# - -struct axpby{Alpha,Beta} <: Function - alpha::Alpha - beta::Beta -end - -(f::axpby)(y, x) = x * f.alpha + y * f.beta - -## TODO this code doesn't actually get called -function Base.copyto!( - T::ITensor, - bc::Broadcasted{ITensorOpScalarStyle,<:Any,typeof(+),<:Tuple{Vararg{Broadcasted}}}, -) - bc_α = bc.args[1] - bc_β = bc.args[2] - α = find_type(Number, bc_α.args) - A = find_type(ITensor, bc_α.args) - β = find_type(Number, bc_β.args) - C = find_type(ITensor, bc_β.args) - (T !== A && T !== C) && - error("When adding two ITensors in-place, one must be the same as the output ITensor") - if T === A - bc_α, bc_β = bc_β, bc_α - α, β = β, α - A, C = C, A - end - if !isnothing(A) && !isnothing(C) && !isnothing(α) && !isnothing(β) - # The following fails to compile on some GPU backends. 
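-    # (an anonymous closure capturing `α` and `β` can defeat GPU kernel
-    # compilation, so the concrete `axpby` callable defined above is
-    # passed to `map!` instead):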
- # map!((r, t) -> β * r + α * t, T, T, A) - map!(axpby(α, β), T, T, A) - else - bc_bc_α = find_type(Broadcasted, bc_α.args) - if isnothing(α) - α = find_type(Number, bc_bc_α.args) - B = find_type(ITensor, bc_bc_α.args) - else - A, B = bc_bc_α.args - end - mul!(T, A, B, α, β) - end - return T -end - -# -# For A .+= α -# - -## TODO this code fails because of scalar indexing -function Base.copyto!( - T::ITensor, - bc::Broadcasted{ - ITensorOpScalarStyle,<:Any,typeof(+),<:Tuple{Vararg{Union{<:ITensor,<:Number}}} - }, -) - α = find_type(Number, bc.args) - A = find_type(ITensor, bc.args) - if A === T - tensor(T) .= tensor(A) .+ α - else - error( - "Currently, we don't support `B .= A .+ α` if `B !== A` (i.e. only `A .+= α` is supported", - ) - end - return T -end - -# -# For C .= A .* B -# - -function Base.copyto!( - T::ITensor, bc::Broadcasted{ITensorStyle,<:Any,typeof(*),<:Tuple{<:ITensor,<:ITensor}} -) - mul!(T, bc.args[1], bc.args[2]) - return T -end - -# -# For C .= α .* A .* B -# - -function Base.copyto!(T::ITensor, bc::Broadcasted{ITensorOpScalarStyle,<:Any,typeof(*)}) - A = find_type(Union{<:Number,<:ITensor}, bc.args) - bc_bc = find_type(Broadcasted, bc.args) - if A isa Number - mul!(T, bc_bc.args[1], bc_bc.args[2], A) - else - mul!(T, A, find_type(ITensor, bc_bc.args), find_type(Number, bc_bc.args)) - end - return T -end - -# -# For B .= f.(A) -# - -function Base.copyto!( - R::ITensor, bc::Broadcasted{ITensorStyle,<:Any,<:Function,<:Tuple{<:ITensor}} -) - f = bc.f - T = bc.args[1] - map!((r, t) -> f(t), R, R, T) - return R -end - -# -# For B .+= f.(A) -# - -function Base.copyto!( - R::ITensor, - bc::Broadcasted{ - ITensorStyle,<:Any,typeof(+),<:Tuple{Vararg{Union{<:ITensor,<:Broadcasted}}} - }, -) - R̃ = find_type(ITensor, bc.args) - bc2 = find_type(Broadcasted, bc.args) - f = bc2.f - if R === R̃ - map!((r, t) -> r + f(t), R, R, bc2.args[1]) - else - error("In C .= B .+ f.(A), C and B must be the same ITensor") - end - return R -end - -# -# For B .= f.(B) + g.(A) -# - -## TODO check to see if this code is being called as expected -function Base.copyto!( - R::ITensor, bc::Broadcasted{ITensorStyle,<:Any,typeof(+),<:Tuple{Vararg{Broadcasted}}} -) - bc1 = bc.args[1] - bc2 = bc.args[2] - T1 = bc1.args[1] - f1 = bc1.f - T2 = bc2.args[1] - f2 = bc2.f - if R === T1 - map!((r, t) -> f1(r) + f2(t), R, R, T2) - elseif R === T2 - map!((r, t) -> f2(r) + f1(t), R, R, T1) - else - error("In C .= f.(B) .+ g.(A), C and B or A must be the same ITensor") - end - return R -end diff --git a/src/deprecated.jl b/src/deprecated.jl deleted file mode 100644 index 4a36145b90..0000000000 --- a/src/deprecated.jl +++ /dev/null @@ -1,34 +0,0 @@ -# global_variables.jl -@deprecate disable_tblis!() ITensors.disable_tblis() -@deprecate disable_warn_order!() ITensors.disable_warn_order() -@deprecate enable_tblis!() ITensors.enable_tblis() -@deprecate reset_warn_order!() ITensors.reset_warn_order() -@deprecate set_warn_order!(N) ITensors.set_warn_order(N) -@deprecate use_debug_checks() ITensors.using_debug_checks() - -# index.jl -@deprecate getindex(i::Index, n::Int) (i => n) - -# indexset.jl -@deprecate store(is::IndexSet) data(is) -@deprecate firstintersect(is...; kwargs...) getfirst(intersect(is...); kwargs...) -@deprecate firstsetdiff(is...; kwargs...) getfirst(setdiff(is...); kwargs...) - -# itensor.jl -@deprecate commonindex(args...; kwargs...) commonind(args...; kwargs...) -@deprecate diagITensor(args...; kwargs...) diag_itensor(args...; kwargs...) 
-@deprecate emptyITensor(::Type{Any}) emptyITensor() -@deprecate findindex(args...; kwargs...) firstind(args...; kwargs...) -@deprecate findinds(args...; kwargs...) inds(args...; kwargs...) -@deprecate linkindex(args...; kwargs...) linkind(args...; kwargs...) -@deprecate matmul(A::ITensor, B::ITensor) product(A, B) -@deprecate randomITensor(args...; kwargs...) random_itensor(args...; kwargs...) -@deprecate replaceindex!(args...; kwargs...) replaceind!(args...; kwargs...) -@deprecate siteindex(args...; kwargs...) siteind(args...; kwargs...) -@deprecate store(A::ITensor) storage(A) -@deprecate setstore!(T::ITensor, st) setstorage!(T, st) false -@deprecate setstore(T::ITensor, st) setstorage(T, st) false -@deprecate uniqueindex(args...; kwargs...) uniqueind(args...; kwargs...) - -# qn/qn.jl -@deprecate store(qn::QN) data(qn) diff --git a/src/developer_tools.jl b/src/developer_tools.jl deleted file mode 100644 index 356e9b98eb..0000000000 --- a/src/developer_tools.jl +++ /dev/null @@ -1,30 +0,0 @@ - -""" -inspectQNITensor is a developer-level debugging tool -to look at internals or properties of QNITensors -""" -function inspectQNITensor(T::ITensor, is::QNIndexSet) - #@show T.store.blockoffsets - #@show T.store.data - println("Block fluxes:") - for b in nzblocks(T) - @show flux(T, b) - end -end -inspectQNITensor(T::ITensor, is::IndexSet) = nothing -inspectQNITensor(T::ITensor) = inspectQNITensor(T, inds(T)) - -""" - pause() - -Pauses execution until a key (other than 'q') is pressed. -Entering 'q' exits the program. The `pause()` function -is useful for inspecting output of programs at certain -points while giving the option to continue. -""" -function pause() - print(stdout, "(Paused) ") - c = read(stdin, 1) - c == UInt8[0x71] && exit(0) - return nothing -end diff --git a/src/exports.jl b/src/exports.jl deleted file mode 100644 index 803312ba58..0000000000 --- a/src/exports.jl +++ /dev/null @@ -1,197 +0,0 @@ -export - # From external modules - # LinearAlgebra - nullspace, - tr, - # Modules - LinearAlgebra, - NDTensors, - # NDTensors module - # Types - Block, - # NDTensors.RankFactorization module - Spectrum, - # Methods - eigs, - entropy, - truncerror, - # Deprecated - addblock!, - # ITensors.jl - index_id_rng, - # argsdict/argsdict.jl - argsdict, - # tensor_operations/matrix_decomposition.jl - eigen, - factorize, - polar, - qr, - rq, - lq, - ql, - svd, - diag, - # tensor_operations/tensor_algebra.jl - contract, - # global_variables.jl - # Methods - # Macros - @disable_warn_order, - @reset_warn_order, - @set_warn_order, - # index.jl - # Types - Index, - IndexVal, - # Methods - dag, - dim, - dir, - eachval, - eachindval, - hasid, - hasind, - hasplev, - hasqns, - id, - ind, - isindequal, - noprime, - plev, - prime, - removetags, - removeqn, - removeqns, - replacetags, - replacetags!, - setdir, - setprime, - setspace, - settags, - sim, - space, - splitblocks, - tags, - # indexset.jl - # Types - IndexSet, - Order, - # Methods - allhastags, - anyhastags, - dims, - getfirst, - mapprime, - maxdim, - mindim, - permute, - pop, - popfirst, - push, - pushfirst, - replaceind, - replaceinds, - replaceprime, - swapinds, - setindex, - swapind, - swapinds, - swapprime, - swaptags, - # itensor.jl - # Types - ITensor, - # Methods - ⊙, - ⊕, - addtags!, - apply, - Apply, - array, - axpy!, - blockoffsets, - checkflux, - combinedind, - combiner, - commonind, - commoninds, - complex!, - convert_eltype, - convert_leaf_eltype, - delta, - dense, - denseblocks, - δ, - diagitensor, - diag_itensor, - directsum, - dot, - 
eachnzblock, - firstind, - filterinds, - hadamard_product, - hascommoninds, - hasind, - hasinds, - hassameinds, - ind, - inds, - inner, - insertblock!, - ishermitian, - itensor, - mul!, - matrix, - mapprime!, - noncommonind, - noncommoninds, - norm, - normalize, - normalize!, - noprime!, - nnzblocks, - nzblocks, - nzblock, - nnz, - onehot, - order, - permute, - prime!, - product, - randn!, - random_itensor, - removetags!, - replacetags!, - replaceind!, - replaceinds!, - swapinds!, - rmul!, - scale!, - scalar, - setelt, - storage, - setprime!, - swapprime!, - settags!, - swaptags!, - transpose, - uniqueinds, - uniqueind, - unioninds, - unionind, - vector, - emptyITensor, - # tagset.jl - # Types - TagSet, - # Macros - @ts_str, - # Methods - addtags, - hastags, - # qn/qnindex.jl - blockdim, - flux, - hasqns, - nblocks, - qn diff --git a/src/fermions/fermions.jl b/src/fermions/fermions.jl deleted file mode 100644 index 1ff55a7a40..0000000000 --- a/src/fermions/fermions.jl +++ /dev/null @@ -1,378 +0,0 @@ -using .QuantumNumbers: QuantumNumbers, QN - -""" - parity_sign(P) - -Given an array or tuple of integers representing -a permutation or a subset of a permutation, -compute the parity sign defined as -1 for a -permutation consisting of an odd number of swaps -and +1 for an even number of swaps. This -implementation uses an O(n^2) algorithm and is -intended for small permutations only. -""" -function parity_sign(P)::Int - L = length(P) - s = +1 - for i in 1:L, j in (i + 1):L - s *= sign(P[j] - P[i]) - end - return s -end - -isfermionic(qv::QuantumNumbers.QNVal) = (QuantumNumbers.modulus(qv) < 0) - -isfermionic(qn::QN) = any(isfermionic, qn) - -has_fermionic_subspaces(i::Index) = false - -function has_fermionic_subspaces(i::QNIndex) - for b in 1:nblocks(i) - isfermionic(qn(i, b)) && (return true) - end - return false -end - -isfermionic(i::Index) = has_fermionic_subspaces(i) - -has_fermionic_subspaces(is::Indices) = false - -function has_fermionic_subspaces(is::QNIndices) - for i in is, b in 1:nblocks(i) - isfermionic(qn(i, b)) && (return true) - end - return false -end - -has_fermionic_subspaces(T::Tensor) = has_fermionic_subspaces(inds(T)) - -""" - fparity(qn::QN) - fparity(qn::IndexVal) - -Compute the fermion parity (0 or 1) of a QN of IndexVal, -defined as the sum mod 2 of each of its fermionic -QNVals (QNVals with negative modulus). -""" -function fparity(qn::QN) - p = 0 - for qv in qn - if isfermionic(qv) - p += val(qv) - end - end - return mod(p, 2) -end - -fparity(iv::Pair{<:Index}) = fparity(qn(iv)) - -Base.isodd(q::QN) = isodd(fparity(q)) -Base.isodd(iv::Pair{<:Index}) = isodd(fparity(iv)) - -""" - compute_permfactor(p,iv_or_qn::Vararg{T,N}) - -Given a permutation p and a set "s" of QNIndexVals or QNs, -if the subset of index vals which are fermion-parity -odd undergo an odd permutation (odd number of swaps) -according to p, then return -1. Otherwise return +1. -""" -function compute_permfactor(p, iv_or_qn...; range=1:length(iv_or_qn))::Int - !using_auto_fermion() && return 1 - N = length(iv_or_qn) - # XXX: Bug https://github.com/ITensor/ITensors.jl/issues/931 - # oddp = @MVector zeros(Int, N) - oddp = MVector((ntuple(Returns(0), Val(N)))) - n = 0 - @inbounds for j in range - if fparity(iv_or_qn[p[j]]) == 1 - n += 1 - oddp[n] = p[j] - end - end - return parity_sign(oddp[1:n]) -end - -function NDTensors.permfactor(p, ivs::Vararg{Pair{QNIndex},N}; kwargs...) where {N} - !using_auto_fermion() && return 1 - return compute_permfactor(p, ivs...; kwargs...) 
-end - -function NDTensors.permfactor( - perm, block::NDTensors.Block{N}, inds::QNIndices; kwargs... -) where {N} - !using_auto_fermion() && return 1 - qns = ntuple(n -> qn(inds[n], block[n]), N) - return compute_permfactor(perm, qns...; kwargs...) -end - -NDTensors.block_parity(i::QNIndex, block::Integer) = fparity(qn(i, block)) - -NDTensors.block_sign(i::QNIndex, block::Integer) = 2 * NDTensors.block_parity(i, block) - 1 - -function NDTensors.right_arrow_sign(i::QNIndex, block::Integer) - !using_auto_fermion() && return 1 - if dir(i) == Out && NDTensors.block_parity(i, block) == 1 - return -1 - end - return 1 -end - -function NDTensors.left_arrow_sign(i::QNIndex, block::Integer) - !using_auto_fermion() && return 1 - if dir(i) == In && NDTensors.block_parity(i, block) == 1 - return -1 - end - return 1 -end - -# Version of getperm which is type stable -# and works for Tuple or Vector inputs -function vec_getperm(s1, s2) - N = length(s1) - p = Vector{Int}(undef, N) - for i in 1:N - @inbounds p[i] = NDTensors._findfirst(==(@inbounds s1[i]), s2) - end - return p -end - -@inline function NDTensors.compute_alpha( - ElR, - labelsR, - blockR, - input_indsR, - labelsT1, - blockT1, - indsT1::NTuple{N1,QNIndex}, - labelsT2, - blockT2, - indsT2::NTuple{N2,QNIndex}, -) where {N1,N2} - if !using_auto_fermion() - !has_fermionic_subspaces(indsT1) || !has_fermionic_subspaces(indsT2) - return one(ElR) - end - - # the "indsR" argument to compute_alpha from NDTensors - # may be a tuple of QNIndex, so convert to a Vector{Index} - indsR = collect(input_indsR) - - nlabelsT1 = TupleTools.sort(labelsT1; rev=true) - nlabelsT2 = TupleTools.sort(labelsT2) - - # Make orig_labelsR from the order of - # indices that would result by just - # taking the uncontracted indices of - # T1 and T2 in their input order: - NR = length(labelsR) - orig_labelsR = zeros(Int, NR) - u = 1 - for ls in (nlabelsT1, nlabelsT2), l in ls - if l > 0 - orig_labelsR[u] = l - u += 1 - end - end - - permT1 = NDTensors.getperm(nlabelsT1, labelsT1) - permT2 = NDTensors.getperm(nlabelsT2, labelsT2) - permR = vec_getperm(labelsR, orig_labelsR) - - alpha1 = NDTensors.permfactor(permT1, blockT1, indsT1) - alpha2 = NDTensors.permfactor(permT2, blockT2, indsT2) - alphaR = NDTensors.permfactor(permR, blockR, indsR) - - alpha_arrows = one(ElR) - for n in 1:length(indsT1) - l = labelsT1[n] - i = indsT1[n] - qi = qn(i, blockT1[n]) - if l < 0 && dir(i) == Out && fparity(qi) == 1 - alpha_arrows *= -1 - end - end - - α = alpha1 * alpha2 * alphaR * alpha_arrows - - return α -end - -# Flip signs of selected blocks of T prior to -# it being multiplied by a combiner ITensor -# labelsR gives the ordering of indices after the product -function NDTensors.before_combiner_signs( - T, - labelsT_, - indsT::NTuple{NT,QNIndex}, - C, - labelsC_, - indsC::NTuple{NC,QNIndex}, - labelsR, - indsR::NTuple{NR,QNIndex}, -) where {NC,NT,NR} - if !using_auto_fermion() || !has_fermionic_subspaces(T) - return T - end - - T = copy(T) - - labelsC = [l for l in labelsC_] - labelsT = [l for l in labelsT_] - - # number of uncombined indices - Nuc = NC - 1 - - ci = NDTensors.cinds(storage(C))[1] - combining = (labelsC[ci] > 0) - - isconj = NDTensors.isconj(storage(C)) - - if combining - #println("Combining <<<<<<<<<<<<<<<<<<<<<<<<<<<") - - nlabelsT = Int[] - - if !isconj - # Permute uncombined indices to front - # in same order as indices passed to the - # combiner constructor - append!(nlabelsT, labelsC[2:end]) - else # isconj - # If combiner is conjugated, put uncombined - # indices in 
*opposite* order as on combiner - append!(nlabelsT, reverse(labelsC[2:end])) - end - @assert all(l -> l < 0, nlabelsT) - - for l in labelsT - if l > 0 #uncontracted - append!(nlabelsT, l) - end - end - @assert length(nlabelsT) == NT - - # Compute permutation that moves uncombined indices to front - permT = vec_getperm(nlabelsT, labelsT) - - for blockT in keys(blockoffsets(T)) - # Compute sign from permuting uncombined indices to front: - alphaT = NDTensors.permfactor(permT, blockT, indsT) - - neg_dir = !isconj ? In : Out - alpha_arrows = 1 - alpha_mixed_arrow = 1 - C_dir = dir(indsC[1]) - for n in 1:length(indsT) - i = indsT[n] - qi = qn(i, blockT[n]) - if labelsT[n] < 0 && fparity(qi) == 1 - alpha_mixed_arrow *= (dir(i) != C_dir) ? -1 : +1 - alpha_arrows *= (dir(i) == neg_dir) ? -1 : +1 - end - end - - fac = alphaT * alpha_arrows - - if isconj - fac *= alpha_mixed_arrow - end - - if fac != 1 - Tb = blockview(T, blockT) - scale!(Tb, fac) - end - end # for blockT - - elseif !combining - # - # Uncombining --------------------------- - # - #println("Uncombining >>>>>>>>>>>>>>>>>>>>>>>>>>>") - - nc = findfirst(l -> l < 0, labelsT) - nlabelsT = [labelsT[nc]] - ic = indsT[nc] - - for l in labelsT - (l > 0) && append!(nlabelsT, l) - end - - # Compute sign for permuting combined index to front - # (sign alphaT to be computed for each block below): - permT = vec_getperm(nlabelsT, labelsT) - - # - # Note: other permutation of labelsT which - # relates to two treatments of isconj==true/false - # in combining case above is handled as a - # post-processing step in NDTensors.after_combiner_signs - # implemented below - # - - for blockT in keys(blockoffsets(T)) - alphaT = NDTensors.permfactor(permT, blockT, indsT) - - neg_dir = !isconj ? Out : In - qic = qn(ic, blockT[nc]) - alpha_arrows = (fparity(qic) == 1 && dir(ic) == neg_dir) ? -1 : +1 - - fac = alphaT * alpha_arrows - - if fac != 1 - Tb = blockview(T, blockT) - scale!(Tb, fac) - end - end - end - - return T -end - -function NDTensors.after_combiner_signs( - R, labelsR, indsR::NTuple{NR,QNIndex}, C, labelsC, indsC::NTuple{NC,QNIndex} -) where {NC,NR} - ci = NDTensors.cinds(store(C))[1] - combining = (labelsC[ci] > 0) - combining && error("NDTensors.after_combiner_signs only for uncombining") - - if !using_auto_fermion() || !has_fermionic_subspaces(R) - return R - end - - R = copy(R) - - # number of uncombined indices - Nuc = NC - 1 - - isconj = NDTensors.isconj(store(C)) - - if !combining - if !isconj - #println("!!! Doing uncombining post-processing step") - rperm = ntuple(i -> (Nuc - i + 1), Nuc) # reverse permutation - NDTensors.scale_blocks!( - R, block -> NDTensors.permfactor(rperm, block, indsR; range=1:Nuc) - ) - else - #println("!!! 
Doing conjugate uncombining post-processing step") - C_dir = dir(inds(C)[1]) - - function mixed_arrow_sign(block) - alpha_mixed_arrow = 1 - for n in 1:Nuc - i = indsR[n] - qi = qn(i, block[n]) - if dir(i) == C_dir && fparity(qi) == 1 - alpha_mixed_arrow *= -1 - end - end - return alpha_mixed_arrow - end - - NDTensors.scale_blocks!(R, block -> mixed_arrow_sign(block)) - end - end - - return R -end diff --git a/src/global_variables.jl b/src/global_variables.jl deleted file mode 100644 index 18ef622ac2..0000000000 --- a/src/global_variables.jl +++ /dev/null @@ -1,211 +0,0 @@ - -# -# Warn about the order of the ITensor after contractions -# - -const default_warn_order = 14 - -const warn_order = Ref{Union{Int,Nothing}}(default_warn_order) - -""" - ITensors.get_warn_order() - -Return the threshold for the order of an ITensor above which -ITensors will emit a warning. - -You can set the threshold with the function `set_warn_order!(N::Int)`. -""" -get_warn_order() = warn_order[] - -""" - ITensors.set_warn_order(N::Int) - -After this is called, ITensor will warn about ITensor contractions -that result in ITensors above the order `N`. - -This function returns the initial warning threshold (what it was -set to before this function was called). - -You can get the current threshold with the function `ITensors.get_warn_order(N::Int)`. You can reset to the default value with -`ITensors.reset_warn_order()`. -""" -function set_warn_order(N::Union{Int,Nothing}) - N_init = get_warn_order() - warn_order[] = N - return N_init -end - -""" - ITensors.reset_warn_order() - -After this is called, ITensor will warn about ITensor contractions -that result in ITensors above the default order -$default_warn_order. - -This function returns the initial warning threshold (what it was -set to before this function was called). -""" -reset_warn_order() = set_warn_order(default_warn_order) - -""" - ITensors.disable_warn_order() - -After this is called, ITensor will not warn about ITensor -contractions that result in large ITensor orders. - -This function returns the initial warning threshold (what it was -set to before this function was called). -""" -disable_warn_order() = set_warn_order(nothing) - -""" - @disable_warn_order - -Disable warning about the ITensor order in a block of code. - -# Examples - -```julia -A = ITensor(IndexSet(_ -> Index(1), Order(8))) -B = ITensor(IndexSet(_ -> Index(1), Order(8))) -A * B -@disable_warn_order A * B -@reset_warn_order A * B -@set_warn_order 17 A * B -@set_warn_order 12 A * B -``` -""" -macro disable_warn_order(block) - quote - local old_order = disable_warn_order() - r = $(esc(block)) - set_warn_order(old_order) - r - end -end - -""" - @set_warn_order - -Temporarily set the order threshold for warning about the ITensor -order in a block of code. - -# Examples - -```julia -@set_warn_order 12 A * B - -@set_warn_order 15 begin - C = A * B - E = C * D -end -``` -""" -macro set_warn_order(new_order, block) - quote - local old_order = set_warn_order($(esc(new_order))) - r = $(esc(block)) - set_warn_order(old_order) - r - end -end - -""" - @reset_warn_order - -Temporarily sets the order threshold for warning about the ITensor -order in a block of code to the default value $default_warn_order. 
- -# Examples -```julia -@reset_warn_order A * B -``` -""" -macro reset_warn_order(block) - quote - local old_order = reset_warn_order() - r = $(esc(block)) - set_warn_order(old_order) - r - end -end - -# -# Block sparse multithreading -# - -""" -$(NDTensors.enable_threaded_blocksparse_docstring(@__MODULE__)) -""" -using_threaded_blocksparse() = NDTensors._using_threaded_blocksparse[] - -""" -$(NDTensors.enable_threaded_blocksparse_docstring(@__MODULE__)) -""" -enable_threaded_blocksparse() = NDTensors._enable_threaded_blocksparse() - -""" - enable_threaded_blocksparse(enable::Bool) - -`enable_threaded_blocksparse(true)` enables threaded block sparse -operations (equivalent to `enable_threaded_blocksparse()`). - -`enable_threaded_blocksparse(false)` disables threaded block sparse -operations (equivalent to `enable_threaded_blocksparse()`). -""" -function enable_threaded_blocksparse(enable::Bool) - return if enable - enable_threaded_blocksparse() - else - disable_threaded_blocksparse() - end -end - -""" -$(NDTensors.enable_threaded_blocksparse_docstring(@__MODULE__)) -""" -disable_threaded_blocksparse() = NDTensors._disable_threaded_blocksparse() - -# -# Turn debug checks on and off -# - -const _using_debug_checks = Ref{Bool}(false) - -using_debug_checks() = _using_debug_checks[] - -macro debug_check(ex) - quote - if using_debug_checks() - $(esc(ex)) - end - end -end - -function enable_debug_checks() - _using_debug_checks[] = true - return nothing -end - -function disable_debug_checks() - _using_debug_checks[] = false - return nothing -end - -# -# Turn contraction sequence optimizations on and off -# - -const _using_contraction_sequence_optimization = Ref(false) - -using_contraction_sequence_optimization() = _using_contraction_sequence_optimization[] - -function enable_contraction_sequence_optimization() - _using_contraction_sequence_optimization[] = true - return nothing -end - -function disable_contraction_sequence_optimization() - _using_contraction_sequence_optimization[] = false - return nothing -end diff --git a/src/imports.jl b/src/imports.jl deleted file mode 100644 index 727ab54f83..0000000000 --- a/src/imports.jl +++ /dev/null @@ -1,197 +0,0 @@ -import Base: - # types - Array, - CartesianIndices, - Vector, - NTuple, - Tuple, - # symbols - +, - -, - *, - ^, - /, - ==, - <, - >, - !, - # functions - adjoint, - allunique, - axes, - complex, - conj, - convert, - copy, - copyto!, - deepcopy, - deleteat!, - eachindex, - eltype, - fill!, - filter, - filter!, - findall, - findfirst, - getindex, - hash, - imag, - intersect, - intersect!, - isapprox, - isassigned, - isempty, - isless, - isreal, - iszero, - iterate, - keys, - lastindex, - length, - map, - map!, - ndims, - print, - promote_rule, - push!, - real, - resize!, - setdiff, - setdiff!, - setindex!, - show, - similar, - size, - summary, - truncate, - zero, - # macros - @propagate_inbounds - -import Base.Broadcast: - # types - AbstractArrayStyle, - Broadcasted, - BroadcastStyle, - DefaultArrayStyle, - Style, - # functions - _broadcast_getindex, - broadcasted, - broadcastable, - instantiate - -import ITensors.ContractionSequenceOptimization: - contraction_cost, optimal_contraction_sequence - -import Adapt: adapt_structure, adapt_storage - -import LinearAlgebra: - axpby!, - axpy!, - diag, - dot, - eigen, - exp, - factorize, - ishermitian, - lmul!, - lq, - mul!, - norm, - normalize, - normalize!, - nullspace, - qr, - rmul!, - svd, - tr, - transpose - -using ITensors.NDTensors.GPUArraysCoreExtensions: cpu - -using ITensors.NDTensors: - 
Algorithm, - @Algorithm_str, - EmptyNumber, - _Tuple, - _NTuple, - blas_get_num_threads, - disable_auto_fermion, - double_precision, - eachblock, - eachdiagblock, - enable_auto_fermion, - fill!!, - randn!!, - permutedims, - permutedims!, - single_precision, - timer, - using_auto_fermion - -using NDTensors.CUDAExtensions: cu - -import ITensors.NDTensors: - # Modules - Strided, # to control threading - # Types - AliasStyle, - AllowAlias, - NeverAlias, - array, - blockdim, - blockoffsets, - contract, - datatype, - dense, - denseblocks, - diaglength, - dim, - dims, - disable_tblis, - eachnzblock, - enable_tblis, - ind, - inds, - insertblock!, - insert_diag_blocks!, - matrix, - maxdim, - mindim, - nblocks, - nnz, - nnzblocks, - nzblock, - nzblocks, - one, - outer, - permuteblocks, - polar, - ql, - scale!, - setblock!, - setblockdim!, - setinds, - setstorage, - sim, - storage, - storagetype, - sum, - tensor, - truncate!, - using_tblis, - vector, - # Deprecated - addblock!, - store - -import ITensors.Ops: Prod, Sum, terms - -import Random: randn! - -using SerializedElementArrays: SerializedElementVector - -const DiskVector{T} = SerializedElementVector{T} - -import SerializedElementArrays: disk diff --git a/src/index.jl b/src/index.jl deleted file mode 100644 index 6c38ca3911..0000000000 --- a/src/index.jl +++ /dev/null @@ -1,667 +0,0 @@ -using NDTensors: NDTensors, sim -using .QuantumNumbers: QuantumNumbers, Arrow, In, Neither, Out -using .TagSets: - TagSets, TagSet, @ts_str, addtags, commontags, hastags, removetags, replacetags - -#const IDType = UInt128 -const IDType = UInt64 - -# Custom RNG for Index id -# Vector of RNGs, one for each thread -const INDEX_ID_RNGs = MersenneTwister[] -@inline index_id_rng() = index_id_rng(Threads.threadid()) -@noinline function index_id_rng(tid::Int) - 0 < tid <= length(INDEX_ID_RNGs) || _index_id_rng_length_assert() - if @inbounds isassigned(INDEX_ID_RNGs, tid) - @inbounds MT = INDEX_ID_RNGs[tid] - else - MT = MersenneTwister() - @inbounds INDEX_ID_RNGs[tid] = MT - end - return MT -end -@noinline _index_id_rng_length_assert() = @assert false "0 < tid <= length(INDEX_ID_RNGs)" - -""" -An `Index` represents a single tensor index with fixed dimension `dim`. Copies of an Index compare equal unless their -`tags` are different. - -An Index carries a `TagSet`, a set of tags which are small strings that specify properties of the `Index` to help -distinguish it from other Indices. There is a special tag which is referred to as the integer tag or prime -level which can be incremented or decremented with special priming functions. - -Internally, an `Index` has a fixed `id` number, which is how the ITensor library knows two indices are copies of a -single original `Index`. `Index` objects must have the same `id`, as well as the `tags` to compare equal. -""" -struct Index{T} - id::IDType - space::T - dir::Arrow - tags::TagSet - plev::Int - function Index{T}(id, space::T, dir::Arrow, tags, plev) where {T} - return new{T}(id, space, dir, tags, plev) - end -end - -####################### -# Index Constructors -# - -# Used in NDTensors for generic code, -# mostly for internal usage -Index{T}(dim::T) where {T} = Index(dim) - -# `Nothing` direction gets converted to `Neither`. 
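-# (matching the `dir=Neither` default of the `Index(dim::Number; ...)`
-# constructor below)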
-function Index{T}(id, space::T, dir::Nothing, tags, plev) where {T}
-  return Index{T}(id, space, Neither, tags, plev)
-end
-
-function Index(id, space::T, dir, tags, plev) where {T}
-  return Index{T}(id, space, dir, tags, plev)
-end
-
-"""
-    Index(dim::Int; tags::Union{AbstractString, TagSet} = "",
-          plev::Int = 0)
-
-Create an `Index` with a unique `id`, a TagSet given by `tags`,
-and a prime level `plev`.
-
-# Examples
-
-```jldoctest; filter=r"id=[0-9]{1,3}"
-julia> i = Index(2; tags="l", plev=1)
-(dim=2|id=818|"l")'
-
-julia> dim(i)
-2
-
-julia> plev(i)
-1
-
-julia> tags(i)
-"l"
-```
-"""
-function Index(dim::Number; tags="", plev=0, dir=Neither)
-  return Index(rand(index_id_rng(), IDType), dim, dir, tags, plev)
-end
-
-"""
-    Index(dim::Integer, tags::Union{AbstractString, TagSet}; plev::Int = 0)
-
-Create an `Index` with a unique `id` and a tagset given by `tags`.
-
-# Examples
-
-```jldoctest; filter=r"id=[0-9]{1,3}"
-julia> i = Index(2, "l,tag")
-(dim=2|id=58|"l,tag")
-
-julia> dim(i)
-2
-
-julia> plev(i)
-0
-
-julia> tags(i)
-"l,tag"
-```
-"""
-function Index(dim::Number, tags::Union{AbstractString,TagSet}; plev::Int=0)
-  return Index(dim; tags, plev)
-end
-
-# This is so that when IndexSets are converted
-# to Julia Base Sets, the hashing is done correctly
-#function Base.hash(i::Index, h::UInt)
-#  return hash((id(i), tags(i), plev(i)), h)
-#end
-
-"""
-    copy(i::Index)
-
-Create a copy of index `i` with identical `id`, `space`, `dir`, `tags`, and `plev`.
-"""
-copy(i::Index) = Index(id(i), copy(space(i)), dir(i), tags(i), plev(i))
-
-"""
-    sim(i::Index; tags = tags(i), plev = plev(i), dir = dir(i))
-
-Produces an `Index` with the same properties (dimension or QN structure)
-but with a new `id`.
-"""
-function NDTensors.sim(i::Index; tags=copy(tags(i)), plev=plev(i), dir=dir(i))
-  return Index(rand(index_id_rng(), IDType), copy(space(i)), dir, tags, plev)
-end
-
-trivial_space(i::Index) = 1
-trivial_index(i::Index) = Index(trivial_space(i))
-
-#######################
-# End Index Constructors
-#
-
-#######################
-# Index properties
-#
-
-# TODO: decide if these are good definitions, using
-# them for generic code in ContractionSequenceOptimization
-Base.Int(i::Index) = dim(i)
-length(i::Index) = 1
-
-# Collect into a tuple
-Base.Tuple(i::Index) = (i,)
-
-# Collect into a 0-dimensional Vector
-Base.collect(i::Index) = fill(i, ())
-
-"""
-    id(i::Index)
-
-Obtain the id of an Index, which is a unique 64-bit integer.
-"""
-id(i::Index) = i.id
-
-"""
-    dim(i::Index)
-
-Obtain the dimension of an Index.
-
-For a QN Index, this is the sum of the block dimensions.
-"""
-NDTensors.dim(i::Index) = i.space
-
-space(i::Index) = i.space
-
-"""
-    dir(i::Index)
-
-Return the direction of an `Index` (`ITensors.In`, `ITensors.Out`, or `ITensors.Neither`).
-"""
-dir(i::Index) = i.dir
-
-# Used for generic code in NDTensors
-NDTensors.dir(i::Index) = dir(i)
-
-# Trait to determine if an Index, Index collection, Tensor, or ITensor
-# has symmetries
-abstract type SymmetryStyle end
-
-struct NonQN <: SymmetryStyle end
-
-symmetrystyle(i::Index) = NonQN()
-# Fallback definition for scalar ITensors (without any indices)
-symmetrystyle() = NonQN()
-
-"""
-    tags(i::Index)
-
-Obtain the TagSet of an Index.
-"""
-tags(i::Index) = i.tags
-
-TagSets.commontags(is::Index...) = commontags(tags.(is)...)
-TagSets.commontags(is::Index) = tags(is)
-TagSets.commontags() = ts""
-
-"""
-    plev(i::Index)
-
-Obtain the prime level of an Index.
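-
-For example, `plev(Index(2)')` would return `1`, since `'` increments the
-prime level by one.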
-""" -plev(i::Index) = i.plev - -""" - hastags(i::Index, ts::Union{AbstractString,TagSet}) - -Check if an `Index` `i` has the provided tags, -which can be a string of comma-separated tags or -a TagSet object. - -# Examples - -```jldoctest; filter=r"id=[0-9]{1,3}" -julia> i = Index(2, "SpinHalf,Site,n=3") -(dim=2|id=861|"Site,SpinHalf,n=3") - -julia> hastags(i, "SpinHalf,Site") -true - -julia> hastags(i, "Link") -false -``` -""" -TagSets.hastags(i::Index, ts::Union{AbstractString,TagSet}) = hastags(tags(i), ts) - -TagSets.hastags(ts::Union{AbstractString,TagSet}) = x -> hastags(x, ts) - -""" - hasplev(i::Index, plev::Int) - -Check if an `Index` `i` has the provided prime level. - -# Examples - -```jldoctest; filter=r"id=[0-9]{1,3}" -julia> i = Index(2; plev=2) -(dim=2|id=543)'' - -julia> hasplev(i, 2) -true - -julia> hasplev(i, 1) -false -``` -""" -hasplev(i::Index, pl::Int) = plev(i) == pl - -""" - hasplev(pl::Int) - -Returns an anonymous function `x -> hasplev(x, pl)`. - -Useful for passing to functions like `map`. -""" -hasplev(pl::Int) = x -> hasplev(x, pl) - -""" - hasind(i::Index) - -Returns an anonymous function `x -> hasind(x, i)`. - -Useful for passing to functions like `map`. -""" -hasind(s::Index) = x -> hasind(x, s) - -""" - hasid(i::Index, id::ITensors.IDType) - -Check if an `Index` `i` has the provided id. - -# Examples - -```jldoctest; filter=r"id=[0-9]{1,3}" -julia> i = Index(2) -(dim=2|id=321) - -julia> hasid(i, id(i)) -true - -julia> j = Index(2) -(dim=2|id=17) - -julia> hasid(i, id(j)) -false -``` -""" -hasid(ind::Index, i::IDType) = id(ind) == i - -hasid(i::IDType) = x -> hasid(x, i) - -# -# QN related functions -# - -hasqns(::Integer) = false - -""" - hasqns(::Index) - -Checks of the Index has QNs or not. -""" -hasqns(i::Index) = hasqns(space(i)) - -####################### -# End Index properties -# - -####################### -# Index operations -# - -""" - setdir(i::Index, dir::Arrow) - -Create a copy of Index i with the specified direction. -""" -function setdir(i::Index, dir::Arrow) - return Index(id(i), copy(space(i)), dir, copy(tags(i)), plev(i)) -end - -""" - not(n::Int) - -Return Not{Int}(n). -""" -not(pl::Int) = Not(pl) - -""" - not(::IDType) - -Return Not{IDType}(n). -""" -not(id::IDType) = Not(id) - -# Information essential to the -# identity of an Index. -# Currently only used for hashing an Index. -struct IndexID - id::IDType - tags::TagSet - plev::Int -end -IndexID(i::Index) = IndexID(id(i), tags(i), plev(i)) -hash(i::Index, h::UInt) = hash(IndexID(i), h) - -""" - ==(i1::Index, i1::Index) - -Compare indices for equality. First the id's are compared, -then the prime levels are compared, and finally the -tags are compared. -""" -(i1::Index == i2::Index) = - (id(i1) == id(i2)) && (plev(i1) == plev(i2)) && (tags(i1) == tags(i2)) - -""" - dag(i::Index) - -Copy an index `i` and reverse its direction. -""" -dag(i::Index) = Index(id(i), copy(space(i)), -dir(i), tags(i), plev(i)) - -# For internal use in NDTensors -NDTensors.dag(i::Index) = dag(i) - -""" - settags(i::Index, ts) - -Return a copy of Index `i` with -tags replaced by the ones given -The `ts` argument can be a comma-separated -string of tags or a TagSet. 
- -# Examples - -```jldoctest; filter=r"id=[0-9]{1,3}" -julia> i = Index(2, "SpinHalf,Site,n=3") -(dim=2|id=543|"Site,SpinHalf,n=3") - -julia> hastags(i, "Link") -false - -julia> j = settags(i, "Link,n=4") -(dim=2|id=543|"Link,n=4") - -julia> hastags(j, "Link") -true - -julia> hastags(j, "n=4,Link") -true -``` -""" -settags(i::Index, ts) = Index(id(i), copy(space(i)), dir(i), ts, plev(i)) - -setspace(i::Index, s) = Index(id(i), s, dir(i), tags(i), plev(i)) - -""" - addtags(i::Index,ts) - -Return a copy of Index `i` with the -specified tags added to the existing ones. -The `ts` argument can be a comma-separated -string of tags or a TagSet. -""" -TagSets.addtags(i::Index, ts) = settags(i, addtags(tags(i), ts)) - -""" - removetags(i::Index, ts) - -Return a copy of Index `i` with the -specified tags removed. The `ts` argument -can be a comma-separated string of tags or a TagSet. -""" -TagSets.removetags(i::Index, ts) = settags(i, removetags(tags(i), ts)) - -""" - replacetags(i::Index, tsold, tsnew) - - replacetags(i::Index, tsold => tsnew) - -If the tag set of `i` contains the tags specified by `tsold`, -replaces these with the tags specified by `tsnew`, preserving -any other tags. The arguments `tsold` and `tsnew` can be -comma-separated strings of tags, or TagSet objects. - -# Examples - -```jldoctest; filter=r"id=[0-9]{1,3}" -julia> i = Index(2; tags="l,x", plev=1) -(dim=2|id=83|"l,x")' - -julia> replacetags(i, "l", "m") -(dim=2|id=83|"m,x")' - -julia> replacetags(i, "l" => "m") -(dim=2|id=83|"m,x")' -``` -""" -TagSets.replacetags(i::Index, tsold, tsnew) = settags(i, replacetags(tags(i), tsold, tsnew)) - -TagSets.replacetags(i::Index, rep_ts::Pair) = replacetags(i, rep_ts...) - -""" - prime(i::Index, plinc::Int = 1) - -Return a copy of Index `i` with its -prime level incremented by the amount `plinc` -""" -prime(i::Index, plinc::Int=1) = setprime(i, plev(i) + plinc) - -""" - setprime(i::Index, plev::Int) - -Return a copy of Index `i` with its -prime level set to `plev` -""" -setprime(i::Index, plev::Int) = Index(id(i), copy(space(i)), dir(i), tags(i), plev) - -""" - noprime(i::Index) - -Return a copy of Index `i` with its -prime level set to zero. -""" -noprime(i::Index) = setprime(i, 0) - -""" - adjoint(i::Index) - -Prime an Index using the notation `i'`. -""" -Base.adjoint(i::Index) = prime(i) - -""" - ^(i::Index, pl::Int) - -Prime an Index using the notation `i^3`. -""" -Base.:^(i::Index, pl::Int) = prime(i, pl) - -""" -Iterating over Index `I` gives the IndexVals `I(1)` through `I(dim(I))`. -""" -function Base.iterate(i::Index, state::Int=1) - Base.depwarn( - "iteration of `Index` is deprecated, use `eachindval` or `eachval` instead.", :iterate - ) - (state > dim(i)) && return nothing - return (i => state, state + 1) -end - -# Treat Index as a scalar for the sake of broadcast. -# This allows: -# -# i = Index(2) -# ps = (n - 1 for n in 1:4) -# is = prime.(i, ps) -# -# or -# -# ts = ("i$n" for n in 1:4) -# is = settags.(i, ts) -# -Base.broadcastable(i::Index) = Ref(i) - -""" - eachval(i::Index) - -Create an iterator whose values range -over the dimension of the provided `Index`. -""" -eachval(i::Index) = 1:dim(i) - -""" - eachindval(i::Index) - -Create an iterator whose values are Pairs of -the form `i=>n` with `n` from `1:dim(i)`. -This iterator is useful for accessing elements of -an ITensor in a loop without needing to know -the ordering of the indices. See also -[`eachindval(is::Index...)`](@ref). 
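-
-For example, for `i = Index(2)`, `collect(eachindval(i))` would give the
-pairs `[i => 1, i => 2]`.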
-""" -eachindval(i::Index) = (i => n for n in eachval(i)) - -# This is a trivial definition for use in NDTensors -# XXX: rename tensorproduct with ⊗ alias -function NDTensors.outer(i::Index; dir=dir(i), tags="", plev::Int=0) - return sim(i; tags=tags, plev=plev, dir=dir) -end - -# This is for use in NDTensors -# XXX: rename tensorproduct with ⊗ alias -function NDTensors.outer(i1::Index, i2::Index; tags="") - return Index(dim(i1) * dim(i2), tags) -end - -# Non-qn Index -# TODO: add ⊕ alias -directsum(i::Index, j::Index; tags="sum") = Index(dim(i) + dim(j); tags=tags) -function directsum(i::Index, j::Index, k::Index, inds::Index...; tags="sum") - return directsum(directsum(i, j; tags), k, inds...; tags) -end - -# -# QN related functions -# - -""" - removeqns(::Index) - -Removes the QNs from the Index, if it has any. -""" -removeqns(i::Index) = i - -""" - removeqn(::Index, qn_name::String) - -Remove the specified QN from the Index, if it has any. -""" -QuantumNumbers.removeqn(i::Index, qn_name::String) = i - -""" - mergeblocks(::Index) - -Merge the contiguous QN blocks if they have the same -quantum numbers. -""" -mergeblocks(i::Index) = i - -####################### -# End Index operations -# - -####################### -# IndexVal functions -# - -# Keep partial backwards compatibility by defining IndexVal as follows: -const IndexVal{IndexT} = Pair{IndexT,Int} - -IndexVal(i::Index, n::Int) = (i => n) - -function (i::Index)(n::Integer) - Base.depwarn("Index(::Int) is deprecated, for an Index i use i=>n instead.", :Index) - return i => n -end - -NDTensors.ind(iv::Pair{<:Index}) = first(iv) - -""" - isindequal(i::Index, iv::IndexVal) - - isindequal(i::IndexVal, iv::Index) - - isindequal(i::IndexVal, iv::IndexVal) - -Check if the Index and IndexVal have the same indices. -""" -isindequal(i::Index, iv::Pair{<:Index}) = (i == ind(iv)) - -isindequal(iv::Pair{<:Index}, i::Index) = isindequal(i, iv) - -isindequal(iv1::Pair{<:Index}, iv2::Pair{<:Index}) = (ind(iv1) == ind(iv2)) - -plev(iv::Pair{<:Index}) = plev(ind(iv)) - -dir(iv::Pair{<:Index}) = dir(ind(iv)) - -####################### -# End IndexVal functions -# - -####################### -# Index IO -# - -function primestring(plev) - if plev < 0 - return " (warning: prime level $plev is less than 0)" - end - if plev == 0 - return "" - elseif plev > 3 - return "'$plev" - else - return "'"^plev - end -end - -function Base.show(io::IO, i::Index) - idstr = "$(id(i) % 1000)" - if length(tags(i)) > 0 - print( - io, - "(dim=$(space(i))|id=$(idstr)|\"$(TagSets.tagstring(tags(i)))\")$(primestring(plev(i)))", - ) - else - print(io, "(dim=$(space(i))|id=$(idstr))$(primestring(plev(i)))") - end -end - -function readcpp(io::IO, ::Type{Index}; format="v3") - if format != "v3" - throw(ArgumentError("read Index: format=$format not supported")) - end - tags = readcpp(io, TagSet; kwargs...) - id = read(io, IDType) - dim = convert(Int64, read(io, Int32)) - dir_int = read(io, Int32) - dir = dir_int < 0 ? 
In : Out - read(io, 8) # Read default IQIndexDat size, 8 bytes - return Index(id, dim, dir, tags) -end diff --git a/src/indexset.jl b/src/indexset.jl deleted file mode 100644 index 397385e86b..0000000000 --- a/src/indexset.jl +++ /dev/null @@ -1,935 +0,0 @@ -using NDTensors: NDTensors, sim -using .QuantumNumbers: QuantumNumbers, Arrow, removeqn -using .TagSets: TagSets, addtags, commontags, hastags, removetags, replacetags - -# Represents a static order of an ITensor -@eval struct Order{N} - (OrderT::Type{<:Order})() = $(Expr(:new, :OrderT)) -end - -@doc """ - Order{N} -A value type representing the order of an ITensor. -""" Order - -""" - Order(N) = Order{N}() -Create an instance of the value type Order representing -the order of an ITensor. -""" -Order(N) = Order{N}() - -# Helpful if we want code to work generically -# for other Index-like types (such as IndexRange) -const IndexSet{IndexT<:Index} = Vector{IndexT} -const IndexTuple{IndexT<:Index} = Tuple{Vararg{IndexT}} - -# Definition to help with generic code -const Indices{IndexT<:Index} = Union{Vector{IndexT},Tuple{Vararg{IndexT}}} - -function _narrow_eltype(v::Vector{T}; default_empty_eltype=T) where {T} - if isempty(v) - return default_empty_eltype[] - end - return convert(Vector{mapreduce(typeof, promote_type, v)}, v) -end -function narrow_eltype(v::Vector{T}; default_empty_eltype=T) where {T} - if isconcretetype(T) - return v - end - return _narrow_eltype(v; default_empty_eltype) -end - -_indices() = () -_indices(x::Index) = (x,) - -# Tuples -_indices(x1::Tuple, x2::Tuple) = (x1..., x2...) -_indices(x1::Index, x2::Tuple) = (x1, x2...) -_indices(x1::Tuple, x2::Index) = (x1..., x2) -_indices(x1::Index, x2::Index) = (x1, x2) - -# Vectors -_indices(x1::Vector, x2::Vector) = narrow_eltype(vcat(x1, x2); default_empty_eltype=Index) - -# Mix vectors and tuples/elements -_indices(x1::Vector, x2) = _indices(x1, [x2]) -_indices(x1, x2::Vector) = _indices([x1], x2) -_indices(x1::Vector, x2::Tuple) = _indices(x1, [x2...]) -_indices(x1::Tuple, x2::Vector) = _indices([x1...], x2) - -indices(x::Vector{Index{S}}) where {S} = x -indices(x::Vector{Index}) = narrow_eltype(x; default_empty_eltype=Index) -indices(x::Tuple) = reduce(_indices, x; init=()) -indices(x::Vector) = reduce(_indices, x; init=Index[]) -indices(x...) = indices(x) - -# To help with backwards compatibility -IndexSet(inds::IndexSet) = inds -IndexSet(inds::Indices) = collect(inds) -IndexSet(inds::Index...) = collect(inds) -IndexSet(f::Function, N::Int) = map(f, 1:N) -IndexSet(f::Function, ::Order{N}) where {N} = IndexSet(f, N) - -Tuple(is::IndexSet) = _Tuple(is) -NTuple{N}(is::IndexSet) where {N} = _NTuple(Val(N), is) - -""" - not(inds::Union{IndexSet, Tuple{Vararg{Index}}}) - not(inds::Index...) - !(inds::Union{IndexSet, Tuple{Vararg{Index}}}) - !(inds::Index...) - -Represents the set of indices not in the specified -IndexSet, for use in pattern matching (i.e. when -searching for an index or priming/tagging specified -indices). -""" -not(is::Indices) = Not(is) -not(inds::Index...) = not(inds) -!(is::Indices) = not(is) -!(inds::Index...) = not(inds...) - -# Convert to an Index if there is only one -# TODO: also define the function `only` -Index(is::Indices) = is[] - -NDTensors.dims(is::IndexSet) = dim.(is) - -# Helps with generic code in `NDTensors`, -# for example with `NDTensors.similar`. -# Converts a set of Indices to a shape -# for allocating data. 
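-# For example, `Base.to_shape((Index(2), Index(3)))` would return `(2, 3)`.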
-Base.to_shape(inds::Tuple{Vararg{Index}}) = dims(inds) - -""" - dim(is::Indices) - -Get the product of the dimensions of the indices -of the Indices (the total dimension of the space). -""" -NDTensors.dim(is::IndexSet) = Compat.mapreduce(dim, *, is; init=1) - -""" - dim(is::IndexSet, n::Int) - -Get the dimension of the Index n of the Indices. -""" -NDTensors.dim(is::IndexSet, pos::Int) = dim(is[pos]) - -""" - dag(is::Indices) - -Return a new Indices with the indices daggered (flip -all of the arrow directions). -""" -function dag(is::Indices) - return isempty(is) ? is : map(i -> dag(i), is) -end - -# TODO: move to NDTensors -NDTensors.dim(is::Tuple, pos::Integer) = dim(is[pos]) - -# TODO: this is a weird definition, fix it -function NDTensors.similartype( - ::Type{<:Tuple{Vararg{IndexT}}}, ::Type{Val{N}} -) where {IndexT,N} - return NTuple{N,IndexT} -end - -## # This is to help with some generic programming in the Tensor -## # code (it helps to construct an IndexSet(::NTuple{N,Index}) where the -## # only known thing for dispatch is a concrete type such -## # as IndexSet{4}) -## -## #NDTensors.similartype(::Type{<:IndexSet}, -## # ::Val{N}) where {N} = IndexSet -## -## #NDTensors.similartype(::Type{<:IndexSet}, -## # ::Type{Val{N}}) where {N} = IndexSet - -""" - sim(is::Indices) - -Make a new Indices with similar indices. - -You can also use the broadcast version `sim.(is)`. -""" -NDTensors.sim(is::Indices) = map(i -> sim(i), is) - -function trivial_index(is::Indices) - if isempty(is) - return Index(1) - end - return trivial_index(first(is)) -end - -""" - mindim(is::Indices) - -Get the minimum dimension of the indices in the index set. - -Returns 1 if the Indices is empty. -""" -function mindim(is::Indices) - length(is) == 0 && (return 1) - md = dim(is[1]) - for n in 2:length(is) - md = min(md, dim(is[n])) - end - return md -end - -""" - maxdim(is::Indices) - -Get the maximum dimension of the indices in the index set. - -Returns 1 if the Indices is empty. -""" -function maxdim(is::Indices) - length(is) == 0 && (return 1) - md = dim(is[1]) - for n in 2:length(is) - md = max(md, dim(is[n])) - end - return md -end - -""" - commontags(::Indices) - -Return a TagSet of the tags that are common to all of the indices. -""" -TagSets.commontags(is::Indices) = commontags(is...) - -# -# Set operations -# - -""" - ==(is1::Indices, is2::Indices) - -Indices equality (order dependent). For order -independent equality use `issetequal` or -`hassameinds`. -""" -function ==(A::Indices, B::Indices) - length(A) ≠ length(B) && return false - for (a, b) in zip(A, B) - a ≠ b && return false - end - return true -end - -""" - ITensors.fmatch(pattern) -> ::Function - -fmatch is an internal function that -creates a function that accepts an Index. -The function returns true if the Index matches -the provided pattern, and false otherwise. - -For example: - -``` -i = Index(2, "s") -fmatch("s")(i) == true -``` -""" -fmatch(is::Indices) = in(is) -fmatch(is::Index...) 
= fmatch(is)
-fmatch(i::Index) = fmatch((i,))
-
-fmatch(pl::Int) = hasplev(pl)
-
-fmatch(tags::TagSet) = hastags(tags)
-fmatch(tags::AbstractString) = fmatch(TagSet(tags))
-
-fmatch(id::IDType) = hasid(id)
-
-fmatch(n::Not) = !fmatch(parent(n))
-
-# Function that always returns true
-ftrue(::Any) = true
-
-fmatch(::Nothing) = ftrue
-
-"""
-    ITensors.fmatch(; inds = nothing,
-                      tags = nothing,
-                      plev = nothing,
-                      id = nothing) -> Function
-
-An internal function that returns a function
-that accepts an Index and checks if the
-Index matches the provided conditions.
-"""
-function fmatch(; inds=nothing, tags=nothing, plev=nothing, id=nothing)
-  return i -> fmatch(inds)(i) && fmatch(plev)(i) && fmatch(id)(i) && fmatch(tags)(i)
-end
-
-"""
-    indmatch
-
-Check if the Index matches the provided conditions.
-"""
-indmatch(i::Index; kwargs...) = fmatch(; kwargs...)(i)
-
-"""
-    getfirst(is::Indices)
-
-Return the first Index in the Indices. If the Indices
-is empty, return `nothing`.
-"""
-function getfirst(is::Indices)
-  length(is) == 0 && return nothing
-  return first(is)
-end
-
-"""
-    getfirst(f::Function, is::Indices)
-
-Get the first Index matching the pattern function,
-return `nothing` if not found.
-"""
-function getfirst(f::Function, is::Indices)
-  for i in is
-    f(i) && return i
-  end
-  return nothing
-end
-
-getfirst(is::Indices, args...; kwargs...) = getfirst(fmatch(args...; kwargs...), is)
-
-Base.findall(is::Indices, args...; kwargs...) = findall(fmatch(args...; kwargs...), is)
-
-# In general this isn't defined for Tuple but is
-# defined for Vector
-"""
-    indexin(ais::Indices, bis::Indices)
-
-For collections of Indices, returns the first location in
-`bis` for each value in `ais`.
-"""
-function Base.indexin(ais::Indices, bis::Indices)
-  return [findfirst(bis, ais[i]) for i in 1:length(ais)]
-end
-
-#function Base.indexin(a::Index, bis::Indices)
-#  return [findfirst(bis, a)]
-#end
-
-findfirst(is::Indices, args...; kwargs...) = findfirst(fmatch(args...; kwargs...), is)
-
-#
-# Tagging functions
-#
-
-function prime(f::Function, is::Indices, args...)
-  return map(i -> f(i) ? prime(i, args...) : i, is)
-end
-
-"""
-    prime(A::Indices, plinc, ...)
-
-Increase the prime level of the indices by the specified amount.
-Filter which indices are primed using keyword arguments
-tags, plev and id.
-"""
-function prime(is::Indices, plinc::Integer, args...; kwargs...)
-  return prime(fmatch(args...; kwargs...), is, plinc)
-end
-
-prime(f::Function, is::Indices) = prime(f, is, 1)
-
-prime(is::Indices, args...; kwargs...) = prime(is, 1, args...; kwargs...)
-
-"""
-    adjoint(is::Indices)
-
-For `is'` notation.
-"""
-adjoint(is::Indices) = prime(is)
-
-function setprime(f::Function, is::Indices, args...)
-  return map(i -> f(i) ? setprime(i, args...) : i, is)
-end
-
-function setprime(is::Indices, plev::Integer, args...; kwargs...)
-  return setprime(fmatch(args...; kwargs...), is, plev)
-end
-
-noprime(f::Function, is::Indices, args...) = setprime(f, is, 0, args...)
-
-noprime(is::Indices, args...; kwargs...) = setprime(is, 0, args...; kwargs...)
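To make the keyword-filtering and priming conventions above concrete, here is a minimal usage sketch (it assumes the pre-rewrite ITensors.jl API being deleted in this patch; the indices `i` and `j` are illustrative):

```julia
using ITensors

i = Index(2, "i")
j = Index(3, "j")
is = (i, j)

# Prime every index, or only the indices matching a keyword filter:
is1 = prime(is)              # both i and j primed once
is2 = prime(is, 2; tags="i") # only the index tagged "i", raised by 2

# `noprime` resets matching prime levels back to zero:
noprime(is1) == is  # true
```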
- -function _swapprime(f::Function, i::Index, pl1pl2::Pair{Int,Int}) - pl1, pl2 = pl1pl2 - if f(i) - if hasplev(i, pl1) - return setprime(i, pl2) - elseif hasplev(i, pl2) - return setprime(i, pl1) - end - return i - end - return i -end - -function swapprime(f::Function, is::Indices, pl1pl2::Pair{Int,Int}) - return map(i -> _swapprime(f, i, pl1pl2), is) -end - -function swapprime(f::Function, is::Indices, pl1::Int, pl2::Int) - return swapprime(f, is::Indices, pl1 => pl2) -end - -function swapprime(is::Indices, pl1pl2::Pair{Int,Int}, args...; kwargs...) - return swapprime(fmatch(args...; kwargs...), is, pl1pl2) -end - -function swapprime(is::Indices, pl1::Int, pl2::Int, args...; kwargs...) - return swapprime(fmatch(args...; kwargs...), is, pl1 => pl2) -end - -replaceprime(f::Function, is::Indices, pl1::Int, pl2::Int) = replaceprime(f, is, pl1 => pl2) - -function replaceprime(is::Indices, pl1::Int, pl2::Int, args...; kwargs...) - return replaceprime(fmatch(args...; kwargs...), is, pl1 => pl2) -end - -const mapprime = replaceprime - -function _replaceprime(i::Index, rep_pls::Pair{Int,Int}...) - for (pl1, pl2) in rep_pls - hasplev(i, pl1) && return setprime(i, pl2) - end - return i -end - -function replaceprime(f::Function, is::Indices, rep_pls::Pair{Int,Int}...) - return map(i -> f(i) ? _replaceprime(i, rep_pls...) : i, is) -end - -function replaceprime(is::Indices, rep_pls::Pair{Int,Int}...; kwargs...) - return replaceprime(fmatch(; kwargs...), is, rep_pls...) -end - -function TagSets.addtags(f::Function, is::Indices, args...) - return map(i -> f(i) ? addtags(i, args...) : i, is) -end - -function TagSets.addtags(is::Indices, tags, args...; kwargs...) - return addtags(fmatch(args...; kwargs...), is, tags) -end - -settags(f::Function, is::Indices, args...) = map(i -> f(i) ? settags(i, args...) : i, is) - -function settags(is::Indices, tags, args...; kwargs...) - return settags(fmatch(args...; kwargs...), is, tags) -end - -""" - CartesianIndices(is::Indices) - -Create a CartesianIndices iterator for an Indices. -""" -CartesianIndices(is::Indices) = CartesianIndices(_Tuple(dims(is))) - -""" - eachval(is::Index...) - eachval(is::Tuple{Vararg{Index}}) - -Create an iterator whose values correspond to a -Cartesian indexing over the dimensions -of the provided `Index` objects. -""" -eachval(is::Index...) = eachval(is) -eachval(is::Tuple{Vararg{Index}}) = CartesianIndices(dims(is)) - -""" - eachindval(is::Index...) - eachindval(is::Tuple{Vararg{Index}}) - -Create an iterator whose values are Index=>value pairs -corresponding to a Cartesian indexing over the dimensions -of the provided `Index` objects. - -# Example - -```julia -i = Index(3; tags="i") -j = Index(2; tags="j") -T = random_itensor(j, i) -for iv in eachindval(i, j) - @show T[iv...] -end -``` -""" -eachindval(is::Index...) = eachindval(is) -eachindval(is::Tuple{Vararg{Index}}) = (is .=> Tuple(ns) for ns in eachval(is)) - -function TagSets.removetags(f::Function, is::Indices, args...) - return map(i -> f(i) ? removetags(i, args...) : i, is) -end - -function TagSets.removetags(is::Indices, tags, args...; kwargs...) - return removetags(fmatch(args...; kwargs...), is, tags) -end - -function _replacetags(i::Index, rep_ts::Pair...) 
- for (tags1, tags2) in rep_ts - hastags(i, tags1) && return replacetags(i, tags1, tags2) - end - return i -end - -# XXX new syntax -# hastags(any, is, ts) -""" - anyhastags(is::Indices, ts::Union{String, TagSet}) - hastags(is::Indices, ts::Union{String, TagSet}) - -Check if any of the indices in the Indices have the specified tags. -""" -anyhastags(is::Indices, ts) = any(i -> hastags(i, ts), is) - -TagSets.hastags(is::Indices, ts) = anyhastags(is, ts) - -# XXX new syntax -# hastags(all, is, ts) -""" - allhastags(is::Indices, ts::Union{String, TagSet}) - -Check if all of the indices in the Indices have the specified tags. -""" -allhastags(is::Indices, ts::String) = all(i -> hastags(i, ts), is) - -# Version taking a list of Pairs -function TagSets.replacetags(f::Function, is::Indices, rep_ts::Pair...) - return map(i -> f(i) ? _replacetags(i, rep_ts...) : i, is) -end - -function TagSets.replacetags(is::Indices, rep_ts::Pair...; kwargs...) - return replacetags(fmatch(; kwargs...), is, rep_ts...) -end - -# Version taking two input TagSets/Strings -function TagSets.replacetags(f::Function, is::Indices, tags1, tags2) - return replacetags(f, is, tags1 => tags2) -end - -function TagSets.replacetags(is::Indices, tags1, tags2, args...; kwargs...) - return replacetags(fmatch(args...; kwargs...), is, tags1 => tags2) -end - -function _swaptags(f::Function, i::Index, tags1, tags2) - if f(i) - if hastags(i, tags1) - return replacetags(i, tags1, tags2) - elseif hastags(i, tags2) - return replacetags(i, tags2, tags1) - end - return i - end - return i -end - -function swaptags(f::Function, is::Indices, tags1, tags2) - return map(i -> _swaptags(f, i, tags1, tags2), is) -end - -function swaptags(is::Indices, tags1, tags2, args...; kwargs...) - return swaptags(fmatch(args...; kwargs...), is, tags1, tags2) -end - -function swaptags(is::Indices, tags12::Pair, args...; kwargs...) - return swaptags(is, first(tags12), last(tags12), args...; kwargs...) -end - -function replaceinds(is::Indices, rep_inds::Pair{<:Index,<:Index}...) - return replaceinds(is, zip(rep_inds...)...) -end - -# Handle case of empty indices being replaced -replaceinds(is::Indices) = is -replaceinds(is::Indices, rep_inds::Tuple{}) = is - -function replaceinds(is::Indices, rep_inds::Vector{<:Pair{<:Index,<:Index}}) - return replaceinds(is, rep_inds...) -end - -function replaceinds(is::Indices, rep_inds::Tuple{Vararg{Pair{<:Index,<:Index}}}) - return replaceinds(is, rep_inds...) -end - -function replaceinds(is::Indices, rep_inds::Pair) - return replaceinds(is, Tuple(first(rep_inds)) .=> Tuple(last(rep_inds))) -end - -# Check that the QNs are all the same -hassameflux(i1::Index, i2::Index) = (dim(i1) == dim(i2)) - -function replaceinds_space_error(is, inds1, inds2, i1, i2) - return error(""" - Attempting to replace the Indices - - $(inds1) - - with - - $(inds2) - - in the Index collection - - $(is). - - However, the Index - - $(i1) - - has a different space from the Index - - $(i2). - - They must have the same spaces to be replaced. 
- """) -end - -function replaceinds(is::Indices, inds1, inds2) - is1 = inds1 - poss = indexin(is1, is) - is_tuple = Tuple(is) - for (j, pos) in enumerate(poss) - isnothing(pos) && continue - i1 = is_tuple[pos] - i2 = inds2[j] - i2 = setdir(i2, dir(i1)) - if space(i1) ≠ space(i2) - replaceinds_space_error(is, inds1, inds2, i1, i2) - end - is_tuple = setindex(is_tuple, i2, pos) - end - return (is_tuple) -end - -replaceind(is::Indices, i1::Index, i2::Index) = replaceinds(is, (i1,), (i2,)) - -function replaceind(is::Indices, i1::Index, i2::Indices) - length(i2) != 1 && - throw(ArgumentError("cannot use replaceind with an Indices of length $(length(i2))")) - return replaceinds(is, (i1,), i2) -end - -replaceind(is::Indices, rep_i::Pair{<:Index,<:Index}) = replaceinds(is, rep_i) - -function swapinds(is::Indices, inds1, inds2) - return replaceinds(is, (inds1..., inds2...), (inds2..., inds1...)) -end - -function swapinds(is::Indices, inds1::Index, inds2::Index) - return swapinds(is, (inds1,), (inds2,)) -end - -function swapinds(is::Indices, inds12::Pair) - return swapinds(is, first(inds12), last(inds12)) -end - -swapind(is::Indices, i1::Index, i2::Index) = swapinds(is, (i1,), (i2,)) - -removeqns(is::Indices) = map(removeqns, is) -function QuantumNumbers.removeqn(is::Indices, qn_name::String; mergeblocks=true) - return map(i -> removeqn(i, qn_name; mergeblocks), is) -end -mergeblocks(is::Indices) = map(mergeblocks, is) - -# Permute is1 to be in the order of is2 -# This is helpful when is1 and is2 have different directions, and -# you want is1 to have the same directions as is2 -# TODO: replace this functionality with -# -# setdirs(is1::Indices, is2::Indices) -# -function permute(is1::Indices, is2::Indices) - length(is1) != length(is2) && throw( - ArgumentError( - "length of first index set, $(length(is1)) does not match length of second index set, $(length(is2))", - ), - ) - perm = getperm(is1, is2) - return is1[invperm(perm)] -end - -# -# Helper functions for contracting ITensors -# - -function compute_contraction_labels(Ais::Tuple, Bis::Tuple) - have_qns = hasqns(Ais) && hasqns(Bis) - NA = length(Ais) - NB = length(Bis) - Alabels = MVector{NA,Int}(ntuple(_ -> 0, Val(NA))) - Blabels = MVector{NB,Int}(ntuple(_ -> 0, Val(NB))) - - ncont = 0 - for i in 1:NA, j in 1:NB - Ais_i = @inbounds Ais[i] - Bis_j = @inbounds Bis[j] - if Ais_i == Bis_j - if have_qns && (dir(Ais_i) ≠ -dir(Bis_j)) - error( - "Attempting to contract IndexSet:\n\n$(Ais)\n\nwith IndexSet:\n\n$(Bis)\n\nQN indices must have opposite direction to contract, but indices:\n\n$(Ais_i)\n\nand:\n\n$(Bis_j)\n\ndo not have opposite directions.", - ) - end - Alabels[i] = Blabels[j] = -(1 + ncont) - ncont += 1 - end - end - - u = ncont - for i in 1:NA - if (Alabels[i] == 0) - Alabels[i] = (u += 1) - end - end - for j in 1:NB - if (Blabels[j] == 0) - Blabels[j] = (u += 1) - end - end - - return (Tuple(Alabels), Tuple(Blabels)) -end - -function compute_contraction_labels(Cis::Tuple, Ais::Tuple, Bis::Tuple) - NA = length(Ais) - NB = length(Bis) - NC = length(Cis) - Alabels, Blabels = compute_contraction_labels(Ais, Bis) - Clabels = MVector{NC,Int}(ntuple(_ -> 0, Val(NC))) - for i in 1:NC - locA = findfirst(==(Cis[i]), Ais) - if !isnothing(locA) - if Alabels[locA] < 0 - error( - "The noncommon indices of $Ais and $Bis must be the same as the indices $Cis." 
- ) - end - Clabels[i] = Alabels[locA] - else - locB = findfirst(==(Cis[i]), Bis) - if isnothing(locB) || Blabels[locB] < 0 - error( - "The noncommon indices of $Ais and $Bis must be the same as the indices $Cis." - ) - end - Clabels[i] = Blabels[locB] - end - end - return (Tuple(Clabels), Alabels, Blabels) -end - -# -# TupleTools -# - -""" - pop(is::Indices) - -Return a new Indices with the last Index removed. -""" -pop(is::Indices) = (NDTensors.pop(Tuple(is))) - -# Overload the unexported NDTensors version -NDTensors.pop(is::Indices) = pop(is) - -# TODO: don't convert to Tuple -""" - popfirst(is::Indices) - -Return a new Indices with the first Index removed. -""" -popfirst(is::IndexSet) = (NDTensors.popfirst(Tuple(is))) - -# Overload the unexported NDTensors version -NDTensors.popfirst(is::IndexSet) = popfirst(is) - -""" - push(is::Indices, i::Index) - -Make a new Indices with the Index i inserted -at the end. -""" -push(is::IndexSet, i::Index) = NDTensors.push(is, i) - -# Overload the unexported NDTensors version -NDTensors.push(is::IndexSet, i::Index) = push(is, i) - -# TODO: deprecate in favor of `filterinds` (abuse of Base notation) -filter(is::Indices, args...; kwargs...) = filter(fmatch(args...; kwargs...), is) - -# For ambiguity with Base.filter -filter(is::Indices, args::String; kwargs...) = filter(fmatch(args; kwargs...), is) - -# -# QN functions -# - -""" - setdirs(is::Indices, dirs::Arrow...) - -Return a new Indices with indices `setdir(is[i], dirs[i])`. -""" -function setdirs(is::Indices, dirs) - return map(i -> setdir(is[i], dirs[i]), 1:length(is)) -end - -""" - dir(is::Indices, i::Index) - -Return the direction of the Index `i` in the Indices `is`. -""" -function dir(is1::Indices, i::Index) - return dir(getfirst(is1, i)) -end - -""" - dirs(is::Indices, inds) - -Return a tuple of the directions of the indices `inds` in -the Indices `is`, in the order they are found in `inds`. -""" -function dirs(is1::Indices, inds) - return map(i -> dir(is1, inds[i]), 1:length(inds)) -end - -""" - dirs(is::Indices) - -Return a tuple of the directions of the indices `is`. -""" -dirs(is::Indices) = dir.(is) - -hasqns(is::Indices) = any(hasqns, is) - -""" - getperm(col1, col2) - -Get the permutation that takes collection 2 to collection 1, -such that `col2[p] .== col1`. -""" -function getperm(s1, s2) - N = length(s1) - r = Vector{Int}(undef, N) - return map!(i -> findfirst(==(s1[i]), s2), r, 1:length(s1)) -end - -# TODO: define directly for Vector -""" - nblocks(::Indices, i::Int) - -The number of blocks in the specified dimension. -""" -function NDTensors.nblocks(inds::IndexSet, i::Int) - return nblocks(Tuple(inds), i) -end - -# TODO: don't convert to Tuple -function NDTensors.nblocks(inds::IndexSet, is) - return nblocks(Tuple(inds), is) -end - -""" - nblocks(::Indices) - -A tuple of the number of blocks in each -dimension. -""" -NDTensors.nblocks(inds::Indices) = nblocks.(inds) - -# TODO: is this needed? 
-function NDTensors.nblocks(inds::NTuple{N,<:Index}) where {N} - return ntuple(i -> nblocks(inds, i), Val(N)) -end - -ndiagblocks(inds) = minimum(nblocks(inds)) - -""" - flux(inds::Indices, block::Tuple{Vararg{Int}}) - -Get the flux of the specified block, for example: - -``` -i = Index(QN(0)=>2, QN(1)=>2) -is = (i, dag(i')) -flux(is, Block(1, 1)) == QN(0) -flux(is, Block(2, 1)) == QN(1) -flux(is, Block(1, 2)) == QN(-1) -flux(is, Block(2, 2)) == QN(0) -``` -""" -function flux(inds::Indices, block::Block) - qntot = QN() - for n in 1:length(inds) - ind = inds[n] - qntot += flux(ind, Block(block[n])) - end - return qntot -end - -""" - flux(inds::Indices, I::Integer...) - -Get the flux of the block that the specified -index falls in. - -``` -i = Index(QN(0)=>2, QN(1)=>2) -is = (i, dag(i')) -flux(is, 3, 1) == QN(1) -flux(is, 1, 2) == QN(0) -``` -""" -flux(inds::Indices, vals::Integer...) = flux(inds, block(inds, vals...)) - -""" - ITensors.block(inds::Indices, I::Integer...) - -Get the block that the specified index falls in. - -This is mostly an internal function, and the interface -is subject to change. - -# Examples - -```julia -i = Index(QN(0)=>2, QN(1)=>2) -is = (i, dag(i')) -ITensors.block(is, 3, 1) == (2,1) -ITensors.block(is, 1, 2) == (1,1) -``` -""" -block(inds::Indices, vals::Integer...) = blockindex(inds, vals...)[2] - -#show(io::IO, is::IndexSet) = show(io, MIME"text/plain"(), is) - -# -# Read and write -# - -function readcpp(io::IO, ::Type{<:Indices}; format="v3") - is = IndexSet() - if format == "v3" - size = read(io, Int) - function readind(io, n) - i = readcpp(io, Index; format) - stride = read(io, UInt64) - return i - end - is = IndexSet(n -> readind(io, n), size) - else - throw(ArgumentError("read IndexSet: format=$format not supported")) - end - return is -end diff --git a/src/itensor.jl b/src/itensor.jl deleted file mode 100644 index 3a0c6c7cf4..0000000000 --- a/src/itensor.jl +++ /dev/null @@ -1,2073 +0,0 @@ -using NDTensors: NDTensors, nnz -using .TagSets: TagSets, hastags, replacetags - -# Private inner constructor -function _ITensor end - -""" - ITensor - -An ITensor is a tensor whose interface is -independent of its memory layout. Therefore -it is not necessary to know the ordering -of an ITensor's indices, only which indices -an ITensor has. Operations like contraction -and addition of ITensors automatically -handle any memory permutations. 
- -# Examples - -```julia -julia> i = Index(2, "i") -(dim=2|id=287|"i") - -# -# Make an ITensor with random elements: -# -julia> A = random_itensor(i', i) -ITensor ord=2 (dim=2|id=287|"i")' (dim=2|id=287|"i") -NDTensors.Dense{Float64,Array{Float64,1}} - -julia> @show A; -A = ITensor ord=2 -Dim 1: (dim=2|id=287|"i")' -Dim 2: (dim=2|id=287|"i") -NDTensors.Dense{Float64,Array{Float64,1}} - 2×2 - 0.28358594718392427 1.4342219756446355 - 1.6620103556283987 -0.40952231269251566 - -julia> @show inds(A); -inds(A) = ((dim=2|id=287|"i")', (dim=2|id=287|"i")) - -# -# Set the i==1, i'==2 element to 1.0: -# -julia> A[i => 1, i' => 2] = 1; - -julia> @show A; -A = ITensor ord=2 -Dim 1: (dim=2|id=287|"i")' -Dim 2: (dim=2|id=287|"i") -NDTensors.Dense{Float64,Array{Float64,1}} - 2×2 - 0.28358594718392427 1.4342219756446355 - 1.0 -0.40952231269251566 - -julia> @show storage(A); -storage(A) = [0.28358594718392427, 1.0, 1.4342219756446355, -0.40952231269251566] - -julia> B = random_itensor(i, i'); - -julia> @show B; -B = ITensor ord=2 -Dim 1: (dim=2|id=287|"i") -Dim 2: (dim=2|id=287|"i")' -NDTensors.Dense{Float64,Array{Float64,1}} - 2×2 - -0.6510816500352691 0.2579101497658179 - 0.256266641521826 -0.9464735926768166 - -# -# Can add or subtract ITensors as long as they -# have the same indices, in any order: -# -julia> @show A + B; -A + B = ITensor ord=2 -Dim 1: (dim=2|id=287|"i")' -Dim 2: (dim=2|id=287|"i") -NDTensors.Dense{Float64,Array{Float64,1}} - 2×2 - -0.3674957028513448 1.6904886171664615 - 1.2579101497658178 -1.3559959053693322 -``` -""" -mutable struct ITensor - tensor - global @inline _ITensor(parent) = new(parent) -end - -function ITensor(::AllowAlias, T::Tensor{<:Any,<:Any,<:Any,<:Tuple}) - @debug_check begin - is = inds(T) - if !allunique(is) - error( - "Trying to create ITensors with collection of indices $is. Indices must be unique." - ) - end - end - return _ITensor(T) -end - -######################### -# ITensor constructors -# - -# Version where the indices are not Tuple, so convert to Tuple -function ITensor(::AllowAlias, T::Tensor)::ITensor - return ITensor(AllowAlias(), setinds(T, NTuple{ndims(T)}(inds(T)))) -end - -ITensor(::NeverAlias, T::Tensor)::ITensor = ITensor(AllowAlias(), copy(T)) - -ITensor(T::Tensor)::ITensor = ITensor(NeverAlias(), T) - -""" - ITensor(st::TensorStorage, is) - -Constructor for an ITensor from a TensorStorage -and a set of indices. -The ITensor stores a view of the TensorStorage. -""" -ITensor(as::AliasStyle, st::TensorStorage, is)::ITensor = - ITensor(as, Tensor(as, st, Tuple(is))) -ITensor(as::AliasStyle, is, st::TensorStorage)::ITensor = ITensor(as, st, is) - -ITensor(st::TensorStorage, is)::ITensor = itensor(Tensor(NeverAlias(), st, Tuple(is))) -ITensor(is, st::TensorStorage)::ITensor = ITensor(NeverAlias(), st, is) - -itensor(T::ITensor) = T -ITensor(T::ITensor) = copy(T) - -""" - itensor(args...; kwargs...) - -Like the `ITensor` constructor, but with attempt to make a view -of the input data when possible. -""" -itensor(args...; kwargs...)::ITensor = ITensor(AllowAlias(), args...; kwargs...) - -ITensor(::AliasStyle, args...; kwargs...)::ITensor = - error("ITensor constructor with input arguments of types `$(typeof.(args))` not defined.") - -""" - Tensor(::ITensor) - -Create a `Tensor` that stores a copy of the storage and -indices of the input `ITensor`. 
-""" -Tensor(T::ITensor)::Tensor = Tensor(NeverAlias(), T) -Tensor(as::NeverAlias, T::ITensor)::Tensor = Tensor(AllowAlias(), copy(T)) - -""" - tensor(::ITensor) - -Convert the `ITensor` to a `Tensor` that shares the same -storage and indices as the `ITensor`. -""" -Tensor(::AllowAlias, A::ITensor) = A.tensor - -""" - ITensor([::Type{ElT} = Float64, ]inds) - ITensor([::Type{ElT} = Float64, ]inds::Index...) - -Construct an ITensor filled with zeros having indices `inds` and element type -`ElT`. If the element type is not specified, it defaults to `Float64`. - -The storage will have `NDTensors.Dense` type. - -# Examples - -```julia -i = Index(2,"index_i") -j = Index(4,"index_j") -k = Index(3,"index_k") - -A = ITensor(i,j) -B = ITensor(ComplexF64,k,j) -``` -""" -function ITensor(eltype::Type{<:Number}, is::Indices) - return itensor(EmptyStorage(eltype), is) -end - -ITensor(eltype::Type{<:Number}, is...) = ITensor(eltype, indices(is...)) - -ITensor(is...) = ITensor(EmptyNumber, is...) - -# To fix ambiguity with QN Index version -# TODO: define as `emptyITensor(ElT)` -ITensor(eltype::Type{<:Number}=EmptyNumber) = ITensor(eltype, ()) - -# TODO: define as `emptyITensor(ElT)` -function ITensor(::Type{ElT}, inds::Tuple{}) where {ElT<:Number} - return ITensor(EmptyStorage(ElT), inds) -end - -""" - ITensor([::Type{ElT} = Float64, ]::UndefInitializer, inds) - ITensor([::Type{ElT} = Float64, ]::UndefInitializer, inds::Index...) - -Construct an ITensor filled with undefined elements having indices `inds` and -element type `ElT`. If the element type is not specified, it defaults to `Float64`. -One purpose for using this constructor is that initializing the elements in an - undefined way is faster than initializing them to a set value such as zero. - -The storage will have `NDTensors.Dense` type. - -# Examples - -```julia -i = Index(2,"index_i") -j = Index(4,"index_j") -k = Index(3,"index_k") - -A = ITensor(undef,i,j) -B = ITensor(ComplexF64,undef,k,j) -``` -""" -function ITensor(::Type{ElT}, ::UndefInitializer, inds::Indices) where {ElT<:Number} - return itensor(Dense(ElT, undef, dim(inds)), indices(inds)) -end - -function ITensor(::Type{ElT}, ::UndefInitializer, inds...) where {ElT<:Number} - return ITensor(ElT, undef, indices(inds...)) -end - -ITensor(::UndefInitializer, inds::Indices) = ITensor(Float64, undef, inds) - -ITensor(::UndefInitializer, inds...) = ITensor(Float64, undef, indices(inds...)) - -""" - ITensor([ElT::Type, ]x::Number, inds) - ITensor([ElT::Type, ]x::Number, inds::Index...) - -Construct an ITensor with all elements set to `x` and indices `inds`. - - If `x isa Int` or `x isa Complex{Int}` then the elements will be set to `float(x)` - unless specified otherwise by the first input. - - The storage will have `NDTensors.Dense` type. - - # Examples - - ```julia - i = Index(2,"index_i"); j = Index(4,"index_j"); k = Index(3,"index_k"); - - A = ITensor(1.0, i, j) - A = ITensor(1, i, j) # same as above - B = ITensor(2.0+3.0im, j, k) - ``` - - !!! warning - In future versions this may not automatically convert integer inputs with `float`, and in that case the particular element type should not be relied on. 
- """ -ITensor(eltype::Type{<:Number}, x::Number, is::Indices) = _ITensor(eltype, x, is) - -# For disambiguation with QN version -ITensor(eltype::Type{<:Number}, x::Number, is::Tuple{}) = _ITensor(eltype, x, is) - -function _ITensor(eltype::Type{<:Number}, x::Number, is::Indices) - return ITensor(Dense(convert(eltype, x), dim(is)), is) -end - -ITensor(eltype::Type{<:Number}, x::Number, is...) = ITensor(eltype, x, indices(is...)) - -ITensor(x::Number, is...) = ITensor(eltype(x), x, is...) - -const RealOrComplex{T} = Union{T,Complex{T}} - -ITensor(x::RealOrComplex{Int}, is...) = ITensor(float(x), is...) - -# -# EmptyStorage ITensor constructors -# - -# TODO: Replace with a simpler and more generic `zeros` constructor -# when the new `UnallocatedZeros` type lands. -# This is only used internally inside the implementation of `directsum` -# right now. -function zeros_itensor(elt::Type{<:Number}, inds::Index...) - return ITensor(elt, zero(elt), inds...) -end - -# TODO: Deprecated! -""" - emptyITensor([::Type{ElT} = NDTensors.EmptyNumber, ]inds) - emptyITensor([::Type{ElT} = NDTensors.EmptyNumber, ]inds::Index...) - -Construct an ITensor with storage type `NDTensors.EmptyStorage`, indices `inds`, and element type `ElT`. If the element type is not specified, it defaults to `NDTensors.EmptyNumber`, which represents a number type that can take on any value (for example, the type of the first value it is set to). -""" -function emptyITensor(::Type{ElT}, is::Indices) where {ElT<:Number} - return itensor(EmptyTensor(ElT, is)) -end - -function emptyITensor(::Type{ElT}, is...) where {ElT<:Number} - return emptyITensor(ElT, indices(is...)) -end - -emptyITensor(is::Indices) = emptyITensor(EmptyNumber, is) - -emptyITensor(is...) = emptyITensor(EmptyNumber, indices(is...)) - -function emptyITensor(::Type{ElT}=EmptyNumber) where {ElT<:Number} - return itensor(EmptyTensor(ElT, ())) -end - -using NDTensors.TypeParameterAccessors: set_eltype, type_parameters, specify_type_parameters -""" - ITensor([ElT::Type, ]A::AbstractArray, inds) - ITensor([ElT::Type, ]A::AbstractArray, inds::Index...) - - itensor([ElT::Type, ]A::AbstractArray, inds) - itensor([ElT::Type, ]A::AbstractArray, inds::Index...) - -Construct an ITensor from an AbstractArray `A` and indices `inds`. -The ITensor will be a view of the AbstractArray data if possible (if -no conversion to a different element type is necessary). - -If specified, the ITensor will have element type `ElT`. - -If the element type of `A` is `Int` or `Complex{Int}` and -the desired element type isn't specified, it will -be converted to `Float64` or `Complex{Float64}` automatically. -To keep the element type as an integer, specify it explicitly, -for example with: -```julia -i = Index(2, "i") -A = [0 1; 1 0] -T = ITensor(eltype(A), A, i', dag(i)) -``` - -# Examples - -```julia -i = Index(2,"index_i") -j = Index(2,"index_j") - -M = [1. 2; - 3 4] -T = ITensor(M, i, j) -T[i => 1, j => 1] = 3.3 -M[1, 1] == 3.3 -T[i => 1, j => 1] == 3.3 -``` - -!!! warning - In future versions this may not automatically convert `Int`/`Complex{Int}` inputs to floating point versions with `float` (once tensor operations using `Int`/`Complex{Int}` are natively as fast as floating point operations), and in that case the particular element type should not be relied on. To avoid extra conversions (and therefore allocations) it is best practice to directly construct with `itensor([0. 1; 1 0], i', dag(i))` if you want a floating point element type. 
The conversion is done as a performance optimization since often tensors are passed to BLAS/LAPACK and need to be converted to floating point types compatible with those libraries, but future projects in Julia may allow for efficient operations with more general element types (for example see https://github.com/JuliaLinearAlgebra/Octavian.jl). -""" -function ITensor( - as::AliasStyle, - eltype::Type{<:Number}, - A::AbstractArray{<:Number}, - inds::Indices; - kwargs..., -) - length(A) ≠ dim(inds) && throw( - DimensionMismatch( - "In ITensor(::AbstractArray, inds), length of AbstractArray ($(length(A))) must match total dimension of IndexSet ($(dim(inds)))", - ), - ) - data = set_eltype(typeof(A), eltype)(as, A) - return itensor(Dense(data), inds) -end - -function ITensor( - as::AliasStyle, eltype::Type{<:Number}, A::AbstractArray{<:Number}, inds; kwargs... -) - is = indices(inds) - if !isa(is, Indices) - error("Indices $inds are not valid for constructing an ITensor.") - end - return ITensor(as, eltype, A, is; kwargs...) -end - -# Convert `Adjoint` to `Matrix` -function ITensor( - as::AliasStyle, eltype::Type{<:Number}, A::Adjoint, inds::Indices{Index{Int}}; kwargs... -) - return ITensor(as, eltype, Matrix(A), inds; kwargs...) -end - -function ITensor( - as::AliasStyle, eltype::Type{<:Number}, A::AbstractArray{<:Number}, is...; kwargs... -) - return ITensor(as, eltype, A, indices(is...); kwargs...) -end - -function ITensor(eltype::Type{<:Number}, A::AbstractArray{<:Number}, is...; kwargs...) - return ITensor(NeverAlias(), eltype, A, is...; kwargs...) -end - -# For now, it's not well defined to construct an ITensor without indices -# from a non-zero dimensional AbstractArray -function ITensor( - as::AliasStyle, eltype::Type{<:Number}, A::AbstractArray{<:Number}; kwargs... -) - if length(A) > 1 - error( - "Trying to create an ITensor without any indices from $(typeof(A)) $A of dimensions $(size(A)). Cannot construct an ITensor from an $(typeof(A)) with more than one element without any indices.", - ) - end - return ITensor(eltype, A[]; kwargs...) -end - -function ITensor(eltype::Type{<:Number}, A::AbstractArray{<:Number}; kwargs...) - return ITensor(NeverAlias(), eltype, A; kwargs...) -end -function ITensor(A::AbstractArray{<:Number}; kwargs...) - return ITensor(NeverAlias(), eltype(A), A; kwargs...) -end - -function ITensor( - as::AliasStyle, A::AbstractArray{ElT}, is...; kwargs... -) where {ElT<:Number} - return ITensor(as, ElT, A, indices(is...); kwargs...) -end - -function ITensor( - as::AliasStyle, A::AbstractArray{ElT}, is...; kwargs... -) where {ElT<:RealOrComplex{Int}} - return ITensor(as, float(ElT), A, is...; kwargs...) -end - -function ITensor(A::AbstractArray{<:Number}, is...; kwargs...) - return ITensor(NeverAlias(), A, is...; kwargs...) -end - -# -# Diag ITensor constructors -# - -""" - diag_itensor([::Type{ElT} = Float64, ]inds) - diag_itensor([::Type{ElT} = Float64, ]inds::Index...) - -Make a sparse ITensor of element type `ElT` with only elements -along the diagonal stored. Defaults to having `zero(T)` along -the diagonal. - -The storage will have `NDTensors.Diag` type. -""" -function diag_itensor(::Type{ElT}, is::Indices) where {ElT<:Number} - return itensor(Diag(ElT, mindim(is)), is) -end - -diag_itensor(::Type{ElT}, is...) where {ElT<:Number} = diag_itensor(ElT, indices(is...)) - -diag_itensor(is::Indices) = diag_itensor(Float64, is) -diag_itensor(is...) = diag_itensor(indices(is...)) - -""" - diag_itensor([ElT::Type, ]v::AbstractVector, inds...) 
- diagitensor([ElT::Type, ]v::AbstractVector, inds...) - -Make a sparse ITensor with non-zero elements only along the diagonal. -In general, the diagonal elements will be those stored in `v` and -the ITensor will have element type `eltype(v)`, unless specified explicitly -by `ElT`. The storage will have `NDTensors.Diag` type. - -In the case when `eltype(v) isa Union{Int, Complex{Int}}`, by default it will -be converted to `float(v)`. Note that this behavior is subject to change -in the future. - -The version `diag_itensor` will never output an ITensor whose storage data -is an alias of the input vector data. - -The version `diagitensor` might output an ITensor whose storage data -is an alias of the input vector data in order to minimize operations. -""" -function diag_itensor( - as::AliasStyle, eltype::Type{<:Number}, v::AbstractVector{<:Number}, is::Indices -) - length(v) ≠ mindim(is) && error( - "Length of vector for diagonal must equal minimum of the dimension of the input indices", - ) - data = set_eltype(typeof(v), eltype)(as, v) - return itensor(Diag(data), is) -end - -function diag_itensor( - as::AliasStyle, eltype::Type{<:Number}, v::AbstractVector{<:Number}, is... -) - return diag_itensor(as, eltype, v, indices(is...)) -end - -function diag_itensor(as::AliasStyle, v::AbstractVector, is...) - return diag_itensor(as, eltype(v), v, is...) -end - -function diag_itensor(as::AliasStyle, v::AbstractVector{<:RealOrComplex{Int}}, is...) - return diag_itensor(AllowAlias(), float(eltype(v)), v, is...) -end - -diag_itensor(v::AbstractVector{<:Number}, is...) = diag_itensor(NeverAlias(), v, is...) -function diag_itensor(eltype::Type{<:Number}, v::AbstractVector{<:Number}, is...) - return diag_itensor(NeverAlias(), eltype, v, is...) -end - -diagitensor(args...; kwargs...) = diag_itensor(AllowAlias(), args...; kwargs...) - -# XXX TODO: explain conversion from Int -# XXX TODO: proper conversion -""" - diag_itensor([ElT::Type, ]x::Number, inds...) - diagitensor([ElT::Type, ]x::Number, inds...) - -Make a sparse ITensor with non-zero elements only along the diagonal. -In general, the diagonal elements will be set to the value `x` and -the ITensor will have element type `eltype(x)`, unless specified explicitly -by `ElT`. The storage will have `NDTensors.Diag` type. - -In the case when `x isa Union{Int, Complex{Int}}`, by default it will -be converted to `float(x)`. Note that this behavior is subject to change -in the future. -""" -function diag_itensor(as::AliasStyle, eltype::Type{<:Number}, x::Number, is::Indices) - return diag_itensor(AllowAlias(), eltype, fill(eltype(x), mindim(is)), is...) -end - -function diag_itensor(as::AliasStyle, eltype::Type{<:Number}, x::Number, is...) - return diag_itensor(as, eltype, x, indices(is...)) -end - -function diag_itensor(as::AliasStyle, x::Number, is...) - return diag_itensor(as, typeof(x), x, is...) -end - -function diag_itensor(as::AliasStyle, x::RealOrComplex{Int}, is...) - return diag_itensor(as, float(typeof(x)), x, is...) -end - -function diag_itensor(eltype::Type{<:Number}, x::Number, is...) - return diag_itensor(NeverAlias(), eltype, x, is...) -end - -diag_itensor(x::Number, is...) = diag_itensor(NeverAlias(), x, is...) - -""" - delta([::Type{ElT} = Float64, ]inds) - delta([::Type{ElT} = Float64, ]inds::Index...) - -Make a uniform diagonal ITensor with all diagonal elements -`one(ElT)`. Only a single diagonal element is stored. - -This function has an alias `δ`. 
-""" -function delta(eltype::Type{<:Number}, is::Indices) - return itensor(Diag(one(eltype)), is) -end - -function delta(eltype::Type{<:Number}, is...) - return delta(eltype, indices(is...)) -end - -delta(is...) = delta(Float64, is...) - -const δ = delta - -function onehot(eltype::Type{<:Number}, ivs::Pair{<:Index}...) - return onehot(NDTensors.default_datatype(eltype), ivs...) -end -function onehot(eltype::Type{<:Number}, ivs::Vector{<:Pair{<:Index}}) - return onehot(NDTensors.default_datatype(eltype), ivs...) -end -function setelt(eltype::Type{<:Number}, ivs::Pair{<:Index}...) - return onehot(NDTensors.default_datatype(eltype), ivs...) -end - -function onehot(ivs::Pair{<:Index}...) - return onehot(NDTensors.default_datatype(NDTensors.default_eltype()), ivs...) -end -onehot(ivs::Vector{<:Pair{<:Index}}) = onehot(ivs...) -setelt(ivs::Pair{<:Index}...) = onehot(ivs...) - -""" - dense(T::ITensor) - -Make a new ITensor where the storage is the closest Dense storage, -avoiding allocating new data if possible. -For example, an ITensor with Diag storage will become Dense storage, -filled with zeros except for the diagonal values. -""" -function dense(A::ITensor) - return setinds(itensor(dense(tensor(A))), removeqns(inds(A))) -end - -""" - random_itensor([rng=Random.default_rng()], [ElT=Float64], inds) - random_itensor([rng=Random.default_rng()], [ElT=Float64], inds::Index...) - -Construct an ITensor with type `ElT` and indices `inds`, whose elements are -normally distributed random numbers. If the element type is not specified, -it defaults to `Float64`. - -# Examples - -```julia -i = Index(2,"index_i") -j = Index(4,"index_j") -k = Index(3,"index_k") - -A = random_itensor(i,j) -B = random_itensor(ComplexF64,undef,k,j) -``` -""" -function random_itensor(::Type{S}, is::Indices) where {S<:Number} - return random_itensor(Random.default_rng(), S, is) -end - -function random_itensor(rng::AbstractRNG, ::Type{S}, is::Indices) where {S<:Number} - T = ITensor(S, undef, is) - randn!(rng, T) - return T -end - -function random_itensor(::Type{S}, is...) where {S<:Number} - return random_itensor(Random.default_rng(), S, is...) -end - -function random_itensor(rng::AbstractRNG, ::Type{S}, is...) where {S<:Number} - return random_itensor(rng, S, indices(is...)) -end - -# To fix ambiguity with QN version -function random_itensor(::Type{ElT}, is::Tuple{}) where {ElT<:Number} - return random_itensor(Random.default_rng(), ElT, is) -end - -# To fix ambiguity with QN version -function random_itensor(rng::AbstractRNG, ::Type{ElT}, is::Tuple{}) where {ElT<:Number} - return random_itensor(rng, ElT, Index{Int}[]) -end - -# To fix ambiguity with QN version -function random_itensor(is::Tuple{}) - return random_itensor(Random.default_rng(), is) -end - -# To fix ambiguity with QN version -function random_itensor(rng::AbstractRNG, is::Tuple{}) - return random_itensor(rng, Float64, is) -end - -# To fix ambiguity errors with QN version -function random_itensor(::Type{ElT}) where {ElT<:Number} - return random_itensor(Random.default_rng(), ElT) -end - -# To fix ambiguity errors with QN version -function random_itensor(rng::AbstractRNG, ::Type{ElT}) where {ElT<:Number} - return random_itensor(rng, ElT, ()) -end - -random_itensor(is::Indices) = random_itensor(Random.default_rng(), is) -random_itensor(rng::AbstractRNG, is::Indices) = random_itensor(rng, Float64, is) -random_itensor(is...) = random_itensor(Random.default_rng(), is...) -random_itensor(rng::AbstractRNG, is...) 
= random_itensor(rng, Float64, indices(is...))
-
-# To fix ambiguity errors with QN version
-random_itensor() = random_itensor(Random.default_rng())
-
-# To fix ambiguity errors with QN version
-random_itensor(rng::AbstractRNG) = random_itensor(rng, Float64, ())
-
-copy(T::ITensor)::ITensor = itensor(copy(tensor(T)))
-zero(T::ITensor)::ITensor = itensor(zero(tensor(T)))
-
-#
-# Construct from Array
-#
-
-# Helper functions for different view behaviors
-# TODO: Move to NDTensors.jl
-function (arraytype::Type{<:AbstractArray})(::NeverAlias, A::AbstractArray)
-  return specify_type_parameters(arraytype, type_parameters(A))(A)
-end
-
-function (arraytype::Type{<:AbstractArray})(::AllowAlias, A::AbstractArray)
-  return convert(specify_type_parameters(arraytype, type_parameters(A)), A)
-end
-
-"""
-    Array{ElT, N}(T::ITensor, i::Index...)
-    Array{ElT}(T::ITensor, i::Index...)
-    Array(T::ITensor, i::Index...)
-
-    Matrix{ElT}(T::ITensor, row_i::Index, col_i::Index)
-    Matrix(T::ITensor, row_i::Index, col_i::Index)
-
-    Vector{ElT}(T::ITensor)
-    Vector(T::ITensor)
-
-Given an ITensor `T` with indices `i...`, returns
-an Array with a copy of the ITensor's elements. The
-order in which the indices are provided indicates
-the order of the data in the resulting Array.
-"""
-function Array{ElT,N}(T::ITensor, is::Indices) where {ElT,N}
-  ndims(T) != N && throw(
-    DimensionMismatch(
-      "cannot convert an $(ndims(T)) dimensional ITensor to an $N-dimensional Array."
-    ),
-  )
-  TT = tensor(permute(T, is))
-  return Array{ElT,N}(TT)::Array{ElT,N}
-end
-
-function Array{ElT,N}(T::ITensor, is...) where {ElT,N}
-  return Array{ElT,N}(T, indices(is...))
-end
-
-function Array{ElT}(T::ITensor, is::Indices) where {ElT}
-  return Array{ElT,length(is)}(T, is)
-end
-
-function Array{ElT}(T::ITensor, is...) where {ElT}
-  return Array{ElT}(T, indices(is...))
-end
-
-function Array(T::ITensor, is...)
-  return Array{eltype(T)}(T, is...)
-end
-
-function Array{<:Any,N}(T::ITensor, is...) where {N}
-  return Array{eltype(T),N}(T, is...)
-end
-
-function Vector{ElT}(T::ITensor)::Vector{ElT} where {ElT}
-  ndims(T) != 1 && throw(
-    DimensionMismatch("cannot convert an $(ndims(T)) dimensional ITensor to a Vector.")
-  )
-  return Array{ElT}(T, inds(T)...)
-end
-
-function Vector(T::ITensor)::Vector
-  return Array(T, inds(T)...)
-end
-#########################
-# End ITensor constructors
-#
-
-#########################
-# ITensor properties
-#
-"""
-    storage(T::ITensor)
-
-Return a view of the TensorStorage of the ITensor.
-"""
-storage(T::ITensor) = storage(tensor(T))
-
-storagetype(x::ITensor) = storagetype(tensor(x))
-
-"""
-    data(T::ITensor)
-
-Return a view of the raw data of the ITensor.
-
-This is mostly an internal ITensor function; please
-let the developers of ITensors.jl know if there is
-ITensor functionality you would like
-that is not currently available.
-"""
-data(T::ITensor) = NDTensors.data(tensor(T))
-
-NDTensors.data(x::ITensor) = data(x)
-datatype(x::ITensor) = datatype(tensor(x))
-
-# Trait to check if the tensor has QN symmetry
-symmetrystyle(T::Tensor) = symmetrystyle(inds(T))
-symmetrystyle(T::ITensor)::SymmetryStyle = symmetrystyle(tensor(T))
-
-eltype(T::ITensor) = eltype(tensor(T))
-NDTensors.scalartype(x::ITensor) = eltype(x)
-
-"""
-    order(A::ITensor)
-    ndims(A::ITensor)
-
-The number of indices, `length(inds(A))`.
-"""
-order(T::ITensor)::Int = ndims(T)
-
-Order(T::ITensor) = Order(order(T))
-
-ndims(T::ITensor)::Int = ndims(tensor(T))
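As a short usage sketch of the Array conversions documented above (pre-rewrite API; the indices are illustrative), note that the order of the provided indices sets the layout of the resulting array:

```julia
using ITensors

i = Index(2, "i")
j = Index(3, "j")
T = random_itensor(i, j)

M = Matrix(T, i, j)   # 2×3 matrix, rows run over i
Mt = Matrix(T, j, i)  # 3×2 matrix: same data, transposed layout
M == transpose(Mt)    # true
```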
-""" -order(T::ITensor)::Int = ndims(T) - -Order(T::ITensor) = Order(order(T)) - -ndims(T::ITensor)::Int = ndims(tensor(T)) - -""" - dim(A::ITensor) - -The total dimension of the space the tensor lives in, `prod(dims(A))`. -""" -dim(T::ITensor)::Int = dim(tensor(T)) - -""" - maxdim(A::ITensor) - -The maximum dimension of the tensor indices. -""" -maxdim(T::ITensor)::Int = maxdim(tensor(T)) - -""" - mindim(A::ITensor) - -The minimum dimension of the tensor indices. -""" -mindim(T::ITensor)::Int = mindim(tensor(T)) - -""" - dim(A::ITensor, n::Int) - -Get the nth dimension of the ITensors. -""" -dim(T::ITensor, n::Int)::Int = dim(tensor(T), n) - -""" - dims(A::ITensor) - size(A::ITensor) - -Tuple containing `dim(inds(A)[d]) for d in 1:ndims(A)`. -""" -dims(T::ITensor) = dims(tensor(T)) - -axes(T::ITensor) = axes(tensor(T)) - -size(T::ITensor) = dims(T) - -size(A::ITensor, d::Int) = size(tensor(A), d) - -_isemptyscalar(A::ITensor) = _isemptyscalar(tensor(A)) -_isemptyscalar(A::Tensor) = ndims(A) == 0 && isemptystorage(A) && eltype(A) === EmptyNumber -NDTensors.iscu(A::ITensor) = NDTensors.iscu(tensor(A)) -""" - dir(A::ITensor, i::Index) - -Return the direction of the Index `i` in the ITensor `A`. -""" -dir(A::ITensor, i::Index) = dir(inds(A), i) - -dirs(A::ITensor, is) = dirs(inds(A), is) - -# TODO: add isdiag(::Tensor) to NDTensors -isdiag(T::ITensor)::Bool = (storage(T) isa Diag || storage(T) isa DiagBlockSparse) - -diaglength(T::ITensor) = diaglength(tensor(T)) - -# -# Block sparse related functions -# (Maybe create fallback definitions for dense tensors) -# - -hasqns(T::Union{Tensor,ITensor}) = hasqns(inds(T)) - -eachnzblock(T::ITensor) = eachnzblock(tensor(T)) - -# TODO: Switch this to `SparseArrays.nnz`, it is written -# this way for backwards compatibility since older versions -# of NDTensors had their own `NDTensors.nnz` function -# that didn't overload `SparseArrays.nnz`. -NDTensors.nnz(T::ITensor) = nnz(tensor(T)) - -nblocks(T::ITensor, args...) = nblocks(tensor(T), args...) - -nnzblocks(T::ITensor) = nnzblocks(tensor(T)) - -nzblock(T::ITensor, args...) = nzblock(tensor(T), args...) - -nzblocks(T::ITensor) = nzblocks(tensor(T)) - -blockoffsets(T::ITensor) = blockoffsets(tensor(T)) - -# XXX: rename isemptystorage? -""" - isemptystorage(T::ITensor) - -Returns `true` if the ITensor contains no elements. - -An ITensor with `EmptyStorage` storage always returns `true`. -""" -isemptystorage(T::ITensor) = isemptystorage(tensor(T)) -isemptystorage(T::Tensor) = isempty(T) -isempty(T::ITensor) = isemptystorage(T) - -isreal(T::ITensor) = eltype(T) <: Real -iszero(T::ITensor) = all(iszero, T) -######################### -# End ITensor properties -# - -######################### -# ITensor iterators -# -""" - CartesianIndices(A::ITensor) - -Create a CartesianIndices iterator for an ITensor. Helpful for -iterating over all elements of the ITensor. 
-
-julia> i = Index(2, "i")
-(dim=2|id=90|"i")
-
-julia> j = Index(3, "j")
-(dim=3|id=554|"j")
-
-julia> A = random_itensor(i, j)
-ITensor ord=2 (dim=2|id=90|"i") (dim=3|id=554|"j")
-Dense{Float64,Array{Float64,1}}
-
-julia> C = CartesianIndices(A)
-2×3 CartesianIndices{2,Tuple{Base.OneTo{Int64},Base.OneTo{Int64}}}:
- CartesianIndex(1, 1)  CartesianIndex(1, 2)  CartesianIndex(1, 3)
- CartesianIndex(2, 1)  CartesianIndex(2, 2)  CartesianIndex(2, 3)
-
-julia> for c in C
-         @show c, A[c]
-       end
-(c, A[c]) = (CartesianIndex(1, 1), 0.9867887290267864)
-(c, A[c]) = (CartesianIndex(2, 1), -0.5967323222288754)
-(c, A[c]) = (CartesianIndex(1, 2), 0.9675791778518225)
-(c, A[c]) = (CartesianIndex(2, 2), 0.2842549524334651)
-(c, A[c]) = (CartesianIndex(1, 3), -0.023483276282564795)
-(c, A[c]) = (CartesianIndex(2, 3), -0.4877709982071688)
-
-!!! warning
-    Unlike standard `AbstractArray{T, N}` types, `ITensor`s do not have their
-    order as a type parameter, and therefore iterating using `CartesianIndices`
-    is generally slow. If you are performing operations that iterate over
-    individual elements of an ITensor it is best to convert to `NDTensors.Tensor`.
-"""
-CartesianIndices(A::ITensor) = CartesianIndices(tensor(A))
-
-"""
-    eachindval(A::ITensor)
-
-Create an iterable object for visiting each element of the ITensor `A` (including structurally
-zero elements for sparse tensors) in terms of pairs of indices and values.
-"""
-eachindval(T::ITensor) = eachindval(inds(T))
-
-"""
-    iterate(A::ITensor, args...)
-
-Iterate over the elements of an ITensor.
-"""
-iterate(A::ITensor, args...) = iterate(tensor(A), args...)
-
-#########################
-# End ITensor iterators
-#
-
-#########################
-# ITensor Accessor Functions
-#
-
-function settensor!(T::ITensor, t)::ITensor
-  T.tensor = t
-  return T
-end
-
-function setinds!(T::ITensor, is)::ITensor
-  # TODO: always convert to Tuple with Tensor type?
-  return settensor!(T, setinds(tensor(T), Tuple(is)))
-end
-
-function setstorage!(T::ITensor, st)::ITensor
-  return settensor!(T, setstorage(tensor(T), st))
-end
-
-function setinds(T::ITensor, is)::ITensor
-  # TODO: always convert to Tuple with Tensor type?
-  return itensor(setinds(tensor(T), Tuple(is)))
-end
-
-function setstorage(T::ITensor, st)::ITensor
-  return itensor(setstorage(tensor(T), st))
-end
-
-removeqns(T::ITensor) = dense(T)
-
-"""
-    denseblocks(T::ITensor)
-
-Make a new ITensor where any blocks which have a sparse format, such
-as diagonal sparsity, are made dense while still preserving the outer
-block-sparse structure. This method avoids allocating new data if possible.
-
-For example, an ITensor with DiagBlockSparse storage will have BlockSparse storage
-afterwards.
-"""
-denseblocks(D::ITensor) = itensor(denseblocks(tensor(D)))
-
-"""
-    complex(T::ITensor)
-
-Convert to the complex version of the storage.
-"""
-complex(T::ITensor) = itensor(complex(tensor(T)))
-
-function complex!(T::ITensor)
-  ct = complex(tensor(T))
-  setstorage!(T, storage(ct))
-  setinds!(T, inds(ct))
-  return T
-end
-
-function convert_eltype(ElType::Type, T::ITensor)
-  if eltype(T) == ElType
-    return T
-  end
-  return itensor(adapt(ElType, tensor(T)))
-end
-
-function convert_leaf_eltype(ElType::Type, T::ITensor)
-  return convert_eltype(ElType, T)
-end
-
-"""
-    convert_leaf_eltype(ElType::Type, A::Array)
-
-Convert the element type of the lowest level containers
-("leaves") of a recursive data structure, such as
-a Vector of Vectors.
-"""
-function convert_leaf_eltype(ElType::Type, A::Array)
-  return map(x -> convert_leaf_eltype(ElType, x), A)
-end
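A brief sketch of the element-type conversions above (pre-rewrite API; the index is illustrative):

```julia
using ITensors

i = Index(2, "i")
A = random_itensor(i, i')  # Float64 elements by default
B = complex(A)             # same data, storage converted to ComplexF64
(eltype(A), eltype(B))     # (Float64, ComplexF64)
```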
-""" -function convert_leaf_eltype(ElType::Type, A::Array) - return map(x -> convert_leaf_eltype(ElType, x), A) -end - -""" - scalar(T::ITensor) - -Extract the element of an order zero ITensor. - -Same as `T[]`. -""" -scalar(T::ITensor)::Any = T[] - -lastindex(A::ITensor, n::Int64) = LastVal() -lastindex(A::ITensor) = LastVal() - -""" - fill!(T::ITensor, x::Number) - -Fill all values of the ITensor with the specified value. -""" -function fill!(T::ITensor, x::Number) - # Use broadcasting `T .= x`? - return settensor!(T, fill!!(tensor(T), x)) -end - -# -# Block sparse related functions -# (Maybe create fallback definitions for dense tensors) -# - -function insertblock!(T::ITensor, args...) - (!isnothing(flux(T)) && flux(T) ≠ flux(T, args...)) && - error("Block does not match current flux") - TR = insertblock!!(tensor(T), args...) - settensor!(T, TR) - return T -end - -function insert_diag_blocks!(T::ITensor) - ## TODO: Add a check that all diag blocks - ## have the correct flux - ## (!isnothing(flux(T)) && check_diagblock_flux(T)) && - ## error("Block does not match current flux") - insert_diag_blocks!(tensor(T)) - return T -end - -""" - getindex(T::ITensor, I::Int...) - -Get the specified element of the ITensor, using internal -Index ordering of the ITensor. - -# Example -```julia -i = Index(2; tags = "i") -A = ITensor(2.0, i, i') -A[1, 2] # 2.0, same as: A[i => 1, i' => 2] -``` -""" -@propagate_inbounds getindex(T::ITensor, I::Integer...)::Any = tensor(T)[I...] - -@propagate_inbounds @inline _getindex(T::Tensor, I::Integer...) = T[I...] - -# TODO: move to NDTensors (would require moving `LastVal` to NDTensors) -@propagate_inbounds @inline function _getindex(T::Tensor, I::Union{Integer,LastVal}...) - return T[lastval_to_int(T, I)...] -end - -# Special case that handles indexing with `end` like `A[i => end, j => 3]` -@propagate_inbounds getindex(T::ITensor, I::Union{Integer,LastVal}...)::Any = - _getindex(tensor(T), I...) - -# Simple version with just integer indexing, bounds checking gets done by NDTensors - -@propagate_inbounds function getindex(T::ITensor, b::Block{N}) where {N} - # XXX: this should return an ITensor view - return tensor(T)[b] -end - -# Version accepting CartesianIndex, useful when iterating over -# CartesianIndices -@propagate_inbounds getindex(T::ITensor, I::CartesianIndex)::Any = T[Tuple(I)...] - -""" - getindex(T::ITensor, ivs...) - -Get the specified element of the ITensor, using a list -of `IndexVal`s or `Pair{<:Index, Int}`. - -# Example -```julia -i = Index(2; tags = "i") -A = ITensor(2.0, i, i') -A[i => 1, i' => 2] # 2.0, same as: A[i' => 2, i => 1] -``` -""" -@propagate_inbounds (getindex(T::ITensor, ivs::Vararg{Any,N})::Any) where {N} = - _getindex(tensor(T), ivs...) - -## Allowing one to get the first ITensor element if its an order 0 tensor or an order 1 tensor with a dimension of 1. Also convert GPU back to CPU -@propagate_inbounds function getindex(T::ITensor)::Any - if order(T) != 0 && dim(T) != 1 - throw( - DimensionMismatch( - "In scalar(T) or T[], ITensor T is not a scalar (it has indices $(inds(T)))." - ), - ) - end - return tensor(T)[] -end - -function _vals(T::ITensor, I::String...) - return _vals(inds(T), I...) -end - -# Enable indexing with string values, like `A["Up"]`. -function getindex(T::ITensor, I1::String, Is::String...) - return T[_vals(T, I1, Is...)...] 
-
-# Defining this with the type signature `I::Vararg{Integer, N}` instead of `I::Integer...` is much faster:
-#
-# 58.720 ns (1 allocation: 368 bytes)
-#
-# instead of:
-#
-# 465.454 ns (7 allocations: 1.86 KiB)
-#
-# for some reason! Maybe it helps with inlining?
-#
-@propagate_inbounds @inline function _setindex!!(
-  ::SymmetryStyle, T::Tensor, x::Number, I::Vararg{Integer,N}
-) where {N}
-  # Generic version, doesn't check the flux
-  return setindex!!(T, x, I...)
-end
-
-@propagate_inbounds @inline function _setindex!!(
-  T::Tensor, x::Number, I::Vararg{Integer,N}
-) where {N}
-  # Use type trait dispatch to split off between QN version that checks the flux
-  # and non-QN version that doesn't
-
-  return _setindex!!(symmetrystyle(T), T, x, I...)
-end
-
-@propagate_inbounds @inline function _setindex!!(
-  T::Tensor, x::Number, I::Vararg{Union{Integer,LastVal},N}
-) where {N}
-  return _setindex!!(T, x, lastval_to_int(T, I)...)
-end
-
-"""
-    setindex!(T::ITensor, x::Number, ivs...)
-
-    setindex!(T::ITensor, x::Number, I::Integer...)
-
-    setindex!(T::ITensor, x::Number, I::CartesianIndex)
-
-Set the specified element of the ITensor, using a list
-of `Pair{<:Index, Integer}` (or `IndexVal`).
-
-If just integers are used, set the specified element of the ITensor
-using internal Index ordering of the ITensor (only for advanced usage,
-only use if you know the exact ordering of the indices).
-
-# Example
-```julia
-i = Index(2; tags = "i")
-A = ITensor(i, i')
-A[i => 1, i' => 2] = 1.0 # same as: A[i' => 2, i => 1] = 1.0
-A[1, 2] = 1.0 # same as: A[i => 1, i' => 2] = 1.0
-
-# Some simple slicing is also supported
-A[i => 2, i' => :] = [2.0 3.0]
-A[2, :] = [2.0 3.0]
-```
-"""
-@propagate_inbounds @inline function setindex!(
-  T::ITensor, x::Number, I::Vararg{Integer,N}
-) where {N}
-  # XXX: for some reason this is slow (257.467 ns (6 allocations: 1.14 KiB) for `A[1, 1, 1] = 1`)
-  # Calling `setindex!` directly here is faster (56.635 ns (1 allocation: 368 bytes) for `A[1, 1, 1] = 1`)
-  # but of course less generic. Can't figure out how to optimize it,
-  # even the generic IndexVal version above is faster (126.818 ns (5 allocations: 768 bytes) for `A[i'' => 1, i' => 1, i => 1] = 1`)
-  return settensor!(T, _setindex!!(tensor(T), x, I...))
-end
-
-@propagate_inbounds function setindex!(T::ITensor, x::Number, I::CartesianIndex)
-  return setindex!(T, x, Tuple(I)...)
-end
-
-@propagate_inbounds @inline function setindex!(
-  T::ITensor, x::Number, I::Vararg{Any,N}
-) where {N}
-  return settensor!(T, _setindex!!(tensor(T), x, I...))
-end
-
-# XXX: what is this definition for?
-Base.checkbounds(::Any, ::Block) = nothing
-
-@propagate_inbounds function setindex!(T::ITensor, A::AbstractArray, I...)
-  @boundscheck checkbounds(tensor(T), I...)
-  TR = setindex!!(tensor(T), A, I...)
-  setstorage!(T, storage(TR))
-  return T
-end
-
-#function setindex!(T::ITensor, A::AbstractArray, b::Block)
-#  # XXX: use setindex!! syntax
-#  tensor(T)[b] = A
-#  return T
-#end
-
-function setindex!(T::ITensor, A::AbstractArray, ivs::Pair{<:Index}...)
-  input_inds = first.(ivs)
-  p = NDTensors.getperm(inds(T), input_inds)
-  # Base.to_indices changes Colons into proper ranges, here
-  # using the dimensions of the indices.
-  vals = to_indices(CartesianIndices(input_inds), last.(ivs))
-  # Lazily permute the array to correctly fit into the ITensor,
-  # accounting for the input indices being in a different order
-  # from the ITensor indices.
-  pvals = NDTensors.permute(vals, p)
-  T[pvals...] = PermutedDimsArray(reshape(A, length.(vals)), p)
-  return T
-end
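To see why the pair-based `setindex!` above lazily permutes its input, here is a sketch (pre-rewrite API; the values are illustrative): the slice is specified with `i'` first even though `A` stores `(i, i')`, and the data still lands in the right place.

```julia
using ITensors

i = Index(2; tags="i")
A = ITensor(0.0, i, i')

A[i' => :, i => 2] = [2.0, 3.0]  # assign along i' at i == 2
A[i => 2, i' => 1] == 2.0        # true: the array was permuted to match A
```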
-
-# Enable indexing with string values, like `A["Up"]`.
-function setindex!(T::ITensor, x::Number, I1::String, Is::String...)
-  T[_vals(T, I1, Is...)...] = x
-  return T
-end
-
-#function setindex!(::ITensor{Any}, ::Number, ivs...)
-#  error("Cannot set the element of an emptyITensor(). Must define indices to set elements")
-#end
-
-#########################
-# End ITensor Accessor Functions
-#
-
-#########################
-# ITensor Index Functions
-#
-
-"""
-    inds(T::ITensor)
-
-Return the indices of the ITensor as a Tuple.
-"""
-inds(T::ITensor) = inds(tensor(T))
-
-"""
-    ind(T::ITensor, i::Int)
-
-Get the Index of the ITensor along dimension i.
-"""
-ind(T::ITensor, i::Int) = ind(tensor(T), i)
-
-"""
-    eachindex(A::ITensor)
-
-Create an iterable object for visiting each element of the ITensor `A` (including structurally
-zero elements for sparse tensors).
-
-For example, for dense tensors this may return `1:length(A)`, while for sparse tensors
-it may return a Cartesian range.
-"""
-eachindex(A::ITensor) = eachindex(tensor(A))
-
-# TODO: name this `inds` or `indscollection`?
-itensor2inds(A::ITensor)::Any = inds(A)
-itensor2inds(A::Tensor) = inds(A)
-itensor2inds(i::Index) = (i,)
-itensor2inds(A) = A
-function map_itensor2inds(A::Tuple{Vararg{Any,N}}) where {N}
-  return ntuple(i -> itensor2inds(A[i]), Val(N))
-end
-
-# in
-hasind(A, i::Index) = i ∈ itensor2inds(A)
-
-# issubset
-hasinds(A, is) = is ⊆ itensor2inds(A)
-hasinds(A, is::Index...) = hasinds(A, is)
-
-"""
-    hasinds(is...)
-
-Returns an anonymous function `x -> hasinds(x, is...)` which
-accepts an ITensor or IndexSet and returns `true` if the
-ITensor or IndexSet has the indices `is`.
-"""
-hasinds(is::Indices) = x -> hasinds(x, is)
-hasinds(is::Index...) = hasinds(is)
-
-"""
-    hascommoninds(A, B; kwargs...)
-
-    hascommoninds(B; kwargs...) -> f::Function
-
-Check if the ITensors or sets of indices `A` and `B` have
-common indices.
-
-If only one ITensor or set of indices `B` is passed, return a
-function `f` such that `f(A) = hascommoninds(A, B; kwargs...)`
-"""
-hascommoninds(A, B; kwargs...) = !isnothing(commonind(A, B; kwargs...))
-
-hascommoninds(B; kwargs...) = x -> hascommoninds(x, B; kwargs...)
-
-# issetequal
-hassameinds(A, B) = issetequal(itensor2inds(A), itensor2inds(B))
-
-# Apply the Index set function and then filter the results
-function filter_inds_set_function(
-  ffilter::Function, fset::Function, A::Vararg{Any,N}
-) where {N}
-  return filter(ffilter, fset(map_itensor2inds(A)...))
-end
-
-function filter_inds_set_function(fset::Function, A...; kwargs...)
-  return filter_inds_set_function(fmatch(; kwargs...), fset, A...)
-end
-
-for (finds, fset) in (
-  (:commoninds, :_intersect),
-  (:noncommoninds, :_symdiff),
-  (:uniqueinds, :_setdiff),
-  (:unioninds, :_union),
-)
-  @eval begin
-    $finds(args...; kwargs...) = filter_inds_set_function($fset, args...; kwargs...)
-  end
-end
-
-for find in (:commonind, :noncommonind, :uniqueind, :unionind)
-  @eval begin
-    $find(args...; kwargs...) = getfirst($(Symbol(find, :s))(args...; kwargs...))
-  end
-end
-
-function index_filter_kwargs_docstring()
-  return """
-    Optional keyword arguments:
-    * tags::String - a tag name or comma separated list of tag names that the returned indices must all have
-    * plev::Int - common prime level that the returned indices must all have
-    * inds - Index or collection of indices. Returned indices must come from this set of indices.
- """ -end - -# intersect -@doc """ - commoninds(A, B; kwargs...) - -Return a Vector with indices that are common between the indices of `A` and `B` -(the set intersection, similar to `Base.intersect`). - -$(index_filter_kwargs_docstring()) -""" commoninds - -# firstintersect -@doc """ - commonind(A, B; kwargs...) - -Return the first `Index` common between the indices of `A` and `B`. - -See also [`commoninds`](@ref). - -$(index_filter_kwargs_docstring()) -""" commonind - -# symdiff -@doc """ - noncommoninds(A, B; kwargs...) - -Return a Vector with indices that are not common between the indices of `A` and -`B` (the symmetric set difference, similar to `Base.symdiff`). - -$(index_filter_kwargs_docstring()) -""" noncommoninds - -# firstsymdiff -@doc """ - noncommonind(A, B; kwargs...) - -Return the first `Index` not common between the indices of `A` and `B`. - -See also [`noncommoninds`](@ref). - -$(index_filter_kwargs_docstring()) -""" noncommonind - -# setdiff -@doc """ - uniqueinds(A, B; kwargs...) - -Return Vector with indices that are unique to the set of indices of `A` and not -in `B` (the set difference, similar to `Base.setdiff`). - -$(index_filter_kwargs_docstring()) -""" uniqueinds - -# firstsetdiff -@doc """ - uniqueind(A, B; kwargs...) - -Return the first `Index` unique to the set of indices of `A` and not in `B`. - -See also [`uniqueinds`](@ref). - -$(index_filter_kwargs_docstring()) -""" uniqueind - -# union -@doc """ - unioninds(A, B; kwargs...) - -Return a Vector with indices that are the union of the indices of `A` and `B` -(the set union, similar to `Base.union`). - -$(index_filter_kwargs_docstring()) -""" unioninds - -# firstunion -@doc """ - unionind(A, B; kwargs...) - -Return the first `Index` in the union of the indices of `A` and `B`. - -See also [`unioninds`](@ref). - -$(index_filter_kwargs_docstring()) -""" unionind - -firstind(A...; kwargs...) = getfirst(map_itensor2inds(A)...; kwargs...) - -filterinds(f::Function, A...) = filter(f, map_itensor2inds(A)...) -filterinds(A...; kwargs...) = filter(map_itensor2inds(A)...; kwargs...) - -# Faster version when no filtering is requested -filterinds(A::ITensor) = inds(A) -filterinds(is::Indices) = is - -# For backwards compatibility -inds(A...; kwargs...) = filterinds(A...; kwargs...) - -# in-place versions of priming and tagging -for (fname, fname!) in [ - (:(prime), :(prime!)), - (:(setprime), :(setprime!)), - (:(noprime), :(noprime!)), - (:(replaceprime), :(replaceprime!)), - (:(swapprime), :(swapprime!)), - (:(TagSets.addtags), :(addtags!)), - (:(TagSets.removetags), :(removetags!)), - (:(TagSets.replacetags), :(replacetags!)), - (:(settags), :(settags!)), - (:(swaptags), :(swaptags!)), - (:(replaceind), :(replaceind!)), - (:(replaceinds), :(replaceinds!)), - (:(swapind), :(swapind!)), - (:(swapinds), :(swapinds!)), -] - @eval begin - $fname(f::Function, A::ITensor, args...) = ITensor($fname(f, tensor(A), args...)) - - # Inlining makes the ITensor functions slower - @noinline function $fname(f::Function, A::Tensor, args...) - return setinds(A, $fname(f, inds(A), args...)) - end - - function $(fname!)(f::Function, A::ITensor, args...) - return settensor!(A, $fname(f, tensor(A), args...)) - end - - $fname(A::ITensor, args...; kwargs...) = itensor($fname(tensor(A), args...; kwargs...)) - - # Inlining makes the ITensor functions slower - @noinline function $fname(A::Tensor, args...; kwargs...) - return setinds(A, $fname(inds(A), args...; kwargs...)) - end - - function $(fname!)(A::ITensor, args...; kwargs...) 
- return settensor!(A, $fname(tensor(A), args...; kwargs...)) - end - end -end - -priming_tagging_doc = """ -Optionally, only modify the indices with the specified keyword arguments. - -# Arguments -- `tags = nothing`: if specified, only modify Index `i` if `hastags(i, tags) == true`. -- `plev = nothing`: if specified, only modify Index `i` if `hasplev(i, plev) == true`. - -The ITensor functions come in two versions, `f` and `f!`. The latter modifies -the ITensor in-place. In both versions, the ITensor storage is not modified or -copied (so it returns an ITensor with a view of the original storage). -""" - -@doc """ - prime[!](A::ITensor, plinc::Int = 1; ) -> ITensor - - prime(inds, plinc::Int = 1; ) -> IndexSet - -Increase the prime level of the indices of an ITensor or collection of indices. - -$priming_tagging_doc -""" prime(::ITensor, ::Any...) - -@doc """ - setprime[!](A::ITensor, plev::Int; ) -> ITensor - - setprime(inds, plev::Int; ) -> IndexSet - -Set the prime level of the indices of an ITensor or collection of indices. - -$priming_tagging_doc -""" setprime(::ITensor, ::Any...) - -@doc """ - noprime[!](A::ITensor; ) -> ITensor - - noprime(inds; ) -> IndexSet - -Set the prime level of the indices of an ITensor or collection of indices to zero. - -$priming_tagging_doc -""" noprime(::ITensor, ::Any...) - -@doc """ - replaceprime[!](A::ITensor, plold::Int, plnew::Int; ) -> ITensor - replaceprime[!](A::ITensor, plold => plnew; ) -> ITensor - mapprime[!](A::ITensor, ; ) -> ITensor - - replaceprime(inds, plold::Int, plnew::Int; ) - replaceprime(inds::IndexSet, plold => plnew; ) - mapprime(inds, ; ) - -Set the prime level of the indices of an ITensor or collection of indices with -prime level `plold` to `plnew`. - -$priming_tagging_doc -""" mapprime(::ITensor, ::Any...) - -@doc """ - swapprime[!](A::ITensor, pl1::Int, pl2::Int; ) -> ITensor - swapprime[!](A::ITensor, pl1 => pl2; ) -> ITensor - - swapprime(inds, pl1::Int, pl2::Int; ) - swapprime(inds, pl1 => pl2; ) - -Set the prime level of the indices of an ITensor or collection of indices with -prime level `pl1` to `pl2`, and those with prime level `pl2` to `pl1`. - -$priming_tagging_doc -""" swapprime(::ITensor, ::Any...) - -@doc """ - addtags[!](A::ITensor, ts::String; ) -> ITensor - - addtags(inds, ts::String; ) - -Add the tags `ts` to the indices of an ITensor or collection of indices. - -$priming_tagging_doc -""" TagSets.addtags(::ITensor, ::Any...) - -@doc """ - removetags[!](A::ITensor, ts::String; ) -> ITensor - - removetags(inds, ts::String; ) - -Remove the tags `ts` from the indices of an ITensor or collection of indices. - -$priming_tagging_doc -""" TagSets.removetags(::ITensor, ::Any...) - -@doc """ - settags[!](A::ITensor, ts::String; ) -> ITensor - - settags(is::IndexSet, ts::String; ) -> IndexSet - -Set the tags of the indices of an ITensor or IndexSet to `ts`. - -$priming_tagging_doc -""" settags(::ITensor, ::Any...) - -@doc """ - replacetags[!](A::ITensor, tsold::String, tsnew::String; ) -> ITensor - - replacetags(is::IndexSet, tsold::String, tsnew::String; ) -> IndexSet - -Replace the tags `tsold` with `tsnew` for the indices of an ITensor. - -$priming_tagging_doc -""" TagSets.replacetags(::ITensor, ::Any...) - -@doc """ - swaptags[!](A::ITensor, ts1::String, ts2::String; ) -> ITensor - - swaptags(is::IndexSet, ts1::String, ts2::String; ) -> IndexSet - -Swap the tags `ts1` with `ts2` for the indices of an ITensor. - -$priming_tagging_doc -""" swaptags(::ITensor, ::Any...) 
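-
-# For orientation, a minimal sketch of how the priming and tagging functions
-# documented above compose (illustrative only; assumes `i` and `j` are
-# constructed as shown, and uses `random_itensor` as elsewhere in this codebase):
-#
-#   i = Index(2, "i")
-#   j = Index(2, "j")
-#   A = random_itensor(i, j)
-#   prime(A)                      # primes every index: (i', j')
-#   prime(A; tags="i")            # primes only the Index tagged "i"
-#   addtags(A, "left"; tags="j")  # adds the tag "left" to the "j" Index
-#   swaptags(A, "i", "j")         # swaps the tags "i" and "j"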
- -@doc """ - replaceind[!](A::ITensor, i1::Index, i2::Index) -> ITensor - -Replace the Index `i1` with the Index `i2` in the ITensor. - -The indices must have the same space (i.e. the same dimension and QNs, if applicable). -""" replaceind(::ITensor, ::Any...) - -@doc """ - replaceinds(A::ITensor, inds1, inds2) -> ITensor - - replaceinds!(A::ITensor, inds1, inds2) - -Replace the Index `inds1[n]` with the Index `inds2[n]` in the ITensor, where `n` -runs from `1` to `length(inds1) == length(inds2)`. - -The indices must have the same space (i.e. the same dimension and QNs, if applicable). - -The storage of the ITensor is not modified or copied (the output ITensor is a -view of the input ITensor). -""" replaceinds(::ITensor, ::Any...) - -@doc """ - swapind(A::ITensor, i1::Index, i2::Index) -> ITensor - - swapind!(A::ITensor, i1::Index, i2::Index) - -Swap the Index `i1` with the Index `i2` in the ITensor. - -The indices must have the same space (i.e. the same dimension and QNs, if applicable). -""" swapind(::ITensor, ::Any...) - -@doc """ - swapinds(A::ITensor, inds1, inds2) -> ITensor - - swapinds!(A::ITensor, inds1, inds2) - -Swap the Index `inds1[n]` with the Index `inds2[n]` in the ITensor, where `n` -runs from `1` to `length(inds1) == length(inds2)`. - -The indices must have the same space (i.e. the same dimension and QNs, if applicable). - -The storage of the ITensor is not modified or copied (the output ITensor is a -view of the input ITensor). -""" swapinds(::ITensor, ::Any...) - -# XXX: rename to: -# hastags(any, A, ts) -""" - anyhastags(A::ITensor, ts::Union{String, TagSet}) - hastags(A::ITensor, ts::Union{String, TagSet}) - -Check if any of the indices in the ITensor have the specified tags. -""" -anyhastags(A::ITensor, ts) = anyhastags(inds(A), ts) - -TagSets.hastags(A::ITensor, ts) = hastags(inds(A), ts) - -# XXX: rename to: -# hastags(all, A, ts) -""" - allhastags(A::ITensor, ts::Union{String, TagSet}) - -Check if all of the indices in the ITensor have the specified tags. -""" -allhastags(A::ITensor, ts) = allhastags(inds(A), ts) - -# Returns a tuple of pairs of indices, where the pairs -# are determined by the prime level pairs `plev` and -# tag pairs `tags`. -function indpairs(T::ITensor; plev::Pair{Int,Int}=0 => 1, tags::Pair=ts"" => ts"") - is1 = filterinds(T; plev=first(plev), tags=first(tags)) - is2 = filterinds(T; plev=last(plev), tags=last(tags)) - is2to1 = replacetags(mapprime(is2, last(plev) => first(plev)), last(tags) => first(tags)) - is_first = commoninds(is1, is2to1) - is_last = replacetags( - mapprime(is_first, first(plev) => last(plev)), first(tags) => last(tags) - ) - is_last = permute(commoninds(T, is_last), is_last) - return is_first .=> is_last -end - -######################### -# End ITensor Index Functions -# - -######################### -# ITensor Operations -# - -similar(T::ITensor, args...)::ITensor = itensor(NDTensors.similar(tensor(T), args...)) - -function isapprox(A::ITensor, B::ITensor; kwargs...) - if !hassameinds(A, B) - error("In `isapprox(::ITensor, ::ITensor)`, the indices of the ITensors do not - match. The first ITensor has indices: \n\n$(inds(A))\n\nbut the second - ITensor has indices: \n\n$(inds(B))") - end - B = permute(B, inds(A)) - return isapprox(array(A), array(B); kwargs...) 
-end - -function randn!(T::ITensor) - return randn!(Random.default_rng(), T) -end - -function randn!(rng::AbstractRNG, T::ITensor) - return settensor!(T, randn!!(rng, tensor(T))) -end - -norm(T::ITensor) = norm(tensor(T)) - -function dag(as::AliasStyle, T::Tensor{ElT,N}) where {ElT,N} - if using_auto_fermion() && has_fermionic_subspaces(inds(T)) # - CT = conj(NeverAlias(), T) - NDTensors.scale_blocks!(CT, block -> NDTensors.permfactor(reverse(1:N), block, inds(T))) - return setinds(CT, dag(inds(T))) - end - return setinds(conj(as, T), dag(inds(T))) -end - -function dag(as::AliasStyle, T::ITensor) - return itensor(dag(as, tensor(T))) -end - -# Helpful for generic code -dag(x::Number) = conj(x) - -""" - dag(T::ITensor; allow_alias = true) - -Complex conjugate the elements of the ITensor `T` and dagger the indices. - -By default, an alias of the ITensor is returned (i.e. the output ITensor -may share data with the input ITensor). If `allow_alias = false`, -an alias is never returned. -""" -function dag(T::ITensor; kwargs...) - allow_alias::Bool = deprecated_keyword_argument( - Bool, - kwargs; - new_kw=:allow_alias, - old_kw=:always_copy, - default=true, - funcsym=:dag, - map=!, - ) - aliasstyle::Union{AllowAlias,NeverAlias} = allow_alias ? AllowAlias() : NeverAlias() - return dag(aliasstyle, T) -end - -function (T::ITensor * x::Number)::ITensor - return itensor(x * tensor(T)) -end - -# TODO: what about noncommutative number types? -(x::Number * T::ITensor) = T * x - -(A::ITensor / x::Number) = itensor(tensor(A) / x) - -(T1::ITensor / T2::ITensor) = T1 / T2[] - --(A::ITensor) = itensor(-tensor(A)) - -function _add(A::Tensor, B::Tensor) - if _isemptyscalar(A) && ndims(B) > 0 - return itensor(B) - elseif _isemptyscalar(B) && ndims(A) > 0 - return itensor(A) - end - ndims(A) != ndims(B) && - throw(DimensionMismatch("cannot add ITensors with different numbers of indices")) - itA = itensor(A) - itB = itensor(B) - itC = copy(itA) - itC .+= itB - return itC -end - -# TODO: move the order-0 EmptyStorage ITensor special case to NDTensors. -# Unfortunately this is more complicated than it might seem since it -# has to pass through the broadcasting mechanism first. -function (A::ITensor + B::ITensor) - return itensor(_add(tensor(A), tensor(B))) -end - -# TODO: move the order-0 EmptyStorage ITensor special to NDTensors -function (A::ITensor - B::ITensor) - if _isemptyscalar(A) && ndims(B) > 0 - return -B - elseif _isemptyscalar(B) && ndims(A) > 0 - return A - end - ndims(A) != ndims(B) && - throw(DimensionMismatch("cannot subtract ITensors with different numbers of indices")) - C = copy(A) - C .-= B - return C -end - -real(T::ITensor)::ITensor = itensor(real(tensor(T))) - -imag(T::ITensor)::ITensor = itensor(imag(tensor(T))) - -conj(T::ITensor)::ITensor = itensor(conj(tensor(T))) - -dag(::Nothing) = nothing - -function (A::ITensor == B::ITensor) - !hassameinds(A, B) && return false - return norm(A - B) == zero(promote_type(eltype(A), eltype(B))) -end - -LinearAlgebra.promote_leaf_eltypes(A::ITensor) = eltype(A) - -diag(T::ITensor) = diag(tensor(T)) - -mul!(C::ITensor, A::ITensor, B::ITensor, args...)::ITensor = contract!(C, A, B, args...) - -dot(A::ITensor, B::ITensor) = (dag(A) * B)[] - -inner(y::ITensor, A::ITensor, x::ITensor) = (dag(y) * A * x)[] -inner(y::ITensor, x::ITensor) = (dag(y) * x)[] - -# -# In-place operations -# - -""" - normalize!(T::ITensor) - -Normalize an ITensor in-place, such that norm(T)==1. 
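-
-# Example
-
-A minimal sketch (illustrative; assumes `i` is an `Index`):
-
-```julia
-T = random_itensor(i, i')
-normalize!(T)
-norm(T) ≈ 1 # true, up to floating point error
-```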
-""" -normalize!(T::ITensor) = (T .*= 1 / norm(T)) - -""" - copyto!(B::ITensor, A::ITensor) - -Copy the contents of ITensor A into ITensor B. -``` -B .= A -``` -""" -function copyto!(R::ITensor, T::ITensor) - R .= T - return R -end - -# Note this already assumes R === T1, which will be lifted -# in the future. -function _map!!(f::Function, R::Tensor, T1::Tensor, T2::Tensor) - perm = NDTensors.getperm(inds(R), inds(T2)) - if !isperm(perm) - error(""" - You are trying to add an ITensor with indices: - - $(inds(T2)) - - into an ITensor with indices: - - $(inds(R)) - - but the indices are not permutations of each other. - """) - end - if hasqns(T2) && hasqns(R) - # Check that Index arrows match - for (n, p) in enumerate(perm) - if dir(inds(R)[n]) != dir(inds(T2)[p]) - #println("Mismatched Index: \n$(inds(R)[n])") - error("Index arrows must be the same to add, subtract, map, or scale QN ITensors") - end - end - end - return permutedims!!(R, T2, perm, f) -end - -function map!(f::Function, R::ITensor, T1::ITensor, T2::ITensor) - R !== T1 && error("`map!(f, R, T1, T2)` only supports `R === T1` right now") - return settensor!(R, _map!!(f, tensor(R), tensor(T1), tensor(T2))) -end - -map(f, x::ITensor) = itensor(map(f, tensor(x))) - -# Some limited set of reductions. Ideally we -# would overload `Base.mapreduce` which would -# cover all of these cases, but we need to make -# sure that the `Tensor` version of `mapreduce` -# is correct and efficient for all sparse storage types. -Base.sum(x::ITensor) = sum(tensor(x)) -Base.prod(x::ITensor) = prod(tensor(x)) - -""" - axpy!(a::Number, v::ITensor, w::ITensor) -``` -w .+= a .* v -``` -""" -axpy!(a::Number, v::ITensor, w::ITensor) = (w .+= a .* v) - -""" -axpby!(a,v,b,w) - -``` -w .= a .* v + b .* w -``` -""" -axpby!(a::Number, v::ITensor, b::Number, w::ITensor) = (w .= a .* v + b .* w) - -""" - scale!(A::ITensor,x::Number) = rmul!(A,x) - -Scale the ITensor A by x in-place. May also be written `rmul!`. -``` -A .*= x -``` -""" -scale!(T::ITensor, α::Number) = (T .*= α) - -rmul!(T::ITensor, α::Number) = (T .*= α) - -lmul!(T::ITensor, α::Number) = (T .= α .* T) - -""" - mul!(A::ITensor, x::Number, B::ITensor) - -Scalar multiplication of ITensor B with x, and store the result in A. -Like `A .= x .* B`. 
-""" -mul!(R::ITensor, α::Number, T::ITensor) = (R .= α .* T) - -mul!(R::ITensor, T::ITensor, α::Number) = (R .= T .* α) - -######################### -# End ITensor Operations -# - -# Helper function for deprecating a keyword argument -function deprecated_keyword_argument( - ::Type{T}, kwargs; new_kw, old_kw, default, funcsym, map=identity -)::T where {T} - has_new_kw = haskey(kwargs, new_kw) - has_old_kw = haskey(kwargs, old_kw) - res::T = if has_old_kw - Base.depwarn( - "In `$func`, keyword argument `$old_kw` is deprecated in favor of `$new_kw`.", func - ) - if has_new_kw - println( - "Warning: keyword arguments `$old_kw` and `$new_kw` are both specified, using `$new_kw`.", - ) - kwargs[new_kw] - else - map(kwargs[old_kw]) - end - else - get(kwargs, new_kw, default) - end - return res -end - -####################################################################### -# -# Printing, reading and writing ITensors -# - -function summary(io::IO, T::ITensor) - print(io, "ITensor ord=$(order(T))") - if hasqns(T) - println(io) - for i in 1:order(T) - print(io, inds(T)[i]) - println(io) - end - else - for i in 1:order(T) - print(io, " ", inds(T)[i]) - end - println(io) - end - return print(io, typeof(storage(T))) -end - -# TODO: make a specialized printing from Diag -# that emphasizes the missing elements -function show(io::IO, T::ITensor) - println(io, "ITensor ord=$(order(T))") - return show(io, MIME"text/plain"(), tensor(T)) -end - -function show(io::IO, mime::MIME"text/plain", T::ITensor) - return summary(io, T) -end - -function readcpp(io::IO, ::Type{Dense{ValT}}; format="v3") where {ValT} - if format == "v3" - size = read(io, UInt64) - data = Vector{ValT}(undef, size) - for n in 1:size - data[n] = read(io, ValT) - end - return Dense(data) - else - throw(ArgumentError("read Dense: format=$format not supported")) - end -end - -function readcpp(io::IO, ::Type{ITensor}; format="v3") - if format == "v3" - # TODO: use Vector{Index} here? - inds = readcpp(io, IndexSet; kwargs...) - read(io, 12) # ignore scale factor by reading 12 bytes - storage_type = read(io, Int32) - if storage_type == 0 # Null - storage = Dense{Nothing}() - elseif storage_type == 1 # DenseReal - storage = readcpp(io, Dense{Float64}; kwargs...) - elseif storage_type == 2 # DenseCplx - storage = readcpp(io, Dense{ComplexF64}; kwargs...) 
-    elseif storage_type == 3 # Combiner
-      storage = CombinerStorage(inds[1])
-    #elseif storage_type==4 # DiagReal
-    #elseif storage_type==5 # DiagCplx
-    #elseif storage_type==6 # QDenseReal
-    #elseif storage_type==7 # QDenseCplx
-    #elseif storage_type==8 # QCombiner
-    #elseif storage_type==9 # QDiagReal
-    #elseif storage_type==10 # QDiagCplx
-    #elseif storage_type==11 # ScalarReal
-    #elseif storage_type==12 # ScalarCplx
-    else
-      throw(ErrorException("C++ ITensor storage type $storage_type not yet supported"))
-    end
-    return itensor(storage, inds)
-  else
-    throw(ArgumentError("read ITensor: format=$format not supported"))
-  end
-end
diff --git a/src/lastval.jl b/src/lastval.jl
deleted file mode 100644
index b1f0c7867f..0000000000
--- a/src/lastval.jl
+++ /dev/null
@@ -1,23 +0,0 @@
-
-struct LastVal{F}
-  f::F
-end
-
-LastVal() = LastVal(identity)
-
-# TODO: make these definitions work for notation
-# A[1, end-1]
-
-(l::LastVal + n::Integer) = LastVal(x -> l.f(x) + n)
-(n::Integer + l::LastVal) = LastVal(x -> n + l.f(x))
-(l::LastVal - n::Integer) = LastVal(x -> l.f(x) - n)
-(n::Integer - l::LastVal) = LastVal(x -> n - l.f(x))
-(l::LastVal * n::Integer) = LastVal(x -> l.f(x) * n)
-(n::Integer * l::LastVal) = LastVal(x -> n * l.f(x))
-(-l::LastVal) = LastVal(x -> -l.f(x))
-^(l::LastVal, n::Integer) = LastVal(x -> l.f(x)^n)
-
-lastval_to_int(n::Int, l::LastVal) = l.f(n)
-lastval_to_int(::Int, n::Int) = n
-lastval_to_int(dimsT::Tuple, I::Tuple) = lastval_to_int.(dimsT, I)
-lastval_to_int(A::Tensor, I::Tuple) = lastval_to_int(size(A), I)
diff --git a/src/lib/ContractionSequenceOptimization/src/ContractionSequenceOptimization.jl b/src/lib/ContractionSequenceOptimization/src/ContractionSequenceOptimization.jl
deleted file mode 100644
index e580b90903..0000000000
--- a/src/lib/ContractionSequenceOptimization/src/ContractionSequenceOptimization.jl
+++ /dev/null
@@ -1,42 +0,0 @@
-module ContractionSequenceOptimization
-
-export optimal_contraction_sequence, contraction_cost
-
-include("utils.jl")
-include("three_tensors.jl")
-include("breadth_first_constructive.jl")
-include("contraction_cost.jl")
-
-#
-# The integer type of the dimensions and costs.
-# Needs to be large to avoid overflow, but larger
-# integer types are a little bit slower.
-#
-# For very large dimensions, one could try:
-#
-# https://github.com/rfourquet/BitIntegers.jl
-#
-# however in that limit it would be best to use
-# symbolic dimensions.
-#
-const DimT = UInt128
-
-"""
-    optimal_contraction_sequence(T)
-
-Returns a contraction sequence for contracting the tensors `T`. The sequence is
-generally optimal (currently, outer product contractions are skipped, but some
-optimal sequences require outer product contractions).
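-
-# Example
-
-A minimal sketch (illustrative; here the index sets of three ITensors `A`, `B`,
-and `C` are passed, e.g. as obtained with `inds`):
-
-```julia
-sequence = optimal_contraction_sequence([inds(A), inds(B), inds(C)])
-# Returns a nested sequence such as Any[3, [1, 2]], meaning: contract
-# tensors 1 and 2 first, then contract the result with tensor 3.
-```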
-""" -function optimal_contraction_sequence(T) - if length(T) == 1 - return Any[1] - elseif length(T) == 2 - return Any[1, 2] - elseif length(T) == 3 - return optimal_contraction_sequence(T[1], T[2], T[3]) - end - return breadth_first_constructive(T) -end - -end # module ContractionSequenceOptimization diff --git a/src/lib/ContractionSequenceOptimization/src/breadth_first_constructive.jl b/src/lib/ContractionSequenceOptimization/src/breadth_first_constructive.jl deleted file mode 100644 index cf5b6919d6..0000000000 --- a/src/lib/ContractionSequenceOptimization/src/breadth_first_constructive.jl +++ /dev/null @@ -1,235 +0,0 @@ - -# -# Breadth-first constructive approach -# - -function breadth_first_constructive(indsT::Vector) - ntensors = length(indsT) - if ntensors ≤ 16 - return breadth_first_constructive(UInt16, DimT, indsT) - elseif ntensors ≤ 32 - return breadth_first_constructive(UInt32, DimT, indsT) - elseif ntensors ≤ 64 - return breadth_first_constructive(UInt64, DimT, indsT) - elseif ntensors ≤ 128 - return breadth_first_constructive(UInt128, DimT, indsT) - else - return breadth_first_constructive(BitSet, DimT, indsT) - end -end - -breadth_first_constructive(T::Tuple) = breadth_first_constructive(collect(T)) - -function breadth_first_constructive( - ::Type{TensorSetT}, ::Type{DimT}, T::Vector{IndexSetT} -) where {IndexSetT,TensorSetT,DimT} - labels, alldims = label_dims(DimT, T) - nlabels = length(alldims) - if nlabels ≤ 16 - return breadth_first_constructive(TensorSetT, UInt16, labels, alldims) - elseif nlabels ≤ 32 - return breadth_first_constructive(TensorSetT, UInt32, labels, alldims) - elseif nlabels ≤ 64 - return breadth_first_constructive(TensorSetT, UInt64, labels, alldims) - elseif nlabels ≤ 128 - return breadth_first_constructive(TensorSetT, UInt128, labels, alldims) - else - return breadth_first_constructive(TensorSetT, BitSet, labels, alldims) - end -end - -function breadth_first_constructive( - ::Type{TensorSetT}, ::Type{LabelSetT}, labels::Vector, alldims::Vector -) where {TensorSetT,LabelSetT} - return breadth_first_constructive( - TensorSetT, map(label -> bitset(LabelSetT, label), labels), alldims - ) -end - -# TODO: delete? 
-#function breadth_first_constructive(::Type{TensorSetT}, ::Type{LabelSetT}, ::Type{DimT}, -# T::Vector{<: ITensor}) where {TensorSetT, LabelSetT, DimT} -# indsT = [inds(Tₙ) for Tₙ in T] -# return breadth_first_constructive(TensorSetT, LabelSetT, DimT, indsT) -#end - -function breadth_first_constructive( - ::Type{TensorSetT}, ::Type{LabelSetT}, ::Type{DimT}, T::Vector{IndexSetT} -) where {IndexSetT,TensorSetT,LabelSetT,DimT} - labels, alldims = label_dims(DimT, T) - return breadth_first_constructive( - TensorSetT, map(label -> bitset(LabelSetT, label), labels), alldims - ) -end - -# A type storing information about subnetworks -const SubNetwork{LabelSetT,DimT} = NamedTuple{ - (:inds, :cost, :sequence),Tuple{LabelSetT,DimT,Vector{Any}} -} - -function breadth_first_constructive( - ::Type{TensorSetT}, T::Vector{LabelSetT}, alldims::Vector{DimT} -) where {TensorSetT,LabelSetT,DimT} - components = connectedcomponents(T, alldims) - N = length(components) - if N == 1 - return _breadth_first_constructive(TensorSetT, collect(1:length(T)), T, alldims) - end - sequences = Vector{Any}(undef, N) - for n in 1:N - componentsₙ = components[n] - if length(componentsₙ) == 1 - sequences[n] = only(componentsₙ) - continue - elseif length(componentsₙ) == 2 - sequences[n] = componentsₙ - continue - end - sequences[n] = _breadth_first_constructive( - TensorSetT, componentsₙ, T[componentsₙ], alldims - ) - end - return sequences -end - -# Apply breadth_first_constructive to a single disconnected subnetwork -# Based on: https://arxiv.org/abs/1304.6112 and https://github.com/Jutho/TensorOperations.jl/blob/v3.1.0/src/indexnotation/optimaltree.jl -function _breadth_first_constructive( - ::Type{TensorSetT}, Tlabels::Vector, T::Vector{LabelSetT}, alldims::Vector{DimT} -) where {TensorSetT,LabelSetT,DimT} - n = length(T) - - # `cache[c]` is the set of all objects made up by - # contracting `c` unique tensors from the original tensors `1:n`. - cache = Vector{Dict{TensorSetT,SubNetwork{LabelSetT,DimT}}}(undef, n) - for c in 1:n - # Initialized to empty - cache[c] = eltype(cache)() - end - # Fill the first cache with trivial data - for i in 1:n - cache[1][bitset(TensorSetT, [Tlabels[i]])] = (inds=T[i], cost=0, sequence=Any[]) - end - - # TODO: pick a reasonable maxcost, the product of all dimensions - # of tensors in the network. Could also be `typemax(DimT)`? - maxcost = try - # This may overflow, so we catch the error and return typemax(DimT) - Base.Checked.checked_mul( - reduce(Base.Checked.checked_mul, alldims; init=one(DimT)), maximum(alldims) - ) - catch - typemax(DimT) - end - - # TODO: pick a reasonable initialcost - # Maybe use the cost of the trivial contraction [4, [3, [2, 1]]]? - tensordims = Vector{DimT}(undef, n) - for k in 1:n - tensordims[k] = dim(T[k], alldims) - end - _initialcost, overflow = Base.Checked.mul_with_overflow( - maximum(tensordims), minimum(tensordims) - ) - _initialcost = overflow ? 
typemax(DimT) : _initialcost
-  initialcost = min(maxcost, _initialcost)
-
-  # Factor to increase the cost cap by each iteration
-  costfac = maximum(alldims)
-
-  currentcost = initialcost
-  previouscost = zero(initialcost)
-
-  while isempty(cache[n])
-    nextcost = maxcost
-
-    # c is the total number of tensors being contracted
-    # in the current sequence
-    for c in 2:n
-      # For each pair of sets Sᵈ, Sᶜ⁻ᵈ, 1 ≤ d ≤ ⌊c/2⌋
-      for d in 1:(c ÷ 2)
-        for a in keys(cache[d]), b in keys(cache[c - d])
-          if d == c - d && _isless(b, a)
-            # When d == c-d (the subset sizes are equal), check that
-            # b > a so that the cases (a,b) and (b,a) are not repeated
-            continue
-          end
-
-          if !_isemptyset(_intersect(a, b))
-            # Check that each element of S¹ appears
-            # at most once in (TᵃTᵇ).
-            continue
-          end
-
-          # Use the previously computed cost of contracting the network `ab` and compare against the previous cost
-          ab = _union(a, b)
-          cache_c = @inbounds cache[c]
-          cache_ab = get(cache_c, ab, nothing)
-          currentcost_ab = isnothing(cache_ab) ? currentcost : cache_ab.cost
-          if currentcost_ab ≤ previouscost
-            continue
-          end
-
-          # Determine the cost μ of contracting Tᵃ, Tᵇ
-          # These dictionary calls and `contraction_cost` take
-          # up most of the time.
-          cache_a = cache[d][a]
-          cache_b = cache[c - d][b]
-
-          if dim(_intersect(cache_a.inds, cache_b.inds), alldims) < 2
-            # XXX: For now, ignore outer product contractions.
-            # In the future, handle this in a more sophisticated way.
-            continue
-          end
-
-          cost, inds_ab = contraction_cost(cache_a.inds, cache_b.inds, alldims)
-          if iszero(cost)
-            # If the cost is zero, that means the multiplication overflowed
-            continue
-          end
-
-          if d > 1
-            # Add the cost of contracting the subnetwork `a`
-            cost, overflow = Base.Checked.add_with_overflow(cost, cache_a.cost)
-            overflow && continue
-          end
-          if c - d > 1
-            # Add the cost of contracting the subnetwork `b`
-            cost, overflow = Base.Checked.add_with_overflow(cost, cache_b.cost)
-            overflow && continue
-          end
-
-          if cost ≤ currentcost_ab
-            cost_ab = cost
-            if d == 1
-              sequence_a = _only(a)
-            else
-              sequence_a = cache_a.sequence
-            end
-            if c - d == 1
-              sequence_b = _only(b)
-            else
-              sequence_b = cache_b.sequence
-            end
-            sequence_ab = Any[sequence_a, sequence_b]
-
-            # XXX: this call is pretty slow (maybe takes 1/3 of total time in large n limit)
-            cache_c[ab] = (inds=inds_ab, cost=cost_ab, sequence=sequence_ab)
-          end # if cost ≤ currentcost_ab
-        end # for a in keys(cache[d]), b in keys(cache[c - d])
-      end # for d in 1:c÷2
-    end # for c in 2:n
-    previouscost = currentcost
-    currentcost = min(maxcost, nextcost * costfac)
-
-    # Reset all tensors to old
-    for i in 1:n
-      for a in eachindex(cache[i])
-        cache_a = cache[i][a]
-        cache[i][a] = (inds=cache_a.inds, cost=cache_a.cost, sequence=cache_a.sequence)
-      end
-    end
-  end # while isempty(cache[n])
-  Sⁿ = bitset(TensorSetT, Tlabels)
-  return cache[n][Sⁿ].sequence
-end
diff --git a/src/lib/ContractionSequenceOptimization/src/contraction_cost.jl b/src/lib/ContractionSequenceOptimization/src/contraction_cost.jl
deleted file mode 100644
index 508687a9b9..0000000000
--- a/src/lib/ContractionSequenceOptimization/src/contraction_cost.jl
+++ /dev/null
@@ -1,37 +0,0 @@
-
-left_associative_contraction_sequence(N::Integer) = reduce((x, y) -> Any[y, x], 1:N)
-left_associative_contraction_sequence(A) = left_associative_contraction_sequence(length(A))
-
-"""
-    contraction_cost(A; sequence)
-
-Return the cost of contracting the collection of ITensors according to the specified sequence,
-where the cost is measured in the number of floating point operations that would need to
-be performed to contract dense tensors of the dimensions specified by the indices of the tensors
-(so for now, sparsity is ignored in computing the costs).
-Pairwise costs are returned in a vector (contracting `N` tensors requires `N-1` pairwise
-contractions). You can use `sum(contraction_cost(A; sequence))` to get the total cost of the
-contraction.
-
-If no sequence is specified, left-associative contraction is used; in other words, the sequence
-is equivalent to `[[[[1, 2], 3], 4], …]`.
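-
-# Example
-
-A minimal sketch (illustrative; assumes `A`, `B`, and `C` are ITensors):
-
-```julia
-costs = contraction_cost([A, B, C]; sequence=Any[[1, 2], 3])
-total_cost = sum(costs) # total number of floating point operations
-```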
-"""
-function contraction_cost(A; sequence=left_associative_contraction_sequence(A))
-  pairwise_costs = Number[]
-  _contraction_cost!(pairwise_costs, A, sequence)
-  return pairwise_costs
-end
-
-function _contraction_cost!(pairwise_costs, A, sequence)
-  inds1 = _contraction_cost!(pairwise_costs, A, sequence[1])
-  inds2 = _contraction_cost!(pairwise_costs, A, sequence[2])
-  return _pairwise_contraction_cost!(pairwise_costs, inds1, inds2)
-end
-
-_contraction_cost!(pairwise_costs, As, sequence::Integer) = As[sequence]
-
-function _pairwise_contraction_cost!(pairwise_costs, A1, A2)
-  cost = dim(union(A1, A2))
-  push!(pairwise_costs, cost)
-  return symdiff(A1, A2)
-end
diff --git a/src/lib/ContractionSequenceOptimization/src/depth_first_constructive.jl b/src/lib/ContractionSequenceOptimization/src/depth_first_constructive.jl
deleted file mode 100644
index b814268b08..0000000000
--- a/src/lib/ContractionSequenceOptimization/src/depth_first_constructive.jl
+++ /dev/null
@@ -1,85 +0,0 @@
-
-#
-# `depth_first_constructive` is a very simple recursive implementation,
-# but it is more difficult to cap the costs, so it scales very badly
-#
-
-function depth_first_constructive(T::Vector{<:ITensor})
-  indsT = [inds(Tₙ) for Tₙ in T]
-  return depth_first_constructive(DimT, indsT)
-end
-
-function depth_first_constructive(
-  ::Type{DimT}, T::Vector{IndexSetT}
-) where {IndexSetT<:IndexSet,DimT}
-  labels, dims = label_dims(DimT, T)
-  nlabels = length(dims)
-  if nlabels ≤ 16
-    return depth_first_constructive(UInt16, labels, dims)
-  elseif nlabels ≤ 32
-    return depth_first_constructive(UInt32, labels, dims)
-  elseif nlabels ≤ 64
-    return depth_first_constructive(UInt64, labels, dims)
-  elseif nlabels ≤ 128
-    return depth_first_constructive(UInt128, labels, dims)
-  else
-    return depth_first_constructive(BitSet, labels, dims)
-  end
-end
-
-function depth_first_constructive(
-  ::Type{LabelSetT}, labels::Vector, dims::Vector
-) where {LabelSetT}
-  return depth_first_constructive(map(label -> bitset(LabelSetT, label), labels), dims)
-end
-
-function depth_first_constructive(
-  ::Type{LabelSetT}, ::Type{DimT}, T::Vector{<:ITensor}
-) where {LabelSetT,DimT}
-  indsT = [inds(Tₙ) for Tₙ in T]
-  return depth_first_constructive(LabelSetT, DimT, indsT)
-end
-
-function depth_first_constructive(
-  ::Type{LabelSetT}, ::Type{DimT}, T::Vector{IndexSetT}
-) where {IndexSetT<:IndexSet,LabelSetT,DimT}
-  labels, dims = label_dims(DimT, T)
-  return depth_first_constructive(map(label -> bitset(LabelSetT, label), labels), dims)
-end
-
-function depth_first_constructive(T::Vector, ind_dims::Vector)
-  optimal_cost = Ref(typemax(eltype(ind_dims)))
-  optimal_sequence = Vector{Pair{Int,Int}}(undef, length(T) - 1)
-  _depth_first_constructive!(
-    optimal_sequence, optimal_cost, Pair{Int,Int}[], T, ind_dims, collect(1:length(T)), 0
-  )
-  return pair_sequence_to_tree(optimal_sequence, length(T))
-end
-
-function _depth_first_constructive!(
-  optimal_sequence, optimal_cost, sequence, T, ind_dims, remaining, cost
-)
-  if length(remaining) == 1
-    # We should only get here if this contraction is the best so far;
-    # otherwise we would have hit the `continue` below
-    @assert cost ≤ optimal_cost[]
-    optimal_cost[] = cost
-    optimal_sequence .= sequence
-  end
-  for aᵢ in 1:(length(remaining) - 1), bᵢ in (aᵢ + 1):length(remaining)
-    a = remaining[aᵢ]
-    b = remaining[bᵢ]
-    current_cost, Tᵈ = contraction_cost(T[a], T[b], ind_dims)
-    new_cost = cost + current_cost
-    if new_cost ≥ optimal_cost[]
-      continue
-    end
-    new_sequence = push!(copy(sequence), a => b)
-    new_T = push!(copy(T), Tᵈ)
-    new_remaining = deleteat!(copy(remaining), (aᵢ, bᵢ))
-    push!(new_remaining, length(new_T))
-    _depth_first_constructive!(
-      optimal_sequence, optimal_cost, new_sequence, new_T, ind_dims, new_remaining, new_cost
-    )
-  end
-end
diff --git a/src/lib/ContractionSequenceOptimization/src/three_tensors.jl b/src/lib/ContractionSequenceOptimization/src/three_tensors.jl
deleted file mode 100644
index 55763a62db..0000000000
--- a/src/lib/ContractionSequenceOptimization/src/three_tensors.jl
+++ /dev/null
@@ -1,69 +0,0 @@
-
-#
-# Special case for three tensors
-#
-
-function compute_cost(external_dims::Tuple{Int,Int,Int}, internal_dims::Tuple{Int,Int,Int})
-  dim11, dim22, dim33 = external_dims
-  dim12, dim23, dim31 = internal_dims
-  cost12 = dim11 * dim22 * dim12 * dim23 * dim31
-  return cost12 + dim11 * dim22 * dim33 * dim31 * dim23
-end
-
-function three_tensor_contraction_sequence(which_sequence::Int)::Vector{Any}
-  @assert 1 ≤ which_sequence ≤ 3
-  return if which_sequence == 1
-    Any[3, [1, 2]]
-  elseif which_sequence == 2
-    Any[1, [2, 3]]
-  else
-    Any[2, [3, 1]]
-  end
-end
-
-function optimal_contraction_sequence(is1, is2, is3)
-  N1 = length(is1)
-  N2 = length(is2)
-  N3 = length(is3)
-  dim2 = dim(is2)
-  dim3 = dim(is3)
-  dim11 = 1
-  dim12 = 1
-  dim31 = 1
-  @inbounds for n1 in 1:N1
-    i1 = is1[n1]
-    n2 = findfirst(==(i1), is2)
-    if isnothing(n2)
-      n3 = findfirst(==(i1), is3)
-      if isnothing(n3)
-        dim11 *= dim(i1)
-        continue
-      end
-      dim31 *= dim(i1)
-      continue
-    end
-    dim12 *= dim(i1)
-  end
-  dim23 = 1
-  @inbounds for n2 in 1:length(is2)
-    i2 = is2[n2]
-    n3 = findfirst(==(i2), is3)
-    if !isnothing(n3)
-      dim23 *= dim(i2)
-    end
-  end
-  dim22 = dim2 ÷ (dim12 * dim23)
-  dim33 = dim3 ÷ (dim23 * dim31)
-  external_dims1 = (dim11, dim22, dim33)
-  internal_dims1 = (dim12, dim23, dim31)
-  external_dims2 = (dim22, dim33, dim11)
-  internal_dims2 = (dim23, dim31, dim12)
-  external_dims3 = (dim33, dim11, dim22)
-  internal_dims3 = (dim31, dim12, dim23)
-  cost1 = compute_cost(external_dims1, internal_dims1)
-  cost2 = compute_cost(external_dims2, internal_dims2)
-  cost3 = compute_cost(external_dims3, internal_dims3)
-  mincost, which_sequence = findmin((cost1, cost2, cost3))
-  sequence = three_tensor_contraction_sequence(which_sequence)
-  return sequence
-end
diff --git a/src/lib/ContractionSequenceOptimization/src/utils.jl b/src/lib/ContractionSequenceOptimization/src/utils.jl
deleted file mode 100644
index 509050149c..0000000000
--- a/src/lib/ContractionSequenceOptimization/src/utils.jl
+++ /dev/null
@@ -1,371 +0,0 @@
-
-#
-# General helper functionality
-#
-
-#
-# Operations for tree data structures
-#
-
-"""
-    deepmap(f, tree; filter=(x -> x isa AbstractArray))
-
-Recursive map on a tree-like data structure.
-`filter` is a function that returns `true` if the iteration
-should continue, or `false` if the iteration should
-stop (for example, because we are at a leaf and the function
-`f` should be applied).
-
-```julia
-julia> deepmap(x -> 2x, [1, [2, [3, 4]]])
-[2, [4, [6, 8]]]
-
-julia> deepmap(x -> 2 .* x, [1, (2, [3, 4])])
-2-element Vector{Any}:
- 2
- (4, [6, 8])
-
-julia> deepmap(x -> 3x, [[1 2; 3 4], [5 6; 7 8]])
-2-element Vector{Matrix{Int64}}:
- [3 6; 9 12]
- [15 18; 21 24]
-
-julia> deepmap(x -> 2x, (1, (2, (3, 4))); filter=(x -> x isa Tuple))
-(2, (4, (6, 8)))
-```
-"""
-function deepmap(f, tree; filter=(x -> x isa AbstractArray))
-  return filter(tree) ? map(t -> deepmap(f, t; filter=filter), tree) : f(tree)
-end
-
-#
-# Contracting index sets and getting costs
-#
-
-# TODO: make a type:
-# ShortBitSet{T <: Unsigned}
-#   data::T
-# end
-#
-# That is like a BitSet/BitVector but with a maximum set size.
-# Aliases such as:
-#
-# const SBitSet64 = SBitSet{UInt64}
-#
-# would be helpful to specify a BitSet with a maximum number of
-# 64 elements in the set.
-# (See https://discourse.julialang.org/t/parse-an-array-of-bits-bitarray-to-an-integer/42361/11).
-
-# Previously we used the definition in NDTensors:
-#import NDTensors: dim
-import ITensors: dim
-
-# `is` could be Vector{Int} for BitSet
-function dim(is::IndexSetT, ind_dims::Vector) where {IndexSetT<:Union{Vector{Int},BitSet}}
-  dim = one(eltype(ind_dims))
-  for i in is
-    dim *= ind_dims[i]
-  end
-  return dim
-end
-
-function dim(is::Unsigned, ind_dims::Vector{DimT}) where {DimT}
-  _isemptyset(is) && return one(eltype(ind_dims))
-  dim = one(eltype(ind_dims))
-  i = 1
-  @inbounds while !iszero(is)
-    if isodd(is)
-      dim, overflow = Base.Checked.mul_with_overflow(dim, ind_dims[i])
-      overflow && return zero(DimT)
-    end
-    is = is >> 1
-    i += 1
-  end
-  return dim
-end
-
-function contraction_cost(indsTᵃ::BitSet, indsTᵇ::BitSet, dims::Vector)
-  indsTᵃTᵇ = _symdiff(indsTᵃ, indsTᵇ)
-  dim_a = dim(indsTᵃ, dims)
-  dim_b = dim(indsTᵇ, dims)
-  dim_ab = dim(indsTᵃTᵇ, dims)
-  # Perform the sqrt first to avoid overflow.
-  # Alternatively, use a larger integer type.
- cost = round(Int, sqrt(dim_a) * sqrt(dim_b) * sqrt(dim_ab)) - return cost, indsTᵃTᵇ -end - -function contraction_cost( - indsTᵃ::IndexSetT, indsTᵇ::IndexSetT, dims::Vector -) where {IndexSetT<:Unsigned} - unionTᵃTᵇ = _union(indsTᵃ, indsTᵇ) - cost = dim(unionTᵃTᵇ, dims) - indsTᵃTᵇ = _setdiff(unionTᵃTᵇ, _intersect(indsTᵃ, indsTᵇ)) - return cost, indsTᵃTᵇ -end - -# -# Convert indices into unique integer labels -# - -function contraction_labels!(labels1, labels2, is1, is2) - nextlabel = 1 - nextlabel = common_contraction_labels!(labels1, labels2, is1, is2, nextlabel) - nextlabel = uncommon_contraction_labels!(labels1, is1, nextlabel) - nextlabel = uncommon_contraction_labels!(labels2, is2, nextlabel) - return labels1, labels2 -end - -# Compute the common contraction labels and return the next label -function common_contraction_labels!(labels1, labels2, is1, is2, label) - N1 = length(is1) - N2 = length(is2) - @inbounds for n1 in 1:N1, n2 in 1:N2 - i1 = is1[n1] - i2 = is2[n2] - if i1 == i2 - labels1[n1] = labels2[n2] = label - label += 1 - end - end - return label -end - -function uncommon_contraction_labels!(labels, is, label) - N = length(labels) - @inbounds for n in 1:N - if iszero(labels[n]) - labels[n] = label - label += 1 - end - end - return label -end - -function contraction_labels!(labels, is) - ntensors = length(is) - nextlabel = 1 - # Loop through each tensor pair searching for - # common indices - @inbounds for n1 in 1:(ntensors - 1), n2 in (n1 + 1):ntensors - nextlabel = common_contraction_labels!( - labels[n1], labels[n2], is[n1], is[n2], nextlabel - ) - end - @inbounds for n in 1:ntensors - nextlabel = uncommon_contraction_labels!(labels[n], is[n], nextlabel) - end - return nextlabel - 1 -end - -function empty_labels(is::NTuple{N}) where {N} - return ntuple(n -> fill(0, length(is[n])), Val(N)) -end - -function empty_labels(is::Vector) - ntensors = length(is) - labels = Vector{Vector{Int}}(undef, ntensors) - @inbounds for n in 1:ntensors - labels[n] = fill(0, length(is[n])) - end - return labels -end - -function contraction_labels(is) - labels = empty_labels(is) - contraction_labels!(labels, is) - return labels -end - -contraction_labels(is...) = contraction_labels(is) - -# -# Use a Dict as a cache to map the indices to the integer label -# This only helps with many nodes/tensors (nnodes > 30) -# TODO: determine the crossover when this is useful and use -# it in `depth_first_constructive`/`breadth_first_constructive` -# - -contraction_labels_caching(is) = contraction_labels_caching(eltype(eltype(is)), is) - -function contraction_labels_caching(::Type{IndexT}, is) where {IndexT} - labels = empty_labels(is) - return contraction_labels_caching!(labels, IndexT, is) -end - -function contraction_labels_caching!(labels, ::Type{IndexT}, is) where {IndexT} - N = length(is) - ind_to_label = Dict{IndexT,Int}() - label = 0 - @inbounds for n in 1:N - isₙ = is[n] - labelsₙ = labels[n] - @inbounds for j in 1:length(labelsₙ) - i = isₙ[j] - i_label = get!(ind_to_label, i) do - label += 1 - end - labelsₙ[j] = i_label - end - end - return label -end - -# -# Compute the labels and also return a data structure storing the dims. 
-#
-
-function label_dims(::Type{DimT}, is) where {DimT<:Integer}
-  labels = empty_labels(is)
-  nlabels = contraction_labels!(labels, is)
-  dims = fill(zero(DimT), nlabels)
-  @inbounds for i in 1:length(is)
-    labelsᵢ = labels[i]
-    isᵢ = is[i]
-    @inbounds for n in 1:length(labelsᵢ)
-      lₙ = labelsᵢ[n]
-      if iszero(dims[lₙ])
-        dims[lₙ] = dim(isᵢ[n])
-      end
-    end
-  end
-  return labels, dims
-end
-
-label_dims(is...) = label_dims(is)
-
-# Convert a contraction sequence in pair form to tree format.
-# This is used in `depth_first_constructive` to convert the output.
-function pair_sequence_to_tree(pairs::Vector{Pair{Int,Int}}, N::Int)
-  trees = Any[1:N...]
-  for p in pairs
-    push!(trees, Any[trees[p[1]], trees[p[2]]])
-  end
-  return trees[end]
-end
-
-#
-# BitSet utilities
-#
-
-function _cmp(A::BitSet, B::BitSet)
-  for (a, b) in zip(A, B)
-    if !isequal(a, b)
-      return isless(a, b) ? -1 : 1
-    end
-  end
-  return cmp(length(A), length(B))
-end
-
-# Returns true when `A` is less than `B` in lexicographic order.
-_isless(A::BitSet, B::BitSet) = _cmp(A, B) < 0
-
-bitset(::Type{BitSet}, ints) = BitSet(ints)
-
-function bitset(::Type{T}, ints) where {T<:Unsigned}
-  set = zero(T)
-  u = one(T)
-  for i in ints
-    set |= (u << (i - 1))
-  end
-  return set
-end
-
-# Return a vector of the positions of the nonzero bits
-# Used for debugging
-function findall_nonzero_bits(i::Unsigned)
-  nonzeros = Int[]
-  n = 1
-  @inbounds while !iszero(i)
-    if isodd(i)
-      push!(nonzeros, n)
-    end
-    i = i >> 1
-    n += 1
-  end
-  return nonzeros
-end
-
-# Return the position of the first nonzero bit
-function findfirst_nonzero_bit(i::Unsigned)
-  n = 0
-  @inbounds while !iszero(i)
-    if isodd(i)
-      return n + 1
-    end
-    i = i >> 1
-    n += 1
-  end
-  return n
-end
-
-_isless(s1::T, s2::T) where {T<:Unsigned} = s1 < s2
-_intersect(s1::BitSet, s2::BitSet) = intersect(s1, s2)
-_intersect(s1::T, s2::T) where {T<:Unsigned} = s1 & s2
-_union(s1::BitSet, s2::BitSet) = union(s1, s2)
-_union(s1::T, s2::T) where {T<:Unsigned} = s1 | s2
-_setdiff(s1::BitSet, s2::BitSet) = setdiff(s1, s2)
-_setdiff(s1::T, s2::T) where {T<:Unsigned} = s1 & (~s2)
-_symdiff(s1::BitSet, s2::BitSet) = symdiff(s1, s2)
-_symdiff(s1::T, s2::T) where {T<:Unsigned} = xor(s1, s2)
-_isemptyset(s::BitSet) = isempty(s)
-_isemptyset(s::Unsigned) = iszero(s)
-
-# TODO: use _first instead, optimize to avoid using _set
-_only(s::BitSet) = only(s)
-_only(s::Unsigned) = findfirst_nonzero_bit(s)
-
-#
-# Adjacency matrix and connected components
-#
-
-# For a network of tensors T (stored as index labels), return the adjacency matrix.
-# Used to break up the network into disconnected parts.
-function adjacencymatrix(T::Vector, alldims::Vector)
-  N = length(T)
-  _adjacencymatrix = falses(N, N)
-  for nᵢ in 1:(N - 1), nⱼ in (nᵢ + 1):N
-    if dim(_intersect(T[nᵢ], T[nⱼ]), alldims) > 1
-      _adjacencymatrix[nᵢ, nⱼ] = _adjacencymatrix[nⱼ, nᵢ] = true
-    end
-  end
-  return _adjacencymatrix
-end
-
-# For a given n x n adjacency matrix A, connectedcomponents returns a list of
-# integer vectors, where each vector groups the indices of the vertices of one
-# connected component of the graph encoded by A. The number of connected
-# components is given by length(components).
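-#
-# For example, for
-#
-#   A = Bool[0 1 0; 1 0 0; 0 0 0]
-#
-# (vertices 1 and 2 are connected and vertex 3 is isolated),
-# connectedcomponents(A) returns [[1, 2], [3]].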
-function connectedcomponents(A::AbstractMatrix{Bool})
-  n = size(A, 1)
-  @assert size(A, 2) == n
-  components = Vector{Vector{Int}}(undef, 0)
-  assignedlist = falses((n,))
-  for i in 1:n
-    if !assignedlist[i]
-      assignedlist[i] = true
-      checklist = [i]
-      currentcomponent = [i]
-      while !isempty(checklist)
-        j = pop!(checklist)
-        for k in findall(A[j, :])
-          if !assignedlist[k]
-            push!(currentcomponent, k)
-            push!(checklist, k)
-            assignedlist[k] = true
-          end
-        end
-      end
-      push!(components, currentcomponent)
-    end
-  end
-  return components
-end
-
-# For a network of tensors T (stored as index labels), return the connected components
-# (splits up T into the connected components).
-function connectedcomponents(T::Vector, alldims::Vector)
-  return connectedcomponents(adjacencymatrix(T, alldims))
-end
diff --git a/src/lib/ITensorVisualizationCore/README.md b/src/lib/ITensorVisualizationCore/README.md
deleted file mode 100644
index a7493e266c..0000000000
--- a/src/lib/ITensorVisualizationCore/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# ITensorVisualizationCore
-
-This package contains the minimal functionality of the package ITensorVisualization.
diff --git a/src/lib/ITensorVisualizationCore/src/ITensorVisualizationCore.jl b/src/lib/ITensorVisualizationCore/src/ITensorVisualizationCore.jl
deleted file mode 100644
index a538663264..0000000000
--- a/src/lib/ITensorVisualizationCore/src/ITensorVisualizationCore.jl
+++ /dev/null
@@ -1,16 +0,0 @@
-module ITensorVisualizationCore
-
-using Compat
-using ..ITensors
-
-export @visualize,
-  @visualize!,
-  @visualize_noeval,
-  @visualize_noeval!,
-  @visualize_sequence,
-  @visualize_sequence_noeval
-
-# Visualizing ITensor networks
-include("visualize_macro.jl")
-
-end
diff --git a/src/lib/ITensorVisualizationCore/src/visualize_macro.jl b/src/lib/ITensorVisualizationCore/src/visualize_macro.jl
deleted file mode 100644
index 0ae21d6e25..0000000000
--- a/src/lib/ITensorVisualizationCore/src/visualize_macro.jl
+++ /dev/null
@@ -1,271 +0,0 @@
-visualize(args...; kwargs...) = nothing
-visualize!(args...; kwargs...) = nothing
-visualize_sequence(args...; kwargs...) = nothing
-
-is_kwarg(arg_or_kwarg::Symbol) = false
-is_kwarg(arg_or_kwarg::Expr) = (arg_or_kwarg.head == :parameters)
-
-function has_kwargs(args_kwargs::Vector)
-  isempty(args_kwargs) && return false
-  return is_kwarg(first(args_kwargs))
-end
-
-function get_kwargs(args_kwargs::Vector)
-  @assert has_kwargs(args_kwargs)
-  return first(args_kwargs)
-end
-
-function get_kwarg(kwargs::Expr, key::Symbol)
-  n = findfirst(kw -> kw.args[1] == key, kwargs.args)
-  if !isnothing(n)
-    @assert kwargs.args[n].head == :kw
-    return esc(kwargs.args[n].args[2])
-  end
-  return nothing
-end
-
-function args_kwargs(ex::Vector)
-  kwargs = has_kwargs(ex) ? get_kwargs(ex) : :()
-  args = has_kwargs(ex) ? ex[2:end] : ex
-  return args, kwargs
-end
-
-function function_args_kwargs(ex::Symbol)
-  func = :identity
-  args = [ex]
-  kwargs = :()
-  iscollection = true
-  return func, args, kwargs, iscollection
-end
-
-function function_args_kwargs(ex::Expr)
-  if ex.head == :call
-    func = first(ex.args)
-    args, kwargs = args_kwargs(ex.args[2:end])
-    iscollection = true
-  elseif ex.head == :ref
-    #func, args, kwargs, iscollection = function_args_kwargs(Symbol(ex.args))
-    func = :identity
-    args = [ex]
-    kwargs = :()
-    iscollection = false
-  else
-    dump(ex)
-    error("Visualizing expression $ex not supported right now.")
-  end
-  return func, args, kwargs, iscollection
-end
-
-expr_to_string(s::Symbol) = String(s)
-expr_to_string(ex::Expr) = String(repr(ex))[3:(end - 1)]
-
-# Take the symbols of the arguments and output
-# the labels if there are multiple inputs or
-# the prefix for the labels if there is only
-# one input.
-function vertex_labels_kwargs(args, iscollection)
-  if iscollection && isone(length(args))
-    vertex_labels_kw = :vertex_labels_prefix
-    vertex_labels_arg = string(only(args))
-  else
-    vertex_labels_kw = :vertex_labels
-    vertex_labels_arg = string.(args)
-  end
-  return vertex_labels_kw, vertex_labels_arg
-end
-
-function func_args_sequence_kwargs(ex, vis_kwargs...)
-  func, args, kwargs, iscollection = function_args_kwargs(ex)
-  sequence = get_kwarg(kwargs, :sequence)
-  vertex_labels_kw, vertex_labels_arg = vertex_labels_kwargs(args, iscollection)
-  # Merge labels kwarg with kwargs
-  vis_kwargs_dict = Dict([
-    vis_kwarg.args[1] => vis_kwarg.args[2] for vis_kwarg in vis_kwargs
-  ])
-  vertex_labels_kwarg_dict = Dict(vertex_labels_kw => vertex_labels_arg)
-  merged_kwargs_dict = merge(vertex_labels_kwarg_dict, vis_kwargs_dict)
-  merged_kwargs_expr = [:($k = $v) for (k, v) in pairs(merged_kwargs_dict)]
-  return func, esc.(args), sequence, esc.(merged_kwargs_expr)
-end
-
-function visualize_expr(vis_func, ex::Union{Symbol,Expr}, vis_kwargs::Expr...)
-  func, args, sequence, kwargs = func_args_sequence_kwargs(ex, vis_kwargs...)
-  e = quote
-    $(vis_func)($(func), ($(args...),), $(sequence); $(kwargs...))
-  end
-  return e
-end
-
-function visualize_expr!(fig, vis_func!, ex::Union{Symbol,Expr}, vis_kwargs::Expr...)
-  func, args, sequence, kwargs = func_args_sequence_kwargs(ex, vis_kwargs...)
-  e = quote
-    $(vis_func!)($(esc(fig)), $(func), ($(args...),), $(sequence); $(kwargs...))
-  end
-  return e
-end
-
-"""
-    @visualize
-
-Visualize a contraction of ITensors, returning the result of the contraction.
-
-The contraction should be written in terms of a series of ITensors contracted with `*`.
-
-# Examples
-
-```julia
-using ITensors
-using ITensorUnicodePlots # Must load a backend or else no plots will be made
-
-i = Index(2, "index_i")
-j = Index(10, "index_j")
-k = Index(40, "index_k")
-l = Index(40, "index_l")
-m = Index(40, "index_m")
-A = random_itensor(i, j, k)
-B = random_itensor(i, j, l, m)
-C = random_itensor(k, l)
-
-# Contract the tensors over the common indices
-# and visualize the results
-ABC = @visualize A * B * C
-
-AB = @visualize A * B
-# Use readline() to pause between plots
-readline()
-ABC = @visualize AB * C vertex_labels = ["A*B", "C"]
-readline()
-
-# Save the results to figures for viewing later
-AB = @visualize fig1 A * B
-ABC = @visualize fig2 AB * C vertex_labels = ["A*B", "C"]
-
-display(fig1)
-readline()
-display(fig2)
-readline()
-```
-
-# Keyword arguments:
-
-  - `vertex_labels`: Custom tensor labels to display on the vertices of the
-    diagram.
If not specified, they are determined automatically from the input to the macro. - - `edge_labels=IndexLabels()`: A list of the edge labels or an - `AbstractEdgeLabels` object specifying how they should be made. - - `arrow_show`: Whether or not to show arrows on the edges. -""" -macro visualize(fig::Symbol, ex::Symbol, kwargs::Expr...) - e = quote - $(esc(fig)) = $(visualize_expr(visualize, ex, kwargs...)) - $(esc(ex)) - end - return e -end - -macro visualize!(fig, ex::Symbol, kwargs::Expr...) - e = quote - $(visualize_expr!(fig, visualize!, ex, kwargs...)) - $(esc(ex)) - end - return e -end - -macro visualize(ex::Symbol) - e = quote - display($(visualize_expr(visualize, ex))) - $(esc(ex)) - end - return e -end - -macro visualize(ex_or_fig::Symbol, ex_or_kwarg::Expr, last_kwargs::Expr...) - if ex_or_kwarg.head == :(=) - # The second input is a keyword argument which means that the - # first input is the collection to visualize (no figure output binding specified) - ex = ex_or_fig - kwargs = (ex_or_kwarg, last_kwargs...) - e = quote - display($(visualize_expr(visualize, ex, kwargs...))) - $(esc(ex)) - end - else - # The second input is not a keyword argument which means that the - # first input is the binding for the figure output, the second is the expression - # to visualize - fig = ex_or_fig - ex = ex_or_kwarg - kwargs = last_kwargs - e = quote - $(esc(fig)) = $(visualize_expr(visualize, ex, kwargs...)) - $(esc(ex)) - end - end - return e -end - -macro visualize!(fig, ex::Expr, kwargs::Expr...) - e = quote - $(visualize_expr!(fig, visualize!, ex, kwargs...)) - $(esc(ex)) - end - return e -end - -macro visualize(ex::Expr, kwargs::Expr...) - e = quote - display($(visualize_expr(visualize, ex, kwargs...))) - $(esc(ex)) - end - return e -end - -macro visualize_noeval(ex::Symbol, kwargs::Expr...) - e = quote - $(visualize_expr(visualize, ex, kwargs...)) - end - return e -end - -macro visualize_noeval(ex::Expr, kwargs::Expr...) - e = quote - $(visualize_expr(visualize, ex, kwargs...)) - end - return e -end - -macro visualize_noeval!(fig, ex::Symbol, kwargs::Expr...) - e = quote - $(visualize_expr!(fig, visualize!, ex, kwargs...)) - end - return e -end - -macro visualize_noeval!(fig, ex::Expr, kwargs::Expr...) - e = quote - $(visualize_expr!(fig, visualize!, ex, kwargs...)) - end - return e -end - -macro visualize_sequence(fig::Symbol, ex::Expr, kwargs::Expr...) - e = quote - $(esc(fig)) = $(visualize_expr(visualize_sequence, ex, kwargs...)) - $(esc(ex)) - end - return e -end - -macro visualize_sequence(ex::Expr, kwargs::Expr...) - e = quote - display($(visualize_expr(visualize_sequence, ex, kwargs...))) - $(esc(ex)) - end - return e -end - -macro visualize_sequence_noeval(ex::Expr, kwargs::Expr...) 
- e = quote - $(visualize_expr(visualize_sequence, ex, kwargs...)) - end - return e -end diff --git a/src/lib/ITensorsNamedDimsArraysExt/examples/Project.toml b/src/lib/ITensorsNamedDimsArraysExt/examples/Project.toml deleted file mode 100644 index 3d5fd7544e..0000000000 --- a/src/lib/ITensorsNamedDimsArraysExt/examples/Project.toml +++ /dev/null @@ -1,4 +0,0 @@ -[deps] -Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" -ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" diff --git a/src/lib/ITensorsNamedDimsArraysExt/examples/example_dmrg.jl b/src/lib/ITensorsNamedDimsArraysExt/examples/example_dmrg.jl deleted file mode 100644 index 9129399155..0000000000 --- a/src/lib/ITensorsNamedDimsArraysExt/examples/example_dmrg.jl +++ /dev/null @@ -1,24 +0,0 @@ -using Adapt: adapt -using ITensorMPS: MPO, OpSum, dmrg, random_mps, siteinds -using ITensors.ITensorsNamedDimsArraysExt: to_nameddimsarray - -function main(; n, conserve_qns=false, nsweeps=3, cutoff=1e-4, arraytype=Array) - s = siteinds("S=1/2", n; conserve_qns) - ℋ = OpSum() - ℋ = sum(j -> ("S+", j, "S-", j + 1), 1:(n - 1); init=ℋ) - ℋ = sum(j -> ("S-", j, "S+", j + 1), 1:(n - 1); init=ℋ) - ℋ = sum(j -> ("Sz", j, "Sz", j + 1), 1:(n - 1); init=ℋ) - H = MPO(ℋ, s) - ψ₀ = random_mps(s, j -> isodd(j) ? "↑" : "↓") - - H = adapt(arraytype, H) - ψ = adapt(arraytype, ψ₀) - e, ψ = dmrg(H, ψ; nsweeps, cutoff) - - Hₙₐ = to_nameddimsarray(H) - Hₙₐ = adapt(arraytype, Hₙₐ) - ψₙₐ = to_nameddimsarray(ψ₀) - ψₙₐ = adapt(arraytype, ψₙₐ) - eₙₐ, ψₙₐ = dmrg(Hₙₐ, ψₙₐ; nsweeps, cutoff) - return (; e, eₙₐ) -end diff --git a/src/lib/ITensorsNamedDimsArraysExt/examples/example_readme.jl b/src/lib/ITensorsNamedDimsArraysExt/examples/example_readme.jl deleted file mode 100644 index b7404e3781..0000000000 --- a/src/lib/ITensorsNamedDimsArraysExt/examples/example_readme.jl +++ /dev/null @@ -1,29 +0,0 @@ -using ITensors: Index, hasinds, permute - -function main() - i = Index(2, "i") - j = Index(2, "j") - k = Index(2, "k") - - a = randn(i, j) - b = randn(j, k) - - @show rand(Int, i, j) - @show zeros(Float32, i, j) - @show ones(Float32, i, j) - @show fill(1.2, i, j) - - a[j => 1, i => 2] = 21 - @show a[2, 1] == 21 - @show a[j => 1, i => 2] == 21 - - c = a * b - @show hasinds(c, (i, k)) - @show permute(a, (j, i)) - - # Broken - a′ = randn(j, i) - @show a + a′ -end - -main() diff --git a/src/lib/ITensorsNamedDimsArraysExt/src/ITensorsNamedDimsArraysExt.jl b/src/lib/ITensorsNamedDimsArraysExt/src/ITensorsNamedDimsArraysExt.jl deleted file mode 100644 index 4b2c017fdc..0000000000 --- a/src/lib/ITensorsNamedDimsArraysExt/src/ITensorsNamedDimsArraysExt.jl +++ /dev/null @@ -1,8 +0,0 @@ -module ITensorsNamedDimsArraysExt -include("index.jl") -include("itensor.jl") -include("indexing.jl") -include("tensoralgebra.jl") -include("combiner.jl") -include("to_nameddimsarray.jl") -end diff --git a/src/lib/ITensorsNamedDimsArraysExt/src/combiner.jl b/src/lib/ITensorsNamedDimsArraysExt/src/combiner.jl deleted file mode 100644 index 880e8ec203..0000000000 --- a/src/lib/ITensorsNamedDimsArraysExt/src/combiner.jl +++ /dev/null @@ -1,22 +0,0 @@ -# Combiner -using ..NDTensors.NamedDimsArrays: AbstractNamedDimsArray, dimnames, name -using ..NDTensors.TensorAlgebra: TensorAlgebra, fusedims, splitdims -using NDTensors: NDTensors, Tensor, Combiner - -function ITensors._contract(na::AbstractNamedDimsArray, c::Tensor{<:Any,<:Any,<:Combiner}) - split_names = name.(NDTensors.uncombinedinds(c)) - fused_name = name(NDTensors.combinedind(c)) - # Use to 
determine if we are doing fusion or splitting. - split_dims = map(split_name -> findfirst(isequal(split_name), dimnames(na)), split_names) - fused_dim = findfirst(isequal(fused_name), dimnames(na)) - return if isnothing(fused_dim) - # Dimension fusion (joining, combining) - @assert all(!isnothing, split_dims) - fusedims(na, split_names => fused_name) - else - # Dimension unfusion (splitting, uncombining) - @assert all(isnothing, split_dims) - split_dims = NamedInt.(NDTensors.uncombinedinds(c)) - splitdims(na, fused_name => split_dims) - end -end diff --git a/src/lib/ITensorsNamedDimsArraysExt/src/index.jl b/src/lib/ITensorsNamedDimsArraysExt/src/index.jl deleted file mode 100644 index 0114635d3c..0000000000 --- a/src/lib/ITensorsNamedDimsArraysExt/src/index.jl +++ /dev/null @@ -1,65 +0,0 @@ -using ..ITensors: ITensors, Index, IndexID, dim, noprime, prime, settags, space -using ..NDTensors: NDTensors, AliasStyle -using ..NDTensors.NamedDimsArrays: - NamedDimsArrays, - AbstractNamedDimsArray, - NamedInt, - dimnames, - named, - name, - replacenames, - unname - -# TODO: NamedDimsArrays.named(space, ::IndexID) = Index(...) -NamedDimsArrays.name(i::Index) = IndexID(i) -NamedDimsArrays.unname(i::Index) = space(i) -function ITensors.Index(i::NamedInt{<:Any,<:IndexID}) - space = unname(i) - n = name(i) - dir = ITensors.Neither - return Index(n.id, space, dir, n.tags, n.plev) -end -function NamedDimsArrays.NamedInt(i::Index) - return named(dim(i), name(i)) -end - -NamedDimsArrays.randname(i::IndexID) = IndexID(rand(UInt64), "", 0) - -# TODO: This is piracy, change this? -Base.:(==)(i1::IndexID, i2::Index) = (i1 == name(i2)) -Base.:(==)(i1::Index, i2::IndexID) = (name(i1) == i2) - -# Accessors -Base.convert(type::Type{<:IndexID}, i::Index) = type(i) -# TODO: Use this if `size` output named dimensions. -# NDTensors.inds(na::AbstractNamedDimsArray) = Index.(size(na)) -# TODO: defined `namedsize` and use that here. -function NDTensors.inds(na::AbstractNamedDimsArray) - return Index.(named.(size(na), dimnames(na))) -end -NDTensors.storage(na::AbstractNamedDimsArray) = na - -NDTensors.dim(na::AbstractNamedDimsArray) = length(na) - -# Priming, tagging `IndexID` -ITensors.prime(i::IndexID) = IndexID(prime(Index(named(0, i)))) -ITensors.noprime(i::IndexID) = IndexID(noprime(Index(named(0, i)))) -function ITensors.settags(is::Tuple{Vararg{IndexID}}, args...; kwargs...) - return IndexID.(settags(map(i -> Index(named(0, i)), is), args...; kwargs...)) -end - -# Priming, tagging `AbstractNamedDimsArray` -ITensors.prime(na::AbstractNamedDimsArray) = named(unname(na), prime.(dimnames(na))) -ITensors.noprime(na::AbstractNamedDimsArray) = named(unname(na), noprime.(dimnames(na))) -function ITensors.settags(na::AbstractNamedDimsArray, args...; kwargs...) - return named(unname(na), settags(dimnames(na), args...; kwargs...)) -end -function ITensors.replaceind(na::AbstractNamedDimsArray, i::Index, j::Index) - return replacenames(na, name(i) => name(j)) -end -function ITensors.replaceinds(na::AbstractNamedDimsArray, is, js) - return replacenames(na, (name.(is) .=> name.(js))...) -end - -# TODO: Complex conjugate and flop arrows! 
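`ITensors._contract` above chooses between fusion and splitting by checking whether the combined name already appears among the array's dimension names. A plain-`reshape` sketch of the same decision (toy function; it assumes the affected dimensions are the leading ones, which the real `fusedims`/`splitdims` do not require):

```julia
# Fuse when the fused name is absent (all split names must be present);
# split when the fused name is present (no split name may be present).
function fuse_or_split(a::AbstractArray, dimnames, split_names, fused_name, split_sizes)
    if fused_name ∉ dimnames
        @assert all(in(dimnames), split_names)    # fusion (combining)
        return reshape(a, prod(split_sizes), size(a)[(length(split_names) + 1):end]...)
    else
        @assert all(!in(dimnames), split_names)   # unfusion (splitting)
        return reshape(a, split_sizes..., size(a)[2:end]...)
    end
end

a = rand(2, 3, 4)
f = fuse_or_split(a, (:i, :j, :k), (:i, :j), :c, (2, 3))   # 6×4
@assert size(fuse_or_split(f, (:c, :k), (:i, :j), :c, (2, 3))) == (2, 3, 4)
```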
-ITensors.dag(::AliasStyle, na::AbstractNamedDimsArray) = na diff --git a/src/lib/ITensorsNamedDimsArraysExt/src/indexing.jl b/src/lib/ITensorsNamedDimsArraysExt/src/indexing.jl deleted file mode 100644 index c05ae69481..0000000000 --- a/src/lib/ITensorsNamedDimsArraysExt/src/indexing.jl +++ /dev/null @@ -1,8 +0,0 @@ -using ..ITensors: ITensors -function ITensors._getindex(na::AbstractNamedDimsArray, I::Pair...) - return na[I...] -end -function ITensors._setindex!!(na::AbstractNamedDimsArray, value::Int64, I::Pair...) - na[I...] = value - return na -end diff --git a/src/lib/ITensorsNamedDimsArraysExt/src/itensor.jl b/src/lib/ITensorsNamedDimsArraysExt/src/itensor.jl deleted file mode 100644 index a9200a0efd..0000000000 --- a/src/lib/ITensorsNamedDimsArraysExt/src/itensor.jl +++ /dev/null @@ -1,47 +0,0 @@ -using ITensors: ITensors -using Random: AbstractRNG, default_rng - -# Constructors -ITensors.ITensor(na::AbstractNamedDimsArray) = ITensors._ITensor(na) -ITensors.itensor(na::AbstractNamedDimsArray) = ITensors._ITensor(na) - -# Convenient constructors -default_eltype() = Float64 -for f in [:rand, :randn] - @eval begin - function Base.$f( - rng::AbstractRNG, elt::Type{<:Number}, dims::Tuple{Index,Vararg{Index}} - ) - return ITensor($f(rng, elt, NamedInt.(dims))) - end - function Base.$f( - rng::AbstractRNG, elt::Type{<:Number}, dim1::Index, dims::Vararg{Index} - ) - return $f(rng, elt, (dim1, dims...)) - end - Base.$f(elt::Type{<:Number}, dims::Tuple{Index,Vararg{Index}}) = - $f(default_rng(), elt, dims) - Base.$f(elt::Type{<:Number}, dim1::Index, dims::Vararg{Index}) = - $f(elt, (dim1, dims...)) - Base.$f(dims::Tuple{Index,Vararg{Index}}) = $f(default_eltype(), dims) - Base.$f(dim1::Index, dims::Vararg{Index}) = $f((dim1, dims...)) - end -end -for f in [:zeros, :ones] - @eval begin - function Base.$f(elt::Type{<:Number}, dims::Tuple{Index,Vararg{Index}}) - return ITensor($f(elt, NamedInt.(dims))) - end - function Base.$f(elt::Type{<:Number}, dim1::Index, dims::Vararg{Index}) - return $f(elt, (dim1, dims...)) - end - Base.$f(dims::Tuple{Index,Vararg{Index}}) = $f(default_eltype(), dims) - Base.$f(dim1::Index, dims::Vararg{Index}) = $f((dim1, dims...)) - end -end -function Base.fill(value, dims::Tuple{Index,Vararg{Index}}) - return ITensor(fill(value, NamedInt.(dims))) -end -function Base.fill(value, dim1::Index, dims::Vararg{Index}) - return fill(value, (dim1, dims...)) -end diff --git a/src/lib/ITensorsNamedDimsArraysExt/src/tensoralgebra.jl b/src/lib/ITensorsNamedDimsArraysExt/src/tensoralgebra.jl deleted file mode 100644 index 691df965c3..0000000000 --- a/src/lib/ITensorsNamedDimsArraysExt/src/tensoralgebra.jl +++ /dev/null @@ -1,31 +0,0 @@ -# AbstractArray algebra needed for ITensors.jl. -# TODO: Instead dispatch on `tensortype(::ITensor)` from within -# ITensors.jl. -using ..NDTensors.NamedDimsArrays: AbstractNamedDimsArray, align -using ..NDTensors.TensorAlgebra: TensorAlgebra -using ..NDTensors: AliasStyle -using ..ITensors: ITensors - -function ITensors._contract(na1::AbstractNamedDimsArray, na2::AbstractNamedDimsArray) - return TensorAlgebra.contract(na1, na2) -end - -function ITensors._add(na1::AbstractNamedDimsArray, na2::AbstractNamedDimsArray) - return na1 + na2 -end - -function ITensors._permute(::AliasStyle, na::AbstractNamedDimsArray, dims::Tuple) - # TODO: Handle aliasing properly. 
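The constructor block above stamps out near-identical methods for `rand`, `randn`, `zeros`, and `ones` with an `@eval` loop over symbols. A self-contained miniature of the same metaprogramming pattern (the `my*` names are hypothetical):

```julia
# Generate a family of constructors from a list of function names, exactly
# as the deleted block does for `rand`/`randn`/`zeros`/`ones` over `Index` tuples.
for (f, base) in ((:myzeros, :zeros), (:myones, :ones))
    @eval begin
        $f(elt::Type{<:Number}, dims::Tuple) = $base(elt, dims...)
        $f(dims::Tuple) = $f(Float64, dims)   # default element type
        $f(dims::Int...) = $f(dims)           # varargs convenience
    end
end

@assert myones((2, 3)) == ones(Float64, 2, 3)
@assert myzeros(Float32, (2,)) == zeros(Float32, 2)
```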
- return align(na, name.(dims)) -end - -function ITensors._map!!( - f, - na_dest::AbstractNamedDimsArray, - na1::AbstractNamedDimsArray, - na2::AbstractNamedDimsArray, -) - # TODO: Handle maybe-mutation. - map!(f, na_dest, na1, na2) - return na_dest -end diff --git a/src/lib/ITensorsNamedDimsArraysExt/src/to_nameddimsarray.jl b/src/lib/ITensorsNamedDimsArraysExt/src/to_nameddimsarray.jl deleted file mode 100644 index 2e75e2a82a..0000000000 --- a/src/lib/ITensorsNamedDimsArraysExt/src/to_nameddimsarray.jl +++ /dev/null @@ -1,77 +0,0 @@ -using ITensors: ITensor -using ..NDTensors: data, inds - -# TODO: Delete this, it is a hack to decide -# if an Index is blocked. -function is_blocked_ind(i) - return try - blockdim(i, 1) - true - catch - false - end -end - -# TODO: Delete once `TensorStorage` is removed. -function to_axes(inds::Tuple) - if any(is_blocked_ind, inds) - return BlockArrays.blockedrange.(map(i -> [blockdim(i, b) for b in 1:nblocks(i)], inds)) - else - return Base.OneTo.(dim.(inds)) - end -end - -using ..NDTensors: DenseTensor -# TODO: Delete once `Dense` is removed. -function to_nameddimsarray(x::DenseTensor) - return named(reshape(data(x), size(x)), name.(inds(x))) -end - -using ..NDTensors: DiagTensor -using ..NDTensors.DiagonalArrays: DiagonalArray -# TODO: Delete once `Diag` is removed. -function to_nameddimsarray(x::DiagTensor) - return named(DiagonalArray(data(x), size(x)), name.(inds(x))) -end - -using ITensors: ITensors, dir, qn -using ..NDTensors: BlockSparseTensor, array, blockdim, datatype, nblocks, nzblocks -using ..NDTensors.BlockSparseArrays: BlockSparseArray -using ..NDTensors.BlockSparseArrays.BlockArrays: BlockArrays, blockedrange -using ..NDTensors.GradedAxes: dual, gradedrange -using ..NDTensors.TypeParameterAccessors: set_ndims -# TODO: Delete once `BlockSparse` is removed. -function to_nameddimsarray(x::BlockSparseTensor) - blockinds = map(inds(x)) do i - r = gradedrange([qn(i, b) => blockdim(i, b) for b in 1:nblocks(i)]) - if dir(i) == ITensors.In - return dual(r) - end - return r - end - blocktype = set_ndims(datatype(x), ndims(x)) - # TODO: Make a simpler constructor: - # BlockSparseArray(blocktype, blockinds) - arraystorage = BlockSparseArray{eltype(x),ndims(x),blocktype}(undef, blockinds) - for b in nzblocks(x) - arraystorage[BlockArrays.Block(Int.(Tuple(b))...)] = array(x[b]) - end - return named(arraystorage, name.(inds(x))) -end - -using ITensors: QN -using ..NDTensors.GradedAxes: GradedAxes -GradedAxes.fuse_labels(l1::QN, l2::QN) = l1 + l2 - -using ITensors: QN -using ..NDTensors.SymmetrySectors: SymmetrySectors -SymmetrySectors.dual(l::QN) = -l - -## TODO: Add this back, define `CombinerArrays` library in NDTensors! -## using ..NDTensors: CombinerTensor, CombinerArray, storage -## # TODO: Delete when we directly use `CombinerArray` as storage. 
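`to_nameddimsarray(::DenseTensor)` above amounts to "reshape the flat data vector to the tensor's logical shape, then attach names". The shape step in isolation, on plain arrays (illustrative stand-ins only):

```julia
# Dense storage keeps one flat vector; the conversion reshapes it to the
# tensor's logical shape before dimension names are attached.
flatdata = collect(1.0:6.0)   # stand-in for `data(x)`
shape = (2, 3)                # stand-in for `size(x)`
dense = reshape(flatdata, shape)
@assert dense[2, 1] == 2.0 && dense[1, 2] == 3.0   # column-major layout
```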
-## function to_nameddimsarray(t::CombinerTensor) -## return named(CombinerArray(storage(t), to_axes(inds(t))), name.(inds(t))) -## end - -to_nameddimsarray(t::ITensor) = ITensor(to_nameddimsarray(t.tensor)) diff --git a/src/lib/ITensorsNamedDimsArraysExt/test/Project.toml b/src/lib/ITensorsNamedDimsArraysExt/test/Project.toml deleted file mode 100644 index 72075bf295..0000000000 --- a/src/lib/ITensorsNamedDimsArraysExt/test/Project.toml +++ /dev/null @@ -1,5 +0,0 @@ -[deps] -BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" -ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" -Suppressor = "fd094767-a336-5f1f-9728-57cf17d0bbfb" diff --git a/src/lib/ITensorsNamedDimsArraysExt/test/runtests.jl b/src/lib/ITensorsNamedDimsArraysExt/test/runtests.jl deleted file mode 100644 index c34d625ef4..0000000000 --- a/src/lib/ITensorsNamedDimsArraysExt/test/runtests.jl +++ /dev/null @@ -1,11 +0,0 @@ -using Test: @testset - -@testset "ITensorsNamedDimsArraysExt" begin - filenames = filter(readdir(@__DIR__)) do filename - startswith("test_")(filename) && endswith(".jl")(filename) - end - @testset "Test $(@__DIR__)/$filename" for filename in filenames - println("Running $(@__DIR__)/$filename") - @time include(filename) - end -end diff --git a/src/lib/ITensorsNamedDimsArraysExt/test/test_basics.jl b/src/lib/ITensorsNamedDimsArraysExt/test/test_basics.jl deleted file mode 100644 index 6a39e7a64d..0000000000 --- a/src/lib/ITensorsNamedDimsArraysExt/test/test_basics.jl +++ /dev/null @@ -1,33 +0,0 @@ -@eval module $(gensym()) -using BlockArrays: blocklengths -using ITensors: ITensor, Index, QN, dag, inds, plev, random_itensor -using ITensors.ITensorsNamedDimsArraysExt: to_nameddimsarray -using NDTensors: tensor -using NDTensors.BlockSparseArrays: BlockSparseArray, block_stored_length -using NDTensors.GradedAxes: isdual -using NDTensors.LabelledNumbers: label -using NDTensors.NamedDimsArrays: NamedDimsArray, unname -using Test: @test, @testset -@testset "to_nameddimsarray" begin - i = Index([QN(0) => 2, QN(1) => 3]) - a = random_itensor(i', dag(i)) - b = to_nameddimsarray(a) - @test b isa ITensor - @test plev(inds(b)[1]) == 1 - @test plev(inds(b)[2]) == 0 - @test inds(b)[1] == i' - @test inds(b)[2] == dag(i) - nb = tensor(b) - @test nb isa NamedDimsArray{Float64} - bb = unname(nb) - @test bb isa BlockSparseArray{Float64} - @test !isdual(axes(bb, 1)) - @test isdual(axes(bb, 2)) - @test blocklengths(axes(bb, 1)) == [2, 3] - @test blocklengths(axes(bb, 2)) == [2, 3] - @test label.(blocklengths(axes(bb, 1))) == [QN(0), QN(1)] - @test label.(blocklengths(axes(bb, 2))) == [QN(0), QN(-1)] - @test block_stored_length(bb) == 2 - @test b' * b ≈ to_nameddimsarray(a' * a) -end -end diff --git a/src/lib/ITensorsNamedDimsArraysExt/test/test_examples.jl b/src/lib/ITensorsNamedDimsArraysExt/test/test_examples.jl deleted file mode 100644 index 4c48c1a471..0000000000 --- a/src/lib/ITensorsNamedDimsArraysExt/test/test_examples.jl +++ /dev/null @@ -1,17 +0,0 @@ -@eval module $(gensym()) -using ITensors: ITensors -using Suppressor: @suppress -using Test: @testset -@testset "examples" begin - @suppress include( - joinpath( - pkgdir(ITensors), - "src", - "lib", - "ITensorsNamedDimsArraysExt", - "examples", - "example_readme.jl", - ), - ) -end -end diff --git a/src/lib/ITensorsOpsExt/src/ITensorsOpsExt.jl b/src/lib/ITensorsOpsExt/src/ITensorsOpsExt.jl deleted file mode 100644 index 48bef834ae..0000000000 --- a/src/lib/ITensorsOpsExt/src/ITensorsOpsExt.jl +++ /dev/null @@ -1,79 +0,0 
@@ -module ITensorsOpsExt -using ITensors: ITensors, ITensor, Index, apply, dag, hascommoninds, swapprime -using ..LazyApply: LazyApply, Applied, Exp, Prod, Scaled, Sum, argument, coefficient -using LinearAlgebra: UniformScaling, I -using ..Ops: Op -using ..SiteTypes: SiteTypes, op - -function SiteTypes.op(I::UniformScaling, s::Index...) - return I.λ * op("Id", s...) -end - -function ITensors.ITensor(o::Op, s::Vector{<:Index}) - return op(o.which_op, map(n -> s[n], o.sites)...; o.params...) -end - -function ITensors.ITensor(o::Scaled, s::Vector{<:Index}) - c = coefficient(o) - if isreal(c) - c = real(c) - end - return c * ITensor(argument(o), s) -end - -function ITensors.ITensor(o::Prod, s::Vector{<:Index}) - T = ITensor(true) - for a in o.args[1] - Tₙ = ITensor(a, s) - # TODO: Implement this logic inside `apply` - if hascommoninds(T, Tₙ) - T = T(Tₙ) - else - T *= Tₙ - end - end - return T -end - -function ITensors.ITensor(o::Sum, s::Vector{<:Index}) - T = ITensor() - for a in o.args[1] - T += ITensor(a, s) - end - return T -end - -function ITensors.ITensor(o::Exp, s::Vector{<:Index}) - return exp(ITensor(argument(o), s)) -end - -function ITensors.ITensor(o::LazyApply.Adjoint, s::Vector{<:Index}) - return swapprime(dag(ITensor(o', s)), 0 => 1) -end - -function LazyApply.Sum{ITensor}(o::Sum, s::Vector{<:Index}) - return Applied(sum, (map(oₙ -> ITensor(oₙ, s), only(o.args)),)) -end - -function LazyApply.Prod{ITensor}(o::Prod, s::Vector{<:Index}) - return Applied(prod, (map(oₙ -> ITensor(oₙ, s), only(o.args)),)) -end - -function LazyApply.Prod{ITensor}(o::Scaled{C,Prod{Op}}, s::Vector{<:Index}) where {C} - t = Prod{ITensor}(argument(o), s) - t1 = coefficient(o) * only(t.args)[1] - return Applied(prod, (vcat([t1], only(t.args)[2:end]),)) -end - -function ITensors.apply(o::Prod{ITensor}, v::ITensor; kwargs...) - ov = v - for oₙ in reverse(only(o.args)) - ov = apply(oₙ, ov; kwargs...) - end - return ov -end - -function (o::Prod{ITensor})(v::ITensor; kwargs...) - return apply(o, v; kwargs...) -end -end diff --git a/src/lib/ITensorsSiteTypesExt/src/ITensorsSiteTypesExt.jl b/src/lib/ITensorsSiteTypesExt/src/ITensorsSiteTypesExt.jl deleted file mode 100644 index 970465e7d9..0000000000 --- a/src/lib/ITensorsSiteTypesExt/src/ITensorsSiteTypesExt.jl +++ /dev/null @@ -1,79 +0,0 @@ -module ITensorsSiteTypesExt -using ..ITensors: ITensors, Index, LastVal, dag, prime, val -using NDTensors: NDTensors, dim, sim -using ..SiteTypes: SiteTypes -SiteTypes.val(iv::Pair{<:Index}) = val(iv.first, iv.second) -SiteTypes.val(i::Index, l::LastVal) = l.f(dim(i)) -# TODO: -# Implement a macro with a general definition: -# f(iv::Pair{<:Index}, args...) = (f(ind(iv), args...) => val(iv)) -ITensors.prime(iv::Pair{<:Index}, inc::Integer=1) = (prime(ind(iv), inc) => val(iv)) -NDTensors.sim(iv::Pair{<:Index}, args...) = (sim(ind(iv), args...) => val(iv)) -ITensors.dag(iv::Pair{<:Index}) = (dag(ind(iv)) => val(iv)) -Base.adjoint(iv::Pair{<:Index}) = (prime(ind(iv)) => val(iv)) - -using ..ITensors: ITensors, Indices -function ITensors._vals(is::Indices, I::String...) - return val.(is, I) -end - -using Adapt: Adapt -using ..ITensors: ITensors, Index, ITensor, ind, inds -using NDTensors: NDTensors, Tensor -using ..SiteTypes: val -Base.@propagate_inbounds @inline function ITensors._getindex( - T::Tensor, ivs::Vararg{Any,N} -) where {N} - # Tried ind.(ivs), val.(ivs) but it is slower - p = NDTensors.getperm(inds(T), ntuple(n -> ind(@inbounds ivs[n]), Val(N))) - fac = NDTensors.permfactor(p, ivs...) 
# possible sign - return fac * ITensors._getindex( - T, NDTensors.permute(ntuple(n -> val(@inbounds ivs[n]), Val(N)), p)... - ) -end -Base.@propagate_inbounds @inline function ITensors._setindex!!( - T::Tensor, x::Number, ivs::Vararg{Any,N} -) where {N} - # Would be nice to split off the functions for extracting the `ind` and `val` as Tuples, - # but it was slower. - p = NDTensors.getperm(inds(T), ntuple(n -> ind(@inbounds ivs[n]), Val(N))) - fac = NDTensors.permfactor(p, ivs...) # possible sign - return ITensors._setindex!!( - T, fac * x, NDTensors.permute(ntuple(n -> val(@inbounds ivs[n]), Val(N)), p)... - ) -end -Base.@propagate_inbounds @inline function Base.setindex!( - T::ITensor, x::Number, I1::Pair{<:Index,String}, I::Pair{<:Index,String}... -) - Iv = map(i -> i.first => val(i.first, i.second), (I1, I...)) - return setindex!(T, x, Iv...) -end -""" - onehot(ivs...) - setelt(ivs...) - onehot(::Type, ivs...) - setelt(::Type, ivs...) - -Create an ITensor with all zeros except the specified value, -which is set to 1. - -# Examples -```julia -i = Index(2,"i") -A = onehot(i=>2) -# A[i=>2] == 1, all other elements zero - -# Specify the element type -A = onehot(Float32, i=>2) - -j = Index(3,"j") -B = onehot(i=>1,j=>3) -# B[i=>1,j=>3] == 1, all other element zero -``` -""" -function ITensors.onehot(datatype::Type{<:AbstractArray}, ivs::Pair{<:Index}...) - A = ITensor(eltype(datatype), ind.(ivs)...) - A[val.(ivs)...] = one(eltype(datatype)) - return Adapt.adapt(datatype, A) -end -end diff --git a/src/lib/LazyApply/src/LazyApply.jl b/src/lib/LazyApply/src/LazyApply.jl deleted file mode 100644 index f0e69fa71f..0000000000 --- a/src/lib/LazyApply/src/LazyApply.jl +++ /dev/null @@ -1,386 +0,0 @@ -module LazyApply - -import Base: - ==, - +, - -, - *, - /, - ^, - exp, - adjoint, - copy, - show, - getindex, - length, - isless, - iterate, - firstindex, - lastindex, - keys, - reverse, - size - -export Applied, Scaled, Sum, Prod, Exp, coefficient, argument, expand, materialize, terms - -struct Applied{F,Args<:Tuple,Kwargs<:NamedTuple} - f::F - args::Args - kwargs::Kwargs -end -Applied(f, args::Tuple) = Applied(f, args, (;)) - -materialize(x) = x -function materialize(a::Applied) - return a.f(materialize.(a.args)...; a.kwargs...) 
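`Applied` above stores a function together with unevaluated arguments, and `materialize` evaluates the call tree recursively. A standalone miniature of that pair (the names `Lazy`/`force` are illustrative):

```julia
# A lazy call tree: `force` recurses into the arguments, then applies `f`,
# mirroring `materialize(a::Applied)` above.
struct Lazy{F,Args<:Tuple}
    f::F
    args::Args
end
force(x) = x                                # leaves evaluate to themselves
force(a::Lazy) = a.f(map(force, a.args)...)

ex = Lazy(+, (1, Lazy(*, (2, 3))))          # represents 1 + 2 * 3, unevaluated
@assert force(ex) == 7
```

Keeping the tree symbolic until `materialize` is what lets the algebra below rearrange sums and products of operators before anything is computed.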
-end - -function (a1::Applied == a2::Applied) - return a1.f == a2.f && a1.args == a2.args && a1.kwargs == a2.kwargs -end - -# -# Applied algebra -# - -# Used for dispatch -const Scaled{C<:Number,A} = Applied{typeof(*),Tuple{C,A},NamedTuple{(),Tuple{}}} -const Sum{A} = Applied{typeof(sum),Tuple{Vector{A}},NamedTuple{(),Tuple{}}} -const Prod{A} = Applied{typeof(prod),Tuple{Vector{A}},NamedTuple{(),Tuple{}}} - -# Some convenient empty constructors -Sum{A}() where {A} = Applied(sum, (A[],)) -Prod{A}() where {A} = Applied(prod, (A[],)) - -coefficient(co::Scaled{C}) where {C} = co.args[1] -argument(co::Scaled{C}) where {C} = co.args[2] - -# -# Generic algebra -# - -# 1.3 * Op("X", 1) + 1.3 * Op("X", 2) -# 1.3 * Op("X", 1) * Op("X", 2) + 1.3 * Op("X", 3) * Op("X", 4) -function (a1::Scaled{C,A} + a2::Scaled{C,A}) where {C,A} - return Sum{Scaled{C,A}}() + a1 + a2 -end - -function (a1::Prod{A} + a2::Prod{A}) where {A} - return Sum{Prod{A}}() + a1 + a2 -end - -(c::Number * a::Scaled{C}) where {C} = (c * coefficient(a)) * argument(a) -(a::Scaled{C} * c::Number) where {C} = (coefficient(a) * c) * argument(a) - --(a::Scaled{C}) where {C} = (-one(C) * a) --(a::Sum) = (-1 * a) --(a::Prod) = (-1 * a) - -(os::Sum{A} + o::A) where {A} = Applied(sum, (vcat(os.args[1], [o]),)) -(o::A + os::Sum{A}) where {A} = Applied(sum, (vcat([o], os.args[1]),)) - -(a1::Sum{A} - a2::A) where {A} = a1 + (-a2) -(a1::A - a2::Sum{A}) where {A} = a1 + (-a2) - -(a1::Sum{A} - a2::Prod{A}) where {A} = a1 + (-a2) -(a1::Sum{A} - a2::Scaled{C,Prod{A}}) where {C,A} = a1 + (-a2) -(a1::Sum{A} - a2::Sum{Scaled{C,Prod{A}}}) where {C,A} = a1 + (-a2) - -(a1::Prod{A} * a2::A) where {A} = Applied(prod, (vcat(only(a1.args), [a2]),)) -(a1::A * a2::Prod{A}) where {A} = Applied(prod, (vcat([a1], only(a2.args)),)) - -# Fixes ambiguity error with: -# *(a1::Applied, a2::Sum) -# *(os::Prod{A}, o::A) -(a1::Prod{Sum{A}} * a2::Sum{A}) where {A} = Applied(prod, (vcat(only(a1.args), [a2]),)) - -# 1.3 * Op("X", 1) + 1 * Op("X", 2) -# 1.3 * Op("X", 1) * Op("X", 2) + 1 * Op("X", 3) -# 1.3 * Op("X", 1) * Op("X", 2) + 1 * Op("X", 3) * Op("X", 4) -function (co1::Scaled{C1,A} + co2::Scaled{C2,A}) where {C1,C2,A} - c1, c2 = promote(coefficient(co1), coefficient(co2)) - return c1 * argument(co1) + c2 * argument(co2) -end - -# (1.3 * Op("X", 1)) * (1.3 * Op("X", 2)) -function (co1::Scaled{C1} * co2::Scaled{C2}) where {C1,C2} - c = coefficient(co1) * coefficient(co2) - o = argument(co1) * argument(co2) - return c * o -end - -function (a1::Prod{A} * a2::Scaled{C,A}) where {C,A} - return coefficient(a2) * (a1 * argument(a2)) -end - -function (a1::Prod{A} + a2::Scaled{C,A}) where {C,A} - return one(C) * a1 + Prod{A}() * a2 -end - -# (Op("X", 1) + Op("X", 2)) + (Op("X", 3) + Op("X", 4)) -# (Op("X", 1) * Op("X", 2) + Op("X", 3) * Op("X", 4)) + (Op("X", 5) * Op("X", 6) + Op("X", 7) * Op("X", 8)) -(a1::Sum{A} + a2::Sum{A}) where {A} = Applied(sum, (vcat(a1.args[1], a2.args[1]),)) -(a1::Sum{A} - a2::Sum{A}) where {A} = a1 + (-a2) - -(a1::Prod{A} * a2::Prod{A}) where {A} = Applied(prod, (vcat(only(a1.args), only(a2.args)),)) - -(os::Sum{Scaled{C,A}} + o::A) where {C,A} = os + one(C) * o -(o::A + os::Sum{Scaled{C,A}}) where {C,A} = one(C) * o + os - -# Op("X", 1) + Op("X", 2) + 1.3 * Op("X", 3) -(os::Sum{A} + co::Scaled{C,A}) where {C,A} = one(C) * os + co - -# 1.3 * Op("X", 1) + (Op("X", 2) + Op("X", 3)) -(co::Scaled{C,A} + os::Sum{A}) where {C,A} = co + one(C) * os - -# 1.3 * (Op("X", 1) + Op("X", 2)) -(c::Number * os::Sum) = Applied(sum, (c * os.args[1],)) - -(a1::Applied * 
a2::Sum) = Applied(sum, (map(a -> a1 * a, only(a2.args)),)) -(a1::Sum * a2::Applied) = Applied(sum, (map(a -> a * a2, only(a1.args)),)) -(a1::Sum * a2::Sum) = Applied(prod, ([a1, a2],)) - -function _expand(a1::Sum, a2::Sum) - return Applied(sum, (vec([a1[i] * a2[j] for i in 1:length(a1), j in 1:length(a2)]),)) -end - -function expand(a::Prod) - if length(a) == 1 - return a[1] - elseif length(a) ≥ 2 - a12 = _expand(a[1], a[2]) - return expand(Applied(prod, (vcat([a12], a[3:end]),))) - end -end - -# (Op("X", 1) + Op("X", 2)) * 1.3 -(os::Sum * c::Number) = c * os - -# (Op("X", 1) + Op("X", 2)) / 1.3 -(os::Sum / c::Number) = inv(c) * os - -# Promotions -function (co1::Scaled{C,Prod{A}} + co2::Scaled{C,A}) where {C,A} - return co1 + coefficient(co2) * Applied(prod, ([argument(co2)],)) -end - -function (a1::Scaled - a2::Scaled) - return a1 + (-a2) -end - -function (a1::Prod{A} + a2::A) where {A} - return a1 + Applied(prod, ([a2],)) -end - -function (a1::Sum{A} + a2::Prod{A}) where {A} - return Prod{A}() * a1 + a2 -end - -function (a1::Sum{A} + a2::Sum{Scaled{C,Prod{A}}}) where {C,A} - return (one(C) * Prod{A}() * a1) + a2 -end - -function (a1::Prod{A} - a2::A) where {A} - return a1 + (-a2) -end - -function (co1::Sum{Scaled{C,Prod{A}}} + co2::Scaled{C,A}) where {C,A} - return co1 + coefficient(co2) * Applied(prod, ([argument(co2)],)) -end - -function (a1::Sum{Scaled{C1,Prod{A}}} - a2::Scaled{C2,A}) where {C1,C2,A} - return a1 + (-a2) -end - -function (a1::Sum{Scaled{C,Prod{A}}} - a2::Prod{A}) where {C,A} - return a1 + (-a2) -end - -function (a1::Sum{Scaled{C1,Prod{A}}} - a2::Scaled{C2,Prod{A}}) where {C1,C2,A} - return a1 + (-a2) -end - -function (a1::Sum{A} + a2::Scaled{C,Prod{A}}) where {C,A} - return Sum{Scaled{C,Prod{A}}}() + a1 + a2 -end - -function (a1::Sum{Scaled{C1,Prod{A}}} + a2::Scaled{C2,A}) where {C1,C2,A} - C = promote_type(C1, C2) - return one(C) * a1 + one(C) * a2 -end - -# (::Sum{Scaled{Bool,Prod{Op}}} + ::Scaled{Float64,Prod{Op}}) -function (a1::Sum{Scaled{C1,A}} + a2::Scaled{C2,A}) where {C1,C2,A} - C = promote_type(C1, C2) - return one(C) * a1 + one(C) * a2 -end - -# TODO: Is this needed? It seems like: -# -# (a1::Sum{A} + a2::A) -# -# is not being called. 
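`_expand` above distributes a product of two `Sum`s termwise, and `expand` folds that pairwise step across a `Prod`. The arithmetic content of the two-index comprehension, checked on plain numbers:

```julia
# (a₁ + a₂ + …)(b₁ + b₂ + …) expands to the sum over all pairwise products,
# which is what `_expand` builds and `expand` folds left-to-right.
expand_pair(xs, ys) = vec([x * y for x in xs, y in ys])

@assert sum(expand_pair([1, 2], [10, 20])) == (1 + 2) * (10 + 20)
@assert length(expand_pair([1, 2], [10, 20])) == 4
```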
-function (a1::Sum{Scaled{C,A}} + a2::Scaled{C,A}) where {C,A} - return Applied(sum, (vcat(only(a1.args), [a2]),)) -end - -function (a1::Sum{Scaled{C,Prod{A}}} + a2::Sum{A}) where {C,A} - a2 = one(C) * a2 - a2 = Prod{A}() * a2 - return a1 + one(C) * Prod{A}() * a2 -end - -function (a1::Sum{Prod{A}} + a2::A) where {A} - return a1 + (Prod{A}() * a2) -end - -function (a1::Sum{Prod{A}} + a2::Scaled{C,A}) where {C,A} - return a1 + (Prod{A}() * a2) -end - -function (a1::Sum{Scaled{C,Prod{A}}} + a2::A) where {C,A} - return a1 + one(C) * a2 -end -(a1::Sum{Scaled{C,Prod{A}}} - a2::A) where {C,A} = a1 + (-a2) - -function (a1::Sum{Scaled{C,Prod{A}}} + a2::Sum{Scaled{C,A}}) where {C,A} - return a1 + (Prod{A}() * a2) -end - -function (o::A + os::Sum{Scaled{C,Prod{A}}}) where {C,A} - return one(C) * o + os -end - -function (a::Sum^n::Int) - r = a - for _ in 2:n - r *= a - end - return r -end - -function (a::Prod^n::Int) - r = a - for _ in 2:n - r *= a - end - return r -end - -exp(a::Applied) = Applied(exp, (a,)) - -const Exp{A} = Applied{typeof(exp),Tuple{A},NamedTuple{(),Tuple{}}} -const Adjoint{A} = Applied{typeof(adjoint),Tuple{A},NamedTuple{(),Tuple{}}} - -argument(a::Exp) = a.args[1] - -(c::Number * e::Exp) = Applied(*, (c, e)) -(e::Exp * c::Number) = c * e -(e1::Exp * e2::Exp) = Applied(prod, ([e1, e2],)) -(e1::Applied * e2::Exp) = Applied(prod, ([e1, e2],)) -(e1::Exp * e2::Applied) = Applied(prod, ([e1, e2],)) - -function reverse(a::Prod) - return Applied(prod, (reverse(only(a.args)),)) -end - -adjoint(a::Prod) = Applied(prod, (map(adjoint, reverse(only(a.args))),)) - -# -# Convenient indexing -# - -getindex(a::Union{Sum,Prod}, I...) = only(a.args)[I...] -iterate(a::Union{Sum,Prod}, args...) = iterate(only(a.args), args...) -size(a::Union{Sum,Prod}) = size(only(a.args)) -length(a::Union{Sum,Prod}) = length(only(a.args)) -firstindex(a::Union{Sum,Prod}) = 1 -lastindex(a::Union{Sum,Prod}) = length(a) -keys(a::Union{Sum,Prod}) = 1:length(a) - -length(a::Scaled{C,<:Sum}) where {C} = length(argument(a)) -length(a::Scaled{C,<:Prod}) where {C} = length(argument(a)) -getindex(a::Scaled{C,<:Sum}, I...) where {C} = getindex(argument(a), I...) -getindex(a::Scaled{C,<:Prod}, I...) where {C} = getindex(argument(a), I...) 
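The indexing block above gives `Sum` and `Prod` a container interface by forwarding everything to the wrapped `Vector` of terms. The same delegation pattern in miniature (the `Terms` type is hypothetical):

```julia
# Forwarding getindex/length/iterate to an inner Vector makes the wrapper
# usable with generic code (`sum`, `collect`, comprehensions, ...).
struct Terms{T}
    v::Vector{T}
end
Base.getindex(t::Terms, I...) = t.v[I...]
Base.length(t::Terms) = length(t.v)
Base.iterate(t::Terms, state...) = iterate(t.v, state...)

ts = Terms([10, 20, 30])
@assert ts[2] == 20 && sum(ts) == 60 && collect(ts) == [10, 20, 30]
```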
-lastindex(a::Scaled{C,<:Sum}) where {C} = lastindex(argument(a)) -lastindex(a::Scaled{C,<:Prod}) where {C} = lastindex(argument(a)) - -# -# Functions convenient for OpSum code -# - -terms(a::Union{Sum,Prod}) = only(a.args) -terms(a::Scaled{C,<:Union{Sum,Prod}}) where {C} = terms(argument(a)) -copy(a::Applied) = Applied(deepcopy(a.f), deepcopy(a.args), deepcopy(a.kwargs)) -Sum(a::Vector) = Applied(sum, (a,)) -Prod(a::Vector) = Applied(prod, (a,)) -function isless(a1::Applied{F}, a2::Applied{F}) where {F} - return (isless(a1.args, a2.args) && isless(a1.kwargs, a2.kwargs)) -end - -# -# Printing -# - -function show(io::IO, ::MIME"text/plain", a::Sum) - print(io, "sum(\n") - for n in eachindex(a) - print(io, " ", a[n]) - if n ≠ lastindex(a) - print(io, "\n") - end - end - print(io, "\n)") - return nothing -end -show(io::IO, a::Sum) = show(io, MIME("text/plain"), a) - -function show(io::IO, ::MIME"text/plain", a::Prod) - print(io, "prod(\n") - for n in eachindex(a) - print(io, " ", a[n]) - if n ≠ lastindex(a) - print(io, "\n") - end - end - print(io, "\n)") - return nothing -end -show(io::IO, a::Prod) = show(io, MIME("text/plain"), a) - -function show(io::IO, m::MIME"text/plain", a::Exp) - print(io, a.f, "(") - for n in 1:length(a.args) - print(io, a.args[n]) - if n < length(a.args) - print(io, ", ") - end - end - print(io, ")") - return nothing -end -show(io::IO, a::Exp) = show(io, MIME("text/plain"), a) - -function show(io::IO, m::MIME"text/plain", a::Applied) - print(io, a.f, "(") - for n in eachindex(a.args) - print(io, a.args[n]) - if n < length(a.args) - print(io, ", ") - end - end - if !isempty(a.kwargs) - print(io, "; ") - for n in 1:length(a.kwargs) - print(io, keys(a.kwargs)[n], "=", a.kwargs[n]) - if n < length(a.kwargs) - print(io, ", ") - end - end - end - print(io, ")") - return nothing -end -show(io::IO, a::Applied) = show(io, MIME("text/plain"), a) - -end diff --git a/src/lib/Ops/ops_itensor.jl b/src/lib/Ops/ops_itensor.jl deleted file mode 100644 index e650376dcf..0000000000 --- a/src/lib/Ops/ops_itensor.jl +++ /dev/null @@ -1,73 +0,0 @@ -using .SiteTypes: SiteTypes, op - -function SiteTypes.op(I::UniformScaling, s::Index...) - return I.λ * op("Id", s...) -end - -function ITensor(o::Op, s::Vector{<:Index}) - return op(o.which_op, map(n -> s[n], o.sites)...; o.params...) 
-end - -function ITensor(o::Scaled, s::Vector{<:Index}) - c = coefficient(o) - if isreal(c) - c = real(c) - end - return c * ITensor(argument(o), s) -end - -function ITensor(o::Prod, s::Vector{<:Index}) - T = ITensor(true) - for a in o.args[1] - Tₙ = ITensor(a, s) - # TODO: Implement this logic inside `apply` - if hascommoninds(T, Tₙ) - T = T(Tₙ) - else - T *= Tₙ - end - end - return T -end - -function ITensor(o::Sum, s::Vector{<:Index}) - T = ITensor() - for a in o.args[1] - T += ITensor(a, s) - end - return T -end - -function ITensor(o::Exp, s::Vector{<:Index}) - return exp(ITensor(argument(o), s)) -end - -function ITensor(o::LazyApply.Adjoint, s::Vector{<:Index}) - return swapprime(dag(ITensor(o', s)), 0 => 1) -end - -function Sum{ITensor}(o::Sum, s::Vector{<:Index}) - return Applied(sum, (map(oₙ -> ITensor(oₙ, s), only(o.args)),)) -end - -function Prod{ITensor}(o::Prod, s::Vector{<:Index}) - return Applied(prod, (map(oₙ -> ITensor(oₙ, s), only(o.args)),)) -end - -function Prod{ITensor}(o::Scaled{C,Prod{Op}}, s::Vector{<:Index}) where {C} - t = Prod{ITensor}(argument(o), s) - t1 = coefficient(o) * only(t.args)[1] - return Applied(prod, (vcat([t1], only(t.args)[2:end]),)) -end - -function apply(o::Prod{ITensor}, v::ITensor; kwargs...) - ov = v - for oₙ in reverse(only(o.args)) - ov = apply(oₙ, ov; kwargs...) - end - return ov -end - -function (o::Prod{ITensor})(v::ITensor; kwargs...) - return apply(o, v; kwargs...) -end diff --git a/src/lib/Ops/src/Ops.jl b/src/lib/Ops/src/Ops.jl deleted file mode 100644 index fcbff091ea..0000000000 --- a/src/lib/Ops/src/Ops.jl +++ /dev/null @@ -1,4 +0,0 @@ -module Ops -include("op.jl") -include("trotter.jl") -end diff --git a/src/lib/Ops/src/op.jl b/src/lib/Ops/src/op.jl deleted file mode 100644 index 886bb969b9..0000000000 --- a/src/lib/Ops/src/op.jl +++ /dev/null @@ -1,335 +0,0 @@ -using ..LazyApply - -import Base: ==, +, -, *, /, convert, exp, show, adjoint, isless, hash - -export Op, OpSum, which_op, site, sites, params, Applied, expand - -##################################################################################### -# General functionality -# - -# Helper function to split a `Tuple` according to the function `f`. -# For example: -# -# julia> t = (1, "X", 1, 2, "Y", 2, "Z", 4) -# (1, "X", 1, 2, "Y", 2, "Z", 4) -# -# julia> split(x -> x isa AbstractString, t) -# [(1,), ("X", 1, 2), ("Y", 2), ("Z", 4)] -# -function split(f, t::Tuple) - n = findall(f, t) - nsplit = length(n) + 1 - s = Vector{Any}(undef, nsplit) - s[1] = t[1:(first(n) - 1)] - for i in 2:(nsplit - 1) - s[i] = t[n[i - 1]:(n[i] - 1)] - end - s[end] = t[last(n):end] - return s -end - -## XXX: Very long compile times: -## https://github.com/JuliaLang/julia/issues/45545 -## -## julia> using ITensors -## -## julia> @time ITensors.Ops.split(x -> x isa String, ("X", 1)) -## 7.588123 seconds (2.34 M allocations: 100.919 MiB, 1.71% gc time, 100.00% compilation time) -## ((), ("X", 1)) -## -## julia> @time ITensors.Ops.split(x -> x isa String, ("X", 1)) -## 0.042590 seconds (88.59 k allocations: 4.823 MiB, 19.13% gc time, 99.84% compilation time) -## ((), ("X", 1)) -## -## function split(f, t::Tuple) -## n = findall(f, t) -## ti = t[1:(first(n) - 1)] -## ts = ntuple(i -> t[n[i]:(n[i + 1] - 1)], length(n) - 1) -## tf = t[last(n):end] -## return ti, ts..., tf -## end - -struct Op - which_op - sites::Tuple - params::NamedTuple - function Op(which_op, site...; kwargs...) 
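The tuple `split` helper above (whose compile-time cost is flagged in the `XXX` comment) groups a flat argument list at each operator-name position. A vector-based re-statement with the same grouping rule, runnable as-is:

```julia
# Groups: everything before the first match, then one group per match, where
# each group starts at a matching element and runs to just before the next.
function split_at(f, xs::Vector)
    idx = findall(f, xs)
    bounds = [0; idx .- 1; length(xs)]
    return [xs[(bounds[k] + 1):bounds[k + 1]] for k in 1:(length(bounds) - 1)]
end

@assert split_at(x -> x isa AbstractString, Any[1, "X", 1, 2, "Y", 2]) ==
        [Any[1], Any["X", 1, 2], Any["Y", 2]]
```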
- return new(which_op, site, NamedTuple(kwargs)) - end -end - -which_op(o::Op) = o.which_op -name(o::Op) = which_op(o) -sites(o::Op) = o.sites -site(o::Op) = only(sites(o)) -params(o::Op) = o.params - -function (o1::Op == o2::Op) - return o1.which_op == o2.which_op && o1.sites == o2.sites && o1.params == o2.params -end - -function hash(o::Op, h::UInt) - return hash(which_op(o), hash(sites(o), hash(params(o), hash(:Op, h)))) -end - -# Version of `isless` defined for matrices -_isless(a, b) = isless(a, b) -_isless(a::AbstractMatrix, b::AbstractMatrix) = isless(hash(a), hash(b)) -_isless(a::AbstractString, b::AbstractMatrix) = true -_isless(a::AbstractMatrix, b::AbstractString) = !_isless(b, a) - -function isless(o1::Op, o2::Op) - if sites(o1) ≠ sites(o2) - return sites(o1) < sites(o2) - end - if which_op(o1) ≠ which_op(o2) - return _isless(which_op(o1), which_op(o2)) - end - return params(o1) < params(o2) -end - -function isless(o1::Prod{Op}, o2::Prod{Op}) - if length(o1) ≠ length(o2) - return length(o1) < length(o2) - end - for n in 1:length(o1) - if o1[n] ≠ o2[n] - return (o1[n] < o2[n]) - end - end - return false -end - -function isless(o1::Scaled{C1,Prod{Op}}, o2::Scaled{C2,Prod{Op}}) where {C1,C2} - if argument(o1) == argument(o2) - if coefficient(o1) ≈ coefficient(o2) - return false - else - c1 = coefficient(o1) - c2 = coefficient(o2) - #"lexicographic" ordering on complex numbers - return real(c1) < real(c2) || (real(c1) ≈ real(c2) && imag(c1) < imag(c2)) - end - end - return argument(o1) < argument(o2) -end - -## function Op(t::Tuple) -## which_op = first(t) -## site_params = Base.tail(t) -## if last(site_params) isa NamedTuple -## site = Base.front(site_params) -## params = last(site_params) -## else -## site = site_params -## params = (;) -## end -## return Op(which_op, site; params...) -## end - -## function Op(t::Tuple{WhichOp,NamedTuple,Vararg}) where {WhichOp} -## params = t[2] -## which_op = t[1] -## sites = t[3:end] -## return Op(which_op, sites...; params...) -## end - -function sites(a::Union{Sum,Prod}) - s = [] - for n in 1:length(a) - s = s ∪ sites(a[n]) - end - return map(identity, s) -end -sites(a::Scaled{C,<:Sum}) where {C} = sites(argument(a)) -sites(a::Scaled{C,<:Prod}) where {C} = sites(argument(a)) - -params(a::Scaled{C,<:Prod}) where {C} = params(only(argument(a))) - -which_op(a::Scaled{C,Op}) where {C} = which_op(argument(a)) -sites(a::Scaled{C,Op}) where {C} = sites(argument(a)) -params(a::Scaled{C,Op}) where {C} = params(argument(a)) - -# -# Op algebra -# - -function convert(::Type{Scaled{C1,Prod{Op}}}, o::Scaled{C2,Prod{Op}}) where {C1,C2} - c = convert(C1, coefficient(o)) - return c * argument(o) -end - -""" -An `OpSum` represents a sum of operator -terms. - -Often it is used to create matrix -product operator (`MPO`) approximation -of the sum of the terms in the `OpSum` oject. -Each term is a product of local operators -specified by names such as `"Sz"` or `"N"`, -times an optional coefficient which -can be real or complex. - -Which local operator names are available -is determined by the function `op` -associated with the `TagType` defined by -special Index tags, such as `"S=1/2"`, `"S=1"`, -`"Fermion"`, and `"Electron"`. 
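The `isless` method above for `Scaled{C,Prod{Op}}` falls back to a "lexicographic" order on complex coefficients when the operator arguments match. That comparison in isolation:

```julia
# Real parts decide first; (approximately) tied real parts fall through to
# the imaginary parts, as in the deleted `isless` method.
complex_lt(c1, c2) = real(c1) < real(c2) ||
                     (real(c1) ≈ real(c2) && imag(c1) < imag(c2))

@assert complex_lt(1 + 2im, 2 + 0im)    # decided by the real parts
@assert complex_lt(1 + 1im, 1 + 2im)    # real tie, decided by imag
@assert !complex_lt(2 + 0im, 1 + 5im)
```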
-""" -const OpSum{C} = Sum{Scaled{C,Prod{Op}}} - -# This helps with in-place operations -OpSum() = OpSum{ComplexF64}() - -(o1::Op + o2::Op) = Applied(sum, ([o1, o2],)) -(o1::Op * o2::Op) = Applied(prod, ([o1, o2],)) --(o::Op) = -one(Int) * o -(o1::Op - o2::Op) = o1 + (-o2) - -(c::Number * o::Op) = Applied(*, (c, o)) -(o::Op * c::Number) = Applied(*, (c, o)) -(o::Op / c::Number) = Applied(*, (inv(c), o)) - -(c::Number * o::Prod{Op}) = Applied(*, (c, o)) -(o::Prod{Op} * c::Number) = Applied(*, (c, o)) -(o::Prod{Op} / c::Number) = Applied(*, (inv(c), o)) - -# 1.3 * Op("X", 1) + Op("X", 2) -# 1.3 * Op("X", 1) * Op("X", 2) + Op("X", 3) -(co1::Scaled{C} + o2::Op) where {C} = co1 + one(C) * o2 - -# Op("X", 1) + 1.3 * Op("X", 2) -(o1::Op + co2::Scaled{C}) where {C} = one(C) * o1 + co2 - -(o1::Op * o2::Sum) = Applied(sum, (map(a -> o1 * a, only(o2.args)),)) -(o1::Sum * o2::Op) = Applied(sum, (map(a -> a * o2, only(o1.args)),)) - -# 1.3 * Op("X", 1) + Op("X", 2) * Op("X", 3) -# 1.3 * Op("X", 1) * Op("X", 2) + Op("X", 3) * Op("X", 4) -(co1::Scaled{C} + o2::Prod{Op}) where {C} = co1 + one(C) * o2 - -# 1.3 * Op("X", 1) * Op("X", 2) -(co1::Scaled{C} * o2::Op) where {C} = co1 * (one(C) * o2) - -exp(o::Op) = Applied(exp, (o,)) - -adjoint(o::Op) = Applied(adjoint, (o,)) -adjoint(o::LazyApply.Adjoint{Op}) = only(o.args) - -(o1::Exp{Op} * o2::Op) = Applied(prod, ([o1, o2],)) - -# -# Tuple interface -# - -const OpSumLike{C} = Union{ - Sum{Op}, - Sum{Scaled{C,Op}}, - Sum{Prod{Op}}, - Sum{Scaled{C,Prod{Op}}}, - Prod{Op}, - Scaled{C,Prod{Op}}, -} - -const WhichOp = Union{AbstractString,AbstractMatrix{<:Number}} - -# Make a `Scaled{C,Prod{Op}}` from a `Tuple` input, -# for example: -# -# (1.2, "X", 1, "Y", 2) -> 1.2 * Op("X", 1) * Op("Y", 2) -# -function op_term(a::Tuple{Number,Vararg}) - c = first(a) - return c * op_term(Base.tail(a)) -end - -function op_site(which_op, params::NamedTuple, sites...) - return Op(which_op, sites...; params...) -end - -function op_site(which_op, sites_params...) - if last(sites_params) isa NamedTuple - sites = Base.front(sites_params) - params = last(sites_params) - return Op(which_op, sites...; params...) - end - return Op(which_op, sites_params...) -end - -function op_term(a::Tuple{Vararg}) - a_split = split(x -> x isa WhichOp, a) - @assert isempty(first(a_split)) - popfirst!(a_split) - o = op_site(first(a_split)...) - popfirst!(a_split) - for aₙ in a_split - o *= op_site(aₙ...) 
- end - return o -end - -function (o1::OpSumLike + o2::Tuple) - return o1 + op_term(o2) -end - -function (o1::Tuple + o2::OpSumLike) - return op_term(o1) + o2 -end - -function (o1::OpSumLike - o2::Tuple) - return o1 - op_term(o2) -end - -function (o1::Tuple - o2::OpSumLike) - return op_term(o1) - o2 -end - -function (o1::OpSumLike * o2::Tuple) - return o1 * op_term(o2) -end - -function (o1::Tuple * o2::OpSumLike) - return op_term(o1) * o2 -end - -function show(io::IO, ::MIME"text/plain", o::Op) - print(io, which_op(o)) - print(io, sites(o)) - if !isempty(params(o)) - print(io, params(o)) - end - return nothing -end -show(io::IO, o::Op) = show(io, MIME("text/plain"), o) - -function show(io::IO, ::MIME"text/plain", o::Prod{Op}) - for n in 1:length(o) - print(io, o[n]) - if n < length(o) - print(io, " ") - end - end - return nothing -end -show(io::IO, o::Prod{Op}) = show(io, MIME("text/plain"), o) - -function show(io::IO, m::MIME"text/plain", o::Scaled{C,O}) where {C,O<:Union{Op,Prod{Op}}} - c = coefficient(o) - if isreal(c) - c = real(c) - end - print(io, c) - print(io, " ") - show(io, m, argument(o)) - return nothing -end -show(io::IO, o::Scaled{C,Prod{Op}}) where {C} = show(io, MIME("text/plain"), o) - -function show(io::IO, ::MIME"text/plain", o::LazyApply.Adjoint{Op}) - print(io, o') - print(io, "'") - return nothing -end -show(io::IO, o::LazyApply.Adjoint{Op}) = show(io, MIME("text/plain"), o) diff --git a/src/lib/Ops/src/trotter.jl b/src/lib/Ops/src/trotter.jl deleted file mode 100644 index 9c3b1a5641..0000000000 --- a/src/lib/Ops/src/trotter.jl +++ /dev/null @@ -1,35 +0,0 @@ -using ..LazyApply: Applied, Sum - -abstract type ExpAlgorithm end - -struct Exact <: ExpAlgorithm end - -struct Trotter{Order} <: ExpAlgorithm - nsteps::Int -end -Trotter{Order}() where {Order} = Trotter{Order}(1) -Base.one(::Trotter{Order}) where {Order} = Trotter{Order}(1) - -function Base.exp(o::Sum; alg::ExpAlgorithm=Exact()) - return exp(alg, o) -end - -function Base.exp(::Exact, o::Sum) - return Applied(prod, ([Applied(exp, (o,))],)) -end - -function exp_one_step(trotter::Trotter{1}, o::Sum) - exp_o = Applied(prod, (map(exp, reverse(only(o.args))),)) - return exp_o -end - -function exp_one_step(trotter::Trotter{2}, o::Sum) - exp_o_order_1 = exp_one_step(Trotter{1}(), o / 2) - exp_o = reverse(exp_o_order_1) * exp_o_order_1 - return exp_o -end - -function Base.exp(trotter::Trotter, o::Sum) - expδo = exp_one_step(one(trotter), o / trotter.nsteps) - return expδo^trotter.nsteps -end diff --git a/src/lib/QuantumNumbers/src/QuantumNumbers.jl b/src/lib/QuantumNumbers/src/QuantumNumbers.jl deleted file mode 100644 index 1e2c436516..0000000000 --- a/src/lib/QuantumNumbers/src/QuantumNumbers.jl +++ /dev/null @@ -1,5 +0,0 @@ -module QuantumNumbers -include("arrow.jl") -include("qnval.jl") -include("qn.jl") -end diff --git a/src/lib/QuantumNumbers/src/arrow.jl b/src/lib/QuantumNumbers/src/arrow.jl deleted file mode 100644 index 68d4be8e11..0000000000 --- a/src/lib/QuantumNumbers/src/arrow.jl +++ /dev/null @@ -1,16 +0,0 @@ -""" -Arrow - -`enum` type that can take three values: `In`, `Out`, or `Neither`, representing a directionality -associated with an index, i.e. the index leg is directed into or out of a given tensor -""" -@enum Arrow In = -1 Out = 1 Neither = 0 - -""" - -(dir::Arrow) - -Reverse direction of a directed `Arrow`. 
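`trotter.jl` above builds `exp` of a `Sum` from products of one-step exponentials; for `Trotter{1}` with `nsteps = n` this is the classic splitting `exp(A + B) ≈ (exp(A/n) exp(B/n))^n`. A numeric check with plain matrices (a toy using `LinearAlgebra`, not the `Ops` layer):

```julia
using LinearAlgebra: opnorm

# First-order Trotter: the error shrinks like 1/n for noncommuting A and B.
A = [0.0 1.0; 1.0 0.0]    # Pauli x
B = [1.0 0.0; 0.0 -1.0]   # Pauli z
trotter1(A, B, n) = (exp(A / n) * exp(B / n))^n

@assert opnorm(trotter1(A, B, 1000) - exp(A + B)) < 1e-2
@assert opnorm(trotter1(A, B, 1000) - exp(A + B)) <
        opnorm(trotter1(A, B, 10) - exp(A + B))
```

The second-order step above (`reverse(exp_o_order_1) * exp_o_order_1` at half the step) symmetrizes this product, improving the error to O(1/n²).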
-""" -function Base.:(-)(dir::Arrow) - return Arrow(-Int(dir)) -end diff --git a/src/lib/QuantumNumbers/src/qn.jl b/src/lib/QuantumNumbers/src/qn.jl deleted file mode 100644 index dda6f9d3e9..0000000000 --- a/src/lib/QuantumNumbers/src/qn.jl +++ /dev/null @@ -1,339 +0,0 @@ -using ..ITensors: ITensors, name, val -using NDTensors: NDTensors -using ..SmallStrings: SmallString -using StaticArrays: MVector, SVector - -const maxQNs = 4 -const QNStorage = SVector{maxQNs,QNVal} -const MQNStorage = MVector{maxQNs,QNVal} - -""" -A QN object stores a collection of up to four -named values such as ("Sz",1) or ("N",0). -These values can include a third integer "m" -which makes them obey addition modulo m, for -example ("P",1,2) for a value obeying addition mod 2. -(The default is regular integer addition). - -Adding or subtracting pairs of QN objects performs -addition and subtraction element-wise on each of -the named values. If a name is missing from the -collection, its value is treated as zero. -""" -struct QN - data::QNStorage - function QN() - s = QNStorage(ntuple(_ -> ZeroVal, Val(maxQNs))) - return new(s) - end - QN(s::QNStorage) = new(s) -end - -QN(mqn::MQNStorage) = QN(QNStorage(mqn)) -QN(mqn::NTuple{N,QNVal}) where {N} = QN(QNStorage(mqn)) - -function Base.hash(obj::QN, h::UInt) - out = h - for qv in obj.data - if val(qv) != 0 - out = hash(qv, out) - end - end - - return out -end - -""" - QN(qvs...) - -Construct a QN from a set of up to four -named value tuples. - -Examples - -```julia -q = QN(("Sz",1)) -q = QN(("N",1),("Sz",-1)) -q = QN(("P",0,2),("Sz",0)). -``` -""" -function QN(qvs...) - m = MQNStorage(ntuple(_ -> ZeroVal, Val(maxQNs))) - for (n, qv) in enumerate(qvs) - m[n] = QNVal(qv...) - end - Nvals = length(qvs) - sort!(@view m[1:Nvals]; by=name, alg=InsertionSort) - for n in 1:(length(qvs) - 1) - if name(m[n]) == name(m[n + 1]) - error("Duplicate name \"$(name(m[n]))\" in QN") - end - end - return QN(QNStorage(m)) -end - -""" - QN(name,val::Int,modulus::Int=1) - -Construct a QN with a single named value -by providing the name, value, and optional -modulus. -""" -QN(name, val::Int, modulus::Int=1) = QN((name, val, modulus)) - -""" - QN(val::Int,modulus::Int=1) - -Construct a QN with a single unnamed value -(equivalent to the name being the empty string) -with optional modulus. -""" -QN(val::Int, modulus::Int=1) = QN(("", val, modulus)) - -data(qn::QN) = qn.data - -Base.getindex(q::QN, n::Int) = getindex(data(q), n) - -Base.length(qn::QN) = length(data(qn)) - -Base.lastindex(qn::QN) = length(qn) - -isactive(qn::QN) = isactive(qn[1]) - -function nactive(q::QN) - for n in 1:maxQNs - !isactive(q[n]) && (return n - 1) - end - return maxQNs -end - -function Base.iterate(qn::QN, state::Int=1) - (state > length(qn)) && return nothing - return (qn[state], state + 1) -end - -Base.keys(qn::QN) = keys(data(qn)) - -""" - val(q::QN,name) - -Get the value within the QN q -corresponding to the string `name` -""" -function ITensors.val(q::QN, name_) - sname = SmallString(name_) - for n in 1:maxQNs - name(q[n]) == sname && return val(q[n]) - end - return 0 -end - -""" - modulus(q::QN,name) - -Get the modulus within the QN q -corresponding to the string `name` -""" -function modulus(q::QN, name_) - sname = SmallString(name_) - for n in 1:maxQNs - name(q[n]) == sname && return modulus(q[n]) - end - return 0 -end - -""" - zero(q::QN) - -Returns a QN object containing -the same names as q, but with -all values set to zero. 
-""" -function Base.zero(qn::QN) - mqn = MQNStorage(undef) - for i in 1:length(mqn) - mqn[i] = zero(qn[i]) - end - return QN(mqn) -end - -function Base.:(*)(dir::Arrow, qn::QN) - mqn = MQNStorage(undef) - for i in 1:length(mqn) - mqn[i] = dir * qn[i] - end - return QN(mqn) -end - -Base.:(*)(qn::QN, dir::Arrow) = (dir * qn) - -function Base.:(-)(qn::QN) - mqn = MQNStorage(undef) - for i in 1:length(mqn) - mqn[i] = -qn[i] - end - return QN(mqn) -end - -function Base.:(+)(a::QN, b::QN) - !isactive(b[1]) && return a - ma = MQNStorage(data(a)) - @inbounds for nb in 1:maxQNs - !isactive(b[nb]) && break - bname = name(b[nb]) - for na in 1:maxQNs - aname = name(a[na]) - if !isactive(ma[na]) - ma[na] = b[nb] - break - elseif name(ma[na]) == bname - ma[na] = ma[na] + b[nb] - break - elseif (bname < aname) && (na == 1 || bname > name(ma[na - 1])) - for j in maxQNs:-1:(na + 1) - ma[j] = ma[j - 1] - end - ma[na] = b[nb] - break - end - end - end - return QN(QNStorage(ma)) -end - -Base.:(-)(a::QN, b::QN) = (a + (-b)) - -function hasname(qn::QN, qv_find::QNVal) - for qv in qn - name(qv) == name(qv_find) && return true - end - return false -end - -# Does not perform checks on if QN is already full, drops -# the last QNVal -# Rename insert? -function NDTensors.insertafter(qn::QN, qv::QNVal, pos::Int) - return QN(NDTensors.insertafter(Tuple(qn), qv, pos)[1:length(qn)]) -end - -function addqnval(qn::QN, qv_add::QNVal) - isactive(qn[end]) && - error("Cannot add QNVal, QN already contains maximum number of QNVals") - for (pos, qv) in enumerate(qn) - if qv_add < qv || !isactive(qv) - return NDTensors.insertafter(qn, qv_add, pos - 1) - end - end -end - -# Fills in the qns of qn1 that qn2 has but -# qn1 doesn't -function fillqns_from(qn1::QN, qn2::QN) - # If qn1 has no non-trivial qns, fill - # with qn2 - !isactive(qn1) && return zero(qn2) - !isactive(qn2) && return qn1 - for qv2 in qn2 - if !hasname(qn1, qv2) - qn1 = addqnval(qn1, zero(qv2)) - end - end - return qn1 -end - -# Make sure qn1 and qn2 have all of the same qns -function fillqns(qn1::QN, qn2::QN) - qn1_filled = fillqns_from(qn1, qn2) - qn2_filled = fillqns_from(qn2, qn1) - return qn1_filled, qn2_filled -end - -function isequal_assume_filled(qn1::QN, qn2::QN) - for (qv1, qv2) in zip(qn1, qn2) - modulus(qv1) != modulus(qv2) && error("QNVals must have same modulus to compare") - qv1 != qv2 && return false - end - return true -end - -function Base.:(==)(qn1::QN, qn2::QN; assume_filled=false) - if !assume_filled - qn1, qn2 = fillqns(qn1, qn2) - end - return isequal_assume_filled(qn1, qn2) -end - -function isless_assume_filled(qn1::QN, qn2::QN) - for n in 1:length(qn1) - val1 = val(qn1[n]) - val2 = val(qn2[n]) - val1 != val2 && return val1 < val2 - end - return false -end - -function Base.isless(qn1::QN, qn2::QN; assume_filled=false) - return <(qn1, qn2; assume_filled=assume_filled) -end - -function Base.:(<)(qn1::QN, qn2::QN; assume_filled=false) - if !assume_filled - qn1, qn2 = fillqns(qn1, qn2) - end - return isless_assume_filled(qn1, qn2) -end - -function have_same_qns(qn1::QN, qn2::QN) - for n in 1:length(qn1) - name(qn1[n]) != name(qn2[n]) && return false - end - return true -end - -function have_same_mods(qn1::QN, qn2::QN) - for n in 1:length(qn1) - modulus(qn1[n]) != modulus(qn2[n]) && return false - end - return true -end - -function removeqn(qn::QN, qn_name::String) - ss_qn_name = SmallString(qn_name) - # Find the location of the QNVal to remove - n_qn = nothing - for n in 1:length(qn) - qnval = qn[n] - if name(qnval) == ss_qn_name - n_qn = n - 
end - end - if isnothing(n_qn) - return qn - end - qn_data = data(qn) - for j in n_qn:(length(qn) - 1) - qn_data = Base.setindex(qn_data, qn_data[j + 1], j) - end - qn_data = Base.setindex(qn_data, QNVal(), length(qn)) - return QN(qn_data) -end - -function Base.show(io::IO, q::QN) - print(io, "QN(") - Na = nactive(q) - for n in 1:Na - v = q[n] - n > 1 && print(io, ",") - Na > 1 && print(io, "(") - if name(v) != SmallString("") - print(io, "\"$(name(v))\",") - end - print(io, "$(val(v))") - if modulus(v) != 1 - print(io, ",$(modulus(v))") - end - Na > 1 && print(io, ")") - end - return print(io, ")") -end diff --git a/src/lib/QuantumNumbers/src/qnval.jl b/src/lib/QuantumNumbers/src/qnval.jl deleted file mode 100644 index aaa218a4d6..0000000000 --- a/src/lib/QuantumNumbers/src/qnval.jl +++ /dev/null @@ -1,63 +0,0 @@ -using ..ITensors: ITensors, name, val -using ..SmallStrings: SmallString - -struct QNVal - name::SmallString - val::Int - modulus::Int - function QNVal(name, v::Int, m::Int=1) - am = abs(m) - if am > 1 - return new(SmallString(name), mod(v, am), m) - end - return new(SmallString(name), v, m) - end -end - -QNVal(v::Int, m::Int=1) = QNVal("", v, m) -QNVal() = QNVal("", 0, 0) - -ITensors.name(qv::QNVal) = qv.name -ITensors.val(qv::QNVal) = qv.val -modulus(qv::QNVal) = qv.modulus -isactive(qv::QNVal) = modulus(qv) != 0 -Base.:(<)(qv1::QNVal, qv2::QNVal) = (name(qv1) < name(qv2)) - -function qn_mod(val::Int, modulus::Int) - amod = abs(modulus) - amod <= 1 && return val - return mod(val, amod) -end - -function Base.:(-)(qv::QNVal) - return QNVal(name(qv), qn_mod(-val(qv), modulus(qv)), modulus(qv)) -end - -Base.zero(::Type{QNVal}) = QNVal() - -Base.zero(qv::QNVal) = QNVal(name(qv), 0, modulus(qv)) - -Base.:(*)(dir::Arrow, qv::QNVal) = QNVal(name(qv), Int(dir) * val(qv), modulus(qv)) - -Base.:(*)(qv::QNVal, dir::Arrow) = (dir * qv) - -function pm(qv1::QNVal, qv2::QNVal, fac::Int) - if name(qv1) != name(qv2) - error("Cannot add QNVals with different names \"$(name(qv1))\", \"$(name(qv2))\"") - end - if modulus(qv1) != modulus(qv2) - error( - "QNVals with matching name \"$(name(qv1))\" cannot have different modulus values " - ) - end - m1 = modulus(qv1) - if m1 == 1 || m1 == -1 - return QNVal(name(qv1), val(qv1) + fac * val(qv2), m1) - end - return QNVal(name(qv1), Base.mod(val(qv1) + fac * val(qv2), abs(m1)), m1) -end - -Base.:(+)(qv1::QNVal, qv2::QNVal) = pm(qv1, qv2, +1) -Base.:(-)(qv1::QNVal, qv2::QNVal) = pm(qv1, qv2, -1) - -const ZeroVal = QNVal() diff --git a/src/lib/SiteTypes/src/SiteTypes.jl b/src/lib/SiteTypes/src/SiteTypes.jl deleted file mode 100644 index a9457bfc6a..0000000000 --- a/src/lib/SiteTypes/src/SiteTypes.jl +++ /dev/null @@ -1,19 +0,0 @@ -module SiteTypes -# TODO: This is a bit strange, but required for backwards -# compatibility since `val` is also used by `QNVal`. -import ..ITensors: val -# TODO: Use explicit overloading with `NDTensors.space`. 
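`pm` above implements the `QNVal` addition rule: values whose modulus has absolute value greater than 1 add modularly, while modulus ±1 means ordinary integer addition. The rule on plain integers (a toy that does not model inactive, zero-modulus values):

```julia
# Modular-vs-ordinary addition, as in `pm(qv1, qv2, fac)` above.
addmod(v1::Int, v2::Int, m::Int) = abs(m) > 1 ? mod(v1 + v2, abs(m)) : v1 + v2

@assert addmod(1, 1, 2) == 0    # parity-style QN, e.g. ("P", v, 2)
@assert addmod(1, 1, 1) == 2    # ordinary additive QN, e.g. ("Sz", v)
@assert addmod(2, 2, 3) == 1    # mod-3 sector arithmetic
```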
-import ..ITensors: space -include("sitetype.jl") -include("SiteTypesChainRulesCoreExt.jl") -include("sitetypes/aliases.jl") -include("sitetypes/generic_sites.jl") -include("sitetypes/qubit.jl") -include("sitetypes/spinhalf.jl") -include("sitetypes/spinone.jl") -include("sitetypes/fermion.jl") -include("sitetypes/electron.jl") -include("sitetypes/tj.jl") -include("sitetypes/qudit.jl") -include("sitetypes/boson.jl") -end diff --git a/src/lib/SiteTypes/src/SiteTypesChainRulesCoreExt.jl b/src/lib/SiteTypes/src/SiteTypesChainRulesCoreExt.jl deleted file mode 100644 index 625d64e6ab..0000000000 --- a/src/lib/SiteTypes/src/SiteTypesChainRulesCoreExt.jl +++ /dev/null @@ -1,7 +0,0 @@ -module SiteTypesChainRulesCoreExt -using ChainRulesCore: @non_differentiable -using ..SiteTypes: SiteType, _sitetypes, has_fermion_string -@non_differentiable has_fermion_string(::AbstractString, ::Any) -@non_differentiable SiteType(::Any) -@non_differentiable _sitetypes(::Any) -end diff --git a/src/lib/SiteTypes/src/sitetype.jl b/src/lib/SiteTypes/src/sitetype.jl deleted file mode 100644 index f609eb2351..0000000000 --- a/src/lib/SiteTypes/src/sitetype.jl +++ /dev/null @@ -1,834 +0,0 @@ -using ChainRulesCore: @ignore_derivatives -using ..ITensors: - ITensors, Index, ITensor, itensor, dag, onehot, prime, product, swapprime, tags -using ..SmallStrings: SmallString -using ..TagSets: TagSets, TagSet, addtags, commontags - -@eval struct SiteType{T} - (f::Type{<:SiteType})() = $(Expr(:new, :f)) -end - -# Note that the complicated definition of -# SiteType above is a workaround for performance -# issues when creating parameterized types -# in Julia 1.4 and 1.5-beta. Ideally we -# can just use the following in the future: -# struct SiteType{T} -# end - -""" -SiteType is a parameterized type which allows -making Index tags into Julia types. Use cases -include overloading functions such as `op`, -`siteinds`, and `state` which generate custom -operators, Index arrays, and IndexVals associated -with Index objects having a certain tag. - -To make a SiteType type, you can use the string -macro notation: `SiteType"MyTag"` - -To make a SiteType value or object, you can use -the notation: `SiteType("MyTag")` - -There are currently a few built-in site types -recognized by `jl`. The system is easily extensible -by users. To add new operators to an existing site type, -or to create new site types, you can follow the instructions -[here](https://itensor.github.io/jl/stable/examples/Physics.html). - -The current built-in site types are: - -- `SiteType"S=1/2"` (or `SiteType"S=½"`) -- `SiteType"S=1"` -- `SiteType"Qubit"` -- `SiteType"Qudit"` -- `SiteType"Boson"` -- `SiteType"Fermion"` -- `SiteType"tJ"` -- `SiteType"Electron"` - -# Examples - -Tags on indices get turned into SiteTypes internally, and then -we search for overloads of functions like `op` and `siteind`. 
-For example: - -```julia -julia> s = siteind("S=1/2") -(dim=2|id=862|"S=1/2,Site") - -julia> @show op("Sz", s); -op(s, "Sz") = ITensor ord=2 -Dim 1: (dim=2|id=862|"S=1/2,Site")' -Dim 2: (dim=2|id=862|"S=1/2,Site") -NDTensors.Dense{Float64,Array{Float64,1}} - 2×2 - 0.5 0.0 - 0.0 -0.5 - -julia> @show op("Sx", s); -op(s, "Sx") = ITensor ord=2 -Dim 1: (dim=2|id=862|"S=1/2,Site")' -Dim 2: (dim=2|id=862|"S=1/2,Site") -NDTensors.Dense{Float64,Array{Float64,1}} - 2×2 - 0.0 0.5 - 0.5 0.0 - -julia> @show op("Sy", s); -op(s, "Sy") = ITensor ord=2 -Dim 1: (dim=2|id=862|"S=1/2,Site")' -Dim 2: (dim=2|id=862|"S=1/2,Site") -NDTensors.Dense{Complex{Float64},Array{Complex{Float64},1}} - 2×2 - 0.0 + 0.0im -0.0 - 0.5im - 0.0 + 0.5im 0.0 + 0.0im - -julia> s = siteind("Electron") -(dim=4|id=734|"Electron,Site") - -julia> @show op("Nup", s); -op(s, "Nup") = ITensor ord=2 -Dim 1: (dim=4|id=734|"Electron,Site")' -Dim 2: (dim=4|id=734|"Electron,Site") -NDTensors.Dense{Float64,Array{Float64,1}} - 4×4 - 0.0 0.0 0.0 0.0 - 0.0 1.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 1.0 -``` - -Many operators are available, for example: - -- `SiteType"S=1/2"`: `"Sz"`, `"Sx"`, `"Sy"`, `"S+"`, `"S-"`, ... -- `SiteType"Electron"`: `"Nup"`, `"Ndn"`, `"Nupdn"`, `"Ntot"`, `"Cup"`, - `"Cdagup"`, `"Cdn"`, `"Cdagdn"`, `"Sz"`, `"Sx"`, `"Sy"`, `"S+"`, `"S-"`, ... -- ... - -You can view the source code for the internal SiteType definitions -and operators that are defined [here](https://github.com/ITensor/jl/tree/main/src/physics/site_types). -""" -SiteType(s::AbstractString) = SiteType{SmallString(s)}() - -SiteType(t::Integer) = SiteType{SmallString(t)}() -SiteType(t::SmallString) = SiteType{t}() - -tag(::SiteType{T}) where {T} = T - -macro SiteType_str(s) - return SiteType{SmallString(s)} -end - -# Keep TagType defined for backwards -# compatibility; will be deprecated later -const TagType = SiteType -macro TagType_str(s) - return TagType{SmallString(s)} -end - -#--------------------------------------- -# -# op system -# -#--------------------------------------- - -@eval struct OpName{Name} - (f::Type{<:OpName})() = $(Expr(:new, :f)) -end - -# Note that the complicated definition of -# OpName above is a workaround for performance -# issues when creating parameterized types -# in Julia 1.4 and 1.5-beta. Ideally we -# can just use the following in the future: -# struct OpName{Name} -# end - -""" -OpName is a parameterized type which allows -making strings into Julia types for the purpose -of representing operator names. -The main use of OpName is overloading the -`op!` method which generates operators -for indices with certain tags such as "S=1/2". - -To make a OpName type, you can use the string -macro notation: `OpName"MyTag"`. - -To make an OpName value or object, you can use -the notation: `OpName("myop")` -""" -OpName(s::AbstractString) = OpName{Symbol(s)}() -OpName(s::Symbol) = OpName{s}() -# TODO: Avoid overloading `ITensors` version. -ITensors.name(::OpName{N}) where {N} = N - -macro OpName_str(s) - return OpName{Symbol(s)} -end - -# Default implementations of op and op! -op(::OpName; kwargs...) = nothing -op(::OpName, ::SiteType; kwargs...) = nothing -op(::OpName, ::SiteType, ::Index...; kwargs...) = nothing -function op( - ::OpName, ::SiteType, ::SiteType, sitetypes_inds::Union{SiteType,Index}...; kwargs... -) - return nothing -end -op!(::ITensor, ::OpName, ::SiteType, ::Index...; kwargs...) 
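`SiteType` and `OpName` above lift strings into type parameters so that operator definitions become method overloads rather than string comparisons. The same Val-style trick in a self-contained form (`Tag` and `describe` are hypothetical):

```julia
# String -> singleton type, so behavior attaches via multiple dispatch
# instead of if/else chains on the tag string.
struct Tag{T} end
Tag(s::AbstractString) = Tag{Symbol(s)}()

describe(::Tag{:qubit}) = "two-level site"
describe(::Tag) = "unknown tag"   # fallback, like `op` returning `nothing`

@assert describe(Tag("qubit")) == "two-level site"
@assert describe(Tag("boson")) == "unknown tag"
```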
= nothing -function op!( - ::ITensor, - ::OpName, - ::SiteType, - ::SiteType, - sitetypes_inds::Union{SiteType,Index}...; - kwargs..., -) - return nothing -end - -# Deprecated version, for backwards compatibility -op(::SiteType, ::Index, ::AbstractString; kwargs...) = nothing - -function _sitetypes(ts::TagSet) - Ntags = length(ts) - return SiteType[SiteType(TagSets.data(ts)[n]) for n in 1:Ntags] -end - -_sitetypes(i::Index) = _sitetypes(tags(i)) - -""" - op(opname::String, s::Index; kwargs...) - -Return an ITensor corresponding to the operator -named `opname` for the Index `s`. The operator -is constructed by calling an overload of either -the `op` or `op!` methods which take a `SiteType` -argument that corresponds to one of the tags of -the Index `s` and an `OpName"opname"` argument -that corresponds to the input operator name. - -Operator names can be combined using the `"*"` -symbol, for example `"S+*S-"` or `"Sz*Sz*Sz"`. -The result is an ITensor made by forming each operator -then contracting them together in a way corresponding -to the usual operator product or matrix multiplication. - -The `op` system is used by the OpSum -system to convert operator names into ITensors, -and can be used directly such as for applying -operators to MPS. - -# Example - -```julia -s = Index(2, "Site,S=1/2") -Sz = op("Sz", s) -``` - -To see all of the operator names defined for the site types included with -ITensor, please view the [source code](https://github.com/ITensor/jl/tree/main/src/physics/site_types) -for each site type. Note that some site types such as "S=1/2" and "Qubit" -are aliases for each other and share operator definitions. -""" -function op(name::AbstractString, s::Index...; adjoint::Bool=false, kwargs...) - name = strip(name) - # TODO: filter out only commons tags - # if there are multiple indices - commontags_s = commontags(s...) - - # first we handle the + and - algebra, which requires a space between ops to avoid clashing - name_split = nothing - @ignore_derivatives name_split = String.(split(name, " ")) - oplocs = findall(x -> x ∈ ("+", "-"), name_split) - - if !isempty(oplocs) - @ignore_derivatives !isempty(kwargs) && - error("Lazy algebra on parametric gates not allowed") - - # the string representation of algebra ops: ex ["+", "-", "+"] - labels = name_split[oplocs] - # assign coefficients to each term: ex [+1, -1, +1] - coeffs = [1, [(-1)^Int(label == "-") for label in labels]...] - - # grad the name of each operator block separated by an algebra op, and do so by - # making sure blank spaces between opnames are kept when building the new block. 
-    start, opnames = 0, String[]
-    for oploc in oplocs
-      finish = oploc
-      opnames = vcat(
-        opnames, [prod([name_split[k] * " " for k in (start + 1):(finish - 1)])]
-      )
-      start = oploc
-    end
-    opnames = vcat(
-      opnames, [prod([name_split[k] * " " for k in (start + 1):length(name_split)])]
-    )
-
-    # build the vector of blocks and sum
-    op_list = [
-      coeff * (op(opname, s...; kwargs...)) for (coeff, opname) in zip(coeffs, opnames)
-    ]
-    return sum(op_list)
-  end
-
-  # then the multiplication comes after
-  oploc = findfirst("*", name)
-  if !isnothing(oploc)
-    op1, op2 = nothing, nothing
-    @ignore_derivatives begin
-      op1 = name[1:prevind(name, oploc.start)]
-      op2 = name[nextind(name, oploc.start):end]
-      if !(op1[end] == ' ' && op2[1] == ' ')
-        @warn "($op1*$op2) composite op definition `A*B` deprecated: please use `A * B` instead (with spaces)"
-      end
-    end
-    return product(op(op1, s...; kwargs...), op(op2, s...; kwargs...))
-  end
-
-  common_stypes = _sitetypes(commontags_s)
-  @ignore_derivatives push!(common_stypes, SiteType("Generic"))
-  opn = OpName(name)
-
-  #
-  # Try calling a function of the form:
-  #   op(::OpName, ::SiteType, ::Index...; kwargs...)
-  #
-  for st in common_stypes
-    res = op(opn, st, s...; kwargs...)
-    if !isnothing(res)
-      adjoint && return swapprime(dag(res), 0 => 1)
-      return res
-    end
-  end
-
-  #
-  # Try calling a function of the form:
-  #   op(::OpName; kwargs...)
-  # for backward compatibility with previous
-  # gate system in PastaQ.jl
-  #
-  op_mat = op(opn; kwargs...)
-  if !isnothing(op_mat)
-    rs = reverse(s)
-    res = itensor(op_mat, prime.(rs)..., dag.(rs)...)
-    adjoint && return swapprime(dag(res), 0 => 1)
-    return res
-  end
-  #
-  # otherwise try calling a function of the form:
-  #   op(::OpName, ::SiteType; kwargs...)
-  # which returns a Julia matrix
-  #
-  for st in common_stypes
-    op_mat = op(opn, st; kwargs...)
-    if !isnothing(op_mat)
-      rs = reverse(s)
-      #return itensor(op_mat, prime.(rs)..., dag.(rs)...)
-      res = itensor(op_mat, prime.(rs)..., dag.(rs)...)
-      adjoint && return swapprime(dag(res), 0 => 1)
-      return res
-    end
-  end
-
-  # otherwise try calling a function of the form:
-  #   op!(::ITensor, ::OpName, ::SiteType, ::Index...; kwargs...)
-  #
-  Op = ITensor(prime.(s)..., dag.(s)...)
-  for st in common_stypes
-    op!(Op, opn, st, s...; kwargs...)
-    if !isempty(Op)
-      adjoint && return swapprime(dag(Op), 0 => 1)
-      return Op
-    end
-  end
-
-  if length(s) > 1
-    # No overloads for common tags found. It might be a
-    # case of making an operator with mixed site types,
-    # searching for overloads like:
-    #   op(::OpName,
-    #      ::SiteType...,
-    #      ::Index...;
-    #      kwargs...)
-    #   op!(::ITensor, ::OpName,
-    #       ::SiteType...,
-    #       ::Index...;
-    #       kwargs...)
-    stypes = _sitetypes.(s)
-
-    for st in Iterators.product(stypes...)
-      res = op(opn, st..., s...; kwargs...)
-      if !isnothing(res)
-        adjoint && return swapprime(dag(res), 0 => 1)
-        return res
-      end
-    end
-
-    Op = ITensor(prime.(s)..., dag.(s)...)
-    for st in Iterators.product(stypes...)
-      op!(Op, opn, st..., s...; kwargs...)
- if !isempty(Op) - adjoint && return swapprime(dag(Op), 0 => 1) - return Op - end - end - - throw( - ArgumentError( - "Overload of \"op\" or \"op!\" functions not found for operator name \"$name\" and Index tags: $(tags.(s)).", - ), - ) - end - - # - # otherwise try calling a function of the form: - # op(::SiteType, ::Index, ::AbstractString) - # - # (Note: this version is for backwards compatibility - # after version 0.1.10, and may be eventually - # deprecated) - # - for st in common_stypes - res = op(st, s[1], name; kwargs...) - if !isnothing(res) - adjoint && return dag(res) - return res - end - end - - return throw( - ArgumentError( - "Overload of \"op\" or \"op!\" functions not found for operator name \"$name\" and Index tags: $(tags.(s)).", - ), - ) -end - -op(name::AbstractString; kwargs...) = error("Must input indices when creating an `op`.") - -""" - op(X::AbstractArray, s::Index...) - op(M::Matrix, s::Index...) - -Given a matrix M and a set of indices s,t,... -return an operator ITensor with matrix elements given -by M and indices s, s', t, t' - -# Example - -```julia -julia> s = siteind("S=1/2") -(dim=2|id=575|"S=1/2,Site") - -julia> Sz = op([1/2 0; 0 -1/2],s) -ITensor ord=2 (dim=2|id=575|"S=1/2,Site")' (dim=2|id=575|"S=1/2,Site") -NDTensors.Dense{Float64, Vector{Float64}} - -julia> @show Sz -Sz = ITensor ord=2 -Dim 1: (dim=2|id=575|"S=1/2,Site")' -Dim 2: (dim=2|id=575|"S=1/2,Site") -NDTensors.Dense{Float64, Vector{Float64}} - 2×2 - 0.5 0.0 - 0.0 -0.5 -ITensor ord=2 (dim=2|id=575|"S=1/2,Site")' (dim=2|id=575|"S=1/2,Site") -NDTensors.Dense{Float64, Vector{Float64}} -``` -""" -op(X::AbstractArray, s::Index...) = itensor(X, prime.([s...]), dag.([s...])) - -op(opname, s::Vector{<:Index}; kwargs...) = op(opname, s...; kwargs...) - -op(s::Vector{<:Index}, opname; kwargs...) = op(opname, s...; kwargs...) - -# For backwards compatibility, version of `op` -# taking the arguments in the other order: -op(s::Index, opname; kwargs...) = op(opname, s; kwargs...) - -# To ease calling of other op overloads, -# allow passing a string as the op name -op(opname::AbstractString, t::SiteType; kwargs...) = op(OpName(opname), t; kwargs...) - -""" - op(opname::String,sites::Vector{<:Index},n::Int; kwargs...) - -Return an ITensor corresponding to the operator -named `opname` for the n'th Index in the array -`sites`. - -# Example - -```julia -s = siteinds("S=1/2", 4) -Sz2 = op("Sz", s, 2) -``` -""" -function op(opname, s::Vector{<:Index}, ns::NTuple{N,Integer}; kwargs...) where {N} - return op(opname, ntuple(n -> s[ns[n]], Val(N))...; kwargs...) -end - -function op(opname, s::Vector{<:Index}, ns::Vararg{Integer}; kwargs...) - return op(opname, s, ns; kwargs...) -end - -function op(s::Vector{<:Index}, opname, ns::Tuple{Vararg{Integer}}; kwargs...) - return op(opname, s, ns...; kwargs...) -end - -function op(s::Vector{<:Index}, opname, ns::Integer...; kwargs...) - return op(opname, s, ns; kwargs...) -end - -function op(s::Vector{<:Index}, opname, ns::Tuple{Vararg{Integer}}, kwargs::NamedTuple) - return op(opname, s, ns; kwargs...) -end - -function op(s::Vector{<:Index}, opname, ns::Integer, kwargs::NamedTuple) - return op(opname, s, (ns,); kwargs...) -end - -op(s::Vector{<:Index}, o::Tuple) = op(s, o...) - -op(o::Tuple, s::Vector{<:Index}) = op(s, o...) - -op(f::Function, args...; kwargs...) 
= f(op(args...; kwargs...)) - -function op( - s::Vector{<:Index}, - f::Function, - opname::AbstractString, - ns::Tuple{Vararg{Integer}}; - kwargs..., -) - return f(op(opname, s, ns...; kwargs...)) -end - -function op( - s::Vector{<:Index}, f::Function, opname::AbstractString, ns::Integer...; kwargs... -) - return f(op(opname, s, ns; kwargs...)) -end - -# Here, Ref is used to not broadcast over the vector of indices -# TODO: consider overloading broadcast for `op` with the example -# here: https://discourse.julialang.org/t/how-to-broadcast-over-only-certain-function-arguments/19274/5 -# so that `Ref` isn't needed. -ops(s::Vector{<:Index}, os::AbstractArray) = [op(oₙ, s) for oₙ in os] -ops(os::AbstractVector, s::Vector{<:Index}) = [op(oₙ, s) for oₙ in os] - -@doc """ - ops(s::Vector{<:Index}, os::Vector) - ops(os::Vector, s::Vector{<:Index}) - -Given a list of operators, create ITensors using the collection -of indices. - -# Examples - -```julia -s = siteinds("Qubit", 4) -os = [("H", 1), ("X", 2), ("CX", 2, 4)] -# gates = ops(s, os) -gates = ops(os, s) -``` -""" ops(::Vector{<:Index}, ::AbstractArray) - -#--------------------------------------- -# -# state system -# -#--------------------------------------- - -@eval struct StateName{Name} - (f::Type{<:StateName})() = $(Expr(:new, :f)) -end - -StateName(s::AbstractString) = StateName{SmallString(s)}() -StateName(s::SmallString) = StateName{s}() -# TODO: Avoid overloading `ITensors` version. -ITensors.name(::StateName{N}) where {N} = N - -macro StateName_str(s) - return StateName{SmallString(s)} -end - -state(::StateName, ::SiteType; kwargs...) = nothing -state(::StateName, ::SiteType, ::Index; kwargs...) = nothing -state!(::ITensor, ::StateName, ::SiteType, ::Index; kwargs...) = nothing - -# Syntax `state("Up", Index(2, "S=1/2"))` -state(sn::String, i::Index; kwargs...) = state(i, sn; kwargs...) - -""" - state(s::Index, name::String; kwargs...) - -Return an ITensor corresponding to the state -named `name` for the Index `s`. The returned -ITensor will have `s` as its only index. - -The terminology here is based on the idea of a -single-site state or wavefunction in physics. - -The `state` function is implemented for various -Index tags by overloading either the -`state` or `state!` methods which take a `SiteType` -argument corresponding to one of the tags of -the Index `s` and an `StateName"name"` argument -that corresponds to the input state name. - -The `state` system is used by the MPS type -to construct product-state MPS and for other purposes. - -# Example - -```julia -s = Index(2, "Site,S=1/2") -sup = state(s,"Up") -sdn = state(s,"Dn") -sxp = state(s,"X+") -sxm = state(s,"X-") -``` -""" -function state(s::Index, name::AbstractString; kwargs...)::ITensor - stypes = _sitetypes(s) - sname = StateName(name) - - # Try calling state(::StateName"Name",::SiteType"Tag",s::Index; kwargs...) - for st in stypes - v = state(sname, st, s; kwargs...) - if !isnothing(v) - if v isa ITensor - return v - else - # TODO: deprecate, only for backwards compatibility. - return itensor(v, s) - end - end - end - - # Try calling state!(::ITensor,::StateName"Name",::SiteType"Tag",s::Index;kwargs...) - T = ITensor(s) - for st in stypes - state!(T, sname, st, s; kwargs...) - !isempty(T) && return T - end - - # - # otherwise try calling a function of the form: - # state(::StateName"Name", ::SiteType"Tag"; kwargs...) - # which returns a Julia vector - # - for st in stypes - v = state(sname, st; kwargs...) 
-    !isnothing(v) && return itensor(v, s)
-  end
-
-  return throw(
-    ArgumentError(
-      "Overload of \"state\" or \"state!\" functions not found for state name \"$name\" and Index tags $(tags(s))",
-    ),
-  )
-end
-
-state(s::Index, n::Integer) = onehot(s => n)
-
-state(sset::Vector{<:Index}, j::Integer, st; kwargs...) = state(sset[j], st; kwargs...)
-
-#---------------------------------------
-#
-# val system
-#
-#---------------------------------------
-
-@eval struct ValName{Name}
-  (f::Type{<:ValName})() = $(Expr(:new, :f))
-end
-
-ValName(s::AbstractString) = ValName{SmallString(s)}()
-ValName(s::Symbol) = ValName{s}()
-# TODO: Avoid overloading `ITensors` version.
-ITensors.name(::ValName{N}) where {N} = N
-
-macro ValName_str(s)
-  return ValName{SmallString(s)}
-end
-
-val(::ValName, ::SiteType) = nothing
-val(::AbstractString, ::SiteType) = nothing
-
-"""
-    val(s::Index, name::String)
-
-Return an integer corresponding to the `name`
-of a certain value the Index `s` can take.
-In other words, the `val` function maps strings
-to specific integer values within the range `1:dim(s)`.
-
-The `val` function is implemented for various
-Index tags by overloading methods named `val`
-which take a `SiteType` argument corresponding to
-one of the tags of the Index `s` and a `ValName"name"`
-argument that corresponds to the input name.
-
-# Example
-
-```julia
-s = Index(2, "Site,S=1/2")
-val(s,"Up") == 1
-val(s,"Dn") == 2
-
-s = Index(2, "Site,Fermion")
-val(s,"Emp") == 1
-val(s,"Occ") == 2
-```
-"""
-function val(s::Index, name::AbstractString)::Int
-  stypes = _sitetypes(s)
-  sname = ValName(name)
-
-  # Try calling val(::ValName"Name", ::SiteType"Tag")
-  for st in stypes
-    res = val(sname, st)
-    !isnothing(res) && return res
-  end
-
-  return throw(
-    ArgumentError("Overload of \"val\" function not found for Index tags $(tags(s))")
-  )
-end
-
-val(s::Index, n::Integer) = n
-
-val(sset::Vector{<:Index}, j::Integer, st) = val(sset[j], st)
-
-#---------------------------------------
-#
-# siteind system
-#
-#---------------------------------------
-
-space(st::SiteType; kwargs...) = nothing
-
-space(st::SiteType, n::Int; kwargs...) = space(st; kwargs...)
-
-function space_error_message(st::SiteType)
-  return "Overload of \"space\", \"siteind\", or \"siteinds\" functions not found for Index tag: $(tag(st))"
-end
-
-function siteind(st::SiteType; addtags="", kwargs...)
-  sp = space(st; kwargs...)
-  isnothing(sp) && return nothing
-  return Index(sp, "Site, $(tag(st)), $addtags")
-end
-
-function siteind(st::SiteType, n; kwargs...)
-  s = siteind(st; kwargs...)
-  !isnothing(s) && return addtags(s, "n=$n")
-  sp = space(st, n; kwargs...)
-  isnothing(sp) && error(space_error_message(st))
-  return Index(sp, "Site, $(tag(st)), n=$n")
-end
-
-siteind(tag::String; kwargs...) = siteind(SiteType(tag); kwargs...)
-
-siteind(tag::String, n; kwargs...) = siteind(SiteType(tag), n; kwargs...)
-
-# Special case of `siteind` where integer (dim) provided
-# instead of a tag string
-#siteind(d::Integer, n::Integer; kwargs...) = Index(d, "Site,n=$n")
-function siteind(d::Integer, n::Integer; addtags="", kwargs...)
-  return Index(d, "Site,n=$n, $addtags")
-end
-
-#---------------------------------------
-#
-# siteinds system
-#
-#---------------------------------------
-
-siteinds(::SiteType, N; kwargs...) = nothing
-
-"""
-    siteinds(tag::String, N::Integer; kwargs...)
-
-Create an array of `N` physical site indices of type `tag`.
-Keyword arguments can be used to specify quantum number conservation, -see the `space` function corresponding to the site type `tag` for -supported keyword arguments. - -# Example - -```julia -N = 10 -s = siteinds("S=1/2", N; conserve_qns=true) -``` -""" -function siteinds(tag::String, N::Integer; kwargs...) - st = SiteType(tag) - - si = siteinds(st, N; kwargs...) - if !isnothing(si) - return si - end - - return [siteind(st, j; kwargs...) for j in 1:N] -end - -""" - siteinds(f::Function, N::Integer; kwargs...) - -Create an array of `N` physical site indices where the site type at site `n` is given -by `f(n)` (`f` should return a string). -""" -function siteinds(f::Function, N::Integer; kwargs...) - return [siteind(f(n), n; kwargs...) for n in 1:N] -end - -# Special case of `siteinds` where integer (dim) -# provided instead of a tag string -""" - siteinds(d::Integer, N::Integer; kwargs...) - -Create an array of `N` site indices, each of dimension `d`. - -# Keywords -- `addtags::String`: additional tags to be added to all indices -""" -function siteinds(d::Integer, N::Integer; kwargs...) - return [siteind(d, n; kwargs...) for n in 1:N] -end - -#--------------------------------------- -# -# has_fermion_string system -# -#--------------------------------------- - -has_fermion_string(operator::AbstractArray{<:Number}, s::Index; kwargs...)::Bool = false - -has_fermion_string(::OpName, ::SiteType) = nothing - -function has_fermion_string(opname::AbstractString, s::Index; kwargs...)::Bool - opname = strip(opname) - - # Interpret operator names joined by * - # as acting sequentially on the same site - starpos = findfirst(isequal('*'), opname) - if !isnothing(starpos) - op1 = opname[1:prevind(opname, starpos)] - op2 = opname[nextind(opname, starpos):end] - return xor(has_fermion_string(op1, s; kwargs...), has_fermion_string(op2, s; kwargs...)) - end - - Ntags = length(tags(s)) - stypes = _sitetypes(s) - opn = OpName(opname) - for st in stypes - res = has_fermion_string(opn, st) - !isnothing(res) && return res - end - return false -end diff --git a/src/lib/SiteTypes/src/sitetypes/aliases.jl b/src/lib/SiteTypes/src/sitetypes/aliases.jl deleted file mode 100644 index 727ad7f990..0000000000 --- a/src/lib/SiteTypes/src/sitetypes/aliases.jl +++ /dev/null @@ -1,40 +0,0 @@ -alias(::OpName"c") = OpName"C"() -alias(::OpName"cdag") = OpName"Cdag"() -alias(::OpName"c†") = OpName"Cdag"() -alias(::OpName"n") = OpName"N"() -alias(::OpName"a") = OpName"A"() -alias(::OpName"adag") = OpName"Adag"() -alias(::OpName"a↑") = OpName"Aup"() -alias(::OpName"a↓") = OpName"Adn"() -alias(::OpName"a†↓") = OpName"Adagdn"() -alias(::OpName"a†↑") = OpName"Adagup"() -alias(::OpName"a†") = OpName"Adag"() -alias(::OpName"c↑") = OpName"Cup"() -alias(::OpName"c↓") = OpName"Cdn"() -alias(::OpName"c†↑") = OpName"Cdagup"() -alias(::OpName"c†↓") = OpName"Cdagdn"() -alias(::OpName"n↑") = OpName"Nup"() -alias(::OpName"n↓") = OpName"Ndn"() -alias(::OpName"n↑↓") = OpName"Nupdn"() -alias(::OpName"ntot") = OpName"Ntot"() -alias(::OpName"F↑") = OpName"Fup"() -alias(::OpName"F↓") = OpName"Fdn"() -alias(::OpName"I") = OpName"Id"() - -alias(::OpName"S²") = OpName"S2"() -alias(::OpName"Sᶻ") = OpName"Sz"() -alias(::OpName"Sʸ") = OpName"Sy"() -alias(::OpName"iSʸ") = OpName"iSy"() -alias(::OpName"Sˣ") = OpName"Sx"() -alias(::OpName"S⁻") = OpName"S-"() -alias(::OpName"Sminus") = OpName"S-"() -alias(::OpName"Sm") = OpName"S-"() -alias(::OpName"S⁺") = OpName"S+"() -alias(::OpName"Splus") = OpName"S+"() -alias(::OpName"Sp") = OpName"S+"() 
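The alias definitions above (which continue just below) map alternate spellings onto canonical operator names, so both forms dispatch to one definition. As a minimal sketch of how this interacts with the `op`/`state`/`val` overload search documented earlier — assuming the pre-rewrite ITensors.jl API that this patch removes:

```julia
using ITensors

s = siteind("S=1/2")   # Index tagged "S=1/2,Site"
Sz = op("Sz", s)       # dispatches on OpName"Sz" and SiteType"S=1/2"
Sz2 = op("Sᶻ", s)      # alias(::OpName"Sᶻ") redirects to OpName"Sz"
@show Sz ≈ Sz2         # expected: true

up = state(s, "Up")    # single-site wavefunction ITensor with index s
@show val(s, "Up")     # string-to-integer value lookup; expected: 1
```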
-alias(::OpName"projUp") = OpName"ProjUp"() -alias(::OpName"projDn") = OpName"ProjDn"() - -alias(::OpName"Proj0") = OpName"ProjUp"() -alias(::OpName"Proj1") = OpName"ProjDn"() -alias(::OpName"Rn̂") = OpName"Rn"() diff --git a/src/lib/SiteTypes/src/sitetypes/boson.jl b/src/lib/SiteTypes/src/sitetypes/boson.jl deleted file mode 100644 index 387edad277..0000000000 --- a/src/lib/SiteTypes/src/sitetypes/boson.jl +++ /dev/null @@ -1,32 +0,0 @@ - -alias(::SiteType"Boson") = SiteType"Qudit"() - -""" - space(::SiteType"Boson"; - dim = 2, - conserve_qns = false, - conserve_number = false, - qnname_number = "Number") - -Create the Hilbert space for a site of type "Boson". - -Optionally specify the conserved symmetries and their quantum number labels. -""" -space(st::SiteType"Boson"; kwargs...) = space(alias(st); kwargs...) - -val(vn::ValName, st::SiteType"Boson") = val(vn, alias(st)) - -function state(sn::StateName, st::SiteType"Boson", s::Index; kwargs...) - return state(sn, alias(st), s; kwargs...) -end - -function op(on::OpName, st::SiteType"Boson", ds::Int...; kwargs...) - return op(on, alias(st), ds...; kwargs...) -end - -function op(on::OpName, st::SiteType"Boson", s1::Index, s_tail::Index...; kwargs...) - rs = reverse((s1, s_tail...)) - ds = dim.(rs) - opmat = op(on, st, ds...; kwargs...) - return itensor(opmat, prime.(rs)..., dag.(rs)...) -end diff --git a/src/lib/SiteTypes/src/sitetypes/electron.jl b/src/lib/SiteTypes/src/sitetypes/electron.jl deleted file mode 100644 index d8fd90848e..0000000000 --- a/src/lib/SiteTypes/src/sitetypes/electron.jl +++ /dev/null @@ -1,336 +0,0 @@ -""" - space(::SiteType"Electron"; - conserve_qns = false, - conserve_sz = conserve_qns, - conserve_nf = conserve_qns, - conserve_nfparity = conserve_qns, - qnname_sz = "Sz", - qnname_nf = "Nf", - qnname_nfparity = "NfParity") - -Create the Hilbert space for a site of type "Electron". - -Optionally specify the conserved symmetries and their quantum number labels. 
-""" -function space( - ::SiteType"Electron"; - conserve_qns=false, - conserve_sz=conserve_qns, - conserve_nf=conserve_qns, - conserve_nfparity=conserve_qns, - qnname_sz="Sz", - qnname_nf="Nf", - qnname_nfparity="NfParity", - # Deprecated - conserve_parity=nothing, -) - if !isnothing(conserve_parity) - conserve_nfparity = conserve_parity - end - if conserve_sz && conserve_nf - return [ - QN((qnname_nf, 0, -1), (qnname_sz, 0)) => 1 - QN((qnname_nf, 1, -1), (qnname_sz, +1)) => 1 - QN((qnname_nf, 1, -1), (qnname_sz, -1)) => 1 - QN((qnname_nf, 2, -1), (qnname_sz, 0)) => 1 - ] - elseif conserve_nf - return [ - QN(qnname_nf, 0, -1) => 1 - QN(qnname_nf, 1, -1) => 2 - QN(qnname_nf, 2, -1) => 1 - ] - elseif conserve_sz - return [ - QN((qnname_sz, 0), (qnname_nfparity, 0, -2)) => 1 - QN((qnname_sz, +1), (qnname_nfparity, 1, -2)) => 1 - QN((qnname_sz, -1), (qnname_nfparity, 1, -2)) => 1 - QN((qnname_sz, 0), (qnname_nfparity, 0, -2)) => 1 - ] - elseif conserve_nfparity - return [ - QN(qnname_nfparity, 0, -2) => 1 - QN(qnname_nfparity, 1, -2) => 2 - QN(qnname_nfparity, 0, -2) => 1 - ] - end - return 4 -end - -val(::ValName"Emp", ::SiteType"Electron") = 1 -val(::ValName"Up", ::SiteType"Electron") = 2 -val(::ValName"Dn", ::SiteType"Electron") = 3 -val(::ValName"UpDn", ::SiteType"Electron") = 4 -val(::ValName"0", st::SiteType"Electron") = val(ValName("Emp"), st) -val(::ValName"↑", st::SiteType"Electron") = val(ValName("Up"), st) -val(::ValName"↓", st::SiteType"Electron") = val(ValName("Dn"), st) -val(::ValName"↑↓", st::SiteType"Electron") = val(ValName("UpDn"), st) - -state(::StateName"Emp", ::SiteType"Electron") = [1.0, 0, 0, 0] -state(::StateName"Up", ::SiteType"Electron") = [0.0, 1, 0, 0] -state(::StateName"Dn", ::SiteType"Electron") = [0.0, 0, 1, 0] -state(::StateName"UpDn", ::SiteType"Electron") = [0.0, 0, 0, 1] -state(::StateName"0", st::SiteType"Electron") = state(StateName("Emp"), st) -state(::StateName"↑", st::SiteType"Electron") = state(StateName("Up"), st) -state(::StateName"↓", st::SiteType"Electron") = state(StateName("Dn"), st) -state(::StateName"↑↓", st::SiteType"Electron") = state(StateName("UpDn"), st) - -function op(::OpName"Nup", ::SiteType"Electron") - return [ - 0.0 0.0 0.0 0.0 - 0.0 1.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 1.0 - ] -end -function op(on::OpName"n↑", st::SiteType"Electron") - return op(alias(on), st) -end - -function op(::OpName"Ndn", ::SiteType"Electron") - return [ - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 1.0 0.0 - 0.0 0.0 0.0 1.0 - ] -end -function op(on::OpName"n↓", st::SiteType"Electron") - return op(alias(on), st) -end - -function op(::OpName"Nupdn", ::SiteType"Electron") - return [ - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 1.0 - ] -end -function op(on::OpName"n↑↓", st::SiteType"Electron") - return op(alias(on), st) -end - -function op(::OpName"Ntot", ::SiteType"Electron") - return [ - 0.0 0.0 0.0 0.0 - 0.0 1.0 0.0 0.0 - 0.0 0.0 1.0 0.0 - 0.0 0.0 0.0 2.0 - ] -end -function op(on::OpName"ntot", st::SiteType"Electron") - return op(alias(on), st) -end - -function op(::OpName"Cup", ::SiteType"Electron") - return [ - 0.0 1.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 1.0 - 0.0 0.0 0.0 0.0 - ] -end -function op(on::OpName"c↑", st::SiteType"Electron") - return op(alias(on), st) -end - -function op(::OpName"Cdagup", ::SiteType"Electron") - return [ - 0.0 0.0 0.0 0.0 - 1.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 1.0 0.0 - ] -end -function op(on::OpName"c†↑", st::SiteType"Electron") - return op(alias(on), st) -end - -function op(::OpName"Cdn", 
::SiteType"Electron") - return [ - 0.0 0.0 1.0 0.0 - 0.0 0.0 0.0 -1.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - ] -end -function op(on::OpName"c↓", st::SiteType"Electron") - return op(alias(on), st) -end - -function op(::OpName"Cdagdn", ::SiteType"Electron") - return [ - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 1.0 0.0 0.0 0.0 - 0.0 -1.0 0.0 0.0 - ] -end -function op(::OpName"c†↓", st::SiteType"Electron") - return op(OpName("Cdagdn"), st) -end - -function op(::OpName"Aup", ::SiteType"Electron") - return [ - 0.0 1.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 1.0 - 0.0 0.0 0.0 0.0 - ] -end -function op(::OpName"a↑", st::SiteType"Electron") - return op(OpName("Aup"), st) -end - -function op(::OpName"Adagup", ::SiteType"Electron") - return [ - 0.0 0.0 0.0 0.0 - 1.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 1.0 0.0 - ] -end -function op(::OpName"a†↑", st::SiteType"Electron") - return op(OpName("Adagup"), st) -end - -function op(::OpName"Adn", ::SiteType"Electron") - return [ - 0.0 0.0 1.0 0.0 - 0.0 0.0 0.0 1.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - ] -end -function op(::OpName"a↓", st::SiteType"Electron") - return op(OpName("Adn"), st) -end - -function op(::OpName"Adagdn", ::SiteType"Electron") - return [ - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 1.0 0.0 0.0 0.0 - 0.0 1.0 0.0 0.0 - ] -end -function op(::OpName"a†↓", st::SiteType"Electron") - return op(OpName("Adagdn"), st) -end - -function op(::OpName"F", ::SiteType"Electron") - return [ - 1.0 0.0 0.0 0.0 - 0.0 -1.0 0.0 0.0 - 0.0 0.0 -1.0 0.0 - 0.0 0.0 0.0 1.0 - ] -end - -function op(::OpName"Fup", ::SiteType"Electron") - return [ - 1.0 0.0 0.0 0.0 - 0.0 -1.0 0.0 0.0 - 0.0 0.0 1.0 0.0 - 0.0 0.0 0.0 -1.0 - ] -end -function op(::OpName"F↑", st::SiteType"Electron") - return op(OpName("Fup"), st) -end - -function op(::OpName"Fdn", ::SiteType"Electron") - return [ - 1.0 0.0 0.0 0.0 - 0.0 1.0 0.0 0.0 - 0.0 0.0 -1.0 0.0 - 0.0 0.0 0.0 -1.0 - ] -end -function op(::OpName"F↓", st::SiteType"Electron") - return op(OpName("Fdn"), st) -end - -function op(::OpName"Sz", ::SiteType"Electron") - #Op[s' => 2, s => 2] = +0.5 - #return Op[s' => 3, s => 3] = -0.5 - return [ - 0.0 0.0 0.0 0.0 - 0.0 0.5 0.0 0.0 - 0.0 0.0 -0.5 0.0 - 0.0 0.0 0.0 0.0 - ] -end - -function op(::OpName"Sᶻ", st::SiteType"Electron") - return op(OpName("Sz"), st) -end - -function op(::OpName"Sx", ::SiteType"Electron") - return [ - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.5 0.0 - 0.0 0.5 0.0 0.0 - 0.0 0.0 0.0 0.0 - ] -end - -function op(::OpName"Sˣ", st::SiteType"Electron") - return op(OpName("Sx"), st) -end - -function op(::OpName"S+", ::SiteType"Electron") - return [ - 0.0 0.0 0.0 0.0 - 0.0 0.0 1.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - ] -end - -function op(::OpName"S⁺", st::SiteType"Electron") - return op(OpName("S+"), st) -end -function op(::OpName"Sp", st::SiteType"Electron") - return op(OpName("S+"), st) -end -function op(::OpName"Splus", st::SiteType"Electron") - return op(OpName("S+"), st) -end - -function op(::OpName"S-", ::SiteType"Electron") - return [ - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 1.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - ] -end - -function op(::OpName"S⁻", st::SiteType"Electron") - return op(OpName("S-"), st) -end -function op(::OpName"Sm", st::SiteType"Electron") - return op(OpName("S-"), st) -end -function op(::OpName"Sminus", st::SiteType"Electron") - return op(OpName("S-"), st) -end - -has_fermion_string(::OpName"Cup", ::SiteType"Electron") = true -function has_fermion_string(on::OpName"c↑", st::SiteType"Electron") - return has_fermion_string(alias(on), st) -end -has_fermion_string(::OpName"Cdagup", 
::SiteType"Electron") = true -function has_fermion_string(on::OpName"c†↑", st::SiteType"Electron") - return has_fermion_string(alias(on), st) -end -has_fermion_string(::OpName"Cdn", ::SiteType"Electron") = true -function has_fermion_string(on::OpName"c↓", st::SiteType"Electron") - return has_fermion_string(alias(on), st) -end -has_fermion_string(::OpName"Cdagdn", ::SiteType"Electron") = true -function has_fermion_string(on::OpName"c†↓", st::SiteType"Electron") - return has_fermion_string(alias(on), st) -end diff --git a/src/lib/SiteTypes/src/sitetypes/fermion.jl b/src/lib/SiteTypes/src/sitetypes/fermion.jl deleted file mode 100644 index 008bdf0877..0000000000 --- a/src/lib/SiteTypes/src/sitetypes/fermion.jl +++ /dev/null @@ -1,111 +0,0 @@ - -""" - space(::SiteType"Fermion"; - conserve_qns=false, - conserve_nf=conserve_qns, - conserve_nfparity=conserve_qns, - qnname_nf = "Nf", - qnname_nfparity = "NfParity", - qnname_sz = "Sz", - conserve_sz = false) - -Create the Hilbert space for a site of type "Fermion". - -Optionally specify the conserved symmetries and their quantum number labels. -""" -function space( - ::SiteType"Fermion"; - conserve_qns=false, - conserve_nf=conserve_qns, - conserve_nfparity=conserve_qns, - qnname_nf="Nf", - qnname_nfparity="NfParity", - qnname_sz="Sz", - conserve_sz=false, - # Deprecated - conserve_parity=nothing, -) - if !isnothing(conserve_parity) - conserve_nfparity = conserve_parity - end - if conserve_sz == true - conserve_sz = "Up" - end - if conserve_nf && conserve_sz == "Up" - zer = QN((qnname_nf, 0, -1), (qnname_sz, 0)) => 1 - one = QN((qnname_nf, 1, -1), (qnname_sz, 1)) => 1 - return [zer, one] - elseif conserve_nf && conserve_sz == "Dn" - zer = QN((qnname_nf, 0, -1), (qnname_sz, 0)) => 1 - one = QN((qnname_nf, 1, -1), (qnname_sz, -1)) => 1 - return [zer, one] - elseif conserve_nfparity && conserve_sz == "Up" - zer = QN((qnname_nfparity, 0, -2), (qnname_sz, 0)) => 1 - one = QN((qnname_nfparity, 1, -2), (qnname_sz, 1)) => 1 - return [zer, one] - elseif conserve_nfparity && conserve_sz == "Dn" - zer = QN((qnname_nfparity, 0, -2), (qnname_sz, 0)) => 1 - one = QN((qnname_nfparity, 1, -2), (qnname_sz, -1)) => 1 - return [zer, one] - elseif conserve_nf - zer = QN(qnname_nf, 0, -1) => 1 - one = QN(qnname_nf, 1, -1) => 1 - return [zer, one] - elseif conserve_nfparity - zer = QN(qnname_nfparity, 0, -2) => 1 - one = QN(qnname_nfparity, 1, -2) => 1 - return [zer, one] - end - return 2 -end - -val(::ValName"Emp", ::SiteType"Fermion") = 1 -val(::ValName"Occ", ::SiteType"Fermion") = 2 -val(::ValName"0", st::SiteType"Fermion") = val(ValName("Emp"), st) -val(::ValName"1", st::SiteType"Fermion") = val(ValName("Occ"), st) - -state(::StateName"Emp", ::SiteType"Fermion") = [1.0 0.0] -state(::StateName"Occ", ::SiteType"Fermion") = [0.0 1.0] -state(::StateName"0", st::SiteType"Fermion") = state(StateName("Emp"), st) -state(::StateName"1", st::SiteType"Fermion") = state(StateName("Occ"), st) - -function op!(Op::ITensor, ::OpName"N", ::SiteType"Fermion", s::Index) - return Op[s' => 2, s => 2] = 1.0 -end -function op!(Op::ITensor, on::OpName"n", st::SiteType"Fermion", s::Index) - return op!(Op, alias(on), st, s) -end - -function op!(Op::ITensor, ::OpName"C", ::SiteType"Fermion", s::Index) - return Op[s' => 1, s => 2] = 1.0 -end -function op!(Op::ITensor, on::OpName"c", st::SiteType"Fermion", s::Index) - return op!(Op, alias(on), st, s) -end - -function op!(Op::ITensor, ::OpName"Cdag", ::SiteType"Fermion", s::Index) - return Op[s' => 2, s => 1] = 1.0 -end -function op!(Op::ITensor, 
on::OpName"c†", st::SiteType"Fermion", s::Index) - return op!(Op, alias(on), st, s) -end -function op!(Op::ITensor, on::OpName"cdag", st::SiteType"Fermion", s::Index) - return op!(Op, alias(on), st, s) -end - -function op!(Op::ITensor, ::OpName"F", ::SiteType"Fermion", s::Index) - Op[s' => 1, s => 1] = +1.0 - return Op[s' => 2, s => 2] = -1.0 -end - -has_fermion_string(::OpName"C", ::SiteType"Fermion") = true -function has_fermion_string(on::OpName"c", st::SiteType"Fermion") - return has_fermion_string(alias(on), st) -end -has_fermion_string(::OpName"Cdag", ::SiteType"Fermion") = true -function has_fermion_string(on::OpName"c†", st::SiteType"Fermion") - return has_fermion_string(alias(on), st) -end -function has_fermion_string(on::OpName"cdag", st::SiteType"Fermion") - return has_fermion_string(alias(on), st) -end diff --git a/src/lib/SiteTypes/src/sitetypes/generic_sites.jl b/src/lib/SiteTypes/src/sitetypes/generic_sites.jl deleted file mode 100644 index 6c7ab60320..0000000000 --- a/src/lib/SiteTypes/src/sitetypes/generic_sites.jl +++ /dev/null @@ -1,49 +0,0 @@ -using LinearAlgebra: I -using NDTensors: NDTensors, dim, tensor -using ..ITensors: ITensor, itensor, settensor! - -function op!( - o::ITensor, ::OpName"Id", ::SiteType"Generic", s1::Index, sn::Index...; eltype=Float64 -) - s = (s1, sn...) - n = prod(dim.(s)) - t = itensor(Matrix(one(eltype) * I, n, n), prime.(s)..., dag.(s)...) - return settensor!(o, tensor(t)) -end - -function op!(o::ITensor, on::OpName"I", st::SiteType"Generic", s::Index...; kwargs...) - return op!(o, alias(on), st, s...; kwargs...) -end - -function op!(o::ITensor, ::OpName"F", st::SiteType"Generic", s::Index; kwargs...) - return op!(o, OpName("Id"), st, s; kwargs...) -end - -function default_random_matrix(eltype::Type, s::Index...) - n = prod(dim.(s)) - return randn(eltype, n, n) -end - -# Haar-random unitary -# -# Reference: -# Section 4.6 -# http://math.mit.edu/~edelman/publications/random_matrix_theory.pdf -function op!( - o::ITensor, - ::OpName"RandomUnitary", - ::SiteType"Generic", - s1::Index, - sn::Index...; - eltype=ComplexF64, - random_matrix=default_random_matrix(eltype, s1, sn...), -) - s = (s1, sn...) - Q, _ = NDTensors.qr_positive(random_matrix) - t = itensor(Q, prime.(s)..., dag.(s)...) - return settensor!(o, tensor(t)) -end - -function op!(o::ITensor, ::OpName"randU", st::SiteType"Generic", s::Index...; kwargs...) - return op!(o, OpName("RandomUnitary"), st, s...; kwargs...) -end diff --git a/src/lib/SiteTypes/src/sitetypes/qubit.jl b/src/lib/SiteTypes/src/sitetypes/qubit.jl deleted file mode 100644 index e07bd547f0..0000000000 --- a/src/lib/SiteTypes/src/sitetypes/qubit.jl +++ /dev/null @@ -1,502 +0,0 @@ -using ..ITensors: ITensors - -# -# Qubit site type -# - -# Define Qubit space in terms of -# Qubit/2 space, but use different -# defaults for QN names - -""" - space(::SiteType"Qubit"; - conserve_qns = false, - conserve_parity = conserve_qns, - conserve_number = false, - qnname_parity = "Parity", - qnname_number = "Number") - -Create the Hilbert space for a site of type "Qubit". - -Optionally specify the conserved symmetries and their quantum number labels. 
-""" -function space( - ::SiteType"Qubit"; - conserve_qns=false, - conserve_parity=conserve_qns, - conserve_number=false, - qnname_parity="Parity", - qnname_number="Number", -) - if conserve_number && conserve_parity - return [ - QN((qnname_number, 0), (qnname_parity, 0, 2)) => 1, - QN((qnname_number, 1), (qnname_parity, 1, 2)) => 1, - ] - elseif conserve_number - return [QN(qnname_number, 0) => 1, QN(qnname_number, 1) => 1] - elseif conserve_parity - return [QN(qnname_parity, 0, 2) => 1, QN(qnname_parity, 1, 2) => 1] - end - return 2 -end - -val(::ValName"0", ::SiteType"Qubit") = 1 -val(::ValName"1", ::SiteType"Qubit") = 2 -val(::ValName"Up", ::SiteType"Qubit") = 1 -val(::ValName"Dn", ::SiteType"Qubit") = 2 -val(::ValName"↑", ::SiteType"Qubit") = 1 -val(::ValName"↓", ::SiteType"Qubit") = 2 - -state(::StateName"0", ::SiteType"Qubit") = [1.0, 0.0] -state(::StateName"1", ::SiteType"Qubit") = [0.0, 1.0] -state(::StateName"+", ::SiteType"Qubit") = [1.0, 1.0] / √2 -state(::StateName"-", ::SiteType"Qubit") = [1.0, -1.0] / √2 -state(::StateName"i", ::SiteType"Qubit") = [1.0, im] / √2 -state(::StateName"-i", ::SiteType"Qubit") = [1.0, -im] / √2 -state(::StateName"Up", t::SiteType"Qubit") = state(StateName("0"), t) -state(::StateName"Dn", t::SiteType"Qubit") = state(StateName("1"), t) -state(::StateName"↑", t::SiteType"Qubit") = state(StateName("0"), t) -state(::StateName"↓", t::SiteType"Qubit") = state(StateName("1"), t) - -# Pauli eingenstates -state(::StateName"X+", t::SiteType"Qubit") = state(StateName("+"), t) -state(::StateName"Xp", t::SiteType"Qubit") = state(StateName("+"), t) -state(::StateName"X-", t::SiteType"Qubit") = state(StateName("-"), t) -state(::StateName"Xm", t::SiteType"Qubit") = state(StateName("-"), t) - -state(::StateName"Y+", t::SiteType"Qubit") = state(StateName("i"), t) -state(::StateName"Yp", t::SiteType"Qubit") = state(StateName("i"), t) -state(::StateName"Y-", t::SiteType"Qubit") = state(StateName("-i"), t) -state(::StateName"Ym", t::SiteType"Qubit") = state(StateName("-i"), t) - -state(::StateName"Z+", t::SiteType"Qubit") = state(StateName("0"), t) -state(::StateName"Zp", t::SiteType"Qubit") = state(StateName("0"), t) -state(::StateName"Z-", t::SiteType"Qubit") = state(StateName("1"), t) -state(::StateName"Zm", t::SiteType"Qubit") = state(StateName("1"), t) - -# SIC-POVMs -state(::StateName"Tetra1", t::SiteType"Qubit") = state(StateName("Z+"), t) -state(::StateName"Tetra2", t::SiteType"Qubit") = [ - 1 / √3 - √2 / √3 -] -state(::StateName"Tetra3", t::SiteType"Qubit") = [ - 1 / √3 - √2 / √3 * exp(im * 2π / 3) -] -state(::StateName"Tetra4", t::SiteType"Qubit") = [ - 1 / √3 - √2 / √3 * exp(im * 4π / 3) -] - -# -# 1-Qubit gates -# -op(::OpName"X", ::SiteType"Qubit") = [ - 0 1 - 1 0 -] - -op(::OpName"σx", t::SiteType"Qubit") = op("X", t) - -op(::OpName"σ1", t::SiteType"Qubit") = op("X", t) - -op(::OpName"Y", ::SiteType"Qubit") = [ - 0.0 -1.0im - 1.0im 0.0 -] - -op(::OpName"σy", t::SiteType"Qubit") = op("Y", t) - -op(::OpName"σ2", t::SiteType"Qubit") = op("Y", t) - -op(::OpName"iY", ::SiteType"Qubit") = [ - 0 1 - -1 0 -] -op(::OpName"iσy", t::SiteType"Qubit") = op("iY", t) - -op(::OpName"iσ2", t::SiteType"Qubit") = op("iY", t) - -op(::OpName"Z", ::SiteType"Qubit") = [ - 1 0 - 0 -1 -] - -op(::OpName"σz", t::SiteType"Qubit") = op("Z", t) - -op(::OpName"σ3", t::SiteType"Qubit") = op("Z", t) - -function op(::OpName"√NOT", ::SiteType"Qubit") - return [ - (1 + im)/2 (1 - im)/2 - (1 - im)/2 (1 + im)/2 - ] -end - -op(::OpName"√X", t::SiteType"Qubit") = op("√NOT", t) - 
-op(::OpName"H", ::SiteType"Qubit") = [ - 1/sqrt(2) 1/sqrt(2) - 1/sqrt(2) -1/sqrt(2) -] - -# Rϕ with ϕ = π/2 -op(::OpName"Phase", ::SiteType"Qubit"; ϕ::Number=π / 2) = [ - 1 0 - 0 exp(im * ϕ) -] - -op(::OpName"P", t::SiteType"Qubit"; kwargs...) = op("Phase", t; kwargs...) - -op(::OpName"S", t::SiteType"Qubit") = op("Phase", t; ϕ=π / 2) - -## Rϕ with ϕ = π/4 -op(::OpName"π/8", ::SiteType"Qubit") = [ - 1 0 - 0 1 / sqrt(2)+im / sqrt(2) -] - -op(::OpName"T", t::SiteType"Qubit") = op("π/8", t) - -# Rotation around X-axis -function op(::OpName"Rx", ::SiteType"Qubit"; θ::Number) - return [ - cos(θ / 2) -im*sin(θ / 2) - -im*sin(θ / 2) cos(θ / 2) - ] -end - -# Rotation around Y-axis -function op(::OpName"Ry", ::SiteType"Qubit"; θ::Number) - return [ - cos(θ / 2) -sin(θ / 2) - sin(θ / 2) cos(θ / 2) - ] -end - -# Rotation around Z-axis -function op(::OpName"Rz", ::SiteType"Qubit"; θ=nothing, ϕ=nothing) - isone(count(isnothing, (θ, ϕ))) || error( - "Must specify the keyword argument `θ` (or the deprecated `ϕ`) when creating an Rz gate, but not both.", - ) - isnothing(θ) && (θ = ϕ) - return [ - exp(-im * θ / 2) 0 - 0 exp(im * θ / 2) - ] -end - -# Rotation around generic axis n̂ -function op(::OpName"Rn", ::SiteType"Qubit"; θ::Real, ϕ::Real, λ::Real) - return [ - cos(θ / 2) -exp(im * λ)*sin(θ / 2) - exp(im * ϕ)*sin(θ / 2) exp(im * (ϕ + λ))*cos(θ / 2) - ] -end - -function op(on::OpName"Rn̂", t::SiteType"Qubit"; kwargs...) - return op(alias(on), t; kwargs...) -end - -# -# 2-Qubit gates -# - -op(::OpName"CNOT", ::SiteType"Qubit") = [ - 1 0 0 0 - 0 1 0 0 - 0 0 0 1 - 0 0 1 0 -] - -op(::OpName"CX", t::SiteType"Qubit") = op("CNOT", t) - -op(::OpName"CY", ::SiteType"Qubit") = [ - 1 0 0 0 - 0 1 0 0 - 0 0 0 -im - 0 0 im 0 -] - -op(::OpName"CZ", ::SiteType"Qubit") = [ - 1 0 0 0 - 0 1 0 0 - 0 0 1 0 - 0 0 0 -1 -] - -function op(::OpName"CPHASE", ::SiteType"Qubit"; ϕ::Number) - return [ - 1 0 0 0 - 0 1 0 0 - 0 0 1 0 - 0 0 0 exp(im * ϕ) - ] -end -op(::OpName"Cphase", t::SiteType"Qubit"; kwargs...) = op("CPHASE", t; kwargs...) - -function op(::OpName"CRx", ::SiteType"Qubit"; θ::Number) - return [ - 1 0 0 0 - 0 1 0 0 - 0 0 cos(θ / 2) -im*sin(θ / 2) - 0 0 -im*sin(θ / 2) cos(θ / 2) - ] -end -op(::OpName"CRX", t::SiteType"Qubit"; kwargs...) = op("CRx", t; kwargs...) - -function op(::OpName"CRy", ::SiteType"Qubit"; θ::Number) - return [ - 1 0 0 0 - 0 1 0 0 - 0 0 cos(θ / 2) -sin(θ / 2) - 0 0 sin(θ / 2) cos(θ / 2) - ] -end -op(::OpName"CRY", t::SiteType"Qubit"; kwargs...) = op("CRy", t; kwargs...) - -function op(::OpName"CRz", ::SiteType"Qubit"; ϕ=nothing, θ=nothing) - isone(count(isnothing, (θ, ϕ))) || error( - "Must specify the keyword argument `θ` (or the deprecated `ϕ`) when creating a CRz gate, but not both.", - ) - isnothing(θ) && (θ = ϕ) - return [ - 1 0 0 0 - 0 1 0 0 - 0 0 exp(-im * θ / 2) 0 - 0 0 0 exp(im * θ / 2) - ] -end -op(::OpName"CRZ", t::SiteType"Qubit"; kwargs...) = op("CRz", t; kwargs...) - -function op(::OpName"CRn", ::SiteType"Qubit"; θ::Number, ϕ::Number, λ::Number) - return [ - 1 0 0 0 - 0 1 0 0 - 0 0 cos(θ / 2) -exp(im * λ)*sin(θ / 2) - 0 0 exp(im * ϕ)*sin(θ / 2) exp(im * (ϕ + λ))*cos(θ / 2) - ] -end -function op(::OpName"CRn̂", t::SiteType"Qubit"; kwargs...) - return op("CRn", t; kwargs...) 
-end - -op(::OpName"SWAP", ::SiteType"Qubit") = [ - 1 0 0 0 - 0 0 1 0 - 0 1 0 0 - 0 0 0 1 -] -op(::OpName"Swap", t::SiteType"Qubit") = op("SWAP", t) - -function op(::OpName"√SWAP", ::SiteType"Qubit") - return [ - 1 0 0 0 - 0 (1 + im)/2 (1 - im)/2 0 - 0 (1 - im)/2 (1 + im)/2 0 - 0 0 0 1 - ] -end -op(::OpName"√Swap", t::SiteType"Qubit") = op("√SWAP", t) - -op(::OpName"iSWAP", t::SiteType"Qubit") = [ - 1 0 0 0 - 0 0 im 0 - 0 im 0 0 - 0 0 0 1 -] -op(::OpName"iSwap", t::SiteType"Qubit") = op("iSWAP", t) - -function op(::OpName"√iSWAP", t::SiteType"Qubit") - return [ - 1 0 0 0 - 0 1/√2 im/√2 0 - 0 im/√2 1/√2 0 - 0 0 0 1 - ] -end -op(::OpName"√iSwap", t::SiteType"Qubit") = op("√iSWAP", t) - -# Ising (XX) coupling gate -function op(::OpName"Rxx", t::SiteType"Qubit"; ϕ::Number) - return [ - cos(ϕ) 0 0 -im*sin(ϕ) - 0 cos(ϕ) -im*sin(ϕ) 0 - 0 -im*sin(ϕ) cos(ϕ) 0 - -im*sin(ϕ) 0 0 cos(ϕ) - ] -end -op(::OpName"RXX", t::SiteType"Qubit"; kwargs...) = op("Rxx", t; kwargs...) - -# Ising (YY) coupling gate -function op(::OpName"Ryy", ::SiteType"Qubit"; ϕ::Number) - return [ - cos(ϕ) 0 0 im*sin(ϕ) - 0 cos(ϕ) -im*sin(ϕ) 0 - 0 -im*sin(ϕ) cos(ϕ) 0 - im*sin(ϕ) 0 0 cos(ϕ) - ] -end -op(::OpName"RYY", t::SiteType"Qubit"; kwargs...) = op("Ryy", t; kwargs...) - -# Ising (XY) coupling gate -function op(::OpName"Rxy", t::SiteType"Qubit"; ϕ::Number) - return [ - 1 0 0 0 - 0 cos(ϕ) -im*sin(ϕ) 0 - 0 -im*sin(ϕ) cos(ϕ) 0 - 0 0 0 1 - ] -end -op(::OpName"RXY", t::SiteType"Qubit"; kwargs...) = op("Rxy", t; kwargs...) - -# Ising (ZZ) coupling gate -function op(::OpName"Rzz", ::SiteType"Qubit"; ϕ::Number) - return [ - exp(-im * ϕ) 0 0 0 - 0 exp(im * ϕ) 0 0 - 0 0 exp(im * ϕ) 0 - 0 0 0 exp(-im * ϕ) - ] -end -op(::OpName"RZZ", t::SiteType"Qubit"; kwargs...) = op("Rzz", t; kwargs...) - -# -# 3-Qubit gates -# - -function op(::OpName"Toffoli", ::SiteType"Qubit") - return [ - 1 0 0 0 0 0 0 0 - 0 1 0 0 0 0 0 0 - 0 0 1 0 0 0 0 0 - 0 0 0 1 0 0 0 0 - 0 0 0 0 1 0 0 0 - 0 0 0 0 0 1 0 0 - 0 0 0 0 0 0 0 1 - 0 0 0 0 0 0 1 0 - ] -end - -op(::OpName"CCNOT", t::SiteType"Qubit") = op("Toffoli", t) - -op(::OpName"CCX", t::SiteType"Qubit") = op("Toffoli", t) - -op(::OpName"TOFF", t::SiteType"Qubit") = op("Toffoli", t) - -function op(::OpName"Fredkin", ::SiteType"Qubit") - return [ - 1 0 0 0 0 0 0 0 - 0 1 0 0 0 0 0 0 - 0 0 1 0 0 0 0 0 - 0 0 0 1 0 0 0 0 - 0 0 0 0 1 0 0 0 - 0 0 0 0 0 0 1 0 - 0 0 0 0 0 1 0 0 - 0 0 0 0 0 0 0 1 - ] -end - -op(::OpName"CSWAP", t::SiteType"Qubit") = op("Fredkin", t) -op(::OpName"CSwap", t::SiteType"Qubit") = op("Fredkin", t) - -op(::OpName"CS", t::SiteType"Qubit") = op("Fredkin", t) - -# -# 4-Qubit gates -# - -function op(::OpName"CCCNOT", ::SiteType"Qubit") - return [ - 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 - 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 - 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 - 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 - 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 - 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 - 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 - 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 - 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 - 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 - 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 - 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 - 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 - 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 - 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 - 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 - ] -end - -# spin-full operators -op(::OpName"Sz", ::SiteType"Qubit") = [ - 0.5 0.0 - 0.0 -0.5 -] - -op(on::OpName"Sᶻ", t::SiteType"Qubit") = op(alias(on), t) - -op(::OpName"S+", ::SiteType"Qubit") = [ - 0 1 - 0 0 -] - -op(on::OpName"S⁺", t::SiteType"Qubit") = op(alias(on), t) - -op(on::OpName"Splus", t::SiteType"Qubit") = 
op(alias(on), t) - -op(::OpName"S-", ::SiteType"Qubit") = [ - 0 0 - 1 0 -] - -op(on::OpName"S⁻", t::SiteType"Qubit") = op(alias(on), t) - -op(on::OpName"Sminus", t::SiteType"Qubit") = op(alias(on), t) - -op(::OpName"Sx", ::SiteType"Qubit") = [ - 0.0 0.5 - 0.5 0.0 -] - -op(on::OpName"Sˣ", t::SiteType"Qubit") = op(alias(on), t) - -op(::OpName"iSy", ::SiteType"Qubit") = [ - 0.0 0.5 - -0.5 0.0 -] - -op(on::OpName"iSʸ", t::SiteType"Qubit") = op(alias(on), t) - -op(::OpName"Sy", ::SiteType"Qubit") = [ - 0.0 -0.5im - 0.5im 0.0 -] - -op(on::OpName"Sʸ", t::SiteType"Qubit") = op(alias(on), t) - -op(::OpName"S2", ::SiteType"Qubit") = [ - 0.75 0.0 - 0.0 0.75 -] - -op(on::OpName"S²", t::SiteType"Qubit") = op(alias(on), t) - -op(::OpName"ProjUp", ::SiteType"Qubit") = [ - 1 0 - 0 0 -] - -op(on::OpName"projUp", t::SiteType"Qubit") = op(alias(on), t) - -op(on::OpName"Proj0", t::SiteType"Qubit") = op(alias(on), t) - -op(::OpName"ProjDn", ::SiteType"Qubit") = [ - 0 0 - 0 1 -] - -op(on::OpName"projDn", t::SiteType"Qubit") = op(alias(on), t) - -op(on::OpName"Proj1", t::SiteType"Qubit") = op(alias(on), t) diff --git a/src/lib/SiteTypes/src/sitetypes/qudit.jl b/src/lib/SiteTypes/src/sitetypes/qudit.jl deleted file mode 100644 index 6644cf06e8..0000000000 --- a/src/lib/SiteTypes/src/sitetypes/qudit.jl +++ /dev/null @@ -1,110 +0,0 @@ -using ChainRulesCore: @non_differentiable - -""" - space(::SiteType"Qudit"; - dim = 2, - conserve_qns = false, - conserve_number = false, - qnname_number = "Number") - -Create the Hilbert space for a site of type "Qudit". - -Optionally specify the conserved symmetries and their quantum number labels. -""" -function space( - ::SiteType"Qudit"; - dim=2, - conserve_qns=false, - conserve_number=conserve_qns, - qnname_number="Number", -) - if conserve_number - return [QN(qnname_number, n - 1) => 1 for n in 1:dim] - end - return dim -end - -function val(::ValName{N}, ::SiteType"Qudit") where {N} - return parse(Int, String(N)) + 1 -end - -function state(::StateName{N}, ::SiteType"Qudit", s::Index) where {N} - n = parse(Int, String(N)) - st = zeros(dim(s)) - st[n + 1] = 1.0 - return itensor(st, s) -end - -# one-body operators -function op(::OpName"Id", ::SiteType"Qudit", ds::Int...) - d = prod(ds) - return Matrix(1.0I, d, d) -end -op(on::OpName"I", st::SiteType"Qudit", ds::Int...) = op(alias(on), st, ds...) -op(on::OpName"F", st::SiteType"Qudit", ds::Int...) = op(OpName"Id"(), st, ds...) 
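The Qudit operators defined below work at the matrix level, as functions of the site dimension(s); the Index-level `op` method further down wraps the resulting matrix in an ITensor (with the `kron` ordering handled by reversing the indices). A sketch under the pre-rewrite API:

```julia
using ITensors

s = siteinds("Qudit", 2; dim=3)  # two sites of dimension 3
n1 = op("N", s, 1)               # number operator, built from the 3×3 matrix overload
hop = op("a†b", s, 1, 2)         # two-site operator assembled with kron
@show val(s[1], "2")             # Qudit values parse the name as an integer; expected: 3
```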
- -function op(::OpName"Adag", ::SiteType"Qudit", d::Int) - mat = zeros(d, d) - for k in 1:(d - 1) - mat[k + 1, k] = √k - end - return mat -end -op(on::OpName"adag", st::SiteType"Qudit", d::Int) = op(alias(on), st, d) -op(on::OpName"a†", st::SiteType"Qudit", d::Int) = op(alias(on), st, d) - -function op(::OpName"A", ::SiteType"Qudit", d::Int) - mat = zeros(d, d) - for k in 1:(d - 1) - mat[k, k + 1] = √k - end - return mat -end -op(on::OpName"a", st::SiteType"Qudit", d::Int) = op(alias(on), st, d) - -function op(::OpName"N", ::SiteType"Qudit", d::Int) - mat = zeros(d, d) - for k in 1:d - mat[k, k] = k - 1 - end - return mat -end -op(on::OpName"n", st::SiteType"Qudit", d::Int) = op(alias(on), st, d) - -# two-body operators -function op(::OpName"ab", st::SiteType"Qudit", d1::Int, d2::Int) - return kron(op(OpName("a"), st, d1), op(OpName("a"), st, d2)) -end - -function op(::OpName"a†b", st::SiteType"Qudit", d1::Int, d2::Int) - return kron(op(OpName("a†"), st, d1), op(OpName("a"), st, d2)) -end - -function op(::OpName"ab†", st::SiteType"Qudit", d1::Int, d2::Int) - return kron(op(OpName("a"), st, d1), op(OpName("a†"), st, d2)) -end - -function op(::OpName"a†b†", st::SiteType"Qudit", d1::Int, d2::Int) - return kron(op(OpName("a†"), st, d1), op(OpName("a†"), st, d2)) -end - -# interface -function op(on::OpName, st::SiteType"Qudit", s1::Index, s_tail::Index...; kwargs...) - rs = reverse((s1, s_tail...)) - ds = dim.(rs) - opmat = op(on, st, ds...; kwargs...) - return itensor(opmat, prime.(rs)..., dag.(rs)...) -end - -function op(on::OpName, st::SiteType"Qudit"; kwargs...) - return error("`op` can't be called without indices or dimensions.") -end - -# Zygote -@non_differentiable op(::OpName"ab", ::SiteType"Qudit", ::Int, ::Int) -@non_differentiable op(::OpName"a†b", ::SiteType"Qudit", ::Int, ::Int) -@non_differentiable op(::OpName"ab†", ::SiteType"Qudit", ::Int, ::Int) -@non_differentiable op(::OpName"a†b†", ::SiteType"Qudit", ::Int, ::Int) -@non_differentiable op(::OpName"a", ::SiteType"Qudit", ::Int) -@non_differentiable op(::OpName"a†", ::SiteType"Qudit", ::Int) -@non_differentiable op(::OpName"N", ::SiteType"Qudit", ::Int) diff --git a/src/lib/SiteTypes/src/sitetypes/spinhalf.jl b/src/lib/SiteTypes/src/sitetypes/spinhalf.jl deleted file mode 100644 index c374d47c0d..0000000000 --- a/src/lib/SiteTypes/src/sitetypes/spinhalf.jl +++ /dev/null @@ -1,66 +0,0 @@ - -""" - space(::SiteType"S=1/2"; - conserve_qns = false, - conserve_sz = conserve_qns, - conserve_szparity = false, - qnname_sz = "Sz", - qnname_szparity = "SzParity") - -Create the Hilbert space for a site of type "S=1/2". - -Optionally specify the conserved symmetries and their quantum number labels. -""" -function space( - ::SiteType"S=1/2"; - conserve_qns=false, - conserve_sz=conserve_qns, - conserve_szparity=false, - qnname_sz="Sz", - qnname_szparity="SzParity", -) - if conserve_sz && conserve_szparity - return [ - QN((qnname_sz, +1), (qnname_szparity, 1, 2)) => 1, - QN((qnname_sz, -1), (qnname_szparity, 0, 2)) => 1, - ] - elseif conserve_sz - return [QN(qnname_sz, +1) => 1, QN(qnname_sz, -1) => 1] - elseif conserve_szparity - return [QN(qnname_szparity, 1, 2) => 1, QN(qnname_szparity, 0, 2) => 1] - end - return 2 -end - -# Use Qubit definition of any operator/state -# called using S=1/2 SiteType -function val(vn::ValName, ::SiteType"S=1/2"; kwargs...) - return val(vn, SiteType("Qubit"); kwargs...) -end - -function state(sn::StateName, ::SiteType"S=1/2"; kwargs...) - return state(sn, SiteType("Qubit"); kwargs...) 
-end - -op(o::OpName, ::SiteType"S=1/2"; kwargs...) = op(o, SiteType("Qubit"); kwargs...) - -# Support the tag "SpinHalf" as equivalent to "S=1/2" -space(::SiteType"SpinHalf"; kwargs...) = space(SiteType("S=1/2"); kwargs...) - -val(name::ValName, ::SiteType"SpinHalf") = val(name, SiteType("S=1/2")) - -state(name::StateName, ::SiteType"SpinHalf") = state(name, SiteType("S=1/2")) - -function op(o::OpName, ::SiteType"SpinHalf"; kwargs...) - return op(o, SiteType("S=1/2"); kwargs...) -end - -# Support the tag "S=½" as equivalent to "S=1/2" - -space(::SiteType"S=½"; kwargs...) = space(SiteType("S=1/2"); kwargs...) - -val(name::ValName, ::SiteType"S=½") = val(name, SiteType("S=1/2")) - -state(name::StateName, ::SiteType"S=½") = state(name, SiteType("S=1/2")) - -op(o::OpName, ::SiteType"S=½"; kwargs...) = op(o, SiteType("S=1/2"); kwargs...) diff --git a/src/lib/SiteTypes/src/sitetypes/spinone.jl b/src/lib/SiteTypes/src/sitetypes/spinone.jl deleted file mode 100644 index 1de6902243..0000000000 --- a/src/lib/SiteTypes/src/sitetypes/spinone.jl +++ /dev/null @@ -1,143 +0,0 @@ -using ..ITensors: complex!, QN - -alias(::SiteType"SpinOne") = SiteType"S=1"() - -""" - space(::SiteType"S=1"; - conserve_qns = false, - conserve_sz = conserve_qns, - qnname_sz = "Sz") - -Create the Hilbert space for a site of type "S=1". - -Optionally specify the conserved symmetries and their quantum number labels. -""" -function space( - ::SiteType"S=1"; conserve_qns=false, conserve_sz=conserve_qns, qnname_sz="Sz" -) - if conserve_sz - return [QN(qnname_sz, +2) => 1, QN(qnname_sz, 0) => 1, QN(qnname_sz, -2) => 1] - end - return 3 -end - -val(::ValName"Up", ::SiteType"S=1") = 1 -val(::ValName"Z0", ::SiteType"S=1") = 2 -val(::ValName"Dn", ::SiteType"S=1") = 3 - -val(::ValName"↑", st::SiteType"S=1") = 1 -val(::ValName"0", st::SiteType"S=1") = 2 -val(::ValName"↓", st::SiteType"S=1") = 3 - -val(::ValName"Z+", ::SiteType"S=1") = 1 -# -- Z0 is already defined above -- -val(::ValName"Z-", ::SiteType"S=1") = 3 - -state(::StateName"Up", ::SiteType"S=1") = [1.0, 0.0, 0.0] -state(::StateName"Z0", ::SiteType"S=1") = [0.0, 1.0, 0.0] -state(::StateName"Dn", ::SiteType"S=1") = [0.0, 0.0, 1.0] - -state(::StateName"↑", st::SiteType"S=1") = [1.0, 0.0, 0.0] -state(::StateName"0", st::SiteType"S=1") = [0.0, 1.0, 0.0] -state(::StateName"↓", st::SiteType"S=1") = [0.0, 0.0, 1.0] - -state(::StateName"Z+", st::SiteType"S=1") = [1.0, 0.0, 0.0] -# -- Z0 is already defined above -- -state(::StateName"Z-", st::SiteType"S=1") = [0.0, 0.0, 1.0] - -state(::StateName"X+", ::SiteType"S=1") = [1 / 2, 1 / sqrt(2), 1 / 2] -state(::StateName"X0", ::SiteType"S=1") = [-1 / sqrt(2), 0, 1 / sqrt(2)] -state(::StateName"X-", ::SiteType"S=1") = [1 / 2, -1 / sqrt(2), 1 / 2] - -state(::StateName"Y+", ::SiteType"S=1") = [-1 / 2, -im / sqrt(2), 1 / 2] -state(::StateName"Y0", ::SiteType"S=1") = [1 / sqrt(2), 0, 1 / sqrt(2)] -state(::StateName"Y-", ::SiteType"S=1") = [-1 / 2, im / sqrt(2), 1 / 2] - -op(::OpName"Sz", ::SiteType"S=1") = [ - 1.0 0.0 0.0 - 0.0 0.0 0.0 - 0.0 0.0 -1.0 -] - -op(on::OpName"Sᶻ", t::SiteType"S=1") = op(alias(on), t) - -op(::OpName"S+", ::SiteType"S=1") = [ - 0.0 √2 0.0 - 0.0 0.0 √2 - 0.0 0.0 0.0 -] - -op(on::OpName"S⁺", t::SiteType"S=1") = op(alias(on), t) -op(on::OpName"Splus", t::SiteType"S=1") = op(alias(on), t) -op(on::OpName"Sp", t::SiteType"S=1") = op(alias(on), t) - -op(::OpName"S-", ::SiteType"S=1") = [ - 0.0 0.0 0.0 - √2 0.0 0.0 - 0.0 √2 0.0 -] - -op(on::OpName"S⁻", t::SiteType"S=1") = op(alias(on), t) -op(on::OpName"Sminus", t::SiteType"S=1") 
= op(alias(on), t) -op(on::OpName"Sm", t::SiteType"S=1") = op(alias(on), t) - -op(::OpName"Sx", ::SiteType"S=1") = [ - 0.0 1/√2 0.0 - 1/√2 0.0 1/√2 - 0.0 1/√2 0.0 -] - -op(on::OpName"Sˣ", t::SiteType"S=1") = op(alias(on), t) - -op(::OpName"iSy", ::SiteType"S=1") = [ - 0.0 1/√2 0.0 - -1/√2 0.0 1/√2 - 0.0 -1/√2 0.0 -] - -op(on::OpName"iSʸ", t::SiteType"S=1") = op(alias(on), t) - -op(::OpName"Sy", ::SiteType"S=1") = [ - 0.0 -im/√2 0.0 - im/√2 0.0 -im/√2 - 0.0 im/√2 0.0 -] - -op(on::OpName"Sʸ", t::SiteType"S=1") = op(alias(on), t) - -op(::OpName"Sz2", ::SiteType"S=1") = [ - 1.0 0.0 0.0 - 0.0 0.0 0.0 - 0.0 0.0 1.0 -] - -op(::OpName"Sx2", ::SiteType"S=1") = [ - 0.5 0.0 0.5 - 0.0 1.0 0.0 - 0.5 0.0 0.5 -] - -op(::OpName"Sy2", ::SiteType"S=1") = [ - 0.5 0.0 -0.5 - 0.0 1.0 0.0 - -0.5 0.0 0.5 -] - -op(::OpName"S2", ::SiteType"S=1") = [ - 2.0 0.0 0.0 - 0.0 2.0 0.0 - 0.0 0.0 2.0 -] - -op(on::OpName"S²", t::SiteType"S=1") = op(alias(on), t) - -space(st::SiteType"SpinOne"; kwargs...) = space(alias(st); kwargs...) - -state(name::StateName, st::SiteType"SpinOne") = state(name, alias(st)) -val(name::ValName, st::SiteType"SpinOne") = val(name, alias(st)) - -function op!(Op::ITensor, o::OpName, st::SiteType"SpinOne", s::Index) - return op!(Op, o, alias(st), s) -end - -op(o::OpName, st::SiteType"SpinOne") = op(o, alias(st)) diff --git a/src/lib/SiteTypes/src/sitetypes/tj.jl b/src/lib/SiteTypes/src/sitetypes/tj.jl deleted file mode 100644 index 063f46d721..0000000000 --- a/src/lib/SiteTypes/src/sitetypes/tj.jl +++ /dev/null @@ -1,234 +0,0 @@ - -""" - space(::SiteType"tJ"; - conserve_qns = false, - conserve_sz = conserve_qns, - conserve_nf = conserve_qns, - conserve_nfparity = conserve_qns, - qnname_sz = "Sz", - qnname_nf = "Nf", - qnname_nfparity = "NfParity") - -Create the Hilbert space for a site of type "tJ". - -Optionally specify the conserved symmetries and their quantum number labels. 
-""" -function space( - ::SiteType"tJ"; - conserve_qns=false, - conserve_sz=conserve_qns, - conserve_nf=conserve_qns, - conserve_nfparity=conserve_qns, - qnname_sz="Sz", - qnname_nf="Nf", - qnname_nfparity="NfParity", - # Deprecated - conserve_parity=nothing, -) - if !isnothing(conserve_parity) - conserve_nfparity = conserve_parity - end - if conserve_sz && conserve_nf - return [ - QN((qnname_nf, 0, -1), (qnname_sz, 0)) => 1 - QN((qnname_nf, 1, -1), (qnname_sz, +1)) => 1 - QN((qnname_nf, 1, -1), (qnname_sz, -1)) => 1 - ] - elseif conserve_nf - return [ - QN(qnname_nf, 0, -1) => 1 - QN(qnname_nf, 1, -1) => 2 - ] - elseif conserve_sz - return [ - QN((qnname_sz, 0), (qnname_nfparity, 0, -2)) => 1 - QN((qnname_sz, +1), (qnname_nfparity, 1, -2)) => 1 - QN((qnname_sz, -1), (qnname_nfparity, 1, -2)) => 1 - ] - elseif conserve_nfparity - return [ - QN(qnname_nfparity, 0, -2) => 1 - QN(qnname_nfparity, 1, -2) => 2 - ] - end - return 3 -end - -val(::ValName"Emp", ::SiteType"tJ") = 1 -val(::ValName"Up", ::SiteType"tJ") = 2 -val(::ValName"Dn", ::SiteType"tJ") = 3 -val(::ValName"0", st::SiteType"tJ") = val(ValName("Emp"), st) -val(::ValName"↑", st::SiteType"tJ") = val(ValName("Up"), st) -val(::ValName"↓", st::SiteType"tJ") = val(ValName("Dn"), st) - -state(::StateName"Emp", ::SiteType"tJ") = [1.0, 0, 0] -state(::StateName"Up", ::SiteType"tJ") = [0.0, 1, 0] -state(::StateName"Dn", ::SiteType"tJ") = [0.0, 0, 1] -state(::StateName"0", st::SiteType"tJ") = state(StateName("Emp"), st) -state(::StateName"↑", st::SiteType"tJ") = state(StateName("Up"), st) -state(::StateName"↓", st::SiteType"tJ") = state(StateName("Dn"), st) - -function op!(Op::ITensor, ::OpName"Nup", ::SiteType"tJ", s::Index) - return Op[s' => 2, s => 2] = 1.0 -end -function op!(Op::ITensor, on::OpName"n↑", st::SiteType"tJ", s::Index) - return op!(Op, alias(on), st, s) -end - -function op!(Op::ITensor, ::OpName"Ndn", ::SiteType"tJ", s::Index) - return Op[s' => 3, s => 3] = 1.0 -end -function op!(Op::ITensor, on::OpName"n↓", st::SiteType"tJ", s::Index) - return op!(Op, alias(on), st, s) -end - -function op!(Op::ITensor, ::OpName"Ntot", ::SiteType"tJ", s::Index) - Op[s' => 2, s => 2] = 1.0 - return Op[s' => 3, s => 3] = 1.0 -end -function op!(Op::ITensor, on::OpName"ntot", st::SiteType"tJ", s::Index) - return op!(Op, alias(on), st, s) -end - -function op!(Op::ITensor, ::OpName"Cup", ::SiteType"tJ", s::Index) - return Op[s' => 1, s => 2] = 1.0 -end -function op!(Op::ITensor, on::OpName"c↑", st::SiteType"tJ", s::Index) - return op!(Op, alias(on), st, s) -end - -function op!(Op::ITensor, ::OpName"Cdagup", ::SiteType"tJ", s::Index) - return Op[s' => 2, s => 1] = 1.0 -end -function op!(Op::ITensor, on::OpName"c†↑", st::SiteType"tJ", s::Index) - return op!(Op, alias(on), st, s) -end - -function op!(Op::ITensor, ::OpName"Cdn", ::SiteType"tJ", s::Index) - return Op[s' => 1, s => 3] = 1.0 -end -function op!(Op::ITensor, on::OpName"c↓", st::SiteType"tJ", s::Index) - return op!(Op, alias(on), st, s) -end - -function op!(Op::ITensor, ::OpName"Cdagdn", ::SiteType"tJ", s::Index) - return Op[s' => 3, s => 1] = 1.0 -end -function op!(Op::ITensor, on::OpName"c†↓", st::SiteType"tJ", s::Index) - return op!(Op, alias(on), st, s) -end - -function op!(Op::ITensor, ::OpName"Aup", ::SiteType"tJ", s::Index) - return Op[s' => 1, s => 2] = 1.0 -end -function op!(Op::ITensor, on::OpName"a↑", st::SiteType"tJ", s::Index) - return op!(Op, alias(on), st, s) -end - -function op!(Op::ITensor, ::OpName"Adagup", ::SiteType"tJ", s::Index) - return Op[s' => 2, s => 1] = 1.0 -end 
-function op!(Op::ITensor, on::OpName"a†↑", st::SiteType"tJ", s::Index) - return op!(Op, alias(on), st, s) -end - -function op!(Op::ITensor, ::OpName"Adn", ::SiteType"tJ", s::Index) - return Op[s' => 1, s => 3] = 1.0 -end -function op!(Op::ITensor, on::OpName"a↓", st::SiteType"tJ", s::Index) - return op!(Op, alias(on), st, s) -end - -function op!(Op::ITensor, ::OpName"Adagdn", ::SiteType"tJ", s::Index) - return Op[s' => 3, s => 1] = 1.0 -end -function op!(Op::ITensor, on::OpName"a†↓", st::SiteType"tJ", s::Index) - return op!(Op, alias(on), st, s) -end - -function op!(Op::ITensor, ::OpName"F", ::SiteType"tJ", s::Index) - Op[s' => 1, s => 1] = +1.0 - Op[s' => 2, s => 2] = -1.0 - return Op[s' => 3, s => 3] = -1.0 -end - -function op!(Op::ITensor, ::OpName"Fup", ::SiteType"tJ", s::Index) - Op[s' => 1, s => 1] = +1.0 - Op[s' => 2, s => 2] = -1.0 - return Op[s' => 3, s => 3] = +1.0 -end -function op!(Op::ITensor, on::OpName"F↑", st::SiteType"tJ", s::Index) - return op!(Op, alias(on), st, s) -end - -function op!(Op::ITensor, ::OpName"Fdn", ::SiteType"tJ", s::Index) - Op[s' => 1, s => 1] = +1.0 - Op[s' => 2, s => 2] = +1.0 - return Op[s' => 3, s => 3] = -1.0 -end -function op!(Op::ITensor, on::OpName"F↓", st::SiteType"tJ", s::Index) - return op!(Op, alias(on), st, s) -end - -function op!(Op::ITensor, ::OpName"Sz", ::SiteType"tJ", s::Index) - Op[s' => 2, s => 2] = +0.5 - return Op[s' => 3, s => 3] = -0.5 -end - -function op!(Op::ITensor, ::OpName"Sᶻ", st::SiteType"tJ", s::Index) - return op!(Op, OpName("Sz"), st, s) -end - -function op!(Op::ITensor, ::OpName"Sx", ::SiteType"tJ", s::Index) - Op[s' => 2, s => 3] = 0.5 - return Op[s' => 3, s => 2] = 0.5 -end - -function op!(Op::ITensor, ::OpName"Sˣ", st::SiteType"tJ", s::Index) - return op!(Op, OpName("Sx"), st, s) -end - -function op!(Op::ITensor, ::OpName"S+", ::SiteType"tJ", s::Index) - return Op[s' => 2, s => 3] = 1.0 -end - -function op!(Op::ITensor, ::OpName"S⁺", st::SiteType"tJ", s::Index) - return op!(Op, OpName("S+"), st, s) -end -function op!(Op::ITensor, ::OpName"Sp", st::SiteType"tJ", s::Index) - return op!(Op, OpName("S+"), st, s) -end -function op!(Op::ITensor, ::OpName"Splus", st::SiteType"tJ", s::Index) - return op!(Op, OpName("S+"), st, s) -end - -function op!(Op::ITensor, ::OpName"S-", ::SiteType"tJ", s::Index) - return Op[s' => 3, s => 2] = 1.0 -end - -function op!(Op::ITensor, ::OpName"S⁻", st::SiteType"tJ", s::Index) - return op!(Op, OpName("S-"), st, s) -end -function op!(Op::ITensor, ::OpName"Sm", st::SiteType"tJ", s::Index) - return op!(Op, OpName("S-"), st, s) -end -function op!(Op::ITensor, ::OpName"Sminus", st::SiteType"tJ", s::Index) - return op!(Op, OpName("S-"), st, s) -end - -has_fermion_string(::OpName"Cup", ::SiteType"tJ") = true -function has_fermion_string(on::OpName"c↑", st::SiteType"tJ") - return has_fermion_string(alias(on), st) -end -has_fermion_string(::OpName"Cdagup", ::SiteType"tJ") = true -function has_fermion_string(on::OpName"c†↑", st::SiteType"tJ") - return has_fermion_string(alias(on), st) -end -has_fermion_string(::OpName"Cdn", ::SiteType"tJ") = true -function has_fermion_string(on::OpName"c↓", st::SiteType"tJ") - return has_fermion_string(alias(on), st) -end -has_fermion_string(::OpName"Cdagdn", ::SiteType"tJ") = true -function has_fermion_string(on::OpName"c†↓", st::SiteType"tJ") - return has_fermion_string(alias(on), st) -end diff --git a/src/lib/SmallStrings/ext/SmallStringsChainRulesCoreExt/SmallStringsChainRulesCoreExt.jl 
b/src/lib/SmallStrings/ext/SmallStringsChainRulesCoreExt/SmallStringsChainRulesCoreExt.jl deleted file mode 100644 index 294318e993..0000000000 --- a/src/lib/SmallStrings/ext/SmallStringsChainRulesCoreExt/SmallStringsChainRulesCoreExt.jl +++ /dev/null @@ -1,5 +0,0 @@ -module SmallStringsChainRulesCoreExt -using ChainRulesCore: @non_differentiable -using ...SmallStrings: SmallString -@non_differentiable SmallString(::Any) -end diff --git a/src/lib/SmallStrings/src/SmallStrings.jl b/src/lib/SmallStrings/src/SmallStrings.jl deleted file mode 100644 index a66276df3b..0000000000 --- a/src/lib/SmallStrings/src/SmallStrings.jl +++ /dev/null @@ -1,3 +0,0 @@ -module SmallStrings -include("smallstring.jl") -end diff --git a/src/lib/SmallStrings/src/smallstring.jl b/src/lib/SmallStrings/src/smallstring.jl deleted file mode 100644 index ac660c5d72..0000000000 --- a/src/lib/SmallStrings/src/smallstring.jl +++ /dev/null @@ -1,150 +0,0 @@ -using BitIntegers: UInt256 -using StaticArrays: MVector, SVector - -const IntChar = UInt16 -const IntSmallString = UInt256 - -# XXX: remove smallLength as a global constant, bad for type inference -const smallLength = 16 -const SmallStringStorage = SVector{smallLength,IntChar} -const MSmallStringStorage = MVector{smallLength,IntChar} - -# Similar types are implemented in various packages: -# https://github.com/JuliaString/ShortStrings.jl -# https://github.com/JuliaComputing/FixedSizeStrings.jl -# https://gist.github.com/SimonDanisch/02e74622e0577f199c1b1a8a65390c24#file-fixedstring-jl -# https://github.com/JuliaStrings/StringViews.jl -# https://discourse.julialang.org/t/way-to-make-sharedarray-over-fixed-length-strings/7082 -# https://github.com/djsegal/FixedLengthStrings.jl -# TODO: make this more generic by parametrizing over the length and Char size. Also, store the length of the string. -struct SmallString - data::SmallStringStorage - - SmallString(sv::SmallStringStorage) = new(sv) - - function SmallString() - store = SmallStringStorage(ntuple(_ -> IntChar(0), Val(smallLength))) - return new(store) - end -end - -const Tag = SmallString - -data(ss::SmallString) = ss.data - -Base.eltype(ss::SmallString) = eltype(data(ss)) - -function SmallString(str) - length(str) > smallLength && - error("String is too long for SmallString. 
Maximum length is $smallLength.") - mstore = MSmallStringStorage(ntuple(_ -> IntChar(0), Val(smallLength))) - for (n, c) in enumerate(str) - mstore[n] = IntChar(c) - end - return SmallString(SmallStringStorage(mstore)) -end - -SmallString(s::SmallString) = SmallString(data(s)) - -Base.getindex(s::SmallString, n::Int) = getindex(s.data, n) - -function Base.setindex(s::SmallString, val, n::Int) - return SmallString(Base.setindex(s.data, val, n)) -end - -# TODO: rename to `isempty` -isnull(s::SmallString) = @inbounds s[1] == IntChar(0) - -function Base.vcat(s1::SmallString, s2::SmallString) - v = MSmallStringStorage(ntuple(_ -> IntChar(0), Val(smallLength))) - n = 1 - while n <= smallLength && s1[n] != IntChar(0) - v[n] = s1[n] - n += 1 - end - N1 = n - 1 - n2 = 1 - while n2 <= smallLength && s2[n2] != IntChar(0) - v[n] = s2[n2] - n += 1 - n2 += 1 - end - return SmallString(SmallStringStorage(v)) -end - -function SmallString(i::IntSmallString) - mut_is = MVector{1,IntSmallString}(ntoh(i)) - p = convert(Ptr{SmallStringStorage}, pointer_from_objref(mut_is)) - return SmallString(unsafe_load(p)) -end - -function cast_to_uint(store) - mut_store = MSmallStringStorage(store) - storage_begin = convert(Ptr{IntSmallString}, pointer_from_objref(mut_store)) - return ntoh(unsafe_load(storage_begin)) -end - -function IntSmallString(s::SmallString) - return cast_to_uint(s.data) -end - -function isint(s::SmallString)::Bool - ndigits = 1 - while ndigits <= smallLength && s[ndigits] != IntChar(0) - cur_char = Char(s[ndigits]) - !isdigit(cur_char) && return false - ndigits += 1 - end - return true -end - -# Here we use the StaticArrays comparison -Base.:(==)(s1::SmallString, s2::SmallString) = (s1.data == s2.data) -Base.isless(s1::SmallString, s2::SmallString) = isless(s1.data, s2.data) - -######################################################## -# Here are alternative SmallString comparison implementations -# - -#Base.isless(a::SmallString,b::SmallString) = cast_to_uint(a) < cast_to_uint(b) -#Base.:(==)(a::SmallString,b::SmallString) = cast_to_uint(a) == cast_to_uint(b) - -# Here we use the c-function memcmp (used in Julia string comparison): -#function Base.cmp(a::SmallString, b::SmallString) -# return ccall(:memcmp, Int32, (Ptr{IntChar}, Ptr{IntChar}, IntChar), -# a.data, b.data, IntChar(8)) -#end -#Base.isless(a::SmallString,b::SmallString) = cmp(a, b) < 0 -#Base.:(==)(a::SmallString,b::SmallString) = cmp(a, b) == 0 - -####################################################### - -function Base.String(s::SmallString) - n = 1 - while n <= smallLength && s[n] != IntChar(0) - n += 1 - end - len = n - 1 - return String(Char.(s.data[1:len])) -end - -function Base.show(io::IO, s::SmallString) - n = 1 - while n <= smallLength && s[n] != IntChar(0) - print(io, Char(s[n])) - n += 1 - end -end - -function readcpp(io::IO, ::Type{SmallString}; format="v3") - s = SmallString() - if format == "v3" - for n in 1:7 - c = read(io, Char) - s = setindex(s, c, n) - end - else - throw(ArgumentError("read SmallString: format=$format not supported")) - end - return s -end diff --git a/src/lib/TagSets/src/TagSets.jl b/src/lib/TagSets/src/TagSets.jl deleted file mode 100644 index 45d3152bf7..0000000000 --- a/src/lib/TagSets/src/TagSets.jl +++ /dev/null @@ -1,392 +0,0 @@ -module TagSets -using BitIntegers: UInt256 -using DocStringExtensions: TYPEDSIGNATURES -# TODO: Move to `Nots` lib. 
-using ..ITensors: ITensors, Not, not -using ..SmallStrings: SmallString, cast_to_uint, isnull -using StaticArrays: MVector, SVector - -const IntTag = UInt256 # An integer that can be cast to a Tag -const MTagStorage = MVector{16,IntTag} # A mutable tag storage, holding 16 characters -const TagSetStorage{T,N} = SVector{N,T} -const MTagSetStorage{T,N} = MVector{N,T} # A mutable tag storage - -# -# Turn the strict tags checking on and off -# - -const _using_strict_tags = Ref(false) - -""" -$(TYPEDSIGNATURES) -See if checking for overflow of the number of tags of a TagSet -or the number of characters of a tag is enabled or disabled. - -See also [`ITensors.set_strict_tags!`](@ref). -""" -function using_strict_tags() - return _using_strict_tags[] -end - -""" -$(TYPEDSIGNATURES) -Enable or disable checking for overflow of the number of tags of a TagSet -or the number of characters of a tag. If enabled (set to `true`), an error -will be thrown if overflow occurs, otherwise the overflow will be ignored -and the extra tags or tag characters will be dropped. This could cause -unexpected bugs if tags are being used to distinguish Index objects that -have the same ids and prime levels, but that is generally discouraged and -should only be used if you know what you are doing. - -See also [`ITensors.using_strict_tags`](@ref). -""" -function set_strict_tags!(enable::Bool) - previous = using_strict_tags() - _using_strict_tags[] = enable - return previous -end - -emptytag(::Type{IntTag}) = IntTag(0) -function empty_storage(::Type{TagSetStorage{T,N}}) where {T,N} - return TagSetStorage(ntuple(_ -> emptytag(T), Val(N))) -end -function empty_storage(::Type{MTagSetStorage{T,N}}) where {T,N} - return MTagSetStorage(ntuple(_ -> emptytag(T), Val(N))) -end - -#TODO: decide which functions on TagSet should be made generic. -struct GenericTagSet{T,N} - data::TagSetStorage{T,N} - length::Int - GenericTagSet{T,N}() where {T,N} = new(empty_storage(TagSetStorage{T,N}), 0) - GenericTagSet{T,N}(tags::TagSetStorage{T,N}, len::Int) where {T,N} = new(tags, len) -end - -GenericTagSet{T,N}(ts::GenericTagSet{T,N}) where {T,N} = ts - -function GenericTagSet{T,N}(t::T) where {T,N} - ts = empty_storage(MTagSetStorage{T,N}) - ts[1] = T(t) - return GenericTagSet{T,N}(TagSetStorage(ts), 1) -end - -#GenericTagSet{IntTag,N}(t::Tag) where {N} = GenericTagSet{IntTag,N}(IntTag(t)) - -function _hastag(ts::MTagSetStorage, ntags::Int, tag::IntTag) - for n in 1:ntags - @inbounds ts[n] == tag && return true - end - return false -end - -function _addtag_ordered!(ts::MTagSetStorage, ntags::Int, tag::IntTag) - if iszero(ntags) || tag > @inbounds ts[ntags] - @inbounds setindex!(ts, tag, ntags + 1) - else - # check for repeated tags - _hastag(ts, ntags, tag) && return ntags - pos = ntags + 1 # position new tag should go - while pos > 1 && tag < @inbounds ts[pos - 1] - pos -= 1 - @inbounds setindex!(ts, ts[pos], pos + 1) - end - @inbounds setindex!(ts, tag, pos) - end - return ntags + 1 -end - -function _addtag!(ts::MTagSetStorage, ntags::Int, tag::IntTag) - t = SmallString(tag) - # TODO: change to isempty, remove isnull - if !isnull(t) - ntags = _addtag_ordered!(ts, ntags, tag) - end - return ntags -end - -function reset!(v::MTagStorage, nchar::Int) - for i in 1:nchar - @inbounds v[i] = IntTag(0) - end -end - -function strict_tags_error(str, maxlength, nchar) - return error( - "You are trying to make a TagSet from the String \"$(str)\". 
This has more than the maximum number of allowed tags ($maxlength), or has a tag that is longer than the longest allowed tag ($nchar). Either specify fewer or shorter tags, or use `ITensors.set_strict_tags!(false)` to disable this error, in which case extra tags or tag characters will be ignored.", - ) -end - -function strict_tags_add_error(ts, tsadd, maxlength) - return error( - "You are trying to add the TagSet $tsadd to the TagSet $ts. The result would have more than the maximum number of allowed tags ($maxlength). Either specify fewer tags, or use `ITensors.set_strict_tags!(false)` to disable this error, in which case extra tags will be ignored.", - ) -end - -function strict_tags_replace_error(ts, tsremove, tsadd, maxlength) - return error( - "You are trying to replace the TagSet $tsremove with the TagSet $tsadd in the TagSet $ts. The result would have more than the maximum number of allowed tags ($maxlength). Either specify fewer tags, or use `ITensors.set_strict_tags!(false)` to disable this error, in which case extra tags will be ignored.", - ) -end - -function GenericTagSet{T,N}(str::AbstractString) where {T,N} - # Mutable fixed-size vector as temporary Tag storage - # TODO: refactor the Val here. - current_tag = empty_storage(MTagStorage) - # Mutable fixed-size vector as temporary TagSet storage - ts = empty_storage(MTagSetStorage{T,N}) - nchar = 0 - ntags = 0 - for current_char in str - if current_char == ',' - if nchar != 0 - if ntags < N - ntags = _addtag!(ts, ntags, cast_to_uint(current_tag)) - elseif using_strict_tags() - strict_tags_error(str, N, length(current_tag)) - end # else do nothing - # Reset the current tag - reset!(current_tag, nchar) - nchar = 0 - end - elseif current_char != ' ' # TagSet constructor ignores whitespace - if nchar ≥ length(current_tag) - if using_strict_tags() - strict_tags_error(str, N, length(current_tag)) - else - continue - end - end - nchar += 1 - @inbounds current_tag[nchar] = current_char - end - end - # Store the final tag - if nchar != 0 - if ntags < N - ntags = _addtag!(ts, ntags, cast_to_uint(current_tag)) - elseif using_strict_tags() - strict_tags_error(str, N, length(current_tag)) - end # else do nothing - end - if ntags > N - if using_strict_tags() - strict_tags_error(str, N, length(current_tag)) - else - ntags = N - end - end - return GenericTagSet{T,N}(TagSetStorage(ts), ntags) -end - -const TagSet = GenericTagSet{IntTag,4} - -maxlength(::GenericTagSet{<:Any,N}) where {N} = N - -macro ts_str(s) - return TagSet(s) -end - -Base.convert(::Type{TagSet}, str::String) = TagSet(str) - -""" - not(::TagSet) - !(::TagSet) - -Create a wrapper around a TagSet representing -the set of indices that do not contain that TagSet. -""" -ITensors.not(ts::TagSet) = Not(ts) -Base.:!(ts::TagSet) = Not(ts) - -ITensors.not(ts::AbstractString) = Not(ts) - -""" -data(T::TagSet) - -Get the raw storage of the TagSet. - -This is an internal function, please inform the -developers of ITensors.jl if there is functionality -you would like for TagSet that is not currently -available. 
-""" -data(T::TagSet) = T.data - -Base.length(T::TagSet) = T.length -Base.@propagate_inbounds Base.getindex(T::TagSet, n::Integer) = SmallString(data(T)[n]) -Base.copy(ts::TagSet) = TagSet(data(ts), length(ts)) - -function Base.:(==)(ts1::TagSet, ts2::TagSet) - l1 = length(ts1) - l2 = length(ts2) - l1 != l2 && return false - for n in 1:l1 - @inbounds data(ts1)[n] != data(ts2)[n] && return false - end - return true -end - -# Assumes it is an integer -function hastag(ts::TagSet, tag) - l = length(ts) - l < 1 && return false - for n in 1:l - @inbounds tag == data(ts)[n] && return true - end - return false -end - -function hastags(ts2::TagSet, tags1) - ts1 = TagSet(tags1) - l1 = length(ts1) - l2 = length(ts2) - l1 > l2 && return false - for n1 in 1:l1 - @inbounds !hastag(ts2, data(ts1)[n1]) && return false - end - return true -end - -function addtags(ts::TagSet, tagsadd) - tsadd = TagSet(tagsadd) - if length(ts) == maxlength(ts) - if hastags(ts, tsadd) - return ts - end - if using_strict_tags() - strict_tags_add_error(ts, tsadd, maxlength(ts)) - end - end - res_ts = MVector(data(ts)) - ntags = length(ts) - for n in 1:length(tsadd) - if ntags < maxlength(ts) - @inbounds ntags = _addtag_ordered!(res_ts, ntags, data(tsadd)[n]) - elseif using_strict_tags() - strict_tags_add_error(ts, tsadd, maxlength(ts)) - end - end - return TagSet(TagSetStorage(res_ts), ntags) -end - -function _removetag!(ts::MTagSetStorage, ntags::Int, t) - for n in 1:ntags - if @inbounds ts[n] == t - for j in n:(ntags - 1) - @inbounds ts[j] = ts[j + 1] - end - @inbounds ts[ntags] = emptytag(IntTag) - return ntags -= 1 - end - end - return ntags -end - -#TODO: optimize this function -function removetags(ts::TagSet, tagsremove) - tsremove = TagSet(tagsremove) - res_ts = MVector(data(ts)) - ntags = length(ts) - for n in 1:length(tsremove) - @inbounds ntags = _removetag!(res_ts, ntags, data(tsremove)[n]) - end - return TagSet(TagSetStorage(res_ts), ntags) -end - -#TODO: optimize this function -function replacetags(ts::TagSet, tagsremove, tagsadd) - tsremove = TagSet(tagsremove) - tsadd = TagSet(tagsadd) - res_ts = MVector(data(ts)) - ntags = length(ts) - # The TagSet must have the tags to be replaced - !hastags(ts, tsremove) && return ts - for n in 1:length(tsremove) - @inbounds ntags = _removetag!(res_ts, ntags, data(tsremove)[n]) - end - for n in 1:length(tsadd) - if ntags < maxlength(ts) - @inbounds ntags = _addtag_ordered!(res_ts, ntags, data(tsadd)[n]) - elseif using_strict_tags() - strict_tags_replace_error(ts, tsremove, tsadd, maxlength(ts)) - end - end - return TagSet(TagSetStorage(res_ts), ntags) -end - -function tagstring(T::TagSet) - res = "" - N = length(T) - N == 0 && return res - for n in 1:(N - 1) - res *= "$(SmallString(data(T)[n]))," - end - res *= "$(SmallString(data(T)[N]))" - return res -end - -""" - iterate(is::TagSet[, state]) - -Iterate over the Tag's in a TagSet. - -# Example - -```jldoctest -julia> using ITensors; - - -julia> tagset = TagSet("l, tags"); - - -julia> for tag in tagset - println(tag) - end -l -tags -``` -""" -Base.iterate(ts::TagSet, state) = state < length(ts) ? (ts[state + 1], state + 1) : nothing - -Base.iterate(ts::TagSet) = (ts[1], 1) - -commontags(ts::TagSet) = ts - -function commontags(ts1::TagSet, ts2::TagSet) - ts3 = TagSet() - N1 = length(ts1) - for n1 in 1:N1 - t1 = data(ts1)[n1] - if hastag(ts2, t1) - ts3 = addtags(ts3, t1) - end - end - return ts3 -end - -function commontags(ts1::TagSet, ts2::TagSet, ts3::TagSet, ts::TagSet...) 
- return commontags(commontags(ts1, ts2), ts3, ts...) -end - -function Base.show(io::IO, T::TagSet) - return print(io, "\"$(tagstring(T))\"") -end - -function readcpp(io::IO, ::Type{TagSet}; format="v3") - ts = TagSet() - if format == "v3" - mstore = empty_storage(MTagSetStorage{IntTag,4}) - ntags = 0 - for n in 1:4 - t = readcpp(io, Tag; format=format) - if t != SmallString() - ntags = _addtag_ordered!(mstore, ntags, IntTag(t)) - end - end - ts = TagSet(TagSetStorage(mstore), ntags) - else - throw(ArgumentError("read TagSet: format=$format not supported")) - end - return ts -end -end diff --git a/src/name.jl b/src/name.jl deleted file mode 100644 index bacc04d3e7..0000000000 --- a/src/name.jl +++ /dev/null @@ -1 +0,0 @@ -function name end diff --git a/src/not.jl b/src/not.jl deleted file mode 100644 index 193e3f3cb6..0000000000 --- a/src/not.jl +++ /dev/null @@ -1,26 +0,0 @@ - - -# -# not syntax (to prime or tag the complement -# of the specified indices/pattern) -# - -struct Not{T} - pattern::T - Not(p::T) where {T} = new{T}(p) -end - -""" -not(p) - -Represents the complement of the input -for pattern matching in priming, tagging -and other IndexSet related functions. -""" -function not end - -""" -parent(n::Not) - -Get the original pattern. -""" -Base.parent(n::Not) = n.pattern diff --git a/src/nullspace.jl b/src/nullspace.jl deleted file mode 100644 index 50d4206ad5..0000000000 --- a/src/nullspace.jl +++ /dev/null @@ -1,190 +0,0 @@ -using .QuantumNumbers: Arrow - -# -# NDTensors functionality -# - -# XXX: generalize this function -function _getindex(T::DenseTensor{ElT,N}, I1::Colon, I2::UnitRange{Int64}) where {ElT,N} - A = array(T)[I1, I2] - return tensor(Dense(vec(A)), setdims(inds(T), size(A))) -end - -function getblock_preserve_qns(T::Tensor, b::Block) - # TODO: make `T[b]` preserve QNs - Tb = T[b] - indsTb = getblock.(inds(T), Tuple(b)) .* dir.(inds(T)) - return ITensors.setinds(Tb, indsTb) -end - -function blocksparsetensor(blocks::Dict{B,TB}) where {B,TB} - b1, Tb1 = first(pairs(blocks)) - N = length(b1) - indstypes = typeof.(inds(Tb1)) - blocktype = eltype(Tb1) - indsT = getindex.(indstypes) - # Determine the indices from the blocks - for (b, Tb) in pairs(blocks) - indsTb = inds(Tb) - for n in 1:N - bn = b[n] - indsTn = indsT[n] - if bn > length(indsTn) - resize!(indsTn, bn) - end - indsTn[bn] = indsTb[n] - end - end - T = BlockSparseTensor(blocktype, indsT) - for (b, Tb) in pairs(blocks) - if !isempty(Tb) - T[b] = Tb - end - end - return T -end - -default_atol(A::AbstractArray) = 0.0 -function default_rtol(A::AbstractArray, atol::Real) - return (min(size(A, 1), size(A, 2)) * eps(real(float(one(eltype(A)))))) * iszero(atol) -end - -function _nullspace_hermitian( - M::DenseTensor; atol::Real=default_atol(M), rtol::Real=default_rtol(M, atol) -) - # TODO: try this version - #D, U = eigen(Hermitian(M)) - Dᵢₜ, Uᵢₜ = eigen(itensor(M); ishermitian=true) - D = tensor(Dᵢₜ) - U = tensor(Uᵢₜ) - tol = max(atol, abs(D[1, 1]) * rtol) - indstart = sum(d -> abs(d) .> tol, storage(D)) + 1 - indstop = lastindex(U, 2) - Nb = _getindex(U, :, indstart:indstop) - return Nb -end - -function _nullspace_hermitian( - M::BlockSparseTensor; atol::Real=default_atol(M), rtol::Real=default_rtol(M, atol) -) - tol = atol - # TODO: try this version - # Insert any missing diagonal blocks - insert_diag_blocks!(M) - #D, U = eigen(Hermitian(M)) - Dᵢₜ, Uᵢₜ = eigen(itensor(M); ishermitian=true) - D = tensor(Dᵢₜ) - U = tensor(Uᵢₜ) - nullspace_blocks = Dict() - for bU in nzblocks(U) - bM = Block(bU[1], bU[1]) - bD =
Block(bU[2], bU[2]) - # Assume sorted from largest to smallest - tol = max(atol, abs(D[bD][1, 1]) * rtol) - indstart = sum(d -> abs(d) .> tol, storage(D[bD])) + 1 - Ub = getblock_preserve_qns(U, bU) - indstop = lastindex(Ub, 2) - # Drop zero dimensional blocks - Nb = _getindex(Ub, :, indstart:indstop) - nullspace_blocks[bU] = Nb - end - return blocksparsetensor(nullspace_blocks) -end - -function LinearAlgebra.nullspace(M::Hermitian{<:Number,<:Tensor}; kwargs...) - return _nullspace_hermitian(parent(M); kwargs...) -end - -# -# QN functionality -# - -function setdims(t::NTuple{N,Pair{QN,Int}}, dims::NTuple{N,Int}) where {N} - return first.(t) .=> dims -end - -function setdims(t::NTuple{N,Index{Int}}, dims::NTuple{N,Int}) where {N} - return dims -end - -function getblock(i::Index, n::Integer) - return ITensors.space(i)[n] -end - -# Make `Pair{QN,Int}` act like a regular `dim` -NDTensors.dim(qnv::Pair{QN,Int}) = last(qnv) - -Base.:*(qnv::Pair{QN,Int}, d::Arrow) = qn(qnv) * d => dim(qnv) - -# -# ITensors functionality -# - -# Reshape into an order-2 ITensor -matricize(T::ITensor, inds::Index...) = matricize(T, inds) - -function matricize(T::ITensor, inds) - left_inds = commoninds(T, inds) - right_inds = uniqueinds(T, inds) - return matricize(T, left_inds, right_inds) -end - -function matricize(T::ITensor, left_inds, right_inds) - CL = combiner(left_inds; dir=ITensors.Out, tags="CL") - CR = combiner(right_inds; dir=ITensors.In, tags="CR") - M = (T * CL) * CR - return M, CL, CR -end - -function nullspace(::Order{2}, M::ITensor, left_inds, right_inds; tags="n", kwargs...) - @assert order(M) == 2 - M² = prime(dag(M), right_inds) * M - M² = permute(M², right_inds'..., right_inds...) - M²ₜ = tensor(M²) - Nₜ = nullspace(Hermitian(M²ₜ); kwargs...) - indsN = (Index(ind(Nₜ, 1); dir=ITensors.Out), Index(ind(Nₜ, 2); dir=ITensors.Out, tags)) - N = itensor(ITensors.setinds(Nₜ, indsN)) - # Make the index match the input index - Ñ = replaceinds(N, (ind(N, 1),) => right_inds) - return Ñ -end - -""" - nullspace(T::ITensor, left_inds...; tags="n", atol=1E-12, kwargs...) - -Viewing the ITensor `T` as a matrix with the provided `left_inds` as the -row space and the remaining indices as the column space, -the `nullspace` function computes the right null space. That is, it will return -a tensor `N` acting on the right indices of `T` such that `T*N` is zero. -The returned tensor `N` will also have a new index with the label "n" which -indexes through the 'vectors' in the null space. - -For example, if `T` has the indices `i,j,k`, calling -`N = nullspace(T,i,k)` returns `N` with index `j` such that - - ___ ___ - i --| | | | - | T |--j--| N |--n ≈ 0 - k --| | | | - --- --- - -The index `n` can be obtained by calling -`n = uniqueind(N, T)` - -Note that the implementation of this function is subject to change in the future, in -which case the precise `atol` value that gives a certain null space size may change -in future versions of ITensor. - -Keyword arguments: - - - `atol::Float64=1E-12` - singular values of T†*T below this value define the null space - - `tags::String="n"` - choose the tags of the index selecting elements of the null space -""" -function nullspace(T::ITensor, is...; tags="n", atol=1E-12, kwargs...) - M, CL, CR = matricize(T, is...) - @assert order(M) == 2 - cL = commoninds(M, CL) - cR = commoninds(M, CR) - N₂ = nullspace(Order(2), M, cL, cR; tags, atol, kwargs...)
- return N₂ * CR -end diff --git a/src/oneitensor.jl b/src/oneitensor.jl deleted file mode 100644 index e6cfb90bec..0000000000 --- a/src/oneitensor.jl +++ /dev/null @@ -1,19 +0,0 @@ -# Scalar identity ITensor -# TODO: Implement as a new `Scalar` storage type. -struct OneITensor end - -inds(::OneITensor) = () - -# This is to help with generic promote_type code -# in eltype(::AbstractProjMPO) -eltype(::OneITensor) = Bool -dim(::OneITensor) = 1 -isoneitensor(::OneITensor) = true -isoneitensor(::ITensor) = false - -dag(t::OneITensor) = t - -(::OneITensor * A::ITensor) = A -(A::ITensor * ::OneITensor) = A -*(t::OneITensor) = t -deepcontract(ts::Union{ITensor,OneITensor}...) = *(ts...) diff --git a/src/packagecompile/compile.jl b/src/packagecompile/compile.jl deleted file mode 100644 index 88d1ce6b5b..0000000000 --- a/src/packagecompile/compile.jl +++ /dev/null @@ -1,62 +0,0 @@ -default_compile_dir() = joinpath(homedir(), ".julia", "sysimages") - -default_compile_filename() = "sys_itensors.so" - -default_compile_path() = joinpath(default_compile_dir(), default_compile_filename()) - -function compile_note(; dir=default_compile_dir(), filename=default_compile_filename()) - path = joinpath(dir, filename) - return """ - You will be able to start Julia with a compiled version of ITensors using: - - ``` - ~ julia --sysimage $path - ``` - - and you should see that the startup times and JIT compilation times are substantially improved when you are using ITensors. - - In Unix, you can create an alias with the Bash command: - - ``` - ~ alias julia_itensors="julia --sysimage $path -e 'using ITensors' -i" - ``` - - which you can put in your `~/.bashrc`, `~/.zshrc`, etc. This also executes - `using ITensors` so that ITensors is loaded and ready to use; you can leave off - `-e 'using ITensors' -i` if you don't want that. Then you can start Julia with a - version of ITensors installed with the command: - - ``` - ~ julia_itensors - ``` - - Note that if you update ITensors to a new version, for example with `using - Pkg; Pkg.update("ITensors")`, you will need to run the `ITensors.compile()` - command again to recompile the new version of ITensors. - """ -end - -function compile(; backend=Algorithm"PackageCompiler"(), kwargs...) - return compile(backend; kwargs...) -end - -@doc """ - ITensors.compile(; dir = "$(default_compile_dir())", - filename = "$(default_compile_filename())") - -Compile ITensors.jl with [PackageCompiler.jl](https://julialang.github.io/PackageCompiler.jl/dev/). -This will take some time, perhaps a few minutes. - -This will create a system image containing the compiled version of ITensors -located at `dir/filename`, by default `$(default_compile_path())`. - -!!! compat "ITensors 0.7" - As of ITensors 0.7, you must now install and load both the - [ITensorMPS.jl](https://github.com/ITensor/ITensorMPS.jl) package - and the [PackageCompiler.jl](https://github.com/JuliaLang/PackageCompiler.jl) - package in order to use `ITensors.compile()`, since it relies on running MPS/MPO - functionality as example code for Julia to compile and is implemented in a package - extension in order to make `PackageCompiler.jl` an optional dependency. - -$(compile_note()) -""" compile diff --git a/src/qn/flux.jl b/src/qn/flux.jl deleted file mode 100644 index 1709e41479..0000000000 --- a/src/qn/flux.jl +++ /dev/null @@ -1,84 +0,0 @@ - -""" - flux(T::ITensor) - -Returns the flux of the ITensor. - -If the ITensor is empty or it has no QNs, returns `nothing`. -""" -flux(T::ITensor, args...) = flux(tensor(T), args...)
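As a quick illustration of the `flux`/`checkflux` semantics documented below, here is a minimal sketch assuming the QN `Index` and block sparse ITensor constructors that are removed elsewhere in this patch:

```julia
using ITensors

# A QN Index with two one-dimensional sectors.
i = Index([QN("Sz", -1) => 1, QN("Sz", 1) => 1], "i")

# Allocate only the blocks consistent with total flux QN("Sz", 0).
A = ITensor(QN("Sz", 0), i', dag(i))
A[i' => 1, i => 1] = 1.0

@show flux(A)          # QN("Sz",0): the common flux of the nonzero blocks
ITensors.checkflux(A)  # passes silently; checkflux(A, QN("Sz", 2)) would throw
```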
- -""" - checkflux(T::ITensor) - -Check that fluxes of all non-zero blocks of a blocked or symmetric ITensor -are equal. Throws an error if one or more blocks have a different flux. -""" -checkflux(T::ITensor, flux_check) = checkflux(tensor(T), flux_check) - -""" - checkflux(T::ITensor, flux) - -Check that fluxes of all non-zero blocks of a blocked or symmetric Tensor -equal the value `flux`. Throws an error if one or more blocks does not have this flux. -""" -checkflux(T::ITensor) = checkflux(tensor(T)) - -# -# Tensor versions -# TODO: Move to NDTensors when QN functionality -# is moved there. -# - -""" - flux(T::Tensor, block::Block) - -Compute the flux of a specific block of a Tensor, -regardless of whether this block is present or not in the storage. -""" -flux(T::Tensor, block::Block) = flux(inds(T), block) - -""" - flux(T::Tensor, i::Integer, is::Integer...) - -Compute the flux of a specific element of a Tensor, -regardless of whether this element is zero or non-zero. -""" -flux(T::Tensor, i::Integer, is::Integer...) = flux(inds(T), i, is...) - -""" - flux(T::Tensor) - -Return the flux of a Tensor, based on what non-zero blocks it -has. If the Tensor is not blocked or has no non-zero blocks, -this function returns `nothing`. -""" -function flux(T::Tensor) - (!hasqns(T) || isempty(T)) && return nothing - @debug_check checkflux(T) - block1 = first(eachnzblock(T)) - return flux(T, block1) -end - -allfluxequal(T::Tensor, flux_to_check) = all(b -> flux(T, b) == flux_to_check, nzblocks(T)) -allfluxequal(T::Tensor) = allequal(flux(T, b) for b in nzblocks(T)) - -""" - checkflux(T::Tensor) - -Check that fluxes of all non-zero blocks of a blocked or symmetric Tensor -are equal. Throws an error if one or more blocks have a different flux. -If the tensor is dense (is not blocked) then `checkflux` returns `nothing`. -""" -function checkflux(T::Tensor) - (!hasqns(T) || isempty(T)) && return nothing - return allfluxequal(T) ? nothing : error("Fluxes not all equal") -end - -""" - checkflux(T::Tensor, flux) - -Check that fluxes of all non-zero blocks of a blocked or symmetric Tensor -equal the value `flux`. Throws an error if one or more blocks does not have this flux. -""" -checkflux(T::Tensor, flux) = allfluxequal(T, flux) ? 
nothing : error("Fluxes not all equal") diff --git a/src/qn/qnindex.jl b/src/qn/qnindex.jl deleted file mode 100644 index 9bad042eb2..0000000000 --- a/src/qn/qnindex.jl +++ /dev/null @@ -1,552 +0,0 @@ -using .QuantumNumbers: - QuantumNumbers, Arrow, Neither, Out, have_same_mods, have_same_qns, removeqn -using .SiteTypes: SiteTypes -using .TagSets: TagSets - -const QNBlock = Pair{QN,Int64} - -const QNBlocks = Vector{QNBlock} - -qn(qnblock::QNBlock) = qnblock.first - -# Get the dimension of the specified block -blockdim(qnblock::QNBlock) = qnblock.second - -NDTensors.resize(qnblock::QNBlock, newdim::Int64) = QNBlock(qnblock.first, newdim) - -# Get the dimension of the specified block -blockdim(qnblocks::QNBlocks, b::Integer) = blockdim(qnblocks[b]) -blockdim(qnblocks::QNBlocks, b::Block{1}) = blockdim(qnblocks[only(b)]) - -# Get the QN of the specified block -qn(qnblocks::QNBlocks, b::Integer) = qn(qnblocks[b]) -qn(qnblocks::QNBlocks, b::Block{1}) = qn(qnblocks[only(b)]) - -nblocks(qnblocks::QNBlocks) = length(qnblocks) - -function dim(qnblocks::QNBlocks) - dimtot = 0 - for (_, blockdim) in qnblocks - dimtot += blockdim - end - return dimtot -end - -function -(qnb::QNBlock) - return QNBlock(-qn(qnb), blockdim(qnb)) -end - -function (qn1::QNBlock + qn2::QNBlock) - qn(qn1) != qn(qn2) && error("Cannot add qn blocks with different qns") - return QNBlock(qn(qn1), blockdim(qn1) + blockdim(qn2)) -end - -function QuantumNumbers.removeqn(qn_block::QNBlock, qn_name::String) - return removeqn(qn(qn_block), qn_name) => blockdim(qn_block) -end - -function -(qns::QNBlocks) - qns_new = copy(qns) - for i in 1:length(qns_new) - qns_new[i] = -qns_new[i] - end - return qns_new -end - -function mergeblocks(qns::QNBlocks) - qnsC = [qns[1]] - - # Which block this is, after combining - block_count = 1 - for i in 2:nblocks(qns) - if qn(qns[i]) == qn(qns[i - 1]) - qnsC[block_count] += qns[i] - else - push!(qnsC, qns[i]) - block_count += 1 - end - end - return qnsC -end - -function QuantumNumbers.removeqn(space::QNBlocks, qn_name::String; mergeblocks=true) - space = QNBlocks([removeqn(qn_block, qn_name) for qn_block in space]) - if mergeblocks - space = ITensors.mergeblocks(space) - end - return space -end - -""" -A QN Index is an Index with QN block storage instead of -just an integer dimension. The QN block storage is a -vector of pairs of QNs and block dimensions. -The total dimension of a QN Index is the sum of the -dimensions of the blocks of the Index. -""" -const QNIndex = Index{QNBlocks} - -# Trait for the symmetry type (QN or not QN) -struct HasQNs <: SymmetryStyle end - -symmetrystyle(::QNIndex) = HasQNs() -symmetrystyle(::HasQNs, ::HasQNs) = HasQNs() -symmetrystyle(::NonQN, ::NonQN) = NonQN() -symmetrystyle(::HasQNs, ::NonQN) = HasQNs() -symmetrystyle(::NonQN, ::HasQNs) = HasQNs() - -hasqns(::QNBlocks) = true - -function QuantumNumbers.have_same_qns(qnblocks::QNBlocks) - qn1 = qn(qnblocks, 1) - for n in 2:nblocks(qnblocks) - !have_same_qns(qn1, qn(qnblocks, n)) && return false - end - return true -end - -function QuantumNumbers.have_same_mods(qnblocks::QNBlocks) - qn1 = qn(qnblocks, 1) - for n in 2:nblocks(qnblocks) - !have_same_mods(qn1, qn(qnblocks, n)) && return false - end - return true -end - -""" - Index(qnblocks::Vector{Pair{QN, Int64}}; dir::Arrow = Out, - tags = "", - plev::Integer = 0) - -Construct a QN Index from a Vector of pairs of QN and block -dimensions. 
- -Note: in the future, this may enforce that all blocks have the -same QNs (which would allow for some optimizations, for example -when constructing random QN ITensors). - -# Example - -``` -Index([QN("Sz", -1) => 1, QN("Sz", 1) => 1]; tags = "i") -``` -""" -function Index(qnblocks::QNBlocks; dir::Arrow=Out, tags="", plev=0) - # TODO: make this a debug check? - #have_same_qns(qnblocks) || error("When creating a QN Index, the QN blocks must have the same QNs") - #have_same_mods(qnblocks) || error("When creating a QN Index, the QN blocks must have the same mods") - return Index(rand(index_id_rng(), IDType), qnblocks, dir, tags, plev) -end - -""" - Index(qnblocks::Vector{Pair{QN, Int64}}, tags; dir::Arrow = Out, - plev::Integer = 0) - -Construct a QN Index from a Vector of pairs of QN and block -dimensions. - -# Example - -``` -Index([QN("Sz", -1) => 1, QN("Sz", 1) => 1], "i"; dir = In) -``` -""" -function Index(qnblocks::QNBlocks, tags; dir::Arrow=Out, plev::Integer=0) - return Index(qnblocks; dir=dir, tags=tags, plev=plev) -end - -""" - Index(qnblocks::Pair{QN, Int64}...; dir::Arrow = Out, - tags = "", - plev::Integer = 0) - -Construct a QN Index from a list of pairs of QN and block -dimensions. - -# Example - -``` -Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags = "i") -``` -""" -function Index(qnblocks::QNBlock...; dir::Arrow=Out, tags="", plev=0) - return Index([qnblocks...]; dir=dir, tags=tags, plev=plev) -end - -dim(i::QNIndex) = dim(space(i)) - -""" - nblocks(i::QNIndex) - -Returns the number of QN blocks, or subspaces, of the QNIndex `i`. - -To obtain the dimension of block number `b`, use `blockdim(i,b)`. -To obtain the QN associated with block `b`, use `qn(i,b)`. - -### Example - -``` -julia> i = Index([QN("Sz",-1)=>2, QN("Sz",0)=>4, QN("Sz",1)=>2], "i") -julia> nblocks(i) -3 -``` -""" -nblocks(i::QNIndex) = nblocks(space(i)) -# Define to be 1 for non-QN Index -nblocks(i::Index) = 1 - -# Get the Block that the index value falls in -# For example: -# qns = [QN(0,2) => 2, QN(0,2) => 2] -# block(qns, 1) == Block(1) -# block(qns, 2) == Block(1) -# block(qns, 3) == Block(2) -# block(qns, 4) == Block(2) -function block(qns::QNBlocks, n::Int) - tdim = 0 - for b in 1:nblocks(qns) - tdim += blockdim(qns, Block(b)) - (n <= tdim) && return Block(b) - end - error("qn: QN Index value out of range") - return Block(0) -end - -function block(iv::Pair{<:Index}) - i = ind(iv) - v = SiteTypes.val(iv) - return block(space(i), v) -end - -# Get the QN of the block -qn(i::QNIndex, b::Block{1}) = qn(space(i), b) - -qn(ib::Pair{<:Index,Block{1}}) = qn(first(ib), last(ib)) - -# XXX: deprecate the Integer version -# Miles asks: isn't it pretty convenient to have it? -""" - qn(i::QNIndex, b::Integer) - -Returns the QN associated with block number `b` of -a QNIndex `i`. - -### Example - -``` -julia> i = Index([QN("Sz",-1)=>2, QN("Sz",0)=>4, QN("Sz",1)=>2], "i") -julia> qn(i,1) -QN("Sz",-1) -julia> qn(i,2) -QN("Sz",0) -``` -""" -qn(i::QNIndex, b::Integer) = qn(i, Block(b)) - -# Get the QN of the block the IndexVal lies in -qn(iv::Pair{<:Index}) = qn(ind(iv), block(iv)) - -flux(i::QNIndex, b::Block{1}) = dir(i) * qn(i, b) - -flux(ib::Pair{<:Index,Block{1}}) = flux(first(ib), last(ib)) - -flux(iv::Pair{<:Index}) = flux(ind(iv), block(iv)) - -function flux(i::Index, b::Block) - return error( - "Cannot compute flux: Index has no QNs. 
Try setting conserve_qns=true in siteinds or constructing Index with QN subspaces.", - ) -end - -qnblocks(i::QNIndex) = space(i) - -# XXX: deprecate the Integer version -# Miles asks: isn't the integer version very convenient? -blockdim(i::QNIndex, b::Block) = blockdim(space(i), b) - -""" - blockdim(i::QNIndex, b::Integer) - -Returns the dimension of block number `b` of -a QNIndex `i`. - -### Example - -``` -julia> i = Index([QN("Sz",-1)=>2, QN("Sz",0)=>4, QN("Sz",1)=>2], "i") -julia> blockdim(i,1) -2 -julia> blockdim(i,2) -4 -``` -""" -blockdim(i::QNIndex, b::Integer) = blockdim(i, Block(b)) -function blockdim(i::Index, b::Union{Block,Integer}) - return error( - "`blockdim(i::Index, b)` not currently defined for non-QN Index $i of type `$(typeof(i))`. In the future this may be defined for `b == Block(1)` or `b == 1` as `dim(i)` and error otherwise.", - ) -end - -dim(i::QNIndex, b::Block) = blockdim(space(i), b) - -NDTensors.eachblock(i::Index) = (Block(n) for n in 1:nblocks(i)) - -# Return the first block of the QNIndex with the flux q -function block(::typeof(first), ind::QNIndex, q::QN) - for b in eachblock(ind) - if flux(ind => b) == q - return b - end - end - error("No block found with QN equal to $q") - return Block(0) -end - -# Find the first block that matches the pattern f, -# for example `f(blockind) = qn(blockind) == target_qn`. -# `f` accepts a pair of `i => Block(n)` where `n` -# runs over `nblocks(i)`. -function findfirstblock(f, i::QNIndex) - for b in ITensors.eachblock(i) - if f(i => b) - return b - end - end - error("No block of Index $i matching the specified pattern.") - return Block(0) -end - -# XXX: call this simply `block` and return a Block{1} -# Deprecate this -""" - qnblocknum(ind::QNIndex, q::QN) - -Given a QNIndex `ind` and QN `q`, return the -number of the block (from 1,...,nblocks(ind)) -of the QNIndex having QN equal to `q`. Assumes -all blocks of `ind` have a unique QN. -""" -function qnblocknum(ind::QNIndex, q::QN) - for b in 1:nblocks(ind) - if flux(ind => Block(b)) == q - return b - end - end - error("No block found with QN equal to $q") - return 0 -end - -blockdim(ind::QNIndex, q::QN) = blockdim(ind, block(first, ind, q)) - -# XXX: deprecate in favor of blockdim -""" - qnblockdim(ind::QNIndex, q::QN) - -Given a QNIndex `ind` and QN `q`, return the -dimension of the block of the QNIndex having -QN equal to `q`. Assumes all blocks of `ind` -have a unique QN. 
-""" -qnblockdim(ind::QNIndex, q::QN) = blockdim(ind, qnblocknum(ind, q)) - -(dir::Arrow * qnb::QNBlock) = QNBlock(dir * qn(qnb), blockdim(qnb)) - -function (dir::Arrow * qn::QNBlocks) - # XXX use: - # dir .* qn - qnR = copy(qn) - for i in 1:nblocks(qnR) - qnR[i] = dir * qnR[i] - end - return qnR -end - -(qn1::QNBlock * qn2::QNBlock) = QNBlock(qn(qn1) + qn(qn2), blockdim(qn1) * blockdim(qn2)) - -# TODO: rename tensorproduct with ⊗ alias -function outer(qn1::QNBlocks, qn2::QNBlocks) - qnR = ITensors.QNBlocks(undef, nblocks(qn1) * nblocks(qn2)) - for (i, t) in enumerate(Iterators.product(qn1, qn2)) - qnR[i] = prod(t) - end - return qnR -end - -# TODO: rename tensorproduct with ⊗ alias -function outer(i1::QNIndex, i2::QNIndex; dir=nothing, tags="", plev::Integer=0) - if isnothing(dir) - if ITensors.dir(i1) == ITensors.dir(i2) - dir = ITensors.dir(i1) - else - dir = Out - end - end - newspace = dir * ((ITensors.dir(i1) * space(i1)) ⊗ (ITensors.dir(i2) * space(i2))) - return Index(newspace; dir, tags, plev) -end - -# TODO: rename tensorproduct with ⊗ alias -function outer(i::QNIndex; dir=nothing, tags="", plev::Integer=0) - if isnothing(dir) - dir = ITensors.dir(i) - end - newspace = dir * (ITensors.dir(i) * space(i)) - return Index(newspace; dir, tags, plev) -end - -# TODO: add ⊕ alias -function directsum( - i::Index{Vector{Pair{QN,Int}}}, j::Index{Vector{Pair{QN,Int}}}; tags="sum" -) - dir(i) ≠ dir(j) && error( - "To direct sum two indices, they must have the same direction. Trying to direct sum indices $i and $j.", - ) - return Index(vcat(space(i), space(j)); dir=dir(i), tags) -end - -isless(qnb1::QNBlock, qnb2::QNBlock) = isless(qn(qnb1), qn(qnb2)) - -function permuteblocks(i::QNIndex, perm) - qnblocks_perm = space(i)[perm] - return replaceqns(i, qnblocks_perm) -end - -function combineblocks(qns::QNBlocks) - perm = sortperm(qns) - qnsP = qns[perm] - qnsC = [qnsP[1]] - comb = Vector{Int}(undef, nblocks(qns)) - - # Which block this is, after combining - block_count = 1 - comb[1] = block_count - for i in 2:nblocks(qnsP) - if qn(qnsP[i]) == qn(qnsP[i - 1]) - qnsC[block_count] += qnsP[i] - else - push!(qnsC, qnsP[i]) - block_count += 1 - end - comb[i] = block_count - end - return qnsC, perm, comb -end - -function splitblocks(qns::QNBlocks) - idim = dim(qns) - split_qns = similar(qns, idim) - for n in 1:idim - b = block(qns, n) - split_qns[n] = qn(qns, b) => 1 - end - return split_qns -end - -# Make a new Index with the specified qn blocks -replaceqns(i::QNIndex, qns::QNBlocks) = setspace(i, qns) - -NDTensors.block(i::QNIndex, n::Integer) = space(i)[n] - -function setblockdim!(i::QNIndex, newdim::Integer, n::Integer) - qns = space(i) - qns[n] = qn(qns[n]) => newdim - return i -end - -function setblockqn!(i::QNIndex, newqn::QN, n::Integer) - qns = space(i) - qns[n] = newqn => blockdim(qns[n]) - return i -end - -function setblock!(i::QNIndex, b::QNBlock, n::Integer) - qns = space(i) - qns[n] = b - return i -end - -function deleteat!(i::QNIndex, pos) - deleteat!(space(i), pos) - return i -end - -function resize!(i::QNIndex, n::Integer) - resize!(space(i), n) - return i -end - -function combineblocks(i::QNIndex) - qnsR, perm, comb = combineblocks(space(i)) - iR = replaceqns(i, qnsR) - return iR, perm, comb -end - -removeqns(i::QNIndex) = setdir(setspace(i, dim(i)), Neither) -function QuantumNumbers.removeqn(i::QNIndex, qn_name::String; mergeblocks=true) - return setspace(i, removeqn(space(i), qn_name; mergeblocks)) -end -mergeblocks(i::QNIndex) = setspace(i, mergeblocks(space(i))) - -function 
addqns(i::Index, qns::QNBlocks; dir::Arrow=Out) - @assert dim(i) == dim(qns) - return setdir(setspace(i, qns), dir) -end - -function addqns(i::QNIndex, qns::QNBlocks) - @assert dim(i) == dim(qns) - @assert nblocks(qns) == nblocks(i) - iqns = space(i) - j = copy(i) - jqn = space(j) - for n in 1:nblocks(i) - @assert blockdim(iqns, n) == blockdim(qns, n) - iqn_n = qn(iqns, n) - qn_n = qn(qns, n) - newqn = iqn_n - for nqv in 1:nactive(qn_n) - qv = qn_n[nqv] - newqn = addqnval(newqn, qv) - end - jqn[n] = newqn => blockdim(iqns, n) - end - return j -end - -# Check that the QNs are all the same -function hassameflux(i1::QNIndex, i2::QNIndex) - dim_i1 = dim(i1) - dim_i1 ≠ dim(i2) && return false - for n in 1:dim_i1 - flux(i1 => n) ≠ flux(i2 => n) && return false - end - return true -end - -hassameflux(::QNIndex, ::Index) = false -hassameflux(::Index, ::QNIndex) = false - -# Split the blocks into blocks of size 1 with the same QNs -splitblocks(i::Index) = setspace(i, splitblocks(space(i))) - -trivial_space(i::QNIndex) = [QN() => 1] - -function mutable_storage(::Type{Order{N}}, ::Type{IndexT}) where {N,IndexT<:QNIndex} - return SizedVector{N,IndexT}(undef) -end - -function show(io::IO, i::QNIndex) - idstr = "$(id(i) % 1000)" - if length(tags(i)) > 0 - print( - io, - "(dim=$(dim(i))|id=$(idstr)|\"$(TagSets.tagstring(tags(i)))\")$(primestring(plev(i)))", - ) - else - print(io, "(dim=$(dim(i))|id=$(idstr))$(primestring(plev(i)))") - end - println(io, " <$(dir(i))>") - for (n, qnblock) in enumerate(space(i)) - print(io, " $n: $qnblock") - n < length(space(i)) && println(io) - end -end diff --git a/src/qn/qnindexset.jl b/src/qn/qnindexset.jl deleted file mode 100644 index 22bd8dfdff..0000000000 --- a/src/qn/qnindexset.jl +++ /dev/null @@ -1,34 +0,0 @@ - -const QNIndexSet = IndexSet{QNIndex} - -const QNIndices = Union{QNIndexSet,Tuple{Vararg{QNIndex}}} - -# Get a list of the non-zero blocks given a desired flux -# TODO: make a fillqns(inds::Indices) function that makes all indices -# in inds have the same qns. Then, use a faster comparison: -# ==(flux(inds,block; assume_filled=true), qn; assume_filled=true) -function nzblocks(qn::QN, inds::Indices) - N = length(inds) - blocks = Block{N}[] - for block in eachblock(inds) - if flux(inds, block) == qn - push!(blocks, block) - end - end - return blocks -end - -function nzdiagblocks(qn::QN, inds::Indices) - N = length(inds) - blocks = NTuple{N,Int}[] - for block in eachdiagblock(inds) - if flux(inds, block) == qn - push!(blocks, Tuple(block)) - end - end - return blocks -end - -anyfermionic(is::Indices) = any(isfermionic, is) - -allfermionic(is::Indices) = all(isfermionic, is) diff --git a/src/qn/qnitensor.jl b/src/qn/qnitensor.jl deleted file mode 100644 index 0faf2f9b9d..0000000000 --- a/src/qn/qnitensor.jl +++ /dev/null @@ -1,590 +0,0 @@ -using .QuantumNumbers: QuantumNumbers, removeqn -using NDTensors: sim - -@propagate_inbounds @inline function _setindex!!( - ::HasQNs, T::Tensor, x::Number, I::Integer... -) - fluxT = flux(T) - if !isnothing(fluxT) && fluxT != flux(T, I...) - error( - "In `setindex!`, the element $I of ITensor: \n$(T)\n you are trying to set is in a block with flux $(flux(T, I...)), which is different from the flux $fluxT of the other blocks of the ITensor. You may be trying to create an ITensor that does not have a well defined quantum number flux.", - ) - end - return setindex!!(T, x, I...) -end - -# TODO: Replace with a simpler and more generic `zeros` constructor -# when the new `UnallocatedZeros` type lands. 
-# This is needed for now since there is some issue with calling -# `setindex!` on `EmptyTensor`, it's not really worth investigating -# right now since that type will be removed soon anyway in -# https://github.com/ITensor/ITensors.jl/pull/1213. -# This is only used internally inside the implementation of `directsum` -# right now. -function zeros_itensor(elt::Type{<:Number}, inds::QNIndex...) - return itensor( - tensor( - BlockSparse(elt, undef, NDTensors.Dictionary{Block{length(inds)},Int}(), 0), inds - ), - ) -end - -""" - ITensor([::Type{ElT} = Float64, ][flux::QN = QN(), ]inds) - ITensor([::Type{ElT} = Float64, ][flux::QN = QN(), ]inds::Index...) - -Construct an ITensor with BlockSparse storage filled with `zero(ElT)` -where the nonzero blocks are determined by `flux`. - -If `ElT` is not specified it defaults to `Float64`. - -If `flux` is not specified, the ITensor will be empty (it will contain no blocks, and -have an undefined flux). The flux will be set by the first element that is set. - -# Examples - -```julia -julia> i -(dim=3|id=212|"i") - 1: QN(0) => 1 - 2: QN(1) => 2 - -julia> @show ITensor(QN(0), i', dag(i)); -ITensor(QN(0), i', dag(i)) = ITensor ord=2 -Dim 1: (dim=3|id=212|"i")' - 1: QN(0) => 1 - 2: QN(1) => 2 -Dim 2: (dim=3|id=212|"i") - 1: QN(0) => 1 - 2: QN(1) => 2 -NDTensors.BlockSparse{Float64, Vector{Float64}, 2} - 3×3 -Block(1, 1) - [1:1, 1:1] - 0.0 - -Block(2, 2) - [2:3, 2:3] - 0.0 0.0 - 0.0 0.0 - -julia> @show ITensor(QN(1), i', dag(i)); -ITensor(QN(1), i', dag(i)) = ITensor ord=2 -Dim 1: (dim=3|id=212|"i")' - 1: QN(0) => 1 - 2: QN(1) => 2 -Dim 2: (dim=3|id=212|"i") - 1: QN(0) => 1 - 2: QN(1) => 2 -NDTensors.BlockSparse{Float64, Vector{Float64}, 2} - 3×3 -Block(2, 1) - [2:3, 1:1] - 0.0 - 0.0 - -julia> @show ITensor(ComplexF64, QN(1), i', dag(i)); -ITensor(ComplexF64, QN(1), i', dag(i)) = ITensor ord=2 -Dim 1: (dim=3|id=212|"i")' - 1: QN(0) => 1 - 2: QN(1) => 2 -Dim 2: (dim=3|id=212|"i") - 1: QN(0) => 1 - 2: QN(1) => 2 -NDTensors.BlockSparse{ComplexF64, Vector{ComplexF64}, 2} - 3×3 -Block(2, 1) - [2:3, 1:1] - 0.0 + 0.0im - 0.0 + 0.0im - -julia> @show ITensor(undef, QN(1), i', dag(i)); -ITensor(undef, QN(1), i', dag(i)) = ITensor ord=2 -Dim 1: (dim=3|id=212|"i")' - 1: QN(0) => 1 - 2: QN(1) => 2 -Dim 2: (dim=3|id=212|"i") - 1: QN(0) => 1 - 2: QN(1) => 2 -NDTensors.BlockSparse{Float64, Vector{Float64}, 2} - 3×3 -Block(2, 1) - [2:3, 1:1] - 0.0 - 1.63e-322 -``` -Construction with undefined flux: -```julia -julia> A = ITensor(i', dag(i)); - -julia> @show A; -A = ITensor ord=2 -Dim 1: (dim=3|id=212|"i")' - 1: QN(0) => 1 - 2: QN(1) => 2 -Dim 2: (dim=3|id=212|"i") - 1: QN(0) => 1 - 2: QN(1) => 2 -NDTensors.EmptyStorage{NDTensors.EmptyNumber, NDTensors.BlockSparse{NDTensors.EmptyNumber, Vector{NDTensors.EmptyNumber}, 2}} - 3×3 - - - -julia> isnothing(flux(A)) -true - -julia> A[i' => 1, i => 2] = 2 -2 - -julia> @show A; -A = ITensor ord=2 -Dim 1: (dim=3|id=212|"i")' - 1: QN(0) => 1 - 2: QN(1) => 2 -Dim 2: (dim=3|id=212|"i") - 1: QN(0) => 1 - 2: QN(1) => 2 -NDTensors.BlockSparse{Int64, Vector{Int64}, 2} - 3×3 -Block(1, 2) - [1:1, 2:3] - 2 0 - -julia> flux(A) -QN(-1) -``` -""" -function ITensor(::Type{ElT}, flux::QN, inds::QNIndices) where {ElT<:Number} - is = Tuple(inds) - blocks = nzblocks(flux, is) - if length(blocks) == 0 - error("ITensor with flux=$flux resulted in no allowed blocks") - end - T = BlockSparseTensor(ElT, blocks, is) - return itensor(T) -end - -# This helps with making code more generic between block sparse -# and dense. 
-function ITensor(::Type{ElT}, flux::QN, inds::Indices) where {ElT<:Number} - return itensor(Dense(ElT, dim(inds)), inds) -end - -function ITensor(::Type{ElT}, flux::QN, is...) where {ElT<:Number} - return ITensor(ElT, flux, indices(is...)) -end - -ITensor(flux::QN, is...) = ITensor(Float64, flux, is...) - -ITensor(::Type{ElT}, inds::QNIndices) where {ElT<:Number} = emptyITensor(ElT, inds) - -ITensor(inds::QNIndices) = emptyITensor(inds) - -# TODO: generalize to list of Tuple, Vector, and QNIndex -ITensor(::Type{ElT}, is::QNIndex...) where {ElT<:Number} = emptyITensor(ElT, indices(is...)) - -# TODO: generalize to list of Tuple, Vector, and QNIndex -ITensor(is::QNIndex...) = emptyITensor(indices(is...)) - -""" - ITensor([::Type{ElT} = Float64,] ::UndefInitializer, flux::QN, inds) - ITensor([::Type{ElT} = Float64,] ::UndefInitializer, flux::QN, inds::Index...) - -Construct an ITensor with indices `inds` and BlockSparse storage with undefined -elements of type `ElT`, where the nonzero (allocated) blocks are determined by -the provided QN `flux`. One purpose for using this constructor is that -initializing the elements in an undefined way is faster than initializing -them to a set value such as zero. - -The storage will have `NDTensors.BlockSparse` type. - -# Examples - -```julia -i = Index([QN(0)=>1, QN(1)=>2], "i") -A = ITensor(undef,QN(0),i',dag(i)) -B = ITensor(Float64,undef,QN(0),i',dag(i)) -C = ITensor(ComplexF64,undef,QN(0),i',dag(i)) -``` -""" -function ITensor( - ::Type{ElT}, ::UndefInitializer, flux::QN, inds::Indices -) where {ElT<:Number} - is = Tuple(inds) - blocks = nzblocks(flux, is) - T = BlockSparseTensor(ElT, undef, blocks, is) - return itensor(T) -end - -function ITensor(::Type{ElT}, ::UndefInitializer, flux::QN, is...) where {ElT<:Number} - return ITensor(ElT, undef, flux, indices(is...)) -end - -function ITensor(::UndefInitializer, flux::QN, is...) - return ITensor(Float64, undef, flux, indices(is...)) -end - -""" - ITensor([ElT::Type, ]x::Number, flux::QN, inds) - ITensor([ElT::Type, ]x::Number, flux::QN, inds::Index...) - -Construct an ITensor with all elements consistent with QN flux `flux` set to `x` and indices `inds`. - -If `x isa Int` or `x isa Complex{Int}` then the elements will be set to `float(x)` -unless specified otherwise by the first input. - -The storage will have `NDTensors.BlockSparse` type. - -# Examples - -```julia -i = Index([QN(0)=>1, QN(1)=>2], "i") -A = ITensor(2.3, QN(0), i', dag(i)) -B = ITensor(Float64, 3.5, QN(0), i', dag(i)) -C = ITensor(ComplexF64, 4, QN(0), i', dag(i)) -``` - -!!! warning - In future versions this may not automatically convert integer inputs with - `float`, and in that case the particular element type should not be relied on. -""" -function ITensor(eltype::Type{<:Number}, x::Number, flux::QN, is::Indices) - is_tuple = Tuple(is) - blocks = nzblocks(flux, is_tuple) - if length(blocks) == 0 - error("ITensor with flux=$flux resulted in no allowed blocks") - end - T = BlockSparseTensor(eltype(x), blocks, is_tuple) - return itensor(T) -end - -function ITensor(eltype::Type{<:Number}, x::Number, flux::QN, is...) - return ITensor(eltype, x, flux, indices(is...)) -end - -ITensor(x::Number, flux::QN, is...) = ITensor(eltype(x), x, flux, is...) - -ITensor(x::RealOrComplex{Int}, flux::QN, is...) = ITensor(float(x), flux, is...) - -ITensor(eltype::Type{<:Number}, x::Number, is::QNIndices) = ITensor(eltype, x, QN(), is) - -# Don't need, calls generic non-QN versions -#ITensor(eltype::Type{<:Number}, x::Number, is::QNIndex...) 
= ITensor(eltype, x, indices(is...))
-#ITensor(x::Number, is...) = ITensor(eltype(x), x, is...)
-#ITensor(x::RealOrComplex{Int}, flux::QN, is...) = ITensor(float(x), is...)
-
-"""
-    ITensor([ElT::Type, ]::AbstractArray, inds; tol=0.0, checkflux=true)
-
-Create a block sparse ITensor from the input array and a collection
-of QN indices. Zeros are dropped and the nonzero blocks are determined
-from the nonzero values of the array.
-
-Optionally, you can set a tolerance such that elements
-less than or equal to the tolerance are dropped.
-
-By default, this will check that the fluxes of the nonzero blocks
-are consistent with each other. You can disable this check by
-setting `checkflux=false`.
-
-# Examples
-
-```julia
-julia> i = Index([QN(0)=>1, QN(1)=>2], "i");
-
-julia> A = [1e-9 0.0 0.0;
-            0.0 2.0 3.0;
-            0.0 1e-10 4.0];
-
-julia> @show ITensor(A, i', dag(i); tol = 1e-8);
-ITensor(A, i', dag(i); tol = 1.0e-8) = ITensor ord=2
-Dim 1: (dim=3|id=468|"i")'
- 1: QN(0) => 1
- 2: QN(1) => 2
-Dim 2: (dim=3|id=468|"i")
- 1: QN(0) => 1
- 2: QN(1) => 2
-NDTensors.BlockSparse{Float64,Array{Float64,1},2}
- 3×3
-Block: (2, 2)
- [2:3, 2:3]
-  2.0  3.0
-  0.0  4.0
-```
-"""
-function ITensor(
-  ::AliasStyle,
-  elt::Type{<:Number},
-  A::AbstractArray{<:Number},
-  inds::QNIndices;
-  tol=0.0,
-  checkflux=true,
-)
-  is = Tuple(inds)
-  length(A) ≠ dim(is) && throw(
-    DimensionMismatch(
-      "In ITensor(::AbstractArray, inds), length of AbstractArray ($(length(A))) must match total dimension of the indices ($(dim(is)))",
-    ),
-  )
-  blocks = Block{length(is)}[]
-  T = BlockSparseTensor(elt, blocks, inds)
-  A = reshape(A, dims(is)...)
-  _copyto_dropzeros!(T, A; tol)
-  if checkflux
-    ITensors.checkflux(T)
-  end
-  return itensor(T)
-end
-
-function _copyto_dropzeros!(T::Tensor, A::AbstractArray; tol)
-  for i in eachindex(T)
-    Aᵢ = A[i]
-    if abs(Aᵢ) > tol
-      T[i] = Aᵢ
-    end
-  end
-  return T
-end
-
-# TODO: Deprecated.
-"""
-    emptyITensor([::Type{ElT} = EmptyNumber, ]inds)
-    emptyITensor([::Type{ElT} = EmptyNumber, ]inds::QNIndex...)
-
-Construct an ITensor with `NDTensors.BlockSparse` storage of element type `ElT` with no blocks.
-
-If `ElT` is not specified it defaults to `NDTensors.EmptyNumber`.
-"""
-function emptyITensor(::Type{ElT}, inds::QNIndices) where {ElT<:Number}
-  return itensor(EmptyBlockSparseTensor(ElT, inds))
-end
-emptyITensor(inds::QNIndices) = emptyITensor(EmptyNumber, inds)
-
-function emptyITensor(eltype::Type{<:Number}, flux::QN, is...)
-  return error(
-    "Trying to create an empty ITensor with flux $flux; an empty ITensor cannot have a specified flux.",
-  )
-end
-emptyITensor(flux::QN, is...) = emptyITensor(EmptyNumber, flux, is...)
-
-"""
-    random_itensor([::Type{ElT} = Float64, ][flux::QN = QN(), ]inds)
-    random_itensor([::Type{ElT} = Float64, ][flux::QN = QN(), ]inds::Index...)
-
-Construct an ITensor with `NDTensors.BlockSparse` storage filled with random
-elements of type `ElT` where the nonzero blocks are determined by `flux`.
-
-If `ElT` is not specified it defaults to `Float64`. If the flux is not specified it defaults to `QN()`.
-"""
-function random_itensor(::Type{ElT}, flux::QN, inds::Indices) where {ElT<:Number}
-  return random_itensor(Random.default_rng(), ElT, flux, inds)
-end
-
-function random_itensor(
-  rng::AbstractRNG, ::Type{ElT}, flux::QN, inds::Indices
-) where {ElT<:Number}
-  T = ITensor(ElT, undef, flux, inds)
-  randn!(rng, T)
-  return T
-end
-
-function random_itensor(::Type{ElT}, flux::QN, is...) where {ElT<:Number}
-  return random_itensor(Random.default_rng(), ElT, flux, is...)
-end
-
-function random_itensor(rng::AbstractRNG, ::Type{ElT}, flux::QN, is...) where {ElT<:Number}
-  return random_itensor(rng, ElT, flux, indices(is...))
-end
-
-function random_itensor(::Type{ElT}, inds::QNIndices) where {ElT<:Number}
-  return random_itensor(Random.default_rng(), ElT, inds)
-end
-
-function random_itensor(rng::AbstractRNG, ::Type{ElT}, inds::QNIndices) where {ElT<:Number}
-  return random_itensor(rng, ElT, QN(), inds)
-end
-
-function random_itensor(flux::QN, inds::Indices)
-  return random_itensor(Random.default_rng(), flux, inds)
-end
-
-function random_itensor(rng::AbstractRNG, flux::QN, inds::Indices)
-  return random_itensor(rng, Float64, flux, inds)
-end
-
-function random_itensor(flux::QN, is...)
-  return random_itensor(Random.default_rng(), flux, is...)
-end
-
-function random_itensor(rng::AbstractRNG, flux::QN, is...)
-  return random_itensor(rng, Float64, flux, indices(is...))
-end
-
-# TODO: generalize to list of Tuple, Vector, and QNIndex
-function random_itensor(::Type{ElT}, inds::QNIndex...) where {ElT<:Number}
-  return random_itensor(Random.default_rng(), ElT, inds...)
-end
-
-# TODO: generalize to list of Tuple, Vector, and QNIndex
-function random_itensor(rng::AbstractRNG, ::Type{ElT}, inds::QNIndex...) where {ElT<:Number}
-  return random_itensor(rng, ElT, QN(), inds)
-end
-
-random_itensor(inds::QNIndices) = random_itensor(Random.default_rng(), inds)
-
-random_itensor(rng::AbstractRNG, inds::QNIndices) = random_itensor(rng, Float64, QN(), inds)
-
-# TODO: generalize to list of Tuple, Vector, and QNIndex
-random_itensor(inds::QNIndex...) = random_itensor(Random.default_rng(), inds...)
-
-# TODO: generalize to list of Tuple, Vector, and QNIndex
-function random_itensor(rng::AbstractRNG, inds::QNIndex...)
-  return random_itensor(rng, Float64, QN(), inds)
-end
-
-function combiner(inds::QNIndices; dir=nothing, tags="CMB,Link")
-  # TODO: support combining multiple sets of indices
-  is = Tuple(inds)
-  new_ind = ⊗(is...; dir, tags)
-  comb_ind, perm, comb = combineblocks(new_ind)
-  return itensor(Combiner(perm, comb), (comb_ind, dag.(is)...))
-end
-
-#
-# DiagBlockSparse ITensor constructors
-#
-
-"""
-    diag_itensor([::Type{ElT} = Float64, ][flux::QN = QN(), ]is)
-    diag_itensor([::Type{ElT} = Float64, ][flux::QN = QN(), ]is::Index...)
-
-Make an ITensor with storage type `NDTensors.DiagBlockSparse` with elements
-`zero(ElT)`. The ITensor only has diagonal blocks consistent with the specified `flux`.
-
-If the element type is not specified, it defaults to `Float64`. If the flux
-is not specified, it defaults to `QN()`.
-"""
-function diag_itensor(::Type{ElT}, flux::QN, inds::Indices) where {ElT<:Number}
-  is = Tuple(inds)
-  blocks = nzdiagblocks(flux, is)
-  T = DiagBlockSparseTensor(ElT, blocks, is)
-  return itensor(T)
-end
-
-function diag_itensor(::Type{ElT}, flux::QN, is...) where {ElT<:Number}
-  return diag_itensor(ElT, flux, indices(is...))
-end
-
-function diag_itensor(x::ElT, flux::QN, inds::QNIndices) where {ElT<:Number}
-  is = Tuple(inds)
-  blocks = nzdiagblocks(flux, is)
-  T = DiagBlockSparseTensor(float(ElT), blocks, is)
-  NDTensors.data(T) .= x
-  return itensor(T)
-end
-
-function diag_itensor(x::Number, flux::QN, is...)
-  return diag_itensor(x, flux, indices(is...))
-end
-
-diag_itensor(x::Number, is::QNIndices) = diag_itensor(x, QN(), is)
-
-# TODO: generalize to list of Tuple, Vector, and QNIndex
-diag_itensor(x::Number, is::QNIndex...) = diag_itensor(x, indices(is...))
-
-diag_itensor(flux::QN, is::Indices) = diag_itensor(Float64, flux, is)
-
-diag_itensor(flux::QN, is...) = diag_itensor(Float64, flux, indices(is...))
-
-function diag_itensor(::Type{ElT}, inds::QNIndices) where {ElT<:Number}
-  return diag_itensor(ElT, QN(), inds)
-end
-
-function diag_itensor(inds::QNIndices)
-  return diag_itensor(Float64, QN(), inds)
-end
-
-"""
-    delta([::Type{ElT} = Float64, ][flux::QN = QN(), ]is)
-    delta([::Type{ElT} = Float64, ][flux::QN = QN(), ]is::Index...)
-
-Make an ITensor with storage type `NDTensors.DiagBlockSparse` with uniform
-elements `one(ElT)`. The ITensor only has diagonal blocks consistent with the
-specified `flux`.
-
-If the element type is not specified, it defaults to `Float64`. If the flux is
-not specified, it defaults to `QN()`.
-"""
-function delta(::Type{ElT}, flux::QN, inds::Indices) where {ElT<:Number}
-  is = Tuple(inds)
-  blocks = nzdiagblocks(flux, is)
-  T = DiagBlockSparseTensor(one(ElT), blocks, is)
-  return itensor(T)
-end
-
-function delta(::Type{ElT}, flux::QN, is...) where {ElT<:Number}
-  return delta(ElT, flux, indices(is...))
-end
-
-delta(flux::QN, inds::Indices) = delta(Float64, flux, inds)
-
-delta(flux::QN, is...) = delta(Float64, flux, indices(is...))
-
-function delta(::Type{ElT}, inds::QNIndices) where {ElT<:Number}
-  return delta(ElT, QN(), inds)
-end
-
-delta(inds::QNIndices) = delta(Float64, QN(), inds)
-
-function dropzeros(T::ITensor; tol=0)
-  # XXX: replace with empty(T)
-  T̃ = emptyITensor(eltype(T), inds(T))
-  for b in eachnzblock(T)
-    Tb = T[b]
-    if norm(Tb) > tol
-      T̃[b] = Tb
-    end
-  end
-  return T̃
-end
-
-function δ_split(i1::Index, i2::Index)
-  d = emptyITensor(i1, i2)
-  for n in 1:min(dim(i1), dim(i2))
-    d[n, n] = 1
-  end
-  return d
-end
-
-function splitblocks(A::ITensor, is=inds(A); tol=0)
-  if !hasqns(A)
-    return A
-  end
-  isA = filterinds(A; inds=is)
-  for i in isA
-    i_split = splitblocks(i)
-    ĩ_split = sim(i_split)
-    # Ideally we would use the `δ` tensor here, but currently
-    # it doesn't work properly:
-    #A *= δ(dag(i), ĩ_split)
-    d = δ_split(dag(i), ĩ_split)
-    A *= d
-    A = replaceind(A, ĩ_split, i_split)
-  end
-  A = dropzeros(A; tol=tol)
-  return A
-end
-
-function QuantumNumbers.removeqn(T::ITensor, qn_name::String; mergeblocks=true)
-  if !hasqns(T)
-    return T
-  end
-  inds_R = removeqn(inds(T), qn_name; mergeblocks)
-  R = ITensor(inds_R)
-  for iv in eachindex(T)
-    if !iszero(T[iv])
-      R[iv] = T[iv]
-    end
-  end
-  return R
-end
diff --git a/src/readwrite.jl b/src/readwrite.jl
deleted file mode 100644
index c75462e326..0000000000
--- a/src/readwrite.jl
+++ /dev/null
@@ -1,13 +0,0 @@
-function readcpp(io::IO, ::Type{Vector{T}}; format="v3") where {T}
-  v = Vector{T}()
-  if format == "v3"
-    size = read(io, UInt64)
-    resize!(v, size)
-    for n in 1:size
-      v[n] = readcpp(io, T; format)
-    end
-  else
-    throw(ArgumentError("read Vector: format=$format not supported"))
-  end
-  return v
-end
diff --git a/src/set_operations.jl b/src/set_operations.jl
deleted file mode 100644
index 657375d75e..0000000000
--- a/src/set_operations.jl
+++ /dev/null
@@ -1,90 +0,0 @@
-
-#
-# Set operations
-# These are custom implementations of the set operations in
-# abstractset.jl. However, they do not convert input collections
-# to Sets. Therefore, they have higher complexity (O(N²) instead
-# of O(N) for N elements) but they are faster for small sets.
-# In addition, they assume that elements of the input collections
-# are already unique, i.e. that the inputs are "set-like".
-# Therefore, you should call a function like `unique` before inputting
-# if you're not sure the collections themselves will be unique.
-#
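A quick usage sketch of the helpers defined below (illustrative inputs; the elements are assumed to already be unique):

```julia
# Illustrative only: these internal helpers behave like Base's set operations,
# but assume unique, "set-like" inputs and scale quadratically in length.
a = [1, 2, 3]
b = [2, 3, 4]
_setdiff(a, b)   # [1]
_intersect(a, b) # [2, 3]
_symdiff(a, b)   # [1, 4]
_union(a, b)     # [1, 2, 3, 4]
```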
-
-# A version of Base.setdiff that scales quadratically in the number of elements
-# and assumes the elements of each input set are already unique.
-_setdiff(s) = Base.copymutable(s)
-_setdiff(s, itrs...) = _setdiff!(Base.copymutable(s), itrs...)
-function _setdiff!(s, itrs...)
-  for x in itrs
-    _setdiff!(s, x)
-  end
-  return s
-end
-function _setdiff!(s, itr)
-  isempty(s) && return s
-  for x in itr
-    n = findfirst(==(x), s)
-    !isnothing(n) && deleteat!(s, n)
-  end
-  return s
-end
-
-# A version of Base.intersect that scales quadratically in the number of elements
-# and assumes the elements of each input set are already unique.
-_intersect(s) = Base.copymutable(s)
-_intersect(s, itr, itrs...) = _intersect!(_intersect(s, itr), itrs...)
-# XXX: Base has `s` and `itr` swapped in the definition, which one is correct?
-# Is this special case needed, or is `filter!` sufficient?
-_intersect(s, itr) = Base.mapfilter(in(itr), push!, s, Base.emptymutable(s, eltype(s)))
-function _intersect!(s, itrs...)
-  for x in itrs
-    _intersect!(s, x)
-  end
-  return s
-end
-_intersect!(s, s2) = filter!(in(s2), s)
-
-# A version of Base.symdiff that scales quadratically in the number of elements
-# and assumes the elements of each input set are already unique.
-_symdiff(s) = Base.copymutable(s)
-function _symdiff(s, itrs...)
-  return _symdiff!(Base.emptymutable(s, Base.promote_eltype(s, itrs...)), s, itrs...)
-end
-function _symdiff!(s, itrs...)
-  for x in itrs
-    _symdiff!(s, x)
-  end
-  return s
-end
-function _symdiff!(s, itr)
-  if isempty(s)
-    append!(s, itr)
-    return s
-  end
-  for x in itr
-    n = findfirst(==(x), s)
-    !isnothing(n) ? deleteat!(s, n) : push!(s, x)
-  end
-  return s
-end
-
-# A version of Base.union that scales quadratically in the number of elements
-# and assumes the elements of each input set are already unique.
-_union(s) = Base.copymutable(s)
-function _union(s, sets...)
-  return _union!(Base.emptymutable(s, Base.promote_eltype(s, sets...)), s, sets...)
-end
-function _union!(s, sets...)
-  for x in sets
-    _union!(s, x)
-  end
-  return s
-end
-function _union!(s, itr)
-  Base.haslength(itr) && sizehint!(s, length(s) + Int(length(itr))::Int)
-  for x in itr
-    x ∉ s && push!(s, x)
-  end
-  return s
-end
diff --git a/src/set_types.jl b/src/set_types.jl
deleted file mode 100644
index dc96a8cc12..0000000000
--- a/src/set_types.jl
+++ /dev/null
@@ -1,5 +0,0 @@
-using NDTensors.TypeParameterAccessors: unwrap_array_type
-NDTensors.TypeParameterAccessors.parenttype(T::ITensor) = typeof(tensor(T))
-function NDTensors.TypeParameterAccessors.unwrap_array_type(T::ITensor)
-  return unwrap_array_type(tensor(T))
-end
diff --git a/src/symmetrystyle.jl b/src/symmetrystyle.jl
deleted file mode 100644
index 319dfe6060..0000000000
--- a/src/symmetrystyle.jl
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# Trait to determine if an Index, Index collection, Tensor, or ITensor
-# has symmetries
-abstract type SymmetryStyle end
-
-function symmetrystyle(T)
-  return error("No SymmetryStyle defined for the specified object $T of type $(typeof(T))")
-end
-
-symmetrystyle(T, S, U, V...)::SymmetryStyle = (
-  Base.@_inline_meta; symmetrystyle(symmetrystyle(T), symmetrystyle(S, U, V...))
-)
-
-symmetrystyle(T, S)::SymmetryStyle = symmetrystyle(symmetrystyle(T), symmetrystyle(S))
-
-# Rules for basic collections
-symmetrystyle(inds::Tuple) = symmetrystyle(inds...)
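The `symmetrystyle` methods above follow the standard Julia trait pattern: map each object to a style value, then combine styles pairwise. A minimal self-contained sketch of that reduction, with illustrative names that are not part of ITensors:

```julia
# Toy trait hierarchy mirroring the SymmetryStyle pattern (illustrative names).
abstract type ToyStyle end
struct NoSym <: ToyStyle end
struct HasSym <: ToyStyle end

# Map objects to styles (here, arbitrarily: real numbers are "plain",
# complex numbers "have symmetry").
toystyle(::Real) = NoSym()
toystyle(::Complex) = HasSym()

# Combine two styles: any HasSym argument wins.
toystyle(::NoSym, ::NoSym) = NoSym()
toystyle(::ToyStyle, ::ToyStyle) = HasSym()

# Pairwise reduction over many arguments, like `symmetrystyle(T, S, U, V...)`.
toystyle(T, S, U...) = toystyle(toystyle(T), toystyle(S, U...))

toystyle(1, 2.0, 3im) # HasSym()
```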
-# `reduce(symmetrystyle, inds)` is not type stable for some reason -symmetrystyle(inds::AbstractVector) = mapreduce(symmetrystyle, symmetrystyle, inds) diff --git a/src/tensor_operations/itensor_combiner.jl b/src/tensor_operations/itensor_combiner.jl deleted file mode 100644 index 5b350b131f..0000000000 --- a/src/tensor_operations/itensor_combiner.jl +++ /dev/null @@ -1,84 +0,0 @@ -function combiner(is::Indices; dir=nothing, tags="CMB,Link") - new_ind = Index(prod(dims(is)); dir, tags) - new_is = (new_ind, is...) - return itensor(Combiner(), new_is) -end - -combiner(is...; kwargs...) = combiner(indices(is...); kwargs...) -combiner(i::Index; kwargs...) = combiner((i,); kwargs...) - -# Special case when no indices are combined (useful for generic code) -function combiner(; kwargs...) - return itensor(Combiner(), ()) -end - -""" - combinedind(C::ITensor) - -Given a combiner ITensor, return the Index which is -the "combined" index that is made out of merging -the other indices given to the combiner when it is made - -For more information, see the `combiner` function. -""" -function combinedind(T::ITensor) - if storage(T) isa Combiner && order(T) > 0 - return inds(T)[1] - end - return nothing -end - -# TODO: add iscombiner(::Tensor) to NDTensors -iscombiner(T::ITensor)::Bool = (storage(T) isa Combiner) - -@doc """ - combiner(inds::Indices; kwargs...) - -Make a combiner ITensor which combines the indices (of type Index) -into a single, new Index whose size is the product of the indices -given. For example, given indices `i1,i2,i3` the combiner will have -these three indices plus an additional one whose dimension is the -product of the dimensions of `i1,i2,i3`. - -Internally, a combiner ITensor uses a special storage type which -means it does not hold actual tensor elements but just information -about how to combine the indices into a single Index. Taking a product -of a regular ITensor with a combiner uses special fast algorithms to -combine the indices. - -To obtain the new, combined Index that the combiner makes out of -the indices it is given, use the `combinedind` function. - -To undo or reverse the combining process, uncombining the Index back -into the original ones, contract the tensor having the combined Index -with the conjugate or `dag` of the combiner. (If the combiner is an ITensor -`C`, multiply by `dag(C)`.) - -### Example -``` -# Combine indices i and k into a new Index ci -T = random_itensor(i,j,k) -C = combiner(i,k) -CT = C * T -ci = combinedind(C) - -# Uncombine ci back into i and k -TT = dag(C) * CT - -# TT will be the same as T -@show norm(TT - T) ≈ 0.0 -``` - - i j k - | | | - T = ======= - - ci i k - | | | - C = ======== - - ci j - | | - C * T = ===== - -""" combiner diff --git a/src/tensor_operations/matrix_algebra.jl b/src/tensor_operations/matrix_algebra.jl deleted file mode 100644 index 4b125503b5..0000000000 --- a/src/tensor_operations/matrix_algebra.jl +++ /dev/null @@ -1,94 +0,0 @@ -# Fix for AD -function _tr(T::ITensor; plev::Pair{Int,Int}=0 => 1, tags::Pair=ts"" => ts"") - trpairs = indpairs(T; plev=plev, tags=tags) - Cᴸ = combiner(first.(trpairs)) - Cᴿ = combiner(last.(trpairs)) - Tᶜ = T * Cᴸ * Cᴿ - cᴸ = uniqueind(Cᴸ, T) - cᴿ = uniqueind(Cᴿ, T) - Tᶜ *= δ(eltype(T), dag((cᴸ, cᴿ))) - if order(Tᶜ) == 0 - return Tᶜ[] - end - return Tᶜ -end - -# Trace an ITensor over pairs of indices determined by -# the prime levels and tags. Indices that are not in pairs -# are not traced over, corresponding to a "batched" trace. -function tr(T::ITensor; kwargs...) 
- return _tr(T; kwargs...) -end - -""" - exp(A::ITensor, Linds=Rinds', Rinds=inds(A,plev=0); ishermitian = false) - -Compute the exponential of the tensor `A` by treating it as a matrix ``A_{lr}`` with -the left index `l` running over all indices in `Linds` and `r` running over all -indices in `Rinds`. - -Only accepts index lists `Linds`,`Rinds` such that: (1) `length(Linds) + -length(Rinds) == length(inds(A))` (2) `length(Linds) == length(Rinds)` (3) For -each pair of indices `(Linds[n],Rinds[n])`, `Linds[n]` and `Rinds[n]` represent -the same Hilbert space (the same QN structure in the QN case, or just the same -length in the dense case), and appear in `A` with opposite directions. - -When `ishermitian=true` the exponential of `Hermitian(A_{lr})` is -computed internally. -""" -function exp(A::ITensor, Linds, Rinds; ishermitian=false) - @debug_check begin - if hasqns(A) - @assert flux(A) == QN() - end - end - - N = ndims(A) - NL = length(Linds) - NR = length(Rinds) - NL != NR && error("Must have equal number of left and right indices") - N != NL + NR && - error("Number of left and right indices must add up to total number of indices") - - # Linds, Rinds may not have the correct directions - # TODO: does the need a conversion? - Lis = Linds - Ris = Rinds - - # Ensure the indices have the correct directions, - # QNs, etc. - # First grab the indices in A, then permute them - # correctly. - Lis = permute(commoninds(A, Lis), Lis) - Ris = permute(commoninds(A, Ris), Ris) - - for (l, r) in zip(Lis, Ris) - if space(l) != space(r) - error("In exp, indices must come in pairs with equal spaces.") - end - if hasqns(A) - if dir(l) == dir(r) - error("In exp, indices must come in pairs with opposite directions") - end - end - end - - CL = combiner(Lis...; dir=Out) - CR = combiner(Ris...; dir=In) - AC = (A * CR) * CL - expAT = ishermitian ? exp(Hermitian(tensor(AC))) : exp(tensor(AC)) - return (itensor(expAT) * dag(CR)) * dag(CL) -end - -function exp(A::ITensor; kwargs...) - Ris = filterinds(A; plev=0) - Lis = Ris' - return exp(A, Lis, Ris; kwargs...) -end - -using NDTensors: NDTensors, map_diag, map_diag! -function NDTensors.map_diag!(f::Function, it_destination::ITensor, it_source::ITensor) - map_diag!(f, tensor(it_destination), tensor(it_source)) - return it_destination -end -NDTensors.map_diag(f::Function, it::ITensor) = itensor(map_diag(f, tensor(it))) diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl deleted file mode 100644 index 8983997d1a..0000000000 --- a/src/tensor_operations/matrix_decomposition.jl +++ /dev/null @@ -1,857 +0,0 @@ -using NDTensors.RankFactorization: Spectrum - -""" - TruncSVD - -ITensor factorization type for a truncated singular-value -decomposition, returned by `svd`. 
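Since `TruncSVD` overloads `iterate` (below), the returned factorization destructures directly; an illustrative sketch:

```julia
U, S, V, spec, u, v = svd(A, i, k)
```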
-""" -struct TruncSVD - U::ITensor - S::ITensor - V::ITensor - spec::Spectrum - u::Index - v::Index -end - -# iteration for destructuring into components `U,S,V,spec,u,v = S` -iterate(S::TruncSVD) = (S.U, Val(:S)) -iterate(S::TruncSVD, ::Val{:S}) = (S.S, Val(:V)) -iterate(S::TruncSVD, ::Val{:V}) = (S.V, Val(:spec)) -iterate(S::TruncSVD, ::Val{:spec}) = (S.spec, Val(:u)) -iterate(S::TruncSVD, ::Val{:u}) = (S.u, Val(:v)) -iterate(S::TruncSVD, ::Val{:v}) = (S.v, Val(:done)) -iterate(S::TruncSVD, ::Val{:done}) = nothing - -@doc """ - svd(A::ITensor, inds::Index...; ) - -Singular value decomposition (SVD) of an ITensor `A`, computed -by treating the "left indices" provided collectively -as a row index, and the remaining "right indices" as a -column index (matricization of a tensor). - -The first three return arguments are `U`, `S`, and `V`, such that -`A ≈ U * S * V`. - -Whether or not the SVD performs a trunction depends on the keyword -arguments provided. - -If the left or right set of indices are empty, all input indices are -put on `V` or `U` respectively. To specify an empty set of left indices, -you must explicitly use `svd(A, ())` (`svd(A)` is currently undefined). - -# Examples - -Computing the SVD of an order-three ITensor, such that the indices -i and k end up on U and j ends up on V - -``` -i = Index(2) -j = Index(5) -k = Index(2) -A = random_itensor(i, j, k) -U, S, V = svd(A, i, k); -@show norm(A - U * S * V) <= 10 * eps() * norm(A) -``` - -The following code will truncate the last 2 singular values, -since the total number of singular values is 4. -The norm of the difference with the original tensor -will be the sqrt root of the sum of the squares of the -singular values that get truncated. - -``` -trunc, Strunc, Vtrunc = svd(A, i, k; maxdim=2); -@show norm(A - Utrunc * Strunc * Vtrunc) ≈ sqrt(S[3, 3]^2 + S[4, 4]^2) -``` - -Alternatively we can specify that we want to truncate -the weights of the singular values up to a certain cutoff, -so the total error will be no larger than the cutoff. - -``` -Utrunc2, Strunc2, Vtrunc2 = svd(A, i, k; cutoff=1e-10); -@show norm(A - Utrunc2 * Strunc2 * Vtrunc2) <= 1e-10 -``` - -# Keywords -- `maxdim::Int`: the maximum number of singular values to keep. -- `mindim::Int`: the minimum number of singular values to keep. -- `cutoff::Float64`: set the desired truncation error of the SVD, - by default defined as the sum of the squares of the smallest singular values. -- `lefttags::String = "Link,u"`: set the tags of the Index shared by `U` and `S`. -- `righttags::String = "Link,v"`: set the tags of the Index shared by `S` and `V`. -- `alg::String = "divide_and_conquer"`. Options: -- `"divide_and_conquer"` - A divide-and-conquer algorithm. - LAPACK's gesdd. Fast, but may lead to some innacurate singular values - for very ill-conditioned matrices. Also may sometimes fail to converge, - leading to errors (in which case "qr_iteration" or "recursive" can be tried). - - `"qr_iteration"` - Typically slower but more accurate for very - ill-conditioned matrices compared to `"divide_and_conquer"`. - LAPACK's gesvd. - - `"recursive"` - ITensor's custom svd. Very reliable, but may be slow if - high precision is needed. To get an `svd` of a matrix `A`, an - eigendecomposition of ``A^{\\dagger} A`` is used to compute `U` and then - a `qr` of ``A^{\\dagger} U`` is used to compute `V`. This is performed - recursively to compute small singular values. 
-- `use_absolute_cutoff::Bool = false`: set if all probability weights below - the `cutoff` value should be discarded, rather than the sum of discarded - weights. -- `use_relative_cutoff::Bool = true`: set if the singular values should be - normalized for the sake of truncation. -- `min_blockdim::Int = 0`: for SVD of block-sparse or QN ITensors, require - that the number of singular values kept be greater than or equal to - this value when possible - -See also: [`factorize`](@ref), [`eigen`](@ref) -""" -function svd( - A::ITensor, - Linds...; - leftdir=nothing, - rightdir=nothing, - lefttags=nothing, - righttags=nothing, - mindim=nothing, - maxdim=nothing, - cutoff=nothing, - alg=nothing, - use_absolute_cutoff=nothing, - use_relative_cutoff=nothing, - min_blockdim=nothing, - # Deprecated - utags=lefttags, - vtags=righttags, -) - lefttags = NDTensors.replace_nothing(lefttags, ts"Link,u") - righttags = NDTensors.replace_nothing(righttags, ts"Link,v") - - # Deprecated - utags = NDTensors.replace_nothing(utags, ts"Link,u") - vtags = NDTensors.replace_nothing(vtags, ts"Link,v") - - Lis = commoninds(A, indices(Linds...)) - Ris = uniqueinds(A, Lis) - - Lis_original = Lis - Ris_original = Ris - if isempty(Lis_original) - α = trivial_index(Ris) - vLα = onehot(datatype(A), α => 1) - A *= vLα - Lis = [α] - end - if isempty(Ris_original) - α = trivial_index(Lis) - vRα = onehot(datatype(A), α => 1) - A *= vRα - Ris = [α] - end - - CL = combiner(Lis...; dir=leftdir) - CR = combiner(Ris...; dir=rightdir) - AC = A * CR * CL - cL = combinedind(CL) - cR = combinedind(CR) - if inds(AC) != (cL, cR) - AC = permute(AC, cL, cR) - end - - USVT = svd( - tensor(AC); - mindim, - maxdim, - cutoff, - alg, - use_absolute_cutoff, - use_relative_cutoff, - min_blockdim, - ) - if isnothing(USVT) - return nothing - end - UT, ST, VT, spec = USVT - UC, S, VC = itensor(UT), itensor(ST), itensor(VT) - - u = commonind(S, UC) - v = commonind(S, VC) - - U = UC * dag(CL) - V = VC * dag(CR) - - U = settags(U, utags, u) - S = settags(S, utags, u) - S = settags(S, vtags, v) - V = settags(V, vtags, v) - - u = settags(u, utags) - v = settags(v, vtags) - - if isempty(Lis_original) - U *= dag(vLα) - end - if isempty(Ris_original) - V *= dag(vRα) - end - - return TruncSVD(U, S, V, spec, u, v) -end - -svd(A::ITensor; kwargs...) = error("Must specify indices in `svd`") - -""" - TruncEigen - -ITensor factorization type for a truncated eigenvalue -decomposition, returned by `eigen`. -""" -struct TruncEigen - D::ITensor - V::ITensor - Vt::ITensor - spec::Spectrum - l::Index - r::Index -end - -# iteration for destructuring into components `D, V, spec, l, r = E` -iterate(E::TruncEigen) = (E.D, Val(:V)) -iterate(E::TruncEigen, ::Val{:V}) = (E.V, Val(:spec)) -iterate(E::TruncEigen, ::Val{:spec}) = (E.spec, Val(:l)) -iterate(E::TruncEigen, ::Val{:l}) = (E.l, Val(:r)) -iterate(E::TruncEigen, ::Val{:r}) = (E.r, Val(:done)) -iterate(E::TruncEigen, ::Val{:done}) = nothing - -""" - eigen(A::ITensor[, Linds, Rinds]; ) - -Eigendecomposition of an ITensor `A`, computed -by treating the "left indices" `Linds` provided collectively -as a row index, and remaining "right indices" `Rinds` as a -column index (matricization of a tensor). - -If no indices are provided, pairs of primed and unprimed indices are -searched for, with `Linds` taken to be the primed indices and -`Rinds` taken to be the unprimed indices. 
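For instance, relying on that primed/unprimed convention (an illustrative sketch):

```julia
i = Index(2, "i")
A = random_itensor(i', i)
D, U = eigen(A) # equivalent to eigen(A, (i',), (i,))
```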
-
-The return arguments are the eigenvalues `D` and eigenvectors `U`
-as tensors, such that `A * U ∼ U * D` (more precisely they are approximately
-equal up to proper replacements of indices, see the example for details).
-
-Whether or not `eigen` performs a truncation depends on the keyword
-arguments provided. Note that truncation is only well defined for
-positive semidefinite matrices.
-
-# Arguments
-
-  - `maxdim::Int`: the maximum number of eigenvalues to keep.
-  - `mindim::Int`: the minimum number of eigenvalues to keep.
-  - `cutoff::Float64`: set the desired truncation error of the eigenvalues,
-    by default defined as the sum of the squares of the smallest eigenvalues.
-    For now truncation is only well defined for positive semi-definite
-    eigenspectra.
-  - `ishermitian::Bool = false`: specify if the matrix is Hermitian, in which
-    case a specialized diagonalization routine will be used and it is
-    guaranteed that real eigenvalues will be returned.
-  - `plev::Int = 0`: set the prime level of the Indices of `D`. Default prime
-    levels are subject to change.
-  - `leftplev::Int = plev`: set the prime level of the Index unique to `D`.
-    Default prime levels are subject to change.
-  - `rightplev::Int = leftplev+1`: set the prime level of the Index shared
-    by `D` and `U`. Default prime levels are subject to change.
-  - `tags::String = "Link,eigen"`: set the tags of the Indices of `D`.
-    Default tags are subject to change.
-  - `lefttags::String = tags`: set the tags of the Index unique to `D`.
-    Default tags are subject to change.
-  - `righttags::String = tags`: set the tags of the Index shared by `D` and `U`.
-    Default tags are subject to change.
-  - `use_absolute_cutoff::Bool = false`: set if all probability weights below
-    the `cutoff` value should be discarded, rather than the sum of discarded
-    weights.
-  - `use_relative_cutoff::Bool = true`: set if the eigenvalues should
-    be normalized for the sake of truncation.
-
-# Examples
-
-```julia
-i, j, k, l = Index(2, "i"), Index(2, "j"), Index(2, "k"), Index(2, "l")
-A = random_itensor(i, j, k, l)
-Linds = (i, k)
-Rinds = (j, l)
-D, U = eigen(A, Linds, Rinds)
-dl, dr = uniqueind(D, U), commonind(D, U)
-Ul = replaceinds(U, (Rinds..., dr) => (Linds..., dl))
-A * U ≈ Ul * D # true
-```
-
-See also: [`svd`](@ref), [`factorize`](@ref)
-"""
-function eigen(
-  A::ITensor,
-  Linds,
-  Rinds;
-  mindim=nothing,
-  maxdim=nothing,
-  cutoff=nothing,
-  use_absolute_cutoff=nothing,
-  use_relative_cutoff=nothing,
-  ishermitian=nothing,
-  tags=nothing,
-  lefttags=nothing,
-  righttags=nothing,
-  plev=nothing,
-  leftplev=nothing,
-  rightplev=nothing,
-)
-  ishermitian = NDTensors.replace_nothing(ishermitian, false)
-  tags = NDTensors.replace_nothing(tags, ts"Link,eigen")
-  lefttags = NDTensors.replace_nothing(lefttags, tags)
-  righttags = NDTensors.replace_nothing(righttags, tags)
-  plev = NDTensors.replace_nothing(plev, 0)
-  leftplev = NDTensors.replace_nothing(leftplev, plev)
-  rightplev = NDTensors.replace_nothing(rightplev, plev)
-
-  @debug_check begin
-    if hasqns(A)
-      @assert flux(A) == QN()
-    end
-  end
-
-  N = ndims(A)
-  NL = length(Linds)
-  NR = length(Rinds)
-  NL != NR && error("Must have equal number of left and right indices")
-  N != NL + NR &&
-    error("Number of left and right indices must add up to total number of indices")
-
-  if lefttags == righttags && leftplev == rightplev
-    leftplev = rightplev + 1
-  end
-
-  # Linds, Rinds may not have the correct directions
-  Lis = indices(Linds)
-  Ris = indices(Rinds)
-
-  # Ensure the indices have the correct directions,
-  # QNs, etc.
-  # First grab the indices in A, then permute them
-  # correctly.
-  Lis = permute(commoninds(A, Lis), Lis)
-  Ris = permute(commoninds(A, Ris), Ris)
-
-  for (l, r) in zip(Lis, Ris)
-    if space(l) != space(r)
-      error("In eigen, indices must come in pairs with equal spaces.")
-    end
-    if hasqns(A)
-      if dir(l) == dir(r)
-        error("In eigen, indices must come in pairs with opposite directions")
-      end
-    end
-  end
-
-  #
-  if hasqns(A) && using_auto_fermion()
-    if !all(i -> dir(i) == Out, Lis)
-      error("With auto_fermion enabled, left inds in eigen must have Out arrows")
-    end
-    if !all(i -> dir(i) == In, Ris)
-      error("With auto_fermion enabled, right inds in eigen must have In arrows")
-    end
-  end
-
-  CL = combiner(Lis...; dir=Out, tags="CMB,left")
-  CR = combiner(dag(Ris)...; dir=Out, tags="CMB,right")
-
-  AC = A * dag(CR) * CL
-
-  cL = combinedind(CL)
-  cR = dag(combinedind(CR))
-  if inds(AC) != (cL, cR)
-    AC = permute(AC, cL, cR)
-  end
-
-  AT = ishermitian ? Hermitian(tensor(AC)) : tensor(AC)
-
-  DT, VT, spec = eigen(AT; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff)
-  D, VC = itensor(DT), itensor(VT)
-
-  V = VC * dag(CR)
-
-  # Set right index tags
-  l = uniqueind(D, V)
-  r = commonind(D, V)
-  l̃ = setprime(settags(l, lefttags), leftplev)
-  r̃ = setprime(settags(l̃, righttags), rightplev)
-
-  replaceinds!(D, (l, r), (l̃, r̃))
-  replaceind!(V, r, r̃)
-
-  l, r = l̃, r̃
-
-  # The right eigenvectors, after being applied to A
-  Vt = replaceinds(V, (Ris..., r), (Lis..., l))
-
-  @debug_check begin
-    if hasqns(A)
-      @assert flux(D) == QN()
-      @assert flux(V) == QN()
-      @assert flux(Vt) == QN()
-    end
-  end
-
-  return TruncEigen(D, V, Vt, spec, l, r)
-end
-
-function eigen(A::ITensor; kwargs...)
-  Ris = filterinds(A; plev=0)
-  Lis = Ris'
-  return eigen(A, Lis, Ris; kwargs...)
-end - -# ----------------------------- QR/RQ/QL/LQ decompositions ------------------------------ - -# -# Helper functions for handleing cases where zero indices are requested on Q or R. -# -function add_trivial_index(A::ITensor, Ainds) - α = trivial_index(Ainds) #If Ainds[1] has no QNs makes Index(1), otherwise Index(QN()=>1) - vα = onehot(datatype(A), α => 1) - A *= vα - return A, vα, [α] -end - -function add_trivial_index(A::ITensor, Linds, Rinds) - vαl, vαr = nothing, nothing - if isempty(Linds) - A, vαl, Linds = add_trivial_index(A, Rinds) - end - if isempty(Rinds) - A, vαr, Rinds = add_trivial_index(A, Linds) - end - return A, vαl, vαr, Linds, Rinds -end - -remove_trivial_index(Q::ITensor, R::ITensor, vαl, vαr) = (Q * dag(vαl), R * dag(vαr)) -remove_trivial_index(Q::ITensor, R::ITensor, ::Nothing, vαr) = (Q, R * dag(vαr)) -remove_trivial_index(Q::ITensor, R::ITensor, vαl, ::Nothing) = (Q * dag(vαl), R) -remove_trivial_index(Q::ITensor, R::ITensor, ::Nothing, ::Nothing) = (Q, R) - -# -# Force users to knowingly ask for zero indices using qr(A,()) syntax -# -function noinds_error_message(decomp::String) - return "$decomp without any input indices is currently not defined. - In the future it may be defined as performing a $decomp decomposition - treating the ITensor as a matrix from the primed to the unprimed indices." -end - -qr(A::ITensor; kwargs...) = error(noinds_error_message("qr")) -rq(A::ITensor; kwargs...) = error(noinds_error_message("rq")) -lq(A::ITensor; kwargs...) = error(noinds_error_message("lq")) -ql(A::ITensor; kwargs...) = error(noinds_error_message("ql")) -# -# User supplied only left indices as a tuple or vector. -# -qr(A::ITensor, Linds::Indices; kwargs...) = qr(A, Linds, uniqueinds(A, Linds); kwargs...) -ql(A::ITensor, Linds::Indices; kwargs...) = ql(A, Linds, uniqueinds(A, Linds); kwargs...) -rq(A::ITensor, Linds::Indices; kwargs...) = rq(A, Linds, uniqueinds(A, Linds); kwargs...) -lq(A::ITensor, Linds::Indices; kwargs...) = lq(A, Linds, uniqueinds(A, Linds); kwargs...) -# -# User supplied only left indices as as vararg -# -qr(A::ITensor, Linds...; kwargs...) = qr(A, Linds, uniqueinds(A, Linds); kwargs...) -ql(A::ITensor, Linds...; kwargs...) = ql(A, Linds, uniqueinds(A, Linds); kwargs...) -rq(A::ITensor, Linds...; kwargs...) = rq(A, Linds, uniqueinds(A, Linds); kwargs...) -lq(A::ITensor, Linds...; kwargs...) = lq(A, Linds, uniqueinds(A, Linds); kwargs...) -# -# Core function where both left and right indices are supplied as tuples or vectors -# Handle default tags and dispatch to generic qx/xq functions. -# -function qr(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,qr", kwargs...) - return qx(qr, A, Linds, Rinds; tags, kwargs...) -end -function ql(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,ql", kwargs...) - return qx(ql, A, Linds, Rinds; tags, kwargs...) -end -function rq(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,rq", kwargs...) - return xq(ql, A, Linds, Rinds; tags, kwargs...) -end -function lq(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,lq", kwargs...) - return xq(qr, A, Linds, Rinds; tags, kwargs...) -end -# -# Generic function implementing both qr and ql decomposition. The X tensor = R or L. -# -function qx( - qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,qx", positive=false -) - # Strip out any extra indices that are not in A. - # Unit test test/base/test_itensor.jl line 1469 will fail without this. 
-  Linds = commoninds(A, Linds)
-  #Rinds=commoninds(A,Rinds) #if the user supplied Rinds they could have the same problem?
-  #
-  # Make a dummy index with dim=1 and incorporate into A so the Linds & Rinds can never
-  # be empty.  A essentially becomes 1D after combining.
-  #
-  A, vαl, vαr, Linds, Rinds = add_trivial_index(A, Linds, Rinds)
-
-  #
-  # Use combiners to render A down to a rank 2 tensor ready for matrix QR/QL routine.
-  #
-  CL, CR = combiner(Linds...), combiner(Rinds...)
-  cL, cR = combinedind(CL), combinedind(CR)
-  AC = A * CR * CL
-
-  #
-  # Make sure we don't accidentally pass the transpose into the matrix qr/ql routine.
-  #
-  AC = permute(AC, cL, cR; allow_alias=true)
-
-  QT, XT = qx(tensor(AC); positive) #pass order(AC)==2 matrix down to the NDTensors level where qr/ql are implemented.
-  #
-  # Undo the combine operation, to recover all tensor indices.
-  #
-  Q, X = itensor(QT) * dag(CL), itensor(XT) * dag(CR)
-
-  # Remove dummy indices.  No-op if vαl and vαr are Nothing
-  Q, X = remove_trivial_index(Q, X, vαl, vαr)
-  #
-  # fix up the tag name for the index between Q and X.
-  #
-  q = commonind(Q, X)
-  Q = settags(Q, tags, q)
-  X = settags(X, tags, q)
-  q = settags(q, tags)
-
-  return Q, X, q
-end
-
-#
-# Generic function implementing both rq and lq decomposition. Implemented using qr/ql
-# by swapping the left and right indices. The X tensor = R or L.
-#
-function xq(
-  qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,xq", positive=false
-)
-  Q, X, q = qx(A, Rinds, Linds; positive)
-  #
-  # fix up the tag name for the index between Q and L.
-  #
-  Q = settags(Q, tags, q)
-  X = settags(X, tags, q)
-  q = settags(q, tags)
-
-  return X, Q, q
-end
-
-polar(A::ITensor; kwargs...) = error(noinds_error_message("polar"))
-
-# TODO: allow custom tags in internal indices?
-# TODO: return the new common indices?
-function polar(A::ITensor, Linds...)
-  U, S, V = svd(A, Linds...)
-  u = commoninds(S, U)
-  v = commoninds(S, V)
-  δᵤᵥ′ = δ(eltype(A), u..., v'...)
-  Q = U * δᵤᵥ′ * V'
-  P = dag(V') * dag(δᵤᵥ′) * S * V
-  return Q, P, commoninds(Q, P)
-end
-
-function factorize_qr(A::ITensor, Linds...; ortho="left", tags=nothing, positive=false)
-  if ortho == "left"
-    L, R, q = qr(A, Linds...; tags, positive)
-  elseif ortho == "right"
-    Lis = uniqueinds(A, indices(Linds...))
-    R, L, q = qr(A, Lis...; tags, positive)
-  else
-    error("In factorize using qr decomposition, ortho keyword
-          $ortho not supported. Supported options are left or right.")
-  end
-  return L, R
-end
-
-using NDTensors: map_diag!
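A usage sketch for the `qx`/`xq` wrappers above (index names are illustrative; `qr` and `lq` are the ITensor methods defined in this file):

```julia
using ITensors

i, j, k = Index(2, "i"), Index(3, "j"), Index(4, "k")
A = random_itensor(i, j, k)

# QR with (i, j) grouped as the "row" index; Q and R share the new Index q.
Q, R, q = qr(A, (i, j))
@show norm(Q * R - A) # should be ≈ 0

# lq reuses the same machinery with the index groups swapped,
# so the triangular factor ends up on the left.
L, Qlq, qlq = lq(A, (i,))
@show norm(L * Qlq - A) # should be ≈ 0
```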
-# -# Generic function implementing a square root decomposition of a diagonal, order 2 tensor with inds u, v -# -function sqrt_decomp(D::ITensor, u::Index, v::Index) - (storage(D) isa Union{Diag,DiagBlockSparse}) || - error("Must be a diagonal matrix ITensor.") - sqrtDL = diag_itensor(u, dag(u)') - sqrtDR = diag_itensor(v, dag(v)') - map_diag!(sqrt ∘ abs, sqrtDL, D) - map_diag!(sqrt ∘ abs, sqrtDR, D) - δᵤᵥ = copy(D) - map_diag!(sign, δᵤᵥ, D) - return sqrtDL, prime(δᵤᵥ), sqrtDR -end - -function factorize_svd( - A::ITensor, - Linds...; - (singular_values!)=nothing, - ortho="left", - alg=nothing, - dir=nothing, - mindim=nothing, - maxdim=nothing, - cutoff=nothing, - tags=nothing, - use_absolute_cutoff=nothing, - use_relative_cutoff=nothing, - min_blockdim=nothing, -) - leftdir, rightdir = dir, dir - if !isnothing(leftdir) - leftdir = -leftdir - end - if !isnothing(rightdir) - rightdir = -rightdir - end - USV = svd( - A, - Linds...; - leftdir, - rightdir, - alg, - mindim, - maxdim, - cutoff, - lefttags=tags, - righttags=tags, - use_absolute_cutoff, - use_relative_cutoff, - min_blockdim, - ) - if isnothing(USV) - return nothing - end - U, S, V, spec, u, v = USV - if ortho == "left" - L, R = U, S * V - elseif ortho == "right" - L, R = U * S, V - elseif ortho == "none" - sqrtDL, δᵤᵥ, sqrtDR = sqrt_decomp(S, u, v) - sqrtDR = denseblocks(sqrtDR) * denseblocks(δᵤᵥ) - L, R = U * sqrtDL, V * sqrtDR - else - error("In factorize using svd decomposition, ortho keyword - $ortho not supported. Supported options are left, right, or none.") - end - - !isnothing(singular_values!) && (singular_values![] = S) - - return L, R, spec -end - -function factorize_eigen( - A::ITensor, - Linds...; - ortho="left", - eigen_perturbation=nothing, - mindim=nothing, - maxdim=nothing, - cutoff=nothing, - tags=nothing, - use_absolute_cutoff=nothing, - use_relative_cutoff=nothing, -) - if ortho == "left" - Lis = commoninds(A, indices(Linds...)) - elseif ortho == "right" - Lis = uniqueinds(A, indices(Linds...)) - else - error("In factorize using eigen decomposition, ortho keyword - $ortho not supported. Supported options are left or right.") - end - simLis = sim(Lis) - A2 = A * replaceinds(dag(A), Lis, simLis) - if !isnothing(eigen_perturbation) - # This assumes delta_A2 has indices: - # (Lis..., prime(Lis)...) - delta_A2 = replaceinds(eigen_perturbation, Lis, dag(simLis)) - delta_A2 = noprime(delta_A2) - A2 += delta_A2 - end - F = eigen( - A2, - Lis, - simLis; - ishermitian=true, - mindim, - maxdim, - cutoff, - tags, - use_absolute_cutoff, - use_relative_cutoff, - ) - D, _, spec = F - L = F.Vt - R = dag(L) * A - if ortho == "right" - L, R = R, L - end - return L, R, spec -end - -factorize(A::ITensor; kwargs...) = error(noinds_error_message("factorize")) - -""" - factorize(A::ITensor, Linds::Index...; ) - -Perform a factorization of `A` into ITensors `L` and `R` such that `A ≈ L * R`. - -# Arguments - -- `ortho::String = "left"`: Choose orthogonality - properties of the factorization. - + `"left"`: the left factor `L` is an orthogonal basis - such that `L * dag(prime(L, commonind(L,R))) ≈ I`. - + `"right"`: the right factor `R` forms an orthogonal basis. - + `"none"`, neither of the factors form an orthogonal basis, - and in general are made as symmetrically as possible - (depending on the decomposition used). -- `which_decomp::Union{String, Nothing} = nothing`: choose what kind - of decomposition is used. - + `nothing`: choose the decomposition automatically based on - the other arguments. 
For example, when `nothing` is chosen and - `ortho = "left"` or `"right"`, and a cutoff is provided, `svd` or - `eigen` is used depending on the provided cutoff (`eigen` is only - used when the cutoff is greater than `1e-12`, since it has a lower - precision). When no truncation is requested `qr` is used for dense - ITensors and `svd` for block-sparse ITensors (in the future `qr` - will be used also for block-sparse ITensors in this case). - + `"svd"`: `L = U` and `R = S * V` for `ortho = "left"`, `L = U * S` - and `R = V` for `ortho = "right"`, and `L = U * sqrt.(S)` and - `R = sqrt.(S) * V` for `ortho = "none"`. To control which `svd` - algorithm is choose, use the `svd_alg` keyword argument. See the - documentation for `svd` for the supported algorithms, which are the - same as those accepted by the `alg` keyword argument. - + `"eigen"`: `L = U` and ``R = U^{\\dagger} A`` where `U` is determined - from the eigendecompositon ``A A^{\\dagger} = U D U^{\\dagger}`` for - `ortho = "left"` (and vice versa for `ortho = "right"`). `"eigen"` is - not supported for `ortho = "none"`. - + `"qr"`: `L=Q` and `R` an upper-triangular matrix when - `ortho = "left"`, and `R = Q` and `L` a lower-triangular matrix - when `ortho = "right"` (currently supported for dense ITensors only). - In the future, other decompositions like QR (for block-sparse ITensors), - polar, cholesky, LU, etc. are expected to be supported. - -For truncation arguments, see: [`svd`](@ref) -""" -function factorize( - A::ITensor, - Linds...; - mindim=nothing, - maxdim=nothing, - cutoff=nothing, - ortho=nothing, - tags=nothing, - plev=nothing, - which_decomp=nothing, - # eigen - eigen_perturbation=nothing, - # svd - svd_alg=nothing, - use_absolute_cutoff=nothing, - use_relative_cutoff=nothing, - min_blockdim=nothing, - (singular_values!)=nothing, - dir=nothing, -) - @debug_check checkflux(A) - if !isnothing(eigen_perturbation) - if !(isnothing(which_decomp) || which_decomp == "eigen") - error("""when passing a non-trivial eigen_perturbation to `factorize`, - the which_decomp keyword argument must be either "automatic" or - "eigen" """) - end - which_decomp = "eigen" - end - ortho = NDTensors.replace_nothing(ortho, "left") - tags = NDTensors.replace_nothing(tags, ts"Link,fact") - plev = NDTensors.replace_nothing(plev, 0) - - # Determines when to use eigen vs. 
svd (eigen is less precise, - # so eigen should only be used if a larger cutoff is requested) - automatic_cutoff = 1e-12 - Lis = commoninds(A, indices(Linds...)) - Ris = uniqueinds(A, Lis) - dL, dR = dim(Lis), dim(Ris) - if isnothing(eigen_perturbation) - # maxdim is forced to be at most the max given SVD - if isnothing(maxdim) - maxdim = min(dL, dR) - end - maxdim = min(maxdim, min(dL, dR)) - else - if isnothing(maxdim) - maxdim = max(dL, dR) - end - maxdim = min(maxdim, max(dL, dR)) - end - might_truncate = !isnothing(cutoff) || maxdim < min(dL, dR) - if isnothing(which_decomp) - if !might_truncate && ortho != "none" - which_decomp = "qr" - elseif isnothing(cutoff) || cutoff ≤ automatic_cutoff - which_decomp = "svd" - elseif cutoff > automatic_cutoff - which_decomp = "eigen" - end - end - if which_decomp == "svd" - LR = factorize_svd( - A, - Linds...; - mindim, - maxdim, - cutoff, - tags, - ortho, - alg=svd_alg, - dir, - singular_values!, - use_absolute_cutoff, - use_relative_cutoff, - min_blockdim, - ) - if isnothing(LR) - return nothing - end - L, R, spec = LR - elseif which_decomp == "eigen" - L, R, spec = factorize_eigen( - A, - Linds...; - mindim, - maxdim, - cutoff, - tags, - ortho, - eigen_perturbation, - use_absolute_cutoff, - use_relative_cutoff, - ) - elseif which_decomp == "qr" - L, R = factorize_qr(A, Linds...; ortho, tags) - spec = Spectrum(nothing, 0.0) - else - throw(ArgumentError("""In factorize, factorization $which_decomp is not - currently supported. Use `"svd"`, `"eigen"`, `"qr"` or `nothing`.""")) - end - - # Set the tags and prime level - l = commonind(L, R) - l̃ = setprime(settags(l, tags), plev) - L = replaceind(L, l, l̃) - R = replaceind(R, l, l̃) - l = l̃ - - return L, R, spec, l -end diff --git a/src/tensor_operations/permutations.jl b/src/tensor_operations/permutations.jl deleted file mode 100644 index ce6a1f7237..0000000000 --- a/src/tensor_operations/permutations.jl +++ /dev/null @@ -1,244 +0,0 @@ -""" - permute(T::ITensor, inds...; allow_alias = false) - -Return a new ITensor `T` with indices permuted according -to the input indices `inds`. The storage of the ITensor -is permuted accordingly. - -If called with `allow_alias = true`, it avoids -copying data if possible. Therefore, it may return an alias -of the input ITensor (an ITensor that shares the same data), -such as if the permutation turns out to be trivial. - -By default, `allow_alias = false`, and it never -returns an alias of the input ITensor. - -# Examples - -```julia -i = Index(2, "index_i"); j = Index(4, "index_j"); k = Index(3, "index_k"); -T = random_itensor(i, j, k) - -pT_1 = permute(T, k, i, j) -pT_2 = permute(T, j, i, k) - -pT_noalias_1 = permute(T, i, j, k) -pT_noalias_1[1, 1, 1] = 12 -T[1, 1, 1] != pT_noalias_1[1, 1, 1] - -pT_noalias_2 = permute(T, i, j, k; allow_alias = false) -pT_noalias_2[1, 1, 1] = 12 -T[1, 1, 1] != pT_noalias_1[1, 1, 1] - -pT_alias = permute(T, i, j, k; allow_alias = true) -pT_alias[1, 1, 1] = 12 -T[1, 1, 1] == pT_alias[1, 1, 1] -``` -""" -function permute(T::ITensor, new_inds...; kwargs...) - if !hassameinds(T, indices(new_inds...)) - error( - "In `permute(::ITensor, inds...)`, the input ITensor has indices: \n\n$(inds(T))\n\nbut the desired Index ordering is: \n\n$(indices(new_inds...))", - ) - end - allow_alias = deprecated_keyword_argument( - Bool, - kwargs; - new_kw=:allow_alias, - old_kw=:always_copy, - default=false, - funcsym=:permute, - map=!, - ) - aliasstyle::Union{AllowAlias,NeverAlias} = allow_alias ? 
AllowAlias() : NeverAlias()
-  return permute(aliasstyle, T, new_inds...)
-end
-
-# TODO: move to NDTensors
-function NDTensors.permutedims(::AllowAlias, T::Tensor, perm)
-  return NDTensors.is_trivial_permutation(perm) ? T : permutedims(NeverAlias(), T, perm)
-end
-
-# TODO: move to NDTensors, define `permutedims` in terms of `NeverAlias`
-function NDTensors.permutedims(::NeverAlias, T::Tensor, perm)
-  return permutedims(T, perm)
-end
-
-function _permute(as::AliasStyle, T::Tensor, new_inds)
-  perm = NDTensors.getperm(new_inds, inds(T))
-  return permutedims(as, T, perm)
-end
-
-function permute(as::AliasStyle, T::ITensor, new_inds)
-  return itensor(_permute(as, tensor(T), new_inds))
-end
-
-# Version listing indices
-function permute(as::AliasStyle, T::ITensor, new_inds::Index...)
-  return permute(as, T, new_inds)
-end
-
-"""
-    transpose(T::ITensor)
-
-Treating an ITensor as a map from a set of indices
-of prime level 0 to a matching set of indices but
-of prime level 1
-[for example: (i,j,k,...) -> (i',j',k',...)]
-return the ITensor which is the transpose of this map.
-"""
-transpose(T::ITensor) = swapprime(T, 0 => 1)
-
-"""
-    ishermitian(T::ITensor; kwargs...)
-
-Test whether an ITensor is a Hermitian operator,
-that is whether taking `dag` of the ITensor and
-transposing its indices returns numerically
-the same ITensor.
-"""
-function ishermitian(T::ITensor; kwargs...)
-  return isapprox(T, dag(transpose(T)); kwargs...)
-end
-
-"""
-    adjoint(A::ITensor)
-
-For `A'` notation to prime an ITensor by 1.
-"""
-adjoint(A::ITensor) = prime(A)
-
-#######################################################################
-#
-# Developer ITensor functions
-#
-
-"""
-    array(T::ITensor)
-
-Given an ITensor `T`, returns
-an Array with a copy of the ITensor's elements,
-or a view in the case the ITensor's storage is Dense.
-
-The ordering of the elements in the Array, in
-terms of which Index is treated as the row versus
-column, depends on the internal layout of the ITensor.
-
-!!! warning
-    This method is intended for developer use
-    only and not recommended for use in ITensor applications
-    unless you know what you are doing (for example
-    you are certain of the memory ordering of the ITensor
-    because you permuted the indices into a certain order).
-
-See also [`matrix`](@ref), [`vector`](@ref).
-"""
-array(T::ITensor) = array(tensor(T))
-
-"""
-    array(T::ITensor, inds...)
-
-Convert an ITensor `T` to an Array.
-
-The ordering of the elements in the Array is specified
-by the input indices `inds`. This tries to avoid copying
-if possible (i.e. it may return a view of the original
-data), for example if the ITensor's storage is Dense
-and the indices are already in the specified ordering
-so that no permutation is required.
-
-!!! warning
-    Note that in the future we may return specialized
-    AbstractArray types for certain storage types,
-    for example a `LinearAlgebra.Diagonal` type for
-    an ITensor with `Diag` storage. The specific storage
-    type shouldn't be relied upon.
-
-See also [`matrix`](@ref), [`vector`](@ref).
-"""
-array(T::ITensor, inds...) = array(permute(T, inds...; allow_alias=true))
-
-"""
-    matrix(T::ITensor)
-
-Given an ITensor `T` with two indices, returns
-a Matrix with a copy of the ITensor's elements,
-or a view in the case the ITensor's storage is Dense.
-
-The ordering of the elements in the Matrix, in
-terms of which Index is treated as the row versus
-column, depends on the internal layout of the ITensor.
-
-!!! warning
-    This method is intended for developer use
-    only and not recommended for use in ITensor applications
-    unless you know what you are doing (for example
-    you are certain of the memory ordering of the ITensor
-    because you permuted the indices into a certain order).
-
-See also [`array`](@ref), [`vector`](@ref).
-"""
-function matrix(T::ITensor)
-  ndims(T) != 2 && throw(DimensionMismatch())
-  return array(tensor(T))
-end
-
-"""
-    matrix(T::ITensor, inds...)
-
-Convert an ITensor `T` to a Matrix.
-
-The ordering of the elements in the Matrix is specified
-by the input indices `inds`. This tries to avoid copying
-if possible (i.e. it may return a view of the original
-data), for example if the ITensor's storage is Dense
-and the indices are already in the specified ordering
-so that no permutation is required.
-
-!!! warning
-    Note that in the future we may return specialized
-    AbstractArray types for certain storage types,
-    for example a `LinearAlgebra.Diagonal` type for
-    an ITensor with `Diag` storage. The specific storage
-    type shouldn't be relied upon.
-
-See also [`array`](@ref), [`vector`](@ref).
-"""
-matrix(T::ITensor, inds...) = matrix(permute(T, inds...; allow_alias=true))
-
-"""
-    vector(T::ITensor)
-
-Given an ITensor `T` with one index, returns
-a Vector with a copy of the ITensor's elements,
-or a view in the case the ITensor's storage is Dense.
-
-See also [`array`](@ref), [`matrix`](@ref).
-"""
-function vector(T::ITensor)
-  ndims(T) != 1 && throw(DimensionMismatch())
-  return array(tensor(T))
-end
-
-"""
-    vector(T::ITensor, inds...)
-
-Convert an ITensor `T` to a Vector.
-
-The ordering of the elements in the Vector is specified
-by the input indices `inds`. This tries to avoid copying
-if possible (i.e. it may return a view of the original
-data), for example if the ITensor's storage is Dense
-and the indices are already in the specified ordering
-so that no permutation is required.
-
-!!! warning
-    Note that in the future we may return specialized
-    AbstractArray types for certain storage types,
-    for example a `LinearAlgebra.Diagonal` type for
-    an ITensor with `Diag` storage. The specific storage
-    type shouldn't be relied upon.
-
-See also [`array`](@ref), [`matrix`](@ref).
-"""
-vector(T::ITensor, inds...) = vector(permute(T, inds...; allow_alias=true))
diff --git a/src/tensor_operations/tensor_algebra.jl b/src/tensor_operations/tensor_algebra.jl
deleted file mode 100644
index a56c99bf57..0000000000
--- a/src/tensor_operations/tensor_algebra.jl
+++ /dev/null
@@ -1,642 +0,0 @@
-function _contract(A::Tensor, B::Tensor)
-  labelsA, labelsB = compute_contraction_labels(inds(A), inds(B))
-  return contract(A, labelsA, B, labelsB)
-  # TODO: Alternative to try (`noncommoninds` is too slow right now)
-  #return _contract!!(EmptyTensor(Float64, _Tuple(noncommoninds(inds(A), inds(B)))), A, B)
-end
-
-function _contract(A::ITensor, B::ITensor)::ITensor
-  C = itensor(_contract(tensor(A), tensor(B)))
-  warnTensorOrder = get_warn_order()
-  if !isnothing(warnTensorOrder) && order(C) >= warnTensorOrder
-    println("Contraction resulted in ITensor with $(order(C)) indices, which is greater
-    than or equal to the ITensor order warning threshold $warnTensorOrder.
- You can modify the threshold with macros like `@set_warn_order N`, - `@reset_warn_order`, and `@disable_warn_order` or functions like - `ITensors.set_warn_order(N::Int)`, `ITensors.reset_warn_order()`, and - `ITensors.disable_warn_order()`.") - # This prints a vector, not formatted well - #show(stdout, MIME"text/plain"(), stacktrace()) - Base.show_backtrace(stdout, backtrace()) - println() - end - return C -end - -""" - A::ITensor * B::ITensor - contract(A::ITensor, B::ITensor) - -Contract ITensors A and B to obtain a new ITensor. This -contraction `*` operator finds all matching indices common -to A and B and sums over them, such that the result will -have only the unique indices of A and B. To prevent -indices from matching, their prime level or tags can be -modified such that they no longer compare equal - for more -information see the documentation on Index objects. - -# Examples - -```julia -i = Index(2,"index_i"); j = Index(4,"index_j"); k = Index(3,"index_k") - -A = random_itensor(i,j) -B = random_itensor(j,k) -C = A * B # contract over Index j - -A = random_itensor(i,i') -B = random_itensor(i,i'') -C = A * B # contract over Index i - -A = random_itensor(i) -B = random_itensor(j) -C = A * B # outer product of A and B, no contraction - -A = random_itensor(i,j,k) -B = random_itensor(k,i,j) -C = A * B # inner product of A and B, all indices contracted -``` -""" -function (A::ITensor * B::ITensor)::ITensor - return contract(A, B) -end - -function contract(A::ITensor, B::ITensor)::ITensor - NA::Int = ndims(A) - NB::Int = ndims(B) - if NA == 0 && NB == 0 - return (iscombiner(A) || iscombiner(B)) ? _contract(A, B) : ITensor(A[] * B[]) - elseif NA == 0 - return iscombiner(A) ? _contract(A, B) : A[] * B - elseif NB == 0 - return iscombiner(B) ? _contract(B, A) : B[] * A - end - return _contract(A, B) -end - -function optimal_contraction_sequence(A::Union{Vector{<:ITensor},Tuple{Vararg{ITensor}}}) - if length(A) == 1 - return optimal_contraction_sequence(A[1]) - elseif length(A) == 2 - return optimal_contraction_sequence(A[1], A[2]) - elseif length(A) == 3 - return optimal_contraction_sequence(A[1], A[2], A[3]) - else - return _optimal_contraction_sequence(A) - end -end - -optimal_contraction_sequence(A::ITensor) = Any[1] -optimal_contraction_sequence(A1::ITensor, A2::ITensor) = Any[1, 2] -function optimal_contraction_sequence(A1::ITensor, A2::ITensor, A3::ITensor) - return optimal_contraction_sequence(inds(A1), inds(A2), inds(A3)) -end -optimal_contraction_sequence(As::ITensor...) = _optimal_contraction_sequence(As) - -_optimal_contraction_sequence(As::Tuple{<:ITensor}) = Any[1] -_optimal_contraction_sequence(As::Tuple{<:ITensor,<:ITensor}) = Any[1, 2] -function _optimal_contraction_sequence(As::Tuple{<:ITensor,<:ITensor,<:ITensor}) - return optimal_contraction_sequence(inds(As[1]), inds(As[2]), inds(As[3])) -end -function _optimal_contraction_sequence(As::Tuple{Vararg{ITensor}}) - return __optimal_contraction_sequence(As) -end - -_optimal_contraction_sequence(As::Vector{<:ITensor}) = __optimal_contraction_sequence(As) - -function __optimal_contraction_sequence(As) - indsAs = [inds(A) for A in As] - return optimal_contraction_sequence(indsAs) -end - -function default_sequence() - return using_contraction_sequence_optimization() ? "automatic" : "left_associative" -end - -function contraction_cost(As::Union{Vector{<:ITensor},Tuple{Vararg{ITensor}}}; kwargs...) - indsAs = [inds(A) for A in As] - return contraction_cost(indsAs; kwargs...) 
-end - -# TODO: provide `contractl`/`contractr`/`*ˡ`/`*ʳ` as shorthands for left associative and right associative contractions. -""" - *(As::ITensor...; sequence = default_sequence(), kwargs...) - *(As::Vector{<: ITensor}; sequence = default_sequence(), kwargs...) - contract(As::ITensor...; sequence = default_sequence(), kwargs...) - -Contract the set of ITensors according to the contraction sequence. - -The default sequence is "automatic" if `ITensors.using_contraction_sequence_optimization()` -is true, otherwise it is "left_associative" (the ITensors are contracted from left to right). - -You can change the default with `ITensors.enable_contraction_sequence_optimization()` and -`ITensors.disable_contraction_sequence_optimization()`. - -For a custom sequence, the sequence should be provided as a binary tree where the leaves are -integers `n` specifying the ITensor `As[n]` and branches are accessed -by indexing with `1` or `2`, i.e. `sequence = Any[Any[1, 3], Any[2, 4]]`. -""" -function contract(tn::AbstractVector; kwargs...) - return if all(x -> x isa ITensor, tn) - contract(convert(Vector{ITensor}, tn); kwargs...) - else - deepcontract(tn; kwargs...) - end -end - -# Contract a tensor network such as: -# [A, B, [[C, D], [E, [F, G]]]] -deepcontract(t::ITensor, ts::ITensor...) = *(t, ts...) -function deepcontract(tn::AbstractVector) - return deepcontract(deepcontract.(tn)...) -end - -function contract( - As::Union{Vector{ITensor},Tuple{Vararg{ITensor}}}; sequence=default_sequence(), kwargs... -)::ITensor - if sequence == "left_associative" - return foldl((A, B) -> contract(A, B; kwargs...), As) - elseif sequence == "right_associative" - return foldr((A, B) -> contract(A, B; kwargs...), As) - elseif sequence == "automatic" - return _contract(As, optimal_contraction_sequence(As); kwargs...) - else - return _contract(As, sequence; kwargs...) - end -end - -contract(As::ITensor...; kwargs...)::ITensor = contract(As; kwargs...) - -_contract(As, sequence::Int) = As[sequence] - -# Given a contraction sequence, contract the tensors recursively according -# to that sequence. -function _contract(As, sequence::AbstractVector; kwargs...)::ITensor - return contract(_contract.((As,), sequence)...; kwargs...) -end - -*(As::ITensor...; kwargs...)::ITensor = contract(As...; kwargs...) - -function contract!(C::ITensor, A::ITensor, B::ITensor, α::Number, β::Number=0)::ITensor - labelsCAB = compute_contraction_labels(inds(C), inds(A), inds(B)) - labelsC, labelsA, labelsB = labelsCAB - CT = NDTensors.contract!!( - tensor(C), _Tuple(labelsC), tensor(A), _Tuple(labelsA), tensor(B), _Tuple(labelsB), α, β - ) - setstorage!(C, storage(CT)) - setinds!(C, inds(C)) - return C -end - -function _contract!!(C::Tensor, A::Tensor, B::Tensor) - labelsCAB = compute_contraction_labels(inds(C), inds(A), inds(B)) - labelsC, labelsA, labelsB = labelsCAB - CT = NDTensors.contract!!(C, labelsC, A, labelsA, B, labelsB) - return CT -end - -# This is necessary for now since not all types implement contract!! -# with non-trivial α and β -function contract!(C::ITensor, A::ITensor, B::ITensor)::ITensor - return settensor!(C, _contract!!(tensor(C), tensor(A), tensor(B))) -end - -""" - hadamard_product!(C::ITensor, A::ITensor, B::ITensor) - hadamard_product(A::ITensor, B::ITensor) - ⊙(A::ITensor, B::ITensor) - -Elementwise product of 2 ITensors with the same indices. - -Alternative syntax `⊙` can be typed in the REPL with `\\odot `. 
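-
-# Examples
-
-A minimal sketch, assuming an index `i = Index(2, "i")`:
-
-```julia
-i = Index(2, "i")
-A = random_itensor(i, i')
-B = random_itensor(i', i)
-C = A ⊙ B # same as hadamard_product(A, B)
-# Elements are matched by Index identity, so for all n, m:
-# C[i => n, i' => m] == A[i => n, i' => m] * B[i => n, i' => m]
-```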
-""" -function hadamard_product!(R::ITensor, T1::ITensor, T2::ITensor) - if !hassameinds(T1, T2) - error("ITensors must have some indices to perform Hadamard product") - end - # Permute the indices to the same order - #if inds(A) ≠ inds(B) - # B = permute(B, inds(A)) - #end - #tensor(C) .= tensor(A) .* tensor(B) - map!((t1, t2) -> *(t1, t2), R, T1, T2) - return R -end - -# TODO: instead of copy, use promote(A, B) -function hadamard_product(A::ITensor, B::ITensor) - Ac = copy(A) - return hadamard_product!(Ac, Ac, B) -end - -⊙(A::ITensor, B::ITensor) = hadamard_product(A, B) - -function directsum_projectors!(D1::Tensor, D2::Tensor) - d1 = size(D1, 1) - for ii in 1:d1 - D1[ii, ii] = one(eltype(D1)) - end - d2 = size(D2, 1) - for jj in 1:d2 - D2[jj, d1 + jj] = one(eltype(D1)) - end - return D1, D2 -end - -# Helper tensors for performing a partial direct sum -function directsum_projectors( - elt1::Type{<:Number}, elt2::Type{<:Number}, i::Index, j::Index, ij::Index -) - # Ideally we would just use the following but it gives - # an error that `setindex!` isn't defined: - # D1 = ITensor(elt1, dag(i), ij) - # D2 = ITensor(elt1, dag(j), ij) - # Or with new notation: - # D1 = zeros(elt1, dag(i), ij) - # D2 = zeros(elt1, dag(j), ij) - elt = promote_type(elt1, elt2) - D1 = zeros_itensor(elt, dag(i), ij) - D2 = zeros_itensor(elt, dag(j), ij) - directsum_projectors!(tensor(D1), tensor(D2)) - return D1, D2 -end - -function directsum_projectors( - ::Type{<:EmptyNumber}, ::Type{<:EmptyNumber}, ::Index, ::Index, ::Index -) - return error( - "It is not possible to call directsum on two tensors with element type EmptyNumber. -If you are inputting ITensors constructed like `ITensor(i, j)`, try specifying the element type, -e.g. `ITensor(Float64, i, j)`, or fill them with zero values, e.g. `ITensor(zero(Float64), i, j)`.", - ) -end - -function check_directsum_inds(A::ITensor, I, B::ITensor, J) - a = uniqueinds(A, I) - b = uniqueinds(B, J) - if !hassameinds(a, b) - error("""In directsum, attemptying to direct sum ITensors A and B with indices: - - $(inds(A)) - - and - - $(inds(B)) - - over the indices - - $(I) - - and - - $(J) - - The indices not being direct summed must match, however they are - - $a - - and - - $b - """) - end -end - -function _directsum( - IJ::Nothing, A::ITensor, I, B::ITensor, J; tags=default_directsum_tags(A => I) -) - N = length(I) - (N != length(J)) && - error("In directsum(::ITensor, ::ITensor, ...), must sum equal number of indices") - check_directsum_inds(A, I, B, J) - # Fix the Index direction for QN indices - # TODO: Define `getfirstind`? - I = map(In -> getfirst(==(In), inds(A)), I) - J = map(Jn -> getfirst(==(Jn), inds(B)), J) - IJ = Vector{Base.promote_eltype(I, J)}(undef, N) - for n in 1:N - IJ[n] = directsum(I[n], J[n]; tags=tags[n]) - end - return _directsum(IJ, A, I, B, J) -end - -function _directsum(IJ, A::ITensor, I, B::ITensor, J; tags=nothing) - N = length(I) - (N != length(J)) && - error("In directsum(::ITensor, ::ITensor, ...), must sum equal number of indices") - check_directsum_inds(A, I, B, J) - # Fix the Index direction for QN indices - # TODO: Define `getfirstind`? - I = map(In -> getfirst(==(In), inds(A)), I) - J = map(Jn -> getfirst(==(Jn), inds(B)), J) - for n in 1:N - # TODO: Pass the entire `datatype` instead of just the `eltype`. 
- D1, D2 = directsum_projectors(eltype(A), eltype(B), I[n], J[n], IJ[n]) - A *= adapt(datatype(A), D1) - B *= adapt(datatype(B), D2) - end - C = A + B - return C => IJ -end - -to_inds(i::Index) = (i,) -to_inds(i::Indices) = i -to_inds(::Nothing) = nothing - -function __directsum( - ij, A::ITensor, i::Index, B::ITensor, j::Index; tags=default_directsum_tags(A => i) -) - C, (ij,) = _directsum(to_inds(ij), A, to_inds(i), B, to_inds(j); tags=[tags]) - return C => ij -end - -function _directsum(ij::Nothing, A::ITensor, i::Index, B::ITensor, j::Index; kwargs...) - return __directsum(ij, A, i, B, j; kwargs...) -end - -function _directsum(ij::Index, A::ITensor, i::Index, B::ITensor, j::Index; kwargs...) - return __directsum(ij, A, i, B, j; kwargs...) -end - -function default_directsum_tags(A_and_I::Pair{ITensor}) - return ["sum$i" for i in 1:length(last(A_and_I))] -end - -function default_directsum_tags(A_and_I::Pair{ITensor,<:Index}) - return "sum" -end - -""" - directsum(A::Pair{ITensor}, B::Pair{ITensor}, ...; tags) - - directsum(output_inds, A::Pair{ITensor}, B::Pair{ITensor}, ...; tags) - -Given a list of pairs of ITensors and indices, perform a partial -direct sum of the tensors over the specified indices. Indices that are -not specified to be summed must match between the tensors. - -(Note: `Pair{ITensor}` in Julia is short for `Pair{ITensor,<:Any}` which means any pair `T => x` where `T` is an ITensor.) - -If all indices are specified then the operation is equivalent to creating -a block diagonal tensor. - -Returns the ITensor representing the partial direct sum as well as the new -direct summed indices. The tags of the direct summed indices are specified -by the keyword arguments. - -Optionally, pass the new direct summed indices of the output tensor as the -first argument (either a single Index or a collection), which must be proper -direct sums of the input indices that are specified to be direct summed. - -See Section 2.3 of https://arxiv.org/abs/1405.7786 for a definition of a partial -direct sum of tensors. 
- -# Examples -```julia -x = Index(2, "x") -i1 = Index(3, "i1") -j1 = Index(4, "j1") -i2 = Index(5, "i2") -j2 = Index(6, "j2") - -A1 = random_itensor(x, i1) -A2 = random_itensor(x, i2) -S, s = directsum(A1 => i1, A2 => i2) -dim(s) == dim(i1) + dim(i2) - -i1i2 = directsum(i1, i2) -S = directsum(i1i2, A1 => i1, A2 => i2) -hasind(S, i1i2) - -A3 = random_itensor(x, j1) -S, s = directsum(A1 => i1, A2 => i2, A3 => j1) -dim(s) == dim(i1) + dim(i2) + dim(j1) - -A1 = random_itensor(i1, x, j1) -A2 = random_itensor(x, j2, i2) -S, s = directsum(A1 => (i1, j1), A2 => (i2, j2); tags = ["sum_i", "sum_j"]) -length(s) == 2 -dim(s[1]) == dim(i1) + dim(i2) -dim(s[2]) == dim(j1) + dim(j2) -``` -""" -function directsum( - A_and_I::Pair{ITensor}, - B_and_J::Pair{ITensor}, - C_and_K::Pair{ITensor}, - itensor_and_inds...; - tags=default_directsum_tags(A_and_I), -) - return directsum(nothing, A_and_I, B_and_J, C_and_K, itensor_and_inds...; tags) -end - -function directsum( - output_inds::Nothing, - A_and_I::Pair{ITensor}, - B_and_J::Pair{ITensor}, - C_and_K::Pair{ITensor}, - itensor_and_inds...; - tags=default_directsum_tags(A_and_I), -) - return directsum( - output_inds, - directsum(nothing, A_and_I, B_and_J; tags), - C_and_K, - itensor_and_inds...; - tags, - ) -end - -function directsum( - output_inds::Union{Index,Indices}, - A_and_I::Pair{ITensor}, - B_and_J::Pair{ITensor}, - C_and_K::Pair{ITensor}, - itensor_and_inds...; - tags=default_directsum_tags(A_and_I), -) - return directsum( - output_inds, - directsum(nothing, A_and_I, B_and_J; tags), - C_and_K, - itensor_and_inds...; - tags, - ) -end - -function directsum(A_and_I::Pair{ITensor}, B_and_J::Pair{ITensor}; kwargs...) - return directsum(nothing, A_and_I, B_and_J; kwargs...) -end - -function directsum( - output_inds::Nothing, A_and_I::Pair{ITensor}, B_and_J::Pair{ITensor}; kwargs... -) - return _directsum(output_inds, A_and_I..., B_and_J...; kwargs...) -end - -function directsum( - output_inds::Union{Index,Indices}, - A_and_I::Pair{ITensor}, - B_and_J::Pair{ITensor}; - kwargs..., -) - return first(_directsum(output_inds, A_and_I..., B_and_J...; kwargs...)) -end - -const ⊕ = directsum - -""" - apply(A::ITensor, B::ITensor) - (A::ITensor)(B::ITensor) - product(A::ITensor, B::ITensor) - -Get the product of ITensor `A` and ITensor `B`, which -roughly speaking is a matrix-matrix product, a -matrix-vector product, a vector-matrix product, or a -vector-vector product, depending on the index structure. - -There are four main modes: - -1. Matrix-matrix product. In this case, ITensors `A` -and `B` have shared indices that come in pairs of primed -and unprimed indices. Then, `A` and `B` are multiplied -together, treating them as matrices from the unprimed -to primed indices, resulting in an ITensor `C` that -has the same pairs of primed and unprimed indices. -For example: -``` -s1'-<-----<-s1 s1'-<-----<-s1 s1'-<-----<-s1 - |C| = product( |A| |B| ) -s2'-<-----<-s2 s2'-<-----<-s2 , s2'-<-----<-s2 -``` -Essentially, this is implemented as -`C = mapprime(A' * B, 2 => 1)`.
-If there are dangling indices that are not shared between -`A` and `B`, a "batched" matrix multiplication is -performed, i.e.: -``` - j j - | | -s1'-<-----<-s1 s1'-<-----<-s1 s1'-<-----<-s1 - |C| = product( |A| |B| ) -s2'-<-----<-s2 s2'-<-----<-s2 , s2'-<-----<-s2 -``` -In addition, if there are shared dangling indices, -they are summed over: -``` - j j - | | -s1'-<-----<-s1 s1'-<-----<-s1 s1'-<-----<-s1 - |C| = Σⱼ product( |A| |B| ) -s2'-<-----<-s2 s2'-<-----<-s2 , s2'-<-----<-s2 -``` -where the sum is not performed as an explicit -for-loop, but as part of a single tensor contraction. - -2. Matrix-vector product. In this case, ITensor `A` -has pairs of primed and unprimed indices, and ITensor -`B` has unprimed indices that are shared with `A`. -Then, `A` and `B` are multiplied as a matrix-vector -product, and the result `C` has unprimed indices. -For example: -``` -s1-<---- s1'-<-----<-s1 s1-<---- - |C| = product( |A| |B| ) -s2-<---- s2'-<-----<-s2 , s2-<---- -``` -Again, like in the matrix-matrix product above, you can have -dangling indices to do "batched" matrix-vector products, or -sum over a batch of matrix-vector products. - -3. Vector-matrix product. In this case, ITensor `B` -has pairs of primed and unprimed indices, and ITensor -`A` has unprimed indices that are shared with `B`. -Then, `B` and `A` are multiplied as a matrix-vector -product, and the result `C` has unprimed indices. -For example: -``` ----<-s1 ----<-s1 s1'-<-----<-s1 -|C| = product( |A| |B| ) ----<-s2 ----<-s2 , s2'-<-----<-s2 -``` -Again, like in the matrix-matrix product above, you can have -dangling indices to do "batched" vector-matrix products, or -sum over a batch of vector-matrix products. - -4. Vector-vector product. In this case, ITensors `A` -and `B` share unprimed indices. -Then, `B` and `A` are multiplied as a vector-vector -product, and the result `C` is a scalar ITensor. -For example: -``` ---- ----<-s1 s1-<---- -|C| = product( |A| |B| ) ---- ----<-s2 , s2-<---- -``` -Again, like in the matrix-matrix product above, you can have -dangling indices to do "batched" vector-vector products, or -sum over a batch of vector-vector products.
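-
-# Examples
-
-A minimal sketch of the matrix-matrix mode, assuming a single site
-index `s = Index(2, "s")`:
-
-```julia
-s = Index(2, "s")
-A = random_itensor(s', s)
-B = random_itensor(s', s)
-C = apply(A, B) # same as mapprime(A' * B, 2 => 1)
-hasinds(C, s', s) # true; C has the same primed/unprimed pair
-```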
-""" -function product(A::ITensor, B::ITensor; apply_dag::Bool=false) - commonindsAB = commoninds(A, B; plev=0) - isempty(commonindsAB) && error("In product, must have common indices with prime level 0.") - common_paired_indsA = filterinds( - i -> hasind(commonindsAB, i) && hasind(A, setprime(i, 1)), A - ) - common_paired_indsB = filterinds( - i -> hasind(commonindsAB, i) && hasind(B, setprime(i, 1)), B - ) - - if !isempty(common_paired_indsA) - commoninds_pairs = unioninds(common_paired_indsA, common_paired_indsA') - elseif !isempty(common_paired_indsB) - commoninds_pairs = unioninds(common_paired_indsB, common_paired_indsB') - else - # vector-vector product - apply_dag && error("apply_dag not supported for vector-vector product") - return A * B - end - danglings_indsA = uniqueinds(A, commoninds_pairs) - danglings_indsB = uniqueinds(B, commoninds_pairs) - danglings_inds = unioninds(danglings_indsA, danglings_indsB) - if hassameinds(common_paired_indsA, common_paired_indsB) - # matrix-matrix product - A′ = prime(A; inds=!danglings_inds) - AB = mapprime(A′ * B, 2 => 1; inds=!danglings_inds) - if apply_dag - AB′ = prime(AB; inds=!danglings_inds) - Adag = swapprime(dag(A), 0 => 1; inds=!danglings_inds) - return mapprime(AB′ * Adag, 2 => 1; inds=!danglings_inds) - end - return AB - elseif isempty(common_paired_indsA) && !isempty(common_paired_indsB) - # vector-matrix product - apply_dag && error("apply_dag not supported for matrix-vector product") - A′ = prime(A; inds=!danglings_inds) - return A′ * B - elseif !isempty(common_paired_indsA) && isempty(common_paired_indsB) - # matrix-vector product - apply_dag && error("apply_dag not supported for vector-matrix product") - return replaceprime(A * B, 1 => 0; inds=!danglings_inds) - end -end - -""" - product(As::Vector{<:ITensor}, A::ITensor) - -Product the ITensors pairwise. -""" -function product(As::Vector{<:ITensor}, B::ITensor; kwargs...) - AB = B - for A in As - AB = product(A, AB; kwargs...) - end - return AB -end - -# Alias apply with product -const apply = product - -(A::ITensor)(B::ITensor) = apply(A, B) - -const Apply{Args} = Applied{typeof(apply),Args} diff --git a/src/usings.jl b/src/usings.jl deleted file mode 100644 index f01c587e17..0000000000 --- a/src/usings.jl +++ /dev/null @@ -1,28 +0,0 @@ -# TODO: Delete these and move them to the tops of files where -# they are be used, and change to explicit usings, i.e. -# `using NDTensors: tensor`. -# Try using `ExplicitImports.jl`: -# https://github.com/ericphanson/ExplicitImports.jl -# to automate the process. -using Adapt -using BitIntegers -using Compat -using DocStringExtensions -using Functors -using IsApprox -using LinearAlgebra -using NDTensors -using NDTensors.RankFactorization: Spectrum, eigs, entropy, truncerror -using Pkg -using Printf -using Random -using SerializedElementArrays -using StaticArrays -using TimerOutputs -using TupleTools -using Zeros - -# This makes `scalartype` available externally as -# `ITensors.scalartype`, this isn't be used within -# the `ITensors` module right now. -using NDTensors: scalartype diff --git a/src/utils.jl b/src/utils.jl deleted file mode 100644 index 3812a4f006..0000000000 --- a/src/utils.jl +++ /dev/null @@ -1,27 +0,0 @@ - -# Warn only once, using the message `msg`. -# `funcsym` is a symbol that determines if the warning has been -# called before (so there is only one warning per `funcsym`). 
-function warn_once(msg, funcsym; force=true, stacktrace=true) - if stacktrace - io = IOBuffer() - Base.show_backtrace(io, backtrace()) - backtrace_string = String(take!(io)) - backtrace_string *= "\n" - msg *= backtrace_string - end - Base.depwarn(msg, funcsym; force) - return nothing -end - -# Directory helper functions (useful for -# running examples) -src_dir() = dirname(pathof(@__MODULE__)) -pkg_dir() = joinpath(dirname(pathof(@__MODULE__)), "..") - -# Determine version and uuid of the package -function _parse_project_toml(field::String) - return Pkg.TOML.parsefile(joinpath(pkg_dir(), "Project.toml"))[field] -end -version() = VersionNumber(_parse_project_toml("version")) -uuid() = Base.UUID(_parse_project_toml("uuid")) diff --git a/src/val.jl b/src/val.jl deleted file mode 100644 index b9da546551..0000000000 --- a/src/val.jl +++ /dev/null @@ -1 +0,0 @@ -function val end diff --git a/test/Project.toml b/test/Project.toml index f05c7e73ae..e9c291ea3a 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -1,19 +1,11 @@ [deps] -BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" -ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" -ChainRulesTestUtils = "cdddcdb0-9152-4a09-a978-84456f9df70a" -Combinatorics = "861a8166-3701-5b0c-9a16-15d98fcdc6aa" -Compat = "34da2185-b29b-5c13-b0c7-acf172513d20" -FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000" -HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f" -ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" -LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -MappedArrays = "dbb5928d-eab1-5f90-85c2-b9b0edb7c900" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" -Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" -QuadGK = "1fd47b50-473d-5c70-9696-f719f8f3bcdc" -Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" +Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" +SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f" Suppressor = "fd094767-a336-5f1f-9728-57cf17d0bbfb" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" -VectorInterface = "409d34a3-91d5-4945-b6ec-7529ddf182d8" -Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" + +[compat] +Aqua = "0.8.9" +SafeTestsets = "0.1" +Suppressor = "0.2" +Test = "1.10" diff --git a/test/base/Project.toml b/test/base/Project.toml deleted file mode 100644 index 40db993045..0000000000 --- a/test/base/Project.toml +++ /dev/null @@ -1,8 +0,0 @@ -[deps] -Combinatorics = "861a8166-3701-5b0c-9a16-15d98fcdc6aa" -HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f" -ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" -QuadGK = "1fd47b50-473d-5c70-9696-f719f8f3bcdc" -Suppressor = "fd094767-a336-5f1f-9728-57cf17d0bbfb" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/test/base/runtests.jl b/test/base/runtests.jl deleted file mode 100644 index ff4b5e43d1..0000000000 --- a/test/base/runtests.jl +++ /dev/null @@ -1,16 +0,0 @@ -@eval module $(gensym()) -using ITensors: ITensors -using Test: @testset -ITensors.Strided.disable_threads() -ITensors.BLAS.set_num_threads(1) -ITensors.disable_threaded_blocksparse() -@testset "$(@__DIR__)" begin - filenames = filter(readdir(@__DIR__)) do f - startswith("test_")(f) && endswith(".jl")(f) - end - @testset "Test $(@__DIR__)/$filename" for filename in filenames - println("Running $(@__DIR__)/$filename") - @time include(filename) - end -end -end diff --git a/test/base/test_argsdict.jl b/test/base/test_argsdict.jl deleted file mode 100644 index 0fb251b785..0000000000 --- a/test/base/test_argsdict.jl +++ /dev/null @@ -1,115 +0,0 @@ -using 
ITensors -using Test - -@testset "Test argsdict function" begin - args_copy = copy(ARGS) - empty!(args_copy) - push!( - args_copy, - "x", - "n = 1", - "nf :: Float64 = 2", - "ni :: AutoType = 2", - "ns :: String = 2", - "nc :: ComplexF64 = 3", - "x = 2e-1, 2e-3, 0.1", - "2::AutoType", - "N = 1e-3", - "vc = ComplexF64[1 + 2im, 3]", - "3", - "--autotype", - "vf = [1.0, 3]", - "y = true", - "1+2im", - "s = \"use_qr\"", - "--stringtype", - "y", - ) - args = argsdict(args_copy) - empty!(args_copy) - - @test args["_arg1"] == "x" - @test args["nf"] == 2.0 - @test args["nc"] == 3.0 + 0.0im - @test args["ns"] == "2" - @test args["ni"] == 2 - @test args["y"] == true - @test args["N"] == 0.001 - @test args["x"] == (0.2, 0.002, 0.1) - @test args["_arg2"] == 2 - @test args["vc"] == Complex{Float64}[1.0 + 2.0im, 3.0 + 0.0im] - @test args["_arg3"] == "3" - @test args["vf"] == [1.0, 3.0] - @test args["n"] == 1 - @test args["_arg4"] == 1 + 2im - @test args["s"] == "use_qr" - @test args["_arg5"] == "y" - - push!( - args_copy, - "x", - "n -> 1", - "nf :: Float64 -> 2", - "ni :: AutoType -> 2", - "ns :: String -> 2", - "nc :: ComplexF64 -> 3", - "x -> 2e-1, 2e-3, 0.1", - "2", - "N -> 1e-3", - "vc -> ComplexF64[1 + 2im, 3]", - "3", - "--autotype", - "vf -> [1.0, 3]", - "y -> true", - "1+2im", - "s -> \"use_qr\"", - "--stringtype", - "y", - ) - args = argsdict( - args_copy; - first_arg=2, - delim="->", - as_symbols=true, - default_named_type=String, - default_positional_type=ITensors.AutoType, - prefix="test", - ) - empty!(args_copy) - - @test args[:nf] == 2.0 - @test args[:ni] == 2 - @test args[:ns] == "2" - @test args[:nc] == 3.0 + 0.0im - @test args[:y] == true - @test args[:N] == "1e-3" - @test args[:x] == "2e-1, 2e-3, 0.1" - @test args[:test1] == 2 - @test args[:vc] == "ComplexF64[1 + 2im, 3]" - @test args[:test2] == 3 - @test args[:vf] == [1.0, 3.0] - @test args[:n] == "1" - @test args[:test3] == 1 + 2im - @test args[:s] == "use_qr" - @test args[:test4] == "y" - - # - # Check for some syntax errors - # - - push!(args_copy, "x y=2") - @test_throws ErrorException argsdict(args_copy) - empty!(args_copy) - - push!(args_copy, "x=y=2") - @test_throws ErrorException argsdict(args_copy) - empty!(args_copy) - - push!(args_copy, "x::MyType = 2") - @test_throws UndefVarError argsdict(args_copy) - empty!(args_copy) - - push!(args_copy, "x = y") - @test_throws UndefVarError argsdict(args_copy) - empty!(args_copy) -end diff --git a/test/base/test_broadcast.jl b/test/base/test_broadcast.jl deleted file mode 100644 index 9eb11ec00a..0000000000 --- a/test/base/test_broadcast.jl +++ /dev/null @@ -1,308 +0,0 @@ -using ITensors, Test - -@testset "ITensor broadcast syntax" begin - i = Index(2, "i") - A = random_itensor(i, i') - B = random_itensor(i', i) - α = 2 - β = 3 - - @testset "Copy" begin - Bc = copy(B) - Bc .= A - @test Bc[1, 1] == A[1, 1] - @test Bc[2, 1] == A[1, 2] - @test Bc[1, 2] == A[2, 1] - @test Bc[2, 2] == A[2, 2] - end - - @testset "Fill" begin - Bc = copy(B) - Bc .= α - @test Bc[1, 1] == α - @test Bc[2, 1] == α - @test Bc[1, 2] == α - @test Bc[2, 2] == α - end - - @testset "Scaling" begin - Bc = copy(B) - Bc .*= α - @test Bc[1, 1] ≈ α * B[1, 1] - @test Bc[2, 1] ≈ α * B[2, 1] - @test Bc[1, 2] ≈ α * B[1, 2] - @test Bc[2, 2] ≈ α * B[2, 2] - end - - @testset "Dividing" begin - Bc = copy(B) - Bc ./= α - @test Bc[1, 1] ≈ B[1, 1] / α - @test Bc[2, 1] ≈ B[2, 1] / α - @test Bc[1, 2] ≈ B[1, 2] / α - @test Bc[2, 2] ≈ B[2, 2] / α - end - - @testset "Scalar multiplication (in-place)" begin - Bc = copy(B) - Bc .= α .* A - 
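-    # Note: A has indices (i, i') while Bc has (i', i); ITensor broadcasting
-    # matches elements by Index identity, so positionally the entries of A
-    # appear transposed in Bc, as the checks below reflect.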
@test Bc[1, 1] ≈ α * A[1, 1] - @test Bc[2, 1] ≈ α * A[1, 2] - @test Bc[1, 2] ≈ α * A[2, 1] - @test Bc[2, 2] ≈ α * A[2, 2] - end - - @testset "Dividing (in-place)" begin - Bc = copy(B) - Bc .= A ./ α - @test Bc[1, 1] ≈ A[1, 1] / α - @test Bc[2, 1] ≈ A[1, 2] / α - @test Bc[1, 2] ≈ A[2, 1] / α - @test Bc[2, 2] ≈ A[2, 2] / α - - Bc = copy(B) - Bc .= α ./ A - @test Bc[1, 1] ≈ α / A[1, 1] - @test Bc[2, 1] ≈ α / A[1, 2] - @test Bc[1, 2] ≈ α / A[2, 1] - @test Bc[2, 2] ≈ α / A[2, 2] - - Bc = copy(B) - Bc .= Bc ./ A - @test Bc[1, 1] ≈ B[1, 1] / A[1, 1] - @test Bc[2, 1] ≈ B[2, 1] / A[1, 2] - @test Bc[1, 2] ≈ B[1, 2] / A[2, 1] - @test Bc[2, 2] ≈ B[2, 2] / A[2, 2] - - Bc = copy(B) - Bc .= A ./ Bc - @test Bc[1, 1] ≈ A[1, 1] / B[1, 1] - @test Bc[2, 1] ≈ A[1, 2] / B[2, 1] - @test Bc[1, 2] ≈ A[2, 1] / B[1, 2] - @test Bc[2, 2] ≈ A[2, 2] / B[2, 2] - end - - @testset "Add and divide (in-place)" begin - Bc = copy(B) - Bc .+= A ./ α - @test Bc[1, 1] ≈ B[1, 1] + A[1, 1] / α - @test Bc[2, 1] ≈ B[2, 1] + A[1, 2] / α - @test Bc[1, 2] ≈ B[1, 2] + A[2, 1] / α - @test Bc[2, 2] ≈ B[2, 2] + A[2, 2] / α - - Bc = copy(B) - Bc .+= α ./ A - @test Bc[1, 1] ≈ B[1, 1] + α / A[1, 1] - @test Bc[2, 1] ≈ B[2, 1] + α / A[1, 2] - @test Bc[1, 2] ≈ B[1, 2] + α / A[2, 1] - @test Bc[2, 2] ≈ B[2, 2] + α / A[2, 2] - end - - @testset "Subtract and divide (in-place)" begin - Bc = copy(B) - Bc .-= A ./ α - @test Bc[1, 1] ≈ B[1, 1] - A[1, 1] / α - @test Bc[2, 1] ≈ B[2, 1] - A[1, 2] / α - @test Bc[1, 2] ≈ B[1, 2] - A[2, 1] / α - @test Bc[2, 2] ≈ B[2, 2] - A[2, 2] / α - - Bc = copy(B) - Bc .-= α ./ A - @test Bc[1, 1] ≈ B[1, 1] - α / A[1, 1] - @test Bc[2, 1] ≈ B[2, 1] - α / A[1, 2] - @test Bc[1, 2] ≈ B[1, 2] - α / A[2, 1] - @test Bc[2, 2] ≈ B[2, 2] - α / A[2, 2] - end - - @testset "Scalar multiplication (out-of-place)" begin - Bc = α .* A - @test Bc[1, 1] ≈ α * A[1, 1] - @test Bc[2, 1] ≈ α * A[2, 1] - @test Bc[1, 2] ≈ α * A[1, 2] - @test Bc[2, 2] ≈ α * A[2, 2] - end - - @testset "Addition" begin - Bc = copy(B) - Bc .= A .+ Bc - @test Bc[1, 1] ≈ A[1, 1] + B[1, 1] - @test Bc[2, 1] ≈ A[1, 2] + B[2, 1] - @test Bc[1, 2] ≈ A[2, 1] + B[1, 2] - @test Bc[2, 2] ≈ A[2, 2] + B[2, 2] - end - - @testset "Addition (with α)" begin - Bc = copy(B) - Bc .+= A .* α - - @test Bc[1, 1] ≈ α * A[1, 1] + B[1, 1] - @test Bc[2, 1] ≈ α * A[1, 2] + B[2, 1] - @test Bc[1, 2] ≈ α * A[2, 1] + B[1, 2] - @test Bc[2, 2] ≈ α * A[2, 2] + B[2, 2] - end - - @testset "Addition (with α and β)" begin - Bc = copy(B) - Bc .= α .* A .+ β .* Bc - - @test Bc[1, 1] ≈ α * A[1, 1] + β * B[1, 1] - @test Bc[2, 1] ≈ α * A[1, 2] + β * B[2, 1] - @test Bc[1, 2] ≈ α * A[2, 1] + β * B[1, 2] - @test Bc[2, 2] ≈ α * A[2, 2] + β * B[2, 2] - end - - @testset "Addition errors" begin - C = random_itensor(i, i') - @test_throws ErrorException C .= A .+ B - @test_throws ErrorException C = A .+ B - @test_throws ErrorException C .= A .* B - end - - @testset "Contraction" begin - ii = Index(2; tags="ii") - jj = Index(2; tags="jj") - kk = Index(2; tags="kk") - - AA = random_itensor(ii, jj) - BB = random_itensor(kk, jj) - CC = random_itensor(kk, ii) - - R = copy(CC) - R .= AA .* BB - @test AA * BB ≈ R - - R = copy(CC) - R .= α .* AA .* BB - @test α * AA * BB ≈ R - - R = copy(CC) - R .= AA .* α .* BB - @test α * AA * BB ≈ R - - R = copy(CC) - R .= AA .* BB .* α - @test α * AA * BB ≈ R - - R = copy(CC) - R .+= α .* AA .* BB - @test α * AA * BB + CC ≈ R - - R = copy(CC) - R .= β .* R .+ AA .* BB .* α - @test α * AA * BB + β * CC ≈ R - end - - @testset "General functions" begin - absA = abs.(A) - - @test absA[1, 1] ≈ 
abs(A[1, 1]) - @test absA[2, 1] ≈ abs(A[2, 1]) - - Bc = copy(B) - Bc .= sqrt.(absA) - - @test Bc[1, 1] ≈ sqrt(absA[1, 1]) - @test Bc[2, 1] ≈ sqrt(absA[1, 2]) - - Bc2 = copy(B) - Bc2 .+= sqrt.(absA) - - @test Bc2[1, 1] ≈ B[1, 1] + sqrt(absA[1, 1]) - @test Bc2[2, 1] ≈ B[2, 1] + sqrt(absA[1, 2]) - end - - @testset "Some other operations" begin - i = Index(2) - A = random_itensor(i) - B = random_itensor(i) - - absA = abs.(A) - - @test absA[1] ≈ abs(A[1]) - @test absA[2] ≈ abs(A[2]) - - Bc = copy(B) - Bc .= sqrt.(absA) - - @test Bc[1] ≈ sqrt(absA[1]) - @test Bc[2] ≈ sqrt(absA[2]) - - Bc2 = copy(B) - Bc2 .+= sqrt.(absA) - - @test Bc2[1] ≈ B[1] + sqrt(absA[1]) - @test Bc2[2] ≈ B[2] + sqrt(absA[2]) - - Bc3 = copy(B) - Bc3 .= sqrt.(absA) .+ sin.(Bc3) - - @test Bc3[1] ≈ sin(B[1]) + sqrt(absA[1]) - @test Bc3[2] ≈ sin(B[2]) + sqrt(absA[2]) - - sqrtabsA = sqrt.(abs.(A)) - - @test sqrtabsA[1] ≈ sqrt(abs(A[1])) - @test sqrtabsA[2] ≈ sqrt(abs(A[2])) - - sqrtabsA = cos.(sin.(sqrt.(abs.(A)))) - - @test sqrtabsA[1] ≈ cos(sin(sqrt(abs(A[1])))) - @test sqrtabsA[2] ≈ cos(sin(sqrt(abs(A[2])))) - - # Not currently supported - #Ap = A .+ 3 - - #@test Ap[1] ≈ A[1] + 3 - #@test Ap[2] ≈ A[2] + 3 - - Apow1 = A .^ 2.0 - - @test Apow1[1] ≈ A[1]^2 - @test Apow1[2] ≈ A[2]^2 - - Apow2 = A .^ 3 - - @test Apow2[1] ≈ A[1]^3 - @test Apow2[2] ≈ A[2]^3 - - Ac = copy(A) - Ac .+= B .^ 2.0 - - @test Ac[1] ≈ A[1] + B[1]^2 - @test Ac[2] ≈ A[2] + B[2]^2 - - Ac = copy(A) - Ac .-= B .^ 2.0 - - @test Ac[1] ≈ A[1] - B[1]^2 - @test Ac[2] ≈ A[2] - B[2]^2 - - Ac = copy(A) - Ac .-= B .^ 3 - - @test Ac[1] ≈ A[1] - B[1]^3 - @test Ac[2] ≈ A[2] - B[2]^3 - end - - @testset "Hadamard product" begin - i = Index(2, "i") - A = random_itensor(i, i') - B = random_itensor(i', i) - - C = A ⊙ B - @test C[1, 1] ≈ A[1, 1] * B[1, 1] - @test C[1, 2] ≈ A[1, 2] * B[2, 1] - @test C[2, 1] ≈ A[2, 1] * B[1, 2] - @test C[2, 2] ≈ A[2, 2] * B[2, 2] - - Ac = copy(A) - Ac .= Ac .⊙ B - @test C[1, 1] ≈ A[1, 1] * B[1, 1] - @test C[1, 2] ≈ A[1, 2] * B[2, 1] - @test C[2, 1] ≈ A[2, 1] * B[1, 2] - @test C[2, 2] ≈ A[2, 2] * B[2, 2] - - D = random_itensor(i', Index(2)) - @test_throws ErrorException A ⊙ D - end -end diff --git a/test/base/test_combiner.jl b/test/base/test_combiner.jl deleted file mode 100644 index 9329b3183c..0000000000 --- a/test/base/test_combiner.jl +++ /dev/null @@ -1,263 +0,0 @@ -using ITensors, Test -using Combinatorics: permutations - -@testset "Combiner" begin - i = Index(2, "i") - j = Index(3, "j") - k = Index(4, "k") - l = Index(5, "l") - - A = random_itensor(i, j, k, l) - - @testset "Basic combiner properties" begin - C = combiner(i, j, k) - @test eltype(storage(C)) === Number - @test ITensors.data(C) isa NDTensors.NoData - @test NDTensors.uncombinedinds(NDTensors.tensor(C)) == (i, j, k) - C2 = copy(C) - @test eltype(storage(C2)) === Number - @test ITensors.data(C2) isa NDTensors.NoData - @test NDTensors.uncombinedinds(NDTensors.tensor(C2)) == (i, j, k) - end - - @testset "Empty combiner" begin - C = combiner() - @test order(C) == 0 - @test isnothing(combinedind(C)) - AC = A * C - @test A == AC - AC = C * A - @test A == AC - - R = ITensor(0.0, j, l, k, i) - R .= A .* C - @test R == A - - R = ITensor(j, l, k, i) - R .= A .* C - @test R == A - end - - @testset "Two index combiner" begin - for inds_ij in permutations([i, j]) - C = combiner(inds_ij...) 
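-      # For each ordering of (i, j): combining, contracting, and then
-      # contracting with the combiner again should round-trip back to A.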
- c = combinedind(C) - B = A * C - @test hasinds(B, l, k, c) - @test c == commonind(B, C) - @test combinedind(C) == c - @test isnothing(combinedind(A)) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - @test isnothing(combinedind(D)) - end - - for inds_il in permutations([i, l]) - C = combiner(inds_il...) - c = combinedind(C) - B = A * C - @test hasinds(B, j, k) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - end - - for inds_ik in permutations([i, k]) - C = combiner(inds_ik...) - c = combinedind(C) - B = A * C - @test hasinds(B, j, l) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - end - - for inds_jk in permutations([j, k]) - C = combiner(inds_jk...) - c = combinedind(C) - B = A * C - @test hasinds(B, i, l) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A - B = C * A - @test hasinds(B, i, l) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A - end - - for inds_jl in permutations([j, l]) - C = combiner(inds_jl...) - c = combinedind(C) - B = A * C - @test hasinds(B, i, k) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A - B = C * A - @test hasinds(B, i, k) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A - end - - for inds_kl in permutations([k, l]) - C = combiner(inds_kl...) - c = combinedind(C) - B = A * C - @test hasinds(B, i, j) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A - B = C * A - @test hasinds(B, i, j) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A - end - end - - @testset "Three index combiner" begin - for inds_ijl in permutations([i, j, l]) - C = combiner(inds_ijl...) - c = combinedind(C) - B = A * C - @test hasind(B, k) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A - B = C * A - @test hasind(B, k) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A - end - - for inds_ijk in permutations([i, j, k]) - C = combiner(inds_ijk...) - c = combinedind(C) - B = A * C - @test hasind(B, l) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A - B = C * A - @test hasind(B, l) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A - end - - for inds_jkl in permutations([j, k, l]) - C = combiner(inds_jkl...) 
- c = combinedind(C) - B = A * C - @test hasind(B, i) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A - B = C * A - @test hasind(B, i) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A - end - end - - @testset "SVD/Combiner should play nice" begin - C = combiner(i, j, k) - c = combinedind(C) - Ac = A * C - U, S, V, spec, u, v = svd(Ac, c) - Uc = C * U - Ua, Sa, Va, spec, ua, va = svd(A, i, j, k) - replaceind!(Ua, ua, u) - @test A ≈ C * Ac - @test A ≈ Ac * C - @test Ua * C ≈ U - @test C * Ua ≈ U - @test Ua ≈ Uc - @test Uc * S * V ≈ A - @test (C * Ua) * S * V ≈ Ac - C = combiner(i, j) - c = combinedind(C) - Ac = A * C - U, S, V, spec, u, v = svd(Ac, c) - Uc = U * C - Ua, Sa, Va, spec, ua, va = svd(A, i, j) - replaceind!(Ua, ua, u) - @test Ua ≈ Uc - @test Ua * C ≈ U - @test C * Ua ≈ U - @test Uc * S * V ≈ A - @test (C * Ua) * S * V ≈ Ac - end - - @testset "mult/Combiner should play nice" begin - C = combiner(i, j, k) - Ac = A * C - B = random_itensor(l) - AB = Ac * B - @test AB * C ≈ A * B - end - - @testset "Replace index combiner" begin - C = combiner(l; tags="nl") - c = combinedind(C) - B = A * C - replaceind!(B, c, l) - @test B == A - end -end diff --git a/test/base/test_contract.jl b/test/base/test_contract.jl deleted file mode 100644 index 47d3f64403..0000000000 --- a/test/base/test_contract.jl +++ /dev/null @@ -1,314 +0,0 @@ -using ITensors -using Test -using Combinatorics: Combinatorics - -digits(::Type{T}, i, j, k) where {T} = T(i * 10^2 + j * 10 + k) - -@testset "ITensor $T Contractions" for T in (Float64, ComplexF64) - mi, mj, mk, ml, mα = 2, 3, 4, 5, 6, 7 - i = Index(mi, "i") - j = Index(mj, "j") - k = Index(mk, "k") - l = Index(ml, "l") - α = Index(mα, "alpha") - @testset "Test contract ITensors" begin - A = random_itensor(T) - B = random_itensor(T) - Ai = random_itensor(T, i) - Bi = random_itensor(T, i) - Aj = random_itensor(T, j) - Aij = random_itensor(T, i, j) - Bij = random_itensor(T, i, j) - Aik = random_itensor(T, i, k) - Ajk = random_itensor(T, j, k) - Ajl = random_itensor(T, j, l) - Akl = random_itensor(T, k, l) - Aijk = random_itensor(T, i, j, k) - Ajkl = random_itensor(T, j, k, l) - Aikl = random_itensor(T, i, k, l) - Aklα = random_itensor(T, k, l, α) - Aijkl = random_itensor(T, i, j, k, l) - @testset "Test contract ITensor (Scalar*Scalar -> Scalar)" begin - C = A * B - @test scalar(C) ≈ scalar(A) * scalar(B) - end - @testset "Test contract ITensor (Scalar*Vector -> Vector)" begin - C = A * Ai - @test array(C) ≈ scalar(A) * array(Ai) - end - @testset "Test contract ITensor (Vector*Scalar -> Vector)" begin - C = Aj * A - @test array(C) ≈ scalar(A) * array(Aj) - end - @testset "Test contract ITensors (Vectorᵀ*Vector -> Scalar)" begin - C = Ai * Bi - CArray = transpose(array(Ai)) * array(Bi) - @test CArray ≈ scalar(C) - end - @testset "Test Matrix{ITensor} * Matrix{ITensor}" begin - M1 = [Aij Aij; Aij Aij] - M2 = [Ajk Ajk; Ajk Ajk] - M12 = M1 * M2 - for x in 1:2, y in 1:2 - @test M12[x, y] ≈ 2 * Aij * Ajk - end - end - @testset "Test contract ITensors (Vector*Vectorᵀ -> Matrix)" begin - C = Ai * Aj - for ii in 1:dim(i), jj in 1:dim(j) - @test C[i => ii, j => jj] ≈ Ai[i => ii] * Aj[j => jj] - end - end - @testset "Test contract ITensors (Matrix*Scalar -> Matrix)" begin - Aij = permute(Aij, i, j) - C = Aij * A - @test array(permute(C, i, j)) ≈ scalar(A) * array(Aij) - end - @testset "Test 
contract ITensors (Matrix*Vector -> Vector)" begin - Aij = permute(Aij, i, j) - C = Aij * Aj - CArray = array(permute(Aij, i, j)) * array(Aj) - @test CArray ≈ array(C) - end - @testset "Test contract ITensors (Matrixᵀ*Vector -> Vector)" begin - Aij = permute(Aij, j, i) - C = Aij * Aj - CArray = transpose(array(Aij)) * array(Aj) - @test CArray ≈ array(C) - end - @testset "Test contract ITensors (Vector*Matrix -> Vector)" begin - Aij = permute(Aij, i, j) - C = Ai * Aij - CArray = transpose(transpose(array(Ai)) * array(Aij)) - @test CArray ≈ array(C) - end - @testset "Test contract ITensors (Vector*Matrixᵀ -> Vector)" begin - Aij = permute(Aij, j, i) - C = Ai * Aij - CArray = transpose(transpose(array(Ai)) * transpose(array(Aij))) - @test CArray ≈ array(C) - end - @testset "Test contract ITensors (Matrix*Matrix -> Scalar)" begin - Aij = permute(Aij, i, j) - Bij = permute(Bij, i, j) - C = Aij * Bij - CArray = LinearAlgebra.tr(array(Aij) * transpose(array(Bij))) - @test CArray ≈ scalar(C) - end - @testset "Test contract ITensors (Matrix*Matrix -> Matrix)" begin - Aij = permute(Aij, i, j) - Ajk = permute(Ajk, j, k) - C = Aij * Ajk - CArray = array(Aij) * array(Ajk) - @test CArray ≈ array(C) - end - @testset "Test contract ITensors (Matrixᵀ*Matrix -> Matrix)" begin - Aij = permute(Aij, j, i) - Ajk = permute(Ajk, j, k) - C = Aij * Ajk - CArray = transpose(array(Aij)) * array(Ajk) - @test CArray ≈ array(C) - end - @testset "Test contract ITensors (Matrix*Matrixᵀ -> Matrix)" begin - Aij = permute(Aij, i, j) - Ajk = permute(Ajk, k, j) - C = Aij * Ajk - CArray = array(Aij) * transpose(array(Ajk)) - @test CArray ≈ array(C) - end - @testset "Test contract ITensors (Matrixᵀ*Matrixᵀ -> Matrix)" begin - Aij = permute(Aij, j, i) - Ajk = permute(Ajk, k, j) - C = Aij * Ajk - CArray = transpose(array(Aij)) * transpose(array(Ajk)) - @test CArray ≈ array(C) - end - @testset "Test contract ITensors (Matrix⊗Matrix -> 4-tensor)" begin - C = Aij * Akl - for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k), ll in 1:dim(l) - @test C[i => ii, j => jj, k => kk, l => ll] ≈ - Aij[i => ii, j => jj] * Akl[k => kk, l => ll] - end - end - @testset "Test contract ITensors (3-Tensor*Scalar -> 3-Tensor)" begin - Aijk = permute(Aijk, i, j, k) - C = Aijk * A - @test array(permute(C, i, j, k)) ≈ scalar(A) * array(Aijk) rtol = 1e-12 - end - @testset "Test contract ITensors (3-Tensor*Vector -> Matrix)" begin - Aijk = permute(Aijk, i, j, k) - C = Aijk * Ai - CArray = reshape( - reshape(array(permute(Aijk, j, k, i)), dim(j) * dim(k), dim(i)) * array(Ai), - dim(j), - dim(k), - ) - @test CArray ≈ array(permute(C, j, k)) - end - @testset "Test contract ITensors (Vector*3-Tensor -> Matrix)" begin - Aijk = permute(Aijk, i, j, k) - C = Aj * Aijk - CArray = reshape( - transpose(array(Aj)) * - reshape(array(permute(Aijk, j, i, k)), dim(j), dim(i) * dim(k)), - dim(i), - dim(k), - ) - @test CArray ≈ array(permute(C, i, k)) - end - @testset "Test contract ITensors (3-Tensor*Matrix -> Vector)" begin - Aijk = permute(Aijk, i, j, k) - Aik = permute(Aik, i, k) - C = Aijk * Aik - CArray = - reshape(array(permute(Aijk, j, i, k)), dim(j), dim(i) * dim(k)) * vec(array(Aik)) - @test CArray ≈ array(C) - end - @testset "Test contract ITensors (3-Tensor*Matrix -> 3-Tensor)" begin - Aijk = permute(Aijk, i, j, k) - Ajl = permute(Ajl, j, l) - C = Aijk * Ajl - CArray = reshape( - reshape(array(permute(Aijk, i, k, j)), dim(i) * dim(k), dim(j)) * array(Ajl), - dim(i), - dim(k), - dim(l), - ) - @test CArray ≈ array(permute(C, i, k, l)) - end - @testset "Test contract 
ITensors (Matrix*3-Tensor -> 3-Tensor)" begin - Aijk = permute(Aijk, i, j, k) - Akl = permute(Akl, k, l) - C = Akl * Aijk - CArray = reshape( - array(permute(Akl, l, k)) * - reshape(array(permute(Aijk, k, i, j)), dim(k), dim(i) * dim(j)), - dim(l), - dim(i), - dim(j), - ) - @test CArray ≈ array(permute(C, l, i, j)) - end - @testset "Test contract ITensors (3-Tensor*3-Tensor -> 3-Tensor)" begin - Aijk = permute(Aijk, i, j, k) - Ajkl = permute(Ajkl, j, k, l) - C = Aijk * Ajkl - CArray = - reshape(array(permute(Aijk, i, j, k)), dim(i), dim(j) * dim(k)) * - reshape(array(permute(Ajkl, j, k, l)), dim(j) * dim(k), dim(l)) - @test CArray ≈ array(permute(C, i, l)) - end - @testset "Test contract ITensors (3-Tensor*3-Tensor -> 3-Tensor)" begin - for inds_ijk in Combinatorics.permutations([i, j, k]), - inds_jkl in Combinatorics.permutations([j, k, l]) - - Aijk = permute(Aijk, inds_ijk...) - Ajkl = permute(Ajkl, inds_jkl...) - C = Ajkl * Aijk - CArray = - reshape(array(permute(Ajkl, l, j, k)), dim(l), dim(j) * dim(k)) * - reshape(array(permute(Aijk, j, k, i)), dim(j) * dim(k), dim(i)) - @test CArray ≈ array(permute(C, l, i)) - end - end - @testset "Test contract ITensors (4-Tensor*3-Tensor -> 1-Tensor)" begin - for inds_ijkl in Combinatorics.permutations([i, j, k, l]), - inds_jkl in Combinatorics.permutations([j, k, l]) - - Aijkl = permute(Aijkl, inds_ijkl...) - Ajkl = permute(Ajkl, inds_jkl...) - C = Ajkl * Aijkl - CArray = - reshape(array(permute(Ajkl, j, k, l)), 1, dim(j) * dim(k) * dim(l)) * - reshape(array(permute(Aijkl, j, k, l, i)), dim(j) * dim(k) * dim(l), dim(i)) - @test vec(CArray) ≈ array(permute(C, i)) - end - end - @testset "Test contract ITensors (4-Tensor*3-Tensor -> 3-Tensor)" begin - for inds_ijkl in Combinatorics.permutations([i, j, k, l]), - inds_klα in Combinatorics.permutations([k, l, α]) - - Aijkl = permute(Aijkl, inds_ijkl...) - Aklα = permute(Aklα, inds_klα...) 
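-      # The contraction over the shared (k, l) indices should be independent
-      # of the storage permutations chosen above.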
- C = Aklα * Aijkl - CArray = reshape( - reshape(array(permute(Aklα, α, k, l)), dim(α), dim(k) * dim(l)) * - reshape(array(permute(Aijkl, k, l, i, j)), dim(k) * dim(l), dim(i) * dim(j)), - dim(α), - dim(i), - dim(j), - ) - @test CArray ≈ array(permute(C, α, i, j)) - end - end - @testset "Test contract in-place ITensors (4-Tensor*Matrix -> 4-Tensor)" begin - A = random_itensor(T, (j, i)) - B = random_itensor(T, (j, k, l, α)) - C = ITensor(zero(T), (i, k, α, l)) - ITensors.contract!(C, B, A, 1.0, 0.0) - ITensors.contract!(C, B, A, 1.0, 1.0) - D = A * B - D .+= A * B - @test C ≈ D - end - end # End contraction testset -end - -@testset "Contraction conversions" begin - @testset "Real scalar * Complex ITensor" begin - i = Index(2, "i") - j = Index(2, "j") - x = rand(Float64) - A = random_itensor(ComplexF64, i, j) - B = x * A - for ii in 1:dim(i), jj in 1:dim(j) - @test B[i => ii, j => jj] == x * A[i => ii, j => jj] - end - end - @testset "Complex scalar * Real ITensor" begin - i = Index(2, "i") - j = Index(2, "j") - x = rand(ComplexF64) - A = random_itensor(Float64, i, j) - B = x * A - for ii in 1:dim(i), jj in 1:dim(j) - @test B[i => ii, j => jj] == x * A[i => ii, j => jj] - end - end - @testset "Real ITensor * Complex ITensor" begin - i = Index(2, "i") - j = Index(2, "j") - k = Index(2, "k") - A = random_itensor(Float64, i, j) - B = random_itensor(ComplexF64, j, k) - C = A * B - @test array(permute(C, i, k)) ≈ array(A) * array(B) - end - @testset "Complex ITensor * Real ITensor" begin - i = Index(2, "i") - j = Index(2, "j") - k = Index(2, "k") - A = random_itensor(ComplexF64, i, j) - B = random_itensor(Float64, j, k) - C = A * B - @test array(permute(C, i, k)) ≈ array(A) * array(B) - end - - @testset "Outer Product Real ITensor * Complex ITensor" begin - i = Index(2, "i") - j = Index(2, "j") - A = random_itensor(Float64, i) - B = random_itensor(ComplexF64, j) - C = A * B - @test array(permute(C, i, j)) ≈ kron(array(A), transpose(array(B))) - end - - @testset "Outer Product: Complex ITensor * Real ITensor" begin - i = Index(2, "i") - j = Index(2, "j") - A = random_itensor(ComplexF64, i) - B = random_itensor(Float64, j) - C = A * B - @test array(permute(C, i, j)) ≈ kron(array(A), transpose(array(B))) - end -end diff --git a/test/base/test_ctmrg.jl b/test/base/test_ctmrg.jl deleted file mode 100644 index 8d7819a329..0000000000 --- a/test/base/test_ctmrg.jl +++ /dev/null @@ -1,52 +0,0 @@ -using ITensors -using Test - -src_dir = joinpath(pkgdir(ITensors), "examples", "src") -include(joinpath(src_dir, "ctmrg_isotropic.jl")) -include(joinpath(src_dir, "2d_classical_ising.jl")) - -@testset "ctmrg" begin - # Make Ising model partition function - β = 1.1 * βc - d = 2 - s = Index(d, "Site") - sₕ = addtags(s, "horiz") - sᵥ = addtags(s, "vert") - - T = ising_mpo(sₕ, sᵥ, β) - - χ0 = 1 - l = Index(χ0, "Link") - lₕ = addtags(l, "horiz") - lᵥ = addtags(l, "vert") - - # Initial CTM - Cₗᵤ = ITensor(lᵥ, lₕ) - Cₗᵤ[1, 1] = 1.0 - - # Initial HRTM - Aₗ = ITensor(lᵥ, lᵥ', sₕ) - Aₗ[lᵥ => 1, lᵥ' => 1, sₕ => 1] = 1.0 - Aₗ[lᵥ => 1, lᵥ' => 1, sₕ => 2] = 0.0 - - Cₗᵤ, Aₗ = ctmrg(T, Cₗᵤ, Aₗ; χmax=20, nsteps=100) - - lᵥ = commonind(Cₗᵤ, Aₗ) - lₕ = noncommoninds(Cₗᵤ, Aₗ)[1] - - Aᵤ = replaceinds(Aₗ, lᵥ => lₕ, lᵥ' => lₕ', sₕ => sᵥ) - - ACₗ = Aₗ * Cₗᵤ * dag(Cₗᵤ') - - ACTₗ = prime(ACₗ * dag(Aᵤ') * T * Aᵤ, -1) - - κ = (ACTₗ * dag(ACₗ))[] - - @test κ ≈ exp(-β * ising_free_energy(β)) - - # Calculate magnetization - Tsz = ising_mpo(sₕ, sᵥ, β; sz=true) - ACTszₗ = prime(ACₗ * dag(Aᵤ') * Tsz * Aᵤ, -1) - m = (ACTszₗ * dag(ACₗ))[] / κ - @test
abs(m) ≈ ising_magnetization(β) -end diff --git a/test/base/test_debug_checks.jl b/test/base/test_debug_checks.jl deleted file mode 100644 index b0e6170cb7..0000000000 --- a/test/base/test_debug_checks.jl +++ /dev/null @@ -1,36 +0,0 @@ -using ITensors -using Test - -@testset "Test debug checks on IndexSet construction" begin - i = Index(2, "i") - - initially_using_debug_checks = ITensors.using_debug_checks() - - ITensors.disable_debug_checks() - @test !ITensors.using_debug_checks() - # Test that no error is thrown in constructor - @test ITensor(i, i) isa ITensor - @test ITensor(i, i') isa ITensor - - # Turn on debug checks - ITensors.enable_debug_checks() - @test ITensors.using_debug_checks() - @test_throws ErrorException ITensor(i, i) - # Test that no error is thrown in constructor - @test ITensor(i, i') isa ITensor - - # Turn off debug checks - ITensors.disable_debug_checks() - @test !ITensors.using_debug_checks() - # Test that no error is thrown in constructor - @test ITensor(i, i) isa ITensor - @test ITensor(i, i') isa ITensor - - # Reset to the initial value - if !initially_using_debug_checks - ITensors.disable_debug_checks() - else - ITensors.enable_debug_checks() - end - @test ITensors.using_debug_checks() == initially_using_debug_checks -end diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl deleted file mode 100644 index 82850668db..0000000000 --- a/test/base/test_decomp.jl +++ /dev/null @@ -1,503 +0,0 @@ -using ITensors, LinearAlgebra, Test -using ITensors.SiteTypes: siteinds - -# -# Decide if rank 2 tensor is upper triangular, i.e. all zeros below the diagonal. -# -function is_upper(At::NDTensors.Tensor)::Bool - nr, nc = dims(At) - dc = Base.max(0, dim(nr) - dim(nc)) #column offset for rectangular matrices. - nzeros = 0 - for i in CartesianIndices(At) - if i[1] > i[2] + dc - if abs(At[i]) > 0.0 #row>col is lower triangle - return false - else - nzeros += 1 - end - end - end - # - # Debug code: Make some noise if At is not a vector and we still found no zeros. - # - # if nzeros==0 && nr>1 && nc>1 - # @show nr nc dc At - # end - return true -end - -# -# A must be rank 2 -# -function is_upper(l::Index, A::ITensor, r::Index)::Bool - @assert length(inds(A)) == 2 - if inds(A) != IndexSet(l, r) - A = permute(A, l, r) - end - return is_upper(NDTensors.tensor(A)) -end - -# -# With left index specified -# -function is_upper(l::Index, A::ITensor)::Bool - other = noncommoninds(A, l) - if (length(other) == 1) - return is_upper(l, A, other[1]) - else - # use combiner to gather all the "other" indices into one. - C = combiner(other...)
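-    # Combining all of the non-l indices reduces A to a rank-2 tensor so
-    # that the matrix-level is_upper check above can be applied.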
- AC = A * C - return is_upper(combinedind(C), AC, r) - end -end -is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A) - -function diag_upper(l::Index, A::ITensor) - At = NDTensors.tensor(A * combiner(noncommoninds(A, l)...)) - if size(At) == (1,) - return At - end - @assert length(size(At)) == 2 - return diag(At) -end - -function diag_lower(l::Index, A::ITensor) - At = NDTensors.tensor(A * combiner(noncommoninds(A, l)...)) #render down to order 2 - if size(At) == (1,) - return At - end - @assert length(size(At)) == 2 - nr, nc = size(At) - dc = Base.max(0, nc - nr) #diag starts dc+1 columns out from the left - At1 = At[:, (dc + 1):nc] #chop out the first dc columns - return diag(At1) #now we can use the stock diag function. -end - -@testset "ITensor Decompositions" begin - @testset "truncate!" begin - a = [0.1, 0.01, 1e-13] - @test NDTensors.truncate!(a; use_absolute_cutoff=true, cutoff=1e-5) == - (1e-13, (0.01 + 1e-13) / 2) - @test length(a) == 2 - - # Negative definite spectrum treated by taking - # square (if singular values) or absolute values - a = [-0.12, -0.1] - @test NDTensors.truncate!(a) == (0.0, 0.0) - @test length(a) == 2 - - a = [-0.1, -0.01, -1e-13] - @test NDTensors.truncate!(a; use_absolute_cutoff=true, cutoff=1e-5) == - (1e-13, (0.01 + 1e-13) / 2) - @test length(a) == 2 - end - - @testset "factorize" begin - i = Index(2, "i") - j = Index(2, "j") - A = random_itensor(i, j) - @test_throws ErrorException factorize(A, i; ortho="fakedir") - end - - @testset "factorize with eigen_perturbation" begin - l = Index(4, "l") - s1 = Index(2, "s1") - s2 = Index(2, "s2") - r = Index(4, "r") - - phi = random_itensor(l, s1, s2, r) - - drho = random_itensor(l', s1', l, s1) - drho += swapprime(drho, 0, 1) - drho .*= 1E-5 - - U, B = factorize(phi, (l, s1); ortho="left", eigen_perturbation=drho) - @test norm(U * B - phi) < 1E-5 - - # Not allowed to use eigen_perturbation with which_decomp - # other than "automatic" or "eigen": - @test_throws ErrorException factorize( - phi, (l, s1); ortho="left", eigen_perturbation=drho, which_decomp="svd" - ) - end - - @testset "factorize with eigen_perturbation dimensions" begin - elt = Float64 - di = 10 - dj = 5 - maxdim = di - 1 - i = Index(di, "i") - j = Index(dj, "j") - a = random_itensor(elt, i, j) - δ = random_itensor(elt, i, j) - δ² = prime(δ, i) * dag(δ) - a² = prime(a, i) * dag(a) - x, y = factorize(a, i; ortho="left", which_decomp="eigen", maxdim) - l = commonind(x, y) - @test dim(l) == dj - xδ, yδ = factorize( - a, i; ortho="left", which_decomp="eigen", eigen_perturbation=δ², maxdim - ) - lδ = commonind(xδ, yδ) - @test dim(lδ) == maxdim - end - - @testset "QR/RQ/QL/LQ decomp on MPS dense $elt tensor with all possible collections on Q/R/L" for ninds in - [ - 0, 1, 2, 3 - ], - elt in (Float64, ComplexF64) - - l = Index(5, "l") - s = Index(2, "s") - r = Index(5, "r") - A = random_itensor(elt, l, s, r) - Ainds = inds(A) - Linds = Ainds[1:ninds] - Rinds = uniqueinds(A, Linds...) - Q, R, q = qr(A, Linds) #calling qr(A) triggers not supported error. - @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index. - @test length(inds(R)) == 3 - ninds + 1 - @test A ≈ Q * R atol = 1e-13 - @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - @test q == commonind(Q, R) - @test hastags(q, "Link,qr") - if (length(inds(R)) > 1) - @test is_upper(q, R) #specify the left index - end - Q1, R1, q1 = qr(A, Linds, Rinds; tags="Link,myqr") #make sure the same call with both L & R indices give the same answer.
- Q1 = replaceind(Q1, q1, q) - R1 = replaceind(R1, q1, q) - @test norm(Q - Q1) == 0.0 - @test norm(R - R1) == 0.0 - @test hastags(q1, "Link,myqr") - - R, Q, q = rq(A, Linds) - @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. - @test length(inds(Q)) == 3 - ninds + 1 - @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R - @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - @test q == commonind(Q, R) - @test hastags(q, "rq") - if (length(inds(R)) > 1) - @test is_upper(R, q) #specify the right index - end - R1, Q1, q1 = rq(A, Linds, Rinds; tags="Link,myrq") #make sure the same call with both L & R indices give the same answer. - Q1 = replaceind(Q1, q1, q) - R1 = replaceind(R1, q1, q) - @test norm(Q - Q1) == 0.0 - @test norm(R - R1) == 0.0 - @test hastags(q1, "myrq") - @test hastags(q1, "Link") - - L, Q, q = lq(A, Linds) - @test length(inds(L)) == ninds + 1 #+1 to account for new lq,Link index. - @test length(inds(Q)) == 3 - ninds + 1 - @test A ≈ Q * L atol = 1e-13 #With ITensors L*Q==Q*L - @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - @test q == commonind(Q, L) - @test hastags(q, "lq") - if (length(inds(L)) > 1) - @test is_lower(L, q) #specify the right index - end - L1, Q1, q1 = lq(A, Linds, Rinds; tags="Link,mylq") #make sure the same call with both L & R indices give the same answer. - Q1 = replaceind(Q1, q1, q) - L1 = replaceind(L1, q1, q) - @test norm(Q - Q1) == 0.0 - @test norm(L - L1) == 0.0 - @test hastags(q1, "mylq") - @test hastags(q1, "Link") - - Q, L, q = ql(A, Linds) - @test length(inds(Q)) == ninds + 1 #+1 to account for new lq,Link index. - @test length(inds(L)) == 3 - ninds + 1 - @test A ≈ Q * L atol = 1e-13 - @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - @test q == commonind(Q, L) - @test hastags(q, "ql") - if (length(inds(L)) > 1) - @test is_lower(q, L) #specify the right index - end - Q1, L1, q1 = ql(A, Linds, Rinds; tags="Link,myql") #make sure the same call with both L & R indices give the same answer. - Q1 = replaceind(Q1, q1, q) - L1 = replaceind(L1, q1, q) - @test norm(Q - Q1) == 0.0 - @test norm(L - L1) == 0.0 - @test hastags(q1, "myql") - @test hastags(q1, "Link") - end - - @testset "QR/RQ dense on MP0 tensor with all possible collections on Q,R" for ninds in [ - 0, 1, 2, 3, 4 - ] - l = Index(5, "l") - s = Index(2, "s") - r = Index(10, "r") - A = random_itensor(l, s, s', r) - Ainds = inds(A) - Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. - @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index. - @test length(inds(R)) == 4 - ninds + 1 - @test A ≈ Q * R atol = 1e-13 - @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - - R, Q, q = rq(A, Ainds[1:ninds]) - @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. 
- @test length(inds(Q)) == 4 - ninds + 1 - @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R - @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - end - - @testset "QR/RQ block sparse on MPS tensor with all possible collections on Q,R" for ninds in - [ - 0, 1, 2, 3 - ] - expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0)] - expected_RLflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", -0), QN()] - l = dag(Index(QN("Sz", 0) => 1, QN("Sz", 1) => 1, QN("Sz", -1) => 1; tags="l")) - s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s") - r = Index(QN("Sz", 0) => 1, QN("Sz", 1) => 1, QN("Sz", -1) => 1; tags="r") - A = random_itensor(l, s, r) - @test flux(A) == QN("Sz", 0) - Ainds = inds(A) - Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. - @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index. - @test length(inds(R)) == 3 - ninds + 1 - @test flux(Q) == expected_Qflux[ninds + 1] - @test flux(R) == expected_RLflux[ninds + 1] - @test A ≈ Q * R atol = 1e-13 - # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. - # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. - # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13 - @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - - Q, L, q = ql(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. - @test length(inds(L)) == 3 - ninds + 1 #+1 to account for new rq,Link index. - @test length(inds(Q)) == ninds + 1 - @test flux(Q) == expected_Qflux[ninds + 1] - @test flux(L) == expected_RLflux[ninds + 1] - @test A ≈ Q * L atol = 1e-13 - @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - - R, Q, q = rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. - @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index. - @test length(inds(Q)) == 3 - ninds + 1 - @test flux(Q) == expected_Qflux[ninds + 1] - @test flux(R) == expected_RLflux[ninds + 1] - @test A ≈ Q * R atol = 1e-13 - @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - - L, Q, q = lq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. - @test length(inds(L)) == ninds + 1 #+1 to account for new rq,Link index. - @test length(inds(Q)) == 3 - ninds + 1 - @test flux(Q) == expected_Qflux[ninds + 1] - @test flux(L) == expected_RLflux[ninds + 1] - @test A ≈ Q * L atol = 1e-13 - @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13 - end - - @testset "QR/QL block sparse on MPO tensor with all possible collections on Q,R" for ninds in - [ - 0, 1, 2, 3, 4 - ] - expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0)] - expected_RLflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN()] - l = dag(Index(QN("Sz", 0) => 3; tags="l")) - s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s") - r = Index(QN("Sz", 0) => 3; tags="r") - A = random_itensor(l, s, dag(s'), r) - @test flux(A) == QN("Sz", 0) - Ainds = inds(A) - Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error. - @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index. - @test length(inds(R)) == 4 - ninds + 1 - @test flux(Q) == expected_Qflux[ninds + 1] - @test flux(R) == expected_RLflux[ninds + 1] - @test A ≈ Q * R atol = 1e-13 - # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense. - # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead. 
-
-    Q, L, q = ql(A, Ainds[1:ninds]) #calling ql(A) with no index collection triggers a "not supported" error.
-    @test length(inds(Q)) == ninds + 1 #+1 to account for new ql,Link index.
-    @test length(inds(L)) == 4 - ninds + 1
-    @test flux(Q) == expected_Qflux[ninds + 1]
-    @test flux(L) == expected_RLflux[ninds + 1]
-    @test A ≈ Q * L atol = 1e-13
-    @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
-  end
-
-  @testset "QR/QL/RQ/LQ dense with positive R" for ninds in [0, 1, 2, 3]
-    l = Index(3, "l")
-    s = Index(5, "s")
-    r = Index(7, "r")
-    A = random_itensor(l, s, s', r)
-    Ainds = inds(A)
-
-    Q, R, q = qr(A, Ainds[1:ninds]; positive=true)
-    @test min(diag_upper(q, R)...) > 0.0
-    @test A ≈ Q * R atol = 1e-13
-    @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-    Q, L, q = ql(A, Ainds[1:ninds]; positive=true)
-    @test min(diag_lower(q, L)...) > 0.0
-    @test A ≈ Q * L atol = 1e-13
-    @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-
-    R, Q, q = rq(A, Ainds[1:ninds]; positive=true)
-    @test min(diag_lower(q, R)...) > 0.0 #the transpose of R is lower triangular
-    @test A ≈ Q * R atol = 1e-13
-    @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-    L, Q, q = lq(A, Ainds[1:ninds]; positive=true)
-    @test min(diag_upper(q, L)...) > 0.0 #the transpose of L is upper triangular
-    @test A ≈ Q * L atol = 1e-13
-    @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-  end
-
-  @testset "QR/QL block sparse with positive R" begin
-    l = dag(Index(QN("Sz", 0) => 3; tags="l"))
-    s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s")
-    r = Index(QN("Sz", 0) => 3; tags="r")
-    A = random_itensor(l, s, dag(s'), r)
-    Q, R, q = qr(A, l, s, dag(s'); positive=true)
-    @test min(diag(R)...) > 0.0
-    @test A ≈ Q * R atol = 1e-13
-    Q, L, q = ql(A, l, s, dag(s'); positive=true)
-    @test min(diag(L)...) 
> 0.0 - @test A ≈ Q * L atol = 1e-13 - end - - @testset "factorize with QR" begin - l = Index(5, "l") - s = Index(2, "s") - r = Index(10, "r") - A = random_itensor(l, s, r) - Q, R, = factorize(A, l, s; which_decomp="qr") - q = commonind(Q, R) - @test A ≈ Q * R atol = 1e-13 - @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - - R, Q, = factorize(A, l, s; which_decomp="qr", ortho="right") - q = commonind(Q, R) - @test A ≈ Q * R atol = 1e-13 - @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13 - end - - @testset "eigen" begin - i = Index(2, "i") - j = Index(2, "j") - A = random_itensor(i, i') - eigA = eigen(A) - Dt, Ut = eigen(NDTensors.tensor(A)) - eigArr = eigen(array(A)) - @test diag(array(eigA.D), 0) ≈ eigArr.values - @test diag(array(Dt), 0) == eigArr.values - - @test_throws ArgumentError eigen(ITensor(NaN, i', i)) - @test_throws ArgumentError eigen(ITensor(NaN, i', i); ishermitian=true) - @test_throws ArgumentError eigen(ITensor(complex(NaN), i', i)) - @test_throws ArgumentError eigen(ITensor(complex(NaN), i', i); ishermitian=true) - @test_throws ArgumentError eigen(ITensor(Inf, i', i)) - @test_throws ArgumentError eigen(ITensor(Inf, i', i); ishermitian=true) - @test_throws ArgumentError eigen(ITensor(complex(Inf), i', i)) - @test_throws ArgumentError eigen(ITensor(complex(Inf), i', i); ishermitian=true) - end - - @testset "exp function" begin - At = rand(10, 10) - k = Index(10, "k") - A = itensor(At + transpose(At), k, k') - @test Array(exp(Hermitian(NDTensors.tensor(A)))) ≈ exp(At + transpose(At)) - end - - @testset "Spectrum" begin - i = Index(100, "i") - j = Index(100, "j") - - U, S, V = svd(rand(100, 100)) - S ./= norm(S) - A = itensor(U * ITensors.diagm(0 => S) * V', i, j) - - spec = svd(A, i).spec - - @test eigs(spec) ≈ S .^ 2 - @test truncerror(spec) == 0.0 - - spec = svd(A, i; maxdim=length(S) - 3).spec - @test truncerror(spec) ≈ sum(S[(end - 2):end] .^ 2) - - @test entropy(Spectrum([0.5; 0.5], 0.0)) == log(2) - @test entropy(Spectrum([1.0], 0.0)) == 0.0 - @test entropy(Spectrum([0.0], 0.0)) == 0.0 - - @test isnothing(eigs(Spectrum(nothing, 1.0))) - @test_throws ErrorException entropy(Spectrum(nothing, 1.0)) - @test truncerror(Spectrum(nothing, 1.0)) == 1.0 - end - - @testset "Eigen QN flux regression test" begin - cutoff = 1E-12 - N = 4 - s = siteinds("S=1", N; conserve_qns=true) - A = random_itensor(QN("Sz", 2), s[1], s[2], s[3]) - - R = A * dag(prime(A, s[1], s[2])) - F = eigen(R, (s[1], s[2]), (s[1]', s[2]')) - - @test flux(F.Vt) == QN("Sz", 0) - end - - @testset "SVD block_mindim keyword" begin - i = Index( - [ - QN("Sz", 4) => 1, - QN("Sz", 2) => 4, - QN("Sz", 0) => 6, - QN("Sz", -2) => 4, - QN("Sz", -4) => 1, - ], - "i", - ) - j = sim(i) - X = random_itensor(QN("Sz", 0), i, j) - - min_blockdim = 2 - U, S, V = svd(X, i; cutoff=1E-1, min_blockdim) - u = commonind(S, U) - - @test nblocks(u) == nblocks(i) - for b in 1:nblocks(u) - @test blockdim(u, b) == blockdim(i, b) || blockdim(u, b) >= min_blockdim - end - end - - @testset "factorize with mindim" begin - l = Index(8, "l") - s1 = Index(2, "s1") - s2 = Index(2, "s2") - r = Index(2, "r") - - phi = random_itensor(l, s1, s2, r) - - U, B = factorize(phi, (l, s1); ortho="left", mindim=8, which_decomp="eigen") - - @test norm(U * B - phi) < 1E-5 - @test dim(commonind(U, B)) <= 4 - end -end diff --git a/test/base/test_diagitensor.jl b/test/base/test_diagitensor.jl deleted file mode 100644 index 92c031fa23..0000000000 --- a/test/base/test_diagitensor.jl +++ /dev/null @@ -1,562 +0,0 @@ -using ITensors -using 
ITensors.NDTensors
-using LinearAlgebra
-using Test
-
-@testset "diag_itensor" begin
-  d = 3
-  i = Index(d, "i")
-  j = Index(d, "j")
-  k = Index(d, "k")
-  l = Index(d, "l")
-  m = Index(d, "m")
-  n = Index(d, "n")
-  o = Index(d, "o")
-  p = Index(d, "p")
-  q = Index(d, "q")
-
-  v = collect(1:d)
-  vr = randn(d)
-
-  @testset "non-uniform diagonal values" begin
-    @testset "diag_itensor constructor (no vector, order 2)" begin
-      D = diag_itensor(i, j)
-
-      @test eltype(D) == Float64
-      # a freshly constructed diag_itensor is zero everywhere, on and off the diagonal
-      for ii in 1:d, jj in 1:d
-        @test D[i => ii, j => jj] == 0.0
-      end
-    end
-
-    @testset "diag_itensor constructor (no vector, order 3)" begin
-      D = diag_itensor(i, j, k)
-
-      @test eltype(D) == Float64
-      for ii in 1:d, jj in 1:d, kk in 1:d
-        @test D[i => ii, j => jj, k => kk] == 0.0
-      end
-    end
-
-    @testset "diag_itensor constructor (no vector, complex)" begin
-      D = diag_itensor(ComplexF64, i, j)
-
-      @test eltype(D) == ComplexF64
-      for ii in 1:d, jj in 1:d
-        @test D[i => ii, j => jj] == complex(0.0)
-      end
-    end
-
-    @testset "diag" for ElType in (Float64, ComplexF64)
-      A = diag_itensor(randn(ElType, d), i, j)
-      dA = diag(A)
-      @test dA isa DenseTensor{ElType,1}
-      @test dA[1] == A[1, 1]
-      @test dA[2] == A[2, 2]
-    end
-
-    @testset "diag_itensor constructor (vector, order 2)" begin
-      D = diag_itensor(v, i, j)
-
-      @test eltype(D) == Float64
-      for ii in 1:d, jj in 1:d
-        if ii == jj
-          @test D[i => ii, j => jj] == v[ii]
-        else
-          @test D[i => ii, j => jj] == 0.0
-        end
-      end
-    end
-
-    @testset "diag_itensor constructor (vector, order 3)" begin
-      D = diag_itensor(v, i, j, k)
-
-      @test eltype(D) == Float64
-      for ii in 1:d, jj in 1:d, kk in 1:d
-        if ii == jj == kk
-          @test D[i => ii, j => jj, k => kk] == v[ii]
-        else
-          @test D[i => ii, j => jj, k => kk] == 0.0
-        end
-      end
-    end
-
-    @testset "diag_itensor constructor (complex)" begin
-      vc = v + im * v
-      D = diag_itensor(vc, i, j, k)
-
-      @test eltype(D) == ComplexF64
-      for ii in 1:d, jj in 1:d, kk in 1:d
-        if ii == jj == kk
-          @test D[i => ii, j => jj, k => kk] == vc[ii]
-        else
-          @test D[i => ii, j => jj, k => kk] == complex(0.0)
-        end
-      end
-    end
-
-    @testset "reductions (sum, prod)" for elt in (
-      Float32, Float64, Complex{Float32}, Complex{Float64}
-    )
-      a = diag_itensor(randn(elt, 2), Index(2), Index(2))
-      @test sum(a) ≈ sum(array(a))
-      @test sum(a) isa elt
-      @test prod(a) ≈ prod(array(a))
-      @test prod(a) isa elt
-
-      a = diag_itensor(randn(elt, 1), Index(1), Index(1))
-      @test sum(a) ≈ sum(array(a))
-      @test sum(a) isa elt
-      @test prod(a) ≈ prod(array(a))
-      @test prod(a) isa elt
-    end
-
-    @testset "Complex operations" begin
-      xr = randn(d)
-      xi = randn(d)
-      D = diag_itensor(xr + im * xi, i, j, k)
-      @test eltype(D) == ComplexF64
-      rD = real(D)
-      iD = imag(D)
-      @test eltype(rD) == Float64
-      @test eltype(iD) == Float64
-      @test typeof(storage(rD)) <: NDTensors.Diag
-      @test norm(rD + im * iD - D) < 1E-8
-    end
-
-    @testset "Constructor AllowAlias/NeverAlias" begin
-      vv = ones(d)
-      D = diag_itensor(vv, i, j)
-      @test eltype(D) === Float64
-      D[1, 1] = 5.0
-      @test vv[1] == 1.0
-      @test vv[1] != D[1, 1]
-
-      vv = ones(Int, d)
-      D = diag_itensor(vv, i, j)
-      @test eltype(D) === Float64
-      D[1, 1] = 5.0
-      @test vv[1] == 1.0
-      @test vv[1] != D[1, 1]
-
-      vv = ones(Int, d)
-      D = diag_itensor(Int, vv, i, j)
-      @test eltype(D) === Int
-      D[1, 1] = 5
-      @test vv[1] == 1
-      @test vv[1] != D[1, 1]
-
-      vv = 
ones(d) - D = diagitensor(vv, i, j) - @test eltype(D) === Float64 - D[1, 1] = 5.0 - @test vv[1] == 5.0 - @test vv[1] == D[1, 1] - - vv = ones(Int, d) - D = diagitensor(vv, i, j) - @test eltype(D) === Float64 - D[1, 1] = 5.0 - @test vv[1] == 1.0 - @test vv[1] != D[1, 1] - - vv = ones(Int, d) - D = diagitensor(Int, vv, i, j) - @test eltype(D) === Int - D[1, 1] = 5 - @test vv[1] == 5 - @test vv[1] == D[1, 1] - - D = diag_itensor(1, i, j) - @test eltype(D) === Float64 - D[1, 1] = 5 - @test D[1, 1] == 5 - - D = diag_itensor(Int, 1, i, j) - @test eltype(D) === Int - D[1, 1] = 5 - @test D[1, 1] == 5 - end - - @testset "fill!" begin - D = diag_itensor(ones(d), i, j, k) - D = fill!(D, 2.0) - for ii in 1:d - @test D[i => ii, j => ii, k => ii] == 2.0 - end - - @test eltype(D) == Float64 - end - - @testset "Set elements" begin - D = diag_itensor(i, j, k) - - for ii in 1:d - D[i => ii, j => ii, k => ii] = ii - end - - @test eltype(D) == Float64 - for ii in 1:d, jj in 1:d, kk in 1:d - if ii == jj == kk - @test D[i => ii, j => jj, k => kk] == ii - else - @test D[i => ii, j => jj, k => kk] == 0.0 - end - end - - # Can't set off-diagonal elements - @test_throws ErrorException D[i => 2, j => 1, k => 1] = 0.0 - @test_throws ErrorException D[i => 1, j => 2, k => 1] = 0.0 - end - - @testset "Convert diag to dense" begin - D = diag_itensor(v, i, j, k) - T = dense(D) - - @test storage(T) isa NDTensors.Dense{Float64} - for ii in 1:d, jj in 1:d, kk in 1:d - if ii == jj == kk - @test T[ii, ii, ii] == ii - else - @test T[i => ii, j => jj, k => kk] == 0.0 - end - end - end - - @testset "Convert diag to dense with denseblocks" begin - D = diag_itensor(v, i, j, k) - T = denseblocks(D) - - @test storage(T) isa NDTensors.Dense{Float64} - for ii in 1:d, jj in 1:d, kk in 1:d - if ii == jj == kk - @test T[ii, ii, ii] == ii - else - @test T[i => ii, j => jj, k => kk] == 0.0 - end - end - end - - @testset "Add (Diag + Diag)" begin - v1 = randn(d) - v2 = randn(d) - D1 = diag_itensor(v1, i, j, k) - D2 = diag_itensor(v2, k, i, j) - - v3 = v1 + v2 - D3 = D1 + D2 - - @test D3 ≈ dense(D1) + dense(D2) - for ii in 1:d - @test D3[ii, ii, ii] == v3[ii] - end - end - - @testset "Add ( number * Diag + Diag)" begin - v1 = randn(d) - v2 = randn(d) - D1 = Float32(2.0) * diag_itensor(v1, i, j, k) - D2 = diag_itensor(v2, k, i, j) - - v3 = 2 * v1 + v2 - D3 = D1 + D2 - - @test D3 ≈ dense(D1) + dense(D2) - for ii in 1:d - @test D3[ii, ii, ii] == v3[ii] - end - end - - @testset "Add (Diag uniform + Diag uniform)" begin - D1 = δ(i, j, k) - D2 = δ(k, i, j) - - D3 = D1 + D2 - - @test D3 ≈ dense(D1) + dense(D2) - end - - @testset "Add (Diag + Dense)" begin - D = diag_itensor(vr, i, j, k) - A = random_itensor(k, j, i) - - R = D + A - - @test R ≈ dense(D) + A - for ii in 1:d - @test R[ii, ii, ii] ≈ D[ii, ii, ii] + A[ii, ii, ii] - end - end - - @testset "Add (Dense + Diag)" begin - D = diag_itensor(vr, i, j, k) - A = random_itensor(i, k, j) - - R = A + D - - @test R ≈ dense(D) + A - for ii in 1:d - @test R[ii, ii, ii] ≈ D[ii, ii, ii] + A[ii, ii, ii] - end - end - - @testset "Contraction (all contracted)" begin - D = diag_itensor(v, i, j, k) - A = random_itensor(j, k, i) - - @test D * A ≈ dense(D) * A - @test A * D ≈ dense(D) * A - end - - @testset "Contraction (all contracted) with different types" begin - D = diag_itensor(v, i, j, k) - A = random_itensor(Float32, j, k, i) - - @test D * A ≈ dense(D) * A - @test A * D ≈ dense(D) * A - - D = diag_itensor(v, i, j, k) - A = random_itensor(ComplexF32, j, k, i) - - @test D * A ≈ dense(D) * A - @test A * D ≈ 
dense(D) * A - end - - @testset "Contraction (all dense contracted)" begin - D = diag_itensor(v, j, k, i) - A = random_itensor(i, j) - - @test D * A ≈ dense(D) * A - @test A * D ≈ dense(D) * A - end - - @testset "Contraction Diag*Dense (general)" begin - D = diag_itensor(v, l, i, k, j) - A = random_itensor(m, k, n, l) - - @test D * A ≈ dense(D) * A - @test A * D ≈ dense(D) * A - end - - @testset "Contraction Diag*Dense (outer)" begin - D = diag_itensor(v, l, i, k, j) - A = random_itensor(m, n) - - @test order(D * A) == 6 - @test D * A ≈ dense(D) * A - end - - @testset "Contraction Diag*Diag (outer)" begin - D1 = diag_itensor(v, l, i) - D2 = diag_itensor(v, m, n) - - @test order(D1 * D2) == 4 - @test D1 * D2 ≈ dense(D1) * dense(D2) - end - - @testset "Contraction Diag*Diag (all contracted)" begin - D1 = diag_itensor(v, l, i, k, j) - D2 = diag_itensor(vr, j, l, i, k) - - @test D1 * D2 ≈ dense(D1) * dense(D2) - @test D2 * D1 ≈ dense(D1) * dense(D2) - end - - @testset "Contraction Diag*Diag (general)" begin - D1 = diag_itensor(v, l, i, k, j) - D2 = diag_itensor(vr, m, k, n, l) - - @test D1 * D2 ≈ dense(D1) * dense(D2) - @test D2 * D1 ≈ dense(D1) * dense(D2) - end - - @testset "Contraction Diag*Diag (no contracted)" begin - D1 = diag_itensor(v, i, j) - D2 = diag_itensor(vr, k, l) - - @test D1 * D2 ≈ dense(D1) * dense(D2) - @test D2 * D1 ≈ dense(D1) * dense(D2) - end - - @testset "Contraction Diag*Scalar" begin - D = diag_itensor(v, i, j) - x = 2.0 - - @test x * D ≈ x * dense(D) - @test D * x ≈ x * dense(D) - - xc = 2 + 3im - - @test xc * D ≈ xc * dense(D) - @test D * xc ≈ xc * dense(D) - end - end - - @testset "Uniform diagonal ITensor" begin - @testset "delta constructor (order 2)" begin - D = δ(i, j) - - @test eltype(D) == Float64 - for ii in 1:d, jj in 1:d - if ii == jj - @test D[i => ii, j => jj] == 1.0 - else - @test D[i => ii, j => jj] == 0.0 - end - end - end - - @testset "delta constructor (order 3)" begin - D = δ(i, j, k) - - @test eltype(D) == Float64 - for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k) - if ii == jj == kk - @test D[i => ii, j => jj, k => kk] == 1.0 - else - @test D[i => ii, j => jj, k => kk] == 0.0 - end - end - end - - @testset "Set elements" begin - D = δ(i, j, k) - - @test eltype(D) == Float64 - - # Can't set elements of uniform diag tensor - # TODO: should we make a function that converts - # to a version that can? 
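As for the TODO above: the existing escape hatch is conversion, since `dense` produces ordinary settable storage (the `@test_throws` checks that follow pin down the current read-only behavior). A sketch, using only calls exercised elsewhere in this file:

using ITensors, Test
i, j = Index(3, "i"), Index(3, "j")
D = δ(i, j) # uniform diagonal storage; its elements are read-only
T = dense(D) # convert to Dense storage
T[i => 1, j => 2] = 2.0 # now any entry can be set, including off-diagonal ones
@test T[i => 1, j => 2] == 2.0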
- @test_throws ErrorException D[i => 1, j => 1, k => 1] = 2.0 - @test_throws ErrorException D[i => 2, j => 1, k => 1] = 4.3 - @test_throws ErrorException D[i => 1, j => 2, k => 1] = 2 - end - - @testset "Convert diag uniform to dense" begin - D = δ(i, j, k) - T = dense(D) - - @test storage(T) isa NDTensors.Dense{Float64} - for ii in 1:d, jj in 1:d, kk in 1:d - if ii == jj == kk - @test T[ii, ii, ii] == 1.0 - else - @test T[i => ii, j => jj, k => kk] == 0.0 - end - end - end - - @testset "Add (Diag uniform + Dense)" begin - D = δ(i, j, k) - A = random_itensor(k, j, i) - - R = D + A - - @test R ≈ dense(D) + A - for ii in 1:d - @test R[ii, ii, ii] ≈ D[ii, ii, ii] + A[ii, ii, ii] - end - end - - @testset "Contraction (Diag uniform * Dense, all contracted)" begin - D = δ(i, j, k) - A = random_itensor(j, k, i) - - @test D * A ≈ dense(D) * A - @test A * D ≈ dense(D) * A - end - - @testset "Contraction (Diag uniform * Dense, all dense contracted)" begin - D = δ(j, k, i) - A = random_itensor(i, j) - - @test D * A ≈ dense(D) * A - @test A * D ≈ dense(D) * A - end - - @testset "Contraction (Diag uniform * Dense, general)" begin - D = δ(l, i, k, j) - A = random_itensor(m, k, n, l) - - @test D * A ≈ dense(D) * A - @test A * D ≈ dense(D) * A - end - - @testset "Contraction with different bond dimensions" begin - idim = 3 - mdim = 2 - - i = Index(idim, "i") - m = Index(mdim, "m") - - A = random_itensor(i, i', m) - D = δ(i, i') - - @test D * A ≈ dense(D) * A - @test A * D ≈ dense(D) * A - end - - @testset "Contraction (Diag uniform * Dense, replace index)" begin - D = δ(i, k) - A = random_itensor(m, k, n, l) - - @test D * A ≈ dense(D) * A - @test A * D ≈ dense(D) * A - end - - @testset "Contraction (Diag uniform * Dense, replace index 2)" begin - D = δ(k, i) - A = random_itensor(m, n, k, l) - - @test D * A ≈ dense(D) * A - @test A * D ≈ dense(D) * A - end - - @testset "Contraction (Diag uniform * Diag uniform, all contracted)" begin - D1 = δ(l, i, k, j) - D2 = δ(j, l, i, k) - - @test D1 * D2 ≈ dense(D1) * dense(D2) - @test D2 * D1 ≈ dense(D1) * dense(D2) - end - - @testset "Contraction (Diag uniform * Diag uniform, general)" begin - D1 = δ(l, i, k, j) - D2 = δ(m, k, n, l) - - @test D1 * D2 ≈ dense(D1) * dense(D2) - @test D2 * D1 ≈ dense(D1) * dense(D2) - end - - @testset "Contraction (Diag uniform * Diag uniform, no contracted)" begin - D1 = δ(i, j) - D2 = δ(k, l) - - @test D1 * D2 ≈ dense(D1) * dense(D2) - @test D2 * D1 ≈ dense(D1) * dense(D2) - end - - @testset "Rectangular Diag * Dense regression test (#969)" begin - i = Index(3) - j = Index(2) - A = random_itensor(i) - B = delta(i, j) - C = A * B - @test hassameinds(C, j) - for n in 1:dim(j) - @test C[n] == A[n] - end - end - end -end diff --git a/test/base/test_empty.jl b/test/base/test_empty.jl deleted file mode 100644 index ef272b7678..0000000000 --- a/test/base/test_empty.jl +++ /dev/null @@ -1,81 +0,0 @@ -using ITensors -using ITensors.NDTensors -using Test - -@testset "ITensor (Empty)" begin - @testset "ITensor set elements" begin - i = Index(2; tags="i") - - E = ITensor(i', dag(i)) - - @test conj(E) == E - @test 1.2 * E == E - - @test hassameinds(E, (i', i)) - @test order(E) == 2 - @test E[i' => 1, i => 1] == 0 - - E[i' => 1, i => 2] = 2.3 - - @test E[i' => 1, i => 1] == 0 - @test E[i' => 2, i => 1] == 0 - @test E[i' => 1, i => 2] == 2.3 - @test E[i' => 2, i => 2] == 0 - end - - @testset "ITensor (Empty) convert to complex" begin - i = Index(2; tags="i") - E = ITensor(i', dag(i)) - @test eltype(E) == NDTensors.EmptyNumber - - Ec = complex(E) - 
@test eltype(Ec) == Complex{NDTensors.EmptyNumber} - Ec[1, 1] = 2.3 - @test eltype(Ec) == ComplexF64 - - Ec = complex(E) - @test eltype(Ec) == Complex{NDTensors.EmptyNumber} - Ec[1, 1] = 2.3f0 - @test eltype(Ec) == ComplexF32 - - E2 = copy(E) - E2c = complex!(E2) - @test eltype(E2c) == Complex{NDTensors.EmptyNumber} - end - - @testset "ITensor set elements (QN)" begin - i = Index(QN(0) => 2, QN(1) => 2; tags="i") - - E = ITensor(i', dag(i)) - - @test hassameinds(E, (i', i)) - @test order(E) == 2 - @test isnothing(flux(E)) - @test E[i' => 1, i => 3] == 0 - - E[i' => 3, i => 2] = 2.3 - - @test flux(E) == QN(1) - - @test E[i' => 1, i => 1] == 0 - @test E[i' => 2, i => 1] == 0 - @test E[i' => 3, i => 2] == 2.3 - @test E[i' => 2, i => 3] == 0 - @test_throws ErrorException E[i' => 2, i => 3] = 3.2 - end - - @testset "ITensor()" begin - i = Index(QN(0) => 2, QN(1) => 2; tags="i") - - E = ITensor() - - @test isnothing(flux(E)) - @test order(E) == 0 - @test_throws MethodError E[i' => 1, i => 3] = 0 - - A = random_itensor(i', dag(i)) - E += A - - @test norm(E - A) < 1E-8 - end -end diff --git a/test/base/test_emptyitensor.jl b/test/base/test_emptyitensor.jl deleted file mode 100644 index b53c22d47d..0000000000 --- a/test/base/test_emptyitensor.jl +++ /dev/null @@ -1,103 +0,0 @@ -using ITensors -using Test - -@testset "Empty ITensor storage operations" begin - i, j, k = Index.(2, ("i", "j", "k")) - - A = ITensor(i, j) - B = ITensor(j, k) - - @test norm(A) == 0.0 - @test norm(B) == 0.0 - - C = A * B - @test hassameinds(C, (i, k)) - @test storage(C) isa - ITensors.EmptyStorage{ITensors.EmptyNumber,<:ITensors.Dense{ITensors.EmptyNumber}} - - A = ITensor(Float64, i, j) - B = ITensor(j, k) - C = A * B - @test hassameinds(C, (i, k)) - @test storage(C) isa ITensors.EmptyStorage{Float64,<:ITensors.Dense{Float64}} - - A = ITensor(i, j) - B = ITensor(ComplexF64, j, k) - - @test norm(A) == 0.0 - @test norm(B) == 0.0 - @test norm(B) isa Float64 - - C = A * B - @test hassameinds(C, (i, k)) - @test storage(C) isa ITensors.EmptyStorage{ComplexF64,<:ITensors.Dense{ComplexF64}} - - A = ITensor(Float64, i, j) - B = ITensor(ComplexF64, j, k) - C = A * B - @test hassameinds(C, (i, k)) - @test storage(C) isa ITensors.EmptyStorage{ComplexF64,<:ITensors.Dense{ComplexF64}} -end - -@testset "Empty ITensor storage addition" begin - i, j = Index.((2, 3)) - - A = ITensor(i, j) - B = random_itensor(j, i) - - C = A + B - @test inds(C) == (i, j) - @test C ≈ B - - C = B + A - @test inds(C) == (j, i) - @test C ≈ B -end - -@testset "Empty QN ITensor storage operations" begin - i = Index([QN(0) => 1, QN(1) => 1]) - A = ITensor(i', dag(i)) - - @test storage(A) isa ITensors.EmptyStorage{ - ITensors.EmptyNumber,<:ITensors.BlockSparse{ITensors.EmptyNumber} - } - - C = A' * A - - @test hassameinds(C, (i'', i)) - @test storage(C) isa ITensors.EmptyStorage{ - ITensors.EmptyNumber,<:ITensors.BlockSparse{ITensors.EmptyNumber} - } - - B = random_itensor(dag(i), i') - - C = A' * B - - @test hassameinds(C, (i'', i)) - @test storage(C) isa ITensors.EmptyStorage{Float64,<:ITensors.BlockSparse{Float64}} - - C = B' * A - - @test hassameinds(C, (i'', i)) - @test storage(C) isa ITensors.EmptyStorage{Float64,<:ITensors.BlockSparse{Float64}} - - C = B + A - @test inds(C) == inds(B) - @test C ≈ B - - C = A + B - @test inds(C) == inds(A) - @test C ≈ B -end - -@testset "blockoffsets" for space in (2, [QN(0) => 1, QN(1) => 1]) - i = Index(space) - A = ITensor(i', dag(i)) - @test blockoffsets(A) == NDTensors.BlockOffsets{2}() -end - -@testset "zero" for space 
in (2, [QN(0) => 1, QN(1) => 1]) - i = Index(space) - A = ITensor(i', dag(i)) - @test NDTensors.tensor(zero(A)) isa typeof(NDTensors.tensor(A)) -end diff --git a/test/base/test_examples.jl b/test/base/test_examples.jl deleted file mode 100644 index f1db458ace..0000000000 --- a/test/base/test_examples.jl +++ /dev/null @@ -1,14 +0,0 @@ -@eval module $(gensym()) -using ITensors: ITensors -using Suppressor: @capture_out -using Test: @test_nowarn, @testset -@testset "Example Codes" begin - @testset "Basic Ops $filename" for filename in ["basic_ops.jl", "qn_itensors.jl"] - @test_nowarn begin - @capture_out begin - include(joinpath(pkgdir(ITensors), "examples", "basic_ops", filename)) - end - end - end -end -end diff --git a/test/base/test_exports.jl b/test/base/test_exports.jl deleted file mode 100644 index 0072b0b9d2..0000000000 --- a/test/base/test_exports.jl +++ /dev/null @@ -1,11 +0,0 @@ -@eval module $(gensym()) -using ITensors: ITensors -using Test: @test, @testset -include("utils/TestITensorsExportedNames/TestITensorsExportedNames.jl") -using .TestITensorsExportedNames: ITENSORS_EXPORTED_NAMES -@testset "Test exports of ITensors" begin - # @show setdiff(names(ITensors), ITENSORS_EXPORTED_NAMES) - # @show setdiff(ITENSORS_EXPORTED_NAMES, names(ITensors)) - @test issetequal(names(ITensors), ITENSORS_EXPORTED_NAMES) -end -end diff --git a/test/base/test_fermions.jl b/test/base/test_fermions.jl deleted file mode 100644 index c70603a283..0000000000 --- a/test/base/test_fermions.jl +++ /dev/null @@ -1,782 +0,0 @@ -using ITensors, Test -import ITensors: Out, In -using ITensors.SiteTypes: op, siteinds - -@testset "Fermions" begin - ITensors.enable_auto_fermion() - - @testset "parity_sign function" begin - - # Full permutations - p1 = [1, 2, 3] - @test ITensors.parity_sign(p1) == +1 - p2 = [2, 1, 3] - @test ITensors.parity_sign(p2) == -1 - p3 = [2, 3, 1] - @test ITensors.parity_sign(p3) == +1 - p4 = [3, 2, 1] - @test ITensors.parity_sign(p4) == -1 - - ## Partial permutations - p5 = [2, 7] - @test ITensors.parity_sign(p5) == +1 - p6 = [5, 3] - @test ITensors.parity_sign(p6) == -1 - p7 = [1, 9, 3, 10] - @test ITensors.parity_sign(p7) == -1 - p8 = [1, 12, 9, 3, 11] - @test ITensors.parity_sign(p8) == +1 - end - - @testset "Fermionic QNs" begin - q = QN("Nf", 1, -1) - @test isfermionic(q[1]) - @test fparity(q) == 1 - - q = q + q + q - @test val(q, "Nf") == 3 - - p = QN("P", 1, -2) - @test fparity(p) == 1 - @test isodd(p) - @test fparity(p + p) == 0 - @test fparity(p + p + p) == 1 - end - - @testset "Fermionic IndexVals" begin - sn = Index([QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1], "sn") - @test fparity(sn => 1) == 0 - @test fparity(sn => 2) == 1 - @test !isodd(sn => 1) - @test isodd(sn => 2) - - sp = Index([QN("Nfp", 0, -2) => 1, QN("Nfp", 1, -2) => 1], "sp") - @test fparity(sp => 1) == 0 - @test fparity(sp => 2) == 1 - end - - @testset "Get and Set Elements" begin - s = Index([QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1], "s") - - N = ITensor(s', dag(s)) - - N[s' => 2, s => 2] = 1.0 - @test N[s' => 2, s => 2] ≈ +1.0 - @test N[s => 2, s' => 2] ≈ -1.0 - - N[s => 2, s' => 2] = 1.0 - @test N[s' => 2, s => 2] ≈ -1.0 - @test N[s => 2, s' => 2] ≈ 1.0 - - C = ITensor(s', dag(s)) - - C[s' => 1, s => 2] = 1.0 - @test C[s' => 1, s => 2] ≈ 1.0 - @test C[s => 2, s' => 1] ≈ 1.0 - - I = ITensor(s', dag(s)) - I[s' => 1, s => 1] = 1.0 - I[s' => 2, s => 2] = 1.0 - @test I[s' => 1, s => 1] ≈ 1.0 - @test I[s' => 2, s => 2] ≈ 1.0 - - @test I[s => 1, s' => 1] ≈ 1.0 - @test I[s => 2, s' => 2] ≈ -1.0 - end - - @testset 
"Making operators different ways" begin - s = Index([QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1], "s") - - N1 = ITensor(s', dag(s)) - N1[s' => 2, s => 2] = +1.0 - - N2 = ITensor(dag(s), s') - N2[s' => 2, s => 2] = +1.0 - @test norm(N1 - N2) ≈ 0.0 - - N3 = ITensor(s', dag(s)) - N3[s => 2, s' => 2] = -1.0 - @test norm(N1 - N3) ≈ 0.0 - - N4 = ITensor(dag(s), s') - N4[s => 2, s' => 2] = -1.0 - @test norm(N1 - N4) ≈ 0.0 - end - - @testset "Permute and Add Fermionic ITensors" begin - @testset "Permute Operators" begin - s = Index([QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1], "s") - - N1 = ITensor(s', dag(s)) - N1[s' => 2, s => 2] = 1.0 - - N2 = ITensor(dag(s), s') - N2[s' => 2, s => 2] = 1.0 - - pN1 = permute(N1, dag(s), s') - @test pN1[s' => 2, s => 2] ≈ 1.0 - - pN2 = permute(N2, s', dag(s)) - @test pN2[s' => 2, s => 2] ≈ 1.0 - - #TODO add cases resulting in minus signs - end - - @testset "Add Operators" begin - s = Index([QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1], "sn") - - N1 = ITensor(s', dag(s)) - N1[s' => 2, s => 2] = 1.0 - - N2 = ITensor(dag(s), s') - N2[s' => 2, s => 2] = 1.0 - - NN = N1 + N2 - @test NN[s' => 2, s => 2] ≈ 2.0 - - NN = N1 + N1 - @test NN[s' => 2, s => 2] ≈ 2.0 - - NN = N2 + N2 - @test NN[s' => 2, s => 2] ≈ 2.0 - end - - @testset "Wavefunction Tests" begin - s = [Index([QN("N", 0, -2) => 2, QN("N", 1, -2) => 2], "s$n") for n in 1:4] - - psi0 = ITensor(s...) - - psi0[s[1] => 1, s[2] => 1, s[3] => 1, s[4] => 1] = 1111 - psi0[s[1] => 3, s[2] => 3, s[3] => 1, s[4] => 1] = 3311 - psi0[s[1] => 1, s[2] => 3, s[3] => 1, s[4] => 3] = 1313 - - psi1 = permute(psi0, s[2], s[1], s[3], s[4]) - @test norm(psi1 - psi0) ≈ 0.0 - - @test psi0[s[1] => 1, s[2] => 1, s[3] => 1, s[4] => 1] ≈ 1111 - @test psi1[s[1] => 1, s[2] => 1, s[3] => 1, s[4] => 1] ≈ 1111 - @test psi0[s[2] => 1, s[1] => 1, s[3] => 1, s[4] => 1] ≈ 1111 - @test psi1[s[2] => 1, s[1] => 1, s[3] => 1, s[4] => 1] ≈ 1111 - - @test psi0[s[1] => 3, s[2] => 3, s[3] => 1, s[4] => 1] ≈ 3311 - @test psi1[s[1] => 3, s[2] => 3, s[3] => 1, s[4] => 1] ≈ 3311 - @test psi0[s[2] => 3, s[1] => 3, s[3] => 1, s[4] => 1] ≈ -3311 - @test psi1[s[2] => 3, s[1] => 3, s[3] => 1, s[4] => 1] ≈ -3311 - @test psi0[s[4] => 1, s[2] => 3, s[1] => 3, s[3] => 1] ≈ -3311 - @test psi1[s[4] => 1, s[2] => 3, s[1] => 3, s[3] => 1] ≈ -3311 - - psi2 = permute(psi0, s[4], s[1], s[3], s[2]) - @test norm(psi2 - psi0) ≈ 0.0 - @test norm(psi2 - psi1) ≈ 0.0 - - @test psi0[s[1] => 1, s[2] => 3, s[3] => 1, s[4] => 3] ≈ 1313 - @test psi1[s[1] => 1, s[2] => 3, s[3] => 1, s[4] => 3] ≈ 1313 - @test psi2[s[1] => 1, s[2] => 3, s[3] => 1, s[4] => 3] ≈ 1313 - @test psi0[s[4] => 3, s[1] => 1, s[3] => 1, s[2] => 3] ≈ -1313 - @test psi1[s[4] => 3, s[1] => 1, s[3] => 1, s[2] => 3] ≈ -1313 - @test psi2[s[4] => 3, s[1] => 1, s[3] => 1, s[2] => 3] ≈ -1313 - end - end - - @testset "C Cdag operators" begin - s = siteinds("Fermion", 3; conserve_qns=true) - - p110 = ITensor(s[1], s[2], s[3]) - p110[s[1] => 2, s[2] => 2, s[3] => 1] = 1.0 - - p011 = ITensor(s[1], s[2], s[3]) - p011[s[1] => 1, s[2] => 2, s[3] => 2] = 1.0 - - np011 = ITensor(s[1], s[2], s[3]) - np011[s[1] => 1, s[3] => 2, s[2] => 2] = 1.0 - - dag_p011 = ITensor(dag(s[3]), dag(s[2]), dag(s[1])) - dag_p011[s[3] => 2, s[2] => 2, s[1] => 1] = 1.0 - - @test norm(dag(p011) - dag_p011) ≈ 0 - - C1 = op(s, "C", 1) - Cdag3 = op(s, "Cdag", 3) - - # Anti-commutator - @test norm(Cdag3 * C1 + C1 * Cdag3) ≈ 0.0 - - # Commutator - @test norm(Cdag3 * C1 - C1 * Cdag3) ≈ 2.0 - - let # <011|Cdag3*C1|110> = -1 - t1 = noprime(C1 * p110) - t2 = 
noprime(Cdag3 * t1) - @test scalar(dag_p011 * t2) ≈ -1.0 - end - - let # <011|C1*Cdag3|110> = +1 - t1 = noprime(Cdag3 * p110) - t2 = noprime(C1 * t1) - @test scalar(dag_p011 * t2) ≈ +1.0 - end - - let # <011|(Cdag3*C1)|110> = -1 - t = noprime((Cdag3 * C1) * p110) - @test scalar(dag(p011) * t) ≈ -1.0 - end - - let # <011|(C1*Cdag3)|110> = +1 - t = noprime((C1 * Cdag3) * p110) - @test scalar(dag(p011) * t) ≈ +1.0 - end - - # - # Commuting B tensors - # - # These commute by carrying additional - # g-indices (Grassman indices) - # - - g = Index(QN("Nf", 1, -1) => 1; tags="g") - - Bdag3 = Cdag3 * setelt(dag(g) => 1) - B1 = setelt(g => 1) * C1 - - # Commutator - @test norm(Bdag3 * B1 - B1 * Bdag3) ≈ 0.0 - - # Anti-commutator - @test norm(Bdag3 * B1 + B1 * Bdag3) ≈ 2.0 - - let # <011|Cdag3*C1|110> = <011|Bdag3*B1|110> = -1 - t1 = noprime(B1 * p110) - t2 = noprime(Bdag3 * t1) - @test scalar(dag(p011) * t2) ≈ -1.0 - end - - let # <011|(Cdag3*C1)|110> = <011|(Bdag3*B1)|110> = -1 - t = noprime((Bdag3 * B1) * p110) - @test scalar(dag(p011) * t) ≈ -1.0 - end - - let # <011|Cdag3*C1|110> = <011|B1*Bdag3|110> = -1 - t1 = noprime(Bdag3 * p110) - t2 = noprime(B1 * t1) - @test scalar(dag(p011) * t2) ≈ -1.0 - end - - let # <011|(Cdag3*C1)|110> = <011|(B1*Bdag3)|110> = -1 - t = noprime((B1 * Bdag3) * p110) - @test scalar(dag(p011) * t) ≈ -1.0 - end - - # - # Leave out middle fermion, test for cases <001|...|100> - # - p100 = ITensor(s[1], s[2], s[3]) - p100[s[1] => 2, s[2] => 1, s[3] => 1] = 1.0 - - p001 = ITensor(s[1], s[2], s[3]) - p001[s[1] => 1, s[2] => 1, s[3] => 2] = 1.0 - - let # <001|Cdag3*C1|100> = <001|Bdag3*B1|100> = +1 - t1 = noprime(B1 * p100) - t2 = noprime(Bdag3 * t1) - @test scalar(dag(p001) * t2) ≈ +1.0 - end - - let # <001|Cdag3*C1|100> = <001|(Bdag3*B1)|100> = +1 - t = noprime((Bdag3 * B1) * p100) - @test scalar(dag(p001) * t) ≈ +1.0 - end - - let # <001|Cdag3*C1|100> = <001|B1*Bdag3|100> = +1 - t1 = noprime(Bdag3 * p100) - t2 = noprime(B1 * t1) - @test scalar(dag(p001) * t2) ≈ +1.0 - end - - let # <001|Cdag3*C1|100> = <001|(B1*Bdag3)|100> = +1 - t = noprime((B1 * Bdag3) * p100) - @test scalar(dag(p001) * t) ≈ +1.0 - end - end - - @testset "Combiner conjugation" begin - s = siteinds("Fermion", 4; conserve_qns=true) - C = combiner(s[1], s[2]) - @test NDTensors.isconj(storage(C)) == false - - dC = dag(C) - @test NDTensors.isconj(storage(dC)) == true - end - - @testset "Combine Uncombine Permute Test" begin - s = siteinds("Fermion", 4; conserve_qns=true) - - @testset "Two Site Test" begin - p11 = ITensor(s[1], s[2]) - p11[s[1] => 2, s[2] => 2] = 1.0 - - C = combiner(s[1], s[2]) - - dp11 = dag(p11) - - Cp11_A = C * p11 - dCp11_A = dag(Cp11_A) - dp11_A = C * dCp11_A - @test dp11_A ≈ dp11 - - Cp11_B = p11 * C - dCp11_B = dag(Cp11_B) - dp11_B = C * dCp11_B - @test dp11_B ≈ dp11 - end - - @testset "Longer two-site tests" begin - s1, s2, s3, s4 = s - C12 = combiner(s1, s2) - C21 = combiner(s2, s1) - C13 = combiner(s1, s3) - C31 = combiner(s3, s1) - - T = random_itensor(QN("Nf", 3, -1), s1, s2, s3, s4) - T .= abs.(T) - - # - # 1a, 2a tests - # - - c12 = combinedind(C12) - c12T = C12 * T - u12T = dag(C12) * c12T - @test norm(u12T - T) < 1E-10 - - c21 = combinedind(C21) - c21T = C21 * T - u21T = dag(C21) * c21T - @test norm(u21T - T) < 1E-10 - - c13 = combinedind(C13) - c13T = C13 * T - u13T = dag(C13) * c13T - @test norm(u13T - T) < 1E-10 - - c31 = combinedind(C31) - c31T = C31 * T - u31T = dag(C31) * c31T - @test norm(u31T - T) < 1E-10 - - # - # 1b, 2b tests - # - - dc12T = dag(C12) * dag(T) - @test 
norm(dc12T - dag(c12T)) < 1E-10 - du12T = C12 * dc12T - @test norm(du12T - dag(T)) < 1E-10 - - dc21T = dag(C21) * dag(T) - @test norm(dc21T - dag(c21T)) < 1E-10 - du21T = C21 * dc21T - @test norm(du21T - dag(T)) < 1E-10 - - dc13T = dag(C13) * dag(T) - @test norm(dc13T - dag(c13T)) < 1E-10 - du13T = C13 * dc13T - @test norm(du13T - dag(T)) < 1E-10 - - dc31T = dag(C31) * dag(T) - @test norm(dc31T - dag(c31T)) < 1E-10 - du31T = C31 * dc31T - @test norm(du31T - dag(T)) < 1E-10 - end - - @testset "Three Site Test" begin - p111 = ITensor(s[1], s[2], s[3]) - p111[s[1] => 2, s[2] => 2, s[3] => 2] = 1.0 - - dp111 = dag(p111) - - C = combiner(s[1], s[3]) - Cp111 = C * p111 - dCp111 = dag(Cp111) - dp111_U = C * dCp111 - @test dp111_U ≈ dp111 - end - end - - @testset "Mixed Arrow Combiner Tests" begin - @testset "One wrong-way arrow" begin - q1 = QN("Nf", 1, -1) - - s0 = Index([q1 => 1]; tags="s0") - s1 = Index([q1 => 1]; tags="s1") - s2 = Index([q1 => 1]; tags="s2") - s3 = Index([q1 => 1]; tags="s3") - s4 = Index([q1 => 1]; tags="s4") - - A = random_itensor(QN("Nf", 0, -1), s0, s1, dag(s2), dag(s3)) - B = random_itensor(QN("Nf", 0, -1), s3, s2, dag(s1), dag(s4)) - A .= one.(A) - B .= one.(B) - @test norm(A) ≈ 1.0 - @test norm(B) ≈ 1.0 - - Ru = A * B - - C = combiner(s3, s2, dag(s1)) - Bc = C * B - Ac = A * dag(C) - Rc = Ac * Bc - - @test norm(Ru - Rc) < 1E-8 - end - - @testset "Two wrong-way arrows" begin - q1 = QN("Nf", 1, -1) - - s0 = Index([q1 => 1]; tags="s0") - s1 = Index([q1 => 1]; tags="s1") - s2 = Index([q1 => 1]; tags="s2") - s3 = Index([q1 => 1]; tags="s3") - s4 = Index([q1 => 1]; tags="s4") - - A = random_itensor(QN("Nf", 2, -1), s0, s1, s2, dag(s3)) - B = random_itensor(QN("Nf", -2, -1), s3, dag(s2), dag(s1), dag(s4)) - A .= one.(A) - B .= one.(B) - @test norm(A) ≈ 1.0 - @test norm(B) ≈ 1.0 - - Ru = A * B - - C = combiner(s3, dag(s2), dag(s1)) - Bc = C * B - Ac = A * dag(C) - Rc = Ac * Bc - - @test norm(Ru - Rc) < 1E-8 - end - end - - @testset "Permutedims Regression Test" begin - s1 = Index([QN("N", 0, -1) => 1, QN("N", 1, -1) => 1], "s1") - s2 = Index([QN("N", 0, -1) => 1, QN("N", 1, -1) => 1], "s2") - i = Index([QN("N", 0, -1) => 1, QN("N", 1, -1) => 1, QN("N", 2, -1) => 1], "i") - - A = ITensor(QN("N", 4, -1), s1, s2, i) - A[s1 => 2, s2 => 2, i => 3] = 223 - - B = ITensor(QN("N", 4, -1), s1, i, s2) - B[s1 => 2, i => 3, s2 => 2] = 223 - @test A ≈ B - - C = ITensor(QN("N", 4, -1), s1, i, s2) - C[s2 => 2, i => 3, s1 => 2] = -223 - @test A ≈ C - end - - @testset "Fermionic SVD" begin - N = 4 - s = siteinds("Fermion", N; conserve_qns=true) - - A = random_itensor(QN("Nf", 2, -1), s[1], s[2], s[3], s[4]) - for n1 in 1:4, n2 in 1:4 - (n1 == n2) && continue - U, S, V = svd(A, (s[n1], s[n2])) - @test norm(U * S * V - A) < 1E-10 - end - for n1 in 1:4, n2 in 1:4, n3 in 1:4 - (n1 == n2) && continue - (n1 == n3) && continue - (n2 == n3) && continue - U, S, V = svd(A, (s[n1], s[n2], s[n3])) - @test norm(U * S * V - A) < 1E-10 - end - - B = random_itensor(QN("Nf", 3, -1), s[1], s[2], s[3], s[4]) - for n1 in 1:4, n2 in 1:4 - (n1 == n2) && continue - U, S, V = svd(B, (s[n1], s[n2])) - @test norm(U * S * V - B) < 1E-10 - end - for n1 in 1:4, n2 in 1:4, n3 in 1:4 - (n1 == n2) && continue - (n1 == n3) && continue - (n2 == n3) && continue - U, S, V = svd(B, (s[n1], s[n2], s[n3])) - @test norm(U * S * V - B) < 1E-10 - end - end # Fermionic SVD tests - - @testset "Fermionic SVD Arrow Cases" begin - s = siteinds("Fermion", 3; conserve_qns=true) - - function id(i) - if dir(i) == Out - I = ITensor(i, dag(i)') - 
else - I = ITensor(dag(i)', i) - end - for n in 1:dim(i) - I[n, n] = 1.0 - end - return I - end - - # Arrows: Out, Out - let - T = ITensor(s[1], s[2]) - T[1, 2] = 1.0 - T[2, 1] = 1.0 - U, S, V, spec, u, v = svd(T, s[1]) - @test norm(T - U * S * V) ≈ 0 - UU = dag(U) * prime(U, u) - @test norm(UU - id(u)) ≈ 0 - VV = dag(V) * prime(V, v) - @test norm(VV - id(v)) ≈ 0 - end - - # Arrows: In, Out - let - T = ITensor(dag(s[1]), s[2]) - T[2, 2] = 1.0 - U, S, V, spec, u, v = svd(T, s[1]) - @test norm(T - U * S * V) ≈ 0 - UU = dag(U) * prime(U, u) - @test norm(UU - id(u)) ≈ 0 - VV = dag(V) * prime(V, v) - @test norm(VV - id(v)) ≈ 0 - end - - # Arrows: Out, In - let - T = ITensor(s[1], dag(s[2])) - T[2, 2] = 1.0 - U, S, V, spec, u, v = svd(T, s[1]) - @test norm(T - U * S * V) ≈ 0 - UU = dag(U) * prime(U, u) - @test norm(UU - id(u)) ≈ 0 - VV = dag(V) * prime(V, v) - @test norm(VV - id(v)) ≈ 0 - end - - # Arrows: In, In - let - T = ITensor(dag(s[1]), dag(s[2])) - T[1, 2] = 1.0 - U, S, V, spec, u, v = svd(T, s[1]) - @test norm(T - U * S * V) ≈ 0 - UU = dag(U) * prime(U, u) - @test norm(UU - id(u)) ≈ 0 - VV = dag(V) * prime(V, v) - @test norm(VV - id(v)) ≈ 0 - end - - # Arrows: Mixed, In - let - T = ITensor(dag(s[1]), s[2], dag(s[3])) - T[1, 1, 1] = 1.0 - T[2, 2, 1] = 1.0 - U, S, V, spec, u, v = svd(T, [dag(s[1]), s[2]]) - @test norm(T - U * S * V) < 1E-14 - UU = dag(U) * prime(U, u) - @test_broken norm(UU - id(u)) ≈ 0 - VV = dag(V) * prime(V, v) - @test norm(VV - id(v)) ≈ 0 - end - - # Arrows: Mixed, In - # Try to fix - let - T = ITensor(dag(s[1]), s[2], dag(s[3])) - T[1, 1, 1] = 1.0 - T[2, 2, 1] = 1.0 - U, S, V, spec, u, v = svd(T, [dag(s[1]), s[2]]) - @test norm(T - U * S * V) < 1E-14 - UU = dag(U) * prime(U, u) - @test_broken norm(UU - id(u)) ≈ 0 - VV = dag(V) * prime(V, v) - @test norm(VV - id(v)) ≈ 0 - end - - #Factorize SVD Test. Specifying arrows on S. 
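For the `factorize_svd` case below: the `dir` keyword fixes the arrow direction of the new internal index, and `ortho="none"` absorbs the singular values into the factors instead of returning a separate `S`, so either arrow choice must reproduce `A`. A hedged sketch of the call pattern (the single-left-index form is an assumption here; the test below passes two left indices):

using ITensors, Test
using ITensors.SiteTypes: siteinds
s = siteinds("Fermion", 2; conserve_qns=true)
A = random_itensor(QN("Nf", 1, -1), s[1], s[2])
for dir in (ITensors.In, ITensors.Out)
  # dir sets the arrow of the internal index; spec carries the spectrum info.
  L, R, spec = ITensors.factorize_svd(A, s[1]; dir, ortho="none")
  @test norm(L * R - A) <= 1e-13
end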
- let - l1, l2 = Index(QN("Nf", -1) => 1, QN("Nf", 1) => 1; tags="l1", dir=ITensors.In), - Index(QN("Nf", 2) => 1, QN("Nf", 1) => 1; tags="l2", dir=ITensors.Out) - r1, r2, r3 = Index(QN("Nf", -2) => 1, QN("Nf", 1) => 1; tags="r1", dir=ITensors.Out), - Index(QN("Nf", 2) => 1, QN("Nf", 1) => 1; tags="r2", dir=ITensors.In), - Index(QN("Nf", -2) => 1, QN("Nf", 1) => 1; tags="r3", dir=ITensors.In) - A = random_itensor(l1, l2, r1, r2, r3) - - for dir in [ITensors.Out, ITensors.In] - L, R, spec = ITensors.factorize_svd(A, l1, l2; dir, ortho="none") - @test norm(L * R - A) <= 1e-14 - end - end - end - - @testset "Fermion Contraction with Combined Indices" begin - N = 10 - s = siteinds("Fermion", N; conserve_qns=true) - - begin - A = random_itensor(QN("Nf", 3, -1), s[1], s[2], s[3], s[4]) - B = random_itensor(QN("Nf", 2, -1), s[1], s[3], s[4]) - - CC = combiner(s[1], s[3]) - - cA = CC * A - cB = CC * B - - R1 = dag(cA) * cB - R2 = dag(A) * B - - @test norm(R1 - R2) < 1E-10 - end - - begin - A = random_itensor(QN("Nf", 3, -1), s[1], s[2], s[3], s[4]) - B = random_itensor(QN("Nf", 2, -1), s[1], s[3], s[4]) - - CC = combiner(s[1], s[3]) - - cA = CC * A - cdB = dag(CC) * dag(B) - - R1 = cA * cdB - R2 = A * dag(B) - - @test norm(R1 - R2) < 1E-10 - end - - begin - A = random_itensor(QN("Nf", 3, -1), s[1], s[2], s[3], s[4]) - B = random_itensor(QN("Nf", 2, -1), s[1], s[3], s[4]) - - CC = combiner(s[1], s[4], s[3]) - - cA = CC * A - cdB = dag(CC) * dag(B) - - R1 = cA * cdB - R2 = A * dag(B) - - @test norm(R1 - R2) < 1E-10 - end - - begin - CC = combiner(s[3], s[4]) - c = combinedind(CC) - - A = random_itensor(QN("Nf", 3, -1), c, s[1], s[2]) - B = random_itensor(QN("Nf", 2, -1), s[1], c, s[5]) - - uA = dag(CC) * A - uB = dag(CC) * B - - R1 = dag(uA) * uB - R2 = dag(A) * B - - @test norm(R1 - R2) < 1E-10 - end - - @testset "Combiner Regression Test" begin - T = random_itensor(QN("Nf", 3, -1), s[1], s[2], s[3], s[4]) - - C12 = combiner(s[1], s[2]) - c12 = combinedind(C12) - c12T = C12 * T - - u12T = dag(C12) * c12T - - @test norm(u12T - T) < 1E-10 - end - end # Fermion Contraction with Combined Indices - - @testset "Regression Tests" begin - @testset "SVD DiagBlockSparse Regression Test" begin - l1 = Index(QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1; tags="Link,l=1") - s2 = Index(QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1; tags="Site,n=2") - s3 = Index(QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1; tags="Site,n=3") - l3 = Index(QN("Nf", 2, -1) => 1; tags="Link,l=3") - - phi = random_itensor(QN("Nf", 4, -1), l1, s2, s3, l3) - - U, S, V = svd(phi, (l1, s2)) - - @test norm((U * S) * V - phi) < 1E-10 - @test norm(U * (S * V) - phi) < 1E-10 - end - - @testset "Eigen Positive Semi Def Regression Test" begin - # - # Test was failing without using combiners in - # eigen which were conjugates of each other - # - cutoff = 1E-12 - N = 2 - s = siteinds("Fermion", N; conserve_qns=true) - - T = ITensor(QN("Nf", 0, -1), dag(s[1]), s[1]') - T[2, 2] = 1 - - F = eigen(T; ishermitian=true, cutoff=cutoff) - D, U, spec = F - Ut = F.Vt - - @test norm(dag(U) * D * Ut - T) < 1E-10 - end - - @testset "Factorize Eigen Regression Test" begin - N = 3 - s = siteinds("Fermion", N; conserve_qns=true) - A = ITensor(QN("Nf", 2, -1), s[1], s[2], s[3]) - A[s[1] => 1, s[2] => 2, s[3] => 2] = 1.0 - - U, R = factorize(A, (s[1], s[2]); which_decomp="eigen", cutoff=1E-18, ortho="left") - - @test norm(U * R - A) < 1E-12 - end - - @testset "Contraction Regression Test" begin - s = siteinds("Fermion", 3; conserve_qns=true) - l = Index(QN("Nf", 1, -1) => 
1; tags="l") - - q2 = QN("Nf", 2, -1) - q0 = QN("Nf", 0, -1) - - T1 = ITensor(q2, s[1], s[2], l) - T1[s[1] => 1, s[2] => 2, l => 1] = 1.0 - - T2 = ITensor(q0, dag(l), s[3]) - T2[dag(l) => 1, s[3] => 2] = 1.0 - - @test norm(T1 * T2 - T2 * T1) < 1E-10 - end - - @testset "SVD Regression Test" begin - Pf0 = QN("Pf", 0, -2) - Pf1 = QN("Pf", 1, -2) - - l22 = Index([Pf0 => 1, Pf1 => 1], "Link,dir=2,n=2") - l23 = Index([Pf0 => 1, Pf1 => 1], "Link,dir=3,n=2") - s1 = Index([Pf0 => 1, Pf1 => 1, Pf1 => 1, Pf0 => 1], "Site,n=1") - l11 = Index([Pf0 => 1, Pf1 => 1], "Link,dir=1,n=1") - - T = random_itensor(dag(l22), dag(l23), s1, l11) - - U, S, V = svd(T, dag(l22), dag(l23), s1) - - @test norm(T - U * S * V) < 1E-10 - end - end # Regression Tests - - @testset "Non-QN eigen Regression Test" begin - # Test that non-QN eigen runs properly - # with auto-fermion enabled. - i = Index(2) - a = random_itensor(i', i) - d, u = eigen(a) - @test norm(a * u - u' * d) ≈ 0 atol = √(eps(real(eltype(a)))) - end - - ITensors.disable_auto_fermion() -end diff --git a/test/base/test_global_variables.jl b/test/base/test_global_variables.jl deleted file mode 100644 index a14db46760..0000000000 --- a/test/base/test_global_variables.jl +++ /dev/null @@ -1,52 +0,0 @@ -using ITensors -using Test - -@testset "Warn ITensor order" begin - # Check it starts at default value - @test ITensors.get_warn_order() == ITensors.default_warn_order - - # Set to 4 and reset - @test ITensors.set_warn_order(4) == ITensors.default_warn_order - @test ITensors.get_warn_order() == 4 - @test ITensors.reset_warn_order() == 4 - @test ITensors.get_warn_order() == ITensors.default_warn_order - - # Disable it (set to nothing) and reset - @test ITensors.disable_warn_order() == ITensors.default_warn_order - @test isnothing(ITensors.get_warn_order()) - @test isnothing(ITensors.reset_warn_order()) - @test ITensors.get_warn_order() == ITensors.default_warn_order - - # Disable macro - @test ITensors.get_warn_order() == ITensors.default_warn_order - ITensors.set_warn_order(6) - @test ITensors.get_warn_order() == 6 - @disable_warn_order begin - @test isnothing(ITensors.get_warn_order()) - end - @test ITensors.get_warn_order() == 6 - ITensors.reset_warn_order() - @test ITensors.get_warn_order() == ITensors.default_warn_order - - # Set macro - @test ITensors.get_warn_order() == ITensors.default_warn_order - ITensors.set_warn_order(6) - @test ITensors.get_warn_order() == 6 - @set_warn_order 10 begin - @test ITensors.get_warn_order() == 10 - end - @test ITensors.get_warn_order() == 6 - ITensors.reset_warn_order() - @test ITensors.get_warn_order() == ITensors.default_warn_order - - # Reset macro - @test ITensors.get_warn_order() == ITensors.default_warn_order - ITensors.set_warn_order!(6) - @test ITensors.get_warn_order() == 6 - @reset_warn_order begin - @test ITensors.get_warn_order() == ITensors.default_warn_order - end - @test ITensors.get_warn_order() == 6 - ITensors.reset_warn_order() - @test ITensors.get_warn_order() == ITensors.default_warn_order -end diff --git a/test/base/test_index.jl b/test/base/test_index.jl deleted file mode 100644 index 31fe1014a4..0000000000 --- a/test/base/test_index.jl +++ /dev/null @@ -1,185 +0,0 @@ -using ITensors -using ITensors.NDTensors -using Random -using Test -import ITensors: In, Out, Neither - -@testset "Index" begin - @testset "Index with dim" begin - i = Index(2) - @test id(i) != 0 - @test hasid(i, id(i)) - @test dim(i) == 2 - @test dir(i) == Neither - @test plev(i) == 0 - @test tags(i) == TagSet("") - @test Int(i) == 2 - 
@test length(i) == 1 - @test Tuple(i) == (i,) - @test collect(i)[] === i - end - @testset "Index with all args" begin - i = Index(1, 2, In, "Link", 1) - @test id(i) == 1 - @test dim(i) == 2 - @test dir(i) == In - @test plev(i) == 1 - @test tags(i) == TagSet("Link") - j = copy(i) - @test id(j) == 1 - @test dim(j) == 2 - @test dir(j) == In - @test plev(j) == 1 - @test tags(j) == TagSet("Link") - @test j == i - end - @testset "prime" begin - i = Index(2) - @test plev(i) == 0 - i2 = prime(i, 2) - @test plev(i2) == 2 - i1 = i' - @test plev(i1) == 1 - i2 = i'' - @test plev(i2) == 2 - i3 = i''' - @test plev(i3) == 3 - i6 = i^6 - @test plev(i6) == 6 - i0 = noprime(i) - @test plev(i0) == 0 - end - @testset "IndexVal" begin - i = Index(2) - @test_deprecated i[1] - @test_deprecated i(1) - @test val(i => 1) == 1 - @test ind(i => 1) == i - @test isindequal(i, i => 2) - @test isindequal(i => 2, i) - @test plev(i' => 2) == 1 - @test val(i' => 2) == 2 - @test plev(prime(i => 2, 4)) == 4 - - @test plev(i => 2) == 0 - @test plev(i' => 2) == 1 - @test prime(i => 2) == (i' => 2) - @test IndexVal(i, 1) == Pair(i, 1) - iv = i => 2 - ĩv = sim(i => 2) - @test ind(iv) ≠ ind(ĩv) - @test val(iv) == val(ĩv) - end - @testset "Iteration" begin - i = Index(3) - - c = 1 - for iv in eachindval(i) - @test iv == (i => c) - c += 1 - end - - c = 1 - for n in eachval(i) - @test n == c - c += 1 - end - end - @testset "Broadcasting" begin - N = 3 - i = Index(2) - ps = (n - 1 for n in 1:4) - is = prime.(i, ps) - @test is[1] == i - @test is[2] == i' - @test is[3] == i'' - ts = ("i$n" for n in 1:4) - is = settags.(i, ts) - @test is[1] == addtags(i, "i1") - @test is[2] == addtags(i, "i2") - @test is[3] == addtags(i, "i3") - end - @testset "Index ID random seed" begin - Random.seed!(index_id_rng(), 1234) - i = Index(2) - j = Index(2) - Random.seed!(index_id_rng(), 1234) - ĩ = Index(2) - j̃ = Index(2) - Random.seed!(index_id_rng(), 123) - ĩ′ = Index(2) - j̃′ = Index(2) - @test id(i) == id(ĩ) - @test id(j) == id(j̃) - @test id(i) ≠ id(ĩ′) - @test id(j) ≠ id(j̃′) - - Random.seed!(index_id_rng(), 1234) - Random.seed!(1234) - i = Index(2) - j = Index(2) - Random.seed!(1234) - ĩ = Index(2) - j̃ = Index(2) - Random.seed!(123) - ĩ′ = Index(2) - j̃′ = Index(2) - @test id(i) ≠ id(ĩ) - @test id(j) ≠ id(j̃) - @test id(i) ≠ id(ĩ′) - @test id(j) ≠ id(j̃′) - - Random.seed!(index_id_rng(), 1234) - Random.seed!(1234) - i = Index(2) - j = Index(2) - A = random_itensor(i, j) - Random.seed!(1234) - ĩ = Index(2) - j̃ = Index(2) - à = random_itensor(ĩ, j̃) - Random.seed!(123) - ĩ′ = Index(2) - j̃′ = Index(2) - Ã′ = random_itensor(ĩ′, j̃′) - @test id(i) ≠ id(ĩ) - @test id(j) ≠ id(j̃) - @test id(i) ≠ id(ĩ′) - @test id(j) ≠ id(j̃′) - @test all(tensor(A) .== tensor(Ã)) - @test all(tensor(A) .≠ tensor(Ã′)) - - Random.seed!(index_id_rng(), 1234) - Random.seed!(1234) - i = Index(2) - j = Index(2) - A = random_itensor(i, j) - Random.seed!(index_id_rng(), 1234) - Random.seed!(1234) - ĩ = Index(2) - j̃ = Index(2) - à = random_itensor(ĩ, j̃) - Random.seed!(index_id_rng(), 1234) - Random.seed!(123) - ĩ′ = Index(2) - j̃′ = Index(2) - Ã′ = random_itensor(ĩ′, j̃′) - @test id(i) == id(ĩ) - @test id(j) == id(j̃) - @test id(i) == id(ĩ′) - @test id(j) == id(j̃′) - @test all(tensor(A) .== tensor(Ã)) - @test all(tensor(A) .≠ tensor(Ã′)) - end - @testset "directsum" begin - i = Index(2, "i") - j = Index(3, "j") - ij = directsum(i, j; tags="test") - @test dim(ij) == dim(i) + dim(j) - @test hastags(ij, "test") - k = Index(4, "k") - ijk = directsum(i, j, k; tags="test2") - @test 
dim(ijk) == dim(i) + dim(j) + dim(k) - @test hastags(ijk, "test2") - end -end diff --git a/test/base/test_indexset.jl b/test/base/test_indexset.jl deleted file mode 100644 index 7509fa375a..0000000000 --- a/test/base/test_indexset.jl +++ /dev/null @@ -1,374 +0,0 @@ -using ITensors -using Test -using Combinatorics -using Compat - -@testset "IndexSet" begin - idim = 2 - jdim = 3 - kdim = 4 - ldim = 5 - i = Index(idim, "i") - j = Index(jdim, "j") - k = Index(kdim, "k") - l = Index(ldim, "l") - @testset "show" begin - indset = IndexSet(i, j, k) - @test length(sprint(show, indset)) > 1 - end - @testset "Basic constructors" begin - I = IndexSet(i, j, k) - @test IndexSet(I) === I - @test l ∈ IndexSet(I..., l) - @test l ∈ IndexSet(l, I...) - @test length(IndexSet(i, j)) == 2 - # construct with function - ind_list = [i, j, k] - I = IndexSet(ii -> ind_list[ii], 3) - @test i ∈ I - @test j ∈ I - @test k ∈ I - I = IndexSet(ii -> ind_list[ii], Order(3)) - @test i ∈ I - @test j ∈ I - @test k ∈ I - end - @testset "length of IndexSet and friends" begin - @test length(IndexSet(i, j)) == 2 - @test size(IndexSet(i, j)) == (length(IndexSet(i, j)),) - end - @testset "Convert to Index" begin - @test Index(IndexSet(i)) === i - @test_throws BoundsError Index(IndexSet(i, j)) - end - @testset "Index dimensions" begin - I = IndexSet(i, j, k) - @test dim(I) == idim * jdim * kdim - @test dims(I) == [idim, jdim, kdim] - @test dim(I, 1) == idim - @test dim(I, 2) == jdim - @test dim(I, 3) == kdim - - @test maxdim(I) == max(idim, jdim, kdim) - end - - @testset "Set operations" begin - I1 = @inferred(IndexSet(i, j, k)) - I2 = @inferred(IndexSet(k, l)) - I3 = @inferred(IndexSet(j, l)) - @test I1 isa Vector{Index{Int}} - @test @inferred(hassameinds(I1, (k, j, i))) - @test @inferred(Nothing, getfirst(setdiff(I1, I2, I3))) == i - @test isnothing(@inferred(Nothing, getfirst(setdiff(I1, IndexSet(k, j, i))))) - @test @inferred(setdiff(I1, I2)) == [i, j] - @test hassameinds(@inferred(setdiff(I1, I2)), IndexSet(i, j)) - @test hassameinds(@inferred(setdiff(I1, I2)), (j, i)) - @test I1 ∩ I2 == [k] - @test hassameinds(I1 ∩ I2, IndexSet(k)) - @test @inferred(Nothing, getfirst(intersect(I1, I2))) == k - @test isnothing(@inferred(Nothing, getfirst(intersect(I1, IndexSet(l))))) - @test @inferred(intersect(I1, IndexSet(j, l))) == [j] - @test hassameinds(@inferred(intersect(I1, IndexSet(j, l))), IndexSet(j)) - @test @inferred(Nothing, getfirst(intersect(I1, IndexSet(j, l)))) == j - @test @inferred(intersect(I1, IndexSet(j, k))) == [j, k] - @test hassameinds(@inferred(intersect(I1, (j, k))), IndexSet(j, k)) - @test hassameinds(@inferred(intersect(I1, (j, k, l))), (j, k)) - @test @inferred(filterinds(I1, "i")) == IndexSet(i) - @test @inferred(filterinds(I1; tags="i")) == IndexSet(i) - @test @inferred(filterinds(I1; inds=j)) == IndexSet(j) - @test @inferred(filterinds(I1; tags="i", inds=j)) == IndexSet() - @test @inferred(filterinds(I1; plev=1, inds=j)) == IndexSet() - @test @inferred(filterinds(I1; plev=0, inds=k)) == IndexSet(k) - @test @inferred(filterinds(I1; plev=0)) == IndexSet(i, j, k) - @test @inferred(filterinds(I1; inds=l)) == IndexSet() - @test @inferred(hassameinds(filter(I1, "i"), IndexSet(i))) - @test @inferred(Nothing, getfirst(I1, "j")) == j - @test isnothing(@inferred(Nothing, getfirst(I1, "l"))) - @test @inferred(Nothing, findfirst(I1, i)) == 1 - @test @inferred(Nothing, findfirst(I1, j)) == 2 - @test @inferred(Nothing, findfirst(I1, k)) == 3 - @test isnothing(@inferred(Nothing, findfirst(I1, Index(2)))) - end - - @testset 
"Set operations with Order" begin - i, j, k, l = Index.(2, ("i", "j", "k", "l")) - - Iij = IndexSet(i, j) - Ijl = IndexSet(j, l) - Ikl = IndexSet(k, l) - Iijk = IndexSet(i, j, k) - - # - # setdiff - # intersect - # symdiff - # union - # filter - # - - # - # setdiff - # - - @test @inferred(setdiff(Iijk, Ikl)) == [i, j] - - @test @inferred(setdiff(Iij, Iijk)) == Index{Int}[] - - @test @inferred(uniqueinds(Iijk, Ikl; tags="i")) == [i] - - @test @inferred(uniqueinds(Iijk, Ikl; tags=not("i"))) == [j] - - @test @inferred(setdiff(Iijk, Ijl, Ikl)) == [i] - - # - # intersect - # - - @test @inferred(intersect(Iijk, Ikl)) == [k] - - @test @inferred(intersect(Iijk, Iij)) == [i, j] - - @test @inferred(commoninds(Iijk, Iij; tags="i")) == [i] - - # - # symdiff - # - - @test @inferred(symdiff(Iijk, Ikl)) == [i, j, l] - - @test @inferred(symdiff(Iijk, Iij)) == [k] - - # - # union - # - - @test @inferred(union(Iijk, Ikl)) == [i, j, k, l] - - @test @inferred(union(Iijk, Iij)) == [i, j, k] - end - - @testset "intersect index ordering" begin - I = IndexSet(i, k, j) - J = IndexSet(j, l, i) - # Test that intersect respects the ordering - # of the indices in the first IndexSet - @test @inferred(hassameinds(intersect(I, J), IndexSet(i, j))) - @test @inferred(hassameinds(intersect(J, I), IndexSet(j, i))) - end - @testset "adjoint" begin - I = IndexSet(i, k, j) - @test adjoint(I) == IndexSet(i', k', j') - end - @testset "mapprime" begin - I = IndexSet(i', k'', j) - @test mapprime(I, 1, 5) == IndexSet(i^5, k'', j) - @test mapprime(I, 2, 0) == IndexSet(i', k, j) - - J = IndexSet(i, j, k') - @test mapprime(J, 0, 2) == IndexSet(i'', j'', k') - - J = mapprime(J, 1, 5) - @test J == IndexSet(i, j, k^5) - end - @testset "strides" begin - I = IndexSet(i, j) - @test NDTensors.dim_to_strides(I) == (1, idim) - @test NDTensors.dim_to_stride(I, 1) == 1 - @test NDTensors.dim_to_stride(I, 2) == idim - end - @testset "setprime" begin - I = IndexSet(i, j) - J = setprime(I, 2, i) - @test i'' ∈ J - end - @testset "prime" begin - I = IndexSet(i, j) - J = prime(I, j) - @test i ∈ J - @test j' ∈ J - J = prime(I; inds=j) - @test i ∈ J - @test j' ∈ J - J = prime(I; inds=not(j)) - @test i' ∈ J - @test j ∈ J - end - @testset "noprime" begin - I = IndexSet(i'', j') - J = noprime(I) - @test i ∈ J - @test j ∈ J - end - @testset "swapprime" begin - I = IndexSet(i, j) - @test swapprime(I, 0, 1) == IndexSet(i', j') - @test swapprime(I, 0, 4) == IndexSet(i^4, j^4) - I = IndexSet(i, j'') - @test swapprime(I, 2, 0) == IndexSet(i'', j) - I = IndexSet(i, j'', k, l) - @test swapprime(I, 2, 0) == IndexSet(i'', j, k'', l'') - I = IndexSet(i, k'', j'') - @test swapprime(I, 2, 1) == IndexSet(i, k', j') - # In-place version: - I = IndexSet(i, k'', j''') - I = swapprime(I, 2, 0) - @test I == IndexSet(i'', k, j''') - # With tags specified: - I = IndexSet(i, k, j) - @test swapprime(I, 0, 1, "i") == IndexSet(i', k, j) - @test swapprime(I, 0, 1, "j") == IndexSet(i, k, j') - - I = IndexSet(i, i', j) - @test swapprime(I, 0, 1, "i") == IndexSet(i', i, j) - @test swapprime(I, 0, 1, "j") == IndexSet(i, i', j') - end - - @testset "swaptags" begin - i1 = Index(2, "Site,A") - i2 = Index(2, "Site,B") - is = IndexSet(i1, i2) - sis = swaptags(is, "Site", "Link") - for j in sis - @test !hastags(j, "Site") - @test hastags(j, "Link") - end - end - - @testset "hastags" begin - i = Index(2, "i, x") - j = Index(2, "j, x") - is = IndexSet(i, j) - @test hastags(is, "i") - @test anyhastags(is, "i") - @test !allhastags(is, "i") - @test allhastags(is, "x") - end - - @testset 
"broadcasting" begin - x = Index([QN(n) => 1 for n in 0:1], "x") - y = Index([QN(n) => 2 for n in 0:1], "y") - I = IndexSet(x, y) - - # prime - J = prime.(I) - # broken for now - #@inferred broadcast(prime, I) - @test J isa IndexSet - @test x' ∈ J - @test y' ∈ J - - # prime 2 - J = prime.(I, 2) - # broken for now - #@inferred broadcast(prime, I, 2) - @test J isa IndexSet - @test x'' ∈ J - @test y'' ∈ J - - # tag - J = addtags.(I, "t") - # broken for now - #@inferred broadcast(addtags, I, "t") - @test J isa IndexSet - @test addtags(x, "t") ∈ J - @test addtags(y, "t") ∈ J - - # dag - J = dag.(I) - # broken for now - #@inferred broadcast(dag, I) - @test J isa IndexSet - @test x ∈ J - @test y ∈ J - @test dir(J[1]) == -dir(I[1]) - @test dir(J, x) == -dir(I, x) - @test dir(J[2]) == -dir(I[2]) - @test dir(J, y) == -dir(I, y) - @test ITensors.dirs(J, (x, y)) == [-dir(I, x), -dir(I, y)] - @test ITensors.dirs(J) == [-dir(I, x), -dir(I, y)] - - # dir - dirsI = dir.(I) - # broken for now - #@inferred broadcast(dir, I) - @test dirsI isa Vector{ITensors.Arrow} - @test dirsI == [ITensors.Out, ITensors.Out] - - # dims - dimsI = dim.(I) - # broken for now - #@inferred broadcast(dim, I) - @test dimsI isa Vector{Int} - @test dimsI == [2, 4] - - # pairs - J = prime.(I) - pairsI = I .=> J - #@inferred broadcast(=>, I, J) - @test pairsI isa Vector{<:Pair} - @test pairsI == [x => x', y => y'] - - pairsI = I .=> 1 - #@inferred broadcast(=>, I, 1) - @test pairsI isa Vector{<:Pair} - @test pairsI == [x => 1, y => 1] - - pairsI = I .=> (1, 2) - #@inferred broadcast(=>, I, (1, 2)) - @test pairsI isa Vector{<:Pair} - @test pairsI == [x => 1, y => 2] - - pairsI = I .=> [1, 2] - #@inferred broadcast(=>, I, [1, 2]) - @test pairsI isa Vector{<:Pair} - @test pairsI == [x => 1, y => 2] - end - - @testset "ITensors.indpairs" begin - si = [QN(0) => 1, QN(1) => 2, QN(2) => 3] - sj = [QN(0) => 2, QN(1) => 3, QN(2) => 4] - sk = [QN(0) => 3, QN(1) => 4, QN(2) => 5] - sl = [QN(0) => 2] - i, j, k, l = Index.((si, sj, sk, sl), ("i", "j", "k", "l")) - T = random_itensor(dag(j), k', i', dag(k), j', dag(i)) - ip = ITensors.indpairs(T) - i1 = first.(ip) - i2 = last.(ip) - @test i1' == i2 - for x in i1 - @test dir(x) == dir(T, x) - end - for x in i2 - @test dir(x) == dir(T, x) - end - end - - @testset "permute" begin - i, j, k = Index.(Ref([QN() => 2]), ("i", "j", "k")) - is1 = (dag(i), j, dag(k)) - is2 = (i, dag(j), k) - for x1 in permutations(is1), x2 in permutations(is2) - # permute x1 into the ordering of x2 - px1 = permute(x1, x2) - @test px1 == x2 - for y in x1 - @test dir(x1, y) == dir(px1, y) - @test -dir(x2, y) == dir(px1, y) - end - # permute x2 into the ordering of x1 - px2 = permute(x2, x1) - @test px2 == x1 - for y in x2 - @test dir(x2, y) == dir(px2, y) - @test -dir(x1, y) == dir(px2, y) - end - end - end - - @testset "dag" begin - is = [Index(2), Index(3)] - @test is == dag(is) - is = Index[] - @test dag(is) == Index[] - end -end diff --git a/test/base/test_indices.jl b/test/base/test_indices.jl deleted file mode 100644 index 6d7db4d62c..0000000000 --- a/test/base/test_indices.jl +++ /dev/null @@ -1,244 +0,0 @@ -using Test -using ITensors -using ITensors.NDTensors - -@testset "Allow general mixtures of collections of indices" begin - d = 2 - is = Index.((d, d, d, d, d), ("i", "j", "k", "l", "m")) - i, j, k, l, m = is - is1 = ([i, j], k, (l, m)) - is2 = [[i, j], k, (l, m)] - A = randn(dims(is)) - D = randn(minimum(dims(is))) - x = randn() - @test hassameinds(ITensor(i), (i,)) - @test hassameinds(ITensor(Float64, i), (i,)) - 
@test hassameinds(ITensor(is1), is) - @test hassameinds(ITensor(is2), is) - @test hassameinds(ITensor(is1...), is) - @test hassameinds(ITensor(Float64, is1), is) - @test hassameinds(ITensor(Float64, is2), is) - @test hassameinds(ITensor(Float64, is1...), is) - @test hassameinds(random_itensor(is1), is) - @test hassameinds(random_itensor(is2), is) - @test hassameinds(random_itensor(is1...), is) - @test hassameinds(random_itensor(Float64, is1), is) - @test hassameinds(random_itensor(Float64, is2), is) - @test hassameinds(random_itensor(Float64, is1...), is) - @test hassameinds(ITensor(x, is1), is) - @test hassameinds(ITensor(x, is2), is) - @test hassameinds(ITensor(x, is1...), is) - @test hassameinds(ITensor(Float64, x, is1), is) - @test hassameinds(ITensor(Float64, x, is2), is) - @test hassameinds(ITensor(Float64, x, is1...), is) - @test hassameinds(ITensor(Float64, undef, is1), is) - @test hassameinds(ITensor(Float64, undef, is2), is) - @test hassameinds(ITensor(Float64, undef, is1...), is) - @test hassameinds(ITensor(undef, is1), is) - @test hassameinds(ITensor(undef, is2), is) - @test hassameinds(ITensor(undef, is1...), is) - @test hassameinds(ITensor(is1), is) - @test hassameinds(ITensor(is2), is) - @test hassameinds(ITensor(is1...), is) - @test hassameinds(ITensor(Float64, is1), is) - @test hassameinds(ITensor(Float64, is2), is) - @test hassameinds(ITensor(Float64, is1...), is) - @test hassameinds(ITensor(A, is1), is) - @test hassameinds(ITensor(A, is2), is) - @test hassameinds(ITensor(A, is1...), is) - @test hassameinds(itensor(A, is1), is) - @test hassameinds(itensor(A, is2), is) - @test hassameinds(itensor(A, is1...), is) - @test hassameinds(ITensor(Float64, A, is1), is) - @test hassameinds(ITensor(Float64, A, is2), is) - @test hassameinds(ITensor(Float64, A, is1...), is) - @test hassameinds(itensor(Float64, A, is1), is) - @test hassameinds(itensor(Float64, A, is2), is) - @test hassameinds(itensor(Float64, A, is1...), is) - @test hassameinds(diag_itensor(is1), is) - @test hassameinds(diag_itensor(is2), is) - @test hassameinds(diag_itensor(is1...), is) - @test hassameinds(diag_itensor(Float64, is1), is) - @test hassameinds(diag_itensor(Float64, is2), is) - @test hassameinds(diag_itensor(Float64, is1...), is) - @test hassameinds(diag_itensor(D, is1), is) - @test hassameinds(diag_itensor(D, is2), is) - @test hassameinds(diag_itensor(D, is1...), is) - @test hassameinds(diag_itensor(Float64, D, is1), is) - @test hassameinds(diag_itensor(Float64, D, is2), is) - @test hassameinds(diag_itensor(Float64, D, is1...), is) - @test hassameinds(diag_itensor(x, is1), is) - @test hassameinds(diag_itensor(x, is2), is) - @test hassameinds(diag_itensor(x, is1...), is) - @test hassameinds(diag_itensor(Float64, x, is1), is) - @test hassameinds(diag_itensor(Float64, x, is2), is) - @test hassameinds(diag_itensor(Float64, x, is1...), is) - @test hassameinds(diagitensor(D, is1), is) - @test hassameinds(diagitensor(D, is2), is) - @test hassameinds(diagitensor(D, is1...), is) - @test hassameinds(diagitensor(Float64, D, is1), is) - @test hassameinds(diagitensor(Float64, D, is2), is) - @test hassameinds(diagitensor(Float64, D, is1...), is) - @test hassameinds(delta(is1), is) - @test hassameinds(delta(is2), is) - @test hassameinds(delta(is1...), is) - @test hassameinds(delta(Float64, is1), is) - @test hassameinds(delta(Float64, is2), is) - @test hassameinds(delta(Float64, is1...), is) - @test hasinds(combiner(is1), is) - @test hasinds(combiner(is2), is) - @test hasinds(combiner(is1...), is) -end - -@testset 
"Allow general mixtures of collections of QN indices" begin - d = [QN() => 2] - is = Index.((d, d, d, d, d), ("i", "j", "k", "l", "m")) - i, j, k, l, m = is - is1 = ([i, j], k, (l, m)) - is2 = [[i, j], k, (l, m)] - A = randn(dims(is)) - D = randn(minimum(dims(is))) - x = randn() - @test hassameinds(ITensor(i), (i,)) - @test hassameinds(ITensor(Float64, i), (i,)) - @test hassameinds(ITensor(is1), is) - @test hassameinds(ITensor(is2), is) - @test hassameinds(ITensor(is1...), is) - @test hassameinds(ITensor(Float64, is1), is) - @test hassameinds(ITensor(Float64, is2), is) - @test hassameinds(ITensor(Float64, is1...), is) - @test hassameinds(random_itensor(is1), is) - @test hassameinds(random_itensor(is2), is) - @test hassameinds(random_itensor(is1...), is) - @test hassameinds(random_itensor(Float64, is1), is) - @test hassameinds(random_itensor(Float64, is2), is) - @test hassameinds(random_itensor(Float64, is1...), is) - @test hassameinds(ITensor(x, is1), is) - @test hassameinds(ITensor(x, is2), is) - @test hassameinds(ITensor(x, is1...), is) - @test hassameinds(ITensor(Float64, x, is1), is) - @test hassameinds(ITensor(Float64, x, is2), is) - @test hassameinds(ITensor(Float64, x, is1...), is) - @test hassameinds(ITensor(Float64, undef, is1), is) - @test hassameinds(ITensor(Float64, undef, is2), is) - @test hassameinds(ITensor(Float64, undef, is1...), is) - @test hassameinds(ITensor(undef, is1), is) - @test hassameinds(ITensor(undef, is2), is) - @test hassameinds(ITensor(undef, is1...), is) - @test hassameinds(ITensor(is1), is) - @test hassameinds(ITensor(is2), is) - @test hassameinds(ITensor(is1...), is) - @test hassameinds(ITensor(Float64, is1), is) - @test hassameinds(ITensor(Float64, is2), is) - @test hassameinds(ITensor(Float64, is1...), is) - @test hassameinds(ITensor(A, is1), is) - @test hassameinds(ITensor(A, is2), is) - @test hassameinds(ITensor(A, is1...), is) - @test hassameinds(itensor(A, is1), is) - @test hassameinds(itensor(A, is2), is) - @test hassameinds(itensor(A, is1...), is) - @test hassameinds(ITensor(Float64, A, is1), is) - @test hassameinds(ITensor(Float64, A, is2), is) - @test hassameinds(ITensor(Float64, A, is1...), is) - @test hassameinds(itensor(Float64, A, is1), is) - @test hassameinds(itensor(Float64, A, is2), is) - @test hassameinds(itensor(Float64, A, is1...), is) - @test hassameinds(diag_itensor(is1), is) - @test hassameinds(diag_itensor(is2), is) - @test hassameinds(diag_itensor(is1...), is) - @test hassameinds(diag_itensor(Float64, is1), is) - @test hassameinds(diag_itensor(Float64, is2), is) - @test hassameinds(diag_itensor(Float64, is1...), is) - @test hassameinds(diag_itensor(D, is1), is) - @test hassameinds(diag_itensor(D, is2), is) - @test hassameinds(diag_itensor(D, is1...), is) - @test hassameinds(diag_itensor(Float64, D, is1), is) - @test hassameinds(diag_itensor(Float64, D, is2), is) - @test hassameinds(diag_itensor(Float64, D, is1...), is) - @test hassameinds(diag_itensor(x, is1), is) - @test hassameinds(diag_itensor(x, is2), is) - @test hassameinds(diag_itensor(x, is1...), is) - @test hassameinds(diag_itensor(Float64, x, is1), is) - @test hassameinds(diag_itensor(Float64, x, is2), is) - @test hassameinds(diag_itensor(Float64, x, is1...), is) - @test hassameinds(diagitensor(D, is1), is) - @test hassameinds(diagitensor(D, is2), is) - @test hassameinds(diagitensor(D, is1...), is) - @test hassameinds(diagitensor(Float64, D, is1), is) - @test hassameinds(diagitensor(Float64, D, is2), is) - @test hassameinds(diagitensor(Float64, D, is1...), is) - @test 
hassameinds(delta(is1), is) - @test hassameinds(delta(is2), is) - @test hassameinds(delta(is1...), is) - @test hassameinds(delta(Float64, is1), is) - @test hassameinds(delta(Float64, is2), is) - @test hassameinds(delta(Float64, is1...), is) - @test hasinds(combiner(is1), is) - @test hasinds(combiner(is2), is) - @test hasinds(combiner(is1...), is) - - # With flux - @test hassameinds(ITensor(QN(), i), (i,)) - @test hassameinds(ITensor(Float64, QN(), i), (i,)) - @test hassameinds(ITensor(QN(), is1), is) - @test hassameinds(ITensor(QN(), is2), is) - @test hassameinds(ITensor(QN(), is1...), is) - @test hassameinds(ITensor(Float64, QN(), is1), is) - @test hassameinds(ITensor(Float64, QN(), is2), is) - @test hassameinds(ITensor(Float64, QN(), is1...), is) - @test hassameinds(random_itensor(QN(), is1), is) - @test hassameinds(random_itensor(QN(), is2), is) - @test hassameinds(random_itensor(QN(), is1...), is) - @test hassameinds(random_itensor(Float64, QN(), is1), is) - @test hassameinds(random_itensor(Float64, QN(), is2), is) - @test hassameinds(random_itensor(Float64, QN(), is1...), is) - @test hassameinds(ITensor(x, QN(), is1), is) - @test hassameinds(ITensor(x, QN(), is2), is) - @test hassameinds(ITensor(x, QN(), is1...), is) - @test hassameinds(ITensor(Float64, x, QN(), is1), is) - @test hassameinds(ITensor(Float64, x, QN(), is2), is) - @test hassameinds(ITensor(Float64, x, QN(), is1...), is) - @test hassameinds(ITensor(Float64, undef, QN(), is1), is) - @test hassameinds(ITensor(Float64, undef, QN(), is2), is) - @test hassameinds(ITensor(Float64, undef, QN(), is1...), is) - @test hassameinds(ITensor(undef, QN(), is1), is) - @test hassameinds(ITensor(undef, QN(), is2), is) - @test hassameinds(ITensor(undef, QN(), is1...), is) - @test_throws ErrorException hassameinds(emptyITensor(QN(), is1), is) - @test_throws ErrorException hassameinds(emptyITensor(QN(), is2), is) - @test_throws ErrorException hassameinds(emptyITensor(QN(), is1...), is) - @test_throws ErrorException hassameinds(emptyITensor(Float64, QN(), is1), is) - @test_throws ErrorException hassameinds(emptyITensor(Float64, QN(), is2), is) - @test_throws ErrorException hassameinds(emptyITensor(Float64, QN(), is1...), is) -end - -@testset "Test Index collection as Vector of abstract type" begin - d = 2 - i = Index(d) - A = randn(d, d) - T = itensor(A, Index[i', dag(i)]) - @test storage(T) isa NDTensors.Dense{Float64} - T = itensor(A, Any[i', dag(i)]) - @test storage(T) isa NDTensors.Dense{Float64} - - i = Index([QN() => d]) - A = randn(d, d) - T = itensor(A, Index[i', dag(i)]) - @test storage(T) isa NDTensors.BlockSparse{Float64} - T = itensor(A, Any[i', dag(i)]) - @test storage(T) isa NDTensors.BlockSparse{Float64} -end - -@testset "Test output types of ITensors.indices" begin - i = Index(2) - @test ITensors.indices([i'', i', i]) == Index{Int}[i'', i', i] - @test ITensors.indices((i'', i', i)) == (i'', i', i) - @test ITensors.indices(((i'',), (i',), i)) == (i'', i', i) - @test ITensors.indices(((i'', i'), (i,))) == (i'', i', i) - @test ITensors.indices([(i'',), (i',), (i,)]) == Index{Int}[i'', i', i] - @test ITensors.indices(Any[(i'',), (i',), (i,)]) == Index{Int}[i'', i', i] - @test ITensors.indices([(i'',), (i',), [i]]) == Index{Int}[i'', i', i] - @test ITensors.indices([(i'',), i', [i]]) == Index{Int}[i'', i', i] - @test ITensors.indices(Any[(i'',), i', [i]]) == Index{Int}[i'', i', i] - @test ITensors.indices(((i'',), i', [i])) == Index{Int}[i'', i', i] -end diff --git a/test/base/test_inference.jl b/test/base/test_inference.jl deleted 
file mode 100644 index 7b1d0420b1..0000000000 --- a/test/base/test_inference.jl +++ /dev/null @@ -1,102 +0,0 @@ -using ITensors -using ITensors.NDTensors -using Test - -@testset "ITensors priming and tagging" begin - i = Index(2) - T1 = random_itensor(i'', i') - T2 = random_itensor(i', i) - - @test inds(@inferred(adjoint(T1))) == (i''', i'') - @test inds(@inferred(prime(T1, 2))) == (i'''', i''') - @test inds(@inferred(addtags(T1, "x"))) == (addtags(i, "x")'', addtags(i, "x")') - @test inds(@inferred(T1 * T2)) == (i'', i) - - @test @inferred(order(T1)) == 2 - @test @inferred(ndims(T1)) == 2 - @test @inferred(dim(T1)) == 4 - @test @inferred(maxdim(T1)) == 2 -end - -@testset "NDTensors Dense contract" begin - i = Index(2) - T1 = randomTensor((i'', i')) - T2 = randomTensor((i', i)) - R = randomTensor((i'', i)) - - labelsT1 = (1, -1) - labelsT2 = (-1, 2) - labelsR = (1, 2) - - @test @inferred(NDTensors.contraction_output(T1, labelsT1, T2, labelsT2, labelsR)) isa - DenseTensor{ - Float64,2,Tuple{Index{Int64},Index{Int64}},Dense{Float64,Vector{Float64}} - } - @test @inferred(NDTensors.contract(T1, labelsT1, T2, labelsT2, labelsR)) isa DenseTensor{ - Float64,2,Tuple{Index{Int64},Index{Int64}},Dense{Float64,Vector{Float64}} - } - @test @inferred(NDTensors.contract!!(R, labelsR, T1, labelsT1, T2, labelsT2)) isa - DenseTensor{ - Float64,2,Tuple{Index{Int64},Index{Int64}},Dense{Float64,Vector{Float64}} - } - - A = Base.ReshapedArray(randn(4), (2, 2), ()) - B = Base.ReshapedArray(randn(4), (2, 2), ()) - @inferred NDTensors._contract_scalar_perm!(B, A, (2, 1), 1.0, 1.0) - @inferred NDTensors._contract_scalar_perm!(B, A, (2, 1), 1.0, 1) -end - -@testset "NDTensors BlockSparse contract" begin - i = Index([QN(0) => 2, QN(1) => 2]) - IT1 = random_itensor(i'', dag(i)') - IT2 = random_itensor(i', dag(i)) - IR = random_itensor(i'', dag(i)) - T1, T2, R = Tensor.((IT1, IT2, IR)) - - labelsT1 = (1, -1) - labelsT2 = (-1, 2) - labelsR = (1, 2) - - indsR = @inferred( - NDTensors.contract_inds(inds(T1), labelsT1, inds(T2), labelsT2, labelsR) - ) - @test indsR isa Tuple{Index{Vector{Pair{QN,Int}}},Index{Vector{Pair{QN,Int}}}} - - TensorT = @inferred(NDTensors.contraction_output_type(typeof(T1), typeof(T2), indsR)) - @test TensorT <: Tensor{Float64,2,BlockSparse{Float64,Vector{Float64},2},typeof(indsR)} - - blockoffsetsR, contraction_plan = @inferred( - NDTensors.contract_blockoffsets( - blockoffsets(T1), - inds(T1), - labelsT1, - blockoffsets(T2), - inds(T2), - labelsT2, - indsR, - labelsR, - ) - ) - @test blockoffsetsR isa BlockOffsets{2} - @test contraction_plan isa Vector{Tuple{Block{2},Block{2},Block{2}}} - - @test @inferred(NDTensors.contraction_output(T1, labelsT1, T2, labelsT2, labelsR)) isa - Tuple{BlockSparseTensor,Vector{Tuple{Block{2},Block{2},Block{2}}}} - - if VERSION ≥ v"1.7" - # Only properly inferred in Julia 1.7 and later - @test @inferred(NDTensors.contract(T1, labelsT1, T2, labelsT2, labelsR)) isa - BlockSparseTensor - end - - # TODO: this function doesn't exist yet - #@test @inferred(NDTensors.contract!!(R, labelsR, T1, labelsT1, T2, labelsT2)) isa BlockSparseTensor - - b = Block(1, 1) - B1 = T1[b] - B2 = T2[b] - BR = R[b] - @test @inferred( - NDTensors.contract!(BR, labelsR, B1, labelsT1, B2, labelsT2, 1.0, 0.0) - ) isa DenseTensor -end diff --git a/test/base/test_itensor.jl b/test/base/test_itensor.jl deleted file mode 100644 index 66ca1abfc7..0000000000 --- a/test/base/test_itensor.jl +++ /dev/null @@ -1,1963 +0,0 @@ -@eval module $(gensym()) -using Combinatorics: permutations -using ITensors: - 
ITensors, - Index, - IndexSet, - ITensor, - Order, - QN, - ⊕, - δ, - addtags, - allhastags, - anyhastags, - commonind, - convert_eltype, - convert_leaf_eltype, - dag, - directsum, - eachindval, - eachval, - filterinds, - firstind, - hascommoninds, - hasind, - hasinds, - hassameinds, - hastags, - inner, - itensor, - mapprime, - noprime, - onehot, - order, - permute, - prime, - product, - random_itensor, - removetags, - replaceind, - replaceind!, - replaceinds, - replaceinds!, - replacetags, - scalar, - setelt, - setprime, - settags, - sim, - swapinds, - swapinds!, - swapprime, - uniqueind, - uniqueindex, - val -using ITensors.NDTensors: - NDTensors, - DenseTensor, - array, - dim, - dims, - eigen, - factorize, - ind, - inds, - matrix, - maxdim, - mindim, - polar, - storage, - vector -using LinearAlgebra: - LinearAlgebra, axpy!, diag, dot, ishermitian, mul!, norm, nullspace, qr, rmul!, svd, tr -using Random: Random -using Test: @test, @test_throws, @testset - -# Enable debug checking for these tests -ITensors.enable_debug_checks() - -Random.seed!(12345) - -function invdigits(::Type{T}, x...) where {T} - return T(sum([x[length(x) - k + 1] * 10^(k - 1) for k in 1:length(x)])) -end - -@testset "Dense ITensor basic functionality" begin - @testset "ITensor constructors" begin - @testset "Default" begin - A = ITensor() - @test storage(A) isa NDTensors.EmptyStorage{NDTensors.EmptyNumber} - end - - @testset "Undef with index" begin - i, j, k, l = Index.(2, ("i", "j", "k", "l")) - A = ITensor(undef, i) - @test storage(A) isa NDTensors.Dense{Float64} - end - - @testset "Default with indices" begin - i, j, k, l = Index.(2, ("i", "j", "k", "l")) - A = ITensor(i, j) - @test storage(A) isa NDTensors.EmptyStorage{NDTensors.EmptyNumber} - end - - @testset "diag" for ElType in (Float32, Float64, ComplexF32, ComplexF64) - i, j = Index.(2, ("i", "j")) - A = random_itensor(ElType, i, j) - d = diag(A) - @test d isa DenseTensor{ElType,1} - @test d[1] == A[1, 1] - @test d[2] == A[2, 2] - end - - @testset "Index set operations" begin - i, j, k, l = Index.(2, ("i", "j", "k", "l")) - A = random_itensor(i, j) - B = random_itensor(j, k) - C = random_itensor(k, l) - @test hascommoninds(A, B) - @test hascommoninds(B, C) - @test !hascommoninds(A, C) - end - - @testset "isreal, iszero, real, imag" begin - i, j = Index.(2, ("i", "j")) - A = random_itensor(i, j) - Ac = random_itensor(ComplexF64, i, j) - Ar = real(Ac) - Ai = imag(Ac) - @test Ac ≈ Ar + im * Ai - @test isreal(A) - @test !isreal(Ac) - @test isreal(Ar) - @test isreal(Ai) - @test !iszero(A) - @test !iszero(real(A)) - @test iszero(imag(A)) - @test iszero(ITensor(0.0, i, j)) - @test iszero(ITensor(i, j)) - end - - elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) - @testset "ITensors.scalartype (eltype=$elt)" for elt in elts - i, j = Index.((2, 2)) - a = ITensor(elt, i, j) - @test ITensors.scalartype(a) === elt - a = random_itensor(elt, i, j) - @test ITensors.scalartype(a) === elt - end - - @testset "map" begin - A = random_itensor(Index(2)) - @test eltype(A) == Float64 - B = map(ComplexF64, A) - @test B ≈ A - @test eltype(B) == ComplexF64 - B = map(Float32, A) - @test B ≈ A - @test eltype(B) == Float32 - B = map(x -> 2x, A) - @test B ≈ 2A - @test eltype(B) == Float64 - @test array(map(x -> x + 1, A)) ≈ map(x -> x + 1, array(A)) - end - - @testset "reductions (sum, prod)" for elt in ( - Float32, Float64, Complex{Float32}, Complex{Float64} - ) - a = random_itensor(elt, Index(2), Index(2)) - @test sum(a) ≈ sum(array(a)) - @test sum(a) isa elt - @test 
prod(a) ≈ prod(array(a)) - @test prod(a) isa elt - - a = ITensor(elt(2)) - @test sum(a) ≈ sum(array(a)) - @test sum(a) isa elt - @test prod(a) ≈ prod(array(a)) - @test prod(a) isa elt - end - - @testset "getindex with state string" begin - i₁ = Index(2, "S=1/2") - i₂ = Index(2, "S=1/2") - v = ITensor(i₁, i₂) - v[i₂ => "↑", i₁ => "↓"] = 1.0 - @test v[1, 1] == 0.0 - @test v[1, 2] == 0.0 - @test v[2, 1] == 1.0 - @test v[2, 2] == 0.0 - @test v[i₁ => "↑", i₂ => "↑"] == 0.0 - @test v[i₁ => "↑", i₂ => "↓"] == 0.0 - @test v[i₁ => "↓", i₂ => "↑"] == 1.0 - @test v[i₁ => "↓", i₂ => "↓"] == 0.0 - end - - @testset "getindex with state string only" begin - i₁ = Index(2, "S=1/2") - i₂ = Index(2, "S=1/2") - v = ITensor(i₁, i₂) - v["↓", "↑"] = 1.0 - @test v[1, 1] == 0.0 - @test v[1, 2] == 0.0 - @test v[2, 1] == 1.0 - @test v[2, 2] == 0.0 - @test v["↑", "↑"] == 0.0 - @test v["↑", "↓"] == 0.0 - @test v["↓", "↑"] == 1.0 - @test v["↓", "↓"] == 0.0 - end - - @testset "getindex with end (lastindex, LastIndex)" begin - a = Index(2) - b = Index(3) - A = random_itensor(a, b) - @test A[end, end] == A[a => 2, b => 3] - @test A[end - 1, end] == A[a => 1, b => 3] - @test A[end - 1, end - 1] == A[a => 1, b => 2] - @test A[end - 1, end - 2] == A[a => 1, b => 1] - @test A[end - 1, 2 * (end - 2)] == A[a => 1, b => 2] - @test A[2, end] == A[a => 2, b => 3] - @test A[2, end - 1] == A[a => 2, b => 2] - @test A[1, end] == A[a => 1, b => 3] - @test A[1, end - 2] == A[a => 1, b => 1] - @test A[end, 2] == A[a => 2, b => 2] - @test A[end - 1, 2] == A[a => 1, b => 2] - @test A[a => end, b => end] == A[a => 2, b => 3] - @test A[a => end - 1, b => end] == A[a => 1, b => 3] - @test A[a => end, b => end - 1] == A[a => 2, b => 2] - @test A[a => end - 1, b => 2 * (end - 2)] == A[a => 1, b => 2] - @test A[a => 2, b => end] == A[a => 2, b => 3] - @test A[a => 1, b => end] == A[a => 1, b => 3] - @test A[a => end, b => 3] == A[a => 2, b => 3] - @test A[a => end, b => 2] == A[a => 2, b => 2] - @test A[b => end, a => end] == A[a => 2, b => 3] - @test A[b => end - 1, a => end] == A[a => 2, b => 2] - @test A[b => end - 1, a => end - 1] == A[a => 1, b => 2] - @test A[b => end - 2, a => end - 1] == A[a => 1, b => 1] - @test A[b => 2 * (end - 2), a => end - 1] == A[a => 1, b => 2] - @test A[b => 2, a => end] == A[a => 2, b => 2] - @test A[b => 2, a => end - 1] == A[a => 1, b => 2] - @test A[b => 1, a => end] == A[a => 2, b => 1] - @test A[b => 1, a => end - 1] == A[a => 1, b => 1] - @test A[b => end, a => 2] == A[a => 2, b => 3] - @test A[b => end - 1, a => 2] == A[a => 2, b => 2] - @test A[b => end, a => 1] == A[a => 1, b => 3] - @test A[b => end - 2, a => 1] == A[a => 1, b => 1] - @test A[b => end^2 - 7, a => 1] == A[a => 1, b => 2] - - i, j, k, l = Index.(2, ("i", "j", "k", "l")) - B = random_itensor(i) - @test B[i => end] == B[i => dim(i)] - @test B[i => end - 1] == B[i => dim(i) - 1] - @test B[end] == B[dim(i)] - @test B[end - 1] == B[dim(i) - 1] - end - @testset "ITensor equality" begin - i, j, k, l = Index.(2, ("i", "j", "k", "l")) - Aij = random_itensor(i, j) - Aji = permute(Aij, j, i) - Bij′ = random_itensor(i, j') - Cij′ = random_itensor(i, j') - @test Aij == Aij - @test Aij == Aji - @test Bij′ != Cij′ - @test Bij′ != Aij - end - @testset "Set element with end (lastindex, LastIndex)" begin - _i = Index(2, "i") - _j = Index(3, "j") - - A = ITensor(_i, _j) - A[_i => end, _j => end] = 2.5 - @test A[_i => dim(_i), _j => dim(_j)] == 2.5 - - A = ITensor(_i, _j) - A[_j => end, _i => end] = 3.5 - @test
A[_i => dim(_i), _j => dim(_j)] == 3.5 - - A = ITensor(_i, _j) - A[_j => end, _i => 1] = 4.5 - @test A[_i => 1, _j => dim(_j)] == 4.5 - - A = ITensor(_i, _j) - A[_j => end - 1, _i => 1] = 4.5 - @test A[_i => 1, _j => dim(_j) - 1] == 4.5 - end - - @testset "Random" begin - i, j, k, l = Index.(2, ("i", "j", "k", "l")) - A = random_itensor(i, j) - - # Test hasind, hasinds - @test hasind(A, i) - @test hasind(i)(A) - - @test hasinds(A, i) - @test hasinds(A, j) - @test hasinds(A, [i, j]) - @test hasinds([i, j])(A) - @test hasinds(A, IndexSet(j)) - @test hasinds(A, j, i) - @test hasinds(A, (i, j)) - @test hasinds(A, IndexSet(i, j)) - @test hasinds(j, i)(A) - @test hasinds(i)(A) - @test hasinds(IndexSet(j))(A) - @test hasinds((i, j))(A) - @test hasinds(IndexSet(i, j))(A) - - @test storage(A) isa NDTensors.Dense{Float64} - - @test ndims(A) == order(A) == 2 == length(inds(A)) - @test Order(A) == Order(2) - @test size(A) == dims(A) == (2, 2) - @test dim(A) == 4 - - At = random_itensor(Index(2), Index(3)) - @test maxdim(At) == 3 - @test mindim(At) == 2 - @test dim(At, 1) == 2 - @test dim(At, 2) == 3 - - B = random_itensor(IndexSet(i, j)) - @test storage(B) isa NDTensors.Dense{Float64} - @test ndims(B) == order(B) == 2 == length(inds(B)) - @test size(B) == dims(B) == (2, 2) - - A = random_itensor() - @test eltype(A) == Float64 - @test ndims(A) == 0 - end - - @testset "trace (tr) (eltype=$elt)" for elt in ( - Float32, Float64, Complex{Float32}, Complex{Float64} - ) - i, j, k, l = Index.((2, 3, 4, 5), ("i", "j", "k", "l")) - T = random_itensor(elt, j, k', i', k, j', i) - trT1 = tr(T) - @test eltype(trT1) === elt - trT2 = (T * δ(elt, i, i') * δ(elt, j, j') * δ(elt, k, k'))[] - @test trT1 ≈ trT2 - - T = random_itensor(elt, j, k', i', l, k, j', i) - trT1 = tr(T) - @test eltype(trT1) === elt - trT2 = T * δ(elt, i, i') * δ(elt, j, j') * δ(elt, k, k') - @test trT1 ≈ trT2 - end - - @testset "ITensor iteration" begin - i, j, k, l = Index.(2, ("i", "j", "k", "l")) - A = random_itensor(i, j) - Is = eachindex(A) - @test length(Is) == dim(A) - sumA = 0.0 - for I in Is - sumA += A[I] - end - @test sumA ≈ sum(ITensors.data(A)) - sumA = 0.0 - for a in A - sumA += a - end - @test sumA ≈ sum(A) - end - - @testset "From matrix" begin - i, j, k, l = Index.(2, ("i", "j", "k", "l")) - M = [1 2; 3 4] - A = itensor(M, i, j) - @test storage(A) isa NDTensors.Dense{Float64} - - @test M ≈ Matrix(A, i, j) - @test M' ≈ Matrix(A, j, i) - @test_throws DimensionMismatch vector(A) - - @test size(A, 1) == size(M, 1) == 2 - @test_throws BoundsError size(A, 3) - @test_throws BoundsError size(A, 0) - @test_throws ErrorException size(M, 0) - # setstorage changes the internal data but not indices - N = [5 6; 7 8] - A = itensor(M, i, j) - B = ITensors.setstorage(A, NDTensors.Dense(vec(N))) - @test N == Matrix(B, i, j) - @test storage(A) isa NDTensors.Dense{Float64} - @test storage(B) isa NDTensors.Dense{Int} - - M = [1 2 3; 4 5 6] - @test_throws DimensionMismatch itensor(M, i, j) - end - - @testset "To Matrix" begin - i, j, k, l = Index.(2, ("i", "j", "k", "l")) - TM = random_itensor(i, j) - - M1 = matrix(TM) - for ni in eachval(i), nj in eachval(j) - @test M1[ni, nj] ≈ TM[i => ni, j => nj] - end - - M2 = Matrix(TM, j, i) - for ni in eachval(i), nj in eachval(j) - @test M2[nj, ni] ≈ TM[i => ni, j => nj] - end - - T3 = random_itensor(i, j, k) - @test_throws DimensionMismatch Matrix(T3, i, j) - end - - @testset "To Vector" begin - i, j, k, l = Index.(2, ("i", "j", "k", "l")) - TV = random_itensor(i) - - V = vector(TV) - for
ni in eachindval(i) - @test V[val(ni)] ≈ TV[ni] - end - V = Vector(TV) - for ni in eachindval(i) - @test V[val(ni)] ≈ TV[ni] - end - V = Vector(TV, i) - for ni in eachindval(i) - @test V[val(ni)] ≈ TV[ni] - end - V = Vector{ComplexF64}(TV) - for ni in eachindval(i) - @test V[val(ni)] ≈ complex(TV[ni]) - end - - T2 = random_itensor(i, j) - @test_throws DimensionMismatch vector(T2) - end - - @testset "Complex" begin - i, j, k, l = Index.(2, ("i", "j", "k", "l")) - A = ITensor(Complex, i, j) - @test storage(A) isa NDTensors.EmptyStorage{Complex} - end - - @testset "Random complex" begin - i, j, k, l = Index.(2, ("i", "j", "k", "l")) - A = random_itensor(ComplexF64, i, j) - @test storage(A) isa NDTensors.Dense{ComplexF64} - end - - @testset "From complex matrix" begin - i, j, k, l = Index.(2, ("i", "j", "k", "l")) - M = [1+2im 2; 3 4] - A = itensor(M, i, j) - @test storage(A) isa NDTensors.Dense{ComplexF64} - end - end - - @testset "eltype promotion with scalar * and /" begin - @test eltype(ITensor(1.0f0, Index(2)) * 2) === Float32 - @test eltype(ITensor(1.0f0, Index(2)) .* 2) === Float32 - @test eltype(ITensor(1.0f0, Index(2)) / 2) === Float32 - @test eltype(ITensor(1.0f0, Index(2)) ./ 2) === Float32 - @test eltype(ITensor(1.0f0, Index(2)) * 2.0f0) === Float32 - @test eltype(ITensor(1.0f0, Index(2)) .* 2.0f0) === Float32 - @test eltype(ITensor(1.0f0, Index(2)) / 2.0f0) === Float32 - @test eltype(ITensor(1.0f0, Index(2)) ./ 2.0f0) === Float32 - @test eltype(ITensor(1.0f0, Index(2)) * 2.0) === Float64 - @test eltype(ITensor(1.0f0, Index(2)) .* 2.0) === Float64 - @test eltype(ITensor(1.0f0, Index(2)) / 2.0) === Float64 - @test eltype(ITensor(1.0f0, Index(2)) ./ 2.0) === Float64 - end - - @testset "Division /" begin - i = Index(2) - A = random_itensor(i) - B = A / 2 - C = A / ITensor(2) - @test B isa ITensor - @test C isa ITensor - @test B ≈ C - @test A[1] / 2 ≈ B[1] - @test A[2] / 2 ≈ B[2] - @test A[1] / 2 ≈ C[1] - @test A[2] / 2 ≈ C[2] - end - - @testset "Convert to complex" begin - i = Index(2, "i") - j = Index(2, "j") - A = random_itensor(i, j) - B = complex(A) - for ii in 1:dim(i), jj in 1:dim(j) - @test complex(A[i => ii, j => jj]) == B[i => ii, j => jj] - end - end - - @testset "Complex Number Operations" for _eltype in (Float32, Float64) - i = Index(3, "i") - j = Index(4, "j") - - A = random_itensor(complex(_eltype), i, j) - - rA = real(A) - iA = imag(A) - @test norm(rA + 1im * iA - A) < 1E-8 - @test eltype(rA) <: _eltype - @test eltype(iA) <: _eltype - - cA = conj(A) - @test eltype(cA) <: complex(_eltype) - @test norm(cA) ≈ norm(A) - - B = random_itensor(_eltype, i, j) - - cB = conj(B) - @test eltype(cB) <: _eltype - @test norm(cB) ≈ norm(B) - end - - @testset "similar" begin - i = Index(2, "i") - j = Index(2, "j") - A = random_itensor(i, j) - B = similar(A) - @test inds(B) == inds(A) - Ac = similar(A, ComplexF32) - @test storage(Ac) isa NDTensors.Dense{ComplexF32} - end - - @testset "fill!" begin - i = Index(2, "i") - j = Index(2, "j") - A = random_itensor(i, j) - fill!(A, 1.0) - @test all(ITensors.data(A) .== 1.0) - end - - @testset "fill! using broadcast" begin - i = Index(2, "i") - j = Index(2, "j") - A = random_itensor(i, j) - A .= 1.0 - @test all(ITensors.data(A) .== 1.0) - end - - @testset "zero" begin - i = Index(2) - A = random_itensor(i) - B = zero(A) - @test false * A ≈ B - end - - @testset "copyto!" 
begin - i = Index(2, "i") - j = Index(2, "j") - M = [1 2; 3 4] - A = itensor(M, i, j) - N = 2 * M - B = itensor(N, i, j) - copyto!(A, B) - @test A == B - @test ITensors.data(A) == vec(N) - A = itensor(M, i, j) - B = itensor(N, j, i) - copyto!(A, B) - @test A == B - @test ITensors.data(A) == vec(transpose(N)) - end - - @testset "Unary -" begin - i = Index(2, "i") - j = Index(2, "j") - M = [1 2; 3 4] - A = itensor(M, i, j) - @test -A == itensor(-M, i, j) - end - - @testset "dot" begin - i = Index(2, "i") - a = [1.0; 2.0] - b = [3.0; 4.0] - A = itensor(a, i) - B = itensor(b, i) - @test dot(A, B) == 11.0 - end - - @testset "mul!" begin - i = Index(2; tags="i") - j = Index(2; tags="j") - k = Index(2; tags="k") - - A = random_itensor(i, j) - B = random_itensor(j, k) - C = random_itensor(i, k) - mul!(C, A, B) - @test C ≈ A * B - - A = random_itensor(i, j) - B = random_itensor(j, k) - C = random_itensor(k, i) - mul!(C, A, B) - @test C ≈ A * B - - A = random_itensor(i, j) - B = random_itensor(k, j) - C = random_itensor(i, k) - mul!(C, A, B) - @test C ≈ A * B - - A = random_itensor(i, j) - B = random_itensor(k, j) - C = random_itensor(k, i) - mul!(C, A, B) - @test C ≈ A * B - - A = random_itensor(j, i) - B = random_itensor(j, k) - C = random_itensor(i, k) - mul!(C, A, B) - @test C ≈ A * B - - A = random_itensor(j, i) - B = random_itensor(j, k) - C = random_itensor(k, i) - mul!(C, A, B) - @test C ≈ A * B - - A = random_itensor(j, i) - B = random_itensor(k, j) - C = random_itensor(i, k) - mul!(C, A, B) - @test C ≈ A * B - - A = random_itensor(j, i) - B = random_itensor(k, j) - C = random_itensor(k, i) - mul!(C, A, B) - @test C ≈ A * B - - A = random_itensor(i, j) - B = random_itensor(k, j) - C = random_itensor(k, i) - α = 2 - β = 3 - R = mul!(copy(C), A, B, α, β) - @test α * A * B + β * C ≈ R - - @testset "In-place bugs" begin - @testset "Bug 1" begin - l1 = Index(3, "l=1") - l2 = Index(3, "l=2") - s = Index(2, "s") - - A = random_itensor(s', s) - B = random_itensor(l1, s, l2) - - C = random_itensor(l1, s', l2) - - C .= A .* B - - @test C ≈ A * B - end - - @testset "Bug 2" begin - is = [Index(n + 1, "i$n") for n in 1:6] - - for ais in permutations((1, 2, 3)), - bis in permutations((2, 3, 4)), - cis in permutations((1, 4)) - - A = random_itensor(ntuple(i -> is[ais[i]], Val(length(ais)))) - B = random_itensor(ntuple(i -> is[bis[i]], Val(length(bis)))) - C = random_itensor(ntuple(i -> is[cis[i]], Val(length(cis)))) - - C .= A .* B - - @test C ≈ A * B - end - - for ais in permutations((1, 2, 3)), - bis in permutations((2, 3, 4, 5)), - cis in permutations((1, 4, 5)) - - A = random_itensor(ntuple(i -> is[ais[i]], Val(length(ais)))) - B = random_itensor(ntuple(i -> is[bis[i]], Val(length(bis)))) - C = random_itensor(ntuple(i -> is[cis[i]], Val(length(cis)))) - - C .= A .* B - - @test C ≈ A * B - end - end - end - - @testset "In-place outer bug" begin - l1 = Index(3, "l=1") - s = Index(2, "s") - - A = random_itensor(l1) - B = random_itensor(s) - C = random_itensor(s, l1) - - C .= A .* B - - @test C ≈ A * B - end - - @testset "In-place contractions" begin - i1 = Index(2, "i1") - i2 = Index(2, "i2") - i3 = Index(2, "i3") - i4 = Index(2, "i4") - i5 = Index(2, "i5") - i6 = Index(2, "i6") - j1 = Index(2, "j1") - j2 = Index(2, "j2") - j3 = Index(2, "j3") - - # Use the locally defined indices so the test is self-contained - A = random_itensor(i1, i2, i3) - B = random_itensor(i3, j1, j2) - C = random_itensor(i1, i2, j1, j2) - - C .= A .* B - @test C ≈ A * B - end - end - - @testset "exponentiate" begin - s1 = Index(2, "s1") - s2 = Index(2, "s2") - i1 = Index(2, "i1") - i2 = Index(2,
"i2") - Amat = rand(2, 2, 2, 2) - A = itensor(Amat, i1, i2, s1, s2) - - Aexp = exp(A, (i1, i2), (s1, s2)) - Amatexp = reshape(exp(reshape(Amat, 4, 4)), 2, 2, 2, 2) - Aexp_from_mat = itensor(Amatexp, i1, i2, s1, s2) - @test Aexp ≈ Aexp_from_mat - - #test that exponentiation works when indices need to be permuted - Aexp = exp(A, (s1, s2), (i1, i2)) - Amatexp = Matrix(exp(reshape(Amat, 4, 4))') - Aexp_from_mat = itensor(reshape(Amatexp, 2, 2, 2, 2), s1, s2, i1, i2) - @test Aexp ≈ Aexp_from_mat - - #test exponentiation when hermitian=true is used - Amat = reshape(Amat, 4, 4) - Amat = reshape(Amat + Amat' + randn(4, 4) * 1e-10, 2, 2, 2, 2) - A = itensor(Amat, i1, i2, s1, s2) - Aexp = exp(A, (i1, i2), (s1, s2); ishermitian=true) - Amatexp = reshape(parent(exp(LinearAlgebra.Hermitian(reshape(Amat, 4, 4)))), 2, 2, 2, 2) - Aexp_from_mat = itensor(Amatexp, i1, i2, s1, s2) - @test Aexp ≈ Aexp_from_mat - Aexp = exp(A, (i1, i2), (s1, s2); ishermitian=true) - Amatexp = reshape(parent(exp(LinearAlgebra.Hermitian(reshape(Amat, 4, 4)))), 2, 2, 2, 2) - Aexp_from_mat = itensor(Amatexp, i1, i2, s1, s2) - @test Aexp ≈ Aexp_from_mat - end - - @testset "onehot (setelt)" begin - i = Index(2, "i") - - T = onehot(i => 1) - @test eltype(T) === Float64 - @test T[i => 1] ≈ 1.0 - @test T[i => 2] ≈ 0.0 - - T = setelt(i => 2) - @test T[i => 1] ≈ 0.0 - @test T[i => 2] ≈ 1.0 - - j = Index(2, "j") - - T = onehot(j => 2, i => 1) - @test T[j => 1, i => 1] ≈ 0.0 - @test T[j => 2, i => 1] ≈ 1.0 - @test T[j => 1, i => 2] ≈ 0.0 - @test T[j => 2, i => 2] ≈ 0.0 - - T = onehot(Float32, i => 1) - @test eltype(T) === Float32 - @test T[i => 1] ≈ 1.0 - @test T[i => 2] ≈ 0.0 - - T = onehot(ComplexF32, i => 1) - @test eltype(T) === ComplexF32 - @test T[i => 1] ≈ 1.0 - @test T[i => 2] ≈ 0.0 - end - - @testset "add, subtract, and axpy" begin - i = Index(2, "i") - a = [1.0; 2.0] - b = [3.0; 4.0] - A = itensor(a, i) - B = itensor(b, i) - c = [5.0; 8.0] - @test A + B == itensor([4.0; 6.0], i) - @test axpy!(2.0, A, B) == itensor(c, i) - a = [1.0; 2.0] - b = [3.0; 4.0] - A = itensor(a, i) - B = itensor(b, i) - c = [5.0; 8.0] - @test (B .+= 2.0 .* A) == itensor(c, i) - a = [1.0; 2.0] - b = [3.0; 4.0] - A = itensor(a, i) - B = itensor(b, i) - c = [8.0; 12.0] - @test (A .= 2.0 .* A .+ 2.0 .* B) == itensor(c, i) - a = [1.0; 2.0] - A = itensor(a, i) - B = ITensor(2.0) - @test_throws DimensionMismatch A + B - a = [1.0; 2.0] - A = itensor(a, i) - B = ITensor() - C = A + B - @test C ≈ A - A[1] = 5 - @test C[1] == 5 - a = [1.0; 2.0] - A = itensor(a, i) - B = ITensor(0) - @test_throws DimensionMismatch A + B - a = [1.0; 2.0] - A = itensor(a, i) - B = ITensor(ComplexF64) - @test_throws DimensionMismatch A + B - a = [1.0; 2.0] - A = itensor(a, i) - B = ITensor(Float64) - @test_throws DimensionMismatch A + B - a = [1.0; 2.0] - a = [1.0; 2.0] - A = itensor(a, i) - B = ITensor(2.0) - @test_throws DimensionMismatch A - B - a = [1.0; 2.0] - A = itensor(a, i) - B = ITensor() - C = A - B - @test C ≈ A - A[1] = 5 - @test C[1] == 5 - #@test_throws DimensionMismatch A - B - a = [1.0; 2.0] - A = itensor(a, i) - B = ITensor(2.0) - @test_throws DimensionMismatch B - A - a = [1.0; 2.0] - A = itensor(a, i) - B = ITensor(Float64) - @test_throws DimensionMismatch B - A - a = [1.0; 2.0] - A = itensor(a, i) - B = ITensor() - C = B - A - @test C ≈ -A - A[1] = 5 - @test C[1] == -1 - a = [1.0; 2.0] - b = [3.0; 4.0] - A = itensor(a, i) - B = itensor(b, i) - c = [2.0; 2.0] - @test B - A == itensor(c, i) - @test A - B == -itensor(c, i) - end - - @testset "mul! and rmul!" 
begin - i = Index(2, "i") - a = [1.0; 2.0] - b = [2.0; 4.0] - A = itensor(a, i) - A2, A3 = copy(A), copy(A) - B = itensor(b, i) - @test mul!(A2, A, 2.0) == B == (A2 .= 0 .* A2 .+ 2 .* A) - @test rmul!(A, 2.0) == B == ITensors.scale!(A3, 2) - #make sure mul! works also when A2 has NaNs in it - A = itensor([1.0; 2.0], i) - A2 = itensor([NaN; 1.0], i) - @test mul!(A2, A, 2.0) == B - - i = Index(2, "i") - j = Index(2, "j") - M = [1 2; 3 4] - A = itensor(M, i, j) - N = 2 * M - B = itensor(N, j, i) - @test ITensors.data(mul!(B, A, 2.0)) == 2.0 * vec(transpose(M)) - end - - @testset "Construct from Array" begin - i = Index(2, "index_i") - j = Index(2, "index_j") - - M = [ - 1.0 2 - 3 4 - ] - T = itensor(M, i, j) - T[i => 1, j => 1] = 3.3 - @test M[1, 1] == 3.3 - @test T[i => 1, j => 1] == 3.3 - @test storage(T) isa NDTensors.Dense{Float64} - - M = [ - 1 2 - 3 4 - ] - T = itensor(M, i, j) - T[i => 1, j => 1] = 3.3 - @test M[1, 1] == 1 - @test T[i => 1, j => 1] == 3.3 - @test storage(T) isa NDTensors.Dense{Float64} - - M = [ - 1 2 - 3 4 - ] - T = itensor(Int, M, i, j) - T[i => 1, j => 1] = 6 - @test M[1, 1] == 6 - @test T[i => 1, j => 1] == 6 - @test storage(T) isa NDTensors.Dense{Int} - - # This version makes a copy - M = [ - 1.0 2 - 3 4 - ] - T = ITensor(M, i, j) - T[i => 1, j => 1] = 3.3 - @test M[1, 1] == 1 - @test T[i => 1, j => 1] == 3.3 - - # Empty indices - A = randn(1) - T = itensor(A, Index[]) - @test A[] == T[] - T = itensor(A, Index[], Index[]) - @test A[] == T[] - T = itensor(A, Any[]) - @test A[] == T[] - - A = randn(1, 1) - T = itensor(A, Index[]) - @test A[] == T[] - T = itensor(A, Index[], Index[]) - @test A[] == T[] - T = itensor(A, Any[], Any[]) - @test A[] == T[] - - @test_throws ErrorException itensor(rand(1), Int[1]) - end - - @testset "Construct from AbstractArray" begin - i = Index(2, "index_i") - j = Index(2, "index_j") - - X = [ - 1.0 2 0 - 3 4 0 - 0 0 0 - ] - M = @view X[1:2, 1:2] - T = itensor(M, i, j) - T[i => 1, j => 1] = 3.3 - @test M[1, 1] == 3.3 - @test T[i => 1, j => 1] == 3.3 - @test storage(T) isa NDTensors.Dense{Float64} - end - - @testset "ITensor Array constructor view behavior" begin - d = 2 - i = Index(d) - - # view - A = randn(Float64, d, d) - T = itensor(A, i', dag(i)) - @test storage(T) isa NDTensors.Dense{Float64} - A[1, 1] = 2.0 - @test T[1, 1] == 2.0 - - # view - A = rand(Int, d, d) - T = itensor(Int, A, i', dag(i)) - @test storage(T) isa NDTensors.Dense{Int} - A[1, 1] = 2 - @test T[1, 1] == 2 - - # no view - A = rand(Int, d, d) - T = itensor(A, i', dag(i)) - @test storage(T) isa NDTensors.Dense{Float64} - A[1, 1] = 2 - @test T[1, 1] ≠ 2 - - # no view - A = randn(Float64, d, d) - T = ITensor(A, i', dag(i)) - @test storage(T) isa NDTensors.Dense{Float64} - A[1, 1] = 2 - @test T[1, 1] ≠ 2 - - # no view - A = rand(Int, d, d) - T = ITensor(Int, A, i', dag(i)) - @test storage(T) isa NDTensors.Dense{Int} - A[1, 1] = 2 - @test T[1, 1] ≠ 2 - - # no view - A = rand(Int, d, d) - T = ITensor(A, i', dag(i)) - @test storage(T) isa NDTensors.Dense{Float64} - A[1, 1] = 2 - @test T[1, 1] ≠ 2 - end - - @testset "Convert to Array" begin - i = Index(2, "i") - j = Index(3, "j") - T = random_itensor(i, j) - - A = Array{Float64}(T, i, j) - for I in CartesianIndices(T) - @test A[I] == T[I] - end - - T11 = T[1, 1] - T[1, 1] = 1 - @test T[1, 1] == 1 - @test T11 != 1 - @test A[1, 1] == T11 - - A = Matrix{Float64}(T, i, j) - for I in CartesianIndices(T) - @test A[I] == T[I] - end - - A = Matrix(T, i, j) - for I in CartesianIndices(T) - @test A[I] == T[I] - end - - A = Array(T, i, j) - for I in CartesianIndices(T)
- @test A[I] == T[I] - end - - T = random_itensor(i) - A = Vector(T) - for I in CartesianIndices(T) - @test A[I] == T[I] - end - end - - @testset "Test isapprox for ITensors" begin - m, n = rand(0:20, 2) - i = Index(m) - j = Index(n) - realData = rand(m, n) - complexData = complex(realData) - A = itensor(realData, i, j) - B = itensor(complexData, i, j) - @test A ≈ B - @test B ≈ A - A = permute(A, j, i) - @test A ≈ B - @test B ≈ A - end - - @testset "permute" begin - i = Index(2) - A = ITensor(i, i') - Ap = permute(A, i, i') - A[i => 1, i' => 1] = 1 - @test A[i => 1, i' => 1] == 1 - @test Ap[i => 1, i' => 1] == 0 - end - - @testset "permute, NeverAlias()/AllowAlias()" begin - i = Index(2) - A = ITensor(i, i') - Ap = permute(A, i, i') - A[i => 1, i' => 1] = 1 - @test A[i => 1, i' => 1] == 1 - @test Ap[i => 1, i' => 1] == 0 - - i = Index(2) - A = ITensor(i, i') - Ap = permute(ITensors.NeverAlias(), A, i, i') - A[i => 1, i' => 1] = 1 - @test A[i => 1, i' => 1] == 1 - @test Ap[i => 1, i' => 1] == 0 - - i = Index(2, "index_i") - j = Index(4, "index_j") - k = Index(3, "index_k") - T = random_itensor(i, j, k) - - # NeverAlias()/allow_alias = false by default - pT_noalias_1 = permute(T, i, j, k) - pT_noalias_1[1, 1, 1] = 12 - @test T[1, 1, 1] != pT_noalias_1[1, 1, 1] - - pT_noalias_2 = permute(T, i, j, k; allow_alias=false) - pT_noalias_2[1, 1, 1] = 12 - @test T[1, 1, 1] != pT_noalias_2[1, 1, 1] - - cT = copy(T) - pT_alias = permute(cT, i, j, k; allow_alias=true) - pT_alias[1, 1, 1] = 12 - @test cT[1, 1, 1] == pT_alias[1, 1, 1] - - cT = copy(T) - pT_alias = permute(ITensors.AllowAlias(), cT, i, j, k) - pT_alias[1, 1, 1] = 12 - @test cT[1, 1, 1] == pT_alias[1, 1, 1] - end - - @testset "ITensor tagging and priming" begin - s1 = Index(2, "Site,s=1") - s2 = Index(2, "Site,s=2") - l = Index(3, "Link") - ltmp = settags(l, "Temp") - A1 = random_itensor(s1, l, l') - A2 = random_itensor(s2, l', l'') - @testset "firstind(::ITensor,::String)" begin - @test s1 == firstind(A1, "Site") - @test s1 == firstind(A1, "s=1") - @test s1 == firstind(A1, "s=1,Site") - @test l == firstind(A1; tags="Link", plev=0) - @test l' == firstind(A1; plev=1) - @test l' == firstind(A1; tags="Link", plev=1) - @test s2 == firstind(A2, "Site") - @test s2 == firstind(A2, "s=2") - @test s2 == firstind(A2; plev=0) - @test s2 == firstind(A2; tags="s=2", plev=0) - @test s2 == firstind(A2; tags="Site", plev=0) - @test s2 == firstind(A2; tags="s=2,Site", plev=0) - @test l' == firstind(A2; plev=1) - @test l' == firstind(A2; tags="Link", plev=1) - @test l'' == firstind(A2; plev=2) - @test l'' == firstind(A2; tags="Link", plev=2) - end - @testset "addtags(::ITensor,::String,::String)" begin - s1u = addtags(s1, "u") - lu = addtags(l, "u") - - A1u = addtags(A1, "u") - @test hasinds(A1u, s1u, lu, lu') - - A1u = addtags(A1, "u", "Link") - @test hasinds(A1u, s1, lu, lu') - - A1u = addtags(A1, "u"; tags="Link") - @test hasinds(A1u, s1, lu, lu') - - A1u = addtags(A1, "u"; plev=0) - @test hasinds(A1u, s1u, lu, l') - - A1u = addtags(A1, "u"; tags="Link", plev=0) - @test hasinds(A1u, s1, lu, l') - - A1u = addtags(A1, "u"; tags="Link", plev=1) - @test hasinds(A1u, s1, l, lu') - end - @testset "removetags(::ITensor,::String,::String)" begin - A2r = removetags(A2, "Site") - @test hasinds(A2r, removetags(s2, "Site"), l', l'') - - A2r = removetags(A2, "Link"; plev=1) - @test hasinds(A2r, s2, removetags(l, "Link")', l'') - - A2r = replacetags(A2, "Link", "Temp"; plev=1) - @test hasinds(A2r, s2, ltmp', l'') - end - @testset
"replacetags(::ITensor,::String,::String)" begin - s2tmp = replacetags(s2, "Site", "Temp") - - @test s2tmp == replacetags(s2, "Site" => "Temp") - - ltmp = replacetags(l, "Link", "Temp") - - A2r = replacetags(A2, "Site", "Temp") - @test hasinds(A2r, s2tmp, l', l'') - - A2r = replacetags(A2, "Site" => "Temp") - @test hasinds(A2r, s2tmp, l', l'') - - A2r = replacetags(A2, "Link", "Temp") - @test hasinds(A2r, s2, ltmp', ltmp'') - - A2r = replacetags(A2, "Site" => "Link", "Link" => "Site") - @test hasinds( - A2r, - replacetags(s2, "Site" => "Link"), - replacetags(l', "Link" => "Site"), - replacetags(l'', "Link" => "Site"), - ) - end - @testset "prime(::ITensor,::String)" begin - A2p = prime(A2) - @test A2p == A2' - @test hasinds(A2p, s2', l'', l''') - - A2p = prime(A2, 2) - A2p = A2'' - @test hasinds(A2p, s2'', l''', l'''') - - A2p = prime(A2, "s=2") - @test hasinds(A2p, s2', l', l'') - end - - @testset "mapprime" begin - @test hasinds(mapprime(A2, 1, 7), s2, l^7, l'') - @test hasinds(mapprime(A2, 0, 1), s2', l', l'') - end - - @testset "replaceprime" begin - @test hasinds(mapprime(A2, 1 => 7), s2, l^7, l'') - @test hasinds(mapprime(A2, 0 => 1), s2', l', l'') - @test hasinds(mapprime(A2, 1 => 7, 0 => 1), s2', l^7, l'') - @test hasinds(mapprime(A2, 1 => 2, 2 => 1), s2, l'', l') - @test hasinds(mapprime(A2, 1 => 0, 0 => 1), s2', l, l'') - end - - @testset "setprime" begin - @test hasinds(setprime(A2, 2, s2), s2'', l', l'') - @test hasinds(setprime(A2, 0, l''), s2, l', l) - end - - @testset "swapprime" begin - @test hasinds(swapprime(A2, 1, 3), l''', s2, l'') - end - end - - @testset "ITensor other index operations" begin - s1 = Index(2, "Site,s=1") - s2 = Index(2, "Site,s=2") - l = Index(3, "Link") - A1 = random_itensor(s1, l, l') - A2 = random_itensor(s2, l', l'') - - @testset "ind(::ITensor)" begin - @test ind(A1, 1) == s1 - @test ind(A1, 2) == l - end - - @testset "replaceind and replaceinds" begin - rA1 = replaceind(A1, s1, s2) - @test hasinds(rA1, s2, l, l') - @test hasinds(A1, s1, l, l') - - @test replaceinds(A1, [] => []) == A1 - @test replaceinds(A1, ()) == A1 - @test replaceinds(A1) == A1 - - # Pair notation (like Julia's replace function) - rA1 = replaceind(A1, s1 => s2) - @test hasinds(rA1, s2, l, l') - @test hasinds(A1, s1, l, l') - - replaceind!(A1, s1, s2) - @test hasinds(A1, s2, l, l') - - rA2 = replaceinds(A2, (s2, l'), (s1, l)) - @test hasinds(rA2, s1, l, l'') - @test hasinds(A2, s2, l', l'') - - # Pair notation (like Julia's replace function) - rA2 = replaceinds(A2, s2 => s1, l' => l) - @test hassameinds(rA2, (s1, l, l'')) - @test hassameinds(A2, (s2, l', l'')) - - # Test ignoring indices that don't exist - rA2 = replaceinds(A2, s1 => l, l' => l) - @test hassameinds(rA2, (s2, l, l'')) - @test hassameinds(A2, (s2, l', l'')) - - replaceinds!(A2, (s2, l'), (s1, l)) - @test hasinds(A2, s1, l, l'') - end - - @testset "replaceinds fixed errors" begin - l = Index(3; tags="l") - s = Index(2; tags="s") - l̃, s̃ = sim(l), sim(s) - A = random_itensor(s, l) - à = replaceinds(A, (l, s), (l̃, s̃)) - @test ind(A, 1) == s - @test ind(A, 2) == l - @test ind(Ã, 1) == s̃ - @test ind(Ã, 2) == l̃ - @test_throws ErrorException replaceinds(A, (l, s), (s̃, l̃)) - end - - @testset "swapinds and swapinds!" 
begin - s = Index(2) - t = Index(2) - Ast = random_itensor(s, s', t, t') - Ats = swapinds(Ast, (s, s'), (t, t')) - @test Ast != Ats - @test Ast == swapinds(Ats, (s, s'), (t, t')) - - swapinds!(Ats, (s, s'), (t, t')) - @test Ast == Ats - end - end #End "ITensor other index operations" - - @testset "Converting Real and Complex Storage" begin - @testset "Add Real and Complex" for eltype in (Float32, Float64) - i = Index(2, "i") - j = Index(2, "j") - TC = random_itensor(complex(eltype), i, j) - TR = random_itensor(eltype, i, j) - - S1 = TC + TR - S2 = TR + TC - @test typeof(storage(S1)) == NDTensors.Dense{complex(eltype),Vector{complex(eltype)}} - @test typeof(storage(S2)) == NDTensors.Dense{complex(eltype),Vector{complex(eltype)}} - for ii in 1:dim(i), jj in 1:dim(j) - @test S1[i => ii, j => jj] ≈ TC[i => ii, j => jj] + TR[i => ii, j => jj] - @test S2[i => ii, j => jj] ≈ TC[i => ii, j => jj] + TR[i => ii, j => jj] - end - end - end - - @testset "ITensor, NDTensors.Dense{$SType} storage" for SType in ( - Float32, Float64, ComplexF32, ComplexF64 - ) - mi, mj, mk, ml, mα = 2, 3, 4, 5, 6 - i = Index(mi, "i") - j = Index(mj, "j") - k = Index(mk, "k") - l = Index(ml, "l") - α = Index(mα, "alpha") - - atol = eps(real(SType)) * 500 - - @testset "Set and get values with IndexVals" begin - A = ITensor(SType, i, j, k) - for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k) - A[k => kk, j => jj, i => ii] = invdigits(SType, ii, jj, kk) - end - for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k) - @test A[j => jj, k => kk, i => ii] == invdigits(SType, ii, jj, kk) - end - @test A[1] == invdigits(SType, 1, 1, 1) - end - @testset "Test permute(ITensor,Index...)" begin - A = random_itensor(SType, i, k, j) - permA = permute(A, k, j, i) - @test k == inds(permA)[1] - @test j == inds(permA)[2] - @test i == inds(permA)[3] - for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k) - @test A[k => kk, i => ii, j => jj] == permA[i => ii, j => jj, k => kk] - end - # TODO: I think this was doing slicing, but what is the output - # of slicing an ITensor? - #@testset "getindex and setindex with vector of IndexVals" begin - # k_inds = [k=>kk for kk ∈ 1:dim(k)] - # for ii ∈ 1:dim(i), jj ∈ 1:dim(j) - # @test A[k_inds,i=>ii,j=>jj]==permA[i=>ii,j=>jj,k_inds...] - # end - # for ii ∈ 1:dim(i), jj ∈ 1:dim(j) - # A[k_inds,i=>ii,j=>jj]=collect(1:length(k_inds)) - # end - # permA = permute(A,k,j,i) - # for ii ∈ 1:dim(i), jj ∈ 1:dim(j) - # @test A[k_inds,i=>ii,j=>jj]==permA[i=>ii,j=>jj,k_inds...]
- # end - #end - end - @testset "Set and get values with Ints" begin - A = ITensor(SType, i, j, k) - A = permute(A, k, i, j) - for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k) - A[kk, ii, jj] = invdigits(SType, ii, jj, kk) - end - A = permute(A, i, j, k) - for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k) - @test A[ii, jj, kk] == invdigits(SType, ii, jj, kk) - end - end - @testset "Test scalar(::ITensor)" begin - x = SType(34) - A = ITensor(x) - @test x == scalar(A) - A = ITensor(SType, i, j, k) - @test_throws DimensionMismatch scalar(A) - end - @testset "Test norm(ITensor)" begin - A = random_itensor(SType, i, j, k) - @test norm(A) ≈ sqrt(scalar(dag(A) * A)) - end - @testset "Test dag(::Number)" begin - x = 1.2 + 2.3im - @test dag(x) == 1.2 - 2.3im - x = 1.4 - @test dag(x) == 1.4 - end - @testset "Test add ITensors" begin - A = random_itensor(SType, i, j, k) - B = random_itensor(SType, k, i, j) - C = A + B - for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k) - @test C[i => ii, j => jj, k => kk] == - A[j => jj, i => ii, k => kk] + B[i => ii, k => kk, j => jj] - end - @test array(permute(C, i, j, k)) == - array(permute(A, i, j, k)) + array(permute(B, i, j, k)) - end - - @testset "Test array" begin - A = random_itensor(SType, i, j, k) - B = random_itensor(SType, i, j) - C = random_itensor(SType, i) - - @test array(permute(A, j, i, k)) == array(A, j, i, k) - @test_throws DimensionMismatch matrix(A, j, i, k) - @test_throws DimensionMismatch vector(A, j, i, k) - - @test array(permute(B, j, i)) == array(B, j, i) - @test matrix(permute(B, j, i)) == matrix(B, j, i) - @test_throws DimensionMismatch vector(B, j, i) - - @test array(permute(C, i)) == array(C, i) - @test vector(permute(C, i)) == vector(C, i) - @test vector(C) == vector(C, i) - @test_throws DimensionMismatch matrix(C, i) - end - - @testset "Test factorizations of an ITensor" begin - A = random_itensor(SType, i, j, k, l) - - @testset "Test SVD of an ITensor" begin - U, S, V, spec, u, v = svd(A, (j, l)) - @test storage(S) isa NDTensors.Diag{real(SType),Vector{real(SType)}} - @test A ≈ U * S * V - @test U * dag(prime(U, u)) ≈ δ(SType, u, u') atol = atol - @test V * dag(prime(V, v)) ≈ δ(SType, v, v') atol = atol - end - - @testset "Test SVD of an ITensor with different algorithms" begin - U, S, V, spec, u, v = svd(A, j, l; alg="recursive") - @test storage(S) isa NDTensors.Diag{real(SType),Vector{real(SType)}} - @test A ≈ U * S * V - @test U * dag(prime(U, u)) ≈ δ(SType, u, u') atol = atol - @test V * dag(prime(V, v)) ≈ δ(SType, v, v') atol = atol - - U, S, V, spec, u, v = svd(A, j, l; alg="divide_and_conquer") - @test storage(S) isa NDTensors.Diag{real(SType),Vector{real(SType)}} - @test A ≈ U * S * V - @test U * dag(prime(U, u)) ≈ δ(SType, u, u') atol = atol - @test V * dag(prime(V, v)) ≈ δ(SType, v, v') atol = atol - - U, S, V, spec, u, v = svd(A, j, l; alg="qr_iteration") - @test storage(S) isa NDTensors.Diag{real(SType),Vector{real(SType)}} - @test A ≈ U * S * V - @test U * dag(prime(U, u)) ≈ δ(SType, u, u') atol = atol - @test V * dag(prime(V, v)) ≈ δ(SType, v, v') atol = atol - - @test_throws ErrorException svd(A, j, l; alg="bad_alg") - end - - #@testset "Test SVD of a DenseTensor internally" begin - # Lis = commoninds(A,IndexSet(j,l)) - # Ris = uniqueinds(A,Lis) - # Lpos,Rpos = NDTensors.getperms(inds(A),Lis,Ris) - # # XXX this function isn't used anywhere in ITensors - # # (it is no longer needed because of the combiner) - # Ut,St,Vt,spec = svd(NDTensors.tensor(A), Lpos, Rpos) - # U = itensor(Ut) - # S = itensor(St) - # V = 
itensor(Vt) - # u = commonind(U, S) - # v = commonind(V, S) - # @test storage(S) isa NDTensors.Diag{Float64,Vector{Float64}} - # @test A≈U*S*V - # @test U*dag(prime(U,u))≈δ(SType,u,u') atol = atol - # @test V*dag(prime(V,v))≈δ(SType,v,v') atol = atol - #end - - @testset "Test SVD truncation" begin - ii = Index(4) - jj = Index(4) - T = random_itensor(ComplexF64, ii, jj) - U, S, V = svd(T, ii; maxdim=2) - u, s, v = svd(matrix(T)) - @test norm(U * S * V - T) ≈ sqrt(s[3]^2 + s[4]^2) - end - - @testset "Test QR decomposition of an ITensor" begin - Q, R = qr(A, (i, l)) - @test eltype(Q) <: eltype(A) - @test eltype(R) <: eltype(A) - q = commonind(Q, R) - @test A ≈ Q * R atol = atol - @test Q * dag(prime(Q, q)) ≈ δ(SType, q, q') atol = atol - - Q, R = qr(A, (i, j, k, l)) - @test eltype(Q) <: eltype(A) - @test eltype(R) <: eltype(A) - q = commonind(Q, R) - @test hassameinds(Q, (q, i, j, k, l)) - @test hassameinds(R, (q,)) - @test A ≈ Q * R atol = atol - @test Q * dag(prime(Q, q)) ≈ δ(SType, q, q') atol = atol - end - - @testset "Regression test for QR decomposition of an ITensor with all indices on one side" begin - a = Index(2, "a") - b = Index(2, "b") - Vab = random_itensor(a, b) - Q, R = qr(Vab, (a, b)) - @test hasinds(Q, (a, b)) - @test Vab ≈ Q * R atol = atol - end - - @testset "Test polar decomposition of an ITensor" begin - U, P, u = polar(A, (k, l)) - - @test eltype(U) == eltype(A) - @test eltype(P) == eltype(A) - - @test A ≈ U * P atol = atol - - #Note: this is only satisfied when left dimensions - #are greater than right dimensions - UUᵀ = U * dag(prime(U, u)) - - # TODO: use a combiner to combine the u indices to make - # this test simpler - for ii in 1:dim(u[1]), jj in 1:dim(u[2]), iip in 1:dim(u[1]), jjp in 1:dim(u[2]) - val = UUᵀ[u[1] => ii, u[2] => jj, u[1]' => iip, u[2]' => jjp] - if ii == iip && jj == jjp - @test val ≈ one(SType) atol = atol - else - @test val ≈ zero(SType) atol = atol - end - end - end - - @testset "Test Hermitian eigendecomposition of an ITensor" begin - is = IndexSet(i, j) - T = random_itensor(SType, is..., prime(is)...) 
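- # Symmetrize: adding the dagger with prime levels 0 and 1 swapped makes T Hermitian - # with respect to the (is, is') pairing, which is what ishermitian=true assumes below.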
- T = T + swapprime(dag(T), 0, 1) - D, U, spec, l, r = eigen(T; ishermitian=true) - @test T ≈ prime(U) * D * dag(U) atol = atol - UUᴴ = U * prime(dag(U), r) - @test UUᴴ ≈ δ(r, r') - end - - @testset "Test factorize of an ITensor" begin - @testset "factorize default" begin - L, R = factorize(A, (j, l)) - l = commonind(L, R) - @test A ≈ L * R - @test L * dag(prime(L, l)) ≈ δ(SType, l, l') - @test R * dag(prime(R, l)) ≉ δ(SType, l, l') - end - - @testset "factorize ortho left" begin - L, R = factorize(A, (j, l); ortho="left") - l = commonind(L, R) - @test A ≈ L * R - @test L * dag(prime(L, l)) ≈ δ(SType, l, l') - @test R * dag(prime(R, l)) ≉ δ(SType, l, l') - end - - @testset "factorize ortho right" begin - L, R = factorize(A, (j, l); ortho="right") - l = commonind(L, R) - @test A ≈ L * R - @test L * dag(prime(L, l)) ≉ δ(SType, l, l') - @test R * dag(prime(R, l)) ≈ δ(SType, l, l') - end - - @testset "factorize ortho none" begin - L, R = factorize(A, (j, l); ortho="none") - l = commonind(L, R) - @test A ≈ L * R - @test L * dag(prime(L, l)) ≉ δ(SType, l, l') - @test R * dag(prime(R, l)) ≉ δ(SType, l, l') - end - - @testset "factorize when ITensor has primed indices" begin - A = random_itensor(i, i') - L, R = factorize(A, i) - l = commonind(L, R) - @test A ≈ L * R - @test L * dag(prime(L, l)) ≈ δ(SType, l, l') - @test R * dag(prime(R, l)) ≉ δ(SType, l, l') - - @test_throws ErrorException factorize(A, i; which_decomp="svd", svd_alg="bad_alg") - end - end # End factorize tests - - @testset "Test error for bad decomposition inputs" begin - @test_throws ErrorException svd(A) - @test_throws ErrorException factorize(A) - @test_throws ErrorException eigen(A, inds(A), inds(A)) - end - end - end # End Dense storage test - - @testset "dag copy behavior" begin - i = Index(4, "i") - - v1 = random_itensor(i) - cv1 = dag(v1) - cv1[1] = -1 - @test v1[1] ≈ cv1[1] - - v2 = random_itensor(i) - cv2 = dag(ITensors.NeverAlias(), v2) - orig_elt = v2[1] - cv2[1] = -1 - @test v2[1] ≈ orig_elt - - v2 = random_itensor(i) - cv2 = dag(v2; allow_alias=false) - orig_elt = v2[1] - cv2[1] = -1 - @test v2[1] ≈ orig_elt - - v3 = random_itensor(ComplexF64, i) - orig_elt = v3[1] - cv3 = dag(v3) - cv3[1] = -1 - @test v3[1] ≈ orig_elt - - v4 = random_itensor(ComplexF64, i) - cv4 = dag(ITensors.NeverAlias(), v4) - orig_elt = v4[1] - cv4[1] = -1 - @test v4[1] ≈ orig_elt - end - - @testset "filter ITensor indices" begin - i = Index(2, "i") - A = random_itensor(i, i') - @test hassameinds(filterinds(A; plev=0), (i,)) - @test hassameinds(inds(A; plev=0), (i,)) - is = inds(A) - @test hassameinds(filterinds(is; plev=0), (i,)) - @test hassameinds(inds(is; plev=0), (i,)) - end - - @testset "product/apply" begin - s1 = Index(2, "s1") - s2 = Index(2, "s2") - s3 = Index(2, "s3") - - rA = Index(3, "rA") - lA = Index(3, "lA") - - rB = Index(3, "rB") - lB = Index(3, "lB") - - # operator * operator - A = random_itensor(s1', s2', dag(s1), dag(s2), lA, rA) - B = random_itensor(s1', s2', dag(s1), dag(s2), lB, rB) - AB = product(A, B) - @test hassameinds(AB, (s1', s2', s1, s2, lA, rA, lB, rB)) - @test AB ≈ mapprime(prime(A; inds=(s1', s2', s1, s2)) * B, 2 => 1) - - # operator * operator, common dangling indices - A = random_itensor(s1', s2', dag(s1), dag(s2), lA, rA) - B = random_itensor(s1', s2', dag(s1), dag(s2), dag(lA), dag(rA)) - AB = product(A, B) - @test hassameinds(AB, (s1', s2', s1, s2)) - @test AB ≈ mapprime(prime(A; inds=(s1', s2', s1, s2)) * B, 2 => 1) - - # operator * operator, apply_dag, common dangling indices - A = random_itensor(s1', 
s2', dag(s1), dag(s2), lA, rA) - B = random_itensor(s1', s2', dag(s1), dag(s2), lB, rB) - ABAdag = product(A, B; apply_dag=true) - AB = mapprime(prime(A; inds=(s1', s2', s1, s2)) * B, 2 => 1) - Adag = swapprime(dag(A), 0 => 1; inds=(s1', s2', s1, s2)) - @test hassameinds(ABAdag, (s1', s2', s1, s2, lB, rB)) - @test ABAdag ≈ mapprime(prime(AB; inds=(s1', s2', s1, s2)) * Adag, 2 => 1) - - # operator * operator, more complicated - A = random_itensor(s1', s2', dag(s1), dag(s2), lA, rA) - B = random_itensor(s1', s2', s3', dag(s1), dag(s2), dag(s3), lB, rB, dag(rA)) - AB = product(A, B) - @test hassameinds(AB, (s1', s2', s3', s1, s2, s3, lA, lB, rB)) - @test AB ≈ mapprime(prime(A; inds=(s1', s2', s1, s2)) * B, 2 => 1) - - # state * operator (1) - A = random_itensor(dag(s1), dag(s2), lA, rA) - B = random_itensor(s1', s2', dag(s1), dag(s2), lB, rB) - AB = product(A, B) - @test hassameinds(AB, (s1, s2, lA, rA, lB, rB)) - @test AB ≈ mapprime(prime(A; inds=(s1, s2)) * B) - - # state * operator (2) - A = random_itensor(dag(s1'), dag(s2'), lA, rA) - B = random_itensor(s1', s2', dag(s1), dag(s2), lB, rB) - @test_throws ErrorException product(A, B) - - # operator * state (1) - A = random_itensor(s1', s2', dag(s1), dag(s2), lA, rA) - B = random_itensor(s1', s2', lB, rB) - @test_throws ErrorException product(A, B) - - # operator * state (2) - A = random_itensor(s1', s2', dag(s1), dag(s2), lA, rA) - B = random_itensor(s1, s2, lB, rB, dag(lA)) - AB = product(A, B) - @test hassameinds(AB, (s1, s2, rA, lB, rB)) - @test AB ≈ noprime(A * B) - - # state * state (1) - A = random_itensor(dag(s1), dag(s2), lA, rA) - B = random_itensor(s1, s2, lB, rB) - AB = product(A, B) - @test hassameinds(AB, (lA, rA, lB, rB)) - @test AB ≈ A * B - - # state * state (2) - A = random_itensor(dag(s1'), dag(s2'), lA, rA) - B = random_itensor(s1, s2, lB, dag(rA)) - AB = product(A, B) - @test hassameinds(AB, (s1', s2', s1, s2, lA, lB)) - @test AB ≈ A * B - - # state * state (3) - A = random_itensor(dag(s1'), dag(s2'), lA, rA) - B = random_itensor(s1, s2, lB, rB) - @test_throws ErrorException product(A, B) - - # state * state (4) - A = random_itensor(dag(s1), dag(s2), lA, rA) - B = random_itensor(s1', s2', lB, rB) - @test_throws ErrorException product(A, B) - - # state * state (5) - A = random_itensor(dag(s1'), dag(s2'), lA, rA) - B = random_itensor(s1', s2', lB, rB) - @test_throws ErrorException product(A, B) - end - - @testset "inner ($ElType)" for ElType in (Float64, ComplexF64) - i = Index(2) - j = Index(2) - A = random_itensor(ElType, i', j', i, j) - x = random_itensor(ElType, i, j) - y = random_itensor(ElType, i, j) - @test inner(x, y) ≈ (dag(x) * y)[] - @test inner(x', A, y) ≈ (dag(x)' * A * y)[] - # No automatic priming - @test_throws DimensionMismatch inner(x, A, y) - end - - @testset "hastags" begin - i = Index(2, "i, x") - j = Index(2, "j, x") - A = random_itensor(i, j) - @test hastags(A, "i") - @test anyhastags(A, "i") - @test !allhastags(A, "i") - @test allhastags(A, "x") - end - - @testset "directsum" for space in (identity, d -> [QN(0) => d, QN(1) => d]), - index_op in (identity, dag) - - x = Index(space(2), "x") - i1 = Index(space(3), "i1") - j1 = Index(space(4), "j1") - i2 = Index(space(5), "i2") - j2 = Index(space(6), "j2") - - A1 = random_itensor(i1, x, j1) - A2 = random_itensor(x, j2, i2) - - # Generate indices automatically. - # Reverse the arrow directions in the QN case as a - # regression test for: - # https://github.com/ITensor/ITensors.jl/pull/1178. 
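# `directsum` embeds its arguments as diagonal blocks along the selected indices:
# each returned index has the summed dimension, with A1 filling the leading block and
# A2 the trailing block (verified element-by-element in the loop below).
# A minimal standalone sketch (hypothetical indices, not part of the original test):
#   j1, j2 = Index(2, "j1"), Index(3, "j2")
#   B1, B2 = random_itensor(j1), random_itensor(j2)
#   S, (s,) = directsum(B1 => (j1,), B2 => (j2,))
#   dim(s) == dim(j1) + dim(j2)  # == 5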
- S1, s1 = directsum( - A1 => index_op.((i1, j1)), A2 => index_op.((i2, j2)); tags=["sum_i", "sum_j"] - ) - - # Provide indices - i1i2 = directsum(i1, i2; tags="sum_i") - j1j2 = directsum(j1, j2; tags="sum_j") - s2 = [i1i2, j1j2] - S2 = directsum(s2, A1 => index_op.((i1, j1)), A2 => index_op.((i2, j2))) - for (S, s) in zip((S1, S2), (s1, s2)) - for vx in 1:dim(x) - proj = dag(onehot(x => vx)) - A1_vx = A1 * proj - A2_vx = A2 * proj - S_vx = S * proj - for m in 1:dim(s[1]), n in 1:dim(s[2]) - if m ≤ dim(i1) && n ≤ dim(j1) - @test S_vx[s[1] => m, s[2] => n] == A1_vx[i1 => m, j1 => n] - elseif m > dim(i1) && n > dim(j1) - @test S_vx[s[1] => m, s[2] => n] == A2_vx[i2 => m - dim(i1), j2 => n - dim(j1)] - else - @test S_vx[s[1] => m, s[2] => n] == 0 - end - end - end - end - - i1, i2, j, k, l = Index.(space.((2, 3, 4, 5, 6)), ("i1", "i2", "j", "k", "l")) - - A = random_itensor(i1, i2, j) - B = random_itensor(i1, i2, k) - C = random_itensor(i1, i2, l) - D = ITensor(i1, i2, k) - F = ITensor(i1, i2, j) - - S, s = directsum(A => index_op(j), B => index_op(k)) - @test dim(s) == dim(j) + dim(k) - @test hassameinds(S, (i1, i2, s)) - - S, s = (A => index_op(j)) ⊕ (B => index_op(k)) - @test dim(s) == dim(j) + dim(k) - @test hassameinds(S, (i1, i2, s)) - - S, s = (A => index_op(j)) ⊕ (D => index_op(k)) - @test dim(s) == dim(j) + dim(k) - @test hassameinds(S, (i1, i2, s)) - - @test_throws ErrorException (F => index_op(j)) ⊕ (D => index_op(k)) - - S, s = (D => index_op(k)) ⊕ (A => index_op(j)) - @test dim(s) == dim(j) + dim(k) - @test hassameinds(S, (i1, i2, s)) - - S, s = directsum(A => index_op(j), B => index_op(k), C => index_op(l)) - @test dim(s) == dim(j) + dim(k) + dim(l) - @test hassameinds(S, (i1, i2, s)) - - @test_throws ErrorException directsum(A => index_op(i2), B => index_op(i2)) - - S, (s,) = directsum(A => (index_op(j),), B => (index_op(k),)) - @test s == uniqueind(S, A) - @test dim(s) == dim(j) + dim(k) - @test hassameinds(S, (i1, i2, s)) - - S, ss = directsum(A => index_op.((i2, j)), B => index_op.((i2, k))) - @test length(ss) == 2 - @test dim(ss[1]) == dim(i2) + dim(i2) - @test hassameinds(S, (i1, ss...)) - - S, ss = directsum(A => (index_op(j),), B => (index_op(k),), C => (index_op(l),)) - s = only(ss) - @test s == uniqueind(S, A) - @test dim(s) == dim(j) + dim(k) + dim(l) - @test hassameinds(S, (i1, i2, s)) - - S, ss = directsum( - A => index_op.((i2, i1, j)), B => index_op.((i1, i2, k)), C => index_op.((i1, i2, l)) - ) - @test length(ss) == 3 - @test dim(ss[1]) == dim(i2) + dim(i1) + dim(i1) - @test dim(ss[2]) == dim(i1) + dim(i2) + dim(i2) - @test dim(ss[3]) == dim(j) + dim(k) + dim(l) - @test hassameinds(S, ss) - end - - @testset "ishermitian" begin - s = Index(2, "s") - Sz = ITensor([0.5 0.0; 0.0 -0.5], s', s) - Sp = ITensor([0.0 1.0; 0.0 0.0], s', s) - @test ishermitian(Sz) - @test !ishermitian(Sp) - end - - @testset "convert_eltype, convert_leaf_eltype, $new_eltype" for new_eltype in - (Float32, ComplexF64) - s = Index(2) - A = random_itensor(s) - @test eltype(A) == Float64 - - Af32 = convert_eltype(new_eltype, A) - @test Af32 ≈ A - @test eltype(Af32) == new_eltype - - Af32_2 = convert_leaf_eltype(new_eltype, A) - @test eltype(Af32_2) == new_eltype - @test Af32_2 ≈ A - - As1 = [A, A] - As1_f32 = convert_leaf_eltype(new_eltype, As1) - @test length(As1_f32) == length(As1) - @test typeof(As1_f32) == typeof(As1) - @test eltype(As1_f32[1]) == new_eltype - @test eltype(As1_f32[2]) == new_eltype - - As2 = [[A, A], [A]] - As2_f32 = convert_leaf_eltype(new_eltype, As2) - @test 
length(As2_f32) == length(As2) - @test typeof(As2_f32) == typeof(As2) - @test eltype(As2_f32[1][1]) == new_eltype - @test eltype(As2_f32[1][2]) == new_eltype - @test eltype(As2_f32[2][1]) == new_eltype - end - - @testset "nullspace $eltype" for (ss, sl, sr) in [ - ([QN(-1) => 2, QN(1) => 3], [QN(-1) => 2], [QN(0) => 3]), (5, 2, 3) - ], - eltype in (Float32, Float64, ComplexF32, ComplexF64), - nullspace_kwargs in ((;),) - #nullspace_kwargs in ((; atol=eps(real(eltype)) * 100), (;)) - - s, l, r = Index.((ss, sl, sr), ("s", "l", "r")) - A = random_itensor(eltype, dag(l), s, r) - N = nullspace(A, dag(l); nullspace_kwargs...) - @test Base.eltype(N) === eltype - n = uniqueind(N, A) - @test op("I", n) ≈ N * dag(prime(N, n)) - @test hassameinds(N, (s, r, n)) - @test norm(A * N) ≈ 0 atol = eps(real(eltype)) * 100 - @test dim(l) + dim(n) == dim((s, r)) - A′, (rn,) = ITensors.directsum(A => (l,), dag(N) => (n,); tags=["⊕"]) - @test dim(rn) == dim((s, r)) - @test norm(A * dag(prime(A, l))) ≈ norm(A * dag(A′)) - end - - @testset "nullspace regression test" begin - # This is a case that failed before we raised - # the default atol value in the `nullspace` function - M = [ - 0.663934 0.713867 -0.458164 -1.79885 -0.83443 - 1.19064 -1.3474 -0.277555 -0.177408 0.408656 - ] - i = Index(2) - j = Index(5) - A = ITensor(M, i, j) - N = nullspace(A, i) - n = uniqueind(N, A) - @test dim(n) == dim(j) - dim(i) - end - - @testset "checkflux test" begin - # Calling checkflux should not error (issue #1283) - @test ITensors.checkflux(random_itensor(Index(2))) === nothing - end -end # End Dense ITensor basic functionality - -# Disable debug checking once tests are completed -ITensors.disable_debug_checks() -end diff --git a/test/base/test_itensor_scalar.jl b/test/base/test_itensor_scalar.jl deleted file mode 100644 index 26cf4ebc26..0000000000 --- a/test/base/test_itensor_scalar.jl +++ /dev/null @@ -1,67 +0,0 @@ -using ITensors -using Test - -@testset "Scalar ITensors" begin - A = ITensor(2.4) - @test storage(A) isa ITensors.Dense{Float64} - @test ndims(A) == 0 - @test order(A) == 0 - @test A[] == 2.4 - @test A[1] == 2.4 - @test scalar(A) == 2.4 - @test ITensors.symmetrystyle(A) == ITensors.NonQN() - - A[] = 3.4 - @test ndims(A) == 0 - @test order(A) == 0 - @test A[] == 3.4 - @test A[1] == 3.4 - @test scalar(A) == 3.4 - @test ITensors.symmetrystyle(A) == ITensors.NonQN() - - A[1] = 4.4 - @test ndims(A) == 0 - @test order(A) == 0 - @test A[] == 4.4 - @test A[1] == 4.4 - @test scalar(A) == 4.4 - @test ITensors.symmetrystyle(A) == ITensors.NonQN() - - A = ITensor() - @test storage(A) isa ITensors.EmptyStorage{ITensors.EmptyNumber} - @test ndims(A) == 0 - @test order(A) == 0 - @test A[] == 0.0 - @test A[1] == 0.0 - @test scalar(A) == 0.0 - @test ITensors.symmetrystyle(A) == ITensors.NonQN() - - A = ITensor() - @test storage(A) isa ITensors.EmptyStorage{ITensors.EmptyNumber} - A[] = 3.4 - @test storage(A) isa ITensors.Dense{Float64} - @test ndims(A) == 0 - @test order(A) == 0 - @test A[] == 3.4 - @test A[1] == 3.4 - @test scalar(A) == 3.4 - @test ITensors.symmetrystyle(A) == ITensors.NonQN() - - A = ITensor() - @test storage(A) isa ITensors.EmptyStorage{ITensors.EmptyNumber} - A[1] = 4.4 - @test storage(A) isa ITensors.Dense{Float64} - @test ndims(A) == 0 - @test order(A) == 0 - @test A[] == 4.4 - @test A[1] == 4.4 - @test scalar(A) == 4.4 - @test ITensors.symmetrystyle(A) == ITensors.NonQN() - - x = 2.3 - @test ITensor(fill(x, ())) == ITensor(x) - @test ITensor(fill(x, (1))) == ITensor(x) - @test ITensor(fill(x, (1, 1))) == ITensor(x) - 
@test ITensor(fill(x, (1, 1, 1))) == ITensor(x) - @test_throws ErrorException ITensor(fill(x, (2, 2))) -end diff --git a/test/base/test_itensor_scalar_contract.jl b/test/base/test_itensor_scalar_contract.jl deleted file mode 100644 index ed24027730..0000000000 --- a/test/base/test_itensor_scalar_contract.jl +++ /dev/null @@ -1,102 +0,0 @@ -using Test -using ITensors -using Random - -Random.seed!(1234) - -@testset "Test contractions with scalar-like ITensors" begin - i = Index(2, "i") - j = Index(2, "j") - k = Index(2, "k") - α = Index(1, "α") - - is = (i, j, k) - - A = random_itensor(is..., dag(α)) - B = ITensor(2, α, α', α'') - - C = A * B - @test C ≈ B[1, 1, 1] * A * ITensor(1, inds(B)) - - C = ITensor(is..., α', α'') - C .= A .* B - @test C ≈ B[1, 1, 1] * A * ITensor(1, inds(B)) - - C = ITensor(shuffle([(is..., α', α'')...])...) - C .= A .* B - @test C ≈ B[1, 1, 1] * A * ITensor(1, inds(B)) -end - -@testset "NaN in-place contraction bug regression test" begin - BlasFloats = (Float32, Float64, ComplexF32, ComplexF64) - @testset "Scalar contract, no permutation" for ElA in BlasFloats, ElB in BlasFloats - i = Index(2, "i") - j = Index(3, "j") - α = Index(1, "α") - A = random_itensor(ElA, i, j, α') - B = random_itensor(ElB, dag(α)', α) - R = ITensor(promote_type(ElA, ElB), i, j, α) - - R .= NaN - @test any(isnan, R) - - R .= A .* B - @test !any(isnan, R) - @test array(R) ≈ array(A) * array(B)[] - - R .= NaN - @test any(isnan, R) - - R .= B .* A - @test !any(isnan, R) - @test array(R) ≈ array(A) * array(B)[] - end - - @testset "Scalar contraction, permutation" for ElA in BlasFloats, ElB in BlasFloats - i = Index(2, "i") - j = Index(3, "j") - α = Index(1, "α") - A = random_itensor(ElA, i, j, α') - B = random_itensor(ElB, dag(α)', α) - R = ITensor(promote_type(ElA, ElB), j, i, α) - - R .= NaN - @test any(isnan, R) - - R .= A .* B - @test !any(isnan, R) - @test array(R) ≈ permutedims(array(A), (2, 1, 3)) * array(B)[] - - R .= NaN - @test any(isnan, R) - - R .= B .* A - @test !any(isnan, R) - @test array(R) ≈ permutedims(array(A), (2, 1, 3)) * array(B)[] - end - - @testset "General contraction, permutation" for ElA in BlasFloats, ElB in BlasFloats - i = Index(2, "i") - j = Index(3, "j") - α = Index(2, "α") - A = random_itensor(ElA, i, j, α') - B = random_itensor(ElB, dag(α)', α) - R = ITensor(promote_type(ElA, ElB), j, i, α) - - R .= NaN - @test any(isnan, R) - - R .= A .* B - @test !any(isnan, R) - @test reshape(array(R), 6, 2) ≈ - reshape(permutedims(array(A), (2, 1, 3)), 6, 2) * array(B) - - R .= NaN - @test any(isnan, R) - - R .= B .* A - @test !any(isnan, R) - @test reshape(array(R), 6, 2) ≈ - reshape(permutedims(array(A), (2, 1, 3)), 6, 2) * array(B) - end -end diff --git a/test/base/test_itensor_slice.jl b/test/base/test_itensor_slice.jl deleted file mode 100644 index 7823ee32d4..0000000000 --- a/test/base/test_itensor_slice.jl +++ /dev/null @@ -1,59 +0,0 @@ -using ITensors -using Test -import Random: seed!
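# The slicing tests below exercise ITensor slice assignment: in
# `A[l => 1:dim(l), i => 1, k => 1:dim(k), j => 2] = a`, the `Index => value/range`
# pairs select a slice (the pairs may be given in any index order) and the right-hand
# array is written into it; the positional form `A[1, 2, :, :] = transpose(a)`
# addresses the same slice through the storage order of A's indices.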
- -seed!(12345) - -@testset "Dense ITensor slicing functionality" begin - i = Index(2) - j = Index(3) - k = Index(4) - l = Index(5) - - A₀ = random_itensor(i, j, k, l) - a = randn(dim(l), dim(k)) - - A = copy(A₀) - A[l => 1:dim(l), i => 1, k => 1:dim(k), j => 2] = a - - for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k), ll in 1:dim(l) - if ii == 1 && jj == 2 - @test A[j => 2, l => ll, i => 1, k => kk] == a[ll, kk] - else - @test A[j => jj, l => ll, i => ii, k => kk] == A₀[j => jj, l => ll, i => ii, k => kk] - end - end - - A = copy(A₀) - A[1, 2, :, :] = transpose(a) - - for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k), ll in 1:dim(l) - if ii == 1 && jj == 2 - @test A[j => 2, l => ll, i => 1, k => kk] == a[ll, kk] - else - @test A[j => jj, l => ll, i => ii, k => kk] == A₀[j => jj, l => ll, i => ii, k => kk] - end - end - - A = copy(A₀) - A[l => 1:(dim(l) - 1), i => 1, k => 1:(dim(k) - 1), j => 2] = a[1:(end - 1), 1:(end - 1)] - - for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k), ll in 1:dim(l) - if ii == 1 && jj == 2 && kk ∈ 1:(dim(k) - 1) && ll ∈ 1:(dim(l) - 1) - @test A[j => 2, l => ll, i => 1, k => kk] == a[ll, kk] - else - @test A[j => jj, l => ll, i => ii, k => kk] == A₀[j => jj, l => ll, i => ii, k => kk] - end - end - - A = copy(A₀) - A[k => :, i => 1, l => :, j => 2] = a' - - for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k), ll in 1:dim(l) - if ii == 1 && jj == 2 - @test A[j => 2, l => ll, i => 1, k => kk] == a[ll, kk] - else - @test A[j => jj, l => ll, i => ii, k => kk] == A₀[j => jj, l => ll, i => ii, k => kk] - end - end -end diff --git a/test/base/test_ndtensors.jl b/test/base/test_ndtensors.jl deleted file mode 100644 index aa20e928b0..0000000000 --- a/test/base/test_ndtensors.jl +++ /dev/null @@ -1,27 +0,0 @@ -using ITensors -using ITensors.NDTensors -using Test - -@testset "NDTensors compatibility" begin - i = Index([QN(0) => 1, QN(1) => 1]) - - T = BlockSparseTensor(Float64, [Block(1, 1)], (i', dag(i))) - @test nnzblocks(T) == 1 - @test nzblocks(T) == [Block(1, 1)] - - T = BlockSparseTensor(Float64, [Block(1, 1)], [i', dag(i)]) - @test nnzblocks(T) == 1 - @test nzblocks(T) == [Block(1, 1)] - - T = BlockSparseTensor(Float64, [Block(1, 1)], IndexSet(i', dag(i))) - @test nnzblocks(T) == 1 - @test nzblocks(T) == [Block(1, 1)] - - @testset "blockdim" begin - i = Index(2) - @test_throws ErrorException blockdim(i, Block(1)) - @test_throws ErrorException blockdim(i, 1) - @test_throws ErrorException blockdim(1, Block(1)) - @test_throws ErrorException blockdim(1, 1) - end -end diff --git a/test/base/test_not.jl b/test/base/test_not.jl deleted file mode 100644 index d0c3855d1c..0000000000 --- a/test/base/test_not.jl +++ /dev/null @@ -1,53 +0,0 @@ -using ITensors, Test - -@testset "not" begin - i = Index(2, "i") - j = Index(2, "j") - k = Index(2, "k") - - A = random_itensor(i, j, k') - - Ap = prime(A, not("j")) - - @test hassameinds(Ap, (i', j, k'')) - - Ap = prime(A; tags=!ts"j") - - @test hassameinds(Ap, (i', j, k'')) - - At = addtags(A, "x", not("k")) - - @test hassameinds(At, (addtags(i, "x"), addtags(j, "x"), k')) - - Ap2 = prime(A, 2, not(i)) - - @test hassameinds(Ap2, (i, j'', k''')) - - Ap2 = prime(A, 2; inds=!i) - - @test hassameinds(Ap2, (i, j'', k''')) - - Ap3 = prime(A, 3, not(i, k')) - - @test hassameinds(Ap3, (i, j''', k')) - - Ap3 = prime(A, 3, !(i, k')) - - @test hassameinds(Ap3, (i, j''', k')) - - At2 = settags(A, "y", not(IndexSet(j, k'))) - - @test hassameinds(At2, (settags(i, "y"), j, k')) - - At2 = settags(A, "y"; inds=!IndexSet(j, k')) - - @test 
hassameinds(At2, (settags(i, "y"), j, k')) - - B = filterinds(A; plev=not(0)) - - @test hassameinds(B, (k',)) - - @test_throws MethodError !"x" - - @test_throws MethodError !1 -end diff --git a/test/base/test_oneitensor.jl b/test/base/test_oneitensor.jl deleted file mode 100644 index 049743d3cf..0000000000 --- a/test/base/test_oneitensor.jl +++ /dev/null @@ -1,18 +0,0 @@ -using ITensors -using Test - -@testset "OneITensor" begin - let i = Index(2), it = ITensor(i), OneITensor = ITensors.OneITensor - @test OneITensor() isa OneITensor - @test inds(OneITensor()) == () - @test eltype(OneITensor()) <: Bool - @test isone(dim(OneITensor())) - @test ITensors.isoneitensor(OneITensor()) - @test !ITensors.isoneitensor(it) - @test dag(OneITensor()) == OneITensor() - @test OneITensor() * it == it - @test it * OneITensor() == it - @test *(OneITensor()) == OneITensor() - @test contract([it, OneITensor(), OneITensor()]) == it - end -end diff --git a/test/base/test_phys_site_types.jl b/test/base/test_phys_site_types.jl deleted file mode 100644 index 5fb6733c70..0000000000 --- a/test/base/test_phys_site_types.jl +++ /dev/null @@ -1,851 +0,0 @@ -using ITensors, LinearAlgebra, Test -using ITensors.SiteTypes: has_fermion_string, op, siteind, siteinds, state - -@testset "Physics Sites" begin - N = 10 - - @testset "Generic sites" for eltype in (Float32, Float64, ComplexF32, ComplexF64) - d1, d2 = 3, 4 - i1, i2 = Index(d1), Index(d2) - - o = op("I", i1; eltype) - @test o == itensor(Matrix(I, d1, d1), i1', dag(i1)) - @test Base.eltype(o) <: eltype - - o = op("Id", i1; eltype) - @test o == itensor(Matrix(I, d1, d1), i1', dag(i1)) - @test Base.eltype(o) <: eltype - - o = op("F", i1; eltype) - @test o == itensor(Matrix(I, d1, d1), i1', dag(i1)) - @test Base.eltype(o) <: eltype - - o = op("I", i1, i2; eltype) - @test o == itensor(Matrix(I, d1 * d2, d1 * d2), i2', i1', dag(i2), dag(i1)) - @test Base.eltype(o) <: eltype - - o = op("Id", i1, i2; eltype) - @test o == itensor(Matrix(I, d1 * d2, d1 * d2), i2', i1', dag(i2), dag(i1)) - @test Base.eltype(o) <: eltype - - U1 = op("RandomUnitary", i1) - @test hassameinds(U1, (i1', i1)) - @test apply(transpose(dag(U1)), U1) ≈ itensor(Matrix(I, d1, d1), i1', dag(i1)) - - U1 = op("randU", i1) - @test hassameinds(U1, (i1', i1)) - @test apply(transpose(dag(U1)), U1) ≈ itensor(Matrix(I, d1, d1), i1', dag(i1)) - - U12 = op("RandomUnitary", i1, i2) - @test hassameinds(U12, (i1', i2', i1, i2)) - @test apply(transpose(dag(U12)), U12) ≈ - itensor(Matrix(I, d1 * d2, d1 * d2), i2', i1', dag(i2), dag(i1)) - end - - @testset "Qubit sites" begin - s = siteind("Qubit") - @test hastags(s, "Qubit,Site") - @test dim(s) == 2 - - s = siteinds("Qubit", N) - @test val(s[1], "0") == 1 - @test val(s[1], "1") == 2 - @test_throws ArgumentError val(s[1], "Fake") - - s = siteind("Qubit"; conserve_parity=true) - @test hastags(s, "Qubit,Site") - @test dim(s) == 2 - @test nblocks(s) == 2 - @test qn(s, 1) == QN("Parity", 0, 2) - @test qn(s, 2) == QN("Parity", 1, 2) - - s = siteind("Qubit"; conserve_number=true) - @test hastags(s, "Qubit,Site") - @test dim(s) == 2 - @test nblocks(s) == 2 - @test qn(s, 1) == QN("Number", 0) - @test qn(s, 2) == QN("Number", 1) - - s = siteind("Qubit"; conserve_number=true, conserve_parity=true) - @test hastags(s, "Qubit,Site") - @test dim(s) == 2 - @test nblocks(s) == 2 - @test qn(s, 1) == QN(("Parity", 0, 2), ("Number", 0)) - @test qn(s, 2) == QN(("Parity", 1, 2), ("Number", 1)) - - s = siteinds("Qubit", N) - - Z = op("Z", s, 5) - @test hasinds(Z, s[5]', s[5]) - - @test_throws 
ArgumentError( - "Overload of \"state\" or \"state!\" functions not found for state name \"Fake\" and Index tags $(tags(s[3]))", - ) state("Fake", s[3]) - @test Vector(state("Up", s[3])) ≈ [1, 0] - @test Vector(state("↑", s[3])) ≈ [1, 0] - @test Vector(state("Dn", s[3])) ≈ [0, 1] - @test Vector(state("↓", s[3])) ≈ [0, 1] - @test Vector(state("+", s[3])) ≈ (1 / √2) * [1, 1] - @test Vector(state("X+", s[3])) ≈ (1 / √2) * [1, 1] - @test Vector(state("Xp", s[3])) ≈ (1 / √2) * [1, 1] - @test Vector(state("-", s[3])) ≈ (1 / √2) * [1, -1] - @test Vector(state("X-", s[3])) ≈ (1 / √2) * [1, -1] - @test Vector(state("Xm", s[3])) ≈ (1 / √2) * [1, -1] - @test Vector(state("i", s[3])) ≈ (1 / √2) * [1, im] - @test Vector(state("Yp", s[3])) ≈ (1 / √2) * [1, im] - @test Vector(state("Y+", s[3])) ≈ (1 / √2) * [1, im] - @test Vector(state("-i", s[3])) ≈ (1 / √2) * [1, -im] - @test Vector(state("Y-", s[3])) ≈ (1 / √2) * [1, -im] - @test Vector(state("Ym", s[3])) ≈ (1 / √2) * [1, -im] - @test Vector(state("Z+", s[3])) ≈ [1, 0] - @test Vector(state("Zp", s[3])) ≈ [1, 0] - @test Vector(state("Z-", s[3])) ≈ [0, 1] - @test Vector(state("Zm", s[3])) ≈ [0, 1] - @test Vector(state("Tetra1", s[3])) ≈ [1, 0] - @test Vector(state("Tetra2", s[3])) ≈ (1 / √3) * [1, √2] - @test Vector(state("Tetra3", s[3])) ≈ (1 / √3) * [1, √2 * exp(im * 2π / 3)] - @test Vector(state("Tetra4", s[3])) ≈ (1 / √3) * [1, √2 * exp(im * 4π / 3)] - - @test_throws ArgumentError op(s, "Fake", 2) - @test Array(op("Id", s, 3), s[3]', s[3]) ≈ [1.0 0.0; 0.0 1.0] - @test Array(op("√NOT", s, 3), s[3]', s[3]) ≈ - [(1 + im)/2 (1 - im)/2; (1 - im)/2 (1 + im)/2] - @test Array(op("√X", s, 3), s[3]', s[3]) ≈ - [(1 + im)/2 (1 - im)/2; (1 - im)/2 (1 + im)/2] - @test Array(op("σx", s, 3), s[3]', s[3]) ≈ [0 1; 1 0] - @test Array(op("σ1", s, 3), s[3]', s[3]) ≈ [0 1; 1 0] - @test Array(op("σy", s, 3), s[3]', s[3]) ≈ [0 -im; im 0] - @test Array(op("σ2", s, 3), s[3]', s[3]) ≈ [0 -im; im 0] - @test Array(op("iY", s, 3), s[3]', s[3]) ≈ [0 1; -1 0] - @test Array(op("iσy", s, 3), s[3]', s[3]) ≈ [0 1; -1 0] - @test Array(op("iσ2", s, 3), s[3]', s[3]) ≈ [0 1; -1 0] - @test Array(op("σz", s, 3), s[3]', s[3]) ≈ [1 0; 0 -1] - @test Array(op("σ3", s, 3), s[3]', s[3]) ≈ [1 0; 0 -1] - @test Array(op("H", s, 3), s[3]', s[3]) ≈ [1/sqrt(2) 1/sqrt(2); 1/sqrt(2) -1/sqrt(2)] - @test Array(op("Phase", s, 3), s[3]', s[3]) ≈ [1 0; 0 im] - @test Array(op("P", s, 3), s[3]', s[3]) ≈ [1 0; 0 im] - @test Array(op("S", s, 3), s[3]', s[3]) ≈ [1 0; 0 im] - @test Array(op("π/8", s, 3), s[3]', s[3]) ≈ [1 0; 0 (1 + im)/sqrt(2)] - @test Array(op("T", s, 3), s[3]', s[3]) ≈ [1 0; 0 (1 + im)/sqrt(2)] - θ = randn() - @test Array(op("Rx", s, 3; θ=θ), s[3]', s[3]) ≈ - [cos(θ / 2) -im*sin(θ / 2); -im*sin(θ / 2) cos(θ / 2)] - @test Array(op("Ry", s, 3; θ=θ), s[3]', s[3]) ≈ - [cos(θ / 2) -sin(θ / 2); sin(θ / 2) cos(θ / 2)] - @test Array(op("Rz", s, 3; θ=θ), s[3]', s[3]) ≈ [exp(-im * θ / 2) 0; 0 exp(im * θ / 2)] - # fallback - @test Array(op("Rz", s, 3; ϕ=θ), s[3]', s[3]) ≈ [exp(-im * θ / 2) 0; 0 exp(im * θ / 2)] - λ = randn() - φ = randn() - @test Array(op("Rn", s, 3; θ=θ, λ=λ, ϕ=φ), s[3]', s[3]) ≈ [ - cos(θ / 2) -exp(im * λ)*sin(θ / 2) - exp(im * φ)*sin(θ / 2) exp(im * (φ + λ))*cos(θ / 2) - ] - @test Array(op("Rn̂", s, 3; θ=θ, λ=λ, ϕ=φ), s[3]', s[3]) ≈ [ - cos(θ / 2) -exp(im * λ)*sin(θ / 2) - exp(im * φ)*sin(θ / 2) exp(im * (φ + λ))*cos(θ / 2) - ] - @test Array(op("Splus", s, 3), s[3]', s[3]) ≈ [0 1; 0 0] - @test Array(op("Sminus", s, 3), s[3]', s[3]) ≈ [0 0; 1 0] - @test Array(op("S²", s, 3), s[3]', s[3]) 
≈ [0.75 0; 0 0.75] - @test Array(op("Proj0", s, 3), s[3]', s[3]) ≈ [1 0; 0 0] - @test Array(op("Proj1", s, 3), s[3]', s[3]) ≈ [0 0; 0 1] - @test reshape(Array(op("√SWAP", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈ - [1 0 0 0; 0 (1 + im)/2 (1 - im)/2 0; 0 (1 - im)/2 (1 + im)/2 0; 0 0 0 1] - @test reshape(Array(op("√Swap", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈ - [1 0 0 0; 0 (1 + im)/2 (1 - im)/2 0; 0 (1 - im)/2 (1 + im)/2 0; 0 0 0 1] - @test reshape(Array(op("√iSWAP", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈ - [1 0 0 0; 0 1/√2 im/√2 0; 0 im/√2 1/√2 0; 0 0 0 1] - @test reshape(Array(op("√iSwap", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈ - [1 0 0 0; 0 1/√2 im/√2 0; 0 im/√2 1/√2 0; 0 0 0 1] - @test reshape(Array(op("SWAP", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈ - [1 0 0 0; 0 0 1 0; 0 1 0 0; 0 0 0 1] - @test reshape(Array(op("Swap", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈ - [1 0 0 0; 0 0 1 0; 0 1 0 0; 0 0 0 1] - @test reshape(Array(op("iSWAP", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈ - [1 0 0 0; 0 0 im 0; 0 im 0 0; 0 0 0 1] - @test reshape(Array(op("iSwap", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈ - [1 0 0 0; 0 0 im 0; 0 im 0 0; 0 0 0 1] - @test reshape(Array(op("Cphase", s, 3, 5; ϕ=θ), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈ - [1 0 0 0; 0 1 0 0; 0 0 1 0; 0 0 0 exp(im * θ)] - @test reshape(Array(op("RXX", s, 3, 5; ϕ=θ), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈ [ - cos(θ) 0 0 -im*sin(θ) - 0 cos(θ) -im*sin(θ) 0 - 0 -im*sin(θ) cos(θ) 0 - -im*sin(θ) 0 0 cos(θ) - ] - @test reshape(Array(op("RYY", s, 3, 5; ϕ=θ), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈ [ - cos(θ) 0 0 im*sin(θ) - 0 cos(θ) -im*sin(θ) 0 - 0 -im*sin(θ) cos(θ) 0 - im*sin(θ) 0 0 cos(θ) - ] - @test reshape(Array(op("RXY", s, 3, 5; ϕ=θ), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈ - [1 0 0 0; 0 cos(θ) -im*sin(θ) 0; 0 -im*sin(θ) cos(θ) 0; 0 0 0 1] - @test reshape(Array(op("RZZ", s, 3, 5; ϕ=θ), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈ - [exp(-im * θ) 0 0 0; 0 exp(im * θ) 0 0; 0 0 exp(im * θ) 0; 0 0 0 exp(-im * θ)] - @test reshape(Array(op("CRX", s, 3, 5; θ=θ), s[5]', s[3]', s[5], s[3]), (4, 4)) ≈ [ - 1 0 0 0 - 0 1 0 0 - 0 0 cos(θ / 2) -im*sin(θ / 2) - 0 0 -im*sin(θ / 2) cos(θ / 2) - ] - @test reshape(Array(op("CRY", s, 3, 5; θ=θ), s[5]', s[3]', s[5], s[3]), (4, 4)) ≈ [ - 1 0 0 0 - 0 1 0 0 - 0 0 cos(θ / 2) -sin(θ / 2) - 0 0 sin(θ / 2) cos(θ / 2) - ] - @test reshape(Array(op("CRZ", s, 3, 5; θ=θ), s[5]', s[3]', s[5], s[3]), (4, 4)) ≈ [ - 1 0 0 0 - 0 1 0 0 - 0 0 exp(-im * θ / 2) 0 - 0 0 0 exp(im * θ / 2) - ] - @test reshape( - Array(op("CRn", s, 3, 5; θ=θ, λ=λ, ϕ=φ), s[5]', s[3]', s[5], s[3]), (4, 4) - ) ≈ [ - 1 0 0 0 - 0 1 0 0 - 0 0 cos(θ / 2) -exp(im * λ)*sin(θ / 2) - 0 0 exp(im * φ)*sin(θ / 2) exp(im * (φ + λ))*cos(θ / 2) - ] - @test reshape( - Array(op("CRn̂", s, 3, 5; θ=θ, λ=λ, ϕ=φ), s[5]', s[3]', s[5], s[3]), (4, 4) - ) ≈ [ - 1 0 0 0 - 0 1 0 0 - 0 0 cos(θ / 2) -exp(im * λ)*sin(θ / 2) - 0 0 exp(im * φ)*sin(θ / 2) exp(im * (φ + λ))*cos(θ / 2) - ] - @test reshape(Array(op("CX", s, 3, 5), s[5]', s[3]', s[5], s[3]), (4, 4)) ≈ [ - 1 0 0 0 - 0 1 0 0 - 0 0 0 1 - 0 0 1 0 - ] - @test reshape(Array(op("CY", s, 3, 5), s[5]', s[3]', s[5], s[3]), (4, 4)) ≈ [ - 1 0 0 0 - 0 1 0 0 - 0 0 0 -im - 0 0 im 0 - ] - @test reshape(Array(op("CZ", s, 3, 5), s[5]', s[3]', s[5], s[3]), (4, 4)) ≈ [ - 1 0 0 0 - 0 1 0 0 - 0 0 1 0 - 0 0 0 -1 - ] - - toff_mat = diagm(ones(8)) - toff_mat[7:8, 7:8] .= [0 1; 1 0] - @test reshape( - Array(op("TOFF", s, 3, 4, 5), s[5]', s[4]', s[3]', s[5], s[4], s[3]), (8, 8) - ) ≈ toff_mat - @test reshape( - 
Array(op("CCNOT", s, 3, 4, 5), s[5]', s[4]', s[3]', s[5], s[4], s[3]), (8, 8) - ) ≈ toff_mat - @test reshape( - Array(op("CCX", s, 3, 4, 5), s[5]', s[4]', s[3]', s[5], s[4], s[3]), (8, 8) - ) ≈ toff_mat - fred_mat = diagm(ones(8)) - fred_mat[6:7, 6:7] .= [0 1; 1 0] - @test reshape( - Array(op("CS", s, 3, 4, 5), s[5]', s[4]', s[3]', s[5], s[4], s[3]), (8, 8) - ) ≈ fred_mat - @test reshape( - Array(op("CSWAP", s, 3, 4, 5), s[5]', s[4]', s[3]', s[5], s[4], s[3]), (8, 8) - ) ≈ fred_mat - @test reshape( - Array(op("CSwap", s, 3, 4, 5), s[5]', s[4]', s[3]', s[5], s[4], s[3]), (8, 8) - ) ≈ fred_mat - cccn_mat = diagm(ones(16)) - cccn_mat[15:16, 15:16] .= [0 1; 1 0] - @test reshape( - Array( - op("CCCNOT", s, 2, 3, 4, 5), s[5]', s[4]', s[3]', s[2]', s[5], s[4], s[3], s[2] - ), - (16, 16), - ) ≈ cccn_mat - # Test obtaining S=1/2 operators using Qubit tag - @test Matrix(op("X", s, 3), s[3]', s[3]) ≈ [0.0 1.0; 1.0 0.0] - end - - @testset "Spin Half sites" begin - for name in ("S=1/2", "SpinHalf", "S=½") - s = siteind(name) - @test hastags(s, name * ",Site") - @test dim(s) == 2 - - s = siteind(name; conserve_qns=true) - @test hastags(s, name * ",Site") - @test dim(s) == 2 - @test nblocks(s) == 2 - @test qn(s, 1) == QN("Sz", +1) - @test qn(s, 2) == QN("Sz", -1) - - s = siteind(name; conserve_szparity=true) - @test hastags(s, name * ",Site") - @test dim(s) == 2 - @test nblocks(s) == 2 - @test qn(s, 1) == QN("SzParity", 1, 2) - @test qn(s, 2) == QN("SzParity", 0, 2) - - s = siteind(name; conserve_sz=true, conserve_szparity=true) - @test hastags(s, name * ",Site") - @test dim(s) == 2 - @test nblocks(s) == 2 - @test qn(s, 1) == QN(("SzParity", 1, 2), ("Sz", +1)) - @test qn(s, 2) == QN(("SzParity", 0, 2), ("Sz", -1)) - - s = siteinds(name, N) - @test val(s[1], "Up") == 1 - @test val(s[1], "↑") == 1 - @test val(s[1], "Dn") == 2 - @test val(s[1], "↓") == 2 - @test_throws ArgumentError val(s[1], "Fake") - - Sz5 = op("Sz", s, 5) - @test hasinds(Sz5, s[5]', s[5]) - - @test Vector(state("Up", s[1])) ≈ [1, 0] - @test Vector(state("↑", s[1])) ≈ [1, 0] - @test Vector(state("Dn", s[1])) ≈ [0, 1] - @test Vector(state("↓", s[1])) ≈ [0, 1] - @test Vector(state("X+", s[1])) ≈ (1 / √2) * [1, 1] - @test Vector(state("X-", s[1])) ≈ (1 / √2) * [1, -1] - - @test_throws ArgumentError op(s, "Fake", 2) - @test Array(op("Id", s, 3), s[3]', s[3]) ≈ [1.0 0.0; 0.0 1.0] - @test Array(op("F", s, 3), s[3]', s[3]) ≈ [1.0 0.0; 0.0 1.0] - @test Array(op("S+", s, 3), s[3]', s[3]) ≈ [0.0 1.0; 0.0 0.0] - @test Array(op("S⁺", s, 3), s[3]', s[3]) ≈ [0.0 1.0; 0.0 0.0] - @test Array(op("S-", s, 4), s[4]', s[4]) ≈ [0.0 0.0; 1.0 0.0] - @test Array(op("S⁻", s, 4), s[4]', s[4]) ≈ [0.0 0.0; 1.0 0.0] - @test Array(op("Sx", s, 2), s[2]', s[2]) ≈ [0.0 0.5; 0.5 0.0] - @test Array(op("Sˣ", s, 2), s[2]', s[2]) ≈ [0.0 0.5; 0.5 0.0] - @test Array(op("iSy", s, 2), s[2]', s[2]) ≈ [0.0 0.5; -0.5 0.0] - @test Array(op("iSʸ", s, 2), s[2]', s[2]) ≈ [0.0 0.5; -0.5 0.0] - @test Array(op("Sy", s, 2), s[2]', s[2]) ≈ [0.0 -0.5im; 0.5im 0.0] - @test Array(op("Sʸ", s, 2), s[2]', s[2]) ≈ [0.0 -0.5im; 0.5im 0.0] - @test Array(op("Sz", s, 2), s[2]', s[2]) ≈ [0.5 0.0; 0.0 -0.5] - @test Array(op("Sᶻ", s, 2), s[2]', s[2]) ≈ [0.5 0.0; 0.0 -0.5] - @test Array(op("ProjUp", s, 2), s[2]', s[2]) ≈ [1.0 0.0; 0.0 0.0] - @test Array(op("projUp", s, 2), s[2]', s[2]) ≈ [1.0 0.0; 0.0 0.0] - @test Array(op("ProjDn", s, 2), s[2]', s[2]) ≈ [0.0 0.0; 0.0 1.0] - @test Array(op("projDn", s, 2), s[2]', s[2]) ≈ [0.0 0.0; 0.0 1.0] - - # Test obtaining Qubit operators using S=1/2 tag: - @test 
Array(op("√NOT", s, 3), s[3]', s[3]) ≈ - [(1 + im)/2 (1 - im)/2; (1 - im)/2 (1 + im)/2] - end - end - - @testset "Spin One sites" begin - for name in ("S=1", "SpinOne") - s = siteinds(name, N) - - @test val(s[1], "Up") == 1 - @test val(s[1], "↑") == 1 - @test val(s[1], "0") == 2 - @test val(s[1], "Dn") == 3 - @test val(s[1], "↓") == 3 - @test val(s[1], "Z+") == 1 - @test val(s[1], "Z-") == 3 - @test_throws ArgumentError val(s[1], "Fake") - - @test Vector(state("Up", s[1])) ≈ [1, 0, 0] - @test Vector(state("↑", s[1])) ≈ [1, 0, 0] - @test Vector(state("Z+", s[1])) ≈ [1, 0, 0] - @test Vector(state("Z0", s[1])) ≈ [0, 1, 0] - @test Vector(state("0", s[1])) ≈ [0, 1, 0] - @test Vector(state("Dn", s[1])) ≈ [0, 0, 1] - @test Vector(state("↓", s[1])) ≈ [0, 0, 1] - @test Vector(state("Z-", s[1])) ≈ [0, 0, 1] - @test Vector(state("X+", s[1])) ≈ [1 / 2, 1 / √2, 1 / 2] - @test Vector(state("X0", s[1])) ≈ [-1 / √2, 0, 1 / √2] - @test Vector(state("X-", s[1])) ≈ [1 / 2, -1 / √2, 1 / 2] - @test Vector(state("Y+", s[1])) ≈ [-1 / 2, -im / √2, 1 / 2] - @test Vector(state("Y0", s[1])) ≈ [1 / √2, 0, 1 / √2] - @test Vector(state("Y-", s[1])) ≈ [-1 / 2, im / √2, 1 / 2] - - Sz5 = op("Sz", s, 5) - @test hasinds(Sz5, s[5]', s[5]) - - @test_throws ArgumentError op(s, "Fake", 2) - @test Array(op("Id", s, 3), s[3]', s[3]) ≈ [1.0 0.0 0.0; 0.0 1.0 0.0; 0.0 0.0 1.0] - @test Array(op("S+", s, 3), s[3]', s[3]) ≈ [0 √2 0; 0 0 √2; 0 0 0] - @test Array(op("S⁺", s, 3), s[3]', s[3]) ≈ [0 √2 0; 0 0 √2; 0 0 0] - @test Array(op("Sp", s, 3), s[3]', s[3]) ≈ [0 √2 0; 0 0 √2; 0 0 0] - @test Array(op("Splus", s, 3), s[3]', s[3]) ≈ [0 √2 0; 0 0 √2; 0 0 0] - @test Array(op("S-", s, 3), s[3]', s[3]) ≈ [0 0 0; √2 0 0; 0.0 √2 0] - @test Array(op("S⁻", s, 3), s[3]', s[3]) ≈ [0 0 0; √2 0 0; 0.0 √2 0] - @test Array(op("Sm", s, 3), s[3]', s[3]) ≈ [0 0 0; √2 0 0; 0.0 √2 0] - @test Array(op("Sminus", s, 3), s[3]', s[3]) ≈ [0 0 0; √2 0 0; 0.0 √2 0] - @test Array(op("Sx", s, 3), s[3]', s[3]) ≈ [0 1/√2 0; 1/√2 0 1/√2; 0 1/√2 0] - @test Array(op("Sˣ", s, 3), s[3]', s[3]) ≈ [0 1/√2 0; 1/√2 0 1/√2; 0 1/√2 0] - @test Array(op("iSy", s, 3), s[3]', s[3]) ≈ [0 1/√2 0; -1/√2 0 1/√2; 0 -1/√2 0] - @test Array(op("iSʸ", s, 3), s[3]', s[3]) ≈ [0 1/√2 0; -1/√2 0 1/√2; 0 -1/√2 0] - @test Array(op("Sy", s, 3), s[3]', s[3]) ≈ (1 / (√2im)) * [0 +1 0; -1 0 +1; 0 -1 0] - @test Array(op("Sʸ", s, 3), s[3]', s[3]) ≈ (1 / (√2im)) * [0 +1 0; -1 0 +1; 0 -1 0] - #@test Array(op("Sʸ", s, 3), s[3]', s[3]) ≈ [0 +1/√2im 0; +1/√2im 0 -1/√2im; 0 +1/√2im 0] - @test Array(op("Sz", s, 2), s[2]', s[2]) ≈ [1.0 0 0; 0 0 0; 0 0 -1.0] - @test Array(op("Sᶻ", s, 2), s[2]', s[2]) ≈ [1.0 0 0; 0 0 0; 0 0 -1.0] - @test Array(op("Sz2", s, 2), s[2]', s[2]) ≈ [1.0 0 0; 0 0 0; 0 0 +1.0] - @test Array(op("Sx2", s, 2), s[2]', s[2]) ≈ [0.5 0 0.5; 0 1.0 0; 0.5 0 0.5] - @test Array(op("Sy2", s, 2), s[2]', s[2]) ≈ [0.5 0 -0.5; 0 1.0 0; -0.5 0 0.5] - @test Array(op("S2", s, 2), s[2]', s[2]) ≈ [2.0 0.0 0.0; 0.0 2.0 0.0; 0.0 0.0 2.0] - @test Array(op("S²", s, 2), s[2]', s[2]) ≈ [2.0 0.0 0.0; 0.0 2.0 0.0; 0.0 0.0 2.0] - end - end - - @testset "Fermion sites" begin - s = siteind("Fermion") - - @test val(s, "0") == 1 - @test val(s, "1") == 2 - @test_throws ArgumentError val(s, "Fake") - - @test Vector(state("Emp", s)) ≈ [1, 0] - @test Vector(state("Occ", s)) ≈ [0, 1] - @test Vector(state("0", s)) ≈ [1, 0] - @test Vector(state("1", s)) ≈ [0, 1] - - N = op(s, "N") - @test hasinds(N, s', s) - - @test_throws ArgumentError op(s, "Fake") - N = Array(op(s, "N"), s', s) - @test N ≈ [0.0 0; 0 1] - N = Array(op(s, "n"), 
s', s) - @test N ≈ [0.0 0; 0 1] - C = Array(op(s, "C"), s', s) - @test C ≈ [0.0 1; 0 0] - C = Array(op(s, "c"), s', s) - @test C ≈ [0.0 1; 0 0] - Cdag = Array(op(s, "Cdag"), s', s) - @test Cdag ≈ [0.0 0; 1 0] - Cdag = Array(op(s, "cdag"), s', s) - @test Cdag ≈ [0.0 0; 1 0] - Cdag = Array(op(s, "c†"), s', s) - @test Cdag ≈ [0.0 0; 1 0] - F = Array(op(s, "F"), s', s) - @test F ≈ [1.0 0; 0 -1] - - @test has_fermion_string("C", s) - @test has_fermion_string("c", s) - @test has_fermion_string("Cdag", s) - @test has_fermion_string("cdag", s) - @test has_fermion_string("c†", s) - @test has_fermion_string("C*F", s) - @test has_fermion_string("c*F", s) - @test has_fermion_string("F*Cdag*F", s) - @test has_fermion_string("F*c†*F", s) - @test !has_fermion_string("N", s) - @test !has_fermion_string("n", s) - @test !has_fermion_string("N*F", s) - @test !has_fermion_string("n*F", s) - - s = siteind("Fermion"; conserve_nf=true) - @test qn(s, 1) == QN("Nf", 0, -1) - @test qn(s, 2) == QN("Nf", 1, -1) - s = siteind("Fermion"; conserve_nfparity=true) - @test qn(s, 1) == QN("NfParity", 0, -2) - @test qn(s, 2) == QN("NfParity", 1, -2) - s = siteind("Fermion"; conserve_parity=true) - @test qn(s, 1) == QN("NfParity", 0, -2) - @test qn(s, 2) == QN("NfParity", 1, -2) - s = siteind("Fermion"; conserve_qns=false) - @test dim(s) == 2 - - s = siteind("Fermion"; conserve_nf=true, conserve_sz=true) - @test qn(s, 1) == QN(("Nf", 0, -1), ("Sz", 0)) - @test qn(s, 2) == QN(("Nf", 1, -1), ("Sz", 1)) - s = siteind("Fermion"; conserve_nfparity=true, conserve_sz=true) - @test qn(s, 1) == QN(("NfParity", 0, -2), ("Sz", 0)) - @test qn(s, 2) == QN(("NfParity", 1, -2), ("Sz", 1)) - s = siteind("Fermion"; conserve_nf=true, conserve_sz="Up") - @test qn(s, 1) == QN(("Nf", 0, -1), ("Sz", 0)) - @test qn(s, 2) == QN(("Nf", 1, -1), ("Sz", 1)) - s = siteind("Fermion"; conserve_nfparity=true, conserve_sz="Up") - @test qn(s, 1) == QN(("NfParity", 0, -2), ("Sz", 0)) - @test qn(s, 2) == QN(("NfParity", 1, -2), ("Sz", 1)) - s = siteind("Fermion"; conserve_nf=true, conserve_sz="Dn") - @test qn(s, 1) == QN(("Nf", 0, -1), ("Sz", 0)) - @test qn(s, 2) == QN(("Nf", 1, -1), ("Sz", -1)) - s = siteind("Fermion"; conserve_nfparity=true, conserve_sz="Dn") - @test qn(s, 1) == QN(("NfParity", 0, -2), ("Sz", 0)) - @test qn(s, 2) == QN(("NfParity", 1, -2), ("Sz", -1)) - end - - @testset "Electron sites" begin - s = siteind("Electron") - - @test val(s, "Emp") == 1 - @test val(s, "0") == 1 - @test val(s, "Up") == 2 - @test val(s, "↑") == 2 - @test val(s, "Dn") == 3 - @test val(s, "↓") == 3 - @test val(s, "UpDn") == 4 - @test val(s, "↑↓") == 4 - @test_throws ArgumentError val(s, "Fake") - - @test Vector(state("Emp", s)) ≈ [1, 0, 0, 0] - @test Vector(state("Up", s)) ≈ [0, 1, 0, 0] - @test Vector(state("Dn", s)) ≈ [0, 0, 1, 0] - @test Vector(state("UpDn", s)) ≈ [0, 0, 0, 1] - @test Vector(state("0", s)) ≈ [1, 0, 0, 0] - @test Vector(state("↑", s)) ≈ [0, 1, 0, 0] - @test Vector(state("↓", s)) ≈ [0, 0, 1, 0] - @test Vector(state("↑↓", s)) ≈ [0, 0, 0, 1] - - Nup = op(s, "Nup") - @test hasinds(Nup, s', s) - - @test_throws ArgumentError op(s, "Fake") - Nup = Array(op(s, "Nup"), s', s) - @test Nup ≈ [0.0 0 0 0; 0 1 0 0; 0 0 0 0; 0 0 0 1] - Nup = Array(op(s, "n↑"), s', s) - @test Nup ≈ [0.0 0 0 0; 0 1 0 0; 0 0 0 0; 0 0 0 1] - Ndn = Array(op(s, "Ndn"), s', s) - @test Ndn ≈ [0.0 0 0 0; 0 0 0 0; 0 0 1 0; 0 0 0 1] - Ndn = Array(op(s, "n↓"), s', s) - @test Ndn ≈ [0.0 0 0 0; 0 0 0 0; 0 0 1 0; 0 0 0 1] - Nupdn = Array(op(s, "n↑↓"), s', s) - @test Nupdn ≈ [0.0 0 0 0; 0 0 0 0; 0 
0 0 0; 0 0 0 1] - Ntot = Array(op(s, "Ntot"), s', s) - @test Ntot ≈ [0.0 0 0 0; 0 1 0 0; 0 0 1 0; 0 0 0 2] - Ntot = Array(op(s, "ntot"), s', s) - @test Ntot ≈ [0.0 0 0 0; 0 1 0 0; 0 0 1 0; 0 0 0 2] - Cup = Array(op(s, "Cup"), s', s) - @test Cup ≈ [0.0 1 0 0; 0 0 0 0; 0 0 0 1; 0 0 0 0] - Cup = Array(op(s, "c↑"), s', s) - @test Cup ≈ [0.0 1 0 0; 0 0 0 0; 0 0 0 1; 0 0 0 0] - Cdagup = Array(op(s, "Cdagup"), s', s) - @test Cdagup ≈ [0.0 0 0 0; 1 0 0 0; 0 0 0 0; 0 0 1 0] - Cdagup = Array(op(s, "c†↑"), s', s) - @test Cdagup ≈ [0.0 0 0 0; 1 0 0 0; 0 0 0 0; 0 0 1 0] - Cdn = Array(op(s, "Cdn"), s', s) - @test Cdn ≈ [0.0 0 1 0; 0 0 0 -1; 0 0 0 0; 0 0 0 0] - Cdn = Array(op(s, "c↓"), s', s) - @test Cdn ≈ [0.0 0 1 0; 0 0 0 -1; 0 0 0 0; 0 0 0 0] - Cdagdn = Array(op(s, "Cdagdn"), s', s) - @test Cdagdn ≈ [0.0 0 0 0; 0 0 0 0; 1 0 0 0; 0 -1 0 0] - Cdagdn = Array(op(s, "c†↓"), s', s) - @test Cdagdn ≈ [0.0 0 0 0; 0 0 0 0; 1 0 0 0; 0 -1 0 0] - Aup = Array(op(s, "Aup"), s', s) - @test Aup ≈ [0.0 1 0 0; 0 0 0 0; 0 0 0 1; 0 0 0 0] - Aup = Array(op(s, "a↑"), s', s) - @test Aup ≈ [0.0 1 0 0; 0 0 0 0; 0 0 0 1; 0 0 0 0] - Adagup = Array(op(s, "Adagup"), s', s) - @test Adagup ≈ [0.0 0 0 0; 1 0 0 0; 0 0 0 0; 0 0 1 0] - Adagup = Array(op(s, "a†↑"), s', s) - @test Adagup ≈ [0.0 0 0 0; 1 0 0 0; 0 0 0 0; 0 0 1 0] - Adn = Array(op(s, "Adn"), s', s) - @test Adn ≈ [0.0 0 1 0; 0 0 0 1; 0 0 0 0; 0 0 0 0] - Adn = Array(op(s, "a↓"), s', s) - @test Adn ≈ [0.0 0 1 0; 0 0 0 1; 0 0 0 0; 0 0 0 0] - Adagdn = Array(op(s, "Adagdn"), s', s) - @test Adagdn ≈ [0.0 0 0 0; 0 0 0 0; 1 0 0 0; 0 1 0 0] - Adagdn = Array(op(s, "a†↓"), s', s) - @test Adagdn ≈ [0.0 0 0 0; 0 0 0 0; 1 0 0 0; 0 1 0 0] - F = Array(op(s, "F"), s', s) - @test F ≈ [1.0 0 0 0; 0 -1 0 0; 0 0 -1 0; 0 0 0 1] - Fup = Array(op(s, "Fup"), s', s) - @test Fup ≈ [1.0 0 0 0; 0 -1 0 0; 0 0 1 0; 0 0 0 -1] - Fup = Array(op(s, "F↑"), s', s) - @test Fup ≈ [1.0 0 0 0; 0 -1 0 0; 0 0 1 0; 0 0 0 -1] - Fdn3 = Array(op(s, "Fdn"), s', s) - @test Fdn3 ≈ [1.0 0 0 0; 0 1 0 0; 0 0 -1 0; 0 0 0 -1] - Fdn3 = Array(op(s, "F↓"), s', s) - @test Fdn3 ≈ [1.0 0 0 0; 0 1 0 0; 0 0 -1 0; 0 0 0 -1] - Sz3 = Array(op(s, "Sz"), s', s) - @test Sz3 ≈ [0.0 0 0 0; 0 0.5 0 0; 0 0 -0.5 0; 0 0 0 0] - Sz3 = Array(op(s, "Sᶻ"), s', s) - @test Sz3 ≈ [0.0 0 0 0; 0 0.5 0 0; 0 0 -0.5 0; 0 0 0 0] - Sx3 = Array(op(s, "Sx"), s', s) - @test Sx3 ≈ [0.0 0 0 0; 0 0 0.5 0; 0 0.5 0 0; 0 0 0 0] - Sx3 = Array(op(s, "Sˣ"), s', s) - @test Sx3 ≈ [0.0 0 0 0; 0 0 0.5 0; 0 0.5 0 0; 0 0 0 0] - Sp3 = Array(op(s, "S+"), s', s) - @test Sp3 ≈ [0.0 0 0 0; 0 0 1 0; 0 0 0 0; 0 0 0 0] - Sp3 = Array(op(s, "Sp"), s', s) - @test Sp3 ≈ [0.0 0 0 0; 0 0 1 0; 0 0 0 0; 0 0 0 0] - Sp3 = Array(op(s, "Splus"), s', s) - @test Sp3 ≈ [0.0 0 0 0; 0 0 1 0; 0 0 0 0; 0 0 0 0] - Sp3 = Array(op(s, "S⁺"), s', s) - @test Sp3 ≈ [0.0 0 0 0; 0 0 1 0; 0 0 0 0; 0 0 0 0] - Sm3 = Array(op(s, "S-"), s', s) - @test Sm3 ≈ [0.0 0 0 0; 0 0 0 0; 0 1 0 0; 0 0 0 0] - Sm3 = Array(op(s, "S⁻"), s', s) - @test Sm3 ≈ [0.0 0 0 0; 0 0 0 0; 0 1 0 0; 0 0 0 0] - Sm3 = Array(op(s, "Sm"), s', s) - @test Sm3 ≈ [0.0 0 0 0; 0 0 0 0; 0 1 0 0; 0 0 0 0] - Sm3 = Array(op(s, "Sminus"), s', s) - @test Sm3 ≈ [0.0 0 0 0; 0 0 0 0; 0 1 0 0; 0 0 0 0] - - @test has_fermion_string("Cup", s) - @test has_fermion_string("c↑", s) - @test has_fermion_string("Cup*F", s) - @test has_fermion_string("c↑*F", s) - @test has_fermion_string("Cdagup", s) - @test has_fermion_string("c†↑", s) - @test has_fermion_string("F*Cdagup", s) - @test has_fermion_string("F*c†↑", s) - @test has_fermion_string("Cdn", s) - @test 
has_fermion_string("c↓", s) - @test has_fermion_string("Cdn*F", s) - @test has_fermion_string("c↓*F", s) - @test has_fermion_string("Cdagdn", s) - @test has_fermion_string("c†↓", s) - @test !has_fermion_string("N", s) - @test !has_fermion_string("n", s) - @test !has_fermion_string("F*N", s) - @test !has_fermion_string("F*n", s) - - s = siteind("Electron"; conserve_nf=true) - @test qn(s, 1) == QN("Nf", 0, -1) - @test qn(s, 2) == QN("Nf", 1, -1) - @test qn(s, 3) == QN("Nf", 2, -1) - s = siteind("Electron"; conserve_sz=true) - @test qn(s, 1) == QN(("Sz", 0), ("NfParity", 0, -2)) - @test qn(s, 2) == QN(("Sz", +1), ("NfParity", 1, -2)) - @test qn(s, 3) == QN(("Sz", -1), ("NfParity", 1, -2)) - @test qn(s, 4) == QN(("Sz", 0), ("NfParity", 0, -2)) - s = siteind("Electron"; conserve_nfparity=true) - @test qn(s, 1) == QN("NfParity", 0, -2) - @test qn(s, 2) == QN("NfParity", 1, -2) - @test qn(s, 3) == QN("NfParity", 0, -2) - s = siteind("Electron"; conserve_parity=true) - @test qn(s, 1) == QN("NfParity", 0, -2) - @test qn(s, 2) == QN("NfParity", 1, -2) - @test qn(s, 3) == QN("NfParity", 0, -2) - s = siteind("Electron"; conserve_qns=false) - @test dim(s) == 4 - end - - @testset "tJ sites" begin - s = siteind("tJ"; conserve_parity=true) - @test hastags(s, "tJ,Site") - @test dim(s) == 3 - @test nblocks(s) == 2 - @test qn(s, 1) == QN(("NfParity", 0, -2)) - @test qn(s, 2) == QN(("NfParity", 1, -2)) - - s = siteind("tJ"; conserve_nf=true) - @test hastags(s, "tJ,Site") - @test dim(s) == 3 - @test nblocks(s) == 2 - @test qn(s, 1) == QN(("Nf", 0, -1)) - @test qn(s, 2) == QN(("Nf", 1, -1)) - - s = siteind("tJ"; conserve_sz=true) - @test hastags(s, "tJ,Site") - @test dim(s) == 3 - @test nblocks(s) == 3 - @test qn(s, 1) == QN(("Sz", 0), ("NfParity", 0, -2)) - @test qn(s, 2) == QN(("Sz", 1), ("NfParity", 1, -2)) - @test qn(s, 3) == QN(("Sz", -1), ("NfParity", 1, -2)) - - s = siteind("tJ"; conserve_sz=true, conserve_nf=true) - @test hastags(s, "tJ,Site") - @test dim(s) == 3 - @test nblocks(s) == 3 - @test qn(s, 1) == QN(("Nf", 0, -1), ("Sz", 0)) - @test qn(s, 2) == QN(("Nf", 1, -1), ("Sz", 1)) - @test qn(s, 3) == QN(("Nf", 1, -1), ("Sz", -1)) - - s = siteind("tJ") - @test hastags(s, "tJ,Site") - @test dim(s) == 3 - - @test val(s, "Emp") == 1 - @test val(s, "0") == 1 - @test val(s, "Up") == 2 - @test val(s, "↑") == 2 - @test val(s, "Dn") == 3 - @test val(s, "↓") == 3 - @test_throws ArgumentError val(s, "Fake") - - @test Vector(state("Emp", s)) ≈ [1, 0, 0] - @test Vector(state("0", s)) ≈ [1, 0, 0] - @test Vector(state("Up", s)) ≈ [0, 1, 0] - @test Vector(state("↑", s)) ≈ [0, 1, 0] - @test Vector(state("Dn", s)) ≈ [0, 0, 1] - @test Vector(state("↓", s)) ≈ [0, 0, 1] - - @test_throws ArgumentError op(s, "Fake") - Nup = op(s, "Nup") - @test Nup[2, 2] ≈ 1.0 - Nup = op(s, "n↑") - @test Nup[2, 2] ≈ 1.0 - Ndn = op(s, "Ndn") - @test Ndn[3, 3] ≈ 1.0 - Ndn = op(s, "n↓") - @test Ndn[3, 3] ≈ 1.0 - Ntot = op(s, "Ntot") - @test Ntot[2, 2] ≈ 1.0 - @test Ntot[3, 3] ≈ 1.0 - Ntot = op(s, "ntot") - @test Ntot[2, 2] ≈ 1.0 - @test Ntot[3, 3] ≈ 1.0 - Id = Array(op(s, "Id"), s', s) - @test Id ≈ [1.0 0 0; 0 1 0; 0 0 1] - Cup = Array(op(s, "Cup"), s', s) - @test Cup ≈ [0.0 1 0; 0 0 0; 0 0 0] - Cup = Array(op(s, "c↑"), s', s) - @test Cup ≈ [0.0 1 0; 0 0 0; 0 0 0] - Cdup = Array(op(s, "Cdagup"), s', s) - @test Cdup ≈ [0 0 0; 1.0 0 0; 0 0 0] - Cdup = Array(op(s, "c†↑"), s', s) - @test Cdup ≈ [0 0 0; 1.0 0 0; 0 0 0] - Cdn = Array(op(s, "Cdn"), s', s) - @test Cdn ≈ [0.0 0.0 1; 0 0 0; 0 0 0] - Cdn = Array(op(s, "c↓"), s', s) - @test Cdn ≈ [0.0 0.0 
1; 0 0 0; 0 0 0] - Cddn = Array(op(s, "Cdagdn"), s', s) - @test Cddn ≈ [0 0 0; 0.0 0 0; 1 0 0] - Cddn = Array(op(s, "c†↓"), s', s) - @test Cddn ≈ [0 0 0; 0.0 0 0; 1 0 0] - Aup = Array(op(s, "Aup"), s', s) - @test Aup ≈ [0.0 1 0; 0 0 0; 0 0 0] - Aup = Array(op(s, "a↑"), s', s) - @test Aup ≈ [0.0 1 0; 0 0 0; 0 0 0] - Adup = Array(op(s, "Adagup"), s', s) - @test Adup ≈ [0 0 0; 1.0 0 0; 0 0 0] - Adup = Array(op(s, "a†↑"), s', s) - @test Adup ≈ [0 0 0; 1.0 0 0; 0 0 0] - Adn = Array(op(s, "Adn"), s', s) - @test Adn ≈ [0.0 0.0 1; 0 0 0; 0 0 0] - Adn = Array(op(s, "a↓"), s', s) - @test Adn ≈ [0.0 0.0 1; 0 0 0; 0 0 0] - Addn = Array(op(s, "Adagdn"), s', s) - @test Addn ≈ [0 0 0; 0.0 0 0; 1 0 0] - Addn = Array(op(s, "a†↓"), s', s) - @test Addn ≈ [0 0 0; 0.0 0 0; 1 0 0] - FP = Array(op(s, "F"), s', s) - @test FP ≈ [1.0 0.0 0; 0 -1.0 0; 0 0 -1.0] - Fup = Array(op(s, "Fup"), s', s) - @test Fup ≈ [1.0 0.0 0; 0 -1.0 0; 0 0 1.0] - Fup = Array(op(s, "F↑"), s', s) - @test Fup ≈ [1.0 0.0 0; 0 -1.0 0; 0 0 1.0] - Fdn = Array(op(s, "Fdn"), s', s) - @test Fdn ≈ [1.0 0.0 0; 0 1.0 0; 0 0 -1.0] - Fdn = Array(op(s, "F↓"), s', s) - @test Fdn ≈ [1.0 0.0 0; 0 1.0 0; 0 0 -1.0] - Sz = Array(op(s, "Sz"), s', s) - @test Sz ≈ [0.0 0.0 0; 0 0.5 0; 0 0 -0.5] - Sz = Array(op(s, "Sᶻ"), s', s) - @test Sz ≈ [0.0 0.0 0; 0 0.5 0; 0 0 -0.5] - Sx = Array(op(s, "Sx"), s', s) - @test Sx ≈ [0.0 0.0 0; 0 0 0.5; 0 0.5 0] - Sx = Array(op(s, "Sˣ"), s', s) - @test Sx ≈ [0.0 0.0 0; 0 0 0.5; 0 0.5 0] - Sp = Array(op(s, "Splus"), s', s) - @test Sp ≈ [0.0 0.0 0; 0 0 1.0; 0 0 0] - Sp = Array(op(s, "Sp"), s', s) - @test Sp ≈ [0.0 0.0 0; 0 0 1.0; 0 0 0] - Sp = Array(op(s, "S⁺"), s', s) - @test Sp ≈ [0.0 0.0 0; 0 0 1.0; 0 0 0] - Sm = Array(op(s, "Sminus"), s', s) - @test Sm ≈ [0.0 0.0 0; 0 0 0; 0 1.0 0] - Sm = Array(op(s, "Sm"), s', s) - @test Sm ≈ [0.0 0.0 0; 0 0 0; 0 1.0 0] - Sm = Array(op(s, "S⁻"), s', s) - @test Sm ≈ [0.0 0.0 0; 0 0 0; 0 1.0 0] - - @test has_fermion_string("Cup", s) - @test has_fermion_string("c↑", s) - @test has_fermion_string("Cdagup", s) - @test has_fermion_string("c†↑", s) - @test has_fermion_string("Cdn", s) - @test has_fermion_string("c↓", s) - @test has_fermion_string("Cdagdn", s) - @test has_fermion_string("c†↓", s) - @test !has_fermion_string("N", s) - @test !has_fermion_string("n", s) - end - - @testset "$st" for st in ["Qudit", "Boson"] - d = 3 - s = siteinds(st, 4; dim=d) - @test dim(s[1]) == d - @test dim(s[2]) == d - @test dim(s[3]) == d - @test dim(s[4]) == d - v = state(s, 2, "0") - @test v == itensor([1, 0, 0], s[2]) - v = state(s, 3, "1") - @test v == itensor([0, 1, 0], s[3]) - v = state(s, 4, "2") - @test v == itensor([0, 0, 1], s[4]) - @test_throws BoundsError state(s, 4, "3") - v = val(s, 2, "0") - @test v == 1 - v = val(s, 3, "1") - @test v == 2 - v = val(s, 4, "2") - @test v == 3 - @test op(s, "Id", 2) == itensor([1 0 0; 0 1 0; 0 0 1], s[2]', dag(s[2])) - @test op(s, "I", 2) == itensor([1 0 0; 0 1 0; 0 0 1], s[2]', dag(s[2])) - @test op(s, "F", 2) == itensor([1 0 0; 0 1 0; 0 0 1], s[2]', dag(s[2])) - @test op("Id", s, 1, 2) == - itensor(Matrix(I, d^2, d^2), s[2]', s[1]', dag(s[2]), dag(s[1])) - @test op("I", s, 1, 2) == - itensor(Matrix(I, d^2, d^2), s[2]', s[1]', dag(s[2]), dag(s[1])) - @test op(s, "N", 2) == itensor([0 0 0; 0 1 0; 0 0 2], s[2]', dag(s[2])) - @test op(s, "n", 2) == itensor([0 0 0; 0 1 0; 0 0 2], s[2]', dag(s[2])) - @test op(s, "Adag", 2) ≈ itensor([0 0 0; 1 0 0; 0 √2 0], s[2]', dag(s[2])) - @test op(s, "adag", 2) ≈ itensor([0 0 0; 1 0 0; 0 √2 0], s[2]', dag(s[2])) - @test op(s, "a†", 2) ≈ 
itensor([0 0 0; 1 0 0; 0 √2 0], s[2]', dag(s[2])) - @test op(s, "A", 2) ≈ itensor([0 1 0; 0 0 √2; 0 0 0], s[2]', dag(s[2])) - @test op(s, "a", 2) ≈ itensor([0 1 0; 0 0 √2; 0 0 0], s[2]', dag(s[2])) - @test op(s, "a†b†", 2, 3) ≈ itensor( - kron([0 0 0; 1 0 0; 0 √2 0], [0 0 0; 1 0 0; 0 √2 0]), - s[3]', - s[2]', - dag(s[3]), - dag(s[2]), - ) - @test op(s, "a†b", 2, 3) ≈ itensor( - kron([0 0 0; 1 0 0; 0 √2 0], [0 1 0; 0 0 √2; 0 0 0]), - s[3]', - s[2]', - dag(s[3]), - dag(s[2]), - ) - @test op(s, "ab†", 2, 3) ≈ itensor( - kron([0 1 0; 0 0 √2; 0 0 0], [0 0 0; 1 0 0; 0 √2 0]), - s[3]', - s[2]', - dag(s[3]), - dag(s[2]), - ) - @test op(s, "ab", 2, 3) ≈ itensor( - kron([0 1 0; 0 0 √2; 0 0 0], [0 1 0; 0 0 √2; 0 0 0]), - s[3]', - s[2]', - dag(s[3]), - dag(s[2]), - ) - @test_throws ErrorException op(ITensors.OpName("ab"), ITensors.SiteType(st)) - - # With QNs - s = siteinds(st, 4; dim=d, conserve_qns=true) - @test all(hasqns, s) - @test op(s, "Id", 2) == itensor([1 0 0; 0 1 0; 0 0 1], s[2]', dag(s[2])) - @test op(s, "I", 2) == itensor([1 0 0; 0 1 0; 0 0 1], s[2]', dag(s[2])) - @test op("Id", s, 1, 2) == - itensor(Matrix(I, d^2, d^2), s[2]', s[1]', dag(s[2]), dag(s[1])) - @test op("I", s, 1, 2) == - itensor(Matrix(I, d^2, d^2), s[2]', s[1]', dag(s[2]), dag(s[1])) - @test op(s, "N", 2) == itensor([0 0 0; 0 1 0; 0 0 2], s[2]', dag(s[2])) - @test op(s, "n", 2) == itensor([0 0 0; 0 1 0; 0 0 2], s[2]', dag(s[2])) - @test op(s, "Adag", 2) ≈ itensor([0 0 0; 1 0 0; 0 √2 0], s[2]', dag(s[2])) - @test op(s, "adag", 2) ≈ itensor([0 0 0; 1 0 0; 0 √2 0], s[2]', dag(s[2])) - @test op(s, "a†", 2) ≈ itensor([0 0 0; 1 0 0; 0 √2 0], s[2]', dag(s[2])) - @test op(s, "A", 2) ≈ itensor([0 1 0; 0 0 √2; 0 0 0], s[2]', dag(s[2])) - @test op(s, "a", 2) ≈ itensor([0 1 0; 0 0 √2; 0 0 0], s[2]', dag(s[2])) - end -end diff --git a/test/base/test_qn.jl b/test/base/test_qn.jl deleted file mode 100644 index 27063617e0..0000000000 --- a/test/base/test_qn.jl +++ /dev/null @@ -1,150 +0,0 @@ -using ITensors, Test - -import ITensors: nactive - -@testset "QN" begin - @testset "QNVal Basics" begin - qv = ITensors.QNVal() - @test !isactive(qv) - @test qv == zero(ITensors.QNVal) - - qv = ITensors.QNVal("Sz", 0) - @test ITensors.name(qv) == ITensors.SmallString("Sz") - @test val(qv) == 0 - @test modulus(qv) == 1 - @test isactive(qv) - - qv = ITensors.QNVal("A", 1, 2) - @test ITensors.name(qv) == ITensors.SmallString("A") - @test val(qv) == 1 - @test modulus(qv) == 2 - @test !isfermionic(qv) - - qv = ITensors.QNVal("Nf", 1, -1) - @test ITensors.name(qv) == ITensors.SmallString("Nf") - @test val(qv) == 1 - @test modulus(qv) == -1 - @test isfermionic(qv) - - qv = zero(ITensors.QNVal("Sz", 5)) - @test ITensors.name(qv) == ITensors.SmallString("Sz") - @test val(qv) == 0 - @test modulus(qv) == 1 - @test isactive(qv) - end - - @testset "QN Basics" begin - q = QN() - @test length(sprint(show, q)) > 1 - - q = QN(("Sz", 1)) - @test length(sprint(show, q)) > 1 - @test isactive(q[1]) - @test val(q, "Sz") == 1 - @test !isfermionic(q) - - q = QN("Sz", 1) - @test length(sprint(show, q)) > 1 - @test isactive(q[1]) - @test val(q, "Sz") == 1 - @test !isfermionic(q) - - q = QN("P", 1, 2) - @test length(sprint(show, q)) > 1 - @test isactive(q[1]) - @test val(q, "P") == 1 - @test modulus(q, "P") == 2 - @test nactive(q) == 1 - - q = QN(("A", 1), ("B", 2)) - @test isactive(q[1]) - @test isactive(q[2]) - @test val(q, "A") == 1 - @test val(q, "B") == 2 - @test modulus(q, "A") == 1 - @test modulus(q, "B") == 1 - - q = QN(("B", 2), ("A", 1)) - @test val(q, "A") == 1 - 
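# QN stores its (name, value, modulus) sectors keyed by name rather than by position,
# so constructing with ("B", 2) before ("A", 1) changes nothing about the lookups here.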
@test val(q, "B") == 2 - @test nactive(q) == 2 - - q = QN(("A", 1), ("B", 2), ("C", 3), ("D", 4)) - @test nactive(q) == 4 - - @test_throws BoundsError begin - q = QN(("A", 1), ("B", 2), ("C", 3), ("D", 4), ("E", 5)) - end - end - - @testset "Comparison" begin - @test QN() == QN() - @test QN("A", 1) == QN("A", 1) - @test QN(("A", 1), ("B", 3)) == QN(("A", 1), ("B", 3)) - @test QN(("A", 1), ("B", 3)) == QN(("B", 3), ("A", 1)) - - # Zero value and missing sector treated the same: - @test QN(("A", 0), ("B", 3)) == QN("B", 3) - @test QN(("B", 3), ("A", 0)) == QN("B", 3) - end - - @testset "Arithmetic" begin - @test QN("Sz", 1) + QN() == QN("Sz", 1) - @test QN("Sz", 1) + QN("Sz", 2) == QN("Sz", 3) - @test QN("Sz", 1) + QN("Sz", -2) == QN("Sz", -1) - - @test QN(("A", 1), ("Sz", 0)) + QN(("A", 0), ("Sz", 1)) == QN(("A", 1), ("Sz", 1)) - - @test QN("P", 0, 2) + QN("P", 1, 2) == QN("P", 1, 2) - @test QN("P", 1, 2) + QN("P", 1, 2) == QN("P", 0, 2) - - # Arithmetic involving mixed-label QNs - @test QN() - QN("Sz", 2) == QN("Sz", -2) - @test QN("Sz", 2) - QN() == QN("Sz", 2) - @test QN() - QN(("Sz", 2), ("N", 1)) == QN(("Sz", -2), ("N", -1)) - @test QN("N", 1) - QN("Sz", 2) == QN(("N", 1), ("Sz", -2)) - end - - @testset "Ordering" begin - z = QN() - qa = QN(("Sz", 1), ("Nf", 1)) - qb = QN(("Sz", 0), ("Nf", 2)) - qc = QN(("Sz", 1), ("Nf", 2)) - qd = QN(("Sz", 1), ("Nf", 2)) - qe = QN(("Sz", -1), ("Nf", -2)) - - @test !(z < z) - @test !(qa < z) - @test (z < qa) - @test (z < qb) - @test !(qb < z) - @test (z < qc) - @test !(qc < z) - @test (z < qd) - @test !(qd < z) - @test !(z < qe) - @test (qe < z) - - @test !(qa > qb) - @test qb > qa - @test !(qb == qa) - @test (qb < qc) - @test !(qc < qb) - @test !(qc == qb) - @test (qc == qd) - @test !(qc < qd) - @test !(qd < qc) - end - - @testset "Hashing" begin - @test hash(QN(("Sz", 0))) == hash(QN()) - @test hash(QN("Sz", 0)) == hash(QN("N", 0)) - @test hash(QN(("Sz", 1), ("N", 2))) == hash(QN(("N", 2), ("Sz", 1))) - end - - @testset "Negative value for mod > 1" begin - @test QN("T", -1, 3) == QN("T", 2, 3) - @test QN("T", -2, 3) == QN("T", 1, 3) - @test QN("T", -3, 3) == QN("T", 0, 3) - end -end diff --git a/test/base/test_qncombiner.jl b/test/base/test_qncombiner.jl deleted file mode 100644 index 093dce60e4..0000000000 --- a/test/base/test_qncombiner.jl +++ /dev/null @@ -1,12 +0,0 @@ -using ITensors, Test - -@testset "QN Combiner" begin - d = 1 - i = Index([QN(0) => d, QN(0) => d]) - A = random_itensor(i) - C = combiner(i) - AC = A * C - - à = AC * dag(C) - @test à ≈ A -end diff --git a/test/base/test_qndiagitensor.jl b/test/base/test_qndiagitensor.jl deleted file mode 100644 index f7ce83a96f..0000000000 --- a/test/base/test_qndiagitensor.jl +++ /dev/null @@ -1,134 +0,0 @@ -using ITensors, Test - -@testset "diag_itensor (DiagBlockSparse)" begin - @testset "diag_itensor get and set elements" begin - i = Index(QN(0) => 2, QN(1) => 3; tags="i") - - D = diag_itensor(QN(), i, dag(i')) - - for b in eachnzblock(D) - @test flux(D, b) == QN() - end - - D[i => 1, i' => 1] = 1 - D[i => 2, i' => 2] = 2 - D[i => 3, i' => 3] = 3 - D[i => 4, i' => 4] = 4 - D[i => 5, i' => 5] = 5 - - @test_throws ErrorException D[i => 1, i' => 2] = 2.0 - - @test D[i => 1, i' => 1] == 1 - @test D[i => 2, i' => 2] == 2 - @test D[i => 3, i' => 3] == 3 - @test D[i => 4, i' => 4] == 4 - @test D[i => 5, i' => 5] == 5 - end - - @testset "diag_itensor Tuple constructor" begin - i = Index(QN(0) => 2, QN(1) => 3; tags="i") - - D = diag_itensor((i, dag(i'))) - - for b in eachnzblock(D) - @test flux(D, b) 
== QN() - end - end - - @testset "delta" begin - i = Index(QN(0) => 2, QN(1) => 3; tags="i") - ĩ = sim(i; tags="i_sim") - j = Index(QN(0) => 2, QN(1) => 3, QN(2) => 4; tags="j") - - A = random_itensor(QN(), i, dag(j)) - - δiĩ = δ(dag(i), ĩ) - - @test storage(δiĩ) isa NDTensors.DiagBlockSparse{ElT,ElT} where {ElT<:Number} - - B = A * δiĩ - - A = permute(A, i, j) - B = permute(B, ĩ, j) - - @test norm(dense(NDTensors.tensor(A)) - dense(NDTensors.tensor(B))) ≈ 0 - end - - @testset "delta Tuple constructor" begin - i = Index(QN(0) => 2, QN(1) => 3; tags="i") - ĩ = sim(i; tags="i_sim") - - δiĩ = δ((dag(i), ĩ)) - - for b in eachnzblock(δiĩ) - @test flux(δiĩ, b) == QN() - end - end - - @testset "denseblocks: convert DiagBlockSparse to BlockSparse" begin - i = Index([QN(0) => 2, QN(1) => 3]) - A = diag_itensor(i', dag(i)) - randn!(ITensors.data(A)) - B = denseblocks(A) - for n in 1:dim(i) - @test A[n, n] == B[n, n] - end - @test dense(A) == dense(B) - end - - @testset "Regression test for QN delta contraction bug" begin - # http://itensor.org/support/2814/block-sparse-itensor-wrong-results-multiplying-delta-tensor - s = Index([QN(("N", i, 1)) => 1 for i in 1:2]) - l = dag(addtags(s, "left")) - r = addtags(s, "right") - u = addtags(s, "up") - d = dag(addtags(s, "down")) - A = ITensor(l, r, u, d) - A[1, 1, 1, 1] = 1.0 - A[1, 1, 2, 2] = 1.0 - A[2, 2, 1, 1] = 1.0 - A[2, 2, 2, 2] = 1.0 - δlr = δ(dag(l), dag(r)) - δud = δ(dag(u), dag(d)) - A1 = A * δlr - denseA1 = dense(A) * dense(δlr) - A2 = A1 * δud - denseA2 = denseA1 * dense(δud) - @test dense(A1) ≈ denseA1 - @test dense(A2) ≈ denseA2 - @test A2[] ≈ 4 - end - - @testset "Regression test for QN delta dag, contract, and norm" begin - i = Index([QN("Sz", 0) => 1, QN("Sz", 1) => 1]) - x = δ(i, dag(i)') - - @test isone(x[1, 1]) - @test isone(dag(x)[1, 1]) - - c = 2 + 3im - x *= c - - @test x[1, 1] == c - @test dag(x)[1, 1] == conj(c) - @test (x * dag(x))[] == 2 * abs2(c) - @test (x * dag(x))[] ≈ norm(x)^2 - end - - @testset "Regression test for printing a QN Diag ITensor" begin - # https://github.com/ITensor/NDTensors.jl/issues/61 - i = Index([QN() => 2]) - A = random_itensor(i', dag(i)) - U, S, V = svd(A, i') - # Test printing S - io = IOBuffer() - show(io, S) - sS = String(take!(io)) - @test sS isa String - # Test printing U - io = IOBuffer() - show(io, U) - sU = String(take!(io)) - @test sU isa String - end -end diff --git a/test/base/test_qnindex.jl b/test/base/test_qnindex.jl deleted file mode 100644 index 9ba14f5eea..0000000000 --- a/test/base/test_qnindex.jl +++ /dev/null @@ -1,70 +0,0 @@ -using ITensors, Test - -import ITensors: In, Out, Neither - -@testset "QN Index" begin - @testset "hasqns function" begin - i = Index(4, "i") - @test hasqns(i) == false - j = Index(QN(0) => 1, QN(1) => 1) - @test hasqns(j) == true - end - - @testset "Array of QN Constructor" begin - i = Index([QN(0) => 1, QN(1) => 2], "i") - @test hasqns(i) - @test dim(i) == 3 - @test hastags(i, "i") - end - - @testset "Vararg Constructor" begin - i = Index(QN(0) => 1, QN(1) => 2; tags="i") - @test hasqns(i) - @test dim(i) == 3 - @test hastags(i, "i") - @test dir(i) == Out - @test dir(i => 2) == Out - - j = Index(QN(0) => 1, QN(1) => 2; tags="j", dir=In) - @test hasqns(j) - @test dim(j) == 3 - @test hastags(j, "j") - @test dir(j) == In - @test dir(j => 2) == In - end - - @testset "flux and qn" begin - i = dag(Index([QN(0) => 2, QN(1) => 2], "i")) - - @test flux(i => 1) == QN(0) - @test flux(i => 2) == QN(0) - @test flux(i => 3) == QN(-1) - @test flux(i => 4) == QN(-1) - @test 
flux(i => Block(1)) == QN(0) - @test flux(i => Block(2)) == QN(-1) - - @test qn(i => 1) == QN(0) - @test qn(i => 2) == QN(0) - @test qn(i => 3) == QN(1) - @test qn(i => 4) == QN(1) - @test qn(i => Block(1)) == QN(0) - @test qn(i => Block(2)) == QN(1) - end - - @testset "directsum" begin - i = Index([QN(0) => 1, QN(1) => 2], "i") - j = Index([QN(2) => 3, QN(3) => 4], "j") - ij = ITensors.directsum(i, j; tags="test") - @test dim(ij) == dim(i) + dim(j) - @test hastags(ij, "test") - @test flux(ij, Block(1)) == QN(0) - @test flux(ij, Block(2)) == QN(1) - @test flux(ij, Block(3)) == QN(2) - @test flux(ij, Block(4)) == QN(3) - @test dim(ij, Block(1)) == 1 - @test dim(ij, Block(2)) == 2 - @test dim(ij, Block(3)) == 3 - @test dim(ij, Block(4)) == 4 - @test_throws ErrorException ITensors.directsum(i, dag(j)) - end -end diff --git a/test/base/test_qnitensor.jl b/test/base/test_qnitensor.jl deleted file mode 100644 index 1a9c5d84f7..0000000000 --- a/test/base/test_qnitensor.jl +++ /dev/null @@ -1,1917 +0,0 @@ -using ITensors -using ITensors.NDTensors -using ITensors.SiteTypes: siteind, siteinds -using LinearAlgebra -using Random -using Test - -Random.seed!(1234) - -@testset "BlockSparse ITensor" begin - @testset "Constructor" begin - i = Index([QN(0) => 1, QN(1) => 2], "i") - j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j") - - A = ITensor(QN(0), i, dag(j)) - - @test flux(A) == QN(0) - @test nnzblocks(A) == 2 - end - - @testset "Construct from Array" begin - i = Index([QN(0) => 1, QN(1) => 2], "i") - - A = [ - 1.0 0.0 0.0 - 0.0 2.0 3.0 - 0.0 1e-10 4.0 - ] - T = ITensor(A, i', dag(i)) - @test flux(T) == QN(0) - @test nnzblocks(T) == 2 - @test Block(1, 1) in nzblocks(T) - @test Block(2, 2) in nzblocks(T) - @test T[1, 1] == 1.0 - @test T[2, 2] == 2.0 - @test T[2, 3] == 3.0 - @test T[3, 2] == 1e-10 - @test T[3, 3] == 4.0 - - T = ITensor(A, i', dag(i)) - @test flux(T) == QN(0) - @test nnzblocks(T) == 2 - @test Block(1, 1) in nzblocks(T) - @test Block(2, 2) in nzblocks(T) - @test T[1, 1] == 1.0 - @test T[2, 2] == 2.0 - @test T[2, 3] == 3.0 - @test T[3, 2] == 1e-10 - @test T[3, 3] == 4.0 - - T = ITensor(A, i', dag(i); tol=1e-9) - @test flux(T) == QN(0) - @test nnzblocks(T) == 2 - @test Block(1, 1) in nzblocks(T) - @test Block(2, 2) in nzblocks(T) - @test T[1, 1] == 1.0 - @test T[2, 2] == 2.0 - @test T[2, 3] == 3.0 - @test T[3, 2] == 0.0 - @test T[3, 3] == 4.0 - - A = [ - 1e-9 0.0 0.0 - 0.0 2.0 3.0 - 0.0 1e-10 4.0 - ] - T = ITensor(A, i', dag(i); tol=1e-8) - @test flux(T) == QN(0) - @test nnzblocks(T) == 1 - @test Block(2, 2) in nzblocks(T) - @test T[1, 1] == 0.0 - @test T[2, 2] == 2.0 - @test T[2, 3] == 3.0 - @test T[3, 2] == 0.0 - @test T[3, 3] == 4.0 - - A = [ - 1e-9 2.0 3.0 - 1e-9 1e-10 2e-10 - 2e-9 1e-10 4e-10 - ] - T = ITensor(A, i', dag(i); tol=1e-8) - @test flux(T) == QN(-1) - @test nnzblocks(T) == 1 - @test Block(1, 2) in nzblocks(T) - @test T[1, 1] == 0.0 - @test T[1, 2] == 2.0 - @test T[1, 3] == 3.0 - @test T[2, 2] == 0.0 - @test T[2, 3] == 0.0 - @test T[3, 2] == 0.0 - @test T[3, 3] == 0.0 - - A = [ - 1e-9 2.0 3.0 - 1e-5 1e-10 2e-10 - 2e-9 1e-10 4e-10 - ] - @test_throws ErrorException ITensor(A, i', dag(i); tol=1e-8) - @test ITensor(A, i', dag(i); tol=1e-8, checkflux=false) isa ITensor - - # Construct from zero matrix. 
Flux check should still pass - # (Regression test for issue #1209) - @test ITensor(zeros(3, 3), i', dag(i)) isa ITensor - end - - @testset "reductions (sum, prod)" for elt in ( - Float32, Float64, Complex{Float32}, Complex{Float64} - ) - s = [QN(0) => 2, QN(1) => 2] - a = random_itensor(elt, Index(s), dag(Index(s))) - @test sum(a) ≈ sum(array(a)) - @test sum(a) isa elt - @test prod(a) ≈ prod(array(a)) - @test prod(a) isa elt - - # All blocks are nonzero - s = [QN(0) => 2, QN(0) => 2] - a = random_itensor(elt, Index(s), dag(Index(s))) - @test sum(a) ≈ sum(array(a)) - @test sum(a) isa elt - @test prod(a) ≈ prod(array(a)) - @test prod(a) isa elt - end - - @testset "Regression test for in-place operations with mismatched block structure (eltype=$elt)" for elt in - ( - Float32, Float64, Complex{Float32}, Complex{Float64} - ) - # Regression test for https://github.com/ITensor/ITensors.jl/pull/1318 - i = Index([QN(0) => 1, QN(1) => 1]) - src = ITensor(i', dag(i)) - x12 = randn(elt) - src[1, 2] = x12 - dest = ITensor(i', dag(i)) - x21 = randn(elt) - dest[2, 1] = x21 - α = elt(2) - dest .= α .* src - @test nnz(src) == 1 - @test src[1, 2] == x12 - @test nnz(dest) == 2 - @test dest[1, 2] == α * x12 - @test dest[2, 1] == zero(elt) - end - - @testset "similartype regression test" begin - # Regression test for issue seen in: - # https://github.com/ITensor/ITensorInfiniteMPS.jl/pull/77 - # Previously, `similartype` wasn't using information about the dimensions - # properly and was returning a `BlockSparse` storage of the dimensions - # of the input tensor. - i = Index([QN() => 2]) - A = ITensor(i, i') - B = ITensor(i'') - C = A * B - @test NDTensors.ndims(NDTensors.storagetype(C)) == 3 - @test C + ITensor(i, i', i'') == ITensor(i, i', i'') - end - - @testset "Construct from Array regression test" begin - i = Index([QN(0) => 2, QN(1) => 2]) - T = itensor([0, 0, 1, 2], i) - @test flux(T) == QN(1) - @test nnzblocks(T) == 1 - @test !(Block(1) in nzblocks(T)) - @test Block(2) in nzblocks(T) - @test T[1] == 0 - @test T[2] == 0 - @test T[3] == 1 - @test T[4] == 2 - # Test fluxes of specific elements: - @test flux(T, 1) == QN(0) - @test flux(T, 2) == QN(0) - @test flux(T, 3) == QN(1) - @test flux(T, 4) == QN(1) - @test_throws BoundsError flux(T, 5) - @test_throws BoundsError flux(T, 0) - # Test fluxes of specific Blocks - @test flux(T, Block(1)) == QN(0) - @test flux(T, Block(2)) == QN(1) - @test_throws BoundsError flux(T, Block(0)) - @test_throws BoundsError flux(T, Block(3)) - end - - @testset "trace (tr)" begin - si = [QN(0) => 1, QN(1) => 2, QN(2) => 3] - sj = [QN(0) => 2, QN(1) => 3, QN(2) => 4] - sk = [QN(0) => 3, QN(1) => 4, QN(2) => 5] - sl = [QN(0) => 2] - i, j, k, l = Index.((si, sj, sk, sl), ("i", "j", "k", "l")) - T = random_itensor(dag(j), k', i', dag(k), j', dag(i)) - trT1 = tr(T) - trT2 = (T * δ(i, dag(i)') * δ(j, dag(j)') * δ(k, dag(k)'))[] - @test trT1 ≈ trT2 - - T = random_itensor(dag(j), k', i', l, dag(k), j', dag(i)) - trT1 = tr(T) - trT2 = T * δ(i, dag(i)') * δ(j, dag(j)') * δ(k, dag(k)') - @test trT1 ≈ trT2 - end - - @testset "QN ITensor Array constructor view behavior" begin - d = 2 - i = Index([QN(0) => d ÷ 2, QN(1) => d ÷ 2]) - - # no view - A = diagm(randn(Float64, d)) - T = ITensor(A, i', dag(i); tol=1e-12) - @test storage(T) isa NDTensors.BlockSparse{Float64} - A[1, 1] = 2.0 - T[1, 1] ≠ 2.0 - - # no view - A = diagm(rand(Int, d)) - T = ITensor(Int, A, i', dag(i); tol=1e-12) - @test storage(T) isa NDTensors.BlockSparse{Int} - A[1, 1] = 2 - T[1, 1] ≠ 2 - - # no view - A = 
diagm(rand(Int, d)) - T = ITensor(A, i', dag(i); tol=1e-12) - @test storage(T) isa NDTensors.BlockSparse{Float64} - A[1, 1] = 2 - T[1, 1] ≠ 2 - - # no view - A = diagm(randn(Float64, d)) - T = ITensor(A, i', dag(i); tol=1e-12) - @test storage(T) isa NDTensors.BlockSparse{Float64} - A[1, 1] = 2 - T[1, 1] ≠ 2 - - # no view - A = diagm(rand(Int, d)) - T = ITensor(Int, A, i', dag(i); tol=1e-12) - @test storage(T) isa NDTensors.BlockSparse{Int} - A[1, 1] = 2 - T[1, 1] ≠ 2 - - # no view - A = diagm(rand(Int, d)) - T = ITensor(A, i', dag(i); tol=1e-12) - @test storage(T) isa NDTensors.BlockSparse{Float64} - A[1, 1] = 2 - T[1, 1] ≠ 2 - end - - @testset "Constructor Leads to No Blocks" begin - i = Index(QN(0) => 2, QN(1) => 3; tags="i") - j = Index(QN(1) => 2, QN(2) => 1; tags="j") - A = ITensor(i, j) - @test storage(A) isa NDTensors.EmptyStorage - @test_throws ErrorException ITensor(QN(0), i, j) - end - - @testset "ITensor iteration" begin - i = Index([QN(0) => 1, QN(1) => 2], "i") - j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j") - - A = random_itensor(i, dag(j)) - Is = eachindex(A) - @test length(Is) == dim(A) - sumA = 0.0 - for I in Is - sumA += A[I] - end - @test sumA ≈ sum(ITensors.data(A)) - sumA = 0.0 - for a in A - sumA += a - end - @test sumA ≈ sum(A) - end - - @testset "Constructor (from Tuple)" begin - i = Index([QN(0) => 1, QN(1) => 2], "i") - j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j") - - A = ITensor(QN(0), (i, dag(j))) - - @test flux(A) == QN(0) - @test nnzblocks(A) == 2 - end - - @testset "Constructor (no flux specified)" begin - i = Index([QN(0) => 1, QN(1) => 2], "i") - j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j") - - A = ITensor(i, dag(j)) - - @test flux(A) === nothing - @test nnzblocks(A) == 0 - end - - @testset "Constructor (Tuple, no flux specified)" begin - i = Index([QN(0) => 1, QN(1) => 2], "i") - j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j") - - A = ITensor((i, dag(j))) - - @test flux(A) === nothing - @test nnzblocks(A) == 0 - end - - @testset "No indices getindex" begin - T = ITensor(QN()) - @test order(T) == 0 - @test flux(T) == nothing - @test nnzblocks(T) == 1 - @test T[] == 0 - - s = Index(QN(-1) => 1, QN(1) => 1) - A = ITensor(s, dag(s')) - B = ITensor(s', dag(s)) - A[1, 1] = 1 - B[2, 2] = 1 - C = A * B - @test order(C) == 0 - @test flux(C) == nothing - @test nnzblocks(C) == 0 - @test C[] == 0 - end - - @testset "Empty constructor" begin - i = Index([QN(0) => 1, QN(1) => 2], "i") - - A = ITensor(i, dag(i')) - - @test nnzblocks(A) == 0 - @test nnz(A) == 0 - @test hasinds(A, i, i') - @test isnothing(flux(A)) - - A[i => 1, i' => 1] = 1.0 - - @test nnzblocks(A) == 1 - @test nnz(A) == 1 - @test flux(A) == QN(0) - - A[i => 2, i' => 2] = 1.0 - - @test nnzblocks(A) == 2 - @test nnz(A) == 5 - @test flux(A) == QN(0) - end - - @testset "Check flux when setting elements" begin - i = Index(QN(0) => 1, QN(1) => 1; tags="i") - A = random_itensor(QN(0), i, dag(i')) - @test_throws ErrorException A[i => 1, i' => 2] = 1.0 - end - - @testset "Random constructor" begin - i = Index([QN(0) => 1, QN(1) => 2], "i") - j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j") - - A = random_itensor(QN(1), i, dag(j)) - - @test flux(A) == QN(1) - @test nnzblocks(A) == 1 - - B = random_itensor(i, dag(j)) - - @test flux(B) == QN() - @test nnzblocks(B) == 2 - - # Scalar algebra - C = 2 * B - @test C[1, 1] == 2 * B[1, 1] - @test flux(B) == QN() - @test flux(C) == QN() - @test nnzblocks(B) == 2 - @test nnzblocks(C) == 2 - - C = B / 2 - @test C[1, 1] == B[1, 1] / 2 - @test 
flux(B) == QN() - @test flux(C) == QN() - @test nnzblocks(B) == 2 - @test nnzblocks(C) == 2 - end - - @testset "eltype promotion with scalar * and /" begin - i = Index([QN(0) => 2, QN(1) => 3]) - @test eltype(ITensor(1.0f0, i', dag(i)) * 2) === Float32 - @test eltype(ITensor(1.0f0, i', dag(i)) .* 2) === Float32 - @test eltype(ITensor(1.0f0, i', dag(i)) / 2) === Float32 - @test eltype(ITensor(1.0f0, i', dag(i)) ./ 2) === Float32 - @test eltype(ITensor(1.0f0, i', dag(i)) * 2.0f0) === Float32 - @test eltype(ITensor(1.0f0, i', dag(i)) .* 2.0f0) === Float32 - @test eltype(ITensor(1.0f0, i', dag(i)) / 2.0f0) === Float32 - @test eltype(ITensor(1.0f0, i', dag(i)) ./ 2.0f0) === Float32 - @test eltype(ITensor(1.0f0, i', dag(i)) * 2.0) === Float64 - @test eltype(ITensor(1.0f0, i', dag(i)) .* 2.0) === Float64 - @test eltype(ITensor(1.0f0, i', dag(i)) / 2.0) === Float64 - @test eltype(ITensor(1.0f0, i', dag(i)) ./ 2.0) === Float64 - end - - @testset "Complex Number Operations" begin - i = Index([QN(0) => 1, QN(1) => 2], "i") - j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j") - - A = random_itensor(ComplexF64, QN(0), i, dag(j)) - - @test flux(A) == QN(0) - @test nnzblocks(A) == 2 - - rA = real(A) - iA = imag(A) - @test nnzblocks(rA) == nnzblocks(A) - @test nnzblocks(iA) == nnzblocks(A) - @test norm(rA + 1im * iA - A) < 1E-8 - @test eltype(rA) == Float64 - @test eltype(iA) == Float64 - - cA = conj(A) - @test eltype(cA) == ComplexF64 - @test norm(cA) ≈ norm(A) - - B = random_itensor(Float64, QN(0), i, dag(j)) - - cB = conj(B) - @test eltype(cB) == Float64 - @test norm(cB) ≈ norm(B) - end - - @testset "QN onehot" begin - i = Index(QN(0) => 2, QN(1) => 2; tags="i") - - T = onehot(i => 1) - @test T[i => 1] ≈ 1.0 - @test T[i => 2] ≈ 0.0 - @test T[i => 3] ≈ 0.0 - @test T[i => 4] ≈ 0.0 - - T = onehot(i => 2) - @test T[i => 1] ≈ 0.0 - @test T[i => 2] ≈ 1.0 - @test T[i => 3] ≈ 0.0 - @test T[i => 4] ≈ 0.0 - end - - @testset "setindex!" 
begin - @testset "Test 1" begin - s1 = Index([QN("N", 0, -1) => 1, QN("N", 1, -1) => 1], "s1") - s2 = Index([QN("N", 0, -1) => 1, QN("N", 1, -1) => 1], "s2") - A = ITensor(s1, s2) - - @test nnzblocks(A) == 0 - @test nnz(A) == 0 - @test hasinds(A, s1, s2) - @test isnothing(flux(A)) - - A[2, 1] = 1.0 / sqrt(2) - - @test nnzblocks(A) == 1 - @test nnz(A) == 1 - @test A[s1 => 2, s2 => 1] ≈ 1.0 / sqrt(2) - @test flux(A) == QN("N", 1, -1) - - A[1, 2] = 1.0 / sqrt(2) - - @test nnzblocks(A) == 2 - @test nnz(A) == 2 - @test A[s1 => 2, s2 => 1] ≈ 1.0 / sqrt(2) - @test A[s1 => 1, s2 => 2] ≈ 1.0 / sqrt(2) - @test flux(A) == QN("N", 1, -1) - end - - @testset "Test 2" begin - s1 = Index([QN("N", 0, -1) => 1, QN("N", 1, -1) => 1], "s1") - s2 = Index([QN("N", 0, -1) => 1, QN("N", 1, -1) => 1], "s2") - A = ITensor(s1, s2) - - @test nnzblocks(A) == 0 - @test nnz(A) == 0 - @test hasinds(A, s1, s2) - @test isnothing(flux(A)) - - A[1, 2] = 1.0 / sqrt(2) - - @test nnzblocks(A) == 1 - @test nnz(A) == 1 - @test A[s1 => 1, s2 => 2] ≈ 1.0 / sqrt(2) - @test flux(A) == QN("N", 1, -1) - - A[2, 1] = 1.0 / sqrt(2) - - @test nnzblocks(A) == 2 - @test nnz(A) == 2 - @test A[s1 => 2, s2 => 1] ≈ 1.0 / sqrt(2) - @test A[s1 => 1, s2 => 2] ≈ 1.0 / sqrt(2) - @test flux(A) == QN("N", 1, -1) - end - end - - @testset "Multiply by scalar" begin - i = Index([QN(0) => 1, QN(1) => 2], "i") - j = Index([QN(0) => 3, QN(1) => 4], "j") - - A = random_itensor(QN(0), i, dag(j)) - - @test flux(A) == QN(0) - @test nnzblocks(A) == 2 - - B = 2 * A - - @test flux(B) == QN(0) - @test nnzblocks(B) == 2 - - for ii in dim(i), jj in dim(j) - @test 2 * A[i => ii, j => jj] == B[i => ii, j => jj] - end - end - - @testset "Check arrows when summing" begin - s = siteinds("S=1/2", 4; conserve_qns=true) - Tout = random_itensor(QN("Sz" => 2), s[2], s[1], s[3], s[4]) - Tin = random_itensor(QN("Sz" => 2), dag(s[1]), dag(s[2]), dag(s[3]), dag(s[4])) - @test norm(Tout - Tout) < 1E-10 # this is ok - @test_throws ErrorException (Tout + Tin) # not ok - end - - @testset "Copy" begin - s = Index([QN(0) => 1, QN(1) => 1], "s") - T = random_itensor(QN(0), s, s') - cT = copy(T) - for ss in dim(s), ssp in dim(s') - @test T[s => ss, s' => ssp] == cT[s => ss, s' => ssp] - end - end - - @testset "Permute" begin - i = Index([QN(0) => 1, QN(1) => 2], "i") - j = Index([QN(0) => 3, QN(1) => 4], "j") - - A = random_itensor(QN(1), i, dag(j)) - - @test flux(A) == QN(1) - @test nnzblocks(A) == 1 - - B = permute(A, j, i) - - @test flux(B) == QN(1) - @test nnzblocks(B) == 1 - - for ii in dim(i), jj in dim(j) - @test A[ii, jj] == B[jj, ii] - end - end - - @testset "Contraction" begin - i = Index([QN(0) => 1, QN(1) => 2], "i") - j = Index([QN(0) => 3, QN(1) => 4], "j") - - A = random_itensor(QN(0), i, dag(j)) - - @test flux(A) == QN(0) - @test nnzblocks(A) == 2 - - B = random_itensor(QN(1), j, dag(i)') - - @test flux(B) == QN(1) - @test nnzblocks(B) == 1 - - C = A * B - - @test hasinds(C, i, i') - @test flux(C) == QN(1) - @test nnzblocks(C) == 1 - end - - @testset "Combine and uncombine" begin - @testset "Combine no indices" begin - i1 = Index([QN(0, 2) => 2, QN(1, 2) => 2], "i1") - A = random_itensor(QN(), i1, dag(i1')) - - C = combiner() - c = combinedind(C) - @test isnothing(c) - AC = A * C - @test nnz(AC) == nnz(A) - @test nnzblocks(AC) == nnzblocks(A) - @test hassameinds(AC, A) - @test norm(AC - A * C) ≈ 0.0 - Ap = AC * dag(C) - @test nnz(Ap) == nnz(A) - @test nnzblocks(Ap) == nnzblocks(A) - @test hassameinds(Ap, A) - @test norm(A - Ap) ≈ 0.0 - end - - @testset "Combine set 
direction" begin - i1 = Index([QN(0) => 2, QN(1) => 3], "i1") - A = random_itensor(i1', dag(i1)) - # Test that checkflux does not throw an error: - @test isnothing(ITensors.checkflux(A)) - C = combiner(dag(i1); dir=ITensors.Out) - c = combinedind(C) - @test dir(c) == ITensors.Out - AC = A * C - @test nnz(AC) == nnz(A) - @test nnzblocks(AC) == nnzblocks(A) - # Test that checkflux does not throw an error: - @test isnothing(ITensors.checkflux(AC)) - Ap = AC * dag(C) - @test nnz(Ap) == nnz(A) - @test nnzblocks(Ap) == nnzblocks(A) - @test hassameinds(Ap, A) - # Test that checkflux does not throw an error: - @test isnothing(ITensors.checkflux(AC)) - @test A ≈ Ap - end - - @testset "Order 2 (IndexSet constructor)" begin - i1 = Index([QN(0, 2) => 2, QN(1, 2) => 2], "i1") - A = random_itensor(QN(), i1, dag(i1')) - - iss = [i1, dag(i1'), (i1, dag(i1')), (dag(i1'), i1)] - - for is in iss - C = combiner(IndexSet(is); tags="c") - AC = A * C - @test nnz(AC) == nnz(A) - Ap = AC * dag(C) - @test nnz(Ap) == nnz(A) - @test nnzblocks(Ap) == nnzblocks(A) - @test norm(A - Ap) ≈ 0.0 - end - end - - @testset "Order 2" begin - i1 = Index([QN(0, 2) => 2, QN(1, 2) => 2], "i1") - A = random_itensor(QN(), (i1, dag(i1'))) - - iss = [i1, dag(i1'), (i1, dag(i1')), (dag(i1'), i1)] - - for is in iss - C = combiner(is; tags="c") - AC = A * C - @test nnz(AC) == nnz(A) - Ap = AC * dag(C) - @test nnz(Ap) == nnz(A) - @test nnzblocks(Ap) == nnzblocks(A) - @test norm(A - Ap) ≈ 0.0 - end - end - - @testset "Order 3, Combine 2" begin - i = Index([QN(0) => 2, QN(1) => 2], "i") - - A = random_itensor(QN(0), i, dag(i)', dag(i)'') - - C = combiner(i, dag(i)'') - c = combinedind(C) - - AC = A * C - - @test hasinds(AC, c, i') - @test nnz(AC) == nnz(A) - - for b in nzblocks(AC) - @test flux(AC, b) == QN(0) - end - - B = ITensor(QN(0), i', c) - @test nnz(B) == nnz(AC) - @test nnzblocks(B) == nnzblocks(AC) - - Ap = AC * dag(C) - - @test norm(A - Ap) == 0 - @test nnz(A) == nnz(Ap) - @test nnzblocks(A) == nnzblocks(Ap) - @test hassameinds(A, Ap) - end - - @testset "Order 3" begin - i1 = Index([QN(0, 2) => 2, QN(1, 2) => 2], "i1") - i2 = settags(i1, "i2") - A = random_itensor(QN(), i1, i2, dag(i1')) - - iss = [ - i1, - i2, - dag(i1'), - (i1, i2), - (i2, i1), - (i1, dag(i1')), - (dag(i1'), i1), - (i2, dag(i1')), - (dag(i1'), i2), - (i1, i2, dag(i1')), - (i1, dag(i1'), i2), - (i2, i1, dag(i1')), - (i2, dag(i1'), i1), - (dag(i1'), i1, i2), - (dag(i1'), i2, i1), - ] - - for is in iss - C = combiner(is; tags="c") - AC = A * C - @test nnz(AC) == nnz(A) - Ap = AC * dag(C) - @test nnz(Ap) == nnz(A) - @test nnzblocks(Ap) == nnzblocks(A) - @test norm(A - AC * dag(C)) ≈ 0.0 - end - end - - @testset "Order 4" begin - i1 = Index([QN(0, 2) => 2, QN(1, 2) => 2], "i1") - i2 = settags(i1, "i2") - A = random_itensor(QN(), i1, i2, dag(i1'), dag(i2')) - - iss = [ - i1, - i2, - dag(i1'), - dag(i2'), - (i1, i2), - (i2, i1), - (i1, dag(i1')), - (dag(i1'), i1), - (i1, dag(i2')), - (dag(i2'), i1), - (i2, dag(i1')), - (dag(i1'), i2), - (i2, dag(i2')), - (dag(i2'), i2), - (dag(i1'), dag(i2')), - (dag(i2'), dag(i1')), - (i1, i2, dag(i1')), - (i1, dag(i1'), i2), - (i2, i1, dag(i1')), - (i2, dag(i1'), i1), - (dag(i1'), i1, i2), - (dag(i1'), i2, i1), - (i1, dag(i1'), dag(i2')), - (i1, dag(i2'), dag(i1')), - (dag(i1'), i1, dag(i2')), - (dag(i1'), dag(i2'), i1), - (dag(i2'), i1, dag(i1')), - (dag(i2'), dag(i1'), i1), - (i1, i2, dag(i1'), dag(i2')), - (i1, i2, dag(i2'), dag(i1')), - (i1, dag(i1'), i2, dag(i2')), - (i1, dag(i1'), dag(i2'), i2), - (i1, dag(i2'), i2, dag(i1')), 
- (i1, dag(i2'), dag(i1'), i2), - (i2, i1, dag(i1'), dag(i2')), - (i2, i1, dag(i2'), dag(i1')), - (i2, dag(i1'), i1, dag(i2')), - (i2, dag(i1'), dag(i2'), i1), - (i2, dag(i2'), i1, dag(i1')), - (i2, dag(i2'), dag(i1'), i1), - (dag(i1'), i2, i1, dag(i2')), - (dag(i1'), i2, dag(i2'), i1), - (dag(i1'), i1, i2, dag(i2')), - (dag(i1'), i1, dag(i2'), i2), - (dag(i1'), dag(i2'), i2, i1), - (dag(i1'), dag(i2'), i1, i2), - (dag(i2'), i1, dag(i1'), i2), - (dag(i2'), i1, i2, dag(i1')), - (dag(i2'), dag(i1'), i1, i2), - (dag(i2'), dag(i1'), i2, i1), - (dag(i2'), i2, i1, dag(i1')), - (dag(i2'), i2, dag(i1'), i1), - ] - - for is in iss - C = combiner(is; tags="c") - AC = A * C - @test nnz(AC) == nnz(A) - Ap = AC * dag(C) - @test nnz(Ap) == nnz(A) - @test nnzblocks(Ap) == nnzblocks(A) - @test norm(A - Ap) ≈ 0.0 - end - end - - @testset "Order 4, Combine 2, Example 1" begin - s1 = Index( - [ - QN(("Sz", 0), ("Nf", 0)) => 1, - QN(("Sz", +1), ("Nf", 1)) => 1, - QN(("Sz", -1), ("Nf", 1)) => 1, - QN(("Sz", 0), ("Nf", 2)) => 1, - ], - "site,n=1", - ) - s2 = replacetags(s1, "n=1", "n=2") - - A = random_itensor(QN(), s1, s2, dag(s1)', dag(s2)') - - C = combiner(dag(s1)', dag(s2)') - c = combinedind(C) - - AC = A * C - - @test norm(AC) ≈ norm(A) - @test hasinds(AC, s1, s2, c) - @test nnz(AC) == nnz(A) - for b in nzblocks(AC) - @test flux(AC, b) == QN() - end - - @test nnzblocks(AC) < nnz(A) - - B = ITensor(QN(), s1, s2, c) - @test nnz(B) == nnz(AC) - @test nnzblocks(B) == nnzblocks(AC) - - Ap = AC * dag(C) - - @test hassameinds(A, Ap) - @test norm(A - Ap) == 0 - @test nnz(A) == nnz(Ap) - @test nnzblocks(A) == nnzblocks(Ap) - end - - @testset "Order 4, Combine 2, Example 2" begin - s1 = Index([QN(("Nf", 0)) => 1, QN(("Nf", 1)) => 1], "site,n=1") - s2 = replacetags(s1, "n=1", "n=2") - - A = random_itensor(QN(), dag(s2)', s2, dag(s1)', s1) - - C = combiner(dag(s2)', dag(s1)') - c = combinedind(C) - - AC = A * C - - @test norm(AC) ≈ norm(A) - @test hasinds(AC, s1, s2, c) - @test nnz(AC) == nnz(A) - for b in nzblocks(AC) - @test flux(AC, b) == QN() - end - - B = ITensor(QN(), s1, s2, c) - @test nnzblocks(B) == nnzblocks(AC) - - Ap = AC * dag(C) - - @test hassameinds(A, Ap) - @test norm(A - Ap) == 0 - @test nnz(A) == nnz(Ap) - @test nnzblocks(A) == nnzblocks(Ap) - end - - @testset "Order 4, Combine 2, Example 3" begin - s1 = Index([QN(("Nf", 0)) => 1, QN(("Nf", 1)) => 1], "site,n=1") - s2 = replacetags(s1, "n=1", "n=2") - - A = random_itensor(QN(), dag(s1)', s2, dag(s2)', s1) - - C = combiner(dag(s2)', dag(s1)') - c = combinedind(C) - - AC = A * C - - @test norm(AC) ≈ norm(A) - @test hasinds(AC, s1, s2, c) - @test nnz(AC) == nnz(A) - for b in nzblocks(AC) - @test flux(AC, b) == QN() - end - - B = ITensor(QN(), s1, s2, c) - @test nnzblocks(B) == nnzblocks(AC) - - Ap = AC * dag(C) - - @test hassameinds(A, Ap) - @test norm(A - Ap) == 0 - @test nnz(A) == nnz(Ap) - @test nnzblocks(A) == nnzblocks(Ap) - end - end - - @testset "Check that combiner commutes" begin - i = Index(QN(0, 2) => 2, QN(1, 2) => 2; tags="i") - j = settags(i, "j") - A = random_itensor(QN(0, 2), i, j, dag(i'), dag(j')) - C = combiner(i, j) - @test norm(A * dag(C') * C - A * C * dag(C')) ≈ 0.0 - end - - @testset "Combiner for block deficient ITensor" begin - i = Index(QN(0, 2) => 2, QN(1, 2) => 2; tags="i") - j = settags(i, "j") - A = ITensor(i, j, dag(i')) - A[1, 1, 1] = 1.0 - C = combiner(i, j; tags="c") - AC = A * C - Ap = AC * dag(C) - @test norm(A - Ap) ≈ 0.0 - @test norm(Ap - A) ≈ 0.0 - end - - @testset "Combine Complex ITensor" begin - s1 = 
Index(QN(("Sz", 1)) => 1, QN(("Sz", -1)) => 1; tags="S=1/2,Site,n=1") - s2 = Index(QN(("Sz", 1)) => 1, QN(("Sz", -1)) => 1; tags="S=1/2,Site,n=2") - - T = random_itensor(ComplexF64, QN("Sz", 0), s1, s2) - - C = combiner(s1, s2) - CT = C * T - @test norm(CT) ≈ norm(T) - TT = dag(C) * CT - @test TT ≈ T - end - - @testset "Combiner bug #395" begin - i1 = Index([QN(0) => 1, QN(1) => 2], "i1") - i2 = Index([QN(0) => 1, QN(1) => 2], "i2") - A = random_itensor(QN(), i1, i2, dag(i1)', dag(i2)') - CL = combiner(i1, i2) - CR = combiner(dag(i1)', dag(i2)') - AC = A * CR * CL - @test AC * dag(CR) * dag(CL) ≈ A - end - - @testset "Contract to scalar" begin - i = Index([QN(0) => 1, QN(1) => 1], "i") - A = random_itensor(QN(0), i, dag(i')) - - c = A * dag(A) - - @test nnz(c) == 1 - @test nnzblocks(c) == 1 - @test c[] isa Float64 - @test c[] ≈ norm(A)^2 - end - - @testset "eigen" begin - @testset "eigen hermitian" begin - i = Index(QN(0) => 2, QN(1) => 3, QN(2) => 4; tags="i") - j = settags(i, "j") - k = settags(i, "k") - l = settags(i, "l") - - A = random_itensor(QN(), i, j, dag(k), dag(l)) - A = A * prime(dag(A), (i, j)) - - F = eigen(A; ishermitian=true, tags="x") - - D, U = F - Ut = F.Vt - - @test storage(U) isa NDTensors.BlockSparse - @test storage(D) isa NDTensors.DiagBlockSparse - - u = commonind(D, U) - up = uniqueind(D, U) - - @test hastags(u, "x") - @test plev(u) == 0 - @test hastags(up, "x") - @test plev(up) == 1 - - @test hassameinds(U, (i, j, u)) - @test hassameinds(D, (u, up)) - - @test A ≈ dag(U) * D * U' atol = 1e-11 - @test A ≈ dag(U) * D * Ut atol = 1e-11 - @test A * U ≈ U' * D atol = 1e-11 - @test A * U ≈ Ut * D atol = 1e-11 - end - - @testset "eigen hermitian (truncate)" begin - i = Index(QN(0) => 2, QN(1) => 3, QN(2) => 4; tags="i") - j = settags(i, "j") - k = settags(i, "k") - l = settags(i, "l") - - A = random_itensor(QN(), i, j, dag(k), dag(l)) - A = A * prime(dag(A), (i, j)) - for i in 1:4 - A = mapprime(A * A', 2, 1) - end - A = A / norm(A) - - cutoff = 1e-5 - F = eigen(A; ishermitian=true, tags="x", cutoff=cutoff) - - D, U, spec = F - Ut = F.Vt - - @test storage(U) isa NDTensors.BlockSparse - @test storage(D) isa NDTensors.DiagBlockSparse - - u = commonind(D, U) - up = uniqueind(D, U) - - @test hastags(u, "x") - @test plev(u) == 0 - @test hastags(up, "x") - @test plev(up) == 1 - - @test hassameinds(U, (i, j, u)) - @test hassameinds(D, (u, up)) - - for b in nzblocks(A) - @test flux(A, b) == QN(0) - end - for b in nzblocks(U) - @test flux(U, b) == QN(0) - end - for b in nzblocks(D) - @test flux(D, b) == QN(0) - end - - Ap = dag(U) * D * U' - - @test norm(Ap - A) ≤ 1e-2 - @test norm(dag(U) * D * Ut - A) ≤ 1e-2 - @test minimum(dims(D)) == length(spec.eigs) - @test minimum(dims(D)) < dim(i) * dim(j) - - @test spec.truncerr ≤ cutoff - err = sqrt(1 - (Ap * dag(Ap))[] / (A * dag(A))[]) - @test err ≤ cutoff - @test err ≈ spec.truncerr rtol = 4e-1 - end - - @testset "eigen non-hermitian" begin - i = Index(QN(0) => 2, QN(1) => 3, QN(2) => 4; tags="i") - j = settags(i, "j") - - A = random_itensor(QN(), i, j, dag(i'), dag(j')) - - F = eigen(A; tags="x") - - D, U = F - Ut = F.Vt - - @test storage(U) isa NDTensors.BlockSparse - @test storage(D) isa NDTensors.DiagBlockSparse - - u = commonind(D, U) - up = uniqueind(D, U) - - @test hastags(u, "x") - @test plev(u) == 0 - @test hastags(up, "x") - @test plev(up) == 1 - - @test A ≉ U' * D * dag(U) atol = 1e-12 - @test A ≉ Ut * D * dag(U) atol = 1e-12 - @test A * U ≈ U' * D atol = 1e-12 - @test A * U ≈ Ut * D atol = 1e-12 - end - - @testset "eigen 
non-hermitian (general inds)" begin - i = Index(QN(0) => 2, QN(1) => 3, QN(2) => 4; tags="i") - j = settags(i, "j") - ĩ, j̃ = sim(i), sim(j) - - A = random_itensor(QN(), i, j, dag(ĩ), dag(j̃)) - - F = eigen(A, (i, j), (ĩ, j̃); lefttags="x", righttags="y") - - D, U = F - Ut = F.Vt - - @test storage(U) isa NDTensors.BlockSparse - @test storage(D) isa NDTensors.DiagBlockSparse - - l = uniqueind(D, U) - r = commonind(D, U) - - @test F.l == l - @test F.r == r - - @test hastags(l, "x") - @test plev(l) == 0 - @test hastags(r, "y") - @test plev(r) == 0 - - @test hassameinds(U, (ĩ, j̃, r)) - @test hassameinds(Ut, (i, j, l)) - - @test A * U ≈ Ut * D atol = 1e-12 - @test A ≉ Ut * D * dag(U) atol = 1e-12 - end - - @testset "eigen mixed arrows" begin - i1 = Index([QN(0) => 1, QN(1) => 2], "i1") - i2 = Index([QN(0) => 1, QN(1) => 2], "i2") - A = random_itensor(i1, i2, dag(i1)', dag(i2)') - F = eigen(A, (i1, i1'), (i2', i2)) - D, U = F - Ut = F.Vt - @test A * U ≈ Ut * D atol = 1e-12 - end - end - - @testset "svd" for ElT in (Float64, ComplexF64) - @testset "svd example 1" begin - i = Index(QN(0) => 2, QN(1) => 2; tags="i") - j = Index(QN(0) => 2, QN(1) => 2; tags="j") - A = random_itensor(ElT, QN(0), i, dag(j)) - for b in nzblocks(A) - @test flux(A, b) == QN(0) - end - U, S, V = svd(A, i) - - @test storage(U) isa NDTensors.BlockSparse - @test storage(S) isa NDTensors.DiagBlockSparse - @test storage(V) isa NDTensors.BlockSparse - - for b in nzblocks(U) - @test flux(U, b) == QN(0) - end - for b in nzblocks(S) - @test flux(S, b) == QN(0) - end - for b in nzblocks(V) - @test flux(V, b) == QN(0) - end - @test U * S * V ≈ A atol = 1e-14 - end - - @testset "svd example 2" begin - i = Index(QN(0) => 5, QN(1) => 6; tags="i") - j = Index(QN(-1) => 2, QN(0) => 3, QN(1) => 4; tags="j") - A = random_itensor(ElT, QN(0), i, j) - for b in nzblocks(A) - @test flux(A, b) == QN(0) - end - U, S, V = svd(A, i) - - @test storage(U) isa NDTensors.BlockSparse - @test storage(S) isa NDTensors.DiagBlockSparse - @test storage(V) isa NDTensors.BlockSparse - - for b in nzblocks(U) - @test flux(U, b) == QN(0) - end - for b in nzblocks(S) - @test flux(S, b) == QN(0) - end - for b in nzblocks(V) - @test flux(V, b) == QN(0) - end - @test U * S * V ≈ A atol = 1e-14 - end - - @testset "svd example 3" begin - i = Index(QN(0) => 5, QN(1) => 6; tags="i") - j = Index(QN(-1) => 2, QN(0) => 3, QN(1) => 4; tags="j") - A = random_itensor(ElT, QN(0), i, dag(j)) - for b in nzblocks(A) - @test flux(A, b) == QN(0) - end - U, S, V = svd(A, i) - - @test storage(U) isa NDTensors.BlockSparse - @test storage(S) isa NDTensors.DiagBlockSparse - @test storage(V) isa NDTensors.BlockSparse - - for b in nzblocks(U) - @test flux(U, b) == QN(0) - end - for b in nzblocks(S) - @test flux(S, b) == QN(0) - end - for b in nzblocks(V) - @test flux(V, b) == QN(0) - end - @test U * S * V ≈ A atol = 1e-12 - end - - @testset "svd example 4" begin - i = Index(QN(0, 2) => 2, QN(1, 2) => 2; tags="i") - j = settags(i, "j") - - A = random_itensor(ElT, QN(0, 2), i, j, dag(i'), dag(j')) - - U, S, V = svd(A, i, j) - - @test storage(U) isa NDTensors.BlockSparse - @test storage(S) isa NDTensors.DiagBlockSparse - @test storage(V) isa NDTensors.BlockSparse - - for b in nzblocks(A) - @test flux(A, b) == QN(0, 2) - end - U, S, V = svd(A, i) - for b in nzblocks(U) - @test flux(U, b) == QN(0, 2) - end - for b in nzblocks(S) - @test flux(S, b) == QN(0, 2) - end - for b in nzblocks(V) - @test flux(V, b) == QN(0, 2) - end - @test U * S * V ≈ A atol = 1e-14 - end - - @testset "svd example 5" 
begin - i = Index(QN(0, 2) => 2, QN(1, 2) => 2; tags="i") - j = settags(i, "j") - - A = random_itensor(ElT, QN(1, 2), i, j, dag(i'), dag(j')) - - U, S, V = svd(A, i, j) - - @test storage(U) isa NDTensors.BlockSparse - @test storage(S) isa NDTensors.DiagBlockSparse - @test storage(V) isa NDTensors.BlockSparse - - for b in nzblocks(A) - @test flux(A, b) == QN(1, 2) - end - U, S, V = svd(A, i) - for b in nzblocks(U) - @test flux(U, b) == QN(0, 2) - end - for b in nzblocks(S) - @test flux(S, b) == QN(1, 2) - end - for b in nzblocks(V) - @test flux(V, b) == QN(0, 2) - end - @test U * S * V ≈ A atol = 1e-14 - end - - @testset "svd example 6" begin - i = Index(QN(0, 2) => 2, QN(1, 2) => 2; tags="i") - j = settags(i, "j") - - A = random_itensor(ElT, QN(1, 2), i, j, dag(i'), dag(j')) - - U, S, V = svd(A, i, i') - - @test storage(U) isa NDTensors.BlockSparse - @test storage(S) isa NDTensors.DiagBlockSparse - @test storage(V) isa NDTensors.BlockSparse - - for b in nzblocks(A) - @test flux(A, b) == QN(1, 2) - end - U, S, V = svd(A, i) - for b in nzblocks(U) - @test flux(U, b) == QN(0, 2) - end - for b in nzblocks(S) - @test flux(S, b) == QN(1, 2) - end - for b in nzblocks(V) - @test flux(V, b) == QN(0, 2) - end - @test U * S * V ≈ A atol = 1e-14 - end - - @testset "svd truncation example 1" begin - i = Index(QN(0) => 2, QN(1) => 3; tags="i") - j = settags(i, "j") - A = random_itensor(ElT, QN(0), i, j, dag(i'), dag(j')) - for i in 1:4 - A = mapprime(A * A', 2, 1) - end - A = A / norm(A) - - cutoff = 1e-5 - U, S, V, spec = svd(A, i, j; utags="x", vtags="y", cutoff=cutoff) - - @test storage(U) isa NDTensors.BlockSparse - @test storage(S) isa NDTensors.DiagBlockSparse - @test storage(V) isa NDTensors.BlockSparse - - u = commonind(S, U) - v = commonind(S, V) - - @test hastags(u, "x") - @test hastags(v, "y") - - @test hassameinds(U, (i, j, u)) - @test hassameinds(V, (i', j', v)) - - for b in nzblocks(A) - @test flux(A, b) == QN(0) - end - for b in nzblocks(U) - @test flux(U, b) == QN(0) - end - for b in nzblocks(S) - @test flux(S, b) == QN(0) - end - for b in nzblocks(V) - @test flux(V, b) == QN(0) - end - - Ap = U * S * V - - @test norm(Ap - A) ≤ 1e-2 - @test minimum(dims(S)) == length(spec.eigs) - @test minimum(dims(S)) < dim(i) * dim(j) - - @test spec.truncerr ≤ cutoff - err = real(1 - (Ap * dag(Ap))[] / (A * dag(A))[]) - @test err ≤ cutoff - @test isapprox(err, spec.truncerr; rtol=1e-6) - end - - @testset "svd truncation example 2" begin - i = Index(QN(0) => 3, QN(1) => 2; tags="i") - j = settags(i, "j") - A = random_itensor(ElT, QN(0), i, j, dag(i'), dag(j')) - - maxdim = 4 - U, S, V, spec = svd(A, i, j; utags="x", vtags="y", maxdim=maxdim) - - @test storage(U) isa NDTensors.BlockSparse - @test storage(S) isa NDTensors.DiagBlockSparse - @test storage(V) isa NDTensors.BlockSparse - - u = commonind(S, U) - v = commonind(S, V) - - @test hastags(u, "x") - @test hastags(v, "y") - - @test hassameinds(U, (i, j, u)) - @test hassameinds(V, (i', j', v)) - - for b in nzblocks(A) - @test flux(A, b) == QN(0) - end - for b in nzblocks(U) - @test flux(U, b) == QN(0) - end - for b in nzblocks(S) - @test flux(S, b) == QN(0) - end - for b in nzblocks(V) - @test flux(V, b) == QN(0) - end - - @test minimum(dims(S)) == maxdim - @test minimum(dims(S)) == length(spec.eigs) - @test minimum(dims(S)) < dim(i) * dim(j) - - Ap = U * S * V - err = 1 - (Ap * dag(Ap))[] / (A * dag(A))[] - @test isapprox(err, spec.truncerr; rtol=1e-6) - end - - @testset "svd truncation example 3" begin - i = Index(QN(0) => 2, QN(1) => 3, QN(2) => 4; 
tags="i") - j = settags(i, "j") - A = random_itensor(ElT, QN(1), i, j, dag(i'), dag(j')) - - maxdim = 4 - U, S, V, spec = svd(A, i, j; utags="x", vtags="y", maxdim=maxdim) - - @test storage(U) isa NDTensors.BlockSparse - @test storage(S) isa NDTensors.DiagBlockSparse - @test storage(V) isa NDTensors.BlockSparse - - u = commonind(S, U) - v = commonind(S, V) - - @test hastags(u, "x") - @test hastags(v, "y") - - @test hassameinds(U, (i, j, u)) - @test hassameinds(V, (i', j', v)) - - for b in nzblocks(A) - @test flux(A, b) == QN(1) - end - for b in nzblocks(U) - @test flux(U, b) == QN(0) - end - for b in nzblocks(S) - @test flux(S, b) == QN(1) - end - for b in nzblocks(V) - @test flux(V, b) == QN(0) - end - - @test minimum(dims(S)) == maxdim - @test minimum(dims(S)) == length(spec.eigs) - @test minimum(dims(S)) < dim(i) * dim(j) - - Ap = U * S * V - err = 1 - (Ap * dag(Ap))[] / (A * dag(A))[] - @test isapprox(err, spec.truncerr; rtol=1e-6) - end - - @testset "svd truncation example 4" begin - i = Index(QN(0, 2) => 3, QN(1, 2) => 4; tags="i") - j = settags(i, "j") - A = random_itensor(ElT, QN(1, 2), i, j, dag(i'), dag(j')) - - maxdim = 4 - U, S, V, spec = svd(A, i, j; utags="x", vtags="y", maxdim=maxdim) - - @test storage(U) isa NDTensors.BlockSparse - @test storage(S) isa NDTensors.DiagBlockSparse - @test storage(V) isa NDTensors.BlockSparse - - u = commonind(S, U) - v = commonind(S, V) - - @test hastags(u, "x") - @test hastags(v, "y") - - @test hassameinds(U, (i, j, u)) - @test hassameinds(V, (i', j', v)) - - for b in nzblocks(A) - @test flux(A, b) == QN(1, 2) - end - for b in nzblocks(U) - @test flux(U, b) == QN(0, 2) - end - for b in nzblocks(S) - @test flux(S, b) == QN(1, 2) - end - for b in nzblocks(V) - @test flux(V, b) == QN(0, 2) - end - - @test minimum(dims(S)) == maxdim - @test minimum(dims(S)) == length(spec.eigs) - @test minimum(dims(S)) < dim(i) * dim(j) - - Ap = U * S * V - err = 1 - (Ap * dag(Ap))[] / (A * dag(A))[] - @test isapprox(err, spec.truncerr; rtol=1e-6) - end - - # This test happened to have different behavior because of an - # accidental degeneracy in the singular values with a change - # in the random number generator intoduced in Julia 1.7 - if (ElT == Float64) && (VERSION ≥ v"1.7.0-0") - @testset "svd truncation example 5 (accidental degeneracy)" begin - i = Index(QN(0, 2) => 2, QN(1, 2) => 3; tags="i") - j = settags(i, "j") - copy!( - Random.default_rng(), - Xoshiro( - 0x4ea8944fb1006ec4, 0xec60c93e7daf5295, 0x7c967091b08e72b3, 0x13bc39357cddea97 - ), - ) - A = random_itensor(ElT, QN(1, 2), i, j, dag(i'), dag(j')) - - maxdim = 4 - U, S, V, spec = svd(A, i, j'; utags="x", vtags="y", maxdim=maxdim) - - @test storage(U) isa NDTensors.BlockSparse - @test storage(S) isa NDTensors.DiagBlockSparse - @test storage(V) isa NDTensors.BlockSparse - - u = commonind(S, U) - v = commonind(S, V) - - @test hastags(u, "x") - @test hastags(v, "y") - - @test hassameinds(U, (i, j', u)) - @test hassameinds(V, (i', j, v)) - - for b in nzblocks(A) - @test flux(A, b) == QN(1, 2) - end - for b in nzblocks(U) - @test flux(U, b) == QN(0, 2) - end - for b in nzblocks(S) - @test flux(S, b) == QN(1, 2) - end - for b in nzblocks(V) - @test flux(V, b) == QN(0, 2) - end - - @test minimum(dims(S)) == maxdim - 1 - @test_broken minimum(dims(S)) == length(spec.eigs) - @test minimum(dims(S)) < dim(i) * dim(j) - - _, Sfull, _ = svd(A, i, j'; utags="x", vtags="y") - s = sort(diag(array(Sfull)); rev=true) - @test (s[4] - s[5]) / norm(s) < 1e-4 - - Ap = U * S * V - err = 1 - (Ap * dag(Ap))[] / (A * 
dag(A))[] - @test_broken isapprox(err, spec.truncerr; rtol=1e-6) - end - end - - @testset "svd truncation example 5" begin - i = Index(QN(0, 2) => 2, QN(1, 2) => 3; tags="i") - j = settags(i, "j") - Random.seed!(123) - A = random_itensor(ElT, QN(1, 2), i, j, dag(i'), dag(j')) - - maxdim = 4 - U, S, V, spec = svd(A, i, j'; utags="x", vtags="y", maxdim=maxdim) - - @test storage(U) isa NDTensors.BlockSparse - @test storage(S) isa NDTensors.DiagBlockSparse - @test storage(V) isa NDTensors.BlockSparse - - u = commonind(S, U) - v = commonind(S, V) - - @test hastags(u, "x") - @test hastags(v, "y") - - @test hassameinds(U, (i, j', u)) - @test hassameinds(V, (i', j, v)) - - for b in nzblocks(A) - @test flux(A, b) == QN(1, 2) - end - for b in nzblocks(U) - @test flux(U, b) == QN(0, 2) - end - for b in nzblocks(S) - @test flux(S, b) == QN(1, 2) - end - for b in nzblocks(V) - @test flux(V, b) == QN(0, 2) - end - - @test minimum(dims(S)) == maxdim - @test minimum(dims(S)) == length(spec.eigs) - @test minimum(dims(S)) < dim(i) * dim(j) - - Ap = U * S * V - err = 1 - (Ap * dag(Ap))[] / (A * dag(A))[] - @test isapprox(err, spec.truncerr; rtol=1e-6) - end - - @testset "issue #231" begin - l = Index( - QN("Nf", -1, -1) => 2, QN("Nf", 0, -1) => 4, QN("Nf", +1, -1) => 2; tags="CMB,Link" - ) - s = Index(QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1; tags="Fermion,Site,n=4") - r = Index( - QN("Nf", 1, -1) => 2, QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 2; tags="Link,u" - ) - - A = ITensor(ElT, l, s, dag(r)) - - insertblock!(A, Block(2, 1, 2)) - insertblock!(A, Block(1, 2, 2)) - insertblock!(A, Block(2, 2, 3)) - - for b in nzblocks(A) - @test flux(A, b) == QN() - end - - U, S, V = svd(A, l, s) - - for b in nzblocks(U) - @test flux(U, b) == QN() - end - for b in nzblocks(S) - @test flux(S, b) == QN() - end - for b in nzblocks(V) - @test flux(V, b) == QN() - end - @test U * S * V ≈ A atol = 1e-13 - end - - @testset "SVD no truncate bug" begin - s = Index( - QN("Sz", -4) => 1, - QN("Sz", -2) => 4, - QN("Sz", 0) => 6, - QN("Sz", 2) => 4, - QN("Sz", 4) => 1, - ) - A = ITensor(ElT, s, s') - insertblock!(A, Block(5, 2)) - insertblock!(A, Block(4, 3)) - insertblock!(A, Block(3, 4)) - insertblock!(A, Block(2, 5)) - randn!(A) - U, S, V = svd(A, s) - @test U * S * V ≈ A - end - - @testset "SVD no truncate" begin - s = Index( - QN("Sz", -4) => 1, - QN("Sz", -2) => 4, - QN("Sz", 0) => 6, - QN("Sz", 2) => 4, - QN("Sz", 4) => 1, - ) - A = ITensor(ElT, s, s') - insertblock!(A, Block(5, 1)) - insertblock!(A, Block(4, 2)) - insertblock!(A, Block(3, 3)) - insertblock!(A, Block(2, 4)) - insertblock!(A, Block(1, 5)) - U, S, V = svd(A, s) - @test dims(S) == dims(A) - @test U * S * V ≈ A - end - - @testset "SVD truncate zeros" begin - s = Index( - QN("Sz", -4) => 1, - QN("Sz", -2) => 4, - QN("Sz", 0) => 6, - QN("Sz", 2) => 4, - QN("Sz", 4) => 1, - ) - A = ITensor(ElT, s, s') - insertblock!(A, Block(5, 1)) - insertblock!(A, Block(4, 2)) - insertblock!(A, Block(3, 3)) - insertblock!(A, Block(2, 4)) - insertblock!(A, Block(1, 5)) - U, S, V = svd(A, s; cutoff=0) - @test dims(S) == (0, 0) - @test U * S * V ≈ A - end - end - - @testset "Replace Index" begin - i = Index([QN(0) => 1, QN(1) => 2], "i") - j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j") - - T1 = random_itensor(QN(1), i, j) - T2 = copy(T1) - - k = Index([QN(0) => 1, QN(1) => 2], "k") - - replaceind!(T1, i, k) - @test hasind(T1, k) - @test dir(inds(T1)[1]) == dir(i) - - # Check that replaceind! 
keeps - # original Arrow direction - replaceind!(T2, i, dag(k)) - @test hasind(T2, k) - @test dir(inds(T2)[1]) == dir(i) - @test dir(inds(T2)[1]) != dir(dag(k)) - end - - @testset "BlockSparse dag copy behavior" begin - i = Index(QN(0) => 2, QN(1) => 2; tags="i") - j = Index(QN(0) => 2, QN(1) => 2; tags="j") - - v1 = random_itensor(QN(1), i, j) - orig_elt = v1[1, 3] - cv1 = dag(v1; allow_alias=true) - cv1[1, 3] = 123.45 - @test v1[1, 3] ≈ cv1[1, 3] - - v1 = random_itensor(QN(1), i, j) - orig_elt = v1[1, 3] - cv1 = dag(ITensors.AllowAlias(), v1) - cv1[1, 3] = 123.45 - @test v1[1, 3] ≈ cv1[1, 3] - - v2 = random_itensor(QN(1), i, j) - orig_elt = v2[1, 3] - cv2 = dag(v2; allow_alias=false) - cv2[1, 3] = 123.45 - @test v2[1, 3] ≈ orig_elt - - v2 = random_itensor(QN(1), i, j) - orig_elt = v2[1, 3] - cv2 = dag(ITensors.NeverAlias(), v2) - cv2[1, 3] = 123.45 - @test v2[1, 3] ≈ orig_elt - - v3 = random_itensor(ComplexF64, QN(1), i, j) - orig_elt = v3[1, 3] - cv3 = dag(v3; allow_alias=true) - cv3[1, 3] = 123.45 - @test v3[1, 3] ≈ orig_elt - - v3 = random_itensor(ComplexF64, QN(1), i, j) - orig_elt = v3[1, 3] - cv3 = dag(ITensors.AllowAlias(), v3) - cv3[1, 3] = 123.45 - @test v3[1, 3] ≈ orig_elt - - v4 = random_itensor(ComplexF64, QN(1), i, j) - orig_elt = v4[1, 3] - cv4 = dag(v4; allow_alias=false) - cv4[1, 3] = 123.45 - @test v4[1, 3] ≈ orig_elt - - v4 = random_itensor(ComplexF64, QN(1), i, j) - orig_elt = v4[1, 3] - cv4 = dag(ITensors.NeverAlias(), v4) - cv4[1, 3] = 123.45 - @test v4[1, 3] ≈ orig_elt - end - - @testset "exponentiate" begin - @testset "Simple arrows" begin - i1 = Index([QN(0) => 1, QN(1) => 2], "i1") - i2 = Index([QN(0) => 1, QN(1) => 2], "i2") - A = random_itensor(QN(), i1, i2, dag(i1)', dag(i2)') - Aexp = exp(A) - Amat = Array(A, i1, i2, i1', i2') - Amatexp = reshape(exp(reshape(Amat, 9, 9)), 3, 3, 3, 3) - @test Array(Aexp, i1, i2, i1', i2') ≈ Amatexp rtol = 1e-14 - @test flux(Aexp) == QN() - @test length(setdiff(inds(Aexp), inds(A))) == 0 - - @test exp(A, (i1, i2), (i1', i2')) ≈ Aexp rtol = 5e-14 - - # test the case where indices are permuted - A = random_itensor(QN(), i1, dag(i1)', dag(i2)', i2) - Aexp = exp(A, (i1, i2), (i1', i2')) - Amat = Array(A, i1, i2, i1', i2') - Amatexp = reshape(exp(reshape(Amat, 9, 9)), 3, 3, 3, 3) - @test Array(Aexp, i1, i2, i1', i2') ≈ Amatexp rtol = 1e-14 - - # test exponentiation in the Hermitian case - i1 = Index([QN(0) => 2, QN(1) => 2, QN(2) => 3], "i1") - A = random_itensor(QN(), i1, dag(i1)') - Ad = dag(swapinds(A, IndexSet(i1), IndexSet(dag(i1)'))) - Ah = A + Ad + 1e-10 * random_itensor(QN(), i1, dag(i1)') - Amat = Array(Ah, i1', i1) - Aexp = exp(Ah; ishermitian=true) - Amatexp = exp(LinearAlgebra.Hermitian(Amat)) - @test Array(Aexp, i1, i1') ≈ Amatexp rtol = 5e-14 - end - - @testset "Regression test for exp of QN ITensor with missing diagonal blocks" begin - i = Index([QN(0) => 2, QN(1) => 3]) - A = ITensor(i', dag(i)) - A[1, 1] = 1.2 - expA = exp(A; ishermitian=false) - for n in 1:mindim(A) - @test expA[n, n] == exp(A[n, n]) - end - @test expA ≈ exp(dense(A)) - expA = exp(A; ishermitian=true) - for n in 1:mindim(A) - @test expA[n, n] == exp(A[n, n]) - end - @test expA ≈ exp(dense(A)) - end - - @testset "diag" for ElType in (Float64, ComplexF64) - χ = [QN(0) => 1, QN(1) => 2] - i, j = Index.((χ,), ("i", "j")) - A = random_itensor(ElType, i, j) - d = diag(A) - @test d isa DenseTensor{ElType,1} - for n in 1:dim(χ) - @test d[n] == A[n, n] - end - end - - @testset "diag" for ElType in (Float64, ComplexF64) - χ = [QN(0) => 1, QN(1) => 2] - i, j 
= Index.((χ,), ("i", "j")) - A = random_itensor(ElType, i, j) - _, S, _ = svd(A, i) - d = diag(S) - @test d isa DenseTensor{real(ElType),1} - for n in 1:diaglength(S) - @test d[n] == S[n, n] - end - end - - @testset "Mixed arrows" begin - i1 = Index([QN(0) => 1, QN(1) => 2], "i1") - i2 = Index([QN(0) => 1, QN(1) => 2], "i2") - A = random_itensor(i1, i2, dag(i1)', dag(i2)') - expA = exp(A, (i1, i1'), (i2', i2)) - @test exp(dense(A), (i1, i1'), (i2', i2)) ≈ dense(expA) - end - - @testset "Test contraction direction error" begin - i = Index([QN(0) => 1, QN(1) => 1], "i") - A = random_itensor(i', dag(i)) - A² = A' * A - @test dense(A²) ≈ dense(A') * dense(A) - @test_throws ErrorException A' * dag(A) - end - - @testset "Contraction with scalar ITensor" begin - i = Index([QN(0) => 2, QN(1) => 2]) - A = random_itensor(i', dag(i)) - A1 = A * ITensor(1) - A2 = ITensor(1) * A - @test A1 ≈ A - @test A2 ≈ A - end - end - - @testset "directsum" begin - x = Index([QN(0) => 1, QN(1) => 1], "x") - i1 = Index([QN(0) => 1, QN(1) => 2], "i1") - j1 = Index([QN(0) => 2, QN(1) => 2], "j1") - i2 = Index([QN(0) => 2, QN(1) => 3], "i2") - j2 = Index([QN(0) => 3, QN(1) => 3], "j2") - - A1 = random_itensor(i1, x, j1) - A2 = random_itensor(x, j2, i2) - S, s = ITensors.directsum(A1 => (i1, j1), A2 => (i2, j2); tags=["sum_i", "sum_j"]) - - @test hassameinds(S, (x, s...)) - @test hastags(s[1], "sum_i") - @test hastags(s[2], "sum_j") - - for vx in 1:dim(x) - proj = dag(onehot(x => vx)) - A1_vx = A1 * proj - A2_vx = A2 * proj - S_vx = S * proj - for m in 1:dim(s[1]), n in 1:dim(s[2]) - if m ≤ dim(i1) && n ≤ dim(j1) - @test S_vx[s[1] => m, s[2] => n] == A1_vx[i1 => m, j1 => n] - elseif m > dim(i1) && n > dim(j1) - @test S_vx[s[1] => m, s[2] => n] == A2_vx[i2 => m - dim(i1), j2 => n - dim(j1)] - else - @test S_vx[s[1] => m, s[2] => n] == 0 - end - end - end - end - - @testset "Negate QN ITensor Regression Test" begin - s = siteind("S=1/2"; conserve_qns=true) - - A = ITensor(s', dag(s)) - A[1, 1] = 1.0 - - @test length(ITensors.blockoffsets(ITensors.tensor(A))) == 1 - B = -A # there was a bug where doing -A would - # increase the number of blocks of A's storage - @test length(ITensors.blockoffsets(ITensors.tensor(A))) == 1 - end - - @testset "removeqns and removeqn" begin - s = siteind("Electron"; conserve_qns=true) - T = op("c†↑", s) - - @test hasqns(s) - @test hasqns(T) - @test qn(s, 1) == QN(("Nf", 0, -1), ("Sz", 0)) - @test qn(s, 2) == QN(("Nf", 1, -1), ("Sz", 1)) - @test qn(s, 3) == QN(("Nf", 1, -1), ("Sz", -1)) - @test qn(s, 4) == QN(("Nf", 2, -1), ("Sz", 0)) - @test blockdim(s, 1) == 1 - @test blockdim(s, 2) == 1 - @test blockdim(s, 3) == 1 - @test blockdim(s, 4) == 1 - @test nblocks(s) == 4 - @test dim(s) == 4 - - s1 = removeqns(s) - T1 = removeqns(T) - @test !hasqns(s1) - @test !hasqns(T1) - @test nblocks(s1) == 1 - @test dim(s1) == 4 - for I in eachindex(T1) - @test T1[I] == T[I] - end - - s2 = removeqn(s, "Sz") - T2 = removeqn(T, "Sz") - @test hasqns(s2) - @test hasqns(T2) - @test nnzblocks(T2) == 2 - @test nblocks(s2) == 3 - @test nblocks(T2) == (3, 3) - @test qn(s2, 1) == QN(("Nf", 0, -1)) - @test qn(s2, 2) == QN(("Nf", 1, -1)) - @test qn(s2, 3) == QN(("Nf", 2, -1)) - @test blockdim(s2, 1) == 1 - @test blockdim(s2, 2) == 2 - @test blockdim(s2, 3) == 1 - @test dim(s2) == 4 - for I in eachindex(T2) - @test T2[I] == T[I] - end - - s3 = removeqn(s, "Nf") - T3 = removeqn(T, "Nf") - @test hasqns(s3) - @test hasqns(T3) - @test nnzblocks(T3) == 2 - @test nblocks(s3) == 4 - @test nblocks(T3) == (4, 4) - @test qn(s3, 1) 
== QN(("Sz", 0)) - @test qn(s3, 2) == QN(("Sz", 1)) - @test qn(s3, 3) == QN(("Sz", -1)) - @test qn(s3, 4) == QN(("Sz", 0)) - @test blockdim([QN(0) => 1, QN(1) => 2], 1) == 1 - @test blockdim([QN(0) => 1, QN(1) => 2], 2) == 2 - @test blockdim(s3, 1) == 1 - @test blockdim(s3, 2) == 1 - @test blockdim(s3, 3) == 1 - @test blockdim(s3, 4) == 1 - @test dim(s3) == 4 - for I in eachindex(T3) - @test T3[I] == T[I] - end - @test -[QN(0) => 1, QN(1) => 2] == [QN(0) => 1, QN(-1) => 2] - @test !ITensors.have_same_qns([QN(0) => 1, QN(0) => 2, QN(("Sz", 2)) => 1]) - end -end diff --git a/test/base/test_readwrite.jl b/test/base/test_readwrite.jl deleted file mode 100644 index 5f8d47cb2e..0000000000 --- a/test/base/test_readwrite.jl +++ /dev/null @@ -1,187 +0,0 @@ -@eval module $(gensym()) -using HDF5: h5open, read, write -using ITensors: Index, prime, random_itensor -using Test: @test, @testset - -include(joinpath(@__DIR__, "utils", "util.jl")) - -@testset "HDF5 Read and Write" begin - i = Index(2, "i") - j = Index(3, "j") - k = Index(4, "k") - - @testset "TagSet" begin - ts = TagSet("A,Site,n=2") - h5open(joinpath(@__DIR__, "data.h5"), "w") do fo - write(fo, "tags", ts) - end - - h5open(joinpath(@__DIR__, "data.h5"), "r") do fi - rts = read(fi, "tags", TagSet) - @test rts == ts - end - end - - @testset "Index" begin - i = Index(3, "Site,S=1") - h5open(joinpath(@__DIR__, "data.h5"), "w") do fo - write(fo, "index", i) - end - - h5open(joinpath(@__DIR__, "data.h5"), "r") do fi - ri = read(fi, "index", Index) - @test ri == i - end - - # primed Index - i = Index(3, "Site,S=1") - i = prime(i, 2) - h5open(joinpath(@__DIR__, "data.h5"), "w") do fo - write(fo, "index", i) - end - - h5open(joinpath(@__DIR__, "data.h5"), "r") do fi - ri = read(fi, "index", Index) - @test ri == i - end - end - - @testset "IndexSet" begin - is = IndexSet(i, j, k) - - h5open(joinpath(@__DIR__, "data.h5"), "w") do fo - write(fo, "inds", is) - end - - h5open(joinpath(@__DIR__, "data.h5"), "r") do fi - ris = read(fi, "inds", IndexSet) - @test ris == is - end - end - - @testset "Dense ITensor" begin - - # default constructed case - T = ITensor() - - h5open(joinpath(@__DIR__, "data.h5"), "w") do fo - write(fo, "defaultT", T) - end - - h5open(joinpath(@__DIR__, "data.h5"), "r") do fi - rT = read(fi, "defaultT", ITensor) - @test typeof(storage(T)) == typeof(storage(ITensor())) - end - - # real case - T = random_itensor(i, j, k) - - h5open(joinpath(@__DIR__, "data.h5"), "w") do fo - write(fo, "T", T) - end - - h5open(joinpath(@__DIR__, "data.h5"), "r") do fi - rT = read(fi, "T", ITensor) - @test norm(rT - T) / norm(T) < 1E-10 - end - - # complex case - T = random_itensor(ComplexF64, i, j, k) - - h5open(joinpath(@__DIR__, "data.h5"), "w") do fo - write(fo, "complexT", T) - end - - h5open(joinpath(@__DIR__, "data.h5"), "r") do fi - rT = read(fi, "complexT", ITensor) - @test norm(rT - T) / norm(T) < 1E-10 - end - end - - @testset "Delta ITensor" begin - # - # Delta ITensor - # - Δ = δ(i, i') - cΔ = δ(ComplexF64, i, i') - h5open(joinpath(@__DIR__, "data.h5"), "w") do fo - fo["delta_tensor"] = Δ - fo["c_delta_tensor"] = cΔ - end - - h5open(joinpath(@__DIR__, "data.h5"), "r") do fi - rΔ = read(fi, "delta_tensor", ITensor) - rcΔ = read(fi, "c_delta_tensor", ITensor) - @test rΔ ≈ Δ - @test rcΔ ≈ cΔ - end - end - @testset "Diag ITensor" begin - - # - # Diag ITensor - # - dk = dim(k) - D = diag_itensor(randn(dk), k, k') - C = diag_itensor(randn(ComplexF64, dk), k, k') - h5open(joinpath(@__DIR__, "data.h5"), "w") do fo - fo["diag_tensor"] = D - 
fo["c_diag_tensor"] = C - end - - h5open(joinpath(@__DIR__, "data.h5"), "r") do fi - rD = read(fi, "diag_tensor", ITensor) - rC = read(fi, "c_diag_tensor", ITensor) - @test rD ≈ D - @test rC ≈ C - end - end - - @testset "QN ITensor" begin - i = Index(QN("A", -1) => 3, QN("A", 0) => 4, QN("A", +1) => 3; tags="i") - j = Index(QN("A", -2) => 2, QN("A", 0) => 3, QN("A", +2) => 2; tags="j") - k = Index(QN("A", -1) => 1, QN("A", 0) => 1, QN("A", +1) => 1; tags="k") - - # real case - T = random_itensor(QN("A", 1), i, j, k) - - h5open(joinpath(@__DIR__, "data.h5"), "w") do fo - write(fo, "T", T) - end - - h5open(joinpath(@__DIR__, "data.h5"), "r") do fi - rT = read(fi, "T", ITensor) - @test rT ≈ T - end - - # complex case - T = random_itensor(ComplexF64, i, j, k) - - h5open(joinpath(@__DIR__, "data.h5"), "w") do fo - write(fo, "complexT", T) - end - - h5open(joinpath(@__DIR__, "data.h5"), "r") do fi - rT = read(fi, "complexT", ITensor) - @test rT ≈ T - end - end - - @testset "DownwardCompat" begin - h5open(joinpath(@__DIR__, "utils", "testfilev0.1.41.h5"), "r") do fi - ITensorName = "ITensorv0.1.41" - - # ITensor version <= v0.1.41 uses the `store` key for ITensor data storage - # whereas v >= 0.2 uses `storage` as key - @test haskey(read(fi, ITensorName), "store") - @test read(fi, ITensorName, ITensor) isa ITensor - end - end - - # - # Clean up the test hdf5 file - # - rm(joinpath(@__DIR__, "data.h5"); force=true) -end - -end diff --git a/test/base/test_sitetype.jl b/test/base/test_sitetype.jl deleted file mode 100644 index 12c77734a6..0000000000 --- a/test/base/test_sitetype.jl +++ /dev/null @@ -1,583 +0,0 @@ -using ITensors, Test -using ITensors.SiteTypes: - @OpName_str, - @SiteType_str, - @StateName_str, - OpName, - StateName, - op, - ops, - siteind, - siteinds, - state - -function is_unitary(U::ITensor; kwargs...) 
- s = noprime(filterinds(U; plev=1)) - return isapprox(transpose(dag(U))(U), op("I", s...)) -end - -@testset "SiteType" begin - N = 10 - - @testset "Star in operator strings" begin - @test_throws ErrorException op("S=1/2") - - sites = siteinds("S=1/2", N) - #@test_throws ArgumentError op(sites, "Sp", 1) - @test sites[1] isa Index - Sz = op(sites, "Sz", 2) - SzSz = op(sites, "Sz * Sz", 2) - @test SzSz ≈ product(Sz, Sz) - Sy = op(sites, "Sy", 2) - SySy = op(sites, "Sy * Sy", 2) - @test SySy ≈ product(Sy, Sy) - - Sz1 = op("Sz", sites, 1) - @test op("Sz", [sites[1]]) ≈ Sz1 - @test op([sites[1]], "Sz") ≈ Sz1 - @test op([1 0; 0 -1] / 2, [sites[1]]) ≈ Sz1 - @test op([sites[1]], [1 0; 0 -1] / 2) ≈ Sz1 - - @test op([sites[1]], "Ry"; θ=π / 2) ≈ - itensor([1 -1; 1 1] / √2, sites[1]', dag(sites[1])) - - sites = siteinds("S=1", N) - #@test_throws ArgumentError op(sites, "Sp", 1) - Sz = op(sites, "Sz", 2) - SzSz = op(sites, "Sz * Sz", 2) - @test SzSz ≈ product(Sz, Sz) - Sy = op(sites, "Sy", 2) - SySy = op(sites, "Sy * Sy", 2) - @test SySy ≈ product(Sy, Sy) - SzSySz = op(sites, "Sz * Sy * Sz", 2) - @test SzSySz ≈ product(Sz, product(Sy, Sz)) - end - - @testset "+/- in operator strings" begin - q = siteind("Qudit"; dim=5) - Amat = array(op("a", q)) - Adagmat = array(op("a†", q)) - - x = Amat - Adagmat - @test x ≈ array(op("a - a†", q)) - x = Amat * Adagmat - Adagmat - @test x ≈ array(op("a * a† - a†", q)) - @test x ≈ array(op("a * a† - a†", q)) - x = Adagmat * Adagmat * Amat * Amat - @test x ≈ array(op("a† * a† * a * a", q)) - - q = siteind("S=1/2") - Sp = array(op("S+", q)) - Sm = array(op("S-", q)) - Sx = array(op("Sx", q)) - Sy = array(op("Sy", q)) - Sz = array(op("Sz", q)) - x = Sp + Sm - @test x ≈ array(op("S+ + S-", q)) - x = Sp - Sm - @test x ≈ array(op("S+ - S-", q)) - x = Sp - Sm - Sp - @test x ≈ array(op("S+ - S- - S+", q)) - x = Sp * Sm + Sm * Sp - @test x ≈ array(op("S+ * S- + S- * S+", q)) - # Deprecated syntax - @test x ≈ array(op("S+ * S- + S-*S+", q)) - x = Sp * Sm - Sm * Sp - @test x ≈ array(op("S+ * S- - S- * S+", q)) - @test x ≈ array(op("S+ * S- - S- * S+", q)) - x = Sp * Sm + Sm * Sp + Sz * Sx * Sy - @test x ≈ array(op("S+ * S- + S- * S+ + Sz * Sx * Sy", q)) - x = Sp * Sm - Sm * Sp + Sz * Sx * Sy - @test x ≈ array(op("S+ * S- - S- * S+ + Sz * Sx * Sy", q)) - x = Sp * Sm - Sm * Sp - Sz * Sx * Sy - @test x ≈ array(op("S+ * S- - S- * S+ - Sz * Sx * Sy", q)) - - #q = siteind("Qubit") - #R = array(op("Rx", q; θ = 0.1)) - #H = array(op("H", q)) - #Y = array(op("Y", q)) - #x = H * R + Y + R - #@test x ≈ array(op("H * Rx + Y + Rx", q; θ = 0.1)) - - end - - @testset "Custom SiteType using op" begin - # Use "_Custom_" tag even though this example - # is for S=3/2, because we might define the - # "S=3/2" TagType inside ITensors.jl later - function ITensors.op(::OpName"Sz", ::SiteType"_Custom_", s::Index) - Op = ITensor(s', dag(s)) - Op[s' => 1, s => 1] = +3 / 2 - Op[s' => 2, s => 2] = +1 / 2 - Op[s' => 3, s => 3] = -1 / 2 - Op[s' => 4, s => 4] = -3 / 2 - return Op - end - - function ITensors.op(::OpName"α", ::SiteType"_Custom_", s1::Index, s2::Index) - Op = ITensor(s1', s2', dag(s1), dag(s2)) - Op[s1' => 1, s2' => 2, s1 => 1, s2 => 2] = +3 / 2 - Op[s1' => 2, s2' => 1, s1 => 2, s2 => 2] = +1 / 2 - Op[s1' => 3, s2' => 3, s1 => 3, s2 => 4] = -1 / 2 - Op[s1' => 4, s2' => 1, s1 => 4, s2 => 2] = -3 / 2 - return Op - end - - function ITensors.op( - ::OpName"β", ::SiteType"_Custom1", ::SiteType"_Custom2", s1::Index, s2::Index - ) - Op = ITensor(s1', s2', dag(s1), dag(s2)) - Op[s1' => 1, s2' => 2, s1 => 
1, s2 => 2] = +5 / 2 - Op[s1' => 2, s2' => 1, s1 => 2, s2 => 2] = +3 / 2 - Op[s1' => 3, s2' => 3, s1 => 3, s2 => 4] = -3 / 2 - Op[s1' => 4, s2' => 1, s1 => 4, s2 => 2] = -5 / 2 - return Op - end - - s = Index(4, "_Custom_, __x") - Sz = op("Sz", s) - @test Sz[s' => 1, s => 1] ≈ +3 / 2 - @test Sz[s' => 2, s => 2] ≈ +1 / 2 - @test Sz[s' => 3, s => 3] ≈ -1 / 2 - @test Sz[s' => 4, s => 4] ≈ -3 / 2 - - t = Index(4, "_Custom_, __x") - α = op("α", s, t) - @test α[s' => 1, t' => 2, s => 1, t => 2] ≈ +3 / 2 - @test α[s' => 2, t' => 1, s => 2, t => 2] ≈ +1 / 2 - @test α[s' => 3, t' => 3, s => 3, t => 4] ≈ -1 / 2 - @test α[s' => 4, t' => 1, s => 4, t => 2] ≈ -3 / 2 - - s1 = Index(4, "_Custom1, __x") - @test_throws ArgumentError op("α", s, s1) - - s2 = Index(4, "_Custom2, __x") - β = op("β", s1, s2) - @test β[s1' => 1, s2' => 2, s1 => 1, s2 => 2] ≈ +5 / 2 - @test β[s1' => 2, s2' => 1, s1 => 2, s2 => 2] ≈ +3 / 2 - @test β[s1' => 3, s2' => 3, s1 => 3, s2 => 4] ≈ -3 / 2 - @test β[s1' => 4, s2' => 1, s1 => 4, s2 => 2] ≈ -5 / 2 - @test_throws ArgumentError op("β", s2, s1) - end - - @testset "Custom OpName with long name" begin - function ITensors.op(::OpName"my_favorite_operator", ::SiteType"S=1/2", s::Index) - Op = ITensor(s', dag(s)) - Op[s' => 1, s => 1] = 0.11 - Op[s' => 1, s => 2] = 0.12 - Op[s' => 2, s => 1] = 0.21 - Op[s' => 2, s => 2] = 0.22 - return Op - end - - s = Index(2, "S=1/2, Site") - Sz = op("my_favorite_operator", s) - @test Sz[s' => 1, s => 1] ≈ 0.11 - @test Sz[s' => 1, s => 2] ≈ 0.12 - @test Sz[s' => 2, s => 1] ≈ 0.21 - @test Sz[s' => 2, s => 2] ≈ 0.22 - - @test OpName(:myop) == OpName("myop") - @test ITensors.name(OpName(:myop)) == :myop - end - - @testset "op with more than two indices" begin - ITensors.space(::SiteType"qubit") = 2 - - function ITensors.op(::OpName"rand", ::SiteType"qubit", s::Index...) - return random_itensor(prime.(s)..., dag.(s)...) - end - - s = siteinds("qubit", 4) - o = op("rand", s...) - @test norm(o) > 0 - @test order(o) == 8 - @test hassameinds(o, (prime.(s)..., s...)) - end - - @testset "Custom Qudit/Boson op" begin - # Overload Qudit, implicitly defined for Boson as well - function ITensors.op(::OpName"Qudit_op_1", ::SiteType"Qudit", ds::Int...) - d = prod(ds) - return [i * j for i in 1:d, j in 1:d] - end - function ITensors.op(::OpName"Qudit_op_2", ::SiteType"Qudit", d::Int) - return [i * j for i in 1:d, j in 1:d] - end - - # Overload Boson directly - function ITensors.op(::OpName"Boson_op_1", ::SiteType"Boson", ds::Int...) - d = prod(ds) - return [i * j for i in 1:d, j in 1:d] - end - function ITensors.op(::OpName"Boson_op_2", ::SiteType"Boson", d::Int) - return [i * j for i in 1:d, j in 1:d] - end - - for st in ["Qudit", "Boson"], ot in ["Qudit", "Boson"] - if st == "Qudit" && ot == "Boson" - # Qudit site types don't see Boson overloads - continue - end - d = 4 - s = siteinds(st, 2; dim=d) - o = op("$(ot)_op_1", s, 1) - @test o ≈ itensor([i * j for i in 1:d, j in 1:d], s[1]', dag(s[1])) - - o = op("$(ot)_op_1", s, 1, 2) - @test o ≈ itensor( - [i * j for i in 1:(d^2), j in 1:(d^2)], s[2]', s[1]', dag(s[2]), dag(s[1]) - ) - - d = 4 - s = siteinds(st, 2; dim=d) - o = op("$(ot)_op_2", s, 1) - @test o ≈ itensor([i * j for i in 1:d, j in 1:d], s[1]', dag(s[1])) - @test_throws MethodError op("$(ot)_op_2", s, 1, 2) - end - end - - @testset "Custom SiteType using op!" 
begin - # Use "_Custom_" tag even though this example - # is for S=3/2, because we might define the - # "S=3/2" TagType inside ITensors.jl later - function ITensors.op!(Op::ITensor, ::OpName"Sz", ::SiteType"_Custom_", s::Index) - Op[s' => 1, s => 1] = +3 / 2 - Op[s' => 2, s => 2] = +1 / 2 - Op[s' => 3, s => 3] = -1 / 2 - return Op[s' => 4, s => 4] = -3 / 2 - end - - function ITensors.op!( - Op::ITensor, ::OpName"α", ::SiteType"_Custom_", s1::Index, s2::Index - ) - Op[s1' => 1, s2' => 2, s1 => 1, s2 => 2] = +3 / 2 - Op[s1' => 2, s2' => 1, s1 => 2, s2 => 2] = +1 / 2 - Op[s1' => 3, s2' => 3, s1 => 3, s2 => 4] = -1 / 2 - return Op[s1' => 4, s2' => 1, s1 => 4, s2 => 2] = -3 / 2 - end - - function ITensors.op!( - Op::ITensor, - ::OpName"β", - ::SiteType"_Custom1", - ::SiteType"_Custom2", - s1::Index, - s2::Index, - ) - Op[s1' => 1, s2' => 2, s1 => 1, s2 => 2] = +5 / 2 - Op[s1' => 2, s2' => 1, s1 => 2, s2 => 2] = +3 / 2 - Op[s1' => 3, s2' => 3, s1 => 3, s2 => 4] = -3 / 2 - return Op[s1' => 4, s2' => 1, s1 => 4, s2 => 2] = -5 / 2 - end - - s = Index(4, "_Custom_, __x") - Sz = op("Sz", s) - @test Sz[s' => 1, s => 1] ≈ +3 / 2 - @test Sz[s' => 2, s => 2] ≈ +1 / 2 - @test Sz[s' => 3, s => 3] ≈ -1 / 2 - @test Sz[s' => 4, s => 4] ≈ -3 / 2 - - t = Index(4, "_Custom_, __x") - α = op("α", s, t) - @test α[s' => 1, t' => 2, s => 1, t => 2] ≈ +3 / 2 - @test α[s' => 2, t' => 1, s => 2, t => 2] ≈ +1 / 2 - @test α[s' => 3, t' => 3, s => 3, t => 4] ≈ -1 / 2 - @test α[s' => 4, t' => 1, s => 4, t => 2] ≈ -3 / 2 - - s1 = Index(4, "_Custom1, __x") - @test_throws ArgumentError op("α", t, s1) - - s2 = Index(4, "_Custom2, __x") - β = op("β", s1, s2) - @test β[s1' => 1, s2' => 2, s1 => 1, s2 => 2] ≈ +5 / 2 - @test β[s1' => 2, s2' => 1, s1 => 2, s2 => 2] ≈ +3 / 2 - @test β[s1' => 3, s2' => 3, s1 => 3, s2 => 4] ≈ -3 / 2 - @test β[s1' => 4, s2' => 1, s1 => 4, s2 => 2] ≈ -5 / 2 - @test_throws ArgumentError op("β", s2, s1) - end - - @testset "Custom SiteType using older op interface" begin - # Use "_Custom_" tag even though this example - # is for S=3/2, because we might define the - # "S=3/2" TagType inside ITensors.jl later - function ITensors.op(::SiteType"_Custom_", s::Index, opname::AbstractString) - Op = ITensor(s', dag(s)) - if opname == "S+" - Op[s' => 1, s => 2] = sqrt(3) - Op[s' => 2, s => 3] = 2 - Op[s' => 3, s => 4] = sqrt(3) - else - error("Name $opname not recognized for tag \"Custom\"") - end - return Op - end - - s = Index(4, "_Custom_") - Sp = op("S+", s) - @test Sp[s' => 1, s => 2] ≈ sqrt(3) - @test Sp[s' => 2, s => 3] ≈ 2 - @test Sp[s' => 3, s => 4] ≈ sqrt(3) - end - - @testset "siteind defined by space overload" begin - ITensors.space(::SiteType"Test1") = 4 - s = siteind("Test1", 3) - @test dim(s) == 4 - @test hastags(s, "Site,Test1,n=3") - - s = siteind("Test1") - @test dim(s) == 4 - @test hastags(s, "Site,Test1") - end - - @testset "siteind defined by siteind overload" begin - # TODO: Make `ITensors.siteind` accessible? Or delete this test? 
- ITensors.SiteTypes.siteind(::SiteType"Test2") = Index(4, "Test2") - s = siteind("Test2", 3) - @test dim(s) == 4 - @test hastags(s, "Test2,n=3") - end - - @testset "siteind defined by space overload with QN" begin - function ITensors.space(::SiteType"Test3") - return [QN("T", 0) => 2, QN("T", 1) => 1, QN("T", 2) => 1] - end - s = siteind("Test3", 3) - @test dim(s) == 4 - @test hasqns(s) - @test hastags(s, "Site,Test3,n=3") - end - - @testset "siteinds defined by space overload" begin - function ITensors.space(::SiteType"Test4"; conserve_qns=false) - if conserve_qns - return [QN("T", 0) => 2, QN("T", 1) => 1, QN("T", 2) => 1] - end - return 4 - end - - # Without QNs - s = siteinds("Test4", 6) - @test length(s) == 6 - @test dim(s[1]) == 4 - for n in 1:length(s) - @test hastags(s[n], "Site,Test4,n=$n") - @test !hasqns(s[n]) - end - - # With QNs - s = siteinds("Test4", 6; conserve_qns=true) - @test length(s) == 6 - @test dim(s[1]) == 4 - for n in 1:length(s) - @test hastags(s[n], "Site,Test4,n=$n") - @test hasqns(s[n]) - end - end - - @testset "siteinds defined by siteinds overload" begin - # TODO: Make `ITensors.siteinds` accessible? Or delete this test? - function ITensors.SiteTypes.siteinds(::SiteType"Test5", N; kwargs...) - return [Index(4, "Test5,n=$n") for n in 1:N] - end - s = siteinds("Test5", 8) - @test length(s) == 8 - @test dim(s[1]) == 4 - for n in 1:length(s) - @test hastags(s[n], "Test5,n=$n") - end - end - - @testset "Version of siteinds taking function argument" begin - N = 10 - s = siteinds(n -> (n == 1 || n == N) ? "S=1/2" : "S=1", N) - for n in (1, N) - @test dim(s[n]) == 2 - @test hastags(s[n], "Site,S=1/2,n=$n") - end - for n in 2:(N - 1) - @test dim(s[n]) == 3 - @test hastags(s[n], "Site,S=1,n=$n") - end - end - - @testset "siteinds addtags keyword argument" begin - N = 4 - s = siteinds("S=1/2", N; addtags="T") - for n in 1:N - @test hastags(s[n], "Site,S=1/2,n=$n,T") - end - end - - @testset "Error for undefined tag in siteinds,space system" begin - @test_throws ErrorException siteinds("Missing", 10) - @test_throws ErrorException siteind("Missing", 3) - @test isnothing(siteind("Missing")) - end - - @testset "Various ops input types" begin - s = siteinds("S=1/2", 4) - - # Vector{Tuple{String,Int}} input - oa = ops(s, [("Sz", n) for n in 1:length(s)]) - @test length(oa) == length(s) - @test norm(oa[2] - op("Sz", s, 2)) < 1E-8 - - # Vector{Tuple} input - oa = ops(s, Tuple[("Sz", n) for n in 1:length(s)]) - @test length(oa) == length(s) - @test norm(oa[2] - op("Sz", s, 2)) < 1E-8 - end - - @testset "Index Values From Strings" begin - @testset "Val function" begin - s = siteind("Electron") - @test val(s, "0") == 1 - @test val(s, "Up") == 2 - @test val(s, "Dn") == 3 - @test val(s, "UpDn") == 4 - end - - @testset "Strings in ITensor get and set" begin - s = siteind("S=1"; conserve_qns=true) - T = ITensor(s', dag(s)) - T[s' => "Up", s => "Up"] = +1.0 - T[s' => "Z0", s => "Z0"] = +2.0 - T[s' => "Dn", s => "Dn"] = -1.0 - @test T[1, 1] ≈ +1.0 - @test T[2, 2] ≈ +2.0 - @test T[3, 3] ≈ -1.0 - - o = onehot(s => "Z0") - @test vector(o) ≈ [0, 1, 0] - end - end - - @testset "state with variable dimension" begin - ITensors.space(::SiteType"MyQudit"; dim=2) = dim - - function ITensors.state(::StateName{N}, ::SiteType"MyQudit", s::Index) where {N} - n = parse(Int, String(N)) - st = zeros(dim(s)) - st[n + 1] = 1.0 - return itensor(st, s) - end - - s = siteind("MyQudit"; dim=3) - v0 = state(s, "0") - v1 = state(s, "1") - v2 = state(s, "2") - @test v0 == state("0", s) - @test v1 == state("1", 
s) - @test v2 == state("2", s) - @test dim(v0) == 3 - @test dim(v1) == 3 - @test dim(v2) == 3 - @test v0[s => 1] == 1 - @test v0[s => 2] == 0 - @test v0[s => 3] == 0 - @test v1[s => 1] == 0 - @test v1[s => 2] == 1 - @test v1[s => 3] == 0 - @test v2[s => 1] == 0 - @test v2[s => 2] == 0 - @test v2[s => 3] == 1 - @test_throws BoundsError state(s, "3") - end - - @testset "state with parameters" begin - ITensors.state(::StateName"phase", ::SiteType"Qubit"; θ::Real) = [cos(θ), sin(θ)] - s = siteind("Qubit") - @test state("phase", s; θ=π / 6) ≈ itensor([cos(π / 6), sin(π / 6)], s) - end - - @testset "state with variable dimension (deprecated)" begin - ITensors.space(::SiteType"MyQudit2"; dim=2) = dim - - # XXX: This syntax is deprecated, only testing for - # backwards compatibility. Should return the - # ITensor `itensor(st, s)`. - function ITensors.state(::StateName{N}, ::SiteType"MyQudit2", s::Index) where {N} - n = parse(Int, String(N)) - st = zeros(dim(s)) - st[n + 1] = 1.0 - return st - end - - s = siteind("MyQudit2"; dim=3) - v0 = state(s, "0") - v1 = state(s, "1") - v2 = state(s, "2") - @test v0 == state("0", s) - @test v1 == state("1", s) - @test v2 == state("2", s) - @test dim(v0) == 3 - @test dim(v1) == 3 - @test dim(v2) == 3 - @test v0[s => 1] == 1 - @test v0[s => 2] == 0 - @test v0[s => 3] == 0 - @test v1[s => 1] == 0 - @test v1[s => 2] == 1 - @test v1[s => 3] == 0 - @test v2[s => 1] == 0 - @test v2[s => 2] == 0 - @test v2[s => 3] == 1 - @test_throws BoundsError state(s, "3") - end - - @testset "StateName methods" begin - @test StateName(ITensors.SmallString("a")) == StateName("a") - @test ITensors.name(StateName("a")) == ITensors.SmallString("a") - end - - @testset "Regression test for state overload" begin - ITensors.space(::SiteType"Xev") = 8 - function ITensors.state(::StateName"0", ::SiteType"Xev") - return [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] - end - s = siteind("Xev") - @test state(s, "0") ≈ ITensor([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], s) - end - - @testset "function applied to a gate" begin - s = siteinds("Qubit", 2) - - θ = 0.1 - rx = array(op("Rx", s[1]; θ=0.1)) - exp_rx = exp(rx) - gtest = op(x -> exp(x), "Rx", s[1]; θ=0.1) - @test exp_rx ≈ array(op(x -> exp(x), "Rx", s[1]; θ=0.1)) - @test exp_rx ≈ array(op(x -> exp(x), ("Rx", 1, (θ=0.1,)), s)) - - cx = 0.1 * reshape(array(op("CX", s[1], s[2])), (4, 4)) - exp_cx = reshape(exp(cx), (2, 2, 2, 2)) - @test exp_cx ≈ array(op(x -> exp(0.1 * x), "CX", s[1], s[2])) - @test exp_cx ≈ array(op(x -> exp(0.1 * x), ("CX", (1, 2)), s)) - end - - @testset "Haar-random unitary RandomUnitary" begin - s = siteinds(2, 3) - - U = op("RandomUnitary", s, 1, 2) - @test eltype(U) == ComplexF64 - @test order(U) == 4 - @test is_unitary(U; rtol=1e-15) - - U = op("RandomUnitary", s, 1, 2, 3) - @test eltype(U) == ComplexF64 - @test order(U) == 6 - @test is_unitary(U; rtol=1e-15) - - U = op("RandomUnitary", s, 1, 2; eltype=Float64) - @test eltype(U) == Float64 - @test order(U) == 4 - @test is_unitary(U; rtol=1e-15) - - U = op("RandomUnitary", s, 1, 2, 3; eltype=Float64) - @test eltype(U) == Float64 - @test order(U) == 6 - @test is_unitary(U; rtol=1e-15) - end -end diff --git a/test/base/test_smallstring.jl b/test/base/test_smallstring.jl deleted file mode 100644 index a56d332f97..0000000000 --- a/test/base/test_smallstring.jl +++ /dev/null @@ -1,75 +0,0 @@ -using ITensors -using Test - -# TODO: Change to: -# using ITensors.SmallStrings: SmallString, Tag, isint, isnull, IntChar -import ITensors: SmallString, Tag, isint, isnull, IntChar - 
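For reference, a minimal sketch of the fixed-size string behavior the deleted tests below exercised, assuming the pre-rewrite internal API imported above (`SmallString`, `isint`, `isnull`); this is an illustrative sketch, not part of the patch itself:

using ITensors: SmallString, isint, isnull

s = SmallString()            # default-constructed strings are null
@assert isnull(s)
t = SmallString("123")
@assert isint(t)             # true when every character is a digit
@assert String(t) == "123"   # convert back to an ordinary String
@assert SmallString("1") < SmallString("2")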
-@testset "SmallString" begin - @testset "ctors" begin - s = SmallString() - @test isnull(s) - end - - @testset "setindex" begin - s = SmallString() - @test isnull(s) - t = setindex(s, IntChar(1), 1) - @test !isnull(t) - end - - @testset "comparison" begin - u = SmallString("1") - t = SmallString("1") - @test u == t - t = SmallString("2") - @test u < t - end - - @testset "Convert to String" begin - s = SmallString("abc") - @test typeof(s) == SmallString - - sg = String(s) - for n in 1:length(sg) - @test sg[n] == convert(Char, s[n]) - end - - s = SmallString("") - sg = String(s) - @test sg == "" - end - - @testset "isint" begin - i = SmallString("123") - @test isint(i) == true - - s = SmallString("abc") - @test isint(s) == false - - # Test maximum length - s = SmallString("12345678") - @test isint(s) == true - end - - @testset "isless" begin - s1 = SmallString("ab") - s2 = SmallString("xy") - @test isless(s1, s2) == true - @test isless(s2, s1) == false - @test isless(s1, s1) == false - @test isless(s2, s2) == false - end - - @testset "show" begin - t = Tag("") - @test sprint(show, t) == "" - - t = Tag("Red") - @test sprint(show, t) == "Red" - - # Make sure to test maximum length tag - t = Tag("Electron") - @test sprint(show, t) == "Electron" - end -end diff --git a/test/base/test_svd.jl b/test/base/test_svd.jl deleted file mode 100644 index 994fd684c4..0000000000 --- a/test/base/test_svd.jl +++ /dev/null @@ -1,232 +0,0 @@ -using ITensors -using Test -using Suppressor - -include(joinpath(@__DIR__, "utils", "util.jl")) - -@testset "SVD Algorithms" begin - @testset "Matrix With Zero Sing Val" begin - M = [ - 1.0 2.0 5.0 4.0 - 1.0 1.0 1.0 1.0 - 0.0 0.5 0.5 1.0 - 0.0 1.0 1.0 2.0 - ] - U, S, V = NDTensors.svd_recursive(M) - @test norm(U * LinearAlgebra.Diagonal(S) * V' - M) < 1E-13 - end - - @testset "Real Matrix" begin - M = rand(10, 20) - U, S, V = NDTensors.svd_recursive(M) - @test norm(U * LinearAlgebra.Diagonal(S) * V' - M) < 1E-12 - - M = rand(20, 10) - U, S, V = NDTensors.svd_recursive(M) - @test norm(U * LinearAlgebra.Diagonal(S) * V' - M) < 1E-12 - end - - @testset "Cplx Matrix" begin - M = rand(ComplexF64, 10, 15) - U, S, V = NDTensors.svd_recursive(M) - @test norm(U * LinearAlgebra.Diagonal(S) * V' - M) < 1E-13 - - M = rand(ComplexF64, 15, 10) - U, S, V = NDTensors.svd_recursive(M) - @test norm(U * LinearAlgebra.Diagonal(S) * V' - M) < 1E-13 - end - - @testset "Regression Test 1" begin - # Implementation of the SVD was giving low - # accuracy for this case - M = rand(2, 2, 2, 2) - - M[:, :, 1, 1] = [ - 7.713134067177845 -0.16367628720441685 - -1.5253996568409225 1.3577749944302373 - ] - - M[:, :, 2, 1] = [ - 0.0 -2.1219889218225276 - -8.320068013774126 0.43565608213298096 - ] - - M[:, :, 1, 2] = [ - 0.0 -8.662721825820825 - 0.0 -0.46817091771736885 - ] - - M[:, :, 2, 2] = [ - 0.0 0.0 - 0.0 -8.159570989998151 - ] - - t1 = Index(2, "t1") - t2 = Index(2, "t2") - u1 = Index(2, "u1") - u2 = Index(2, "u2") - - T = itensor(M, t1, t2, u1, u2) - - U, S, V = svd(T, (u1, t1)) - @test norm(U * S * V - T) / norm(T) < 1E-10 - end - - @testset "svd with empty left or right indices" for space in - (2, [QN(0, 2) => 1, QN(1, 2) => 1]), - cutoff in (nothing, 1e-15), - _eltype in (Float32, Float64, ComplexF32, ComplexF64) - - i = Index(space) - j = Index(space) - A = random_itensor(_eltype, i, j) - - U, S, V = svd(A, i, j; cutoff) - @test eltype(U) <: _eltype - @test eltype(S) <: real(_eltype) - @test eltype(V) <: _eltype - @test U * S * V ≈ A - @test hassameinds(uniqueinds(U, S), A) - @test 
isempty(uniqueinds(V, S)) - @test dim(U) == dim(A) - @test dim(S) == 1 - @test dim(V) == 1 - @test order(U) == order(A) + 1 - @test order(S) == 2 - @test order(V) == 1 - - U, S, V = svd(A, (); cutoff) - @test eltype(U) <: _eltype - @test eltype(S) <: real(_eltype) - @test eltype(V) <: _eltype - @test U * S * V ≈ A - @test hassameinds(uniqueinds(V, S), A) - @test isempty(uniqueinds(U, S)) - @test dim(U) == 1 - @test dim(S) == 1 - @test dim(V) == dim(A) - @test order(U) == 1 - @test order(S) == 2 - @test order(V) == order(A) + 1 - - @test_throws ErrorException svd(A) - end - - @testset "factorize with empty left or right indices" for space in ( - 2, [QN(0, 2) => 1, QN(1, 2) => 1] - ), - cutoff in (nothing, 1e-15) - - i = Index(space) - j = Index(space) - A = random_itensor(i, j) - - X, Y = factorize(A, i, j; cutoff) - @test X * Y ≈ A - @test hassameinds(uniqueinds(X, Y), A) - @test isempty(uniqueinds(Y, X)) - @test dim(X) == dim(A) - @test dim(Y) == 1 - @test order(X) == order(A) + 1 - @test order(Y) == 1 - - X, Y = factorize(A, (); cutoff) - @test X * Y ≈ A - @test hassameinds(uniqueinds(Y, X), A) - @test isempty(uniqueinds(X, Y)) - @test dim(X) == 1 - @test dim(Y) == dim(A) - @test order(X) == 1 - @test order(Y) == order(A) + 1 - - @test_throws ErrorException factorize(A) - end - - @testset "svd with empty left and right indices" for cutoff in (nothing, 1e-15) - A = ITensor(3.4) - - U, S, V = svd(A, (); cutoff) - @test U * S * V ≈ A - @test isempty(uniqueinds(U, S)) - @test isempty(uniqueinds(V, S)) - @test dim(U) == 1 - @test dim(S) == 1 - @test dim(V) == 1 - @test order(U) == 1 - @test order(S) == 2 - @test order(V) == 1 - - @test_throws ErrorException svd(A) - end - - @testset "factorize with empty left and right indices" for cutoff in (nothing, 1e-15) - A = ITensor(3.4) - - X, Y = factorize(A, (); cutoff) - @test X * Y ≈ A - @test isempty(uniqueinds(X, Y)) - @test isempty(uniqueinds(Y, X)) - @test dim(X) == 1 - @test dim(Y) == 1 - @test order(X) == 1 - @test order(Y) == 1 - - @test_throws ErrorException factorize(A) - end - - @testset "svd with single precision element type" for eltype in (Float32, ComplexF32), - space in (2, [QN(0) => 1, QN(1) => 1]) - - i = Index(space) - A = random_itensor(eltype, i', dag(i)) - @test Base.eltype(A) === eltype - U, S, V = svd(A, i'; maxdim=1) - @test Base.eltype(U) === eltype - @test Base.eltype(S) === real(eltype) - @test Base.eltype(V) === eltype - end - - @testset "svd arrow directions" begin - l1, l2 = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="l1", dir=ITensors.In), - Index(QN("Sz", 2) => 1, QN("Sz", 1) => 1; tags="l2", dir=ITensors.Out) - r1, r2, r3 = Index(QN("Sz", -2) => 1, QN("Sz", 1) => 1; tags="r1", dir=ITensors.Out), - Index(QN("Sz", 2) => 1, QN("Sz", 1) => 1; tags="r2", dir=ITensors.In), - Index(QN("Sz", -2) => 1, QN("Sz", 1) => 1; tags="r3", dir=ITensors.In) - A = random_itensor(l1, l2, r1, r2, r3) - - for leftdir in [ITensors.Out, ITensors.In] - for rightdir in [ITensors.Out, ITensors.In] - U, S, V = svd(A, l1, l2; leftdir, rightdir) - s1, s2 = inds(S) - @test dir(s1) == leftdir - @test dir(s2) == rightdir - @test norm(U * S * V - A) <= 1e-14 - end - end - - for dir in [ITensors.Out, ITensors.In] - L, R, spec = ITensors.factorize_svd(A, l1, l2; dir, ortho="none") - @test dir == ITensors.dir(commonind(L, R)) - @test norm(L * R - A) <= 1e-14 - end - end - - # TODO: remove this test, it takes a long time - ## @testset "Ill-conditioned matrix" begin - ## d = 5000 - ## i = Index(d, "i") - ## T = 
itensor(make_illconditioned_matrix(dim(i)), i', i) - - ## @suppress begin - ## F = svd(T, i'; alg="divide_and_conquer") - ## end - ## # Depending on the LAPACK implementation, - ## # this sometimes works so don't test it - ## #@test isnothing(F) - - ## # XXX: This fails on Windows, removing for now. - ## # F = svd(T, i'; alg="qr_iteration") - ## # @test !isnothing(F) - ## # @test F.U * F.S * F.V ≈ T - ## end -end diff --git a/test/base/test_symmetrystyle.jl b/test/base/test_symmetrystyle.jl deleted file mode 100644 index 648e642c24..0000000000 --- a/test/base/test_symmetrystyle.jl +++ /dev/null @@ -1,48 +0,0 @@ -using ITensors -using ITensors.NDTensors -using Test - -@testset "SymmetryStyle trait" begin - i = Index(2) - iqn = Index([QN(0) => 1, QN(1) => 2]) - - @test @inferred(ITensors.symmetrystyle(i)) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle((i,))) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle([i])) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle(i', i)) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle((i', i))) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle([i', i])) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle(i'', i', i)) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle((i'', i', i))) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle([i'', i', i])) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle(i''', i'', i', i)) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle((i''', i'', i', i))) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle([i''', i'', i', i])) == ITensors.NonQN() - - @test @inferred(ITensors.symmetrystyle(iqn)) == ITensors.HasQNs() - @test @inferred(ITensors.symmetrystyle((iqn,))) == ITensors.HasQNs() - @test @inferred(ITensors.symmetrystyle([iqn])) == ITensors.HasQNs() - @test @inferred(ITensors.symmetrystyle(iqn', iqn)) == ITensors.HasQNs() - @test @inferred(ITensors.symmetrystyle((iqn', iqn))) == ITensors.HasQNs() - @test @inferred(ITensors.symmetrystyle([iqn', iqn])) == ITensors.HasQNs() - @test @inferred(ITensors.symmetrystyle(iqn'', iqn', iqn)) == ITensors.HasQNs() - @test @inferred(ITensors.symmetrystyle((iqn'', iqn', iqn))) == ITensors.HasQNs() - @test @inferred(ITensors.symmetrystyle([iqn'', iqn', iqn])) == ITensors.HasQNs() - @test @inferred(ITensors.symmetrystyle(iqn', i)) == ITensors.HasQNs() - @test @inferred(ITensors.symmetrystyle(i', i, iqn)) == ITensors.HasQNs() - @test @inferred(ITensors.symmetrystyle((i', i, iqn))) == ITensors.HasQNs() - @test @inferred(ITensors.SymmetryStyle, ITensors.symmetrystyle([i', i, iqn])) == - ITensors.HasQNs() - - A = random_itensor(i', dag(i)) - Aqn = random_itensor(iqn', dag(iqn)) - - @test @inferred(ITensors.SymmetryStyle, ITensors.symmetrystyle(A)) == ITensors.NonQN() - @test @inferred(ITensors.SymmetryStyle, ITensors.symmetrystyle(Aqn)) == ITensors.HasQNs() - - T = Tensor(A) - Tqn = Tensor(Aqn) - - @test @inferred(ITensors.symmetrystyle(T)) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle(Tqn)) == ITensors.HasQNs() -end diff --git a/test/base/test_tagset.jl b/test/base/test_tagset.jl deleted file mode 100644 index 794a64f258..0000000000 --- a/test/base/test_tagset.jl +++ /dev/null @@ -1,145 +0,0 @@ -using ITensors, Test - -@testset "TagSet" begin - ts = TagSet("t3,t2,t1") - ts2 = copy(ts) - @test ts == ts2 - @test hastags(ts, "t1") - @test hastags(ts, "t2") - @test hastags(ts, "t3") - @test hastags(ts, "t3,t1") - @test !hastags(ts, "t4") - @test TagSet(ts) === ts - - 
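As background for the tag tests in this file, a brief sketch of the TagSet semantics they rely on: a TagSet is parsed from a comma-separated string into a sorted set of tags, whitespace is ignored, and tags can be queried, added, and removed. A minimal example, assuming only the exported API used in these tests:

using ITensors: TagSet, addtags, removetags, hastags

ts = TagSet("t3,t2,t1")                # stored sorted: t1, t2, t3
@assert hastags(ts, "t1") && hastags(ts, "t3,t1")
@assert !hastags(ts, "t4")
@assert TagSet(" t1 , t2 ") == TagSet("t1,t2")  # whitespace is ignored
@assert removetags(addtags(ts, "t4"), "t4") == ts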
@test ITensors.commontags() == ts"" - @test ITensors.commontags(ts"a,b", ts"a,c") == ts"a" - @test ITensors.commontags(Index(2, "a,b,x"), Index(3, "x,a,c"), Index(4, "x,a,z,w")) == - ts"a,x" - - t1 = TagSet("t1") - t2 = TagSet("t2") - t3 = TagSet("t3") - @test ts[1] == t1[1] - @test ts[2] == t2[1] - @test ts[3] == t3[1] - - @testset "Empty TagSet" begin - ts1 = TagSet() - @test length(ts1) == 0 - - ts2 = TagSet("") - @test ts2 == ts1 - @test length(ts2) == 0 - end - - @testset "Ignore Whitespace" begin - ts = TagSet(" aaa , bb bb , ccc ") - @test hastags(ts, " aaa ") - @test hastags(ts, "aaa") - @test hastags(ts, " aa a ") - @test hastags(ts, "bbbb") - end - - @testset "Remove tags" begin - ts1 = TagSet("x,y,z") - ts2 = TagSet("x,z") - @test removetags(ts1, "y") == ts2 - end - - @testset "Unicode tags" begin - ts = TagSet("α") - @test length(ts) == 1 - @test hastags(ts, "α") - @test ts[1] == ITensors.SmallString("α") - - ts = TagSet("α,β") - @test length(ts) == 2 - @test hastags(ts, "β") - @test hastags(ts, "α") - @test ts[1] == ITensors.SmallString("α") - @test ts[2] == ITensors.SmallString("β") - - ts = TagSet("αβγδϵζηθ,ijklmnop,qrstuvwx,ΑΒΓΔΕΖΗΘ") - @test length(ts) == 4 - @test hastags(ts, "αβγδϵζηθ") - @test hastags(ts, "ijklmnop") - @test hastags(ts, "qrstuvwx") - @test hastags(ts, "ΑΒΓΔΕΖΗΘ") - @test ts[1] == ITensors.SmallString("ijklmnop") - @test ts[2] == ITensors.SmallString("qrstuvwx") - @test ts[3] == ITensors.SmallString("ΑΒΓΔΕΖΗΘ") - @test ts[4] == ITensors.SmallString("αβγδϵζηθ") - end - - @testset "Tag long" begin - ts = TagSet("abcdefghijklmnop,ijklmnopqabcdefg") - @test length(ts) == 2 - @test hastags(ts, "abcdefghijklmnop") - @test hastags(ts, "ijklmnopqabcdefg") - end - - @testset "Tag too long" begin - @test !ITensors.using_strict_tags() - @test TagSet("ijklmnopqabcdefgh") == TagSet("ijklmnopqabcdefg") - @test TagSet("abcd,ijklmnopqabcdefgh") == TagSet("abcd,ijklmnopqabcdefg") - @test TagSet("ijklmnopqabcdefgh,abcd") == TagSet("abcd,ijklmnopqabcdefg") - ITensors.set_strict_tags!(true) - @test ITensors.using_strict_tags() - @test_throws ErrorException TagSet("ijklmnopqabcdefgh") - ITensors.set_strict_tags!(false) - end - - @testset "Too many tags" begin - @test !ITensors.using_strict_tags() - @test TagSet("a,b,c,d,e,f") == TagSet("a,b,c,d") - @test addtags(TagSet("a,b,c,d"), "e") == TagSet("a,b,c,d") - @test replacetags(TagSet("a,b,c,d"), "d", "e,f") == TagSet("a,b,c,e") - ITensors.set_strict_tags!(true) - @test ITensors.using_strict_tags() - @test_throws ErrorException TagSet("a,b,c,d,e,f") - @test_throws ErrorException addtags(TagSet("a,b,c,d"), "e") - @test_throws ErrorException replacetags(TagSet("a,b,c,d"), "d", "e,f") - ITensors.set_strict_tags!(false) - end - - @testset "Integer Tags" begin - ts = TagSet("123") - @test length(ts) == 1 - @test hastags(ts, "123") - end - - @testset "Show TagSet" begin - ts = TagSet("Site,n=2") - @test length(sprint(show, ts)) > 1 - end - - @testset "Iterate Tagset" begin - ts = TagSet("Site, n=2") - @test [tag for tag in ts] == [ts[1], ts[2]] - end - - @testset "addtags" begin - ts = TagSet("Blue") - @test hastags(ts, "Blue") - - ts = addtags(ts, "Red") - @test hastags(ts, "Blue") - @test hastags(ts, "Red") - - ts = addtags(ts, "Green") - @test hastags(ts, "Blue") - @test hastags(ts, "Red") - @test hastags(ts, "Green") - - ts = addtags(ts, "Yellow") - @test hastags(ts, "Blue") - @test hastags(ts, "Red") - @test hastags(ts, "Green") - @test hastags(ts, "Yellow") - - @test addtags(ts, "Orange") == ts - 
ITensors.set_strict_tags!(true) - @test_throws ErrorException addtags(ts, "Orange") - ITensors.set_strict_tags!(false) - end -end diff --git a/test/base/test_trg.jl b/test/base/test_trg.jl deleted file mode 100644 index 84994b0fb3..0000000000 --- a/test/base/test_trg.jl +++ /dev/null @@ -1,24 +0,0 @@ -using ITensors -using Test -using Random - -Random.seed!(12345) - -include(joinpath(pkgdir(ITensors), "examples", "src", "trg.jl")) -include(joinpath(pkgdir(ITensors), "examples", "src", "2d_classical_ising.jl")) - -@testset "trg" begin - # Make Ising model partition function - β = 1.1 * βc - d = 2 - s = Index(d) - l = addtags(s, "left") - u = addtags(s, "up") - T = ising_mpo(l, u, β) - - χmax = 20 - nsteps = 20 - κ, T = trg(T; χmax=χmax, nsteps=nsteps) - - @test κ ≈ exp(-β * ising_free_energy(β)) atol = 1e-4 -end diff --git a/test/base/utils/TestITensorsExportedNames/TestITensorsExportedNames.jl b/test/base/utils/TestITensorsExportedNames/TestITensorsExportedNames.jl deleted file mode 100644 index 65ac69c9f3..0000000000 --- a/test/base/utils/TestITensorsExportedNames/TestITensorsExportedNames.jl +++ /dev/null @@ -1,203 +0,0 @@ -module TestITensorsExportedNames -#= -# List constructed with (along with cleaning up -# macro symbols): -using DelimitedFiles: writedlm -using ITensors: ITensors -open("itensors_exported_names.jl", "w") do io - writedlm(io, repr.(names(ITensors)) .* ",") -end -=# -const ITENSORS_EXPORTED_NAMES = [ - Symbol("@disable_warn_order"), - Symbol("@reset_warn_order"), - Symbol("@set_warn_order"), - Symbol("@ts_str"), - :Apply, - :Block, - :ITensor, - :ITensors, - :Index, - :IndexSet, - :IndexVal, - :LinearAlgebra, - :NDTensors, - :Order, - :QN, - :Spectrum, - :TagSet, - :addblock!, - :addtags, - :addtags!, - :allhastags, - :anyhastags, - :apply, - :argsdict, - :array, - :axpy!, - :blockdim, - :blockoffsets, - :checkflux, - :combinedind, - :combiner, - :commonind, - :commonindex, - :commoninds, - :complex!, - :contract, - :convert_eltype, - :convert_leaf_eltype, - :dag, - :delta, - :dense, - :denseblocks, - :diag, - :diagITensor, - :diag_itensor, - :diagitensor, - :dim, - :dims, - :dir, - :directsum, - :disable_tblis!, - :disable_warn_order!, - :dot, - :eachindval, - :eachnzblock, - :eachval, - :eigen, - :eigs, - :emptyITensor, - :enable_tblis!, - :entropy, - :factorize, - :filterinds, - :findindex, - :findinds, - :firstind, - :firstintersect, - :firstsetdiff, - :flux, - :fparity, - :getfirst, - :getindex, - :hadamard_product, - :hascommoninds, - :hasid, - :hasind, - :hasinds, - :hasplev, - :hasqns, - :hassameinds, - :hastags, - :id, - :ind, - :index_id_rng, - :inds, - :inner, - :insertblock!, - :isactive, - :isfermionic, - :ishermitian, - :isindequal, - :itensor, - :linkindex, - :lq, - :mapprime, - :mapprime!, - :matmul, - :matrix, - :maxdim, - :mindim, - :modulus, - :mul!, - :nblocks, - :nnz, - :nnzblocks, - :noncommonind, - :noncommoninds, - :noprime, - :noprime!, - :norm, - :normalize, - :normalize!, - :not, - :nullspace, - :nzblock, - :nzblocks, - :onehot, - :order, - :permute, - :plev, - :polar, - :pop, - :popfirst, - :prime, - :prime!, - :product, - :push, - :pushfirst, - :ql, - :qn, - :qr, - :randn!, - :randomITensor, - :random_itensor, - :readcpp, - :removeqn, - :removeqns, - :removetags, - :removetags!, - :replaceind, - :replaceind!, - :replaceindex!, - :replaceinds, - :replaceinds!, - :replaceprime, - :replacetags, - :replacetags!, - :reset_warn_order!, - :rmul!, - :rq, - :scalar, - :scale!, - :set_warn_order!, - :setdir, - :setelt, - :setindex, - :setprime, - 
:setprime!, - :setspace, - :settags, - :settags!, - :sim, - :siteindex, - :space, - :splitblocks, - :storage, - :store, - :svd, - :swapind, - :swapinds, - :swapinds!, - :swapprime, - :swapprime!, - :swaptags, - :swaptags!, - :tags, - :tr, - :transpose, - :truncerror, - :unionind, - :unioninds, - :uniqueind, - :uniqueindex, - :uniqueinds, - :use_debug_checks, - :val, - :vector, - :δ, - :⊕, - :⊙, -] -end diff --git a/test/base/utils/testfilev0.1.41.h5 b/test/base/utils/testfilev0.1.41.h5 deleted file mode 100644 index 9f55334f92e53522bcfcda2898d63cc8b3e4d9d2..0000000000000000000000000000000000000000 GIT binary patch [16680-byte binary HDF5 test fixture deleted; unreadable binary patch data omitted] diff --git a/test/base/utils/util.jl b/test/base/utils/util.jl deleted file mode 100644 index 6839f2201f..0000000000 --- a/test/base/utils/util.jl +++ /dev/null @@ -1,40 +0,0 @@ -using ITensors -using Random - -# Based on https://discourse.julialang.org/t/lapackexception-1-while-svd-but-not-svdvals/23787 -function make_illconditioned_matrix(T=5000) - t = 0:(T - 1) - f = LinRange(0, 0.5 - 1 / length(t) / 2, length(t) ÷ 2) - y = sin.(t) - function check_freq(f) - zerofreq = findfirst(iszero, f) - zerofreq !== nothing && - zerofreq != 1 && - throw(ArgumentError("If zero frequency is included it must be the first frequency")) - return zerofreq - end - function get_fourier_regressor(t, f) - zerofreq = check_freq(f) - N = length(t) - Nf = length(f) - Nreg = zerofreq === nothing ?
2Nf : 2Nf - 1 - N >= Nreg || throw(ArgumentError("Too many frequency components $Nreg > $N")) - A = zeros(N, Nreg) - sinoffset = Nf - for fn in 1:Nf - if fn == zerofreq - sinoffset = Nf - 1 - end - for n in 1:N - phi = 2π * f[fn] * t[n] - A[n, fn] = cos(phi) - if fn != zerofreq - A[n, fn + sinoffset] = -sin(phi) - end - end - end - return A, zerofreq - end - A, z = get_fourier_regressor(t, f) - return [A y] -end diff --git a/test/basics/test_basics.jl b/test/basics/test_basics.jl new file mode 100644 index 0000000000..22e349fb73 --- /dev/null +++ b/test/basics/test_basics.jl @@ -0,0 +1,6 @@ +using ITensors: ITensors +using Test: @test, @testset + +@testset "ITensors" begin + # Tests go here. +end diff --git a/test/ext/ITensorsChainRulesCoreExt/Project.toml b/test/ext/ITensorsChainRulesCoreExt/Project.toml deleted file mode 100644 index 74443f9f26..0000000000 --- a/test/ext/ITensorsChainRulesCoreExt/Project.toml +++ /dev/null @@ -1,6 +0,0 @@ -[deps] -ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" -ChainRulesTestUtils = "cdddcdb0-9152-4a09-a978-84456f9df70a" -FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000" -ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" -Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" diff --git a/test/ext/ITensorsChainRulesCoreExt/runtests.jl b/test/ext/ITensorsChainRulesCoreExt/runtests.jl deleted file mode 100644 index c2fe5a8b6f..0000000000 --- a/test/ext/ITensorsChainRulesCoreExt/runtests.jl +++ /dev/null @@ -1,16 +0,0 @@ -using ITensors -using Test - -ITensors.Strided.disable_threads() -ITensors.BLAS.set_num_threads(1) -ITensors.disable_threaded_blocksparse() - -@testset "$(@__DIR__)" begin - filenames = filter(readdir(@__DIR__)) do f - startswith("test_")(f) && endswith(".jl")(f) - end - @testset "Test $(@__DIR__)/$filename" for filename in filenames - println("Running $(@__DIR__)/$filename") - @time include(filename) - end -end diff --git a/test/ext/ITensorsChainRulesCoreExt/test_chainrules.jl b/test/ext/ITensorsChainRulesCoreExt/test_chainrules.jl deleted file mode 100644 index d20ab0474f..0000000000 --- a/test/ext/ITensorsChainRulesCoreExt/test_chainrules.jl +++ /dev/null @@ -1,522 +0,0 @@ -using ITensors -using ITensors.SiteTypes: siteinds -using Random -using Test - -using ChainRulesCore: rrule_via_ad - -include("utils/chainrulestestutils.jl") - -using Zygote: ZygoteRuleConfig, gradient - -Random.seed!(1234) - -@testset "ChainRules rrules: basic ITensor operations" begin - i = Index(2, "i") - j = Index(2, "j") - A = random_itensor(i', dag(i)) - V = random_itensor(i) - Ac = random_itensor(ComplexF64, i', dag(i)) - B = random_itensor(i', dag(i)) - C = ITensor(3.4) - D = random_itensor(i', j) - - @testset "getindex, priming, tagging, ITensor constructors, dag, etc." 
begin - test_rrule(getindex, ITensor(3.4); check_inferred=false) - test_rrule(getindex, A, 1, 2; check_inferred=false) - test_rrule(contract, A', A; check_inferred=false) - test_rrule(*, 3.2, A; check_inferred=false) - test_rrule(*, A, 4.3; check_inferred=false) - test_rrule(+, A, B; check_inferred=false) - test_rrule(prime, A; check_inferred=false) - test_rrule(prime, A, 2; check_inferred=false) - test_rrule(prime, A; fkwargs=(; tags="i"), check_inferred=false) - test_rrule(prime, A; fkwargs=(; tags="x"), check_inferred=false) - test_rrule(setprime, D, 2; check_inferred=false) - test_rrule(noprime, D; check_inferred=false) - test_rrule(replaceprime, A, 1 => 2; check_inferred=false) - test_rrule(replaceprime, A, 1, 2; check_inferred=false) - test_rrule(swapprime, A, 0 => 1; check_inferred=false) - test_rrule(swapprime, A, 0, 1; check_inferred=false) - test_rrule(addtags, A, "x"; check_inferred=false) - test_rrule(addtags, A, "x"; fkwargs=(; plev=1), check_inferred=false) - test_rrule(removetags, A, "i"; check_inferred=false) - test_rrule(replacetags, A, "i" => "j"; check_inferred=false) - test_rrule(replacetags, A, "i", "j"; check_inferred=false) - test_rrule(settags, A, "x"; check_inferred=false) - test_rrule(settags, A, "x"; fkwargs=(; plev=1), check_inferred=false) - test_rrule( - swaptags, - random_itensor(Index(2, "i"), Index(2, "j")), - "i" => "j"; - check_inferred=false, - ) - test_rrule( - swaptags, random_itensor(Index(2, "i"), Index(2, "j")), "i", "j"; check_inferred=false - ) - test_rrule(replaceind, A, i' => sim(i); check_inferred=false) - test_rrule(replaceind, A, i', sim(i); check_inferred=false) - test_rrule(replaceinds, A, (i, i') => (sim(i), sim(i)); check_inferred=false) - test_rrule(replaceinds, A, (i, i'), (sim(i), sim(i)); check_inferred=false) - test_rrule(swapind, A, i', i; check_inferred=false) - test_rrule(swapinds, A, (i',), (i,); check_inferred=false) - test_rrule(itensor, randn(2, 2), i', i; check_inferred=false) - test_rrule(itensor, randn(2, 2), [i', i]; check_inferred=false) - test_rrule(itensor, randn(4), i', i; check_inferred=false) - test_rrule(ITensor, randn(2, 2), i', i; check_inferred=false) - test_rrule(ITensor, randn(2, 2), [i', i]; check_inferred=false) - test_rrule(ITensor, randn(4), i', i; check_inferred=false) - test_rrule(ITensor, 2.3; check_inferred=false) - test_rrule(dag, A; check_inferred=false) - test_rrule(permute, A, reverse(inds(A)); check_inferred=false) - end - - @testset "apply, contract" begin - test_rrule(ZygoteRuleConfig(), apply, A, V; rrule_f=rrule_via_ad, check_inferred=false) - f = function (A, B) - AT = ITensor(A, i, j) - BT = ITensor(B, j, i) - return (BT * AT)[1] - end - args = (rand(2, 2), rand(2, 2)) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - args = (rand(4), rand(4)) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - args = (rand(4), rand(2, 2)) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - - @testset "contraction sequence" begin - a, b, k, l, m, n, u, v = Index.([2, 3, 2, 3, 2, 3, 2, 3]) - args = ( - random_itensor(a, b, k), - random_itensor(a, l, m), - random_itensor(b, u, n), - random_itensor(u, v), - random_itensor(k, v), - random_itensor(l, m, n), - ) - f = (args...) -> contract([args...])[] # Left associative - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - seq = ITensors.optimal_contraction_sequence([args...]) - f = (args...) 
-> contract([args...]; sequence=seq)[] # sequence - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - - @testset "construction and contraction" begin - f = function (x) - b = itensor([0, 0, 1, 1], i, j) - k = itensor([0, 1, 0, 0], i, j) - T = itensor([0 x x^2 1; 0 0 sin(x) 0; 0 cos(x) 0 exp(x); x 0 0 0], i', j', i, j) - return x * real((b' * T * k)[]) - end - args = (0.3,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - #f = function (x) - # b = itensor([0, 0, 1, 1], i, j) - # k = itensor([0, 1, 0, 0], i, j) - # T = itensor([0 x x^2 1; 0 0 sin(x) 0; 0 cos(x) 0 exp(x); x 0 0 0], i, j, i', j') - # return x * real((b' * T * k)[]) - #end - #args = (0.3,) - #test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - - @testset "scalar operations" begin - f = x -> sin(scalar(x)^3) - args = (C,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = x -> sin(x[]^3) - args = (C,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - - @testset "adjoint" begin - f = adjoint - args = (A,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - - @testset "contraction, priming, tagging + getindex" begin - f = (x, y) -> (x * y)[1, 1] - args = (A', A) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = x -> prime(x, 2)[1, 1] - args = (A,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = x -> x'[1, 1] - args = (A,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = x -> addtags(x, "x")[1, 1] - args = (A,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = x -> (x' * x)[1, 1] - args = (A,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = x -> (prime(x) * x)[1, 1] - args = (A,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = x -> ((x'' * x') * x)[1, 1] - args = (A,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = x -> (x'' * (x' * x))[1, 1] - args = (A,) - - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - f = (x, y, z) -> (x * y * z)[1, 1] - args = (A'', A', A) - - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - f = x -> (x'' * x' * x)[1, 1] - args = (A,) - - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - f = x -> (x''' * x'' * x' * x)[1, 1] - args = (A,) - - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - f = x -> (x''' * x'' * x' * x)[1, 1] - args = (A,) - - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - f = (x, y) -> (x + y)[1, 1] - args = (A, B) - - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - f = x -> (x + x)[1, 1] - args = (A,) - - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - f = x -> (2x)[1, 1] - args = (A,) - - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - f = x -> (x + 2x)[1, 1] - args = (A,) - - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = x 
-> (x + 2 * mapprime(x' * x, 2 => 1))[1, 1] - args = (A,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = (x, y) -> (x * y)[] - args = (A, δ(dag(inds(A)))) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = x -> (x * x)[] - args = (A,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = x -> (x * δ(dag(inds(x))))[] - args = (A,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - - @testset "delta contractions" begin - f = function (x) - y = x' * x - tr = δ(dag(inds(y))) - return (y * tr)[] - end - args = (A,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = function (x) - y = x'' * x' * x - tr = δ(dag(inds(y))) - return (y * tr)[] - end - args = (A,) - - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - f = x -> (x^2 * δ((i', i)))[1, 1] - args = (6.2,) - - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - f = x -> (x^2 * δ(i', i))[1, 1] - args = (5.2,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - - @testset "ITensor constructors" begin - f = x -> itensor([x^2 x; x^3 x^4], i', i) - args = (2.54,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = x -> ITensor([x^2 x; x^3 x^4], i', i) - args = (2.1,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = x -> ITensor(x) - args = (2.12,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - - @testset "ITensor constructor and contraction" begin - f = function (x) - T = itensor([x^2 sin(x); x^2 exp(-2x)], j', dag(j)) - return real((dag(T) * T)[]) - end - args = (2.8,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - args = (2.8 + 3.1im,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = function (x) - v = itensor([exp(-3.2x), cos(2x^2)], j) - T = itensor([x^2 sin(x); x^2 exp(-2x)], j', dag(j)) - return real((dag(v') * T * v)[]) - end - args = (2.8,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - #args = (2.8 + 3.1im,) - #test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - f = function (x) - return real((x^3 * ITensor([sin(x) exp(-2x); 3x^3 x+x^2], j', dag(j)))[1, 1]) - end - args = (3.4 + 2.3im,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - - @testset "priming" begin - f = x -> prime(permute(x, reverse(inds(x))))[1, 1] - args = (A,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = x -> prime(x; plev=1)[1, 1] - args = (A,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - - @testset "ITensor inner" begin - W = itensor([1 1] / √2, i) - f = x -> inner(W', exp(x), W) - args = (A,) - test_rrule( - ZygoteRuleConfig(), - f, - args...; - rrule_f=rrule_via_ad, - check_inferred=false, - rtol=1e-3, - atol=1e-3, - ) - - f = x -> inner(V', exp(x), V) - args = (A,) - test_rrule( - ZygoteRuleConfig(), - f, - args...; - rrule_f=rrule_via_ad, - check_inferred=false, - rtol=1e-4, - atol=1e-4, - ) - end - - @testset "issue 933" begin - # 
https://github.com/ITensor/ITensors.jl/issues/933 - f2 = function (x, a) - y = a + im * x - return real(dag(y) * y)[] - end - a = random_itensor() - f_itensor = x -> f2(x, a) - f_number = x -> f2(x, a[]) - x = random_itensor() - @test f_number(x[]) ≈ f_itensor(x) - @test f_number'(x[]) ≈ f_itensor'(x)[] - @test isreal(f_itensor'(x)) - end - - @testset "issue 969" begin - i = Index(2) - j = Index(3) - A = random_itensor(i) - B = random_itensor(j) - f = function (x, y) - d = δ(ind(x, 1), ind(y, 1)) - return (x * d * y)[] - end - args = (A, B) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - - @testset "issue 1294" begin - for i in ( - Index([QN() => 2]), - Index([QN(0) => 1, QN(1) => 1]), - Index([QN("SzParity", 1, 2) => 1, QN("SzParity", 0, 2) => 1]), - ) - A = random_itensor(i', dag(i)) - B = random_itensor(i', dag(i)) - - f(A, B) = dot(A, B) - grad = gradient(f, A, B) - @test grad[1] ≈ B - @test grad[2] ≈ dag(A) - end - end -end - -@testset "ChainRules rrules: op" begin - s = siteinds("Qubit", 4) - - # RX - args = (0.2,) - for σ in [1, 2], σ′ in [1, 2] - f = x -> op("Rx", s, 1; θ=x)[σ, σ′] - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - # RY - args = (0.2,) - for σ in [1, 2], σ′ in [1, 2] - f = x -> op("Ry", s, 1; θ=x)[σ, σ′] - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - # RZ - args = (0.2,) - for σ in [1, 2], σ′ in [1, 2] - f = x -> op("Rz", s, 1; ϕ=x)[σ, σ′] - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - # Rn - args = (0.2, 0.3, 0.4) - for σ in [1, 2], σ′ in [1, 2] - f = x -> op("Rn", s, 1; θ=x[1], ϕ=x[2], λ=x[3])[σ, σ′] - test_rrule(ZygoteRuleConfig(), f, args; rrule_f=rrule_via_ad, check_inferred=false) - end - - basis = vec(collect(Iterators.product(fill([1, 2], 2)...))) - # CRx - args = (0.2,) - for σ in basis, σ′ in basis - f = x -> op("CRx", s, (1, 2); θ=x)[σ..., σ′...] - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - - # CRy - args = (0.2,) - for σ in basis, σ′ in basis - f = x -> op("CRy", s, (1, 2); θ=x)[σ..., σ′...] - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - - # CRz - args = (0.2,) - for σ in basis, σ′ in basis - f = x -> op("CRz", s, (1, 2); ϕ=x)[σ..., σ′...] - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - # Rn - args = (0.2, 0.3, 0.4) - for σ in basis, σ′ in basis - f = x -> op("CRn", s, (1, 2); θ=x[1], ϕ=x[2], λ=x[3])[σ..., σ′...] - test_rrule(ZygoteRuleConfig(), f, args; rrule_f=rrule_via_ad, check_inferred=false) - end - - # Rxx - args = (0.2,) - for σ in basis, σ′ in basis - f = x -> op("Rxx", s, (1, 2); ϕ=x)[σ..., σ′...] - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - # Ryy - args = (0.2,) - for σ in basis, σ′ in basis - f = x -> op("Ryy", s, (1, 2); ϕ=x)[σ..., σ′...] - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - # Rzz - args = (0.2,) - for σ in basis, σ′ in basis - f = x -> op("Rzz", s, (1, 2); ϕ=x)[σ..., σ′...] 
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - - # algebra with non-parametric gates - args = (0.2,) - # addition - for σ in [1, 2], σ′ in [1, 2] - f = x -> x * op("H + Y", s[1])[σ, σ′] - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - #subtraction - for σ in [1, 2], σ′ in [1, 2] - f = x -> x * op("H - Y", s[1])[σ, σ′] - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - # product - for σ in [1, 2], σ′ in [1, 2] - f = x -> x * op("H * Y", s[1])[σ, σ′] - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - # composite - for σ in [1, 2], σ′ in [1, 2] - f = x -> x * op("H + X * Y", s[1])[σ, σ′] - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - - ## algebra with parametric gates - #args = (0.2,) - ## addition - #for σ in [1, 2], σ′ in [1, 2] - # f = x -> x * op("H + Rx", s[1]; θ = x)[σ, σ′] - # test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - #end - ##subtraction - #for σ in [1, 2], σ′ in [1, 2] - # f = x -> x * op("H - Rx", s[1]; θ = x)[σ, σ′] - # test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - #end - ### product - #for σ in [1, 2], σ′ in [1, 2] - # f = x -> x * op("Rx * Y", s[1]; θ = x)[σ, σ′] - # test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - #end - ## composite - #for σ in [1, 2], σ′ in [1, 2] - # f = x -> x * op("Rx * Y - Ry", s[1]; θ = x)[σ, σ′] - # test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - #end - # - ## two-qubit composite algebra with parametric gate - #args = (0.2,) - #for σ in basis, σ′ in basis - # f = x -> op("Rxx + CX * CZ - Ryy", s, (1, 2); ϕ = x)[σ..., σ′...] 
- # test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - #end - - # functions - f = x -> exp(ITensor(Op("Ry", 1; θ=x), q))[1, 1] - - # RX - args = (0.2,) - for σ in [1, 2], σ′ in [1, 2] - f = x -> exp(ITensor(Op("Rx", 1; θ=x), s))[σ, σ′] - test_rrule( - ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false, atol=1e-6 - ) - end - - # RY - args = (0.2,) - for σ in [1, 2], σ′ in [1, 2] - f = x -> exp(ITensor(Op("Ry", 1; θ=x), s))[σ, σ′] - test_rrule( - ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false, atol=1e-6 - ) - end -end diff --git a/test/ext/ITensorsChainRulesCoreExt/test_chainrules_ops.jl b/test/ext/ITensorsChainRulesCoreExt/test_chainrules_ops.jl deleted file mode 100644 index d21460f932..0000000000 --- a/test/ext/ITensorsChainRulesCoreExt/test_chainrules_ops.jl +++ /dev/null @@ -1,283 +0,0 @@ -using ITensors -using ITensors.SiteTypes: siteinds -using Test - -using ChainRulesCore: rrule_via_ad - -include("utils/chainrulestestutils.jl") - -using Zygote: ZygoteRuleConfig, gradient - -@testset "ChainRules rrules: Ops" begin - s = siteinds("S=1/2", 4) - - x = 2.4 - V = random_itensor(s[1], s[2]) - - f = function (x) - y = ITensor(Op("Ry", 1; θ=x), s) - return y[1, 1] - end - args = (x,) - test_rrule( - ZygoteRuleConfig(), - f, - args...; - rrule_f=rrule_via_ad, - check_inferred=false, - rtol=1.0e-7, - atol=1.0e-7, - ) - - f = function (x) - y = exp(ITensor(Op("Ry", 1; θ=x), s)) - return y[1, 1] - end - args = (x,) - test_rrule( - ZygoteRuleConfig(), - f, - args...; - rrule_f=rrule_via_ad, - check_inferred=false, - rtol=1.0e-7, - atol=1.0e-7, - ) - - function sometimes_broken_test() - f = function (x) - y = Op("Ry", 1; θ=x) + Op("Ry", 1; θ=x) - return y[1].params.θ - end - args = (x,) - test_rrule( - ZygoteRuleConfig(), - f, - args...; - rrule_f=rrule_via_ad, - check_inferred=false, - rtol=1.0e-7, - atol=1.0e-7, - ) - return nothing - end - - @static if VERSION > v"1.8" - @test_skip sometimes_broken_test() - else - sometimes_broken_test() - end - - f = function (x) - y = ITensor(Op("Ry", 1; θ=x) + Op("Ry", 1; θ=x), s) - return y[1, 1] - end - args = (x,) - test_rrule( - ZygoteRuleConfig(), - f, - args...; - rrule_f=rrule_via_ad, - check_inferred=false, - rtol=1.0e-7, - atol=1.0e-7, - ) - - f = function (x) - y = exp(ITensor(Op("Ry", 1; θ=x) + Op("Ry", 1; θ=x), s)) - return y[1, 1] - end - args = (x,) - test_rrule( - ZygoteRuleConfig(), - f, - args...; - rrule_f=rrule_via_ad, - check_inferred=false, - rtol=1.0e-7, - atol=1.0e-7, - ) - - f = function (x) - y = ITensor(exp(Op("Ry", 1; θ=x) + Op("Ry", 1; θ=x)), s) - return y[1, 1] - end - args = (x,) - test_rrule( - ZygoteRuleConfig(), - f, - args...; - rrule_f=rrule_via_ad, - check_inferred=false, - rtol=1.0e-7, - atol=1.0e-7, - ) - - f = function (x) - y = ITensor(2 * Op("Ry", 1; θ=x), s) - return y[1, 1] - end - args = (x,) - test_rrule( - ZygoteRuleConfig(), - f, - args...; - rrule_f=rrule_via_ad, - check_inferred=false, - rtol=1.0e-7, - atol=1.0e-7, - ) - - f = function (x) - y = ITensor(2 * (Op("Ry", 1; θ=x) + Op("Ry", 1; θ=x)), s) - return y[1, 1] - end - args = (x,) - test_rrule( - ZygoteRuleConfig(), - f, - args...; - rrule_f=rrule_via_ad, - check_inferred=false, - rtol=1.0e-7, - atol=1.0e-7, - ) - - f = function (x) - y = ITensor(Op("Ry", 1; θ=x) * Op("Ry", 2; θ=x), s) - return y[1, 1] - end - args = (x,) - test_rrule( - ZygoteRuleConfig(), - f, - args...; - rrule_f=rrule_via_ad, - check_inferred=false, - rtol=1.0e-7, - atol=1.0e-7, - ) - - if VERSION 
≥ v"1.8" - f = function (x) - y = ITensor(exp(-x * Op("X", 1) * Op("X", 2)), s) - return norm(y) - end - args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = function (x) - y = exp(-x * Op("X", 1) * Op("X", 2)) - y *= exp(-x * Op("X", 1) * Op("X", 2)) - U = ITensor(y, s) - return norm(U) - end - args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - - U1(θ) = Op("Ry", 1; θ) - U2(θ) = Op("Ry", 2; θ) - - f = function (x) - return ITensor(U1(x), s)[1, 1] - end - args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = function (x) - return ITensor(U1(x) * U2(x), s)[1, 1] - end - args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = function (x) - return ITensor(1.2 * U1(x), s)[1, 1] - end - args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = function (x) - return ITensor(exp(1.2 * U1(x)), s)[1, 1] - end - args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = function (x) - return ITensor(exp(x * U1(1.2)), s)[1, 1] - end - args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - function H(x1, x2) - os = Ops.OpSum() - os += x1 * Op("X", 1) - os += x2 * Op("X", 2) - return os - end - - # These are broken in versions of Zygote after 0.6.43, - # See: https://github.com/FluxML/Zygote.jl/issues/1304 - @test_skip begin - f = function (x) - return ITensor(exp(1.5 * H(x, x); alg=Trotter{1}(1)), s)[1, 1] - end - args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = function (x) - return ITensor(exp(1.5 * H(x, x); alg=Trotter{2}(1)), s)[1, 1] - end - args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = function (x) - return ITensor(exp(1.5 * H(x, x); alg=Trotter{2}(2)), s)[1, 1] - end - args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = function (x) - return ITensor(exp(x * H(x, x); alg=Trotter{2}(2)), s)[1, 1] - end - args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - - f = function (x) - y = -x * (Op("X", 1) * Op("X", 2) + Op("Z", 1) * Op("Z", 2)) - U = ITensor(y, s) - return norm(U * V) - end - args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - if VERSION ≥ v"1.8" - f = function (x) - y = exp(-x * (Op("X", 1) * Op("X", 2) + Op("Z", 1) * Op("Z", 2)); alg=Trotter{1}(1)) - U = ITensor(y, s) - return norm(U * V) - end - args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - ## XXX: Fix - f = function (x) - y = exp(-x * Op("X", 1) * Op("X", 2)) - y *= exp(-x * Op("X", 1) * Op("X", 2)) - U = Prod{ITensor}(y, s) - return norm(U(V)) - end - args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - ## XXX: Fix - f = function (x) - y = exp(-x * (Op("X", 1) + Op("Z", 1) + Op("Z", 1)); alg=Trotter{1}(1)) - U = Prod{ITensor}(y, s) - return norm(U(V)) - end - args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end -end diff --git a/test/ext/ITensorsChainRulesCoreExt/utils/chainrulestestutils.jl 
b/test/ext/ITensorsChainRulesCoreExt/utils/chainrulestestutils.jl deleted file mode 100644 index 0e25cee1cf..0000000000 --- a/test/ext/ITensorsChainRulesCoreExt/utils/chainrulestestutils.jl +++ /dev/null @@ -1,66 +0,0 @@ -using ChainRulesCore -using ChainRulesTestUtils -using FiniteDifferences -using ITensors -using Random - -# -# For ITensor compatibility with FiniteDifferences -# - -function FiniteDifferences.to_vec(A::ITensor) - # TODO: generalize to sparse tensors - # TODO: define `itensor([1.0])` as well - # as `itensor([1.0], ())` to help with generic code. - function vec_to_ITensor(x) - return isempty(inds(A)) ? ITensor(x[]) : itensor(x, inds(A)) - end - return vec(array(A)), vec_to_ITensor -end - -function FiniteDifferences.to_vec(x::Index) - return (Bool[], _ -> x) -end - -function FiniteDifferences.to_vec(x::Tuple{Vararg{Index}}) - return (Bool[], _ -> x) -end - -function FiniteDifferences.to_vec(x::Vector{<:Index}) - return (Bool[], _ -> x) -end - -function FiniteDifferences.to_vec(x::Pair{<:Tuple{Vararg{Index}},<:Tuple{Vararg{Index}}}) - return (Bool[], _ -> x) -end - -function ChainRulesTestUtils.rand_tangent(rng::AbstractRNG, A::ITensor) - # TODO: generalize to sparse tensors - return isempty(inds(A)) ? ITensor(randn(eltype(A))) : random_itensor(eltype(A), inds(A)) -end - -function ChainRulesTestUtils.rand_tangent(rng::AbstractRNG, x::Index) - return NoTangent() -end - -function ChainRulesTestUtils.rand_tangent(rng::AbstractRNG, x::Tuple{Vararg{Index}}) - return NoTangent() -end - -function ChainRulesTestUtils.rand_tangent( - rng::AbstractRNG, x::Pair{<:Tuple{Vararg{Index}},<:Tuple{Vararg{Index}}} -) - return NoTangent() -end - -function ChainRulesTestUtils.test_approx(::AbstractZero, x::Vector{<:Index}, msg; kwargs...) - return ChainRulesTestUtils.@test_msg msg true -end - -# The fallback version would convert to an Array with `collect`, -# which would be incorrect if the indices had different orderings -function ChainRulesTestUtils.test_approx( - actual::ITensor, expected::ITensor, msg=""; kwargs... -) - ChainRulesTestUtils.@test_msg msg isapprox(actual, expected; kwargs...) -end diff --git a/test/ext/ITensorsVectorInterfaceExt/Project.toml b/test/ext/ITensorsVectorInterfaceExt/Project.toml deleted file mode 100644 index 03191f2aab..0000000000 --- a/test/ext/ITensorsVectorInterfaceExt/Project.toml +++ /dev/null @@ -1,3 +0,0 @@ -[deps] -ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" -VectorInterface = "409d34a3-91d5-4945-b6ec-7529ddf182d8" diff --git a/test/ext/ITensorsVectorInterfaceExt/runtests.jl b/test/ext/ITensorsVectorInterfaceExt/runtests.jl deleted file mode 100644 index 428cdf50b3..0000000000 --- a/test/ext/ITensorsVectorInterfaceExt/runtests.jl +++ /dev/null @@ -1,131 +0,0 @@ -@eval module $(gensym()) -using ITensors: Index, dag, inds, random_itensor -using Test: @test, @testset -using VectorInterface: - add, - add!, - add!!, - inner, - scalartype, - scale, - scale!, - scale!!, - zerovector, - zerovector!, - zerovector!! - -const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) -@testset "ITensorsVectorInterfaceExt (eltype=$elt)" for elt in elts - i, j, k = Index.((2, 2, 2)) - a = random_itensor(elt, i, j, k) - b = random_itensor(elt, k, i, j) - α = randn(elt) - β = randn(elt) - αᶜ = randn(complex(elt)) - βᶜ = randn(complex(elt)) - - # add - @test add(a, b) ≈ a + b - @test add(a, b, α) ≈ a + α * b - @test add(a, b, α, β) ≈ β * a + α * b - - @test add(a, b, αᶜ) ≈ a + αᶜ * b - @test add(a, b, αᶜ, βᶜ) ≈ βᶜ * a + αᶜ * b - - # add! 
- a′ = copy(a) - add!(a′, b) - @test a′ ≈ a + b - a′ = copy(a) - add!(a′, b, α) - @test a′ ≈ a + α * b - a′ = copy(a) - add!(a′, b, α, β) - @test a′ ≈ β * a + α * b - - # add!! - a′ = copy(a) - add!!(a′, b) - @test a′ ≈ a + b - a′ = copy(a) - add!!(a′, b, α) - @test a′ ≈ a + α * b - a′ = copy(a) - add!!(a′, b, α, β) - @test a′ ≈ β * a + α * b - - a′ = copy(a) - a′ = add!!(a′, b, αᶜ) - @test a′ ≈ a + αᶜ * b - a′ = copy(a) - a′ = add!!(a′, b, αᶜ, βᶜ) - @test a′ ≈ βᶜ * a + αᶜ * b - - # inner - @test inner(a, b) ≈ (dag(a) * b)[] - @test inner(a, a) ≈ (dag(a) * a)[] - - # scalartype - @test scalartype(a) === elt - @test scalartype(b) === elt - - # scale - @test scale(a, α) ≈ α * a - - @test scale(a, αᶜ) ≈ αᶜ * a - - # scale! - a′ = copy(a) - scale!(a′, α) - @test a′ ≈ α * a - a′ = copy(a) - scale!(a′, b, α) - @test a′ ≈ α * b - - # scale!! - a′ = copy(a) - scale!!(a′, α) - @test a′ ≈ α * a - a′ = copy(a) - scale!!(a′, b, α) - @test a′ ≈ α * b - - a′ = copy(a) - a′ = scale!!(a′, αᶜ) - @test a′ ≈ αᶜ * a - a′ = copy(a) - a′ = scale!!(a′, b, αᶜ) - @test a′ ≈ αᶜ * b - - # zerovector - z = zerovector(a) - @test iszero(z) - @test issetequal(inds(a), inds(z)) - @test eltype(z) === eltype(a) - - z = zerovector(a, complex(elt)) - @test iszero(z) - @test issetequal(inds(a), inds(z)) - @test eltype(z) === complex(eltype(a)) - - # zerovector! - z = copy(a) - zerovector!(z) - @test iszero(z) - @test issetequal(inds(a), inds(z)) - @test eltype(z) === eltype(a) - - # zerovector!! - z = copy(a) - zerovector!!(z, elt) - @test iszero(z) - @test issetequal(inds(a), inds(z)) - @test eltype(z) === eltype(a) - - z = copy(a) - z = zerovector!!(z, complex(elt)) - @test iszero(z) - @test issetequal(inds(a), inds(z)) - @test eltype(z) === complex(eltype(a)) -end -end diff --git a/test/ext/NDTensorsMappedArraysExt/Project.toml b/test/ext/NDTensorsMappedArraysExt/Project.toml deleted file mode 100644 index 2236ea0414..0000000000 --- a/test/ext/NDTensorsMappedArraysExt/Project.toml +++ /dev/null @@ -1,5 +0,0 @@ -[deps] -ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" -LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -MappedArrays = "dbb5928d-eab1-5f90-85c2-b9b0edb7c900" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/test/ext/NDTensorsMappedArraysExt/runtests.jl b/test/ext/NDTensorsMappedArraysExt/runtests.jl deleted file mode 100644 index 6c38ed96fa..0000000000 --- a/test/ext/NDTensorsMappedArraysExt/runtests.jl +++ /dev/null @@ -1,24 +0,0 @@ -@eval module $(gensym()) -using ITensors: Index, itensor -using LinearAlgebra: qr, svd -using MappedArrays: mappedarray -using Test: @test, @testset -f(i::Int...) = float(sum(iⱼ -> iⱼ^2, i)) -f(i::CartesianIndex) = f(Tuple(i)...) 
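For context on the helpers defined above: `mappedarray` builds a lazy array whose entries are computed from the index on each access, which is what the test below wraps in an `itensor`. A minimal sketch independent of the test (the helper `g` is illustrative, not part of the deleted file):

using MappedArrays: mappedarray
g(t) = float(sum(abs2, t))  # entry value computed from an index tuple
a = mappedarray(g ∘ Tuple, CartesianIndices((2, 2)))  # lazy 2×2 array, nothing stored
@assert a[1, 2] == 5.0  # evaluated on access: 1^2 + 2^2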
-@testset "NDTensorsMappedArraysExt" begin - a = mappedarray(f, CartesianIndices((2, 2))) - b = copy(a) - i, j = Index.((2, 2)) - ta = itensor(a, i, j) - tb = itensor(b, i, j) - @test ta ≈ tb - @test ta[i => 1, j => 2] ≈ tb[i => 1, j => 2] - @test 2 * ta ≈ 2 * tb - @test ta + ta ≈ tb + tb - @test ta * ta ≈ tb * tb - ua, sa, va = svd(ta, i) - @test ua * sa * va ≈ ta - qa, ra = qr(ta, i) - @test qa * ra ≈ ta -end -end diff --git a/test/lib/ContractionSequenceOptimization/runtests.jl b/test/lib/ContractionSequenceOptimization/runtests.jl deleted file mode 100644 index c2fe5a8b6f..0000000000 --- a/test/lib/ContractionSequenceOptimization/runtests.jl +++ /dev/null @@ -1,16 +0,0 @@ -using ITensors -using Test - -ITensors.Strided.disable_threads() -ITensors.BLAS.set_num_threads(1) -ITensors.disable_threaded_blocksparse() - -@testset "$(@__DIR__)" begin - filenames = filter(readdir(@__DIR__)) do f - startswith("test_")(f) && endswith(".jl")(f) - end - @testset "Test $(@__DIR__)/$filename" for filename in filenames - println("Running $(@__DIR__)/$filename") - @time include(filename) - end -end diff --git a/test/lib/ContractionSequenceOptimization/test_itensor_contract.jl b/test/lib/ContractionSequenceOptimization/test_itensor_contract.jl deleted file mode 100644 index 58e699dbf0..0000000000 --- a/test/lib/ContractionSequenceOptimization/test_itensor_contract.jl +++ /dev/null @@ -1,161 +0,0 @@ -using ITensors -using Test -import Random: seed! - -seed!(12345) - -using ITensors.ContractionSequenceOptimization: optimal_contraction_sequence, deepmap - -@testset "ITensor contraction sequence optimization" begin - d = 100 - i = Index(d, "i") - A = random_itensor(i', dag(i)) - - # Low level functions - @test dim([1, 2], [4, 5, 6]) == 4 * 5 - @test dim(Int[], [4, 5, 6]) == 1 - - @test !ITensors.using_contraction_sequence_optimization() - - A2 = A' * A - @test hassameinds(A2, (i'', i)) - - ITensors.enable_contraction_sequence_optimization() - @test ITensors.using_contraction_sequence_optimization() - - @test A' * A ≈ A2 - - ITensors.disable_contraction_sequence_optimization() - @test !ITensors.using_contraction_sequence_optimization() - - A3 = A'' * A' * A - @test hassameinds(A3, (i''', i)) - @test contract([A'', A', A]) ≈ A3 - @test contract([A'', A', A]; sequence="automatic") ≈ A3 - @test contract([A'', A', A]; sequence="left_associative") ≈ A3 - @test contract([A'', A', A]; sequence="right_associative") ≈ A3 - @test contract([A'', A', A]; sequence=[[1, 2], 3]) ≈ A3 - @test contract([A'', A', A]; sequence=[[2, 3], 1]) ≈ A3 - # A bad sequence - @test contract([A'', A', A]; sequence=[[1, 3], 2]) ≈ A3 - - ITensors.enable_contraction_sequence_optimization() - @test ITensors.using_contraction_sequence_optimization() - - @test A'' * A' * A ≈ A3 - @test A * A'' * A' ≈ A3 - @test contract([A'', A', A]) ≈ A3 - @test contract([A, A'', A']) ≈ A3 - @test contract([A'', A', A]; sequence="automatic") ≈ A3 - @test contract([A'', A', A]; sequence="left_associative") ≈ A3 - @test contract([A'', A', A]; sequence="right_associative") ≈ A3 - @test contract([A'', A', A]; sequence=[[1, 2], 3]) ≈ A3 - @test contract([A'', A', A]; sequence=[[2, 3], 1]) ≈ A3 - @test contract([A'', A', A]; sequence=[[1, 3], 2]) ≈ A3 - - ITensors.disable_contraction_sequence_optimization() - @test !ITensors.using_contraction_sequence_optimization() - - # This is not the only sequence - @test ITensors.optimal_contraction_sequence([A, A'', A']) == Any[1, Any[2, 3]] - - time_without_opt = @elapsed A * A'' * A' - - 
ITensors.enable_contraction_sequence_optimization() - @test ITensors.using_contraction_sequence_optimization() - - time_with_opt = @elapsed A * A'' * A' - - @test time_with_opt < time_without_opt - - ITensors.disable_contraction_sequence_optimization() - @test !ITensors.using_contraction_sequence_optimization() - - A4 = A''' * A'' * A' * A - @test hassameinds(A4, (i'''', i)) - @test contract([A''', A'', A', A]; sequence=[[[1, 2], 3], 4]) ≈ A4 - - ITensors.enable_contraction_sequence_optimization() - @test ITensors.using_contraction_sequence_optimization() - - @test A'' * A * A''' * A' ≈ A4 - - ITensors.disable_contraction_sequence_optimization() - @test !ITensors.using_contraction_sequence_optimization() - - # This is not the only sequence - @test ITensors.optimal_contraction_sequence([A, A'', A', A''']) == - Any[Any[1, 3], Any[2, 4]] - - time_without_opt = @elapsed A * A'' * A' * A''' - - ITensors.enable_contraction_sequence_optimization() - @test ITensors.using_contraction_sequence_optimization() - - time_with_opt = @elapsed A * A'' * A' * A''' - - @test time_with_opt < time_without_opt - - ITensors.disable_contraction_sequence_optimization() - @test !ITensors.using_contraction_sequence_optimization() -end - -@testset "contract sequence optimization interfaces" begin - # Network and dimensions need to be large enough - # so that tensor allocations dominate over network - # analysis for testing the number of allocations below. - d0 = 2 - δd = 1000 - ntensors = 6 - ElType = Float64 - d = [d0 + (n - 1) * δd for n in 1:ntensors] - t = ["$n" for n in 1:ntensors] - is = Index.(d, t) - - As = [random_itensor(ElType, is[n], is[mod1(n + 1, ntensors)]) for n in 1:ntensors] - - # Warmup - contract(As) - allocations_left_associative = @allocated contract(As) - - allocations_left_associative_pairwise = 0 - tmp = As[1] - for n in 2:length(As) - tmp * As[n] - allocations_left_associative_pairwise += @allocated tmp = tmp * As[n] - end - @test allocations_left_associative ≈ allocations_left_associative_pairwise rtol = 0.01 - - sequence = foldr((x, y) -> [x, y], 1:ntensors) - @test sequence == optimal_contraction_sequence(As) - As_network = foldr((x, y) -> [x, y], As) - @test deepmap(n -> As[n], sequence) == As_network - - # Warmup - contract(As; sequence=sequence) - contract(As; sequence="right_associative") - contract(As; sequence="automatic") - contract(As_network) - - # Measure allocations of different interfaces - allocations_right_associative_1 = @allocated contract(As; sequence=sequence) - allocations_right_associative_2 = @allocated contract(As; sequence="right_associative") - allocations_right_associative_3 = @allocated contract(As; sequence="automatic") - allocations_right_associative_4 = @allocated contract(As_network) - - allocations_right_associative_pairwise = 0 - tmp = As[end] - for n in reverse(1:(length(As) - 1)) - tmp * As[n] - allocations_right_associative_pairwise += @allocated tmp = tmp * As[n] - end - @test allocations_right_associative_pairwise ≈ allocations_right_associative_1 rtol = 0.1 - @test allocations_right_associative_pairwise ≈ allocations_right_associative_2 rtol = 0.1 - @test allocations_right_associative_pairwise ≈ allocations_right_associative_3 rtol = 0.2 - @test allocations_right_associative_pairwise ≈ allocations_right_associative_4 rtol = 0.1 - - @test allocations_right_associative_1 < allocations_left_associative - @test allocations_right_associative_2 < allocations_left_associative - @test allocations_right_associative_3 < allocations_left_associative - @test 
allocations_right_associative_4 < allocations_left_associative -end diff --git a/test/lib/ITensorsNamedDimsArraysExt/Project.toml b/test/lib/ITensorsNamedDimsArraysExt/Project.toml deleted file mode 100644 index ef3a2a843b..0000000000 --- a/test/lib/ITensorsNamedDimsArraysExt/Project.toml +++ /dev/null @@ -1,4 +0,0 @@ -[deps] -ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" -Suppressor = "fd094767-a336-5f1f-9728-57cf17d0bbfb" diff --git a/test/lib/ITensorsNamedDimsArraysExt/runtests.jl b/test/lib/ITensorsNamedDimsArraysExt/runtests.jl deleted file mode 100644 index c0d70bf5ec..0000000000 --- a/test/lib/ITensorsNamedDimsArraysExt/runtests.jl +++ /dev/null @@ -1,8 +0,0 @@ -@eval module $(gensym()) -using ITensors: ITensors -include( - joinpath( - pkgdir(ITensors), "src", "lib", "ITensorsNamedDimsArraysExt", "test", "runtests.jl" - ), -) -end diff --git a/test/lib/LazyApply/outdated/test_lazyapply.jl b/test/lib/LazyApply/outdated/test_lazyapply.jl deleted file mode 100644 index cbdadd25bc..0000000000 --- a/test/lib/LazyApply/outdated/test_lazyapply.jl +++ /dev/null @@ -1,44 +0,0 @@ -using Test -using ITensors.LazyApply: LazyApply, Add, Mul, ∑, ∏, α, materialize - -@testset "LazyApply general functionality" begin - @test materialize(∏([1, 2, Add(3, 4)])) == prod([1, 2, 3 + 4]) - @test ∏([1, 2, Add(3, 4)]) isa ∏ - @test materialize(3 * ∏([1, 2, Add(3, 4)])) == 3 * prod([1, 2, 3 + 4]) - @test materialize(exp(∏([1, 2, ∑([3, 4])]))) == exp(prod([1, 2, sum([3 + 4])])) - @test materialize(2 * ∑([1, 2, ∏([3, 4])])) == 2 * sum([1, 2, prod([3, 4])]) - @test 2 * ∑([1, 2, ∏([3, 4])]) == ∑([2, 4, 2∏([3, 4])]) - @test 2 * ∑([1, 2, ∏([3, 4])]) isa ∑ - @test 2∑(["X", "Y"]) == ∑([Mul(2, "X"), Mul(2, "Y")]) - @test materialize(∑() + 3 + 4) == sum([3, 4]) - @test ∑() + 3 + 4 isa ∑ - @test materialize(∑([1, 2, 3]) + ∑([4, 5, 6])) == sum([1, 2, 3, 4, 5, 6]) - @test ∑([1, 2, 3]) + ∑([4, 5, 6]) isa ∑ - @test materialize(Add(1, 2) + Add(3, 4)) == 1 + 2 + 3 + 4 - @test Add(1, 2) + Add(3, 4) == Add(1, 2, 3, 4) - @test Add(1, 2) + Add(3, 4) isa Add - @test materialize(2 * Add(1, 2)) == 2 * (1 + 2) - @test 2 * Add(1, 2) isa Add - @test materialize(3 + Add(1, 2)) == 3 + 1 + 2 - @test 3 + Add(1, 2) isa Add - @test materialize(2 * ∏([1, 2])) == 2 * prod([1, 2]) - @test 2 * ∏([1, 2]) isa α - @test 2 * ∏([1, 2]) isa α{<:∏} - @test 2 * ∏([1, 2]) isa α{∏{Int}} - @test ∏([1, 2]) + ∏([3, 4]) == ∑([∏([1, 2]), ∏([3, 4])]) - @test ∏([1, 2]) + ∏([3, 4]) isa ∑ - @test materialize(∑(∏([1, 2]) + ∏([3, 4]))) == sum([prod([1, 2]), prod([3, 4])]) - @test ∏([1, 2]) + ∏([3, 4]) == ∑([∏([1, 2]), ∏([3, 4])]) - @test ∏([1, 2]) + ∏([3, 4]) isa ∑ - @test ∏([1, 2]) - ∏([3, 4]) isa ∑ - @test materialize(∏(["X", "Y", "Z"])) == "XYZ" - @test ∏(["X", "Y", "Z"]) isa ∏ - @test materialize(∏() * "X" * "Y" * "Z") == "XYZ" - @test ∏() * "X" * "Y" * "Z" == ∏(["X", "Y", "Z"]) - @test ∏() * "X" * "Y" * "Z" isa ∏ - @test 2∏() * "X" * "Y" == 2∏(["X", "Y"]) - @test 2∏() * "X" * "Y" isa α{<:∏} - @test 2∏() * "X" * "Y" isa α{∏{String}} - @test 2∏() * "X" * "Y" isa α{∏{String},Int} - @test 2∏(["X"]) * 3∏(["Y"]) == 6∏(["X", "Y"]) -end diff --git a/test/lib/LazyApply/runtests.jl b/test/lib/LazyApply/runtests.jl deleted file mode 100644 index c2fe5a8b6f..0000000000 --- a/test/lib/LazyApply/runtests.jl +++ /dev/null @@ -1,16 +0,0 @@ -using ITensors -using Test - -ITensors.Strided.disable_threads() -ITensors.BLAS.set_num_threads(1) -ITensors.disable_threaded_blocksparse() - -@testset "$(@__DIR__)" begin - filenames 
= filter(readdir(@__DIR__)) do f - startswith("test_")(f) && endswith(".jl")(f) - end - @testset "Test $(@__DIR__)/$filename" for filename in filenames - println("Running $(@__DIR__)/$filename") - @time include(filename) - end -end diff --git a/test/lib/LazyApply/test_lazyapply.jl b/test/lib/LazyApply/test_lazyapply.jl deleted file mode 100644 index 647e96276e..0000000000 --- a/test/lib/LazyApply/test_lazyapply.jl +++ /dev/null @@ -1,36 +0,0 @@ -using Test -using ITensors.LazyApply: LazyApply, Sum, Prod, Scaled, materialize - -@testset "LazyApply general functionality" begin - @test (materialize ∘ materialize ∘ materialize)(exp(Prod([1, 2, Sum([3, 4])]))) == - exp(prod([1, 2, sum([3 + 4])])) - @test_broken materialize(2 * Sum([1, 2, Prod([3, 4])])) == 2 * sum([1, 2, prod([3, 4])]) - @test 2 * Sum([1, 2, Prod([3, 4])]) == Sum([2, 4, 2Prod([3, 4])]) - @test 2 * Sum([1, 2, Prod([3, 4])]) isa Sum - @test_broken materialize(Sum() + 3 + 4) == sum([3, 4]) - @test_broken Sum() + 3 + 4 isa Sum - @test materialize(Sum([1, 2, 3]) + Sum([4, 5, 6])) == sum([1, 2, 3, 4, 5, 6]) - @test Sum([1, 2, 3]) + Sum([4, 5, 6]) isa Sum - @test materialize(2 * Prod([1, 2])) == 2 * prod([1, 2]) - @test_broken 2 * Prod([1, 2]) isa Scaled - @test_broken 2 * Prod([1, 2]) isa Scaled{<:Prod} - @test_broken 2 * Prod([1, 2]) isa Scaled{Prod{Int}} - @test 2 * Prod([1, 2]) isa Prod{Int} - @test Prod([1, 2]) + Prod([3, 4]) == Sum([Prod([1, 2]), Prod([3, 4])]) - @test Prod([1, 2]) + Prod([3, 4]) isa Sum - @test_broken materialize(Sum(Prod([1, 2]) + Prod([3, 4]))) == - sum([prod([1, 2]), prod([3, 4])]) - @test Prod([1, 2]) + Prod([3, 4]) == Sum([Prod([1, 2]), Prod([3, 4])]) - @test Prod([1, 2]) + Prod([3, 4]) isa Sum - @test_broken Prod([1, 2]) - Prod([3, 4]) isa Sum - @test materialize(Prod(["X", "Y", "Z"])) == "XYZ" - @test Prod(["X", "Y", "Z"]) isa Prod - @test_broken materialize(Prod() * "X" * "Y" * "Z") == "XYZ" - @test_broken Prod() * "X" * "Y" * "Z" == Prod(["X", "Y", "Z"]) - @test_broken Prod() * "X" * "Y" * "Z" isa Prod - @test_broken 2Prod() * "X" * "Y" == 2Prod(["X", "Y"]) - @test_broken 2Prod() * "X" * "Y" isa Scaled{<:Prod} - @test_broken 2Prod() * "X" * "Y" isa Scaled{Prod{String}} - @test_broken 2Prod() * "X" * "Y" isa Scaled{Prod{String},Int} - @test_broken 2Prod(["X"]) * 3Prod(["Y"]) == 6Prod(["X", "Y"]) -end diff --git a/test/lib/Ops/runtests.jl b/test/lib/Ops/runtests.jl deleted file mode 100644 index c2fe5a8b6f..0000000000 --- a/test/lib/Ops/runtests.jl +++ /dev/null @@ -1,16 +0,0 @@ -using ITensors -using Test - -ITensors.Strided.disable_threads() -ITensors.BLAS.set_num_threads(1) -ITensors.disable_threaded_blocksparse() - -@testset "$(@__DIR__)" begin - filenames = filter(readdir(@__DIR__)) do f - startswith("test_")(f) && endswith(".jl")(f) - end - @testset "Test $(@__DIR__)/$filename" for filename in filenames - println("Running $(@__DIR__)/$filename") - @time include(filename) - end -end diff --git a/test/lib/Ops/test_ops.jl b/test/lib/Ops/test_ops.jl deleted file mode 100644 index 8061194d41..0000000000 --- a/test/lib/Ops/test_ops.jl +++ /dev/null @@ -1,247 +0,0 @@ -using Test -using ITensors -using LinearAlgebra - -using ITensors.Ops: Ops, Op, OpSum, Prod, Scaled, Sum, coefficient, expand -using ITensors.SiteTypes: op, siteinds - -function heisenberg(N) - os = Sum{Op}() - for j in 1:(N - 1) - os += "Sz", j, "Sz", j + 1 - os += 0.5, "S+", j, "S-", j + 1 - os += 0.5, "S-", j, "S+", j + 1 - end - return os -end - -@testset "Basic Ops" begin - x1 = Op("X", 1) - x2 = Op("X", 2) - I1 = Op(I, 1) - I2 = Op(I, 
2) - y1 = Op("Y", 1) - y2 = Op("Y", 2) - CX12 = Op("CX", 1, 2) - Ry4 = Op("Ry", 4; θ=π / 3) - - @test 2y2 isa Scaled{<:Number,Op} - @test coefficient(2y2) == 2 - @test y2 / 2 isa Scaled{<:Number,Op} - @test coefficient(y2 / 2) ≈ 0.5 - @test -y2 isa Scaled{<:Number,Op} - @test 1y2 + x1 isa Sum{<:Scaled{<:Number,Op}} - @test 1y2 + x1 isa Sum{Scaled{Int,Op}} - @test x1 * y2 isa Prod{Op} - @test 2x1 * y2 isa Scaled{<:Number,Prod{Op}} - @test x1 * y2 + CX12 isa Sum{Prod{Op}} - @test x1 * y2 + x1 * CX12 isa Sum{Prod{Op}} - @test x1 * y2 + 2CX12 isa Sum{<:Scaled{<:Number,Prod{Op}}} - @test x1 * y2 - CX12 isa Sum{<:Scaled{<:Number,Prod{Op}}} - @test 2x1 * y2 + 2CX12 isa Sum{<:Scaled{<:Number,Prod{Op}}} - @test 2x1 * y2 - 2CX12 isa Sum{<:Scaled{<:Number,Prod{Op}}} - @test (2x1 * y2 - 2CX12) / 3 isa Sum{<:Scaled{<:Number,Prod{Op}}} - - o1 = Op("X", 1) - o2 = Op("Y", 2) - - @test o1 + o2 isa Sum{Op} - @test o1 - o2 isa Sum{Scaled{Int,Op}} - @test 1.3 * o1 isa Scaled{Float64,Op} - @test o1 * 1.4 isa Scaled{Float64,Op} - @test o1 + o2 + o2 isa Sum{Op} - @test 1.3o1 + 1.3o2 isa Sum{Scaled{Float64,Op}} - @test 1.3o1 + o2 isa Sum{Scaled{Float64,Op}} - @test (o1 + o2) + (o1 + o2) isa Sum{Op} - @test 1.3o1 + 1o2 isa Sum{Scaled{Float64,Op}} - @test 1.3 * (o1 + o2) isa Sum{Scaled{Float64,Op}} - @test o1 + o2 + 1.3o2 isa Sum{Scaled{Float64,Op}} - @test o1 * o2 isa Prod{Op} - @test o1 * o2 * o2 isa Prod{Op} - @test o1 * (o2 * o2) isa Prod{Op} - @test 1.3 * o1 * o2 isa Scaled{Float64,Prod{Op}} - @test 1.3 * (o1 * o2) isa Scaled{Float64,Prod{Op}} - @test 1.3 * o1 * o2 + o1 isa Sum{Scaled{Float64,Prod{Op}}} - @test 1.3 * o1 * o2 + o1 * o2 isa Sum{Scaled{Float64,Prod{Op}}} - @test 1.3 * o1 * o2 + 1.3 * o1 * o2 isa Sum{Scaled{Float64,Prod{Op}}} - @test 1.3 * o1 * o2 + 1.3 * o1 * o2 + o1 isa Sum{Scaled{Float64,Prod{Op}}} - @test 1.3 * o1 * o2 + 1.3 * o1 * o2 + 1.2 * o1 isa Sum{Scaled{Float64,Prod{Op}}} - @test Ops.OpSum() + o1 isa Sum{Scaled{ComplexF64,Prod{Op}}} - @test Ops.OpSum() + 1.2 * o1 isa Sum{Scaled{ComplexF64,Prod{Op}}} - @test Ops.OpSum() + (1.2 + 2.3im) * o1 isa Sum{Scaled{ComplexF64,Prod{Op}}} - @test Ops.OpSum() + 1.2 * o1 * o2 isa Sum{Scaled{ComplexF64,Prod{Op}}} - @test Ops.OpSum() - 1.2 * o1 * o2 isa Sum{Scaled{ComplexF64,Prod{Op}}} - @test Ops.OpSum() + o1 * o2 isa Sum{Scaled{ComplexF64,Prod{Op}}} - @test o1 + o2 + 2.3 * o1 * o2 isa Sum{Scaled{Float64,Prod{Op}}} - @test Sum{Op}() + ("X", 1, "Y", 2) + ("Y", 2) isa Sum{Prod{Op}} - @test Sum{Op}() + ("X", 1, "Y", 2) + (1.2, "Y", 2) isa Sum{Scaled{Float64,Prod{Op}}} - @test OpSum() - (0.5, "Z", 1, "Z", 2) isa Sum{Scaled{ComplexF64,Prod{Op}}} - - N = 4 - s = siteinds("Qubit", N) - - @test ITensor(o1, s) ≈ op("X", s, 1) - @test ITensor(2 * o1, s) ≈ 2 * ITensor(o1, s) - @test ITensor(o1 * o2, s) ≈ ITensor(o1, s) * ITensor(o2, s) - @test ITensor(2 * o1 * o2, s) ≈ 2 * ITensor(o1, s) * ITensor(o2, s) - @test ITensor(2 * o1 * o2 + o1 * o2, s) ≈ - 2 * ITensor(o1, s) * ITensor(o2, s) + ITensor(o1, s) * ITensor(o2, s) - @test ITensor(exp(o1), s) ≈ exp(ITensor(o1, s)) - @test ITensor(exp(1.2 * o1), s) ≈ exp(1.2 * ITensor(o1, s)) - @test ITensor(1.3 * exp(1.2 * o1), s) ≈ 1.3 * exp(1.2 * ITensor(o1, s)) - - o = (2x1 * y2 - 2CX12) / 3 - @test coefficient(o[1]) ≈ 2 / 3 - @test coefficient(o[2]) ≈ -2 / 3 - - t1 = ITensor(x1, s) - @test hassameinds(t1, (s[1]', dag(s[1]))) - @test t1[1, 1] == 0 - @test t1[1, 2] == 1 - - @test ITensor(2.3x1, s) ≈ 2.3 * t1 - @test ITensor(x1 + x1, s) ≈ 2t1 - @test ITensor(x1 + 2.3x1, s) ≈ 3.3t1 - - @test ITensor(Op(I, 2), s) ≈ ITensor([1 
0; 0 1], s[2]', dag(s[2])) - @test ITensor(Op(2I, 2), s) ≈ 2 * ITensor([1 0; 0 1], s[2]', dag(s[2])) - - c = x1 * y2 * CX12 - cdag = c' - @test c[1]' == cdag[3] - @test c[2]' == cdag[2] - @test c[3]' == cdag[1] - - x = randn(2, 2) - tx = ITensor(Op(x, 3), s) - @test tx[s[3]' => 1, s[3] => 2] == x[1, 2] - - @test ITensor(x1 * x1, s) ≈ ITensor(Op([1 0; 0 1], 1), s) - @test ITensor(x1 * x1 * x1, s) ≈ ITensor(Op([0 1; 1 0], 1), s) - @test ITensor(2x1 * x1, s) ≈ ITensor(Op([2 0; 0 2], 1), s) - @test ITensor(x1 * y1, s) ≈ ITensor(Op([im 0; 0 -im], 1), s) - @test ITensor(y1 * x1, s) ≈ ITensor(Op([-im 0; 0 im], 1), s) - @test ITensor(2x1 * x1 + y1, s) ≈ - ITensor(2 * [1 0; 0 1] + [0 -im; im 0], s[1]', dag(s[1])) - - # TODO: Need to add support for filling out with "Id" or "F" - @test_broken ITensor(2y1 * x2 + x1, s) ≈ - 2 * ITensor(y1, s) * ITensor(x2, s) + ITensor(x1, s) * ITensor(I2, s) - - @test y1'' == y1 - - @test ITensor(y1', s) ≈ ITensor(Op([0 -im; im 0], 1), s) - - @test ITensor(exp(x1), s) ≈ ITensor(Op(exp([0 1; 1 0]), 1), s) - @test ITensor(exp(2x1 * x1), s) ≈ ITensor(exp(2 * [1 0; 0 1]), s[1]', dag(s[1])) - @test ITensor(exp(2x1 * x1 + y1), s) ≈ - ITensor(exp(2 * [1 0; 0 1] + [0 -im; im 0]), s[1]', dag(s[1])) - - @test ITensor(I1, s) ≈ ITensor([1 0; 0 1], s[1]', dag(s[1])) - - @test exp(Op("X", 1)) * Op("Y", 2) isa Prod{Any} - @test ITensor(exp(Op("X", 1)) * Op("Y", 1), s) ≈ - product(exp(ITensor(Op("X", 1), s)), ITensor(Op("Y", 1), s)) - - # TODO: Need to define `(::Scaled * ::Op)::Scaled` - @test_broken 2exp(Op("X", 1)) * Op("Y", 2) isa Scaled{<:Number,Prod{Any}} - - H = Sum{Scaled{Bool,Prod{Op}}}() - Op("X", 1) - @test H isa Sum - @test H isa Sum{<:Scaled} - @test H isa Sum{<:Scaled{<:Number,<:Prod}} - @test H isa Sum{<:Scaled{<:Number,Prod{Op}}} - @test H isa Sum{Scaled{T,Prod{Op}}} where {T} - @test H isa Sum{Scaled{Int,Prod{Op}}} - @test length(H) == 1 - @test coefficient(H[1]) == -1 - - H = Sum{Op}() - Op("X", 1) - @test H isa Sum - @test H isa Sum{<:Scaled} - @test H isa Sum{<:Scaled{<:Number,Op}} - @test H isa Sum{Scaled{T,Op}} where {T} - @test H isa Sum{Scaled{Int,Op}} - @test length(H) == 1 - @test coefficient(H[1]) == -1 - - # OpSum conversion - H = Sum{Op}() - H -= 2.3, "X", 1, "X", 2 - H += 1.2, "Z", 1 - H += 1.3, "Z", 2, (θ=π / 3,) - @test H isa Sum{Scaled{Float64,Prod{Op}}} - @test length(H) == 3 - @test coefficient(H[1]) == -2.3 - @test length(H[1]) == 2 - @test Ops.sites(H[1]) == [1, 2] - @test coefficient(H[2]) == 1.2 - @test length(H[2]) == 1 - @test Ops.sites(H[2]) == [1] - @test coefficient(H[3]) == 1.3 - @test length(H[3]) == 1 - @test Ops.sites(H[3]) == [2] - @test Ops.params(H[3]) == (θ=π / 3,) - - @test_broken Sum{Op}(("X", 1)) isa Sum{Op} - @test_broken Sum{Op}((2.3, "X", 1)) isa Sum{Scaled{Float64,Op}} - @test_broken Sum{Op}("X", 1) isa Sum{Op} - @test_broken Sum{Op}(2, "X", 1) isa Sum{Scaled{Int,Op}} - @test_broken Sum{Op}([Op("X", 1), 2Op("Y", 1)]) isa Sum - @test_broken Sum{Op}([Op("X", 1), 2Op("Y", 1)]) isa Sum{<:Scaled} - @test_broken Sum{Op}([Op("X", 1), 2Op("Y", 1)]) isa Sum{<:Scaled{<:Number,Op}} - @test_broken Sum{Op}([Op("X", 1), 2Op("Y", 1)]) isa Sum{Scaled{Int,Op}} - - @testset "Expand expression, 2 products" begin - expr = (Op("X", 1) + Op("Y", 2)) * (Op("Z", 1) + Op("W", 2)) - expr_expanded = - Op("X", 1) * Op("Z", 1) + - Op("Y", 2) * Op("Z", 1) + - Op("X", 1) * Op("W", 2) + - Op("Y", 2) * Op("W", 2) - @test expand(expr) == expr_expanded - end - - @testset "Expand expression, 3 products" begin - expr = (Op("X", 1) + Op("Y", 2)) * (Op("Z", 1) + 
Op("W", 2)) * (Op("A", 1) + Op("B", 2)) - expr_expanded = - Op("X", 1) * Op("Z", 1) * Op("A", 1) + - Op("Y", 2) * Op("Z", 1) * Op("A", 1) + - Op("X", 1) * Op("W", 2) * Op("A", 1) + - Op("Y", 2) * Op("W", 2) * Op("A", 1) + - Op("X", 1) * Op("Z", 1) * Op("B", 2) + - Op("Y", 2) * Op("Z", 1) * Op("B", 2) + - Op("X", 1) * Op("W", 2) * Op("B", 2) + - Op("Y", 2) * Op("W", 2) * Op("B", 2) - @test expand(expr) == expr_expanded - end - - H = heisenberg(4) - @test length(H) == 9 - @test H^2 == H * H - @test length(H^2) == 2 - @test length(expand(H^2)) == 81 - - @testset "Conversion to Sum of ITensors" begin - H = Sum{Op}() + ("X", 1) + ("Y", 2) - @test_broken H == Sum{Op}([("X", 1), ("Y", 2)]) - @test H == Sum{Op}() + Op("X", 1) + Op("Y", 2) - s = siteinds("Qubit", 2) - Hₜ = Sum{ITensor}(H, s) - @test Hₜ isa Sum{ITensor} - @test Hₜ[1] ≈ ITensor(Op("X", 1), s) - @test Hₜ[2] ≈ ITensor(Op("Y", 2), s) - end - - @testset "Conversion to Prod of ITensors" begin - C = Prod{Op}() * ("X", 1) * ("Y", 2) - @test_broken C == Prod{Op}([("X", 1), ("Y", 2)]) - @test C == Prod{Op}() * Op("X", 1) * Op("Y", 2) - @test C == Op("X", 1) * Op("Y", 2) - s = siteinds("Qubit", 2) - Cₜ = Prod{ITensor}(C, s) - @test Cₜ isa Prod{ITensor} - @test Cₜ[1] ≈ ITensor(Op("X", 1), s) - @test Cₜ[2] ≈ ITensor(Op("Y", 2), s) - end -end diff --git a/test/lib/Ops/test_trotter.jl b/test/lib/Ops/test_trotter.jl deleted file mode 100644 index 479eb20d82..0000000000 --- a/test/lib/Ops/test_trotter.jl +++ /dev/null @@ -1,29 +0,0 @@ -using Test -using ITensors -using ITensors.Ops: Op, Prod, Sum, Trotter -using ITensors.SiteTypes: siteinds - -@testset "Simple trotterization" begin - H = Sum{Op}() + ("X", 1) + ("Y", 1) - - s = siteinds("Qubit", 1) - - for nsteps in [10, 100, 1000] - expHᵉˣᵃᶜᵗ = ITensor(exp(H), s) - @test expHᵉˣᵃᶜᵗ ≈ ITensor(exp(H; alg=Trotter{1}(nsteps)), s) rtol = 1 / nsteps - @test expHᵉˣᵃᶜᵗ ≈ ITensor(exp(H; alg=Trotter{2}(nsteps)), s) rtol = (1 / nsteps)^2 - @test_broken expHᵉˣᵃᶜᵗ ≈ ITensor(exp(H; alg=Trotter{4}(nsteps)), s) rtol = - (1 / nsteps)^2 - @test_broken expHᵉˣᵃᶜᵗ ≈ ITensor(exp(H; alg=Trotter{8}(nsteps)), s) rtol = - (1 / nsteps)^2 - - # Convert to ITensors - t = 1.0 - Uᵉˣᵃᶜᵗ = ITensor(exp(im * t * H), s) - U = Prod{ITensor}(exp(im * t * H; alg=Trotter{2}(nsteps)), s) - ψ₀ = onehot(s .=> "0") - Uᵉˣᵃᶜᵗψ₀ = Uᵉˣᵃᶜᵗ(ψ₀) - Uψ₀ = U(ψ₀) - @test Uᵉˣᵃᶜᵗψ₀ ≈ Uψ₀ rtol = (1 / nsteps)^2 - end -end diff --git a/test/runtests.jl b/test/runtests.jl index 4724bc8281..bd9744118d 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,35 +1,60 @@ -using ITensors -using Test +using SafeTestsets: @safetestset +using Suppressor: Suppressor -ITensors.Strided.disable_threads() -ITensors.BLAS.set_num_threads(1) -ITensors.disable_threaded_blocksparse() +# check for filtered groups +# either via `--group=ALL` or through ENV["GROUP"] +const pat = r"(?:--group=)(\w+)" +arg_id = findfirst(contains(pat), ARGS) +const GROUP = uppercase( + if isnothing(arg_id) + get(ENV, "GROUP", "ALL") + else + only(match(pat, ARGS[arg_id]).captures) + end, +) -@testset "ITensors tests" begin - # Make a copy in case a test modifies it. - test_args = copy(ARGS) - println("Passed arguments ARGS = $(test_args) to tests.") - if isempty(test_args) || "all" in test_args || "base" in test_args - println( - """\nArguments ARGS = $(test_args) are empty, or contain `"all"` or `"base"`. 
Running base (non-MPS/MPO) ITensors tests.""", - ) - dirs = [ - "lib/LazyApply", - "lib/Ops", - "base", - "threading", - "lib/ContractionSequenceOptimization", - "lib/ITensorsNamedDimsArraysExt", - "ext/ITensorsChainRulesCoreExt", - "ext/ITensorsVectorInterfaceExt", - "ext/NDTensorsMappedArraysExt", - ] - @time for dir in dirs - println("\nTest $(@__DIR__)/$(dir)") - @time include(joinpath(@__DIR__, dir, "runtests.jl")) - if ARGS ≠ test_args - # Fix ARGS in case a test modifies it. - append!(empty!(ARGS), test_args) +"match files of the form `test_*.jl`, but exclude `*setup*.jl`" +istestfile(fn) = + endswith(fn, ".jl") && startswith(basename(fn), "test_") && !contains(fn, "setup") +"match files of the form `*.jl`, but exclude `*_notest.jl` and `*setup*.jl`" +isexamplefile(fn) = + endswith(fn, ".jl") && !endswith(fn, "_notest.jl") && !contains(fn, "setup") + +@time begin + # tests in groups based on folder structure + for testgroup in filter(isdir, readdir(@__DIR__)) + if GROUP == "ALL" || GROUP == uppercase(testgroup) + for file in filter(istestfile, readdir(joinpath(@__DIR__, testgroup); join=true)) + @eval @safetestset $file begin + include($file) + end + end + end + end + + # single files in top folder + for file in filter(istestfile, readdir(@__DIR__)) + (file == basename(@__FILE__)) && continue # exclude this file to avoid infinite recursion + @eval @safetestset $file begin + include($file) + end + end + + # test examples + examplepath = joinpath(@__DIR__, "..", "examples") + for (root, _, files) in walkdir(examplepath) + contains(chopprefix(root, @__DIR__), "setup") && continue + for file in filter(isexamplefile, files) + filename = joinpath(root, file) + @eval begin + @safetestset $file begin + $(Expr( + :macrocall, + GlobalRef(Suppressor, Symbol("@suppress")), + LineNumberNode(@__LINE__, @__FILE__), + :(include($filename)), + )) + end end end end diff --git a/test/test_aqua.jl b/test/test_aqua.jl new file mode 100644 index 0000000000..876c46d2a0 --- /dev/null +++ b/test/test_aqua.jl @@ -0,0 +1,7 @@ +using ITensors: ITensors +using Aqua: Aqua +using Test: @testset + +@testset "Code quality (Aqua.jl)" begin + Aqua.test_all(ITensors) +end diff --git a/test/threading/runtests.jl b/test/threading/runtests.jl deleted file mode 100644 index c2fe5a8b6f..0000000000 --- a/test/threading/runtests.jl +++ /dev/null @@ -1,16 +0,0 @@ -using ITensors -using Test - -ITensors.Strided.disable_threads() -ITensors.BLAS.set_num_threads(1) -ITensors.disable_threaded_blocksparse() - -@testset "$(@__DIR__)" begin - filenames = filter(readdir(@__DIR__)) do f - startswith("test_")(f) && endswith(".jl")(f) - end - @testset "Test $(@__DIR__)/$filename" for filename in filenames - println("Running $(@__DIR__)/$filename") - @time include(filename) - end -end diff --git a/test/threading/test_threading.jl b/test/threading/test_threading.jl deleted file mode 100644 index 3a40c0f7a9..0000000000 --- a/test/threading/test_threading.jl +++ /dev/null @@ -1,83 +0,0 @@ -using Compat -using ITensors -using Test -using LinearAlgebra - -if isone(Threads.nthreads()) - @warn "Testing block sparse multithreading but only one thread is set!" 
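The toggling pattern exercised below relies on `ITensors.enable_threaded_blocksparse(flag)` returning the previous setting, which is what lets these tests save and restore global state. A minimal sketch of that save/restore idiom:

using ITensors: ITensors
was_enabled = ITensors.enable_threaded_blocksparse(true)  # returns the prior setting
@assert ITensors.using_threaded_blocksparse()
ITensors.enable_threaded_blocksparse(was_enabled)  # restore the previous state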
-end - -@testset "Threading" begin - blas_num_threads = Compat.get_num_threads() - strided_num_threads = ITensors.NDTensors.Strided.get_num_threads() - - BLAS.set_num_threads(1) - ITensors.NDTensors.Strided.set_num_threads(1) - - @testset "Getting and setting global flags" begin - enabled0 = ITensors.enable_threaded_blocksparse(false) - @test !ITensors.using_threaded_blocksparse() - enabled1 = ITensors.enable_threaded_blocksparse(true) - @test !enabled1 - @test ITensors.using_threaded_blocksparse() - enabled2 = ITensors.enable_threaded_blocksparse(false) - @test enabled2 - @test !ITensors.using_threaded_blocksparse() - enabled3 = ITensors.enable_threaded_blocksparse(enabled0) - @test !enabled3 - @test ITensors.using_threaded_blocksparse() == enabled0 - end - - @testset "Threaded contraction" begin - i = Index([QN(0) => 500, QN(1) => 500]) - A = random_itensor(i', dag(i)) - - enabled = ITensors.disable_threaded_blocksparse() - R = A' * A - ITensors.enable_threaded_blocksparse() - Rthreaded = A' * A - @test R ≈ Rthreaded - if !enabled - ITensors.disable_threaded_blocksparse() - end - - # New interface - enabled = ITensors.enable_threaded_blocksparse(false) - R = A' * A - ITensors.enable_threaded_blocksparse(true) - Rthreaded = A' * A - @test R ≈ Rthreaded - ITensors.enable_threaded_blocksparse(enabled) - - # TODO: Test timing? - # ITensors.enable_threaded_blocksparse(false) - # time = @elapsed B = A' * A - # ITensors.enable_threaded_blocksparse(true) - # time_threaded = @elapsed B = A' * A - # @test time > time_threaded - - end - - @testset "Contraction resulting in no blocks with threading bug" begin - i = Index([QN(0) => 1, QN(1) => 1]) - A = ITensor(i', dag(i)) - B = ITensor(i', dag(i)) - A[i' => 1, i => 1] = 11.0 - B[i' => 2, i => 2] = 22.0 - - enabled = ITensors.enable_threaded_blocksparse(false) - C1 = A' * B - ITensors.enable_threaded_blocksparse(true) - C2 = A' * B - ITensors.enable_threaded_blocksparse(enabled) - - @test nnzblocks(C1) == 0 - @test nnzblocks(C2) == 0 - @test nnz(C1) == 0 - @test nnz(C2) == 0 - @test C1 ≈ C2 - end - - BLAS.set_num_threads(blas_num_threads) - ITensors.NDTensors.Strided.set_num_threads(strided_num_threads) -end
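For reference, the rewritten test/runtests.jl above selects test groups by directory name (matched case-insensitively) via `--group=<name>` in ARGS or through `ENV["GROUP"]`, defaulting to ALL. A hedged sketch of invoking a single group; the group name `base` is an assumption based on the folder layout, and passing arguments this way assumes Pkg's `test_args` keyword:

using Pkg: Pkg
# Run only the tests under test/base/:
Pkg.test("ITensors"; test_args=["--group=base"])
# Equivalently, via the environment variable:
withenv("GROUP" => "base") do
  Pkg.test("ITensors")
end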